diff --git a/.github/copilot-instructions.md b/.github/copilot-instructions.md
new file mode 100644
index 0000000000..42b2da2827
--- /dev/null
+++ b/.github/copilot-instructions.md
@@ -0,0 +1,132 @@
+# Bittensor PR Review Guidelines
+
+You are reviewing code for a Substrate-based blockchain with a $4B market cap. Lives and livelihoods depend on the security and correctness of this code. Be thorough, precise, and uncompromising on safety.
+
+## Branch Strategy
+* Unless this is a hotfix or deployment PR (`devnet-ready` => `devnet`, `devnet` => `testnet`, or `testnet` => `main`), all PRs must target `devnet-ready`
+* Flag PRs targeting `main` directly unless they are hotfixes
+* `devnet` and `testnet` branches must only receive merges from their respective `-ready` branches
+
+## CRITICAL: Runtime Safety (Chain-Bricking Prevention)
+The runtime CANNOT panic under any circumstances. A single panic can brick the entire chain.
+
+**Panic Sources to Flag:**
+* Direct indexing: `vec[i]`, `arr[3]` → Must use `.get()` returning `Option`
+* `.unwrap()`, `.expect()` → Must handle `Result`/`Option` properly
+* `.unwrap_or()` is acceptable only with safe defaults
+* Unchecked arithmetic: `a + b`, `a - b`, `a * b`, `a / b` → Must use `checked_*` or `saturating_*`
+* Division without zero checks
+* Type conversions: `.try_into()` without handling, casting that could truncate
+* Iterator operations that assume non-empty collections: `.first().unwrap()`, `.last().unwrap()`
+* String operations: slicing without bounds checking
+* `unsafe` blocks (absolutely prohibited in runtime)
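+
+For illustration, a minimal sketch of the panic-free patterns reviewers should expect in place of the constructs above (plain Rust rather than FRAME pallet code; the function and variable names are hypothetical, not taken from this repository):
+
+```rust
+/// Hypothetical helper. The panicking version a reviewer should flag looks like:
+///     let balance = balances[index];               // can panic: out of bounds
+///     let share = total / (balances.len() as u64); // can panic: division by zero
+///     balance + share                              // can overflow
+/// The panic-free equivalent returns `None` instead of panicking:
+fn safe_share(total: u64, balances: &[u64], index: usize) -> Option<u64> {
+    // Bounds-checked access instead of direct indexing.
+    let balance = *balances.get(index)?;
+    // Fallible conversion instead of an `as` cast that could silently truncate.
+    let count = u64::try_from(balances.len()).ok()?;
+    // `checked_div` yields `None` on a zero divisor instead of panicking.
+    let share = total.checked_div(count)?;
+    // `checked_add` (or `saturating_add` where capping is acceptable) instead of `+`.
+    balance.checked_add(share)
+}
+```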
+
+## Substrate-Specific Vulnerabilities
+
+### Storage Safety
+* Unbounded storage iterations (DoS vector) - check for loops over storage maps without limits
+* Missing storage deposits/bonds for user-created entries (state bloat attack)
+* Storage migrations without proper version checks or error handling
+* Direct storage manipulation without proper weight accounting
+* `kill_storage()` or storage removals without cleanup of dependent data
+
+### Weight & Resource Exhaustion
+* Missing or incorrect `#[pallet::weight]` annotations
+* Computational complexity not reflected in weight calculations
+* Database reads/writes not accounted for in weights
+* Potential for weight exhaustion attacks through parameter manipulation
+* Loops with user-controlled bounds in extrinsics
+
+### Origin & Permission Checks
+* Missing `ensure_signed`, `ensure_root`, or `ensure_none` checks
+* Origin checks that can be bypassed
+* Privilege escalation paths
+* Missing checks before state-modifying operations
+* Incorrect origin forwarding in cross-pallet calls
+
+### Economic & Cryptoeconomic Exploits
+* Integer overflow/underflow in token/balance calculations
+* Rounding errors that can be exploited (especially in repeated operations)
+* MEV/front-running vulnerabilities in auction/pricing mechanisms
+* Flash loan-style attacks or single-block exploits
+* Reward calculation errors or manipulation vectors
+* Slashing logic vulnerabilities
+* Economic denial of service (forcing expensive operations on others)
+
+### Migration Safety
+* Migrations without try-state checks or validation
+* Missing version guards (checking current vs new version)
+* Unbounded migrations that could time out
+* Data loss risks during migration
+* Missing rollback handling for failed migrations
+
+### Consensus & Chain State
+* Anything that could cause non-deterministic behavior (randomness sources, timestamps without validation)
+* Fork-causing conditions due to different execution paths
+* Block production or finalization blockers
+* Validator set manipulation vulnerabilities
+
+### Cross-Pallet Interactions
+* Reentrancy-like patterns when calling other pallets
+* Circular dependencies between pallets
+* Assumptions about other pallet state that could be violated
+* Missing error handling from pallet calls
+
+## Supply Chain & Dependency Security
+
+**Flag any PR that:**
+* Adds new dependencies (require justification and thorough vetting)
+* Updates cryptographic or core dependencies
+* Uses dependencies with known vulnerabilities (check advisories)
+* Depends on unmaintained or obscure crates
+* Introduces git dependencies or path dependencies pointing outside the repo
+* Uses pre-release versions of critical dependencies
+* Includes large dependency version jumps without explanation
+
+**For dependency changes, verify:**
+* Changelog review for security fixes or breaking changes
+* Maintainer reputation and project activity
+* Number of reverse dependencies (more = more scrutiny)
+* Whether it introduces new transitive dependencies
+
+## Code Quality & Maintainability
+
+* Code duplication that could lead to inconsistent bug fixes
+* Overly complex logic that obscures security issues
+* Missing error messages or unclear panic contexts in tests
+* Insufficient test coverage for new extrinsics or storage operations
+* Missing or inadequate documentation for complex algorithms
+* Magic numbers without explanation
+* TODO/FIXME comments introducing technical debt in critical paths
+
+## External Contributor Scrutiny
+For contributors without the "Nucleus" role, apply **maximum scrutiny**:
+* Verify the PR solves a real, documented issue
+* Check for hidden backdoors or logic bombs
+* Review commit history for suspicious patterns
+* Validate that changes match the stated purpose
+* Question any unusual patterns or overcomplicated solutions
+* Require clear explanations for non-obvious changes
+
+## Build & Tooling
+* If lints fail (clippy, rustfmt, cargo check), suggest running `./scripts/fix_rust.sh`
+* Uncommitted `Cargo.lock` changes should be included in the commits
+* Ensure CI passes before deep review
+
+## Review Style
+* Be **concise** - report only legitimate issues, no nitpicks
+* Provide **specific line numbers** and **concrete examples**
+* Suggest **fixes** when possible, not just problems
+* **Severity levels**: Use [CRITICAL], [HIGH], [MEDIUM], [LOW] tags
+* Block PRs on [CRITICAL] and [HIGH] issues
+* For security issues, consider discussing privately before commenting publicly
+
+## Final Check
+Before approving, ask yourself:
+1. Could this brick the chain? (panic, consensus break)
+2. Could this lose or steal funds? (arithmetic, logic errors)
+3. Could this DOS the network? (unbounded operations, weight issues)
+4. Could this introduce a backdoor? (especially for external contributors)
+5. Is this change necessary and minimal?
+
+**Remember: $4B market cap. Err on the side of caution.
When in doubt, escalate.** \ No newline at end of file diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index 1e279b2ebe..f2413b444b 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -31,7 +31,7 @@ Please ensure the following tasks are completed before requesting a review: - [ ] I have performed a self-review of my own code - [ ] I have commented my code, particularly in hard-to-understand areas -- [ ] I have run `cargo fmt` and `cargo clippy` to ensure my code is formatted and linted correctly +- [ ] I have run `./scripts/fix_rust.sh` to ensure my code is formatted and linted correctly - [ ] I have made corresponding changes to the documentation - [ ] My changes generate no new warnings - [ ] I have added tests that prove my fix is effective or that my feature works diff --git a/Cargo.lock b/Cargo.lock index ee3fa03e95..d242b5922f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -290,9 +290,9 @@ dependencies = [ [[package]] name = "anstream" -version = "0.6.20" +version = "0.6.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ae563653d1938f79b1ab1b5e668c87c76a9930414574a6583a7b7e11a8e6192" +checksum = "43d5b281e737544384e969a5ccad3f1cdd24b48086a0fc1b2a5262a26b8f4f4a" dependencies = [ "anstyle", "anstyle-parse", @@ -1056,7 +1056,7 @@ checksum = "9b34d609dfbaf33d6889b2b7106d3ca345eacad44200913df5ba02bfd31d2ba9" [[package]] name = "assets-common" version = "0.22.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "cumulus-primitives-core", "ethereum-standards", @@ -1175,7 +1175,7 @@ dependencies = [ "polling 3.11.0", "rustix 1.1.2", "slab", - "windows-sys 0.61.1", + "windows-sys 0.61.2", ] [[package]] @@ -1270,7 +1270,7 @@ dependencies = [ "rustix 1.1.2", "signal-hook-registry", "slab", - "windows-sys 0.61.1", + "windows-sys 0.61.2", ] [[package]] @@ -1435,7 +1435,7 @@ checksum = "55248b47b0caf0546f7988906588779981c43bb1bc9d0c44087278f80cdb44ba" [[package]] name = "binary-merkle-tree" version = "16.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "hash-db", "log", @@ -1704,7 +1704,7 @@ dependencies = [ [[package]] name = "bp-header-chain" version = "0.21.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "bp-runtime", "finality-grandpa", @@ -1721,7 +1721,7 @@ dependencies = [ [[package]] name = "bp-messages" version = "0.21.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "bp-header-chain", "bp-runtime", @@ -1737,7 +1737,7 @@ dependencies = [ 
[[package]] name = "bp-parachains" version = "0.21.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "bp-header-chain", "bp-polkadot-core", @@ -1754,7 +1754,7 @@ dependencies = [ [[package]] name = "bp-polkadot-core" version = "0.21.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "bp-messages", "bp-runtime", @@ -1770,7 +1770,7 @@ dependencies = [ [[package]] name = "bp-relayers" version = "0.21.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "bp-header-chain", "bp-messages", @@ -1788,7 +1788,7 @@ dependencies = [ [[package]] name = "bp-runtime" version = "0.21.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "frame-support", "frame-system", @@ -1811,7 +1811,7 @@ dependencies = [ [[package]] name = "bp-test-utils" version = "0.21.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "bp-header-chain", "bp-parachains", @@ -1831,7 +1831,7 @@ dependencies = [ [[package]] name = "bp-xcm-bridge-hub" version = "0.7.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "bp-messages", "bp-runtime", @@ -1848,7 +1848,7 @@ dependencies = [ [[package]] name = "bp-xcm-bridge-hub-router" version = "0.18.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "parity-scale-codec", "scale-info", @@ -1860,7 +1860,7 @@ dependencies = [ [[package]] name = "bridge-hub-common" version = "0.14.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "cumulus-primitives-core", "frame-support", @@ -1879,7 +1879,7 @@ 
dependencies = [ [[package]] name = "bridge-runtime-common" version = "0.22.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "bp-header-chain", "bp-messages", @@ -1946,9 +1946,9 @@ checksum = "e3b5ca7a04898ad4bcd41c90c5285445ff5b791899bb1b0abdd2a2aa791211d7" [[package]] name = "bytemuck" -version = "1.23.2" +version = "1.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3995eaeebcdf32f91f980d360f78732ddc061097ab4e39991ae7a6ace9194677" +checksum = "1fbdf580320f38b612e485521afda1ee26d10cc9884efaaa750d383e13e3c5f4" [[package]] name = "byteorder" @@ -2025,9 +2025,9 @@ checksum = "fd6c0e7b807d60291f42f33f58480c0bfafe28ed08286446f45e463728cf9c1c" [[package]] name = "cc" -version = "1.2.39" +version = "1.2.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e1354349954c6fc9cb0deab020f27f783cf0b604e8bb754dc4658ecf0d29c35f" +checksum = "e1d05d92f4b1fd76aad469d46cdd858ca761576082cd37df81416691e50199fb" dependencies = [ "find-msvc-tools", "jobserver", @@ -2241,7 +2241,7 @@ checksum = "fe6d2e5af09e8c8ad56c969f2157a3d4238cebc7c55f0a517728c38f7b200f81" dependencies = [ "serde", "termcolor", - "unicode-width", + "unicode-width 0.2.2", ] [[package]] @@ -2288,7 +2288,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b03b7db8e0b4b2fdad6c551e634134e99ec000e5c8c3b6856c65e8bbaded7a3b" dependencies = [ "unicode-segmentation", - "unicode-width", + "unicode-width 0.2.2", ] [[package]] @@ -2308,15 +2308,15 @@ dependencies = [ [[package]] name = "console" -version = "0.15.11" +version = "0.15.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "054ccb5b10f9f2cbf51eb355ca1d05c2d279ce1804688d0db74b4733a5aeafd8" +checksum = "0e1f83fc076bd6dd27517eacdf25fef6c4dfe5f1d7448bafaaf3a26f13b5e4eb" dependencies = [ "encode_unicode", + "lazy_static", "libc", - "once_cell", - "unicode-width", - "windows-sys 0.59.0", + "unicode-width 0.1.14", + "windows-sys 0.52.0", ] [[package]] @@ -2365,9 +2365,9 @@ checksum = "2f421161cb492475f1661ddc9815a745a1c894592070661180fdec3d4872e9c3" [[package]] name = "const_format" -version = "0.2.34" +version = "0.2.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "126f97965c8ad46d6d9163268ff28432e8f6a1196a55578867832e3049df63dd" +checksum = "7faa7469a93a566e9ccc1c73fe783b4a65c274c5ace346038dca9c39fe0030ad" dependencies = [ "const_format_proc_macros", ] @@ -2711,7 +2711,7 @@ dependencies = [ [[package]] name = "cumulus-client-bootnodes" version = "0.2.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "array-bytes 6.2.3", "async-channel 1.9.0", @@ -2737,7 +2737,7 @@ dependencies = [ [[package]] name = "cumulus-client-cli" version = "0.24.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = 
"git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "clap", "parity-scale-codec", @@ -2754,14 +2754,14 @@ dependencies = [ [[package]] name = "cumulus-client-collator" version = "0.24.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "cumulus-client-consensus-common", "cumulus-client-network", "cumulus-primitives-core", "futures", "parity-scale-codec", - "parking_lot 0.12.4", + "parking_lot 0.12.5", "polkadot-node-primitives", "polkadot-node-subsystem", "polkadot-overseer", @@ -2777,7 +2777,7 @@ dependencies = [ [[package]] name = "cumulus-client-consensus-aura" version = "0.24.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "async-trait", "cumulus-client-collator", @@ -2789,7 +2789,7 @@ dependencies = [ "cumulus-relay-chain-interface", "futures", "parity-scale-codec", - "parking_lot 0.12.4", + "parking_lot 0.12.5", "polkadot-node-primitives", "polkadot-node-subsystem", "polkadot-node-subsystem-util", @@ -2824,7 +2824,7 @@ dependencies = [ [[package]] name = "cumulus-client-consensus-common" version = "0.24.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "async-trait", "cumulus-client-pov-recovery", @@ -2856,7 +2856,7 @@ dependencies = [ [[package]] name = "cumulus-client-consensus-proposer" version = "0.20.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "anyhow", "async-trait", @@ -2871,14 +2871,14 @@ dependencies = [ [[package]] name = "cumulus-client-consensus-relay-chain" version = "0.24.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "async-trait", "cumulus-client-consensus-common", "cumulus-primitives-core", "cumulus-relay-chain-interface", "futures", - "parking_lot 0.12.4", + "parking_lot 0.12.5", "sc-consensus", "sp-api", "sp-block-builder", @@ -2894,14 +2894,14 @@ dependencies = [ [[package]] name = "cumulus-client-network" version = "0.24.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ 
"async-trait", "cumulus-relay-chain-interface", "futures", "futures-timer", "parity-scale-codec", - "parking_lot 0.12.4", + "parking_lot 0.12.5", "polkadot-node-primitives", "polkadot-node-subsystem", "polkadot-parachain-primitives", @@ -2921,7 +2921,7 @@ dependencies = [ [[package]] name = "cumulus-client-parachain-inherent" version = "0.18.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "async-trait", "cumulus-primitives-core", @@ -2931,7 +2931,7 @@ dependencies = [ "parity-scale-codec", "sc-client-api", "sc-consensus-babe", - "sp-crypto-hashing 0.1.0 (git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234)", + "sp-crypto-hashing 0.1.0 (git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80)", "sp-inherents", "sp-runtime", "sp-state-machine", @@ -2942,7 +2942,7 @@ dependencies = [ [[package]] name = "cumulus-client-pov-recovery" version = "0.24.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "async-trait", "cumulus-primitives-core", @@ -2970,7 +2970,7 @@ dependencies = [ [[package]] name = "cumulus-client-service" version = "0.25.1" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "async-channel 1.9.0", "cumulus-client-cli", @@ -3010,7 +3010,7 @@ dependencies = [ [[package]] name = "cumulus-pallet-aura-ext" version = "0.21.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "cumulus-pallet-parachain-system", "frame-support", @@ -3027,7 +3027,7 @@ dependencies = [ [[package]] name = "cumulus-pallet-dmp-queue" version = "0.21.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "cumulus-primitives-core", "frame-benchmarking", @@ -3044,7 +3044,7 @@ dependencies = [ [[package]] name = "cumulus-pallet-parachain-system" version = "0.21.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "bytes", "cumulus-pallet-parachain-system-proc-macro", @@ -3081,7 +3081,7 @@ dependencies = [ [[package]] name = "cumulus-pallet-parachain-system-proc-macro" 
version = "0.6.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "proc-macro-crate 3.4.0", "proc-macro2", @@ -3092,7 +3092,7 @@ dependencies = [ [[package]] name = "cumulus-pallet-session-benchmarking" version = "22.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "frame-benchmarking", "frame-support", @@ -3105,7 +3105,7 @@ dependencies = [ [[package]] name = "cumulus-pallet-solo-to-para" version = "0.21.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "cumulus-pallet-parachain-system", "frame-support", @@ -3120,7 +3120,7 @@ dependencies = [ [[package]] name = "cumulus-pallet-weight-reclaim" version = "0.3.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "cumulus-primitives-storage-weight-reclaim", "derive-where", @@ -3139,7 +3139,7 @@ dependencies = [ [[package]] name = "cumulus-pallet-xcm" version = "0.20.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "cumulus-primitives-core", "frame-support", @@ -3154,7 +3154,7 @@ dependencies = [ [[package]] name = "cumulus-pallet-xcmp-queue" version = "0.21.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "approx", "bounded-collections 0.2.4", @@ -3179,7 +3179,7 @@ dependencies = [ [[package]] name = "cumulus-ping" version = "0.21.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "cumulus-pallet-xcm", "cumulus-primitives-core", @@ -3194,7 +3194,7 @@ dependencies = [ [[package]] name = "cumulus-primitives-aura" version = "0.18.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = 
"git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "sp-api", "sp-consensus-aura", @@ -3203,7 +3203,7 @@ dependencies = [ [[package]] name = "cumulus-primitives-core" version = "0.19.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "parity-scale-codec", "polkadot-core-primitives", @@ -3220,7 +3220,7 @@ dependencies = [ [[package]] name = "cumulus-primitives-parachain-inherent" version = "0.19.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "async-trait", "cumulus-primitives-core", @@ -3234,7 +3234,7 @@ dependencies = [ [[package]] name = "cumulus-primitives-proof-size-hostfunction" version = "0.13.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "sp-externalities", "sp-runtime-interface", @@ -3244,7 +3244,7 @@ dependencies = [ [[package]] name = "cumulus-primitives-storage-weight-reclaim" version = "12.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "cumulus-primitives-core", "cumulus-primitives-proof-size-hostfunction", @@ -3261,7 +3261,7 @@ dependencies = [ [[package]] name = "cumulus-primitives-utility" version = "0.21.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "cumulus-primitives-core", "frame-support", @@ -3278,7 +3278,7 @@ dependencies = [ [[package]] name = "cumulus-relay-chain-inprocess-interface" version = "0.25.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "async-channel 1.9.0", "async-trait", @@ -3306,7 +3306,7 @@ dependencies = [ [[package]] name = "cumulus-relay-chain-interface" version = "0.24.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "async-trait", "cumulus-primitives-core", @@ -3326,7 +3326,7 @@ 
dependencies = [ [[package]] name = "cumulus-relay-chain-minimal-node" version = "0.25.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "array-bytes 6.2.3", "async-channel 1.9.0", @@ -3362,7 +3362,7 @@ dependencies = [ [[package]] name = "cumulus-relay-chain-rpc-interface" version = "0.24.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "async-trait", "cumulus-primitives-core", @@ -3403,7 +3403,7 @@ dependencies = [ [[package]] name = "cumulus-relay-chain-streams" version = "0.2.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "cumulus-relay-chain-interface", "futures", @@ -3417,7 +3417,7 @@ dependencies = [ [[package]] name = "cumulus-test-relay-sproof-builder" version = "0.20.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "cumulus-primitives-core", "parity-scale-codec", @@ -3610,7 +3610,7 @@ dependencies = [ "hashbrown 0.14.5", "lock_api", "once_cell", - "parking_lot_core 0.9.11", + "parking_lot_core 0.9.12", ] [[package]] @@ -4049,9 +4049,9 @@ dependencies = [ [[package]] name = "encode_unicode" -version = "1.0.0" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34aa73646ffb006b8f5147f3dc182bd4bcb190227ce861fc4a4844bf8e3cb2c0" +checksum = "a357d28ed41a50f9c765dbfe56cbc04a64e53e5fc58ba79fbc34c10ef3df831f" [[package]] name = "enum-as-inner" @@ -4158,7 +4158,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "39cab71617ae0d63f51a36d69f866391735b51691dbda63cf6f96d042b63efeb" dependencies = [ "libc", - "windows-sys 0.61.1", + "windows-sys 0.61.2", ] [[package]] @@ -4198,7 +4198,7 @@ dependencies = [ [[package]] name = "ethereum-standards" version = "0.1.1" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "alloy-core", ] @@ -4415,7 +4415,7 @@ dependencies = [ [[package]] name = "fc-api" version = "1.0.0-dev" -source = "git+https://github.com/opentensor/frontier?rev=a741e4695c4e864b443482a03e82529d761e5064#a741e4695c4e864b443482a03e82529d761e5064" +source = "git+https://github.com/opentensor/frontier?rev=e31d47f83a64c361ecf0fd02bf567de6db9bda43#e31d47f83a64c361ecf0fd02bf567de6db9bda43" dependencies = [ "async-trait", "fp-storage", @@ -4427,7 +4427,7 @@ dependencies = [ [[package]] name = "fc-aura" 
version = "1.0.0-dev" -source = "git+https://github.com/opentensor/frontier?rev=a741e4695c4e864b443482a03e82529d761e5064#a741e4695c4e864b443482a03e82529d761e5064" +source = "git+https://github.com/opentensor/frontier?rev=e31d47f83a64c361ecf0fd02bf567de6db9bda43#e31d47f83a64c361ecf0fd02bf567de6db9bda43" dependencies = [ "fc-rpc", "fp-storage", @@ -4443,7 +4443,7 @@ dependencies = [ [[package]] name = "fc-babe" version = "1.0.0-dev" -source = "git+https://github.com/opentensor/frontier?rev=a741e4695c4e864b443482a03e82529d761e5064#a741e4695c4e864b443482a03e82529d761e5064" +source = "git+https://github.com/opentensor/frontier?rev=e31d47f83a64c361ecf0fd02bf567de6db9bda43#e31d47f83a64c361ecf0fd02bf567de6db9bda43" dependencies = [ "fc-rpc", "sc-client-api", @@ -4459,7 +4459,7 @@ dependencies = [ [[package]] name = "fc-consensus" version = "2.0.0-dev" -source = "git+https://github.com/opentensor/frontier?rev=a741e4695c4e864b443482a03e82529d761e5064#a741e4695c4e864b443482a03e82529d761e5064" +source = "git+https://github.com/opentensor/frontier?rev=e31d47f83a64c361ecf0fd02bf567de6db9bda43#e31d47f83a64c361ecf0fd02bf567de6db9bda43" dependencies = [ "async-trait", "fp-consensus", @@ -4475,7 +4475,7 @@ dependencies = [ [[package]] name = "fc-db" version = "2.0.0-dev" -source = "git+https://github.com/opentensor/frontier?rev=a741e4695c4e864b443482a03e82529d761e5064#a741e4695c4e864b443482a03e82529d761e5064" +source = "git+https://github.com/opentensor/frontier?rev=e31d47f83a64c361ecf0fd02bf567de6db9bda43#e31d47f83a64c361ecf0fd02bf567de6db9bda43" dependencies = [ "async-trait", "ethereum", @@ -4489,7 +4489,7 @@ dependencies = [ "log", "parity-db", "parity-scale-codec", - "parking_lot 0.12.4", + "parking_lot 0.12.5", "sc-client-api", "sc-client-db", "smallvec", @@ -4505,7 +4505,7 @@ dependencies = [ [[package]] name = "fc-mapping-sync" version = "2.0.0-dev" -source = "git+https://github.com/opentensor/frontier?rev=a741e4695c4e864b443482a03e82529d761e5064#a741e4695c4e864b443482a03e82529d761e5064" +source = "git+https://github.com/opentensor/frontier?rev=e31d47f83a64c361ecf0fd02bf567de6db9bda43#e31d47f83a64c361ecf0fd02bf567de6db9bda43" dependencies = [ "fc-db", "fc-storage", @@ -4514,7 +4514,7 @@ dependencies = [ "futures", "futures-timer", "log", - "parking_lot 0.12.4", + "parking_lot 0.12.5", "sc-client-api", "sc-utils", "sp-api", @@ -4528,7 +4528,7 @@ dependencies = [ [[package]] name = "fc-rpc" version = "2.0.0-dev" -source = "git+https://github.com/opentensor/frontier?rev=a741e4695c4e864b443482a03e82529d761e5064#a741e4695c4e864b443482a03e82529d761e5064" +source = "git+https://github.com/opentensor/frontier?rev=e31d47f83a64c361ecf0fd02bf567de6db9bda43#e31d47f83a64c361ecf0fd02bf567de6db9bda43" dependencies = [ "ethereum", "ethereum-types", @@ -4579,7 +4579,7 @@ dependencies = [ [[package]] name = "fc-rpc-core" version = "1.1.0-dev" -source = "git+https://github.com/opentensor/frontier?rev=a741e4695c4e864b443482a03e82529d761e5064#a741e4695c4e864b443482a03e82529d761e5064" +source = "git+https://github.com/opentensor/frontier?rev=e31d47f83a64c361ecf0fd02bf567de6db9bda43#e31d47f83a64c361ecf0fd02bf567de6db9bda43" dependencies = [ "ethereum", "ethereum-types", @@ -4588,13 +4588,13 @@ dependencies = [ "rustc-hex", "serde", "serde_json", - "sp-crypto-hashing 0.1.0 (git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234)", + "sp-crypto-hashing 0.1.0 (git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80)", ] [[package]] name = 
"fc-storage" version = "1.0.0-dev" -source = "git+https://github.com/opentensor/frontier?rev=a741e4695c4e864b443482a03e82529d761e5064#a741e4695c4e864b443482a03e82529d761e5064" +source = "git+https://github.com/opentensor/frontier?rev=e31d47f83a64c361ecf0fd02bf567de6db9bda43#e31d47f83a64c361ecf0fd02bf567de6db9bda43" dependencies = [ "ethereum", "ethereum-types", @@ -4678,15 +4678,15 @@ dependencies = [ "log", "num-traits", "parity-scale-codec", - "parking_lot 0.12.4", + "parking_lot 0.12.5", "scale-info", ] [[package]] name = "find-msvc-tools" -version = "0.1.2" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ced73b1dacfc750a6db6c0a0c3a3853c8b41997e2e2c563dc90804ae6867959" +checksum = "0399f9d26e5191ce32c498bebd31e7a3ceabc2745f0ac54af3f335126c3f24b3" [[package]] name = "fixed-hash" @@ -4759,7 +4759,7 @@ checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" [[package]] name = "fork-tree" version = "13.0.1" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "parity-scale-codec", ] @@ -4786,7 +4786,7 @@ dependencies = [ [[package]] name = "fp-account" version = "1.0.0-dev" -source = "git+https://github.com/opentensor/frontier?rev=a741e4695c4e864b443482a03e82529d761e5064#a741e4695c4e864b443482a03e82529d761e5064" +source = "git+https://github.com/opentensor/frontier?rev=e31d47f83a64c361ecf0fd02bf567de6db9bda43#e31d47f83a64c361ecf0fd02bf567de6db9bda43" dependencies = [ "hex", "impl-serde", @@ -4804,7 +4804,7 @@ dependencies = [ [[package]] name = "fp-consensus" version = "2.0.0-dev" -source = "git+https://github.com/opentensor/frontier?rev=a741e4695c4e864b443482a03e82529d761e5064#a741e4695c4e864b443482a03e82529d761e5064" +source = "git+https://github.com/opentensor/frontier?rev=e31d47f83a64c361ecf0fd02bf567de6db9bda43#e31d47f83a64c361ecf0fd02bf567de6db9bda43" dependencies = [ "ethereum", "parity-scale-codec", @@ -4815,7 +4815,7 @@ dependencies = [ [[package]] name = "fp-ethereum" version = "1.0.0-dev" -source = "git+https://github.com/opentensor/frontier?rev=a741e4695c4e864b443482a03e82529d761e5064#a741e4695c4e864b443482a03e82529d761e5064" +source = "git+https://github.com/opentensor/frontier?rev=e31d47f83a64c361ecf0fd02bf567de6db9bda43#e31d47f83a64c361ecf0fd02bf567de6db9bda43" dependencies = [ "ethereum", "ethereum-types", @@ -4827,7 +4827,7 @@ dependencies = [ [[package]] name = "fp-evm" version = "3.0.0-dev" -source = "git+https://github.com/opentensor/frontier?rev=a741e4695c4e864b443482a03e82529d761e5064#a741e4695c4e864b443482a03e82529d761e5064" +source = "git+https://github.com/opentensor/frontier?rev=e31d47f83a64c361ecf0fd02bf567de6db9bda43#e31d47f83a64c361ecf0fd02bf567de6db9bda43" dependencies = [ "environmental", "evm", @@ -4843,7 +4843,7 @@ dependencies = [ [[package]] name = "fp-rpc" version = "3.0.0-dev" -source = "git+https://github.com/opentensor/frontier?rev=a741e4695c4e864b443482a03e82529d761e5064#a741e4695c4e864b443482a03e82529d761e5064" +source = "git+https://github.com/opentensor/frontier?rev=e31d47f83a64c361ecf0fd02bf567de6db9bda43#e31d47f83a64c361ecf0fd02bf567de6db9bda43" dependencies = [ "ethereum", "ethereum-types", @@ -4859,7 +4859,7 @@ dependencies = [ [[package]] name = "fp-self-contained" version = "1.0.0-dev" -source = 
"git+https://github.com/opentensor/frontier?rev=a741e4695c4e864b443482a03e82529d761e5064#a741e4695c4e864b443482a03e82529d761e5064" +source = "git+https://github.com/opentensor/frontier?rev=e31d47f83a64c361ecf0fd02bf567de6db9bda43#e31d47f83a64c361ecf0fd02bf567de6db9bda43" dependencies = [ "frame-support", "parity-scale-codec", @@ -4871,7 +4871,7 @@ dependencies = [ [[package]] name = "fp-storage" version = "2.0.0" -source = "git+https://github.com/opentensor/frontier?rev=a741e4695c4e864b443482a03e82529d761e5064#a741e4695c4e864b443482a03e82529d761e5064" +source = "git+https://github.com/opentensor/frontier?rev=e31d47f83a64c361ecf0fd02bf567de6db9bda43#e31d47f83a64c361ecf0fd02bf567de6db9bda43" dependencies = [ "parity-scale-codec", "serde", @@ -4886,7 +4886,7 @@ checksum = "28dd6caf6059519a65843af8fe2a3ae298b14b80179855aeb4adc2c1934ee619" [[package]] name = "frame-benchmarking" version = "41.0.1" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "frame-support", "frame-support-procedural", @@ -4910,7 +4910,7 @@ dependencies = [ [[package]] name = "frame-benchmarking-cli" version = "49.1.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "Inflector", "array-bytes 6.2.3", @@ -4975,7 +4975,7 @@ dependencies = [ [[package]] name = "frame-benchmarking-pallet-pov" version = "31.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "frame-benchmarking", "frame-support", @@ -5003,7 +5003,7 @@ dependencies = [ [[package]] name = "frame-election-provider-solution-type" version = "16.1.1" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "proc-macro-crate 3.4.0", "proc-macro2", @@ -5014,7 +5014,7 @@ dependencies = [ [[package]] name = "frame-election-provider-support" version = "41.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "frame-election-provider-solution-type", "frame-support", @@ -5031,7 +5031,7 @@ dependencies = [ [[package]] name = "frame-executive" version = "41.0.1" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ 
"aquamarine", "frame-support", @@ -5084,7 +5084,7 @@ dependencies = [ [[package]] name = "frame-metadata-hash-extension" version = "0.9.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "array-bytes 6.2.3", "const-hex", @@ -5100,7 +5100,7 @@ dependencies = [ [[package]] name = "frame-storage-access-test-runtime" version = "0.2.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "cumulus-pallet-parachain-system", "parity-scale-codec", @@ -5114,7 +5114,7 @@ dependencies = [ [[package]] name = "frame-support" version = "41.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "aquamarine", "array-bytes 6.2.3", @@ -5155,7 +5155,7 @@ dependencies = [ [[package]] name = "frame-support-procedural" version = "34.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "Inflector", "cfg-expr", @@ -5168,7 +5168,7 @@ dependencies = [ "proc-macro-warning", "proc-macro2", "quote", - "sp-crypto-hashing 0.1.0 (git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234)", + "sp-crypto-hashing 0.1.0 (git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80)", "syn 2.0.106", ] @@ -5188,7 +5188,7 @@ dependencies = [ [[package]] name = "frame-support-procedural-tools" version = "13.0.1" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "frame-support-procedural-tools-derive 12.0.0", "proc-macro-crate 3.4.0", @@ -5211,7 +5211,7 @@ dependencies = [ [[package]] name = "frame-support-procedural-tools-derive" version = "12.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "proc-macro2", "quote", @@ -5221,7 +5221,7 @@ dependencies = [ [[package]] name = "frame-system" version = "41.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = 
"git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "cfg-if", "docify", @@ -5240,7 +5240,7 @@ dependencies = [ [[package]] name = "frame-system-benchmarking" version = "41.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "frame-benchmarking", "frame-support", @@ -5254,7 +5254,7 @@ dependencies = [ [[package]] name = "frame-system-rpc-runtime-api" version = "37.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "docify", "parity-scale-codec", @@ -5264,7 +5264,7 @@ dependencies = [ [[package]] name = "frame-try-runtime" version = "0.47.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "frame-support", "parity-scale-codec", @@ -5368,7 +5368,7 @@ checksum = "1d930c203dd0b6ff06e0201a4a2fe9149b43c684fd4420555b26d21b1a02956f" dependencies = [ "futures-core", "lock_api", - "parking_lot 0.12.4", + "parking_lot 0.12.5", ] [[package]] @@ -5594,7 +5594,7 @@ dependencies = [ "futures-timer", "no-std-compat", "nonzero_ext", - "parking_lot 0.12.4", + "parking_lot 0.12.5", "portable-atomic", "quanta", "rand 0.8.5", @@ -5867,7 +5867,7 @@ dependencies = [ "ipconfig", "lru-cache", "once_cell", - "parking_lot 0.12.4", + "parking_lot 0.12.5", "rand 0.8.5", "resolv-conf", "smallvec", @@ -5888,7 +5888,7 @@ dependencies = [ "ipconfig", "moka", "once_cell", - "parking_lot 0.12.4", + "parking_lot 0.12.5", "rand 0.9.2", "resolv-conf", "smallvec", @@ -6118,7 +6118,7 @@ dependencies = [ "js-sys", "log", "wasm-bindgen", - "windows-core 0.62.1", + "windows-core 0.62.2", ] [[package]] @@ -6646,7 +6646,7 @@ dependencies = [ "http-body 1.0.1", "http-body-util", "jsonrpsee-types", - "parking_lot 0.12.4", + "parking_lot 0.12.5", "pin-project", "rand 0.8.5", "rustc-hash 2.1.1", @@ -6788,7 +6788,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bf7a85fe66f9ff9cd74e169fdd2c94c6e1e74c412c99a73b4df3200b5d3760b2" dependencies = [ "kvdb", - "parking_lot 0.12.4", + "parking_lot 0.12.5", ] [[package]] @@ -6799,7 +6799,7 @@ checksum = "b644c70b92285f66bfc2032922a79000ea30af7bc2ab31902992a5dcb9b434f6" dependencies = [ "kvdb", "num_cpus", - "parking_lot 0.12.4", + "parking_lot 0.12.5", "regex", "rocksdb", "smallvec", @@ -6927,7 +6927,7 @@ dependencies = [ "multihash 0.19.3", "multistream-select", "once_cell", - "parking_lot 0.12.4", + "parking_lot 0.12.5", "pin-project", "quick-protobuf", "rand 0.8.5", @@ -6951,7 +6951,7 @@ dependencies = [ "hickory-resolver 0.24.4", "libp2p-core", "libp2p-identity", - "parking_lot 0.12.4", + "parking_lot 0.12.5", "smallvec", "tracing", ] @@ -7122,7 +7122,7 @@ dependencies = [ "libp2p-core", "libp2p-identity", "libp2p-tls", - "parking_lot 0.12.4", + "parking_lot 0.12.5", 
"quinn", "rand 0.8.5", "ring 0.17.14", @@ -7252,7 +7252,7 @@ dependencies = [ "futures-rustls", "libp2p-core", "libp2p-identity", - "parking_lot 0.12.4", + "parking_lot 0.12.5", "pin-project-lite", "rw-stream-sink", "soketto 0.8.1", @@ -7285,7 +7285,7 @@ checksum = "416f7e718bdb06000964960ffa43b4335ad4012ae8b99060261aa4a8088d5ccb" dependencies = [ "bitflags 2.9.4", "libc", - "redox_syscall 0.5.17", + "redox_syscall 0.5.18", ] [[package]] @@ -7468,7 +7468,7 @@ dependencies = [ "multiaddr 0.17.1", "multihash 0.17.0", "network-interface", - "parking_lot 0.12.4", + "parking_lot 0.12.5", "pin-project", "prost 0.13.5", "prost-build", @@ -7497,11 +7497,10 @@ dependencies = [ [[package]] name = "lock_api" -version = "0.4.13" +version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96936507f153605bddfcda068dd804796c84324ed2510809e5b2a624c81da765" +checksum = "224399e74b87b5f3557511d98dff8b14089b3dadafcab6bb93eab67d3aace965" dependencies = [ - "autocfg", "scopeguard", ] @@ -7641,11 +7640,11 @@ dependencies = [ [[package]] name = "matchers" -version = "0.2.0" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d1525a2a28c7f4fa0fc98bb91ae755d1e2d1505079e05539e35bc876b5d65ae9" +checksum = "8263075bb86c5a1b1427b5ae862e8889656f126e9f77c484496e8b47cf5c5558" dependencies = [ - "regex-automata", + "regex-automata 0.1.10", ] [[package]] @@ -7779,7 +7778,7 @@ dependencies = [ "hashlink 0.8.4", "lioness", "log", - "parking_lot 0.12.4", + "parking_lot 0.12.5", "rand 0.8.5", "rand_chacha 0.3.1", "rand_distr", @@ -7791,7 +7790,7 @@ dependencies = [ [[package]] name = "mmr-gadget" version = "46.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "futures", "log", @@ -7810,7 +7809,7 @@ dependencies = [ [[package]] name = "mmr-rpc" version = "41.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "jsonrpsee", "parity-scale-codec", @@ -7858,7 +7857,7 @@ dependencies = [ "crossbeam-epoch", "crossbeam-utils", "equivalent", - "parking_lot 0.12.4", + "parking_lot 0.12.5", "portable-atomic", "rustc_version 0.4.1", "smallvec", @@ -8279,6 +8278,7 @@ dependencies = [ "pallet-balances", "pallet-base-fee", "pallet-commitments", + "pallet-contracts 40.1.0", "pallet-crowdloan", "pallet-drand", "pallet-election-provider-multi-phase", @@ -8292,7 +8292,6 @@ dependencies = [ "pallet-grandpa", "pallet-hotfix-sufficients", "pallet-insecure-randomness-collective-flip", - "pallet-membership", "pallet-multisig", "pallet-nomination-pools", "pallet-nomination-pools-runtime-api", @@ -8307,7 +8306,6 @@ dependencies = [ "pallet-staking-reward-fn", "pallet-staking-runtime-api", "pallet-subtensor", - "pallet-subtensor-collective", "pallet-subtensor-proxy", "pallet-subtensor-swap", "pallet-subtensor-swap-runtime-api", @@ -8347,6 +8345,7 @@ dependencies = [ "sp-version", "substrate-fixed", "substrate-wasm-builder", + "subtensor-chain-extensions", "subtensor-custom-rpc-runtime-api", "subtensor-macros", "subtensor-precompiles", @@ 
-8402,11 +8401,12 @@ dependencies = [ [[package]] name = "nu-ansi-term" -version = "0.50.1" +version = "0.46.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4a28e057d01f97e61255210fcff094d74ed0466038633e95017f5beb68e4399" +checksum = "77a8165726e8236064dbb45459242600304b42a5ea24ee2948e18e023bf7ba84" dependencies = [ - "windows-sys 0.52.0", + "overload", + "winapi", ] [[package]] @@ -8701,6 +8701,12 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "overload" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" + [[package]] name = "pallet-admin-utils" version = "4.0.0-dev" @@ -8737,7 +8743,7 @@ dependencies = [ [[package]] name = "pallet-alliance" version = "40.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "array-bytes 6.2.3", "frame-benchmarking", @@ -8749,7 +8755,7 @@ dependencies = [ "parity-scale-codec", "scale-info", "sp-core", - "sp-crypto-hashing 0.1.0 (git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234)", + "sp-crypto-hashing 0.1.0 (git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80)", "sp-io", "sp-runtime", ] @@ -8757,7 +8763,7 @@ dependencies = [ [[package]] name = "pallet-asset-conversion" version = "23.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "frame-benchmarking", "frame-support", @@ -8775,7 +8781,7 @@ dependencies = [ [[package]] name = "pallet-asset-conversion-ops" version = "0.9.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "frame-benchmarking", "frame-support", @@ -8793,7 +8799,7 @@ dependencies = [ [[package]] name = "pallet-asset-conversion-tx-payment" version = "23.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "frame-benchmarking", "frame-support", @@ -8808,7 +8814,7 @@ dependencies = [ [[package]] name = "pallet-asset-rate" version = "20.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "frame-benchmarking", "frame-support", @@ -8822,7 +8828,7 @@ dependencies = [ [[package]] name = "pallet-asset-rewards" version = "0.3.0" -source = 
"git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "frame-benchmarking", "frame-support", @@ -8840,7 +8846,7 @@ dependencies = [ [[package]] name = "pallet-asset-tx-payment" version = "41.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "frame-benchmarking", "frame-support", @@ -8856,7 +8862,7 @@ dependencies = [ [[package]] name = "pallet-assets" version = "43.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "ethereum-standards", "frame-benchmarking", @@ -8874,7 +8880,7 @@ dependencies = [ [[package]] name = "pallet-assets-freezer" version = "0.8.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "log", "pallet-assets", @@ -8886,7 +8892,7 @@ dependencies = [ [[package]] name = "pallet-assets-holder" version = "0.3.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "frame-benchmarking", "frame-support", @@ -8901,7 +8907,7 @@ dependencies = [ [[package]] name = "pallet-atomic-swap" version = "41.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "parity-scale-codec", "polkadot-sdk-frame", @@ -8911,7 +8917,7 @@ dependencies = [ [[package]] name = "pallet-aura" version = "40.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "frame-support", "frame-system", @@ -8927,7 +8933,7 @@ dependencies = [ [[package]] name = "pallet-authority-discovery" version = "41.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "frame-support", "frame-system", @@ -8942,7 +8948,7 @@ dependencies = [ [[package]] name = 
"pallet-authorship" version = "41.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "frame-support", "frame-system", @@ -8955,7 +8961,7 @@ dependencies = [ [[package]] name = "pallet-babe" version = "41.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "frame-benchmarking", "frame-support", @@ -8978,7 +8984,7 @@ dependencies = [ [[package]] name = "pallet-bags-list" version = "40.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "aquamarine", "docify", @@ -8999,7 +9005,7 @@ dependencies = [ [[package]] name = "pallet-balances" version = "42.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "docify", "frame-benchmarking", @@ -9015,7 +9021,7 @@ dependencies = [ [[package]] name = "pallet-base-fee" version = "1.0.0" -source = "git+https://github.com/opentensor/frontier?rev=a741e4695c4e864b443482a03e82529d761e5064#a741e4695c4e864b443482a03e82529d761e5064" +source = "git+https://github.com/opentensor/frontier?rev=e31d47f83a64c361ecf0fd02bf567de6db9bda43#e31d47f83a64c361ecf0fd02bf567de6db9bda43" dependencies = [ "fp-evm", "frame-support", @@ -9029,7 +9035,7 @@ dependencies = [ [[package]] name = "pallet-beefy" version = "42.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "frame-support", "frame-system", @@ -9048,7 +9054,7 @@ dependencies = [ [[package]] name = "pallet-beefy-mmr" version = "42.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "array-bytes 6.2.3", "binary-merkle-tree", @@ -9073,7 +9079,7 @@ dependencies = [ [[package]] name = "pallet-bounties" version = "40.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "frame-benchmarking", "frame-support", @@ -9090,7 +9096,7 @@ dependencies = [ [[package]] name = "pallet-bridge-grandpa" 
version = "0.21.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "bp-header-chain", "bp-runtime", @@ -9109,7 +9115,7 @@ dependencies = [ [[package]] name = "pallet-bridge-messages" version = "0.21.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "bp-header-chain", "bp-messages", @@ -9128,7 +9134,7 @@ dependencies = [ [[package]] name = "pallet-bridge-parachains" version = "0.21.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "bp-header-chain", "bp-parachains", @@ -9148,7 +9154,7 @@ dependencies = [ [[package]] name = "pallet-bridge-relayers" version = "0.21.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "bp-header-chain", "bp-messages", @@ -9171,7 +9177,7 @@ dependencies = [ [[package]] name = "pallet-broker" version = "0.20.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "bitvec", "frame-benchmarking", @@ -9189,7 +9195,7 @@ dependencies = [ [[package]] name = "pallet-child-bounties" version = "40.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "frame-benchmarking", "frame-support", @@ -9207,7 +9213,7 @@ dependencies = [ [[package]] name = "pallet-collator-selection" version = "22.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "frame-benchmarking", "frame-support", @@ -9226,7 +9232,7 @@ dependencies = [ [[package]] name = "pallet-collective" version = "41.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "docify", "frame-benchmarking", @@ -9243,7 +9249,7 @@ 
dependencies = [ [[package]] name = "pallet-collective-content" version = "0.19.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "frame-benchmarking", "frame-support", @@ -9281,10 +9287,41 @@ dependencies = [ "w3f-bls 0.1.3", ] +[[package]] +name = "pallet-contracts" +version = "40.1.0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2503-6#598feddb893f5ad3923a62e41a2f179b6e10c30c" +dependencies = [ + "environmental", + "frame-benchmarking", + "frame-support", + "frame-system", + "impl-trait-for-tuples", + "log", + "pallet-balances", + "pallet-contracts-proc-macro 23.0.3 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2503-6)", + "pallet-contracts-uapi 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2503-6)", + "parity-scale-codec", + "paste", + "rand 0.8.5", + "rand_pcg", + "scale-info", + "serde", + "smallvec", + "sp-api", + "sp-core", + "sp-io", + "sp-runtime", + "staging-xcm", + "staging-xcm-builder", + "wasm-instrument", + "wasmi 0.32.3", +] + [[package]] name = "pallet-contracts" version = "41.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "environmental", "frame-benchmarking", @@ -9293,8 +9330,8 @@ dependencies = [ "impl-trait-for-tuples", "log", "pallet-balances", - "pallet-contracts-proc-macro", - "pallet-contracts-uapi", + "pallet-contracts-proc-macro 23.0.3 (git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80)", + "pallet-contracts-uapi 14.0.0 (git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80)", "parity-scale-codec", "paste", "rand 0.8.5", @@ -9315,14 +9352,14 @@ dependencies = [ [[package]] name = "pallet-contracts-mock-network" version = "18.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "frame-support", "frame-system", "pallet-assets", "pallet-balances", - "pallet-contracts", - "pallet-contracts-uapi", + "pallet-contracts 41.0.0", + "pallet-contracts-uapi 14.0.0 (git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80)", "pallet-message-queue", "pallet-timestamp", "pallet-xcm", @@ -9346,7 +9383,17 @@ dependencies = [ [[package]] name = "pallet-contracts-proc-macro" version = "23.0.3" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2503-6#598feddb893f5ad3923a62e41a2f179b6e10c30c" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.106", +] + +[[package]] +name = "pallet-contracts-proc-macro" +version = "23.0.3" +source = 
"git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "proc-macro2", "quote", @@ -9356,7 +9403,18 @@ dependencies = [ [[package]] name = "pallet-contracts-uapi" version = "14.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2503-6#598feddb893f5ad3923a62e41a2f179b6e10c30c" +dependencies = [ + "bitflags 1.3.2", + "parity-scale-codec", + "paste", + "scale-info", +] + +[[package]] +name = "pallet-contracts-uapi" +version = "14.0.0" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "bitflags 1.3.2", "parity-scale-codec", @@ -9367,7 +9425,7 @@ dependencies = [ [[package]] name = "pallet-conviction-voting" version = "41.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "assert_matches", "frame-benchmarking", @@ -9383,7 +9441,7 @@ dependencies = [ [[package]] name = "pallet-core-fellowship" version = "25.0.1" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "frame-benchmarking", "frame-support", @@ -9420,7 +9478,7 @@ dependencies = [ [[package]] name = "pallet-delegated-staking" version = "8.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "frame-support", "frame-system", @@ -9435,7 +9493,7 @@ dependencies = [ [[package]] name = "pallet-democracy" version = "41.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "frame-benchmarking", "frame-support", @@ -9452,7 +9510,7 @@ dependencies = [ [[package]] name = "pallet-dev-mode" version = "23.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "frame-support", "frame-system", @@ -9501,7 +9559,7 @@ dependencies = [ [[package]] name = "pallet-dummy-dim" version = "0.2.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = 
"git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "frame-benchmarking", "frame-support", @@ -9519,7 +9577,7 @@ dependencies = [ [[package]] name = "pallet-election-provider-multi-block" version = "0.2.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -9540,7 +9598,7 @@ dependencies = [ [[package]] name = "pallet-election-provider-multi-phase" version = "40.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -9561,7 +9619,7 @@ dependencies = [ [[package]] name = "pallet-election-provider-support-benchmarking" version = "40.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -9574,7 +9632,7 @@ dependencies = [ [[package]] name = "pallet-elections-phragmen" version = "42.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "frame-benchmarking", "frame-support", @@ -9592,7 +9650,7 @@ dependencies = [ [[package]] name = "pallet-ethereum" version = "4.0.0-dev" -source = "git+https://github.com/opentensor/frontier?rev=a741e4695c4e864b443482a03e82529d761e5064#a741e4695c4e864b443482a03e82529d761e5064" +source = "git+https://github.com/opentensor/frontier?rev=e31d47f83a64c361ecf0fd02bf567de6db9bda43#e31d47f83a64c361ecf0fd02bf567de6db9bda43" dependencies = [ "ethereum", "ethereum-types", @@ -9615,7 +9673,7 @@ dependencies = [ [[package]] name = "pallet-evm" version = "6.0.0-dev" -source = "git+https://github.com/opentensor/frontier?rev=a741e4695c4e864b443482a03e82529d761e5064#a741e4695c4e864b443482a03e82529d761e5064" +source = "git+https://github.com/opentensor/frontier?rev=e31d47f83a64c361ecf0fd02bf567de6db9bda43#e31d47f83a64c361ecf0fd02bf567de6db9bda43" dependencies = [ "cumulus-primitives-storage-weight-reclaim", "environmental", @@ -9640,7 +9698,7 @@ dependencies = [ [[package]] name = "pallet-evm-chain-id" version = "1.0.0-dev" -source = "git+https://github.com/opentensor/frontier?rev=a741e4695c4e864b443482a03e82529d761e5064#a741e4695c4e864b443482a03e82529d761e5064" +source = "git+https://github.com/opentensor/frontier?rev=e31d47f83a64c361ecf0fd02bf567de6db9bda43#e31d47f83a64c361ecf0fd02bf567de6db9bda43" dependencies = [ "frame-support", "frame-system", @@ -9651,7 +9709,7 @@ dependencies = [ [[package]] name = "pallet-evm-precompile-bn128" version = "2.0.0-dev" 
-source = "git+https://github.com/opentensor/frontier?rev=a741e4695c4e864b443482a03e82529d761e5064#a741e4695c4e864b443482a03e82529d761e5064" +source = "git+https://github.com/opentensor/frontier?rev=e31d47f83a64c361ecf0fd02bf567de6db9bda43#e31d47f83a64c361ecf0fd02bf567de6db9bda43" dependencies = [ "fp-evm", "sp-core", @@ -9661,7 +9719,7 @@ dependencies = [ [[package]] name = "pallet-evm-precompile-dispatch" version = "2.0.0-dev" -source = "git+https://github.com/opentensor/frontier?rev=a741e4695c4e864b443482a03e82529d761e5064#a741e4695c4e864b443482a03e82529d761e5064" +source = "git+https://github.com/opentensor/frontier?rev=e31d47f83a64c361ecf0fd02bf567de6db9bda43#e31d47f83a64c361ecf0fd02bf567de6db9bda43" dependencies = [ "fp-evm", "frame-support", @@ -9673,7 +9731,7 @@ dependencies = [ [[package]] name = "pallet-evm-precompile-modexp" version = "2.0.0-dev" -source = "git+https://github.com/opentensor/frontier?rev=a741e4695c4e864b443482a03e82529d761e5064#a741e4695c4e864b443482a03e82529d761e5064" +source = "git+https://github.com/opentensor/frontier?rev=e31d47f83a64c361ecf0fd02bf567de6db9bda43#e31d47f83a64c361ecf0fd02bf567de6db9bda43" dependencies = [ "fp-evm", "num", @@ -9682,7 +9740,7 @@ dependencies = [ [[package]] name = "pallet-evm-precompile-sha3fips" version = "2.0.0-dev" -source = "git+https://github.com/opentensor/frontier?rev=a741e4695c4e864b443482a03e82529d761e5064#a741e4695c4e864b443482a03e82529d761e5064" +source = "git+https://github.com/opentensor/frontier?rev=e31d47f83a64c361ecf0fd02bf567de6db9bda43#e31d47f83a64c361ecf0fd02bf567de6db9bda43" dependencies = [ "fp-evm", "tiny-keccak", @@ -9691,7 +9749,7 @@ dependencies = [ [[package]] name = "pallet-evm-precompile-simple" version = "2.0.0-dev" -source = "git+https://github.com/opentensor/frontier?rev=a741e4695c4e864b443482a03e82529d761e5064#a741e4695c4e864b443482a03e82529d761e5064" +source = "git+https://github.com/opentensor/frontier?rev=e31d47f83a64c361ecf0fd02bf567de6db9bda43#e31d47f83a64c361ecf0fd02bf567de6db9bda43" dependencies = [ "fp-evm", "ripemd", @@ -9701,7 +9759,7 @@ dependencies = [ [[package]] name = "pallet-fast-unstake" version = "40.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "docify", "frame-benchmarking", @@ -9719,7 +9777,7 @@ dependencies = [ [[package]] name = "pallet-glutton" version = "27.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "blake2 0.10.6", "frame-benchmarking", @@ -9737,7 +9795,7 @@ dependencies = [ [[package]] name = "pallet-grandpa" version = "41.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "frame-benchmarking", "frame-support", @@ -9759,7 +9817,7 @@ dependencies = [ [[package]] name = "pallet-hotfix-sufficients" version = "1.0.0" -source = 
"git+https://github.com/opentensor/frontier?rev=a741e4695c4e864b443482a03e82529d761e5064#a741e4695c4e864b443482a03e82529d761e5064" +source = "git+https://github.com/opentensor/frontier?rev=e31d47f83a64c361ecf0fd02bf567de6db9bda43#e31d47f83a64c361ecf0fd02bf567de6db9bda43" dependencies = [ "frame-benchmarking", "frame-support", @@ -9774,7 +9832,7 @@ dependencies = [ [[package]] name = "pallet-identity" version = "41.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "enumflags2", "frame-benchmarking", @@ -9790,7 +9848,7 @@ dependencies = [ [[package]] name = "pallet-im-online" version = "40.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "frame-benchmarking", "frame-support", @@ -9809,7 +9867,7 @@ dependencies = [ [[package]] name = "pallet-indices" version = "41.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "frame-benchmarking", "frame-support", @@ -9824,7 +9882,7 @@ dependencies = [ [[package]] name = "pallet-insecure-randomness-collective-flip" version = "29.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "parity-scale-codec", "polkadot-sdk-frame", @@ -9835,7 +9893,7 @@ dependencies = [ [[package]] name = "pallet-lottery" version = "41.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "frame-benchmarking", "frame-support", @@ -9848,7 +9906,7 @@ dependencies = [ [[package]] name = "pallet-membership" version = "41.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "frame-benchmarking", "frame-support", @@ -9864,7 +9922,7 @@ dependencies = [ [[package]] name = "pallet-message-queue" version = "44.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "environmental", "frame-benchmarking", @@ -9883,7 +9941,7 @@ dependencies = [ 
[[package]] name = "pallet-meta-tx" version = "0.3.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "docify", "frame-benchmarking", @@ -9901,7 +9959,7 @@ dependencies = [ [[package]] name = "pallet-migrations" version = "11.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "docify", "frame-benchmarking", @@ -9920,7 +9978,7 @@ dependencies = [ [[package]] name = "pallet-mixnet" version = "0.17.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "log", "parity-scale-codec", @@ -9934,7 +9992,7 @@ dependencies = [ [[package]] name = "pallet-mmr" version = "41.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "log", "parity-scale-codec", @@ -9946,7 +10004,7 @@ dependencies = [ [[package]] name = "pallet-multisig" version = "41.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "log", "parity-scale-codec", @@ -9957,7 +10015,7 @@ dependencies = [ [[package]] name = "pallet-nft-fractionalization" version = "24.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "log", "pallet-assets", @@ -9970,7 +10028,7 @@ dependencies = [ [[package]] name = "pallet-nfts" version = "35.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "enumflags2", "frame-benchmarking", @@ -9987,7 +10045,7 @@ dependencies = [ [[package]] name = "pallet-nis" version = "41.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "parity-scale-codec", "polkadot-sdk-frame", @@ -9997,7 +10055,7 @@ dependencies = [ [[package]] name 
= "pallet-node-authorization" version = "41.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "log", "parity-scale-codec", @@ -10008,7 +10066,7 @@ dependencies = [ [[package]] name = "pallet-nomination-pools" version = "39.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "frame-support", "frame-system", @@ -10026,7 +10084,7 @@ dependencies = [ [[package]] name = "pallet-nomination-pools-benchmarking" version = "39.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -10046,7 +10104,7 @@ dependencies = [ [[package]] name = "pallet-nomination-pools-runtime-api" version = "37.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "pallet-nomination-pools", "parity-scale-codec", @@ -10056,7 +10114,7 @@ dependencies = [ [[package]] name = "pallet-offences" version = "40.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "frame-support", "frame-system", @@ -10071,7 +10129,7 @@ dependencies = [ [[package]] name = "pallet-offences-benchmarking" version = "41.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -10094,7 +10152,7 @@ dependencies = [ [[package]] name = "pallet-origin-restriction" version = "0.2.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "frame-benchmarking", "frame-support", @@ -10112,7 +10170,7 @@ dependencies = [ [[package]] name = "pallet-paged-list" version = "0.19.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = 
"git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "docify", "parity-scale-codec", @@ -10123,7 +10181,7 @@ dependencies = [ [[package]] name = "pallet-parameters" version = "0.12.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "docify", "frame-benchmarking", @@ -10140,7 +10198,7 @@ dependencies = [ [[package]] name = "pallet-people" version = "0.2.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "frame-benchmarking", "frame-support", @@ -10158,7 +10216,7 @@ dependencies = [ [[package]] name = "pallet-preimage" version = "41.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "frame-benchmarking", "frame-support", @@ -10174,7 +10232,7 @@ dependencies = [ [[package]] name = "pallet-proxy" version = "41.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "parity-scale-codec", "polkadot-sdk-frame", @@ -10184,7 +10242,7 @@ dependencies = [ [[package]] name = "pallet-ranked-collective" version = "41.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "frame-benchmarking", "frame-support", @@ -10202,7 +10260,7 @@ dependencies = [ [[package]] name = "pallet-recovery" version = "41.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "parity-scale-codec", "polkadot-sdk-frame", @@ -10212,7 +10270,7 @@ dependencies = [ [[package]] name = "pallet-referenda" version = "41.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "assert_matches", "frame-benchmarking", @@ -10247,7 +10305,7 @@ dependencies = [ [[package]] name = "pallet-remark" version = "41.0.0" -source = 
"git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "frame-benchmarking", "frame-support", @@ -10262,7 +10320,7 @@ dependencies = [ [[package]] name = "pallet-revive" version = "0.7.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "alloy-core", "derive_more 0.99.20", @@ -10308,7 +10366,7 @@ dependencies = [ [[package]] name = "pallet-revive-fixtures" version = "0.4.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "anyhow", "cargo_metadata", @@ -10322,7 +10380,7 @@ dependencies = [ [[package]] name = "pallet-revive-proc-macro" version = "0.3.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "proc-macro2", "quote", @@ -10332,7 +10390,7 @@ dependencies = [ [[package]] name = "pallet-revive-uapi" version = "0.5.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "bitflags 1.3.2", "pallet-revive-proc-macro", @@ -10344,7 +10402,7 @@ dependencies = [ [[package]] name = "pallet-root-offences" version = "38.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "frame-support", "frame-system", @@ -10360,7 +10418,7 @@ dependencies = [ [[package]] name = "pallet-root-testing" version = "17.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "frame-support", "frame-system", @@ -10373,7 +10431,7 @@ dependencies = [ [[package]] name = "pallet-safe-mode" version = "22.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "docify", "pallet-balances", @@ -10387,7 +10445,7 @@ dependencies = [ [[package]] name = 
"pallet-salary" version = "26.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "log", "pallet-ranked-collective", @@ -10399,7 +10457,7 @@ dependencies = [ [[package]] name = "pallet-scheduler" version = "42.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "docify", "frame-benchmarking", @@ -10416,7 +10474,7 @@ dependencies = [ [[package]] name = "pallet-scored-pool" version = "41.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "frame-support", "frame-system", @@ -10429,7 +10487,7 @@ dependencies = [ [[package]] name = "pallet-session" version = "41.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "frame-support", "frame-system", @@ -10450,7 +10508,7 @@ dependencies = [ [[package]] name = "pallet-session-benchmarking" version = "41.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "frame-benchmarking", "frame-support", @@ -10466,7 +10524,7 @@ dependencies = [ [[package]] name = "pallet-skip-feeless-payment" version = "16.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "frame-support", "frame-system", @@ -10478,7 +10536,7 @@ dependencies = [ [[package]] name = "pallet-society" version = "41.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "frame-benchmarking", "frame-support", @@ -10495,7 +10553,7 @@ dependencies = [ [[package]] name = "pallet-staking" version = "41.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "frame-benchmarking", 
"frame-election-provider-support", @@ -10517,7 +10575,7 @@ dependencies = [ [[package]] name = "pallet-staking-async" version = "0.2.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -10540,7 +10598,7 @@ dependencies = [ [[package]] name = "pallet-staking-async-ah-client" version = "0.2.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "frame-support", "frame-system", @@ -10559,7 +10617,7 @@ dependencies = [ [[package]] name = "pallet-staking-async-rc-client" version = "0.2.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "frame-support", "frame-system", @@ -10576,7 +10634,7 @@ dependencies = [ [[package]] name = "pallet-staking-reward-curve" version = "12.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "proc-macro-crate 3.4.0", "proc-macro2", @@ -10587,7 +10645,7 @@ dependencies = [ [[package]] name = "pallet-staking-reward-fn" version = "23.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "log", "sp-arithmetic", @@ -10596,7 +10654,7 @@ dependencies = [ [[package]] name = "pallet-staking-runtime-api" version = "27.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "parity-scale-codec", "sp-api", @@ -10606,7 +10664,7 @@ dependencies = [ [[package]] name = "pallet-state-trie-migration" version = "46.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "frame-benchmarking", "frame-support", @@ -10622,7 +10680,7 @@ dependencies = [ [[package]] name = "pallet-statement" version = "23.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = 
"git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "frame-support", "frame-system", @@ -10655,10 +10713,8 @@ dependencies = [ "pallet-commitments", "pallet-crowdloan", "pallet-drand", - "pallet-membership", "pallet-preimage", "pallet-scheduler", - "pallet-subtensor-collective", "pallet-subtensor-proxy", "pallet-subtensor-swap", "pallet-subtensor-utility", @@ -10675,6 +10731,7 @@ dependencies = [ "share-pool", "sp-core", "sp-io", + "sp-keyring", "sp-runtime", "sp-std", "sp-tracing", @@ -10684,26 +10741,12 @@ dependencies = [ "subtensor-runtime-common", "subtensor-swap-interface", "tle", + "tracing", + "tracing-log", + "tracing-subscriber 0.3.18", "w3f-bls 0.1.3", ] -[[package]] -name = "pallet-subtensor-collective" -version = "4.0.0-dev" -dependencies = [ - "frame-benchmarking", - "frame-support", - "frame-system", - "log", - "parity-scale-codec", - "scale-info", - "sp-core", - "sp-io", - "sp-runtime", - "sp-std", - "subtensor-macros", -] - [[package]] name = "pallet-subtensor-proxy" version = "40.1.0" @@ -10780,7 +10823,6 @@ dependencies = [ "frame-system", "pallet-balances", "pallet-root-testing", - "pallet-subtensor-collective", "pallet-timestamp", "parity-scale-codec", "scale-info", @@ -10793,7 +10835,7 @@ dependencies = [ [[package]] name = "pallet-sudo" version = "41.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "docify", "frame-benchmarking", @@ -10808,7 +10850,7 @@ dependencies = [ [[package]] name = "pallet-timestamp" version = "40.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "docify", "frame-benchmarking", @@ -10826,7 +10868,7 @@ dependencies = [ [[package]] name = "pallet-tips" version = "40.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "frame-benchmarking", "frame-support", @@ -10844,7 +10886,7 @@ dependencies = [ [[package]] name = "pallet-transaction-payment" version = "41.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "frame-benchmarking", "frame-support", @@ -10859,7 +10901,7 @@ dependencies = [ [[package]] name = "pallet-transaction-payment-rpc" version = "44.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "jsonrpsee", 
"pallet-transaction-payment-rpc-runtime-api", @@ -10875,7 +10917,7 @@ dependencies = [ [[package]] name = "pallet-transaction-payment-rpc-runtime-api" version = "41.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "pallet-transaction-payment", "parity-scale-codec", @@ -10887,7 +10929,7 @@ dependencies = [ [[package]] name = "pallet-transaction-storage" version = "40.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "array-bytes 6.2.3", "frame-benchmarking", @@ -10906,7 +10948,7 @@ dependencies = [ [[package]] name = "pallet-treasury" version = "40.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "docify", "frame-benchmarking", @@ -10925,7 +10967,7 @@ dependencies = [ [[package]] name = "pallet-tx-pause" version = "22.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "docify", "parity-scale-codec", @@ -10936,7 +10978,7 @@ dependencies = [ [[package]] name = "pallet-uniques" version = "41.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "frame-benchmarking", "frame-support", @@ -10950,7 +10992,7 @@ dependencies = [ [[package]] name = "pallet-utility" version = "41.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "frame-benchmarking", "frame-support", @@ -10965,7 +11007,7 @@ dependencies = [ [[package]] name = "pallet-verify-signature" version = "0.4.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "frame-benchmarking", "frame-support", @@ -10980,7 +11022,7 @@ dependencies = [ [[package]] name = "pallet-vesting" version = "41.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = 
"git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "frame-benchmarking", "frame-support", @@ -10994,7 +11036,7 @@ dependencies = [ [[package]] name = "pallet-whitelist" version = "40.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "parity-scale-codec", "polkadot-sdk-frame", @@ -11004,7 +11046,7 @@ dependencies = [ [[package]] name = "pallet-xcm" version = "20.1.3" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "bounded-collections 0.2.4", "frame-benchmarking", @@ -11030,7 +11072,7 @@ dependencies = [ [[package]] name = "pallet-xcm-benchmarks" version = "21.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "frame-benchmarking", "frame-support", @@ -11047,7 +11089,7 @@ dependencies = [ [[package]] name = "pallet-xcm-bridge-hub" version = "0.17.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "bp-messages", "bp-runtime", @@ -11069,7 +11111,7 @@ dependencies = [ [[package]] name = "pallet-xcm-bridge-hub-router" version = "0.19.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "bp-xcm-bridge-hub-router", "frame-benchmarking", @@ -11089,7 +11131,7 @@ dependencies = [ [[package]] name = "parachains-common" version = "22.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "cumulus-primitives-core", "cumulus-primitives-utility", @@ -11142,7 +11184,7 @@ dependencies = [ "log", "lz4", "memmap2 0.5.10", - "parking_lot 0.12.4", + "parking_lot 0.12.5", "rand 0.8.5", "siphasher 0.3.11", "snap", @@ -11203,12 +11245,12 @@ dependencies = [ [[package]] name = "parking_lot" -version = "0.12.4" +version = "0.12.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70d58bf43669b5795d1576d0641cfb6fbb2057bf629506267a92807158584a13" +checksum = "93857453250e3077bd71ff98b6a65ea6621a19bb0f559a85248955ac12c45a1a" dependencies = [ "lock_api", - "parking_lot_core 0.9.11", + "parking_lot_core 0.9.12", ] 
[[package]] @@ -11227,15 +11269,15 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.9.11" +version = "0.9.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc838d2a56b5b1a6c25f55575dfc605fabb63bb2365f6c2353ef9159aa69e4a5" +checksum = "2621685985a2ebf1c516881c026032ac7deafcda1a2c9b7850dc81e3dfcb64c1" dependencies = [ "cfg-if", "libc", - "redox_syscall 0.5.17", + "redox_syscall 0.5.18", "smallvec", - "windows-targets 0.52.6", + "windows-link", ] [[package]] @@ -11305,20 +11347,19 @@ checksum = "9b4f627cb1b25917193a259e49bdad08f671f8d9708acfd5fe0a8c1455d87220" [[package]] name = "pest" -version = "2.8.2" +version = "2.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21e0a3a33733faeaf8651dfee72dd0f388f0c8e5ad496a3478fa5a922f49cfa8" +checksum = "989e7521a040efde50c3ab6bbadafbe15ab6dc042686926be59ac35d74607df4" dependencies = [ "memchr", - "thiserror 2.0.17", "ucd-trie", ] [[package]] name = "pest_derive" -version = "2.8.2" +version = "2.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc58706f770acb1dbd0973e6530a3cff4746fb721207feb3a8a6064cd0b6c663" +checksum = "187da9a3030dbafabbbfb20cb323b976dc7b7ce91fcd84f2f74d6e31d378e2de" dependencies = [ "pest", "pest_generator", @@ -11326,9 +11367,9 @@ dependencies = [ [[package]] name = "pest_generator" -version = "2.8.2" +version = "2.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d4f36811dfe07f7b8573462465d5cb8965fffc2e71ae377a33aecf14c2c9a2f" +checksum = "49b401d98f5757ebe97a26085998d6c0eecec4995cad6ab7fc30ffdf4b052843" dependencies = [ "pest", "pest_meta", @@ -11339,9 +11380,9 @@ dependencies = [ [[package]] name = "pest_meta" -version = "2.8.2" +version = "2.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42919b05089acbd0a5dcd5405fb304d17d1053847b81163d09c4ad18ce8e8420" +checksum = "72f27a2cfee9f9039c4d86faa5af122a0ac3851441a34865b8a043b46be0065a" dependencies = [ "pest", "sha2 0.10.9", @@ -11429,7 +11470,7 @@ checksum = "7edddbd0b52d732b21ad9a5fab5c704c14cd949e5e9a1ec5929a24fded1b904c" [[package]] name = "polkadot-approval-distribution" version = "24.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "futures", "futures-timer", @@ -11447,7 +11488,7 @@ dependencies = [ [[package]] name = "polkadot-availability-bitfield-distribution" version = "24.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "futures", "futures-timer", @@ -11462,7 +11503,7 @@ dependencies = [ [[package]] name = "polkadot-availability-distribution" version = "24.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "fatality", "futures", @@ -11485,7 +11526,7 @@ dependencies = [ [[package]] name = 
"polkadot-availability-recovery" version = "24.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "async-trait", "fatality", @@ -11518,7 +11559,7 @@ dependencies = [ [[package]] name = "polkadot-cli" version = "25.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "clap", "frame-benchmarking-cli", @@ -11542,7 +11583,7 @@ dependencies = [ [[package]] name = "polkadot-collator-protocol" version = "24.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "bitvec", "fatality", @@ -11565,7 +11606,7 @@ dependencies = [ [[package]] name = "polkadot-core-primitives" version = "18.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "parity-scale-codec", "scale-info", @@ -11576,7 +11617,7 @@ dependencies = [ [[package]] name = "polkadot-dispute-distribution" version = "24.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "fatality", "futures", @@ -11598,7 +11639,7 @@ dependencies = [ [[package]] name = "polkadot-erasure-coding" version = "20.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "parity-scale-codec", "polkadot-node-primitives", @@ -11612,7 +11653,7 @@ dependencies = [ [[package]] name = "polkadot-gossip-support" version = "24.0.1" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "futures", "futures-timer", @@ -11625,7 +11666,7 @@ dependencies = [ "sc-network", "sp-application-crypto", "sp-core", - "sp-crypto-hashing 0.1.0 (git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234)", + "sp-crypto-hashing 0.1.0 (git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80)", "sp-keystore", "tracing-gum", ] @@ -11633,7 +11674,7 @@ dependencies = [ [[package]] name = 
"polkadot-network-bridge" version = "24.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "always-assert", "async-trait", @@ -11641,7 +11682,7 @@ dependencies = [ "fatality", "futures", "parity-scale-codec", - "parking_lot 0.12.4", + "parking_lot 0.12.5", "polkadot-node-metrics", "polkadot-node-network-protocol", "polkadot-node-subsystem", @@ -11656,7 +11697,7 @@ dependencies = [ [[package]] name = "polkadot-node-collation-generation" version = "24.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "futures", "parity-scale-codec", @@ -11674,7 +11715,7 @@ dependencies = [ [[package]] name = "polkadot-node-core-approval-voting" version = "24.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "async-trait", "bitvec", @@ -11706,7 +11747,7 @@ dependencies = [ [[package]] name = "polkadot-node-core-approval-voting-parallel" version = "0.7.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "async-trait", "futures", @@ -11730,7 +11771,7 @@ dependencies = [ [[package]] name = "polkadot-node-core-av-store" version = "24.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "bitvec", "futures", @@ -11749,7 +11790,7 @@ dependencies = [ [[package]] name = "polkadot-node-core-backing" version = "24.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "bitvec", "fatality", @@ -11770,7 +11811,7 @@ dependencies = [ [[package]] name = "polkadot-node-core-bitfield-signing" version = "24.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "futures", "polkadot-node-subsystem", @@ -11785,7 +11826,7 @@ dependencies = [ [[package]] name = "polkadot-node-core-candidate-validation" version = "24.0.0" -source = 
"git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "async-trait", "futures", @@ -11807,7 +11848,7 @@ dependencies = [ [[package]] name = "polkadot-node-core-chain-api" version = "24.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "futures", "polkadot-node-metrics", @@ -11821,7 +11862,7 @@ dependencies = [ [[package]] name = "polkadot-node-core-chain-selection" version = "24.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "futures", "futures-timer", @@ -11837,7 +11878,7 @@ dependencies = [ [[package]] name = "polkadot-node-core-dispute-coordinator" version = "24.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "fatality", "futures", @@ -11855,7 +11896,7 @@ dependencies = [ [[package]] name = "polkadot-node-core-parachains-inherent" version = "24.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "async-trait", "futures", @@ -11872,7 +11913,7 @@ dependencies = [ [[package]] name = "polkadot-node-core-prospective-parachains" version = "23.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "fatality", "futures", @@ -11886,7 +11927,7 @@ dependencies = [ [[package]] name = "polkadot-node-core-provisioner" version = "24.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "bitvec", "fatality", @@ -11903,7 +11944,7 @@ dependencies = [ [[package]] name = "polkadot-node-core-pvf" version = "24.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "always-assert", "array-bytes 6.2.3", @@ -11931,7 
+11972,7 @@ dependencies = [ [[package]] name = "polkadot-node-core-pvf-checker" version = "24.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "futures", "polkadot-node-subsystem", @@ -11944,7 +11985,7 @@ dependencies = [ [[package]] name = "polkadot-node-core-pvf-common" version = "20.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "cpu-time", "futures", @@ -11959,7 +12000,7 @@ dependencies = [ "sc-executor-wasmtime", "seccompiler", "sp-core", - "sp-crypto-hashing 0.1.0 (git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234)", + "sp-crypto-hashing 0.1.0 (git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80)", "sp-externalities", "sp-io", "sp-tracing", @@ -11970,7 +12011,7 @@ dependencies = [ [[package]] name = "polkadot-node-core-runtime-api" version = "24.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "futures", "polkadot-node-metrics", @@ -11985,7 +12026,7 @@ dependencies = [ [[package]] name = "polkadot-node-metrics" version = "24.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "bs58", "futures", @@ -12002,7 +12043,7 @@ dependencies = [ [[package]] name = "polkadot-node-network-protocol" version = "24.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "async-channel 1.9.0", "async-trait", @@ -12027,7 +12068,7 @@ dependencies = [ [[package]] name = "polkadot-node-primitives" version = "20.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "bitvec", "bounded-vec", @@ -12051,7 +12092,7 @@ dependencies = [ [[package]] name = "polkadot-node-subsystem" version = "24.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "polkadot-node-subsystem-types", 
"polkadot-overseer", @@ -12060,7 +12101,7 @@ dependencies = [ [[package]] name = "polkadot-node-subsystem-types" version = "24.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "async-trait", "derive_more 0.99.20", @@ -12088,7 +12129,7 @@ dependencies = [ [[package]] name = "polkadot-node-subsystem-util" version = "24.0.1" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "fatality", "futures", @@ -12096,7 +12137,7 @@ dependencies = [ "kvdb", "parity-db", "parity-scale-codec", - "parking_lot 0.12.4", + "parking_lot 0.12.5", "polkadot-erasure-coding", "polkadot-node-metrics", "polkadot-node-network-protocol", @@ -12119,7 +12160,7 @@ dependencies = [ [[package]] name = "polkadot-omni-node-lib" version = "0.7.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "async-trait", "clap", @@ -12205,7 +12246,7 @@ dependencies = [ [[package]] name = "polkadot-overseer" version = "24.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "async-trait", "futures", @@ -12225,7 +12266,7 @@ dependencies = [ [[package]] name = "polkadot-parachain-primitives" version = "17.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "bounded-collections 0.2.4", "derive_more 0.99.20", @@ -12241,7 +12282,7 @@ dependencies = [ [[package]] name = "polkadot-primitives" version = "19.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "bitvec", "bounded-collections 0.2.4", @@ -12270,7 +12311,7 @@ dependencies = [ [[package]] name = "polkadot-rpc" version = "25.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "jsonrpsee", "mmr-rpc", @@ -12303,7 +12344,7 @@ dependencies = [ [[package]] name = "polkadot-runtime-common" version = "20.0.0" -source = 
"git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "bitvec", "frame-benchmarking", @@ -12353,7 +12394,7 @@ dependencies = [ [[package]] name = "polkadot-runtime-metrics" version = "21.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "bs58", "frame-benchmarking", @@ -12365,7 +12406,7 @@ dependencies = [ [[package]] name = "polkadot-runtime-parachains" version = "20.0.2" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "bitflags 1.3.2", "bitvec", @@ -12413,7 +12454,7 @@ dependencies = [ [[package]] name = "polkadot-sdk" version = "2506.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "assets-common", "bridge-hub-common", @@ -12468,7 +12509,7 @@ dependencies = [ "pallet-collator-selection", "pallet-collective", "pallet-collective-content", - "pallet-contracts", + "pallet-contracts 41.0.0", "pallet-contracts-mock-network", "pallet-conviction-voting", "pallet-core-fellowship", @@ -12571,7 +12612,7 @@ dependencies = [ [[package]] name = "polkadot-sdk-frame" version = "0.10.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "docify", "frame-benchmarking", @@ -12606,7 +12647,7 @@ dependencies = [ [[package]] name = "polkadot-service" version = "25.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "async-trait", "frame-benchmarking", @@ -12623,7 +12664,7 @@ dependencies = [ "pallet-transaction-payment-rpc-runtime-api", "parity-db", "parity-scale-codec", - "parking_lot 0.12.4", + "parking_lot 0.12.5", "polkadot-approval-distribution", "polkadot-availability-bitfield-distribution", "polkadot-availability-distribution", @@ -12714,7 +12755,7 @@ dependencies = [ [[package]] name = "polkadot-statement-distribution" version = "24.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" 
dependencies = [ "bitvec", "fatality", @@ -12734,7 +12775,7 @@ dependencies = [ [[package]] name = "polkadot-statement-table" version = "20.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "parity-scale-codec", "polkadot-primitives", @@ -12939,7 +12980,7 @@ dependencies = [ "hermit-abi 0.5.2", "pin-project-lite", "rustix 1.1.2", - "windows-sys 0.61.1", + "windows-sys 0.61.2", ] [[package]] @@ -13007,7 +13048,7 @@ dependencies = [ [[package]] name = "precompile-utils" version = "0.1.0" -source = "git+https://github.com/opentensor/frontier?rev=a741e4695c4e864b443482a03e82529d761e5064#a741e4695c4e864b443482a03e82529d761e5064" +source = "git+https://github.com/opentensor/frontier?rev=e31d47f83a64c361ecf0fd02bf567de6db9bda43#e31d47f83a64c361ecf0fd02bf567de6db9bda43" dependencies = [ "environmental", "evm", @@ -13031,14 +13072,14 @@ dependencies = [ [[package]] name = "precompile-utils-macro" version = "0.1.0" -source = "git+https://github.com/opentensor/frontier?rev=a741e4695c4e864b443482a03e82529d761e5064#a741e4695c4e864b443482a03e82529d761e5064" +source = "git+https://github.com/opentensor/frontier?rev=e31d47f83a64c361ecf0fd02bf567de6db9bda43#e31d47f83a64c361ecf0fd02bf567de6db9bda43" dependencies = [ "case", "num_enum", "prettyplease", "proc-macro2", "quote", - "sp-crypto-hashing 0.1.0 (git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234)", + "sp-crypto-hashing 0.1.0 (git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80)", "syn 2.0.106", ] @@ -13220,7 +13261,7 @@ dependencies = [ "proc-macro2", "quote", "regex", - "sp-crypto-hashing 0.1.0 (git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234)", + "sp-crypto-hashing 0.1.0 (git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80)", "syn 2.0.106", ] @@ -13234,7 +13275,7 @@ dependencies = [ "fnv", "lazy_static", "memchr", - "parking_lot 0.12.4", + "parking_lot 0.12.5", "thiserror 1.0.69", ] @@ -13246,7 +13287,7 @@ checksum = "504ee9ff529add891127c4827eb481bd69dc0ebc72e9a682e187db4caa60c3ca" dependencies = [ "dtoa", "itoa", - "parking_lot 0.12.4", + "parking_lot 0.12.5", "prometheus-client-derive-encode", ] @@ -13275,7 +13316,7 @@ dependencies = [ "rand 0.9.2", "rand_chacha 0.9.0", "rand_xorshift", - "regex-syntax", + "regex-syntax 0.8.6", "rusty-fork", "tempfile", "unarray", @@ -13358,9 +13399,9 @@ dependencies = [ [[package]] name = "psm" -version = "0.1.26" +version = "0.1.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e944464ec8536cd1beb0bbfd96987eb5e3b72f2ecdafdc5c769a37f1fa2ae1f" +checksum = "e66fcd288453b748497d8fb18bccc83a16b0518e3906d4b8df0a8d42d93dbb1c" dependencies = [ "cc", ] @@ -13632,9 +13673,9 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.5.17" +version = "0.5.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5407465600fb0548f1442edf71dd20683c6ed326200ace4b1ef0763521bb3b77" +checksum = "ed2bf2547551a7053d6fdfafda3f938979645c44812fbfcda098faae3f1a362d" dependencies = [ "bitflags 2.9.4", ] @@ -13715,8 +13756,17 @@ checksum = "8b5288124840bee7b386bc413c487869b360b2b4ec421ea56425128692f2a82c" dependencies = [ 
"aho-corasick", "memchr", - "regex-automata", - "regex-syntax", + "regex-automata 0.4.11", + "regex-syntax 0.8.6", +] + +[[package]] +name = "regex-automata" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" +dependencies = [ + "regex-syntax 0.6.29", ] [[package]] @@ -13727,9 +13777,15 @@ checksum = "833eb9ce86d40ef33cb1306d8accf7bc8ec2bfea4355cbdebb3df68b40925cad" dependencies = [ "aho-corasick", "memchr", - "regex-syntax", + "regex-syntax 0.8.6", ] +[[package]] +name = "regex-syntax" +version = "0.6.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" + [[package]] name = "regex-syntax" version = "0.8.6" @@ -13835,7 +13891,7 @@ dependencies = [ [[package]] name = "rococo-runtime" version = "24.0.1" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "binary-merkle-tree", "bitvec", @@ -13933,7 +13989,7 @@ dependencies = [ [[package]] name = "rococo-runtime-constants" version = "21.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "frame-support", "polkadot-primitives", @@ -14136,7 +14192,7 @@ dependencies = [ "errno", "libc", "linux-raw-sys 0.11.0", - "windows-sys 0.61.1", + "windows-sys 0.61.2", ] [[package]] @@ -14232,9 +14288,9 @@ checksum = "b39cdef0fa800fc44525c84ccb54a029961a8215f9619753635a9c0d2538d46d" [[package]] name = "rusty-fork" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb3dcc6e454c328bb824492db107ab7c0ae8fcffe4ad210136ef014458c1bc4f" +checksum = "cc6bf79ff24e648f6da1f8d1f011e9cac26491b619e6b9280f2b47f1774e6ee2" dependencies = [ "fnv", "quick-error", @@ -14329,7 +14385,7 @@ dependencies = [ [[package]] name = "sc-allocator" version = "32.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "log", "sp-core", @@ -14340,7 +14396,7 @@ dependencies = [ [[package]] name = "sc-authority-discovery" version = "0.51.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "async-trait", "futures", @@ -14371,7 +14427,7 @@ dependencies = [ [[package]] name = "sc-basic-authorship" version = "0.50.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = 
"git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "futures", "log", @@ -14392,7 +14448,7 @@ dependencies = [ [[package]] name = "sc-block-builder" version = "0.45.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "parity-scale-codec", "sp-api", @@ -14407,7 +14463,7 @@ dependencies = [ [[package]] name = "sc-chain-spec" version = "44.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "array-bytes 6.2.3", "clap", @@ -14423,7 +14479,7 @@ dependencies = [ "serde_json", "sp-blockchain", "sp-core", - "sp-crypto-hashing 0.1.0 (git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234)", + "sp-crypto-hashing 0.1.0 (git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80)", "sp-genesis-builder", "sp-io", "sp-runtime", @@ -14434,7 +14490,7 @@ dependencies = [ [[package]] name = "sc-chain-spec-derive" version = "12.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "proc-macro-crate 3.4.0", "proc-macro2", @@ -14445,7 +14501,7 @@ dependencies = [ [[package]] name = "sc-cli" version = "0.53.1" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "array-bytes 6.2.3", "chrono", @@ -14487,13 +14543,13 @@ dependencies = [ [[package]] name = "sc-client-api" version = "40.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "fnv", "futures", "log", "parity-scale-codec", - "parking_lot 0.12.4", + "parking_lot 0.12.5", "sc-executor", "sc-transaction-pool-api", "sc-utils", @@ -14513,7 +14569,7 @@ dependencies = [ [[package]] name = "sc-client-db" version = "0.47.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "hash-db", "kvdb", @@ -14523,7 +14579,7 @@ dependencies = [ "log", "parity-db", "parity-scale-codec", - "parking_lot 0.12.4", + "parking_lot 0.12.5", "sc-client-api", "sc-state-db", "schnellru", @@ -14541,13 +14597,13 @@ dependencies = [ [[package]] 
name = "sc-consensus" version = "0.50.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "async-trait", "futures", "log", "mockall", - "parking_lot 0.12.4", + "parking_lot 0.12.5", "sc-client-api", "sc-network-types", "sc-utils", @@ -14564,7 +14620,7 @@ dependencies = [ [[package]] name = "sc-consensus-aura" version = "0.51.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "async-trait", "futures", @@ -14593,7 +14649,7 @@ dependencies = [ [[package]] name = "sc-consensus-babe" version = "0.51.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "async-trait", "fork-tree", @@ -14603,7 +14659,7 @@ dependencies = [ "num-rational", "num-traits", "parity-scale-codec", - "parking_lot 0.12.4", + "parking_lot 0.12.5", "sc-client-api", "sc-consensus", "sc-consensus-epochs", @@ -14618,7 +14674,7 @@ dependencies = [ "sp-consensus-babe", "sp-consensus-slots", "sp-core", - "sp-crypto-hashing 0.1.0 (git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234)", + "sp-crypto-hashing 0.1.0 (git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80)", "sp-inherents", "sp-keystore", "sp-runtime", @@ -14629,7 +14685,7 @@ dependencies = [ [[package]] name = "sc-consensus-babe-rpc" version = "0.51.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "futures", "jsonrpsee", @@ -14651,7 +14707,7 @@ dependencies = [ [[package]] name = "sc-consensus-beefy" version = "30.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "array-bytes 6.2.3", "async-channel 1.9.0", @@ -14659,7 +14715,7 @@ dependencies = [ "futures", "log", "parity-scale-codec", - "parking_lot 0.12.4", + "parking_lot 0.12.5", "sc-client-api", "sc-consensus", "sc-network", @@ -14685,13 +14741,13 @@ dependencies = [ [[package]] name = "sc-consensus-beefy-rpc" version = "30.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "futures", "jsonrpsee", "log", "parity-scale-codec", - "parking_lot 0.12.4", + 
"parking_lot 0.12.5", "sc-consensus-beefy", "sc-rpc", "serde", @@ -14705,7 +14761,7 @@ dependencies = [ [[package]] name = "sc-consensus-epochs" version = "0.50.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "fork-tree", "parity-scale-codec", @@ -14718,7 +14774,7 @@ dependencies = [ [[package]] name = "sc-consensus-grandpa" version = "0.36.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "ahash", "array-bytes 6.2.3", @@ -14730,7 +14786,7 @@ dependencies = [ "futures-timer", "log", "parity-scale-codec", - "parking_lot 0.12.4", + "parking_lot 0.12.5", "rand 0.8.5", "sc-block-builder", "sc-chain-spec", @@ -14752,7 +14808,7 @@ dependencies = [ "sp-consensus", "sp-consensus-grandpa", "sp-core", - "sp-crypto-hashing 0.1.0 (git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234)", + "sp-crypto-hashing 0.1.0 (git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80)", "sp-keystore", "sp-runtime", "substrate-prometheus-endpoint", @@ -14762,7 +14818,7 @@ dependencies = [ [[package]] name = "sc-consensus-grandpa-rpc" version = "0.36.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "finality-grandpa", "futures", @@ -14782,7 +14838,7 @@ dependencies = [ [[package]] name = "sc-consensus-manual-seal" version = "0.52.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "assert_matches", "async-trait", @@ -14817,7 +14873,7 @@ dependencies = [ [[package]] name = "sc-consensus-slots" version = "0.50.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "async-trait", "futures", @@ -14840,10 +14896,10 @@ dependencies = [ [[package]] name = "sc-executor" version = "0.43.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "parity-scale-codec", - "parking_lot 0.12.4", + "parking_lot 0.12.5", "sc-executor-common", "sc-executor-polkavm", "sc-executor-wasmtime", @@ -14863,7 +14919,7 @@ dependencies = [ [[package]] name = "sc-executor-common" version = "0.39.0" 
-source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "polkavm 0.24.0", "sc-allocator", @@ -14876,7 +14932,7 @@ dependencies = [ [[package]] name = "sc-executor-polkavm" version = "0.36.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "log", "polkavm 0.24.0", @@ -14887,11 +14943,11 @@ dependencies = [ [[package]] name = "sc-executor-wasmtime" version = "0.39.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "anyhow", "log", - "parking_lot 0.12.4", + "parking_lot 0.12.5", "rustix 0.36.17", "sc-allocator", "sc-executor-common", @@ -14903,7 +14959,7 @@ dependencies = [ [[package]] name = "sc-informant" version = "0.50.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "console", "futures", @@ -14919,10 +14975,10 @@ dependencies = [ [[package]] name = "sc-keystore" version = "36.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "array-bytes 6.2.3", - "parking_lot 0.12.4", + "parking_lot 0.12.5", "serde_json", "sp-application-crypto", "sp-core", @@ -14933,7 +14989,7 @@ dependencies = [ [[package]] name = "sc-mixnet" version = "0.21.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "array-bytes 6.2.3", "arrayvec 0.7.6", @@ -14944,7 +15000,7 @@ dependencies = [ "log", "mixnet", "parity-scale-codec", - "parking_lot 0.12.4", + "parking_lot 0.12.5", "sc-client-api", "sc-network", "sc-network-types", @@ -14961,7 +15017,7 @@ dependencies = [ [[package]] name = "sc-network" version = "0.51.1" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "array-bytes 6.2.3", "async-channel 1.9.0", @@ -14980,7 +15036,7 @@ dependencies = [ "log", "mockall", "parity-scale-codec", - "parking_lot 0.12.4", + "parking_lot 0.12.5", "partial_sort", "pin-project", "prost 0.12.6", @@ -15011,7 
+15067,7 @@ dependencies = [ [[package]] name = "sc-network-common" version = "0.49.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "bitflags 1.3.2", "parity-scale-codec", @@ -15021,7 +15077,7 @@ dependencies = [ [[package]] name = "sc-network-gossip" version = "0.51.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "ahash", "futures", @@ -15040,7 +15096,7 @@ dependencies = [ [[package]] name = "sc-network-light" version = "0.50.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "array-bytes 6.2.3", "async-channel 1.9.0", @@ -15061,7 +15117,7 @@ dependencies = [ [[package]] name = "sc-network-statement" version = "0.33.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "array-bytes 6.2.3", "async-channel 1.9.0", @@ -15081,7 +15137,7 @@ dependencies = [ [[package]] name = "sc-network-sync" version = "0.50.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "array-bytes 6.2.3", "async-channel 1.9.0", @@ -15116,7 +15172,7 @@ dependencies = [ [[package]] name = "sc-network-transactions" version = "0.50.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "array-bytes 6.2.3", "futures", @@ -15135,7 +15191,7 @@ dependencies = [ [[package]] name = "sc-network-types" version = "0.17.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "bs58", "bytes", @@ -15156,7 +15212,7 @@ dependencies = [ [[package]] name = "sc-offchain" version = "46.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "bytes", 
"fnv", @@ -15169,7 +15225,7 @@ dependencies = [ "num_cpus", "once_cell", "parity-scale-codec", - "parking_lot 0.12.4", + "parking_lot 0.12.5", "rand 0.8.5", "rustls", "sc-client-api", @@ -15190,7 +15246,7 @@ dependencies = [ [[package]] name = "sc-proposer-metrics" version = "0.20.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "log", "substrate-prometheus-endpoint", @@ -15199,13 +15255,13 @@ dependencies = [ [[package]] name = "sc-rpc" version = "46.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "futures", "jsonrpsee", "log", "parity-scale-codec", - "parking_lot 0.12.4", + "parking_lot 0.12.5", "sc-block-builder", "sc-chain-spec", "sc-client-api", @@ -15231,7 +15287,7 @@ dependencies = [ [[package]] name = "sc-rpc-api" version = "0.50.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "jsonrpsee", "parity-scale-codec", @@ -15251,7 +15307,7 @@ dependencies = [ [[package]] name = "sc-rpc-server" version = "23.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "dyn-clone", "forwarded-header-value", @@ -15275,7 +15331,7 @@ dependencies = [ [[package]] name = "sc-rpc-spec-v2" version = "0.51.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "array-bytes 6.2.3", "futures", @@ -15285,7 +15341,7 @@ dependencies = [ "jsonrpsee", "log", "parity-scale-codec", - "parking_lot 0.12.4", + "parking_lot 0.12.5", "rand 0.8.5", "sc-chain-spec", "sc-client-api", @@ -15308,13 +15364,13 @@ dependencies = [ [[package]] name = "sc-runtime-utilities" version = "0.3.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "parity-scale-codec", "sc-executor", "sc-executor-common", "sp-core", - "sp-crypto-hashing 0.1.0 (git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234)", + "sp-crypto-hashing 0.1.0 (git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80)", "sp-state-machine", "sp-wasm-interface", "thiserror 1.0.69", @@ -15323,7 +15379,7 @@ dependencies = 
[ [[package]] name = "sc-service" version = "0.52.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "async-trait", "directories", @@ -15333,7 +15389,7 @@ dependencies = [ "jsonrpsee", "log", "parity-scale-codec", - "parking_lot 0.12.4", + "parking_lot 0.12.5", "pin-project", "rand 0.8.5", "sc-chain-spec", @@ -15387,22 +15443,22 @@ dependencies = [ [[package]] name = "sc-state-db" version = "0.39.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "log", "parity-scale-codec", - "parking_lot 0.12.4", + "parking_lot 0.12.5", "sp-core", ] [[package]] name = "sc-statement-store" version = "22.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "log", "parity-db", - "parking_lot 0.12.4", + "parking_lot 0.12.5", "sc-client-api", "sc-keystore", "sp-api", @@ -15417,7 +15473,7 @@ dependencies = [ [[package]] name = "sc-storage-monitor" version = "0.25.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "clap", "fs4", @@ -15430,7 +15486,7 @@ dependencies = [ [[package]] name = "sc-sync-state-rpc" version = "0.51.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "jsonrpsee", "parity-scale-codec", @@ -15449,7 +15505,7 @@ dependencies = [ [[package]] name = "sc-sysinfo" version = "43.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "derive_more 0.99.20", "futures", @@ -15462,20 +15518,20 @@ dependencies = [ "serde", "serde_json", "sp-core", - "sp-crypto-hashing 0.1.0 (git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234)", + "sp-crypto-hashing 0.1.0 (git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80)", "sp-io", ] [[package]] name = "sc-telemetry" version = "29.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = 
"git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "chrono", "futures", "libp2p", "log", - "parking_lot 0.12.4", + "parking_lot 0.12.5", "pin-project", "rand 0.8.5", "sc-utils", @@ -15488,7 +15544,7 @@ dependencies = [ [[package]] name = "sc-tracing" version = "40.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "chrono", "console", @@ -15496,7 +15552,7 @@ dependencies = [ "libc", "log", "parity-scale-codec", - "parking_lot 0.12.4", + "parking_lot 0.12.5", "rustc-hash 1.1.0", "sc-client-api", "sc-tracing-proc-macro", @@ -15510,13 +15566,13 @@ dependencies = [ "thiserror 1.0.69", "tracing", "tracing-log", - "tracing-subscriber 0.3.20", + "tracing-subscriber 0.3.18", ] [[package]] name = "sc-tracing-proc-macro" version = "11.1.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "proc-macro-crate 3.4.0", "proc-macro2", @@ -15527,7 +15583,7 @@ dependencies = [ [[package]] name = "sc-transaction-pool" version = "40.1.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "async-trait", "futures", @@ -15536,7 +15592,7 @@ dependencies = [ "itertools 0.11.0", "linked-hash-map", "parity-scale-codec", - "parking_lot 0.12.4", + "parking_lot 0.12.5", "sc-client-api", "sc-transaction-pool-api", "sc-utils", @@ -15544,7 +15600,7 @@ dependencies = [ "sp-api", "sp-blockchain", "sp-core", - "sp-crypto-hashing 0.1.0 (git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234)", + "sp-crypto-hashing 0.1.0 (git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80)", "sp-runtime", "sp-tracing", "sp-transaction-pool", @@ -15558,7 +15614,7 @@ dependencies = [ [[package]] name = "sc-transaction-pool-api" version = "40.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "async-trait", "futures", @@ -15575,13 +15631,13 @@ dependencies = [ [[package]] name = "sc-utils" version = "19.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "async-channel 1.9.0", "futures", "futures-timer", "log", - "parking_lot 0.12.4", + "parking_lot 0.12.5", "prometheus", "sp-arithmetic", ] @@ -15727,7 +15783,7 @@ version = "0.1.28" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "891d81b926048e76efe18581bf793546b4c0eaf8448d72be8de2bbee5fd166e1" dependencies = [ - "windows-sys 0.61.1", + "windows-sys 0.61.2", ] [[package]] @@ -16072,15 +16128,14 @@ dependencies = [ [[package]] name = "serde_with" -version = "3.14.1" +version = "3.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c522100790450cf78eeac1507263d0a350d4d5b30df0c8e1fe051a10c22b376e" +checksum = "6093cd8c01b25262b84927e0f7151692158fab02d961e04c979d3903eba7ecc5" dependencies = [ "base64 0.22.1", "chrono", "hex", - "serde", - "serde_derive", + "serde_core", "serde_json", "serde_with_macros", "time", @@ -16088,9 +16143,9 @@ dependencies = [ [[package]] name = "serde_with_macros" -version = "3.14.1" +version = "3.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "327ada00f7d64abaac1e55a6911e90cf665aa051b9a561c7006c157f4633135e" +checksum = "a7e6c180db0816026a61afa1cff5344fb7ebded7e4d3062772179f2501481c27" dependencies = [ "darling 0.21.3", "proc-macro2", @@ -16274,7 +16329,7 @@ checksum = "826167069c09b99d56f31e9ae5c99049e932a98c9dc2dac47645b08dbbf76ba7" [[package]] name = "slot-range-helper" version = "18.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "enumn", "parity-scale-codec", @@ -16462,7 +16517,7 @@ dependencies = [ "log", "lru 0.11.1", "no-std-net", - "parking_lot 0.12.4", + "parking_lot 0.12.5", "pin-project", "rand 0.8.5", "rand_chacha 0.3.1", @@ -16498,7 +16553,7 @@ dependencies = [ "itertools 0.13.0", "log", "lru 0.12.5", - "parking_lot 0.12.4", + "parking_lot 0.12.5", "pin-project", "rand 0.8.5", "rand_chacha 0.3.1", @@ -16537,7 +16592,7 @@ dependencies = [ [[package]] name = "snowbridge-core" version = "0.14.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "bp-relayers", "frame-support", @@ -16621,7 +16676,7 @@ dependencies = [ [[package]] name = "sp-api" version = "37.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "docify", "hash-db", @@ -16643,7 +16698,7 @@ dependencies = [ [[package]] name = "sp-api-proc-macro" version = "23.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "Inflector", "blake2 0.10.6", @@ -16657,7 +16712,7 @@ dependencies = [ [[package]] name = "sp-application-crypto" version = "41.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = 
"git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "parity-scale-codec", "scale-info", @@ -16669,7 +16724,7 @@ dependencies = [ [[package]] name = "sp-arithmetic" version = "27.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "docify", "integer-sqrt", @@ -16692,7 +16747,7 @@ dependencies = [ [[package]] name = "sp-authority-discovery" version = "37.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "parity-scale-codec", "scale-info", @@ -16704,7 +16759,7 @@ dependencies = [ [[package]] name = "sp-block-builder" version = "37.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "sp-api", "sp-inherents", @@ -16714,11 +16769,11 @@ dependencies = [ [[package]] name = "sp-blockchain" version = "40.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "futures", "parity-scale-codec", - "parking_lot 0.12.4", + "parking_lot 0.12.5", "schnellru", "sp-api", "sp-consensus", @@ -16733,7 +16788,7 @@ dependencies = [ [[package]] name = "sp-consensus" version = "0.43.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "async-trait", "futures", @@ -16747,7 +16802,7 @@ dependencies = [ [[package]] name = "sp-consensus-aura" version = "0.43.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "async-trait", "parity-scale-codec", @@ -16763,7 +16818,7 @@ dependencies = [ [[package]] name = "sp-consensus-babe" version = "0.43.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "async-trait", "parity-scale-codec", @@ -16781,7 +16836,7 @@ dependencies = [ [[package]] name = "sp-consensus-beefy" version = "25.0.0" -source = 
"git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "parity-scale-codec", "scale-info", @@ -16789,7 +16844,7 @@ dependencies = [ "sp-api", "sp-application-crypto", "sp-core", - "sp-crypto-hashing 0.1.0 (git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234)", + "sp-crypto-hashing 0.1.0 (git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80)", "sp-io", "sp-keystore", "sp-mmr-primitives", @@ -16801,7 +16856,7 @@ dependencies = [ [[package]] name = "sp-consensus-grandpa" version = "24.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "finality-grandpa", "log", @@ -16818,7 +16873,7 @@ dependencies = [ [[package]] name = "sp-consensus-slots" version = "0.43.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "parity-scale-codec", "scale-info", @@ -16829,7 +16884,7 @@ dependencies = [ [[package]] name = "sp-core" version = "37.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "ark-vrf", "array-bytes 6.2.3", @@ -16850,7 +16905,7 @@ dependencies = [ "merlin", "parity-bip39", "parity-scale-codec", - "parking_lot 0.12.4", + "parking_lot 0.12.5", "paste", "primitive-types 0.13.1", "rand 0.8.5", @@ -16860,7 +16915,7 @@ dependencies = [ "secrecy 0.8.0", "serde", "sha2 0.10.9", - "sp-crypto-hashing 0.1.0 (git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234)", + "sp-crypto-hashing 0.1.0 (git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80)", "sp-debug-derive", "sp-externalities", "sp-runtime-interface", @@ -16877,7 +16932,7 @@ dependencies = [ [[package]] name = "sp-crypto-ec-utils" version = "0.16.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "ark-bls12-377", "ark-bls12-377-ext", @@ -16911,7 +16966,7 @@ dependencies = [ [[package]] name = "sp-crypto-hashing" version = "0.1.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "blake2b_simd", "byteorder", @@ -16924,26 
+16979,26 @@ dependencies = [ [[package]] name = "sp-crypto-hashing-proc-macro" version = "0.1.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "quote", - "sp-crypto-hashing 0.1.0 (git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234)", + "sp-crypto-hashing 0.1.0 (git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80)", "syn 2.0.106", ] [[package]] name = "sp-database" version = "10.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "kvdb", - "parking_lot 0.12.4", + "parking_lot 0.12.5", ] [[package]] name = "sp-debug-derive" version = "14.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "proc-macro2", "quote", @@ -16953,7 +17008,7 @@ dependencies = [ [[package]] name = "sp-externalities" version = "0.30.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "environmental", "parity-scale-codec", @@ -16963,7 +17018,7 @@ dependencies = [ [[package]] name = "sp-genesis-builder" version = "0.18.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "parity-scale-codec", "scale-info", @@ -16975,7 +17030,7 @@ dependencies = [ [[package]] name = "sp-inherents" version = "37.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "async-trait", "impl-trait-for-tuples", @@ -16988,7 +17043,7 @@ dependencies = [ [[package]] name = "sp-io" version = "41.0.1" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "bytes", "docify", @@ -17000,7 +17055,7 @@ dependencies = [ "rustversion", "secp256k1 0.28.2", "sp-core", - "sp-crypto-hashing 0.1.0 (git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234)", + "sp-crypto-hashing 0.1.0 
(git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80)", "sp-externalities", "sp-keystore", "sp-runtime-interface", @@ -17014,7 +17069,7 @@ dependencies = [ [[package]] name = "sp-keyring" version = "42.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "sp-core", "sp-runtime", @@ -17024,10 +17079,10 @@ dependencies = [ [[package]] name = "sp-keystore" version = "0.43.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "parity-scale-codec", - "parking_lot 0.12.4", + "parking_lot 0.12.5", "sp-core", "sp-externalities", ] @@ -17035,7 +17090,7 @@ dependencies = [ [[package]] name = "sp-maybe-compressed-blob" version = "11.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "thiserror 1.0.69", "zstd 0.12.4", @@ -17044,7 +17099,7 @@ dependencies = [ [[package]] name = "sp-metadata-ir" version = "0.11.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "frame-metadata 23.0.0", "parity-scale-codec", @@ -17054,7 +17109,7 @@ dependencies = [ [[package]] name = "sp-mixnet" version = "0.15.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "parity-scale-codec", "scale-info", @@ -17065,7 +17120,7 @@ dependencies = [ [[package]] name = "sp-mmr-primitives" version = "37.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "log", "parity-scale-codec", @@ -17082,7 +17137,7 @@ dependencies = [ [[package]] name = "sp-npos-elections" version = "37.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "parity-scale-codec", "scale-info", @@ -17095,7 +17150,7 @@ dependencies = [ [[package]] name = "sp-offchain" version = "37.0.0" -source = 
"git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "sp-api", "sp-core", @@ -17105,7 +17160,7 @@ dependencies = [ [[package]] name = "sp-panic-handler" version = "13.0.2" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "backtrace", "regex", @@ -17114,7 +17169,7 @@ dependencies = [ [[package]] name = "sp-rpc" version = "35.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "rustc-hash 1.1.0", "serde", @@ -17124,7 +17179,7 @@ dependencies = [ [[package]] name = "sp-runtime" version = "42.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "binary-merkle-tree", "docify", @@ -17153,7 +17208,7 @@ dependencies = [ [[package]] name = "sp-runtime-interface" version = "30.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "bytes", "impl-trait-for-tuples", @@ -17172,7 +17227,7 @@ dependencies = [ [[package]] name = "sp-runtime-interface-proc-macro" version = "19.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "Inflector", "expander", @@ -17185,7 +17240,7 @@ dependencies = [ [[package]] name = "sp-session" version = "39.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "parity-scale-codec", "scale-info", @@ -17199,7 +17254,7 @@ dependencies = [ [[package]] name = "sp-staking" version = "39.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "impl-trait-for-tuples", "parity-scale-codec", @@ -17212,12 +17267,12 @@ dependencies = [ [[package]] name = "sp-state-machine" version = "0.46.0" -source = 
"git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "hash-db", "log", "parity-scale-codec", - "parking_lot 0.12.4", + "parking_lot 0.12.5", "rand 0.8.5", "smallvec", "sp-core", @@ -17232,7 +17287,7 @@ dependencies = [ [[package]] name = "sp-statement-store" version = "21.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "aes-gcm", "curve25519-dalek", @@ -17245,7 +17300,7 @@ dependencies = [ "sp-api", "sp-application-crypto", "sp-core", - "sp-crypto-hashing 0.1.0 (git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234)", + "sp-crypto-hashing 0.1.0 (git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80)", "sp-externalities", "sp-runtime", "sp-runtime-interface", @@ -17256,12 +17311,12 @@ dependencies = [ [[package]] name = "sp-std" version = "14.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" [[package]] name = "sp-storage" version = "22.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "impl-serde", "parity-scale-codec", @@ -17273,7 +17328,7 @@ dependencies = [ [[package]] name = "sp-timestamp" version = "37.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "async-trait", "parity-scale-codec", @@ -17285,18 +17340,18 @@ dependencies = [ [[package]] name = "sp-tracing" version = "17.1.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "parity-scale-codec", "tracing", "tracing-core", - "tracing-subscriber 0.3.20", + "tracing-subscriber 0.3.18", ] [[package]] name = "sp-transaction-pool" version = "37.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "sp-api", "sp-runtime", @@ -17305,7 +17360,7 @@ dependencies = [ [[package]] name = "sp-transaction-storage-proof" version = "37.0.0" -source = 
"git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "async-trait", "parity-scale-codec", @@ -17319,7 +17374,7 @@ dependencies = [ [[package]] name = "sp-trie" version = "40.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "ahash", "foldhash 0.1.5", @@ -17328,7 +17383,7 @@ dependencies = [ "memory-db", "nohash-hasher", "parity-scale-codec", - "parking_lot 0.12.4", + "parking_lot 0.12.5", "rand 0.8.5", "scale-info", "schnellru", @@ -17344,7 +17399,7 @@ dependencies = [ [[package]] name = "sp-version" version = "40.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "impl-serde", "parity-scale-codec", @@ -17361,7 +17416,7 @@ dependencies = [ [[package]] name = "sp-version-proc-macro" version = "15.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "parity-scale-codec", "proc-macro-warning", @@ -17373,7 +17428,7 @@ dependencies = [ [[package]] name = "sp-wasm-interface" version = "22.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "anyhow", "impl-trait-for-tuples", @@ -17385,7 +17440,7 @@ dependencies = [ [[package]] name = "sp-weights" version = "32.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "bounded-collections 0.2.4", "parity-scale-codec", @@ -17559,7 +17614,7 @@ checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" [[package]] name = "staging-chain-spec-builder" version = "12.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "clap", "docify", @@ -17572,7 +17627,7 @@ dependencies = [ [[package]] name = "staging-node-inspect" version = "0.29.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = 
"git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "clap", "parity-scale-codec", @@ -17590,7 +17645,7 @@ dependencies = [ [[package]] name = "staging-parachain-info" version = "0.21.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "cumulus-primitives-core", "frame-support", @@ -17603,7 +17658,7 @@ dependencies = [ [[package]] name = "staging-xcm" version = "17.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "array-bytes 6.2.3", "bounded-collections 0.2.4", @@ -17624,7 +17679,7 @@ dependencies = [ [[package]] name = "staging-xcm-builder" version = "21.1.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "environmental", "frame-support", @@ -17648,7 +17703,7 @@ dependencies = [ [[package]] name = "staging-xcm-executor" version = "20.0.1" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "environmental", "frame-benchmarking", @@ -17680,8 +17735,8 @@ dependencies = [ "bitflags 1.3.2", "cfg_aliases 0.2.1", "libc", - "parking_lot 0.12.4", - "parking_lot_core 0.9.11", + "parking_lot 0.12.5", + "parking_lot_core 0.9.12", "static_init_macro", "winapi", ] @@ -17760,7 +17815,7 @@ dependencies = [ [[package]] name = "substrate-bip39" version = "0.6.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "hmac 0.12.1", "pbkdf2", @@ -17785,7 +17840,7 @@ dependencies = [ [[package]] name = "substrate-build-script-utils" version = "11.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" [[package]] name = "substrate-fixed" @@ -17801,7 +17856,7 @@ dependencies = [ [[package]] name = "substrate-frame-rpc-system" version = "45.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "docify", 
"frame-system-rpc-runtime-api", @@ -17821,7 +17876,7 @@ dependencies = [ [[package]] name = "substrate-prometheus-endpoint" version = "0.17.6" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "http-body-util", "hyper 1.7.0", @@ -17835,7 +17890,7 @@ dependencies = [ [[package]] name = "substrate-state-trie-migration-rpc" version = "44.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "jsonrpsee", "parity-scale-codec", @@ -17862,7 +17917,7 @@ dependencies = [ [[package]] name = "substrate-wasm-builder" version = "27.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "array-bytes 6.2.3", "build-helper", @@ -17901,6 +17956,35 @@ dependencies = [ "walkdir", ] +[[package]] +name = "subtensor-chain-extensions" +version = "0.1.0" +dependencies = [ + "frame-support", + "frame-system", + "log", + "num_enum", + "pallet-balances", + "pallet-contracts 40.1.0", + "pallet-crowdloan", + "pallet-drand", + "pallet-preimage", + "pallet-scheduler", + "pallet-subtensor", + "pallet-subtensor-proxy", + "pallet-subtensor-swap", + "pallet-subtensor-utility", + "pallet-timestamp", + "parity-scale-codec", + "scale-info", + "sp-core", + "sp-io", + "sp-runtime", + "sp-std", + "subtensor-runtime-common", + "subtensor-swap-interface", +] + [[package]] name = "subtensor-custom-rpc" version = "0.0.2" @@ -18000,6 +18084,7 @@ dependencies = [ "parity-scale-codec", "scale-info", "substrate-fixed", + "subtensor-macros", "subtensor-runtime-common", ] @@ -18381,14 +18466,14 @@ dependencies = [ "getrandom 0.3.3", "once_cell", "rustix 1.1.2", - "windows-sys 0.61.1", + "windows-sys 0.61.2", ] [[package]] name = "termcolor" -version = "1.4.1" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06794f8f6c5c898b3275aebefa6b8a1cb24cd2c6c79397ab15774837a0bc5755" +checksum = "be55cf8942feac5c765c2c993422806843c9a9a45d4d5c407ad6dd2ea95eb9b6" dependencies = [ "winapi-util", ] @@ -18618,7 +18703,7 @@ dependencies = [ "io-uring", "libc", "mio", - "parking_lot 0.12.4", + "parking_lot 0.12.5", "pin-project-lite", "signal-hook-registry", "slab", @@ -18859,7 +18944,7 @@ dependencies = [ [[package]] name = "tracing-gum" version = "20.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "coarsetime", "polkadot-primitives", @@ -18870,7 +18955,7 @@ dependencies = [ [[package]] name = "tracing-gum-proc-macro" version = "5.0.0" -source = 
"git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "expander", "proc-macro-crate 3.4.0", @@ -18901,15 +18986,15 @@ dependencies = [ [[package]] name = "tracing-subscriber" -version = "0.3.20" +version = "0.3.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2054a14f5307d601f88daf0553e1cbf472acc4f2c51afab632431cdcd72124d5" +checksum = "ad0f048c97dbd9faa9b7df56362b8ebcaa52adb06b498c050d2f4e32f90a7a8b" dependencies = [ "matchers", "nu-ansi-term", "once_cell", - "parking_lot 0.12.4", - "regex-automata", + "parking_lot 0.12.5", + "regex", "sharded-slab", "smallvec", "thread_local", @@ -19055,9 +19140,15 @@ checksum = "f6ccf251212114b54433ec949fd6a7841275f9ada20dddd2f29e9ceea4501493" [[package]] name = "unicode-width" -version = "0.2.1" +version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a1a07cc7db3810833284e8d372ccdc6da29741639ecc70c9ec107df0fa6154c" +checksum = "7dd6e30e90baa6f72411720665d41d89b9a3d039dc45b8faea1ddd07f617f6af" + +[[package]] +name = "unicode-width" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b4ac048d71ede7ee76d585517add45da530660ef4390e49b098733c6e897f254" [[package]] name = "unicode-xid" @@ -19794,14 +19885,14 @@ version = "0.26.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "75c7f0ef91146ebfb530314f5f1d24528d7f0767efbfd31dce919275413e393e" dependencies = [ - "webpki-root-certs 1.0.2", + "webpki-root-certs 1.0.3", ] [[package]] name = "webpki-root-certs" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e4ffd8df1c57e87c325000a3d6ef93db75279dc3a231125aac571650f22b12a" +checksum = "05d651ec480de84b762e7be71e6efa7461699c19d9e2c272c8d93455f567786e" dependencies = [ "rustls-pki-types", ] @@ -19815,7 +19906,7 @@ checksum = "5f20c57d8d7db6d3b86154206ae5d8fba62dd39573114de97c2cb0578251f8e1" [[package]] name = "westend-runtime" version = "24.0.1" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "binary-merkle-tree", "bitvec", @@ -19922,7 +20013,7 @@ dependencies = [ [[package]] name = "westend-runtime-constants" version = "21.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "frame-support", "polkadot-primitives", @@ -19973,7 +20064,7 @@ version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c2a7b1c03c876122aa43f3020e6c3c3ee5c05081c9a00739faf7503aeba10d22" dependencies = [ - "windows-sys 0.61.1", + "windows-sys 0.61.2", ] [[package]] @@ -20023,22 +20114,22 @@ dependencies = [ [[package]] name = "windows-core" -version = "0.62.1" +version = "0.62.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"6844ee5416b285084d3d3fffd743b925a6c9385455f64f6d4fa3031c4c2749a9" +checksum = "b8e83a14d34d0623b51dce9581199302a221863196a1dde71a7663a4c2be9deb" dependencies = [ "windows-implement", "windows-interface", "windows-link", - "windows-result 0.4.0", + "windows-result 0.4.1", "windows-strings", ] [[package]] name = "windows-implement" -version = "0.60.1" +version = "0.60.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "edb307e42a74fb6de9bf3a02d9712678b22399c87e6fa869d6dfcd8c1b7754e0" +checksum = "053e2e040ab57b9dc951b72c264860db7eb3b0200ba345b4e4c3b14f67855ddf" dependencies = [ "proc-macro2", "quote", @@ -20047,9 +20138,9 @@ dependencies = [ [[package]] name = "windows-interface" -version = "0.59.2" +version = "0.59.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0abd1ddbc6964ac14db11c7213d6532ef34bd9aa042c2e5935f59d7908b46a5" +checksum = "3f316c4a2570ba26bbec722032c4099d8c8bc095efccdc15688708623367e358" dependencies = [ "proc-macro2", "quote", @@ -20058,9 +20149,9 @@ dependencies = [ [[package]] name = "windows-link" -version = "0.2.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45e46c0661abb7180e7b9c281db115305d49ca1709ab8242adf09666d2173c65" +checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5" [[package]] name = "windows-result" @@ -20073,18 +20164,18 @@ dependencies = [ [[package]] name = "windows-result" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7084dcc306f89883455a206237404d3eaf961e5bd7e0f312f7c91f57eb44167f" +checksum = "7781fa89eaf60850ac3d2da7af8e5242a5ea78d1a11c49bf2910bb5a73853eb5" dependencies = [ "windows-link", ] [[package]] name = "windows-strings" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7218c655a553b0bed4426cf54b20d7ba363ef543b52d515b3e48d7fd55318dda" +checksum = "7837d08f69c77cf6b07689544538e017c1bfcf57e34b4c0ff58e6c2cd3b37091" dependencies = [ "windows-link", ] @@ -20131,14 +20222,14 @@ version = "0.60.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f2f500e4d28234f72040990ec9d39e3a6b950f9f22d3dba18416c35882612bcb" dependencies = [ - "windows-targets 0.53.4", + "windows-targets 0.53.5", ] [[package]] name = "windows-sys" -version = "0.61.1" +version = "0.61.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f109e41dd4a3c848907eb83d5a42ea98b3769495597450cf6d153507b166f0f" +checksum = "ae137229bcbd6cdf0f7b80a31df61766145077ddf49416a728b02cb3921ff3fc" dependencies = [ "windows-link", ] @@ -20191,19 +20282,19 @@ dependencies = [ [[package]] name = "windows-targets" -version = "0.53.4" +version = "0.53.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d42b7b7f66d2a06854650af09cfdf8713e427a439c97ad65a6375318033ac4b" +checksum = "4945f9f551b88e0d65f3db0bc25c33b8acea4d9e41163edf90dcd0b19f9069f3" dependencies = [ "windows-link", - "windows_aarch64_gnullvm 0.53.0", - "windows_aarch64_msvc 0.53.0", - "windows_i686_gnu 0.53.0", - "windows_i686_gnullvm 0.53.0", - "windows_i686_msvc 0.53.0", - "windows_x86_64_gnu 0.53.0", - "windows_x86_64_gnullvm 0.53.0", - "windows_x86_64_msvc 0.53.0", + "windows_aarch64_gnullvm 0.53.1", + "windows_aarch64_msvc 0.53.1", + "windows_i686_gnu 0.53.1", + "windows_i686_gnullvm 0.53.1", + "windows_i686_msvc 0.53.1", + "windows_x86_64_gnu 0.53.1", + "windows_x86_64_gnullvm 
0.53.1", + "windows_x86_64_msvc 0.53.1", ] [[package]] @@ -20226,9 +20317,9 @@ checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" [[package]] name = "windows_aarch64_gnullvm" -version = "0.53.0" +version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86b8d5f90ddd19cb4a147a5fa63ca848db3df085e25fee3cc10b39b6eebae764" +checksum = "a9d8416fa8b42f5c947f8482c43e7d89e73a173cead56d044f6a56104a6d1b53" [[package]] name = "windows_aarch64_msvc" @@ -20250,9 +20341,9 @@ checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" [[package]] name = "windows_aarch64_msvc" -version = "0.53.0" +version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7651a1f62a11b8cbd5e0d42526e55f2c99886c77e007179efff86c2b137e66c" +checksum = "b9d782e804c2f632e395708e99a94275910eb9100b2114651e04744e9b125006" [[package]] name = "windows_i686_gnu" @@ -20274,9 +20365,9 @@ checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" [[package]] name = "windows_i686_gnu" -version = "0.53.0" +version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1dc67659d35f387f5f6c479dc4e28f1d4bb90ddd1a5d3da2e5d97b42d6272c3" +checksum = "960e6da069d81e09becb0ca57a65220ddff016ff2d6af6a223cf372a506593a3" [[package]] name = "windows_i686_gnullvm" @@ -20286,9 +20377,9 @@ checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" [[package]] name = "windows_i686_gnullvm" -version = "0.53.0" +version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ce6ccbdedbf6d6354471319e781c0dfef054c81fbc7cf83f338a4296c0cae11" +checksum = "fa7359d10048f68ab8b09fa71c3daccfb0e9b559aed648a8f95469c27057180c" [[package]] name = "windows_i686_msvc" @@ -20310,9 +20401,9 @@ checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" [[package]] name = "windows_i686_msvc" -version = "0.53.0" +version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "581fee95406bb13382d2f65cd4a908ca7b1e4c2f1917f143ba16efe98a589b5d" +checksum = "1e7ac75179f18232fe9c285163565a57ef8d3c89254a30685b57d83a38d326c2" [[package]] name = "windows_x86_64_gnu" @@ -20334,9 +20425,9 @@ checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" [[package]] name = "windows_x86_64_gnu" -version = "0.53.0" +version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e55b5ac9ea33f2fc1716d1742db15574fd6fc8dadc51caab1c16a3d3b4190ba" +checksum = "9c3842cdd74a865a8066ab39c8a7a473c0778a3f29370b5fd6b4b9aa7df4a499" [[package]] name = "windows_x86_64_gnullvm" @@ -20358,9 +20449,9 @@ checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" [[package]] name = "windows_x86_64_gnullvm" -version = "0.53.0" +version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a6e035dd0599267ce1ee132e51c27dd29437f63325753051e71dd9e42406c57" +checksum = "0ffa179e2d07eee8ad8f57493436566c7cc30ac536a3379fdf008f47f6bb7ae1" [[package]] name = "windows_x86_64_msvc" @@ -20382,9 +20473,9 @@ checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" [[package]] name = "windows_x86_64_msvc" -version = "0.53.0" +version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "271414315aff87387382ec3d271b52d7ae78726f5d44ac98b4f4030c91880486" +checksum = 
"d6bbff5f0aada427a1e5a6da5f1f98158182f26556f345ac9e04d36d0ebed650" [[package]] name = "winnow" @@ -20475,7 +20566,7 @@ dependencies = [ [[package]] name = "xcm-procedural" version = "11.0.2" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "Inflector", "proc-macro2", @@ -20486,7 +20577,7 @@ dependencies = [ [[package]] name = "xcm-runtime-apis" version = "0.8.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "frame-support", "parity-scale-codec", @@ -20500,7 +20591,7 @@ dependencies = [ [[package]] name = "xcm-simulator" version = "21.0.0" -source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234#7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" +source = "git+https://github.com/opentensor/polkadot-sdk.git?rev=81fa2c54e94f824eba7dabe9dffd063481cb2d80#81fa2c54e94f824eba7dabe9dffd063481cb2d80" dependencies = [ "frame-support", "frame-system", @@ -20542,7 +20633,7 @@ dependencies = [ "futures", "log", "nohash-hasher", - "parking_lot 0.12.4", + "parking_lot 0.12.5", "pin-project", "rand 0.8.5", "static_assertions", @@ -20557,7 +20648,7 @@ dependencies = [ "futures", "log", "nohash-hasher", - "parking_lot 0.12.4", + "parking_lot 0.12.5", "pin-project", "rand 0.9.2", "static_assertions", diff --git a/Cargo.toml b/Cargo.toml index 1d1a144bd9..6139004914 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -32,6 +32,7 @@ members = [ "primitives/*", "runtime", "support/*", + "chain-extensions", ] resolver = "2" @@ -40,6 +41,7 @@ edition = "2024" [workspace.lints.clippy] arithmetic-side-effects = "deny" +expect-used = "deny" indexing-slicing = "deny" manual_inspect = "allow" result_large_err = "allow" @@ -50,7 +52,6 @@ useless_conversion = "allow" # until polkadot is patched [workspace.dependencies] node-subtensor-runtime = { path = "runtime", default-features = false } pallet-admin-utils = { path = "pallets/admin-utils", default-features = false } -pallet-subtensor-collective = { path = "pallets/collective", default-features = false } pallet-commitments = { path = "pallets/commitments", default-features = false } pallet-registry = { path = "pallets/registry", default-features = false } pallet-crowdloan = { path = "pallets/crowdloan", default-features = false } @@ -68,6 +69,7 @@ subtensor-precompiles = { default-features = false, path = "precompiles" } subtensor-runtime-common = { default-features = false, path = "common" } subtensor-swap-interface = { default-features = false, path = "pallets/swap-interface" } subtensor-transaction-fee = { default-features = false, path = "pallets/transaction-fee" } +subtensor-chain-extensions = { default-features = false, path = "chain-extensions" } ed25519-dalek = { version = "2.1.0", default-features = false } async-trait = "0.1" @@ -91,6 +93,9 @@ serde_bytes = { version = "0.11.14", default-features = false } serde_json = { version = "1.0.141", default-features = false } serde_with = { version = "3.14.0", default-features = false } smallvec = "1.13.2" +tracing = "0.1" +tracing-log = "0.2" 
+tracing-subscriber = { version = "=0.3.18" } litep2p = { git = "https://github.com/paritytech/litep2p", tag = "v0.7.0", default-features = false } syn = { version = "2.0.106", default-features = false } quote = { version = "1", default-features = false } @@ -112,6 +117,7 @@ expander = "2" ahash = { version = "0.8", default-features = false } regex = { version = "1.11.1", default-features = false } ethereum = { version = "0.18.2", default-features = false } +num_enum = { version = "0.7.4", default-features = false } frame = { package = "polkadot-sdk-frame", git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2503-6", default-features = false } frame-benchmarking = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2503-6", default-features = false } @@ -132,7 +138,6 @@ pallet-aura = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "p pallet-balances = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2503-6", default-features = false } pallet-grandpa = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2503-6", default-features = false } pallet-insecure-randomness-collective-flip = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2503-6", default-features = false } -pallet-membership = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2503-6", default-features = false } pallet-multisig = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2503-6", default-features = false } pallet-preimage = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2503-6", default-features = false } pallet-safe-mode = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2503-6", default-features = false } @@ -143,6 +148,7 @@ pallet-transaction-payment = { git = "https://github.com/paritytech/polkadot-sdk pallet-transaction-payment-rpc = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2503-6", default-features = false } pallet-transaction-payment-rpc-runtime-api = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2503-6", default-features = false } pallet-root-testing = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2503-6", default-features = false } +pallet-contracts = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2503-6", default-features = false } # NPoS frame-election-provider-support = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2503-6", default-features = false } @@ -228,35 +234,35 @@ polkadot-sdk = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = " runtime-common = { package = "polkadot-runtime-common", git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2503-6", default-features = false } # Frontier -fp-evm = { git = "https://github.com/opentensor/frontier", rev = "a741e4695c4e864b443482a03e82529d761e5064", default-features = false } -fp-rpc = { git = "https://github.com/opentensor/frontier", rev = "a741e4695c4e864b443482a03e82529d761e5064", default-features = false } -fp-self-contained = { git = "https://github.com/opentensor/frontier", rev = "a741e4695c4e864b443482a03e82529d761e5064", default-features = false } -fp-account = { git = "https://github.com/opentensor/frontier", rev = "a741e4695c4e864b443482a03e82529d761e5064", 
default-features = false } -fc-storage = { git = "https://github.com/opentensor/frontier", rev = "a741e4695c4e864b443482a03e82529d761e5064", default-features = false } -fc-db = { git = "https://github.com/opentensor/frontier", rev = "a741e4695c4e864b443482a03e82529d761e5064", default-features = false } -fc-consensus = { git = "https://github.com/opentensor/frontier", rev = "a741e4695c4e864b443482a03e82529d761e5064", default-features = false } -fp-consensus = { git = "https://github.com/opentensor/frontier", rev = "a741e4695c4e864b443482a03e82529d761e5064", default-features = false } -fp-dynamic-fee = { git = "https://github.com/opentensor/frontier", rev = "a741e4695c4e864b443482a03e82529d761e5064", default-features = false } -fc-api = { git = "https://github.com/opentensor/frontier", rev = "a741e4695c4e864b443482a03e82529d761e5064", default-features = false } -fc-rpc = { git = "https://github.com/opentensor/frontier", rev = "a741e4695c4e864b443482a03e82529d761e5064", default-features = false } -fc-rpc-core = { git = "https://github.com/opentensor/frontier", rev = "a741e4695c4e864b443482a03e82529d761e5064", default-features = false } -fc-aura = { git = "https://github.com/opentensor/frontier", rev = "a741e4695c4e864b443482a03e82529d761e5064", default-features = false } -fc-babe = { git = "https://github.com/opentensor/frontier", rev = "a741e4695c4e864b443482a03e82529d761e5064", default-features = false } -fc-mapping-sync = { git = "https://github.com/opentensor/frontier", rev = "a741e4695c4e864b443482a03e82529d761e5064", default-features = false } -precompile-utils = { git = "https://github.com/opentensor/frontier", rev = "a741e4695c4e864b443482a03e82529d761e5064", default-features = false } +fp-evm = { git = "https://github.com/opentensor/frontier", rev = "e31d47f83a64c361ecf0fd02bf567de6db9bda43", default-features = false } +fp-rpc = { git = "https://github.com/opentensor/frontier", rev = "e31d47f83a64c361ecf0fd02bf567de6db9bda43", default-features = false } +fp-self-contained = { git = "https://github.com/opentensor/frontier", rev = "e31d47f83a64c361ecf0fd02bf567de6db9bda43", default-features = false } +fp-account = { git = "https://github.com/opentensor/frontier", rev = "e31d47f83a64c361ecf0fd02bf567de6db9bda43", default-features = false } +fc-storage = { git = "https://github.com/opentensor/frontier", rev = "e31d47f83a64c361ecf0fd02bf567de6db9bda43", default-features = false } +fc-db = { git = "https://github.com/opentensor/frontier", rev = "e31d47f83a64c361ecf0fd02bf567de6db9bda43", default-features = false } +fc-consensus = { git = "https://github.com/opentensor/frontier", rev = "e31d47f83a64c361ecf0fd02bf567de6db9bda43", default-features = false } +fp-consensus = { git = "https://github.com/opentensor/frontier", rev = "e31d47f83a64c361ecf0fd02bf567de6db9bda43", default-features = false } +fp-dynamic-fee = { git = "https://github.com/opentensor/frontier", rev = "e31d47f83a64c361ecf0fd02bf567de6db9bda43", default-features = false } +fc-api = { git = "https://github.com/opentensor/frontier", rev = "e31d47f83a64c361ecf0fd02bf567de6db9bda43", default-features = false } +fc-rpc = { git = "https://github.com/opentensor/frontier", rev = "e31d47f83a64c361ecf0fd02bf567de6db9bda43", default-features = false } +fc-rpc-core = { git = "https://github.com/opentensor/frontier", rev = "e31d47f83a64c361ecf0fd02bf567de6db9bda43", default-features = false } +fc-aura = { git = "https://github.com/opentensor/frontier", rev = "e31d47f83a64c361ecf0fd02bf567de6db9bda43", default-features = false } +fc-babe 
= { git = "https://github.com/opentensor/frontier", rev = "e31d47f83a64c361ecf0fd02bf567de6db9bda43", default-features = false } +fc-mapping-sync = { git = "https://github.com/opentensor/frontier", rev = "e31d47f83a64c361ecf0fd02bf567de6db9bda43", default-features = false } +precompile-utils = { git = "https://github.com/opentensor/frontier", rev = "e31d47f83a64c361ecf0fd02bf567de6db9bda43", default-features = false } # Frontier FRAME -pallet-base-fee = { git = "https://github.com/opentensor/frontier", rev = "a741e4695c4e864b443482a03e82529d761e5064", default-features = false } -pallet-dynamic-fee = { git = "https://github.com/opentensor/frontier", rev = "a741e4695c4e864b443482a03e82529d761e5064", default-features = false } -pallet-ethereum = { git = "https://github.com/opentensor/frontier", rev = "a741e4695c4e864b443482a03e82529d761e5064", default-features = false } -pallet-evm = { git = "https://github.com/opentensor/frontier", rev = "a741e4695c4e864b443482a03e82529d761e5064", default-features = false } -pallet-evm-precompile-dispatch = { git = "https://github.com/opentensor/frontier", rev = "a741e4695c4e864b443482a03e82529d761e5064", default-features = false } -pallet-evm-chain-id = { git = "https://github.com/opentensor/frontier", rev = "a741e4695c4e864b443482a03e82529d761e5064", default-features = false } -pallet-evm-precompile-modexp = { git = "https://github.com/opentensor/frontier", rev = "a741e4695c4e864b443482a03e82529d761e5064", default-features = false } -pallet-evm-precompile-sha3fips = { git = "https://github.com/opentensor/frontier", rev = "a741e4695c4e864b443482a03e82529d761e5064", default-features = false } -pallet-evm-precompile-simple = { git = "https://github.com/opentensor/frontier", rev = "a741e4695c4e864b443482a03e82529d761e5064", default-features = false } -pallet-evm-precompile-bn128 = { git = "https://github.com/opentensor/frontier", rev = "a741e4695c4e864b443482a03e82529d761e5064", default-features = false } -pallet-hotfix-sufficients = { git = "https://github.com/opentensor/frontier", rev = "a741e4695c4e864b443482a03e82529d761e5064", default-features = false } +pallet-base-fee = { git = "https://github.com/opentensor/frontier", rev = "e31d47f83a64c361ecf0fd02bf567de6db9bda43", default-features = false } +pallet-dynamic-fee = { git = "https://github.com/opentensor/frontier", rev = "e31d47f83a64c361ecf0fd02bf567de6db9bda43", default-features = false } +pallet-ethereum = { git = "https://github.com/opentensor/frontier", rev = "e31d47f83a64c361ecf0fd02bf567de6db9bda43", default-features = false } +pallet-evm = { git = "https://github.com/opentensor/frontier", rev = "e31d47f83a64c361ecf0fd02bf567de6db9bda43", default-features = false } +pallet-evm-precompile-dispatch = { git = "https://github.com/opentensor/frontier", rev = "e31d47f83a64c361ecf0fd02bf567de6db9bda43", default-features = false } +pallet-evm-chain-id = { git = "https://github.com/opentensor/frontier", rev = "e31d47f83a64c361ecf0fd02bf567de6db9bda43", default-features = false } +pallet-evm-precompile-modexp = { git = "https://github.com/opentensor/frontier", rev = "e31d47f83a64c361ecf0fd02bf567de6db9bda43", default-features = false } +pallet-evm-precompile-sha3fips = { git = "https://github.com/opentensor/frontier", rev = "e31d47f83a64c361ecf0fd02bf567de6db9bda43", default-features = false } +pallet-evm-precompile-simple = { git = "https://github.com/opentensor/frontier", rev = "e31d47f83a64c361ecf0fd02bf567de6db9bda43", default-features = false } +pallet-evm-precompile-bn128 = { git = 
"https://github.com/opentensor/frontier", rev = "e31d47f83a64c361ecf0fd02bf567de6db9bda43", default-features = false } +pallet-hotfix-sufficients = { git = "https://github.com/opentensor/frontier", rev = "e31d47f83a64c361ecf0fd02bf567de6db9bda43", default-features = false } #DRAND pallet-drand = { path = "pallets/drand", default-features = false } @@ -300,200 +306,200 @@ pow-faucet = [] w3f-bls = { git = "https://github.com/opentensor/bls", branch = "fix-no-std" } # Patches automatically generated with `diener`: -# `diener patch --target https://github.com/paritytech/polkadot-sdk --point-to-git https://github.com/opentensor/polkadot-sdk.git --point-to-git-commit ff7026b2e31fc2b0aaeede8cc259417bce56b2a8 --crates-to-patch ../polkadot-sdk --ignore-unused` +# `diener patch --target https://github.com/paritytech/polkadot-sdk --point-to-git https://github.com/opentensor/polkadot-sdk.git --point-to-git-commit 81fa2c54e94f824eba7dabe9dffd063481cb2d80 --crates-to-patch ../polkadot-sdk --ignore-unused` # -# Using latest commit from `polkadot-stable2503-6-otf-patches`. +# Using latest commit from `polkadot-stable2506-2-otf-patches`. # # View code changes here: -# +# # # NOTE: The Diener will patch unnecesarry crates while this is waiting to be merged: . # You may install diener from `liamaharon:ignore-unused-flag` if you like in the meantime. [patch."https://github.com/paritytech/polkadot-sdk"] -frame-support = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -binary-merkle-tree = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -sp-core = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -sp-crypto-hashing = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -sp-crypto-hashing-proc-macro = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -sp-debug-derive = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -sp-externalities = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -sp-storage = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -sp-runtime-interface = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -sp-runtime-interface-proc-macro = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -sp-std = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -sp-tracing = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -sp-wasm-interface = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -sp-io = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -sp-keystore = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -sp-state-machine = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -sp-panic-handler = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = 
"7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -sp-trie = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -sp-runtime = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -sp-application-crypto = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -sp-arithmetic = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -sp-weights = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -sp-api = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -sp-api-proc-macro = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -sp-metadata-ir = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -sp-version = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -sp-version-proc-macro = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -sc-block-builder = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -sp-block-builder = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -sp-inherents = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -sp-blockchain = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -sp-consensus = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -sp-database = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -sc-client-api = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -substrate-prometheus-endpoint = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -sc-executor = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -sc-executor-common = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -sc-allocator = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -sp-maybe-compressed-blob = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -sc-executor-polkavm = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -sc-executor-wasmtime = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -substrate-wasm-builder = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -sc-tracing = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -sc-tracing-proc-macro = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -sp-rpc = { git = 
"https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -frame-executive = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -frame-system = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -frame-try-runtime = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -pallet-balances = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -frame-benchmarking = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -frame-support-procedural = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -frame-support-procedural-tools = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -frame-support-procedural-tools-derive = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -sc-client-db = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -sc-state-db = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -polkadot-sdk-frame = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -frame-system-benchmarking = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -frame-system-rpc-runtime-api = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -sp-consensus-aura = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -sp-consensus-slots = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -sp-timestamp = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -sp-consensus-grandpa = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -sp-genesis-builder = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -sp-keyring = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -sp-offchain = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -sp-session = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -sp-staking = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -sp-transaction-pool = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -polkadot-sdk = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -cumulus-primitives-core = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -polkadot-core-primitives = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -polkadot-parachain-primitives = { git = 
"https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -polkadot-primitives = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -sp-authority-discovery = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -staging-xcm = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -xcm-procedural = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -cumulus-primitives-parachain-inherent = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -cumulus-primitives-proof-size-hostfunction = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -pallet-message-queue = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -polkadot-runtime-parachains = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -pallet-authority-discovery = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -pallet-session = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -pallet-timestamp = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -pallet-authorship = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -pallet-babe = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -sp-consensus-babe = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -frame-election-provider-support = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -frame-election-provider-solution-type = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -sp-npos-elections = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -pallet-offences = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -pallet-staking = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -pallet-bags-list = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -pallet-staking-reward-curve = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -pallet-broker = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -pallet-mmr = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -sp-mmr-primitives = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -polkadot-runtime-metrics = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -staging-xcm-executor = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = 
"7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -sc-keystore = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -staging-xcm-builder = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -pallet-asset-conversion = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -pallet-transaction-payment = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -pallet-grandpa = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -pallet-sudo = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -pallet-transaction-payment-rpc-runtime-api = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -pallet-vesting = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -polkadot-runtime-common = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -pallet-asset-rate = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -pallet-election-provider-multi-phase = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -pallet-election-provider-support-benchmarking = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -pallet-fast-unstake = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -pallet-identity = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -pallet-staking-reward-fn = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -pallet-treasury = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -pallet-utility = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -pallet-root-testing = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -slot-range-helper = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -cumulus-primitives-storage-weight-reclaim = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -pallet-aura = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -cumulus-test-relay-sproof-builder = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -sc-chain-spec = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -sc-chain-spec-derive = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -sc-network = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -sc-network-common = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -sc-network-types = { 
git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -sc-utils = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -sc-telemetry = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -sc-cli = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -sc-mixnet = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -sc-transaction-pool-api = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -sp-mixnet = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -sc-service = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -sc-consensus = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -sc-informant = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -sc-network-sync = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -fork-tree = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -sc-network-light = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -sc-network-transactions = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -sc-rpc = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -sc-rpc-api = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -sp-statement-store = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -sc-transaction-pool = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -sc-rpc-server = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -sc-rpc-spec-v2 = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -sc-sysinfo = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -sp-transaction-storage-proof = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -cumulus-relay-chain-interface = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -polkadot-overseer = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -tracing-gum = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -tracing-gum-proc-macro = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -polkadot-node-metrics = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -polkadot-node-primitives = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = 
"7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -polkadot-node-subsystem-types = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -polkadot-node-network-protocol = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -sc-authority-discovery = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -polkadot-statement-table = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -frame-benchmarking-cli = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -cumulus-client-parachain-inherent = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -sc-runtime-utilities = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -frame-metadata-hash-extension = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -pallet-nomination-pools = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -pallet-membership = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -pallet-multisig = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -pallet-nomination-pools-runtime-api = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -pallet-preimage = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -pallet-proxy = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -pallet-scheduler = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -pallet-staking-runtime-api = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -sc-offchain = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -sc-consensus-babe = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -sc-consensus-epochs = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -sc-consensus-slots = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -pallet-transaction-payment-rpc = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -sc-consensus-babe-rpc = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -sc-network-gossip = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -sc-consensus-grandpa = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -sc-consensus-grandpa-rpc = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -substrate-frame-rpc-system = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } 
-sc-basic-authorship = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -sc-proposer-metrics = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -substrate-build-script-utils = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -sc-consensus-aura = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -pallet-insecure-randomness-collective-flip = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -pallet-safe-mode = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -sc-consensus-manual-seal = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -sp-crypto-ec-utils = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } -substrate-bip39 = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "7d1855ebff04c96bb273b43cfd5a5cf6fa2a7234" } +frame-support = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +binary-merkle-tree = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +sp-core = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +sp-crypto-hashing = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +sp-crypto-hashing-proc-macro = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +sp-debug-derive = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +sp-externalities = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +sp-storage = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +sp-runtime-interface = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +sp-runtime-interface-proc-macro = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +sp-std = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +sp-tracing = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +sp-wasm-interface = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +sp-io = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +sp-keystore = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +sp-state-machine = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +sp-panic-handler = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +sp-trie = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +sp-runtime = { git = 
"https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +sp-application-crypto = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +sp-arithmetic = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +sp-weights = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +sp-api = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +sp-api-proc-macro = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +sp-metadata-ir = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +sp-version = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +sp-version-proc-macro = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +sc-block-builder = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +sp-block-builder = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +sp-inherents = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +sp-blockchain = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +sp-consensus = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +sp-database = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +sc-client-api = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +substrate-prometheus-endpoint = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +sc-executor = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +sc-executor-common = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +sc-allocator = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +sp-maybe-compressed-blob = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +sc-executor-polkavm = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +sc-executor-wasmtime = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +substrate-wasm-builder = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +sc-tracing = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +sc-tracing-proc-macro = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +sp-rpc = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +frame-executive = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = 
"81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +frame-system = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +frame-try-runtime = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +pallet-balances = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +frame-benchmarking = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +frame-support-procedural = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +frame-support-procedural-tools = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +frame-support-procedural-tools-derive = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +sc-client-db = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +sc-state-db = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +polkadot-sdk-frame = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +frame-system-benchmarking = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +frame-system-rpc-runtime-api = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +sp-consensus-aura = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +sp-consensus-slots = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +sp-timestamp = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +sp-consensus-grandpa = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +sp-genesis-builder = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +sp-keyring = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +sp-offchain = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +sp-session = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +sp-staking = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +sp-transaction-pool = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +polkadot-sdk = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +cumulus-primitives-core = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +polkadot-core-primitives = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +polkadot-parachain-primitives = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +polkadot-primitives = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = 
"81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +sp-authority-discovery = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +staging-xcm = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +xcm-procedural = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +cumulus-primitives-parachain-inherent = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +cumulus-primitives-proof-size-hostfunction = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +pallet-message-queue = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +polkadot-runtime-parachains = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +pallet-authority-discovery = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +pallet-session = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +pallet-timestamp = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +pallet-authorship = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +pallet-babe = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +sp-consensus-babe = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +frame-election-provider-support = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +frame-election-provider-solution-type = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +sp-npos-elections = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +pallet-offences = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +pallet-staking = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +pallet-bags-list = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +pallet-staking-reward-curve = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +pallet-broker = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +pallet-mmr = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +sp-mmr-primitives = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +polkadot-runtime-metrics = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +staging-xcm-executor = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +sc-keystore = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +staging-xcm-builder = { git = 
"https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +pallet-asset-conversion = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +pallet-transaction-payment = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +pallet-grandpa = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +pallet-sudo = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +pallet-transaction-payment-rpc-runtime-api = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +pallet-vesting = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +polkadot-runtime-common = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +pallet-asset-rate = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +pallet-election-provider-multi-phase = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +pallet-election-provider-support-benchmarking = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +pallet-fast-unstake = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +pallet-identity = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +pallet-staking-reward-fn = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +pallet-treasury = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +pallet-utility = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +pallet-root-testing = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +slot-range-helper = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +cumulus-primitives-storage-weight-reclaim = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +pallet-aura = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +cumulus-test-relay-sproof-builder = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +sc-chain-spec = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +sc-chain-spec-derive = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +sc-network = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +sc-network-common = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +sc-network-types = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +sc-utils = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = 
"81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +sc-telemetry = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +sc-cli = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +sc-mixnet = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +sc-transaction-pool-api = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +sp-mixnet = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +sc-service = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +sc-consensus = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +sc-informant = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +sc-network-sync = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +fork-tree = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +sc-network-light = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +sc-network-transactions = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +sc-rpc = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +sc-rpc-api = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +sp-statement-store = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +sc-transaction-pool = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +sc-rpc-server = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +sc-rpc-spec-v2 = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +sc-sysinfo = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +sp-transaction-storage-proof = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +cumulus-relay-chain-interface = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +polkadot-overseer = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +tracing-gum = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +tracing-gum-proc-macro = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +polkadot-node-metrics = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +polkadot-node-primitives = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +polkadot-node-subsystem-types = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +polkadot-node-network-protocol 
= { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +sc-authority-discovery = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +polkadot-statement-table = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +frame-benchmarking-cli = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +cumulus-client-parachain-inherent = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +sc-runtime-utilities = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +frame-metadata-hash-extension = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +pallet-nomination-pools = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +pallet-membership = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +pallet-multisig = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +pallet-nomination-pools-runtime-api = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +pallet-preimage = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +pallet-proxy = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +pallet-scheduler = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +pallet-staking-runtime-api = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +sc-offchain = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +sc-consensus-babe = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +sc-consensus-epochs = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +sc-consensus-slots = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +pallet-transaction-payment-rpc = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +sc-consensus-babe-rpc = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +sc-network-gossip = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +sc-consensus-grandpa = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +sc-consensus-grandpa-rpc = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +substrate-frame-rpc-system = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +sc-basic-authorship = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +sc-proposer-metrics = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = 
"81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +substrate-build-script-utils = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +sc-consensus-aura = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +pallet-insecure-randomness-collective-flip = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +pallet-safe-mode = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +sc-consensus-manual-seal = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +sp-crypto-ec-utils = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } +substrate-bip39 = { git = "https://github.com/opentensor/polkadot-sdk.git", rev = "81fa2c54e94f824eba7dabe9dffd063481cb2d80" } diff --git a/README.md b/README.md index 5c3faaf033..2c74b9e8f1 100644 --- a/README.md +++ b/README.md @@ -18,6 +18,7 @@ This repository contains Bittensor's substrate-chain. Subtensor contains the tru 1. Runs Bittensor's [consensus mechanism](./docs/consensus.md); 2. Advertises neuron information, IPs, etc., and 3. Facilitates value transfer via TAO. +4. Supports wasm smart contract functionality via `pallet-contracts` (see [contracts documentation](./docs/contracts.md)). ## System Requirements diff --git a/chain-extensions/Cargo.toml b/chain-extensions/Cargo.toml new file mode 100644 index 0000000000..ae69b94d4c --- /dev/null +++ b/chain-extensions/Cargo.toml @@ -0,0 +1,67 @@ +[package] +name = "subtensor-chain-extensions" +version = "0.1.0" +edition.workspace = true +authors = ['Francisco Silva '] +homepage = "https://taostats.io/" +publish = false +repository = "https://github.com/opentensor/subtensor/" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +frame-support.workspace = true +frame-system.workspace = true +log.workspace = true +sp-core.workspace = true +sp-io.workspace = true +sp-runtime.workspace = true +sp-std.workspace = true +codec = { workspace = true, features = ["derive"] } +scale-info = { workspace = true, features = ["derive"] } +subtensor-runtime-common.workspace = true +pallet-contracts.workspace = true +pallet-subtensor.workspace = true +pallet-subtensor-swap.workspace = true +pallet-balances.workspace = true +pallet-scheduler.workspace = true +pallet-preimage.workspace = true +pallet-timestamp.workspace = true +pallet-crowdloan.workspace = true +pallet-subtensor-utility.workspace = true +pallet-subtensor-proxy.workspace = true +pallet-drand.workspace = true +subtensor-swap-interface.workspace = true +num_enum.workspace = true + +[lints] +workspace = true + +[features] +default = ["std"] +std = [ + "frame-support/std", + "frame-system/std", + "log/std", + "sp-core/std", + "sp-io/std", + "sp-runtime/std", + "sp-std/std", + "codec/std", + "scale-info/std", + "subtensor-runtime-common/std", + "pallet-contracts/std", + "pallet-subtensor/std", + "pallet-subtensor-swap/std", + "pallet-balances/std", + "pallet-scheduler/std", + "pallet-preimage/std", + "pallet-timestamp/std", + "pallet-crowdloan/std", + "pallet-subtensor-utility/std", + "pallet-subtensor-proxy/std", + "pallet-drand/std", + "subtensor-swap-interface/std", + "num_enum/std", +] diff --git a/chain-extensions/src/lib.rs b/chain-extensions/src/lib.rs new file mode 100644 index 
0000000000..e53fac765f
--- /dev/null
+++ b/chain-extensions/src/lib.rs
@@ -0,0 +1,569 @@
+#![cfg_attr(not(feature = "std"), no_std)]
+
+#[cfg(test)]
+mod mock;
+#[cfg(test)]
+mod tests;
+
+pub mod types;
+
+use crate::types::{FunctionId, Output};
+use codec::{Decode, Encode, MaxEncodedLen};
+use frame_support::{DebugNoBound, traits::Get};
+use frame_system::RawOrigin;
+use pallet_contracts::chain_extension::{
+    BufInBufOutState, ChainExtension, Environment, Ext, InitState, RetVal, SysConfig,
+};
+use pallet_subtensor_proxy as pallet_proxy;
+use pallet_subtensor_proxy::WeightInfo;
+use sp_runtime::{DispatchError, Weight, traits::StaticLookup};
+use sp_std::marker::PhantomData;
+use subtensor_runtime_common::{AlphaCurrency, NetUid, ProxyType, TaoCurrency};
+
+#[derive(DebugNoBound)]
+pub struct SubtensorChainExtension<T>(PhantomData<T>);
+
+impl<T> Default for SubtensorChainExtension<T> {
+    fn default() -> Self {
+        Self(PhantomData)
+    }
+}
+
+impl<T> ChainExtension<T> for SubtensorChainExtension<T>
+where
+    T: pallet_subtensor::Config
+        + pallet_contracts::Config
+        + pallet_proxy::Config,
+    T::AccountId: Clone,
+    <<T as SysConfig>::Lookup as StaticLookup>::Source: From<<T as SysConfig>::AccountId>,
+{
+    fn call<E>(&mut self, env: Environment<E, InitState>) -> Result<RetVal, DispatchError>
+    where
+        E: Ext<T = T>,
+    {
+        let mut adapter = ContractsEnvAdapter::<T, E>::new(env);
+        Self::dispatch(&mut adapter)
+    }
+
+    fn enabled() -> bool {
+        true
+    }
+}
+
+impl<T> SubtensorChainExtension<T>
+where
+    T: pallet_subtensor::Config
+        + pallet_contracts::Config
+        + pallet_proxy::Config,
+    T::AccountId: Clone,
+{
+    fn dispatch<Env>(env: &mut Env) -> Result<RetVal, DispatchError>
+    where
+        Env: SubtensorExtensionEnv<T>,
+        <<T as SysConfig>::Lookup as StaticLookup>::Source: From<<T as SysConfig>::AccountId>,
+    {
+        let func_id: FunctionId = env.func_id().try_into().map_err(|_| {
+            DispatchError::Other(
+                "Invalid function id - does not correspond to any registered function",
+            )
+        })?;
+
+        match func_id {
+            FunctionId::GetStakeInfoForHotkeyColdkeyNetuidV1 => {
+                let (hotkey, coldkey, netuid): (T::AccountId, T::AccountId, NetUid) = env
+                    .read_as()
+                    .map_err(|_| DispatchError::Other("Failed to decode input parameters"))?;
+
+                let stake_info =
+                    pallet_subtensor::Pallet::<T>::get_stake_info_for_hotkey_coldkey_netuid(
+                        hotkey, coldkey, netuid,
+                    );
+
+                let encoded_result = stake_info.encode();
+
+                env.write_output(&encoded_result)
+                    .map_err(|_| DispatchError::Other("Failed to write output"))?;
+
+                Ok(RetVal::Converging(Output::Success as u32))
+            }
+            FunctionId::AddStakeV1 => {
+                let weight = Weight::from_parts(340_800_000, 0)
+                    .saturating_add(T::DbWeight::get().reads(24_u64))
+                    .saturating_add(T::DbWeight::get().writes(15));
+
+                env.charge_weight(weight)?;
+
+                let (hotkey, netuid, amount_staked): (T::AccountId, NetUid, TaoCurrency) = env
+                    .read_as()
+                    .map_err(|_| DispatchError::Other("Failed to decode input parameters"))?;
+
+                let call_result = pallet_subtensor::Pallet::<T>::add_stake(
+                    RawOrigin::Signed(env.caller()).into(),
+                    hotkey,
+                    netuid,
+                    amount_staked,
+                );
+
+                match call_result {
+                    Ok(_) => Ok(RetVal::Converging(Output::Success as u32)),
+                    Err(e) => {
+                        let error_code = Output::from(e) as u32;
+                        Ok(RetVal::Converging(error_code))
+                    }
+                }
+            }
+            FunctionId::RemoveStakeV1 => {
+                let weight = Weight::from_parts(196_800_000, 0)
+                    .saturating_add(T::DbWeight::get().reads(19))
+                    .saturating_add(T::DbWeight::get().writes(10));
+
+                env.charge_weight(weight)?;
+
+                let (hotkey, netuid, amount_unstaked): (T::AccountId, NetUid, AlphaCurrency) = env
+                    .read_as()
+                    .map_err(|_| DispatchError::Other("Failed to decode input parameters"))?;
+
+                let call_result = pallet_subtensor::Pallet::<T>::remove_stake(
+                    RawOrigin::Signed(env.caller()).into(),
+                    hotkey,
+                    netuid,
+                    amount_unstaked,
+                );
+
+                match call_result {
+                    Ok(_) => Ok(RetVal::Converging(Output::Success as u32)),
+                    Err(e) => {
+                        let error_code = Output::from(e) as u32;
+                        Ok(RetVal::Converging(error_code))
+                    }
+                }
+            }
+            FunctionId::UnstakeAllV1 => {
+                let weight = Weight::from_parts(28_830_000, 0)
+                    .saturating_add(T::DbWeight::get().reads(6))
+                    .saturating_add(T::DbWeight::get().writes(0));
+
+                env.charge_weight(weight)?;
+
+                let hotkey: T::AccountId = env
+                    .read_as()
+                    .map_err(|_| DispatchError::Other("Failed to decode input parameters"))?;
+
+                let call_result = pallet_subtensor::Pallet::<T>::unstake_all(
+                    RawOrigin::Signed(env.caller()).into(),
+                    hotkey,
+                );
+
+                match call_result {
+                    Ok(_) => Ok(RetVal::Converging(Output::Success as u32)),
+                    Err(e) => {
+                        let error_code = Output::from(e) as u32;
+                        Ok(RetVal::Converging(error_code))
+                    }
+                }
+            }
+            FunctionId::UnstakeAllAlphaV1 => {
+                let weight = Weight::from_parts(358_500_000, 0)
+                    .saturating_add(T::DbWeight::get().reads(36_u64))
+                    .saturating_add(T::DbWeight::get().writes(21_u64));
+
+                env.charge_weight(weight)?;
+
+                let hotkey: T::AccountId = env
+                    .read_as()
+                    .map_err(|_| DispatchError::Other("Failed to decode input parameters"))?;
+
+                let call_result = pallet_subtensor::Pallet::<T>::unstake_all_alpha(
+                    RawOrigin::Signed(env.caller()).into(),
+                    hotkey,
+                );
+
+                match call_result {
+                    Ok(_) => Ok(RetVal::Converging(Output::Success as u32)),
+                    Err(e) => {
+                        let error_code = Output::from(e) as u32;
+                        Ok(RetVal::Converging(error_code))
+                    }
+                }
+            }
+            FunctionId::MoveStakeV1 => {
+                let weight = Weight::from_parts(164_300_000, 0)
+                    .saturating_add(T::DbWeight::get().reads(15_u64))
+                    .saturating_add(T::DbWeight::get().writes(7_u64));
+
+                env.charge_weight(weight)?;
+
+                let (
+                    origin_hotkey,
+                    destination_hotkey,
+                    origin_netuid,
+                    destination_netuid,
+                    alpha_amount,
+                ): (T::AccountId, T::AccountId, NetUid, NetUid, AlphaCurrency) = env
+                    .read_as()
+                    .map_err(|_| DispatchError::Other("Failed to decode input parameters"))?;
+
+                let call_result = pallet_subtensor::Pallet::<T>::move_stake(
+                    RawOrigin::Signed(env.caller()).into(),
+                    origin_hotkey,
+                    destination_hotkey,
+                    origin_netuid,
+                    destination_netuid,
+                    alpha_amount,
+                );
+
+                match call_result {
+                    Ok(_) => Ok(RetVal::Converging(Output::Success as u32)),
+                    Err(e) => {
+                        let error_code = Output::from(e) as u32;
+                        Ok(RetVal::Converging(error_code))
+                    }
+                }
+            }
+            FunctionId::TransferStakeV1 => {
+                let weight = Weight::from_parts(160_300_000, 0)
+                    .saturating_add(T::DbWeight::get().reads(13_u64))
+                    .saturating_add(T::DbWeight::get().writes(6_u64));
+
+                env.charge_weight(weight)?;
+
+                let (destination_coldkey, hotkey, origin_netuid, destination_netuid, alpha_amount): (
+                    T::AccountId,
+                    T::AccountId,
+                    NetUid,
+                    NetUid,
+                    AlphaCurrency,
+                ) = env
+                    .read_as()
+                    .map_err(|_| DispatchError::Other("Failed to decode input parameters"))?;
+
+                let call_result = pallet_subtensor::Pallet::<T>::transfer_stake(
+                    RawOrigin::Signed(env.caller()).into(),
+                    destination_coldkey,
+                    hotkey,
+                    origin_netuid,
+                    destination_netuid,
+                    alpha_amount,
+                );
+
+                match call_result {
+                    Ok(_) => Ok(RetVal::Converging(Output::Success as u32)),
+                    Err(e) => {
+                        let error_code = Output::from(e) as u32;
+                        Ok(RetVal::Converging(error_code))
+                    }
+                }
+            }
+            FunctionId::SwapStakeV1 => {
+                let weight = Weight::from_parts(351_300_000, 0)
+                    .saturating_add(T::DbWeight::get().reads(35_u64))
+                    .saturating_add(T::DbWeight::get().writes(22_u64));
+
+                env.charge_weight(weight)?;
+
+                let (hotkey, origin_netuid, destination_netuid, alpha_amount): (
+                    T::AccountId,
+                    NetUid,
+                    NetUid,
+                    AlphaCurrency,
+                ) = env
+                    .read_as()
+                    .map_err(|_| DispatchError::Other("Failed to decode input parameters"))?;
+
+                let call_result = pallet_subtensor::Pallet::<T>::swap_stake(
+                    RawOrigin::Signed(env.caller()).into(),
+                    hotkey,
+                    origin_netuid,
+                    destination_netuid,
+                    alpha_amount,
+                );
+
+                match call_result {
+                    Ok(_) => Ok(RetVal::Converging(Output::Success as u32)),
+                    Err(e) => {
+                        let error_code = Output::from(e) as u32;
+                        Ok(RetVal::Converging(error_code))
+                    }
+                }
+            }
+            FunctionId::AddStakeLimitV1 => {
+                let weight = Weight::from_parts(402_900_000, 0)
+                    .saturating_add(T::DbWeight::get().reads(24_u64))
+                    .saturating_add(T::DbWeight::get().writes(15));
+
+                env.charge_weight(weight)?;
+
+                let (hotkey, netuid, amount_staked, limit_price, allow_partial): (
+                    T::AccountId,
+                    NetUid,
+                    TaoCurrency,
+                    TaoCurrency,
+                    bool,
+                ) = env
+                    .read_as()
+                    .map_err(|_| DispatchError::Other("Failed to decode input parameters"))?;
+
+                let call_result = pallet_subtensor::Pallet::<T>::add_stake_limit(
+                    RawOrigin::Signed(env.caller()).into(),
+                    hotkey,
+                    netuid,
+                    amount_staked,
+                    limit_price,
+                    allow_partial,
+                );
+
+                match call_result {
+                    Ok(_) => Ok(RetVal::Converging(Output::Success as u32)),
+                    Err(e) => {
+                        let error_code = Output::from(e) as u32;
+                        Ok(RetVal::Converging(error_code))
+                    }
+                }
+            }
+            FunctionId::RemoveStakeLimitV1 => {
+                let weight = Weight::from_parts(377_400_000, 0)
+                    .saturating_add(T::DbWeight::get().reads(28_u64))
+                    .saturating_add(T::DbWeight::get().writes(14));
+
+                env.charge_weight(weight)?;
+
+                let (hotkey, netuid, amount_unstaked, limit_price, allow_partial): (
+                    T::AccountId,
+                    NetUid,
+                    AlphaCurrency,
+                    TaoCurrency,
+                    bool,
+                ) = env
+                    .read_as()
+                    .map_err(|_| DispatchError::Other("Failed to decode input parameters"))?;
+
+                let call_result = pallet_subtensor::Pallet::<T>::remove_stake_limit(
+                    RawOrigin::Signed(env.caller()).into(),
+                    hotkey,
+                    netuid,
+                    amount_unstaked,
+                    limit_price,
+                    allow_partial,
+                );
+
+                match call_result {
+                    Ok(_) => Ok(RetVal::Converging(Output::Success as u32)),
+                    Err(e) => {
+                        let error_code = Output::from(e) as u32;
+                        Ok(RetVal::Converging(error_code))
+                    }
+                }
+            }
+            FunctionId::SwapStakeLimitV1 => {
+                let weight = Weight::from_parts(411_500_000, 0)
+                    .saturating_add(T::DbWeight::get().reads(35_u64))
+                    .saturating_add(T::DbWeight::get().writes(22_u64));
+
+                env.charge_weight(weight)?;
+
+                let (
+                    hotkey,
+                    origin_netuid,
+                    destination_netuid,
+                    alpha_amount,
+                    limit_price,
+                    allow_partial,
+                ): (
+                    T::AccountId,
+                    NetUid,
+                    NetUid,
+                    AlphaCurrency,
+                    TaoCurrency,
+                    bool,
+                ) = env
+                    .read_as()
+                    .map_err(|_| DispatchError::Other("Failed to decode input parameters"))?;
+
+                let call_result = pallet_subtensor::Pallet::<T>::swap_stake_limit(
+                    RawOrigin::Signed(env.caller()).into(),
+                    hotkey,
+                    origin_netuid,
+                    destination_netuid,
+                    alpha_amount,
+                    limit_price,
+                    allow_partial,
+                );
+
+                match call_result {
+                    Ok(_) => Ok(RetVal::Converging(Output::Success as u32)),
+                    Err(e) => {
+                        let error_code = Output::from(e) as u32;
+                        Ok(RetVal::Converging(error_code))
+                    }
+                }
+            }
+            FunctionId::RemoveStakeFullLimitV1 => {
+                let weight = Weight::from_parts(395_300_000, 0)
+                    .saturating_add(T::DbWeight::get().reads(28_u64))
+                    .saturating_add(T::DbWeight::get().writes(14_u64));
+
+                env.charge_weight(weight)?;
+
+                let (hotkey, netuid, limit_price): (T::AccountId, NetUid, Option<TaoCurrency>) =
+                    env.read_as()
+                        .map_err(|_| DispatchError::Other("Failed
to decode input parameters"))?;
+
+                let call_result = pallet_subtensor::Pallet::<T>::remove_stake_full_limit(
+                    RawOrigin::Signed(env.caller()).into(),
+                    hotkey,
+                    netuid,
+                    limit_price,
+                );
+
+                match call_result {
+                    Ok(_) => Ok(RetVal::Converging(Output::Success as u32)),
+                    Err(e) => {
+                        let error_code = Output::from(e) as u32;
+                        Ok(RetVal::Converging(error_code))
+                    }
+                }
+            }
+            FunctionId::SetColdkeyAutoStakeHotkeyV1 => {
+                let weight = Weight::from_parts(29_930_000, 0)
+                    .saturating_add(T::DbWeight::get().reads(4_u64))
+                    .saturating_add(T::DbWeight::get().writes(2_u64));
+
+                env.charge_weight(weight)?;
+
+                let (netuid, hotkey): (NetUid, T::AccountId) = env
+                    .read_as()
+                    .map_err(|_| DispatchError::Other("Failed to decode input parameters"))?;
+
+                let call_result = pallet_subtensor::Pallet::<T>::set_coldkey_auto_stake_hotkey(
+                    RawOrigin::Signed(env.caller()).into(),
+                    netuid,
+                    hotkey,
+                );
+
+                match call_result {
+                    Ok(_) => Ok(RetVal::Converging(Output::Success as u32)),
+                    Err(e) => {
+                        let error_code = Output::from(e) as u32;
+                        Ok(RetVal::Converging(error_code))
+                    }
+                }
+            }
+            FunctionId::AddProxyV1 => {
+                let weight = <T as pallet_proxy::Config>::WeightInfo::add_proxy(
+                    <T as pallet_proxy::Config>::MaxProxies::get(),
+                );
+
+                env.charge_weight(weight)?;
+
+                let delegate: T::AccountId = env
+                    .read_as()
+                    .map_err(|_| DispatchError::Other("Failed to decode input parameters"))?;
+
+                let delegate_lookup =
+                    <<T as frame_system::Config>::Lookup as StaticLookup>::Source::from(delegate);
+
+                let call_result = pallet_proxy::Pallet::<T>::add_proxy(
+                    RawOrigin::Signed(env.caller()).into(),
+                    delegate_lookup,
+                    ProxyType::Staking,
+                    0u32.into(),
+                );
+
+                match call_result {
+                    Ok(_) => Ok(RetVal::Converging(Output::Success as u32)),
+                    Err(e) => {
+                        let error_code = Output::from(e) as u32;
+                        Ok(RetVal::Converging(error_code))
+                    }
+                }
+            }
+            FunctionId::RemoveProxyV1 => {
+                let weight = <T as pallet_proxy::Config>::WeightInfo::remove_proxy(
+                    <T as pallet_proxy::Config>::MaxProxies::get(),
+                );
+
+                env.charge_weight(weight)?;
+
+                let delegate: T::AccountId = env
+                    .read_as()
+                    .map_err(|_| DispatchError::Other("Failed to decode input parameters"))?;
+
+                let delegate_lookup =
+                    <<T as frame_system::Config>::Lookup as StaticLookup>::Source::from(delegate);
+
+                let call_result = pallet_proxy::Pallet::<T>::remove_proxy(
+                    RawOrigin::Signed(env.caller()).into(),
+                    delegate_lookup,
+                    ProxyType::Staking,
+                    0u32.into(),
+                );
+
+                match call_result {
+                    Ok(_) => Ok(RetVal::Converging(Output::Success as u32)),
+                    Err(e) => {
+                        let error_code = Output::from(e) as u32;
+                        Ok(RetVal::Converging(error_code))
+                    }
+                }
+            }
+        }
+    }
+}
+
+trait SubtensorExtensionEnv<AccountId> {
+    fn func_id(&self) -> u16;
+    fn charge_weight(&mut self, weight: Weight) -> Result<(), DispatchError>;
+    fn read_as<U: codec::Decode + codec::MaxEncodedLen>(&mut self) -> Result<U, DispatchError>;
+    fn write_output(&mut self, data: &[u8]) -> Result<(), DispatchError>;
+    fn caller(&mut self) -> AccountId;
+}
+
+struct ContractsEnvAdapter<'a, 'b, T, E>
+where
+    T: pallet_subtensor::Config + pallet_contracts::Config,
+    E: Ext<T = T>,
+{
+    env: Environment<'a, 'b, E, BufInBufOutState>,
+    _marker: PhantomData<T>,
+}
+
+impl<'a, 'b, T, E> ContractsEnvAdapter<'a, 'b, T, E>
+where
+    T: pallet_subtensor::Config + pallet_contracts::Config,
+    T::AccountId: Clone,
+    E: Ext<T = T>,
+{
+    fn new(env: Environment<'a, 'b, E, InitState>) -> Self {
+        Self {
+            env: env.buf_in_buf_out(),
+            _marker: PhantomData,
+        }
+    }
+}
+
+impl<'a, 'b, T, E> SubtensorExtensionEnv<T::AccountId> for ContractsEnvAdapter<'a, 'b, T, E>
+where
+    T: pallet_subtensor::Config + pallet_contracts::Config,
+    T::AccountId: Clone,
+    E: Ext<T = T>,
+{
+    fn func_id(&self) -> u16 {
+        self.env.func_id()
+    }
+
+    fn charge_weight(&mut self, weight: Weight) -> Result<(), DispatchError> {
+        self.env.charge_weight(weight).map(|_| ())
+    }
+
+    fn read_as<U: codec::Decode + codec::MaxEncodedLen>(&mut self) -> Result<U, DispatchError> {
+        self.env.read_as()
+    }
+
+    fn write_output(&mut self, data: &[u8]) -> Result<(), DispatchError> {
+        self.env.write(data, false, None)
+    }
+
+    fn caller(&mut self) -> T::AccountId {
+        self.env.ext().address().clone()
+    }
+}
diff --git a/chain-extensions/src/mock.rs b/chain-extensions/src/mock.rs
new file mode 100644
index 0000000000..98ea096199
--- /dev/null
+++ b/chain-extensions/src/mock.rs
@@ -0,0 +1,699 @@
+#![allow(
+    clippy::arithmetic_side_effects,
+    clippy::expect_used,
+    clippy::unwrap_used
+)]
+
+use core::num::NonZeroU64;
+
+use frame_support::dispatch::DispatchResult;
+use frame_support::traits::{Contains, Everything, InherentBuilder, InsideBoth};
+use frame_support::weights::Weight;
+use frame_support::weights::constants::RocksDbWeight;
+use frame_support::{PalletId, derive_impl};
+use frame_support::{assert_ok, parameter_types, traits::PrivilegeCmp};
+use frame_system as system;
+use frame_system::{EnsureRoot, RawOrigin, limits, offchain::CreateTransactionBase};
+use pallet_contracts::HoldReason as ContractsHoldReason;
+use pallet_subtensor::*;
+use pallet_subtensor_proxy as pallet_proxy;
+use pallet_subtensor_utility as pallet_utility;
+use sp_core::{ConstU64, H256, U256, offchain::KeyTypeId};
+use sp_runtime::Perbill;
+use sp_runtime::{
+    BuildStorage, Percent,
+    traits::{BlakeTwo256, Convert, IdentityLookup},
+};
+use sp_std::{cell::RefCell, cmp::Ordering, sync::OnceLock};
+use subtensor_runtime_common::{AlphaCurrency, NetUid, TaoCurrency};
+
+type Block = frame_system::mocking::MockBlock<Test>;
+
+frame_support::construct_runtime!(
+    pub enum Test
+    {
+        System: frame_system::{Pallet, Call, Config<T>, Storage, Event<T>} = 1,
+        Balances: pallet_balances::{Pallet, Call, Config<T>, Storage, Event<T>} = 2,
+        SubtensorModule: pallet_subtensor::{Pallet, Call, Storage, Event<T>} = 7,
+        Utility: pallet_utility::{Pallet, Call, Storage, Event} = 8,
+        Scheduler: pallet_scheduler::{Pallet, Call, Storage, Event<T>} = 9,
+        Preimage: pallet_preimage::{Pallet, Call, Storage, Event<T>} = 10,
+        Drand: pallet_drand::{Pallet, Call, Storage, Event<T>} = 11,
+        Swap: pallet_subtensor_swap::{Pallet, Call, Storage, Event<T>} = 12,
+        Crowdloan: pallet_crowdloan::{Pallet, Call, Storage, Event<T>} = 13,
+        Timestamp: pallet_timestamp::{Pallet, Call, Storage} = 14,
+        Contracts: pallet_contracts::{Pallet, Call, Storage, Event<T>} = 15,
+        Proxy: pallet_proxy::{Pallet, Call, Storage, Event<T>} = 16,
+    }
+);
+
+pub const KEY_TYPE: KeyTypeId = KeyTypeId(*b"test");
+
+#[allow(dead_code)]
+pub type TestRuntimeCall = frame_system::Call<Test>;
+
+#[allow(dead_code)]
+pub type AccountId = U256;
+
+// Balance of an account.
+#[allow(dead_code)]
+pub type Balance = u64;
+
+// An index to a block.
+#[allow(dead_code)] +pub type BlockNumber = u64; + +pub struct DummyContractsRandomness; + +impl frame_support::traits::Randomness for DummyContractsRandomness { + fn random(_subject: &[u8]) -> (H256, BlockNumber) { + (H256::zero(), 0) + } +} + +pub struct WeightToBalance; + +impl Convert for WeightToBalance { + fn convert(weight: Weight) -> Balance { + weight.ref_time() + } +} + +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] +impl pallet_balances::Config for Test { + type Balance = Balance; + type RuntimeEvent = RuntimeEvent; + type DustRemoval = (); + type ExistentialDeposit = ExistentialDeposit; + type AccountStore = System; + type MaxLocks = (); + type WeightInfo = (); + type MaxReserves = (); + type ReserveIdentifier = (); + type RuntimeHoldReason = ContractsHoldReason; + type FreezeIdentifier = (); + type MaxFreezes = (); +} + +#[derive_impl(pallet_timestamp::config_preludes::TestDefaultConfig)] +impl pallet_timestamp::Config for Test { + type MinimumPeriod = ConstU64<1>; +} + +#[derive_impl(pallet_contracts::config_preludes::TestDefaultConfig)] +impl pallet_contracts::Config for Test { + type Time = Timestamp; + type Randomness = DummyContractsRandomness; + type Currency = Balances; + type RuntimeEvent = RuntimeEvent; + type RuntimeHoldReason = ContractsHoldReason; + type RuntimeCall = RuntimeCall; + type CallFilter = Everything; + type WeightPrice = WeightToBalance; + type WeightInfo = (); + type ChainExtension = crate::SubtensorChainExtension; + type Schedule = ContractsSchedule; + type CallStack = [pallet_contracts::Frame; 5]; + type DepositPerByte = ContractsDepositPerByte; + type DepositPerItem = ContractsDepositPerItem; + type DefaultDepositLimit = ContractsDefaultDepositLimit; + type AddressGenerator = pallet_contracts::DefaultAddressGenerator; + type UnsafeUnstableInterface = ContractsUnstableInterface; + type UploadOrigin = frame_system::EnsureSigned; + type InstantiateOrigin = frame_system::EnsureSigned; + type CodeHashLockupDepositPercent = ContractsCodeHashLockupDepositPercent; + type MaxDelegateDependencies = ContractsMaxDelegateDependencies; + type MaxCodeLen = ContractsMaxCodeLen; + type MaxStorageKeyLen = ContractsMaxStorageKeyLen; + type MaxTransientStorageSize = ContractsMaxTransientStorageSize; + type MaxDebugBufferLen = ContractsMaxDebugBufferLen; + type Migrations = (); + type Debug = (); + type Environment = (); + type ApiVersion = (); + type Xcm = (); +} + +impl frame_support::traits::InstanceFilter for subtensor_runtime_common::ProxyType { + fn filter(&self, c: &RuntimeCall) -> bool { + match self { + subtensor_runtime_common::ProxyType::Any => true, + subtensor_runtime_common::ProxyType::Staking => matches!( + c, + RuntimeCall::SubtensorModule(pallet_subtensor::Call::add_stake { .. }) + | RuntimeCall::SubtensorModule(pallet_subtensor::Call::add_stake_limit { .. }) + | RuntimeCall::SubtensorModule(pallet_subtensor::Call::remove_stake { .. }) + | RuntimeCall::SubtensorModule( + pallet_subtensor::Call::remove_stake_limit { .. } + ) + | RuntimeCall::SubtensorModule( + pallet_subtensor::Call::remove_stake_full_limit { .. } + ) + | RuntimeCall::SubtensorModule(pallet_subtensor::Call::unstake_all { .. }) + | RuntimeCall::SubtensorModule( + pallet_subtensor::Call::unstake_all_alpha { .. } + ) + | RuntimeCall::SubtensorModule(pallet_subtensor::Call::swap_stake { .. }) + | RuntimeCall::SubtensorModule(pallet_subtensor::Call::swap_stake_limit { .. }) + | RuntimeCall::SubtensorModule(pallet_subtensor::Call::move_stake { .. 
}) + | RuntimeCall::SubtensorModule(pallet_subtensor::Call::transfer_stake { .. }) + ), + _ => false, + } + } + + fn is_superset(&self, o: &Self) -> bool { + match (self, o) { + (subtensor_runtime_common::ProxyType::Any, _) => true, + _ => self == o, + } + } +} + +impl pallet_proxy::Config for Test { + type RuntimeCall = RuntimeCall; + type Currency = Balances; + type ProxyType = subtensor_runtime_common::ProxyType; + type ProxyDepositBase = ProxyDepositBase; + type ProxyDepositFactor = ProxyDepositFactor; + type MaxProxies = MaxProxies; + type WeightInfo = (); + type MaxPending = MaxPending; + type CallHasher = BlakeTwo256; + type AnnouncementDepositBase = AnnouncementDepositBase; + type AnnouncementDepositFactor = AnnouncementDepositFactor; + type BlockNumberProvider = System; +} + +pub struct NoNestingCallFilter; + +impl Contains for NoNestingCallFilter { + fn contains(call: &RuntimeCall) -> bool { + match call { + RuntimeCall::Utility(inner) => { + let calls = match inner { + pallet_utility::Call::force_batch { calls } => calls, + pallet_utility::Call::batch { calls } => calls, + pallet_utility::Call::batch_all { calls } => calls, + _ => &Vec::new(), + }; + + !calls.iter().any(|call| { + matches!(call, RuntimeCall::Utility(inner) if matches!(inner, pallet_utility::Call::force_batch { .. } | pallet_utility::Call::batch_all { .. } | pallet_utility::Call::batch { .. })) + }) + } + _ => true, + } + } +} + +#[derive_impl(frame_system::config_preludes::TestDefaultConfig)] +impl system::Config for Test { + type BaseCallFilter = InsideBoth; + type BlockWeights = BlockWeights; + type BlockLength = (); + type DbWeight = RocksDbWeight; + type RuntimeOrigin = RuntimeOrigin; + type RuntimeCall = RuntimeCall; + type Hash = H256; + type Hashing = BlakeTwo256; + type AccountId = U256; + type Lookup = IdentityLookup; + type RuntimeEvent = RuntimeEvent; + type BlockHashCount = BlockHashCount; + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = pallet_balances::AccountData; + type OnNewAccount = (); + type OnKilledAccount = (); + type SystemWeightInfo = (); + type SS58Prefix = SS58Prefix; + type OnSetCode = (); + type MaxConsumers = frame_support::traits::ConstU32<16>; + type Nonce = u64; + type Block = Block; +} + +parameter_types! { + pub const BlockHashCount: u64 = 250; + pub const SS58Prefix: u8 = 42; +} + +parameter_types! { + pub ContractsSchedule: pallet_contracts::Schedule = Default::default(); + pub const ContractsDepositPerByte: Balance = 1; + pub const ContractsDepositPerItem: Balance = 10; + pub const ContractsDefaultDepositLimit: Balance = 1_000_000_000; + pub const ContractsCodeHashLockupDepositPercent: Perbill = Perbill::from_percent(0); + pub const ContractsMaxDelegateDependencies: u32 = 32; + pub const ContractsMaxCodeLen: u32 = 120_000; + pub const ContractsMaxStorageKeyLen: u32 = 256; + pub const ContractsMaxTransientStorageSize: u32 = 1024 * 1024; + pub const ContractsMaxDebugBufferLen: u32 = 2 * 1024 * 1024; + pub const ContractsUnstableInterface: bool = true; +} + +parameter_types! { + pub const ProxyDepositBase: Balance = 1; + pub const ProxyDepositFactor: Balance = 1; + pub const MaxProxies: u32 = 32; + pub const MaxPending: u32 = 32; + pub const AnnouncementDepositBase: Balance = 1; + pub const AnnouncementDepositFactor: Balance = 1; +} + +parameter_types! 
{ + pub const InitialMinAllowedWeights: u16 = 0; + pub const InitialEmissionValue: u16 = 0; + pub BlockWeights: limits::BlockWeights = limits::BlockWeights::with_sensible_defaults( + Weight::from_parts(2_000_000_000_000, u64::MAX), + Perbill::from_percent(75), + ); + pub const ExistentialDeposit: Balance = 1; + pub const TransactionByteFee: Balance = 100; + pub const SDebug:u64 = 1; + pub const InitialRho: u16 = 30; + pub const InitialAlphaSigmoidSteepness: i16 = 1000; + pub const InitialKappa: u16 = 32_767; + pub const InitialTempo: u16 = 360; + pub const SelfOwnership: u64 = 2; + pub const InitialImmunityPeriod: u16 = 2; + pub const InitialMinAllowedUids: u16 = 2; + pub const InitialMaxAllowedUids: u16 = 4; + pub const InitialBondsMovingAverage: u64 = 900_000; + pub const InitialBondsPenalty:u16 = u16::MAX; + pub const InitialBondsResetOn: bool = false; + pub const InitialStakePruningMin: u16 = 0; + pub const InitialFoundationDistribution: u64 = 0; + pub const InitialDefaultDelegateTake: u16 = 11_796; // 18%, same as in production + pub const InitialMinDelegateTake: u16 = 5_898; // 9%; + pub const InitialDefaultChildKeyTake: u16 = 0 ;// 0 % + pub const InitialMinChildKeyTake: u16 = 0; // 0 %; + pub const InitialMaxChildKeyTake: u16 = 11_796; // 18 %; + pub const InitialWeightsVersionKey: u16 = 0; + pub const InitialServingRateLimit: u64 = 0; // No limit. + pub const InitialTxRateLimit: u64 = 0; // Disable rate limit for testing + pub const InitialTxDelegateTakeRateLimit: u64 = 1; // 1 block take rate limit for testing + pub const InitialTxChildKeyTakeRateLimit: u64 = 1; // 1 block take rate limit for testing + pub const InitialBurn: u64 = 0; + pub const InitialMinBurn: u64 = 500_000; + pub const InitialMaxBurn: u64 = 1_000_000_000; + pub const MinBurnUpperBound: TaoCurrency = TaoCurrency::new(1_000_000_000); // 1 TAO + pub const MaxBurnLowerBound: TaoCurrency = TaoCurrency::new(100_000_000); // 0.1 TAO + pub const InitialValidatorPruneLen: u64 = 0; + pub const InitialScalingLawPower: u16 = 50; + pub const InitialMaxAllowedValidators: u16 = 100; + pub const InitialIssuance: u64 = 0; + pub const InitialDifficulty: u64 = 10000; + pub const InitialActivityCutoff: u16 = 5000; + pub const InitialAdjustmentInterval: u16 = 100; + pub const InitialAdjustmentAlpha: u64 = 0; // no weight to previous value. + pub const InitialMaxRegistrationsPerBlock: u16 = 3; + pub const InitialTargetRegistrationsPerInterval: u16 = 2; + pub const InitialPruningScore : u16 = u16::MAX; + pub const InitialRegistrationRequirement: u16 = u16::MAX; // Top 100% + pub const InitialMinDifficulty: u64 = 1; + pub const InitialMaxDifficulty: u64 = u64::MAX; + pub const InitialRAORecycledForRegistration: u64 = 0; + pub const InitialNetworkImmunityPeriod: u64 = 1_296_000; + pub const InitialNetworkMinLockCost: u64 = 100_000_000_000; + pub const InitialSubnetOwnerCut: u16 = 0; // 0%. 100% of rewards go to validators + miners. + pub const InitialNetworkLockReductionInterval: u64 = 2; // 2 blocks. 
+ pub const InitialNetworkRateLimit: u64 = 0; + pub const InitialKeySwapCost: u64 = 1_000_000_000; + pub const InitialAlphaHigh: u16 = 58982; // Represents 0.9 as per the production default + pub const InitialAlphaLow: u16 = 45875; // Represents 0.7 as per the production default + pub const InitialLiquidAlphaOn: bool = false; // Default value for LiquidAlphaOn + pub const InitialYuma3On: bool = false; // Default value for Yuma3On + // pub const InitialNetworkMaxStake: u64 = u64::MAX; // (DEPRECATED) + pub const InitialColdkeySwapScheduleDuration: u64 = 5 * 24 * 60 * 60 / 12; // Default as 5 days + pub const InitialColdkeySwapRescheduleDuration: u64 = 24 * 60 * 60 / 12; // Default as 1 day + pub const InitialDissolveNetworkScheduleDuration: u64 = 5 * 24 * 60 * 60 / 12; // Default as 5 days + pub const InitialTaoWeight: u64 = 0; // 100% global weight. + pub const InitialEmaPriceHalvingPeriod: u64 = 201_600_u64; // 4 weeks + pub const DurationOfStartCall: u64 = 7 * 24 * 60 * 60 / 12; // Default as 7 days + pub const InitialKeySwapOnSubnetCost: u64 = 10_000_000; + pub const HotkeySwapOnSubnetInterval: u64 = 15; // 15 block, should be bigger than subnet number, then trigger clean up for all subnets + pub const MaxContributorsPerLeaseToRemove: u32 = 3; + pub const LeaseDividendsDistributionInterval: u32 = 100; + pub const MaxImmuneUidsPercentage: Percent = Percent::from_percent(80); + pub const EvmKeyAssociateRateLimit: u64 = 10; +} + +impl pallet_subtensor::Config for Test { + type RuntimeCall = RuntimeCall; + type Currency = Balances; + type InitialIssuance = InitialIssuance; + type SudoRuntimeCall = TestRuntimeCall; + type Scheduler = Scheduler; + type InitialMinAllowedWeights = InitialMinAllowedWeights; + type InitialEmissionValue = InitialEmissionValue; + type InitialTempo = InitialTempo; + type InitialDifficulty = InitialDifficulty; + type InitialAdjustmentInterval = InitialAdjustmentInterval; + type InitialAdjustmentAlpha = InitialAdjustmentAlpha; + type InitialTargetRegistrationsPerInterval = InitialTargetRegistrationsPerInterval; + type InitialRho = InitialRho; + type InitialAlphaSigmoidSteepness = InitialAlphaSigmoidSteepness; + type InitialKappa = InitialKappa; + type InitialMinAllowedUids = InitialMinAllowedUids; + type InitialMaxAllowedUids = InitialMaxAllowedUids; + type InitialValidatorPruneLen = InitialValidatorPruneLen; + type InitialScalingLawPower = InitialScalingLawPower; + type InitialImmunityPeriod = InitialImmunityPeriod; + type InitialActivityCutoff = InitialActivityCutoff; + type InitialMaxRegistrationsPerBlock = InitialMaxRegistrationsPerBlock; + type InitialPruningScore = InitialPruningScore; + type InitialBondsMovingAverage = InitialBondsMovingAverage; + type InitialBondsPenalty = InitialBondsPenalty; + type InitialBondsResetOn = InitialBondsResetOn; + type InitialMaxAllowedValidators = InitialMaxAllowedValidators; + type InitialDefaultDelegateTake = InitialDefaultDelegateTake; + type InitialMinDelegateTake = InitialMinDelegateTake; + type InitialDefaultChildKeyTake = InitialDefaultChildKeyTake; + type InitialMinChildKeyTake = InitialMinChildKeyTake; + type InitialMaxChildKeyTake = InitialMaxChildKeyTake; + type InitialTxChildKeyTakeRateLimit = InitialTxChildKeyTakeRateLimit; + type InitialWeightsVersionKey = InitialWeightsVersionKey; + type InitialMaxDifficulty = InitialMaxDifficulty; + type InitialMinDifficulty = InitialMinDifficulty; + type InitialServingRateLimit = InitialServingRateLimit; + type InitialTxRateLimit = InitialTxRateLimit; + type 
InitialTxDelegateTakeRateLimit = InitialTxDelegateTakeRateLimit; + type InitialBurn = InitialBurn; + type InitialMaxBurn = InitialMaxBurn; + type InitialMinBurn = InitialMinBurn; + type MinBurnUpperBound = MinBurnUpperBound; + type MaxBurnLowerBound = MaxBurnLowerBound; + type InitialRAORecycledForRegistration = InitialRAORecycledForRegistration; + type InitialNetworkImmunityPeriod = InitialNetworkImmunityPeriod; + type InitialNetworkMinLockCost = InitialNetworkMinLockCost; + type InitialSubnetOwnerCut = InitialSubnetOwnerCut; + type InitialNetworkLockReductionInterval = InitialNetworkLockReductionInterval; + type InitialNetworkRateLimit = InitialNetworkRateLimit; + type KeySwapCost = InitialKeySwapCost; + type AlphaHigh = InitialAlphaHigh; + type AlphaLow = InitialAlphaLow; + type LiquidAlphaOn = InitialLiquidAlphaOn; + type Yuma3On = InitialYuma3On; + type Preimages = Preimage; + type InitialColdkeySwapScheduleDuration = InitialColdkeySwapScheduleDuration; + type InitialColdkeySwapRescheduleDuration = InitialColdkeySwapRescheduleDuration; + type InitialDissolveNetworkScheduleDuration = InitialDissolveNetworkScheduleDuration; + type InitialTaoWeight = InitialTaoWeight; + type InitialEmaPriceHalvingPeriod = InitialEmaPriceHalvingPeriod; + type DurationOfStartCall = DurationOfStartCall; + type SwapInterface = pallet_subtensor_swap::Pallet; + type KeySwapOnSubnetCost = InitialKeySwapOnSubnetCost; + type HotkeySwapOnSubnetInterval = HotkeySwapOnSubnetInterval; + type ProxyInterface = FakeProxier; + type LeaseDividendsDistributionInterval = LeaseDividendsDistributionInterval; + type GetCommitments = (); + type MaxImmuneUidsPercentage = MaxImmuneUidsPercentage; + type CommitmentsInterface = CommitmentsI; + type EvmKeyAssociateRateLimit = EvmKeyAssociateRateLimit; +} + +// Swap-related parameter types +parameter_types! { + pub const SwapProtocolId: PalletId = PalletId(*b"ten/swap"); + pub const SwapMaxFeeRate: u16 = 10000; // 15.26% + pub const SwapMaxPositions: u32 = 100; + pub const SwapMinimumLiquidity: u64 = 1_000; + pub const SwapMinimumReserve: NonZeroU64 = NonZeroU64::new(100).unwrap(); +} + +impl pallet_subtensor_swap::Config for Test { + type SubnetInfo = SubtensorModule; + type BalanceOps = SubtensorModule; + type ProtocolId = SwapProtocolId; + type TaoReserve = TaoCurrencyReserve; + type AlphaReserve = AlphaCurrencyReserve; + type MaxFeeRate = SwapMaxFeeRate; + type MaxPositions = SwapMaxPositions; + type MinimumLiquidity = SwapMinimumLiquidity; + type MinimumReserve = SwapMinimumReserve; + type WeightInfo = (); +} + +pub struct OriginPrivilegeCmp; + +impl PrivilegeCmp for OriginPrivilegeCmp { + fn cmp_privilege(_left: &OriginCaller, _right: &OriginCaller) -> Option { + Some(Ordering::Less) + } +} + +pub struct CommitmentsI; +impl CommitmentsInterface for CommitmentsI { + fn purge_netuid(_netuid: NetUid) {} +} + +parameter_types! 
{ + pub MaximumSchedulerWeight: Weight = Perbill::from_percent(80) * + BlockWeights::get().max_block; + pub const MaxScheduledPerBlock: u32 = 50; + pub const NoPreimagePostponement: Option = Some(10); +} + +impl pallet_scheduler::Config for Test { + type RuntimeOrigin = RuntimeOrigin; + type RuntimeEvent = RuntimeEvent; + type PalletsOrigin = OriginCaller; + type RuntimeCall = RuntimeCall; + type MaximumWeight = MaximumSchedulerWeight; + type ScheduleOrigin = EnsureRoot; + type MaxScheduledPerBlock = MaxScheduledPerBlock; + type WeightInfo = pallet_scheduler::weights::SubstrateWeight; + type OriginPrivilegeCmp = OriginPrivilegeCmp; + type Preimages = Preimage; + type BlockNumberProvider = System; +} + +impl pallet_utility::Config for Test { + type RuntimeCall = RuntimeCall; + type PalletsOrigin = OriginCaller; + type WeightInfo = pallet_utility::weights::SubstrateWeight; +} + +parameter_types! { + pub const PreimageMaxSize: u32 = 4096 * 1024; + pub const PreimageBaseDeposit: Balance = 1; + pub const PreimageByteDeposit: Balance = 1; +} + +impl pallet_preimage::Config for Test { + type WeightInfo = pallet_preimage::weights::SubstrateWeight; + type RuntimeEvent = RuntimeEvent; + type Currency = Balances; + type ManagerOrigin = EnsureRoot; + type Consideration = (); +} + +thread_local! { + pub static PROXIES: RefCell = const { RefCell::new(FakeProxier(vec![])) }; +} + +pub struct FakeProxier(pub Vec<(U256, U256)>); + +impl ProxyInterface for FakeProxier { + fn add_lease_beneficiary_proxy(beneficiary: &AccountId, lease: &AccountId) -> DispatchResult { + PROXIES.with_borrow_mut(|proxies| { + proxies.0.push((*beneficiary, *lease)); + }); + Ok(()) + } + + fn remove_lease_beneficiary_proxy( + beneficiary: &AccountId, + lease: &AccountId, + ) -> DispatchResult { + PROXIES.with_borrow_mut(|proxies| { + proxies.0.retain(|(b, l)| b != beneficiary && l != lease); + }); + Ok(()) + } +} + +parameter_types! 
{ + pub const CrowdloanPalletId: PalletId = PalletId(*b"bt/cloan"); + pub const MinimumDeposit: u64 = 50; + pub const AbsoluteMinimumContribution: u64 = 10; + pub const MinimumBlockDuration: u64 = 20; + pub const MaximumBlockDuration: u64 = 100; + pub const RefundContributorsLimit: u32 = 5; + pub const MaxContributors: u32 = 10; +} + +impl pallet_crowdloan::Config for Test { + type PalletId = CrowdloanPalletId; + type Currency = Balances; + type RuntimeCall = RuntimeCall; + type WeightInfo = pallet_crowdloan::weights::SubstrateWeight; + type Preimages = Preimage; + type MinimumDeposit = MinimumDeposit; + type AbsoluteMinimumContribution = AbsoluteMinimumContribution; + type MinimumBlockDuration = MinimumBlockDuration; + type MaximumBlockDuration = MaximumBlockDuration; + type RefundContributorsLimit = RefundContributorsLimit; + type MaxContributors = MaxContributors; +} + +mod test_crypto { + use super::KEY_TYPE; + use sp_core::{ + U256, + sr25519::{Public as Sr25519Public, Signature as Sr25519Signature}, + }; + use sp_runtime::{ + app_crypto::{app_crypto, sr25519}, + traits::IdentifyAccount, + }; + + app_crypto!(sr25519, KEY_TYPE); + + pub struct TestAuthId; + + impl frame_system::offchain::AppCrypto for TestAuthId { + type RuntimeAppPublic = Public; + type GenericSignature = Sr25519Signature; + type GenericPublic = Sr25519Public; + } + + impl IdentifyAccount for Public { + type AccountId = U256; + + fn into_account(self) -> U256 { + let mut bytes = [0u8; 32]; + bytes.copy_from_slice(self.as_ref()); + U256::from_big_endian(&bytes) + } + } +} + +pub type TestAuthId = test_crypto::TestAuthId; + +impl pallet_drand::Config for Test { + type AuthorityId = TestAuthId; + type Verifier = pallet_drand::verifier::QuicknetVerifier; + type UnsignedPriority = ConstU64<{ 1 << 20 }>; + type HttpFetchTimeout = ConstU64<1_000>; +} + +impl frame_system::offchain::SigningTypes for Test { + type Public = test_crypto::Public; + type Signature = test_crypto::Signature; +} + +pub type UncheckedExtrinsic = sp_runtime::testing::TestXt; + +impl frame_system::offchain::CreateTransactionBase for Test +where + RuntimeCall: From, +{ + type Extrinsic = UncheckedExtrinsic; + type RuntimeCall = RuntimeCall; +} + +impl frame_system::offchain::CreateInherent for Test +where + RuntimeCall: From, +{ + fn create_bare(call: Self::RuntimeCall) -> Self::Extrinsic { + UncheckedExtrinsic::new_inherent(call) + } +} + +impl frame_system::offchain::CreateSignedTransaction for Test +where + RuntimeCall: From, +{ + fn create_signed_transaction< + C: frame_system::offchain::AppCrypto, + >( + call: >::RuntimeCall, + _public: Self::Public, + _account: Self::AccountId, + nonce: Self::Nonce, + ) -> Option { + Some(UncheckedExtrinsic::new_signed(call, nonce.into(), (), ())) + } +} + +static TEST_LOGS_INIT: OnceLock<()> = OnceLock::new(); + +pub fn init_logs_for_tests() { + if TEST_LOGS_INIT.get().is_some() { + return; + } + let _ = TEST_LOGS_INIT.set(()); +} + +#[allow(dead_code)] +// Build genesis storage according to the mock runtime. 
+pub fn new_test_ext(block_number: BlockNumber) -> sp_io::TestExternalities {
+    init_logs_for_tests();
+    let t = frame_system::GenesisConfig::<Test>::default()
+        .build_storage()
+        .unwrap();
+    let mut ext = sp_io::TestExternalities::new(t);
+    ext.execute_with(|| System::set_block_number(block_number));
+    ext
+}
+
+#[allow(dead_code)]
+pub fn register_ok_neuron(
+    netuid: NetUid,
+    hotkey_account_id: U256,
+    coldkey_account_id: U256,
+    start_nonce: u64,
+) {
+    let block_number: u64 = SubtensorModule::get_current_block_as_u64();
+    let (nonce, work): (u64, Vec<u8>) = SubtensorModule::create_work_for_block_number(
+        netuid,
+        block_number,
+        start_nonce,
+        &hotkey_account_id,
+    );
+    let result = SubtensorModule::register(
+        <<Test as frame_system::Config>::RuntimeOrigin>::signed(hotkey_account_id),
+        netuid,
+        block_number,
+        nonce,
+        work,
+        hotkey_account_id,
+        coldkey_account_id,
+    );
+    assert_ok!(result);
+    log::info!(
+        "Register ok neuron: netuid: {netuid:?}, coldkey: {coldkey_account_id:?}, hotkey: {hotkey_account_id:?}"
+    );
+}
+
+#[allow(dead_code)]
+pub fn add_dynamic_network(hotkey: &U256, coldkey: &U256) -> NetUid {
+    let netuid = SubtensorModule::get_next_netuid();
+    let lock_cost = SubtensorModule::get_network_lock_cost();
+    SubtensorModule::add_balance_to_coldkey_account(coldkey, lock_cost.into());
+
+    assert_ok!(SubtensorModule::register_network(
+        RawOrigin::Signed(*coldkey).into(),
+        *hotkey
+    ));
+    NetworkRegistrationAllowed::<Test>::insert(netuid, true);
+    NetworkPowRegistrationAllowed::<Test>::insert(netuid, true);
+    FirstEmissionBlockNumber::<Test>::insert(netuid, 0);
+    SubtokenEnabled::<Test>::insert(netuid, true);
+    netuid
+}
+
+#[allow(dead_code)]
+pub(crate) fn remove_stake_rate_limit_for_tests(hotkey: &U256, coldkey: &U256, netuid: NetUid) {
+    StakingOperationRateLimiter::<Test>::remove((hotkey, coldkey, netuid));
+}
+
+#[allow(dead_code)]
+pub(crate) fn setup_reserves(netuid: NetUid, tao: TaoCurrency, alpha: AlphaCurrency) {
+    SubnetTAO::<Test>::set(netuid, tao);
+    SubnetAlphaIn::<Test>::set(netuid, alpha);
+}
diff --git a/chain-extensions/src/tests.rs b/chain-extensions/src/tests.rs
new file mode 100644
index 0000000000..378fa084a1
--- /dev/null
+++ b/chain-extensions/src/tests.rs
@@ -0,0 +1,966 @@
+#![allow(clippy::unwrap_used)]
+
+use super::{SubtensorChainExtension, SubtensorExtensionEnv, mock};
+use crate::types::{FunctionId, Output};
+use codec::Encode;
+use frame_support::{assert_ok, weights::Weight};
+use frame_system::RawOrigin;
+use pallet_contracts::chain_extension::RetVal;
+use pallet_subtensor::DefaultMinStake;
+use sp_core::Get;
+use sp_core::U256;
+use sp_runtime::DispatchError;
+use subtensor_runtime_common::{AlphaCurrency, Currency as CurrencyTrait, NetUid, TaoCurrency};
+use subtensor_swap_interface::SwapHandler;
+
+type AccountId = <mock::Test as frame_system::Config>::AccountId;
+
+#[derive(Clone)]
+struct MockEnv {
+    func_id: u16,
+    caller: AccountId,
+    input: Vec<u8>,
+    output: Vec<u8>,
+    charged_weight: Option<Weight>,
+    expected_weight: Option<Weight>,
+}
+
+#[test]
+fn set_coldkey_auto_stake_hotkey_success_sets_destination() {
+    mock::new_test_ext(1).execute_with(|| {
+        let owner_hotkey = U256::from(4901);
+        let owner_coldkey = U256::from(4902);
+        let coldkey = U256::from(5901);
+        let hotkey = U256::from(5902);
+
+        let netuid = mock::add_dynamic_network(&owner_hotkey, &owner_coldkey);
+
+        pallet_subtensor::Owner::<mock::Test>::insert(hotkey, coldkey);
+        pallet_subtensor::OwnedHotkeys::<mock::Test>::insert(coldkey, vec![hotkey]);
+        pallet_subtensor::Uids::<mock::Test>::insert(netuid, hotkey, 0u16);
+
+        assert_eq!(
+            pallet_subtensor::AutoStakeDestination::<mock::Test>::get(coldkey, netuid),
+            None
+        );
+
+        let expected_weight =
Weight::from_parts(29_930_000, 0) + .saturating_add(::DbWeight::get().reads(4)) + .saturating_add(::DbWeight::get().writes(2)); + + let mut env = MockEnv::new( + FunctionId::SetColdkeyAutoStakeHotkeyV1, + coldkey, + (netuid, hotkey).encode(), + ) + .with_expected_weight(expected_weight); + + let ret = SubtensorChainExtension::::dispatch(&mut env).unwrap(); + assert_success(ret); + assert_eq!(env.charged_weight(), Some(expected_weight)); + + assert_eq!( + pallet_subtensor::AutoStakeDestination::::get(coldkey, netuid), + Some(hotkey) + ); + let coldkeys = + pallet_subtensor::AutoStakeDestinationColdkeys::::get(hotkey, netuid); + assert!(coldkeys.contains(&coldkey)); + }); +} + +#[test] +fn remove_stake_full_limit_success_with_limit_price() { + mock::new_test_ext(1).execute_with(|| { + let owner_hotkey = U256::from(4801); + let owner_coldkey = U256::from(4802); + let coldkey = U256::from(5801); + let hotkey = U256::from(5802); + let stake_amount_raw: u64 = 340_000_000_000; + + let netuid = mock::add_dynamic_network(&owner_hotkey, &owner_coldkey); + mock::setup_reserves( + netuid, + TaoCurrency::from(130_000_000_000), + AlphaCurrency::from(110_000_000_000), + ); + + mock::register_ok_neuron(netuid, hotkey, coldkey, 0); + + pallet_subtensor::Pallet::::add_balance_to_coldkey_account( + &coldkey, + stake_amount_raw + 1_000_000_000, + ); + + assert_ok!(pallet_subtensor::Pallet::::add_stake( + RawOrigin::Signed(coldkey).into(), + hotkey, + netuid, + stake_amount_raw.into(), + )); + + mock::remove_stake_rate_limit_for_tests(&hotkey, &coldkey, netuid); + + let expected_weight = Weight::from_parts(395_300_000, 0) + .saturating_add(::DbWeight::get().reads(28)) + .saturating_add(::DbWeight::get().writes(14)); + + let balance_before = pallet_subtensor::Pallet::::get_coldkey_balance(&coldkey); + + let mut env = MockEnv::new( + FunctionId::RemoveStakeFullLimitV1, + coldkey, + (hotkey, netuid, Option::::None).encode(), + ) + .with_expected_weight(expected_weight); + + let ret = SubtensorChainExtension::::dispatch(&mut env).unwrap(); + assert_success(ret); + assert_eq!(env.charged_weight(), Some(expected_weight)); + + let alpha_after = + pallet_subtensor::Pallet::::get_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, &coldkey, netuid, + ); + let balance_after = pallet_subtensor::Pallet::::get_coldkey_balance(&coldkey); + + assert!(alpha_after.is_zero()); + assert!(balance_after > balance_before); + }); +} + +#[test] +fn swap_stake_limit_with_tight_price_returns_slippage_error() { + mock::new_test_ext(1).execute_with(|| { + let owner_hotkey_a = U256::from(4701); + let owner_coldkey_a = U256::from(4702); + let owner_hotkey_b = U256::from(4703); + let owner_coldkey_b = U256::from(4704); + let coldkey = U256::from(5701); + let hotkey = U256::from(5702); + + let stake_alpha = AlphaCurrency::from(150_000_000_000u64); + + let netuid_a = mock::add_dynamic_network(&owner_hotkey_a, &owner_coldkey_a); + let netuid_b = mock::add_dynamic_network(&owner_hotkey_b, &owner_coldkey_b); + + mock::setup_reserves( + netuid_a, + TaoCurrency::from(150_000_000_000), + AlphaCurrency::from(110_000_000_000), + ); + mock::setup_reserves( + netuid_b, + TaoCurrency::from(120_000_000_000), + AlphaCurrency::from(90_000_000_000), + ); + + mock::register_ok_neuron(netuid_a, hotkey, coldkey, 0); + mock::register_ok_neuron(netuid_b, hotkey, coldkey, 1); + + pallet_subtensor::Pallet::::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, + &coldkey, + netuid_a, + stake_alpha, + ); + + mock::remove_stake_rate_limit_for_tests(&hotkey, 
&coldkey, netuid_a); + + let alpha_origin_before = + pallet_subtensor::Pallet::::get_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, &coldkey, netuid_a, + ); + let alpha_destination_before = + pallet_subtensor::Pallet::::get_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, &coldkey, netuid_b, + ); + + let alpha_to_swap: AlphaCurrency = (alpha_origin_before.to_u64() / 8).into(); + let limit_price: TaoCurrency = 100u64.into(); + + let expected_weight = Weight::from_parts(411_500_000, 0) + .saturating_add(::DbWeight::get().reads(35)) + .saturating_add(::DbWeight::get().writes(22)); + + let mut env = MockEnv::new( + FunctionId::SwapStakeLimitV1, + coldkey, + (hotkey, netuid_a, netuid_b, alpha_to_swap, limit_price, true).encode(), + ) + .with_expected_weight(expected_weight); + + let ret = SubtensorChainExtension::::dispatch(&mut env).unwrap(); + assert_success(ret); + assert_eq!(env.charged_weight(), Some(expected_weight)); + + let alpha_origin_after = + pallet_subtensor::Pallet::::get_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, &coldkey, netuid_a, + ); + let alpha_destination_after = + pallet_subtensor::Pallet::::get_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, &coldkey, netuid_b, + ); + + assert!(alpha_origin_after <= alpha_origin_before); + assert!(alpha_destination_after >= alpha_destination_before); + }); +} + +#[test] +fn remove_stake_limit_success_respects_price_limit() { + mock::new_test_ext(1).execute_with(|| { + let owner_hotkey = U256::from(4601); + let owner_coldkey = U256::from(4602); + let coldkey = U256::from(5601); + let hotkey = U256::from(5602); + let stake_amount_raw: u64 = 320_000_000_000; + + let netuid = mock::add_dynamic_network(&owner_hotkey, &owner_coldkey); + mock::setup_reserves( + netuid, + TaoCurrency::from(120_000_000_000), + AlphaCurrency::from(100_000_000_000), + ); + + mock::register_ok_neuron(netuid, hotkey, coldkey, 0); + + pallet_subtensor::Pallet::::add_balance_to_coldkey_account( + &coldkey, + stake_amount_raw + 1_000_000_000, + ); + + assert_ok!(pallet_subtensor::Pallet::::add_stake( + RawOrigin::Signed(coldkey).into(), + hotkey, + netuid, + stake_amount_raw.into(), + )); + + mock::remove_stake_rate_limit_for_tests(&hotkey, &coldkey, netuid); + + let alpha_before = + pallet_subtensor::Pallet::::get_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, &coldkey, netuid, + ); + + let current_price = + ::SwapInterface::current_alpha_price( + netuid.into(), + ); + let limit_price_value = (current_price.to_num::() * 990_000_000f64).round() as u64; + let limit_price: TaoCurrency = limit_price_value.into(); + + let alpha_to_unstake: AlphaCurrency = (alpha_before.to_u64() / 2).into(); + + let expected_weight = Weight::from_parts(377_400_000, 0) + .saturating_add(::DbWeight::get().reads(28)) + .saturating_add(::DbWeight::get().writes(14)); + + let balance_before = pallet_subtensor::Pallet::::get_coldkey_balance(&coldkey); + + let mut env = MockEnv::new( + FunctionId::RemoveStakeLimitV1, + coldkey, + (hotkey, netuid, alpha_to_unstake, limit_price, true).encode(), + ) + .with_expected_weight(expected_weight); + + let ret = SubtensorChainExtension::::dispatch(&mut env).unwrap(); + assert_success(ret); + assert_eq!(env.charged_weight(), Some(expected_weight)); + + let alpha_after = + pallet_subtensor::Pallet::::get_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, &coldkey, netuid, + ); + let balance_after = pallet_subtensor::Pallet::::get_coldkey_balance(&coldkey); + + assert!(alpha_after < alpha_before); + assert!(balance_after > balance_before); + 
}); +} + +#[test] +fn add_stake_limit_success_executes_within_price_guard() { + mock::new_test_ext(1).execute_with(|| { + let owner_hotkey = U256::from(4501); + let owner_coldkey = U256::from(4502); + let coldkey = U256::from(5501); + let hotkey = U256::from(5502); + let amount_raw: u64 = 900_000_000_000; + let limit_price: TaoCurrency = 24_000_000_000u64.into(); + + let netuid = mock::add_dynamic_network(&owner_hotkey, &owner_coldkey); + + mock::setup_reserves( + netuid, + TaoCurrency::from(150_000_000_000), + AlphaCurrency::from(100_000_000_000), + ); + + mock::register_ok_neuron(netuid, hotkey, coldkey, 0); + + pallet_subtensor::Pallet::::add_balance_to_coldkey_account( + &coldkey, + amount_raw + 1_000_000_000, + ); + + let stake_before = + pallet_subtensor::Pallet::::get_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, &coldkey, netuid, + ); + let balance_before = pallet_subtensor::Pallet::::get_coldkey_balance(&coldkey); + + let expected_weight = Weight::from_parts(402_900_000, 0) + .saturating_add(::DbWeight::get().reads(24)) + .saturating_add(::DbWeight::get().writes(15)); + + let mut env = MockEnv::new( + FunctionId::AddStakeLimitV1, + coldkey, + ( + hotkey, + netuid, + TaoCurrency::from(amount_raw), + limit_price, + true, + ) + .encode(), + ) + .with_expected_weight(expected_weight); + + let ret = SubtensorChainExtension::::dispatch(&mut env).unwrap(); + assert_success(ret); + assert_eq!(env.charged_weight(), Some(expected_weight)); + + let stake_after = + pallet_subtensor::Pallet::::get_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, &coldkey, netuid, + ); + let balance_after = pallet_subtensor::Pallet::::get_coldkey_balance(&coldkey); + + assert!(stake_after > stake_before); + assert!(stake_after > AlphaCurrency::ZERO); + assert!(balance_after < balance_before); + }); +} + +#[test] +fn swap_stake_success_moves_between_subnets() { + mock::new_test_ext(1).execute_with(|| { + let owner_hotkey_a = U256::from(4401); + let owner_coldkey_a = U256::from(4402); + let owner_hotkey_b = U256::from(4403); + let owner_coldkey_b = U256::from(4404); + let coldkey = U256::from(5401); + let hotkey = U256::from(5402); + + let min_stake = DefaultMinStake::::get(); + let stake_amount_raw = min_stake.to_u64().saturating_mul(260); + + let netuid_a = mock::add_dynamic_network(&owner_hotkey_a, &owner_coldkey_a); + let netuid_b = mock::add_dynamic_network(&owner_hotkey_b, &owner_coldkey_b); + + mock::setup_reserves( + netuid_a, + stake_amount_raw.saturating_mul(18).into(), + AlphaCurrency::from(stake_amount_raw.saturating_mul(30)), + ); + mock::setup_reserves( + netuid_b, + stake_amount_raw.saturating_mul(20).into(), + AlphaCurrency::from(stake_amount_raw.saturating_mul(28)), + ); + + mock::register_ok_neuron(netuid_a, hotkey, coldkey, 0); + mock::register_ok_neuron(netuid_b, hotkey, coldkey, 1); + + pallet_subtensor::Pallet::::add_balance_to_coldkey_account( + &coldkey, + stake_amount_raw + 1_000_000_000, + ); + + assert_ok!(pallet_subtensor::Pallet::::add_stake( + RawOrigin::Signed(coldkey).into(), + hotkey, + netuid_a, + stake_amount_raw.into(), + )); + + mock::remove_stake_rate_limit_for_tests(&hotkey, &coldkey, netuid_a); + + let alpha_origin_before = + pallet_subtensor::Pallet::::get_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, &coldkey, netuid_a, + ); + let alpha_destination_before = + pallet_subtensor::Pallet::::get_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, &coldkey, netuid_b, + ); + let alpha_to_swap: AlphaCurrency = (alpha_origin_before.to_u64() / 3).into(); + + let 
expected_weight = Weight::from_parts(351_300_000, 0) + .saturating_add(::DbWeight::get().reads(35)) + .saturating_add(::DbWeight::get().writes(22)); + + let mut env = MockEnv::new( + FunctionId::SwapStakeV1, + coldkey, + (hotkey, netuid_a, netuid_b, alpha_to_swap).encode(), + ) + .with_expected_weight(expected_weight); + + let ret = SubtensorChainExtension::::dispatch(&mut env).unwrap(); + assert_success(ret); + assert_eq!(env.charged_weight(), Some(expected_weight)); + + let alpha_origin_after = + pallet_subtensor::Pallet::::get_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, &coldkey, netuid_a, + ); + let alpha_destination_after = + pallet_subtensor::Pallet::::get_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, &coldkey, netuid_b, + ); + + assert!(alpha_origin_after < alpha_origin_before); + assert!( + alpha_destination_after > alpha_destination_before, + "destination stake should increase" + ); + }); +} + +#[test] +fn transfer_stake_success_moves_between_coldkeys() { + mock::new_test_ext(1).execute_with(|| { + let owner_hotkey = U256::from(4301); + let owner_coldkey = U256::from(4302); + let origin_coldkey = U256::from(5301); + let destination_coldkey = U256::from(5302); + let hotkey = U256::from(5303); + + let min_stake = DefaultMinStake::::get(); + let stake_amount_raw = min_stake.to_u64().saturating_mul(250); + + let netuid = mock::add_dynamic_network(&owner_hotkey, &owner_coldkey); + mock::setup_reserves( + netuid, + stake_amount_raw.saturating_mul(15).into(), + AlphaCurrency::from(stake_amount_raw.saturating_mul(25)), + ); + + mock::register_ok_neuron(netuid, hotkey, origin_coldkey, 0); + + pallet_subtensor::Pallet::::add_balance_to_coldkey_account( + &origin_coldkey, + stake_amount_raw + 1_000_000_000, + ); + + assert_ok!(pallet_subtensor::Pallet::::add_stake( + RawOrigin::Signed(origin_coldkey).into(), + hotkey, + netuid, + stake_amount_raw.into(), + )); + + mock::remove_stake_rate_limit_for_tests(&hotkey, &origin_coldkey, netuid); + + let alpha_before = + pallet_subtensor::Pallet::::get_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, + &origin_coldkey, + netuid, + ); + let alpha_to_transfer: AlphaCurrency = (alpha_before.to_u64() / 3).into(); + + let expected_weight = Weight::from_parts(160_300_000, 0) + .saturating_add(::DbWeight::get().reads(13)) + .saturating_add(::DbWeight::get().writes(6)); + + let mut env = MockEnv::new( + FunctionId::TransferStakeV1, + origin_coldkey, + ( + destination_coldkey, + hotkey, + netuid, + netuid, + alpha_to_transfer, + ) + .encode(), + ) + .with_expected_weight(expected_weight); + + let ret = SubtensorChainExtension::::dispatch(&mut env).unwrap(); + assert_success(ret); + assert_eq!(env.charged_weight(), Some(expected_weight)); + + let origin_alpha_after = + pallet_subtensor::Pallet::::get_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, + &origin_coldkey, + netuid, + ); + let destination_alpha_after = + pallet_subtensor::Pallet::::get_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, + &destination_coldkey, + netuid, + ); + + assert_eq!(origin_alpha_after, alpha_before - alpha_to_transfer); + assert_eq!(destination_alpha_after, alpha_to_transfer); + }); +} + +#[test] +fn move_stake_success_moves_alpha_between_hotkeys() { + mock::new_test_ext(1).execute_with(|| { + let owner_hotkey = U256::from(4201); + let owner_coldkey = U256::from(4202); + let coldkey = U256::from(5201); + let origin_hotkey = U256::from(5202); + let destination_hotkey = U256::from(5203); + + let min_stake = DefaultMinStake::::get(); + let stake_amount_raw = 
min_stake.to_u64().saturating_mul(240); + + let netuid = mock::add_dynamic_network(&owner_hotkey, &owner_coldkey); + mock::setup_reserves( + netuid, + stake_amount_raw.saturating_mul(15).into(), + AlphaCurrency::from(stake_amount_raw.saturating_mul(25)), + ); + + mock::register_ok_neuron(netuid, origin_hotkey, coldkey, 0); + mock::register_ok_neuron(netuid, destination_hotkey, coldkey, 1); + + pallet_subtensor::Pallet::::add_balance_to_coldkey_account( + &coldkey, + stake_amount_raw + 1_000_000_000, + ); + + assert_ok!(pallet_subtensor::Pallet::::add_stake( + RawOrigin::Signed(coldkey).into(), + origin_hotkey, + netuid, + stake_amount_raw.into(), + )); + + mock::remove_stake_rate_limit_for_tests(&origin_hotkey, &coldkey, netuid); + + let alpha_before = + pallet_subtensor::Pallet::::get_stake_for_hotkey_and_coldkey_on_subnet( + &origin_hotkey, + &coldkey, + netuid, + ); + let alpha_to_move: AlphaCurrency = (alpha_before.to_u64() / 2).into(); + + let expected_weight = Weight::from_parts(164_300_000, 0) + .saturating_add(::DbWeight::get().reads(15)) + .saturating_add(::DbWeight::get().writes(7)); + + let mut env = MockEnv::new( + FunctionId::MoveStakeV1, + coldkey, + ( + origin_hotkey, + destination_hotkey, + netuid, + netuid, + alpha_to_move, + ) + .encode(), + ) + .with_expected_weight(expected_weight); + + let ret = SubtensorChainExtension::::dispatch(&mut env).unwrap(); + assert_success(ret); + assert_eq!(env.charged_weight(), Some(expected_weight)); + + let origin_alpha_after = + pallet_subtensor::Pallet::::get_stake_for_hotkey_and_coldkey_on_subnet( + &origin_hotkey, + &coldkey, + netuid, + ); + let destination_alpha_after = + pallet_subtensor::Pallet::::get_stake_for_hotkey_and_coldkey_on_subnet( + &destination_hotkey, + &coldkey, + netuid, + ); + + assert_eq!(origin_alpha_after, alpha_before - alpha_to_move); + assert_eq!(destination_alpha_after, alpha_to_move); + }); +} + +#[test] +fn unstake_all_alpha_success_moves_stake_to_root() { + mock::new_test_ext(1).execute_with(|| { + let owner_hotkey = U256::from(4101); + let owner_coldkey = U256::from(4102); + let coldkey = U256::from(5101); + let hotkey = U256::from(5102); + let min_stake = DefaultMinStake::::get(); + let stake_amount_raw = min_stake.to_u64().saturating_mul(220); + let netuid = mock::add_dynamic_network(&owner_hotkey, &owner_coldkey); + + mock::setup_reserves( + netuid, + stake_amount_raw.saturating_mul(20).into(), + AlphaCurrency::from(stake_amount_raw.saturating_mul(30)), + ); + + mock::register_ok_neuron(netuid, hotkey, coldkey, 0); + pallet_subtensor::Pallet::::add_balance_to_coldkey_account( + &coldkey, + stake_amount_raw + 1_000_000_000, + ); + + assert_ok!(pallet_subtensor::Pallet::::add_stake( + RawOrigin::Signed(coldkey).into(), + hotkey, + netuid, + stake_amount_raw.into(), + )); + + mock::remove_stake_rate_limit_for_tests(&hotkey, &coldkey, netuid); + + let expected_weight = Weight::from_parts(358_500_000, 0) + .saturating_add(::DbWeight::get().reads(36)) + .saturating_add(::DbWeight::get().writes(21)); + + let mut env = MockEnv::new(FunctionId::UnstakeAllAlphaV1, coldkey, hotkey.encode()) + .with_expected_weight(expected_weight); + + let ret = SubtensorChainExtension::::dispatch(&mut env).unwrap(); + assert_success(ret); + assert_eq!(env.charged_weight(), Some(expected_weight)); + + let subnet_alpha = + pallet_subtensor::Pallet::::get_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, &coldkey, netuid, + ); + assert!(subnet_alpha <= AlphaCurrency::from(1_000)); + + let root_alpha = + 
pallet_subtensor::Pallet::::get_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, + &coldkey, + NetUid::ROOT, + ); + assert!(root_alpha > AlphaCurrency::ZERO); + }); +} + +#[test] +fn add_proxy_success_creates_proxy_relationship() { + mock::new_test_ext(1).execute_with(|| { + let delegator = U256::from(6001); + let delegate = U256::from(6002); + + pallet_subtensor::Pallet::::add_balance_to_coldkey_account( + &delegator, + 1_000_000_000, + ); + + assert_eq!( + pallet_subtensor_proxy::Proxies::::get(delegator) + .0 + .len(), + 0 + ); + + let mut env = MockEnv::new(FunctionId::AddProxyV1, delegator, delegate.encode()); + + let ret = SubtensorChainExtension::::dispatch(&mut env).unwrap(); + assert_success(ret); + + let proxies = pallet_subtensor_proxy::Proxies::::get(delegator).0; + assert_eq!(proxies.len(), 1); + if let Some(proxy) = proxies.first() { + assert_eq!(proxy.delegate, delegate); + assert_eq!( + proxy.proxy_type, + subtensor_runtime_common::ProxyType::Staking + ); + assert_eq!(proxy.delay, 0u64); + } else { + panic!("proxies should contain one element"); + } + }); +} + +#[test] +fn remove_proxy_success_removes_proxy_relationship() { + mock::new_test_ext(1).execute_with(|| { + let delegator = U256::from(7001); + let delegate = U256::from(7002); + + pallet_subtensor::Pallet::::add_balance_to_coldkey_account( + &delegator, + 1_000_000_000, + ); + + let mut add_env = MockEnv::new(FunctionId::AddProxyV1, delegator, delegate.encode()); + let ret = SubtensorChainExtension::::dispatch(&mut add_env).unwrap(); + assert_success(ret); + + let proxies_before = pallet_subtensor_proxy::Proxies::::get(delegator).0; + assert_eq!(proxies_before.len(), 1); + + let mut remove_env = MockEnv::new(FunctionId::RemoveProxyV1, delegator, delegate.encode()); + let ret = SubtensorChainExtension::::dispatch(&mut remove_env).unwrap(); + assert_success(ret); + + let proxies_after = pallet_subtensor_proxy::Proxies::::get(delegator).0; + assert_eq!(proxies_after.len(), 0); + }); +} + +impl MockEnv { + fn new(func_id: FunctionId, caller: AccountId, input: Vec) -> Self { + Self { + func_id: func_id as u16, + caller, + input, + output: Vec::new(), + charged_weight: None, + expected_weight: None, + } + } + + fn with_expected_weight(mut self, weight: Weight) -> Self { + self.expected_weight = Some(weight); + self + } + + fn charged_weight(&self) -> Option { + self.charged_weight + } + + fn output(&self) -> &[u8] { + &self.output + } +} + +impl SubtensorExtensionEnv for MockEnv { + fn func_id(&self) -> u16 { + self.func_id + } + + fn charge_weight(&mut self, weight: Weight) -> Result<(), DispatchError> { + if let Some(expected) = self.expected_weight { + if weight != expected { + return Err(DispatchError::Other( + "unexpected weight charged by mock env", + )); + } + } + self.charged_weight = Some(weight); + Ok(()) + } + + fn read_as(&mut self) -> Result { + T::decode(&mut &self.input[..]).map_err(|_| DispatchError::Other("mock env decode failure")) + } + + fn write_output(&mut self, data: &[u8]) -> Result<(), DispatchError> { + self.output.clear(); + self.output.extend_from_slice(data); + Ok(()) + } + + fn caller(&mut self) -> AccountId { + self.caller + } +} + +fn assert_success(ret: RetVal) { + match ret { + RetVal::Converging(code) => { + assert_eq!(code, Output::Success as u32, "expected success code") + } + _ => panic!("unexpected return value"), + } +} + +#[test] +fn get_stake_info_returns_encoded_runtime_value() { + mock::new_test_ext(1).execute_with(|| { + let owner_hotkey = U256::from(1); + let owner_coldkey = 
U256::from(2); + let hotkey = U256::from(11); + let coldkey = U256::from(22); + let netuid = mock::add_dynamic_network(&owner_hotkey, &owner_coldkey); + mock::register_ok_neuron(netuid, hotkey, coldkey, 0); + + let expected = + pallet_subtensor::Pallet::::get_stake_info_for_hotkey_coldkey_netuid( + hotkey, coldkey, netuid, + ) + .encode(); + + let mut env = MockEnv::new( + FunctionId::GetStakeInfoForHotkeyColdkeyNetuidV1, + coldkey, + (hotkey, coldkey, netuid).encode(), + ); + + let ret = SubtensorChainExtension::::dispatch(&mut env).unwrap(); + + assert_success(ret); + assert_eq!(env.output(), expected.as_slice()); + assert!(env.charged_weight().is_none()); + }); +} + +#[test] +fn add_stake_success_updates_stake_and_returns_success_code() { + mock::new_test_ext(1).execute_with(|| { + let owner_hotkey = U256::from(1); + let owner_coldkey = U256::from(2); + let coldkey = U256::from(101); + let hotkey = U256::from(202); + let min_stake = DefaultMinStake::::get(); + let amount_raw = min_stake.to_u64().saturating_mul(10); + let amount: TaoCurrency = amount_raw.into(); + + let netuid = mock::add_dynamic_network(&owner_hotkey, &owner_coldkey); + mock::setup_reserves( + netuid, + (amount_raw * 1_000_000).into(), + AlphaCurrency::from(amount_raw * 10_000_000), + ); + mock::register_ok_neuron(netuid, hotkey, coldkey, 0); + + pallet_subtensor::Pallet::::add_balance_to_coldkey_account( + &coldkey, amount_raw, + ); + + assert!( + pallet_subtensor::Pallet::::get_total_stake_for_hotkey(&hotkey).is_zero() + ); + + let expected_weight = Weight::from_parts(340_800_000, 0) + .saturating_add(::DbWeight::get().reads(24)) + .saturating_add(::DbWeight::get().writes(15)); + + let mut env = MockEnv::new( + FunctionId::AddStakeV1, + coldkey, + (hotkey, netuid, amount).encode(), + ) + .with_expected_weight(expected_weight); + + let ret = SubtensorChainExtension::::dispatch(&mut env).unwrap(); + + assert_success(ret); + assert_eq!(env.charged_weight(), Some(expected_weight)); + + let total_stake = + pallet_subtensor::Pallet::::get_total_stake_for_hotkey(&hotkey); + assert!(total_stake > TaoCurrency::ZERO); + }); +} + +#[test] +fn remove_stake_with_no_stake_returns_amount_too_low() { + mock::new_test_ext(1).execute_with(|| { + let owner_hotkey = U256::from(1); + let owner_coldkey = U256::from(2); + let coldkey = U256::from(301); + let hotkey = U256::from(302); + let netuid = mock::add_dynamic_network(&owner_hotkey, &owner_coldkey); + mock::register_ok_neuron(netuid, hotkey, coldkey, 0); + + let min_stake = DefaultMinStake::::get(); + let amount: AlphaCurrency = AlphaCurrency::from(min_stake.to_u64()); + + let expected_weight = Weight::from_parts(196_800_000, 0) + .saturating_add(::DbWeight::get().reads(19)) + .saturating_add(::DbWeight::get().writes(10)); + + let mut env = MockEnv::new( + FunctionId::RemoveStakeV1, + coldkey, + (hotkey, netuid, amount).encode(), + ) + .with_expected_weight(expected_weight); + + let ret = SubtensorChainExtension::::dispatch(&mut env).unwrap(); + + match ret { + RetVal::Converging(code) => { + assert_eq!(code, Output::AmountTooLow as u32, "mismatched error output") + } + _ => panic!("unexpected return value"), + } + assert_eq!(env.charged_weight(), Some(expected_weight)); + assert!( + pallet_subtensor::Pallet::::get_total_stake_for_hotkey(&hotkey).is_zero() + ); + }); +} + +#[test] +fn unstake_all_success_unstakes_balance() { + mock::new_test_ext(1).execute_with(|| { + let owner_hotkey = U256::from(4001); + let owner_coldkey = U256::from(4002); + let coldkey = U256::from(5001); + let 
hotkey = U256::from(5002); + let min_stake = DefaultMinStake::::get(); + let stake_amount_raw = min_stake.to_u64().saturating_mul(200); + let netuid = mock::add_dynamic_network(&owner_hotkey, &owner_coldkey); + + mock::setup_reserves( + netuid, + stake_amount_raw.saturating_mul(10).into(), + AlphaCurrency::from(stake_amount_raw.saturating_mul(20)), + ); + + mock::register_ok_neuron(netuid, hotkey, coldkey, 0); + pallet_subtensor::Pallet::::add_balance_to_coldkey_account( + &coldkey, + stake_amount_raw + 1_000_000_000, + ); + + assert_ok!(pallet_subtensor::Pallet::::add_stake( + RawOrigin::Signed(coldkey).into(), + hotkey, + netuid, + stake_amount_raw.into(), + )); + + mock::remove_stake_rate_limit_for_tests(&hotkey, &coldkey, netuid); + + let expected_weight = Weight::from_parts(28_830_000, 0) + .saturating_add(::DbWeight::get().reads(6)) + .saturating_add(::DbWeight::get().writes(0)); + + let pre_balance = pallet_subtensor::Pallet::::get_coldkey_balance(&coldkey); + + let mut env = MockEnv::new(FunctionId::UnstakeAllV1, coldkey, hotkey.encode()) + .with_expected_weight(expected_weight); + + let ret = SubtensorChainExtension::::dispatch(&mut env).unwrap(); + assert_success(ret); + assert_eq!(env.charged_weight(), Some(expected_weight)); + + let remaining_alpha = + pallet_subtensor::Pallet::::get_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, &coldkey, netuid, + ); + assert!(remaining_alpha <= AlphaCurrency::from(1_000)); + + let post_balance = pallet_subtensor::Pallet::::get_coldkey_balance(&coldkey); + assert!(post_balance > pre_balance); + }); +} diff --git a/chain-extensions/src/types.rs b/chain-extensions/src/types.rs new file mode 100644 index 0000000000..7c0bfe4202 --- /dev/null +++ b/chain-extensions/src/types.rs @@ -0,0 +1,98 @@ +use codec::{Decode, Encode}; +use num_enum::{IntoPrimitive, TryFromPrimitive}; +use sp_runtime::{DispatchError, ModuleError}; + +#[repr(u16)] +#[derive(TryFromPrimitive, IntoPrimitive, Decode, Encode)] +pub enum FunctionId { + GetStakeInfoForHotkeyColdkeyNetuidV1 = 0, + AddStakeV1 = 1, + RemoveStakeV1 = 2, + UnstakeAllV1 = 3, + UnstakeAllAlphaV1 = 4, + MoveStakeV1 = 5, + TransferStakeV1 = 6, + SwapStakeV1 = 7, + AddStakeLimitV1 = 8, + RemoveStakeLimitV1 = 9, + SwapStakeLimitV1 = 10, + RemoveStakeFullLimitV1 = 11, + SetColdkeyAutoStakeHotkeyV1 = 12, + AddProxyV1 = 13, + RemoveProxyV1 = 14, +} + +#[derive(PartialEq, Eq, Copy, Clone, Encode, Decode, Debug)] +#[cfg_attr(feature = "std", derive(scale_info::TypeInfo))] +pub enum Output { + /// Success + Success = 0, + /// Unknown error + RuntimeError = 1, + /// Not enough balance to stake + NotEnoughBalanceToStake = 2, + /// Coldkey is not associated with the hotkey + NonAssociatedColdKey = 3, + /// Error withdrawing balance + BalanceWithdrawalError = 4, + /// Hotkey is not registered + NotRegistered = 5, + /// Not enough stake to withdraw + NotEnoughStakeToWithdraw = 6, + /// Transaction rate limit exceeded + TxRateLimitExceeded = 7, + /// Slippage is too high for the transaction + SlippageTooHigh = 8, + /// Subnet does not exist + SubnetNotExists = 9, + /// Hotkey is not registered in subnet + HotKeyNotRegisteredInSubNet = 10, + /// Same auto stake hotkey already set + SameAutoStakeHotkeyAlreadySet = 11, + /// Insufficient balance + InsufficientBalance = 12, + /// Amount is too low + AmountTooLow = 13, + /// Insufficient liquidity + InsufficientLiquidity = 14, + /// Same netuid + SameNetuid = 15, + /// Too many proxies registered + ProxyTooMany = 16, + /// Proxy already exists + ProxyDuplicate = 17, + /// 
Cannot add self as proxy + ProxyNoSelfProxy = 18, + /// Proxy relationship not found + ProxyNotFound = 19, +} + +impl From for Output { + fn from(input: DispatchError) -> Self { + let error_text = match input { + DispatchError::Module(ModuleError { message, .. }) => message, + _ => Some("No module error Info"), + }; + match error_text { + Some("NotEnoughBalanceToStake") => Output::NotEnoughBalanceToStake, + Some("NonAssociatedColdKey") => Output::NonAssociatedColdKey, + Some("BalanceWithdrawalError") => Output::BalanceWithdrawalError, + Some("HotKeyNotRegisteredInSubNet") => Output::NotRegistered, + Some("HotKeyAccountNotExists") => Output::NotRegistered, + Some("NotEnoughStakeToWithdraw") => Output::NotEnoughStakeToWithdraw, + Some("TxRateLimitExceeded") => Output::TxRateLimitExceeded, + Some("SlippageTooHigh") => Output::SlippageTooHigh, + Some("SubnetNotExists") => Output::SubnetNotExists, + Some("SameAutoStakeHotkeyAlreadySet") => Output::SameAutoStakeHotkeyAlreadySet, + Some("InsufficientBalance") => Output::InsufficientBalance, + Some("AmountTooLow") => Output::AmountTooLow, + Some("InsufficientLiquidity") => Output::InsufficientLiquidity, + Some("SameNetuid") => Output::SameNetuid, + Some("TooMany") => Output::ProxyTooMany, + Some("Duplicate") => Output::ProxyDuplicate, + Some("NoSelfProxy") => Output::ProxyNoSelfProxy, + Some("NotFound") => Output::ProxyNotFound, + _ => Output::RuntimeError, + } + } +} diff --git a/common/src/currency.rs b/common/src/currency.rs index 8233383e95..48826b1933 100644 --- a/common/src/currency.rs +++ b/common/src/currency.rs @@ -227,7 +227,9 @@ macro_rules! impl_approx { }; } -pub trait Currency: ToFixed + Into + From + Clone + Copy { +pub trait Currency: + ToFixed + Into + From + Clone + Copy + Eq + Ord + PartialEq + PartialOrd + Display +{ const MAX: Self; const ZERO: Self; diff --git a/common/src/lib.rs b/common/src/lib.rs index a5d09ad974..a98a957ad8 100644 --- a/common/src/lib.rs +++ b/common/src/lib.rs @@ -147,7 +147,7 @@ pub enum ProxyType { NonCritical, NonTransfer, Senate, - NonFungibile, // Nothing involving moving TAO + NonFungible, // Nothing involving moving TAO Triumvirate, Governance, // Both above governance Staking, @@ -159,6 +159,7 @@ pub enum ProxyType { SudoUncheckedSetCode, SwapHotkey, SubnetLeaseBeneficiary, // Used to operate the leased subnet + RootClaim, } impl Default for ProxyType { @@ -169,8 +170,6 @@ impl Default for ProxyType { } pub trait SubnetInfo { - fn tao_reserve(netuid: NetUid) -> TaoCurrency; - fn alpha_reserve(netuid: NetUid) -> AlphaCurrency; fn exists(netuid: NetUid) -> bool; fn mechanism(netuid: NetUid) -> u16; fn is_owner(account_id: &AccountId, netuid: NetUid) -> bool; @@ -180,6 +179,12 @@ pub trait SubnetInfo { fn hotkey_of_uid(netuid: NetUid, uid: u16) -> Option; } +pub trait CurrencyReserve { + fn reserve(netuid: NetUid) -> C; + fn increase_provided(netuid: NetUid, amount: C); + fn decrease_provided(netuid: NetUid, amount: C); +} + pub trait BalanceOps { fn tao_balance(account_id: &AccountId) -> TaoCurrency; fn alpha_balance(netuid: NetUid, coldkey: &AccountId, hotkey: &AccountId) -> AlphaCurrency; @@ -200,10 +205,6 @@ pub trait BalanceOps { netuid: NetUid, alpha: AlphaCurrency, ) -> Result; - fn increase_provided_tao_reserve(netuid: NetUid, tao: TaoCurrency); - fn decrease_provided_tao_reserve(netuid: NetUid, tao: TaoCurrency); - fn increase_provided_alpha_reserve(netuid: NetUid, alpha: AlphaCurrency); - fn decrease_provided_alpha_reserve(netuid: NetUid, alpha: AlphaCurrency); } pub mod time { diff --git 
a/docs/wasm-contracts.md b/docs/wasm-contracts.md
new file mode 100644
index 0000000000..ed6e9ecdd3
--- /dev/null
+++ b/docs/wasm-contracts.md
@@ -0,0 +1,112 @@
+# WebAssembly Smart Contracts
+
+## Overview
+
+Subtensor now supports WebAssembly (WASM) smart contract functionality through the integration of `pallet-contracts`, enabling developers to deploy and execute WASM smart contracts on the network. Contracts are written in [ink!](https://use.ink/), a Rust-based embedded domain-specific language (eDSL) for writing smart contracts on Substrate-based chains. For compatibility, WASM contracts can also be compiled from Solidity using [Solang](https://github.com/hyperledger-solang/solang).
+
+> [!NOTE]
+> If you're looking for information on EVM contracts, please see the documentation: https://docs.learnbittensor.org/evm-tutorials
+
+## Getting Started
+
+For general smart contract development on Subtensor, please refer to the official ink! documentation:
+- [ink! Documentation](https://use.ink/docs/v5/)
+- [ink! Getting Started Guide](https://use.ink/docs/v5/getting-started/setup)
+- [ink! Examples](https://github.com/use-ink/ink-examples/tree/v5.x.x)
+
+> [!WARNING]
+> ink! `>= 6.0` drops support for `pallet-contracts`; please use ink! `< 6.0`.
+> See: https://github.com/use-ink/ink/releases/tag/v6.0.0-alpha
+
+## Subtensor-Specific Features
+
+### Chain Extension
+
+Subtensor provides a custom chain extension that allows smart contracts to interact with Subtensor-specific functionality.
+
+#### Available Functions
+
+| Function ID | Name | Description | Parameters | Returns |
+|------------|------|-------------|------------|---------|
+| 0 | `get_stake_info_for_hotkey_coldkey_netuid` | Query stake information | `(AccountId, AccountId, NetUid)` | `Option<StakeInfo>` |
+| 1 | `add_stake` | Delegate stake from coldkey to hotkey | `(AccountId, NetUid, TaoCurrency)` | Error code |
+| 2 | `remove_stake` | Withdraw stake from hotkey back to coldkey | `(AccountId, NetUid, AlphaCurrency)` | Error code |
+| 3 | `unstake_all` | Unstake all TAO from a hotkey | `(AccountId)` | Error code |
+| 4 | `unstake_all_alpha` | Unstake all Alpha from a hotkey | `(AccountId)` | Error code |
+| 5 | `move_stake` | Move stake between hotkeys | `(AccountId, AccountId, NetUid, NetUid, AlphaCurrency)` | Error code |
+| 6 | `transfer_stake` | Transfer stake between coldkeys | `(AccountId, AccountId, NetUid, NetUid, AlphaCurrency)` | Error code |
+| 7 | `swap_stake` | Swap stake allocations between subnets | `(AccountId, NetUid, NetUid, AlphaCurrency)` | Error code |
+| 8 | `add_stake_limit` | Delegate stake with a price limit | `(AccountId, NetUid, TaoCurrency, TaoCurrency, bool)` | Error code |
+| 9 | `remove_stake_limit` | Withdraw stake with a price limit | `(AccountId, NetUid, AlphaCurrency, TaoCurrency, bool)` | Error code |
+| 10 | `swap_stake_limit` | Swap stake between subnets with a price limit | `(AccountId, NetUid, NetUid, AlphaCurrency, TaoCurrency, bool)` | Error code |
+| 11 | `remove_stake_full_limit` | Fully withdraw stake with an optional price limit | `(AccountId, NetUid, Option<TaoCurrency>)` | Error code |
+| 12 | `set_coldkey_auto_stake_hotkey` | Configure automatic stake destination | `(NetUid, AccountId)` | Error code |
+| 13 | `add_proxy` | Add a staking proxy for the caller | `(AccountId)` | Error code |
+| 14 | `remove_proxy` | Remove a staking proxy for the caller | `(AccountId)` | Error code |
+
+Example usage in your ink! contract:
+```rust
+#[ink::chain_extension(extension = 0)]
+pub trait SubtensorExtension {
+    type ErrorCode = SubtensorError;
+
+    #[ink(function = 0)]
+    fn get_stake_info(
+        hotkey: AccountId,
+        coldkey: AccountId,
+        netuid: u16,
+    ) -> Result<Option<StakeInfo>, SubtensorError>;
+}
+```
+
+#### Error Codes
+
+Chain extension functions that modify state return error codes as `u32` values. The following codes are defined:
+
+| Code | Name | Description |
+|------|------|-------------|
+| 0 | `Success` | Operation completed successfully |
+| 1 | `RuntimeError` | Unknown runtime error occurred |
+| 2 | `NotEnoughBalanceToStake` | Insufficient balance to complete stake operation |
+| 3 | `NonAssociatedColdKey` | Coldkey is not associated with the hotkey |
+| 4 | `BalanceWithdrawalError` | Error occurred during balance withdrawal |
+| 5 | `NotRegistered` | Hotkey is not registered in the subnet |
+| 6 | `NotEnoughStakeToWithdraw` | Insufficient stake available for withdrawal |
+| 7 | `TxRateLimitExceeded` | Transaction rate limit has been exceeded |
+| 8 | `SlippageTooHigh` | Price slippage exceeds acceptable threshold |
+| 9 | `SubnetNotExists` | Specified subnet does not exist |
+| 10 | `HotKeyNotRegisteredInSubNet` | Hotkey is not registered in the specified subnet |
+| 11 | `SameAutoStakeHotkeyAlreadySet` | Auto-stake hotkey is already configured |
+| 12 | `InsufficientBalance` | Account has insufficient balance |
+| 13 | `AmountTooLow` | Transaction amount is below minimum threshold |
+| 14 | `InsufficientLiquidity` | Insufficient liquidity for swap operation |
+| 15 | `SameNetuid` | Source and destination subnets are the same |
+| 16 | `ProxyTooMany` | Too many proxies registered |
+| 17 | `ProxyDuplicate` | Proxy already exists |
+| 18 | `ProxyNoSelfProxy` | Cannot add self as proxy |
+| 19 | `ProxyNotFound` | Proxy relationship not found |
+
+### Call Filter
+
+For security, contracts can only directly dispatch a limited set of runtime calls:
+
+**Whitelisted Calls:**
+- `Proxy::proxy` - Execute proxy calls
+
+All other runtime calls are restricted and cannot be dispatched from contracts.
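The documented example above only covers the read-only query (function ID 0). The state-changing functions report their outcome through the `u32` status codes in the table above rather than through the output buffer, so a contract needs to translate those codes into an ink! error type. The following is a minimal, hypothetical sketch and is not part of this PR: the names `SubtensorError` and `SubtensorStakingExtension`, the derive set, and the `u16`/`u64` stand-ins for `NetUid`/`TaoCurrency` are assumptions; the `(hotkey, netuid, amount)` argument tuple mirrors the encoding used by the chain-extension tests in this change.

```rust
// Hypothetical sketch (ink! v5), not part of this PR: declare a state-changing
// extension function and map the documented status codes into a contract error.
use ink::env::chain_extension::FromStatusCode;
use ink::primitives::AccountId;

/// Contract-side error mirroring the status-code table above.
/// (Name and derive set are assumptions for illustration.)
#[derive(Debug, PartialEq, Eq)]
#[ink::scale_derive(Encode, Decode, TypeInfo)]
pub enum SubtensorError {
    RuntimeError,
    NotEnoughBalanceToStake,
    AmountTooLow,
    // ...remaining codes from the table can be added as needed...
}

impl FromStatusCode for SubtensorError {
    fn from_status_code(status: u32) -> Result<(), Self> {
        match status {
            0 => Ok(()),                             // Success
            2 => Err(Self::NotEnoughBalanceToStake), // NotEnoughBalanceToStake
            13 => Err(Self::AmountTooLow),           // AmountTooLow
            _ => Err(Self::RuntimeError),            // any other or unknown code
        }
    }
}

#[ink::chain_extension(extension = 0)]
pub trait SubtensorStakingExtension {
    type ErrorCode = SubtensorError;

    /// Function ID 1: stake `amount` (TAO, as a raw u64) from the contract's
    /// account to `hotkey` on subnet `netuid`. The runtime decodes the input
    /// as the SCALE-encoded tuple `(hotkey, netuid, amount)`.
    #[ink(function = 1)]
    fn add_stake(hotkey: AccountId, netuid: u16, amount: u64) -> Result<(), SubtensorError>;
}
```

Whether to surface each code as its own variant or collapse rarely seen ones into `RuntimeError` is a design choice; the important part is that every non-zero status becomes an `Err`, so contract logic cannot silently ignore a failed staking call.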
+ +### Configuration Parameters + +| Parameter | Value | Description | +|-----------|-------|-------------| +| Maximum code size | 128 KB | Maximum size of contract WASM code | +| Call stack depth | 5 frames | Maximum nested contract call depth | +| Runtime memory | 1 GB | Memory available during contract execution | +| Validator runtime memory | 2 GB | Memory available for validators | +| Transient storage | 1 MB | Maximum transient storage size | + + +## Additional Resources + +- [cargo-contract CLI Tool](https://github.com/paritytech/cargo-contract) +- [Contracts UI](https://contracts-ui.substrate.io/) diff --git a/evm-tests/src/contracts/staking.ts b/evm-tests/src/contracts/staking.ts index 7b9e671c23..4b48fd7d8d 100644 --- a/evm-tests/src/contracts/staking.ts +++ b/evm-tests/src/contracts/staking.ts @@ -12,7 +12,7 @@ export const IStakingABI = [ ], name: "addProxy", outputs: [], - stateMutability: "nonpayable", + stateMutability: "payable", type: "function", }, { @@ -43,7 +43,7 @@ export const IStakingABI = [ ], name: "removeProxy", outputs: [], - stateMutability: "nonpayable", + stateMutability: "payable", type: "function", }, { @@ -95,7 +95,7 @@ export const IStakingABI = [ ], name: "removeStake", outputs: [], - stateMutability: "nonpayable", + stateMutability: "payable", type: "function", }, ]; @@ -111,7 +111,7 @@ export const IStakingV2ABI = [ ], "name": "addProxy", "outputs": [], - "stateMutability": "nonpayable", + "stateMutability": "payable", "type": "function" }, { @@ -275,7 +275,7 @@ export const IStakingV2ABI = [ ], "name": "removeProxy", "outputs": [], - "stateMutability": "nonpayable", + "stateMutability": "payable", "type": "function" }, { @@ -298,7 +298,7 @@ export const IStakingV2ABI = [ ], "name": "removeStake", "outputs": [], - "stateMutability": "nonpayable", + "stateMutability": "payable", "type": "function" }, { @@ -331,7 +331,7 @@ export const IStakingV2ABI = [ ], "name": "addStakeLimit", "outputs": [], - "stateMutability": "nonpayable", + "stateMutability": "payable", "type": "function" }, { @@ -364,7 +364,7 @@ export const IStakingV2ABI = [ ], "name": "removeStakeLimit", "outputs": [], - "stateMutability": "nonpayable", + "stateMutability": "payable", "type": "function" }, { @@ -382,7 +382,7 @@ export const IStakingV2ABI = [ ], "name": "removeStakeFull", "outputs": [], - "stateMutability": "nonpayable", + "stateMutability": "payable", "type": "function" }, { @@ -405,7 +405,30 @@ export const IStakingV2ABI = [ ], "name": "removeStakeFullLimit", "outputs": [], - "stateMutability": "nonpayable", + "stateMutability": "payable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "bytes32", + "name": "hotkey", + "type": "bytes32" + }, + { + "internalType": "uint256", + "name": "amount", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "netuid", + "type": "uint256" + } + ], + "name": "burnAlpha", + "outputs": [], + "stateMutability": "payable", "type": "function" } ]; \ No newline at end of file diff --git a/evm-tests/src/contracts/subnet.ts b/evm-tests/src/contracts/subnet.ts index b5a5a9dcbc..a55bd5030f 100644 --- a/evm-tests/src/contracts/subnet.ts +++ b/evm-tests/src/contracts/subnet.ts @@ -663,24 +663,6 @@ export const ISubnetABI = [ stateMutability: "payable", type: "function", }, - { - inputs: [ - { - internalType: "uint16", - name: "netuid", - type: "uint16", - }, - { - internalType: "uint16", - name: "maxWeightLimit", - type: "uint16", - }, - ], - name: "setMaxWeightLimit", - outputs: [], - stateMutability: "payable", - type: 
"function", - }, { inputs: [ { diff --git a/evm-tests/test/staking.precompile.burn-alpha.test.ts b/evm-tests/test/staking.precompile.burn-alpha.test.ts new file mode 100644 index 0000000000..825587602e --- /dev/null +++ b/evm-tests/test/staking.precompile.burn-alpha.test.ts @@ -0,0 +1,136 @@ +import * as assert from "assert"; +import { getDevnetApi, getRandomSubstrateKeypair } from "../src/substrate" +import { devnet } from "@polkadot-api/descriptors" +import { TypedApi } from "polkadot-api"; +import { convertPublicKeyToSs58, convertH160ToSS58 } from "../src/address-utils" +import { tao } from "../src/balance-math" +import { ethers } from "ethers" +import { generateRandomEthersWallet } from "../src/utils" +import { convertH160ToPublicKey } from "../src/address-utils" +import { + forceSetBalanceToEthAddress, forceSetBalanceToSs58Address, addNewSubnetwork, burnedRegister, + startCall, +} from "../src/subtensor" +import { ISTAKING_V2_ADDRESS, IStakingV2ABI } from "../src/contracts/staking" + +describe("Test staking precompile burn alpha", () => { + // init eth part + const wallet1 = generateRandomEthersWallet(); + // init substrate part + const hotkey = getRandomSubstrateKeypair(); + const coldkey = getRandomSubstrateKeypair(); + + let api: TypedApi + + before(async () => { + // init variables got from await and async + api = await getDevnetApi() + + await forceSetBalanceToSs58Address(api, convertPublicKeyToSs58(hotkey.publicKey)) + await forceSetBalanceToSs58Address(api, convertPublicKeyToSs58(coldkey.publicKey)) + await forceSetBalanceToEthAddress(api, wallet1.address) + + let netuid = await addNewSubnetwork(api, hotkey, coldkey) + await startCall(api, netuid, coldkey) + + console.log("test the case on subnet ", netuid) + + await burnedRegister(api, netuid, convertH160ToSS58(wallet1.address), coldkey) + }) + + it("Can burn alpha after adding stake", async () => { + let netuid = (await api.query.SubtensorModule.TotalNetworks.getValue()) - 1 + + // First add some stake + let stakeBalance = tao(50) + const contract = new ethers.Contract(ISTAKING_V2_ADDRESS, IStakingV2ABI, wallet1); + const addStakeTx = await contract.addStake(hotkey.publicKey, stakeBalance.toString(), netuid) + await addStakeTx.wait() + + // Get stake before burning + const stakeBefore = BigInt(await contract.getStake(hotkey.publicKey, convertH160ToPublicKey(wallet1.address), netuid)) + + console.log("Stake before burn:", stakeBefore) + assert.ok(stakeBefore > BigInt(0), "Should have stake before burning") + + // Burn some alpha (burn 20 TAO worth) + let burnAmount = tao(20) + const burnTx = await contract.burnAlpha(hotkey.publicKey, burnAmount.toString(), netuid) + await burnTx.wait() + + // Get stake after burning + const stakeAfter = BigInt(await contract.getStake(hotkey.publicKey, convertH160ToPublicKey(wallet1.address), netuid)) + + console.log("Stake after burn:", stakeAfter) + + // Verify that stake decreased by burn amount + assert.ok(stakeAfter < stakeBefore, "Stake should decrease after burning") + // assert.strictEqual(stakeBefore - stakeAfter, burnAmount, "Stake should decrease by exactly burn amount") + }) + + it("Cannot burn more alpha than staked", async () => { + let netuid = (await api.query.SubtensorModule.TotalNetworks.getValue()) - 1 + + // Get current stake + const currentStake = await api.query.SubtensorModule.Alpha.getValue( + convertPublicKeyToSs58(hotkey.publicKey), + convertH160ToSS58(wallet1.address), + netuid + ) + + // Try to burn more than staked + let burnAmount = currentStake + tao(10000) + 
const contract = new ethers.Contract(ISTAKING_V2_ADDRESS, IStakingV2ABI, wallet1); + + try { + const burnTx = await contract.burnAlpha(hotkey.publicKey, burnAmount.toString(), netuid) + await burnTx.wait() + assert.fail("Transaction should have failed - cannot burn more than staked"); + } catch (error) { + // Transaction failed as expected + console.log("Correctly failed to burn more than staked amount") + assert.ok(true, "Burning more than staked should fail"); + } + }) + + it("Cannot burn alpha from non-existent subnet", async () => { + // wrong netuid + let netuid = 12345; + let burnAmount = tao(10) + const contract = new ethers.Contract(ISTAKING_V2_ADDRESS, IStakingV2ABI, wallet1); + + try { + const burnTx = await contract.burnAlpha(hotkey.publicKey, burnAmount.toString(), netuid) + await burnTx.wait() + assert.fail("Transaction should have failed - subnet doesn't exist"); + } catch (error) { + // Transaction failed as expected + console.log("Correctly failed to burn from non-existent subnet") + assert.ok(true, "Burning from non-existent subnet should fail"); + } + }) + + it("Cannot burn zero alpha", async () => { + let netuid = (await api.query.SubtensorModule.TotalNetworks.getValue()) - 1 + + // First add some stake for this test + let stakeBalance = tao(10) + const contract = new ethers.Contract(ISTAKING_V2_ADDRESS, IStakingV2ABI, wallet1); + const addStakeTx = await contract.addStake(hotkey.publicKey, stakeBalance.toString(), netuid) + await addStakeTx.wait() + + // Try to burn zero amount + let burnAmount = BigInt(0) + + try { + const burnTx = await contract.burnAlpha(hotkey.publicKey, burnAmount.toString(), netuid) + await burnTx.wait() + assert.fail("Transaction should have failed - cannot burn zero amount"); + } catch (error) { + // Transaction failed as expected + console.log("Correctly failed to burn zero amount") + assert.ok(true, "Burning zero amount should fail"); + } + }) +}) + diff --git a/evm-tests/test/subnet.precompile.hyperparameter.test.ts b/evm-tests/test/subnet.precompile.hyperparameter.test.ts index e3b5708e50..87968b6e9f 100644 --- a/evm-tests/test/subnet.precompile.hyperparameter.test.ts +++ b/evm-tests/test/subnet.precompile.hyperparameter.test.ts @@ -208,25 +208,17 @@ describe("Test the Subnet precompile contract", () => { assert.equal(valueFromContract, onchainValue); }) - it("Can set maxWeightLimit parameter", async () => { + it("Returns constant maxWeightLimit", async () => { const totalNetwork = await api.query.SubtensorModule.TotalNetworks.getValue() const contract = new ethers.Contract(ISUBNET_ADDRESS, ISubnetABI, wallet); const netuid = totalNetwork - 1; - const newValue = 106; - const tx = await contract.setMaxWeightLimit(netuid, newValue); - await tx.wait(); - - let onchainValue = await api.query.SubtensorModule.MaxWeightsLimit.getValue(netuid) - - - let valueFromContract = Number( + const valueFromContract = Number( await contract.getMaxWeightLimit(netuid) ); - assert.equal(valueFromContract, newValue) - assert.equal(valueFromContract, onchainValue); + assert.equal(valueFromContract, 0xFFFF) }) it("Can set immunityPeriod parameter", async () => { @@ -271,26 +263,27 @@ describe("Test the Subnet precompile contract", () => { assert.equal(valueFromContract, onchainValue); }) - it("Can set kappa parameter", async () => { + // disable the set kappa parameter test, because it is only callable by sudo now + // it("Can set kappa parameter", async () => { - const totalNetwork = await api.query.SubtensorModule.TotalNetworks.getValue() - const contract = new 
ethers.Contract(ISUBNET_ADDRESS, ISubnetABI, wallet); - const netuid = totalNetwork - 1; + // const totalNetwork = await api.query.SubtensorModule.TotalNetworks.getValue() + // const contract = new ethers.Contract(ISUBNET_ADDRESS, ISubnetABI, wallet); + // const netuid = totalNetwork - 1; - const newValue = 109; - const tx = await contract.setKappa(netuid, newValue); - await tx.wait(); + // const newValue = 109; + // const tx = await contract.setKappa(netuid, newValue); + // await tx.wait(); - let onchainValue = await api.query.SubtensorModule.Kappa.getValue(netuid) + // let onchainValue = await api.query.SubtensorModule.Kappa.getValue(netuid) - let valueFromContract = Number( - await contract.getKappa(netuid) - ); + // let valueFromContract = Number( + // await contract.getKappa(netuid) + // ); - assert.equal(valueFromContract, newValue) - assert.equal(valueFromContract, onchainValue); - }) + // assert.equal(valueFromContract, newValue) + // assert.equal(valueFromContract, onchainValue); + // }) it("Can set rho parameter", async () => { diff --git a/hyperparameters.md b/hyperparameters.md index 2d69a1f378..870f343a3b 100644 --- a/hyperparameters.md +++ b/hyperparameters.md @@ -2,6 +2,7 @@ ```rust DefaultTake: u16 = 11_796; // 18% honest number. TxRateLimit: u64 = 1; // [1 @ 64,888] +MaxWeightsLimit: u16 = u16::MAX; // constant limit ``` ### netuid 1 (text_prompting) @@ -13,7 +14,6 @@ MaxAllowedUids: u16 = 1024; Issuance: u64 = 0; MinAllowedWeights: u16 = 8; EmissionValue: u64 = 142_223_000; -MaxWeightsLimit: 455; // 455/2^16 = 0.0069 ValidatorBatchSize: u16 = 1; ValidatorSequenceLen: u16 = 2048; // 2048 ValidatorEpochLen: u16 = 100; @@ -54,7 +54,6 @@ MaxAllowedUids: u16 = 4096; Issuance: u64 = 0; MinAllowedWeights: u16 = 50; EmissionValue: u64 = 857_777_000; -MaxWeightsLimit: u16 = 655; // 655/2^16 = 0.01 [655 @ 7,160] ValidatorBatchSize: u16 = 32; // 32 ValidatorSequenceLen: u16 = 256; // 256 ValidatorEpochLen: u16 = 250; // [250 @ 7,161] diff --git a/node/src/benchmarking.rs b/node/src/benchmarking.rs index ad2abfc935..4acece56f4 100644 --- a/node/src/benchmarking.rs +++ b/node/src/benchmarking.rs @@ -105,6 +105,7 @@ impl frame_benchmarking_cli::ExtrinsicBuilder for TransferKeepAliveBuilder { // Create a transaction using the given `call`. // // Note: Should only be used for benchmarking. 
+#[allow(clippy::expect_used)] pub fn create_benchmark_extrinsic( client: &FullClient, sender: sp_core::sr25519::Pair, diff --git a/node/src/chain_spec/localnet.rs b/node/src/chain_spec/localnet.rs index 577f42e54a..d65849ae94 100644 --- a/node/src/chain_spec/localnet.rs +++ b/node/src/chain_spec/localnet.rs @@ -99,18 +99,6 @@ fn localnet_genesis( } } - let trimvirate_members: Vec = bounded_vec![ - get_account_id_from_seed::("Alice"), - get_account_id_from_seed::("Bob"), - get_account_id_from_seed::("Charlie"), - ]; - - let senate_members: Vec = bounded_vec![ - get_account_id_from_seed::("Dave"), - get_account_id_from_seed::("Eve"), - get_account_id_from_seed::("Ferdie"), - ]; - serde_json::json!({ "balances": { "balances": balances }, "aura": { @@ -125,12 +113,6 @@ fn localnet_genesis( "sudo": { "key": Some(get_account_id_from_seed::("Alice")) }, - "triumvirateMembers": { - "members": trimvirate_members - }, - "senateMembers": { - "members": senate_members, - }, "evmChainId": { "chainId": 42, }, diff --git a/node/src/chain_spec/mod.rs b/node/src/chain_spec/mod.rs index 733f416e69..85ca78d353 100644 --- a/node/src/chain_spec/mod.rs +++ b/node/src/chain_spec/mod.rs @@ -1,5 +1,5 @@ // Allowed since it's actually better to panic during chain setup when there is an error -#![allow(clippy::unwrap_used)] +#![allow(clippy::expect_used, clippy::unwrap_used)] pub mod devnet; pub mod finney; @@ -12,7 +12,7 @@ use sc_service::ChainType; use sp_consensus_aura::sr25519::AuthorityId as AuraId; use sp_consensus_grandpa::AuthorityId as GrandpaId; use sp_core::crypto::Ss58Codec; -use sp_core::{H256, Pair, Public, bounded_vec, sr25519}; +use sp_core::{H256, Pair, Public, sr25519}; use sp_runtime::AccountId32; use sp_runtime::traits::{IdentifyAccount, Verify}; use std::collections::HashSet; diff --git a/node/src/command.rs b/node/src/command.rs index 31e31a1196..67cb200e43 100644 --- a/node/src/command.rs +++ b/node/src/command.rs @@ -245,6 +245,7 @@ pub fn run() -> sc_cli::Result<()> { } } +#[allow(clippy::expect_used)] fn start_babe_service(arg_matches: &ArgMatches) -> Result<(), sc_cli::Error> { let cli = Cli::from_arg_matches(arg_matches).expect("Bad arg_matches"); let runner = cli.create_runner(&cli.run)?; @@ -281,6 +282,7 @@ fn start_babe_service(arg_matches: &ArgMatches) -> Result<(), sc_cli::Error> { } } +#[allow(clippy::expect_used)] fn start_aura_service(arg_matches: &ArgMatches) -> Result<(), sc_cli::Error> { let cli = Cli::from_arg_matches(arg_matches).expect("Bad arg_matches"); let runner = cli.create_runner(&cli.run)?; @@ -313,6 +315,7 @@ fn start_aura_service(arg_matches: &ArgMatches) -> Result<(), sc_cli::Error> { } } +#[allow(clippy::expect_used)] fn customise_config(arg_matches: &ArgMatches, config: Configuration) -> Configuration { let cli = Cli::from_arg_matches(arg_matches).expect("Bad arg_matches"); diff --git a/node/src/consensus/babe_consensus.rs b/node/src/consensus/babe_consensus.rs index 8a848ff408..8c9a974d20 100644 --- a/node/src/consensus/babe_consensus.rs +++ b/node/src/consensus/babe_consensus.rs @@ -44,6 +44,7 @@ impl ConsensusMechanism for BabeConsensus { sp_timestamp::InherentDataProvider, ); + #[allow(clippy::expect_used)] fn start_authoring( self, task_manager: &mut TaskManager, diff --git a/node/src/consensus/hybrid_import_queue.rs b/node/src/consensus/hybrid_import_queue.rs index 5d2a1e9162..30d8ff4065 100644 --- a/node/src/consensus/hybrid_import_queue.rs +++ b/node/src/consensus/hybrid_import_queue.rs @@ -77,6 +77,7 @@ impl HybridBlockImport { 
FrontierBlockImport::new(grandpa_block_import.clone(), client.clone()), ); + #[allow(clippy::expect_used)] let (babe_import, babe_link) = sc_consensus_babe::block_import( babe_config, grandpa_block_import.clone(), diff --git a/node/src/service.rs b/node/src/service.rs index 8670c277a2..2ef1904f08 100644 --- a/node/src/service.rs +++ b/node/src/service.rs @@ -60,6 +60,7 @@ pub type BIQ<'a> = Box< + 'a, >; +#[allow(clippy::expect_used)] pub fn new_partial( config: &Configuration, eth_config: &EthConfiguration, @@ -250,6 +251,7 @@ pub fn build_manual_seal_import_queue( } /// Builds a new service for a full client. +#[allow(clippy::expect_used)] pub async fn new_full( mut config: Configuration, eth_config: EthConfiguration, diff --git a/pallets/admin-utils/src/benchmarking.rs b/pallets/admin-utils/src/benchmarking.rs index 7e6e17dd5c..08589e530b 100644 --- a/pallets/admin-utils/src/benchmarking.rs +++ b/pallets/admin-utils/src/benchmarking.rs @@ -292,19 +292,6 @@ mod benchmarks { _(RawOrigin::Root, 1u16.into()/*netuid*/, 100u16/*immunity_period*/)/*sudo_set_immunity_period*/; } - #[benchmark] - fn sudo_set_max_weight_limit() { - // disable admin freeze window - pallet_subtensor::Pallet::::set_admin_freeze_window(0); - pallet_subtensor::Pallet::::init_new_network( - 1u16.into(), /*netuid*/ - 1u16, /*tempo*/ - ); - - #[extrinsic_call] - _(RawOrigin::Root, 1u16.into()/*netuid*/, 100u16/*max_weight_limit*/)/*sudo_set_max_weight_limit*/; - } - #[benchmark] fn sudo_set_max_registrations_per_block() { // disable admin freeze window diff --git a/pallets/admin-utils/src/lib.rs b/pallets/admin-utils/src/lib.rs index b794756995..01e9e7b33e 100644 --- a/pallets/admin-utils/src/lib.rs +++ b/pallets/admin-utils/src/lib.rs @@ -18,6 +18,7 @@ mod tests; #[deny(missing_docs)] #[frame_support::pallet] +#[allow(clippy::expect_used)] pub mod pallet { use super::*; use frame_support::pallet_prelude::*; @@ -30,7 +31,7 @@ pub mod pallet { utils::rate_limiting::{Hyperparameter, TransactionType}, }; use sp_runtime::BoundedVec; - use substrate_fixed::types::I96F32; + use substrate_fixed::types::{I64F64, I96F32, U64F64}; use subtensor_runtime_common::{MechId, NetUid, TaoCurrency}; /// The main data structure of the module. @@ -114,6 +115,8 @@ pub mod pallet { MaxAllowedUidsLessThanMinAllowedUids, /// The maximum allowed UIDs must be less than the default maximum allowed UIDs. MaxAllowedUidsGreaterThanDefaultMaxAllowedUids, + /// Bad parameter value + InvalidValue, } /// Enum for specifying the type of precompile operation. #[derive( @@ -162,6 +165,8 @@ pub mod pallet { /// Dispatchable functions allows users to interact with the pallet and invoke state changes. #[pallet::call] impl Pallet { + #![deny(clippy::expect_used)] + /// The extrinsic sets the new authorities for Aura consensus. /// It is only callable by the root account. /// The extrinsic will call the Aura pallet to change the authorities. @@ -423,40 +428,6 @@ pub mod pallet { Ok(()) } - /// The extrinsic sets the adjustment beta for a subnet. - /// It is only callable by the root account or subnet owner. - /// The extrinsic will call the Subtensor pallet to set the adjustment beta. 
- #[pallet::call_index(12)] - #[pallet::weight(Weight::from_parts(26_890_000, 0) - .saturating_add(::DbWeight::get().reads(3_u64)) - .saturating_add(::DbWeight::get().writes(1_u64)))] - pub fn sudo_set_max_weight_limit( - origin: OriginFor, - netuid: NetUid, - max_weight_limit: u16, - ) -> DispatchResult { - let maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( - origin, - netuid, - &[Hyperparameter::MaxWeightLimit.into()], - )?; - - ensure!( - pallet_subtensor::Pallet::::if_subnet_exist(netuid), - Error::::SubnetDoesNotExist - ); - pallet_subtensor::Pallet::::set_max_weight_limit(netuid, max_weight_limit); - pallet_subtensor::Pallet::::record_owner_rl( - maybe_owner, - netuid, - &[Hyperparameter::MaxWeightLimit.into()], - ); - log::debug!( - "MaxWeightLimitSet( netuid: {netuid:?} max_weight_limit: {max_weight_limit:?} ) " - ); - Ok(()) - } - /// The extrinsic sets the immunity period for a subnet. /// It is only callable by the root account or subnet owner. /// The extrinsic will call the Subtensor pallet to set the immunity period. @@ -574,27 +545,17 @@ pub mod pallet { /// It is only callable by the root account or subnet owner. /// The extrinsic will call the Subtensor pallet to set the kappa. #[pallet::call_index(16)] - #[pallet::weight(Weight::from_parts(26_210_000, 0) - .saturating_add(::DbWeight::get().reads(3_u64)) + #[pallet::weight(Weight::from_parts(15_390_000, 0) + .saturating_add(::DbWeight::get().reads(1_u64)) .saturating_add(::DbWeight::get().writes(1_u64)))] pub fn sudo_set_kappa(origin: OriginFor, netuid: NetUid, kappa: u16) -> DispatchResult { - let maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( - origin, - netuid, - &[Hyperparameter::Kappa.into()], - )?; - + ensure_root(origin)?; ensure!( pallet_subtensor::Pallet::::if_subnet_exist(netuid), Error::::SubnetDoesNotExist ); pallet_subtensor::Pallet::::set_kappa(netuid, kappa); log::debug!("KappaSet( netuid: {netuid:?} kappa: {kappa:?} ) "); - pallet_subtensor::Pallet::::record_owner_rl( - maybe_owner, - netuid, - &[Hyperparameter::Kappa.into()], - ); Ok(()) } @@ -731,7 +692,7 @@ pub mod pallet { /// It is only callable by the root account. /// The extrinsic will call the Subtensor pallet to set the target registrations per interval. #[pallet::call_index(21)] - #[pallet::weight(Weight::from_parts(44_320_000, 0) + #[pallet::weight(Weight::from_parts(25_980_000, 0) .saturating_add(::DbWeight::get().reads(3_u64)) .saturating_add(::DbWeight::get().writes(1_u64)))] pub fn sudo_set_target_registrations_per_interval( @@ -1054,6 +1015,7 @@ pub mod pallet { #[pallet::call_index(33)] #[pallet::weight(( Weight::from_parts(2_875_000, 0) + .saturating_add(T::DbWeight::get().reads(0_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)), DispatchClass::Operational, Pays::Yes @@ -1123,7 +1085,7 @@ pub mod pallet { Weight::from_parts(14_000_000, 0) .saturating_add(::DbWeight::get().writes(1)), DispatchClass::Operational, - Pays::No + Pays::Yes ))] pub fn sudo_set_subnet_limit(origin: OriginFor, max_subnets: u16) -> DispatchResult { ensure_root(origin)?; @@ -1185,7 +1147,9 @@ pub mod pallet { /// The extrinsic will call the Subtensor pallet to set the weights min stake. 
#[pallet::call_index(42)] #[pallet::weight(( - Weight::from_parts(5_000_000, 0).saturating_add(T::DbWeight::get().writes(1_u64)), + Weight::from_parts(5_000_000, 0) + .saturating_add(T::DbWeight::get().reads(0_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)), DispatchClass::Operational, Pays::Yes ))] @@ -1228,7 +1192,9 @@ pub mod pallet { /// The extrinsic will call the Subtensor pallet to set the rate limit for delegate take transactions. #[pallet::call_index(45)] #[pallet::weight(( - Weight::from_parts(5_019_000, 0).saturating_add(T::DbWeight::get().writes(1_u64)), + Weight::from_parts(5_019_000, 0) + .saturating_add(T::DbWeight::get().reads(0_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)), DispatchClass::Operational, Pays::Yes ))] @@ -1249,7 +1215,9 @@ pub mod pallet { /// The extrinsic will call the Subtensor pallet to set the minimum delegate take. #[pallet::call_index(46)] #[pallet::weight(( - Weight::from_parts(5_000_000, 0).saturating_add(T::DbWeight::get().writes(1_u64)), + Weight::from_parts(7_214_000, 0) + .saturating_add(T::DbWeight::get().reads(0_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)), DispatchClass::Operational, Pays::Yes ))] @@ -1305,6 +1273,7 @@ pub mod pallet { #[pallet::call_index(50)] #[pallet::weight(( Weight::from_parts(18_300_000, 0) + .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)), DispatchClass::Normal, Pays::Yes @@ -1379,6 +1348,7 @@ pub mod pallet { #[pallet::call_index(54)] #[pallet::weight(( Weight::from_parts(5_000_000, 0) + .saturating_add(T::DbWeight::get().reads(0_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)), DispatchClass::Operational, Pays::Yes @@ -1416,6 +1386,7 @@ pub mod pallet { #[pallet::call_index(55)] #[pallet::weight(( Weight::from_parts(5_000_000, 0) + .saturating_add(T::DbWeight::get().reads(0_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)), DispatchClass::Operational, Pays::Yes @@ -1549,6 +1520,7 @@ pub mod pallet { #[pallet::call_index(61)] #[pallet::weight(( Weight::from_parts(20_460_000, 0) + .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)), DispatchClass::Normal, Pays::Yes @@ -1620,8 +1592,9 @@ pub mod pallet { /// Weight is handled by the `#[pallet::weight]` attribute. #[pallet::call_index(62)] #[pallet::weight(( - Weight::from_parts(6_392_000, 3507) - .saturating_add(T::DbWeight::get().reads(1_u64)), + Weight::from_parts(10_020_000, 3507) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(0_u64)), DispatchClass::Operational, Pays::Yes ))] @@ -1655,6 +1628,7 @@ pub mod pallet { #[pallet::call_index(63)] #[pallet::weight(( Weight::from_parts(3_000_000, 0) + .saturating_add(T::DbWeight::get().reads(0_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)), DispatchClass::Operational, Pays::Yes @@ -1680,17 +1654,18 @@ pub mod pallet { /// # Weight /// Weight is handled by the `#[pallet::weight]` attribute. 
#[pallet::call_index(64)] - #[pallet::weight((0, DispatchClass::Operational, Pays::No))] + #[pallet::weight(( + Weight::from_parts(3_918_000, 0) // TODO: add benchmarks + .saturating_add(T::DbWeight::get().writes(1_u64)), + DispatchClass::Operational, + Pays::Yes + ))] pub fn sudo_set_subnet_owner_hotkey( origin: OriginFor, netuid: NetUid, hotkey: ::AccountId, ) -> DispatchResult { - pallet_subtensor::Pallet::::ensure_subnet_owner(origin.clone(), netuid)?; - pallet_subtensor::Pallet::::set_subnet_owner_hotkey(netuid, &hotkey); - - log::debug!("SubnetOwnerHotkeySet( netuid: {netuid:?}, hotkey: {hotkey:?} )"); - Ok(()) + pallet_subtensor::Pallet::::do_set_sn_owner_hotkey(origin, netuid, &hotkey) } /// @@ -1706,7 +1681,8 @@ pub mod pallet { /// Weight is handled by the `#[pallet::weight]` attribute. #[pallet::call_index(65)] #[pallet::weight(( - Weight::from_parts(3_918_000, 0) + Weight::from_parts(6_201_000, 0) + .saturating_add(T::DbWeight::get().reads(0_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)), DispatchClass::Operational, Pays::Yes @@ -1793,6 +1769,7 @@ pub mod pallet { #[pallet::call_index(69)] #[pallet::weight(( Weight::from_parts(20_460_000, 0) + .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)), DispatchClass::Normal, Pays::Yes @@ -1830,7 +1807,8 @@ pub mod pallet { /// This function has a fixed weight of 0 and is classified as an operational transaction that does not incur any fees. #[pallet::call_index(70)] #[pallet::weight(( - Weight::from_parts(22_340_000, 0) + Weight::from_parts(32_930_000, 0) + .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)), DispatchClass::Normal, Pays::Yes @@ -1918,6 +1896,7 @@ pub mod pallet { #[pallet::call_index(66)] #[pallet::weight(( Weight::from_parts(17_980_000, 0) + .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)), DispatchClass::Operational, Pays::Yes @@ -1996,7 +1975,7 @@ pub mod pallet { /// Only callable by root. 
#[pallet::call_index(74)] #[pallet::weight(( - Weight::from_parts(5_771_000, 0) + Weight::from_parts(5_510_000, 0) .saturating_add(::DbWeight::get().reads(0_u64)) .saturating_add(::DbWeight::get().writes(1_u64)), DispatchClass::Operational @@ -2142,6 +2121,71 @@ pub mod pallet { ); Ok(()) } + + /// Sets TAO flow cutoff value (A) + #[pallet::call_index(81)] + #[pallet::weight(( + Weight::from_parts(7_343_000, 0) + .saturating_add(::DbWeight::get().reads(0)) + .saturating_add(::DbWeight::get().writes(1)), + DispatchClass::Operational, + Pays::Yes + ))] + pub fn sudo_set_tao_flow_cutoff( + origin: OriginFor, + flow_cutoff: I64F64, + ) -> DispatchResult { + ensure_root(origin)?; + pallet_subtensor::Pallet::::set_tao_flow_cutoff(flow_cutoff); + log::debug!("set_tao_flow_cutoff( {flow_cutoff:?} ) "); + Ok(()) + } + + /// Sets TAO flow normalization exponent (p) + #[pallet::call_index(82)] + #[pallet::weight(( + Weight::from_parts(7_343_000, 0) + .saturating_add(::DbWeight::get().reads(0)) + .saturating_add(::DbWeight::get().writes(1)), + DispatchClass::Operational, + Pays::Yes + ))] + pub fn sudo_set_tao_flow_normalization_exponent( + origin: OriginFor, + exponent: U64F64, + ) -> DispatchResult { + ensure_root(origin)?; + + let one = U64F64::saturating_from_num(1); + let two = U64F64::saturating_from_num(2); + ensure!( + (one <= exponent) && (exponent <= two), + Error::::InvalidValue + ); + + pallet_subtensor::Pallet::::set_tao_flow_normalization_exponent(exponent); + log::debug!("set_tao_flow_normalization_exponent( {exponent:?} ) "); + Ok(()) + } + + /// Sets TAO flow smoothing factor (alpha) + #[pallet::call_index(83)] + #[pallet::weight(( + Weight::from_parts(7_343_000, 0) + .saturating_add(::DbWeight::get().reads(0)) + .saturating_add(::DbWeight::get().writes(1)), + DispatchClass::Operational, + Pays::Yes + ))] + pub fn sudo_set_tao_flow_smoothing_factor( + origin: OriginFor, + smoothing_factor: u64, + ) -> DispatchResult { + ensure_root(origin)?; + pallet_subtensor::Pallet::::set_tao_flow_smoothing_factor(smoothing_factor); + log::debug!("set_tao_flow_smoothing_factor( {smoothing_factor:?} ) "); + Ok(()) + } } } diff --git a/pallets/admin-utils/src/tests/mock.rs b/pallets/admin-utils/src/tests/mock.rs index aca6b7570e..0140808baa 100644 --- a/pallets/admin-utils/src/tests/mock.rs +++ b/pallets/admin-utils/src/tests/mock.rs @@ -7,7 +7,7 @@ use frame_support::{ traits::{Everything, Hooks, InherentBuilder, PrivilegeCmp}, }; use frame_system::{self as system, offchain::CreateTransactionBase}; -use frame_system::{EnsureNever, EnsureRoot, limits}; +use frame_system::{EnsureRoot, limits}; use sp_consensus_aura::sr25519::AuthorityId as AuraId; use sp_consensus_grandpa::AuthorityList as GrandpaAuthorityList; use sp_core::U256; @@ -77,7 +77,6 @@ pub type UncheckedExtrinsic = TestXt; parameter_types! { pub const InitialMinAllowedWeights: u16 = 0; pub const InitialEmissionValue: u16 = 0; - pub const InitialMaxWeightsLimit: u16 = u16::MAX; pub BlockWeights: limits::BlockWeights = limits::BlockWeights::with_sensible_defaults( Weight::from_parts(2_000_000_000_000, u64::MAX), Perbill::from_percent(75), @@ -128,7 +127,6 @@ parameter_types! 
{ pub const InitialMinDifficulty: u64 = 1; pub const InitialMaxDifficulty: u64 = u64::MAX; pub const InitialRAORecycledForRegistration: u64 = 0; - pub const InitialSenateRequiredStakePercentage: u64 = 2; // 2 percent of total stake pub const InitialNetworkImmunityPeriod: u64 = 1_296_000; pub const InitialNetworkMinLockCost: u64 = 100_000_000_000; pub const InitialSubnetOwnerCut: u16 = 0; // 0%. 100% of rewards go to validators + miners. @@ -160,13 +158,9 @@ impl pallet_subtensor::Config for Test { type Currency = Balances; type InitialIssuance = InitialIssuance; type SudoRuntimeCall = TestRuntimeCall; - type CouncilOrigin = EnsureNever; - type SenateMembers = (); - type TriumvirateInterface = (); type Scheduler = Scheduler; type InitialMinAllowedWeights = InitialMinAllowedWeights; type InitialEmissionValue = InitialEmissionValue; - type InitialMaxWeightsLimit = InitialMaxWeightsLimit; type InitialTempo = InitialTempo; type InitialDifficulty = InitialDifficulty; type InitialAdjustmentInterval = InitialAdjustmentInterval; @@ -205,7 +199,6 @@ impl pallet_subtensor::Config for Test { type MinBurnUpperBound = MinBurnUpperBound; type MaxBurnLowerBound = MaxBurnLowerBound; type InitialRAORecycledForRegistration = InitialRAORecycledForRegistration; - type InitialSenateRequiredStakePercentage = InitialSenateRequiredStakePercentage; type InitialNetworkImmunityPeriod = InitialNetworkImmunityPeriod; type InitialNetworkMinLockCost = InitialNetworkMinLockCost; type InitialSubnetOwnerCut = InitialSubnetOwnerCut; @@ -341,6 +334,8 @@ impl pallet_subtensor_swap::Config for Test { type SubnetInfo = SubtensorModule; type BalanceOps = SubtensorModule; type ProtocolId = SwapProtocolId; + type TaoReserve = pallet_subtensor::TaoCurrencyReserve; + type AlphaReserve = pallet_subtensor::AlphaCurrencyReserve; type MaxFeeRate = SwapMaxFeeRate; type MaxPositions = SwapMaxPositions; type MinimumLiquidity = SwapMinimumLiquidity; diff --git a/pallets/admin-utils/src/tests/mod.rs b/pallets/admin-utils/src/tests/mod.rs index 4e534c3210..1aaefc8f8d 100644 --- a/pallets/admin-utils/src/tests/mod.rs +++ b/pallets/admin-utils/src/tests/mod.rs @@ -391,39 +391,6 @@ fn test_sudo_subnet_owner_cut() { }); } -#[test] -fn test_sudo_set_max_weight_limit() { - new_test_ext().execute_with(|| { - let netuid = NetUid::from(1); - let to_be_set: u16 = 10; - add_network(netuid, 10); - let init_value: u16 = SubtensorModule::get_max_weight_limit(netuid); - assert_eq!( - AdminUtils::sudo_set_max_weight_limit( - <::RuntimeOrigin>::signed(U256::from(1)), - netuid, - to_be_set - ), - Err(DispatchError::BadOrigin) - ); - assert_eq!( - AdminUtils::sudo_set_max_weight_limit( - <::RuntimeOrigin>::root(), - netuid.next(), - to_be_set - ), - Err(Error::::SubnetDoesNotExist.into()) - ); - assert_eq!(SubtensorModule::get_max_weight_limit(netuid), init_value); - assert_ok!(AdminUtils::sudo_set_max_weight_limit( - <::RuntimeOrigin>::root(), - netuid, - to_be_set - )); - assert_eq!(SubtensorModule::get_max_weight_limit(netuid), to_be_set); - }); -} - #[test] fn test_sudo_set_issuance() { new_test_ext().execute_with(|| { @@ -1745,48 +1712,48 @@ fn test_sets_a_lower_value_clears_small_nominations() { }); } -#[test] -fn test_sudo_set_subnet_owner_hotkey() { - new_test_ext().execute_with(|| { - let netuid = NetUid::from(1); - - let coldkey: U256 = U256::from(1); - let hotkey: U256 = U256::from(2); - let new_hotkey: U256 = U256::from(3); - - let coldkey_origin = <::RuntimeOrigin>::signed(coldkey); - let root = RuntimeOrigin::root(); - let random_account = 
RuntimeOrigin::signed(U256::from(123456)); - - pallet_subtensor::SubnetOwner::::insert(netuid, coldkey); - pallet_subtensor::SubnetOwnerHotkey::::insert(netuid, hotkey); - assert_eq!( - pallet_subtensor::SubnetOwnerHotkey::::get(netuid), - hotkey - ); - - assert_ok!(AdminUtils::sudo_set_subnet_owner_hotkey( - coldkey_origin, - netuid, - new_hotkey - )); - - assert_eq!( - pallet_subtensor::SubnetOwnerHotkey::::get(netuid), - new_hotkey - ); - - assert_noop!( - AdminUtils::sudo_set_subnet_owner_hotkey(random_account, netuid, new_hotkey), - DispatchError::BadOrigin - ); - - assert_noop!( - AdminUtils::sudo_set_subnet_owner_hotkey(root, netuid, new_hotkey), - DispatchError::BadOrigin - ); - }); -} +// #[test] +// fn test_sudo_set_subnet_owner_hotkey() { +// new_test_ext().execute_with(|| { +// let netuid = NetUid::from(1); + +// let coldkey: U256 = U256::from(1); +// let hotkey: U256 = U256::from(2); +// let new_hotkey: U256 = U256::from(3); + +// let coldkey_origin = <::RuntimeOrigin>::signed(coldkey); +// let root = RuntimeOrigin::root(); +// let random_account = RuntimeOrigin::signed(U256::from(123456)); + +// pallet_subtensor::SubnetOwner::::insert(netuid, coldkey); +// pallet_subtensor::SubnetOwnerHotkey::::insert(netuid, hotkey); +// assert_eq!( +// pallet_subtensor::SubnetOwnerHotkey::::get(netuid), +// hotkey +// ); + +// assert_ok!(AdminUtils::sudo_set_subnet_owner_hotkey( +// coldkey_origin, +// netuid, +// new_hotkey +// )); + +// assert_eq!( +// pallet_subtensor::SubnetOwnerHotkey::::get(netuid), +// new_hotkey +// ); + +// assert_noop!( +// AdminUtils::sudo_set_subnet_owner_hotkey(random_account, netuid, new_hotkey), +// DispatchError::BadOrigin +// ); + +// assert_noop!( +// AdminUtils::sudo_set_subnet_owner_hotkey(root, netuid, new_hotkey), +// DispatchError::BadOrigin +// ); +// }); +// } // cargo test --package pallet-admin-utils --lib -- tests::test_sudo_set_ema_halving --exact --show-output #[test] @@ -2058,7 +2025,7 @@ fn test_freeze_window_blocks_root_and_owner() { let owner: U256 = U256::from(9); SubnetOwner::::insert(netuid, owner); assert_noop!( - AdminUtils::sudo_set_kappa( + AdminUtils::sudo_set_commit_reveal_weights_interval( <::RuntimeOrigin>::signed(owner), netuid, 77 @@ -2145,14 +2112,14 @@ fn test_owner_hyperparam_update_rate_limit_enforced() { )); // First update succeeds - assert_ok!(AdminUtils::sudo_set_kappa( + assert_ok!(AdminUtils::sudo_set_commit_reveal_weights_interval( <::RuntimeOrigin>::signed(owner), netuid, 11 )); // Immediate second update fails due to TxRateLimitExceeded assert_noop!( - AdminUtils::sudo_set_kappa( + AdminUtils::sudo_set_commit_reveal_weights_interval( <::RuntimeOrigin>::signed(owner), netuid, 12 @@ -2163,7 +2130,7 @@ fn test_owner_hyperparam_update_rate_limit_enforced() { // Advance less than limit still fails run_to_block(SubtensorModule::get_current_block_as_u64() + 1); assert_noop!( - AdminUtils::sudo_set_kappa( + AdminUtils::sudo_set_commit_reveal_weights_interval( <::RuntimeOrigin>::signed(owner), netuid, 13 @@ -2173,7 +2140,7 @@ fn test_owner_hyperparam_update_rate_limit_enforced() { // Advance one more block to pass the limit; should succeed run_to_block(SubtensorModule::get_current_block_as_u64() + 1); - assert_ok!(AdminUtils::sudo_set_kappa( + assert_ok!(AdminUtils::sudo_set_commit_reveal_weights_interval( <::RuntimeOrigin>::signed(owner), netuid, 14 @@ -2200,7 +2167,7 @@ fn test_hyperparam_rate_limit_enforced_by_tempo() { )); // First owner update should succeed - assert_ok!(AdminUtils::sudo_set_kappa( + 
assert_ok!(AdminUtils::sudo_set_commit_reveal_weights_interval( <::RuntimeOrigin>::signed(owner), netuid, 1 @@ -2208,13 +2175,17 @@ fn test_hyperparam_rate_limit_enforced_by_tempo() { // Immediate second update should fail due to tempo-based RL assert_noop!( - AdminUtils::sudo_set_kappa(<::RuntimeOrigin>::signed(owner), netuid, 2), + AdminUtils::sudo_set_commit_reveal_weights_interval( + <::RuntimeOrigin>::signed(owner), + netuid, + 2 + ), SubtensorError::::TxRateLimitExceeded ); // Advance 2 blocks (2 tempos with tempo=1) then succeed run_to_block(SubtensorModule::get_current_block_as_u64() + 2); - assert_ok!(AdminUtils::sudo_set_kappa( + assert_ok!(AdminUtils::sudo_set_commit_reveal_weights_interval( <::RuntimeOrigin>::signed(owner), netuid, 3 @@ -2244,7 +2215,7 @@ fn test_owner_hyperparam_rate_limit_independent_per_param() { )); // First update to kappa should succeed - assert_ok!(AdminUtils::sudo_set_kappa( + assert_ok!(AdminUtils::sudo_set_commit_reveal_weights_interval( <::RuntimeOrigin>::signed(owner), netuid, 10 @@ -2252,7 +2223,7 @@ fn test_owner_hyperparam_rate_limit_independent_per_param() { // Immediate second update to the SAME param (kappa) should be blocked by RL assert_noop!( - AdminUtils::sudo_set_kappa( + AdminUtils::sudo_set_commit_reveal_weights_interval( <::RuntimeOrigin>::signed(owner), netuid, 11 @@ -2269,7 +2240,7 @@ fn test_owner_hyperparam_rate_limit_independent_per_param() { // kappa should still be blocked until its own RL window passes assert_noop!( - AdminUtils::sudo_set_kappa( + AdminUtils::sudo_set_commit_reveal_weights_interval( <::RuntimeOrigin>::signed(owner), netuid, 12 @@ -2287,7 +2258,7 @@ fn test_owner_hyperparam_rate_limit_independent_per_param() { run_to_block(SubtensorModule::get_current_block_as_u64() + 2); // Now both hyperparameters can be updated again - assert_ok!(AdminUtils::sudo_set_kappa( + assert_ok!(AdminUtils::sudo_set_commit_reveal_weights_interval( <::RuntimeOrigin>::signed(owner), netuid, 13 @@ -2663,7 +2634,6 @@ fn test_trim_to_max_allowed_uids() { assert!(!AlphaDividendsPerSubnet::::contains_key( netuid, hotkey )); - assert!(!TaoDividendsPerSubnet::::contains_key(netuid, hotkey)); assert!(!Axons::::contains_key(netuid, hotkey)); assert!(!NeuronCertificates::::contains_key(netuid, hotkey)); assert!(!Prometheus::::contains_key(netuid, hotkey)); diff --git a/pallets/collective/Cargo.toml b/pallets/collective/Cargo.toml deleted file mode 100644 index 4cb8e1422e..0000000000 --- a/pallets/collective/Cargo.toml +++ /dev/null @@ -1,55 +0,0 @@ -[package] -name = "pallet-subtensor-collective" -version = "4.0.0-dev" -authors = ["Parity Technologies , Opentensor Technologies"] -edition.workspace = true -license = "Apache-2.0" -homepage = "https://bittensor.com" -repository = "https://github.com/opentensor/subtensor" -description = "Collective system: Members of a set of account IDs can make their collective feelings known through dispatched calls from one of two specialized origins." 
-readme = "README.md" - -[lints] -workspace = true - -[package.metadata.docs.rs] -targets = ["x86_64-unknown-linux-gnu"] - -[dependencies] -subtensor-macros.workspace = true -codec = { workspace = true, features = ["derive"] } -log.workspace = true -scale-info = { workspace = true, features = ["derive"] } -frame-benchmarking = { workspace = true, optional = true } -frame-support.workspace = true -frame-system.workspace = true -sp-core.workspace = true -sp-io.workspace = true -sp-runtime.workspace = true -sp-std.workspace = true - -[features] -default = ["std"] -std = [ - "codec/std", - "frame-benchmarking?/std", - "frame-support/std", - "frame-system/std", - "log/std", - "scale-info/std", - "sp-core/std", - "sp-io/std", - "sp-runtime/std", - "sp-std/std", -] -runtime-benchmarks = [ - "frame-benchmarking/runtime-benchmarks", - "frame-support/runtime-benchmarks", - "frame-system/runtime-benchmarks", - "sp-runtime/runtime-benchmarks", -] -try-runtime = [ - "frame-support/try-runtime", - "frame-system/try-runtime", - "sp-runtime/try-runtime", -] diff --git a/pallets/collective/README.md b/pallets/collective/README.md deleted file mode 100644 index 444927e51d..0000000000 --- a/pallets/collective/README.md +++ /dev/null @@ -1,25 +0,0 @@ -Collective system: Members of a set of account IDs can make their collective feelings known -through dispatched calls from one of two specialized origins. - -The membership can be provided in one of two ways: either directly, using the Root-dispatchable -function `set_members`, or indirectly, through implementing the `ChangeMembers`. -The pallet assumes that the amount of members stays at or below `MaxMembers` for its weight -calculations, but enforces this neither in `set_members` nor in `change_members_sorted`. - -A "prime" member may be set to help determine the default vote behavior based on chain -config. If `PrimeDefaultVote` is used, the prime vote acts as the default vote in case of any -abstentions after the voting period. If `MoreThanMajorityThenPrimeDefaultVote` is used, then -abstentations will first follow the majority of the collective voting, and then the prime -member. - -Voting happens through motions comprising a proposal (i.e. a dispatchable) plus a -number of approvals required for it to pass and be called. Motions are open for members to -vote on for a minimum period given by `MotionDuration`. As soon as the required number of -approvals is given, the motion is closed and executed. If the number of approvals is not reached -during the voting period, then `close` may be called by any account in order to force the end -the motion explicitly. If a prime member is defined, then their vote is used instead of any -abstentions and the proposal is executed if there are enough approvals counting the new votes. - -If there are not, or if no prime member is set, then the motion is dropped without being executed. - -License: Apache-2.0 diff --git a/pallets/collective/src/benchmarking.rs b/pallets/collective/src/benchmarking.rs deleted file mode 100644 index dcca9dd3b0..0000000000 --- a/pallets/collective/src/benchmarking.rs +++ /dev/null @@ -1,602 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Staking pallet benchmarking. -#![allow(clippy::arithmetic_side_effects, clippy::indexing_slicing)] - -use super::*; -use crate::Pallet as Collective; - -use sp_runtime::traits::Bounded; -use sp_std::mem::size_of; - -use frame_benchmarking::v1::{account, benchmarks_instance_pallet, whitelisted_caller}; -use frame_system::{Call as SystemCall, Pallet as System, RawOrigin as SystemOrigin}; - -const SEED: u32 = 0; - -const MAX_BYTES: u32 = 1_024; - -fn assert_last_event( - generic_event: ::RuntimeEvent, -) { - frame_system::Pallet::::assert_last_event(generic_event.into()); -} - -fn id_to_remark_data(id: u32, length: usize) -> Vec { - id.to_le_bytes().into_iter().cycle().take(length).collect() -} - -benchmarks_instance_pallet! { - set_members { - let m in 0 .. T::MaxMembers::get(); - let n in 0 .. T::MaxMembers::get(); - let p in 0 .. T::MaxProposals::get(); - - // Set old members. - // We compute the difference of old and new members, so it should influence timing. - let mut old_members = vec![]; - for i in 0 .. m { - let old_member = account::("old member", i, SEED); - old_members.push(old_member); - } - let old_members_count = old_members.len() as u32; - - Collective::::set_members( - SystemOrigin::Root.into(), - old_members.clone(), - old_members.last().cloned(), - T::MaxMembers::get(), - )?; - - // If there were any old members generate a bunch of proposals. - if m > 0 { - // Set a high threshold for proposals passing so that they stay around. - let threshold = m.max(2); - // Length of the proposals should be irrelevant to `set_members`. - let length = 100; - for i in 0 .. p { - // Proposals should be different so that different proposal hashes are generated - let proposal: T::Proposal = SystemCall::::remark { remark: id_to_remark_data(i, length) }.into(); - Collective::::propose( - SystemOrigin::Signed(old_members.last().expect("m is greater than 0; old_members must have at least 1 element; qed").clone()).into(), - Box::new(proposal.clone()), - MAX_BYTES, - TryInto::>::try_into(3u64).ok().expect("convert u64 to block number.") - )?; - let hash = T::Hashing::hash_of(&proposal); - // Vote on the proposal to increase state relevant for `set_members`. - // Not voting for last old member because they proposed and not voting for the first member - // to keep the proposal from passing. - for j in 2 .. m - 1 { - let voter = &old_members[j as usize]; - let approve = true; - Collective::::vote( - SystemOrigin::Signed(voter.clone()).into(), - hash, - i, - approve, - )?; - } - } - } - - // Construct `new_members`. - // It should influence timing since it will sort this vector. - let mut new_members = vec![]; - for i in 0 .. n { - let member = account::("member", i, SEED); - new_members.push(member); - } - - }: _(SystemOrigin::Root, new_members.clone(), new_members.last().cloned(), T::MaxMembers::get()) - verify { - new_members.sort(); - assert_eq!(Collective::::members(), new_members); - } - - execute { - let b in 2 .. MAX_BYTES; - let m in 1 .. T::MaxMembers::get(); - - let bytes_in_storage = b + size_of::() as u32; - - // Construct `members`. 
- let mut members = vec![]; - for i in 0 .. m - 1 { - let member = account::("member", i, SEED); - members.push(member); - } - - let caller: T::AccountId = whitelisted_caller(); - members.push(caller.clone()); - - Collective::::set_members(SystemOrigin::Root.into(), members, None, T::MaxMembers::get())?; - - let proposal: T::Proposal = SystemCall::::remark { remark: id_to_remark_data(1, b as usize) }.into(); - - }: _(SystemOrigin::Signed(caller), Box::new(proposal.clone()), bytes_in_storage) - verify { - let proposal_hash = T::Hashing::hash_of(&proposal); - // Note that execution fails due to mis-matched origin - assert_last_event::( - Event::MemberExecuted { proposal_hash, result: Ok(()) }.into() - ); - } - - // This tests when proposal is created and queued as "proposed" - propose_proposed { - let b in 2 .. MAX_BYTES; - let m in 2 .. T::MaxMembers::get(); - let p in 1 .. T::MaxProposals::get(); - - let bytes_in_storage = b + size_of::() as u32; - - // Construct `members`. - let mut members = vec![]; - for i in 0 .. m - 1 { - let member = account::("member", i, SEED); - members.push(member); - } - let caller: T::AccountId = whitelisted_caller(); - members.push(caller.clone()); - Collective::::set_members(SystemOrigin::Root.into(), members, None, T::MaxMembers::get())?; - - let threshold = (m / 2) + 1; - // Add previous proposals. - for i in 0 .. p - 1 { - // Proposals should be different so that different proposal hashes are generated - let proposal: T::Proposal = SystemCall::::remark { remark: id_to_remark_data(i, b as usize) }.into(); - Collective::::propose( - SystemOrigin::Signed(caller.clone()).into(), - Box::new(proposal), - bytes_in_storage, - TryInto::>::try_into(3u64).ok().expect("convert u64 to block number.") - )?; - } - - assert_eq!(Collective::::proposals().len(), (p - 1) as usize); - - let proposal: T::Proposal = SystemCall::::remark { remark: id_to_remark_data(p, b as usize) }.into(); - - }: propose(SystemOrigin::Signed(caller.clone()), Box::new(proposal.clone()), bytes_in_storage, TryInto::>::try_into(3u64).ok().expect("convert u64 to block number.")) - verify { - // New proposal is recorded - assert_eq!(Collective::::proposals().len(), p as usize); - let proposal_hash = T::Hashing::hash_of(&proposal); - assert_last_event::(Event::Proposed { account: caller, proposal_index: p - 1, proposal_hash, threshold }.into()); - } - - vote { - // We choose 5 as a minimum so we always trigger a vote in the voting loop (`for j in ...`) - let m in 5 .. T::MaxMembers::get(); - - let p = T::MaxProposals::get(); - let b = MAX_BYTES; - let bytes_in_storage = b + size_of::() as u32; - - // Construct `members`. - let mut members = vec![]; - let proposer: T::AccountId = account::("proposer", 0, SEED); - members.push(proposer.clone()); - for i in 1 .. m - 1 { - let member = account::("member", i, SEED); - members.push(member); - } - let voter: T::AccountId = account::("voter", 0, SEED); - members.push(voter.clone()); - Collective::::set_members(SystemOrigin::Root.into(), members.clone(), None, T::MaxMembers::get())?; - - // Threshold is 1 less than the number of members so that one person can vote nay - let threshold = m - 1; - - // Add previous proposals - let mut last_hash = T::Hash::default(); - for i in 0 .. 
p { - // Proposals should be different so that different proposal hashes are generated - let proposal: T::Proposal = SystemCall::::remark { remark: id_to_remark_data(i, b as usize) }.into(); - Collective::::propose( - SystemOrigin::Signed(proposer.clone()).into(), - Box::new(proposal.clone()), - bytes_in_storage, - TryInto::>::try_into(3u64).ok().expect("convert u64 to block number.") - )?; - last_hash = T::Hashing::hash_of(&proposal); - } - - let index = p - 1; - // Have almost everyone vote aye on last proposal, while keeping it from passing. - for j in 0 .. m - 3 { - let voter = &members[j as usize]; - let approve = true; - Collective::::vote( - SystemOrigin::Signed(voter.clone()).into(), - last_hash, - index, - approve, - )?; - } - // Voter votes aye without resolving the vote. - let approve = true; - Collective::::vote( - SystemOrigin::Signed(voter.clone()).into(), - last_hash, - index, - approve, - )?; - - assert_eq!(Collective::::proposals().len(), p as usize); - - // Voter switches vote to nay, but does not kill the vote, just updates + inserts - let approve = false; - - // Whitelist voter account from further DB operations. - let voter_key = frame_system::Account::::hashed_key_for(&voter); - frame_benchmarking::benchmarking::add_to_whitelist(voter_key.into()); - }: _(SystemOrigin::Signed(voter), last_hash, index, approve) - verify { - // All proposals exist and the last proposal has just been updated. - assert_eq!(Collective::::proposals().len(), p as usize); - let voting = Collective::::voting(last_hash).ok_or("Proposal Missing")?; - assert_eq!(voting.ayes.len(), (m - 3) as usize); - assert_eq!(voting.nays.len(), 1); - } - - close_early_disapproved { - // We choose 4 as a minimum so we always trigger a vote in the voting loop (`for j in ...`) - let m in 4 .. T::MaxMembers::get(); - let p in 1 .. T::MaxProposals::get(); - - let bytes = 100; - let bytes_in_storage = bytes + size_of::() as u32; - - // Construct `members`. - let mut members = vec![]; - let proposer = account::("proposer", 0, SEED); - members.push(proposer.clone()); - for i in 1 .. m - 1 { - let member = account::("member", i, SEED); - members.push(member); - } - let voter = account::("voter", 0, SEED); - members.push(voter.clone()); - Collective::::set_members(SystemOrigin::Root.into(), members.clone(), None, T::MaxMembers::get())?; - - // Add previous proposals - let mut last_hash = T::Hash::default(); - for i in 0 .. p { - // Proposals should be different so that different proposal hashes are generated - let proposal: T::Proposal = SystemCall::::remark { remark: id_to_remark_data(i, bytes as usize) }.into(); - Collective::::propose( - SystemOrigin::Signed(proposer.clone()).into(), - Box::new(proposal.clone()), - bytes_in_storage, - TryInto::>::try_into(3u64).ok().expect("convert u64 to block number.") - )?; - last_hash = T::Hashing::hash_of(&proposal); - } - - let index = p - 1; - // Have most everyone vote nay on last proposal, while keeping it from passing. - for j in 0 .. m - 2 { - let voter = &members[j as usize]; - let approve = false; - Collective::::vote( - SystemOrigin::Signed(voter.clone()).into(), - last_hash, - index, - approve, - )?; - } - - // Voter votes aye without resolving the vote. - let approve = true; - Collective::::vote( - SystemOrigin::Signed(voter.clone()).into(), - last_hash, - index, - approve, - )?; - - assert_eq!(Collective::::proposals().len(), p as usize); - - // Whitelist voter account from further DB operations. 
- let voter_key = frame_system::Account::::hashed_key_for(&voter); - frame_benchmarking::benchmarking::add_to_whitelist(voter_key.into()); - }: close(SystemOrigin::Root, last_hash, index, Weight::MAX, bytes_in_storage) - verify { - // The last proposal is removed. - assert_eq!(Collective::::proposals().len(), (p - 1) as usize); - assert_last_event::(Event::Disapproved { proposal_hash: last_hash }.into()); - } - - close_early_approved { - let b in 2 .. MAX_BYTES; - // We choose 4 as a minimum so we always trigger a vote in the voting loop (`for j in ...`) - let m in 4 .. T::MaxMembers::get(); - let p in 1 .. T::MaxProposals::get(); - - let bytes_in_storage = b + size_of::() as u32; - - // Construct `members`. - let mut members = vec![]; - for i in 0 .. m - 1 { - let member = account::("member", i, SEED); - members.push(member); - } - let caller: T::AccountId = whitelisted_caller(); - members.push(caller.clone()); - Collective::::set_members(SystemOrigin::Root.into(), members.clone(), None, T::MaxMembers::get())?; - - // Add previous proposals - let mut last_hash = T::Hash::default(); - for i in 0 .. p { - // Proposals should be different so that different proposal hashes are generated - let proposal: T::Proposal = SystemCall::::remark { remark: id_to_remark_data(i, b as usize) }.into(); - Collective::::propose( - SystemOrigin::Signed(caller.clone()).into(), - Box::new(proposal.clone()), - bytes_in_storage, - TryInto::>::try_into(3u64).ok().expect("convert u64 to block number.") - )?; - last_hash = T::Hashing::hash_of(&proposal); - } - - // Caller switches vote to nay on their own proposal, allowing them to be the deciding approval vote - Collective::::vote( - SystemOrigin::Signed(caller.clone()).into(), - last_hash, - p - 1, - false, - )?; - - // Have almost everyone vote aye on last proposal, while keeping it from failing. - for j in 2 .. m - 1 { - let voter = &members[j as usize]; - let approve = true; - Collective::::vote( - SystemOrigin::Signed(voter.clone()).into(), - last_hash, - p - 1, - approve, - )?; - } - - // Member zero is the first aye - Collective::::vote( - SystemOrigin::Signed(members[0].clone()).into(), - last_hash, - p - 1, - true, - )?; - - assert_eq!(Collective::::proposals().len(), p as usize); - - // Caller switches vote to aye, which passes the vote - let index = p - 1; - let approve = true; - Collective::::vote( - SystemOrigin::Signed(caller.clone()).into(), - last_hash, - index, approve, - )?; - - }: close(SystemOrigin::Root, last_hash, index, Weight::MAX, bytes_in_storage) - verify { - // The last proposal is removed. - assert_eq!(Collective::::proposals().len(), (p - 1) as usize); - assert_last_event::(Event::Executed { proposal_hash: last_hash, result: Ok(()) }.into()); - } - - close_disapproved { - // We choose 4 as a minimum so we always trigger a vote in the voting loop (`for j in ...`) - let m in 4 .. T::MaxMembers::get(); - let p in 1 .. T::MaxProposals::get(); - - let bytes = 100; - let bytes_in_storage = bytes + size_of::() as u32; - - // Construct `members`. - let mut members = vec![]; - for i in 0 .. m - 1 { - let member = account::("member", i, SEED); - members.push(member); - } - let caller: T::AccountId = whitelisted_caller(); - members.push(caller.clone()); - Collective::::set_members( - SystemOrigin::Root.into(), - members.clone(), - Some(caller.clone()), - T::MaxMembers::get(), - )?; - - // Add proposals - let mut last_hash = T::Hash::default(); - for i in 0 .. 
p { - // Proposals should be different so that different proposal hashes are generated - let proposal: T::Proposal = SystemCall::::remark { remark: id_to_remark_data(i, bytes as usize) }.into(); - Collective::::propose( - SystemOrigin::Signed(caller.clone()).into(), - Box::new(proposal.clone()), - bytes_in_storage, - TryInto::>::try_into(3u64).ok().expect("convert u64 to block number.") - )?; - last_hash = T::Hashing::hash_of(&proposal); - } - - let index = p - 1; - // Have almost everyone vote aye on last proposal, while keeping it from passing. - // A few abstainers will be the nay votes needed to fail the vote. - let mut yes_votes: MemberCount = 0; - for j in 2 .. m / 2 { - let voter = &members[j as usize]; - let approve = true; - yes_votes += 1; - // vote aye till a prime nay vote keeps the proposal disapproved. - if <>::DefaultVote as DefaultVote>::default_vote( - Some(false), - yes_votes, - 0, - m,) { - break; - } - Collective::::vote( - SystemOrigin::Signed(voter.clone()).into(), - last_hash, - index, - approve, - )?; - } - - // caller is prime, prime votes nay - Collective::::vote( - SystemOrigin::Signed(caller.clone()).into(), - last_hash, - index, - false, - )?; - - System::::set_block_number(BlockNumberFor::::max_value()); - assert_eq!(Collective::::proposals().len(), p as usize); - - // Prime nay will close it as disapproved - }: close(SystemOrigin::Root, last_hash, index, Weight::MAX, bytes_in_storage) - verify { - assert_eq!(Collective::::proposals().len(), (p - 1) as usize); - assert_last_event::(Event::Disapproved { proposal_hash: last_hash }.into()); - } - - close_approved { - let b in 2 .. MAX_BYTES; - // We choose 4 as a minimum so we always trigger a vote in the voting loop (`for j in ...`) - let m in 4 .. T::MaxMembers::get(); - let p in 1 .. T::MaxProposals::get(); - - let bytes_in_storage = b + size_of::() as u32; - - // Construct `members`. - let mut members = vec![]; - for i in 0 .. m - 1 { - let member = account::("member", i, SEED); - members.push(member); - } - let caller: T::AccountId = whitelisted_caller(); - members.push(caller.clone()); - Collective::::set_members( - SystemOrigin::Root.into(), - members.clone(), - Some(caller.clone()), - T::MaxMembers::get(), - )?; - - // Add proposals - let mut last_hash = T::Hash::default(); - for i in 0 .. p { - // Proposals should be different so that different proposal hashes are generated - let proposal: T::Proposal = SystemCall::::remark { remark: id_to_remark_data(i, b as usize) }.into(); - Collective::::propose( - SystemOrigin::Signed(caller.clone()).into(), - Box::new(proposal.clone()), - bytes_in_storage, - TryInto::>::try_into(3u64).ok().expect("convert u64 to block number.") - )?; - last_hash = T::Hashing::hash_of(&proposal); - } - - // The prime member votes aye, so abstentions default to aye. - Collective::::vote( - SystemOrigin::Signed(caller.clone()).into(), - last_hash, - p - 1, - true // Vote aye. - )?; - - // Have almost everyone vote nay on last proposal, while keeping it from failing. - // A few abstainers will be the aye votes needed to pass the vote. - for j in 2 .. 
m / 2 { - let voter = &members[j as usize]; - let approve = false; - Collective::::vote( - SystemOrigin::Signed(voter.clone()).into(), - last_hash, - p - 1, - approve - )?; - } - - // caller is prime, prime already votes aye by creating the proposal - System::::set_block_number(BlockNumberFor::::max_value()); - assert_eq!(Collective::::proposals().len(), p as usize); - - // Prime aye will close it as approved - }: close(SystemOrigin::Root, last_hash, p - 1, Weight::MAX, bytes_in_storage) - verify { - assert_eq!(Collective::::proposals().len(), (p - 1) as usize); - assert_last_event::(Event::Executed { proposal_hash: last_hash, result: Ok(()) }.into()); - } - - disapprove_proposal { - let p in 1 .. T::MaxProposals::get(); - - let m = 3; - let b = MAX_BYTES; - let bytes_in_storage = b + size_of::() as u32; - - // Construct `members`. - let mut members = vec![]; - for i in 0 .. m - 1 { - let member = account::("member", i, SEED); - members.push(member); - } - let caller = account::("caller", 0, SEED); - members.push(caller.clone()); - Collective::::set_members( - SystemOrigin::Root.into(), - members.clone(), - Some(caller.clone()), - T::MaxMembers::get(), - )?; - - // Threshold is one less than total members so that two nays will disapprove the vote - let threshold = m - 1; - - // Add proposals - let mut last_hash = T::Hash::default(); - for i in 0 .. p { - // Proposals should be different so that different proposal hashes are generated - let proposal: T::Proposal = SystemCall::::remark { remark: id_to_remark_data(i, b as usize) }.into(); - Collective::::propose( - SystemOrigin::Signed(caller.clone()).into(), - Box::new(proposal.clone()), - bytes_in_storage, - TryInto::>::try_into(3u64).ok().expect("convert u64 to block number.") - )?; - last_hash = T::Hashing::hash_of(&proposal); - } - - System::::set_block_number(BlockNumberFor::::max_value()); - assert_eq!(Collective::::proposals().len(), p as usize); - - }: _(SystemOrigin::Root, last_hash) - verify { - assert_eq!(Collective::::proposals().len(), (p - 1) as usize); - assert_last_event::(Event::Disapproved { proposal_hash: last_hash }.into()); - } - - impl_benchmark_test_suite!(Collective, crate::tests::new_test_ext(), crate::tests::Test); -} diff --git a/pallets/collective/src/lib.rs b/pallets/collective/src/lib.rs deleted file mode 100644 index e964a28539..0000000000 --- a/pallets/collective/src/lib.rs +++ /dev/null @@ -1,1243 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Collective system: Members of a set of account IDs can make their collective feelings known -//! through dispatched calls from one of two specialized origins. -//! -//! The membership can be provided in one of two ways: either directly, using the Root-dispatchable -//! function `set_members`, or indirectly, through implementing the `ChangeMembers`. -//! 
The pallet assumes that the amount of members stays at or below `MaxMembers` for its weight -//! calculations, but enforces this neither in `set_members` nor in `change_members_sorted`. -//! -//! A "prime" member may be set to help determine the default vote behavior based on chain -//! config. If `PrimeDefaultVote` is used, the prime vote acts as the default vote in case of any -//! abstentions after the voting period. If `MoreThanMajorityThenPrimeDefaultVote` is used, then -//! abstentions will first follow the majority of the collective voting, and then the prime -//! member. -//! -//! Voting happens through motions comprising a proposal (i.e. a curried dispatchable) plus a -//! number of approvals required for it to pass and be called. Motions are open for members to -//! vote on for a minimum period given by `MotionDuration`. As soon as the needed number of -//! approvals is given, the motion is closed and executed. If the number of approvals is not reached -//! during the voting period, then `close` may be called by any account in order to force the end -//! the motion explicitly. If a prime member is defined then their vote is used in place of any -//! abstentions and the proposal is executed if there are enough approvals counting the new votes. -//! -//! If there are not, or if no prime is set, then the motion is dropped without being executed. - -#![cfg_attr(not(feature = "std"), no_std)] -#![recursion_limit = "128"] - -use codec::DecodeWithMemTracking; -use frame_support::{ - dispatch::{DispatchResultWithPostInfo, GetDispatchInfo, Pays, PostDispatchInfo}, - ensure, - pallet_prelude::*, - traits::{ - Backing, ChangeMembers, EnsureOrigin, Get, GetBacking, InitializeMembers, StorageVersion, - }, - weights::Weight, -}; -use scale_info::TypeInfo; -use sp_io::storage; -use sp_runtime::traits::Dispatchable; -use sp_runtime::{RuntimeDebug, Saturating, traits::Hash}; -use sp_std::{marker::PhantomData, prelude::*, result}; - -#[cfg(test)] -mod tests; - -#[cfg(feature = "runtime-benchmarks")] -mod benchmarking; -pub mod weights; - -pub use pallet::*; -use subtensor_macros::freeze_struct; -pub use weights::WeightInfo; - -const LOG_TARGET: &str = "runtime::collective"; - -/// Simple index type for proposal counting. -pub type ProposalIndex = u32; - -/// A number of members. -/// -/// This also serves as a number of voting members, and since for motions, each member may -/// vote exactly once, therefore also the number of votes for any given motion. -pub type MemberCount = u32; - -/// Default voting strategy when a member is inactive. -pub trait DefaultVote { - /// Get the default voting strategy, given: - /// - /// - Whether the prime member voted Aye. - /// - Raw number of yes votes. - /// - Raw number of no votes. - /// - Total number of member count. - fn default_vote( - prime_vote: Option, - yes_votes: MemberCount, - no_votes: MemberCount, - len: MemberCount, - ) -> bool; -} - -/// Set the prime member's vote as the default vote. -pub struct PrimeDefaultVote; - -impl DefaultVote for PrimeDefaultVote { - fn default_vote( - prime_vote: Option, - _yes_votes: MemberCount, - _no_votes: MemberCount, - _len: MemberCount, - ) -> bool { - prime_vote.unwrap_or(false) - } -} - -/// First see if yes vote are over majority of the whole collective. If so, set the default vote -/// as yes. Otherwise, use the prime member's vote as the default vote. 
-pub struct MoreThanMajorityThenPrimeDefaultVote; - -impl DefaultVote for MoreThanMajorityThenPrimeDefaultVote { - fn default_vote( - prime_vote: Option, - yes_votes: MemberCount, - _no_votes: MemberCount, - len: MemberCount, - ) -> bool { - let more_than_majority = yes_votes.saturating_mul(2) > len; - more_than_majority || prime_vote.unwrap_or(false) - } -} - -/// Origin for the collective module. -#[derive( - PartialEq, - Eq, - Clone, - RuntimeDebug, - Encode, - Decode, - DecodeWithMemTracking, - TypeInfo, - MaxEncodedLen, -)] -#[scale_info(skip_type_params(I))] -#[codec(mel_bound(AccountId: MaxEncodedLen))] -pub enum RawOrigin { - /// It has been condoned by a given number of members of the collective from a given total. - Members(MemberCount, MemberCount), - /// It has been condoned by a single member of the collective. - Member(AccountId), - /// Dummy to manage the fact we have instancing. - _Phantom(PhantomData), -} - -impl GetBacking for RawOrigin { - fn get_backing(&self) -> Option { - match self { - RawOrigin::Members(n, d) => Some(Backing { - approvals: *n, - eligible: *d, - }), - _ => None, - } - } -} - -/// Info for keeping track of a motion being voted on. -#[freeze_struct("a8e7b0b34ad52b17")] -#[derive(PartialEq, Eq, Clone, Encode, Decode, RuntimeDebug, TypeInfo)] -pub struct Votes { - /// The proposal's unique index. - index: ProposalIndex, - /// The number of approval votes that are needed to pass the motion. - threshold: MemberCount, - /// The current set of voters that approved it. - ayes: Vec, - /// The current set of voters that rejected it. - nays: Vec, - /// The hard end time of this vote. - end: BlockNumber, -} - -#[deny(missing_docs)] -#[frame_support::pallet] -pub mod pallet { - use super::*; - use frame_system::pallet_prelude::*; - - /// The current storage version. - const STORAGE_VERSION: StorageVersion = StorageVersion::new(4); - - #[pallet::pallet] - #[pallet::storage_version(STORAGE_VERSION)] - #[pallet::without_storage_info] - pub struct Pallet(PhantomData<(T, I)>); - - #[pallet::config] - pub trait Config: frame_system::Config { - /// The runtime origin type. - type RuntimeOrigin: From>; - - /// The runtime call dispatch type. - type Proposal: Parameter - + Dispatchable< - RuntimeOrigin = >::RuntimeOrigin, - PostInfo = PostDispatchInfo, - > + From> - + GetDispatchInfo; - - /// The time-out for council motions. - type MotionDuration: Get>; - - /// Maximum number of proposals allowed to be active in parallel. - type MaxProposals: Get; - - /// The maximum number of members supported by the pallet. Used for weight estimation. - /// - /// NOTE: - /// + Benchmarks will need to be re-run and weights adjusted if this changes. - /// + This pallet assumes that dependents keep to the limit without enforcing it. - type MaxMembers: Get; - - /// Default vote strategy of this collective. - type DefaultVote: DefaultVote; - - /// Weight information for extrinsics in this pallet. - type WeightInfo: WeightInfo; - - /// Origin allowed to set collective members - type SetMembersOrigin: EnsureOrigin<::RuntimeOrigin>; - - /// Origin allowed to propose - type CanPropose: CanPropose; - - /// Origin allowed to vote - type CanVote: CanVote; - - /// Members to expect in a vote - type GetVotingMembers: GetVotingMembers; - } - - #[pallet::genesis_config] - pub struct GenesisConfig, I: 'static = ()> { - /// The phantom just for type place holder. - pub phantom: PhantomData, - /// The initial members of the collective. 
- pub members: Vec, - } - - impl, I: 'static> Default for GenesisConfig { - fn default() -> Self { - Self { - phantom: Default::default(), - members: Default::default(), - } - } - } - - #[pallet::genesis_build] - impl, I: 'static> BuildGenesisConfig for GenesisConfig { - fn build(&self) { - use sp_std::collections::btree_set::BTreeSet; - let members_set: BTreeSet<_> = self.members.iter().collect(); - assert_eq!( - members_set.len(), - self.members.len(), - "Members cannot contain duplicate accounts." - ); - - Pallet::::initialize_members(&self.members) - } - } - - /// Origin for the collective pallet. - #[pallet::origin] - pub type Origin = RawOrigin<::AccountId, I>; - - /// The hashes of the active proposals. - #[pallet::storage] - #[pallet::getter(fn proposals)] - pub type Proposals, I: 'static = ()> = - StorageValue<_, BoundedVec, ValueQuery>; - - /// Actual proposal for a given hash, if it's current. - #[pallet::storage] - #[pallet::getter(fn proposal_of)] - pub type ProposalOf, I: 'static = ()> = - StorageMap<_, Identity, T::Hash, >::Proposal, OptionQuery>; - - /// Votes on a given proposal, if it is ongoing. - #[pallet::storage] - #[pallet::getter(fn voting)] - pub type Voting, I: 'static = ()> = - StorageMap<_, Identity, T::Hash, Votes>, OptionQuery>; - - /// Proposals so far. - #[pallet::storage] - #[pallet::getter(fn proposal_count)] - pub type ProposalCount, I: 'static = ()> = StorageValue<_, u32, ValueQuery>; - - /// The current members of the collective. This is stored sorted (just by value). - #[pallet::storage] - #[pallet::getter(fn members)] - pub type Members, I: 'static = ()> = - StorageValue<_, Vec, ValueQuery>; - - /// The prime member that helps determine the default vote behavior in case of absentations. - #[pallet::storage] - #[pallet::getter(fn prime)] - pub type Prime, I: 'static = ()> = StorageValue<_, T::AccountId, OptionQuery>; - - #[pallet::event] - #[pallet::generate_deposit(pub(super) fn deposit_event)] - pub enum Event, I: 'static = ()> { - /// A motion (given hash) has been proposed (by given account) with a threshold (given - /// `MemberCount`). - Proposed { - /// The account that proposed the motion. - account: T::AccountId, - /// The index of the proposal. - proposal_index: ProposalIndex, - /// The hash of the proposal. - proposal_hash: T::Hash, - /// The threshold of member for the proposal. - threshold: MemberCount, - }, - /// A motion (given hash) has been voted on by given account, leaving - /// a tally (yes votes and no votes given respectively as `MemberCount`). - Voted { - /// The account that voted. - account: T::AccountId, - /// The hash of the proposal. - proposal_hash: T::Hash, - /// Whether the account voted aye. - voted: bool, - /// The number of yes votes. - yes: MemberCount, - /// The number of no votes. - no: MemberCount, - }, - /// A motion was approved by the required threshold. - Approved { - /// The hash of the proposal. - proposal_hash: T::Hash, - }, - /// A motion was not approved by the required threshold. - Disapproved { - /// The hash of the proposal. - proposal_hash: T::Hash, - }, - /// A motion was executed; result will be `Ok` if it returned without error. - Executed { - /// The hash of the proposal. - proposal_hash: T::Hash, - /// The result of the execution. - result: DispatchResult, - }, - /// A single member did some action; result will be `Ok` if it returned without error. - MemberExecuted { - /// The hash of the proposal. - proposal_hash: T::Hash, - /// The result of the execution. 
- result: DispatchResult, - }, - /// A proposal was closed because its threshold was reached or after its duration was up. - Closed { - /// The hash of the proposal. - proposal_hash: T::Hash, - /// Whether the proposal was approved. - yes: MemberCount, - /// Whether the proposal was rejected. - no: MemberCount, - }, - } - - #[pallet::error] - pub enum Error { - /// Account is not a member of collective - NotMember, - /// Duplicate proposals not allowed - DuplicateProposal, - /// Proposal must exist - ProposalNotExists, - /// Index mismatched the proposal hash - IndexMismatchProposalHash, - /// Duplicate vote ignored - DuplicateVote, - /// The call to close the proposal was made too early, before the end of the voting - TooEarlyToCloseProposal, - /// There can only be a maximum of `MaxProposals` active proposals. - TooManyActiveProposals, - /// The given weight-bound for the proposal was too low. - ProposalWeightLessThanDispatchCallWeight, - /// The given length-bound for the proposal was too low. - ProposalLengthBoundLessThanProposalLength, - /// The given motion duration for the proposal was too low. - DurationLowerThanConfiguredMotionDuration, - } - - // Note that councillor operations are assigned to the operational class. - #[pallet::call] - impl, I: 'static> Pallet { - /// Set the collective's membership. - /// - /// - `new_members`: The new member list. Be nice to the chain and provide it sorted. - /// - `prime`: The prime member whose vote sets the default. - /// - `old_count`: The upper bound for the previous number of members in storage. Used for - /// weight estimation. - /// - /// The dispatch of this call must be `SetMembersOrigin`. - /// - /// NOTE: Does not enforce the expected `MaxMembers` limit on the amount of members, but - /// the weight estimations rely on it to estimate dispatchable weight. - /// - /// # WARNING: - /// - /// The `pallet-collective` can also be managed by logic outside of the pallet through the - /// implementation of the trait [`ChangeMembers`]. - /// Any call to `set_members` must be careful that the member set doesn't get out of sync - /// with other logic managing the member set. - /// - /// ## Complexity: - /// - `O(MP + N)` where: - /// - `M` old-members-count (code- and governance-bounded) - /// - `N` new-members-count (code- and governance-bounded) - /// - `P` proposals-count (code-bounded) - #[pallet::call_index(0)] - #[pallet::weight(( - T::WeightInfo::set_members( - *old_count, // M - new_members.len() as u32, // N - T::MaxProposals::get() // P - ), - DispatchClass::Operational - ))] - pub fn set_members( - origin: OriginFor, - new_members: Vec, - prime: Option, - old_count: MemberCount, - ) -> DispatchResultWithPostInfo { - T::SetMembersOrigin::ensure_origin(origin)?; - if new_members.len() > T::MaxMembers::get() as usize { - log::error!( - target: LOG_TARGET, - "New members count ({}) exceeds maximum amount of members expected ({}).", - new_members.len(), - T::MaxMembers::get(), - ); - } - - let old = Members::::get(); - if old.len() > old_count as usize { - log::warn!( - target: LOG_TARGET, - "Wrong count used to estimate set_members weight. 
expected ({}) vs actual ({})", - old_count, - old.len(), - ); - } - let mut new_members = new_members; - new_members.sort(); - >::set_members_sorted(&new_members, &old); - Prime::::set(prime); - - Ok(Some(T::WeightInfo::set_members( - old.len() as u32, // M - new_members.len() as u32, // N - T::MaxProposals::get(), // P - )) - .into()) - } - - /// Dispatch a proposal from a member using the `Member` origin. - /// - /// Origin must be a member of the collective. - /// - /// ## Complexity: - /// - `O(B + M + P)` where: - /// - `B` is `proposal` size in bytes (length-fee-bounded) - /// - `M` members-count (code-bounded) - /// - `P` complexity of dispatching `proposal` - #[pallet::call_index(1)] - #[pallet::weight(( - T::WeightInfo::execute( - *length_bound, // B - T::MaxMembers::get(), // M - ).saturating_add(proposal.get_dispatch_info().call_weight), // P - DispatchClass::Operational - ))] - pub fn execute( - origin: OriginFor, - proposal: Box<>::Proposal>, - #[pallet::compact] length_bound: u32, - ) -> DispatchResultWithPostInfo { - let who = ensure_signed(origin)?; - let members = Self::members(); - ensure!(members.contains(&who), Error::::NotMember); - - let proposal_len = proposal.encoded_size(); - ensure!( - proposal_len <= length_bound as usize, - Error::::ProposalLengthBoundLessThanProposalLength - ); - - let proposal_hash = T::Hashing::hash_of(&proposal); - let result = proposal.dispatch(RawOrigin::Member(who).into()); - Self::deposit_event(Event::MemberExecuted { - proposal_hash, - result: result.map(|_| ()).map_err(|e| e.error), - }); - - Ok(get_result_weight(result) - .map(|w| { - T::WeightInfo::execute( - proposal_len as u32, // B - members.len() as u32, // M - ) - .saturating_add(w) // P - }) - .into()) - } - - /// Add a new proposal to either be voted on or executed directly. - /// - /// Requires the sender to be member. - /// - /// `threshold` determines whether `proposal` is executed directly (`threshold < 2`) - /// or put up for voting. - /// - /// ## Complexity - /// - `O(B + M + P1)` or `O(B + M + P2)` where: - /// - `B` is `proposal` size in bytes (length-fee-bounded) - /// - `M` is members-count (code- and governance-bounded) - /// - branching is influenced by `threshold` where: - /// - `P1` is proposal execution complexity (`threshold < 2`) - /// - `P2` is proposals-count (code-bounded) (`threshold >= 2`) - #[pallet::call_index(2)] - #[pallet::weight(( - T::WeightInfo::propose_proposed( - *length_bound, // B - T::MaxMembers::get(), // M - T::MaxProposals::get(), // P2 - ), - DispatchClass::Operational - ))] - pub fn propose( - origin: OriginFor, - proposal: Box<>::Proposal>, - #[pallet::compact] length_bound: u32, - duration: BlockNumberFor, - ) -> DispatchResultWithPostInfo { - let who = ensure_signed(origin.clone())?; - ensure!(T::CanPropose::can_propose(&who), Error::::NotMember); - - ensure!( - duration >= T::MotionDuration::get(), - Error::::DurationLowerThanConfiguredMotionDuration - ); - - let threshold = T::GetVotingMembers::get_count() - .checked_div(2) - .unwrap_or(0) - .saturating_add(1); - - let members = Self::members(); - let (proposal_len, active_proposals) = - Self::do_propose_proposed(who, threshold, proposal, length_bound, duration)?; - - Ok(Some(T::WeightInfo::propose_proposed( - proposal_len, // B - members.len() as u32, // M - active_proposals, // P2 - )) - .into()) - } - - /// Add an aye or nay vote for the sender to the given proposal. - /// - /// Requires the sender to be a member. 
- /// - /// Transaction fees will be waived if the member is voting on any particular proposal - /// for the first time and the call is successful. Subsequent vote changes will charge a - /// fee. - /// ## Complexity - /// - `O(M)` where `M` is members-count (code- and governance-bounded) - #[pallet::call_index(3)] - #[pallet::weight((T::WeightInfo::vote(T::MaxMembers::get()), DispatchClass::Operational))] - pub fn vote( - origin: OriginFor, - proposal: T::Hash, - #[pallet::compact] index: ProposalIndex, - approve: bool, - ) -> DispatchResultWithPostInfo { - let who = ensure_signed(origin.clone())?; - ensure!(T::CanVote::can_vote(&who), Error::::NotMember); - - let members = Self::members(); - // Detects first vote of the member in the motion - let is_account_voting_first_time = Self::do_vote(who, proposal, index, approve)?; - - if is_account_voting_first_time { - Ok((Some(T::WeightInfo::vote(members.len() as u32)), Pays::No).into()) - } else { - Ok((Some(T::WeightInfo::vote(members.len() as u32)), Pays::Yes).into()) - } - } - - // NOTE: call_index(4) was `close_old_weight` and was removed due to weights v1 - // deprecation - - /// Disapprove a proposal, close, and remove it from the system, regardless of its current - /// state. - /// - /// Must be called by the Root origin. - /// - /// Parameters: - /// * `proposal_hash`: The hash of the proposal that should be disapproved. - /// - /// ## Complexity - /// O(P) where P is the number of max proposals - #[pallet::call_index(5)] - #[pallet::weight(T::WeightInfo::disapprove_proposal(T::MaxProposals::get()))] - pub fn disapprove_proposal( - origin: OriginFor, - proposal_hash: T::Hash, - ) -> DispatchResultWithPostInfo { - ensure_root(origin)?; - let proposal_count = Self::do_disapprove_proposal(proposal_hash); - Ok(Some(T::WeightInfo::disapprove_proposal(proposal_count)).into()) - } - - /// Close a vote that is either approved, disapproved or whose voting period has ended. - /// - /// May be called by any signed account in order to finish voting and close the proposal. - /// - /// If called before the end of the voting period it will only close the vote if it is - /// has enough votes to be approved or disapproved. - /// - /// If called after the end of the voting period abstentions are counted as rejections - /// unless there is a prime member set and the prime member cast an approval. - /// - /// If the close operation completes successfully with disapproval, the transaction fee will - /// be waived. Otherwise execution of the approved operation will be charged to the caller. - /// - /// + `proposal_weight_bound`: The maximum amount of weight consumed by executing the closed - /// proposal. - /// + `length_bound`: The upper bound for the length of the proposal in storage. Checked via - /// `storage::read` so it is `size_of::() == 4` larger than the pure length. - /// - /// ## Complexity - /// - `O(B + M + P1 + P2)` where: - /// - `B` is `proposal` size in bytes (length-fee-bounded) - /// - `M` is members-count (code- and governance-bounded) - /// - `P1` is the complexity of `proposal` preimage. 
- /// - `P2` is proposal-count (code-bounded) - #[pallet::call_index(6)] - #[pallet::weight(( - { - let b = *length_bound; - let m = T::MaxMembers::get(); - let p1 = *proposal_weight_bound; - let p2 = T::MaxProposals::get(); - T::WeightInfo::close_early_approved(b, m, p2) - .max(T::WeightInfo::close_early_disapproved(m, p2)) - .max(T::WeightInfo::close_approved(b, m, p2)) - .max(T::WeightInfo::close_disapproved(m, p2)) - .saturating_add(p1) - }, - DispatchClass::Operational - ))] - pub fn close( - origin: OriginFor, - proposal_hash: T::Hash, - #[pallet::compact] index: ProposalIndex, - proposal_weight_bound: Weight, - #[pallet::compact] length_bound: u32, - ) -> DispatchResultWithPostInfo { - ensure_root(origin)?; - - Self::do_close(proposal_hash, index, proposal_weight_bound, length_bound) - } - } -} - -use frame_system::pallet_prelude::BlockNumberFor; - -/// Return the weight of a dispatch call result as an `Option`. -/// -/// Will return the weight regardless of what the state of the result is. -fn get_result_weight(result: DispatchResultWithPostInfo) -> Option { - match result { - Ok(post_info) => post_info.actual_weight, - Err(err) => err.post_info.actual_weight, - } -} - -impl, I: 'static> Pallet { - /// Check whether `who` is a member of the collective. - pub fn is_member(who: &T::AccountId) -> bool { - // Note: The dispatchables *do not* use this to check membership so make sure - // to update those if this is changed. - Self::members().contains(who) - } - - /// Add a new proposal to be voted. - pub fn do_propose_proposed( - who: T::AccountId, - threshold: MemberCount, - proposal: Box<>::Proposal>, - length_bound: MemberCount, - duration: BlockNumberFor, - ) -> Result<(u32, u32), DispatchError> { - let proposal_len = proposal.encoded_size(); - ensure!( - proposal_len <= length_bound as usize, - Error::::ProposalLengthBoundLessThanProposalLength - ); - - let proposal_hash = T::Hashing::hash_of(&proposal); - ensure!( - !>::contains_key(proposal_hash), - Error::::DuplicateProposal - ); - - let active_proposals = - >::try_mutate(|proposals| -> Result { - proposals - .try_push(proposal_hash) - .map_err(|_| Error::::TooManyActiveProposals)?; - Ok(proposals.len()) - })?; - - let index = Self::proposal_count(); - >::try_mutate(|i| { - *i = i - .checked_add(1) - .ok_or(Error::::TooManyActiveProposals)?; - Ok::<(), Error>(()) - })?; - >::insert(proposal_hash, proposal); - let votes = { - let end = frame_system::Pallet::::block_number().saturating_add(duration); - Votes { - index, - threshold, - ayes: vec![], - nays: vec![], - end, - } - }; - >::insert(proposal_hash, votes); - - Self::deposit_event(Event::Proposed { - account: who, - proposal_index: index, - proposal_hash, - threshold, - }); - Ok((proposal_len as u32, active_proposals as u32)) - } - - /// Add an aye or nay vote for the member to the given proposal, returns true if it's the first - /// vote of the member in the motion - pub fn do_vote( - who: T::AccountId, - proposal: T::Hash, - index: ProposalIndex, - approve: bool, - ) -> Result { - let mut voting = Self::voting(proposal).ok_or(Error::::ProposalNotExists)?; - ensure!( - voting.index == index, - Error::::IndexMismatchProposalHash - ); - - let position_yes = voting.ayes.iter().position(|a| a == &who); - let position_no = voting.nays.iter().position(|a| a == &who); - - // Detects first vote of the member in the motion - let is_account_voting_first_time = position_yes.is_none() && position_no.is_none(); - - if approve { - if position_yes.is_none() { - 
voting.ayes.push(who.clone()); - } else { - return Err(Error::::DuplicateVote.into()); - } - if let Some(pos) = position_no { - voting.nays.swap_remove(pos); - } - } else { - if position_no.is_none() { - voting.nays.push(who.clone()); - } else { - return Err(Error::::DuplicateVote.into()); - } - if let Some(pos) = position_yes { - voting.ayes.swap_remove(pos); - } - } - - let yes_votes = voting.ayes.len() as MemberCount; - let no_votes = voting.nays.len() as MemberCount; - Self::deposit_event(Event::Voted { - account: who, - proposal_hash: proposal, - voted: approve, - yes: yes_votes, - no: no_votes, - }); - - Voting::::insert(proposal, voting); - - Ok(is_account_voting_first_time) - } - - /// Close a vote that is either approved, disapproved or whose voting period has ended. - pub fn do_close( - proposal_hash: T::Hash, - index: ProposalIndex, - proposal_weight_bound: Weight, - length_bound: u32, - ) -> DispatchResultWithPostInfo { - let voting = Self::voting(proposal_hash).ok_or(Error::::ProposalNotExists)?; - ensure!( - voting.index == index, - Error::::IndexMismatchProposalHash - ); - - let mut no_votes = voting.nays.len() as MemberCount; - let mut yes_votes = voting.ayes.len() as MemberCount; - let seats = T::GetVotingMembers::get_count() as MemberCount; - let approved = yes_votes >= voting.threshold; - let disapproved = seats.saturating_sub(no_votes) < voting.threshold; - // Allow (dis-)approving the proposal as soon as there are enough votes. - if approved { - let (proposal, len) = Self::validate_and_get_proposal( - &proposal_hash, - length_bound, - proposal_weight_bound, - )?; - Self::deposit_event(Event::Closed { - proposal_hash, - yes: yes_votes, - no: no_votes, - }); - let (proposal_weight, proposal_count) = - Self::do_approve_proposal(seats, yes_votes, proposal_hash, proposal); - return Ok(( - Some( - T::WeightInfo::close_early_approved(len as u32, seats, proposal_count) - .saturating_add(proposal_weight), - ), - Pays::Yes, - ) - .into()); - } else if disapproved { - Self::deposit_event(Event::Closed { - proposal_hash, - yes: yes_votes, - no: no_votes, - }); - let proposal_count = Self::do_disapprove_proposal(proposal_hash); - return Ok(( - Some(T::WeightInfo::close_early_disapproved( - seats, - proposal_count, - )), - Pays::No, - ) - .into()); - } - - // Only allow actual closing of the proposal after the voting period has ended. - ensure!( - frame_system::Pallet::::block_number() >= voting.end, - Error::::TooEarlyToCloseProposal - ); - - let prime_vote = Self::prime().map(|who| voting.ayes.iter().any(|a| a == &who)); - - // default voting strategy. 
- let default = T::DefaultVote::default_vote(prime_vote, yes_votes, no_votes, seats); - - let abstentions = seats.saturating_sub(yes_votes.saturating_add(no_votes)); - match default { - true => yes_votes = yes_votes.saturating_add(abstentions), - false => no_votes = no_votes.saturating_add(abstentions), - } - let approved = yes_votes >= voting.threshold; - - if approved { - let (proposal, len) = Self::validate_and_get_proposal( - &proposal_hash, - length_bound, - proposal_weight_bound, - )?; - Self::deposit_event(Event::Closed { - proposal_hash, - yes: yes_votes, - no: no_votes, - }); - let (proposal_weight, proposal_count) = - Self::do_approve_proposal(seats, yes_votes, proposal_hash, proposal); - Ok(( - Some( - T::WeightInfo::close_approved(len as u32, seats, proposal_count) - .saturating_add(proposal_weight), - ), - Pays::Yes, - ) - .into()) - } else { - Self::deposit_event(Event::Closed { - proposal_hash, - yes: yes_votes, - no: no_votes, - }); - let proposal_count = Self::do_disapprove_proposal(proposal_hash); - Ok(( - Some(T::WeightInfo::close_disapproved(seats, proposal_count)), - Pays::No, - ) - .into()) - } - } - - /// Ensure that the right proposal bounds were passed and get the proposal from storage. - /// - /// Checks the length in storage via `storage::read` which adds an extra `size_of::() == 4` - /// to the length. - fn validate_and_get_proposal( - hash: &T::Hash, - length_bound: u32, - weight_bound: Weight, - ) -> Result<(>::Proposal, usize), DispatchError> { - let key = ProposalOf::::hashed_key_for(hash); - // read the length of the proposal storage entry directly - let proposal_len = - storage::read(&key, &mut [0; 0], 0).ok_or(Error::::ProposalNotExists)?; - ensure!( - proposal_len <= length_bound, - Error::::ProposalLengthBoundLessThanProposalLength - ); - let proposal = ProposalOf::::get(hash).ok_or(Error::::ProposalNotExists)?; - let proposal_weight = proposal.get_dispatch_info().call_weight; - ensure!( - proposal_weight.all_lte(weight_bound), - Error::::ProposalWeightLessThanDispatchCallWeight - ); - Ok((proposal, proposal_len as usize)) - } - - /// Weight: - /// If `approved`: - /// - the weight of `proposal` preimage. - /// - two events deposited. - /// - two removals, one mutation. - /// - computation and i/o `O(P + L)` where: - /// - `P` is number of active proposals, - /// - `L` is the encoded length of `proposal` preimage. - /// - /// If not `approved`: - /// - one event deposited. - /// - two removals, one mutation. - /// - computation and i/o `O(P)` where: - /// - `P` is number of active proposals - fn do_approve_proposal( - seats: MemberCount, - yes_votes: MemberCount, - proposal_hash: T::Hash, - proposal: >::Proposal, - ) -> (Weight, u32) { - Self::deposit_event(Event::Approved { proposal_hash }); - - let dispatch_weight = proposal.get_dispatch_info().call_weight; - let origin = RawOrigin::Members(yes_votes, seats).into(); - let result = proposal.dispatch(origin); - Self::deposit_event(Event::Executed { - proposal_hash, - result: result.map(|_| ()).map_err(|e| e.error), - }); - // default to the dispatch info weight for safety - let proposal_weight = get_result_weight(result).unwrap_or(dispatch_weight); // P1 - - let proposal_count = Self::remove_proposal(proposal_hash); - (proposal_weight, proposal_count) - } - - /// Removes a proposal from the pallet, and deposit the `Disapproved` event. 
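The `do_close` logic above short-circuits before the voting period ends: it approves as soon as `yes_votes >= threshold`, and disapproves as soon as `seats.saturating_sub(no_votes) < threshold`, i.e. once the ayes could not reach the threshold even if every member who has not yet voted nay voted aye. A standalone illustration of that early-disapproval test (names are illustrative only):

/// Could the ayes still reach `threshold`, given `no_votes` nays among `seats` members?
fn can_still_reach_threshold(seats: u32, no_votes: u32, threshold: u32) -> bool {
    // Upper bound on ayes: everyone who has not voted nay eventually votes aye.
    seats.saturating_sub(no_votes) >= threshold
}

fn main() {
    // 10 seats, threshold 6: with 4 nays the ayes can still reach 6, but a
    // 5th nay makes the threshold unreachable, so the motion can be closed
    // as disapproved without waiting for the voting period to end.
    assert!(can_still_reach_threshold(10, 4, 6));
    assert!(!can_still_reach_threshold(10, 5, 6));
}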
- pub fn do_disapprove_proposal(proposal_hash: T::Hash) -> u32 { - // disapproved - Self::deposit_event(Event::Disapproved { proposal_hash }); - Self::remove_proposal(proposal_hash) - } - - // Removes a proposal from the pallet, cleaning up votes and the vector of proposals. - fn remove_proposal(proposal_hash: T::Hash) -> u32 { - // remove proposal and vote - ProposalOf::::remove(proposal_hash); - Voting::::remove(proposal_hash); - let num_proposals = Proposals::::mutate(|proposals| { - proposals.retain(|h| h != &proposal_hash); - proposals.len().saturating_add(1) // calculate weight based on original length - }); - num_proposals as u32 - } - - pub fn remove_votes(who: &T::AccountId) -> Result { - for h in Self::proposals().into_iter() { - >::mutate(h, |v| { - if let Some(mut votes) = v.take() { - votes.ayes.retain(|i| i != who); - votes.nays.retain(|i| i != who); - *v = Some(votes); - } - }); - } - - Ok(true) - } - - pub fn has_voted( - proposal: T::Hash, - index: ProposalIndex, - who: &T::AccountId, - ) -> Result { - let voting = Self::voting(proposal).ok_or(Error::::ProposalNotExists)?; - ensure!( - voting.index == index, - Error::::IndexMismatchProposalHash - ); - - let position_yes = voting.ayes.iter().position(|a| a == who); - let position_no = voting.nays.iter().position(|a| a == who); - - Ok(position_yes.is_some() || position_no.is_some()) - } -} - -impl, I: 'static> ChangeMembers for Pallet { - /// Update the members of the collective. Votes are updated and the prime is reset. - /// - /// NOTE: Does not enforce the expected `MaxMembers` limit on the amount of members, but - /// the weight estimations rely on it to estimate dispatchable weight. - /// - /// ## Complexity - /// - `O(MP + N)` - /// - where `M` old-members-count (governance-bounded) - /// - where `N` new-members-count (governance-bounded) - /// - where `P` proposals-count - fn change_members_sorted( - _incoming: &[T::AccountId], - outgoing: &[T::AccountId], - new: &[T::AccountId], - ) { - if new.len() > T::MaxMembers::get() as usize { - log::error!( - target: LOG_TARGET, - "New members count ({}) exceeds maximum amount of members expected ({}).", - new.len(), - T::MaxMembers::get(), - ); - } - // remove accounts from all current voting in motions. - let mut outgoing = outgoing.to_vec(); - outgoing.sort(); - for h in Self::proposals().into_iter() { - >::mutate(h, |v| { - if let Some(mut votes) = v.take() { - votes.ayes.retain(|i| outgoing.binary_search(i).is_err()); - votes.nays.retain(|i| outgoing.binary_search(i).is_err()); - *v = Some(votes); - } - }); - } - Members::::put(new); - Prime::::kill(); - } - - fn set_prime(prime: Option) { - Prime::::set(prime); - } - - fn get_prime() -> Option { - Prime::::get() - } -} - -impl, I: 'static> InitializeMembers for Pallet { - fn initialize_members(members: &[T::AccountId]) { - if !members.is_empty() { - assert!( - >::get().is_empty(), - "Members are already initialized!" - ); - >::put(members); - } - } -} - -/// Ensure that the origin `o` represents at least `n` members. Returns `Ok` or an `Err` -/// otherwise. 
-pub fn ensure_members( - o: OuterOrigin, - n: MemberCount, -) -> result::Result -where - OuterOrigin: Into, OuterOrigin>>, -{ - match o.into() { - Ok(RawOrigin::Members(x, _)) if x >= n => Ok(n), - _ => Err("bad origin: expected to be a threshold number of members"), - } -} - -pub struct EnsureMember(PhantomData<(AccountId, I)>); -impl< - O: Into, O>> + From>, - I, - AccountId: Decode, -> EnsureOrigin for EnsureMember -{ - type Success = AccountId; - fn try_origin(o: O) -> Result { - o.into().and_then(|o| match o { - RawOrigin::Member(id) => Ok(id), - r => Err(O::from(r)), - }) - } - - #[cfg(feature = "runtime-benchmarks")] - fn try_successful_origin() -> Result { - let zero_account_id = - AccountId::decode(&mut sp_runtime::traits::TrailingZeroInput::zeroes()) - .expect("infinite length input; no invalid inputs for type; qed"); - Ok(O::from(RawOrigin::Member(zero_account_id))) - } -} - -pub struct EnsureMembers(PhantomData<(AccountId, I)>); -impl< - O: Into, O>> + From>, - AccountId, - I, - const N: u32, -> EnsureOrigin for EnsureMembers -{ - type Success = (MemberCount, MemberCount); - fn try_origin(o: O) -> Result { - o.into().and_then(|o| match o { - RawOrigin::Members(n, m) if n >= N => Ok((n, m)), - r => Err(O::from(r)), - }) - } - - #[cfg(feature = "runtime-benchmarks")] - fn try_successful_origin() -> Result { - Ok(O::from(RawOrigin::Members(N, N))) - } -} - -pub struct EnsureProportionMoreThan( - PhantomData<(AccountId, I)>, -); -impl< - O: Into, O>> + From>, - AccountId, - I, - const N: u32, - const D: u32, -> EnsureOrigin for EnsureProportionMoreThan -{ - type Success = (); - fn try_origin(o: O) -> Result { - o.into().and_then(|o| match o { - RawOrigin::Members(n, m) if n.saturating_mul(D) > N.saturating_mul(m) => Ok(()), - r => Err(O::from(r)), - }) - } - - #[cfg(feature = "runtime-benchmarks")] - fn try_successful_origin() -> Result { - Ok(O::from(RawOrigin::Members(1u32, 0u32))) - } -} - -pub struct EnsureProportionAtLeast( - PhantomData<(AccountId, I)>, -); -impl< - O: Into, O>> + From>, - AccountId, - I, - const N: u32, - const D: u32, -> EnsureOrigin for EnsureProportionAtLeast -{ - type Success = (); - fn try_origin(o: O) -> Result { - o.into().and_then(|o| match o { - RawOrigin::Members(n, m) if n.saturating_mul(D) >= N.saturating_mul(m) => Ok(()), - r => Err(O::from(r)), - }) - } - - #[cfg(feature = "runtime-benchmarks")] - fn try_successful_origin() -> Result { - Ok(O::from(RawOrigin::Members(0u32, 0u32))) - } -} - -/// CanPropose -pub trait CanPropose { - /// Check whether or not the passed AccountId can propose a new motion - fn can_propose(account: &AccountId) -> bool; -} - -impl CanPropose for () { - fn can_propose(_: &T) -> bool { - false - } -} - -/// CanVote -pub trait CanVote { - /// Check whether or not the passed AccountId can vote on a motion - fn can_vote(account: &AccountId) -> bool; -} - -impl CanVote for () { - fn can_vote(_: &T) -> bool { - false - } -} - -pub trait GetVotingMembers { - fn get_count() -> MemberCount; -} - -impl GetVotingMembers for () { - fn get_count() -> MemberCount { - 0 - } -} diff --git a/pallets/collective/src/tests.rs b/pallets/collective/src/tests.rs deleted file mode 100644 index d447384746..0000000000 --- a/pallets/collective/src/tests.rs +++ /dev/null @@ -1,1582 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. 
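The `EnsureProportionMoreThan` and `EnsureProportionAtLeast` origin guards above carry the required fraction as const generics `N`/`D`; `EnsureProportionAtLeast` admits a `RawOrigin::Members(n, m)` origin when `n * D >= N * m`, i.e. when at least `N`/`D` of the `m` voting members approved. A standalone restatement of that check, with the fraction and tallies chosen purely for illustration (the pallet itself uses `saturating_mul`; here the factors are widened to `u64` to avoid overflow):

/// Does `n` of `m` approvals meet at least `N`/`D` of the membership?
fn at_least_proportion<const N: u32, const D: u32>(n: u32, m: u32) -> bool {
    (n as u64) * (D as u64) >= (N as u64) * (m as u64)
}

fn main() {
    // Two thirds of a 9-member collective: 6 ayes meet the bar, 5 do not.
    assert!(at_least_proportion::<2, 3>(6, 9));
    assert!(!at_least_proportion::<2, 3>(5, 9));
}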
-// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#![allow(non_camel_case_types, clippy::indexing_slicing, clippy::unwrap_used)] - -use super::{Event as CollectiveEvent, *}; -use crate as pallet_collective; -use frame_support::{ - Hashable, assert_noop, assert_ok, derive_impl, parameter_types, traits::ConstU64, -}; -use frame_system::{EnsureRoot, EventRecord, Phase}; -use sp_core::H256; -use sp_runtime::{ - BuildStorage, - testing::{Header, TestXt}, - traits::{BlakeTwo256, IdentityLookup}, -}; - -pub type Block = sp_runtime::generic::Block; -pub type UncheckedExtrinsic = TestXt; - -frame_support::construct_runtime!( - pub enum Test - { - System: frame_system::{Pallet, Call, Event} = 1, - Collective: pallet_collective::::{Pallet, Call, Event, Origin, Config} = 2, - CollectiveMajority: pallet_collective::::{Pallet, Call, Event, Origin, Config} = 3, - DefaultCollective: pallet_collective::{Pallet, Call, Event, Origin, Config} = 4, - Democracy: mock_democracy::{Pallet, Call, Event} = 5, - } -); -mod mock_democracy { - pub use pallet::*; - #[frame_support::pallet(dev_mode)] - pub mod pallet { - use frame_support::pallet_prelude::*; - use frame_system::pallet_prelude::*; - - #[pallet::pallet] - pub struct Pallet(_); - - #[pallet::config] - pub trait Config: frame_system::Config + Sized { - type ExternalMajorityOrigin: EnsureOrigin; - } - - #[pallet::call] - impl Pallet { - #[pallet::call_index(0)] - pub fn external_propose_majority(origin: OriginFor) -> DispatchResult { - T::ExternalMajorityOrigin::ensure_origin(origin)?; - Self::deposit_event(Event::::ExternalProposed); - Ok(()) - } - } - - #[pallet::event] - #[pallet::generate_deposit(pub(super) fn deposit_event)] - pub enum Event { - ExternalProposed, - } - } -} - -pub type MaxMembers = ConstU32<100>; - -parameter_types! 
{ - pub const MotionDuration: u64 = 3; - pub const MaxProposals: u32 = 257; -} - -#[derive_impl(frame_system::config_preludes::TestDefaultConfig)] -impl frame_system::Config for Test { - type BaseCallFilter = frame_support::traits::Everything; - type BlockWeights = (); - type BlockLength = (); - type DbWeight = (); - type RuntimeOrigin = RuntimeOrigin; - type RuntimeCall = RuntimeCall; - type Hash = H256; - type Hashing = BlakeTwo256; - type AccountId = u64; - type Lookup = IdentityLookup; - type RuntimeEvent = RuntimeEvent; - type BlockHashCount = ConstU64<250>; - type Version = (); - type PalletInfo = PalletInfo; - type AccountData = (); - type OnNewAccount = (); - type OnKilledAccount = (); - type SystemWeightInfo = (); - type SS58Prefix = (); - type OnSetCode = (); - type MaxConsumers = ConstU32<16>; - type Block = Block; - type Nonce = u64; -} - -pub struct CanProposeCollective; -impl CanPropose<::AccountId> for CanProposeCollective { - fn can_propose(who: &::AccountId) -> bool { - Collective::is_member(who) - } -} - -pub struct CanVoteCollective; -impl CanVote<::AccountId> for CanVoteCollective { - fn can_vote(who: &::AccountId) -> bool { - Collective::is_member(who) - } -} - -pub struct GetCollectiveCount; -impl GetVotingMembers for GetCollectiveCount { - fn get_count() -> MemberCount { - Collective::members().len() as u32 - } -} -impl Get for GetCollectiveCount { - fn get() -> MemberCount { - ::get() - } -} - -impl Config for Test { - type RuntimeOrigin = RuntimeOrigin; - type Proposal = RuntimeCall; - type MotionDuration = ConstU64<3>; - type MaxProposals = MaxProposals; - type MaxMembers = MaxMembers; - type DefaultVote = PrimeDefaultVote; - type WeightInfo = (); - type SetMembersOrigin = EnsureRoot; - type CanPropose = CanProposeCollective; - type CanVote = CanVoteCollective; - type GetVotingMembers = GetCollectiveCount; -} - -pub struct CanProposeCollectiveMajority; -impl CanPropose<::AccountId> for CanProposeCollectiveMajority { - fn can_propose(who: &::AccountId) -> bool { - CollectiveMajority::is_member(who) - } -} - -pub struct CanVoteCollectiveMajority; -impl CanVote<::AccountId> for CanVoteCollectiveMajority { - fn can_vote(who: &::AccountId) -> bool { - CollectiveMajority::is_member(who) - } -} - -pub struct GetCollectiveMajorityCount; -impl GetVotingMembers for GetCollectiveMajorityCount { - fn get_count() -> MemberCount { - CollectiveMajority::members().len() as u32 - } -} -impl Get for GetCollectiveMajorityCount { - fn get() -> MemberCount { - ::get() - } -} - -impl Config for Test { - type RuntimeOrigin = RuntimeOrigin; - type Proposal = RuntimeCall; - type MotionDuration = ConstU64<3>; - type MaxProposals = MaxProposals; - type MaxMembers = MaxMembers; - type DefaultVote = MoreThanMajorityThenPrimeDefaultVote; - type WeightInfo = (); - type SetMembersOrigin = EnsureRoot; - type CanPropose = CanProposeCollectiveMajority; - type CanVote = CanVoteCollectiveMajority; - type GetVotingMembers = GetCollectiveMajorityCount; -} -impl mock_democracy::Config for Test { - type ExternalMajorityOrigin = EnsureProportionAtLeast; -} - -pub struct CanProposeDefaultCollective; -impl CanPropose<::AccountId> for CanProposeDefaultCollective { - fn can_propose(who: &::AccountId) -> bool { - DefaultCollective::is_member(who) - } -} - -pub struct CanVoteDefaultCollective; -impl CanVote<::AccountId> for CanVoteDefaultCollective { - fn can_vote(who: &::AccountId) -> bool { - DefaultCollective::is_member(who) - } -} - -pub struct GetDefaultCollectiveCount; -impl GetVotingMembers for 
GetDefaultCollectiveCount { - fn get_count() -> MemberCount { - DefaultCollective::members().len() as u32 - } -} -impl Get for GetDefaultCollectiveCount { - fn get() -> MemberCount { - ::get() - } -} - -impl Config for Test { - type RuntimeOrigin = RuntimeOrigin; - type Proposal = RuntimeCall; - type MotionDuration = ConstU64<3>; - type MaxProposals = MaxProposals; - type MaxMembers = MaxMembers; - type DefaultVote = PrimeDefaultVote; - type WeightInfo = (); - type SetMembersOrigin = EnsureRoot; - type CanPropose = CanProposeDefaultCollective; - type CanVote = CanVoteDefaultCollective; - type GetVotingMembers = GetDefaultCollectiveCount; -} - -pub fn new_test_ext() -> sp_io::TestExternalities { - let mut ext: sp_io::TestExternalities = RuntimeGenesisConfig { - collective: pallet_collective::GenesisConfig { - members: vec![1, 2, 3], - phantom: Default::default(), - }, - collective_majority: pallet_collective::GenesisConfig { - members: vec![1, 2, 3, 4, 5], - phantom: Default::default(), - }, - default_collective: Default::default(), - } - .build_storage() - .unwrap() - .into(); - ext.execute_with(|| System::set_block_number(1)); - ext -} - -fn make_proposal(value: u64) -> RuntimeCall { - RuntimeCall::System(frame_system::Call::remark_with_event { - remark: value.to_be_bytes().to_vec(), - }) -} - -fn record(event: RuntimeEvent) -> EventRecord { - EventRecord { - phase: Phase::Initialization, - event, - topics: vec![], - } -} - -#[test] -fn motions_basic_environment_works() { - new_test_ext().execute_with(|| { - assert_eq!(Collective::members(), vec![1, 2, 3]); - assert_eq!(*Collective::proposals(), Vec::::new()); - }); -} - -#[test] -fn close_works() { - new_test_ext().execute_with(|| { - let proposal = make_proposal(42); - let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); - let proposal_weight = proposal.get_dispatch_info().call_weight; - let hash = BlakeTwo256::hash_of(&proposal); - - assert_ok!(Collective::propose( - RuntimeOrigin::signed(1), - Box::new(proposal.clone()), - proposal_len, - TryInto::>::try_into(3u64).expect("convert u64 to block number.") - )); - assert_ok!(Collective::vote(RuntimeOrigin::signed(1), hash, 0, true)); - - System::set_block_number(3); - assert_noop!( - Collective::close( - RuntimeOrigin::root(), - hash, - 0, - proposal_weight, - proposal_len - ), - Error::::TooEarlyToCloseProposal - ); - - System::set_block_number(4); - assert_ok!(Collective::close( - RuntimeOrigin::root(), - hash, - 0, - proposal_weight, - proposal_len - )); - - assert_eq!( - System::events(), - vec![ - record(RuntimeEvent::Collective(CollectiveEvent::Proposed { - account: 1, - proposal_index: 0, - proposal_hash: hash, - threshold: 2 - })), - record(RuntimeEvent::Collective(CollectiveEvent::Voted { - account: 1, - proposal_hash: hash, - voted: true, - yes: 1, - no: 0 - })), - record(RuntimeEvent::Collective(CollectiveEvent::Closed { - proposal_hash: hash, - yes: 1, - no: 2 - })), - record(RuntimeEvent::Collective(CollectiveEvent::Disapproved { - proposal_hash: hash - })) - ] - ); - }); -} - -#[test] -fn proposal_weight_limit_works_on_approve() { - new_test_ext().execute_with(|| { - let proposal = RuntimeCall::Collective(crate::Call::set_members { - new_members: vec![1, 2, 3], - prime: None, - old_count: ::get(), - }); - let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); - let proposal_weight = proposal.get_dispatch_info().call_weight; - let hash = BlakeTwo256::hash_of(&proposal); - // Set 1 as prime voter - Prime::::set(Some(1)); - 
assert_ok!(Collective::propose( - RuntimeOrigin::signed(1), - Box::new(proposal.clone()), - proposal_len, - TryInto::>::try_into(3u64).expect("convert u64 to block number.") - )); - assert_ok!(Collective::vote(RuntimeOrigin::signed(1), hash, 0, true)); - // With 1's prime vote, this should pass - System::set_block_number(4); - assert_noop!( - Collective::close( - RuntimeOrigin::root(), - hash, - 0, - proposal_weight - Weight::from_parts(100, 0), - proposal_len - ), - Error::::ProposalWeightLessThanDispatchCallWeight - ); - assert_ok!(Collective::close( - RuntimeOrigin::root(), - hash, - 0, - proposal_weight, - proposal_len - )); - }) -} - -#[test] -fn proposal_weight_limit_ignored_on_disapprove() { - new_test_ext().execute_with(|| { - let proposal = RuntimeCall::Collective(crate::Call::set_members { - new_members: vec![1, 2, 3], - prime: None, - old_count: ::get(), - }); - let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); - let proposal_weight = proposal.get_dispatch_info().call_weight; - let hash = BlakeTwo256::hash_of(&proposal); - - assert_ok!(Collective::propose( - RuntimeOrigin::signed(1), - Box::new(proposal.clone()), - proposal_len, - TryInto::>::try_into(3u64).expect("convert u64 to block number.") - )); - // No votes, this proposal wont pass - System::set_block_number(4); - assert_ok!(Collective::close( - RuntimeOrigin::root(), - hash, - 0, - proposal_weight - Weight::from_parts(100, 0), - proposal_len - )); - }) -} - -#[test] -fn close_with_prime_works() { - new_test_ext().execute_with(|| { - let proposal = make_proposal(42); - let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); - let proposal_weight = proposal.get_dispatch_info().call_weight; - let hash = BlakeTwo256::hash_of(&proposal); - assert_ok!(Collective::set_members( - RuntimeOrigin::root(), - vec![1, 2, 3], - Some(3), - ::get() - )); - - assert_ok!(Collective::propose( - RuntimeOrigin::signed(1), - Box::new(proposal.clone()), - proposal_len, - TryInto::>::try_into(3u64).expect("convert u64 to block number.") - )); - assert_ok!(Collective::vote(RuntimeOrigin::signed(1), hash, 0, true)); - - System::set_block_number(4); - assert_ok!(Collective::close( - RuntimeOrigin::root(), - hash, - 0, - proposal_weight, - proposal_len - )); - - assert_eq!( - System::events(), - vec![ - record(RuntimeEvent::Collective(CollectiveEvent::Proposed { - account: 1, - proposal_index: 0, - proposal_hash: hash, - threshold: 2 - })), - record(RuntimeEvent::Collective(CollectiveEvent::Voted { - account: 1, - proposal_hash: hash, - voted: true, - yes: 1, - no: 0 - })), - record(RuntimeEvent::Collective(CollectiveEvent::Closed { - proposal_hash: hash, - yes: 1, - no: 2 - })), - record(RuntimeEvent::Collective(CollectiveEvent::Disapproved { - proposal_hash: hash - })) - ] - ); - }); -} - -#[test] -fn close_with_voting_prime_works() { - new_test_ext().execute_with(|| { - let proposal = make_proposal(42); - let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); - let proposal_weight = proposal.get_dispatch_info().call_weight; - let hash = BlakeTwo256::hash_of(&proposal); - assert_ok!(Collective::set_members( - RuntimeOrigin::root(), - vec![1, 2, 3], - Some(1), - ::get() - )); - - assert_ok!(Collective::propose( - RuntimeOrigin::signed(1), - Box::new(proposal.clone()), - proposal_len, - TryInto::>::try_into(3u64).expect("convert u64 to block number.") - )); - assert_ok!(Collective::vote(RuntimeOrigin::signed(1), hash, 0, true)); - - System::set_block_number(4); - assert_ok!(Collective::close( - 
RuntimeOrigin::root(), - hash, - 0, - proposal_weight, - proposal_len - )); - - assert_eq!( - System::events(), - vec![ - record(RuntimeEvent::Collective(CollectiveEvent::Proposed { - account: 1, - proposal_index: 0, - proposal_hash: hash, - threshold: 2 - })), - record(RuntimeEvent::Collective(CollectiveEvent::Voted { - account: 1, - proposal_hash: hash, - voted: true, - yes: 1, - no: 0 - })), - record(RuntimeEvent::Collective(CollectiveEvent::Closed { - proposal_hash: hash, - yes: 3, - no: 0 - })), - record(RuntimeEvent::Collective(CollectiveEvent::Approved { - proposal_hash: hash - })), - record(RuntimeEvent::Collective(CollectiveEvent::Executed { - proposal_hash: hash, - result: Err(DispatchError::BadOrigin) - })) - ] - ); - }); -} - -#[test] -fn close_with_no_prime_but_majority_works() { - new_test_ext().execute_with(|| { - let proposal = make_proposal(42); - let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); - let proposal_weight = proposal.get_dispatch_info().call_weight; - let hash = BlakeTwo256::hash_of(&proposal); - assert_ok!(CollectiveMajority::set_members( - RuntimeOrigin::root(), - vec![1, 2, 3, 4, 5], - Some(5), - ::get() - )); - - assert_ok!(CollectiveMajority::propose( - RuntimeOrigin::signed(1), - Box::new(proposal.clone()), - proposal_len, - TryInto::>::try_into(3u64).expect("convert u64 to block number.") - )); - assert_ok!(CollectiveMajority::vote( - RuntimeOrigin::signed(1), - hash, - 0, - true - )); - assert_ok!(CollectiveMajority::vote( - RuntimeOrigin::signed(2), - hash, - 0, - true - )); - assert_ok!(CollectiveMajority::vote( - RuntimeOrigin::signed(3), - hash, - 0, - true - )); - - System::set_block_number(4); - assert_ok!(CollectiveMajority::close( - RuntimeOrigin::root(), - hash, - 0, - proposal_weight, - proposal_len - )); - - assert_eq!( - System::events(), - vec![ - record(RuntimeEvent::CollectiveMajority( - CollectiveEvent::Proposed { - account: 1, - proposal_index: 0, - proposal_hash: hash, - threshold: 3 - } - )), - record(RuntimeEvent::CollectiveMajority(CollectiveEvent::Voted { - account: 1, - proposal_hash: hash, - voted: true, - yes: 1, - no: 0 - })), - record(RuntimeEvent::CollectiveMajority(CollectiveEvent::Voted { - account: 2, - proposal_hash: hash, - voted: true, - yes: 2, - no: 0 - })), - record(RuntimeEvent::CollectiveMajority(CollectiveEvent::Voted { - account: 3, - proposal_hash: hash, - voted: true, - yes: 3, - no: 0 - })), - record(RuntimeEvent::CollectiveMajority(CollectiveEvent::Closed { - proposal_hash: hash, - yes: 3, - no: 0 - })), - record(RuntimeEvent::CollectiveMajority( - CollectiveEvent::Approved { - proposal_hash: hash - } - )), - record(RuntimeEvent::CollectiveMajority( - CollectiveEvent::Executed { - proposal_hash: hash, - result: Err(DispatchError::BadOrigin) - } - )) - ] - ); - }); -} - -#[test] -fn removal_of_old_voters_votes_works() { - new_test_ext().execute_with(|| { - let proposal = make_proposal(42); - let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); - let hash = BlakeTwo256::hash_of(&proposal); - let end = 4; - assert_ok!(Collective::propose( - RuntimeOrigin::signed(1), - Box::new(proposal.clone()), - proposal_len, - TryInto::>::try_into(3u64).expect("convert u64 to block number.") - )); - assert_ok!(Collective::vote(RuntimeOrigin::signed(1), hash, 0, true)); - assert_ok!(Collective::vote(RuntimeOrigin::signed(2), hash, 0, true)); - assert_eq!( - Collective::voting(hash), - Some(Votes { - index: 0, - threshold: 2, - ayes: vec![1, 2], - nays: vec![], - end - }) - ); - 
Collective::change_members_sorted(&[4], &[1], &[2, 3, 4]); - assert_eq!( - Collective::voting(hash), - Some(Votes { - index: 0, - threshold: 2, - ayes: vec![2], - nays: vec![], - end - }) - ); - - let proposal = make_proposal(69); - let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); - let hash = BlakeTwo256::hash_of(&proposal); - assert_ok!(Collective::propose( - RuntimeOrigin::signed(2), - Box::new(proposal.clone()), - proposal_len, - TryInto::>::try_into(3u64).expect("convert u64 to block number.") - )); - assert_ok!(Collective::vote(RuntimeOrigin::signed(2), hash, 1, true)); - assert_ok!(Collective::vote(RuntimeOrigin::signed(3), hash, 1, false)); - assert_eq!( - Collective::voting(hash), - Some(Votes { - index: 1, - threshold: 2, - ayes: vec![2], - nays: vec![3], - end - }) - ); - Collective::change_members_sorted(&[], &[3], &[2, 4]); - assert_eq!( - Collective::voting(hash), - Some(Votes { - index: 1, - threshold: 2, - ayes: vec![2], - nays: vec![], - end - }) - ); - }); -} - -#[test] -fn removal_of_old_voters_votes_works_with_set_members() { - new_test_ext().execute_with(|| { - let proposal = make_proposal(42); - let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); - let hash = BlakeTwo256::hash_of(&proposal); - let end = 4; - assert_ok!(Collective::propose( - RuntimeOrigin::signed(1), - Box::new(proposal.clone()), - proposal_len, - TryInto::>::try_into(3u64).expect("convert u64 to block number.") - )); - assert_ok!(Collective::vote(RuntimeOrigin::signed(1), hash, 0, true)); - assert_ok!(Collective::vote(RuntimeOrigin::signed(2), hash, 0, true)); - assert_eq!( - Collective::voting(hash), - Some(Votes { - index: 0, - threshold: 2, - ayes: vec![1, 2], - nays: vec![], - end - }) - ); - assert_ok!(Collective::set_members( - RuntimeOrigin::root(), - vec![2, 3, 4], - None, - ::get() - )); - assert_eq!( - Collective::voting(hash), - Some(Votes { - index: 0, - threshold: 2, - ayes: vec![2], - nays: vec![], - end - }) - ); - - let proposal = make_proposal(69); - let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); - let hash = BlakeTwo256::hash_of(&proposal); - assert_ok!(Collective::propose( - RuntimeOrigin::signed(2), - Box::new(proposal.clone()), - proposal_len, - TryInto::>::try_into(3u64).expect("convert u64 to block number.") - )); - assert_ok!(Collective::vote(RuntimeOrigin::signed(2), hash, 1, true)); - assert_ok!(Collective::vote(RuntimeOrigin::signed(3), hash, 1, false)); - assert_eq!( - Collective::voting(hash), - Some(Votes { - index: 1, - threshold: 2, - ayes: vec![2], - nays: vec![3], - end - }) - ); - assert_ok!(Collective::set_members( - RuntimeOrigin::root(), - vec![2, 4], - None, - ::get() - )); - assert_eq!( - Collective::voting(hash), - Some(Votes { - index: 1, - threshold: 2, - ayes: vec![2], - nays: vec![], - end - }) - ); - }); -} - -#[test] -fn propose_works() { - new_test_ext().execute_with(|| { - let proposal = make_proposal(42); - let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); - let hash = proposal.blake2_256().into(); - let end = 4; - assert_ok!(Collective::propose( - RuntimeOrigin::signed(1), - Box::new(proposal.clone()), - proposal_len, - TryInto::>::try_into(3u64).expect("convert u64 to block number.") - )); - assert_eq!(*Collective::proposals(), vec![hash]); - assert_eq!(Collective::proposal_of(hash), Some(proposal)); - assert_eq!( - Collective::voting(hash), - Some(Votes { - index: 0, - threshold: 2, - ayes: vec![], - nays: vec![], - end - }) - ); - - assert_eq!( - System::events(), - 
vec![record(RuntimeEvent::Collective( - CollectiveEvent::Proposed { - account: 1, - proposal_index: 0, - proposal_hash: hash, - threshold: 2 - } - ))] - ); - }); -} - -#[test] -fn limit_active_proposals() { - new_test_ext().execute_with(|| { - for i in 0..::get() { - let proposal = make_proposal(i as u64); - let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); - assert_ok!(Collective::propose( - RuntimeOrigin::signed(1), - Box::new(proposal.clone()), - proposal_len, - TryInto::>::try_into(3u64) - .expect("convert u64 to block number.") - )); - } - let proposal = make_proposal(::get() as u64 + 1); - let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); - assert_noop!( - Collective::propose( - RuntimeOrigin::signed(1), - Box::new(proposal.clone()), - proposal_len, - TryInto::>::try_into(3u64) - .expect("convert u64 to block number.") - ), - Error::::TooManyActiveProposals - ); - }) -} - -#[test] -fn correct_validate_and_get_proposal() { - new_test_ext().execute_with(|| { - let proposal = RuntimeCall::Collective(crate::Call::set_members { - new_members: vec![1, 2, 3], - prime: None, - old_count: ::get(), - }); - let length = proposal.encode().len() as u32; - assert_ok!(Collective::propose( - RuntimeOrigin::signed(1), - Box::new(proposal.clone()), - length, - TryInto::>::try_into(3u64).expect("convert u64 to block number.") - )); - - let hash = BlakeTwo256::hash_of(&proposal); - let weight = proposal.get_dispatch_info().call_weight; - assert_noop!( - Collective::validate_and_get_proposal( - &BlakeTwo256::hash_of(&vec![3; 4]), - length, - weight - ), - Error::::ProposalNotExists - ); - assert_noop!( - Collective::validate_and_get_proposal(&hash, length - 2, weight), - Error::::ProposalLengthBoundLessThanProposalLength - ); - assert_noop!( - Collective::validate_and_get_proposal( - &hash, - length, - weight - Weight::from_parts(10, 0) - ), - Error::::ProposalWeightLessThanDispatchCallWeight - ); - let res = Collective::validate_and_get_proposal(&hash, length, weight); - assert_ok!(res.clone()); - let (retrieved_proposal, len) = res.unwrap(); - assert_eq!(length as usize, len); - assert_eq!(proposal, retrieved_proposal); - }) -} - -#[test] -fn motions_ignoring_non_collective_proposals_works() { - new_test_ext().execute_with(|| { - let proposal = make_proposal(42); - let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); - assert_noop!( - Collective::propose( - RuntimeOrigin::signed(42), - Box::new(proposal.clone()), - proposal_len, - TryInto::>::try_into(3u64) - .expect("convert u64 to block number.") - ), - Error::::NotMember - ); - }); -} - -#[test] -fn motions_ignoring_non_collective_votes_works() { - new_test_ext().execute_with(|| { - let proposal = make_proposal(42); - let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); - let hash: H256 = proposal.blake2_256().into(); - assert_ok!(Collective::propose( - RuntimeOrigin::signed(1), - Box::new(proposal.clone()), - proposal_len, - TryInto::>::try_into(3u64).expect("convert u64 to block number.") - )); - assert_noop!( - Collective::vote(RuntimeOrigin::signed(42), hash, 0, true), - Error::::NotMember, - ); - }); -} - -#[test] -fn motions_ignoring_bad_index_collective_vote_works() { - new_test_ext().execute_with(|| { - System::set_block_number(3); - let proposal = make_proposal(42); - let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); - let hash: H256 = proposal.blake2_256().into(); - assert_ok!(Collective::propose( - RuntimeOrigin::signed(1), - Box::new(proposal.clone()), - 
proposal_len, - TryInto::>::try_into(3u64).expect("convert u64 to block number.") - )); - assert_noop!( - Collective::vote(RuntimeOrigin::signed(2), hash, 1, true), - Error::::IndexMismatchProposalHash, - ); - }); -} - -#[test] -fn motions_vote_after_works() { - new_test_ext().execute_with(|| { - let proposal = make_proposal(42); - let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); - let hash: H256 = proposal.blake2_256().into(); - let end = 4; - assert_ok!(Collective::propose( - RuntimeOrigin::signed(1), - Box::new(proposal.clone()), - proposal_len, - TryInto::>::try_into(3u64).expect("convert u64 to block number.") - )); - // Initially there a no votes when the motion is proposed. - assert_eq!( - Collective::voting(hash), - Some(Votes { - index: 0, - threshold: 2, - ayes: vec![], - nays: vec![], - end - }) - ); - // Cast first aye vote. - assert_ok!(Collective::vote(RuntimeOrigin::signed(1), hash, 0, true)); - assert_eq!( - Collective::voting(hash), - Some(Votes { - index: 0, - threshold: 2, - ayes: vec![1], - nays: vec![], - end - }) - ); - // Try to cast a duplicate aye vote. - assert_noop!( - Collective::vote(RuntimeOrigin::signed(1), hash, 0, true), - Error::::DuplicateVote, - ); - // Cast a nay vote. - assert_ok!(Collective::vote(RuntimeOrigin::signed(1), hash, 0, false)); - assert_eq!( - Collective::voting(hash), - Some(Votes { - index: 0, - threshold: 2, - ayes: vec![], - nays: vec![1], - end - }) - ); - // Try to cast a duplicate nay vote. - assert_noop!( - Collective::vote(RuntimeOrigin::signed(1), hash, 0, false), - Error::::DuplicateVote, - ); - - assert_eq!( - System::events(), - vec![ - record(RuntimeEvent::Collective(CollectiveEvent::Proposed { - account: 1, - proposal_index: 0, - proposal_hash: hash, - threshold: 2 - })), - record(RuntimeEvent::Collective(CollectiveEvent::Voted { - account: 1, - proposal_hash: hash, - voted: true, - yes: 1, - no: 0 - })), - record(RuntimeEvent::Collective(CollectiveEvent::Voted { - account: 1, - proposal_hash: hash, - voted: false, - yes: 0, - no: 1 - })), - ] - ); - }); -} - -#[test] -fn motions_all_first_vote_free_works() { - new_test_ext().execute_with(|| { - let proposal = make_proposal(42); - let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); - let hash: H256 = proposal.blake2_256().into(); - let end = 4; - assert_ok!(Collective::propose( - RuntimeOrigin::signed(1), - Box::new(proposal.clone()), - proposal_len, - TryInto::>::try_into(3u64).expect("convert u64 to block number.") - )); - assert_eq!( - Collective::voting(hash), - Some(Votes { - index: 0, - threshold: 2, - ayes: vec![], - nays: vec![], - end - }) - ); - - // For the motion, acc 2's first vote, expecting Ok with Pays::No. - let vote_rval: DispatchResultWithPostInfo = - Collective::vote(RuntimeOrigin::signed(2), hash, 0, true); - assert_eq!(vote_rval.unwrap().pays_fee, Pays::No); - - // Duplicate vote, expecting error with Pays::Yes. - let vote_rval: DispatchResultWithPostInfo = - Collective::vote(RuntimeOrigin::signed(2), hash, 0, true); - assert_eq!(vote_rval.unwrap_err().post_info.pays_fee, Pays::Yes); - - // Modifying vote, expecting ok with Pays::Yes. - let vote_rval: DispatchResultWithPostInfo = - Collective::vote(RuntimeOrigin::signed(2), hash, 0, false); - assert_eq!(vote_rval.unwrap().pays_fee, Pays::Yes); - - // For the motion, acc 3's first vote, expecting Ok with Pays::No. 
- let vote_rval: DispatchResultWithPostInfo = - Collective::vote(RuntimeOrigin::signed(3), hash, 0, true); - assert_eq!(vote_rval.unwrap().pays_fee, Pays::No); - - // acc 3 modify the vote, expecting Ok with Pays::Yes. - let vote_rval: DispatchResultWithPostInfo = - Collective::vote(RuntimeOrigin::signed(3), hash, 0, false); - assert_eq!(vote_rval.unwrap().pays_fee, Pays::Yes); - - // Test close() Extrincis | Check DispatchResultWithPostInfo with Pay Info - - let proposal_weight = proposal.get_dispatch_info().call_weight; - let close_rval: DispatchResultWithPostInfo = Collective::close( - RuntimeOrigin::root(), - hash, - 0, - proposal_weight, - proposal_len, - ); - assert_eq!(close_rval.unwrap().pays_fee, Pays::No); - - // Trying to close the proposal, which is already closed - // Error: "ProposalNotExists" with Pays::Yes. - let close_rval: DispatchResultWithPostInfo = Collective::close( - RuntimeOrigin::root(), - hash, - 0, - proposal_weight, - proposal_len, - ); - assert_eq!(close_rval.unwrap_err().post_info.pays_fee, Pays::Yes); - }); -} - -#[test] -fn motions_reproposing_disapproved_works() { - new_test_ext().execute_with(|| { - let proposal = make_proposal(42); - let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); - let proposal_weight = proposal.get_dispatch_info().call_weight; - let hash: H256 = proposal.blake2_256().into(); - assert_ok!(Collective::propose( - RuntimeOrigin::signed(1), - Box::new(proposal.clone()), - proposal_len, - TryInto::>::try_into(3u64).expect("convert u64 to block number.") - )); - - assert_ok!(Collective::vote(RuntimeOrigin::signed(1), hash, 0, false)); - assert_ok!(Collective::vote(RuntimeOrigin::signed(2), hash, 0, false)); - - assert_ok!(Collective::close( - RuntimeOrigin::root(), - hash, - 0, - proposal_weight, - proposal_len - )); - assert_eq!(*Collective::proposals(), vec![]); - assert_ok!(Collective::propose( - RuntimeOrigin::signed(1), - Box::new(proposal.clone()), - proposal_len, - TryInto::>::try_into(3u64).expect("convert u64 to block number.") - )); - assert_eq!(*Collective::proposals(), vec![hash]); - }); -} - -#[test] -fn motions_approval_with_enough_votes_and_lower_voting_threshold_works() { - new_test_ext().execute_with(|| { - let proposal = RuntimeCall::Democracy(mock_democracy::Call::external_propose_majority {}); - let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); - let proposal_weight = proposal.get_dispatch_info().call_weight; - let hash: H256 = proposal.blake2_256().into(); - // The voting threshold is 2, but the required votes for `ExternalMajorityOrigin` is 3. - // The proposal will be executed regardless of the voting threshold - // as long as we have enough yes votes. - // - // Failed to execute with only 2 yes votes. 
- assert_ok!(Collective::propose( - RuntimeOrigin::signed(1), - Box::new(proposal.clone()), - proposal_len, - TryInto::>::try_into(3u64).expect("convert u64 to block number.") - )); - assert_ok!(Collective::vote(RuntimeOrigin::signed(1), hash, 0, true)); - assert_ok!(Collective::vote(RuntimeOrigin::signed(2), hash, 0, true)); - assert_ok!(Collective::close( - RuntimeOrigin::root(), - hash, - 0, - proposal_weight, - proposal_len - )); - assert_eq!( - System::events(), - vec![ - record(RuntimeEvent::Collective(CollectiveEvent::Proposed { - account: 1, - proposal_index: 0, - proposal_hash: hash, - threshold: 2 - })), - record(RuntimeEvent::Collective(CollectiveEvent::Voted { - account: 1, - proposal_hash: hash, - voted: true, - yes: 1, - no: 0 - })), - record(RuntimeEvent::Collective(CollectiveEvent::Voted { - account: 2, - proposal_hash: hash, - voted: true, - yes: 2, - no: 0 - })), - record(RuntimeEvent::Collective(CollectiveEvent::Closed { - proposal_hash: hash, - yes: 2, - no: 0 - })), - record(RuntimeEvent::Collective(CollectiveEvent::Approved { - proposal_hash: hash - })), - record(RuntimeEvent::Collective(CollectiveEvent::Executed { - proposal_hash: hash, - result: Err(DispatchError::BadOrigin) - })), - ] - ); - - System::reset_events(); - - // Executed with 3 yes votes. - assert_ok!(Collective::propose( - RuntimeOrigin::signed(1), - Box::new(proposal.clone()), - proposal_len, - TryInto::>::try_into(3u64).expect("convert u64 to block number.") - )); - assert_ok!(Collective::vote(RuntimeOrigin::signed(1), hash, 1, true)); - assert_ok!(Collective::vote(RuntimeOrigin::signed(2), hash, 1, true)); - assert_ok!(Collective::vote(RuntimeOrigin::signed(3), hash, 1, true)); - assert_ok!(Collective::close( - RuntimeOrigin::root(), - hash, - 1, - proposal_weight, - proposal_len - )); - assert_eq!( - System::events(), - vec![ - record(RuntimeEvent::Collective(CollectiveEvent::Proposed { - account: 1, - proposal_index: 1, - proposal_hash: hash, - threshold: 2 - })), - record(RuntimeEvent::Collective(CollectiveEvent::Voted { - account: 1, - proposal_hash: hash, - voted: true, - yes: 1, - no: 0 - })), - record(RuntimeEvent::Collective(CollectiveEvent::Voted { - account: 2, - proposal_hash: hash, - voted: true, - yes: 2, - no: 0 - })), - record(RuntimeEvent::Collective(CollectiveEvent::Voted { - account: 3, - proposal_hash: hash, - voted: true, - yes: 3, - no: 0 - })), - record(RuntimeEvent::Collective(CollectiveEvent::Closed { - proposal_hash: hash, - yes: 3, - no: 0 - })), - record(RuntimeEvent::Collective(CollectiveEvent::Approved { - proposal_hash: hash - })), - record(RuntimeEvent::Democracy( - mock_democracy::pallet::Event::::ExternalProposed - )), - record(RuntimeEvent::Collective(CollectiveEvent::Executed { - proposal_hash: hash, - result: Ok(()) - })), - ] - ); - }); -} - -#[test] -fn motions_disapproval_works() { - new_test_ext().execute_with(|| { - let proposal = make_proposal(42); - let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); - let proposal_weight = proposal.get_dispatch_info().call_weight; - let hash: H256 = proposal.blake2_256().into(); - assert_ok!(Collective::propose( - RuntimeOrigin::signed(1), - Box::new(proposal.clone()), - proposal_len, - TryInto::>::try_into(3u64).expect("convert u64 to block number.") - )); - assert_ok!(Collective::vote(RuntimeOrigin::signed(1), hash, 0, false)); - assert_ok!(Collective::vote(RuntimeOrigin::signed(2), hash, 0, false)); - assert_ok!(Collective::close( - RuntimeOrigin::root(), - hash, - 0, - proposal_weight, - proposal_len - 
)); - - assert_eq!( - System::events(), - vec![ - record(RuntimeEvent::Collective(CollectiveEvent::Proposed { - account: 1, - proposal_index: 0, - proposal_hash: hash, - threshold: 2 - })), - record(RuntimeEvent::Collective(CollectiveEvent::Voted { - account: 1, - proposal_hash: hash, - voted: false, - yes: 0, - no: 1 - })), - record(RuntimeEvent::Collective(CollectiveEvent::Voted { - account: 2, - proposal_hash: hash, - voted: false, - yes: 0, - no: 2 - })), - record(RuntimeEvent::Collective(CollectiveEvent::Closed { - proposal_hash: hash, - yes: 0, - no: 2 - })), - record(RuntimeEvent::Collective(CollectiveEvent::Disapproved { - proposal_hash: hash - })), - ] - ); - }); -} - -#[test] -fn motions_approval_works() { - new_test_ext().execute_with(|| { - let proposal = make_proposal(42); - let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); - let proposal_weight = proposal.get_dispatch_info().call_weight; - let hash: H256 = proposal.blake2_256().into(); - assert_ok!(Collective::propose( - RuntimeOrigin::signed(1), - Box::new(proposal.clone()), - proposal_len, - TryInto::>::try_into(3u64).expect("convert u64 to block number.") - )); - assert_ok!(Collective::vote(RuntimeOrigin::signed(1), hash, 0, true)); - assert_ok!(Collective::vote(RuntimeOrigin::signed(2), hash, 0, true)); - assert_ok!(Collective::close( - RuntimeOrigin::root(), - hash, - 0, - proposal_weight, - proposal_len - )); - - assert_eq!( - System::events(), - vec![ - record(RuntimeEvent::Collective(CollectiveEvent::Proposed { - account: 1, - proposal_index: 0, - proposal_hash: hash, - threshold: 2 - })), - record(RuntimeEvent::Collective(CollectiveEvent::Voted { - account: 1, - proposal_hash: hash, - voted: true, - yes: 1, - no: 0 - })), - record(RuntimeEvent::Collective(CollectiveEvent::Voted { - account: 2, - proposal_hash: hash, - voted: true, - yes: 2, - no: 0 - })), - record(RuntimeEvent::Collective(CollectiveEvent::Closed { - proposal_hash: hash, - yes: 2, - no: 0 - })), - record(RuntimeEvent::Collective(CollectiveEvent::Approved { - proposal_hash: hash - })), - record(RuntimeEvent::Collective(CollectiveEvent::Executed { - proposal_hash: hash, - result: Err(DispatchError::BadOrigin) - })), - ] - ); - }); -} - -#[test] -fn motion_with_no_votes_closes_with_disapproval() { - new_test_ext().execute_with(|| { - let proposal = make_proposal(42); - let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); - let proposal_weight = proposal.get_dispatch_info().call_weight; - let hash: H256 = proposal.blake2_256().into(); - assert_ok!(Collective::propose( - RuntimeOrigin::signed(1), - Box::new(proposal.clone()), - proposal_len, - TryInto::>::try_into(3u64).expect("convert u64 to block number.") - )); - assert_eq!( - System::events()[0], - record(RuntimeEvent::Collective(CollectiveEvent::Proposed { - account: 1, - proposal_index: 0, - proposal_hash: hash, - threshold: 2 - })) - ); - - // Closing the motion too early is not possible because it has neither - // an approving or disapproving simple majority due to the lack of votes. - assert_noop!( - Collective::close( - RuntimeOrigin::root(), - hash, - 0, - proposal_weight, - proposal_len - ), - Error::::TooEarlyToCloseProposal - ); - - // Once the motion duration passes, - let closing_block = System::block_number() + ::get(); - System::set_block_number(closing_block); - // we can successfully close the motion. 
- assert_ok!(Collective::close( - RuntimeOrigin::root(), - hash, - 0, - proposal_weight, - proposal_len - )); - - // Events show that the close ended in a disapproval. - assert_eq!( - System::events()[1], - record(RuntimeEvent::Collective(CollectiveEvent::Closed { - proposal_hash: hash, - yes: 0, - no: 3 - })) - ); - assert_eq!( - System::events()[2], - record(RuntimeEvent::Collective(CollectiveEvent::Disapproved { - proposal_hash: hash - })) - ); - }) -} - -#[test] -fn close_disapprove_does_not_care_about_weight_or_len() { - // This test confirms that if you close a proposal that would be disapproved, - // we do not care about the proposal length or proposal weight since it will - // not be read from storage or executed. - new_test_ext().execute_with(|| { - let proposal = make_proposal(42); - let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); - let hash: H256 = proposal.blake2_256().into(); - assert_ok!(Collective::propose( - RuntimeOrigin::signed(1), - Box::new(proposal.clone()), - proposal_len, - TryInto::>::try_into(3u64).expect("convert u64 to block number.") - )); - // First we make the proposal succeed - assert_ok!(Collective::vote(RuntimeOrigin::signed(1), hash, 0, true)); - assert_ok!(Collective::vote(RuntimeOrigin::signed(2), hash, 0, true)); - // It will not close with bad weight/len information - assert_noop!( - Collective::close(RuntimeOrigin::root(), hash, 0, Weight::zero(), 0), - Error::::ProposalLengthBoundLessThanProposalLength, - ); - assert_noop!( - Collective::close(RuntimeOrigin::root(), hash, 0, Weight::zero(), proposal_len), - Error::::ProposalWeightLessThanDispatchCallWeight, - ); - // Now we make the proposal fail - assert_ok!(Collective::vote(RuntimeOrigin::signed(1), hash, 0, false)); - assert_ok!(Collective::vote(RuntimeOrigin::signed(2), hash, 0, false)); - // It can close even if the weight/len information is bad - assert_ok!(Collective::close( - RuntimeOrigin::root(), - hash, - 0, - Weight::zero(), - 0 - )); - }) -} - -#[test] -fn disapprove_proposal_works() { - new_test_ext().execute_with(|| { - let proposal = make_proposal(42); - let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); - let hash: H256 = proposal.blake2_256().into(); - assert_ok!(Collective::propose( - RuntimeOrigin::signed(1), - Box::new(proposal.clone()), - proposal_len, - TryInto::>::try_into(3u64).expect("convert u64 to block number.") - )); - // Proposal would normally succeed - assert_ok!(Collective::vote(RuntimeOrigin::signed(1), hash, 0, true)); - assert_ok!(Collective::vote(RuntimeOrigin::signed(2), hash, 0, true)); - // But Root can disapprove and remove it anyway - assert_ok!(Collective::disapprove_proposal(RuntimeOrigin::root(), hash)); - assert_eq!( - System::events(), - vec![ - record(RuntimeEvent::Collective(CollectiveEvent::Proposed { - account: 1, - proposal_index: 0, - proposal_hash: hash, - threshold: 2 - })), - record(RuntimeEvent::Collective(CollectiveEvent::Voted { - account: 1, - proposal_hash: hash, - voted: true, - yes: 1, - no: 0 - })), - record(RuntimeEvent::Collective(CollectiveEvent::Voted { - account: 2, - proposal_hash: hash, - voted: true, - yes: 2, - no: 0 - })), - record(RuntimeEvent::Collective(CollectiveEvent::Disapproved { - proposal_hash: hash - })), - ] - ); - }) -} - -#[test] -#[should_panic(expected = "Members cannot contain duplicate accounts.")] -fn genesis_build_panics_with_duplicate_members() { - pallet_collective::GenesisConfig:: { - members: vec![1, 2, 3, 1], - phantom: Default::default(), - } - .build_storage() - 
.unwrap(); -} diff --git a/pallets/collective/src/weights.rs b/pallets/collective/src/weights.rs deleted file mode 100644 index df233fc248..0000000000 --- a/pallets/collective/src/weights.rs +++ /dev/null @@ -1,554 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for pallet_collective -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-01-24, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 - -// Executed Command: -// ./target/production/substrate -// benchmark -// pallet -// --chain=dev -// --steps=50 -// --repeat=20 -// --pallet=pallet_collective -// --extrinsic=* -// --execution=wasm -// --wasm-execution=compiled -// --heap-pages=4096 -// --output=./frame/collective/src/weights.rs -// --header=./HEADER-APACHE2 -// --template=./.maintain/frame-weight-template.hbs - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] - -use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; -use sp_std::marker::PhantomData; - -/// Weight functions needed for pallet_collective. -pub trait WeightInfo { - fn set_members(m: u32, n: u32, p: u32, ) -> Weight; - fn execute(b: u32, m: u32, ) -> Weight; - fn propose_execute(b: u32, m: u32, ) -> Weight; - fn propose_proposed(b: u32, m: u32, p: u32, ) -> Weight; - fn vote(m: u32, ) -> Weight; - fn close_early_disapproved(m: u32, p: u32, ) -> Weight; - fn close_early_approved(b: u32, m: u32, p: u32, ) -> Weight; - fn close_disapproved(m: u32, p: u32, ) -> Weight; - fn close_approved(b: u32, m: u32, p: u32, ) -> Weight; - fn disapprove_proposal(p: u32, ) -> Weight; -} - -/// Weights for pallet_collective using the Substrate node and recommended hardware. -pub struct SubstrateWeight(PhantomData); -impl WeightInfo for SubstrateWeight { - /// Storage: Council Members (r:1 w:1) - /// Proof Skipped: Council Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Proposals (r:1 w:0) - /// Proof Skipped: Council Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Voting (r:100 w:100) - /// Proof Skipped: Council Voting (max_values: None, max_size: None, mode: Measured) - /// Storage: Council Prime (r:0 w:1) - /// Proof Skipped: Council Prime (max_values: Some(1), max_size: None, mode: Measured) - /// The range of component `m` is `[0, 100]`. - /// The range of component `n` is `[0, 100]`. - /// The range of component `p` is `[0, 100]`. 
- fn set_members(m: u32, _n: u32, p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0 + m * (3233 ±0) + p * (3223 ±0)` - // Estimated: `16586 + m * (7809 ±24) + p * (10238 ±24)` - // Minimum execution time: 17_093 nanoseconds. - Weight::from_parts(17_284_000, 16586) - // Standard Error: 64_700 - .saturating_add(Weight::from_parts(5_143_145, 0).saturating_mul(m.into())) - // Standard Error: 64_700 - .saturating_add(Weight::from_parts(7_480_941, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(2_u64)) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(p.into()))) - .saturating_add(T::DbWeight::get().writes(2_u64)) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(p.into()))) - .saturating_add(Weight::from_parts(0, 7809).saturating_mul(m.into())) - .saturating_add(Weight::from_parts(0, 10238).saturating_mul(p.into())) - } - /// Storage: Council Members (r:1 w:0) - /// Proof Skipped: Council Members (max_values: Some(1), max_size: None, mode: Measured) - /// The range of component `b` is `[2, 1024]`. - /// The range of component `m` is `[1, 100]`. - fn execute(b: u32, m: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `234 + m * (32 ±0)` - // Estimated: `730 + m * (32 ±0)` - // Minimum execution time: 15_972 nanoseconds. - Weight::from_parts(14_971_445, 730) - // Standard Error: 32 - .saturating_add(Weight::from_parts(1_775, 0).saturating_mul(b.into())) - // Standard Error: 334 - .saturating_add(Weight::from_parts(17_052, 0).saturating_mul(m.into())) - .saturating_add(T::DbWeight::get().reads(1_u64)) - .saturating_add(Weight::from_parts(0, 32).saturating_mul(m.into())) - } - /// Storage: Council Members (r:1 w:0) - /// Proof Skipped: Council Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council ProposalOf (r:1 w:0) - /// Proof Skipped: Council ProposalOf (max_values: None, max_size: None, mode: Measured) - /// The range of component `b` is `[2, 1024]`. - /// The range of component `m` is `[1, 100]`. - fn propose_execute(b: u32, m: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `234 + m * (32 ±0)` - // Estimated: `3440 + m * (64 ±0)` - // Minimum execution time: 17_950 nanoseconds. - Weight::from_parts(17_019_558, 3440) - // Standard Error: 41 - .saturating_add(Weight::from_parts(1_807, 0).saturating_mul(b.into())) - // Standard Error: 432 - .saturating_add(Weight::from_parts(27_986, 0).saturating_mul(m.into())) - .saturating_add(T::DbWeight::get().reads(2_u64)) - .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into())) - } - /// Storage: Council Members (r:1 w:0) - /// Proof Skipped: Council Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council ProposalOf (r:1 w:1) - /// Proof Skipped: Council ProposalOf (max_values: None, max_size: None, mode: Measured) - /// Storage: Council Proposals (r:1 w:1) - /// Proof Skipped: Council Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council ProposalCount (r:1 w:1) - /// Proof Skipped: Council ProposalCount (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Voting (r:0 w:1) - /// Proof Skipped: Council Voting (max_values: None, max_size: None, mode: Measured) - /// The range of component `b` is `[2, 1024]`. - /// The range of component `m` is `[2, 100]`. - /// The range of component `p` is `[1, 100]`. 
- fn propose_proposed(b: u32, m: u32, p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `556 + m * (32 ±0) + p * (36 ±0)` - // Estimated: `6355 + m * (165 ±0) + p * (180 ±0)` - // Minimum execution time: 24_817 nanoseconds. - Weight::from_parts(24_778_955, 6355) - // Standard Error: 73 - .saturating_add(Weight::from_parts(2_355, 0).saturating_mul(b.into())) - // Standard Error: 765 - .saturating_add(Weight::from_parts(20_518, 0).saturating_mul(m.into())) - // Standard Error: 755 - .saturating_add(Weight::from_parts(85_670, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(4_u64)) - .saturating_add(T::DbWeight::get().writes(4_u64)) - .saturating_add(Weight::from_parts(0, 165).saturating_mul(m.into())) - .saturating_add(Weight::from_parts(0, 180).saturating_mul(p.into())) - } - /// Storage: Council Members (r:1 w:0) - /// Proof Skipped: Council Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Voting (r:1 w:1) - /// Proof Skipped: Council Voting (max_values: None, max_size: None, mode: Measured) - /// The range of component `m` is `[5, 100]`. - fn vote(m: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `1006 + m * (64 ±0)` - // Estimated: `4980 + m * (128 ±0)` - // Minimum execution time: 19_790 nanoseconds. - Weight::from_parts(20_528_275, 4980) - // Standard Error: 651 - .saturating_add(Weight::from_parts(48_856, 0).saturating_mul(m.into())) - .saturating_add(T::DbWeight::get().reads(2_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) - .saturating_add(Weight::from_parts(0, 128).saturating_mul(m.into())) - } - /// Storage: Council Voting (r:1 w:1) - /// Proof Skipped: Council Voting (max_values: None, max_size: None, mode: Measured) - /// Storage: Council Members (r:1 w:0) - /// Proof Skipped: Council Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Proposals (r:1 w:1) - /// Proof Skipped: Council Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council ProposalOf (r:0 w:1) - /// Proof Skipped: Council ProposalOf (max_values: None, max_size: None, mode: Measured) - /// The range of component `m` is `[4, 100]`. - /// The range of component `p` is `[1, 100]`. - fn close_early_disapproved(m: u32, p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `626 + m * (64 ±0) + p * (36 ±0)` - // Estimated: `5893 + m * (260 ±0) + p * (144 ±0)` - // Minimum execution time: 25_564 nanoseconds. 
- Weight::from_parts(25_535_497, 5893) - // Standard Error: 610 - .saturating_add(Weight::from_parts(27_956, 0).saturating_mul(m.into())) - // Standard Error: 595 - .saturating_add(Weight::from_parts(84_835, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(3_u64)) - .saturating_add(T::DbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 260).saturating_mul(m.into())) - .saturating_add(Weight::from_parts(0, 144).saturating_mul(p.into())) - } - /// Storage: Council Voting (r:1 w:1) - /// Proof Skipped: Council Voting (max_values: None, max_size: None, mode: Measured) - /// Storage: Council Members (r:1 w:0) - /// Proof Skipped: Council Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council ProposalOf (r:1 w:1) - /// Proof Skipped: Council ProposalOf (max_values: None, max_size: None, mode: Measured) - /// Storage: Council Proposals (r:1 w:1) - /// Proof Skipped: Council Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// The range of component `b` is `[2, 1024]`. - /// The range of component `m` is `[4, 100]`. - /// The range of component `p` is `[1, 100]`. - fn close_early_approved(b: u32, m: u32, p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `962 + b * (1 ±0) + m * (64 ±0) + p * (40 ±0)` - // Estimated: `9164 + b * (4 ±0) + m * (264 ±0) + p * (160 ±0)` - // Minimum execution time: 36_515 nanoseconds. - Weight::from_parts(36_626_648, 9164) - // Standard Error: 98 - .saturating_add(Weight::from_parts(2_295, 0).saturating_mul(b.into())) - // Standard Error: 1_036 - .saturating_add(Weight::from_parts(22_182, 0).saturating_mul(m.into())) - // Standard Error: 1_010 - .saturating_add(Weight::from_parts(100_034, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(4_u64)) - .saturating_add(T::DbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 4).saturating_mul(b.into())) - .saturating_add(Weight::from_parts(0, 264).saturating_mul(m.into())) - .saturating_add(Weight::from_parts(0, 160).saturating_mul(p.into())) - } - /// Storage: Council Voting (r:1 w:1) - /// Proof Skipped: Council Voting (max_values: None, max_size: None, mode: Measured) - /// Storage: Council Members (r:1 w:0) - /// Proof Skipped: Council Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Prime (r:1 w:0) - /// Proof Skipped: Council Prime (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Proposals (r:1 w:1) - /// Proof Skipped: Council Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council ProposalOf (r:0 w:1) - /// Proof Skipped: Council ProposalOf (max_values: None, max_size: None, mode: Measured) - /// The range of component `m` is `[4, 100]`. - /// The range of component `p` is `[1, 100]`. - fn close_disapproved(m: u32, p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `646 + m * (64 ±0) + p * (36 ±0)` - // Estimated: `7095 + m * (325 ±0) + p * (180 ±0)` - // Minimum execution time: 28_858 nanoseconds. 
- Weight::from_parts(28_050_047, 7095) - // Standard Error: 614 - .saturating_add(Weight::from_parts(34_031, 0).saturating_mul(m.into())) - // Standard Error: 599 - .saturating_add(Weight::from_parts(85_744, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(4_u64)) - .saturating_add(T::DbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 325).saturating_mul(m.into())) - .saturating_add(Weight::from_parts(0, 180).saturating_mul(p.into())) - } - /// Storage: Council Voting (r:1 w:1) - /// Proof Skipped: Council Voting (max_values: None, max_size: None, mode: Measured) - /// Storage: Council Members (r:1 w:0) - /// Proof Skipped: Council Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Prime (r:1 w:0) - /// Proof Skipped: Council Prime (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council ProposalOf (r:1 w:1) - /// Proof Skipped: Council ProposalOf (max_values: None, max_size: None, mode: Measured) - /// Storage: Council Proposals (r:1 w:1) - /// Proof Skipped: Council Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// The range of component `b` is `[2, 1024]`. - /// The range of component `m` is `[4, 100]`. - /// The range of component `p` is `[1, 100]`. - fn close_approved(b: u32, m: u32, p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `982 + b * (1 ±0) + m * (64 ±0) + p * (40 ±0)` - // Estimated: `10565 + b * (5 ±0) + m * (330 ±0) + p * (200 ±0)` - // Minimum execution time: 38_608 nanoseconds. - Weight::from_parts(39_948_329, 10565) - // Standard Error: 84 - .saturating_add(Weight::from_parts(2_045, 0).saturating_mul(b.into())) - // Standard Error: 895 - .saturating_add(Weight::from_parts(22_669, 0).saturating_mul(m.into())) - // Standard Error: 872 - .saturating_add(Weight::from_parts(95_525, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(5_u64)) - .saturating_add(T::DbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 5).saturating_mul(b.into())) - .saturating_add(Weight::from_parts(0, 330).saturating_mul(m.into())) - .saturating_add(Weight::from_parts(0, 200).saturating_mul(p.into())) - } - /// Storage: Council Proposals (r:1 w:1) - /// Proof Skipped: Council Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Voting (r:0 w:1) - /// Proof Skipped: Council Voting (max_values: None, max_size: None, mode: Measured) - /// Storage: Council ProposalOf (r:0 w:1) - /// Proof Skipped: Council ProposalOf (max_values: None, max_size: None, mode: Measured) - /// The range of component `p` is `[1, 100]`. - fn disapprove_proposal(p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `391 + p * (32 ±0)` - // Estimated: `1668 + p * (96 ±0)` - // Minimum execution time: 14_785 nanoseconds. 
- Weight::from_parts(16_393_818, 1668) - // Standard Error: 612 - .saturating_add(Weight::from_parts(76_786, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(1_u64)) - .saturating_add(T::DbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 96).saturating_mul(p.into())) - } -} - -// For backwards compatibility and tests -impl WeightInfo for () { - /// Storage: Council Members (r:1 w:1) - /// Proof Skipped: Council Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Proposals (r:1 w:0) - /// Proof Skipped: Council Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Voting (r:100 w:100) - /// Proof Skipped: Council Voting (max_values: None, max_size: None, mode: Measured) - /// Storage: Council Prime (r:0 w:1) - /// Proof Skipped: Council Prime (max_values: Some(1), max_size: None, mode: Measured) - /// The range of component `m` is `[0, 100]`. - /// The range of component `n` is `[0, 100]`. - /// The range of component `p` is `[0, 100]`. - fn set_members(m: u32, _n: u32, p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0 + m * (3233 ±0) + p * (3223 ±0)` - // Estimated: `16586 + m * (7809 ±24) + p * (10238 ±24)` - // Minimum execution time: 17_093 nanoseconds. - Weight::from_parts(17_284_000, 16586) - // Standard Error: 64_700 - .saturating_add(Weight::from_parts(5_143_145, 0).saturating_mul(m.into())) - // Standard Error: 64_700 - .saturating_add(Weight::from_parts(7_480_941, 0).saturating_mul(p.into())) - .saturating_add(RocksDbWeight::get().reads(2_u64)) - .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(p.into()))) - .saturating_add(RocksDbWeight::get().writes(2_u64)) - .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(p.into()))) - .saturating_add(Weight::from_parts(0, 7809).saturating_mul(m.into())) - .saturating_add(Weight::from_parts(0, 10238).saturating_mul(p.into())) - } - /// Storage: Council Members (r:1 w:0) - /// Proof Skipped: Council Members (max_values: Some(1), max_size: None, mode: Measured) - /// The range of component `b` is `[2, 1024]`. - /// The range of component `m` is `[1, 100]`. - fn execute(b: u32, m: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `234 + m * (32 ±0)` - // Estimated: `730 + m * (32 ±0)` - // Minimum execution time: 15_972 nanoseconds. - Weight::from_parts(14_971_445, 730) - // Standard Error: 32 - .saturating_add(Weight::from_parts(1_775, 0).saturating_mul(b.into())) - // Standard Error: 334 - .saturating_add(Weight::from_parts(17_052, 0).saturating_mul(m.into())) - .saturating_add(RocksDbWeight::get().reads(1_u64)) - .saturating_add(Weight::from_parts(0, 32).saturating_mul(m.into())) - } - /// Storage: Council Members (r:1 w:0) - /// Proof Skipped: Council Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council ProposalOf (r:1 w:0) - /// Proof Skipped: Council ProposalOf (max_values: None, max_size: None, mode: Measured) - /// The range of component `b` is `[2, 1024]`. - /// The range of component `m` is `[1, 100]`. - fn propose_execute(b: u32, m: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `234 + m * (32 ±0)` - // Estimated: `3440 + m * (64 ±0)` - // Minimum execution time: 17_950 nanoseconds. 
- Weight::from_parts(17_019_558, 3440) - // Standard Error: 41 - .saturating_add(Weight::from_parts(1_807, 0).saturating_mul(b.into())) - // Standard Error: 432 - .saturating_add(Weight::from_parts(27_986, 0).saturating_mul(m.into())) - .saturating_add(RocksDbWeight::get().reads(2_u64)) - .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into())) - } - /// Storage: Council Members (r:1 w:0) - /// Proof Skipped: Council Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council ProposalOf (r:1 w:1) - /// Proof Skipped: Council ProposalOf (max_values: None, max_size: None, mode: Measured) - /// Storage: Council Proposals (r:1 w:1) - /// Proof Skipped: Council Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council ProposalCount (r:1 w:1) - /// Proof Skipped: Council ProposalCount (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Voting (r:0 w:1) - /// Proof Skipped: Council Voting (max_values: None, max_size: None, mode: Measured) - /// The range of component `b` is `[2, 1024]`. - /// The range of component `m` is `[2, 100]`. - /// The range of component `p` is `[1, 100]`. - fn propose_proposed(b: u32, m: u32, p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `556 + m * (32 ±0) + p * (36 ±0)` - // Estimated: `6355 + m * (165 ±0) + p * (180 ±0)` - // Minimum execution time: 24_817 nanoseconds. - Weight::from_parts(24_778_955, 6355) - // Standard Error: 73 - .saturating_add(Weight::from_parts(2_355, 0).saturating_mul(b.into())) - // Standard Error: 765 - .saturating_add(Weight::from_parts(20_518, 0).saturating_mul(m.into())) - // Standard Error: 755 - .saturating_add(Weight::from_parts(85_670, 0).saturating_mul(p.into())) - .saturating_add(RocksDbWeight::get().reads(4_u64)) - .saturating_add(RocksDbWeight::get().writes(4_u64)) - .saturating_add(Weight::from_parts(0, 165).saturating_mul(m.into())) - .saturating_add(Weight::from_parts(0, 180).saturating_mul(p.into())) - } - /// Storage: Council Members (r:1 w:0) - /// Proof Skipped: Council Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Voting (r:1 w:1) - /// Proof Skipped: Council Voting (max_values: None, max_size: None, mode: Measured) - /// The range of component `m` is `[5, 100]`. - fn vote(m: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `1006 + m * (64 ±0)` - // Estimated: `4980 + m * (128 ±0)` - // Minimum execution time: 19_790 nanoseconds. - Weight::from_parts(20_528_275, 4980) - // Standard Error: 651 - .saturating_add(Weight::from_parts(48_856, 0).saturating_mul(m.into())) - .saturating_add(RocksDbWeight::get().reads(2_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - .saturating_add(Weight::from_parts(0, 128).saturating_mul(m.into())) - } - /// Storage: Council Voting (r:1 w:1) - /// Proof Skipped: Council Voting (max_values: None, max_size: None, mode: Measured) - /// Storage: Council Members (r:1 w:0) - /// Proof Skipped: Council Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Proposals (r:1 w:1) - /// Proof Skipped: Council Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council ProposalOf (r:0 w:1) - /// Proof Skipped: Council ProposalOf (max_values: None, max_size: None, mode: Measured) - /// The range of component `m` is `[4, 100]`. - /// The range of component `p` is `[1, 100]`. 
- fn close_early_disapproved(m: u32, p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `626 + m * (64 ±0) + p * (36 ±0)` - // Estimated: `5893 + m * (260 ±0) + p * (144 ±0)` - // Minimum execution time: 25_564 nanoseconds. - Weight::from_parts(25_535_497, 5893) - // Standard Error: 610 - .saturating_add(Weight::from_parts(27_956, 0).saturating_mul(m.into())) - // Standard Error: 595 - .saturating_add(Weight::from_parts(84_835, 0).saturating_mul(p.into())) - .saturating_add(RocksDbWeight::get().reads(3_u64)) - .saturating_add(RocksDbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 260).saturating_mul(m.into())) - .saturating_add(Weight::from_parts(0, 144).saturating_mul(p.into())) - } - /// Storage: Council Voting (r:1 w:1) - /// Proof Skipped: Council Voting (max_values: None, max_size: None, mode: Measured) - /// Storage: Council Members (r:1 w:0) - /// Proof Skipped: Council Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council ProposalOf (r:1 w:1) - /// Proof Skipped: Council ProposalOf (max_values: None, max_size: None, mode: Measured) - /// Storage: Council Proposals (r:1 w:1) - /// Proof Skipped: Council Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// The range of component `b` is `[2, 1024]`. - /// The range of component `m` is `[4, 100]`. - /// The range of component `p` is `[1, 100]`. - fn close_early_approved(b: u32, m: u32, p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `962 + b * (1 ±0) + m * (64 ±0) + p * (40 ±0)` - // Estimated: `9164 + b * (4 ±0) + m * (264 ±0) + p * (160 ±0)` - // Minimum execution time: 36_515 nanoseconds. - Weight::from_parts(36_626_648, 9164) - // Standard Error: 98 - .saturating_add(Weight::from_parts(2_295, 0).saturating_mul(b.into())) - // Standard Error: 1_036 - .saturating_add(Weight::from_parts(22_182, 0).saturating_mul(m.into())) - // Standard Error: 1_010 - .saturating_add(Weight::from_parts(100_034, 0).saturating_mul(p.into())) - .saturating_add(RocksDbWeight::get().reads(4_u64)) - .saturating_add(RocksDbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 4).saturating_mul(b.into())) - .saturating_add(Weight::from_parts(0, 264).saturating_mul(m.into())) - .saturating_add(Weight::from_parts(0, 160).saturating_mul(p.into())) - } - /// Storage: Council Voting (r:1 w:1) - /// Proof Skipped: Council Voting (max_values: None, max_size: None, mode: Measured) - /// Storage: Council Members (r:1 w:0) - /// Proof Skipped: Council Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Prime (r:1 w:0) - /// Proof Skipped: Council Prime (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Proposals (r:1 w:1) - /// Proof Skipped: Council Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council ProposalOf (r:0 w:1) - /// Proof Skipped: Council ProposalOf (max_values: None, max_size: None, mode: Measured) - /// The range of component `m` is `[4, 100]`. - /// The range of component `p` is `[1, 100]`. - fn close_disapproved(m: u32, p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `646 + m * (64 ±0) + p * (36 ±0)` - // Estimated: `7095 + m * (325 ±0) + p * (180 ±0)` - // Minimum execution time: 28_858 nanoseconds. 
- Weight::from_parts(28_050_047, 7095) - // Standard Error: 614 - .saturating_add(Weight::from_parts(34_031, 0).saturating_mul(m.into())) - // Standard Error: 599 - .saturating_add(Weight::from_parts(85_744, 0).saturating_mul(p.into())) - .saturating_add(RocksDbWeight::get().reads(4_u64)) - .saturating_add(RocksDbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 325).saturating_mul(m.into())) - .saturating_add(Weight::from_parts(0, 180).saturating_mul(p.into())) - } - /// Storage: Council Voting (r:1 w:1) - /// Proof Skipped: Council Voting (max_values: None, max_size: None, mode: Measured) - /// Storage: Council Members (r:1 w:0) - /// Proof Skipped: Council Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Prime (r:1 w:0) - /// Proof Skipped: Council Prime (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council ProposalOf (r:1 w:1) - /// Proof Skipped: Council ProposalOf (max_values: None, max_size: None, mode: Measured) - /// Storage: Council Proposals (r:1 w:1) - /// Proof Skipped: Council Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// The range of component `b` is `[2, 1024]`. - /// The range of component `m` is `[4, 100]`. - /// The range of component `p` is `[1, 100]`. - fn close_approved(b: u32, m: u32, p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `982 + b * (1 ±0) + m * (64 ±0) + p * (40 ±0)` - // Estimated: `10565 + b * (5 ±0) + m * (330 ±0) + p * (200 ±0)` - // Minimum execution time: 38_608 nanoseconds. - Weight::from_parts(39_948_329, 10565) - // Standard Error: 84 - .saturating_add(Weight::from_parts(2_045, 0).saturating_mul(b.into())) - // Standard Error: 895 - .saturating_add(Weight::from_parts(22_669, 0).saturating_mul(m.into())) - // Standard Error: 872 - .saturating_add(Weight::from_parts(95_525, 0).saturating_mul(p.into())) - .saturating_add(RocksDbWeight::get().reads(5_u64)) - .saturating_add(RocksDbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 5).saturating_mul(b.into())) - .saturating_add(Weight::from_parts(0, 330).saturating_mul(m.into())) - .saturating_add(Weight::from_parts(0, 200).saturating_mul(p.into())) - } - /// Storage: Council Proposals (r:1 w:1) - /// Proof Skipped: Council Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Voting (r:0 w:1) - /// Proof Skipped: Council Voting (max_values: None, max_size: None, mode: Measured) - /// Storage: Council ProposalOf (r:0 w:1) - /// Proof Skipped: Council ProposalOf (max_values: None, max_size: None, mode: Measured) - /// The range of component `p` is `[1, 100]`. - fn disapprove_proposal(p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `391 + p * (32 ±0)` - // Estimated: `1668 + p * (96 ±0)` - // Minimum execution time: 14_785 nanoseconds. - Weight::from_parts(16_393_818, 1668) - // Standard Error: 612 - .saturating_add(Weight::from_parts(76_786, 0).saturating_mul(p.into())) - .saturating_add(RocksDbWeight::get().reads(1_u64)) - .saturating_add(RocksDbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 96).saturating_mul(p.into())) - } -} diff --git a/pallets/commitments/src/benchmarking.rs b/pallets/commitments/src/benchmarking.rs index d09083f749..d385c6f57f 100644 --- a/pallets/commitments/src/benchmarking.rs +++ b/pallets/commitments/src/benchmarking.rs @@ -1,6 +1,6 @@ //! 
Benchmarking setup #![cfg(feature = "runtime-benchmarks")] -#![allow(clippy::arithmetic_side_effects)] +#![allow(clippy::arithmetic_side_effects, clippy::expect_used)] use super::*; #[allow(unused)] diff --git a/pallets/commitments/src/lib.rs b/pallets/commitments/src/lib.rs index f61ecf22a6..a627220f76 100644 --- a/pallets/commitments/src/lib.rs +++ b/pallets/commitments/src/lib.rs @@ -37,6 +37,7 @@ type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; #[deny(missing_docs)] #[frame_support::pallet] +#[allow(clippy::expect_used)] pub mod pallet { use super::*; use frame_support::{pallet_prelude::*, traits::ReservableCurrency}; @@ -204,6 +205,8 @@ pub mod pallet { #[pallet::call] impl Pallet { + #![deny(clippy::expect_used)] + /// Set the commitment for a given netuid #[pallet::call_index(0)] #[pallet::weight(( diff --git a/pallets/commitments/src/mock.rs b/pallets/commitments/src/mock.rs index 9b6f6b034a..24e259b23c 100644 --- a/pallets/commitments/src/mock.rs +++ b/pallets/commitments/src/mock.rs @@ -1,3 +1,4 @@ +#![allow(clippy::expect_used)] use crate as pallet_commitments; use frame_support::{ derive_impl, diff --git a/pallets/commitments/src/tests.rs b/pallets/commitments/src/tests.rs index 5f19070ea2..9270a84bb7 100644 --- a/pallets/commitments/src/tests.rs +++ b/pallets/commitments/src/tests.rs @@ -1,3 +1,5 @@ +#![allow(clippy::expect_used, clippy::indexing_slicing)] + use codec::Encode; use sp_std::prelude::*; use subtensor_runtime_common::NetUid; @@ -19,7 +21,6 @@ use frame_support::{ }; use frame_system::{Pallet as System, RawOrigin}; -#[allow(clippy::indexing_slicing)] #[test] fn manual_data_type_info() { let mut registry = scale_info::Registry::new(); diff --git a/pallets/commitments/src/types.rs b/pallets/commitments/src/types.rs index 0467fee8f3..cdca47922b 100644 --- a/pallets/commitments/src/types.rs +++ b/pallets/commitments/src/types.rs @@ -100,7 +100,7 @@ impl Decode for Data { n @ 1..=129 => { let mut r: BoundedVec<_, _> = vec![0u8; (n as usize).saturating_sub(1)] .try_into() - .expect("bound checked in match arm condition; qed"); + .map_err(|_| codec::Error::from("bound checked in match arm condition; qed"))?; input.read(&mut r[..])?; Data::Raw(r) } diff --git a/pallets/crowdloan/src/lib.rs b/pallets/crowdloan/src/lib.rs index 3f5449a49c..82b1402bc6 100644 --- a/pallets/crowdloan/src/lib.rs +++ b/pallets/crowdloan/src/lib.rs @@ -88,6 +88,7 @@ pub type CrowdloanInfoOf = CrowdloanInfo< >; #[frame_support::pallet] +#[allow(clippy::expect_used)] pub mod pallet { use super::*; @@ -285,6 +286,8 @@ pub mod pallet { #[pallet::call] impl Pallet { + #![deny(clippy::expect_used)] + /// Create a crowdloan that will raise funds up to a maximum cap and if successful, /// will transfer funds to the target address if provided and dispatch the call /// (using creator origin). @@ -547,7 +550,7 @@ pub mod pallet { Ok(()) } - /// Finalize a successful crowdloan. + /// Finalize crowdloan that has reached the cap. /// /// The call will transfer the raised amount to the target address if it was provided when the crowdloan was created /// and dispatch the call that was provided using the creator origin. 
The CurrentCrowdloanId will be set to the @@ -565,14 +568,12 @@ pub mod pallet { #[pallet::compact] crowdloan_id: CrowdloanId, ) -> DispatchResult { let who = ensure_signed(origin)?; - let now = frame_system::Pallet::::block_number(); let mut crowdloan = Self::ensure_crowdloan_exists(crowdloan_id)?; - // Ensure the origin is the creator of the crowdloan and the crowdloan has ended, - // raised the cap and is not finalized. + // Ensure the origin is the creator of the crowdloan and the crowdloan has raised the cap + // and is not finalized. ensure!(who == crowdloan.creator, Error::::InvalidOrigin); - ensure!(now >= crowdloan.end, Error::::ContributionPeriodNotEnded); ensure!(crowdloan.raised == crowdloan.cap, Error::::CapNotRaised); ensure!(!crowdloan.finalized, Error::::AlreadyFinalized); @@ -621,7 +622,7 @@ pub mod pallet { Ok(()) } - /// Refund a failed crowdloan. + /// Refund contributors of a non-finalized crowdloan. /// /// The call will try to refund all contributors (excluding the creator) up to the limit defined by the `RefundContributorsLimit`. /// If the limit is reached, the call will stop and the crowdloan will be marked as partially refunded. @@ -637,15 +638,16 @@ pub mod pallet { origin: OriginFor, #[pallet::compact] crowdloan_id: CrowdloanId, ) -> DispatchResultWithPostInfo { - let now = frame_system::Pallet::::block_number(); - ensure_signed(origin)?; + let who = ensure_signed(origin)?; let mut crowdloan = Self::ensure_crowdloan_exists(crowdloan_id)?; - // Ensure the crowdloan has ended and is not finalized - ensure!(now >= crowdloan.end, Error::::ContributionPeriodNotEnded); + // Ensure the crowdloan is not finalized ensure!(!crowdloan.finalized, Error::::AlreadyFinalized); + // Only the creator can refund the crowdloan + ensure!(who == crowdloan.creator, Error::::InvalidOrigin); + let mut refunded_contributors: Vec = vec![]; let mut refund_count = 0; diff --git a/pallets/crowdloan/src/mock.rs b/pallets/crowdloan/src/mock.rs index d96069c05e..fa2f5533d2 100644 --- a/pallets/crowdloan/src/mock.rs +++ b/pallets/crowdloan/src/mock.rs @@ -1,5 +1,9 @@ #![cfg(test)] -#![allow(clippy::arithmetic_side_effects, clippy::unwrap_used)] +#![allow( + clippy::arithmetic_side_effects, + clippy::expect_used, + clippy::unwrap_used +)] use frame_support::{ PalletId, derive_impl, parameter_types, traits::{OnFinalize, OnInitialize, fungible, fungible::*, tokens::Preservation}, diff --git a/pallets/crowdloan/src/tests.rs b/pallets/crowdloan/src/tests.rs index 1e03854b1f..67d097e1e1 100644 --- a/pallets/crowdloan/src/tests.rs +++ b/pallets/crowdloan/src/tests.rs @@ -1147,9 +1147,6 @@ fn test_finalize_succeeds() { amount )); - // run some more blocks past the end of the contribution period - run_to_block(60); - // finalize the crowdloan assert_ok!(Crowdloan::finalize( RuntimeOrigin::signed(creator), @@ -1340,54 +1337,6 @@ fn test_finalize_fails_if_not_creator_origin() { }); } -#[test] -fn test_finalize_fails_if_crowdloan_has_not_ended() { - TestState::default() - .with_balance(U256::from(1), 100) - .with_balance(U256::from(2), 100) - .build_and_execute(|| { - // create a crowdloan - let creator: AccountOf = U256::from(1); - let deposit: BalanceOf = 50; - let min_contribution: BalanceOf = 10; - let cap: BalanceOf = 100; - let end: BlockNumberFor = 50; - - assert_ok!(Crowdloan::create( - RuntimeOrigin::signed(creator), - deposit, - min_contribution, - cap, - end, - Some(noop_call()), - None, - )); - - // run some blocks - run_to_block(10); - - // some contribution - let crowdloan_id: CrowdloanId = 
0; - let contributor: AccountOf = U256::from(2); - let amount: BalanceOf = 50; - - assert_ok!(Crowdloan::contribute( - RuntimeOrigin::signed(contributor), - crowdloan_id, - amount - )); - - // run some more blocks before end of contribution period - run_to_block(10); - - // try to finalize - assert_err!( - Crowdloan::finalize(RuntimeOrigin::signed(creator), crowdloan_id), - pallet_crowdloan::Error::::ContributionPeriodNotEnded - ); - }); -} - #[test] fn test_finalize_fails_if_crowdloan_cap_is_not_raised() { TestState::default() @@ -1585,8 +1534,8 @@ fn test_refund_succeeds() { .is_some_and(|c| c.contributors_count == 7) ); - // run some more blocks past the end of the contribution period - run_to_block(60); + // run some more blocks before the end of the contribution period + run_to_block(20); // first round of refund assert_ok!(Crowdloan::refund( @@ -1614,7 +1563,7 @@ fn test_refund_succeeds() { pallet_crowdloan::Event::::PartiallyRefunded { crowdloan_id }.into() ); - // run some more blocks + // run some more blocks past the end of the contribution period run_to_block(70); // second round of refund @@ -1669,43 +1618,12 @@ fn test_refund_succeeds() { } #[test] -fn test_refund_fails_if_bad_origin() { - TestState::default().build_and_execute(|| { - let crowdloan_id: CrowdloanId = 0; - - assert_err!( - Crowdloan::refund(RuntimeOrigin::none(), crowdloan_id), - DispatchError::BadOrigin - ); - - assert_err!( - Crowdloan::refund(RuntimeOrigin::root(), crowdloan_id), - DispatchError::BadOrigin - ); - }); -} - -#[test] -fn test_refund_fails_if_crowdloan_does_not_exist() { - TestState::default() - .with_balance(U256::from(1), 100) - .build_and_execute(|| { - let creator: AccountOf = U256::from(1); - let crowdloan_id: CrowdloanId = 0; - - assert_err!( - Crowdloan::refund(RuntimeOrigin::signed(creator), crowdloan_id), - pallet_crowdloan::Error::::InvalidCrowdloanId - ); - }); -} - -#[test] -fn test_refund_fails_if_crowdloan_has_not_ended() { +fn test_refund_fails_if_bad_or_invalid_origin() { TestState::default() .with_balance(U256::from(1), 100) .build_and_execute(|| { // create a crowdloan + let crowdloan_id: CrowdloanId = 0; let creator: AccountOf = U256::from(1); let initial_deposit: BalanceOf = 50; let min_contribution: BalanceOf = 10; @@ -1721,14 +1639,39 @@ fn test_refund_fails_if_crowdloan_has_not_ended() { None, )); + assert_err!( + Crowdloan::refund(RuntimeOrigin::none(), crowdloan_id), + DispatchError::BadOrigin + ); + + assert_err!( + Crowdloan::refund(RuntimeOrigin::root(), crowdloan_id), + DispatchError::BadOrigin + ); + // run some blocks - run_to_block(10); + run_to_block(60); // try to refund + let unknown_contributor: AccountOf = U256::from(2); + assert_err!( + Crowdloan::refund(RuntimeOrigin::signed(unknown_contributor), crowdloan_id), + pallet_crowdloan::Error::::InvalidOrigin, + ); + }); +} + +#[test] +fn test_refund_fails_if_crowdloan_does_not_exist() { + TestState::default() + .with_balance(U256::from(1), 100) + .build_and_execute(|| { + let creator: AccountOf = U256::from(1); let crowdloan_id: CrowdloanId = 0; + assert_err!( Crowdloan::refund(RuntimeOrigin::signed(creator), crowdloan_id), - pallet_crowdloan::Error::::ContributionPeriodNotEnded + pallet_crowdloan::Error::::InvalidCrowdloanId ); }); } diff --git a/pallets/drand/src/lib.rs b/pallets/drand/src/lib.rs index 865edfd2de..c5811345e6 100644 --- a/pallets/drand/src/lib.rs +++ b/pallets/drand/src/lib.rs @@ -307,6 +307,7 @@ pub mod pallet { signature, &payload.block_number, &payload.public, + None, ) } Call::write_pulse { 
@@ -314,11 +315,13 @@ pub mod pallet { signature, } => { let signature = signature.as_ref().ok_or(InvalidTransaction::BadSigner)?; + let rounds: Vec = payload.pulses.iter().map(|p| p.round).collect(); Self::validate_signature_and_parameters( payload, signature, &payload.block_number, &payload.public, + Some(&rounds), ) } _ => InvalidTransaction::Call.into(), @@ -480,32 +483,34 @@ impl Pallet { pulses.push(pulse); } - let signer = Signer::::all_accounts(); - - let results = signer.send_unsigned_transaction( - |account| PulsesPayload { - block_number, - pulses: pulses.clone(), - public: account.public.clone(), - }, - |pulses_payload, signature| Call::write_pulse { - pulses_payload, - signature: Some(signature), - }, - ); - - for (acc, res) in &results { - match res { - Ok(()) => log::debug!( - "Drand: [{:?}] Submitted new pulses up to round: {:?}", - acc.id, - last_stored_round.saturating_add(rounds_to_fetch) - ), - Err(e) => log::error!( - "Drand: [{:?}] Failed to submit transaction: {:?}", - acc.id, - e - ), + let signer = Signer::::any_account(); + + // Submit one tx per pulse, ascending rounds. + for pulse in pulses.into_iter() { + let round = pulse.round; + + if let Some((acc, res)) = signer.send_unsigned_transaction( + |account| PulsesPayload { + block_number, + pulses: vec![pulse.clone()], + public: account.public.clone(), + }, + |pulses_payload, signature| Call::write_pulse { + pulses_payload, + signature: Some(signature), + }, + ) { + match res { + Ok(()) => log::debug!("Drand: [{:?}] submitted round {:?}", acc.id, round), + Err(e) => log::debug!( + "Drand: [{:?}] failed to submit round {:?}: {:?}", + acc.id, + round, + e + ), + } + } else { + log::debug!("Drand: No local account available to submit round {round:?}"); } } } @@ -631,54 +636,74 @@ impl Pallet { signature: &T::Signature, block_number: &BlockNumberFor, public: &T::Public, + rounds: Option<&[RoundNumber]>, ) -> TransactionValidity { let signature_valid = SignedPayload::::verify::(payload, signature.clone()); if !signature_valid { return InvalidTransaction::BadProof.into(); } - Self::validate_transaction_parameters(block_number, public) + Self::validate_transaction_parameters(block_number, public, rounds) } fn validate_transaction_parameters( block_number: &BlockNumberFor, public: &T::Public, + rounds: Option<&[RoundNumber]>, ) -> TransactionValidity { - // Now let's check if the transaction has any chance to succeed. let next_unsigned_at = NextUnsignedAt::::get(); - if &next_unsigned_at > block_number { - return InvalidTransaction::Stale.into(); - } - // Let's make sure to reject transactions from the future. let current_block = frame_system::Pallet::::block_number(); - if ¤t_block < block_number { + + if current_block < *block_number { return InvalidTransaction::Future.into(); } - let provides_tag = (next_unsigned_at, public.encode()).using_encoded(blake2_256); - - ValidTransaction::with_tag_prefix("DrandOffchainWorker") - // We set the priority to the value stored at `UnsignedPriority`. - .priority(T::UnsignedPriority::get()) - // This transaction does not require anything else to go before into the pool. - // In theory we could require `previous_unsigned_at` transaction to go first, - // but it's not necessary in our case. - // We set the `provides` tag to be the same as `next_unsigned_at`. This makes - // sure only one transaction produced after `next_unsigned_at` will ever - // get to the transaction pool and will end up in the block. 
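The offchain worker above now submits one unsigned transaction per pulse, and the validation rewrite continuing below becomes round-aware. A condensed, self-contained sketch of that round logic, written as a hypothetical free function (the real hunk also gates on `NextUnsignedAt` and rejects payloads carrying more than one round):

```rust
use codec::Encode;
use sp_io::hashing::blake2_256;
use sp_runtime::transaction_validity::{
    InvalidTransaction, TransactionValidity, ValidTransaction,
};

/// Hypothetical standalone version of the per-round pool-admission check.
fn validate_round(round: u64, last_stored_round: u64, base_priority: u64) -> TransactionValidity {
    // A round at or below the last stored round can never be applied; reject it
    // at pool admission instead of letting it fail inside a block.
    if round <= last_stored_round {
        return InvalidTransaction::Stale.into();
    }
    ValidTransaction::with_tag_prefix("DrandOffchainWorker")
        // Lower rounds get higher priority, so pending rounds are included in ascending order.
        .priority(base_priority.saturating_add(u64::MAX.saturating_sub(round)))
        // One provides-tag per round deduplicates competing transactions for the same round.
        .and_provides((b"drand", round).using_encoded(blake2_256))
        .longevity(3)
        .propagate(false)
        .build()
}
```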
- // We can still have multiple transactions compete for the same "spot", - // and the one with higher priority will replace other one in the pool. - .and_provides(provides_tag) - // The transaction is only valid for next block. After that it's - // going to be revalidated by the pool. - .longevity(1) - // It's fine to propagate that transaction to other peers, which means it can be - // created even by nodes that don't produce blocks. - // Note that sometimes it's better to keep it for yourself (if you are the block - // producer), since for instance in some schemes others may copy your solution and - // claim a reward. - .propagate(true) - .build() + match rounds { + Some(rs) => { + let r_opt = rs.first().copied(); + let has_second = rs.get(1).is_some(); + let r = match (r_opt, has_second) { + (Some(round), false) => round, + _ => return InvalidTransaction::Call.into(), + }; + + // Allow multiple unsigned txs in the same block even after the first updates the gate. + if next_unsigned_at > current_block { + return InvalidTransaction::Stale.into(); + } + + // Drop stale rounds at mempool time to avoid re-including last block's rounds. + let last = LastStoredRound::::get(); + if r <= last { + return InvalidTransaction::Stale.into(); + } + + // Priority favors lower rounds first. + let priority = + T::UnsignedPriority::get().saturating_add(u64::MAX.saturating_sub(r)); + + ValidTransaction::with_tag_prefix("DrandOffchainWorker") + .priority(priority) + .and_provides((b"drand", r).using_encoded(blake2_256)) + .longevity(3) + .propagate(false) + .build() + } + + None => { + if next_unsigned_at > *block_number { + return InvalidTransaction::Stale.into(); + } + + let provides_tag = (next_unsigned_at, public.encode()).using_encoded(blake2_256); + ValidTransaction::with_tag_prefix("DrandOffchainWorker") + .priority(T::UnsignedPriority::get()) + .and_provides(provides_tag) + .longevity(1) + .propagate(true) + .build() + } + } } fn prune_old_pulses(last_stored_round: RoundNumber) { diff --git a/pallets/drand/src/tests.rs b/pallets/drand/src/tests.rs index fdc450b5e2..e07fbb08ca 100644 --- a/pallets/drand/src/tests.rs +++ b/pallets/drand/src/tests.rs @@ -275,9 +275,16 @@ fn test_validate_unsigned_write_pulse() { let block_number = 100_000_000; let alice = sp_keyring::Sr25519Keyring::Alice; System::set_block_number(block_number); + + let pulse = Pulse { + round: 1, + randomness: frame_support::BoundedVec::truncate_from(vec![0u8; 32]), + signature: frame_support::BoundedVec::truncate_from(vec![1u8; 96]), + }; + let pulses_payload = PulsesPayload { block_number, - pulses: vec![], + pulses: vec![pulse], public: alice.public(), }; let signature = alice.sign(&pulses_payload.encode()); diff --git a/pallets/proxy/src/benchmarking.rs b/pallets/proxy/src/benchmarking.rs index 8b1f5ababf..9a759e1846 100644 --- a/pallets/proxy/src/benchmarking.rs +++ b/pallets/proxy/src/benchmarking.rs @@ -18,7 +18,7 @@ // Benchmarks for Proxy Pallet #![cfg(feature = "runtime-benchmarks")] -#![allow(clippy::arithmetic_side_effects)] +#![allow(clippy::arithmetic_side_effects, clippy::unwrap_used)] use super::*; use crate::Pallet as Proxy; @@ -321,7 +321,8 @@ mod benchmarks { 0, ); - let pure_account = Pallet::::pure_account(&caller, &T::ProxyType::default(), 0, None); + let pure_account = + Pallet::::pure_account(&caller, &T::ProxyType::default(), 0, None).unwrap(); assert_last_event::( Event::PureCreated { pure: pure_account, @@ -348,7 +349,8 @@ mod benchmarks { )?; let height = T::BlockNumberProvider::current_block_number(); let 
ext_index = frame_system::Pallet::::extrinsic_index().unwrap_or(0); - let pure_account = Pallet::::pure_account(&caller, &T::ProxyType::default(), 0, None); + let pure_account = + Pallet::::pure_account(&caller, &T::ProxyType::default(), 0, None).unwrap(); add_proxies::(p, Some(pure_account.clone()))?; ensure!( diff --git a/pallets/proxy/src/lib.rs b/pallets/proxy/src/lib.rs index 9fbb481591..93ac568668 100644 --- a/pallets/proxy/src/lib.rs +++ b/pallets/proxy/src/lib.rs @@ -113,6 +113,7 @@ pub enum DepositKind { } #[frame::pallet] +#[allow(clippy::expect_used)] pub mod pallet { use super::*; @@ -215,6 +216,8 @@ pub mod pallet { #[pallet::call] impl Pallet { + #![deny(clippy::expect_used)] + /// Dispatch the given `call` from an account that the sender is authorised for through /// `add_proxy`. /// @@ -333,7 +336,7 @@ pub mod pallet { ) -> DispatchResult { let who = ensure_signed(origin)?; - let pure = Self::pure_account(&who, &proxy_type, index, None); + let pure = Self::pure_account(&who, &proxy_type, index, None)?; ensure!(!Proxies::::contains_key(&pure), Error::::Duplicate); let proxy_def = ProxyDefinition { @@ -389,7 +392,7 @@ pub mod pallet { let spawner = T::Lookup::lookup(spawner)?; let when = (height, ext_index); - let proxy = Self::pure_account(&spawner, &proxy_type, index, Some(when)); + let proxy = Self::pure_account(&spawner, &proxy_type, index, Some(when))?; ensure!(proxy == who, Error::::NoPermission); let (_, deposit) = Proxies::::take(&who); @@ -445,24 +448,24 @@ pub mod pallet { pending .try_push(announcement) .map_err(|_| Error::::TooMany)?; - Self::rejig_deposit( + let new_deposit = Self::rejig_deposit( &who, *deposit, T::AnnouncementDepositBase::get(), T::AnnouncementDepositFactor::get(), pending.len(), - ) - .map(|d| { - d.expect("Just pushed; pending.len() > 0; rejig_deposit returns Some; qed") - }) - .map(|d| *deposit = d) + )? + .ok_or(Error::::AnnouncementDepositInvariantViolated)?; + + *deposit = new_deposit; + Ok::<(), DispatchError>(()) })?; + Self::deposit_event(Event::Announced { real, proxy: who, call_hash, }); - Ok(()) } @@ -743,6 +746,10 @@ pub mod pallet { Unannounced, /// Cannot add self as proxy. NoSelfProxy, + /// Invariant violated: deposit recomputation returned None after updating announcements. + AnnouncementDepositInvariantViolated, + /// Failed to derive a valid account id from the provided entropy. + InvalidDerivedAccountId, } /// The set of account proxies. Maps the account which has delegated to the accounts @@ -829,7 +836,7 @@ impl Pallet { proxy_type: &T::ProxyType, index: u16, maybe_when: Option<(BlockNumberFor, u32)>, - ) -> T::AccountId { + ) -> Result { let (height, ext_index) = maybe_when.unwrap_or_else(|| { ( T::BlockNumberProvider::current_block_number(), @@ -845,8 +852,9 @@ impl Pallet { index, ) .using_encoded(blake2_256); - Decode::decode(&mut TrailingZeroInput::new(entropy.as_ref())) - .expect("infinite length input; no invalid inputs for type; qed") + + T::AccountId::decode(&mut TrailingZeroInput::new(entropy.as_ref())) + .map_err(|_| Error::::InvalidDerivedAccountId.into()) } /// Register a proxy account for the delegator that is able to make calls on its behalf. 
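The `pure_account` change above turns the previously panicking entropy decode into a fallible derivation that surfaces `InvalidDerivedAccountId`. A minimal sketch of the same pattern as a standalone helper (the function name and error string are mine, not from the pallet):

```rust
use codec::Decode;
use sp_core::{crypto::AccountId32, hashing::blake2_256};
use sp_runtime::traits::TrailingZeroInput;

/// Derive an account id from arbitrary entropy without `.expect(...)`:
/// a decode failure is returned to the caller instead of panicking in the runtime.
fn derive_account(seed: &[u8]) -> Result<AccountId32, &'static str> {
    let entropy = blake2_256(seed);
    AccountId32::decode(&mut TrailingZeroInput::new(entropy.as_ref()))
        .map_err(|_| "failed to derive a valid account id from entropy")
}
```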
diff --git a/pallets/proxy/src/tests.rs b/pallets/proxy/src/tests.rs index e7e729318c..ea64aef030 100644 --- a/pallets/proxy/src/tests.rs +++ b/pallets/proxy/src/tests.rs @@ -494,7 +494,7 @@ fn filtering_works() { .into(), ); - let derivative_id = Utility::derivative_account_id(1, 0); + let derivative_id = Utility::derivative_account_id(1, 0).unwrap(); Balances::make_free_balance_be(&derivative_id, 1000); let inner = Box::new(call_transfer(6, 1)); @@ -876,7 +876,7 @@ fn pure_works() { 0, 0 )); - let anon = Proxy::pure_account(&1, &ProxyType::Any, 0, None); + let anon = Proxy::pure_account(&1, &ProxyType::Any, 0, None).unwrap(); System::assert_last_event( ProxyEvent::PureCreated { pure: anon, @@ -900,7 +900,7 @@ fn pure_works() { 0, 1 )); - let anon2 = Proxy::pure_account(&2, &ProxyType::Any, 0, None); + let anon2 = Proxy::pure_account(&2, &ProxyType::Any, 0, None).unwrap(); assert_ok!(Proxy::create_pure( RuntimeOrigin::signed(2), ProxyType::Any, diff --git a/pallets/registry/src/benchmarking.rs b/pallets/registry/src/benchmarking.rs index f75ff1726e..7a008a4e40 100644 --- a/pallets/registry/src/benchmarking.rs +++ b/pallets/registry/src/benchmarking.rs @@ -1,6 +1,10 @@ //! Benchmarking setup #![cfg(feature = "runtime-benchmarks")] -#![allow(clippy::arithmetic_side_effects, clippy::unwrap_used)] +#![allow( + clippy::arithmetic_side_effects, + clippy::expect_used, + clippy::unwrap_used +)] use super::*; #[allow(unused)] diff --git a/pallets/registry/src/lib.rs b/pallets/registry/src/lib.rs index 87f164c3e1..9d52e691ef 100644 --- a/pallets/registry/src/lib.rs +++ b/pallets/registry/src/lib.rs @@ -22,6 +22,7 @@ type BalanceOf = <::Currency as fungible::Inspect<::AccountId>>::Balance; #[deny(missing_docs)] #[frame_support::pallet] +#[allow(clippy::expect_used)] pub mod pallet { use super::*; use frame_support::{pallet_prelude::*, traits::tokens::fungible}; @@ -107,6 +108,8 @@ pub mod pallet { #[pallet::call] impl Pallet { + #![deny(clippy::expect_used)] + /// Register an identity for an account. This will overwrite any existing identity. 
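Several pallets in this diff apply the same lint scoping: `clippy::expect_used` is allowed on the macro-expanded pallet module as a whole, but denied inside the dispatchable impl so extrinsic bodies cannot reintroduce `.expect(...)`. A toy, non-pallet illustration of that scoping (module, type, and function names are made up):

```rust
// Outer scope tolerates `.expect(...)` (e.g. in expanded or test-only code) ...
#[allow(clippy::expect_used)]
mod pallet_like {
    pub struct Calls;

    impl Calls {
        // ... but the block holding the "dispatchables" denies it outright.
        #![deny(clippy::expect_used)]

        pub fn dispatchable(input: Option<u32>) -> Result<u32, &'static str> {
            // Using `.expect(...)` here would now fail clippy; errors are returned instead.
            input.ok_or("missing input")
        }
    }
}

fn main() {
    assert_eq!(pallet_like::Calls::dispatchable(Some(7)), Ok(7));
}
```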
#[pallet::call_index(0)] #[pallet::weight(( diff --git a/pallets/registry/src/types.rs b/pallets/registry/src/types.rs index eb19e145d6..0e5cbe3332 100644 --- a/pallets/registry/src/types.rs +++ b/pallets/registry/src/types.rs @@ -71,7 +71,7 @@ impl Decode for Data { n @ 1..=65 => { let mut r: BoundedVec<_, _> = vec![0u8; (n as usize).saturating_sub(1)] .try_into() - .expect("bound checked in match arm condition; qed"); + .map_err(|_| codec::Error::from("bounded vec length exceeds limit"))?; input.read(&mut r[..])?; Data::Raw(r) } diff --git a/pallets/subtensor/Cargo.toml b/pallets/subtensor/Cargo.toml index bb53aa8af7..fdd5e5f9ab 100644 --- a/pallets/subtensor/Cargo.toml +++ b/pallets/subtensor/Cargo.toml @@ -42,11 +42,10 @@ approx.workspace = true subtensor-swap-interface.workspace = true runtime-common.workspace = true subtensor-runtime-common = { workspace = true, features = ["approx"] } +sp-keyring.workspace = true pallet-drand.workspace = true -pallet-subtensor-collective.workspace = true pallet-commitments.workspace = true -pallet-membership.workspace = true hex-literal.workspace = true num-traits = { workspace = true, features = ["libm"] } tle.workspace = true @@ -68,19 +67,20 @@ rand.workspace = true sp-core.workspace = true sp-std.workspace = true pallet-preimage.workspace = true +tracing.workspace = true +tracing-log.workspace = true +tracing-subscriber = { workspace = true, features = ["fmt", "env-filter"] } [features] try-runtime = [ "frame-support/try-runtime", "frame-system/try-runtime", "pallet-balances/try-runtime", - "pallet-membership/try-runtime", "pallet-preimage/try-runtime", "pallet-scheduler/try-runtime", "pallet-transaction-payment/try-runtime", "runtime-common/try-runtime", "sp-runtime/try-runtime", - "pallet-subtensor-collective/try-runtime", "pallet-commitments/try-runtime", "pallet-crowdloan/try-runtime", "pallet-drand/try-runtime", @@ -96,7 +96,6 @@ std = [ "frame-support/std", "frame-system/std", "pallet-balances/std", - "pallet-membership/std", "pallet-preimage/std", "pallet-scheduler/std", "pallet-transaction-payment/std", @@ -107,8 +106,8 @@ std = [ "sp-std/std", "sp-tracing/std", "sp-version/std", + "sp-keyring/std", "subtensor-runtime-common/std", - "pallet-subtensor-collective/std", "pallet-commitments/std", "pallet-crowdloan/std", "pallet-drand/std", @@ -136,13 +135,11 @@ runtime-benchmarks = [ "frame-support/runtime-benchmarks", "frame-system/runtime-benchmarks", "pallet-balances/runtime-benchmarks", - "pallet-membership/runtime-benchmarks", "pallet-preimage/runtime-benchmarks", "pallet-scheduler/runtime-benchmarks", "pallet-transaction-payment/runtime-benchmarks", "runtime-common/runtime-benchmarks", "sp-runtime/runtime-benchmarks", - "pallet-subtensor-collective/runtime-benchmarks", "pallet-commitments/runtime-benchmarks", "pallet-crowdloan/runtime-benchmarks", "pallet-drand/runtime-benchmarks", diff --git a/pallets/subtensor/src/benchmarks.rs b/pallets/subtensor/src/benchmarks.rs index 5b73ac3f41..2a13a18aa0 100644 --- a/pallets/subtensor/src/benchmarks.rs +++ b/pallets/subtensor/src/benchmarks.rs @@ -14,6 +14,7 @@ use sp_runtime::{ BoundedVec, Percent, traits::{BlakeTwo256, Hash}, }; +use sp_std::collections::btree_set::BTreeSet; use sp_std::vec; use subtensor_runtime_common::{AlphaCurrency, NetUid, TaoCurrency}; @@ -657,18 +658,6 @@ mod pallet_benchmarks { _(RawOrigin::Signed(coldkey.clone()), netuid); } - #[benchmark] - fn adjust_senate() { - let coldkey: T::AccountId = whitelisted_caller(); - let hotkey: T::AccountId = account("Alice", 0, 1); - 
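Both `Data::decode` hunks earlier in this diff (commitments and registry `types.rs`) swap an `.expect(...)` on the bounded-vec conversion for a returned decode error. A self-contained sketch of that pattern with hypothetical types (crate aliases follow the usual `codec`/`frame_support` naming):

```rust
use codec::{Decode, Error, Input};
use frame_support::{BoundedVec, traits::ConstU32};

/// Hypothetical length-prefixed raw field bounded to 64 bytes.
struct RawField(BoundedVec<u8, ConstU32<64>>);

impl Decode for RawField {
    fn decode<I: Input>(input: &mut I) -> Result<Self, Error> {
        let len = input.read_byte()? as usize;
        // If `len` exceeds the bound, `try_into` fails; report it as a decode error
        // instead of panicking inside runtime code.
        let mut buf: BoundedVec<u8, ConstU32<64>> = vec![0u8; len]
            .try_into()
            .map_err(|_| Error::from("length exceeds bound"))?;
        input.read(&mut buf[..])?;
        Ok(RawField(buf))
    }
}
```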
- Subtensor::::init_new_network(NetUid::ROOT, 1); - Uids::::insert(NetUid::ROOT, &hotkey, 0u16); - - #[extrinsic_call] - _(RawOrigin::Signed(coldkey.clone()), hotkey.clone()); - } - #[benchmark] fn add_stake_limit() { let netuid = NetUid::from(1); @@ -1577,4 +1566,102 @@ mod pallet_benchmarks { #[extrinsic_call] _(RawOrigin::Signed(coldkey.clone()), netuid, hotkey.clone()); } + #[benchmark] + fn set_root_claim_type() { + let coldkey: T::AccountId = whitelisted_caller(); + + #[extrinsic_call] + _(RawOrigin::Signed(coldkey.clone()), RootClaimTypeEnum::Keep); + } + + #[benchmark] + fn claim_root() { + let coldkey: T::AccountId = whitelisted_caller(); + let hotkey: T::AccountId = account("A", 0, 1); + + let netuid = Subtensor::::get_next_netuid(); + + let lock_cost = Subtensor::::get_network_lock_cost(); + Subtensor::::add_balance_to_coldkey_account(&coldkey, lock_cost.into()); + + assert_ok!(Subtensor::::register_network( + RawOrigin::Signed(coldkey.clone()).into(), + hotkey.clone() + )); + + SubtokenEnabled::::insert(netuid, true); + Subtensor::::set_network_pow_registration_allowed(netuid, true); + NetworkRegistrationAllowed::::insert(netuid, true); + FirstEmissionBlockNumber::::insert(netuid, 0); + + SubnetMechanism::::insert(netuid, 1); + SubnetworkN::::insert(netuid, 1); + Subtensor::::set_tao_weight(u64::MAX); // Set TAO weight to 1.0 + + let root_stake = 100_000_000u64; + Subtensor::::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, + &coldkey, + NetUid::ROOT, + root_stake.into(), + ); + + let initial_total_hotkey_alpha = 100_000_000u64; + Subtensor::::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, + &coldkey, + netuid, + initial_total_hotkey_alpha.into(), + ); + + let pending_root_alpha = 10_000_000u64; + Subtensor::::drain_pending_emission( + netuid, + AlphaCurrency::ZERO, + pending_root_alpha.into(), + AlphaCurrency::ZERO, + ); + + let initial_stake = + Subtensor::::get_stake_for_hotkey_and_coldkey_on_subnet(&hotkey, &coldkey, netuid); + + assert_ok!(Subtensor::::set_root_claim_type( + RawOrigin::Signed(coldkey.clone()).into(), + RootClaimTypeEnum::Keep + ),); + + #[extrinsic_call] + _(RawOrigin::Signed(coldkey.clone()), BTreeSet::from([netuid])); + + // Verification + let new_stake = + Subtensor::::get_stake_for_hotkey_and_coldkey_on_subnet(&hotkey, &coldkey, netuid); + + assert!(new_stake > initial_stake); + } + + #[benchmark] + fn sudo_set_num_root_claims() { + #[extrinsic_call] + _(RawOrigin::Root, 40); + } + + #[benchmark] + fn sudo_set_root_claim_threshold() { + let coldkey: T::AccountId = whitelisted_caller(); + let hotkey: T::AccountId = account("A", 0, 1); + + let netuid = Subtensor::::get_next_netuid(); + + let lock_cost = Subtensor::::get_network_lock_cost(); + Subtensor::::add_balance_to_coldkey_account(&coldkey, lock_cost.into()); + + assert_ok!(Subtensor::::register_network( + RawOrigin::Signed(coldkey.clone()).into(), + hotkey.clone() + )); + + #[extrinsic_call] + _(RawOrigin::Root, netuid, 100); + } } diff --git a/pallets/subtensor/src/coinbase/block_step.rs b/pallets/subtensor/src/coinbase/block_step.rs index 6a96090b05..818235a955 100644 --- a/pallets/subtensor/src/coinbase/block_step.rs +++ b/pallets/subtensor/src/coinbase/block_step.rs @@ -7,7 +7,8 @@ impl Pallet { /// Executes the necessary operations for each block. 
pub fn block_step() -> Result<(), &'static str> { let block_number: u64 = Self::get_current_block_as_u64(); - log::debug!("block_step for block: {block_number:?} "); + let last_block_hash: T::Hash = >::parent_hash(); + // --- 1. Adjust difficulties. Self::adjust_registration_terms_for_networks(); // --- 2. Get the current coinbase emission. @@ -21,6 +22,11 @@ impl Pallet { Self::run_coinbase(block_emission); // --- 4. Set pending children on the epoch; but only after the coinbase has been run. Self::try_set_pending_children(block_number); + // --- 5. Run auto-claim root divs. + Self::run_auto_claim_root_divs(last_block_hash); + // --- 6. Populate root coldkey maps. + Self::populate_root_coldkey_staking_maps(); + // Return ok. Ok(()) } diff --git a/pallets/subtensor/src/coinbase/mod.rs b/pallets/subtensor/src/coinbase/mod.rs index cd95dbb7a7..8d06228593 100644 --- a/pallets/subtensor/src/coinbase/mod.rs +++ b/pallets/subtensor/src/coinbase/mod.rs @@ -4,3 +4,4 @@ pub mod block_step; pub mod reveal_commits; pub mod root; pub mod run_coinbase; +pub mod subnet_emissions; diff --git a/pallets/subtensor/src/coinbase/root.rs b/pallets/subtensor/src/coinbase/root.rs index 6b09c9ed46..642a7f18ac 100644 --- a/pallets/subtensor/src/coinbase/root.rs +++ b/pallets/subtensor/src/coinbase/root.rs @@ -17,9 +17,7 @@ use super::*; use crate::CommitmentsInterface; -use frame_support::{dispatch::Pays, weights::Weight}; use safe_math::*; -use sp_core::Get; use substrate_fixed::types::{I64F64, U96F32}; use subtensor_runtime_common::{AlphaCurrency, Currency, NetUid, NetUidStorageIndex, TaoCurrency}; use subtensor_swap_interface::SwapHandler; @@ -162,16 +160,12 @@ impl Pallet { ); } - // --- 13. Join the Senate if eligible. - // Returns the replaced member, if any. - let _ = Self::join_senate_if_eligible(&hotkey)?; - - // --- 14. Force all members on root to become a delegate. + // --- 13. Force all members on root to become a delegate. if !Self::hotkey_is_delegate(&hotkey) { Self::delegate_hotkey(&hotkey, 11_796); // 18% cut defaulted. } - // --- 15. Update the registration counters for both the block and interval. + // --- 14. Update the registration counters for both the block and interval. #[allow(clippy::arithmetic_side_effects)] // note this RA + clippy false positive is a known substrate issue RegistrationsThisInterval::::mutate(NetUid::ROOT, |val| *val += 1); @@ -179,7 +173,7 @@ impl Pallet { // note this RA + clippy false positive is a known substrate issue RegistrationsThisBlock::::mutate(NetUid::ROOT, |val| *val += 1); - // --- 16. Log and announce the successful registration. + // --- 15. Log and announce the successful registration. log::debug!( "RootRegistered(netuid:{:?} uid:{:?} hotkey:{:?})", NetUid::ROOT, @@ -192,166 +186,10 @@ impl Pallet { hotkey, )); - // --- 17. Finish and return success. - Ok(()) - } - - // Checks if a hotkey should be a member of the Senate, and if so, adds them. - // - // This function is responsible for adding a hotkey to the Senate if they meet the requirements. - // The root key with the least stake is pruned in the event of a filled membership. - // - // # Arguments: - // * 'origin': Represents the origin of the call. - // * 'hotkey': The hotkey that the user wants to register to the root network. - // - // # Returns: - // * 'DispatchResult': A result type indicating success or failure of the registration. 
- // - pub fn do_adjust_senate(origin: T::RuntimeOrigin, hotkey: T::AccountId) -> DispatchResult { - ensure!( - Self::if_subnet_exist(NetUid::ROOT), - Error::::RootNetworkDoesNotExist - ); - - // --- 1. Ensure that the call originates from a signed source and retrieve the caller's account ID (coldkey). - let coldkey = ensure_signed(origin)?; - log::debug!("do_root_register( coldkey: {coldkey:?}, hotkey: {hotkey:?} )"); - - // --- 2. Check if the hotkey is already registered to the root network. If not, error out. - ensure!( - Uids::::contains_key(NetUid::ROOT, &hotkey), - Error::::HotKeyNotRegisteredInSubNet - ); - - // --- 3. Create a network account for the user if it doesn't exist. - Self::create_account_if_non_existent(&coldkey, &hotkey); - - // --- 4. Join the Senate if eligible. - // Returns the replaced member, if any. - let replaced = Self::join_senate_if_eligible(&hotkey)?; - - if replaced.is_none() { - // Not eligible to join the Senate, or no replacement needed. - // Check if the hotkey is *now* a member of the Senate. - // Otherwise, error out. - ensure!( - T::SenateMembers::is_member(&hotkey), - Error::::StakeTooLowForRoot, // Had less stake than the lowest stake incumbent. - ); - } - - // --- 5. Log and announce the successful Senate adjustment. - log::debug!("SenateAdjusted(old_hotkey:{replaced:?} hotkey:{hotkey:?})"); - Self::deposit_event(Event::SenateAdjusted { - old_member: replaced.cloned(), - new_member: hotkey, - }); - - // --- 6. Finish and return success. + // --- 16. Finish and return success. Ok(()) } - // Checks if a hotkey should be a member of the Senate, and if so, adds them. - // - // # Arguments: - // * 'hotkey': The hotkey that the user wants to register to the root network. - // - // # Returns: - // * 'Result, Error>': A result containing the replaced member, if any. - // - fn join_senate_if_eligible(hotkey: &T::AccountId) -> Result, Error> { - // --- 1. Check the hotkey is registered in the root network. - ensure!( - Uids::::contains_key(NetUid::ROOT, hotkey), - Error::::HotKeyNotRegisteredInSubNet - ); - - // --- 2. Verify the hotkey is NOT already a member of the Senate. - ensure!( - !T::SenateMembers::is_member(hotkey), - Error::::HotKeyAlreadyRegisteredInSubNet - ); - - // --- 3. Grab the hotkey's stake. - let current_stake = Self::get_stake_for_hotkey_on_subnet(hotkey, NetUid::ROOT); - - // Add the hotkey to the Senate. - // If we're full, we'll swap out the lowest stake member. - let members = T::SenateMembers::members(); - let last: Option<&T::AccountId> = None; - if (members.len() as u32) == T::SenateMembers::max_members() { - let mut sorted_members = members.clone(); - sorted_members.sort_by(|a, b| { - let a_stake = Self::get_stake_for_hotkey_on_subnet(a, NetUid::ROOT); - let b_stake = Self::get_stake_for_hotkey_on_subnet(b, NetUid::ROOT); - - b_stake.cmp(&a_stake) - }); - - if let Some(last) = sorted_members.last() { - let last_stake = Self::get_stake_for_hotkey_on_subnet(last, NetUid::ROOT); - - if last_stake < current_stake { - // Swap the member with the lowest stake. - T::SenateMembers::swap_member(last, hotkey) - .map_err(|_| Error::::CouldNotJoinSenate)?; - } - } - } else { - T::SenateMembers::add_member(hotkey).map_err(|_| Error::::CouldNotJoinSenate)?; - } - - // Return the swapped out member, if any. - Ok(last) - } - - pub fn do_vote_root( - origin: T::RuntimeOrigin, - hotkey: &T::AccountId, - proposal: T::Hash, - index: u32, - approve: bool, - ) -> DispatchResultWithPostInfo { - // --- 1. 
Ensure that the caller has signed with their coldkey. - let coldkey = ensure_signed(origin.clone())?; - - // --- 2. Ensure that the calling coldkey owns the associated hotkey. - ensure!( - Self::coldkey_owns_hotkey(&coldkey, hotkey), - Error::::NonAssociatedColdKey - ); - - // --- 3. Ensure that the calling hotkey is a member of the senate. - ensure!( - T::SenateMembers::is_member(hotkey), - Error::::NotSenateMember - ); - - // --- 4. Detects first vote of the member in the motion - let is_account_voting_first_time = - T::TriumvirateInterface::add_vote(hotkey, proposal, index, approve)?; - - // --- 5. Calculate extrinsic weight - let members = T::SenateMembers::members(); - let member_count = members.len() as u32; - let vote_weight = Weight::from_parts(20_528_275, 4980) - .saturating_add(Weight::from_parts(48_856, 0).saturating_mul(member_count.into())) - .saturating_add(T::DbWeight::get().reads(2_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) - .saturating_add(Weight::from_parts(0, 128).saturating_mul(member_count.into())); - - Ok(( - Some(vote_weight), - if is_account_voting_first_time { - Pays::No - } else { - Pays::Yes - }, - ) - .into()) - } - /// Facilitates the removal of a user's subnetwork. /// /// # Args: @@ -366,22 +204,24 @@ impl Pallet { /// * 'NotSubnetOwner': If the caller does not own the specified subnet. /// pub fn do_dissolve_network(netuid: NetUid) -> dispatch::DispatchResult { - // 1. --- The network exists? + // --- The network exists? ensure!( Self::if_subnet_exist(netuid) && netuid != NetUid::ROOT, Error::::SubnetNotExists ); - // 2. --- Perform the cleanup before removing the network. + Self::finalize_all_subnet_root_dividends(netuid); + + // --- Perform the cleanup before removing the network. T::SwapInterface::dissolve_all_liquidity_providers(netuid)?; Self::destroy_alpha_in_out_stakes(netuid)?; T::SwapInterface::clear_protocol_liquidity(netuid)?; T::CommitmentsInterface::purge_netuid(netuid); - // 3. --- Remove the network + // --- Remove the network Self::remove_network(netuid); - // 4. --- Emit the NetworkRemoved event + // --- Emit the NetworkRemoved event log::info!("NetworkRemoved( netuid:{netuid:?} )"); Self::deposit_event(Event::NetworkRemoved(netuid)); @@ -446,7 +286,6 @@ impl Pallet { MaxAllowedUids::::remove(netuid); ImmunityPeriod::::remove(netuid); ActivityCutoff::::remove(netuid); - MaxWeightsLimit::::remove(netuid); MinAllowedWeights::::remove(netuid); RegistrationsThisInterval::::remove(netuid); POWRegistrationsThisInterval::::remove(netuid); @@ -476,8 +315,7 @@ impl Pallet { // --- 15. Mechanism step / emissions bookkeeping. FirstEmissionBlockNumber::::remove(netuid); PendingEmission::::remove(netuid); - PendingRootDivs::::remove(netuid); - PendingAlphaSwapped::::remove(netuid); + PendingRootAlphaDivs::::remove(netuid); PendingOwnerCut::::remove(netuid); BlocksSinceLastStep::::remove(netuid); LastMechansimStepBlock::::remove(netuid); @@ -510,6 +348,7 @@ impl Pallet { RAORecycledForRegistration::::remove(netuid); MaxRegistrationsPerBlock::::remove(netuid); WeightsVersionKey::::remove(netuid); + PendingRootAlphaDivs::::remove(netuid); // --- 17. Subtoken / feature flags. 
LiquidAlphaOn::::remove(netuid); @@ -528,7 +367,6 @@ impl Pallet { let _ = NeuronCertificates::::clear_prefix(netuid, u32::MAX, None); let _ = Prometheus::::clear_prefix(netuid, u32::MAX, None); let _ = AlphaDividendsPerSubnet::::clear_prefix(netuid, u32::MAX, None); - let _ = TaoDividendsPerSubnet::::clear_prefix(netuid, u32::MAX, None); let _ = PendingChildKeys::::clear_prefix(netuid, u32::MAX, None); let _ = AssociatedEvmAddress::::clear_prefix(netuid, u32::MAX, None); diff --git a/pallets/subtensor/src/coinbase/run_coinbase.rs b/pallets/subtensor/src/coinbase/run_coinbase.rs index 8748fe8eec..53ac3c6ac5 100644 --- a/pallets/subtensor/src/coinbase/run_coinbase.rs +++ b/pallets/subtensor/src/coinbase/run_coinbase.rs @@ -30,24 +30,10 @@ impl Pallet { .filter(|netuid| *netuid != NetUid::ROOT) .collect(); log::debug!("All subnet netuids: {subnets:?}"); - // Filter out subnets with no first emission block number. - let subnets_to_emit_to: Vec = subnets - .clone() - .into_iter() - .filter(|netuid| FirstEmissionBlockNumber::::get(*netuid).is_some()) - .collect(); - log::debug!("Subnets to emit to: {subnets_to_emit_to:?}"); - // --- 2. Get sum of tao reserves ( in a later version we will switch to prices. ) - let mut acc_total_moving_prices = U96F32::saturating_from_num(0.0); - // Only get price EMA for subnets that we emit to. - for netuid_i in subnets_to_emit_to.iter() { - // Get and update the moving price of each subnet adding the total together. - acc_total_moving_prices = - acc_total_moving_prices.saturating_add(Self::get_moving_alpha_price(*netuid_i)); - } - let total_moving_prices = acc_total_moving_prices; - log::debug!("total_moving_prices: {total_moving_prices:?}"); + // 2. Get subnets to emit to and emissions + let subnet_emissions = Self::get_subnet_block_emissions(&subnets, block_emission); + let subnets_to_emit_to: Vec = subnet_emissions.keys().copied().collect(); // --- 3. Get subnet terms (tao_in, alpha_in, and alpha_out) // Computation is described in detail in the dtao whitepaper. @@ -60,14 +46,11 @@ impl Pallet { // Get subnet price. let price_i = T::SwapInterface::current_alpha_price((*netuid_i).into()); log::debug!("price_i: {price_i:?}"); - // Get subnet TAO. - let moving_price_i: U96F32 = Self::get_moving_alpha_price(*netuid_i); - log::debug!("moving_price_i: {moving_price_i:?}"); // Emission is price over total. - let default_tao_in_i: U96F32 = block_emission - .saturating_mul(moving_price_i) - .checked_div(total_moving_prices) - .unwrap_or(asfloat!(0.0)); + let default_tao_in_i: U96F32 = subnet_emissions + .get(netuid_i) + .copied() + .unwrap_or(asfloat!(0)); log::debug!("default_tao_in_i: {default_tao_in_i:?}"); // Get alpha_emission total let alpha_emission_i: U96F32 = asfloat!( @@ -91,7 +74,7 @@ impl Pallet { let buy_swap_result = Self::swap_tao_for_alpha( *netuid_i, tou64!(difference_tao).into(), - T::SwapInterface::max_price().into(), + T::SwapInterface::max_price(), true, ); if let Ok(buy_swap_result_ok) = buy_swap_result { @@ -191,7 +174,7 @@ impl Pallet { let tao_weight: U96F32 = root_tao.saturating_mul(Self::get_tao_weight()); log::debug!("tao_weight: {tao_weight:?}"); - // --- 6. Seperate out root dividends in alpha and sell them into tao. + // --- 6. Seperate out root dividends in alpha and keep them. // Then accumulate those dividends for later. for netuid_i in subnets_to_emit_to.iter() { // Get remaining alpha out. @@ -214,27 +197,14 @@ impl Pallet { // Get pending alpha as original alpha_out - root_alpha. 
let pending_alpha: U96F32 = alpha_out_i.saturating_sub(root_alpha); log::debug!("pending_alpha: {pending_alpha:?}"); - // Sell root emission through the pool (do not pay fees) + let subsidized: bool = *is_subsidized.get(netuid_i).unwrap_or(&false); if !subsidized { - let swap_result = Self::swap_alpha_for_tao( - *netuid_i, - tou64!(root_alpha).into(), - T::SwapInterface::min_price().into(), - true, - ); - if let Ok(ok_result) = swap_result { - let root_tao: u64 = ok_result.amount_paid_out; - // Accumulate root divs for subnet. - PendingRootDivs::::mutate(*netuid_i, |total| { - *total = total.saturating_add(root_tao.into()); - }); - } + PendingRootAlphaDivs::::mutate(*netuid_i, |total| { + *total = total.saturating_add(tou64!(root_alpha).into()); + }); } - // Accumulate alpha emission in pending. - PendingAlphaSwapped::::mutate(*netuid_i, |total| { - *total = total.saturating_add(tou64!(root_alpha).into()); - }); + // Accumulate alpha emission in pending. PendingEmission::::mutate(*netuid_i, |total| { *total = total.saturating_add(tou64!(pending_alpha).into()); @@ -256,7 +226,9 @@ impl Pallet { log::warn!("Failed to reveal commits for subnet {netuid} due to error: {e:?}"); }; // Pass on subnets that have not reached their tempo. - if Self::should_run_epoch(netuid, current_block) { + if Self::should_run_epoch(netuid, current_block) + && Self::is_epoch_input_state_consistent(netuid) + { // Restart counters. BlocksSinceLastStep::::insert(netuid, 0); LastMechansimStepBlock::::insert(netuid, current_block); @@ -265,26 +237,16 @@ impl Pallet { let pending_alpha = PendingEmission::::get(netuid); PendingEmission::::insert(netuid, AlphaCurrency::ZERO); - // Get and drain the subnet pending root divs. - let pending_tao = PendingRootDivs::::get(netuid); - PendingRootDivs::::insert(netuid, TaoCurrency::ZERO); - - // Get this amount as alpha that was swapped for pending root divs. - let pending_swapped = PendingAlphaSwapped::::get(netuid); - PendingAlphaSwapped::::insert(netuid, AlphaCurrency::ZERO); + // Get and drain the subnet pending root alpha divs. + let pending_root_alpha = PendingRootAlphaDivs::::get(netuid); + PendingRootAlphaDivs::::insert(netuid, AlphaCurrency::ZERO); // Get owner cut and drain. let owner_cut = PendingOwnerCut::::get(netuid); PendingOwnerCut::::insert(netuid, AlphaCurrency::ZERO); - // Drain pending root divs, alpha emission, and owner cut. - Self::drain_pending_emission( - netuid, - pending_alpha, - pending_tao, - pending_swapped, - owner_cut, - ); + // Drain pending root alpha divs, alpha emission, and owner cut. + Self::drain_pending_emission(netuid, pending_alpha, pending_root_alpha, owner_cut); } else { // Increment BlocksSinceLastStep::::mutate(netuid, |total| *total = total.saturating_add(1)); @@ -327,7 +289,7 @@ impl Pallet { pub fn calculate_dividend_distribution( pending_alpha: AlphaCurrency, - pending_tao: TaoCurrency, + pending_root_alpha: AlphaCurrency, tao_weight: U96F32, stake_map: BTreeMap, dividends: BTreeMap, @@ -338,13 +300,13 @@ impl Pallet { log::debug!("dividends: {dividends:?}"); log::debug!("stake_map: {stake_map:?}"); log::debug!("pending_alpha: {pending_alpha:?}"); - log::debug!("pending_tao: {pending_tao:?}"); + log::debug!("pending_root_alpha: {pending_root_alpha:?}"); log::debug!("tao_weight: {tao_weight:?}"); // Setup. let zero: U96F32 = asfloat!(0.0); - // Accumulate root divs and alpha_divs. For each hotkey we compute their + // Accumulate root alpha divs and alpha_divs. 
For each hotkey we compute their // local and root dividend proportion based on their alpha_stake/root_stake let mut total_root_divs: U96F32 = asfloat!(0); let mut total_alpha_divs: U96F32 = asfloat!(0); @@ -390,22 +352,22 @@ impl Pallet { log::debug!("total_root_divs: {total_root_divs:?}"); log::debug!("total_alpha_divs: {total_alpha_divs:?}"); - // Compute root divs as TAO. Here we take - let mut tao_dividends: BTreeMap = BTreeMap::new(); + // Compute root alpha divs. Here we take + let mut root_alpha_dividends: BTreeMap = BTreeMap::new(); for (hotkey, root_divs) in root_dividends { // Root proportion. let root_share: U96F32 = root_divs.checked_div(total_root_divs).unwrap_or(zero); log::debug!("hotkey: {hotkey:?}, root_share: {root_share:?}"); - // Root proportion in TAO - let root_tao: U96F32 = asfloat!(pending_tao).saturating_mul(root_share); - log::debug!("hotkey: {hotkey:?}, root_tao: {root_tao:?}"); + // Root proportion in alpha + let root_alpha: U96F32 = asfloat!(pending_root_alpha).saturating_mul(root_share); + log::debug!("hotkey: {hotkey:?}, root_alpha: {root_alpha:?}"); // Record root dividends as TAO. - tao_dividends + root_alpha_dividends .entry(hotkey) - .and_modify(|e| *e = root_tao) - .or_insert(root_tao); + .and_modify(|e| *e = root_alpha) + .or_insert(root_alpha); } - log::debug!("tao_dividends: {tao_dividends:?}"); + log::debug!("root_alpha_dividends: {root_alpha_dividends:?}"); // Compute proportional alpha divs using the pending alpha and total alpha divs from the epoch. let mut prop_alpha_dividends: BTreeMap = BTreeMap::new(); @@ -425,7 +387,7 @@ impl Pallet { } log::debug!("prop_alpha_dividends: {prop_alpha_dividends:?}"); - (prop_alpha_dividends, tao_dividends) + (prop_alpha_dividends, root_alpha_dividends) } fn get_owner_hotkeys(netuid: NetUid, coldkey: &T::AccountId) -> Vec { @@ -465,7 +427,7 @@ impl Pallet { owner_cut: AlphaCurrency, incentives: BTreeMap, alpha_dividends: BTreeMap, - tao_dividends: BTreeMap, + root_alpha_dividends: BTreeMap, ) { // Distribute the owner cut. if let Ok(owner_coldkey) = SubnetOwner::::try_get(netuid) { @@ -565,37 +527,31 @@ impl Pallet { TotalHotkeyAlphaLastEpoch::::insert(hotkey, netuid, total_hotkey_alpha); } - // Distribute root tao divs. - let _ = TaoDividendsPerSubnet::::clear_prefix(netuid, u32::MAX, None); - for (hotkey, mut root_tao) in tao_dividends { + // Distribute root alpha divs. + for (hotkey, mut root_alpha) in root_alpha_dividends { // Get take prop - let tao_take: U96F32 = Self::get_hotkey_take_float(&hotkey).saturating_mul(root_tao); - // Remove take prop from root_tao - root_tao = root_tao.saturating_sub(tao_take); + let alpha_take: U96F32 = + Self::get_hotkey_take_float(&hotkey).saturating_mul(root_alpha); + // Remove take prop from root_alpha + root_alpha = root_alpha.saturating_sub(alpha_take); // Give the validator their take. - log::debug!("hotkey: {hotkey:?} tao_take: {tao_take:?}"); - let validator_stake = Self::increase_stake_for_hotkey_and_coldkey_on_subnet( + log::debug!("hotkey: {hotkey:?} alpha_take: {alpha_take:?}"); + let _validator_stake = Self::increase_stake_for_hotkey_and_coldkey_on_subnet( &hotkey, &Owner::::get(hotkey.clone()), - NetUid::ROOT, - tou64!(tao_take).into(), + netuid, + tou64!(alpha_take).into(), ); - // Give rest to nominators. 
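A toy, plain-float walkthrough (numbers invented) of the split implemented above: `calculate_dividend_distribution` allocates the drained `PendingRootAlphaDivs` pro rata to each hotkey's root dividends, and the distribution loop then carves out the validator take before crediting the remainder as root-claimable alpha. The runtime uses `U96F32` fixed-point with saturating operations rather than `f64`.

```rust
fn main() {
    let pending_root_alpha: f64 = 10_000_000.0; // drained PendingRootAlphaDivs for the subnet
    let root_divs = [("hk1", 3.0), ("hk2", 1.0)]; // per-hotkey root dividends from the epoch
    let total: f64 = root_divs.iter().map(|(_, d)| d).sum();
    let take = 0.18; // hypothetical validator take (18%)

    for (hotkey, divs) in root_divs {
        let root_alpha = pending_root_alpha * (divs / total); // pro-rata root alpha payout
        let validator_take = take * root_alpha;               // staked to the validator's own coldkey
        let claimable = root_alpha - validator_take;          // accumulated as root-claimable alpha
        println!("{hotkey}: payout={root_alpha}, take={validator_take}, claimable={claimable}");
    }
}
```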
- log::debug!("hotkey: {hotkey:?} root_tao: {root_tao:?}"); - Self::increase_stake_for_hotkey_on_subnet( + + Self::increase_root_claimable_for_hotkey_and_subnet( &hotkey, - NetUid::ROOT, - tou64!(root_tao).into(), + netuid, + tou64!(root_alpha).into(), ); - // Record root dividends for this validator on this subnet. - TaoDividendsPerSubnet::::mutate(netuid, hotkey.clone(), |divs| { - *divs = divs.saturating_add(tou64!(root_tao).into()); - }); - // Update the total TAO on the subnet with root tao dividends. - SubnetTAO::::mutate(NetUid::ROOT, |total| { - *total = total - .saturating_add(validator_stake.to_u64().into()) - .saturating_add(tou64!(root_tao).into()); + + // Record root alpha dividends for this validator on this subnet. + AlphaDividendsPerSubnet::::mutate(netuid, hotkey.clone(), |divs| { + *divs = divs.saturating_add(tou64!(root_alpha).into()); }); } } @@ -617,7 +573,7 @@ impl Pallet { pub fn calculate_dividend_and_incentive_distribution( netuid: NetUid, - pending_tao: TaoCurrency, + pending_root_alpha: AlphaCurrency, pending_validator_alpha: AlphaCurrency, hotkey_emission: Vec<(T::AccountId, AlphaCurrency, AlphaCurrency)>, tao_weight: U96F32, @@ -633,33 +589,32 @@ impl Pallet { let stake_map = Self::get_stake_map(netuid, dividends.keys().collect::>()); - let (alpha_dividends, tao_dividends) = Self::calculate_dividend_distribution( + let (alpha_dividends, root_alpha_dividends) = Self::calculate_dividend_distribution( pending_validator_alpha, - pending_tao, + pending_root_alpha, tao_weight, stake_map, dividends, ); - (incentives, (alpha_dividends, tao_dividends)) + (incentives, (alpha_dividends, root_alpha_dividends)) } pub fn drain_pending_emission( netuid: NetUid, pending_alpha: AlphaCurrency, - pending_tao: TaoCurrency, - pending_swapped: AlphaCurrency, + pending_root_alpha: AlphaCurrency, owner_cut: AlphaCurrency, ) { log::debug!( - "Draining pending alpha emission for netuid {netuid:?}, pending_alpha: {pending_alpha:?}, pending_tao: {pending_tao:?}, pending_swapped: {pending_swapped:?}, owner_cut: {owner_cut:?}" + "Draining pending alpha emission for netuid {netuid:?}, pending_alpha: {pending_alpha:?}, pending_root_alpha: {pending_root_alpha:?}, owner_cut: {owner_cut:?}" ); let tao_weight = Self::get_tao_weight(); // Run the epoch. let hotkey_emission: Vec<(T::AccountId, AlphaCurrency, AlphaCurrency)> = - Self::epoch_with_mechanisms(netuid, pending_alpha.saturating_add(pending_swapped)); + Self::epoch_with_mechanisms(netuid, pending_alpha.saturating_add(pending_root_alpha)); log::debug!("hotkey_emission: {hotkey_emission:?}"); // Compute the pending validator alpha. @@ -676,18 +631,18 @@ impl Pallet { let pending_validator_alpha = if !incentive_sum.is_zero() { pending_alpha - .saturating_add(pending_swapped) + .saturating_add(pending_root_alpha) .saturating_div(2.into()) - .saturating_sub(pending_swapped) + .saturating_sub(pending_root_alpha) } else { // If the incentive is 0, then Validators get 100% of the alpha. 
pending_alpha }; - let (incentives, (alpha_dividends, tao_dividends)) = + let (incentives, (alpha_dividends, root_alpha_dividends)) = Self::calculate_dividend_and_incentive_distribution( netuid, - pending_tao, + pending_root_alpha, pending_validator_alpha, hotkey_emission, tao_weight, @@ -698,7 +653,7 @@ impl Pallet { owner_cut, incentives, alpha_dividends, - tao_dividends, + root_alpha_dividends, ); } diff --git a/pallets/subtensor/src/coinbase/subnet_emissions.rs b/pallets/subtensor/src/coinbase/subnet_emissions.rs new file mode 100644 index 0000000000..80ecf85a42 --- /dev/null +++ b/pallets/subtensor/src/coinbase/subnet_emissions.rs @@ -0,0 +1,217 @@ +use super::*; +use crate::alloc::borrow::ToOwned; +use alloc::collections::BTreeMap; +use safe_math::FixedExt; +use substrate_fixed::transcendental::{exp, ln}; +use substrate_fixed::types::{I32F32, I64F64, U64F64, U96F32}; +use subtensor_swap_interface::SwapHandler; + +impl Pallet { + pub fn get_subnet_block_emissions( + subnets: &[NetUid], + block_emission: U96F32, + ) -> BTreeMap { + // Filter out subnets with no first emission block number. + let subnets_to_emit_to: Vec = subnets + .to_owned() + .clone() + .into_iter() + .filter(|netuid| FirstEmissionBlockNumber::::get(*netuid).is_some()) + .collect(); + log::debug!("Subnets to emit to: {subnets_to_emit_to:?}"); + + // Get subnet TAO emissions. + let shares = Self::get_shares(&subnets_to_emit_to); + log::debug!("Subnet emission shares = {shares:?}"); + + shares + .into_iter() + .map(|(netuid, share)| { + let emission = U64F64::saturating_from_num(block_emission).saturating_mul(share); + (netuid, U96F32::saturating_from_num(emission)) + }) + .collect::>() + } + + pub fn record_tao_inflow(netuid: NetUid, tao: TaoCurrency) { + SubnetTaoFlow::::mutate(netuid, |flow| { + *flow = flow.saturating_add(u64::from(tao) as i64); + }); + } + + pub fn record_tao_outflow(netuid: NetUid, tao: TaoCurrency) { + SubnetTaoFlow::::mutate(netuid, |flow| { + *flow = flow.saturating_sub(u64::from(tao) as i64) + }); + } + + pub fn reset_tao_outflow(netuid: NetUid) { + SubnetTaoFlow::::remove(netuid); + } + + // Update SubnetEmaTaoFlow if needed and return its value for + // the current block + fn get_ema_flow(netuid: NetUid) -> I64F64 { + let current_block: u64 = Self::get_current_block_as_u64(); + + // Calculate net ema flow for the next block + let block_flow = I64F64::saturating_from_num(SubnetTaoFlow::::get(netuid)); + if let Some((last_block, last_block_ema)) = SubnetEmaTaoFlow::::get(netuid) { + // EMA flow already initialized + if last_block != current_block { + let flow_alpha = I64F64::saturating_from_num(FlowEmaSmoothingFactor::::get()) + .safe_div(I64F64::saturating_from_num(i64::MAX)); + let one = I64F64::saturating_from_num(1); + let ema_flow = (one.saturating_sub(flow_alpha)) + .saturating_mul(last_block_ema) + .saturating_add(flow_alpha.saturating_mul(block_flow)); + SubnetEmaTaoFlow::::insert(netuid, (current_block, ema_flow)); + + // Drop the accumulated flow in the last block + Self::reset_tao_outflow(netuid); + ema_flow + } else { + last_block_ema + } + } else { + // Initialize EMA flow, set S(current_block) = min(price, ema_price) * init_factor + let moving_price = I64F64::saturating_from_num(Self::get_moving_alpha_price(netuid)); + let current_price = + I64F64::saturating_from_num(T::SwapInterface::current_alpha_price(netuid)); + let ema_flow = moving_price.min(current_price); + SubnetEmaTaoFlow::::insert(netuid, (current_block, ema_flow)); + ema_flow + } + } + + // Either the minimal EMA 
flow L = min{Si}, or an artificial + // cut off at some higher value A (TaoFlowCutoff) + // L = max {A, min{min{S[i], 0}}} + fn get_lower_limit(ema_flows: &BTreeMap) -> I64F64 { + let zero = I64F64::saturating_from_num(0); + let min_flow = ema_flows + .values() + .map(|flow| flow.min(&zero)) + .min() + .unwrap_or(&zero); + let flow_cutoff = TaoFlowCutoff::::get(); + flow_cutoff.max(*min_flow) + } + + // Estimate the upper value of pow with hardcoded p = 2 + fn pow_estimate(val: U64F64) -> U64F64 { + val.saturating_mul(val) + } + + fn safe_pow(val: U64F64, p: U64F64) -> U64F64 { + // If val is too low so that ln(val) doesn't fit I32F32::MIN, + // return 0 from the function + let zero = U64F64::saturating_from_num(0); + let i32f32_max = I32F32::saturating_from_num(i32::MAX); + if let Ok(val_ln) = ln(I32F32::saturating_from_num(val)) { + // If exp doesn't fit, do the best we can - max out on I32F32::MAX + U64F64::saturating_from_num(I32F32::saturating_from_num( + exp(I32F32::saturating_from_num(p).saturating_mul(val_ln)).unwrap_or(i32f32_max), + )) + } else { + zero + } + } + + fn inplace_scale(offset_flows: &mut BTreeMap) { + let zero = U64F64::saturating_from_num(0); + let flow_max = offset_flows.values().copied().max().unwrap_or(zero); + + // Calculate scale factor so that max becomes 1.0 + let flow_factor = U64F64::saturating_from_num(1).safe_div(flow_max); + + // Upscale/downscale in-place + for flow in offset_flows.values_mut() { + *flow = flow_factor.saturating_mul(*flow); + } + } + + pub(crate) fn inplace_pow_normalize(offset_flows: &mut BTreeMap, p: U64F64) { + // Scale offset flows so that that are no overflows and underflows when we use safe_pow: + // flow_factor * subnet_count * (flow_max ^ p) <= I32F32::MAX + let zero = U64F64::saturating_from_num(0); + let subnet_count = offset_flows.len(); + + // Pre-scale to max 1.0 + Self::inplace_scale(offset_flows); + + // Scale to maximize precision + let flow_max = offset_flows.values().copied().max().unwrap_or(zero); + log::debug!("Offset flow max: {flow_max:?}"); + let flow_max_pow_est = Self::pow_estimate(flow_max); + log::debug!("flow_max_pow_est: {flow_max_pow_est:?}"); + + let max_times_count = + U64F64::saturating_from_num(subnet_count).saturating_mul(flow_max_pow_est); + let i32f32_max = U64F64::saturating_from_num(i32::MAX); + let precision_min = i32f32_max.safe_div(U64F64::saturating_from_num(u64::MAX)); + + // If max_times_count < precision_min, all flow values are too low to fit I32F32. 
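        // A minimal, hedged sketch of the clip-and-normalize scheme that get_lower_limit,
        // inplace_scale and this function implement, written with plain (std) f64 instead
        // of the pallet's fixed-point types; the function and variable names here are
        // illustrative only and not part of the pallet:
        fn flow_shares(ema_flows: &[f64], cutoff: f64, p: f64) -> Vec<f64> {
            // L = max(cutoff, min over i of min(S_i, 0))
            let lower = cutoff.max(ema_flows.iter().cloned().fold(0.0_f64, f64::min));
            // z_i = max(S_i - L, 0) ^ p
            let offsets: Vec<f64> = ema_flows
                .iter()
                .map(|s| (s - lower).max(0.0).powf(p))
                .collect();
            let sum: f64 = offsets.iter().sum();
            // Normalize so the shares sum to 1 (all-zero input stays all-zero).
            offsets
                .iter()
                .map(|z| if sum > 0.0 { z / sum } else { 0.0 })
                .collect()
        }
        // For example, flows {-5, 2, 10} with cutoff -3 and p = 1 clip to offsets
        // {0, 5, 13} and normalize to shares {0, 5/18, 13/18}. The fixed-point code around
        // this point additionally rescales by flow_factor (computed just below) so that the
        // sum of (flow_factor * flow)^p over all subnets stays within I32F32 before
        // safe_pow is applied.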
+ if max_times_count >= precision_min { + let epsilon = + U64F64::saturating_from_num(1).safe_div(U64F64::saturating_from_num(1_000)); + let flow_factor = i32f32_max + .safe_div(max_times_count) + .checked_sqrt(epsilon) + .unwrap_or(zero); + + // Calculate sum + let sum = offset_flows + .clone() + .into_values() + .map(|flow| flow_factor.saturating_mul(flow)) + .map(|scaled_flow| Self::safe_pow(scaled_flow, p)) + .sum(); + log::debug!("Scaled offset flow sum: {sum:?}"); + + // Normalize in-place + for flow in offset_flows.values_mut() { + let scaled_flow = flow_factor.saturating_mul(*flow); + *flow = Self::safe_pow(scaled_flow, p).safe_div(sum); + } + } + } + + // Implementation of shares that uses TAO flow + fn get_shares_flow(subnets_to_emit_to: &[NetUid]) -> BTreeMap { + // Get raw flows + let ema_flows = subnets_to_emit_to + .iter() + .map(|netuid| (*netuid, Self::get_ema_flow(*netuid))) + .collect(); + log::debug!("EMA flows: {ema_flows:?}"); + + // Clip the EMA flow with lower limit L + // z[i] = max{S[i] − L, 0} + let lower_limit = Self::get_lower_limit(&ema_flows); + log::debug!("Lower flow limit: {lower_limit:?}"); + let mut offset_flows = ema_flows + .iter() + .map(|(netuid, flow)| { + ( + *netuid, + if *flow > lower_limit { + U64F64::saturating_from_num(flow.saturating_sub(lower_limit)) + } else { + U64F64::saturating_from_num(0) + }, + ) + }) + .collect::>(); + + // Normalize the set {z[i]}, using an exponent parameter (p ≥ 1) + let p = FlowNormExponent::::get(); + Self::inplace_pow_normalize(&mut offset_flows, p); + offset_flows + } + + // Combines ema price method and tao flow method linearly over FlowHalfLife blocks + pub(crate) fn get_shares(subnets_to_emit_to: &[NetUid]) -> BTreeMap { + Self::get_shares_flow(subnets_to_emit_to) + } +} diff --git a/pallets/subtensor/src/epoch/run_epoch.rs b/pallets/subtensor/src/epoch/run_epoch.rs index 8c6a77c98b..5e4dd1f43e 100644 --- a/pallets/subtensor/src/epoch/run_epoch.rs +++ b/pallets/subtensor/src/epoch/run_epoch.rs @@ -1,6 +1,6 @@ use super::*; use crate::epoch::math::*; -use alloc::collections::BTreeMap; +use alloc::collections::{BTreeMap, BTreeSet}; use frame_support::IterableStorageDoubleMap; use safe_math::*; use sp_std::collections::btree_map::IntoIter; @@ -1169,12 +1169,17 @@ impl Pallet { Bonds::::iter_prefix(netuid_index).filter(|(uid_i, _)| *uid_i < n as u16) { for (uid_j, bonds_ij) in bonds_vec { - bonds - .get_mut(uid_i as usize) - .expect("uid_i is filtered to be less than n; qed") - .push((uid_j, u16_to_fixed(bonds_ij))); + if let Some(row) = bonds.get_mut(uid_i as usize) { + row.push((uid_j, u16_to_fixed(bonds_ij))); + } else { + // If the index is unexpectedly out of bounds, skip and log math error + log::error!( + "math error: bonds row index out of bounds (uid_i={uid_i}, n={n}, netuid_index={netuid_index})", + ); + } } } + bonds } @@ -1187,14 +1192,22 @@ impl Pallet { Bonds::::iter_prefix(netuid_index).filter(|(uid_i, _)| *uid_i < n as u16) { for (uid_j, bonds_ij) in bonds_vec.into_iter().filter(|(uid_j, _)| *uid_j < n as u16) { - *bonds - .get_mut(uid_i as usize) - .expect("uid_i has been filtered to be less than n; qed") - .get_mut(uid_j as usize) - .expect("uid_j has been filtered to be less than n; qed") = - u16_to_fixed(bonds_ij); + if let Some(row) = bonds.get_mut(uid_i as usize) { + if let Some(cell) = row.get_mut(uid_j as usize) { + *cell = u16_to_fixed(bonds_ij); + } else { + log::error!( + "math error: uid_j index out of bounds (uid_i={uid_i}, uid_j={uid_j}, n={n}, netuid_index={netuid_index})" + ); + } 
+ } else { + log::error!( + "math error: uid_i row index out of bounds (uid_i={uid_i}, n={n}, netuid_index={netuid_index})" + ); + } } } + bonds } @@ -1612,4 +1625,20 @@ impl Pallet { Ok(()) } + + /// This function ensures major assumptions made by epoch function: + /// 1. Keys map has no duplicate hotkeys + /// + pub fn is_epoch_input_state_consistent(netuid: NetUid) -> bool { + // Check if Keys map has duplicate hotkeys or uids + let mut hotkey_set: BTreeSet = BTreeSet::new(); + // `iter_prefix` over a double map yields (uid, value) for the given first key. + for (_uid, hotkey) in Keys::::iter_prefix(netuid) { + if !hotkey_set.insert(hotkey) { + log::error!("Duplicate hotkeys detected for netuid {netuid}"); + return false; + } + } + true + } } diff --git a/pallets/subtensor/src/lib.rs b/pallets/subtensor/src/lib.rs index 6d178bb15f..5df8a9c429 100644 --- a/pallets/subtensor/src/lib.rs +++ b/pallets/subtensor/src/lib.rs @@ -23,7 +23,7 @@ use scale_info::TypeInfo; use sp_core::Get; use sp_runtime::{DispatchError, transaction_validity::TransactionValidityError}; use sp_std::marker::PhantomData; -use subtensor_runtime_common::{AlphaCurrency, Currency, NetUid, TaoCurrency}; +use subtensor_runtime_common::{AlphaCurrency, Currency, CurrencyReserve, NetUid, TaoCurrency}; // ============================ // ==== Benchmark Imports ===== @@ -54,6 +54,14 @@ extern crate alloc; pub const MAX_CRV3_COMMIT_SIZE_BYTES: u32 = 5000; +pub const ALPHA_MAP_BATCH_SIZE: usize = 30; + +pub const MAX_NUM_ROOT_CLAIMS: u64 = 50; + +pub const MAX_SUBNET_CLAIMS: usize = 5; + +pub const MAX_ROOT_CLAIM_THRESHOLD: u64 = 10_000_000; + #[allow(deprecated)] #[deny(missing_docs)] #[import_section(errors::errors)] @@ -63,6 +71,7 @@ pub const MAX_CRV3_COMMIT_SIZE_BYTES: u32 = 5000; #[import_section(hooks::hooks)] #[import_section(config::config)] #[frame_support::pallet] +#[allow(clippy::expect_used)] pub mod pallet { use crate::RateLimitKey; use crate::migrations; @@ -81,20 +90,17 @@ pub mod pallet { use runtime_common::prod_or_fast; use sp_core::{ConstU32, H160, H256}; use sp_runtime::traits::{Dispatchable, TrailingZeroInput}; + use sp_std::collections::btree_map::BTreeMap; + use sp_std::collections::btree_set::BTreeSet; use sp_std::collections::vec_deque::VecDeque; use sp_std::vec; use sp_std::vec::Vec; - use substrate_fixed::types::{I96F32, U64F64}; + use substrate_fixed::types::{I64F64, I96F32, U64F64}; use subtensor_macros::freeze_struct; use subtensor_runtime_common::{ AlphaCurrency, Currency, MechId, NetUid, NetUidStorageIndex, TaoCurrency, }; - #[cfg(not(feature = "std"))] - use alloc::boxed::Box; - #[cfg(feature = "std")] - use sp_std::prelude::Box; - /// Origin for the pallet pub type PalletsOriginOf = <::RuntimeOrigin as OriginTrait>::PalletsOrigin; @@ -318,11 +324,63 @@ pub mod pallet { /// ==== Staking + Accounts ==== /// ============================ + #[derive( + Encode, Decode, Default, TypeInfo, Clone, PartialEq, Eq, Debug, DecodeWithMemTracking, + )] + /// Enum for the per-coldkey root claim setting. + pub enum RootClaimTypeEnum { + /// Swap any alpha emission for TAO. + #[default] + Swap, + /// Keep all alpha emission. + Keep, + } + + /// Enum for the per-coldkey root claim frequency setting. + #[derive(Encode, Decode, Default, TypeInfo, Clone, PartialEq, Eq, Debug)] + pub enum RootClaimFrequencyEnum { + /// Claim automatically. + #[default] + Auto, + /// Only claim manually; Never automatically. + Manual, + } + + #[pallet::type_value] + /// Default minimum root claim amount. 
+ /// This is the minimum amount of root claim that can be made. + /// Any amount less than this will not be claimed. + pub fn DefaultMinRootClaimAmount() -> I96F32 { + 500_000u64.into() + } + + #[pallet::type_value] + /// Default root claim type. + /// This is the type of root claim that will be made. + /// This is set by the user. Either swap to TAO or keep as alpha. + pub fn DefaultRootClaimType() -> RootClaimTypeEnum { + RootClaimTypeEnum::default() + } + + #[pallet::type_value] + /// Default number of root claims per claim call. + /// Ideally this is calculated using the number of staking coldkey + /// and the block time. + pub fn DefaultNumRootClaim() -> u64 { + // once per week (+ spare keys for skipped tries) + 5 + } + #[pallet::type_value] /// Default value for zero. pub fn DefaultZeroU64() -> u64 { 0 } + #[pallet::type_value] + /// Default value for zero. + pub fn DefaultZeroI64() -> i64 { + 0 + } /// Default value for Alpha currency. #[pallet::type_value] pub fn DefaultZeroAlpha() -> AlphaCurrency { @@ -415,6 +473,7 @@ pub mod pallet { #[pallet::type_value] /// Default account, derived from zero trailing bytes. pub fn DefaultAccount() -> T::AccountId { + #[allow(clippy::expect_used)] T::AccountId::decode(&mut TrailingZeroInput::zeroes()) .expect("trailing zeroes always produce a valid account ID; qed") } @@ -588,6 +647,7 @@ pub mod pallet { #[pallet::type_value] /// Default value for subnet owner. pub fn DefaultSubnetOwner() -> T::AccountId { + #[allow(clippy::expect_used)] T::AccountId::decode(&mut sp_runtime::traits::TrailingZeroInput::zeroes()) .expect("trailing zeroes always produce a valid account ID; qed") } @@ -652,11 +712,6 @@ pub mod pallet { T::InitialActivityCutoff::get() } #[pallet::type_value] - /// Default maximum weights limit. - pub fn DefaultMaxWeightsLimit() -> u16 { - T::InitialMaxWeightsLimit::get() - } - #[pallet::type_value] /// Default weights version key. pub fn DefaultWeightsVersionKey() -> u64 { T::InitialWeightsVersionKey::get() @@ -749,6 +804,7 @@ pub mod pallet { #[pallet::type_value] /// Default value for key with type T::AccountId derived from trailing zeroes. 
pub fn DefaultKey() -> T::AccountId { + #[allow(clippy::expect_used)] T::AccountId::decode(&mut sp_runtime::traits::TrailingZeroInput::zeroes()) .expect("trailing zeroes always produce a valid account ID; qed") } @@ -791,11 +847,6 @@ pub mod pallet { 4 } #[pallet::type_value] - /// Senate requirements - pub fn DefaultSenateRequiredStakePercentage() -> u64 { - T::InitialSenateRequiredStakePercentage::get() - } - #[pallet::type_value] /// -- ITEM (switches liquid alpha on) pub fn DefaultLiquidAlpha() -> bool { false @@ -858,6 +909,12 @@ pub mod pallet { pub fn DefaultMovingPrice() -> I96F32 { I96F32::saturating_from_num(0.0) } + + #[pallet::type_value] + /// Default subnet root claimable + pub fn DefaultRootClaimable() -> BTreeMap { + Default::default() + } #[pallet::type_value] /// Default value for Share Pool variables pub fn DefaultSharePoolZero() -> U64F64 { @@ -873,6 +930,7 @@ pub mod pallet { #[pallet::type_value] /// Default value for coldkey swap scheduled pub fn DefaultColdkeySwapScheduled() -> (BlockNumberFor, T::AccountId) { + #[allow(clippy::expect_used)] let default_account = T::AccountId::decode(&mut TrailingZeroInput::zeroes()) .expect("trailing zeroes always produce a valid account ID; qed"); (BlockNumberFor::::from(0_u32), default_account) @@ -884,6 +942,12 @@ pub mod pallet { 50400 } + /// Default last Alpha map key for iteration + #[pallet::type_value] + pub fn DefaultAlphaIterationLastKey() -> Option> { + None + } + #[pallet::type_value] /// Default number of terminal blocks in a tempo during which admin operations are prohibited pub fn DefaultAdminFreezeWindow() -> u16 { @@ -934,10 +998,6 @@ pub mod pallet { pub type DissolveNetworkScheduleDuration = StorageValue<_, BlockNumberFor, ValueQuery, DefaultDissolveNetworkScheduleDuration>; - #[pallet::storage] - pub type SenateRequiredStakePercentage = - StorageValue<_, u64, ValueQuery, DefaultSenateRequiredStakePercentage>; - #[pallet::storage] /// --- DMap ( netuid, coldkey ) --> blocknumber | last hotkey swap on network. pub type LastHotkeySwapOnNetuid = StorageDoubleMap< @@ -1051,17 +1111,6 @@ pub mod pallet { ValueQuery, DefaultZeroAlpha, >; - #[pallet::storage] // --- DMAP ( netuid, hotkey ) --> u64 | Last total root dividend paid to this hotkey on this subnet. - pub type TaoDividendsPerSubnet = StorageDoubleMap< - _, - Identity, - NetUid, - Blake2_128Concat, - T::AccountId, - TaoCurrency, - ValueQuery, - DefaultZeroTao, - >; /// ================== /// ==== Coinbase ==== @@ -1216,10 +1265,57 @@ pub mod pallet { U64F64, // Shares ValueQuery, >; + + #[pallet::storage] // Contains last Alpha storage map key to iterate (check first) + pub type AlphaMapLastKey = + StorageValue<_, Option>, ValueQuery, DefaultAlphaIterationLastKey>; + #[pallet::storage] // --- MAP ( netuid ) --> token_symbol | Returns the token symbol for a subnet. pub type TokenSymbol = StorageMap<_, Identity, NetUid, Vec, ValueQuery, DefaultUnicodeVecU8>; + #[pallet::storage] // --- MAP ( netuid ) --> subnet_tao_flow | Returns the TAO inflow-outflow balance. + pub type SubnetTaoFlow = + StorageMap<_, Identity, NetUid, i64, ValueQuery, DefaultZeroI64>; + #[pallet::storage] // --- MAP ( netuid ) --> subnet_ema_tao_flow | Returns the EMA of TAO inflow-outflow balance. + pub type SubnetEmaTaoFlow = + StorageMap<_, Identity, NetUid, (u64, I64F64), OptionQuery>; + #[pallet::type_value] + /// Default value for flow cutoff. 
+ pub fn DefaultFlowCutoff() -> I64F64 { + I64F64::saturating_from_num(0) + } + #[pallet::storage] + /// --- ITEM --> TAO Flow Cutoff + pub type TaoFlowCutoff = StorageValue<_, I64F64, ValueQuery, DefaultFlowCutoff>; + #[pallet::type_value] + /// Default value for flow normalization exponent. + pub fn DefaultFlowNormExponent() -> U64F64 { + U64F64::saturating_from_num(1) + } + #[pallet::storage] + /// --- ITEM --> Flow Normalization Exponent (p) + pub type FlowNormExponent = + StorageValue<_, U64F64, ValueQuery, DefaultFlowNormExponent>; + #[pallet::type_value] + /// Default value for flow EMA smoothing. + pub fn DefaultFlowEmaSmoothingFactor() -> u64 { + // Example values: + // half-life factor value i64 normalized (x 2^63) + // 216000 (1 month) --> 0.000003209009576 ( 29_597_889_189_277) + // 50400 (1 week) --> 0.000013752825678 (126_847_427_788_335) + 29_597_889_189_277 + } + #[pallet::type_value] + /// Flow EMA smoothing half-life. + pub fn FlowHalfLife() -> u64 { + 216_000 + } + #[pallet::storage] + /// --- ITEM --> Flow EMA smoothing factor (flow alpha), u64 normalized + pub type FlowEmaSmoothingFactor = + StorageValue<_, u64, ValueQuery, DefaultFlowEmaSmoothingFactor>; + /// ============================ /// ==== Global Parameters ===== /// ============================ @@ -1337,13 +1433,9 @@ pub mod pallet { /// --- MAP ( netuid ) --> pending_emission pub type PendingEmission = StorageMap<_, Identity, NetUid, AlphaCurrency, ValueQuery, DefaultPendingEmission>; + /// --- MAP ( netuid ) --> pending_root_alpha_emission #[pallet::storage] - /// --- MAP ( netuid ) --> pending_root_emission - pub type PendingRootDivs = - StorageMap<_, Identity, NetUid, TaoCurrency, ValueQuery, DefaultZeroTao>; - #[pallet::storage] - /// --- MAP ( netuid ) --> pending_alpha_swapped - pub type PendingAlphaSwapped = + pub type PendingRootAlphaDivs = StorageMap<_, Identity, NetUid, AlphaCurrency, ValueQuery, DefaultZeroAlpha>; #[pallet::storage] /// --- MAP ( netuid ) --> pending_owner_cut @@ -1411,6 +1503,11 @@ pub mod pallet { /// --- MAP ( netuid ) --> activity_cutoff pub type ActivityCutoff = StorageMap<_, Identity, NetUid, u16, ValueQuery, DefaultActivityCutoff>; + #[pallet::type_value] + /// Default maximum weights limit. + pub fn DefaultMaxWeightsLimit() -> u16 { + u16::MAX + } #[pallet::storage] /// --- MAP ( netuid ) --> max_weight_limit pub type MaxWeightsLimit = @@ -1842,6 +1939,53 @@ pub mod pallet { ValueQuery, >; + #[pallet::storage] // --- MAP(netuid ) --> Root claim threshold + pub type RootClaimableThreshold = + StorageMap<_, Blake2_128Concat, NetUid, I96F32, ValueQuery, DefaultMinRootClaimAmount>; + + #[pallet::storage] // --- MAP ( hot ) --> MAP(netuid ) --> claimable_dividends | Root claimable dividends. + pub type RootClaimable = StorageMap< + _, + Blake2_128Concat, + T::AccountId, + BTreeMap, + ValueQuery, + DefaultRootClaimable, + >; + + // Already claimed root alpha. 
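    // Hedged derivation of DefaultFlowEmaSmoothingFactor above (the numbers reproduce the
    // table in its doc comment): for a half-life of H blocks the per-block smoothing factor
    // is alpha = 1 - 2^(-1/H), approximately ln(2) / H, stored normalized against i64::MAX:
    //   H = 216_000 (about one month): alpha ~= 0.000003209 ~= 29_597_889_189_277 / 2^63
    //   H =  50_400 (about one week) : alpha ~= 0.000013753 ~= 126_847_427_788_335 / 2^63
    // get_ema_flow then applies ema = (1 - alpha) * prev_ema + alpha * block_flow.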
+ #[pallet::storage] + pub type RootClaimed = StorageNMap< + _, + ( + NMapKey, // subnet + NMapKey, // hot + NMapKey, // cold + ), + u128, + ValueQuery, + >; + #[pallet::storage] // -- MAP ( cold ) --> root_claim_type enum + pub type RootClaimType = StorageMap< + _, + Blake2_128Concat, + T::AccountId, + RootClaimTypeEnum, + ValueQuery, + DefaultRootClaimType, + >; + #[pallet::storage] // --- MAP ( u64 ) --> coldkey | Maps coldkeys that have stake to an index + pub type StakingColdkeysByIndex = + StorageMap<_, Identity, u64, T::AccountId, OptionQuery>; + + #[pallet::storage] // --- MAP ( coldkey ) --> index | Maps index that have stake to a coldkey + pub type StakingColdkeys = StorageMap<_, Identity, T::AccountId, u64, OptionQuery>; + + #[pallet::storage] // --- Value --> num_staking_coldkeys + pub type NumStakingColdkeys = StorageValue<_, u64, ValueQuery, DefaultZeroU64>; + #[pallet::storage] // --- Value --> num_root_claim | Number of coldkeys to claim each auto-claim. + pub type NumRootClaim = StorageValue<_, u64, ValueQuery, DefaultNumRootClaim>; + /// ============================= /// ==== EVM related storage ==== /// ============================= @@ -2066,92 +2210,51 @@ use sp_std::vec; use sp_std::vec::Vec; use subtensor_macros::freeze_struct; -/// Trait for managing a membership pallet instance in the runtime -pub trait MemberManagement { - /// Add member - fn add_member(account: &AccountId) -> DispatchResultWithPostInfo; - - /// Remove a member - fn remove_member(account: &AccountId) -> DispatchResultWithPostInfo; - - /// Swap member - fn swap_member(remove: &AccountId, add: &AccountId) -> DispatchResultWithPostInfo; +#[derive(Clone)] +pub struct TaoCurrencyReserve(PhantomData); - /// Get all members - fn members() -> Vec; - - /// Check if an account is apart of the set - fn is_member(account: &AccountId) -> bool; - - /// Get our maximum member count - fn max_members() -> u32; -} - -impl MemberManagement for () { - /// Add member - fn add_member(_: &T) -> DispatchResultWithPostInfo { - Ok(().into()) - } - - // Remove a member - fn remove_member(_: &T) -> DispatchResultWithPostInfo { - Ok(().into()) +impl CurrencyReserve for TaoCurrencyReserve { + #![deny(clippy::expect_used)] + fn reserve(netuid: NetUid) -> TaoCurrency { + SubnetTAO::::get(netuid).saturating_add(SubnetTaoProvided::::get(netuid)) } - // Swap member - fn swap_member(_: &T, _: &T) -> DispatchResultWithPostInfo { - Ok(().into()) + fn increase_provided(netuid: NetUid, tao: TaoCurrency) { + Pallet::::increase_provided_tao_reserve(netuid, tao); } - // Get all members - fn members() -> Vec { - vec![] + fn decrease_provided(netuid: NetUid, tao: TaoCurrency) { + Pallet::::decrease_provided_tao_reserve(netuid, tao); } +} - // Check if an account is apart of the set - fn is_member(_: &T) -> bool { - false - } +#[derive(Clone)] +pub struct AlphaCurrencyReserve(PhantomData); - fn max_members() -> u32 { - 0 +impl CurrencyReserve for AlphaCurrencyReserve { + #![deny(clippy::expect_used)] + fn reserve(netuid: NetUid) -> AlphaCurrency { + SubnetAlphaIn::::get(netuid).saturating_add(SubnetAlphaInProvided::::get(netuid)) } -} - -/// Trait for interacting with collective pallets -pub trait CollectiveInterface { - /// Remove vote - fn remove_votes(hotkey: &AccountId) -> Result; - - fn add_vote( - hotkey: &AccountId, - proposal: Hash, - index: ProposalIndex, - approve: bool, - ) -> Result; -} -impl CollectiveInterface for () { - fn remove_votes(_: &T) -> Result { - Ok(true) + fn increase_provided(netuid: NetUid, alpha: AlphaCurrency) { + 
Pallet::::increase_provided_alpha_reserve(netuid, alpha); } - fn add_vote(_: &T, _: H, _: P, _: bool) -> Result { - Ok(true) + fn decrease_provided(netuid: NetUid, alpha: AlphaCurrency) { + Pallet::::decrease_provided_alpha_reserve(netuid, alpha); } } +pub type GetAlphaForTao = + subtensor_swap_interface::GetAlphaForTao, AlphaCurrencyReserve>; +pub type GetTaoForAlpha = + subtensor_swap_interface::GetTaoForAlpha, TaoCurrencyReserve>; + impl> subtensor_runtime_common::SubnetInfo for Pallet { - fn tao_reserve(netuid: NetUid) -> TaoCurrency { - SubnetTAO::::get(netuid).saturating_add(SubnetTaoProvided::::get(netuid)) - } - - fn alpha_reserve(netuid: NetUid) -> AlphaCurrency { - SubnetAlphaIn::::get(netuid).saturating_add(SubnetAlphaInProvided::::get(netuid)) - } - + #![deny(clippy::expect_used)] fn exists(netuid: NetUid) -> bool { Self::if_subnet_exist(netuid) } @@ -2184,6 +2287,7 @@ impl> impl> subtensor_runtime_common::BalanceOps for Pallet { + #![deny(clippy::expect_used)] fn tao_balance(account_id: &T::AccountId) -> TaoCurrency { pallet_balances::Pallet::::free_balance(account_id).into() } @@ -2248,22 +2352,6 @@ impl> hotkey, coldkey, netuid, alpha, )) } - - fn increase_provided_tao_reserve(netuid: NetUid, tao: TaoCurrency) { - Self::increase_provided_tao_reserve(netuid, tao); - } - - fn decrease_provided_tao_reserve(netuid: NetUid, tao: TaoCurrency) { - Self::decrease_provided_tao_reserve(netuid, tao); - } - - fn increase_provided_alpha_reserve(netuid: NetUid, alpha: AlphaCurrency) { - Self::increase_provided_alpha_reserve(netuid, alpha); - } - - fn decrease_provided_alpha_reserve(netuid: NetUid, alpha: AlphaCurrency) { - Self::decrease_provided_alpha_reserve(netuid, alpha); - } } /// Enum that defines types of rate limited operations for @@ -2271,16 +2359,22 @@ impl> #[derive(Encode, Decode, Clone, PartialEq, Eq, Debug, TypeInfo)] pub enum RateLimitKey { // The setting sn owner hotkey operation is rate limited per netuid + #[codec(index = 0)] SetSNOwnerHotkey(NetUid), // Generic rate limit for subnet-owner hyperparameter updates (per netuid) + #[codec(index = 1)] OwnerHyperparamUpdate(NetUid, Hyperparameter), // Subnet registration rate limit + #[codec(index = 2)] NetworkLastRegistered, // Last tx block limit per account ID + #[codec(index = 3)] LastTxBlock(AccountId), // Last tx block child key limit per account ID + #[codec(index = 4)] LastTxBlockChildKeyTake(AccountId), // Last tx block delegate key limit per account ID + #[codec(index = 5)] LastTxBlockDelegateTake(AccountId), } diff --git a/pallets/subtensor/src/macros/config.rs b/pallets/subtensor/src/macros/config.rs index ea04fe07e2..a735bde1e1 100644 --- a/pallets/subtensor/src/macros/config.rs +++ b/pallets/subtensor/src/macros/config.rs @@ -6,9 +6,9 @@ use frame_support::pallet_macros::pallet_section; #[pallet_section] mod config { - use crate::CommitmentsInterface; + use crate::{CommitmentsInterface, GetAlphaForTao, GetTaoForAlpha}; use pallet_commitments::GetCommitments; - use subtensor_swap_interface::SwapHandler; + use subtensor_swap_interface::{SwapEngine, SwapHandler}; /// Configure the pallet by specifying the parameters and types on which it depends. #[pallet::config] @@ -27,19 +27,10 @@ mod config { + UnfilteredDispatchable + GetDispatchInfo; - /// Origin checking for council majority - type CouncilOrigin: EnsureOrigin; - /// Currency type that will be used to place deposits on neurons type Currency: fungible::Balanced + fungible::Mutate; - /// Senate members with members management functions. 
- type SenateMembers: crate::MemberManagement; - - /// Interface to allow other pallets to control who can register identities - type TriumvirateInterface: crate::CollectiveInterface; - /// The scheduler type used for scheduling delayed calls. type Scheduler: ScheduleAnon< BlockNumberFor, @@ -51,8 +42,10 @@ mod config { /// the preimage to store the call data. type Preimages: QueryPreimage + StorePreimage; - /// Swap interface. - type SwapInterface: SwapHandler; + /// Implementor of `SwapHandler` interface from `subtensor_swap_interface` + type SwapInterface: SwapHandler + + SwapEngine> + + SwapEngine>; /// Interface to allow interacting with the proxy pallet. type ProxyInterface: crate::ProxyInterface; @@ -79,9 +72,6 @@ mod config { /// Initial Emission Ratio. #[pallet::constant] type InitialEmissionValue: Get; - /// Initial max weight limit. - #[pallet::constant] - type InitialMaxWeightsLimit: Get; /// Tempo for each network. #[pallet::constant] type InitialTempo: Get; @@ -193,9 +183,6 @@ mod config { /// Initial childkey take transaction rate limit. #[pallet::constant] type InitialTxChildKeyTakeRateLimit: Get; - /// Initial percentage of total stake required to join senate. - #[pallet::constant] - type InitialSenateRequiredStakePercentage: Get; /// Initial adjustment alpha on burn and pow. #[pallet::constant] type InitialAdjustmentAlpha: Get; diff --git a/pallets/subtensor/src/macros/dispatches.rs b/pallets/subtensor/src/macros/dispatches.rs index 7c73ea760d..d534dbb0c6 100644 --- a/pallets/subtensor/src/macros/dispatches.rs +++ b/pallets/subtensor/src/macros/dispatches.rs @@ -13,11 +13,17 @@ mod dispatches { use sp_runtime::{Percent, traits::Saturating}; use crate::MAX_CRV3_COMMIT_SIZE_BYTES; + use crate::MAX_NUM_ROOT_CLAIMS; + use crate::MAX_ROOT_CLAIM_THRESHOLD; + use crate::MAX_SUBNET_CLAIMS; + /// Dispatchable functions allow users to interact with the pallet and invoke state changes. /// These functions materialize as "extrinsics", which are often compared to transactions. /// Dispatchable functions must be annotated with a weight and must return a DispatchResult. #[pallet::call] impl Pallet { + #![deny(clippy::expect_used)] + /// --- Sets the caller weights for the incentive mechanism. The call can be /// made from the hotkey account so is potentially insecure, however, the damage /// of changing weights is minimal if caught early. This function includes all the @@ -78,7 +84,7 @@ mod dispatches { /// - Attempting to set weights with max value exceeding limit. 
#[pallet::call_index(0)] #[pallet::weight((Weight::from_parts(15_540_000_000, 0) - .saturating_add(T::DbWeight::get().reads(4112_u64)) + .saturating_add(T::DbWeight::get().reads(4111_u64)) .saturating_add(T::DbWeight::get().writes(2)), DispatchClass::Normal, Pays::No))] pub fn set_weights( origin: OriginFor, @@ -706,8 +712,8 @@ mod dispatches { /// #[pallet::call_index(2)] #[pallet::weight((Weight::from_parts(340_800_000, 0) - .saturating_add(T::DbWeight::get().reads(26)) - .saturating_add(T::DbWeight::get().writes(15)), DispatchClass::Normal, Pays::Yes))] + .saturating_add(T::DbWeight::get().reads(25_u64)) + .saturating_add(T::DbWeight::get().writes(16_u64)), DispatchClass::Normal, Pays::Yes))] pub fn add_stake( origin: OriginFor, hotkey: T::AccountId, @@ -1027,21 +1033,12 @@ mod dispatches { /// Register the hotkey to root network #[pallet::call_index(62)] #[pallet::weight((Weight::from_parts(135_900_000, 0) - .saturating_add(T::DbWeight::get().reads(24_u64)) - .saturating_add(T::DbWeight::get().writes(20)), DispatchClass::Normal, Pays::Yes))] + .saturating_add(T::DbWeight::get().reads(22_u64)) + .saturating_add(T::DbWeight::get().writes(19_u64)), DispatchClass::Normal, Pays::Yes))] pub fn root_register(origin: OriginFor, hotkey: T::AccountId) -> DispatchResult { Self::do_root_register(origin, hotkey) } - /// Attempt to adjust the senate membership to include a hotkey - #[pallet::call_index(63)] - #[pallet::weight((Weight::from_parts(58_980_000, 0) - .saturating_add(T::DbWeight::get().reads(7)) - .saturating_add(T::DbWeight::get().writes(4)), DispatchClass::Normal, Pays::Yes))] - pub fn adjust_senate(origin: OriginFor, hotkey: T::AccountId) -> DispatchResult { - Self::do_adjust_senate(origin, hotkey) - } - /// User register a new subnetwork via burning token #[pallet::call_index(7)] #[pallet::weight((Weight::from_parts(354_200_000, 0) @@ -1058,8 +1055,8 @@ mod dispatches { /// The extrinsic for user to change its hotkey in subnet or all subnets. #[pallet::call_index(70)] #[pallet::weight((Weight::from_parts(275_300_000, 0) - .saturating_add(T::DbWeight::get().reads(47)) - .saturating_add(T::DbWeight::get().writes(37)), DispatchClass::Normal, Pays::No))] + .saturating_add(T::DbWeight::get().reads(50_u64)) + .saturating_add(T::DbWeight::get().writes(35_u64)), DispatchClass::Normal, Pays::No))] pub fn swap_hotkey( origin: OriginFor, hotkey: T::AccountId, @@ -1132,7 +1129,7 @@ mod dispatches { /// #[pallet::call_index(75)] #[pallet::weight(( - Weight::from_parts(45_360_000, 0) + Weight::from_parts(66_450_000, 0) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)), DispatchClass::Normal, @@ -1229,93 +1226,12 @@ mod dispatches { Self::set_max_childkey_take(take); Ok(()) } - // ================================== - // ==== Parameter Sudo calls ======== - // ================================== - // Each function sets the corresponding hyper paramter on the specified network - // Args: - // * 'origin': (Origin): - // - The caller, must be sudo. - // - // * `netuid` (u16): - // - The network identifier. - // - // * `hyperparameter value` (u16): - // - The value of the hyper parameter. - // - - /// Authenticates a council proposal and dispatches a function call with `Root` origin. - /// - /// The dispatch origin for this call must be a council majority. - /// - /// ## Complexity - /// - O(1). 
- #[pallet::call_index(51)] - #[pallet::weight((Weight::from_parts(111_100_000, 0), DispatchClass::Operational, Pays::Yes))] - pub fn sudo( - origin: OriginFor, - call: Box, - ) -> DispatchResultWithPostInfo { - // This is a public call, so we ensure that the origin is a council majority. - T::CouncilOrigin::ensure_origin(origin)?; - - let result = call.dispatch_bypass_filter(frame_system::RawOrigin::Root.into()); - let error = result.map(|_| ()).map_err(|e| e.error); - Self::deposit_event(Event::Sudid(error)); - - return result; - } - - /// Authenticates a council proposal and dispatches a function call with `Root` origin. - /// This function does not check the weight of the call, and instead allows the - /// user to specify the weight of the call. - /// - /// The dispatch origin for this call must be a council majority. - /// - /// ## Complexity - /// - O(1). - #[allow(deprecated)] - #[pallet::call_index(52)] - #[pallet::weight((*weight, call.get_dispatch_info().class, Pays::Yes))] - pub fn sudo_unchecked_weight( - origin: OriginFor, - call: Box, - weight: Weight, - ) -> DispatchResultWithPostInfo { - // We dont need to check the weight witness, suppress warning. - // See https://github.com/paritytech/polkadot-sdk/pull/1818. - let _ = weight; - - // This is a public call, so we ensure that the origin is a council majority. - T::CouncilOrigin::ensure_origin(origin)?; - - let result = call.dispatch_bypass_filter(frame_system::RawOrigin::Root.into()); - let error = result.map(|_| ()).map_err(|e| e.error); - Self::deposit_event(Event::Sudid(error)); - - return result; - } - - /// User vote on a proposal - #[pallet::call_index(55)] - #[pallet::weight((Weight::from_parts(111_100_000, 0) - .saturating_add(T::DbWeight::get().reads(0)) - .saturating_add(T::DbWeight::get().writes(0)), DispatchClass::Operational))] - pub fn vote( - origin: OriginFor, - hotkey: T::AccountId, - proposal: T::Hash, - #[pallet::compact] index: u32, - approve: bool, - ) -> DispatchResultWithPostInfo { - Self::do_vote_root(origin, &hotkey, proposal, index, approve) - } /// User register a new subnetwork #[pallet::call_index(59)] #[pallet::weight((Weight::from_parts(235_400_000, 0) .saturating_add(T::DbWeight::get().reads(39_u64)) - .saturating_add(T::DbWeight::get().writes(57_u64)), DispatchClass::Normal, Pays::Yes))] + .saturating_add(T::DbWeight::get().writes(56_u64)), DispatchClass::Normal, Pays::Yes))] pub fn register_network(origin: OriginFor, hotkey: T::AccountId) -> DispatchResult { Self::do_register_network(origin, &hotkey, 1, None) } @@ -1602,7 +1518,7 @@ mod dispatches { #[pallet::call_index(79)] #[pallet::weight((Weight::from_parts(234_200_000, 0) .saturating_add(T::DbWeight::get().reads(38_u64)) - .saturating_add(T::DbWeight::get().writes(56_u64)), DispatchClass::Normal, Pays::Yes))] + .saturating_add(T::DbWeight::get().writes(55_u64)), DispatchClass::Normal, Pays::Yes))] pub fn register_network_with_identity( origin: OriginFor, hotkey: T::AccountId, @@ -1671,8 +1587,8 @@ mod dispatches { /// - Thrown if key has hit transaction rate limit #[pallet::call_index(84)] #[pallet::weight((Weight::from_parts(358_500_000, 0) - .saturating_add(T::DbWeight::get().reads(38_u64)) - .saturating_add(T::DbWeight::get().writes(21_u64)), DispatchClass::Operational, Pays::Yes))] + .saturating_add(T::DbWeight::get().reads(41_u64)) + .saturating_add(T::DbWeight::get().writes(26_u64)), DispatchClass::Normal, Pays::Yes))] pub fn unstake_all_alpha(origin: OriginFor, hotkey: T::AccountId) -> DispatchResult { 
Self::do_unstake_all_alpha(origin, hotkey) } @@ -1786,7 +1702,7 @@ mod dispatches { #[pallet::weight(( Weight::from_parts(351_300_000, 0) .saturating_add(T::DbWeight::get().reads(37_u64)) - .saturating_add(T::DbWeight::get().writes(22_u64)), + .saturating_add(T::DbWeight::get().writes(24_u64)), DispatchClass::Normal, Pays::Yes ))] @@ -1850,8 +1766,8 @@ mod dispatches { /// #[pallet::call_index(88)] #[pallet::weight((Weight::from_parts(402_900_000, 0) - .saturating_add(T::DbWeight::get().reads(26)) - .saturating_add(T::DbWeight::get().writes(15)), DispatchClass::Normal, Pays::Yes))] + .saturating_add(T::DbWeight::get().reads(25_u64)) + .saturating_add(T::DbWeight::get().writes(16_u64)), DispatchClass::Normal, Pays::Yes))] pub fn add_stake_limit( origin: OriginFor, hotkey: T::AccountId, @@ -1914,8 +1830,8 @@ mod dispatches { /// #[pallet::call_index(89)] #[pallet::weight((Weight::from_parts(377_400_000, 0) - .saturating_add(T::DbWeight::get().reads(30_u64)) - .saturating_add(T::DbWeight::get().writes(14)), DispatchClass::Normal, Pays::Yes))] + .saturating_add(T::DbWeight::get().reads(29_u64)) + .saturating_add(T::DbWeight::get().writes(15_u64)), DispatchClass::Normal, Pays::Yes))] pub fn remove_stake_limit( origin: OriginFor, hotkey: T::AccountId, @@ -1959,7 +1875,7 @@ mod dispatches { #[pallet::weight(( Weight::from_parts(411_500_000, 0) .saturating_add(T::DbWeight::get().reads(37_u64)) - .saturating_add(T::DbWeight::get().writes(22_u64)), + .saturating_add(T::DbWeight::get().writes(24_u64)), DispatchClass::Normal, Pays::Yes ))] @@ -2136,8 +2052,8 @@ mod dispatches { /// Without limit_price it remove all the stake similar to `remove_stake` extrinsic #[pallet::call_index(103)] #[pallet::weight((Weight::from_parts(395_300_000, 10142) - .saturating_add(T::DbWeight::get().reads(30_u64)) - .saturating_add(T::DbWeight::get().writes(14_u64)), DispatchClass::Normal, Pays::Yes))] + .saturating_add(T::DbWeight::get().reads(29_u64)) + .saturating_add(T::DbWeight::get().writes(15_u64)), DispatchClass::Normal, Pays::Yes))] pub fn remove_stake_full_limit( origin: T::RuntimeOrigin, hotkey: T::AccountId, @@ -2293,8 +2209,8 @@ mod dispatches { /// - The hotkey account to designate as the autostake destination. #[pallet::call_index(114)] #[pallet::weight((Weight::from_parts(29_930_000, 0) - .saturating_add(T::DbWeight::get().reads(3_u64)) - .saturating_add(T::DbWeight::get().writes(1)), DispatchClass::Normal, Pays::No))] + .saturating_add(T::DbWeight::get().reads(4_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)), DispatchClass::Normal, Pays::Yes))] pub fn set_coldkey_auto_stake_hotkey( origin: T::RuntimeOrigin, netuid: NetUid, @@ -2392,10 +2308,118 @@ mod dispatches { #[pallet::call_index(120)] #[pallet::weight((Weight::from_parts(119_000_000, 0) .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(31)), DispatchClass::Operational, Pays::No))] + .saturating_add(T::DbWeight::get().writes(31)), DispatchClass::Operational, Pays::Yes))] pub fn root_dissolve_network(origin: OriginFor, netuid: NetUid) -> DispatchResult { ensure_root(origin)?; Self::do_dissolve_network(netuid) } + + /// --- Claims the root emissions for a coldkey. + /// # Args: + /// * 'origin': (Origin): + /// - The signature of the caller's coldkey. + /// + /// # Event: + /// * RootClaimed; + /// - On the successfully claiming the root emissions for a coldkey. 
+ /// + /// # Raises: + /// + #[pallet::call_index(121)] + #[pallet::weight(( + Weight::from_parts(117_000_000, 7767) + .saturating_add(T::DbWeight::get().reads(12_u64)) + .saturating_add(T::DbWeight::get().writes(4_u64)), + DispatchClass::Normal, + Pays::Yes + ))] + pub fn claim_root( + origin: OriginFor, + subnets: BTreeSet, + ) -> DispatchResultWithPostInfo { + let coldkey: T::AccountId = ensure_signed(origin)?; + + ensure!(!subnets.is_empty(), Error::::InvalidSubnetNumber); + ensure!( + subnets.len() <= MAX_SUBNET_CLAIMS, + Error::::InvalidSubnetNumber + ); + + Self::maybe_add_coldkey_index(&coldkey); + + let weight = Self::do_root_claim(coldkey, Some(subnets)); + Ok((Some(weight), Pays::Yes).into()) + } + + /// --- Sets the root claim type for the coldkey. + /// # Args: + /// * 'origin': (Origin): + /// - The signature of the caller's coldkey. + /// + /// # Event: + /// * RootClaimTypeSet; + /// - On the successfully setting the root claim type for the coldkey. + /// + #[pallet::call_index(122)] + #[pallet::weight(( + Weight::from_parts(19_420_000, 0).saturating_add(T::DbWeight::get().writes(4_u64)), + DispatchClass::Normal, + Pays::Yes + ))] + pub fn set_root_claim_type( + origin: OriginFor, + new_root_claim_type: RootClaimTypeEnum, + ) -> DispatchResult { + let coldkey: T::AccountId = ensure_signed(origin)?; + + Self::maybe_add_coldkey_index(&coldkey); + + Self::change_root_claim_type(&coldkey, new_root_claim_type); + Ok(()) + } + + /// --- Sets root claim number (sudo extrinsic). Zero disables auto-claim. + #[pallet::call_index(123)] + #[pallet::weight(( + Weight::from_parts(4_000_000, 0).saturating_add(T::DbWeight::get().writes(1_u64)), + DispatchClass::Operational, + Pays::Yes + ))] + pub fn sudo_set_num_root_claims(origin: OriginFor, new_value: u64) -> DispatchResult { + ensure_root(origin)?; + + ensure!( + new_value <= MAX_NUM_ROOT_CLAIMS, + Error::::InvalidNumRootClaim + ); + + NumRootClaim::::set(new_value); + + Ok(()) + } + + /// --- Sets root claim threshold for subnet (sudo or owner origin). + #[pallet::call_index(124)] + #[pallet::weight(( + Weight::from_parts(5_711_000, 0).saturating_add(T::DbWeight::get().writes(1_u64)), + DispatchClass::Operational, + Pays::Yes + ))] + pub fn sudo_set_root_claim_threshold( + origin: OriginFor, + netuid: NetUid, + new_value: u64, + ) -> DispatchResult { + Self::ensure_subnet_owner_or_root(origin, netuid)?; + + ensure!( + new_value <= I96F32::from(MAX_ROOT_CLAIM_THRESHOLD), + Error::::InvalidRootClaimThreshold + ); + + RootClaimableThreshold::::set(netuid, new_value.into()); + + Ok(()) + } } } diff --git a/pallets/subtensor/src/macros/errors.rs b/pallets/subtensor/src/macros/errors.rs index e241ff068f..5a15330075 100644 --- a/pallets/subtensor/src/macros/errors.rs +++ b/pallets/subtensor/src/macros/errors.rs @@ -71,7 +71,7 @@ mod errors { /// The supplied PoW hash seal does not match the supplied work. InvalidSeal, /// The dispatch is attempting to set weights on chain with weight value exceeding the - /// MaxWeightLimit (max_weight_limit subnet hyperparameter). + /// configured max weight limit (currently `u16::MAX`). MaxWeightExceeded, /// The hotkey is attempting to become a delegate when the hotkey is already a delegate. HotKeyAlreadyDelegate, @@ -97,8 +97,6 @@ mod errors { TooManyRegistrationsThisInterval, /// The hotkey is required to be the origin. TransactorAccountShouldBeHotKey, - /// A hotkey is attempting to do something only senate members can do. - NotSenateMember, /// Faucet is disabled. 
FaucetDisabled, /// Not a subnet owner. @@ -130,8 +128,6 @@ mod errors { CommitRevealEnabled, /// Attemtping to commit/reveal weights when disabled. CommitRevealDisabled, - /// Not able to join the senate. - CouldNotJoinSenate, /// Attempting to set alpha high/low while disabled LiquidAlphaDisabled, /// Alpha high is too low: alpha_high > 0.8 @@ -262,5 +258,13 @@ mod errors { UidMapCouldNotBeCleared, /// Trimming would exceed the max immune neurons percentage TrimmingWouldExceedMaxImmunePercentage, + /// Violating the rules of Childkey-Parentkey consistency + ChildParentInconsistency, + /// Invalid number of root claims + InvalidNumRootClaim, + /// Invalid value of root claim threshold + InvalidRootClaimThreshold, + /// Exceeded subnet limit number or zero. + InvalidSubnetNumber, } } diff --git a/pallets/subtensor/src/macros/events.rs b/pallets/subtensor/src/macros/events.rs index c34219d532..c2931024ee 100644 --- a/pallets/subtensor/src/macros/events.rs +++ b/pallets/subtensor/src/macros/events.rs @@ -50,7 +50,8 @@ mod events { BulkBalancesSet(u16, u16), /// max allowed uids has been set for a subnetwork. MaxAllowedUidsSet(NetUid, u16), - /// the max weight limit has been set for a subnetwork. + #[deprecated(note = "Max weight limit is now a constant and this event is unused")] + /// DEPRECATED: max weight limit updates are no longer supported. MaxWeightLimitSet(NetUid, u16), /// the difficulty has been set for a subnet. DifficultySet(NetUid, u64), @@ -136,8 +137,6 @@ mod events { RAORecycledForRegistrationSet(NetUid, TaoCurrency), /// min stake is set for validators to set weights. StakeThresholdSet(u64), - /// setting the minimum required stake amount for senate registration. - SenateRequiredStakePercentSet(u64), /// setting the adjustment alpha on a subnet. AdjustmentAlphaSet(NetUid, u64), /// the faucet it called on the test net. @@ -171,13 +170,6 @@ mod events { MaxDelegateTakeSet(u16), /// minimum delegate take is set by sudo/admin transaction MinDelegateTakeSet(u16), - /// a member of the senate is adjusted - SenateAdjusted { - /// the account ID of the old senate member, if any - old_member: Option, - /// the account ID of the new senate member - new_member: T::AccountId, - }, /// A coldkey has been swapped ColdkeySwapped { /// the account ID of old coldkey @@ -456,5 +448,24 @@ mod events { /// The account ID of the hotkey. hotkey: T::AccountId, }, + + /// Root emissions have been claimed for a coldkey on all subnets and hotkeys. + /// Parameters: + /// (coldkey) + RootClaimed { + /// Claim coldkey + coldkey: T::AccountId, + }, + + /// Root claim type for a coldkey has been set. + /// Parameters: + /// (coldkey, u8) + RootClaimTypeSet { + /// Claim coldkey + coldkey: T::AccountId, + + /// Claim type + root_claim_type: RootClaimTypeEnum, + }, } } diff --git a/pallets/subtensor/src/macros/genesis.rs b/pallets/subtensor/src/macros/genesis.rs index b9378e38f6..7bf8ba2a53 100644 --- a/pallets/subtensor/src/macros/genesis.rs +++ b/pallets/subtensor/src/macros/genesis.rs @@ -4,10 +4,29 @@ use frame_support::pallet_macros::pallet_section; /// This can later be imported into the pallet using [`import_section`]. 
#[pallet_section] mod genesis { + use sp_core::crypto::Pair; + use sp_core::sr25519::Pair as Sr25519Pair; #[pallet::genesis_build] impl BuildGenesisConfig for GenesisConfig { fn build(&self) { + // Alice's public key + let alice_bytes = sp_keyring::Sr25519Keyring::Alice.public(); + + // Create Alice's hotkey from seed string + let pair = Sr25519Pair::from_string("//Alice_hk", None) + .expect("Alice hotkey pair should be valid"); + let alice_hk_bytes = pair.public().0; + + let alice_account = + T::AccountId::decode(&mut &alice_bytes[..]).expect("Alice account should decode"); + let alice_hk_account = T::AccountId::decode(&mut &alice_hk_bytes[..]) + .expect("Alice hotkey account should decode"); + + let subnet_root_owner = prod_or_fast!(DefaultSubnetOwner::::get(), alice_account); + let subnet_root_owner_hotkey = + prod_or_fast!(DefaultSubnetOwner::::get(), alice_hk_account); + // Set initial total issuance from balances TotalIssuance::::put(self.balances_issuance); @@ -17,6 +36,12 @@ mod genesis { // Increment the number of total networks. TotalNetworks::::mutate(|n| *n = n.saturating_add(1)); + // Set the root network owner. + SubnetOwner::::insert(NetUid::ROOT, subnet_root_owner); + + // Set the root network owner hotkey. + SubnetOwnerHotkey::::insert(NetUid::ROOT, subnet_root_owner_hotkey); + // Set the number of validators to 1. SubnetworkN::::insert(NetUid::ROOT, 0); @@ -29,9 +54,6 @@ mod genesis { // Set the min allowed weights to zero, no weights restrictions. MinAllowedWeights::::insert(NetUid::ROOT, 0); - // Set the max weight limit to infitiy, no weight restrictions. - MaxWeightsLimit::::insert(NetUid::ROOT, u16::MAX); - // Add default root tempo. Tempo::::insert(NetUid::ROOT, 100); @@ -59,7 +81,6 @@ mod genesis { MaxAllowedUids::::insert(netuid, 256u16); MaxAllowedValidators::::insert(netuid, 64u16); MinAllowedWeights::::insert(netuid, 0); - MaxWeightsLimit::::insert(netuid, u16::MAX); Tempo::::insert(netuid, 100); NetworkRegistrationAllowed::::insert(netuid, true); SubnetOwner::::insert(netuid, hotkey.clone()); diff --git a/pallets/subtensor/src/macros/hooks.rs b/pallets/subtensor/src/macros/hooks.rs index 4b4dfcc781..87a87e911c 100644 --- a/pallets/subtensor/src/macros/hooks.rs +++ b/pallets/subtensor/src/macros/hooks.rs @@ -138,6 +138,8 @@ mod hooks { .saturating_add(migrations::migrate_fix_root_tao_and_alpha_in::migrate_fix_root_tao_and_alpha_in::()) // Migrate last block rate limiting storage items .saturating_add(migrations::migrate_rate_limiting_last_blocks::migrate_obsolete_rate_limiting_last_blocks_storage::()) + // Re-encode rate limit keys after introducing OwnerHyperparamUpdate variant + .saturating_add(migrations::migrate_rate_limit_keys::migrate_rate_limit_keys::()) // Migrate remove network modality .saturating_add(migrations::migrate_remove_network_modality::migrate_remove_network_modality::()) // Migrate Immunity Period @@ -150,8 +152,14 @@ mod hooks { .saturating_add(migrations::migrate_subnet_locked::migrate_restore_subnet_locked::()) // Migrate subnet burn cost to 2500 .saturating_add(migrations::migrate_network_lock_cost_2500::migrate_network_lock_cost_2500::()) + // Cleanup child/parent keys + .saturating_add(migrations::migrate_fix_childkeys::migrate_fix_childkeys::()) // Migrate AutoStakeDestinationColdkeys - .saturating_add(migrations::migrate_auto_stake_destination::migrate_auto_stake_destination::()); + .saturating_add(migrations::migrate_auto_stake_destination::migrate_auto_stake_destination::()) + // Migrate Kappa to default (0.5) + 
.saturating_add(migrations::migrate_kappa_map_to_default::migrate_kappa_map_to_default::()) + // Remove obsolete map entries + .saturating_add(migrations::migrate_remove_tao_dividends::migrate_remove_tao_dividends::()); weight } diff --git a/pallets/subtensor/src/migrations/migrate_create_root_network.rs b/pallets/subtensor/src/migrations/migrate_create_root_network.rs index 24e2251c38..6cca34f815 100644 --- a/pallets/subtensor/src/migrations/migrate_create_root_network.rs +++ b/pallets/subtensor/src/migrations/migrate_create_root_network.rs @@ -2,7 +2,7 @@ use super::*; use frame_support::{ pallet_prelude::{Identity, OptionQuery}, storage_alias, - traits::{DefensiveResult, Get}, + traits::Get, weights::Weight, }; use sp_std::vec::Vec; @@ -64,9 +64,6 @@ pub fn migrate_create_root_network() -> Weight { // Set the minimum allowed weights to zero (no weight restrictions) MinAllowedWeights::::insert(NetUid::ROOT, 0); - // Set the maximum weight limit to u16::MAX (no weight restrictions) - MaxWeightsLimit::::insert(NetUid::ROOT, u16::MAX); - // Set default root tempo Tempo::::insert(NetUid::ROOT, 100); @@ -80,18 +77,11 @@ pub fn migrate_create_root_network() -> Weight { // WeightsSetRateLimit::::insert(NetUid::ROOT, 7200); // Accrue weight for database writes - weight.saturating_accrue(T::DbWeight::get().writes(8)); - - // Remove all existing senate members - for hotkey_i in T::SenateMembers::members().iter() { - // Remove votes associated with the member - T::TriumvirateInterface::remove_votes(hotkey_i).defensive_ok(); - // Remove the member from the senate - T::SenateMembers::remove_member(hotkey_i).defensive_ok(); + weight.saturating_accrue(T::DbWeight::get().writes(7)); - // Accrue weight for database operations - weight.saturating_accrue(T::DbWeight::get().reads_writes(2, 2)); - } + // Remove all existing triumvirate votes and senate members + remove_prefix::("Triumvirate", "Votes", &mut weight); + remove_prefix::("SenateMembers", "Members", &mut weight); log::info!("Migrated create root network"); weight diff --git a/pallets/subtensor/src/migrations/migrate_delete_subnet_21.rs b/pallets/subtensor/src/migrations/migrate_delete_subnet_21.rs index c4e79692ac..260904395d 100644 --- a/pallets/subtensor/src/migrations/migrate_delete_subnet_21.rs +++ b/pallets/subtensor/src/migrations/migrate_delete_subnet_21.rs @@ -100,13 +100,12 @@ pub fn migrate_delete_subnet_21() -> Weight { MaxAllowedUids::::remove(netuid); ImmunityPeriod::::remove(netuid); ActivityCutoff::::remove(netuid); - MaxWeightsLimit::::remove(netuid); MinAllowedWeights::::remove(netuid); RegistrationsThisInterval::::remove(netuid); POWRegistrationsThisInterval::::remove(netuid); BurnRegistrationsThisInterval::::remove(netuid); - weight.saturating_accrue(T::DbWeight::get().writes(12)); + weight.saturating_accrue(T::DbWeight::get().writes(10)); // Update storage version StorageVersion::new(new_storage_version).put::>(); diff --git a/pallets/subtensor/src/migrations/migrate_delete_subnet_3.rs b/pallets/subtensor/src/migrations/migrate_delete_subnet_3.rs index 3470004362..0cda0f0e06 100644 --- a/pallets/subtensor/src/migrations/migrate_delete_subnet_3.rs +++ b/pallets/subtensor/src/migrations/migrate_delete_subnet_3.rs @@ -102,13 +102,12 @@ pub fn migrate_delete_subnet_3() -> Weight { MaxAllowedUids::::remove(netuid); ImmunityPeriod::::remove(netuid); ActivityCutoff::::remove(netuid); - MaxWeightsLimit::::remove(netuid); MinAllowedWeights::::remove(netuid); RegistrationsThisInterval::::remove(netuid); 
POWRegistrationsThisInterval::::remove(netuid); BurnRegistrationsThisInterval::::remove(netuid); - weight.saturating_accrue(T::DbWeight::get().writes(12)); + weight.saturating_accrue(T::DbWeight::get().writes(10)); // Update storage version StorageVersion::new(new_storage_version).put::>(); diff --git a/pallets/subtensor/src/migrations/migrate_fix_childkeys.rs b/pallets/subtensor/src/migrations/migrate_fix_childkeys.rs new file mode 100644 index 0000000000..cc0b6988e0 --- /dev/null +++ b/pallets/subtensor/src/migrations/migrate_fix_childkeys.rs @@ -0,0 +1,40 @@ +use super::*; +use alloc::string::String; + +pub fn migrate_fix_childkeys() -> Weight { + let migration_name = b"migrate_fix_childkeys".to_vec(); + let mut weight = T::DbWeight::get().reads(1); + + if HasMigrationRun::::get(&migration_name) { + log::info!( + "Migration '{:?}' has already run. Skipping.", + String::from_utf8_lossy(&migration_name) + ); + return weight; + } + + log::info!( + "Running migration '{}'", + String::from_utf8_lossy(&migration_name) + ); + + //////////////////////////////////////////////////////// + // Actual migration + + Pallet::::clean_zero_childkey_vectors(&mut weight); + Pallet::::clean_zero_parentkey_vectors(&mut weight); + Pallet::::clean_self_loops(&mut weight); + Pallet::::repair_child_parent_consistency(&mut weight); + + //////////////////////////////////////////////////////// + + HasMigrationRun::::insert(&migration_name, true); + weight = weight.saturating_add(T::DbWeight::get().writes(1)); + + log::info!( + target: "runtime", + "Migration '{}' completed successfully.", + String::from_utf8_lossy(&migration_name) + ); + weight +} diff --git a/pallets/subtensor/src/migrations/migrate_kappa_map_to_default.rs b/pallets/subtensor/src/migrations/migrate_kappa_map_to_default.rs new file mode 100644 index 0000000000..69b82cae0b --- /dev/null +++ b/pallets/subtensor/src/migrations/migrate_kappa_map_to_default.rs @@ -0,0 +1,54 @@ +use super::*; +use frame_support::{traits::Get, weights::Weight}; +use log; +use scale_info::prelude::string::String; + +pub fn migrate_kappa_map_to_default() -> Weight { + let mig_name: Vec = b"kappa_map_to_default".to_vec(); + let mig_name_str = String::from_utf8_lossy(&mig_name); + + // 1 read for the HasMigrationRun flag + let mut total_weight = T::DbWeight::get().reads(1); + + // Run once guard + if HasMigrationRun::::get(&mig_name) { + log::info!("Migration '{mig_name_str}' already executed - skipping"); + return total_weight; + } + + log::info!("Running migration '{mig_name_str}'"); + + let target: u16 = DefaultKappa::::get(); + + let mut reads: u64 = 0; + let mut writes: u64 = 0; + let mut visited: u64 = 0; + let mut updated: u64 = 0; + let mut unchanged: u64 = 0; + + for (netuid, current) in Kappa::::iter() { + visited = visited.saturating_add(1); + reads = reads.saturating_add(1); + + if current != target { + Kappa::::insert(netuid, target); + writes = writes.saturating_add(1); + updated = updated.saturating_add(1); + } else { + unchanged = unchanged.saturating_add(1); + } + } + + total_weight = total_weight.saturating_add(T::DbWeight::get().reads_writes(reads, writes)); + + log::info!( + "Kappa migration summary: visited={visited}, updated={updated}, unchanged={unchanged}, target_default={target}" + ); + + HasMigrationRun::::insert(&mig_name, true); + total_weight = total_weight.saturating_add(T::DbWeight::get().writes(1)); + + log::info!("Migration '{mig_name_str}' completed"); + + total_weight +} diff --git 
a/pallets/subtensor/src/migrations/migrate_rate_limit_keys.rs b/pallets/subtensor/src/migrations/migrate_rate_limit_keys.rs new file mode 100644 index 0000000000..e6e331fb63 --- /dev/null +++ b/pallets/subtensor/src/migrations/migrate_rate_limit_keys.rs @@ -0,0 +1,241 @@ +use alloc::string::String; +use codec::{Decode, Encode}; +use frame_support::traits::Get; +use frame_support::weights::Weight; +use sp_io::hashing::twox_128; +use sp_io::storage; +use sp_std::{collections::btree_set::BTreeSet, vec::Vec}; +use subtensor_runtime_common::NetUid; + +use crate::{ + ChildKeys, Config, Delegates, HasMigrationRun, LastRateLimitedBlock, ParentKeys, + PendingChildKeys, RateLimitKey, +}; + +const MIGRATION_NAME: &[u8] = b"migrate_rate_limit_keys"; + +#[allow(dead_code)] +#[derive(Decode)] +enum RateLimitKeyV0 { + SetSNOwnerHotkey(NetUid), + NetworkLastRegistered, + LastTxBlock(AccountId), + LastTxBlockChildKeyTake(AccountId), + LastTxBlockDelegateTake(AccountId), +} + +pub fn migrate_rate_limit_keys() -> Weight +where + T::AccountId: Ord + Clone, +{ + let mut weight = T::DbWeight::get().reads(1); + + if HasMigrationRun::::get(MIGRATION_NAME) { + log::info!( + "Migration '{}' already executed - skipping", + String::from_utf8_lossy(MIGRATION_NAME) + ); + return weight; + } + + log::info!( + "Running migration '{}'", + String::from_utf8_lossy(MIGRATION_NAME) + ); + + let (child_accounts, child_weight) = collect_child_related_accounts::(); + let (delegate_accounts, delegate_weight) = collect_delegate_accounts::(); + weight = weight.saturating_add(child_weight); + weight = weight.saturating_add(delegate_weight); + + let prefix = storage_prefix("SubtensorModule", "LastRateLimitedBlock"); + let mut cursor = prefix.clone(); + let mut entries = Vec::new(); + + while let Some(next_key) = storage::next_key(&cursor) { + if !next_key.starts_with(&prefix) { + break; + } + if let Some(value) = storage::get(&next_key) { + entries.push((next_key.clone(), value)); + } + cursor = next_key; + } + + weight = weight.saturating_add(T::DbWeight::get().reads(entries.len() as u64)); + + let mut migrated_network = 0u64; + let mut migrated_last_tx = 0u64; + let mut migrated_child_take = 0u64; + let mut migrated_delegate_take = 0u64; + + for (old_storage_key, value_bytes) in entries { + if value_bytes.is_empty() { + continue; + } + + let Some(encoded_key) = old_storage_key.get(prefix.len()..) else { + continue; + }; + if encoded_key.is_empty() { + continue; + } + + let Some(decoded_legacy) = decode_legacy::(encoded_key) else { + // Unknown entry – skip to avoid clobbering valid data. 
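+                // Hedged illustration (comments only, not executed; mirrors `legacy_to_modern` below,
+                // account values hypothetical):
+                //   RateLimitKeyV0::NetworkLastRegistered      -> RateLimitKey::NetworkLastRegistered
+                //   RateLimitKeyV0::LastTxBlock(acct)          -> RateLimitKey::LastTxBlock(acct)
+                //   RateLimitKeyV0::LastTxBlockChildKeyTake(a) -> kept only if `a` appears in ChildKeys / ParentKeys / PendingChildKeys
+                //   RateLimitKeyV0::LastTxBlockDelegateTake(a) -> kept only if `a` appears in Delegates
+                //   RateLimitKeyV0::SetSNOwnerHotkey(netuid)   -> dropped (no modern counterpart)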
+ continue; + }; + + let legacy_value = match decode_value(&value_bytes) { + Some(v) => v, + None => continue, + }; + + let Some(modern_key) = + legacy_to_modern(decoded_legacy, &child_accounts, &delegate_accounts) + else { + continue; + }; + let new_storage_key = LastRateLimitedBlock::::hashed_key_for(&modern_key); + weight = weight.saturating_add(T::DbWeight::get().reads(1)); + + let merged_value = storage::get(&new_storage_key) + .and_then(|data| decode_value(&data)) + .map_or(legacy_value, |current| { + core::cmp::max(current, legacy_value) + }); + + storage::set(&new_storage_key, &merged_value.encode()); + if new_storage_key != old_storage_key { + storage::clear(&old_storage_key); + weight = weight.saturating_add(T::DbWeight::get().writes(1)); + } + + weight = weight.saturating_add(T::DbWeight::get().writes(1)); + match &modern_key { + RateLimitKey::NetworkLastRegistered => { + migrated_network = migrated_network.saturating_add(1); + } + RateLimitKey::LastTxBlock(_) => { + migrated_last_tx = migrated_last_tx.saturating_add(1); + } + RateLimitKey::LastTxBlockChildKeyTake(_) => { + migrated_child_take = migrated_child_take.saturating_add(1); + } + RateLimitKey::LastTxBlockDelegateTake(_) => { + migrated_delegate_take = migrated_delegate_take.saturating_add(1); + } + _ => {} + } + } + + HasMigrationRun::::insert(MIGRATION_NAME, true); + weight = weight.saturating_add(T::DbWeight::get().writes(1)); + + log::info!( + "Migration '{}' completed. network={}, last_tx={}, child_take={}, delegate_take={}", + String::from_utf8_lossy(MIGRATION_NAME), + migrated_network, + migrated_last_tx, + migrated_child_take, + migrated_delegate_take + ); + + weight +} + +fn storage_prefix(pallet: &str, storage: &str) -> Vec { + let pallet_hash = twox_128(pallet.as_bytes()); + let storage_hash = twox_128(storage.as_bytes()); + [pallet_hash, storage_hash].concat() +} + +fn decode_legacy(bytes: &[u8]) -> Option> { + let mut slice = bytes; + let decoded = RateLimitKeyV0::::decode(&mut slice).ok()?; + if slice.is_empty() { + Some(decoded) + } else { + None + } +} + +fn decode_value(bytes: &[u8]) -> Option { + let mut slice = bytes; + u64::decode(&mut slice).ok() +} + +fn legacy_to_modern( + legacy: RateLimitKeyV0, + child_accounts: &BTreeSet, + delegate_accounts: &BTreeSet, +) -> Option> { + match legacy { + RateLimitKeyV0::SetSNOwnerHotkey(_) => None, + RateLimitKeyV0::NetworkLastRegistered => Some(RateLimitKey::NetworkLastRegistered), + RateLimitKeyV0::LastTxBlock(account) => Some(RateLimitKey::LastTxBlock(account)), + RateLimitKeyV0::LastTxBlockChildKeyTake(account) => { + if child_accounts.contains(&account) { + Some(RateLimitKey::LastTxBlockChildKeyTake(account)) + } else { + None + } + } + RateLimitKeyV0::LastTxBlockDelegateTake(account) => { + if delegate_accounts.contains(&account) { + Some(RateLimitKey::LastTxBlockDelegateTake(account)) + } else { + None + } + } + } +} + +fn collect_child_related_accounts() -> (BTreeSet, Weight) +where + T::AccountId: Ord + Clone, +{ + let mut accounts = BTreeSet::new(); + let mut reads = 0u64; + + for (parent, _, children) in ChildKeys::::iter() { + accounts.insert(parent.clone()); + for (_, child) in children { + accounts.insert(child.clone()); + } + reads = reads.saturating_add(1); + } + + for (_, parent, (children, _)) in PendingChildKeys::::iter() { + accounts.insert(parent.clone()); + for (_, child) in children { + accounts.insert(child.clone()); + } + reads = reads.saturating_add(1); + } + + for (child, _, parents) in ParentKeys::::iter() { + 
accounts.insert(child.clone()); + for (_, parent) in parents { + accounts.insert(parent.clone()); + } + reads = reads.saturating_add(1); + } + + (accounts, T::DbWeight::get().reads(reads)) +} + +fn collect_delegate_accounts() -> (BTreeSet, Weight) +where + T::AccountId: Ord + Clone, +{ + let mut accounts = BTreeSet::new(); + let mut reads = 0u64; + + for (account, _) in Delegates::::iter() { + accounts.insert(account.clone()); + reads = reads.saturating_add(1); + } + + (accounts, T::DbWeight::get().reads(reads)) +} diff --git a/pallets/subtensor/src/migrations/migrate_remove_tao_dividends.rs b/pallets/subtensor/src/migrations/migrate_remove_tao_dividends.rs new file mode 100644 index 0000000000..b93df22339 --- /dev/null +++ b/pallets/subtensor/src/migrations/migrate_remove_tao_dividends.rs @@ -0,0 +1,64 @@ +use crate::{Config, HasMigrationRun}; +use alloc::string::String; +use frame_support::pallet_prelude::Weight; +use frame_support::traits::Get; +use sp_io::KillStorageResult; +use sp_io::hashing::twox_128; +use sp_io::storage::clear_prefix; +use sp_std::vec::Vec; +fn remove_prefix(old_map: &str) -> Weight { + let mut prefix = Vec::new(); + prefix.extend_from_slice(&twox_128("SubtensorModule".as_bytes())); + prefix.extend_from_slice(&twox_128(old_map.as_bytes())); + + let removal_results = clear_prefix(&prefix, Some(u32::MAX)); + + let removed_entries_count = match removal_results { + KillStorageResult::AllRemoved(removed) => removed as u64, + KillStorageResult::SomeRemaining(removed) => { + log::info!("Failed To Remove Some Items During migration"); + removed as u64 + } + }; + + log::info!("Removed {removed_entries_count:?} entries from {old_map:?} map."); + + T::DbWeight::get().writes(removed_entries_count) +} + +pub fn migrate_remove_tao_dividends() -> Weight { + let migration_name = b"migrate_remove_tao_dividends".to_vec(); + let mut weight = T::DbWeight::get().reads(1); + + if HasMigrationRun::::get(&migration_name) { + log::info!( + "Migration '{:?}' has already run. 
Skipping.", + String::from_utf8_lossy(&migration_name) + ); + return weight; + } + + log::info!( + "Running migration '{}'", + String::from_utf8_lossy(&migration_name) + ); + + // Remove obsolete map entries + let weight1 = remove_prefix::("TaoDividendsPerSubnet"); + let weight2 = remove_prefix::("PendingAlphaSwapped"); + let weight3 = remove_prefix::("PendingRootDivs"); + + // Mark Migration as Completed + HasMigrationRun::::insert(&migration_name, true); + weight = weight.saturating_add(T::DbWeight::get().writes(1)); + + log::info!( + "Migration '{:?}' completed successfully.", + String::from_utf8_lossy(&migration_name) + ); + + weight + .saturating_add(weight1) + .saturating_add(weight2) + .saturating_add(weight3) +} diff --git a/pallets/subtensor/src/migrations/migrate_remove_unused_maps_and_values.rs b/pallets/subtensor/src/migrations/migrate_remove_unused_maps_and_values.rs index 701ab14893..df439eafe3 100644 --- a/pallets/subtensor/src/migrations/migrate_remove_unused_maps_and_values.rs +++ b/pallets/subtensor/src/migrations/migrate_remove_unused_maps_and_values.rs @@ -2,31 +2,7 @@ use super::*; use crate::HasMigrationRun; use frame_support::{traits::Get, weights::Weight}; use scale_info::prelude::string::String; -use sp_io::{ - KillStorageResult, - hashing::twox_128, - storage::{clear, clear_prefix}, -}; - -fn remove_prefix(old_map: &str, weight: &mut Weight) { - let mut prefix = Vec::new(); - prefix.extend_from_slice(&twox_128("SubtensorModule".as_bytes())); - prefix.extend_from_slice(&twox_128(old_map.as_bytes())); - - let removal_results = clear_prefix(&prefix, Some(u32::MAX)); - - let removed_entries_count = match removal_results { - KillStorageResult::AllRemoved(removed) => removed as u64, - KillStorageResult::SomeRemaining(removed) => { - log::info!("Failed To Remove Some Items During migration"); - removed as u64 - } - }; - - log::info!("Removed {removed_entries_count:?} entries from {old_map:?} map."); - - *weight = (*weight).saturating_add(T::DbWeight::get().writes(removed_entries_count)); -} +use sp_io::storage::clear; pub fn migrate_remove_unused_maps_and_values() -> Weight { let migration_name = b"migrate_remove_unused_maps_and_values".to_vec(); @@ -46,10 +22,10 @@ pub fn migrate_remove_unused_maps_and_values() -> Weight { ); // Remove EmissionValues entries - remove_prefix::("EmissionValues", &mut weight); + remove_prefix::("SubtensorModule", "EmissionValues", &mut weight); // Remove NetworkMaxStake - remove_prefix::("NetworkMaxStake", &mut weight); + remove_prefix::("SubtensorModule", "NetworkMaxStake", &mut weight); // Remove SubnetLimit clear(b"SubtensorModule::SubnetLimit"); diff --git a/pallets/subtensor/src/migrations/migrate_transfer_ownership_to_foundation.rs b/pallets/subtensor/src/migrations/migrate_transfer_ownership_to_foundation.rs index cf9b1fdf07..11711a9184 100644 --- a/pallets/subtensor/src/migrations/migrate_transfer_ownership_to_foundation.rs +++ b/pallets/subtensor/src/migrations/migrate_transfer_ownership_to_foundation.rs @@ -5,7 +5,7 @@ use frame_support::{ traits::{GetStorageVersion, StorageVersion}, weights::Weight, }; -use log::info; +use log::{error, info}; use sp_core::Get; use sp_std::vec::Vec; use subtensor_runtime_common::NetUid; @@ -41,7 +41,7 @@ pub mod deprecated_loaded_emission_format { pub fn migrate_transfer_ownership_to_foundation(coldkey: [u8; 32]) -> Weight { let new_storage_version = 3; - // Initialize weight counter + // Start with one read (on-chain storage version). 
let mut weight = T::DbWeight::get().reads(1); // Get current on-chain storage version @@ -54,10 +54,14 @@ pub fn migrate_transfer_ownership_to_foundation(coldkey: [u8; 32]) -> "Migrating subnet 1 and 11 to foundation control. Current version: {onchain_version:?}" ); - // Decode the foundation's coldkey into an AccountId - // TODO: Consider error handling for decoding failure - let coldkey_account: T::AccountId = T::AccountId::decode(&mut &coldkey[..]) - .expect("coldkey should be a valid 32-byte array"); + // Decode the foundation's coldkey into an AccountId — if it fails, log and abort migration. + let Ok(coldkey_account) = T::AccountId::decode(&mut &coldkey[..]) else { + error!( + target: LOG_TARGET, + "migration error: failed to decode foundation coldkey from 32 bytes" + ); + return weight; + }; info!(target: LOG_TARGET, "Foundation coldkey: {coldkey_account:?}"); // Get the current block number diff --git a/pallets/subtensor/src/migrations/mod.rs b/pallets/subtensor/src/migrations/mod.rs index b036783d9d..d95e4c7bac 100644 --- a/pallets/subtensor/src/migrations/mod.rs +++ b/pallets/subtensor/src/migrations/mod.rs @@ -15,21 +15,25 @@ pub mod migrate_crv3_v2_to_timelocked; pub mod migrate_delete_subnet_21; pub mod migrate_delete_subnet_3; pub mod migrate_disable_commit_reveal; +pub mod migrate_fix_childkeys; pub mod migrate_fix_is_network_member; pub mod migrate_fix_root_subnet_tao; pub mod migrate_fix_root_tao_and_alpha_in; pub mod migrate_identities_v2; pub mod migrate_init_total_issuance; +pub mod migrate_kappa_map_to_default; pub mod migrate_network_immunity_period; pub mod migrate_network_lock_cost_2500; pub mod migrate_network_lock_reduction_interval; pub mod migrate_orphaned_storage_items; pub mod migrate_populate_owned_hotkeys; pub mod migrate_rao; +pub mod migrate_rate_limit_keys; pub mod migrate_rate_limiting_last_blocks; pub mod migrate_remove_commitments_rate_limit; pub mod migrate_remove_network_modality; pub mod migrate_remove_stake_map; +pub mod migrate_remove_tao_dividends; pub mod migrate_remove_total_hotkey_coldkey_stakes_this_interval; pub mod migrate_remove_unused_maps_and_values; pub mod migrate_remove_zero_total_hotkey_alpha; @@ -100,3 +104,23 @@ pub(crate) fn migrate_storage( weight } + +pub(crate) fn remove_prefix(module: &str, old_map: &str, weight: &mut Weight) { + let mut prefix = Vec::new(); + prefix.extend_from_slice(&twox_128(module.as_bytes())); + prefix.extend_from_slice(&twox_128(old_map.as_bytes())); + + let removal_results = clear_prefix(&prefix, Some(u32::MAX)); + + let removed_entries_count = match removal_results { + KillStorageResult::AllRemoved(removed) => removed as u64, + KillStorageResult::SomeRemaining(removed) => { + log::info!("Failed To Remove Some Items During migration"); + removed as u64 + } + }; + + log::info!("Removed {removed_entries_count:?} entries from {old_map:?} map."); + + *weight = (*weight).saturating_add(T::DbWeight::get().writes(removed_entries_count)); +} diff --git a/pallets/subtensor/src/rpc_info/dynamic_info.rs b/pallets/subtensor/src/rpc_info/dynamic_info.rs index 28b3990082..3bfbda8676 100644 --- a/pallets/subtensor/src/rpc_info/dynamic_info.rs +++ b/pallets/subtensor/src/rpc_info/dynamic_info.rs @@ -63,7 +63,7 @@ impl Pallet { alpha_in_emission: SubnetAlphaInEmission::::get(netuid).into(), tao_in_emission: SubnetTaoInEmission::::get(netuid).into(), pending_alpha_emission: PendingEmission::::get(netuid).into(), - pending_root_emission: PendingRootDivs::::get(netuid).into(), + pending_root_emission: 
TaoCurrency::from(0u64).into(), subnet_volume: SubnetVolume::::get(netuid).into(), network_registered_at: NetworkRegisteredAt::::get(netuid).into(), subnet_identity: SubnetIdentitiesV3::::get(netuid), diff --git a/pallets/subtensor/src/rpc_info/metagraph.rs b/pallets/subtensor/src/rpc_info/metagraph.rs index 2ec772b2c6..df0c8023b0 100644 --- a/pallets/subtensor/src/rpc_info/metagraph.rs +++ b/pallets/subtensor/src/rpc_info/metagraph.rs @@ -644,7 +644,8 @@ impl Pallet { let mut tao_dividends_per_hotkey: Vec<(T::AccountId, Compact)> = vec![]; let mut alpha_dividends_per_hotkey: Vec<(T::AccountId, Compact)> = vec![]; for hotkey in hotkeys.clone() { - let tao_divs = TaoDividendsPerSubnet::::get(netuid, hotkey.clone()); + // Tao dividends were removed + let tao_divs = TaoCurrency::ZERO; let alpha_divs = AlphaDividendsPerSubnet::::get(netuid, hotkey.clone()); tao_dividends_per_hotkey.push((hotkey.clone(), tao_divs.into())); alpha_dividends_per_hotkey.push((hotkey.clone(), alpha_divs.into())); @@ -694,7 +695,7 @@ impl Pallet { alpha_in_emission: SubnetAlphaInEmission::::get(netuid).into(), // amount injected outstanding per block tao_in_emission: SubnetTaoInEmission::::get(netuid).into(), // amount of tao injected per block pending_alpha_emission: PendingEmission::::get(netuid).into(), // pending alpha to be distributed - pending_root_emission: PendingRootDivs::::get(netuid).into(), // panding tao for root divs to be distributed + pending_root_emission: TaoCurrency::from(0u64).into(), // panding tao for root divs to be distributed subnet_volume: subnet_volume.into(), moving_price: SubnetMovingPrice::::get(netuid), @@ -1004,7 +1005,7 @@ impl Pallet { }, Some(SelectiveMetagraphIndex::PendingRootEmission) => SelectiveMetagraph { netuid: netuid.into(), - pending_root_emission: Some(PendingRootDivs::::get(netuid).into()), + pending_root_emission: Some(TaoCurrency::from(0u64).into()), ..Default::default() }, Some(SelectiveMetagraphIndex::SubnetVolume) => SelectiveMetagraph { @@ -1405,7 +1406,8 @@ impl Pallet { let mut tao_dividends_per_hotkey: Vec<(T::AccountId, Compact)> = vec![]; for hotkey in hotkeys.clone() { - let tao_divs = TaoDividendsPerSubnet::::get(netuid, hotkey.clone()); + // Tao dividends were removed + let tao_divs = TaoCurrency::ZERO; tao_dividends_per_hotkey.push((hotkey.clone(), tao_divs.into())); } SelectiveMetagraph { diff --git a/pallets/subtensor/src/rpc_info/stake_info.rs b/pallets/subtensor/src/rpc_info/stake_info.rs index 84e9e6d6b7..83c0cfed8b 100644 --- a/pallets/subtensor/src/rpc_info/stake_info.rs +++ b/pallets/subtensor/src/rpc_info/stake_info.rs @@ -43,7 +43,8 @@ impl Pallet { continue; } let emission = AlphaDividendsPerSubnet::::get(*netuid_i, &hotkey_i); - let tao_emission = TaoDividendsPerSubnet::::get(*netuid_i, &hotkey_i); + // Tao dividends were removed + let tao_emission = TaoCurrency::ZERO; let is_registered: bool = Self::is_hotkey_registered_on_network(*netuid_i, hotkey_i); stake_info_for_coldkey.push(StakeInfo { @@ -101,7 +102,8 @@ impl Pallet { netuid, ); let emission = AlphaDividendsPerSubnet::::get(netuid, &hotkey_account); - let tao_emission = TaoDividendsPerSubnet::::get(netuid, &hotkey_account); + // Tao dividends were removed + let tao_emission = TaoCurrency::ZERO; let is_registered: bool = Self::is_hotkey_registered_on_network(netuid, &hotkey_account); Some(StakeInfo { @@ -128,7 +130,7 @@ impl Pallet { 0_u64 } else { let netuid = destination.or(origin).map(|v| v.1).unwrap_or_default(); - T::SwapInterface::approx_fee_amount(netuid.into(), amount) + 
T::SwapInterface::approx_fee_amount(netuid.into(), TaoCurrency::from(amount)).to_u64() } } } diff --git a/pallets/subtensor/src/staking/add_stake.rs b/pallets/subtensor/src/staking/add_stake.rs index 740d42d09e..ea33912bf1 100644 --- a/pallets/subtensor/src/staking/add_stake.rs +++ b/pallets/subtensor/src/staking/add_stake.rs @@ -1,6 +1,6 @@ use substrate_fixed::types::I96F32; use subtensor_runtime_common::{NetUid, TaoCurrency}; -use subtensor_swap_interface::{OrderType, SwapHandler}; +use subtensor_swap_interface::{Order, SwapHandler}; use super::*; @@ -74,7 +74,7 @@ impl Pallet { &coldkey, netuid, tao_staked.saturating_to_num::().into(), - T::SwapInterface::max_price().into(), + T::SwapInterface::max_price(), true, false, )?; @@ -180,7 +180,10 @@ impl Pallet { } // Returns the maximum amount of RAO that can be executed with price limit - pub fn get_max_amount_add(netuid: NetUid, limit_price: TaoCurrency) -> Result> { + pub fn get_max_amount_add( + netuid: NetUid, + limit_price: TaoCurrency, + ) -> Result { // Corner case: root and stao // There's no slippage for root or stable subnets, so if limit price is 1e9 rao or // higher, then max_amount equals u64::MAX, otherwise it is 0. @@ -188,26 +191,19 @@ impl Pallet { if limit_price >= 1_000_000_000.into() { return Ok(u64::MAX); } else { - return Err(Error::ZeroMaxStakeAmount); + return Err(Error::::ZeroMaxStakeAmount.into()); } } // Use reverting swap to estimate max limit amount - let result = T::SwapInterface::swap( - netuid.into(), - OrderType::Buy, - u64::MAX, - limit_price.into(), - false, - true, - ) - .map(|r| r.amount_paid_in.saturating_add(r.fee_paid)) - .map_err(|_| Error::ZeroMaxStakeAmount)?; + let order = GetAlphaForTao::::with_amount(u64::MAX); + let result = T::SwapInterface::swap(netuid.into(), order, limit_price, false, true) + .map(|r| r.amount_paid_in.saturating_add(r.fee_paid))?; - if result != 0 { - Ok(result) + if !result.is_zero() { + Ok(result.into()) } else { - Err(Error::ZeroMaxStakeAmount) + Err(Error::::ZeroMaxStakeAmount.into()) } } } diff --git a/pallets/subtensor/src/staking/claim_root.rs b/pallets/subtensor/src/staking/claim_root.rs new file mode 100644 index 0000000000..7071a1ad55 --- /dev/null +++ b/pallets/subtensor/src/staking/claim_root.rs @@ -0,0 +1,399 @@ +use super::*; +use frame_support::weights::Weight; +use sp_core::Get; +use sp_std::collections::btree_set::BTreeSet; +use substrate_fixed::types::I96F32; +use subtensor_swap_interface::SwapHandler; + +impl Pallet { + pub fn block_hash_to_indices(block_hash: T::Hash, k: u64, n: u64) -> Vec { + let block_hash_bytes = block_hash.as_ref(); + let mut indices: BTreeSet = BTreeSet::new(); + // k < n + let start_index: u64 = u64::from_be_bytes( + block_hash_bytes + .get(0..8) + .unwrap_or(&[0; 8]) + .try_into() + .unwrap_or([0; 8]), + ); + let mut last_idx = start_index; + for i in 0..k { + let bh_idx: usize = ((i.saturating_mul(8)) % 32) as usize; + let idx_step = u64::from_be_bytes( + block_hash_bytes + .get(bh_idx..(bh_idx.saturating_add(8))) + .unwrap_or(&[0; 8]) + .try_into() + .unwrap_or([0; 8]), + ); + let idx = last_idx + .saturating_add(idx_step) + .checked_rem(n) + .unwrap_or(0); + indices.insert(idx); + last_idx = idx; + } + indices.into_iter().collect() + } + + pub fn increase_root_claimable_for_hotkey_and_subnet( + hotkey: &T::AccountId, + netuid: NetUid, + amount: AlphaCurrency, + ) { + // Get total stake on this hotkey on root. 
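+        // Hedged worked example (illustrative numbers only): with 1_000 of total root stake
+        // behind this hotkey and amount = 10, the per-unit increment is 10 / 1_000 = 0.01.
+        // A coldkey holding 200 of that root stake then accrues 200 * 0.01 = 2 claimable
+        // via `get_root_claimable_for_hotkey_coldkey`, the usual rewards-per-share accumulator pattern.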
+ let total: I96F32 = + I96F32::saturating_from_num(Self::get_stake_for_hotkey_on_subnet(hotkey, NetUid::ROOT)); + + // Get increment + let increment: I96F32 = I96F32::saturating_from_num(amount) + .checked_div(total) + .unwrap_or(I96F32::saturating_from_num(0.0)); + + // Unlikely to happen. This is mostly for test environment sanity checks. + if u64::from(amount) > total.saturating_to_num::() { + log::warn!("Not enough root stake. NetUID = {netuid}"); + + let owner = Owner::::get(hotkey); + Self::increase_stake_for_hotkey_and_coldkey_on_subnet(hotkey, &owner, netuid, amount); + return; + } + + // Increment claimable for this subnet. + RootClaimable::::mutate(hotkey, |claimable| { + claimable + .entry(netuid) + .and_modify(|claim_total| *claim_total = claim_total.saturating_add(increment)) + .or_insert(increment); + }); + } + + pub fn get_root_claimable_for_hotkey_coldkey( + hotkey: &T::AccountId, + coldkey: &T::AccountId, + netuid: NetUid, + ) -> I96F32 { + // Get this keys stake balance on root. + let root_stake: I96F32 = I96F32::saturating_from_num( + Self::get_stake_for_hotkey_and_coldkey_on_subnet(hotkey, coldkey, NetUid::ROOT), + ); + + // Get the total claimable_rate for this hotkey and this network + let claimable_rate: I96F32 = *RootClaimable::::get(hotkey) + .get(&netuid) + .unwrap_or(&I96F32::from(0)); + + // Compute the proportion owed to this coldkey via balance. + let claimable: I96F32 = claimable_rate.saturating_mul(root_stake); + + claimable + } + + pub fn get_root_owed_for_hotkey_coldkey_float( + hotkey: &T::AccountId, + coldkey: &T::AccountId, + netuid: NetUid, + ) -> I96F32 { + let claimable = Self::get_root_claimable_for_hotkey_coldkey(hotkey, coldkey, netuid); + + // Attain the root claimed to avoid overclaiming. + let root_claimed: I96F32 = + I96F32::saturating_from_num(RootClaimed::::get((netuid, hotkey, coldkey))); + + // Subtract the already claimed alpha. + let owed: I96F32 = claimable.saturating_sub(root_claimed); + + owed + } + + pub fn get_root_owed_for_hotkey_coldkey( + hotkey: &T::AccountId, + coldkey: &T::AccountId, + netuid: NetUid, + ) -> u64 { + let owed = Self::get_root_owed_for_hotkey_coldkey_float(hotkey, coldkey, netuid); + + // Convert owed to u64, mapping negative values to 0 + let owed_u64: u64 = if owed.is_negative() { + 0 + } else { + owed.saturating_to_num::() + }; + + owed_u64 + } + + pub fn root_claim_on_subnet( + hotkey: &T::AccountId, + coldkey: &T::AccountId, + netuid: NetUid, + root_claim_type: RootClaimTypeEnum, + ignore_minimum_condition: bool, + ) { + // Subtract the root claimed. 
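+        // Hedged continuation of the example above (numbers illustrative): if claimable is 2.0
+        // and RootClaimed already holds 0.5, then owed = 1.5; with RootClaimableThreshold at
+        // 1.0 the claim proceeds, while a threshold of 2.0 would skip it silently unless
+        // `ignore_minimum_condition` is set.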
+ let owed: I96F32 = Self::get_root_owed_for_hotkey_coldkey_float(hotkey, coldkey, netuid); + + if !ignore_minimum_condition + && owed < I96F32::saturating_from_num(RootClaimableThreshold::::get(&netuid)) + { + log::debug!( + "root claim on subnet {netuid} is skipped: {owed:?} for h={hotkey:?},c={coldkey:?} " + ); + return; // no-op + } + + // Convert owed to u64, mapping negative values to 0 + let owed_u64: u64 = if owed.is_negative() { + 0 + } else { + owed.saturating_to_num::() + }; + + if owed_u64 == 0 { + log::debug!( + "root claim on subnet {netuid} is skipped: {owed:?} for h={hotkey:?},c={coldkey:?}" + ); + return; // no-op + } + + match root_claim_type { + // Increase stake on root + RootClaimTypeEnum::Swap => { + // Swap the alpha owed to TAO + let owed_tao = match Self::swap_alpha_for_tao( + netuid, + owed_u64.into(), + T::SwapInterface::min_price::(), + true, + ) { + Ok(owed_tao) => owed_tao, + Err(err) => { + log::error!("Error swapping alpha for TAO: {err:?}"); + + return; + } + }; + + Self::increase_stake_for_hotkey_and_coldkey_on_subnet( + hotkey, + coldkey, + NetUid::ROOT, + owed_tao.amount_paid_out.to_u64().into(), + ); + + Self::add_stake_adjust_root_claimed_for_hotkey_and_coldkey( + hotkey, + coldkey, + owed_tao.amount_paid_out.into(), + ); + } + RootClaimTypeEnum::Keep => { + // Increase the stake with the alpha owned + Self::increase_stake_for_hotkey_and_coldkey_on_subnet( + hotkey, + coldkey, + netuid, + owed_u64.into(), + ); + } + }; + + // Increase root claimed by owed amount. + RootClaimed::::mutate((netuid, hotkey, coldkey), |root_claimed| { + *root_claimed = root_claimed.saturating_add(owed_u64.into()); + }); + } + + fn root_claim_on_subnet_weight(_root_claim_type: RootClaimTypeEnum) -> Weight { + Weight::from_parts(60_000_000, 6987) + .saturating_add(T::DbWeight::get().reads(7_u64)) + .saturating_add(T::DbWeight::get().writes(5_u64)) + } + pub fn root_claim_all( + hotkey: &T::AccountId, + coldkey: &T::AccountId, + subnets: Option>, + ) -> Weight { + let mut weight = Weight::default(); + + let root_claim_type = RootClaimType::::get(coldkey); + weight.saturating_accrue(T::DbWeight::get().reads(1)); + + // Iterate over all the subnets this hotkey has claimable for root. + let root_claimable = RootClaimable::::get(hotkey); + weight.saturating_accrue(T::DbWeight::get().reads(1)); + + for (netuid, _) in root_claimable.iter() { + let skip = subnets + .as_ref() + .map(|subnets| !subnets.contains(netuid)) + .unwrap_or(false); + + if skip { + continue; + } + + Self::root_claim_on_subnet(hotkey, coldkey, *netuid, root_claim_type.clone(), false); + weight.saturating_accrue(Self::root_claim_on_subnet_weight(root_claim_type.clone())); + } + + weight + } + + pub fn add_stake_adjust_root_claimed_for_hotkey_and_coldkey( + hotkey: &T::AccountId, + coldkey: &T::AccountId, + amount: u64, + ) { + // Iterate over all the subnets this hotkey is staked on for root. + let root_claimable = RootClaimable::::get(hotkey); + for (netuid, claimable_rate) in root_claimable.iter() { + // Get current staker root claimed value. + let root_claimed: u128 = RootClaimed::::get((netuid, hotkey, coldkey)); + + // Increase root claimed based on the claimable rate. + let new_root_claimed = root_claimed.saturating_add( + claimable_rate + .saturating_mul(I96F32::from(u64::from(amount))) + .saturating_to_num(), + ); + + // Set the new root claimed value. 
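+            // Hedged sketch of the intent (numbers illustrative): with a claimable rate of 0.01
+            // on a subnet, adding 500 of new root stake bumps RootClaimed by 0.01 * 500 = 5, so
+            // owed = rate * stake - claimed is unchanged and the fresh stake cannot retroactively
+            // claim dividends accrued before it was added.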
+ RootClaimed::::insert((netuid, hotkey, coldkey), new_root_claimed); + } + } + + pub fn remove_stake_adjust_root_claimed_for_hotkey_and_coldkey( + hotkey: &T::AccountId, + coldkey: &T::AccountId, + amount: AlphaCurrency, + ) { + // Iterate over all the subnets this hotkey is staked on for root. + let root_claimable = RootClaimable::::get(hotkey); + for (netuid, claimable_rate) in root_claimable.iter() { + if *netuid == NetUid::ROOT.into() { + continue; // Skip the root netuid. + } + + // Get current staker root claimed value. + let root_claimed: u128 = RootClaimed::::get((netuid, hotkey, coldkey)); + + // Decrease root claimed based on the claimable rate. + let new_root_claimed = root_claimed.saturating_sub( + claimable_rate + .saturating_mul(I96F32::from(u64::from(amount))) + .saturating_to_num(), + ); + + // Set the new root_claimed value. + RootClaimed::::insert((netuid, hotkey, coldkey), new_root_claimed); + } + } + + pub fn do_root_claim(coldkey: T::AccountId, subnets: Option>) -> Weight { + let mut weight = Weight::default(); + + let hotkeys = StakingHotkeys::::get(&coldkey); + weight.saturating_accrue(T::DbWeight::get().reads(1)); + + hotkeys.iter().for_each(|hotkey| { + weight.saturating_accrue(T::DbWeight::get().reads(1)); + weight.saturating_accrue(Self::root_claim_all(hotkey, &coldkey, subnets.clone())); + }); + + Self::deposit_event(Event::RootClaimed { coldkey }); + + weight + } + + fn block_hash_to_indices_weight(k: u64, _n: u64) -> Weight { + Weight::from_parts(3_000_000, 1517) + .saturating_add(Weight::from_parts(100_412, 0).saturating_mul(k.into())) + } + + pub fn maybe_add_coldkey_index(coldkey: &T::AccountId) { + if !StakingColdkeys::::contains_key(coldkey) { + let n = NumStakingColdkeys::::get(); + StakingColdkeysByIndex::::insert(n, coldkey.clone()); + StakingColdkeys::::insert(coldkey.clone(), n); + NumStakingColdkeys::::mutate(|n| *n = n.saturating_add(1)); + } + } + + pub fn run_auto_claim_root_divs(last_block_hash: T::Hash) -> Weight { + let mut weight: Weight = Weight::default(); + + let n = NumStakingColdkeys::::get(); + let k = NumRootClaim::::get(); + weight.saturating_accrue(T::DbWeight::get().reads(2)); + + let coldkeys_to_claim: Vec = Self::block_hash_to_indices(last_block_hash, k, n); + weight.saturating_accrue(Self::block_hash_to_indices_weight(k, n)); + + for i in coldkeys_to_claim.iter() { + weight.saturating_accrue(T::DbWeight::get().reads(1)); + if let Ok(coldkey) = StakingColdkeysByIndex::::try_get(i) { + weight.saturating_accrue(Self::do_root_claim(coldkey.clone(), None)); + } + + continue; + } + + weight + } + + pub fn change_root_claim_type(coldkey: &T::AccountId, new_type: RootClaimTypeEnum) { + RootClaimType::::insert(coldkey.clone(), new_type.clone()); + + Self::deposit_event(Event::RootClaimTypeSet { + coldkey: coldkey.clone(), + root_claim_type: new_type, + }); + } + + pub fn transfer_root_claimed_for_new_keys( + netuid: NetUid, + old_hotkey: &T::AccountId, + new_hotkey: &T::AccountId, + old_coldkey: &T::AccountId, + new_coldkey: &T::AccountId, + ) { + let old_root_claimed = RootClaimed::::get((netuid, old_hotkey, old_coldkey)); + RootClaimed::::remove((netuid, old_hotkey, old_coldkey)); + + RootClaimed::::mutate((netuid, new_hotkey, new_coldkey), |new_root_claimed| { + *new_root_claimed = old_root_claimed.saturating_add(*new_root_claimed); + }); + } + pub fn transfer_root_claimable_for_new_hotkey( + old_hotkey: &T::AccountId, + new_hotkey: &T::AccountId, + ) { + let src_root_claimable = RootClaimable::::get(old_hotkey); + let mut 
dst_root_claimable = RootClaimable::::get(new_hotkey); + RootClaimable::::remove(old_hotkey); + + for (netuid, claimable_rate) in src_root_claimable.into_iter() { + dst_root_claimable + .entry(netuid) + .and_modify(|total| *total = total.saturating_add(claimable_rate)) + .or_insert(claimable_rate); + } + + RootClaimable::::insert(new_hotkey, dst_root_claimable); + } + + /// Claim all root dividends for subnet and remove all associated data. + pub fn finalize_all_subnet_root_dividends(netuid: NetUid) { + let hotkeys = RootClaimable::::iter_keys().collect::>(); + + for hotkey in hotkeys.iter() { + RootClaimable::::mutate(hotkey, |claimable| { + claimable.remove(&netuid); + }); + } + + let _ = RootClaimed::::clear_prefix((netuid,), u32::MAX, None); + } +} diff --git a/pallets/subtensor/src/staking/helpers.rs b/pallets/subtensor/src/staking/helpers.rs index 1625afa811..1176064e36 100644 --- a/pallets/subtensor/src/staking/helpers.rs +++ b/pallets/subtensor/src/staking/helpers.rs @@ -8,7 +8,7 @@ use frame_support::traits::{ use safe_math::*; use substrate_fixed::types::U96F32; use subtensor_runtime_common::{NetUid, TaoCurrency}; -use subtensor_swap_interface::{OrderType, SwapHandler}; +use subtensor_swap_interface::{Order, SwapHandler}; use super::*; @@ -73,20 +73,17 @@ impl Pallet { let alpha_stake = Self::get_stake_for_hotkey_and_coldkey_on_subnet( hotkey, coldkey, netuid, ); - T::SwapInterface::sim_swap( - netuid.into(), - OrderType::Sell, - alpha_stake.into(), - ) - .map(|r| { - let fee: u64 = U96F32::saturating_from_num(r.fee_paid) - .saturating_mul(T::SwapInterface::current_alpha_price( - netuid.into(), - )) - .saturating_to_num(); - r.amount_paid_out.saturating_add(fee) - }) - .unwrap_or_default() + let order = GetTaoForAlpha::::with_amount(alpha_stake); + T::SwapInterface::sim_swap(netuid.into(), order) + .map(|r| { + let fee: u64 = U96F32::saturating_from_num(r.fee_paid) + .saturating_mul(T::SwapInterface::current_alpha_price( + netuid.into(), + )) + .saturating_to_num(); + r.amount_paid_out.to_u64().saturating_add(fee) + }) + .unwrap_or_default() }) .sum::() }) @@ -202,7 +199,7 @@ impl Pallet { coldkey, netuid, alpha_stake, - T::SwapInterface::min_price().into(), + T::SwapInterface::min_price(), false, ); @@ -329,6 +326,56 @@ impl Pallet { }); } + /// The function clears Alpha map in batches. Each run will check ALPHA_MAP_BATCH_SIZE + /// alphas. It keeps the alpha value stored when it's >= than MIN_ALPHA. + /// The function uses AlphaMapLastKey as a storage for key iterator between runs. + pub fn populate_root_coldkey_staking_maps() { + // Get starting key for the batch. Get the first key if we restart the process. 
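+        // Hedged sketch of the cursor pattern used below (AlphaMapLastKey and
+        // ALPHA_MAP_BATCH_SIZE as introduced in this diff), roughly:
+        //
+        //   // resume from the saved raw key, or start at the first Alpha key
+        //   let cursor: Option<Vec<u8>> = AlphaMapLastKey::<T>::get()
+        //       .or_else(|| Alpha::<T>::iter_keys().next().map(|k| Alpha::<T>::hashed_key_for(&k)));
+        //   if let Some(raw) = cursor {
+        //       let batch: Vec<_> = Alpha::<T>::iter_keys_from(raw).take(ALPHA_MAP_BATCH_SIZE).collect();
+        //       // ... process batch; a short batch resets AlphaMapLastKey to None
+        //   }
+        //
+        // A batch shorter than ALPHA_MAP_BATCH_SIZE means the map is exhausted, so the scan
+        // restarts from the first key on the next call.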
+ let mut new_starting_raw_key = AlphaMapLastKey::::get(); + let mut starting_key = None; + if new_starting_raw_key.is_none() { + starting_key = Alpha::::iter_keys().next(); + new_starting_raw_key = starting_key.as_ref().map(Alpha::::hashed_key_for); + } + + if let Some(starting_raw_key) = new_starting_raw_key { + // Get the key batch + let mut keys = Alpha::::iter_keys_from(starting_raw_key) + .take(ALPHA_MAP_BATCH_SIZE) + .collect::>(); + + // New iteration: insert the starting key in the batch if it's a new iteration + // iter_keys_from() skips the starting key + if let Some(starting_key) = starting_key { + if keys.len() == ALPHA_MAP_BATCH_SIZE { + keys.remove(keys.len().saturating_sub(1)); + } + keys.insert(0, starting_key); + } + + let mut new_starting_key = None; + let new_iteration = keys.len() < ALPHA_MAP_BATCH_SIZE; + + // Check and remove alphas if necessary + for key in keys { + let (_, coldkey, netuid) = key.clone(); + + if netuid == NetUid::ROOT { + Self::maybe_add_coldkey_index(&coldkey); + } + + new_starting_key = Some(Alpha::::hashed_key_for(key)); + } + + // Restart the process if it's the last batch + if new_iteration { + new_starting_key = None; + } + + AlphaMapLastKey::::put(new_starting_key); + } + } + pub fn burn_subnet_alpha(_netuid: NetUid, _amount: AlphaCurrency) { // Do nothing; TODO: record burned alpha in a tracker } diff --git a/pallets/subtensor/src/staking/mod.rs b/pallets/subtensor/src/staking/mod.rs index 570658631a..ad2b66189f 100644 --- a/pallets/subtensor/src/staking/mod.rs +++ b/pallets/subtensor/src/staking/mod.rs @@ -1,6 +1,7 @@ use super::*; pub mod account; pub mod add_stake; +mod claim_root; pub mod decrease_take; pub mod helpers; pub mod increase_take; diff --git a/pallets/subtensor/src/staking/move_stake.rs b/pallets/subtensor/src/staking/move_stake.rs index 589da2b4b8..a1d9b46d5b 100644 --- a/pallets/subtensor/src/staking/move_stake.rs +++ b/pallets/subtensor/src/staking/move_stake.rs @@ -360,7 +360,7 @@ impl Pallet { origin_coldkey, origin_netuid, move_amount, - T::SwapInterface::min_price().into(), + T::SwapInterface::min_price(), drop_fee_origin, )?; @@ -378,7 +378,7 @@ impl Pallet { destination_coldkey, destination_netuid, tao_unstaked, - T::SwapInterface::max_price().into(), + T::SwapInterface::max_price(), set_limit, drop_fee_destination, )?; @@ -424,8 +424,8 @@ impl Pallet { origin_netuid: NetUid, destination_netuid: NetUid, limit_price: TaoCurrency, - ) -> Result> { - let tao: U64F64 = U64F64::saturating_from_num(1_000_000_000); + ) -> Result { + let tao = U64F64::saturating_from_num(1_000_000_000); // Corner case: both subnet IDs are root or stao // There's no slippage for root or stable subnets, so slippage is always 0. 
@@ -434,7 +434,7 @@ impl Pallet { && (destination_netuid.is_root() || SubnetMechanism::::get(destination_netuid) == 0) { if limit_price > tao.saturating_to_num::().into() { - return Err(Error::ZeroMaxStakeAmount); + return Err(Error::::ZeroMaxStakeAmount.into()); } else { return Ok(AlphaCurrency::MAX); } @@ -476,7 +476,7 @@ impl Pallet { let subnet_tao_2 = SubnetTAO::::get(destination_netuid) .saturating_add(SubnetTaoProvided::::get(destination_netuid)); if subnet_tao_1.is_zero() || subnet_tao_2.is_zero() { - return Err(Error::ZeroMaxStakeAmount); + return Err(Error::::ZeroMaxStakeAmount.into()); } let subnet_tao_1_float: U64F64 = U64F64::saturating_from_num(subnet_tao_1); let subnet_tao_2_float: U64F64 = U64F64::saturating_from_num(subnet_tao_2); @@ -487,7 +487,7 @@ impl Pallet { let alpha_in_2 = SubnetAlphaIn::::get(destination_netuid) .saturating_add(SubnetAlphaInProvided::::get(destination_netuid)); if alpha_in_1.is_zero() || alpha_in_2.is_zero() { - return Err(Error::ZeroMaxStakeAmount); + return Err(Error::::ZeroMaxStakeAmount.into()); } let alpha_in_1_float: U64F64 = U64F64::saturating_from_num(alpha_in_1); let alpha_in_2_float: U64F64 = U64F64::saturating_from_num(alpha_in_2); @@ -503,7 +503,7 @@ impl Pallet { T::SwapInterface::current_alpha_price(destination_netuid.into()), ); if limit_price_float > current_price { - return Err(Error::ZeroMaxStakeAmount); + return Err(Error::::ZeroMaxStakeAmount.into()); } // Corner case: limit_price is zero @@ -529,7 +529,7 @@ impl Pallet { if final_result != 0 { Ok(final_result.into()) } else { - Err(Error::ZeroMaxStakeAmount) + Err(Error::::ZeroMaxStakeAmount.into()) } } } diff --git a/pallets/subtensor/src/staking/remove_stake.rs b/pallets/subtensor/src/staking/remove_stake.rs index 99e4a52be1..5771029f74 100644 --- a/pallets/subtensor/src/staking/remove_stake.rs +++ b/pallets/subtensor/src/staking/remove_stake.rs @@ -1,8 +1,7 @@ -use subtensor_swap_interface::{OrderType, SwapHandler}; - use super::*; use substrate_fixed::types::U96F32; use subtensor_runtime_common::{AlphaCurrency, Currency, NetUid, TaoCurrency}; +use subtensor_swap_interface::{Order, SwapHandler}; impl Pallet { /// ---- The implementation for the extrinsic remove_stake: Removes stake from a hotkey account and adds it onto a coldkey. @@ -73,7 +72,7 @@ impl Pallet { &coldkey, netuid, alpha_unstaked, - T::SwapInterface::min_price().into(), + T::SwapInterface::min_price(), false, )?; @@ -168,7 +167,7 @@ impl Pallet { &coldkey, netuid, alpha_unstaked, - T::SwapInterface::min_price().into(), + T::SwapInterface::min_price(), false, )?; @@ -261,7 +260,7 @@ impl Pallet { &coldkey, netuid, alpha_unstaked, - T::SwapInterface::min_price().into(), + T::SwapInterface::min_price(), false, )?; @@ -280,7 +279,7 @@ impl Pallet { &coldkey, NetUid::ROOT, total_tao_unstaked, - T::SwapInterface::max_price().into(), + T::SwapInterface::max_price(), false, // no limit for Root subnet false, )?; @@ -392,7 +391,7 @@ impl Pallet { pub fn get_max_amount_remove( netuid: NetUid, limit_price: TaoCurrency, - ) -> Result> { + ) -> Result { // Corner case: root and stao // There's no slippage for root or stable subnets, so if limit price is 1e9 rao or // lower, then max_amount equals u64::MAX, otherwise it is 0. 
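    // Hedged numeric illustration (restating the corner case above with concrete units): on
    // root and stable subnets one alpha always trades at exactly 1_000_000_000 rao (1 TAO),
    // so a sell whose minimum acceptable price is at or below 1e9 can be filled for an
    // arbitrarily large amount (AlphaCurrency::MAX), while a minimum above 1e9 can never be
    // met and Error::<T>::ZeroMaxStakeAmount is returned.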
@@ -400,26 +399,19 @@ impl Pallet { if limit_price <= 1_000_000_000.into() { return Ok(AlphaCurrency::MAX); } else { - return Err(Error::ZeroMaxStakeAmount); + return Err(Error::::ZeroMaxStakeAmount.into()); } } // Use reverting swap to estimate max limit amount - let result = T::SwapInterface::swap( - netuid.into(), - OrderType::Sell, - u64::MAX, - limit_price.into(), - false, - true, - ) - .map(|r| r.amount_paid_in.saturating_add(r.fee_paid)) - .map_err(|_| Error::ZeroMaxStakeAmount)?; + let order = GetTaoForAlpha::::with_amount(u64::MAX); + let result = T::SwapInterface::swap(netuid.into(), order, limit_price.into(), false, true) + .map(|r| r.amount_paid_in.saturating_add(r.fee_paid))?; - if result != 0 { - Ok(result.into()) + if !result.is_zero() { + Ok(result) } else { - Err(Error::ZeroMaxStakeAmount) + Err(Error::::ZeroMaxStakeAmount.into()) } } @@ -459,7 +451,7 @@ impl Pallet { // - sum emitted α, // - apply owner fraction to get owner α, // - price that α using a *simulated* AMM swap. - let mut owner_emission_tao: TaoCurrency = TaoCurrency::ZERO; + let mut owner_emission_tao = TaoCurrency::ZERO; if should_refund_owner && !lock_cost.is_zero() { let total_emitted_alpha_u128: u128 = Emission::::get(netuid) @@ -471,17 +463,14 @@ impl Pallet { if total_emitted_alpha_u128 > 0 { let owner_fraction: U96F32 = Self::get_float_subnet_owner_cut(); - let owner_alpha_u64: u64 = U96F32::from_num(total_emitted_alpha_u128) + let owner_alpha_u64 = U96F32::from_num(total_emitted_alpha_u128) .saturating_mul(owner_fraction) .floor() .saturating_to_num::(); owner_emission_tao = if owner_alpha_u64 > 0 { - match T::SwapInterface::sim_swap( - netuid.into(), - OrderType::Sell, - owner_alpha_u64, - ) { + let order = GetTaoForAlpha::with_amount(owner_alpha_u64); + match T::SwapInterface::sim_swap(netuid.into(), order) { Ok(sim) => TaoCurrency::from(sim.amount_paid_out), Err(e) => { log::debug!( @@ -489,7 +478,7 @@ impl Pallet { ); let cur_price: U96F32 = T::SwapInterface::current_alpha_price(netuid.into()); - let val_u64: u64 = U96F32::from_num(owner_alpha_u64) + let val_u64 = U96F32::from_num(owner_alpha_u64) .saturating_mul(cur_price) .floor() .saturating_to_num::(); diff --git a/pallets/subtensor/src/staking/set_children.rs b/pallets/subtensor/src/staking/set_children.rs index cf7103b7ab..c7ebd62c04 100644 --- a/pallets/subtensor/src/staking/set_children.rs +++ b/pallets/subtensor/src/staking/set_children.rs @@ -1,8 +1,451 @@ use super::*; +use sp_std::collections::{btree_map::BTreeMap, btree_set::BTreeSet}; use subtensor_runtime_common::NetUid; +pub struct PCRelations { + /// The distinguished `hotkey` this structure is built around. + pivot: T::AccountId, + children: BTreeMap, + parents: BTreeMap, +} + +impl PCRelations { + /// Create empty relations for a given pivot. 
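+    ///
+    /// A hedged usage sketch (helpers as defined in this file; marked `ignore` because it
+    /// needs a concrete pallet `T: Config` to compile):
+    ///
+    /// ```ignore
+    /// let mut rel = PCRelations::<T>::new(parent_hotkey.clone());
+    /// let mut kids = BTreeMap::new();
+    /// kids.insert(child_hotkey.clone(), u64::MAX / 2); // proportion for this child
+    /// rel.link_children(kids)?; // checks: no self-loop, proportions fit u64, no child is also a parent
+    /// assert_eq!(rel.children().len(), 1);
+    /// ```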
+ pub fn new(hotkey: T::AccountId) -> Self { + Self { + pivot: hotkey, + children: BTreeMap::new(), + parents: BTreeMap::new(), + } + } + + //////////////////////////////////////////////////////////// + // Constraint checkers + + /// Ensures sum(proportions) <= u64::MAX + pub fn ensure_total_proportions(children: &BTreeMap) -> DispatchResult { + let total: u128 = children + .values() + .fold(0u128, |acc, &w| acc.saturating_add(w as u128)); + ensure!(total <= u64::MAX as u128, Error::::ProportionOverflow); + Ok(()) + } + + /// Ensure that the number of children does not exceed 5 + pub fn ensure_childkey_count(children: &BTreeMap) -> DispatchResult { + ensure!(children.len() <= 5, Error::::TooManyChildren); + + Ok(()) + } + + /// Ensures the given children or parent set doesn't contain pivot + pub fn ensure_no_self_loop( + pivot: &T::AccountId, + hotkey_set: &BTreeMap, + ) -> DispatchResult { + ensure!(!hotkey_set.contains_key(pivot), Error::::InvalidChild); + Ok(()) + } + + /// Ensures that children and parents sets do not have any overlap + pub fn ensure_bipartite_separation( + children: &BTreeMap, + parents: &BTreeMap, + ) -> DispatchResult { + let has_overlap = children.keys().any(|c| parents.contains_key(c)); + ensure!(!has_overlap, Error::::ChildParentInconsistency); + Ok(()) + } + + /// Validate that applying `pending_children_vec` to `relations` (as the new + /// pivot->children mapping) preserves all invariants. + /// + /// Checks: + /// 1) No self-loop: pivot must not appear among children. + /// 2) Sum of child proportions fits in `u64`. + /// 3) Bipartite role separation: no child may also be a parent. + pub fn ensure_pending_consistency( + &self, + pending_children_vec: &Vec<(u64, T::AccountId)>, + ) -> DispatchResult { + // Build a deduped children map (last proportion wins if duplicates present). + let mut new_children: BTreeMap = BTreeMap::new(); + for (prop, child) in pending_children_vec { + new_children.insert(child.clone(), *prop); + } + + // Check constraints + Self::ensure_no_self_loop(&self.pivot, &new_children)?; + Self::ensure_childkey_count(&new_children)?; + Self::ensure_total_proportions(&new_children)?; + Self::ensure_bipartite_separation(&new_children, &self.parents)?; + + Ok(()) + } + + //////////////////////////////////////////////////////////// + // Getters + + #[inline] + pub fn pivot(&self) -> &T::AccountId { + &self.pivot + } + #[inline] + pub fn children(&self) -> &BTreeMap { + &self.children + } + #[inline] + pub fn parents(&self) -> &BTreeMap { + &self.parents + } + + //////////////////////////////////////////////////////////// + // Safe updaters + + /// Replace the pivot->children mapping after validating invariants. 
+ /// + /// Invariants: + /// - No self-loop: child != pivot + /// - sum(proportions) fits in u64 (checked as u128 to avoid overflow mid-sum) + pub fn link_children(&mut self, new_children: BTreeMap) -> DispatchResult { + // Check constraints + Self::ensure_no_self_loop(&self.pivot, &new_children)?; + Self::ensure_total_proportions(&new_children)?; + Self::ensure_bipartite_separation(&new_children, &self.parents)?; + + self.children = new_children; + Ok(()) + } + + pub fn link_parents(&mut self, new_parents: BTreeMap) -> DispatchResult { + // Check constraints + Self::ensure_no_self_loop(&self.pivot, &new_parents)?; + Self::ensure_bipartite_separation(&self.children, &new_parents)?; + + self.parents = new_parents; + Ok(()) + } + + #[inline] + fn upsert_edge(list: &mut Vec<(u64, T::AccountId)>, proportion: u64, id: &T::AccountId) { + for (p, who) in list.iter_mut() { + if who == id { + *p = proportion; + return; + } + } + list.push((proportion, id.clone())); + } + + #[inline] + fn remove_edge(list: &mut Vec<(u64, T::AccountId)>, id: &T::AccountId) { + list.retain(|(_, who)| who != id); + } + + /// Change the pivot hotkey for these relations. + /// Ensures there are no self-loops with the new pivot. + pub fn rebind_pivot(&mut self, new_pivot: T::AccountId) -> DispatchResult { + // No self-loop via children or parents for the new pivot. + Self::ensure_no_self_loop(&new_pivot, &self.children)?; + Self::ensure_no_self_loop(&new_pivot, &self.parents)?; + + self.pivot = new_pivot; + Ok(()) + } +} + impl Pallet { + /// Set childkeys vector making sure there are no empty vectors in the state + fn set_childkeys(parent: T::AccountId, netuid: NetUid, childkey_vec: Vec<(u64, T::AccountId)>) { + if childkey_vec.is_empty() { + ChildKeys::::remove(parent, netuid); + } else { + ChildKeys::::insert(parent, netuid, childkey_vec); + } + } + + /// Set parentkeys vector making sure there are no empty vectors in the state + fn set_parentkeys( + child: T::AccountId, + netuid: NetUid, + parentkey_vec: Vec<(u64, T::AccountId)>, + ) { + if parentkey_vec.is_empty() { + ParentKeys::::remove(child, netuid); + } else { + ParentKeys::::insert(child, netuid, parentkey_vec); + } + } + + /// Loads all records from ChildKeys and ParentKeys where (hotkey, netuid) is the key. + /// Produces a parent->(child->prop) adjacency map that **cannot violate** + /// the required consistency because all inserts go through `link`. + fn load_child_parent_relations( + hotkey: &T::AccountId, + netuid: NetUid, + ) -> Result, DispatchError> { + let mut rel = PCRelations::::new(hotkey.clone()); + + // Load children: (prop, child) from ChildKeys(hotkey, netuid) + let child_links = ChildKeys::::get(hotkey, netuid); + let mut children = BTreeMap::::new(); + for (prop, child) in child_links { + // Ignore any accidental self-loop in storage + if child != *hotkey { + children.insert(child, prop); + } + } + // Validate & set (enforce no self-loop and sum limit) + rel.link_children(children)?; + + // Load parents: (prop, parent) from ParentKeys(hotkey, netuid) + let parent_links = ParentKeys::::get(hotkey, netuid); + let mut parents = BTreeMap::::new(); + for (prop, parent) in parent_links { + if parent != *hotkey { + parents.insert(parent, prop); + } + } + // Keep the same validation rules for parents (no self-loop, bounded sum). 
+ rel.link_parents(parents)?; + + Ok(rel) + } + + /// Build a `PCRelations` for `pivot` (parent) from the `PendingChildKeys` queue, + /// preserving the current `ParentKeys(pivot, netuid)` so `persist_child_parent_relations` + /// won’t accidentally clear existing parents. + /// + /// PendingChildKeys layout: + /// (netuid, pivot) -> (Vec<(proportion, child)>) + pub fn load_relations_from_pending( + pivot: T::AccountId, + pending_children_vec: &Vec<(u64, T::AccountId)>, + netuid: NetUid, + ) -> Result, DispatchError> { + let mut rel = PCRelations::::new(pivot.clone()); + + // Deduplicate into a BTreeMap (last wins if duplicates). + let mut children: BTreeMap = BTreeMap::new(); + for (prop, child) in pending_children_vec { + if *child != pivot { + children.insert(child.clone(), *prop); + } + } + + // Enforce invariants (no self-loop, total weight <= u64::MAX) + rel.link_children(children)?; + + // Preserve the current parents of the pivot so `persist_child_parent_relations` + // won’t clear them when we only intend to update children. + let existing_parents_vec = ParentKeys::::get(pivot.clone(), netuid); + let mut parents: BTreeMap = BTreeMap::new(); + for (w, parent) in existing_parents_vec { + if parent != pivot { + parents.insert(parent, w); + } + } + // This uses the same basic checks (no self-loop, bounded sum). + // If you didn't expose link_parents, inline the simple validations here. + rel.link_parents(parents)?; + + Ok(rel) + } + + /// Persist the `relations` around `hotkey` to storage, updating both directions: + /// - Writes ChildKeys(hotkey, netuid) = children + /// and synchronizes ParentKeys(child, netuid) entries accordingly. + /// - Writes ParentKeys(hotkey, netuid) = parents + /// and synchronizes ChildKeys(parent, netuid) entries accordingly. + /// + /// This is a **diff-based** update that only touches affected neighbors. 
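+    ///
+    /// Hedged worked example (hypothetical accounts A, B, C): if storage currently holds
+    /// ChildKeys(pivot) = [A, B] and `relations.children()` = {B, C}, then:
+    /// - C is added: the pivot is upserted into ParentKeys(C);
+    /// - B is kept: the pivot's proportion in ParentKeys(B) is refreshed;
+    /// - A is removed: the pivot is dropped from ParentKeys(A);
+    /// and the same three-way diff is applied on the parents side against ChildKeys(parent),
+    /// so only the affected neighbors incur reads and writes.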
+ pub fn persist_child_parent_relations( + relations: PCRelations, + netuid: NetUid, + weight: &mut Weight, + ) -> DispatchResult { + let pivot = relations.pivot().clone(); + + // --------------------------- + // 1) Pivot -> Children side + // --------------------------- + let new_children_map = relations.children(); + let new_children_vec: Vec<(u64, T::AccountId)> = new_children_map + .iter() + .map(|(c, p)| (*p, c.clone())) + .collect(); + + let prev_children_vec = ChildKeys::::get(&pivot, netuid); + weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 0)); + + // Overwrite pivot's children vector + Self::set_childkeys(pivot.clone(), netuid, new_children_vec.clone()); + weight.saturating_accrue(T::DbWeight::get().reads_writes(0, 1)); + + // Build quick-lookup sets for diffing + let prev_children_set: BTreeSet = + prev_children_vec.iter().map(|(_, c)| c.clone()).collect(); + let new_children_set: BTreeSet = new_children_map.keys().cloned().collect(); + + // Added children = new / prev + for added in new_children_set + .iter() + .filter(|c| !prev_children_set.contains(*c)) + { + let p = match new_children_map.get(added) { + Some(p) => *p, + None => return Err(Error::::ChildParentInconsistency.into()), + }; + let mut pk = ParentKeys::::get(added.clone(), netuid); + PCRelations::::upsert_edge(&mut pk, p, &pivot); + Self::set_parentkeys(added.clone(), netuid, pk); + weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 1)); + } + + // Updated children = intersection where proportion changed + for common in new_children_set.intersection(&prev_children_set) { + let new_p = match new_children_map.get(common) { + Some(p) => *p, + None => return Err(Error::::ChildParentInconsistency.into()), + }; + let mut pk = ParentKeys::::get(common.clone(), netuid); + PCRelations::::upsert_edge(&mut pk, new_p, &pivot); + Self::set_parentkeys(common.clone(), netuid, pk); + weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 1)); + } + + // Removed children = prev \ new => remove (pivot) from ParentKeys(child) + for removed in prev_children_set + .iter() + .filter(|c| !new_children_set.contains(*c)) + { + let mut pk = ParentKeys::::get(removed.clone(), netuid); + PCRelations::::remove_edge(&mut pk, &pivot); + Self::set_parentkeys(removed.clone(), netuid, pk); + weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 1)); + } + + // --------------------------- + // 2) Parents -> Pivot side + // --------------------------- + let new_parents_map = relations.parents(); + let new_parents_vec: Vec<(u64, T::AccountId)> = new_parents_map + .iter() + .map(|(p, pr)| (*pr, p.clone())) + .collect(); + + let prev_parents_vec = ParentKeys::::get(&pivot, netuid); + + // Overwrite pivot's parents vector + Self::set_parentkeys(pivot.clone(), netuid, new_parents_vec.clone()); + + let prev_parents_set: BTreeSet = + prev_parents_vec.into_iter().map(|(_, p)| p).collect(); + let new_parents_set: BTreeSet = new_parents_map.keys().cloned().collect(); + + // Added parents = new / prev => ensure ChildKeys(parent) has (p, pivot) + for added in new_parents_set + .iter() + .filter(|p| !prev_parents_set.contains(*p)) + { + let p_val = match new_parents_map.get(added) { + Some(p) => *p, + None => return Err(Error::::ChildParentInconsistency.into()), + }; + let mut ck = ChildKeys::::get(added.clone(), netuid); + PCRelations::::upsert_edge(&mut ck, p_val, &pivot); + Self::set_childkeys(added.clone(), netuid, ck); + weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 1)); + } + + // Updated parents = intersection 
where proportion changed + for common in new_parents_set.intersection(&prev_parents_set) { + let new_p = new_parents_map + .get(common) + .ok_or(Error::::ChildParentInconsistency)?; + let mut ck = ChildKeys::::get(common.clone(), netuid); + PCRelations::::upsert_edge(&mut ck, *new_p, &pivot); + Self::set_childkeys(common.clone(), netuid, ck); + weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 1)); + } + + // Removed parents = prev \ new => remove (pivot) from ChildKeys(parent) + for removed in prev_parents_set + .iter() + .filter(|p| !new_parents_set.contains(*p)) + { + let mut ck = ChildKeys::::get(removed.clone(), netuid); + PCRelations::::remove_edge(&mut ck, &pivot); + Self::set_childkeys(removed.clone(), netuid, ck); + weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 1)); + } + + Ok(()) + } + + /// Swap all parent/child relations from `old_hotkey` to `new_hotkey` on `netuid`. + /// Steps: + /// 1) Load relations around `old_hotkey` + /// 2) Clean up storage references to `old_hotkey` (both directions) + /// 3) Rebind pivot to `new_hotkey` + /// 4) Persist relations around `new_hotkey` + pub fn parent_child_swap_hotkey( + old_hotkey: &T::AccountId, + new_hotkey: &T::AccountId, + netuid: NetUid, + weight: &mut Weight, + ) -> DispatchResult { + // 1) Load the current relations around old_hotkey + let mut relations = Self::load_child_parent_relations(old_hotkey, netuid)?; + weight.saturating_accrue(T::DbWeight::get().reads_writes(2, 0)); + + // 2) Clean up all storage entries that reference old_hotkey + // 2a) For each child of old_hotkey: remove old_hotkey from ParentKeys(child, netuid) + for (child, _) in relations.children().iter() { + let mut pk = ParentKeys::::get(child.clone(), netuid); + PCRelations::::remove_edge(&mut pk, old_hotkey); + Self::set_parentkeys(child.clone(), netuid, pk.clone()); + weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 1)); + } + // 2b) For each parent of old_hotkey: remove old_hotkey from ChildKeys(parent, netuid) + for (parent, _) in relations.parents().iter() { + let mut ck = ChildKeys::::get(parent.clone(), netuid); + PCRelations::::remove_edge(&mut ck, old_hotkey); + ChildKeys::::insert(parent.clone(), netuid, ck); + weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 1)); + } + // 2c) Clear direct maps of old_hotkey + ChildKeys::::insert( + old_hotkey.clone(), + netuid, + Vec::<(u64, T::AccountId)>::new(), + ); + Self::set_parentkeys( + old_hotkey.clone(), + netuid, + Vec::<(u64, T::AccountId)>::new(), + ); + weight.saturating_accrue(T::DbWeight::get().reads_writes(0, 2)); + + // 3) Rebind pivot to new_hotkey (validate no self-loop with existing maps) + relations.rebind_pivot(new_hotkey.clone())?; + + // 4) Swap PendingChildKeys( netuid, parent ) --> Vec<(proportion,child), cool_down_block> + // Fail if consistency breaks + if PendingChildKeys::::contains_key(netuid, old_hotkey) { + let (children, cool_down_block) = PendingChildKeys::::get(netuid, old_hotkey); + relations.ensure_pending_consistency(&children)?; + + PendingChildKeys::::remove(netuid, old_hotkey); + PendingChildKeys::::insert(netuid, new_hotkey, (children, cool_down_block)); + weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 2)); + } + + // 5) Persist relations under the new pivot (diffs vs existing state at new_hotkey) + Self::persist_child_parent_relations(relations, netuid, weight) + } + /// ---- The implementation for the extrinsic do_set_child_singular: Sets a single child. /// This function allows a coldkey to set children keys. 
/// @@ -70,19 +513,6 @@ impl Pallet { Error::::NonAssociatedColdKey ); - // Ensure that the number of children does not exceed 5. - ensure!(children.len() <= 5, Error::::TooManyChildren); - - // Ensure that each child is not the hotkey. - for (_, child_i) in &children { - ensure!(child_i != &hotkey, Error::::InvalidChild); - } - // Ensure that the sum of the proportions does not exceed u64::MAX. - let _total_proportion: u64 = children - .iter() - .try_fold(0u64, |acc, &(proportion, _)| acc.checked_add(proportion)) - .ok_or(Error::::ProportionOverflow)?; - // Ensure there are no duplicates in the list of children. let mut unique_children = Vec::new(); for (_, child_i) in &children { @@ -93,6 +523,14 @@ impl Pallet { unique_children.push(child_i.clone()); } + // Ensure we don't break consistency when these new childkeys are set: + // - Ensure that the number of children does not exceed 5 + // - Each child is not the hotkey. + // - The sum of the proportions does not exceed u64::MAX. + // - Bipartite separation (no A <-> B relations) + let relations = Self::load_child_parent_relations(&hotkey, netuid)?; + relations.ensure_pending_consistency(&children)?; + // Check that the parent key has at least the minimum own stake // if children vector is not empty // (checking with check_weights_min_stake wouldn't work because it considers @@ -165,60 +603,29 @@ impl Pallet { PendingChildKeys::::iter_prefix(netuid).for_each( |(hotkey, (children, cool_down_block))| { if cool_down_block < current_block { - // Erase myself from old children's parents. - let old_children: Vec<(u64, T::AccountId)> = - ChildKeys::::get(hotkey.clone(), netuid); - - // Iterate over all my old children and remove myself from their parent's map. - for (_, old_child_i) in old_children.clone().iter() { - // Get the old child's parents on this network. - let my_old_child_parents: Vec<(u64, T::AccountId)> = - ParentKeys::::get(old_child_i.clone(), netuid); - - // Filter my hotkey from my old children's parents list. - let filtered_parents: Vec<(u64, T::AccountId)> = my_old_child_parents - .into_iter() - .filter(|(_, parent)| *parent != hotkey) - .collect(); - - // Update the parent list in storage - ParentKeys::::insert(old_child_i, netuid, filtered_parents); - } - - // Insert my new children + proportion list into the map. - ChildKeys::::insert(hotkey.clone(), netuid, children.clone()); - - // Update the parents list for my new children. - for (proportion, new_child_i) in children.clone().iter() { - // Get the child's parents on this network. - let mut new_child_previous_parents: Vec<(u64, T::AccountId)> = - ParentKeys::::get(new_child_i.clone(), netuid); - - // Append my hotkey and proportion to my new child's parents list. - // NOTE: There are no duplicates possible because I previously removed my self from my old children. - new_child_previous_parents.push((*proportion, hotkey.clone())); - - // Update the parents list in storage. - ParentKeys::::insert( - new_child_i.clone(), - netuid, - new_child_previous_parents, - ); + // If child-parent consistency is broken, we will fail setting new children silently + let maybe_relations = + Self::load_relations_from_pending(hotkey.clone(), &children, netuid); + if let Ok(relations) = maybe_relations { + let mut _weight: Weight = T::DbWeight::get().reads(0); + if let Ok(()) = + Self::persist_child_parent_relations(relations, netuid, &mut _weight) + { + // Log and emit event. 
+ log::trace!( + "SetChildren( netuid:{:?}, hotkey:{:?}, children:{:?} )", + hotkey, + netuid, + children.clone() + ); + Self::deposit_event(Event::SetChildren( + hotkey.clone(), + netuid, + children.clone(), + )); + } } - // Log and emit event. - log::trace!( - "SetChildren( netuid:{:?}, hotkey:{:?}, children:{:?} )", - hotkey, - netuid, - children.clone() - ); - Self::deposit_event(Event::SetChildren( - hotkey.clone(), - netuid, - children.clone(), - )); - // Remove pending children PendingChildKeys::::remove(netuid, hotkey); } @@ -357,4 +764,268 @@ impl Pallet { pub fn get_childkey_take(hotkey: &T::AccountId, netuid: NetUid) -> u16 { ChildkeyTake::::get(hotkey, netuid) } + + //////////////////////////////////////////////////////////// + // State cleaners (for use in migration) + // TODO: Deprecate when the state is clean for a while + + pub fn clean_zero_childkey_vectors(weight: &mut Weight) { + // Collect keys to delete first to avoid mutating while iterating. + let mut to_remove: Vec<(T::AccountId, NetUid)> = Vec::new(); + + for (parent, netuid, children) in ChildKeys::::iter() { + // Account for the read + *weight = weight.saturating_add(T::DbWeight::get().reads(1)); + + if children.is_empty() { + to_remove.push((parent, netuid)); + } + } + + // Remove all empty entries + for (parent, netuid) in &to_remove { + ChildKeys::::remove(parent, netuid); + // Account for the write + *weight = weight.saturating_add(T::DbWeight::get().writes(1)); + } + log::info!( + target: "runtime", + "Removed {} empty childkey vectors.", + to_remove.len() + ); + } + + /// Remove self-loops in `ChildKeys` and `ParentKeys`. + /// If, after removal, a value-vector becomes empty, the storage key is removed. + pub fn clean_self_loops(weight: &mut Weight) { + // ------------------------------- + // 1) ChildKeys: (parent, netuid) -> Vec<(w, child)> + // Remove any entries where child == parent. + // ------------------------------- + let mut to_update_ck: Vec<((T::AccountId, NetUid), Vec<(u64, T::AccountId)>)> = Vec::new(); + let mut to_remove_ck: Vec<(T::AccountId, NetUid)> = Vec::new(); + + for (parent, netuid, children) in ChildKeys::::iter() { + *weight = weight.saturating_add(T::DbWeight::get().reads(1)); + + // Filter out self-loops + let filtered: Vec<(u64, T::AccountId)> = children + .clone() + .into_iter() + .filter(|(_, c)| *c != parent) + .collect(); + + // If nothing changed, skip + // (we can detect by comparing lengths; safer is to re-check if any removed existed) + // For simplicity, just compare lengths: + // If len unchanged and the previous vector had no self-loop, skip. + // If there *was* a self-loop and filtered is empty, we'll remove the key. 
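            // A minimal illustration of the length comparison below, with hypothetical
            // entries (assuming `parent` shows up in its own child list):
            //
            //     children = [(10, parent), (20, other)] -> filtered = [(20, other)]  // changed: rewrite
            //     children = [(10, parent)]              -> filtered = []             // changed: remove key
            //     children = [(20, other)]               -> filtered = [(20, other)]  // equal lengths: skip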
+ if filtered.len() == children.len() { + // No change -> continue + continue; + } + + if filtered.is_empty() { + to_remove_ck.push((parent, netuid)); + } else { + to_update_ck.push(((parent, netuid), filtered)); + } + } + + // Apply ChildKeys updates/removals + for ((parent, netuid), new_vec) in &to_update_ck { + Self::set_childkeys(parent.clone(), *netuid, new_vec.clone()); + *weight = weight.saturating_add(T::DbWeight::get().writes(1)); + } + for (parent, netuid) in &to_remove_ck { + ChildKeys::::remove(parent, netuid); + *weight = weight.saturating_add(T::DbWeight::get().writes(1)); + } + log::info!( + target: "runtime", + "Removed {} self-looping childkeys.", + to_update_ck.len().saturating_add(to_remove_ck.len()) + ); + + // ------------------------------- + // 2) ParentKeys: (child, netuid) -> Vec<(w, parent)> + // Remove any entries where parent == child. + // ------------------------------- + let mut to_update_pk: Vec<((T::AccountId, NetUid), Vec<(u64, T::AccountId)>)> = Vec::new(); + let mut to_remove_pk: Vec<(T::AccountId, NetUid)> = Vec::new(); + + for (child, netuid, parents) in ParentKeys::::iter() { + *weight = weight.saturating_add(T::DbWeight::get().reads(1)); + + // Filter out self-loops + let filtered: Vec<(u64, T::AccountId)> = parents + .clone() + .into_iter() + .filter(|(_, p)| *p != child) + .collect(); + + // If unchanged, skip + if filtered.len() == parents.len() { + continue; + } + + if filtered.is_empty() { + to_remove_pk.push((child, netuid)); + } else { + to_update_pk.push(((child, netuid), filtered)); + } + } + + // Apply ParentKeys updates/removals + for ((child, netuid), new_vec) in &to_update_pk { + Self::set_parentkeys(child.clone(), *netuid, new_vec.clone()); + *weight = weight.saturating_add(T::DbWeight::get().writes(1)); + } + for (child, netuid) in &to_remove_pk { + ParentKeys::::remove(child, netuid); + *weight = weight.saturating_add(T::DbWeight::get().writes(1)); + } + log::info!( + target: "runtime", + "Removed {} self-looping parentkeys.", + to_update_pk.len().saturating_add(to_remove_pk.len()) + ); + } + + pub fn clean_zero_parentkey_vectors(weight: &mut Weight) { + // Collect keys to delete first to avoid mutating while iterating. + let mut to_remove: Vec<(T::AccountId, NetUid)> = Vec::new(); + + for (parent, netuid, children) in ParentKeys::::iter() { + // Account for the read + *weight = weight.saturating_add(T::DbWeight::get().reads(1)); + + if children.is_empty() { + to_remove.push((parent, netuid)); + } + } + + // Remove all empty entries + for (parent, netuid) in &to_remove { + ParentKeys::::remove(parent, netuid); + // Account for the write + *weight = weight.saturating_add(T::DbWeight::get().writes(1)); + } + log::info!( + target: "runtime", + "Removed {} empty parentkey vectors.", + to_remove.len() + ); + } + + /// Make ChildKeys and ParentKeys bidirectionally consistent by + /// **removing** entries that don't have a matching counterpart. + /// A match means the exact tuple `(p, other_id)` is present on the opposite map. + /// + /// Rules: + /// - For each (parent, netuid) -> [(p, child)...] in ChildKeys: + /// keep only those (p, child) that appear in ParentKeys(child, netuid) as (p, parent). + /// If resulting list is empty, remove the key. + /// - For each (child, netuid) -> [(p, parent)...] in ParentKeys: + /// keep only those (p, parent) that appear in ChildKeys(parent, netuid) as (p, child). + /// If resulting list is empty, remove the key. 
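    /// Illustrative example with hypothetical keys: if `ChildKeys(A, n) = [(5, B), (7, C)]`
    /// while `ParentKeys(B, n) = [(5, A)]` and `ParentKeys(C, n)` is empty, then `(7, C)` has
    /// no counterpart and is pruned, leaving `ChildKeys(A, n) = [(5, B)]`; a vector that is
    /// pruned to empty has its storage key removed instead.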
+ pub fn repair_child_parent_consistency(weight: &mut Weight) { + // ------------------------------- + // 1) Prune ChildKeys by checking ParentKeys + // ------------------------------- + let mut ck_updates: Vec<((T::AccountId, NetUid), Vec<(u64, T::AccountId)>)> = Vec::new(); + let mut ck_removes: Vec<(T::AccountId, NetUid)> = Vec::new(); + + for (parent, netuid, children) in ChildKeys::::iter() { + *weight = weight.saturating_add(T::DbWeight::get().reads(1)); + + // Keep (p, child) only if ParentKeys(child, netuid) contains (p, parent) + let mut filtered: Vec<(u64, T::AccountId)> = Vec::with_capacity(children.len()); + for (p, child) in children.clone().into_iter() { + let rev = ParentKeys::::get(&child, netuid); + *weight = weight.saturating_add(T::DbWeight::get().reads(1)); + let has_match = rev.iter().any(|(pr, pa)| *pr == p && *pa == parent); + if has_match { + filtered.push((p, child)); + } + } + + if filtered.is_empty() { + ck_removes.push((parent, netuid)); + } else { + // Only write if changed + if children != filtered { + ck_updates.push(((parent, netuid), filtered)); + } + } + } + + for ((parent, netuid), new_vec) in &ck_updates { + Self::set_childkeys(parent.clone(), *netuid, new_vec.clone()); + *weight = weight.saturating_add(T::DbWeight::get().writes(1)); + } + for (parent, netuid) in &ck_removes { + ChildKeys::::remove(parent, netuid); + *weight = weight.saturating_add(T::DbWeight::get().writes(1)); + } + log::info!( + target: "runtime", + "Updated {} childkey inconsistent records.", + ck_updates.len() + ); + log::info!( + target: "runtime", + "Removed {} childkey inconsistent records.", + ck_removes.len() + ); + + // ------------------------------- + // 2) Prune ParentKeys by checking ChildKeys + // ------------------------------- + let mut pk_updates: Vec<((T::AccountId, NetUid), Vec<(u64, T::AccountId)>)> = Vec::new(); + let mut pk_removes: Vec<(T::AccountId, NetUid)> = Vec::new(); + + for (child, netuid, parents) in ParentKeys::::iter() { + *weight = weight.saturating_add(T::DbWeight::get().reads(1)); + + // Keep (p, parent) only if ChildKeys(parent, netuid) contains (p, child) + let mut filtered: Vec<(u64, T::AccountId)> = Vec::with_capacity(parents.len()); + for (p, parent) in parents.clone().into_iter() { + let fwd = ChildKeys::::get(&parent, netuid); + *weight = weight.saturating_add(T::DbWeight::get().reads(1)); + let has_match = fwd.iter().any(|(pr, ch)| *pr == p && *ch == child); + if has_match { + filtered.push((p, parent)); + } + } + + if filtered.is_empty() { + pk_removes.push((child, netuid)); + } else { + // Only write if changed + if parents != filtered { + pk_updates.push(((child, netuid), filtered)); + } + } + } + + for ((child, netuid), new_vec) in &pk_updates { + Self::set_parentkeys(child.clone(), *netuid, new_vec.clone()); + *weight = weight.saturating_add(T::DbWeight::get().writes(1)); + } + for (child, netuid) in &pk_removes { + ParentKeys::::remove(child, netuid); + *weight = weight.saturating_add(T::DbWeight::get().writes(1)); + } + log::info!( + target: "runtime", + "Updated {} parentkey inconsistent records.", + pk_updates.len() + ); + log::info!( + target: "runtime", + "Removed {} parentkey inconsistent records.", + pk_removes.len() + ); + } } diff --git a/pallets/subtensor/src/staking/stake_utils.rs b/pallets/subtensor/src/staking/stake_utils.rs index 528289ec0e..21ff1a57d5 100644 --- a/pallets/subtensor/src/staking/stake_utils.rs +++ b/pallets/subtensor/src/staking/stake_utils.rs @@ -4,7 +4,7 @@ use share_pool::{SharePool, 
SharePoolDataOperations}; use sp_std::ops::Neg; use substrate_fixed::types::{I64F64, I96F32, U64F64, U96F32}; use subtensor_runtime_common::{AlphaCurrency, Currency, NetUid, TaoCurrency}; -use subtensor_swap_interface::{OrderType, SwapHandler, SwapResult}; +use subtensor_swap_interface::{Order, SwapHandler, SwapResult}; impl Pallet { /// Retrieves the total alpha issuance for a given subnet. @@ -579,35 +579,25 @@ impl Pallet { tao: TaoCurrency, price_limit: TaoCurrency, drop_fees: bool, - ) -> Result { + ) -> Result, DispatchError> { // Step 1: Get the mechanism type for the subnet (0 for Stable, 1 for Dynamic) let mechanism_id: u16 = SubnetMechanism::::get(netuid); let swap_result = if mechanism_id == 1 { - T::SwapInterface::swap( - netuid.into(), - OrderType::Buy, - tao.into(), - price_limit.into(), - drop_fees, - false, - )? + let order = GetAlphaForTao::::with_amount(tao); + T::SwapInterface::swap(netuid.into(), order, price_limit.into(), drop_fees, false)? } else { - let abs_delta: u64 = tao.into(); - // Step 3.b.1: Stable mechanism, just return the value 1:1 SwapResult { - amount_paid_in: tao.into(), - amount_paid_out: tao.into(), - fee_paid: 0, - tao_reserve_delta: abs_delta as i64, - alpha_reserve_delta: (abs_delta as i64).neg(), + amount_paid_in: tao, + amount_paid_out: tao.to_u64().into(), + fee_paid: TaoCurrency::ZERO, } }; - let alpha_decrease = AlphaCurrency::from(swap_result.alpha_reserve_delta.unsigned_abs()); + let alpha_decrease = swap_result.paid_out_reserve_delta_i64().unsigned_abs(); // Decrease Alpha reserves. - Self::decrease_provided_alpha_reserve(netuid.into(), alpha_decrease); + Self::decrease_provided_alpha_reserve(netuid.into(), alpha_decrease.into()); // Increase Alpha outstanding. SubnetAlphaOut::::mutate(netuid, |total| { @@ -618,7 +608,8 @@ impl Pallet { // (SubnetTAO + SubnetTaoProvided) in tao_reserve(), so it is irrelevant // which one to increase. SubnetTAO::::mutate(netuid, |total| { - *total = total.saturating_add((swap_result.tao_reserve_delta as u64).into()); + let delta = swap_result.paid_in_reserve_delta_i64().unsigned_abs(); + *total = total.saturating_add(delta.into()); }); // Increase Total Tao reserves. @@ -640,64 +631,46 @@ impl Pallet { alpha: AlphaCurrency, price_limit: TaoCurrency, drop_fees: bool, - ) -> Result { + ) -> Result, DispatchError> { // Step 1: Get the mechanism type for the subnet (0 for Stable, 1 for Dynamic) let mechanism_id: u16 = SubnetMechanism::::get(netuid); // Step 2: Swap alpha and attain tao let swap_result = if mechanism_id == 1 { - T::SwapInterface::swap( - netuid.into(), - OrderType::Sell, - alpha.into(), - price_limit.into(), - drop_fees, - false, - )? + let order = GetTaoForAlpha::::with_amount(alpha); + T::SwapInterface::swap(netuid.into(), order, price_limit.into(), drop_fees, false)? } else { - let abs_delta: u64 = alpha.into(); - // Step 3.b.1: Stable mechanism, just return the value 1:1 SwapResult { - amount_paid_in: alpha.into(), - amount_paid_out: alpha.into(), - fee_paid: 0, - tao_reserve_delta: (abs_delta as i64).neg(), - alpha_reserve_delta: abs_delta as i64, + amount_paid_in: alpha, + amount_paid_out: alpha.to_u64().into(), + fee_paid: AlphaCurrency::ZERO, } }; // Increase only the protocol Alpha reserve. We only use the sum of // (SubnetAlphaIn + SubnetAlphaInProvided) in alpha_reserve(), so it is irrelevant // which one to increase. 
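        // A brief sketch of the delta convention assumed here (hypothetical numbers): for a
        // sell of 100 alpha, `paid_in_reserve_delta_i64()` would be roughly +100 (alpha
        // entering the pool) and `paid_out_reserve_delta_i64()` some negative tao amount
        // (tao leaving the pool); both are reduced to magnitudes with `unsigned_abs()`
        // before the saturating mutations below.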
+ let alpha_delta = swap_result.paid_in_reserve_delta_i64().unsigned_abs(); SubnetAlphaIn::::mutate(netuid, |total| { - *total = total.saturating_add((swap_result.alpha_reserve_delta as u64).into()); + *total = total.saturating_add(alpha_delta.into()); }); // Decrease Alpha outstanding. // TODO: Deprecate, not accurate in v3 anymore SubnetAlphaOut::::mutate(netuid, |total| { - *total = total.saturating_sub((swap_result.alpha_reserve_delta as u64).into()); + *total = total.saturating_sub(alpha_delta.into()); }); // Decrease tao reserves. - Self::decrease_provided_tao_reserve( - netuid.into(), - swap_result - .tao_reserve_delta - .abs() - .try_into() - .unwrap_or(0) - .into(), - ); + let tao_delta = swap_result.paid_out_reserve_delta_i64().unsigned_abs(); + Self::decrease_provided_tao_reserve(netuid.into(), tao_delta.into()); // Reduce total TAO reserves. - TotalStake::::mutate(|total| { - *total = total.saturating_sub(swap_result.amount_paid_out.into()) - }); + TotalStake::::mutate(|total| *total = total.saturating_sub(swap_result.amount_paid_out)); // Increase total subnet TAO volume. SubnetVolume::::mutate(netuid, |total| { - *total = total.saturating_add(swap_result.amount_paid_out.into()) + *total = total.saturating_add(swap_result.amount_paid_out.to_u64() as u128) }); // Return the tao received. @@ -734,6 +707,12 @@ impl Pallet { Self::increase_stake_for_hotkey_and_coldkey_on_subnet(hotkey, coldkey, netuid, refund); } + // If this is a root-stake + if netuid == NetUid::ROOT { + // Adjust root claimed value for this hotkey and coldkey. + Self::remove_stake_adjust_root_claimed_for_hotkey_and_coldkey(hotkey, coldkey, alpha); + } + // Step 3: Update StakingHotkeys if the hotkey's total alpha, across all subnets, is zero // TODO const: fix. // if Self::get_stake(hotkey, coldkey) == 0 { @@ -742,6 +721,9 @@ impl Pallet { // }); // } + // Record TAO outflow + Self::record_tao_outflow(netuid, swap_result.amount_paid_out.into()); + LastColdkeyHotkeyStakeBlock::::insert(coldkey, hotkey, Self::get_current_block_as_u64()); // Deposit and log the unstaking event. @@ -751,7 +733,7 @@ impl Pallet { swap_result.amount_paid_out.into(), actual_alpha_decrease, netuid, - swap_result.fee_paid, + swap_result.fee_paid.to_u64(), )); log::debug!( @@ -782,7 +764,10 @@ impl Pallet { // Swap the tao to alpha. let swap_result = Self::swap_tao_for_alpha(netuid, tao, price_limit, drop_fees)?; - ensure!(swap_result.amount_paid_out > 0, Error::::AmountTooLow); + ensure!( + !swap_result.amount_paid_out.is_zero(), + Error::::AmountTooLow + ); ensure!( Self::try_increase_stake_for_hotkey_and_coldkey_on_subnet( @@ -801,7 +786,7 @@ impl Pallet { swap_result.amount_paid_out.into(), ) .is_zero() - || swap_result.amount_paid_out == 0 + || swap_result.amount_paid_out.is_zero() { return Ok(AlphaCurrency::ZERO); } @@ -813,12 +798,23 @@ impl Pallet { StakingHotkeys::::insert(coldkey, staking_hotkeys.clone()); } + // Record TAO inflow + Self::record_tao_inflow(netuid, swap_result.amount_paid_in.into()); + LastColdkeyHotkeyStakeBlock::::insert(coldkey, hotkey, Self::get_current_block_as_u64()); if set_limit { Self::set_stake_operation_limit(hotkey, coldkey, netuid.into()); } + // If this is a root-stake + if netuid == NetUid::ROOT { + // Adjust root claimed for this hotkey and coldkey. + let alpha = swap_result.amount_paid_out.into(); + Self::add_stake_adjust_root_claimed_for_hotkey_and_coldkey(hotkey, coldkey, alpha); + Self::maybe_add_coldkey_index(coldkey); + } + // Deposit and log the staking event. 
Self::deposit_event(Event::StakeAdded( coldkey.clone(), @@ -826,7 +822,7 @@ impl Pallet { tao, swap_result.amount_paid_out.into(), netuid, - swap_result.fee_paid, + swap_result.fee_paid.to_u64(), )); log::debug!( @@ -946,7 +942,8 @@ impl Pallet { // Get the minimum balance (and amount) that satisfies the transaction let min_stake = DefaultMinStake::::get(); let min_amount = { - let fee = T::SwapInterface::sim_swap(netuid.into(), OrderType::Buy, min_stake.into()) + let order = GetAlphaForTao::::with_amount(min_stake); + let fee = T::SwapInterface::sim_swap(netuid.into(), order) .map(|res| res.fee_paid) .unwrap_or(T::SwapInterface::approx_fee_amount( netuid.into(), @@ -978,18 +975,18 @@ impl Pallet { Error::::HotKeyAccountNotExists ); - let swap_result = - T::SwapInterface::sim_swap(netuid.into(), OrderType::Buy, stake_to_be_added.into()) - .map_err(|_| Error::::InsufficientLiquidity)?; + let order = GetAlphaForTao::::with_amount(stake_to_be_added); + let swap_result = T::SwapInterface::sim_swap(netuid.into(), order) + .map_err(|_| Error::::InsufficientLiquidity)?; // Check that actual withdrawn TAO amount is not lower than the minimum stake ensure!( - TaoCurrency::from(swap_result.amount_paid_in) >= min_stake, + swap_result.amount_paid_in >= min_stake, Error::::AmountTooLow ); ensure!( - swap_result.amount_paid_out > 0, + !swap_result.amount_paid_out.is_zero(), Error::::InsufficientLiquidity ); @@ -1029,11 +1026,12 @@ impl Pallet { // Bypass this check if the user unstakes full amount let remaining_alpha_stake = Self::calculate_reduced_stake_on_subnet(hotkey, coldkey, netuid, alpha_unstaked)?; - match T::SwapInterface::sim_swap(netuid.into(), OrderType::Sell, alpha_unstaked.into()) { + let order = GetTaoForAlpha::::with_amount(alpha_unstaked); + match T::SwapInterface::sim_swap(netuid.into(), order) { Ok(res) => { if !remaining_alpha_stake.is_zero() { ensure!( - TaoCurrency::from(res.amount_paid_out) >= DefaultMinStake::::get(), + res.amount_paid_out >= DefaultMinStake::::get(), Error::::AmountTooLow ); } @@ -1166,15 +1164,12 @@ impl Pallet { // If origin and destination netuid are different, do the swap-related checks if origin_netuid != destination_netuid { // Ensure that the stake amount to be removed is above the minimum in tao equivalent. 
- let tao_equivalent = T::SwapInterface::sim_swap( - origin_netuid.into(), - OrderType::Sell, - alpha_amount.into(), - ) - .map(|res| res.amount_paid_out) - .map_err(|_| Error::::InsufficientLiquidity)?; + let order = GetTaoForAlpha::::with_amount(alpha_amount); + let tao_equivalent = T::SwapInterface::sim_swap(origin_netuid.into(), order) + .map(|res| res.amount_paid_out) + .map_err(|_| Error::::InsufficientLiquidity)?; ensure!( - TaoCurrency::from(tao_equivalent) > DefaultMinStake::::get(), + tao_equivalent > DefaultMinStake::::get(), Error::::AmountTooLow ); diff --git a/pallets/subtensor/src/subnets/leasing.rs b/pallets/subtensor/src/subnets/leasing.rs index 244b9af2e9..cf263a1335 100644 --- a/pallets/subtensor/src/subnets/leasing.rs +++ b/pallets/subtensor/src/subnets/leasing.rs @@ -87,8 +87,8 @@ impl Pallet { // Initialize the lease id, coldkey and hotkey and keep track of them let lease_id = Self::get_next_lease_id()?; - let lease_coldkey = Self::lease_coldkey(lease_id); - let lease_hotkey = Self::lease_hotkey(lease_id); + let lease_coldkey = Self::lease_coldkey(lease_id)?; + let lease_hotkey = Self::lease_hotkey(lease_id)?; frame_system::Pallet::::inc_providers(&lease_coldkey); frame_system::Pallet::::inc_providers(&lease_hotkey); @@ -310,7 +310,7 @@ impl Pallet { &lease.coldkey, lease.netuid, total_contributors_cut_alpha, - T::SwapInterface::min_price().into(), + T::SwapInterface::min_price(), false, ) { Ok(tao_unstaked) => tao_unstaked, @@ -341,16 +341,16 @@ impl Pallet { AccumulatedLeaseDividends::::insert(lease_id, AlphaCurrency::ZERO); } - fn lease_coldkey(lease_id: LeaseId) -> T::AccountId { + fn lease_coldkey(lease_id: LeaseId) -> Result { let entropy = ("leasing/coldkey", lease_id).using_encoded(blake2_256); - Decode::decode(&mut TrailingZeroInput::new(entropy.as_ref())) - .expect("infinite length input; no invalid inputs for type; qed") + T::AccountId::decode(&mut TrailingZeroInput::new(entropy.as_ref())) + .map_err(|_| Error::::InvalidValue.into()) } - fn lease_hotkey(lease_id: LeaseId) -> T::AccountId { + fn lease_hotkey(lease_id: LeaseId) -> Result { let entropy = ("leasing/hotkey", lease_id).using_encoded(blake2_256); - Decode::decode(&mut TrailingZeroInput::new(entropy.as_ref())) - .expect("infinite length input; no invalid inputs for type; qed") + T::AccountId::decode(&mut TrailingZeroInput::new(entropy.as_ref())) + .map_err(|_| Error::::InvalidValue.into()) } fn get_next_lease_id() -> Result> { diff --git a/pallets/subtensor/src/subnets/registration.rs b/pallets/subtensor/src/subnets/registration.rs index 418c9cbde9..b71aa68a0a 100644 --- a/pallets/subtensor/src/subnets/registration.rs +++ b/pallets/subtensor/src/subnets/registration.rs @@ -136,7 +136,7 @@ impl Pallet { let burned_alpha = Self::swap_tao_for_alpha( netuid, actual_burn_amount, - T::SwapInterface::max_price().into(), + T::SwapInterface::max_price(), false, )? .amount_paid_out; diff --git a/pallets/subtensor/src/subnets/subnet.rs b/pallets/subtensor/src/subnets/subnet.rs index 8439297e14..4185aee624 100644 --- a/pallets/subtensor/src/subnets/subnet.rs +++ b/pallets/subtensor/src/subnets/subnet.rs @@ -2,7 +2,6 @@ use super::*; use sp_core::Get; use subtensor_runtime_common::{NetUid, TaoCurrency}; use subtensor_swap_interface::SwapHandler; - impl Pallet { /// Returns true if the subnetwork exists. 
/// @@ -276,7 +275,6 @@ impl Pallet { Self::set_max_allowed_uids(netuid, 256); Self::set_max_allowed_validators(netuid, 64); Self::set_min_allowed_weights(netuid, 1); - Self::set_max_weight_limit(netuid, u16::MAX); Self::set_adjustment_interval(netuid, 360); Self::set_target_registrations_per_interval(netuid, 1); Self::set_adjustment_alpha(netuid, 17_893_341_751_498_265_066); // 18_446_744_073_709_551_615 * 0.97 = 17_893_341_751_498_265_066 @@ -303,9 +301,6 @@ impl Pallet { if !ActivityCutoff::::contains_key(netuid) { ActivityCutoff::::insert(netuid, ActivityCutoff::::get(netuid)); } - if !MaxWeightsLimit::::contains_key(netuid) { - MaxWeightsLimit::::insert(netuid, MaxWeightsLimit::::get(netuid)); - } if !MinAllowedWeights::::contains_key(netuid) { MinAllowedWeights::::insert(netuid, MinAllowedWeights::::get(netuid)); } diff --git a/pallets/subtensor/src/subnets/uids.rs b/pallets/subtensor/src/subnets/uids.rs index 9d303cc979..669f74bccc 100644 --- a/pallets/subtensor/src/subnets/uids.rs +++ b/pallets/subtensor/src/subnets/uids.rs @@ -200,7 +200,6 @@ impl Pallet { IsNetworkMember::::remove(&hotkey, netuid); LastHotkeyEmissionOnNetuid::::remove(&hotkey, netuid); AlphaDividendsPerSubnet::::remove(netuid, &hotkey); - TaoDividendsPerSubnet::::remove(netuid, &hotkey); Axons::::remove(netuid, &hotkey); NeuronCertificates::::remove(netuid, &hotkey); Prometheus::::remove(netuid, &hotkey); diff --git a/pallets/subtensor/src/subnets/weights.rs b/pallets/subtensor/src/subnets/weights.rs index 61b64df137..f1a2df56e2 100644 --- a/pallets/subtensor/src/subnets/weights.rs +++ b/pallets/subtensor/src/subnets/weights.rs @@ -1203,14 +1203,14 @@ impl Pallet { weights } - /// Returns False if the weights exceed the max_weight_limit for this network. + /// Returns False if the weights exceed the configured max weight limit for this network. pub fn max_weight_limited(netuid: NetUid, uid: u16, uids: &[u16], weights: &[u16]) -> bool { // Allow self weights to exceed max weight limit. if Self::is_self_weight(uid, uids, weights) { return true; } - // If the max weight limit it u16 max, return true. + // If the max weight limit it u16 max, return true (current constant). let max_weight_limit: u16 = Self::get_max_weight_limit(netuid); if max_weight_limit == u16::MAX { return true; diff --git a/pallets/subtensor/src/swap/swap_coldkey.rs b/pallets/subtensor/src/swap/swap_coldkey.rs index 06f5d77f57..c81138b58c 100644 --- a/pallets/subtensor/src/swap/swap_coldkey.rs +++ b/pallets/subtensor/src/swap/swap_coldkey.rs @@ -188,6 +188,21 @@ impl Pallet { ); // Remove the value from the old account. Alpha::::remove((&hotkey, old_coldkey, netuid)); + + if new_alpha.saturating_add(old_alpha) > U64F64::from(0u64) { + Self::transfer_root_claimed_for_new_keys( + netuid, + &hotkey, + &hotkey, + old_coldkey, + new_coldkey, + ); + + if netuid == NetUid::ROOT { + // Register new coldkey with root stake + Self::maybe_add_coldkey_index(new_coldkey); + } + } } // Add the weight for the read and write. weight.saturating_accrue(T::DbWeight::get().reads_writes(2, 2)); diff --git a/pallets/subtensor/src/swap/swap_hotkey.rs b/pallets/subtensor/src/swap/swap_hotkey.rs index dc84f5a4fd..38f1f85df8 100644 --- a/pallets/subtensor/src/swap/swap_hotkey.rs +++ b/pallets/subtensor/src/swap/swap_hotkey.rs @@ -131,16 +131,15 @@ impl Pallet { /// 4. Moves all stake-related data for the interval. /// 5. Updates the last transaction block for the new hotkey. /// 6. Transfers the delegate take information. - /// 7. Swaps Senate membership if applicable. 
- /// 8. Updates delegate information. - /// 9. For each subnet: + /// 7. Updates delegate information. + /// 8. For each subnet: /// - Updates network membership status. /// - Transfers UID and key information. /// - Moves Prometheus data. /// - Updates axon information. /// - Transfers weight commits. /// - Updates loaded emission data. - /// 10. Transfers all stake information, including updating staking hotkeys for each coldkey. + /// 9. Transfers all stake information, including updating staking hotkeys for each coldkey. /// /// Throughout the process, the function accumulates the computational weight of operations performed. /// @@ -189,7 +188,7 @@ impl Pallet { // 5. execute the hotkey swap on all subnets for netuid in Self::get_all_subnet_netuids() { - Self::perform_hotkey_swap_on_one_subnet(old_hotkey, new_hotkey, weight, netuid); + Self::perform_hotkey_swap_on_one_subnet(old_hotkey, new_hotkey, weight, netuid)?; } // 6. Swap LastTxBlock @@ -207,14 +206,7 @@ impl Pallet { Self::remove_last_tx_block_childkey(old_hotkey); weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 2)); - // 9. Swap Senate members. - // Senate( hotkey ) --> ? - if T::SenateMembers::is_member(old_hotkey) { - T::SenateMembers::swap_member(old_hotkey, new_hotkey).map_err(|e| e.error)?; - weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 2)); - } - - // 10. Swap delegates. + // 9. Swap delegates. // Delegates( hotkey ) -> take value -- the hotkey delegate take value. if Delegates::::contains_key(old_hotkey) { let old_delegate_take = Delegates::::get(old_hotkey); @@ -223,7 +215,7 @@ impl Pallet { weight.saturating_accrue(T::DbWeight::get().reads_writes(2, 2)); } - // 11. Alpha already update in perform_hotkey_swap_on_one_subnet + // 10. Alpha already update in perform_hotkey_swap_on_one_subnet // Update the StakingHotkeys for the case where hotkey staked by multiple coldkeys. for ((coldkey, _netuid), _alpha) in old_alpha_values { // Swap StakingHotkeys. @@ -244,19 +236,6 @@ impl Pallet { Ok(()) } - pub fn swap_senate_member( - old_hotkey: &T::AccountId, - new_hotkey: &T::AccountId, - weight: &mut Weight, - ) -> DispatchResult { - weight.saturating_accrue(T::DbWeight::get().reads(1)); - if T::SenateMembers::is_member(old_hotkey) { - T::SenateMembers::swap_member(old_hotkey, new_hotkey).map_err(|e| e.error)?; - weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 2)); - } - Ok(()) - } - fn swap_hotkey_on_subnet( coldkey: &T::AccountId, old_hotkey: &T::AccountId, @@ -320,7 +299,7 @@ impl Pallet { } // 9. Perform the hotkey swap - Self::perform_hotkey_swap_on_one_subnet(old_hotkey, new_hotkey, &mut weight, netuid); + Self::perform_hotkey_swap_on_one_subnet(old_hotkey, new_hotkey, &mut weight, netuid)?; // 10. Update the last transaction block for the coldkey Self::set_last_tx_block(coldkey, block); @@ -344,7 +323,7 @@ impl Pallet { new_hotkey: &T::AccountId, weight: &mut Weight, netuid: NetUid, - ) { + ) -> DispatchResult { // 1. Swap total hotkey alpha for all subnets it exists on. // TotalHotkeyAlpha( hotkey, netuid ) -> alpha -- the total alpha that the hotkey has on a specific subnet. let alpha = TotalHotkeyAlpha::::take(old_hotkey, netuid); @@ -450,62 +429,9 @@ impl Pallet { } } // 4. Swap ChildKeys. - // ChildKeys( parent, netuid ) --> Vec<(proportion,child)> -- the child keys of the parent. 
- let my_children: Vec<(u64, T::AccountId)> = ChildKeys::::get(old_hotkey, netuid); - // Remove the old hotkey's child entries - ChildKeys::::remove(old_hotkey, netuid); - // Insert the same child entries for the new hotkey - ChildKeys::::insert(new_hotkey, netuid, my_children.clone()); - weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 2)); - - for (_, child_key_i) in my_children { - // For each child, update their parent list - let mut child_parents: Vec<(u64, T::AccountId)> = - ParentKeys::::get(child_key_i.clone(), netuid); - for parent in child_parents.iter_mut() { - // If the parent is the old hotkey, replace it with the new hotkey - if parent.1 == *old_hotkey { - parent.1 = new_hotkey.clone(); - } - } - // Update the child's parent list - ParentKeys::::insert(child_key_i, netuid, child_parents); - weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 1)); - } - // } - // 5. Swap ParentKeys. - // ParentKeys( child, netuid ) --> Vec<(proportion,parent)> -- the parent keys of the child. - let parents: Vec<(u64, T::AccountId)> = ParentKeys::::get(old_hotkey, netuid); - // Remove the old hotkey's parent entries - ParentKeys::::remove(old_hotkey, netuid); - // Insert the same parent entries for the new hotkey - ParentKeys::::insert(new_hotkey, netuid, parents.clone()); - weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 2)); - - for (_, parent_key_i) in parents { - // For each parent, update their children list - let mut parent_children: Vec<(u64, T::AccountId)> = - ChildKeys::::get(parent_key_i.clone(), netuid); - for child in parent_children.iter_mut() { - // If the child is the old hotkey, replace it with the new hotkey - if child.1 == *old_hotkey { - child.1 = new_hotkey.clone(); - } - } - // Update the parent's children list - ChildKeys::::insert(parent_key_i, netuid, parent_children); - weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 1)); - } - // 6. Swap PendingChildKeys. - // PendingChildKeys( netuid, parent ) --> Vec<(proportion,child), cool_down_block> - if PendingChildKeys::::contains_key(netuid, old_hotkey) { - let (children, cool_down_block) = PendingChildKeys::::get(netuid, old_hotkey); - PendingChildKeys::::remove(netuid, old_hotkey); - PendingChildKeys::::insert(netuid, new_hotkey, (children, cool_down_block)); - weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 2)); - } + Self::parent_child_swap_hotkey(old_hotkey, new_hotkey, netuid, weight)?; // Also check for others with our hotkey as a child for (hotkey, (children, cool_down_block)) in PendingChildKeys::::iter_prefix(netuid) { @@ -569,15 +495,7 @@ impl Pallet { weight.saturating_accrue(T::DbWeight::get().reads_writes(2, 2)); // 8.3 Swap TaoDividendsPerSubnet - let old_hotkey_tao_dividends = TaoDividendsPerSubnet::::get(netuid, old_hotkey); - let new_hotkey_tao_dividends = TaoDividendsPerSubnet::::get(netuid, new_hotkey); - TaoDividendsPerSubnet::::remove(netuid, old_hotkey); - TaoDividendsPerSubnet::::insert( - netuid, - new_hotkey, - old_hotkey_tao_dividends.saturating_add(new_hotkey_tao_dividends), - ); - weight.saturating_accrue(T::DbWeight::get().reads_writes(2, 2)); + // Tao dividends were removed // 9. Swap Alpha // Alpha( hotkey, coldkey, netuid ) -> alpha @@ -586,9 +504,17 @@ impl Pallet { weight.saturating_accrue(T::DbWeight::get().reads(old_alpha_values.len() as u64)); weight.saturating_accrue(T::DbWeight::get().writes(old_alpha_values.len() as u64)); - // Insert the new alpha values. + // 9.1. 
Transfer root claimable + + Self::transfer_root_claimable_for_new_hotkey(old_hotkey, new_hotkey); + + // 9.2. Insert the new alpha values. for ((coldkey, netuid_alpha), alpha) in old_alpha_values { if netuid == netuid_alpha { + Self::transfer_root_claimed_for_new_keys( + netuid, old_hotkey, new_hotkey, &coldkey, &coldkey, + ); + let new_alpha = Alpha::::take((new_hotkey, &coldkey, netuid)); Alpha::::remove((old_hotkey, &coldkey, netuid)); Alpha::::insert( @@ -608,5 +534,7 @@ impl Pallet { } } } + + Ok(()) } } diff --git a/pallets/subtensor/src/tests/children.rs b/pallets/subtensor/src/tests/children.rs index 0fee0af2ca..e9b8c2aa6b 100644 --- a/pallets/subtensor/src/tests/children.rs +++ b/pallets/subtensor/src/tests/children.rs @@ -3007,7 +3007,7 @@ fn test_parent_child_chain_emission() { SubtensorModule::swap_tao_for_alpha( netuid, total_tao.to_num::().into(), - ::SwapInterface::max_price().into(), + ::SwapInterface::max_price(), false, ) .unwrap() @@ -4170,3 +4170,36 @@ fn test_do_set_childkey_take_rate_limit_exceeded() { )); }); } + +// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --package pallet-subtensor --lib -- tests::children::test_set_child_keys_empty_vector_clears_storage --exact --show-output +#[test] +fn test_set_child_keys_empty_vector_clears_storage() { + new_test_ext(1).execute_with(|| { + let sn_owner_hotkey = U256::from(1001); + let sn_owner_coldkey = U256::from(1002); + let parent = U256::from(1); + let child = U256::from(2); + let netuid = add_dynamic_network(&sn_owner_hotkey, &sn_owner_coldkey); + + // Initialize ChildKeys for `parent` with a non-empty vector + ChildKeys::::insert(parent, netuid, vec![(u64::MAX, child)]); + ParentKeys::::insert(child, netuid, vec![(u64::MAX, parent)]); + + // Sanity: entry exists right now because we explicitly inserted it + assert!(ChildKeys::::contains_key(parent, netuid)); + assert!(ParentKeys::::contains_key(child, netuid)); + + // Set children to empty + let empty_children: Vec<(u64, U256)> = Vec::new(); + mock_set_children_no_epochs(netuid, &parent, &empty_children); + + // When the child vector is empty, we should NOT keep an empty vec in storage. + // The key must be fully removed (no entry), not just zero-length value. + assert!(!ChildKeys::::contains_key(parent, netuid)); + assert!(!ParentKeys::::contains_key(child, netuid)); + + // `get` returns empty due to ValueQuery default, but presence is false. 
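        // (With a `ValueQuery` map, `contains_key` is what distinguishes "no entry" from a
        // stored empty vector, which is the same signal the state cleaners above rely on.)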
+ assert!(ChildKeys::::get(parent, netuid).is_empty()); + assert!(ParentKeys::::get(child, netuid).is_empty()); + }); +} diff --git a/pallets/subtensor/src/tests/claim_root.rs b/pallets/subtensor/src/tests/claim_root.rs new file mode 100644 index 0000000000..e73417a326 --- /dev/null +++ b/pallets/subtensor/src/tests/claim_root.rs @@ -0,0 +1,1503 @@ +#![allow(clippy::expect_used)] + +use crate::tests::mock::{ + RuntimeOrigin, SubtensorModule, Test, add_dynamic_network, new_test_ext, run_to_block, +}; +use crate::{ + DefaultMinRootClaimAmount, Error, MAX_NUM_ROOT_CLAIMS, MAX_ROOT_CLAIM_THRESHOLD, NetworksAdded, + NumRootClaim, NumStakingColdkeys, PendingRootAlphaDivs, RootClaimable, RootClaimableThreshold, + StakingColdkeys, StakingColdkeysByIndex, SubnetAlphaIn, SubnetMechanism, SubnetTAO, + SubtokenEnabled, Tempo, pallet, +}; +use crate::{RootClaimType, RootClaimTypeEnum, RootClaimed}; +use approx::assert_abs_diff_eq; +use frame_support::dispatch::RawOrigin; +use frame_support::pallet_prelude::Weight; +use frame_support::traits::Get; +use frame_support::{assert_err, assert_noop, assert_ok}; +use sp_core::{H256, U256}; +use sp_runtime::DispatchError; +use std::collections::BTreeSet; +use substrate_fixed::types::{I96F32, U96F32}; +use subtensor_runtime_common::{AlphaCurrency, Currency, NetUid, TaoCurrency}; +use subtensor_swap_interface::SwapHandler; + +#[test] +fn test_claim_root_set_claim_type() { + new_test_ext(1).execute_with(|| { + let coldkey = U256::from(1); + + assert_ok!(SubtensorModule::set_root_claim_type( + RuntimeOrigin::signed(coldkey), + RootClaimTypeEnum::Keep + ),); + + assert_eq!(RootClaimType::::get(coldkey), RootClaimTypeEnum::Keep); + }); +} + +#[test] +fn test_claim_root_with_drain_emissions() { + new_test_ext(1).execute_with(|| { + let owner_coldkey = U256::from(1001); + let hotkey = U256::from(1002); + let coldkey = U256::from(1003); + let netuid = add_dynamic_network(&hotkey, &owner_coldkey); + + SubtensorModule::set_tao_weight(u64::MAX); // Set TAO weight to 1.0 + + let root_stake = 2_000_000u64; + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, + &coldkey, + NetUid::ROOT, + root_stake.into(), + ); + + let initial_total_hotkey_alpha = 10_000_000u64; + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, + &owner_coldkey, + netuid, + initial_total_hotkey_alpha.into(), + ); + + let old_validator_stake = SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, + &owner_coldkey, + netuid, + ); + assert_eq!(old_validator_stake, initial_total_hotkey_alpha.into()); + + // Distribute pending root alpha + + let pending_root_alpha = 1_000_000u64; + SubtensorModule::drain_pending_emission( + netuid, + AlphaCurrency::ZERO, + pending_root_alpha.into(), + AlphaCurrency::ZERO, + ); + + // Check new validator stake + let validator_take_percent = 0.18f64; + + let new_validator_stake = SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, + &owner_coldkey, + netuid, + ); + let calculated_validator_stake = (pending_root_alpha as f64) * validator_take_percent + + (initial_total_hotkey_alpha as f64); + + assert_abs_diff_eq!( + u64::from(new_validator_stake), + calculated_validator_stake as u64, + epsilon = 100u64, + ); + + // Check claimable + + let claimable = *RootClaimable::::get(hotkey) + .get(&netuid) + .expect("claimable must exist at this point"); + let calculated_rate = + (pending_root_alpha as f64) * (1f64 - validator_take_percent) / (root_stake as f64); + + assert_abs_diff_eq!( + 
claimable.saturating_to_num::(), + calculated_rate, + epsilon = 0.001f64, + ); + + // Claim root alpha + + assert_ok!(SubtensorModule::set_root_claim_type( + RuntimeOrigin::signed(coldkey), + RootClaimTypeEnum::Keep + ),); + assert_eq!(RootClaimType::::get(coldkey), RootClaimTypeEnum::Keep); + + assert_ok!(SubtensorModule::claim_root( + RuntimeOrigin::signed(coldkey), + BTreeSet::from([netuid]) + )); + + let new_stake: u64 = + SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet(&hotkey, &coldkey, netuid) + .into(); + + assert_abs_diff_eq!( + new_stake, + (I96F32::from(root_stake) * claimable).saturating_to_num::(), + epsilon = 10u64, + ); + + // Check root claimed value saved + + let claimed = RootClaimed::::get((netuid, &hotkey, &coldkey)); + assert_eq!(u128::from(new_stake), claimed); + + // Distribute pending root alpha (round 2) + + SubtensorModule::drain_pending_emission( + netuid, + AlphaCurrency::ZERO, + pending_root_alpha.into(), + AlphaCurrency::ZERO, + ); + + // Check claimable (round 2) + + let claimable2 = *RootClaimable::::get(hotkey) + .get(&netuid) + .expect("claimable must exist at this point"); + let calculated_rate = + (pending_root_alpha as f64) * (1f64 - validator_take_percent) / (root_stake as f64); + + assert_abs_diff_eq!( + claimable2.saturating_to_num::(), + calculated_rate + claimable.saturating_to_num::(), + epsilon = 0.001f64, + ); + + assert_ok!(SubtensorModule::claim_root( + RuntimeOrigin::signed(coldkey), + BTreeSet::from([netuid]) + )); + + let new_stake2: u64 = + SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet(&hotkey, &coldkey, netuid) + .into(); + let calculated_new_stake2 = + (I96F32::from(root_stake) * claimable2).saturating_to_num::(); + + assert_abs_diff_eq!( + u64::from(new_stake2), + calculated_new_stake2, + epsilon = 10u64, + ); + + // Check root claimed value saved (round 2) + + let claimed = RootClaimed::::get((netuid, &hotkey, &coldkey)); + assert_eq!(u128::from(u64::from(new_stake2)), claimed); + }); +} + +#[test] +fn test_claim_root_adding_stake_proportionally_for_two_stakers() { + new_test_ext(1).execute_with(|| { + let owner_coldkey = U256::from(1001); + let other_coldkey = U256::from(10010); + let hotkey = U256::from(1002); + let alice_coldkey = U256::from(1003); + let bob_coldkey = U256::from(1004); + let netuid = add_dynamic_network(&hotkey, &owner_coldkey); + + SubtensorModule::set_tao_weight(u64::MAX); // Set TAO weight to 1.0 + + let root_stake = 1_000_000u64; + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, + &alice_coldkey, + NetUid::ROOT, + root_stake.into(), + ); + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, + &bob_coldkey, + NetUid::ROOT, + root_stake.into(), + ); + + let root_stake_rate = 0.1f64; + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, + &other_coldkey, + NetUid::ROOT, + (8 * root_stake).into(), + ); + + let initial_total_hotkey_alpha = 10_000_000u64; + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, + &owner_coldkey, + netuid, + initial_total_hotkey_alpha.into(), + ); + + // Claim root alpha + + assert_ok!(SubtensorModule::set_root_claim_type( + RuntimeOrigin::signed(alice_coldkey), + RootClaimTypeEnum::Keep + ),); + assert_ok!(SubtensorModule::set_root_claim_type( + RuntimeOrigin::signed(bob_coldkey), + RootClaimTypeEnum::Keep + ),); + + // Distribute pending root alpha + + let pending_root_alpha = 10_000_000u64; + SubtensorModule::drain_pending_emission( + netuid, + 
AlphaCurrency::ZERO, + pending_root_alpha.into(), + AlphaCurrency::ZERO, + ); + + assert_ok!(SubtensorModule::claim_root( + RuntimeOrigin::signed(alice_coldkey), + BTreeSet::from([netuid]) + )); + assert_ok!(SubtensorModule::claim_root( + RuntimeOrigin::signed(bob_coldkey), + BTreeSet::from([netuid]) + )); + + // Check stakes + let validator_take_percent = 0.18f64; + + let alice_stake: u64 = SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, + &alice_coldkey, + netuid, + ) + .into(); + + let bob_stake: u64 = SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, + &bob_coldkey, + netuid, + ) + .into(); + + let estimated_stake = + (pending_root_alpha as f64) * (1f64 - validator_take_percent) * root_stake_rate; + + assert_eq!(alice_stake, bob_stake); + + assert_abs_diff_eq!(alice_stake, estimated_stake as u64, epsilon = 100u64,); + }); +} + +#[test] +fn test_claim_root_adding_stake_disproportionally_for_two_stakers() { + new_test_ext(1).execute_with(|| { + let owner_coldkey = U256::from(1001); + let other_coldkey = U256::from(10010); + let hotkey = U256::from(1002); + let alice_coldkey = U256::from(1003); + let bob_coldkey = U256::from(1004); + let netuid = add_dynamic_network(&hotkey, &owner_coldkey); + + SubtensorModule::set_tao_weight(u64::MAX); // Set TAO weight to 1.0 + + let alice_root_stake = 1_000_000u64; + let bob_root_stake = 2_000_000u64; + let other_root_stake = 7_000_000u64; + + let alice_root_stake_rate = 0.1f64; + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, + &alice_coldkey, + NetUid::ROOT, + alice_root_stake.into(), + ); + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, + &bob_coldkey, + NetUid::ROOT, + bob_root_stake.into(), + ); + + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, + &other_coldkey, + NetUid::ROOT, + (other_root_stake).into(), + ); + + let initial_total_hotkey_alpha = 10_000_000u64; + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, + &owner_coldkey, + netuid, + initial_total_hotkey_alpha.into(), + ); + + // Claim root alpha + + assert_ok!(SubtensorModule::set_root_claim_type( + RuntimeOrigin::signed(alice_coldkey), + RootClaimTypeEnum::Keep + ),); + assert_ok!(SubtensorModule::set_root_claim_type( + RuntimeOrigin::signed(bob_coldkey), + RootClaimTypeEnum::Keep + ),); + + // Distribute pending root alpha + + let pending_root_alpha = 10_000_000u64; + SubtensorModule::drain_pending_emission( + netuid, + AlphaCurrency::ZERO, + pending_root_alpha.into(), + AlphaCurrency::ZERO, + ); + + assert_ok!(SubtensorModule::claim_root( + RuntimeOrigin::signed(alice_coldkey), + BTreeSet::from([netuid]) + )); + assert_ok!(SubtensorModule::claim_root( + RuntimeOrigin::signed(bob_coldkey), + BTreeSet::from([netuid]) + )); + + // Check stakes + let validator_take_percent = 0.18f64; + + let alice_stake: u64 = SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, + &alice_coldkey, + netuid, + ) + .into(); + + let bob_stake: u64 = SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, + &bob_coldkey, + netuid, + ) + .into(); + + let alice_estimated_stake = + (pending_root_alpha as f64) * (1f64 - validator_take_percent) * alice_root_stake_rate; + + assert_eq!(2 * alice_stake, bob_stake); + + assert_abs_diff_eq!(alice_stake, alice_estimated_stake as u64, epsilon = 100u64,); + }); +} + +#[test] +fn test_claim_root_with_changed_stake() { + new_test_ext(1).execute_with(|| { + let owner_coldkey = 
U256::from(1001); + let hotkey = U256::from(1002); + let alice_coldkey = U256::from(1003); + let bob_coldkey = U256::from(1004); + let netuid = add_dynamic_network(&hotkey, &owner_coldkey); + + SubtensorModule::set_tao_weight(u64::MAX); // Set TAO weight to 1.0 + SubtokenEnabled::::insert(NetUid::ROOT, true); + NetworksAdded::::insert(NetUid::ROOT, true); + + let root_stake = 8_000_000u64; + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, + &alice_coldkey, + NetUid::ROOT, + root_stake.into(), + ); + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, + &bob_coldkey, + NetUid::ROOT, + root_stake.into(), + ); + + let initial_total_hotkey_alpha = 10_000_000u64; + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, + &owner_coldkey, + netuid, + initial_total_hotkey_alpha.into(), + ); + + // Claim root alpha + + assert_ok!(SubtensorModule::set_root_claim_type( + RuntimeOrigin::signed(alice_coldkey), + RootClaimTypeEnum::Keep + ),); + assert_ok!(SubtensorModule::set_root_claim_type( + RuntimeOrigin::signed(bob_coldkey), + RootClaimTypeEnum::Keep + ),); + + // Distribute pending root alpha + + let pending_root_alpha = 10_000_000u64; + SubtensorModule::drain_pending_emission( + netuid, + AlphaCurrency::ZERO, + pending_root_alpha.into(), + AlphaCurrency::ZERO, + ); + + assert_ok!(SubtensorModule::claim_root( + RuntimeOrigin::signed(alice_coldkey), + BTreeSet::from([netuid]) + )); + assert_ok!(SubtensorModule::claim_root( + RuntimeOrigin::signed(bob_coldkey), + BTreeSet::from([netuid]) + )); + + // Check stakes + let validator_take_percent = 0.18f64; + + let alice_stake: u64 = SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, + &alice_coldkey, + netuid, + ) + .into(); + + let bob_stake: u64 = SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, + &bob_coldkey, + netuid, + ) + .into(); + + let estimated_stake = (pending_root_alpha as f64) * (1f64 - validator_take_percent) / 2f64; + + assert_eq!(alice_stake, bob_stake); + + assert_abs_diff_eq!(alice_stake, estimated_stake as u64, epsilon = 100u64,); + + // Remove stake + let stake_decrement = root_stake / 2u64; + + assert_ok!(SubtensorModule::remove_stake( + RuntimeOrigin::signed(bob_coldkey,), + hotkey, + NetUid::ROOT, + stake_decrement.into(), + )); + + // Distribute pending root alpha + + let pending_root_alpha = 10_000_000u64; + SubtensorModule::drain_pending_emission( + netuid, + AlphaCurrency::ZERO, + pending_root_alpha.into(), + AlphaCurrency::ZERO, + ); + + assert_ok!(SubtensorModule::claim_root( + RuntimeOrigin::signed(alice_coldkey), + BTreeSet::from([netuid]) + )); + assert_ok!(SubtensorModule::claim_root( + RuntimeOrigin::signed(bob_coldkey), + BTreeSet::from([netuid]) + )); + + // Check new stakes + + let alice_stake2: u64 = SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, + &alice_coldkey, + netuid, + ) + .into(); + + let bob_stake2: u64 = SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, + &bob_coldkey, + netuid, + ) + .into(); + + let estimated_stake = (pending_root_alpha as f64) * (1f64 - validator_take_percent) / 3f64; + + let alice_stake_diff = alice_stake2 - alice_stake; + let bob_stake_diff = bob_stake2 - bob_stake; + + assert_abs_diff_eq!(alice_stake_diff, 2 * bob_stake_diff, epsilon = 100u64,); + assert_abs_diff_eq!(bob_stake_diff, estimated_stake as u64, epsilon = 100u64,); + + // Add stake + let stake_increment = root_stake / 2u64; + + 
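        // Restore Bob's root stake to roughly Alice's level; after the next emission round
        // their claim increments should again be approximately equal, as asserted below.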
assert_ok!(SubtensorModule::add_stake( + RuntimeOrigin::signed(bob_coldkey,), + hotkey, + NetUid::ROOT, + stake_increment.into(), + )); + + // Distribute pending root alpha + + let pending_root_alpha = 10_000_000u64; + SubtensorModule::drain_pending_emission( + netuid, + AlphaCurrency::ZERO, + pending_root_alpha.into(), + AlphaCurrency::ZERO, + ); + + assert_ok!(SubtensorModule::claim_root( + RuntimeOrigin::signed(alice_coldkey), + BTreeSet::from([netuid]) + )); + assert_ok!(SubtensorModule::claim_root( + RuntimeOrigin::signed(bob_coldkey), + BTreeSet::from([netuid]) + )); + + // Check new stakes + + let alice_stake3: u64 = SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, + &alice_coldkey, + netuid, + ) + .into(); + + let bob_stake3: u64 = SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, + &bob_coldkey, + netuid, + ) + .into(); + + let estimated_stake = (pending_root_alpha as f64) * (1f64 - validator_take_percent) / 2f64; + + let alice_stake_diff2 = alice_stake3 - alice_stake2; + let bob_stake_diff2 = bob_stake3 - bob_stake2; + + assert_abs_diff_eq!(alice_stake_diff2, bob_stake_diff2, epsilon = 100u64,); + assert_abs_diff_eq!(bob_stake_diff2, estimated_stake as u64, epsilon = 100u64,); + }); +} + +#[test] +fn test_claim_root_with_drain_emissions_and_swap_claim_type() { + new_test_ext(1).execute_with(|| { + let owner_coldkey = U256::from(1001); + let other_coldkey = U256::from(10010); + let hotkey = U256::from(1002); + let coldkey = U256::from(1003); + let netuid = add_dynamic_network(&hotkey, &owner_coldkey); + + SubtensorModule::set_tao_weight(u64::MAX); // Set TAO weight to 1.0 + SubnetMechanism::::insert(netuid, 1); + + // let initial_balance = 10_000_000u64; + // SubtensorModule::add_balance_to_coldkey_account(&coldkey, initial_balance.into()); + + let tao_reserve = TaoCurrency::from(50_000_000_000); + let alpha_in = AlphaCurrency::from(100_000_000_000); + SubnetTAO::::insert(netuid, tao_reserve); + SubnetAlphaIn::::insert(netuid, alpha_in); + let current_price = + ::SwapInterface::current_alpha_price(netuid.into()) + .saturating_to_num::(); + assert_eq!(current_price, 0.5f64); + + let root_stake = 2_000_000u64; + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, + &coldkey, + NetUid::ROOT, + root_stake.into(), + ); + let root_stake_rate = 0.1f64; + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, + &other_coldkey, + NetUid::ROOT, + (9 * root_stake).into(), + ); + + let initial_total_hotkey_alpha = 10_000_000u64; + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, + &owner_coldkey, + netuid, + initial_total_hotkey_alpha.into(), + ); + + // Distribute pending root alpha + + let pending_root_alpha = 10_000_000u64; + SubtensorModule::drain_pending_emission( + netuid, + AlphaCurrency::ZERO, + pending_root_alpha.into(), + AlphaCurrency::ZERO, + ); + + // Claim root alpha + + let validator_take_percent = 0.18f64; + + assert_ok!(SubtensorModule::set_root_claim_type( + RuntimeOrigin::signed(coldkey), + RootClaimTypeEnum::Swap + ),); + assert_eq!(RootClaimType::::get(coldkey), RootClaimTypeEnum::Swap); + + assert_ok!(SubtensorModule::claim_root( + RuntimeOrigin::signed(coldkey), + BTreeSet::from([netuid]) + )); + + // Check new stake + + let new_stake: u64 = SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, + &coldkey, + NetUid::ROOT, + ) + .into(); + + let estimated_stake_increment = (pending_root_alpha as f64) + * (1f64 - validator_take_percent) 
+ * current_price + * root_stake_rate; + + assert_abs_diff_eq!( + new_stake, + root_stake + estimated_stake_increment as u64, + epsilon = 10000u64, + ); + + // Distribute and claim pending root alpha (round 2) + + SubtensorModule::drain_pending_emission( + netuid, + AlphaCurrency::ZERO, + pending_root_alpha.into(), + AlphaCurrency::ZERO, + ); + + assert_ok!(SubtensorModule::claim_root( + RuntimeOrigin::signed(coldkey), + BTreeSet::from([netuid]) + )); + + // Check new stake (2) + + let new_stake2: u64 = SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, + &coldkey, + NetUid::ROOT, + ) + .into(); + + // new root stake / new total stake + let root_stake_rate2 = (root_stake as f64 + estimated_stake_increment) + / (root_stake as f64 / root_stake_rate + estimated_stake_increment); + let estimated_stake_increment2 = (pending_root_alpha as f64) + * (1f64 - validator_take_percent) + * current_price + * root_stake_rate2; + + assert_abs_diff_eq!( + new_stake2, + new_stake + estimated_stake_increment2 as u64, + epsilon = 10000u64, + ); + // Distribute and claim pending root alpha (round 3) + + SubtensorModule::drain_pending_emission( + netuid, + AlphaCurrency::ZERO, + pending_root_alpha.into(), + AlphaCurrency::ZERO, + ); + + assert_ok!(SubtensorModule::claim_root( + RuntimeOrigin::signed(coldkey), + BTreeSet::from([netuid]) + )); + + // Check new stake (3) + + let new_stake3: u64 = SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, + &coldkey, + NetUid::ROOT, + ) + .into(); + + // new root stake / new total stake + let root_stake_rate3 = + (root_stake as f64 + estimated_stake_increment + estimated_stake_increment2) + / (root_stake as f64 / root_stake_rate + + estimated_stake_increment + + estimated_stake_increment2); + let estimated_stake_increment3 = (pending_root_alpha as f64) + * (1f64 - validator_take_percent) + * current_price + * root_stake_rate3; + + assert_abs_diff_eq!( + new_stake3, + new_stake2 + estimated_stake_increment3 as u64, + epsilon = 10000u64, + ); + }); +} + +#[test] +fn test_claim_root_with_run_coinbase() { + new_test_ext(1).execute_with(|| { + let owner_coldkey = U256::from(1001); + let hotkey = U256::from(1002); + let coldkey = U256::from(1003); + let netuid = add_dynamic_network(&hotkey, &owner_coldkey); + + Tempo::::insert(netuid, 1); + SubtensorModule::set_tao_weight(u64::MAX); // Set TAO weight to 1.0 + + let root_stake = 200_000_000u64; + SubnetTAO::::insert(NetUid::ROOT, TaoCurrency::from(root_stake)); + + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, + &coldkey, + NetUid::ROOT, + root_stake.into(), + ); + + let initial_total_hotkey_alpha = 10_000_000u64; + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, + &owner_coldkey, + netuid, + initial_total_hotkey_alpha.into(), + ); + + // Distribute pending root alpha + + let initial_stake: u64 = + SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet(&hotkey, &coldkey, netuid) + .into(); + assert_eq!(initial_stake, 0u64); + + let block_emissions = 1_000_000u64; + SubtensorModule::run_coinbase(U96F32::from(block_emissions)); + + // Claim root alpha + + let initial_stake: u64 = + SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet(&hotkey, &coldkey, netuid) + .into(); + assert_eq!(initial_stake, 0u64); + + assert_ok!(SubtensorModule::set_root_claim_type( + RuntimeOrigin::signed(coldkey), + RootClaimTypeEnum::Keep + ),); + assert_eq!(RootClaimType::::get(coldkey), RootClaimTypeEnum::Keep); + + 
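        // With the `Keep` claim type, the claimed root dividends are expected to remain as
        // alpha stake on `netuid` rather than being swapped back to root, so the subnet
        // stake checked below should become non-zero.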
assert_ok!(SubtensorModule::claim_root( + RuntimeOrigin::signed(coldkey), + BTreeSet::from([netuid]) + )); + + let new_stake: u64 = + SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet(&hotkey, &coldkey, netuid) + .into(); + + assert!(new_stake > 0); + }); +} + +#[test] +fn test_claim_root_block_hash_indices() { + new_test_ext(1).execute_with(|| { + let k = 15u64; + let n = 15000u64; + + // 0 + let indices = + SubtensorModule::block_hash_to_indices(H256(sp_core::keccak_256(b"zero")), 0, n); + assert!(indices.is_empty()); + + // 1 + let hash = sp_core::keccak_256(b"some"); + let mut indices = SubtensorModule::block_hash_to_indices(H256(hash), k, n); + indices.sort(); + + assert!(indices.len() <= k as usize); + assert!(!indices.iter().any(|i| *i >= n)); + // precomputed values + let expected_result = vec![ + 265, 630, 1286, 1558, 4496, 4861, 5517, 5789, 6803, 8096, 9092, 11034, 11399, 12055, + 12327, + ]; + assert_eq!(indices, expected_result); + + // 2 + let hash = sp_core::keccak_256(b"some2"); + let mut indices = SubtensorModule::block_hash_to_indices(H256(hash), k, n); + indices.sort(); + + assert!(indices.len() <= k as usize); + assert!(!indices.iter().any(|i| *i >= n)); + // precomputed values + let expected_result = vec![ + 61, 246, 1440, 2855, 3521, 5236, 6130, 6615, 8511, 9405, 9890, 11786, 11971, 13165, + 14580, + ]; + assert_eq!(indices, expected_result); + }); +} + +#[test] +fn test_claim_root_with_block_emissions() { + new_test_ext(0).execute_with(|| { + let owner_coldkey = U256::from(1001); + let hotkey = U256::from(1002); + let coldkey = U256::from(1003); + let netuid = add_dynamic_network(&hotkey, &owner_coldkey); + + Tempo::::insert(netuid, 1); + SubtensorModule::set_tao_weight(u64::MAX); // Set TAO weight to 1.0 + + let root_stake = 200_000_000u64; + SubnetTAO::::insert(NetUid::ROOT, TaoCurrency::from(root_stake)); + + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, + &coldkey, + NetUid::ROOT, + root_stake.into(), + ); + SubtensorModule::maybe_add_coldkey_index(&coldkey); + + let initial_total_hotkey_alpha = 10_000_000u64; + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, + &owner_coldkey, + netuid, + initial_total_hotkey_alpha.into(), + ); + + assert_ok!(SubtensorModule::set_root_claim_type( + RuntimeOrigin::signed(coldkey), + RootClaimTypeEnum::Keep + ),); + assert_eq!(RootClaimType::::get(coldkey), RootClaimTypeEnum::Keep); + + // Distribute pending root alpha + + let initial_stake: u64 = + SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet(&hotkey, &coldkey, netuid) + .into(); + assert_eq!(initial_stake, 0u64); + + run_to_block(2); + + // Check stake after block emissions + + let new_stake: u64 = + SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet(&hotkey, &coldkey, netuid) + .into(); + + assert!(new_stake > 0); + }); +} +#[test] +fn test_populate_staking_maps() { + new_test_ext(1).execute_with(|| { + let owner_coldkey = U256::from(1000); + let coldkey1 = U256::from(1001); + let coldkey2 = U256::from(1002); + let coldkey3 = U256::from(1003); + let hotkey = U256::from(1004); + let _netuid = add_dynamic_network(&hotkey, &owner_coldkey); + let netuid2 = NetUid::from(2); + + let root_stake = 200_000_000u64; + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, + &coldkey1, + NetUid::ROOT, + root_stake.into(), + ); + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, + &coldkey2, + NetUid::ROOT, + root_stake.into(), + ); + 
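+ // coldkey1 and coldkey2 stake on the root subnet; coldkey3 (next) stakes only on
+ // netuid2, so it should not be counted among the indexed staking coldkeys.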
SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, + &coldkey3, + netuid2, + root_stake.into(), + ); + + assert_eq!(NumStakingColdkeys::::get(), 0); + + // Populate maps through block step + + run_to_block(2); + + assert_eq!(NumStakingColdkeys::::get(), 2); + + assert!(StakingColdkeysByIndex::::contains_key(0)); + assert!(StakingColdkeysByIndex::::contains_key(1)); + + assert!(StakingColdkeys::::contains_key(coldkey1)); + assert!(StakingColdkeys::::contains_key(coldkey2)); + assert!(!StakingColdkeys::::contains_key(coldkey3)); + }); +} + +#[test] +fn test_claim_root_coinbase_distribution() { + new_test_ext(1).execute_with(|| { + let owner_coldkey = U256::from(1001); + let hotkey = U256::from(1002); + let coldkey = U256::from(1003); + let netuid = add_dynamic_network(&hotkey, &owner_coldkey); + + Tempo::::insert(netuid, 1); + SubtensorModule::set_tao_weight(u64::MAX); // Set TAO weight to 1.0 + + let root_stake = 200_000_000u64; + let initial_tao = 200_000_000u64; + SubnetTAO::::insert(NetUid::ROOT, TaoCurrency::from(initial_tao)); + + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, + &coldkey, + NetUid::ROOT, + root_stake.into(), + ); + + let initial_total_hotkey_alpha = 10_000_000u64; + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, + &owner_coldkey, + netuid, + initial_total_hotkey_alpha.into(), + ); + + let initial_alpha_issuance = SubtensorModule::get_alpha_issuance(netuid); + let alpha_emissions: AlphaCurrency = 1_000_000_000u64.into(); + + // Check total issuance (saved to pending alpha divs) + + run_to_block(2); + + let alpha_issuance = SubtensorModule::get_alpha_issuance(netuid); + assert_eq!(initial_alpha_issuance + alpha_emissions, alpha_issuance); + + let root_prop = initial_tao as f64 / (u64::from(alpha_issuance) + initial_tao) as f64; + let root_validators_share = 0.5f64; + + let expected_pending_root_alpha_divs = + u64::from(alpha_emissions) as f64 * root_prop * root_validators_share; + assert_abs_diff_eq!( + u64::from(PendingRootAlphaDivs::::get(netuid)) as f64, + expected_pending_root_alpha_divs, + epsilon = 100f64 + ); + + // Epoch pending alphas divs is distributed + + run_to_block(3); + + assert_eq!(u64::from(PendingRootAlphaDivs::::get(netuid)), 0u64); + + let claimable = *RootClaimable::::get(hotkey) + .get(&netuid) + .expect("claimable must exist at this point"); + + let validator_take_percent = 0.18f64; + let calculated_rate = (expected_pending_root_alpha_divs * 2f64) + * (1f64 - validator_take_percent) + / (root_stake as f64); + + assert_abs_diff_eq!( + claimable.saturating_to_num::(), + calculated_rate, + epsilon = 0.001f64, + ); + }); +} + +#[test] +fn test_sudo_set_num_root_claims() { + new_test_ext(1).execute_with(|| { + let coldkey = U256::from(1003); + + assert_noop!( + SubtensorModule::sudo_set_num_root_claims(RuntimeOrigin::signed(coldkey), 50u64), + DispatchError::BadOrigin + ); + + assert_noop!( + SubtensorModule::sudo_set_num_root_claims( + RuntimeOrigin::root(), + MAX_NUM_ROOT_CLAIMS + 1, + ), + Error::::InvalidNumRootClaim + ); + + let new_value = 27u64; + assert_ok!(SubtensorModule::sudo_set_num_root_claims( + RuntimeOrigin::root(), + new_value, + ),); + + assert_eq!(NumRootClaim::::get(), new_value); + }); +} + +#[test] +fn test_claim_root_with_swap_coldkey() { + new_test_ext(1).execute_with(|| { + let owner_coldkey = U256::from(1001); + let hotkey = U256::from(1002); + let coldkey = U256::from(1003); + let netuid = add_dynamic_network(&hotkey, &owner_coldkey); + + 
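+ // Scenario: claim root dividends under `coldkey`, then swap to a new coldkey and
+ // verify the RootClaimed bookkeeping follows the new key.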
SubtensorModule::set_tao_weight(u64::MAX); // Set TAO weight to 1.0 + + let root_stake = 2_000_000u64; + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, + &coldkey, + NetUid::ROOT, + root_stake.into(), + ); + + let initial_total_hotkey_alpha = 10_000_000u64; + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, + &owner_coldkey, + netuid, + initial_total_hotkey_alpha.into(), + ); + + let old_validator_stake = SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, + &owner_coldkey, + netuid, + ); + assert_eq!(old_validator_stake, initial_total_hotkey_alpha.into()); + + // Distribute pending root alpha + + let pending_root_alpha = 1_000_000u64; + SubtensorModule::drain_pending_emission( + netuid, + AlphaCurrency::ZERO, + pending_root_alpha.into(), + AlphaCurrency::ZERO, + ); + + // Claim root alpha + + assert_ok!(SubtensorModule::set_root_claim_type( + RuntimeOrigin::signed(coldkey), + RootClaimTypeEnum::Keep + ),); + assert_eq!(RootClaimType::::get(coldkey), RootClaimTypeEnum::Keep); + + assert_ok!(SubtensorModule::claim_root( + RuntimeOrigin::signed(coldkey), + BTreeSet::from([netuid]) + )); + + let new_stake: u64 = + SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet(&hotkey, &coldkey, netuid) + .into(); + + // Check root claimed value saved + let new_coldkey = U256::from(10030); + + assert_eq!( + u128::from(new_stake), + RootClaimed::::get((netuid, &hotkey, &coldkey)) + ); + assert_eq!( + 0u128, + RootClaimed::::get((netuid, &hotkey, &new_coldkey)) + ); + + // Swap coldkey + let mut weight = Weight::zero(); + + assert_ok!(SubtensorModule::perform_swap_coldkey( + &coldkey, + &new_coldkey, + &mut weight + )); + + // Check swapped keys claimed values + + assert_eq!(0u128, RootClaimed::::get((netuid, &hotkey, &coldkey))); + assert_eq!( + u128::from(new_stake), + RootClaimed::::get((netuid, &hotkey, &new_coldkey,)) + ); + }); +} +#[test] +fn test_claim_root_with_swap_hotkey() { + new_test_ext(1).execute_with(|| { + let owner_coldkey = U256::from(1001); + let hotkey = U256::from(1002); + let coldkey = U256::from(1003); + let netuid = add_dynamic_network(&hotkey, &owner_coldkey); + + SubtensorModule::set_tao_weight(u64::MAX); // Set TAO weight to 1.0 + + let root_stake = 2_000_000u64; + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, + &coldkey, + NetUid::ROOT, + root_stake.into(), + ); + + let initial_total_hotkey_alpha = 10_000_000u64; + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, + &owner_coldkey, + netuid, + initial_total_hotkey_alpha.into(), + ); + + let old_validator_stake = SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, + &owner_coldkey, + netuid, + ); + assert_eq!(old_validator_stake, initial_total_hotkey_alpha.into()); + + // Distribute pending root alpha + + let pending_root_alpha = 1_000_000u64; + SubtensorModule::drain_pending_emission( + netuid, + AlphaCurrency::ZERO, + pending_root_alpha.into(), + AlphaCurrency::ZERO, + ); + + // Claim root alpha + + assert_ok!(SubtensorModule::set_root_claim_type( + RuntimeOrigin::signed(coldkey), + RootClaimTypeEnum::Keep + ),); + assert_eq!(RootClaimType::::get(coldkey), RootClaimTypeEnum::Keep); + + assert_ok!(SubtensorModule::claim_root( + RuntimeOrigin::signed(coldkey), + BTreeSet::from([netuid]) + )); + + let new_stake: u64 = + SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet(&hotkey, &coldkey, netuid) + .into(); + + // Check root claimed value saved + let 
new_hotkey = U256::from(10030); + + assert_eq!( + u128::from(new_stake), + RootClaimed::::get((netuid, &hotkey, &coldkey,)) + ); + assert_eq!( + 0u128, + RootClaimed::::get((netuid, &new_hotkey, &coldkey,)) + ); + + let _old_claimable = *RootClaimable::::get(hotkey) + .get(&netuid) + .expect("claimable must exist at this point"); + + assert!(!RootClaimable::::get(new_hotkey).contains_key(&netuid)); + + // Swap hotkey + let mut weight = Weight::zero(); + assert_ok!(SubtensorModule::perform_hotkey_swap_on_one_subnet( + &hotkey, + &new_hotkey, + &mut weight, + netuid + )); + + // Check swapped keys claimed values + + assert_eq!( + 0u128, + RootClaimed::::get((netuid, &hotkey, &coldkey,)) + ); + assert_eq!( + u128::from(new_stake), + RootClaimed::::get((netuid, &new_hotkey, &coldkey,)) + ); + + assert!(!RootClaimable::::get(hotkey).contains_key(&netuid)); + + let _new_claimable = *RootClaimable::::get(new_hotkey) + .get(&netuid) + .expect("claimable must exist at this point"); + }); +} + +#[test] +fn test_claim_root_on_network_deregistration() { + new_test_ext(1).execute_with(|| { + let owner_coldkey = U256::from(1001); + let other_coldkey = U256::from(10010); + let hotkey = U256::from(1002); + let coldkey = U256::from(1003); + let netuid = add_dynamic_network(&hotkey, &owner_coldkey); + + SubtensorModule::set_tao_weight(u64::MAX); // Set TAO weight to 1.0 + SubnetMechanism::::insert(netuid, 1); + + let tao_reserve = TaoCurrency::from(50_000_000_000); + let alpha_in = AlphaCurrency::from(100_000_000_000); + SubnetTAO::::insert(netuid, tao_reserve); + SubnetAlphaIn::::insert(netuid, alpha_in); + let current_price = + ::SwapInterface::current_alpha_price(netuid.into()) + .saturating_to_num::(); + assert_eq!(current_price, 0.5f64); + + let root_stake = 2_000_000u64; + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, + &coldkey, + NetUid::ROOT, + root_stake.into(), + ); + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, + &other_coldkey, + NetUid::ROOT, + (9 * root_stake).into(), + ); + + let initial_total_hotkey_alpha = 10_000_000u64; + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, + &owner_coldkey, + netuid, + initial_total_hotkey_alpha.into(), + ); + + // Distribute pending root alpha + + let pending_root_alpha = 10_000_000u64; + SubtensorModule::drain_pending_emission( + netuid, + AlphaCurrency::ZERO, + pending_root_alpha.into(), + AlphaCurrency::ZERO, + ); + + assert_ok!(SubtensorModule::claim_root( + RuntimeOrigin::signed(coldkey), + BTreeSet::from([netuid]) + )); + + assert!(RootClaimable::::get(hotkey).contains_key(&netuid)); + + assert!(RootClaimed::::contains_key(( + netuid, &hotkey, &coldkey, + ))); + + // Claim root via network deregistration + + assert_ok!(SubtensorModule::do_dissolve_network(netuid)); + + assert!(!RootClaimed::::contains_key(( + netuid, &hotkey, &coldkey, + ))); + assert!(!RootClaimable::::get(hotkey).contains_key(&netuid)); + }); +} + +#[test] +fn test_claim_root_threshold() { + new_test_ext(1).execute_with(|| { + let owner_coldkey = U256::from(1001); + let hotkey = U256::from(1002); + let netuid = add_dynamic_network(&hotkey, &owner_coldkey); + + assert_eq!( + RootClaimableThreshold::::get(netuid), + DefaultMinRootClaimAmount::::get() + ); + + let threshold = 1000u64; + assert_ok!(SubtensorModule::sudo_set_root_claim_threshold( + RawOrigin::Root.into(), + netuid, + threshold + )); + assert_eq!( + RootClaimableThreshold::::get(netuid), + I96F32::from(threshold) + ); + + let 
threshold = 2000u64; + assert_ok!(SubtensorModule::sudo_set_root_claim_threshold( + RawOrigin::Signed(owner_coldkey).into(), + netuid, + threshold + )); + assert_eq!( + RootClaimableThreshold::::get(netuid), + I96F32::from(threshold) + ); + + // Errors + assert_err!( + SubtensorModule::sudo_set_root_claim_threshold( + RawOrigin::Signed(hotkey).into(), + netuid, + threshold + ), + DispatchError::BadOrigin, + ); + + assert_err!( + SubtensorModule::sudo_set_root_claim_threshold( + RawOrigin::Signed(owner_coldkey).into(), + netuid, + MAX_ROOT_CLAIM_THRESHOLD + 1 + ), + Error::::InvalidRootClaimThreshold, + ); + }); +} + +#[test] +fn test_claim_root_subnet_limits() { + new_test_ext(1).execute_with(|| { + let coldkey = U256::from(1003); + + assert_err!( + SubtensorModule::claim_root(RuntimeOrigin::signed(coldkey), BTreeSet::new()), + Error::::InvalidSubnetNumber + ); + + assert_err!( + SubtensorModule::claim_root( + RuntimeOrigin::signed(coldkey), + BTreeSet::from_iter((0u16..=10u16).into_iter().map(NetUid::from)) + ), + Error::::InvalidSubnetNumber + ); + }); +} + +#[test] +fn test_claim_root_with_unrelated_subnets() { + new_test_ext(1).execute_with(|| { + let owner_coldkey = U256::from(1001); + let hotkey = U256::from(1002); + let coldkey = U256::from(1003); + let netuid = add_dynamic_network(&hotkey, &owner_coldkey); + + SubtensorModule::set_tao_weight(u64::MAX); // Set TAO weight to 1.0 + + let root_stake = 2_000_000u64; + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, + &coldkey, + NetUid::ROOT, + root_stake.into(), + ); + + let initial_total_hotkey_alpha = 10_000_000u64; + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, + &owner_coldkey, + netuid, + initial_total_hotkey_alpha.into(), + ); + + let old_validator_stake = SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, + &owner_coldkey, + netuid, + ); + assert_eq!(old_validator_stake, initial_total_hotkey_alpha.into()); + + // Distribute pending root alpha + + let pending_root_alpha = 1_000_000u64; + SubtensorModule::drain_pending_emission( + netuid, + AlphaCurrency::ZERO, + pending_root_alpha.into(), + AlphaCurrency::ZERO, + ); + + // Claim root alpha + + assert_ok!(SubtensorModule::set_root_claim_type( + RuntimeOrigin::signed(coldkey), + RootClaimTypeEnum::Keep + ),); + + // Claim root alpha on unrelated subnets + + let unrelated_subnet_uid = NetUid::from(100u16); + + assert_ok!(SubtensorModule::claim_root( + RuntimeOrigin::signed(coldkey), + BTreeSet::from([unrelated_subnet_uid]) + )); + + let new_stake: u64 = SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, + &coldkey, + unrelated_subnet_uid, + ) + .into(); + + assert_eq!(new_stake, 0u64,); + + // Check root claim for correct subnet + + // before + let new_stake: u64 = + SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet(&hotkey, &coldkey, netuid) + .into(); + + assert_eq!(new_stake, 0u64,); + + assert_ok!(SubtensorModule::claim_root( + RuntimeOrigin::signed(coldkey), + BTreeSet::from([netuid]) + )); + + // after + let new_stake: u64 = + SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet(&hotkey, &coldkey, netuid) + .into(); + + assert!(new_stake > 0u64); + + // Check root claimed value saved + + let claimed = RootClaimed::::get((netuid, &hotkey, &coldkey)); + assert_eq!(u128::from(new_stake), claimed); + }); +} diff --git a/pallets/subtensor/src/tests/coinbase.rs b/pallets/subtensor/src/tests/coinbase.rs index b53308ae9f..2d0376c896 100644 --- 
a/pallets/subtensor/src/tests/coinbase.rs +++ b/pallets/subtensor/src/tests/coinbase.rs @@ -8,9 +8,9 @@ use approx::assert_abs_diff_eq; use frame_support::assert_ok; use pallet_subtensor_swap::position::PositionId; use sp_core::U256; -use substrate_fixed::types::{I64F64, I96F32, U96F32}; +use substrate_fixed::types::{I64F64, I96F32, U64F64, U96F32}; use subtensor_runtime_common::{AlphaCurrency, NetUidStorageIndex}; -use subtensor_swap_interface::SwapHandler; +use subtensor_swap_interface::{SwapEngine, SwapHandler}; #[allow(clippy::arithmetic_side_effects)] fn close(value: u64, target: u64, eps: u64) { @@ -80,21 +80,22 @@ fn test_coinbase_basecase() { // Test the emission distribution for a single subnet. // This test verifies that: -// - A single subnet receives the full emission amount -// - The emission is correctly reflected in SubnetTAO -// - Total issuance and total stake are updated appropriately +// - Single subnet gets cutoff by lower flow limit, so nothing is distributed // SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --package pallet-subtensor --lib -- tests::coinbase::test_coinbase_tao_issuance_base --exact --show-output --nocapture #[test] fn test_coinbase_tao_issuance_base() { new_test_ext(1).execute_with(|| { - let netuid = NetUid::from(1); let emission = TaoCurrency::from(1_234_567); - add_network(netuid, 1, 0); - assert_eq!(SubnetTAO::::get(netuid), TaoCurrency::ZERO); + let subnet_owner_ck = U256::from(1001); + let subnet_owner_hk = U256::from(1002); + let netuid = add_dynamic_network(&subnet_owner_hk, &subnet_owner_ck); + SubnetMovingPrice::::insert(netuid, I96F32::from(3141) / I96F32::from(1000)); + let tao_in_before = SubnetTAO::::get(netuid); + let total_stake_before = TotalStake::::get(); SubtensorModule::run_coinbase(U96F32::from_num(emission)); - assert_eq!(SubnetTAO::::get(netuid), emission); + assert_eq!(SubnetTAO::::get(netuid), tao_in_before + emission); assert_eq!(TotalIssuance::::get(), emission); - assert_eq!(TotalStake::::get(), emission); + assert_eq!(TotalStake::::get(), total_stake_before + emission); }); } @@ -113,6 +114,30 @@ fn test_coinbase_tao_issuance_base_low() { }); } +// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --package pallet-subtensor --lib -- tests::coinbase::test_coinbase_tao_issuance_base_low_flow --exact --show-output --nocapture +#[test] +fn test_coinbase_tao_issuance_base_low_flow() { + new_test_ext(1).execute_with(|| { + let emission = TaoCurrency::from(1_234_567); + let subnet_owner_ck = U256::from(1001); + let subnet_owner_hk = U256::from(1002); + let netuid = add_dynamic_network(&subnet_owner_hk, &subnet_owner_ck); + let emission = TaoCurrency::from(1); + + // 100% tao flow method + let block_num = FlowHalfLife::::get(); + SubnetEmaTaoFlow::::insert(netuid, (block_num, I64F64::from_num(1_000_000_000))); + System::set_block_number(block_num); + + let tao_in_before = SubnetTAO::::get(netuid); + let total_stake_before = TotalStake::::get(); + SubtensorModule::run_coinbase(U96F32::from_num(emission)); + assert_eq!(SubnetTAO::::get(netuid), tao_in_before + emission); + assert_eq!(TotalIssuance::::get(), emission); + assert_eq!(TotalStake::::get(), total_stake_before + emission); + }); +} + // Test emission distribution across multiple subnets. 
// This test verifies that: // - Multiple subnets receive equal portions of the total emission @@ -134,11 +159,23 @@ fn test_coinbase_tao_issuance_multiple() { assert_eq!(SubnetTAO::::get(netuid2), TaoCurrency::ZERO); assert_eq!(SubnetTAO::::get(netuid3), TaoCurrency::ZERO); SubtensorModule::run_coinbase(U96F32::from_num(emission)); - assert_eq!(SubnetTAO::::get(netuid1), emission / 3.into()); - assert_eq!(SubnetTAO::::get(netuid2), emission / 3.into()); - assert_eq!(SubnetTAO::::get(netuid3), emission / 3.into()); - assert_eq!(TotalIssuance::::get(), emission); - assert_eq!(TotalStake::::get(), emission); + assert_abs_diff_eq!( + SubnetTAO::::get(netuid1), + emission / 3.into(), + epsilon = 1.into(), + ); + assert_abs_diff_eq!( + SubnetTAO::::get(netuid2), + emission / 3.into(), + epsilon = 1.into(), + ); + assert_abs_diff_eq!( + SubnetTAO::::get(netuid3), + emission / 3.into(), + epsilon = 1.into(), + ); + assert_abs_diff_eq!(TotalIssuance::::get(), emission, epsilon = 3.into(),); + assert_abs_diff_eq!(TotalStake::::get(), emission, epsilon = 3.into(),); }); } @@ -199,12 +236,12 @@ fn test_coinbase_tao_issuance_different_prices() { assert_abs_diff_eq!( SubnetTAO::::get(netuid1), TaoCurrency::from(initial_tao + emission / 3), - epsilon = 1.into(), + epsilon = 10.into(), ); assert_abs_diff_eq!( SubnetTAO::::get(netuid2), TaoCurrency::from(initial_tao + 2 * emission / 3), - epsilon = 1.into(), + epsilon = 10.into(), ); // Prices are low => we limit tao issued (buy alpha with it) @@ -222,6 +259,87 @@ fn test_coinbase_tao_issuance_different_prices() { }); } +// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --package pallet-subtensor --lib -- tests::coinbase::test_coinbase_tao_issuance_different_flows --exact --show-output --nocapture +#[test] +fn test_coinbase_tao_issuance_different_flows() { + new_test_ext(1).execute_with(|| { + let subnet_owner_ck = U256::from(1001); + let subnet_owner_hk = U256::from(1002); + let netuid1 = add_dynamic_network(&subnet_owner_hk, &subnet_owner_ck); + let netuid2 = add_dynamic_network(&subnet_owner_hk, &subnet_owner_ck); + let emission = 100_000_000; + + // Setup prices 0.1 and 0.2 + let initial_tao: u64 = 100_000_u64; + let initial_alpha1: u64 = initial_tao * 10; + let initial_alpha2: u64 = initial_tao * 5; + mock::setup_reserves(netuid1, initial_tao.into(), initial_alpha1.into()); + mock::setup_reserves(netuid2, initial_tao.into(), initial_alpha2.into()); + + // Force the swap to initialize + SubtensorModule::swap_tao_for_alpha( + netuid1, + TaoCurrency::ZERO, + 1_000_000_000_000.into(), + false, + ) + .unwrap(); + SubtensorModule::swap_tao_for_alpha( + netuid2, + TaoCurrency::ZERO, + 1_000_000_000_000.into(), + false, + ) + .unwrap(); + + // Set subnet prices to reversed proportion to ensure they don't affect emissions. + SubnetMovingPrice::::insert(netuid1, I96F32::from_num(2)); + SubnetMovingPrice::::insert(netuid2, I96F32::from_num(1)); + + // Set subnet tao flow ema. + let block_num = FlowHalfLife::::get(); + SubnetEmaTaoFlow::::insert(netuid1, (block_num, I64F64::from_num(1))); + SubnetEmaTaoFlow::::insert(netuid2, (block_num, I64F64::from_num(2))); + System::set_block_number(block_num); + + // Set normalization exponent to 1 for simplicity + FlowNormExponent::::set(U64F64::from(1_u64)); + + // Assert initial TAO reserves. + assert_eq!(SubnetTAO::::get(netuid1), initial_tao.into()); + assert_eq!(SubnetTAO::::get(netuid2), initial_tao.into()); + let total_stake_before = TotalStake::::get(); + + // Run the coinbase with the emission amount. 
+ SubtensorModule::run_coinbase(U96F32::from_num(emission)); + + // Assert tao emission is split evenly. + assert_abs_diff_eq!( + SubnetTAO::::get(netuid1), + TaoCurrency::from(initial_tao + emission / 3), + epsilon = 10.into(), + ); + assert_abs_diff_eq!( + SubnetTAO::::get(netuid2), + TaoCurrency::from(initial_tao + 2 * emission / 3), + epsilon = 10.into(), + ); + + // Prices are low => we limit tao issued (buy alpha with it) + let tao_issued = TaoCurrency::from(((0.1 + 0.2) * emission as f64) as u64); + assert_abs_diff_eq!( + TotalIssuance::::get(), + tao_issued, + epsilon = 10.into() + ); + assert_abs_diff_eq!( + TotalStake::::get(), + total_stake_before + emission.into(), + epsilon = 10.into() + ); + }); +} + // Test moving price updates with different alpha values. // This test verifies that: // - Moving price stays constant when alpha is 1.0 @@ -352,6 +470,8 @@ fn test_coinbase_alpha_issuance_base() { SubnetAlphaIn::::insert(netuid1, AlphaCurrency::from(initial)); SubnetTAO::::insert(netuid2, TaoCurrency::from(initial)); SubnetAlphaIn::::insert(netuid2, AlphaCurrency::from(initial)); + SubnetMovingPrice::::insert(netuid1, I96F32::from(1)); + SubnetMovingPrice::::insert(netuid2, I96F32::from(1)); // Check initial SubtensorModule::run_coinbase(U96F32::from_num(emission)); // tao_in = 500_000 @@ -367,9 +487,9 @@ fn test_coinbase_alpha_issuance_base() { }); } -// Test alpha issuance with different subnet prices. +// Test alpha issuance with different subnet flows. // This test verifies that: -// - Alpha issuance is proportional to subnet prices +// - Alpha issuance is proportional to subnet flows // - Higher priced subnets receive more TAO emission // - Alpha issuance is correctly calculated based on price ratios // SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --package pallet-subtensor --lib -- tests::coinbase::test_coinbase_alpha_issuance_different --exact --show-output --nocapture @@ -384,15 +504,16 @@ fn test_coinbase_alpha_issuance_different() { // Make subnets dynamic. SubnetMechanism::::insert(netuid1, 1); SubnetMechanism::::insert(netuid2, 1); - // Setup prices 1 and 1 + // Setup prices 1 and 2 let initial: u64 = 1_000_000; SubnetTAO::::insert(netuid1, TaoCurrency::from(initial)); SubnetAlphaIn::::insert(netuid1, AlphaCurrency::from(initial)); - SubnetTAO::::insert(netuid2, TaoCurrency::from(initial)); + SubnetTAO::::insert(netuid2, TaoCurrency::from(2 * initial)); SubnetAlphaIn::::insert(netuid2, AlphaCurrency::from(initial)); - // Set subnet prices. 
+ // Set subnet ema prices to 1 and 2 SubnetMovingPrice::::insert(netuid1, I96F32::from_num(1)); SubnetMovingPrice::::insert(netuid2, I96F32::from_num(2)); + // Do NOT Set tao flow, let it initialize // Run coinbase SubtensorModule::run_coinbase(U96F32::from_num(emission)); // tao_in = 333_333 @@ -402,10 +523,10 @@ fn test_coinbase_alpha_issuance_different() { (initial + emission / 3).into() ); // tao_in = 666_666 - // alpha_in = 666_666/price = 666_666 + initial + // alpha_in = 666_666/price = 333_333 + initial assert_eq!( SubnetAlphaIn::::get(netuid2), - (initial + emission / 3 + emission / 3).into() + (initial + (emission * 2 / 3) / 2).into() ); }); } @@ -538,36 +659,25 @@ fn test_owner_cut_base() { // SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --package pallet-subtensor --lib -- tests::coinbase::test_pending_swapped --exact --show-output --nocapture #[test] -fn test_pending_swapped() { +fn test_pending_emission() { new_test_ext(1).execute_with(|| { let netuid = NetUid::from(1); let emission: u64 = 1_000_000; add_network(netuid, 1, 0); mock::setup_reserves(netuid, 1_000_000.into(), 1.into()); SubtensorModule::run_coinbase(U96F32::from_num(0)); - assert_eq!(PendingAlphaSwapped::::get(netuid), 0.into()); // Zero tao weight and no root. SubnetTAO::::insert(NetUid::ROOT, TaoCurrency::from(1_000_000_000)); // Add root weight. SubtensorModule::run_coinbase(U96F32::from_num(0)); - assert_eq!(PendingAlphaSwapped::::get(netuid), 0.into()); // Zero tao weight with 1 root. SubtensorModule::set_tempo(netuid, 10000); // Large number (dont drain) SubtensorModule::set_tao_weight(u64::MAX); // Set TAO weight to 1.0 SubtensorModule::run_coinbase(U96F32::from_num(0)); // 1 TAO / ( 1 + 3 ) = 0.25 * 1 / 2 = 125000000 - assert_abs_diff_eq!( - u64::from(PendingAlphaSwapped::::get(netuid)), - 125000000, - epsilon = 1 - ); + assert_abs_diff_eq!( u64::from(PendingEmission::::get(netuid)), 1_000_000_000 - 125000000, epsilon = 1 ); // 1 - swapped. 
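+ // Only the unswapped remainder (1_000_000_000 - 125_000_000) stays as PendingEmission.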
- assert_abs_diff_eq!( - u64::from(PendingRootDivs::::get(netuid)), - 125000000, - epsilon = 1 - ); // swapped * (price = 1) }); } @@ -578,7 +688,6 @@ fn test_drain_base() { SubtensorModule::drain_pending_emission( 0.into(), AlphaCurrency::ZERO, - TaoCurrency::ZERO, AlphaCurrency::ZERO, AlphaCurrency::ZERO, ) @@ -594,7 +703,6 @@ fn test_drain_base_with_subnet() { SubtensorModule::drain_pending_emission( netuid, AlphaCurrency::ZERO, - TaoCurrency::ZERO, AlphaCurrency::ZERO, AlphaCurrency::ZERO, ) @@ -620,7 +728,6 @@ fn test_drain_base_with_subnet_with_single_staker_not_registered() { SubtensorModule::drain_pending_emission( netuid, pending_alpha.into(), - TaoCurrency::ZERO, AlphaCurrency::ZERO, AlphaCurrency::ZERO, ); @@ -650,7 +757,6 @@ fn test_drain_base_with_subnet_with_single_staker_registered() { SubtensorModule::drain_pending_emission( netuid, pending_alpha, - TaoCurrency::ZERO, AlphaCurrency::ZERO, AlphaCurrency::ZERO, ); @@ -689,14 +795,13 @@ fn test_drain_base_with_subnet_with_single_staker_registered_root_weight() { netuid, stake_before, ); - let pending_tao = TaoCurrency::from(1_000_000_000); let pending_alpha = AlphaCurrency::from(1_000_000_000); + let pending_root_alpha = AlphaCurrency::from(1_000_000_000); assert_eq!(SubnetTAO::::get(NetUid::ROOT), TaoCurrency::ZERO); SubtensorModule::drain_pending_emission( netuid, pending_alpha, - pending_tao, - AlphaCurrency::ZERO, + pending_root_alpha, AlphaCurrency::ZERO, ); let stake_after = @@ -711,12 +816,7 @@ fn test_drain_base_with_subnet_with_single_staker_registered_root_weight() { stake_after.into(), 10, ); // Registered gets all alpha emission. - close( - stake_before.to_u64() + pending_tao.to_u64(), - root_after.into(), - 10, - ); // Registered gets all tao emission - assert_eq!(SubnetTAO::::get(NetUid::ROOT), pending_tao); + close(stake_before.to_u64(), root_after.into(), 10); // Registered doesn't get tao immediately }); } @@ -748,7 +848,6 @@ fn test_drain_base_with_subnet_with_two_stakers_registered() { SubtensorModule::drain_pending_emission( netuid, pending_alpha, - TaoCurrency::ZERO, AlphaCurrency::ZERO, AlphaCurrency::ZERO, ); @@ -814,7 +913,6 @@ fn test_drain_base_with_subnet_with_two_stakers_registered_and_root() { SubtensorModule::drain_pending_emission( netuid, pending_alpha, - pending_tao, AlphaCurrency::ZERO, AlphaCurrency::ZERO, ); @@ -842,17 +940,6 @@ fn test_drain_base_with_subnet_with_two_stakers_registered_and_root() { stake_after2.into(), 10, ); // Registered gets 1/2 emission. 
- close( - stake_before.to_u64() + pending_tao.to_u64() / 2, - root_after1.into(), - 10, - ); // Registered gets 1/2 tao emission - close( - stake_before.to_u64() + pending_tao.to_u64() / 2, - root_after2.into(), - 10, - ); // Registered gets 1/2 tao emission - assert_eq!(SubnetTAO::::get(NetUid::ROOT), pending_tao); }); } @@ -901,8 +988,7 @@ fn test_drain_base_with_subnet_with_two_stakers_registered_and_root_different_am SubtensorModule::drain_pending_emission( netuid, pending_alpha, - pending_tao, - 0.into(), + AlphaCurrency::ZERO, 0.into(), ); let stake_after1 = @@ -933,25 +1019,6 @@ fn test_drain_base_with_subnet_with_two_stakers_registered_and_root_different_am stake_after2.into(), epsilon = 10 ); // Registered gets 50% emission - let expected_root1 = I96F32::from_num(2 * u64::from(stake_before)) - + I96F32::from_num(pending_tao.to_u64()) * I96F32::from_num(2.0 / 3.0); - assert_abs_diff_eq!( - expected_root1.to_num::(), - root_after1.into(), - epsilon = 10 - ); // Registered gets 2/3 tao emission - let expected_root2 = I96F32::from_num(u64::from(stake_before)) - + I96F32::from_num(pending_tao.to_u64()) * I96F32::from_num(1.0 / 3.0); - assert_abs_diff_eq!( - expected_root2.to_num::(), - root_after2.into(), - epsilon = 10 - ); // Registered gets 1/3 tao emission - assert_abs_diff_eq!( - SubnetTAO::::get(NetUid::ROOT), - pending_tao, - epsilon = 10.into() - ); }); } @@ -1001,7 +1068,6 @@ fn test_drain_base_with_subnet_with_two_stakers_registered_and_root_different_am SubtensorModule::drain_pending_emission( netuid, pending_alpha, - pending_tao, AlphaCurrency::ZERO, AlphaCurrency::ZERO, ); @@ -1033,27 +1099,6 @@ fn test_drain_base_with_subnet_with_two_stakers_registered_and_root_different_am u64::from(stake_after2), epsilon = 10 ); - // hotkey 1 has 2 / 3 root tao - let expected_root1 = I96F32::from_num(2 * u64::from(stake_before)) - + I96F32::from_num(pending_tao) * I96F32::from_num(2.0 / 3.0); - assert_abs_diff_eq!( - expected_root1.to_num::(), - u64::from(root_after1), - epsilon = 10 - ); - // hotkey 1 has 1 / 3 root tao - let expected_root2 = I96F32::from_num(u64::from(stake_before)) - + I96F32::from_num(pending_tao) * I96F32::from_num(1.0 / 3.0); - assert_abs_diff_eq!( - expected_root2.to_num::(), - u64::from(root_after2), - epsilon = 10 - ); - assert_abs_diff_eq!( - SubnetTAO::::get(NetUid::ROOT), - pending_tao, - epsilon = 10.into() - ); }); } @@ -1084,7 +1129,6 @@ fn test_drain_alpha_childkey_parentkey() { SubtensorModule::drain_pending_emission( netuid, pending_alpha, - TaoCurrency::ZERO, AlphaCurrency::ZERO, AlphaCurrency::ZERO, ); @@ -1310,7 +1354,6 @@ fn test_get_root_children_drain() { SubtensorModule::drain_pending_emission( alpha, pending_alpha, - TaoCurrency::ZERO, AlphaCurrency::ZERO, AlphaCurrency::ZERO, ); @@ -1334,7 +1377,7 @@ fn test_get_root_children_drain() { SubtensorModule::drain_pending_emission( alpha, pending_alpha, - pending_root1, + // pending_root1, AlphaCurrency::ZERO, AlphaCurrency::ZERO, ); @@ -1342,16 +1385,13 @@ fn test_get_root_children_drain() { // Alice and Bob both made half of the dividends. assert_eq!( SubtensorModule::get_stake_for_hotkey_on_subnet(&alice, NetUid::ROOT), - AlphaCurrency::from(alice_root_stake + pending_root1.to_u64() / 2) + AlphaCurrency::from(alice_root_stake) ); assert_eq!( SubtensorModule::get_stake_for_hotkey_on_subnet(&bob, NetUid::ROOT), - AlphaCurrency::from(bob_root_stake + pending_root1.to_u64() / 2) + AlphaCurrency::from(bob_root_stake) ); - // The pending root dividends should be present in root subnet. 
- assert_eq!(SubnetTAO::::get(NetUid::ROOT), pending_root1); - // Lets change the take value. (Bob is greedy.) ChildkeyTake::::insert(bob, alpha, u16::MAX); @@ -1361,7 +1401,6 @@ fn test_get_root_children_drain() { SubtensorModule::drain_pending_emission( alpha, pending_alpha, - pending_root2, AlphaCurrency::ZERO, AlphaCurrency::ZERO, ); @@ -1371,25 +1410,12 @@ fn test_get_root_children_drain() { AlphaDividendsPerSubnet::::get(alpha, alice), AlphaCurrency::ZERO ); - assert_eq!( - TaoDividendsPerSubnet::::get(alpha, alice), - TaoCurrency::ZERO - ); // Bob makes it all. assert_abs_diff_eq!( AlphaDividendsPerSubnet::::get(alpha, bob), pending_alpha, epsilon = 1.into() ); - assert_eq!( - TaoDividendsPerSubnet::::get(alpha, bob), - pending_root2 - ); - // The pending root dividends should be present in root subnet. - assert_eq!( - SubnetTAO::::get(NetUid::ROOT), - pending_root1 + pending_root2 - ); }); } @@ -1463,7 +1489,6 @@ fn test_get_root_children_drain_half_proportion() { SubtensorModule::drain_pending_emission( alpha, pending_alpha, - TaoCurrency::ZERO, AlphaCurrency::ZERO, AlphaCurrency::ZERO, ); @@ -1550,7 +1575,6 @@ fn test_get_root_children_drain_with_take() { SubtensorModule::drain_pending_emission( alpha, pending_alpha, - TaoCurrency::ZERO, AlphaCurrency::ZERO, AlphaCurrency::ZERO, ); @@ -1638,7 +1662,6 @@ fn test_get_root_children_drain_with_half_take() { SubtensorModule::drain_pending_emission( alpha, pending_alpha, - TaoCurrency::ZERO, AlphaCurrency::ZERO, AlphaCurrency::ZERO, ); @@ -1940,8 +1963,8 @@ fn test_calculate_dividend_distribution_totals() { let mut dividends: BTreeMap = BTreeMap::new(); let pending_validator_alpha = AlphaCurrency::from(183_123_567_452); - let pending_tao = TaoCurrency::from(837_120_949_872); - let tao_weight: U96F32 = U96F32::saturating_from_num(0.18); // 18% + let pending_root_alpha = AlphaCurrency::from(837_120_949_872); + let tao_weight: U96F32 = U96F32::from_num(0.18); // 18% let hotkeys = [U256::from(0), U256::from(1)]; @@ -1951,26 +1974,27 @@ fn test_calculate_dividend_distribution_totals() { dividends.insert(hotkeys[0], 77_783_738_u64.into()); dividends.insert(hotkeys[1], 19_283_940_u64.into()); - let (alpha_dividends, tao_dividends) = SubtensorModule::calculate_dividend_distribution( - pending_validator_alpha, - pending_tao, - tao_weight, - stake_map, - dividends, - ); + let (alpha_dividends, root_alpha_dividends) = + SubtensorModule::calculate_dividend_distribution( + pending_validator_alpha, + pending_root_alpha, + tao_weight, + stake_map, + dividends, + ); // Verify the total of each dividends type is close to the inputs. 
let total_alpha_dividends = alpha_dividends.values().sum::(); - let total_tao_dividends = tao_dividends.values().sum::(); + let total_root_alpha_dividends = root_alpha_dividends.values().sum::(); assert_abs_diff_eq!( - total_alpha_dividends.saturating_to_num::(), + total_alpha_dividends.to_num::(), u64::from(pending_validator_alpha), epsilon = 1_000 ); assert_abs_diff_eq!( - total_tao_dividends.saturating_to_num::(), - pending_tao.to_u64(), + total_root_alpha_dividends.to_num::(), + pending_root_alpha.to_u64(), epsilon = 1_000 ); }); @@ -1983,8 +2007,8 @@ fn test_calculate_dividend_distribution_total_only_tao() { let mut dividends: BTreeMap = BTreeMap::new(); let pending_validator_alpha = AlphaCurrency::ZERO; - let pending_tao = TaoCurrency::from(837_120_949_872); - let tao_weight: U96F32 = U96F32::saturating_from_num(0.18); // 18% + let pending_root_alpha = AlphaCurrency::from(837_120_949_872); + let tao_weight: U96F32 = U96F32::from_num(0.18); // 18% let hotkeys = [U256::from(0), U256::from(1)]; @@ -1994,26 +2018,27 @@ fn test_calculate_dividend_distribution_total_only_tao() { dividends.insert(hotkeys[0], 77_783_738_u64.into()); dividends.insert(hotkeys[1], 19_283_940_u64.into()); - let (alpha_dividends, tao_dividends) = SubtensorModule::calculate_dividend_distribution( - pending_validator_alpha, - pending_tao, - tao_weight, - stake_map, - dividends, - ); + let (alpha_dividends, root_alpha_dividends) = + SubtensorModule::calculate_dividend_distribution( + pending_validator_alpha, + pending_root_alpha, + tao_weight, + stake_map, + dividends, + ); // Verify the total of each dividends type is close to the inputs. let total_alpha_dividends = alpha_dividends.values().sum::(); - let total_tao_dividends = tao_dividends.values().sum::(); + let total_root_alpha_dividends = root_alpha_dividends.values().sum::(); assert_abs_diff_eq!( - total_alpha_dividends.saturating_to_num::(), + total_alpha_dividends.to_num::(), u64::from(pending_validator_alpha), epsilon = 1_000 ); assert_abs_diff_eq!( - total_tao_dividends.saturating_to_num::(), - pending_tao.to_u64(), + total_root_alpha_dividends.to_num::(), + pending_root_alpha.to_u64(), epsilon = 1_000 ); }); @@ -2027,7 +2052,7 @@ fn test_calculate_dividend_distribution_total_no_tao_weight() { let pending_validator_alpha = AlphaCurrency::from(183_123_567_452); let pending_tao = TaoCurrency::ZERO; // If tao weight is 0, then only alpha dividends should be input. 
- let tao_weight: U96F32 = U96F32::saturating_from_num(0.0); // 0% + let tao_weight: U96F32 = U96F32::from_num(0.0); // 0% let hotkeys = [U256::from(0), U256::from(1)]; @@ -2039,7 +2064,8 @@ fn test_calculate_dividend_distribution_total_no_tao_weight() { let (alpha_dividends, tao_dividends) = SubtensorModule::calculate_dividend_distribution( pending_validator_alpha, - pending_tao, + // pending_tao, + AlphaCurrency::ZERO, tao_weight, stake_map, dividends, @@ -2050,12 +2076,12 @@ fn test_calculate_dividend_distribution_total_no_tao_weight() { let total_tao_dividends = tao_dividends.values().sum::(); assert_abs_diff_eq!( - total_alpha_dividends.saturating_to_num::(), + total_alpha_dividends.to_num::(), u64::from(pending_validator_alpha), epsilon = 1_000 ); assert_abs_diff_eq!( - total_tao_dividends.saturating_to_num::(), + total_tao_dividends.to_num::(), pending_tao.to_u64(), epsilon = 1_000 ); @@ -2070,7 +2096,7 @@ fn test_calculate_dividend_distribution_total_only_alpha() { let pending_validator_alpha = AlphaCurrency::from(183_123_567_452); let pending_tao = TaoCurrency::ZERO; - let tao_weight: U96F32 = U96F32::saturating_from_num(0.18); // 18% + let tao_weight: U96F32 = U96F32::from_num(0.18); // 18% let hotkeys = [U256::from(0), U256::from(1)]; @@ -2082,7 +2108,8 @@ fn test_calculate_dividend_distribution_total_only_alpha() { let (alpha_dividends, tao_dividends) = SubtensorModule::calculate_dividend_distribution( pending_validator_alpha, - pending_tao, + // pending_tao, + AlphaCurrency::ZERO, tao_weight, stake_map, dividends, @@ -2093,12 +2120,12 @@ fn test_calculate_dividend_distribution_total_only_alpha() { let total_tao_dividends = tao_dividends.values().sum::(); assert_abs_diff_eq!( - total_alpha_dividends.saturating_to_num::(), + total_alpha_dividends.to_num::(), u64::from(pending_validator_alpha), epsilon = 1_000 ); assert_abs_diff_eq!( - total_tao_dividends.saturating_to_num::(), + total_tao_dividends.to_num::(), pending_tao.to_u64(), epsilon = 1_000 ); @@ -2128,7 +2155,7 @@ fn test_calculate_dividend_and_incentive_distribution() { let pending_validator_alpha = pending_alpha / 2.into(); // Pay half to validators. let pending_tao = TaoCurrency::ZERO; let pending_swapped = 0; // Only alpha output. - let tao_weight: U96F32 = U96F32::saturating_from_num(0.0); // 0% + let tao_weight: U96F32 = U96F32::from_num(0.0); // 0% // Hotkey, Incentive, Dividend let hotkey_emission = vec![(hotkey, pending_alpha / 2.into(), pending_alpha / 2.into())]; @@ -2136,20 +2163,18 @@ fn test_calculate_dividend_and_incentive_distribution() { let (incentives, (alpha_dividends, tao_dividends)) = SubtensorModule::calculate_dividend_and_incentive_distribution( netuid, - pending_tao, + // pending_tao, + AlphaCurrency::ZERO, pending_validator_alpha, hotkey_emission, tao_weight, ); let incentives_total = incentives.values().copied().map(u64::from).sum::(); - let dividends_total = alpha_dividends - .values() - .sum::() - .saturating_to_num::(); + let dividends_total = alpha_dividends.values().sum::().to_num::(); assert_abs_diff_eq!( - dividends_total.saturating_add(incentives_total), + dividends_total + incentives_total, u64::from(pending_alpha), epsilon = 2 ); @@ -2178,7 +2203,7 @@ fn test_calculate_dividend_and_incentive_distribution_all_to_validators() { let pending_alpha = AlphaCurrency::from(123_456_789); let pending_validator_alpha = pending_alpha; // Pay all to validators. 
let pending_tao = TaoCurrency::ZERO; - let tao_weight: U96F32 = U96F32::saturating_from_num(0.0); // 0% + let tao_weight: U96F32 = U96F32::from_num(0.0); // 0% // Hotkey, Incentive, Dividend let hotkey_emission = vec![(hotkey, 0.into(), pending_alpha)]; @@ -2186,20 +2211,18 @@ fn test_calculate_dividend_and_incentive_distribution_all_to_validators() { let (incentives, (alpha_dividends, tao_dividends)) = SubtensorModule::calculate_dividend_and_incentive_distribution( netuid, - pending_tao, + // pending_tao, + AlphaCurrency::ZERO, pending_validator_alpha, hotkey_emission, tao_weight, ); let incentives_total = incentives.values().copied().map(u64::from).sum::(); - let dividends_total = alpha_dividends - .values() - .sum::() - .saturating_to_num::(); + let dividends_total = alpha_dividends.values().sum::().to_num::(); assert_eq!( - AlphaCurrency::from(dividends_total.saturating_add(incentives_total)), + AlphaCurrency::from(dividends_total + incentives_total), pending_alpha ); }); @@ -2226,7 +2249,7 @@ fn test_calculate_dividends_and_incentives() { let divdends = AlphaCurrency::from(123_456_789); let incentive = AlphaCurrency::from(683_051_923); - let total_emission = divdends.saturating_add(incentive); + let total_emission = divdends + incentive; // Hotkey, Incentive, Dividend let hotkey_emission = vec![(hotkey, incentive, divdends)]; @@ -2238,17 +2261,10 @@ fn test_calculate_dividends_and_incentives() { .values() .copied() .fold(AlphaCurrency::ZERO, |acc, x| acc + x); - let dividends_total = AlphaCurrency::from( - dividends - .values() - .sum::() - .saturating_to_num::(), - ); + let dividends_total = + AlphaCurrency::from(dividends.values().sum::().to_num::()); - assert_eq!( - dividends_total.saturating_add(incentives_total), - total_emission - ); + assert_eq!(dividends_total + incentives_total, total_emission); }); } @@ -2284,12 +2300,8 @@ fn test_calculate_dividends_and_incentives_only_validators() { .values() .copied() .fold(AlphaCurrency::ZERO, |acc, x| acc + x); - let dividends_total = AlphaCurrency::from( - dividends - .values() - .sum::() - .saturating_to_num::(), - ); + let dividends_total = + AlphaCurrency::from(dividends.values().sum::().to_num::()); assert_eq!(dividends_total, divdends); assert_eq!(incentives_total, AlphaCurrency::ZERO); @@ -2328,12 +2340,8 @@ fn test_calculate_dividends_and_incentives_only_miners() { .values() .copied() .fold(AlphaCurrency::ZERO, |acc, x| acc + x); - let dividends_total = AlphaCurrency::from( - dividends - .values() - .sum::() - .saturating_to_num::(), - ); + let dividends_total = + AlphaCurrency::from(dividends.values().sum::().to_num::()); assert_eq!(incentives_total, incentive); assert_eq!(dividends_total, divdends); @@ -2369,7 +2377,6 @@ fn test_drain_pending_emission_no_miners_all_drained() { SubtensorModule::drain_pending_emission( netuid, emission, - TaoCurrency::ZERO, AlphaCurrency::ZERO, AlphaCurrency::ZERO, ); @@ -2380,7 +2387,7 @@ fn test_drain_pending_emission_no_miners_all_drained() { // Slight epsilon due to rounding (hotkey_take). 
assert_abs_diff_eq!( new_stake, - u64::from(emission.saturating_add(init_stake.into())).into(), + u64::from(emission + init_stake.into()).into(), epsilon = 1.into() ); }); @@ -2442,7 +2449,6 @@ fn test_drain_pending_emission_zero_emission() { SubtensorModule::drain_pending_emission( netuid, 0.into(), - TaoCurrency::ZERO, AlphaCurrency::ZERO, AlphaCurrency::ZERO, ); @@ -2536,7 +2542,7 @@ fn test_run_coinbase_not_started() { assert!(SubtensorModule::should_run_epoch(netuid, current_block)); // Run coinbase with emission. - SubtensorModule::run_coinbase(U96F32::saturating_from_num(100_000_000)); + SubtensorModule::run_coinbase(U96F32::from_num(100_000_000)); // We expect that the epoch ran. assert_eq!(BlocksSinceLastStep::::get(netuid), 0); @@ -2627,7 +2633,7 @@ fn test_run_coinbase_not_started_start_after() { assert!(SubtensorModule::should_run_epoch(netuid, current_block)); // Run coinbase with emission. - SubtensorModule::run_coinbase(U96F32::saturating_from_num(100_000_000)); + SubtensorModule::run_coinbase(U96F32::from_num(100_000_000)); // We expect that the epoch ran. assert_eq!(BlocksSinceLastStep::::get(netuid), 0); @@ -2647,7 +2653,7 @@ fn test_run_coinbase_not_started_start_after() { ); // Run coinbase with emission. - SubtensorModule::run_coinbase(U96F32::saturating_from_num(100_000_000)); + SubtensorModule::run_coinbase(U96F32::from_num(100_000_000)); // We expect that the epoch ran. assert_eq!(BlocksSinceLastStep::::get(netuid), 0); @@ -2736,7 +2742,6 @@ fn test_drain_alpha_childkey_parentkey_with_burn() { SubtensorModule::drain_pending_emission( netuid, pending_alpha, - TaoCurrency::ZERO, AlphaCurrency::ZERO, AlphaCurrency::ZERO, ); @@ -2876,3 +2881,31 @@ fn test_incentive_goes_to_hotkey_when_no_autostake_destination() { ); }); } + +// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --package pallet-subtensor --lib -- tests::coinbase::test_zero_shares_zero_emission --exact --show-output --nocapture +#[test] +fn test_zero_shares_zero_emission() { + new_test_ext(1).execute_with(|| { + let subnet_owner_ck = U256::from(0); + let subnet_owner_hk = U256::from(1); + let netuid1 = add_dynamic_network(&subnet_owner_hk, &subnet_owner_ck); + let netuid2 = add_dynamic_network(&subnet_owner_hk, &subnet_owner_ck); + let emission: u64 = 1_000_000; + // Setup prices 1 and 1 + let initial: u64 = 1_000_000; + SubnetTAO::::insert(netuid1, TaoCurrency::from(initial)); + SubnetAlphaIn::::insert(netuid1, AlphaCurrency::from(initial)); + SubnetTAO::::insert(netuid2, TaoCurrency::from(initial)); + SubnetAlphaIn::::insert(netuid2, AlphaCurrency::from(initial)); + // Set subnet prices so that both are + // - cut off by lower limit for tao flow method + // - zeroed out for price ema method + SubnetMovingPrice::::insert(netuid1, I96F32::from_num(0)); + SubnetMovingPrice::::insert(netuid2, I96F32::from_num(0)); + // Run coinbase + SubtensorModule::run_coinbase(U96F32::from_num(emission)); + // Netuid 1 is cut off by lower limit, all emission goes to netuid2 + assert_eq!(SubnetAlphaIn::::get(netuid1), initial.into()); + assert_eq!(SubnetAlphaIn::::get(netuid2), initial.into()); + }); +} diff --git a/pallets/subtensor/src/tests/consensus.rs b/pallets/subtensor/src/tests/consensus.rs index 7eb65c3fc0..454f41e2cf 100644 --- a/pallets/subtensor/src/tests/consensus.rs +++ b/pallets/subtensor/src/tests/consensus.rs @@ -1,5 +1,6 @@ #![allow( clippy::arithmetic_side_effects, + clippy::expect_used, clippy::indexing_slicing, clippy::unwrap_used )] diff --git a/pallets/subtensor/src/tests/delegate_info.rs 
b/pallets/subtensor/src/tests/delegate_info.rs index c7aabb899e..0553a88fe5 100644 --- a/pallets/subtensor/src/tests/delegate_info.rs +++ b/pallets/subtensor/src/tests/delegate_info.rs @@ -1,3 +1,4 @@ +#![allow(clippy::expect_used)] use super::mock::*; use codec::Compact; diff --git a/pallets/subtensor/src/tests/ensure.rs b/pallets/subtensor/src/tests/ensure.rs index 298339defa..a59bfd7484 100644 --- a/pallets/subtensor/src/tests/ensure.rs +++ b/pallets/subtensor/src/tests/ensure.rs @@ -1,3 +1,4 @@ +#![allow(clippy::expect_used)] use frame_support::{assert_noop, assert_ok}; use frame_system::Config; use sp_core::U256; diff --git a/pallets/subtensor/src/tests/epoch.rs b/pallets/subtensor/src/tests/epoch.rs index 7c23dc2b2c..cdf44df645 100644 --- a/pallets/subtensor/src/tests/epoch.rs +++ b/pallets/subtensor/src/tests/epoch.rs @@ -1,5 +1,6 @@ #![allow( clippy::arithmetic_side_effects, + clippy::expect_used, clippy::indexing_slicing, clippy::unwrap_used )] @@ -1023,7 +1024,6 @@ fn test_bonds() { SubtensorModule::set_target_registrations_per_interval(netuid, n); SubtensorModule::set_weights_set_rate_limit( netuid, 0 ); SubtensorModule::set_min_allowed_weights( netuid, 1 ); - SubtensorModule::set_max_weight_limit( netuid, u16::MAX ); SubtensorModule::set_bonds_penalty(netuid, u16::MAX); @@ -1324,13 +1324,13 @@ fn test_set_alpha_disabled() { assert_ok!(SubtensorModule::root_register(signer.clone(), hotkey,)); let fee = ::SwapInterface::approx_fee_amount( netuid.into(), - DefaultMinStake::::get().into(), + DefaultMinStake::::get(), ); assert_ok!(SubtensorModule::add_stake( signer.clone(), hotkey, netuid, - (5 * DefaultMinStake::::get().to_u64() + fee).into() + TaoCurrency::from(5) * DefaultMinStake::::get() + fee )); // Only owner can set alpha values assert_ok!(SubtensorModule::register_network(signer.clone(), hotkey)); @@ -1369,7 +1369,6 @@ fn test_active_stake() { SubtensorModule::set_max_registrations_per_block(netuid, n); SubtensorModule::set_target_registrations_per_interval(netuid, n); SubtensorModule::set_min_allowed_weights(netuid, 0); - SubtensorModule::set_max_weight_limit(netuid, u16::MAX); // === Register [validator1, validator2, server1, server2] for key in 0..n as u64 { @@ -1586,7 +1585,6 @@ fn test_outdated_weights() { SubtensorModule::set_max_registrations_per_block(netuid, n); SubtensorModule::set_target_registrations_per_interval(netuid, n); SubtensorModule::set_min_allowed_weights(netuid, 0); - SubtensorModule::set_max_weight_limit(netuid, u16::MAX); SubtensorModule::set_bonds_penalty(netuid, u16::MAX); assert_eq!(SubtensorModule::get_registrations_this_block(netuid), 0); @@ -1776,7 +1774,6 @@ fn test_zero_weights() { SubtensorModule::set_max_registrations_per_block(netuid, n); SubtensorModule::set_target_registrations_per_interval(netuid, n); SubtensorModule::set_min_allowed_weights(netuid, 0); - SubtensorModule::set_max_weight_limit(netuid, u16::MAX); // === Register [validator, server] for key in 0..n as u64 { @@ -1979,7 +1976,6 @@ fn test_deregistered_miner_bonds() { SubtensorModule::set_max_registrations_per_block(netuid, n); SubtensorModule::set_target_registrations_per_interval(netuid, n); SubtensorModule::set_min_allowed_weights(netuid, 0); - SubtensorModule::set_max_weight_limit(netuid, u16::MAX); SubtensorModule::set_bonds_penalty(netuid, u16::MAX); assert_eq!(SubtensorModule::get_registrations_this_block(netuid), 0); @@ -2294,14 +2290,14 @@ fn test_get_set_alpha() { let fee = ::SwapInterface::approx_fee_amount( netuid.into(), - DefaultMinStake::::get().into(), + 
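+// These tests run `epoch_mechanism` under a per-thread tracing subscriber
+// (`with_log_capture` below) and assert on the captured trace output.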
DefaultMinStake::::get(), ); assert_ok!(SubtensorModule::add_stake( signer.clone(), hotkey, netuid, - (DefaultMinStake::::get().to_u64() + fee * 2).into() + DefaultMinStake::::get() + fee * 2.into() )); assert_ok!(SubtensorModule::do_set_alpha_values( @@ -2699,7 +2695,6 @@ fn setup_yuma_3_scenario(netuid: NetUid, n: u16, sparse: bool, max_stake: u64, s SubtensorModule::set_target_registrations_per_interval(netuid, n); SubtensorModule::set_weights_set_rate_limit(netuid, 0); SubtensorModule::set_min_allowed_weights(netuid, 1); - SubtensorModule::set_max_weight_limit(netuid, u16::MAX); SubtensorModule::set_bonds_penalty(netuid, 0); SubtensorModule::set_alpha_sigmoid_steepness(netuid, 1000); SubtensorModule::set_bonds_moving_average(netuid, 975_000); @@ -3883,3 +3878,55 @@ fn test_last_update_size_mismatch() { assert_eq!(SubtensorModule::get_dividends_for_uid(netuid, uid), 0); }); } + +#[test] +fn empty_ok() { + new_test_ext(1).execute_with(|| { + let netuid: NetUid = 155.into(); + assert!(Pallet::::is_epoch_input_state_consistent(netuid)); + }); +} + +#[test] +fn unique_hotkeys_and_uids_ok() { + new_test_ext(1).execute_with(|| { + let netuid: NetUid = 155.into(); + + // (netuid, uid) -> hotkey (AccountId = U256) + Keys::::insert(netuid, 0u16, U256::from(1u64)); + Keys::::insert(netuid, 1u16, U256::from(2u64)); + Keys::::insert(netuid, 2u16, U256::from(3u64)); + + assert!(Pallet::::is_epoch_input_state_consistent(netuid)); + }); +} + +#[test] +fn duplicate_hotkey_within_same_netuid_fails() { + new_test_ext(1).execute_with(|| { + let netuid: NetUid = 155.into(); + + // Same hotkey mapped from two different UIDs in the SAME netuid + let hk = U256::from(42u64); + Keys::::insert(netuid, 0u16, hk); + Keys::::insert(netuid, 1u16, U256::from(42u64)); // duplicate hotkey + + assert!(!Pallet::::is_epoch_input_state_consistent(netuid)); + }); +} + +#[test] +fn same_hotkey_across_different_netuids_is_ok() { + new_test_ext(1).execute_with(|| { + let net_a: NetUid = 10.into(); + let net_b: NetUid = 11.into(); + + // Same hotkey appears once in each netuid — each net checks independently. + let hk = U256::from(777u64); + Keys::::insert(net_a, 0u16, hk); + Keys::::insert(net_b, 0u16, hk); + + assert!(Pallet::::is_epoch_input_state_consistent(net_a)); + assert!(Pallet::::is_epoch_input_state_consistent(net_b)); + }); +} diff --git a/pallets/subtensor/src/tests/epoch_logs.rs b/pallets/subtensor/src/tests/epoch_logs.rs new file mode 100644 index 0000000000..0c95857346 --- /dev/null +++ b/pallets/subtensor/src/tests/epoch_logs.rs @@ -0,0 +1,657 @@ +#![allow( + clippy::arithmetic_side_effects, + clippy::indexing_slicing, + clippy::unwrap_used +)] + +// Run all tests +// cargo test --package pallet-subtensor --lib -- tests::epoch_logs --show-output + +use super::mock::*; +use crate::*; +use frame_support::assert_ok; +use sp_core::U256; +use std::io::{Result as IoResult, Write}; +use std::sync::{Arc, Mutex}; +use subtensor_runtime_common::{AlphaCurrency, MechId}; +use tracing_subscriber::{EnvFilter, layer::SubscriberExt}; + +const NETUID: u16 = 1; +const TEMPO: u16 = 10; +const ACTIVITY_CUTOFF: u16 = 10; +// Coldkey is irrelevant for epoch because it operates only hotkeys. Nominators' stake is distributed downstream +// We can use a single coldkey for all tests here. 
+const COLDKEY: u16 = 9876; + +#[derive(Clone)] +struct Neuron { + uid: u16, + hotkey: U256, + validator: bool, + alpha_stake: u64, + registration_block: u64, + last_update: u64, +} + +impl Neuron { + fn new( + uid: u16, + hotkey: u16, + validator: bool, + alpha_stake: u64, + registration_block: u64, + last_update: u64, + ) -> Self { + Neuron { + uid, + hotkey: U256::from(hotkey), + validator, + alpha_stake, + registration_block, + last_update, + } + } +} + +fn setup_epoch(neurons: Vec<Neuron>, mechanism_count: u8) { + let netuid = NetUid::from(NETUID); + + // Setup subnet parameters + NetworksAdded::<Test>::insert(netuid, true); + let network_n = neurons.len() as u16; + SubnetworkN::<Test>::insert(netuid, network_n); + ActivityCutoff::<Test>::insert(netuid, ACTIVITY_CUTOFF); + Tempo::<Test>::insert(netuid, TEMPO); + SubtensorModule::set_weights_set_rate_limit(netuid, 0); + MechanismCountCurrent::<Test>::insert(netuid, MechId::from(mechanism_count)); + + // Setup neurons + let mut last_update_vec: Vec<u64> = Vec::new(); + let mut permit_vec: Vec<bool> = Vec::new(); + neurons.iter().for_each(|neuron| { + let hotkey = U256::from(neuron.hotkey); + + Keys::<Test>::insert(netuid, neuron.uid, hotkey); + Uids::<Test>::insert(netuid, hotkey, neuron.uid); + BlockAtRegistration::<Test>::insert(netuid, neuron.uid, neuron.registration_block); + last_update_vec.push(neuron.last_update); + permit_vec.push(neuron.validator); + + // Setup stake + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, + &U256::from(COLDKEY), + netuid, + AlphaCurrency::from(neuron.alpha_stake), + ); + }); + + ValidatorPermit::<Test>::insert(netuid, permit_vec); + for m in 0..mechanism_count { + let netuid_index = SubtensorModule::get_mechanism_storage_index(netuid, m.into()); + LastUpdate::<Test>::insert(netuid_index, last_update_vec.clone()); + } +} + +fn set_weights(netuid: NetUid, weights: Vec<Vec<u16>>, indices: Vec<u16>) { + for (uid, weight) in weights.iter().enumerate() { + let hotkey = Keys::<Test>::get(netuid, uid as u16); + assert_ok!(SubtensorModule::set_weights( + RuntimeOrigin::signed(hotkey), + netuid, + indices.clone(), + weight.to_vec(), + 0 + )); + } +} + +/// Write sparse weight rows **for a specific mechanism**. +/// `rows` is a list of `(validator_uid, row)` where `row` is `[(dest_uid, weight_u16)]`. +fn set_weights_for_mech(netuid: NetUid, mecid: MechId, rows: Vec<(u16, Vec<(u16, u16)>)>) { + let netuid_index = SubtensorModule::get_mechanism_storage_index(netuid, mecid); + for (uid, sparse_row) in rows { + Weights::<Test>::insert(netuid_index, uid, sparse_row); + } +} + +/// Run `f` with a per-thread subscriber configured by `spec` and +/// return the captured log text. +pub fn with_log_capture<F, R>(spec: &str, f: F) -> String +where + F: FnOnce() -> R, +{ + // ensure log::... is bridged to tracing (no-op if already set) + let _ = tracing_log::LogTracer::init(); + + // Shared buffer we'll write logs into + let buf: Arc<Mutex<Vec<u8>>> = Arc::new(Mutex::new(Vec::new())); + struct Buf(Arc<Mutex<Vec<u8>>>); + impl Write for Buf { + fn write(&mut self, b: &[u8]) -> IoResult<usize> { + let mut g = self.0.lock().unwrap(); + g.extend_from_slice(b); + Ok(b.len()) + } + fn flush(&mut self) -> IoResult<()> { + Ok(()) + } + } + + // Formatting layer that writes into our buffer + let fmt_layer = tracing_subscriber::fmt::layer() + .with_ansi(false) + .with_target(true) + .with_level(true) + .without_time() + .with_writer({ + let buf = buf.clone(); + move || Buf(buf.clone()) + }); + + // Parse filter spec (full RUST_LOG syntax). Fall back to "off" on error.
+ let filter = EnvFilter::try_new(spec).unwrap_or_else(|_| EnvFilter::new("off")); + + // Build the per-thread subscriber + let sub = tracing_subscriber::registry().with(filter).with(fmt_layer); + + // Activate just for this thread/scope and run the code + tracing::subscriber::with_default(sub, f); + + // Collect captured text + let mut g = buf.lock().unwrap(); + String::from_utf8(std::mem::take(&mut *g)).unwrap_or_default() +} + +#[test] +fn test_simple() { + new_test_ext(1).execute_with(|| { + let logs = with_log_capture("trace", || { + #[rustfmt::skip] + let neurons = [ + // uid hotkey vali stake reg upd + Neuron::new( 0, 1, true, 1_000_000_000, 0, 1 ), + Neuron::new( 1, 2, true, 1_000_000_000, 0, 1 ), + ]; + setup_epoch(neurons.to_vec(), 1); + + // Run epoch, watch logs + let emission = AlphaCurrency::from(1_000_000_000); + SubtensorModule::epoch_mechanism(NetUid::from(NETUID), MechId::from(0), emission); + }); + + let has = |s: &str| logs.contains(s); + + assert!(has("Number of Neurons in Network: 2")); + assert!(has("Inactive: [false, false]")); + assert!(has("Block at registration: [0, 0]")); + assert!(has("alpha_stake: [1000000000, 1000000000]")); + assert!(has("Filtered stake: [1000000000, 1000000000]")); + assert!(has("Normalised Stake: [0.5, 0.5]")); + assert!(has("validator_permits: [true, true]")); + assert!(has("new_validator_permits: [true, true]")); + assert!(has("Active Stake: [0.5, 0.5]")); + assert!(has("Consensus: [0, 0]")); + assert!(has("Normalized Validator Emission: [0.5, 0.5]")); + assert!(has("Normalized Combined Emission: [0.5, 0.5]")); + assert!(has( + "Combined Emission: [AlphaCurrency(500000000), AlphaCurrency(500000000)]" + )); + assert!(has("Pruning Scores: [0.5, 0.5]")); + assert!(!has("math error:")); + }); +} + +#[test] +fn test_bad_permit_vector() { + new_test_ext(1).execute_with(|| { + let logs = with_log_capture("trace", || { + #[rustfmt::skip] + let neurons = [ + // uid hotkey vali stake reg upd + Neuron::new( 0, 1, true, 1_000_000_000, 0, 1 ), + Neuron::new( 1, 2, true, 1_000_000_000, 0, 1 ), + ]; + setup_epoch(neurons.to_vec(), 1); + ValidatorPermit::::insert(NetUid::from(NETUID), vec![true]); + + // Run epoch, watch logs + let emission = AlphaCurrency::from(1_000_000_000); + SubtensorModule::epoch_mechanism(NetUid::from(NETUID), MechId::from(0), emission); + }); + + let has = |s: &str| logs.contains(s); + + assert!(has( + "math error: inplace_mask_vector input lengths are not equal" + )); + assert!(has( + "Validator Emission: [AlphaCurrency(1000000000), AlphaCurrency(0)]" + )); + }); +} + +#[test] +fn test_inactive_mask_zeroes_active_stake() { + // Big block so updated + activity_cutoff < current_block + new_test_ext(1_000_000).execute_with(|| { + let logs = with_log_capture("trace", || { + #[rustfmt::skip] + let neurons = [ + // uid hotkey vali stake reg upd + Neuron::new( 0, 11, true, 1_000_000_000, 0, 0 ), + Neuron::new( 1, 22, true, 1_000_000_000, 0, 0 ), + ]; + setup_epoch(neurons.to_vec(), 1); + + let emission = AlphaCurrency::from(1_000_000_000); + SubtensorModule::epoch_mechanism(NetUid::from(NETUID), MechId::from(0), emission); + }); + + let has = |s: &str| logs.contains(s); + assert!(has("Number of Neurons in Network: 2")); + assert!(has("Inactive: [true, true]")); + // After masking + renormalizing, both entries are zero. 
+ assert!(has("Active Stake: [0, 0]")); + }); +} + +#[test] +fn test_validator_permit_masks_active_stake() { + new_test_ext(1).execute_with(|| { + let logs = with_log_capture("trace", || { + #[rustfmt::skip] + let neurons = [ + // uid hotkey vali stake reg upd + Neuron::new( 0, 11, true, 1_000_000_000, 0, 1 ), + Neuron::new( 1, 22, true, 1_000_000_000, 0, 1 ), + ]; + setup_epoch(neurons.to_vec(), 1); + + // Forbid validator #1 + let netuid = NetUid::from(NETUID); + ValidatorPermit::::insert(netuid, vec![true, false]); + + let emission = AlphaCurrency::from(1_000_000_000); + SubtensorModule::epoch_mechanism(netuid, MechId::from(0), emission); + }); + + let has = |s: &str| logs.contains(s); + assert!(has("validator_permits: [true, false]")); + // After masking and renormalizing, only the first stays: [1, 0] + assert!(logs.contains("Active Stake: [1, 0]")); + }); +} + +#[test] +fn yuma_emergency_mode() { + // Large block so everyone is inactive (updated + cutoff < current_block) + new_test_ext(1_000_000).execute_with(|| { + let logs = with_log_capture("trace", || { + #[rustfmt::skip] + let neurons = [ + // uid hotkey vali stake reg upd + Neuron::new( 0, 11, true, 1_000_000_000, 0, 0 ), + Neuron::new( 1, 22, true, 1_000_000_000, 0, 0 ), + ]; + setup_epoch(neurons.to_vec(), 1); + + // No weights needed; keep defaults empty to make ranks/dividends zero. + let emission = AlphaCurrency::from(1_000_000_000); + SubtensorModule::epoch_mechanism(NetUid::from(NETUID), MechId::from(0), emission); + }); + + let has = |s: &str| logs.contains(s); + assert!(has("Inactive: [true, true]")); + // Because emission_sum == 0 and active_stake == 0, we expect fallback to normalized stake. + assert!(has("Normalized Combined Emission: [0.5, 0.5]")); + }); +} + +#[test] +fn epoch_uses_active_stake_when_nonzero_active() { + new_test_ext(1000).execute_with(|| { + let logs = with_log_capture("trace", || { + #[rustfmt::skip] + let neurons = [ + // uid hotkey vali stake reg upd + Neuron::new( 0, 11, true, 1_000_000_000, 0, 999 ), // active + Neuron::new( 1, 22, true, 1_000_000_000, 0, 1 ), // inactive + ]; + setup_epoch(neurons.to_vec(), 1); + + let emission = AlphaCurrency::from(1_000_000_000); + SubtensorModule::epoch_mechanism(NetUid::from(NETUID), MechId::from(0), emission); + }); + + let has = |s: &str| logs.contains(s); + assert!(has("Inactive: [false, true]")); + // With ranks/dividends zero, fallback should mirror active_stake ~ [1, 0]. 
+ assert!(has("Active Stake: [1, 0]")); + assert!(has("Normalized Combined Emission: [1, 0]")); + }); +} + +#[test] +fn epoch_topk_validator_permits() { + new_test_ext(1).execute_with(|| { + let logs = with_log_capture("trace", || { + #[rustfmt::skip] + let neurons = [ + // uid hotkey vali stake reg upd + Neuron::new( 0, 11, true, 2_000_000_000, 0, 1 ), + Neuron::new( 1, 22, true, 1_000_000_000, 0, 1 ), + ]; + setup_epoch(neurons.to_vec(), 1); + + // K = 1 (one validator allowed) + let netuid = NetUid::from(NETUID); + MaxAllowedValidators::::insert(netuid, 1u16); + + let emission = AlphaCurrency::from(1_000_000_000); + SubtensorModule::epoch_mechanism(netuid, MechId::from(0), emission); + }); + + let has = |s: &str| logs.contains(s); + assert!( + has("Normalised Stake: [0.666"), + "sanity: asymmetric stake normalized" + ); + assert!(has("max_allowed_validators: 1")); + assert!(has("new_validator_permits: [true, false]")); + }); +} + +#[test] +fn epoch_yuma3_bonds_pipeline() { + new_test_ext(1).execute_with(|| { + let logs = with_log_capture("trace", || { + #[rustfmt::skip] + let neurons = [ + Neuron::new(0, 11, true, 1_000_000_000, 0, 1), + Neuron::new(1, 22, true, 1_000_000_000, 0, 1), + ]; + setup_epoch(neurons.to_vec(), 1); + + let netuid = NetUid::from(NETUID); + Yuma3On::::insert(netuid, true); + + let emission = AlphaCurrency::from(1_000_000_000); + SubtensorModule::epoch_mechanism(netuid, MechId::from(0), emission); + }); + + let has = |s: &str| logs.contains(s); + // These appear only in the Yuma3 branch: + assert!(has("Bonds: ")); + assert!(has("emaB: [")); + assert!(has("emaB norm: ")); + assert!(has("total_bonds_per_validator: ")); + }); +} + +#[test] +fn epoch_original_yuma_bonds_pipeline() { + new_test_ext(1).execute_with(|| { + let logs = with_log_capture("trace", || { + #[rustfmt::skip] + let neurons = [ + Neuron::new(0, 11, true, 1_000_000_000, 0, 1), + Neuron::new(1, 22, true, 1_000_000_000, 0, 1), + ]; + setup_epoch(neurons.to_vec(), 1); + + let netuid = NetUid::from(NETUID); + Yuma3On::::insert(netuid, false); + + let emission = AlphaCurrency::from(1_000_000_000); + SubtensorModule::epoch_mechanism(netuid, MechId::from(0), emission); + }); + + let has = |s: &str| logs.contains(s); + // These strings are present in the non-Yuma3 branch: + assert!(has("B (outdatedmask): ")); + assert!(has("ΔB (norm): ")); + assert!(has("Exponential Moving Average Bonds: ")); + }); +} + +#[test] +fn test_validators_weight_two_distinct_servers() { + new_test_ext(1).execute_with(|| { + let logs = with_log_capture("trace", || { + #[rustfmt::skip] + let neurons = [ + // uid hotkey vali stake reg upd + Neuron::new( 0, 11, true, 1_000_000_000, 0, 1 ), // validator + Neuron::new( 1, 22, true, 1_000_000_000, 0, 1 ), // validator + Neuron::new( 2, 33, true, 1_000_000_000, 0, 1 ), // validator + Neuron::new( 3, 44, false, 0, 0, 1 ), // server + Neuron::new( 4, 55, false, 0, 0, 1 ), // server + ]; + setup_epoch(neurons.to_vec(), 1); + + let netuid = NetUid::from(NETUID); + + // rows are per-validator; columns correspond to server UIDs [3, 4] + // V0 -> [MAX, 0] (server 3) + // V1 -> [0, MAX] (server 4) + // V2 -> [MAX, 0] (server 3) + CommitRevealWeightsEnabled::::insert(netuid, false); + set_weights( + netuid, + vec![vec![u16::MAX, 0], vec![0, u16::MAX], vec![u16::MAX, 0]], + vec![3, 4], + ); + + let emission = AlphaCurrency::from(1_000_000_000); + SubtensorModule::epoch_mechanism(netuid, MechId::from(0), emission); + }); + + let has = |s: &str| logs.contains(s); + + // topology sanity + 
assert!(has("Number of Neurons in Network: 5")); + assert!(has("validator_permits: [true, true, true, false, false]")); + + // weight pipeline exercised + assert!(has("Weights: [[(3, 65535), (4, 0)], [(3, 0), (4, 65535)], [(3, 65535), (4, 0)], [], []]")); + assert!(has("Weights (permit): [[(3, 65535), (4, 0)], [(3, 0), (4, 65535)], [(3, 65535), (4, 0)], [], []]")); + assert!(has("Weights (permit+diag): [[(3, 65535), (4, 0)], [(3, 0), (4, 65535)], [(3, 65535), (4, 0)], [], []]")); + assert!(has("Weights (mask+norm): [[(3, 1), (4, 0)], [(3, 0), (4, 1)], [(3, 1), (4, 0)], [], []]")); + + // downstream signals present + assert!(has("Ranks (before): [0, 0, 0, 0.6666666665, 0.3333333333]")); + assert!(has("Consensus: [0, 0, 0, 1, 0]")); + assert!(has("Validator Trust: [1, 0, 1, 0, 0]")); + assert!(has("Ranks (after): [0, 0, 0, 0.6666666665, 0]")); + assert!(has("Trust: [0, 0, 0, 1, 0]")); + assert!(has("Dividends: [0.5, 0, 0.5, 0, 0]")); + assert!(has("Normalized Combined Emission: [0.25, 0, 0.25, 0.5, 0]")); + assert!(has("Pruning Scores: [0.25, 0, 0.25, 0.5, 0]")); + + // math is ok + assert!(!has("math error:")); + }); +} + +#[test] +fn test_validator_splits_weight_across_two_servers() { + new_test_ext(1).execute_with(|| { + let logs = with_log_capture("trace", || { + #[rustfmt::skip] + let neurons = [ + Neuron::new(0, 11, true, 1_000_000_000, 0, 1), + Neuron::new(1, 22, true, 1_000_000_000, 0, 1), + Neuron::new(2, 33, true, 1_000_000_000, 0, 1), + Neuron::new(3, 44, false, 0, 0, 1), + Neuron::new(4, 55, false, 0, 0, 1), + ]; + setup_epoch(neurons.to_vec(), 1); + + let netuid = NetUid::from(NETUID); + + // V2 splits: both entries nonzero; row normalization should make ~[0.5, 0.5] for V2 + CommitRevealWeightsEnabled::::insert(netuid, false); + set_weights( + netuid, + vec![vec![u16::MAX, 0], vec![0, u16::MAX], vec![u16::MAX, u16::MAX]], + vec![3, 4], + ); + + let emission = AlphaCurrency::from(1_000_000_000); + SubtensorModule::epoch_mechanism(netuid, MechId::from(0), emission); + }); + + let has = |s: &str| logs.contains(s); + + assert!(has("validator_permits: [true, true, true, false, false]")); + assert!(has("Weights (mask+norm): [[(3, 1), (4, 0)], [(3, 0), (4, 1)], [(3, 0.5), (4, 0.5)], [], []]")); + assert!(has("Ranks (before): [0, 0, 0, 0.4999999998, 0.4999999998]")); + assert!(has("Ranks (after): [0, 0, 0, 0.333333333, 0.333333333]")); + assert!(has("ΔB (norm): [[(3, 0.5), (4, 0)], [(3, 0), (4, 0.5)], [(3, 0.5), (4, 0.5)], [], []]")); + assert!(has("Dividends: [0.25, 0.25, 0.5, 0, 0]")); + assert!(has("Normalized Combined Emission: [0.125, 0.125, 0.25, 0.25, 0.25]")); + assert!(!has("math error:")); + }); +} + +#[test] +fn epoch_mechanism_reads_weights_per_mechanism() { + new_test_ext(1).execute_with(|| { + let netuid = NetUid::from(NETUID); + + // 3 validators (0,1,2) and 2 servers (3,4) + #[rustfmt::skip] + let neurons = [ + Neuron::new(0, 11, true, 1_000_000_000, 0, 1), + Neuron::new(1, 22, true, 1_000_000_000, 0, 1), + Neuron::new(2, 33, true, 1_000_000_000, 0, 1), + Neuron::new(3, 44, false, 0, 0, 1), + Neuron::new(4, 55, false, 0, 0, 1), + ]; + setup_epoch(neurons.to_vec(), 2); // 2 mechanisms + + // Mech 0: V0,V2 -> server 3 ; V1 -> server 4 + set_weights_for_mech( + netuid, + MechId::from(0), + vec![ + (0, vec![(3, u16::MAX)]), + (1, vec![(4, u16::MAX)]), + (2, vec![(3, u16::MAX)]), + ], + ); + let logs_m0 = with_log_capture("trace", || { + SubtensorModule::epoch_mechanism(netuid, MechId::from(0), AlphaCurrency::from(1_000)); + }); + + // Mech 1: flipped routing: V0,V2 -> server 
4 ; V1 -> server 3 + set_weights_for_mech( + netuid, + MechId::from(1), + vec![ + (0, vec![(4, u16::MAX)]), + (1, vec![(3, u16::MAX)]), + (2, vec![(4, u16::MAX)]), + ], + ); + let logs_m1 = with_log_capture("trace", || { + SubtensorModule::epoch_mechanism(netuid, MechId::from(1), AlphaCurrency::from(1_000)); + }); + + // Both should run the full pipeline… + assert!(logs_m0.contains("Active Stake: [0.3333333333, 0.3333333333, 0.3333333333, 0, 0]")); + assert!(logs_m1.contains("Active Stake: [0.3333333333, 0.3333333333, 0.3333333333, 0, 0]")); + assert!(logs_m0.contains("Weights (mask+norm): [[(3, 1)], [(4, 1)], [(3, 1)], [], []]")); + assert!(logs_m1.contains("Weights (mask+norm): [[(4, 1)], [(3, 1)], [(4, 1)], [], []]")); + assert!(logs_m0.contains("Ranks (before): [0, 0, 0, 0.6666666665, 0.3333333333]")); + assert!(logs_m1.contains("Ranks (before): [0, 0, 0, 0.3333333333, 0.6666666665]")); + assert!(logs_m0.contains("ΔB (norm): [[(3, 0.5)], [], [(3, 0.5)], [], []]")); + assert!(logs_m1.contains("ΔB (norm): [[(4, 0.5)], [], [(4, 0.5)], [], []]")); + assert!(logs_m0.contains("Normalized Combined Emission: [0.25, 0, 0.25, 0.5, 0]")); + assert!(logs_m1.contains("Normalized Combined Emission: [0.25, 0, 0.25, 0, 0.5]")); + + // ...and produce different logs because weights differ per mechanism. + assert_ne!( + logs_m0, logs_m1, + "mechanism-specific weights should yield different outcomes/logs" + ); + }); +} + +// cargo test --package pallet-subtensor --lib -- tests::epoch_logs::epoch_mechanism_three_mechanisms_separate_state --exact --show-output +#[test] +fn epoch_mechanism_three_mechanisms_separate_state() { + new_test_ext(1).execute_with(|| { + let netuid = NetUid::from(NETUID); + + // 2 validators, 2 servers + #[rustfmt::skip] + let neurons = [ + Neuron::new(0, 11, true, 1_000_000_000, 0, 1), + Neuron::new(1, 22, true, 1_000_000_000, 0, 1), + Neuron::new(2, 44, false, 0, 0, 1), + Neuron::new(3, 55, false, 0, 0, 1), + ]; + setup_epoch(neurons.to_vec(), 3); // 3 mechanisms + + // Mech 0: all validators -> server 2 + set_weights_for_mech( + netuid, + MechId::from(0), + vec![(0, vec![(2, u16::MAX)]), (1, vec![(2, u16::MAX)])], + ); + + // Mech 1: split across both servers (two nonzero entries per row) + set_weights_for_mech( + netuid, + MechId::from(1), + vec![ + (0, vec![(2, u16::MAX), (3, u16::MAX)]), + (1, vec![(2, u16::MAX), (3, u16::MAX)]), + ], + ); + + // Mech 2: all validators -> server 3 + set_weights_for_mech( + netuid, + MechId::from(2), + vec![(0, vec![(3, u16::MAX)]), (1, vec![(3, u16::MAX)])], + ); + + let l0 = with_log_capture("trace", || { + SubtensorModule::epoch_mechanism(netuid, MechId::from(0), AlphaCurrency::from(1_000)); + }); + let l1 = with_log_capture("trace", || { + SubtensorModule::epoch_mechanism(netuid, MechId::from(1), AlphaCurrency::from(1_000)); + }); + let l2 = with_log_capture("trace", || { + SubtensorModule::epoch_mechanism(netuid, MechId::from(2), AlphaCurrency::from(1_000)); + }); + + // Check major epoch indicators + assert!(l0.contains("Weights (mask+norm): [[(2, 1)], [(2, 1)], [], []]")); + assert!(l0.contains("Ranks (before): [0, 0, 1, 0]")); + assert!(l0.contains("ΔB (norm): [[(2, 0.5)], [(2, 0.5)], [], []]")); + assert!(l0.contains("Normalized Combined Emission: [0.25, 0.25, 0.5, 0]")); + + assert!( + l1.contains( + "Weights (mask+norm): [[(2, 0.5), (3, 0.5)], [(2, 0.5), (3, 0.5)], [], []]" + ) + ); + assert!(l1.contains("Ranks (before): [0, 0, 0.5, 0.5]")); + assert!(l1.contains("ΔB (norm): [[(2, 0.5), (3, 0.5)], [(2, 0.5), (3, 0.5)], [], []]")); + 
assert!(l1.contains("Normalized Combined Emission: [0.25, 0.25, 0.25, 0.25]")); + + assert!(l2.contains("Weights (mask+norm): [[(3, 1)], [(3, 1)], [], []]")); + assert!(l2.contains("Ranks (before): [0, 0, 0, 1]")); + assert!(l2.contains("ΔB (norm): [[(3, 0.5)], [(3, 0.5)], [], []]")); + assert!(l2.contains("Normalized Combined Emission: [0.25, 0.25, 0, 0.5]")); + + // Distinct outcomes + assert_ne!(l0, l1); + assert_ne!(l1, l2); + assert_ne!(l0, l2); + }); +} diff --git a/pallets/subtensor/src/tests/evm.rs b/pallets/subtensor/src/tests/evm.rs index 0e10262497..6d668d738d 100644 --- a/pallets/subtensor/src/tests/evm.rs +++ b/pallets/subtensor/src/tests/evm.rs @@ -1,5 +1,6 @@ #![allow( clippy::arithmetic_side_effects, + clippy::expect_used, clippy::unwrap_used, clippy::indexing_slicing )] diff --git a/pallets/subtensor/src/tests/mechanism.rs b/pallets/subtensor/src/tests/mechanism.rs index 8e5b1563d8..e5c46e8722 100644 --- a/pallets/subtensor/src/tests/mechanism.rs +++ b/pallets/subtensor/src/tests/mechanism.rs @@ -1,5 +1,6 @@ #![allow( clippy::arithmetic_side_effects, + clippy::expect_used, clippy::indexing_slicing, clippy::unwrap_used )] diff --git a/pallets/subtensor/src/tests/migration.rs b/pallets/subtensor/src/tests/migration.rs index 2d5ada4099..28159d41bc 100644 --- a/pallets/subtensor/src/tests/migration.rs +++ b/pallets/subtensor/src/tests/migration.rs @@ -1,4 +1,10 @@ -#![allow(unused, clippy::indexing_slicing, clippy::panic, clippy::unwrap_used)] +#![allow( + unused, + clippy::expect_used, + clippy::indexing_slicing, + clippy::panic, + clippy::unwrap_used +)] use super::mock::*; use crate::*; @@ -1021,6 +1027,117 @@ fn test_migrate_last_tx_block_delegate_take() { }); } +#[test] +fn test_migrate_rate_limit_keys() { + new_test_ext(1).execute_with(|| { + const MIGRATION_NAME: &[u8] = b"migrate_rate_limit_keys"; + let prefix = { + let pallet_prefix = twox_128("SubtensorModule".as_bytes()); + let storage_prefix = twox_128("LastRateLimitedBlock".as_bytes()); + [pallet_prefix, storage_prefix].concat() + }; + + // Seed new-format entries that must survive the migration untouched. + let new_last_account = U256::from(10); + SubtensorModule::set_last_tx_block(&new_last_account, 555); + let new_child_account = U256::from(11); + SubtensorModule::set_last_tx_block_childkey(&new_child_account, 777); + let new_delegate_account = U256::from(12); + SubtensorModule::set_last_tx_block_delegate_take(&new_delegate_account, 888); + + // Legacy NetworkLastRegistered entry (index 1) + let mut legacy_network_key = prefix.clone(); + legacy_network_key.push(1u8); + sp_io::storage::set(&legacy_network_key, &111u64.encode()); + + // Legacy LastTxBlock entry (index 2) for an account that already has a new-format value. 
+ let mut legacy_last_key = prefix.clone(); + legacy_last_key.push(2u8); + legacy_last_key.extend_from_slice(&new_last_account.encode()); + sp_io::storage::set(&legacy_last_key, &666u64.encode()); + + // Legacy LastTxBlockChildKeyTake entry (index 3) + let legacy_child_account = U256::from(3); + ChildKeys::::insert( + legacy_child_account, + NetUid::from(0), + vec![(0u64, U256::from(99))], + ); + let mut legacy_child_key = prefix.clone(); + legacy_child_key.push(3u8); + legacy_child_key.extend_from_slice(&legacy_child_account.encode()); + sp_io::storage::set(&legacy_child_key, &333u64.encode()); + + // Legacy LastTxBlockDelegateTake entry (index 4) + let legacy_delegate_account = U256::from(4); + Delegates::::insert(legacy_delegate_account, 500u16); + let mut legacy_delegate_key = prefix.clone(); + legacy_delegate_key.push(4u8); + legacy_delegate_key.extend_from_slice(&legacy_delegate_account.encode()); + sp_io::storage::set(&legacy_delegate_key, &444u64.encode()); + + let weight = crate::migrations::migrate_rate_limit_keys::migrate_rate_limit_keys::(); + assert!( + HasMigrationRun::::get(MIGRATION_NAME.to_vec()), + "Migration should be marked as executed" + ); + assert!(!weight.is_zero(), "Migration weight should be non-zero"); + + // Legacy entries were migrated and cleared. + assert_eq!( + SubtensorModule::get_network_last_lock_block(), + 111u64, + "Network last lock block should match migrated value" + ); + assert!( + sp_io::storage::get(&legacy_network_key).is_none(), + "Legacy network entry should be cleared" + ); + + assert_eq!( + SubtensorModule::get_last_tx_block(&new_last_account), + 666u64, + "LastTxBlock should reflect the merged legacy value" + ); + assert!( + sp_io::storage::get(&legacy_last_key).is_none(), + "Legacy LastTxBlock entry should be cleared" + ); + + assert_eq!( + SubtensorModule::get_last_tx_block_childkey_take(&legacy_child_account), + 333u64, + "Child key take block should be migrated" + ); + assert!( + sp_io::storage::get(&legacy_child_key).is_none(), + "Legacy child take entry should be cleared" + ); + + assert_eq!( + SubtensorModule::get_last_tx_block_delegate_take(&legacy_delegate_account), + 444u64, + "Delegate take block should be migrated" + ); + assert!( + sp_io::storage::get(&legacy_delegate_key).is_none(), + "Legacy delegate take entry should be cleared" + ); + + // New-format entries remain untouched. + assert_eq!( + SubtensorModule::get_last_tx_block_childkey_take(&new_child_account), + 777u64, + "Existing child take entry should be preserved" + ); + assert_eq!( + SubtensorModule::get_last_tx_block_delegate_take(&new_delegate_account), + 888u64, + "Existing delegate take entry should be preserved" + ); + }); +} + #[test] fn test_migrate_fix_root_subnet_tao() { new_test_ext(1).execute_with(|| { @@ -2165,3 +2282,118 @@ fn test_migrate_network_lock_cost_2500_sets_price_and_decay() { ); }); } + +#[test] +fn test_migrate_kappa_map_to_default() { + new_test_ext(1).execute_with(|| { + // ------------------------------ + // 0. Constants / helpers + // ------------------------------ + const MIG_NAME: &[u8] = b"kappa_map_to_default"; + let default: u16 = DefaultKappa::::get(); + + let not_default: u16 = if default == u16::MAX { + default.saturating_sub(1) + } else { + default.saturating_add(1) + }; + + // ------------------------------ + // 1. 
Pre-state: seed using the correct key type (NetUid) + // ------------------------------ + let n0: NetUid = 0u16.into(); + let n1: NetUid = 1u16.into(); + let n2: NetUid = 42u16.into(); + + Kappa::<Test>::insert(n0, not_default); + Kappa::<Test>::insert(n1, default); + Kappa::<Test>::insert(n2, not_default); + + assert_eq!( + Kappa::<Test>::get(n0), + not_default, + "precondition failed: Kappa[n0] should be non-default before migration" + ); + assert_eq!( + Kappa::<Test>::get(n1), + default, + "precondition failed: Kappa[n1] should be default before migration" + ); + assert_eq!( + Kappa::<Test>::get(n2), + not_default, + "precondition failed: Kappa[n2] should be non-default before migration" + ); + + assert!( + !HasMigrationRun::<Test>::get(MIG_NAME.to_vec()), + "migration flag should be false before run" + ); + + // ------------------------------ + // 2. Run migration + // ------------------------------ + let w = + crate::migrations::migrate_kappa_map_to_default::migrate_kappa_map_to_default::<Test>(); + assert!(!w.is_zero(), "weight must be non-zero"); + + // ------------------------------ + // 3. Verify results + // ------------------------------ + assert!( + HasMigrationRun::<Test>::get(MIG_NAME.to_vec()), + "migration flag not set" + ); + + assert_eq!( + Kappa::<Test>::get(n0), + default, + "Kappa[n0] should be reset to the configured default" + ); + assert_eq!( + Kappa::<Test>::get(n1), + default, + "Kappa[n1] should remain at the configured default" + ); + assert_eq!( + Kappa::<Test>::get(n2), + default, + "Kappa[n2] should be reset to the configured default" + ); + }); +} + +#[test] +fn test_migrate_remove_tao_dividends() { + const MIGRATION_NAME: &str = "migrate_remove_tao_dividends"; + let pallet_name = "SubtensorModule"; + let storage_name = "TaoDividendsPerSubnet"; + let migration = + crate::migrations::migrate_remove_tao_dividends::migrate_remove_tao_dividends::<Test>; + + test_remove_storage_item( + MIGRATION_NAME, + pallet_name, + storage_name, + migration, + 200_000, + ); + + let storage_name = "PendingAlphaSwapped"; + test_remove_storage_item( + MIGRATION_NAME, + pallet_name, + storage_name, + migration, + 200_000, + ); + + let storage_name = "PendingRootDivs"; + test_remove_storage_item( + MIGRATION_NAME, + pallet_name, + storage_name, + migration, + 200_000, + ); +} diff --git a/pallets/subtensor/src/tests/mock.rs b/pallets/subtensor/src/tests/mock.rs index 219e210d07..9d028d76ab 100644 --- a/pallets/subtensor/src/tests/mock.rs +++ b/pallets/subtensor/src/tests/mock.rs @@ -1,10 +1,13 @@ -#![allow(clippy::arithmetic_side_effects, clippy::unwrap_used)] +#![allow( + clippy::arithmetic_side_effects, + clippy::expect_used, + clippy::unwrap_used +)] use core::num::NonZeroU64; use crate::utils::rate_limiting::TransactionType; use crate::*; -use frame_support::dispatch::DispatchResultWithPostInfo; use frame_support::traits::{Contains, Everything, InherentBuilder, InsideBoth}; use frame_support::weights::Weight; use frame_support::weights::constants::RocksDbWeight; @@ -14,9 +17,7 @@ use frame_support::{ traits::{Hooks, PrivilegeCmp}, }; use frame_system as system; -use frame_system::{EnsureNever, EnsureRoot, RawOrigin, limits, offchain::CreateTransactionBase}; -use pallet_subtensor_collective as pallet_collective; -use pallet_subtensor_collective::MemberCount; +use frame_system::{EnsureRoot, RawOrigin, limits, offchain::CreateTransactionBase}; use pallet_subtensor_utility as pallet_utility; use sp_core::{ConstU64, Get, H256, U256, offchain::KeyTypeId}; use sp_runtime::Perbill; @@ -24,9 +25,11 @@ use sp_runtime::{ BuildStorage, Percent, traits::{BlakeTwo256,
IdentityLookup}, }; -use sp_std::{cell::RefCell, cmp::Ordering}; +use sp_std::{cell::RefCell, cmp::Ordering, sync::OnceLock}; +use sp_tracing::tracing_subscriber; use subtensor_runtime_common::{NetUid, TaoCurrency}; -use subtensor_swap_interface::{OrderType, SwapHandler}; +use subtensor_swap_interface::{Order, SwapHandler}; +use tracing_subscriber::{EnvFilter, layer::SubscriberExt, util::SubscriberInitExt}; type Block = frame_system::mocking::MockBlock; @@ -36,10 +39,6 @@ frame_support::construct_runtime!( { System: frame_system::{Pallet, Call, Config, Storage, Event} = 1, Balances: pallet_balances::{Pallet, Call, Config, Storage, Event} = 2, - Triumvirate: pallet_collective::::{Pallet, Call, Storage, Origin, Event, Config} = 3, - TriumvirateMembers: pallet_membership::::{Pallet, Call, Storage, Event, Config} = 4, - Senate: pallet_collective::::{Pallet, Call, Storage, Origin, Event, Config} = 5, - SenateMembers: pallet_membership::::{Pallet, Call, Storage, Event, Config} = 6, SubtensorModule: crate::{Pallet, Call, Storage, Event} = 7, Utility: pallet_utility::{Pallet, Call, Storage, Event} = 8, Scheduler: pallet_scheduler::{Pallet, Call, Storage, Event} = 9, @@ -152,7 +151,6 @@ parameter_types! { parameter_types! { pub const InitialMinAllowedWeights: u16 = 0; pub const InitialEmissionValue: u16 = 0; - pub const InitialMaxWeightsLimit: u16 = u16::MAX; pub BlockWeights: limits::BlockWeights = limits::BlockWeights::with_sensible_defaults( Weight::from_parts(2_000_000_000_000, u64::MAX), Perbill::from_percent(75), @@ -203,7 +201,6 @@ parameter_types! { pub const InitialMinDifficulty: u64 = 1; pub const InitialMaxDifficulty: u64 = u64::MAX; pub const InitialRAORecycledForRegistration: u64 = 0; - pub const InitialSenateRequiredStakePercentage: u64 = 2; // 2 percent of total stake pub const InitialNetworkImmunityPeriod: u64 = 1_296_000; pub const InitialNetworkMinLockCost: u64 = 100_000_000_000; pub const InitialSubnetOwnerCut: u16 = 0; // 0%. 100% of rewards go to validators + miners. @@ -229,175 +226,14 @@ parameter_types! { pub const EvmKeyAssociateRateLimit: u64 = 10; } -// Configure collective pallet for council -parameter_types! { - pub const CouncilMotionDuration: BlockNumber = 100; - pub const CouncilMaxProposals: u32 = 10; - pub const CouncilMaxMembers: u32 = 3; -} - -// Configure collective pallet for Senate -parameter_types! 
{ - pub const SenateMaxMembers: u32 = 12; -} - -use pallet_collective::{CanPropose, CanVote, GetVotingMembers}; -pub struct CanProposeToTriumvirate; -impl CanPropose for CanProposeToTriumvirate { - fn can_propose(account: &AccountId) -> bool { - Triumvirate::is_member(account) - } -} - -pub struct CanVoteToTriumvirate; -impl CanVote for CanVoteToTriumvirate { - fn can_vote(_: &AccountId) -> bool { - //Senate::is_member(account) - false // Disable voting from pallet_collective::vote - } -} - -use crate::{CollectiveInterface, MemberManagement, StakeThreshold}; -pub struct ManageSenateMembers; -impl MemberManagement for ManageSenateMembers { - fn add_member(account: &AccountId) -> DispatchResultWithPostInfo { - let who = *account; - SenateMembers::add_member(RawOrigin::Root.into(), who) - } - - fn remove_member(account: &AccountId) -> DispatchResultWithPostInfo { - let who = *account; - SenateMembers::remove_member(RawOrigin::Root.into(), who) - } - - fn swap_member(rm: &AccountId, add: &AccountId) -> DispatchResultWithPostInfo { - let remove = *rm; - let add = *add; - - Triumvirate::remove_votes(rm)?; - SenateMembers::swap_member(RawOrigin::Root.into(), remove, add) - } - - fn is_member(account: &AccountId) -> bool { - SenateMembers::members().contains(account) - } - - fn members() -> Vec { - SenateMembers::members().into() - } - - fn max_members() -> u32 { - SenateMaxMembers::get() - } -} - -pub struct GetSenateMemberCount; -impl GetVotingMembers for GetSenateMemberCount { - fn get_count() -> MemberCount { - Senate::members().len() as u32 - } -} -impl Get for GetSenateMemberCount { - fn get() -> MemberCount { - SenateMaxMembers::get() - } -} - -pub struct TriumvirateVotes; -impl CollectiveInterface for TriumvirateVotes { - fn remove_votes(hotkey: &AccountId) -> Result { - Triumvirate::remove_votes(hotkey) - } - - fn add_vote( - hotkey: &AccountId, - proposal: H256, - index: u32, - approve: bool, - ) -> Result { - Triumvirate::do_vote(*hotkey, proposal, index, approve) - } -} - -// We call pallet_collective TriumvirateCollective -#[allow(dead_code)] -type TriumvirateCollective = pallet_collective::Instance1; -impl pallet_collective::Config for Test { - type RuntimeOrigin = RuntimeOrigin; - type Proposal = RuntimeCall; - type MotionDuration = CouncilMotionDuration; - type MaxProposals = CouncilMaxProposals; - type MaxMembers = GetSenateMemberCount; - type DefaultVote = pallet_collective::PrimeDefaultVote; - type WeightInfo = pallet_collective::weights::SubstrateWeight; - type SetMembersOrigin = EnsureNever; - type CanPropose = CanProposeToTriumvirate; - type CanVote = CanVoteToTriumvirate; - type GetVotingMembers = GetSenateMemberCount; -} - -// We call council members Triumvirate -#[allow(dead_code)] -type TriumvirateMembership = pallet_membership::Instance1; -impl pallet_membership::Config for Test { - type RuntimeEvent = RuntimeEvent; - type AddOrigin = EnsureRoot; - type RemoveOrigin = EnsureRoot; - type SwapOrigin = EnsureRoot; - type ResetOrigin = EnsureRoot; - type PrimeOrigin = EnsureRoot; - type MembershipInitialized = Triumvirate; - type MembershipChanged = Triumvirate; - type MaxMembers = CouncilMaxMembers; - type WeightInfo = pallet_membership::weights::SubstrateWeight; -} - -// This is a dummy collective instance for managing senate members -// Probably not the best solution, but fastest implementation -#[allow(dead_code)] -type SenateCollective = pallet_collective::Instance2; -impl pallet_collective::Config for Test { - type RuntimeOrigin = RuntimeOrigin; - type Proposal = 
RuntimeCall; - type MotionDuration = CouncilMotionDuration; - type MaxProposals = CouncilMaxProposals; - type MaxMembers = SenateMaxMembers; - type DefaultVote = pallet_collective::PrimeDefaultVote; - type WeightInfo = pallet_collective::weights::SubstrateWeight; - type SetMembersOrigin = EnsureNever; - type CanPropose = (); - type CanVote = (); - type GetVotingMembers = (); -} - -// We call our top K delegates membership Senate -#[allow(dead_code)] -type SenateMembership = pallet_membership::Instance2; -impl pallet_membership::Config for Test { - type RuntimeEvent = RuntimeEvent; - type AddOrigin = EnsureRoot; - type RemoveOrigin = EnsureRoot; - type SwapOrigin = EnsureRoot; - type ResetOrigin = EnsureRoot; - type PrimeOrigin = EnsureRoot; - type MembershipInitialized = Senate; - type MembershipChanged = Senate; - type MaxMembers = SenateMaxMembers; - type WeightInfo = pallet_membership::weights::SubstrateWeight; -} - impl crate::Config for Test { type RuntimeCall = RuntimeCall; type Currency = Balances; type InitialIssuance = InitialIssuance; type SudoRuntimeCall = TestRuntimeCall; - type CouncilOrigin = frame_system::EnsureSigned; - type SenateMembers = ManageSenateMembers; - type TriumvirateInterface = TriumvirateVotes; type Scheduler = Scheduler; type InitialMinAllowedWeights = InitialMinAllowedWeights; type InitialEmissionValue = InitialEmissionValue; - type InitialMaxWeightsLimit = InitialMaxWeightsLimit; type InitialTempo = InitialTempo; type InitialDifficulty = InitialDifficulty; type InitialAdjustmentInterval = InitialAdjustmentInterval; @@ -436,7 +272,6 @@ impl crate::Config for Test { type MinBurnUpperBound = MinBurnUpperBound; type MaxBurnLowerBound = MaxBurnLowerBound; type InitialRAORecycledForRegistration = InitialRAORecycledForRegistration; - type InitialSenateRequiredStakePercentage = InitialSenateRequiredStakePercentage; type InitialNetworkImmunityPeriod = InitialNetworkImmunityPeriod; type InitialNetworkMinLockCost = InitialNetworkMinLockCost; type InitialSubnetOwnerCut = InitialSubnetOwnerCut; @@ -454,7 +289,7 @@ impl crate::Config for Test { type InitialTaoWeight = InitialTaoWeight; type InitialEmaPriceHalvingPeriod = InitialEmaPriceHalvingPeriod; type DurationOfStartCall = DurationOfStartCall; - type SwapInterface = Swap; + type SwapInterface = pallet_subtensor_swap::Pallet; type KeySwapOnSubnetCost = InitialKeySwapOnSubnetCost; type HotkeySwapOnSubnetInterval = HotkeySwapOnSubnetInterval; type ProxyInterface = FakeProxier; @@ -478,6 +313,8 @@ impl pallet_subtensor_swap::Config for Test { type SubnetInfo = SubtensorModule; type BalanceOps = SubtensorModule; type ProtocolId = SwapProtocolId; + type TaoReserve = TaoCurrencyReserve; + type AlphaReserve = AlphaCurrencyReserve; type MaxFeeRate = SwapMaxFeeRate; type MaxPositions = SwapMaxPositions; type MinimumLiquidity = SwapMinimumLiquidity; @@ -669,10 +506,38 @@ where } } +static TEST_LOGS_INIT: OnceLock<()> = OnceLock::new(); + +pub fn init_logs_for_tests() { + if TEST_LOGS_INIT.get().is_some() { + return; + } + + // RUST_LOG (full syntax) or "off" if unset + let filter = EnvFilter::try_from_default_env().unwrap_or_else(|_| EnvFilter::new("off")); + + // Bridge log -> tracing (ok if already set) + let _ = tracing_log::LogTracer::init(); + + // Simple formatter + let fmt_layer = tracing_subscriber::fmt::layer() + .with_ansi(false) + .with_target(true) + .with_level(true) + .without_time(); + + let _ = tracing_subscriber::registry() + .with(filter) + .with(fmt_layer) + .try_init(); + + let _ = TEST_LOGS_INIT.set(()); +} 
+ #[allow(dead_code)] // Build genesis storage according to the mock runtime. pub fn new_test_ext(block_number: BlockNumber) -> sp_io::TestExternalities { - sp_tracing::try_init_simple(); + init_logs_for_tests(); let t = frame_system::GenesisConfig::::default() .build_storage() .unwrap(); @@ -683,7 +548,7 @@ pub fn new_test_ext(block_number: BlockNumber) -> sp_io::TestExternalities { #[allow(dead_code)] pub fn test_ext_with_balances(balances: Vec<(U256, u128)>) -> sp_io::TestExternalities { - sp_tracing::try_init_simple(); + init_logs_for_tests(); let mut t = frame_system::GenesisConfig::::default() .build_storage() .unwrap(); @@ -973,7 +838,7 @@ pub fn increase_stake_on_coldkey_hotkey_account( coldkey, netuid, tao_staked, - ::SwapInterface::max_price().into(), + ::SwapInterface::max_price(), false, false, ) @@ -1009,10 +874,10 @@ pub(crate) fn swap_tao_to_alpha(netuid: NetUid, tao: TaoCurrency) -> (AlphaCurre return (tao.to_u64().into(), 0); } + let order = GetAlphaForTao::::with_amount(tao); let result = ::SwapInterface::swap( netuid.into(), - OrderType::Buy, - tao.into(), + order, ::SwapInterface::max_price(), false, true, @@ -1022,10 +887,10 @@ pub(crate) fn swap_tao_to_alpha(netuid: NetUid, tao: TaoCurrency) -> (AlphaCurre let result = result.unwrap(); - // we don't want to have silent 0 comparissons in tests - assert!(result.amount_paid_out > 0); + // we don't want to have silent 0 comparisons in tests + assert!(result.amount_paid_out > AlphaCurrency::ZERO); - (result.amount_paid_out.into(), result.fee_paid) + (result.amount_paid_out, result.fee_paid.into()) } pub(crate) fn swap_alpha_to_tao_ext( @@ -1039,13 +904,13 @@ pub(crate) fn swap_alpha_to_tao_ext( println!( "::SwapInterface::min_price() = {:?}", - ::SwapInterface::min_price() + ::SwapInterface::min_price::() ); + let order = GetTaoForAlpha::::with_amount(alpha); let result = ::SwapInterface::swap( netuid.into(), - OrderType::Sell, - alpha.into(), + order, ::SwapInterface::min_price(), drop_fees, true, @@ -1055,10 +920,10 @@ pub(crate) fn swap_alpha_to_tao_ext( let result = result.unwrap(); - // we don't want to have silent 0 comparissons in tests - assert!(result.amount_paid_out > 0); + // we don't want to have silent 0 comparisons in tests + assert!(!result.amount_paid_out.is_zero()); - (result.amount_paid_out.into(), result.fee_paid) + (result.amount_paid_out, result.fee_paid.into()) } pub(crate) fn swap_alpha_to_tao(netuid: NetUid, alpha: AlphaCurrency) -> (TaoCurrency, u64) { diff --git a/pallets/subtensor/src/tests/mod.rs b/pallets/subtensor/src/tests/mod.rs index a0105a6ffe..bbaf25af58 100644 --- a/pallets/subtensor/src/tests/mod.rs +++ b/pallets/subtensor/src/tests/mod.rs @@ -1,6 +1,7 @@ mod auto_stake_hotkey; mod batch_tx; mod children; +mod claim_root; mod coinbase; mod consensus; mod delegate_info; @@ -8,6 +9,7 @@ mod difficulty; mod emission; mod ensure; mod epoch; +mod epoch_logs; mod evm; mod leasing; mod math; @@ -19,11 +21,11 @@ mod networks; mod neuron_info; mod recycle_alpha; mod registration; -mod senate; mod serving; mod staking; mod staking2; mod subnet; +mod subnet_emissions; mod swap_coldkey; mod swap_hotkey; mod swap_hotkey_with_subnet; diff --git a/pallets/subtensor/src/tests/move_stake.rs b/pallets/subtensor/src/tests/move_stake.rs index e49903aa86..dfd9927da4 100644 --- a/pallets/subtensor/src/tests/move_stake.rs +++ b/pallets/subtensor/src/tests/move_stake.rs @@ -33,7 +33,7 @@ fn test_do_move_success() { &coldkey, netuid.into(), stake_amount, - ::SwapInterface::max_price().into(), + 
::SwapInterface::max_price(), false, false, ) @@ -110,7 +110,7 @@ fn test_do_move_different_subnets() { &coldkey, origin_netuid, stake_amount.into(), - ::SwapInterface::max_price().into(), + ::SwapInterface::max_price(), false, false, ) @@ -140,17 +140,15 @@ fn test_do_move_different_subnets() { ), AlphaCurrency::ZERO ); - let fee = ::SwapInterface::approx_fee_amount( - destination_netuid.into(), - alpha.into(), - ); + let fee = + ::SwapInterface::approx_fee_amount(destination_netuid.into(), alpha); assert_abs_diff_eq!( SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet( &destination_hotkey, &coldkey, destination_netuid ), - alpha - fee.into(), + alpha - fee, epsilon = alpha / 1000.into() ); }); @@ -180,7 +178,7 @@ fn test_do_move_nonexistent_subnet() { &coldkey, origin_netuid, stake_amount.into(), - ::SwapInterface::max_price().into(), + ::SwapInterface::max_price(), false, false, ) @@ -284,7 +282,7 @@ fn test_do_move_nonexistent_destination_hotkey() { &coldkey, netuid, stake_amount.into(), - ::SwapInterface::max_price().into(), + ::SwapInterface::max_price(), false, false, ) @@ -349,7 +347,7 @@ fn test_do_move_partial_stake() { &coldkey, netuid, total_stake.into(), - ::SwapInterface::max_price().into(), + ::SwapInterface::max_price(), false, false, ) @@ -418,7 +416,7 @@ fn test_do_move_multiple_times() { &coldkey, netuid, initial_stake.into(), - ::SwapInterface::max_price().into(), + ::SwapInterface::max_price(), false, false, ) @@ -490,7 +488,7 @@ fn test_do_move_wrong_origin() { &coldkey, netuid, stake_amount.into(), - ::SwapInterface::max_price().into(), + ::SwapInterface::max_price(), false, false, ) @@ -557,7 +555,7 @@ fn test_do_move_same_hotkey_fails() { &coldkey, netuid, stake_amount.into(), - ::SwapInterface::max_price().into(), + ::SwapInterface::max_price(), false, false, ) @@ -608,7 +606,7 @@ fn test_do_move_event_emission() { &coldkey, netuid, stake_amount.into(), - ::SwapInterface::max_price().into(), + ::SwapInterface::max_price(), false, false, ) @@ -669,7 +667,7 @@ fn test_do_move_storage_updates() { &coldkey, origin_netuid, stake_amount.into(), - ::SwapInterface::max_price().into(), + ::SwapInterface::max_price(), false, false, ) @@ -736,7 +734,7 @@ fn test_move_full_amount_same_netuid() { &coldkey, netuid, stake_amount.into(), - ::SwapInterface::max_price().into(), + ::SwapInterface::max_price(), false, false, ) @@ -804,7 +802,7 @@ fn test_do_move_max_values() { &coldkey, netuid, max_stake.into(), - ::SwapInterface::max_price().into(), + ::SwapInterface::max_price(), false, false, ) @@ -910,7 +908,7 @@ fn test_do_transfer_success() { &origin_coldkey, netuid, stake_amount.into(), - ::SwapInterface::max_price().into(), + ::SwapInterface::max_price(), false, false, ) @@ -1019,7 +1017,7 @@ fn test_do_transfer_insufficient_stake() { &origin_coldkey, netuid, stake_amount.into(), - ::SwapInterface::max_price().into(), + ::SwapInterface::max_price(), false, false, ) @@ -1060,7 +1058,7 @@ fn test_do_transfer_wrong_origin() { &origin_coldkey, netuid, stake_amount.into(), - ::SwapInterface::max_price().into(), + ::SwapInterface::max_price(), false, false, ) @@ -1098,7 +1096,7 @@ fn test_do_transfer_minimum_stake_check() { &origin_coldkey, netuid, stake_amount, - ::SwapInterface::max_price().into(), + ::SwapInterface::max_price(), false, false, ) @@ -1146,7 +1144,7 @@ fn test_do_transfer_different_subnets() { &origin_coldkey, origin_netuid, stake_amount.into(), - ::SwapInterface::max_price().into(), + ::SwapInterface::max_price(), false, false, ) @@ -1212,7 +1210,7 @@ fn 
test_do_swap_success() { &coldkey, origin_netuid, stake_amount.into(), - ::SwapInterface::max_price().into(), + ::SwapInterface::max_price(), false, false, ) @@ -1320,7 +1318,7 @@ fn test_do_swap_insufficient_stake() { &coldkey, netuid1, stake_amount.into(), - ::SwapInterface::max_price().into(), + ::SwapInterface::max_price(), false, false, ) @@ -1355,7 +1353,7 @@ fn test_do_swap_wrong_origin() { &real_coldkey, netuid1, stake_amount.into(), - ::SwapInterface::max_price().into(), + ::SwapInterface::max_price(), false, false, ) @@ -1393,7 +1391,7 @@ fn test_do_swap_minimum_stake_check() { &coldkey, netuid1, total_stake, - ::SwapInterface::max_price().into(), + ::SwapInterface::max_price(), false, false, ) @@ -1429,7 +1427,7 @@ fn test_do_swap_same_subnet() { &coldkey, netuid, stake_amount.into(), - ::SwapInterface::max_price().into(), + ::SwapInterface::max_price(), false, false, ) @@ -1474,7 +1472,7 @@ fn test_do_swap_partial_stake() { &coldkey, origin_netuid, total_stake_tao.into(), - ::SwapInterface::max_price().into(), + ::SwapInterface::max_price(), false, false, ) @@ -1526,7 +1524,7 @@ fn test_do_swap_storage_updates() { &coldkey, origin_netuid, stake_amount.into(), - ::SwapInterface::max_price().into(), + ::SwapInterface::max_price(), false, false, ) @@ -1586,7 +1584,7 @@ fn test_do_swap_multiple_times() { &coldkey, netuid1, initial_stake.into(), - ::SwapInterface::max_price().into(), + ::SwapInterface::max_price(), false, false, ) @@ -1657,7 +1655,7 @@ fn test_do_swap_allows_non_owned_hotkey() { &coldkey, origin_netuid, stake_amount.into(), - ::SwapInterface::max_price().into(), + ::SwapInterface::max_price(), false, false, ) @@ -1805,7 +1803,7 @@ fn test_transfer_stake_rate_limited() { &origin_coldkey, netuid, stake_amount.into(), - ::SwapInterface::max_price().into(), + ::SwapInterface::max_price(), true, false, ) @@ -1850,7 +1848,7 @@ fn test_transfer_stake_doesnt_limit_destination_coldkey() { &origin_coldkey, netuid, stake_amount.into(), - ::SwapInterface::max_price().into(), + ::SwapInterface::max_price(), false, false, ) @@ -1896,7 +1894,7 @@ fn test_swap_stake_limits_destination_netuid() { &origin_coldkey, netuid, stake_amount.into(), - ::SwapInterface::max_price().into(), + ::SwapInterface::max_price(), false, false, ) diff --git a/pallets/subtensor/src/tests/networks.rs b/pallets/subtensor/src/tests/networks.rs index e24c1a53c7..0449c67f86 100644 --- a/pallets/subtensor/src/tests/networks.rs +++ b/pallets/subtensor/src/tests/networks.rs @@ -1,13 +1,15 @@ +#![allow(clippy::expect_used)] + use super::mock::*; use crate::migrations::migrate_network_immunity_period; use crate::*; use frame_support::{assert_err, assert_ok}; use frame_system::Config; use sp_core::U256; -use sp_std::collections::btree_map::BTreeMap; +use sp_std::collections::{btree_map::BTreeMap, vec_deque::VecDeque}; use substrate_fixed::types::{I96F32, U64F64, U96F32}; -use subtensor_runtime_common::{NetUidStorageIndex, TaoCurrency}; -use subtensor_swap_interface::{OrderType, SwapHandler}; +use subtensor_runtime_common::{MechId, NetUidStorageIndex, TaoCurrency}; +use subtensor_swap_interface::{Order, SwapHandler}; #[test] fn test_registration_ok() { @@ -325,7 +327,6 @@ fn dissolve_clears_all_per_subnet_storages() { MaxAllowedUids::::insert(net, 1u16); ImmunityPeriod::::insert(net, 1u16); ActivityCutoff::::insert(net, 1u16); - MaxWeightsLimit::::insert(net, 1u16); MinAllowedWeights::::insert(net, 1u16); RegistrationsThisInterval::::insert(net, 1u16); @@ -368,8 +369,7 @@ fn 
dissolve_clears_all_per_subnet_storages() { NetworkRegistrationAllowed::::insert(net, true); NetworkPowRegistrationAllowed::::insert(net, true); PendingEmission::::insert(net, AlphaCurrency::from(1)); - PendingRootDivs::::insert(net, TaoCurrency::from(1)); - PendingAlphaSwapped::::insert(net, AlphaCurrency::from(1)); + PendingRootAlphaDivs::::insert(net, AlphaCurrency::from(1)); PendingOwnerCut::::insert(net, AlphaCurrency::from(1)); BlocksSinceLastStep::::insert(net, 1u64); LastMechansimStepBlock::::insert(net, 1u64); @@ -417,7 +417,6 @@ fn dissolve_clears_all_per_subnet_storages() { // Per‑subnet dividends AlphaDividendsPerSubnet::::insert(net, owner_hot, AlphaCurrency::from(1)); - TaoDividendsPerSubnet::::insert(net, owner_hot, TaoCurrency::from(1)); // Parent/child topology + takes ChildkeyTake::::insert(owner_hot, net, 1u16); @@ -477,7 +476,6 @@ fn dissolve_clears_all_per_subnet_storages() { assert!(!MaxAllowedUids::::contains_key(net)); assert!(!ImmunityPeriod::::contains_key(net)); assert!(!ActivityCutoff::::contains_key(net)); - assert!(!MaxWeightsLimit::::contains_key(net)); assert!(!MinAllowedWeights::::contains_key(net)); assert!(!RegistrationsThisInterval::::contains_key(net)); @@ -526,8 +524,7 @@ fn dissolve_clears_all_per_subnet_storages() { assert!(!NetworkRegistrationAllowed::::contains_key(net)); assert!(!NetworkPowRegistrationAllowed::::contains_key(net)); assert!(!PendingEmission::::contains_key(net)); - assert!(!PendingRootDivs::::contains_key(net)); - assert!(!PendingAlphaSwapped::::contains_key(net)); + assert!(!PendingRootAlphaDivs::::contains_key(net)); assert!(!PendingOwnerCut::::contains_key(net)); assert!(!BlocksSinceLastStep::::contains_key(net)); assert!(!LastMechansimStepBlock::::contains_key(net)); @@ -577,7 +574,6 @@ fn dissolve_clears_all_per_subnet_storages() { assert!(!AlphaDividendsPerSubnet::::contains_key( net, owner_hot )); - assert!(!TaoDividendsPerSubnet::::contains_key(net, owner_hot)); // Parent/child topology + takes assert!(!ChildkeyTake::::contains_key(owner_hot, net)); @@ -804,9 +800,9 @@ fn destroy_alpha_out_many_stakers_complex_distribution() { let min_stake = DefaultMinStake::::get(); let fee = ::SwapInterface::approx_fee_amount( netuid.into(), - min_stake.into(), + min_stake, ); - min_stake.saturating_add(fee.into()) + min_stake.saturating_add(fee) }; const N: usize = 20; @@ -889,23 +885,22 @@ fn destroy_alpha_out_many_stakers_complex_distribution() { .floor() .saturating_to_num::(); - let owner_emission_tao_u64: u64 = ::SwapInterface::sim_swap( - netuid.into(), - OrderType::Sell, - owner_alpha_u64, - ) - .map(|res| res.amount_paid_out) - .unwrap_or_else(|_| { - // Fallback matches the pallet's fallback - let price: U96F32 = - ::SwapInterface::current_alpha_price(netuid.into()); - U96F32::from_num(owner_alpha_u64) - .saturating_mul(price) - .floor() - .saturating_to_num::() - }); - - let expected_refund: u64 = lock.saturating_sub(owner_emission_tao_u64); + let order = GetTaoForAlpha::::with_amount(owner_alpha_u64); + let owner_emission_tao = + ::SwapInterface::sim_swap(netuid.into(), order) + .map(|res| res.amount_paid_out) + .unwrap_or_else(|_| { + // Fallback matches the pallet's fallback + let price: U96F32 = + ::SwapInterface::current_alpha_price(netuid.into()); + U96F32::from_num(owner_alpha_u64) + .saturating_mul(price) + .floor() + .saturating_to_num::() + .into() + }); + + let expected_refund = lock.saturating_sub(owner_emission_tao.to_u64()); // ── 6) run distribution (credits τ to coldkeys, wipes α state) ───── 
assert_ok!(SubtensorModule::destroy_alpha_in_out_stakes(netuid)); @@ -968,20 +963,18 @@ fn destroy_alpha_out_refund_gating_by_registration_block() { .saturating_to_num::(); // Prefer sim_swap; fall back to current price if unavailable. - let owner_emission_tao_u64: u64 = ::SwapInterface::sim_swap( - netuid.into(), - OrderType::Sell, - owner_alpha_u64, - ) - .map(|res| res.amount_paid_out) - .unwrap_or_else(|_| { - let price: U96F32 = - ::SwapInterface::current_alpha_price(netuid.into()); - U96F32::from_num(owner_alpha_u64) - .saturating_mul(price) - .floor() - .saturating_to_num::() - }); + let order = GetTaoForAlpha::::with_amount(owner_alpha_u64); + let owner_emission_tao_u64 = + ::SwapInterface::sim_swap(netuid.into(), order) + .map(|res| res.amount_paid_out.to_u64()) + .unwrap_or_else(|_| { + let price: U96F32 = + ::SwapInterface::current_alpha_price(netuid.into()); + U96F32::from_num(owner_alpha_u64) + .saturating_mul(price) + .floor() + .saturating_to_num::() + }); let expected_refund: u64 = lock_u64.saturating_sub(owner_emission_tao_u64); @@ -2134,19 +2127,17 @@ fn massive_dissolve_refund_and_reregistration_flow_is_lossless_and_cleans_state( pallet_subtensor_swap::AlphaSqrtPrice::::set(net_new, sqrt1); // Compute the exact min stake per the pallet rule: DefaultMinStake + fee(DefaultMinStake). - let min_stake_u64: u64 = DefaultMinStake::::get().into(); - let fee_for_min: u64 = pallet_subtensor_swap::Pallet::::sim_swap( + let min_stake = DefaultMinStake::::get(); + let order = GetAlphaForTao::::with_amount(min_stake); + let fee_for_min = pallet_subtensor_swap::Pallet::::sim_swap( net_new, - subtensor_swap_interface::OrderType::Buy, - min_stake_u64, + order, ) .map(|r| r.fee_paid) .unwrap_or_else(|_e| { - as subtensor_swap_interface::SwapHandler< - ::AccountId, - >>::approx_fee_amount(net_new, min_stake_u64) + as subtensor_swap_interface::SwapHandler>::approx_fee_amount(net_new, min_stake) }); - let min_amount_required: u64 = min_stake_u64.saturating_add(fee_for_min); + let min_amount_required = min_stake.saturating_add(fee_for_min).to_u64(); // Re‑stake from three coldkeys; choose a specific DISTINCT hotkey per cold. for &cold in &cold_lps[0..3] { @@ -2157,10 +2148,10 @@ fn massive_dissolve_refund_and_reregistration_flow_is_lossless_and_cleans_state( let a_prev: u64 = Alpha::::get((hot1, cold, net_new)).saturating_to_num(); // Expected α for this exact τ, using the same sim path as the pallet. - let expected_alpha_out: u64 = pallet_subtensor_swap::Pallet::::sim_swap( + let order = GetAlphaForTao::::with_amount(min_amount_required); + let expected_alpha_out = pallet_subtensor_swap::Pallet::::sim_swap( net_new, - subtensor_swap_interface::OrderType::Buy, - min_amount_required, + order, ) .map(|r| r.amount_paid_out) .expect("sim_swap must succeed for fresh net and min amount"); @@ -2185,7 +2176,7 @@ fn massive_dissolve_refund_and_reregistration_flow_is_lossless_and_cleans_state( // α minted equals the simulated swap’s net out for that same τ. assert_eq!( - a_delta, expected_alpha_out, + a_delta, expected_alpha_out.to_u64(), "α minted mismatch for cold {cold:?} (hot {hot1:?}) on new net (αΔ {a_delta}, expected {expected_alpha_out})" ); } @@ -2201,3 +2192,105 @@ fn massive_dissolve_refund_and_reregistration_flow_is_lossless_and_cleans_state( ); }); } + +#[test] +fn dissolve_clears_all_mechanism_scoped_maps_for_all_mechanisms() { + new_test_ext(0).execute_with(|| { + // Create a subnet we can dissolve. 
+ let owner_cold = U256::from(123); + let owner_hot = U256::from(456); + let net = add_dynamic_network(&owner_hot, &owner_cold); + + // We'll use two mechanisms for this subnet. + MechanismCountCurrent::<Test>::insert(net, MechId::from(2)); + let m0 = MechId::from(0u8); + let m1 = MechId::from(1u8); + + let idx0 = SubtensorModule::get_mechanism_storage_index(net, m0); + let idx1 = SubtensorModule::get_mechanism_storage_index(net, m1); + + // Minimal content to ensure each storage actually has keys for BOTH mechanisms. + + // --- Weights (DMAP: (netuid_index, uid) -> Vec<(dest_uid, weight_u16)>) + Weights::<Test>::insert(idx0, 0u16, vec![(1u16, 1u16)]); + Weights::<Test>::insert(idx1, 0u16, vec![(2u16, 1u16)]); + + // --- Bonds (DMAP: (netuid_index, uid) -> Vec<(dest_uid, weight_u16)>) + Bonds::<Test>::insert(idx0, 0u16, vec![(1u16, 1u16)]); + Bonds::<Test>::insert(idx1, 0u16, vec![(2u16, 1u16)]); + + // --- TimelockedWeightCommits (DMAP: (netuid_index, epoch) -> VecDeque<...>) + let hotkey = U256::from(1); + TimelockedWeightCommits::<Test>::insert( + idx0, + 1u64, + VecDeque::from([(hotkey, 1u64, Default::default(), Default::default())]), + ); + TimelockedWeightCommits::<Test>::insert( + idx1, + 2u64, + VecDeque::from([(hotkey, 2u64, Default::default(), Default::default())]), + ); + + // --- Incentive (MAP: netuid_index -> Vec<u16>) + Incentive::<Test>::insert(idx0, vec![1u16, 2u16]); + Incentive::<Test>::insert(idx1, vec![3u16, 4u16]); + + // --- LastUpdate (MAP: netuid_index -> Vec<u64>) + LastUpdate::<Test>::insert(idx0, vec![42u64]); + LastUpdate::<Test>::insert(idx1, vec![84u64]); + + // Sanity: keys are present before dissolve. + assert!(Weights::<Test>::contains_key(idx0, 0u16)); + assert!(Weights::<Test>::contains_key(idx1, 0u16)); + assert!(Bonds::<Test>::contains_key(idx0, 0u16)); + assert!(Bonds::<Test>::contains_key(idx1, 0u16)); + assert!(TimelockedWeightCommits::<Test>::contains_key(idx0, 1u64)); + assert!(TimelockedWeightCommits::<Test>::contains_key(idx1, 2u64)); + assert!(Incentive::<Test>::contains_key(idx0)); + assert!(Incentive::<Test>::contains_key(idx1)); + assert!(LastUpdate::<Test>::contains_key(idx0)); + assert!(LastUpdate::<Test>::contains_key(idx1)); + assert!(MechanismCountCurrent::<Test>::contains_key(net)); + + // --- Dissolve the subnet --- + assert_ok!(SubtensorModule::do_dissolve_network(net)); + + // After dissolve, ALL mechanism-scoped items must be cleared for ALL mechanisms. + + // Weights/Bonds double-maps should have no entries under either index. + assert!(Weights::<Test>::iter_prefix(idx0).next().is_none()); + assert!(Weights::<Test>::iter_prefix(idx1).next().is_none()); + assert!(Bonds::<Test>::iter_prefix(idx0).next().is_none()); + assert!(Bonds::<Test>::iter_prefix(idx1).next().is_none()); + + // WeightCommits (OptionQuery) should have no keys remaining. + assert!(WeightCommits::<Test>::iter_prefix(idx0).next().is_none()); + assert!(WeightCommits::<Test>::iter_prefix(idx1).next().is_none()); + assert!(!WeightCommits::<Test>::contains_key(idx0, owner_hot)); + assert!(!WeightCommits::<Test>::contains_key(idx1, owner_cold)); + + // TimelockedWeightCommits (ValueQuery) — ensure both prefix spaces empty and keys gone. + assert!( + TimelockedWeightCommits::<Test>::iter_prefix(idx0) + .next() + .is_none() + ); + assert!( + TimelockedWeightCommits::<Test>::iter_prefix(idx1) + .next() + .is_none() + ); + assert!(!TimelockedWeightCommits::<Test>::contains_key(idx0, 1u64)); + assert!(!TimelockedWeightCommits::<Test>::contains_key(idx1, 2u64)); + + // Single-map per-mechanism vectors cleared.
+ assert!(!Incentive::::contains_key(idx0)); + assert!(!Incentive::::contains_key(idx1)); + assert!(!LastUpdate::::contains_key(idx0)); + assert!(!LastUpdate::::contains_key(idx1)); + + // MechanismCountCurrent cleared + assert!(!MechanismCountCurrent::::contains_key(net)); + }); +} diff --git a/pallets/subtensor/src/tests/senate.rs b/pallets/subtensor/src/tests/senate.rs deleted file mode 100644 index 23d3552240..0000000000 --- a/pallets/subtensor/src/tests/senate.rs +++ /dev/null @@ -1,895 +0,0 @@ -#![allow(clippy::unwrap_used)] - -use approx::assert_abs_diff_eq; -use codec::Encode; -use frame_support::{assert_noop, assert_ok}; -use frame_system::Config; -use frame_system::pallet_prelude::*; -use frame_system::{EventRecord, Phase}; -use pallet_subtensor_collective as pallet_collective; -use pallet_subtensor_collective::Event as CollectiveEvent; -use sp_core::{Get, H256, U256, bounded_vec}; -use sp_runtime::{ - BuildStorage, - traits::{BlakeTwo256, Hash}, -}; -use subtensor_runtime_common::TaoCurrency; -use subtensor_swap_interface::SwapHandler; - -use super::mock; -use super::mock::*; -use crate::Delegates; -use crate::Error; -use crate::migrations; -use crate::*; - -pub fn new_test_ext() -> sp_io::TestExternalities { - sp_tracing::try_init_simple(); - - let mut ext: sp_io::TestExternalities = RuntimeGenesisConfig { - senate_members: pallet_membership::GenesisConfig:: { - members: bounded_vec![1.into(), 2.into(), 3.into(), 4.into(), 5.into()], - phantom: Default::default(), - }, - triumvirate: pallet_collective::GenesisConfig:: { - members: vec![1.into()], - phantom: Default::default(), - }, - ..Default::default() - } - .build_storage() - .unwrap() - .into(); - - ext.execute_with(|| System::set_block_number(1)); - ext -} - -fn make_proposal(value: u64) -> RuntimeCall { - RuntimeCall::System(frame_system::Call::remark_with_event { - remark: value.to_be_bytes().to_vec(), - }) -} - -fn record(event: RuntimeEvent) -> EventRecord { - EventRecord { - phase: Phase::Initialization, - event, - topics: vec![], - } -} - -#[test] -fn test_senate_join_works() { - new_test_ext().execute_with(|| { - migrations::migrate_create_root_network::migrate_create_root_network::(); - - let netuid = NetUid::from(1); - let tempo: u16 = 13; - let hotkey_account_id = U256::from(6); - let burn_cost = 1000; - let coldkey_account_id = U256::from(667); // Neighbour of the beast, har har - let stake = DefaultMinStake::::get() * 100.into(); - - //add network - SubtensorModule::set_burn(netuid, burn_cost.into()); - add_network(netuid, tempo, 0); - // Give it some $$$ in his coldkey balance - SubtensorModule::add_balance_to_coldkey_account(&coldkey_account_id, 10000); - - let reserve = 1_000_000_000; - mock::setup_reserves(netuid, reserve.into(), reserve.into()); - - // Subscribe and check extrinsic output - assert_ok!(SubtensorModule::burned_register( - <::RuntimeOrigin>::signed(coldkey_account_id), - netuid, - hotkey_account_id - )); - // Check if balance has decreased to pay for the burn. - assert_eq!( - SubtensorModule::get_coldkey_balance(&coldkey_account_id), - (10000 - burn_cost) - ); // funds drained on reg. - // Check if neuron has added to the specified network(netuid) - assert_eq!(SubtensorModule::get_subnetwork_n(netuid), 1); - // Check if hotkey is added to the Hotkeys - assert_eq!( - SubtensorModule::get_owning_coldkey_for_hotkey(&hotkey_account_id), - coldkey_account_id - ); - - // Lets make this new key a delegate with a 10% take. 
- Delegates::::insert(hotkey_account_id, u16::MAX / 10); - - let staker_coldkey = U256::from(7); - SubtensorModule::add_balance_to_coldkey_account(&staker_coldkey, stake.into()); - - assert_ok!(SubtensorModule::add_stake( - <::RuntimeOrigin>::signed(staker_coldkey), - hotkey_account_id, - netuid, - stake - )); - - assert_abs_diff_eq!( - SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet( - &hotkey_account_id, - &staker_coldkey, - netuid - ), - AlphaCurrency::from(stake.to_u64()), - epsilon = 1.into() - ); - assert_abs_diff_eq!( - SubtensorModule::get_stake_for_hotkey_on_subnet(&hotkey_account_id, netuid), - AlphaCurrency::from(stake.to_u64()), - epsilon = 1.into() - ); - - assert_ok!(SubtensorModule::root_register( - <::RuntimeOrigin>::signed(coldkey_account_id), - hotkey_account_id - )); - assert!(Senate::is_member(&hotkey_account_id)); - }); -} - -#[test] -fn test_senate_vote_works() { - new_test_ext().execute_with(|| { - migrations::migrate_create_root_network::migrate_create_root_network::(); - - let netuid = NetUid::from(1); - let tempo: u16 = 13; - let senate_hotkey = U256::from(1); - let hotkey_account_id = U256::from(6); - let burn_cost = 1000; - let coldkey_account_id = U256::from(667); // Neighbour of the beast, har har - - //add network - SubtensorModule::set_burn(netuid, burn_cost.into()); - add_network(netuid, tempo, 0); - // Give it some $$$ in his coldkey balance - SubtensorModule::add_balance_to_coldkey_account(&coldkey_account_id, 10000); - - let reserve = 1_000_000_000_000; - mock::setup_reserves(netuid, reserve.into(), reserve.into()); - - // Subscribe and check extrinsic output - assert_ok!(SubtensorModule::burned_register( - <::RuntimeOrigin>::signed(coldkey_account_id), - netuid, - hotkey_account_id - )); - // Check if balance has decreased to pay for the burn. - assert_eq!( - SubtensorModule::get_coldkey_balance(&coldkey_account_id), - (10000 - burn_cost) - ); // funds drained on reg. - // Check if neuron has added to the specified network(netuid) - assert_eq!(SubtensorModule::get_subnetwork_n(netuid), 1); - // Check if hotkey is added to the Hotkeys - assert_eq!( - SubtensorModule::get_owning_coldkey_for_hotkey(&hotkey_account_id), - coldkey_account_id - ); - - // Lets make this new key a delegate with a 10% take. 
- Delegates::::insert(hotkey_account_id, u16::MAX / 10); - - let staker_coldkey = U256::from(7); - let stake = DefaultMinStake::::get() * 10.into(); - SubtensorModule::add_balance_to_coldkey_account(&staker_coldkey, stake.into()); - - assert_ok!(SubtensorModule::add_stake( - <::RuntimeOrigin>::signed(staker_coldkey), - hotkey_account_id, - netuid, - stake - )); - - assert_abs_diff_eq!( - SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet( - &hotkey_account_id, - &staker_coldkey, - netuid - ), - AlphaCurrency::from(stake.to_u64()), - epsilon = 1.into() - ); - assert_abs_diff_eq!( - SubtensorModule::get_stake_for_hotkey_on_subnet(&hotkey_account_id, netuid), - AlphaCurrency::from(stake.to_u64()), - epsilon = 1.into() - ); - - assert_ok!(SubtensorModule::root_register( - <::RuntimeOrigin>::signed(coldkey_account_id), - hotkey_account_id - )); - assert!(Senate::is_member(&hotkey_account_id)); - - System::reset_events(); - - let proposal = make_proposal(42); - let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); - let hash = BlakeTwo256::hash_of(&proposal); - assert_ok!(Triumvirate::propose( - RuntimeOrigin::signed(senate_hotkey), - Box::new(proposal.clone()), - proposal_len, - TryInto::>::try_into(100u64) - .expect("convert u64 to block number.") - )); - - assert_ok!(SubtensorModule::do_vote_root( - <::RuntimeOrigin>::signed(coldkey_account_id), - &hotkey_account_id, - hash, - 0, - true - )); - assert_eq!( - System::events(), - vec![ - record(RuntimeEvent::Triumvirate(CollectiveEvent::Proposed { - account: senate_hotkey, - proposal_index: 0, - proposal_hash: hash, - threshold: 1 - })), - record(RuntimeEvent::Triumvirate(CollectiveEvent::Voted { - account: hotkey_account_id, - proposal_hash: hash, - voted: true, - yes: 1, - no: 0 - })) - ] - ); - }); -} - -#[test] -fn test_senate_vote_not_member() { - new_test_ext().execute_with(|| { - migrations::migrate_create_root_network::migrate_create_root_network::(); - - let netuid = NetUid::from(1); - let tempo: u16 = 13; - let senate_hotkey = U256::from(1); - let hotkey_account_id = U256::from(6); - let burn_cost = 1000; - let coldkey_account_id = U256::from(667); // Neighbour of the beast, har har - - //add network - SubtensorModule::set_burn(netuid, burn_cost.into()); - add_network(netuid, tempo, 0); - // Give it some $$$ in his coldkey balance - SubtensorModule::add_balance_to_coldkey_account(&coldkey_account_id, 10000); - - let reserve = 1_000_000_000_000; - mock::setup_reserves(netuid, reserve.into(), reserve.into()); - - // Subscribe and check extrinsic output - assert_ok!(SubtensorModule::burned_register( - <::RuntimeOrigin>::signed(coldkey_account_id), - netuid, - hotkey_account_id - )); - // Check if balance has decreased to pay for the burn. - assert_eq!( - SubtensorModule::get_coldkey_balance(&coldkey_account_id), - (10000 - burn_cost) - ); // funds drained on reg. 
- // Check if neuron has added to the specified network(netuid) - assert_eq!(SubtensorModule::get_subnetwork_n(netuid), 1); - // Check if hotkey is added to the Hotkeys - assert_eq!( - SubtensorModule::get_owning_coldkey_for_hotkey(&hotkey_account_id), - coldkey_account_id - ); - - let proposal = make_proposal(42); - let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); - let hash = BlakeTwo256::hash_of(&proposal); - assert_ok!(Triumvirate::propose( - RuntimeOrigin::signed(senate_hotkey), - Box::new(proposal.clone()), - proposal_len, - TryInto::>::try_into(100u64) - .expect("convert u64 to block number.") - )); - - assert_noop!( - SubtensorModule::do_vote_root( - <::RuntimeOrigin>::signed(coldkey_account_id), - &hotkey_account_id, - hash, - 0, - true - ), - Error::::NotSenateMember - ); - }); -} - -#[test] -fn test_senate_leave_works() { - new_test_ext().execute_with(|| { - migrations::migrate_create_root_network::migrate_create_root_network::(); - - let netuid = NetUid::from(1); - let tempo: u16 = 13; - let hotkey_account_id = U256::from(6); - let burn_cost = 1000; - let coldkey_account_id = U256::from(667); // Neighbour of the beast, har har - let stake = DefaultMinStake::::get() * 10.into(); - - //add network - SubtensorModule::set_burn(netuid, burn_cost.into()); - add_network(netuid, tempo, 0); - // Give it some $$$ in his coldkey balance - SubtensorModule::add_balance_to_coldkey_account(&coldkey_account_id, 10000); - - let reserve = stake.to_u64() * 1000; - mock::setup_reserves(netuid, reserve.into(), reserve.into()); - - // Subscribe and check extrinsic output - assert_ok!(SubtensorModule::burned_register( - <::RuntimeOrigin>::signed(coldkey_account_id), - netuid, - hotkey_account_id - )); - // Check if balance has decreased to pay for the burn. - assert_eq!( - SubtensorModule::get_coldkey_balance(&coldkey_account_id), - (10000 - burn_cost) - ); // funds drained on reg. - // Check if neuron has added to the specified network(netuid) - assert_eq!(SubtensorModule::get_subnetwork_n(netuid), 1); - // Check if hotkey is added to the Hotkeys - assert_eq!( - SubtensorModule::get_owning_coldkey_for_hotkey(&hotkey_account_id), - coldkey_account_id - ); - - // Lets make this new key a delegate with a 10% take. 
- Delegates::::insert(hotkey_account_id, u16::MAX / 10); - - let staker_coldkey = U256::from(7); - SubtensorModule::add_balance_to_coldkey_account(&staker_coldkey, stake.into()); - - assert_ok!(SubtensorModule::add_stake( - <::RuntimeOrigin>::signed(staker_coldkey), - hotkey_account_id, - netuid, - stake - )); - - assert_abs_diff_eq!( - SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet( - &hotkey_account_id, - &staker_coldkey, - netuid - ), - AlphaCurrency::from(stake.to_u64()), - epsilon = 1.into() - ); - assert_abs_diff_eq!( - SubtensorModule::get_stake_for_hotkey_on_subnet(&hotkey_account_id, netuid), - AlphaCurrency::from(stake.to_u64()), - epsilon = 1.into() - ); - - assert_ok!(SubtensorModule::root_register( - <::RuntimeOrigin>::signed(coldkey_account_id), - hotkey_account_id - )); - assert!(Senate::is_member(&hotkey_account_id)); - }); -} - -#[test] -fn test_senate_leave_vote_removal() { - new_test_ext().execute_with(|| { - migrations::migrate_create_root_network::migrate_create_root_network::(); - - let netuid = NetUid::from(1); - let tempo: u16 = 13; - let senate_hotkey = U256::from(1); - let hotkey_account_id = U256::from(6); - let burn_cost = 1000; - let coldkey_account_id = U256::from(667); // Neighbour of the beast, har har - let coldkey_origin = <::RuntimeOrigin>::signed(coldkey_account_id); - let stake = DefaultMinStake::::get() * 10.into(); - - //add network - SubtensorModule::set_burn(netuid, burn_cost.into()); - add_network(netuid, tempo, 0); - // Give it some $$$ in his coldkey balance - SubtensorModule::add_balance_to_coldkey_account(&coldkey_account_id, stake.into()); - SubtokenEnabled::::insert(netuid, true); - - let reserve = stake.to_u64() * 1000; - mock::setup_reserves(netuid, reserve.into(), reserve.into()); - - // Subscribe and check extrinsic output - assert_ok!(SubtensorModule::burned_register( - coldkey_origin.clone(), - netuid, - hotkey_account_id - )); - // Check if balance has decreased to pay for the burn. - assert_eq!( - SubtensorModule::get_coldkey_balance(&coldkey_account_id), - (stake.to_u64() - burn_cost) - ); // funds drained on reg. - // Check if neuron has added to the specified network(netuid) - assert_eq!(SubtensorModule::get_subnetwork_n(netuid), 1); - // Check if hotkey is added to the Hotkeys - assert_eq!( - SubtensorModule::get_owning_coldkey_for_hotkey(&hotkey_account_id), - coldkey_account_id - ); - - // Lets make this new key a delegate with a 10% take. 
- Delegates::::insert(hotkey_account_id, u16::MAX / 10); - - let staker_coldkey = U256::from(7); - SubtensorModule::add_balance_to_coldkey_account(&staker_coldkey, stake.into()); - - assert_ok!(SubtensorModule::add_stake( - <::RuntimeOrigin>::signed(staker_coldkey), - hotkey_account_id, - netuid, - stake - )); - - assert_abs_diff_eq!( - SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet( - &hotkey_account_id, - &staker_coldkey, - netuid - ), - AlphaCurrency::from(stake.to_u64()), - epsilon = 1.into() - ); - assert_abs_diff_eq!( - SubtensorModule::get_stake_for_hotkey_on_subnet(&hotkey_account_id, netuid), - AlphaCurrency::from(stake.to_u64()), - epsilon = 1.into() - ); - - assert_ok!(SubtensorModule::root_register( - coldkey_origin.clone(), - hotkey_account_id - )); - assert!(Senate::is_member(&hotkey_account_id)); - - let proposal = make_proposal(42); - let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); - let hash = BlakeTwo256::hash_of(&proposal); - assert_ok!(Triumvirate::propose( - RuntimeOrigin::signed(senate_hotkey), - Box::new(proposal.clone()), - proposal_len, - TryInto::>::try_into(100u64) - .expect("convert u64 to block number.") - )); - - assert_ok!(SubtensorModule::do_vote_root( - coldkey_origin.clone(), - &hotkey_account_id, - hash, - 0, - true - )); - // Fill the root network with many large stake keys. - // This removes all other keys. - // Add two networks. - let other_netuid = NetUid::from(5); - add_network(other_netuid, 1, 0); - SubtensorModule::set_burn(other_netuid, TaoCurrency::ZERO); - SubtensorModule::set_max_registrations_per_block(other_netuid, 1000); - SubtensorModule::set_target_registrations_per_interval(other_netuid, 1000); - SubtensorModule::set_max_registrations_per_block(NetUid::ROOT, 1000); - SubtensorModule::set_target_registrations_per_interval(NetUid::ROOT, 1000); - - let reserve = 1_000_000_000_000; - mock::setup_reserves(other_netuid, reserve.into(), reserve.into()); - mock::setup_reserves(NetUid::ROOT, reserve.into(), reserve.into()); - SubtokenEnabled::::insert(NetUid::ROOT, true); - SubtokenEnabled::::insert(other_netuid, true); - - for i in 0..200 { - let hot = U256::from(i + 100); - let cold = U256::from(i + 100); - // Add balance - SubtensorModule::add_balance_to_coldkey_account(&cold, 100_000_000 + (i as u64)); // lots ot stake - // Register - assert_ok!(SubtensorModule::burned_register( - <::RuntimeOrigin>::signed(cold), - other_netuid, - hot - )); - // Add stake on other network - assert_ok!(SubtensorModule::add_stake( - <::RuntimeOrigin>::signed(cold), - hot, - NetUid::ROOT, - (100_000_000 + (i as u64)).into() - )); - // Register them on the root network. - assert_ok!(SubtensorModule::root_register( - <::RuntimeOrigin>::signed(cold), - hot, - )); - // Check succesfull registration. 
- assert!(SubtensorModule::get_uid_for_net_and_hotkey(other_netuid, &hot).is_ok()); - assert!(SubtensorModule::get_uid_for_net_and_hotkey(NetUid::ROOT, &hot).is_ok()); - // Check that they are all delegates - assert!(SubtensorModule::hotkey_is_delegate(&hot)); - } - // No longer a root member - assert!( - SubtensorModule::get_uid_for_net_and_hotkey(NetUid::ROOT, &hotkey_account_id).is_err() - ); - // No longer a member of the senate - assert!(!Senate::is_member(&hotkey_account_id)); - assert_eq!( - // Vote is removed - Triumvirate::has_voted(hash, 0, &hotkey_account_id), - Ok(false) - ); - }); -} - -#[test] -fn test_senate_not_leave_when_stake_removed() { - new_test_ext().execute_with(|| { - migrations::migrate_create_root_network::migrate_create_root_network::(); - - let netuid = NetUid::from(1); - let tempo: u16 = 13; - let hotkey_account_id = U256::from(6); - let burn_cost = 1000; - let coldkey_account_id = U256::from(667); // Neighbour of the beast, har har - - //add network - SubtensorModule::set_burn(netuid, burn_cost.into()); - add_network(netuid, tempo, 0); - // Give it some $$$ in his coldkey balance - SubtensorModule::add_balance_to_coldkey_account(&coldkey_account_id, 10000); - - let reserve = 1_000_000_000_000; - mock::setup_reserves(netuid, reserve.into(), reserve.into()); - - // Subscribe and check extrinsic output - assert_ok!(SubtensorModule::burned_register( - <::RuntimeOrigin>::signed(coldkey_account_id), - netuid, - hotkey_account_id - )); - // Check if balance has decreased to pay for the burn. - assert_eq!( - SubtensorModule::get_coldkey_balance(&coldkey_account_id), - (10000 - burn_cost) - ); // funds drained on reg. - // Check if neuron has added to the specified network(netuid) - assert_eq!(SubtensorModule::get_subnetwork_n(netuid), 1); - // Check if hotkey is added to the Hotkeys - assert_eq!( - SubtensorModule::get_owning_coldkey_for_hotkey(&hotkey_account_id), - coldkey_account_id - ); - - // Lets make this new key a delegate with a 10% take. 
- Delegates::::insert(hotkey_account_id, u16::MAX / 10); - - let staker_coldkey = U256::from(7); - let stake_amount = DefaultMinStake::::get() * 10.into(); - SubtensorModule::add_balance_to_coldkey_account(&staker_coldkey, stake_amount.into()); - - assert_ok!(SubtensorModule::add_stake( - <::RuntimeOrigin>::signed(staker_coldkey), - hotkey_account_id, - netuid, - stake_amount - )); - - assert_abs_diff_eq!( - SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet( - &hotkey_account_id, - &staker_coldkey, - netuid - ), - AlphaCurrency::from(stake_amount.to_u64()), - epsilon = 1.into() - ); - assert_abs_diff_eq!( - SubtensorModule::get_stake_for_hotkey_on_subnet(&hotkey_account_id, netuid), - AlphaCurrency::from(stake_amount.to_u64()), - epsilon = 1.into() - ); - - assert_ok!(SubtensorModule::root_register( - <::RuntimeOrigin>::signed(coldkey_account_id), - hotkey_account_id - )); - assert!(Senate::is_member(&hotkey_account_id)); - - step_block(100); - - assert_ok!(SubtensorModule::remove_stake( - <::RuntimeOrigin>::signed(staker_coldkey), - hotkey_account_id, - netuid, - (stake_amount - 1.into()).to_u64().into() - )); - assert!(Senate::is_member(&hotkey_account_id)); - }); -} - -#[test] -fn test_senate_join_current_delegate() { - // Test that a current delegate can join the senate - new_test_ext().execute_with(|| { - migrations::migrate_create_root_network::migrate_create_root_network::(); - - let netuid = NetUid::from(1); - let tempo: u16 = 13; - let hotkey_account_id = U256::from(6); - let burn_cost = 1000; - let coldkey_account_id = U256::from(667); - - //add network - SubtensorModule::set_burn(netuid, burn_cost.into()); - add_network(netuid, tempo, 0); - // Give some coldkey balance - SubtensorModule::add_balance_to_coldkey_account(&coldkey_account_id, 10000); - - let reserve = 1_000_000_000_000; - mock::setup_reserves(netuid, reserve.into(), reserve.into()); - - // Subscribe and check extrinsic output - assert_ok!(SubtensorModule::burned_register( - <::RuntimeOrigin>::signed(coldkey_account_id), - netuid, - hotkey_account_id - )); - // Check if balance has decreased to pay for the burn. - assert_eq!( - SubtensorModule::get_coldkey_balance(&coldkey_account_id), - (10000 - burn_cost) - ); // funds drained on reg. 
- // Check if neuron has added to the specified network(netuid) - assert_eq!(SubtensorModule::get_subnetwork_n(netuid), 1); - // Check if hotkey is added to the Hotkeys - assert_eq!( - SubtensorModule::get_owning_coldkey_for_hotkey(&hotkey_account_id), - coldkey_account_id - ); - - // Register in the root network - assert_ok!(SubtensorModule::root_register( - <::RuntimeOrigin>::signed(coldkey_account_id), - hotkey_account_id - )); - // But, remove from the senate - assert_ok!(SenateMembers::remove_member( - <::RuntimeOrigin>::root(), - hotkey_account_id - )); - - // Should *NOT* be a member of the senate now - assert!(!Senate::is_member(&hotkey_account_id)); - - System::reset_events(); - - // We can call now to adjust the senate - assert_ok!(SubtensorModule::adjust_senate( - <::RuntimeOrigin>::signed(coldkey_account_id), - hotkey_account_id - )); - - // This should make the hotkey a member of the senate - assert!(Senate::is_member(&hotkey_account_id)); - - // Check the events - assert!( - System::events().contains(&record(RuntimeEvent::SubtensorModule( - SubtensorEvent::SenateAdjusted { - old_member: None, - new_member: hotkey_account_id - } - ))) - ); - }); -} - -#[test] -fn test_adjust_senate_events() { - // Test the events emitted after adjusting the senate successfully - new_test_ext().execute_with(|| { - migrations::migrate_create_root_network::migrate_create_root_network::(); - - let netuid = NetUid::from(1); - let tempo: u16 = 13; - let hotkey_account_id = U256::from(6); - let burn_cost = 1000; - let coldkey_account_id = U256::from(667); - - let max_senate_size: u16 = SenateMaxMembers::get() as u16; - let stake_threshold = { - let default_stake = DefaultMinStake::::get().to_u64(); - let fee = ::SwapInterface::approx_fee_amount( - netuid, - default_stake.into(), - ); - default_stake + fee - }; - - // We will be registering MaxMembers hotkeys and two more to try a replace - let balance_to_add = DefaultMinStake::::get().to_u64() * 10 - + 50_000 - + (stake_threshold + burn_cost) * (max_senate_size + 2) as u64; - - let replacement_hotkey_account_id = U256::from(7); // Will be added to the senate to replace hotkey_account_id - - //add network - SubtensorModule::set_burn(netuid, burn_cost.into()); - add_network(netuid, tempo, 0); - // Give some coldkey balance - SubtensorModule::add_balance_to_coldkey_account(&coldkey_account_id, balance_to_add); - - // Allow all registrations in netuid in same block. Same for root network. - SubtensorModule::set_max_registrations_per_block(netuid, max_senate_size + 1); - SubtensorModule::set_target_registrations_per_interval(netuid, max_senate_size + 1); - SubtensorModule::set_max_registrations_per_block(NetUid::ROOT, max_senate_size + 1); - SubtensorModule::set_target_registrations_per_interval(NetUid::ROOT, max_senate_size + 1); - SubtokenEnabled::::insert(netuid, true); - SubtokenEnabled::::insert(NetUid::ROOT, true); - - let reserve = 100_000_000_000_000; - mock::setup_reserves(netuid, reserve.into(), reserve.into()); - - // Subscribe and check extrinsic output - assert_ok!(SubtensorModule::burned_register( - <::RuntimeOrigin>::signed(coldkey_account_id), - netuid, - hotkey_account_id - )); - // Check if balance has decreased to pay for the burn. - assert_eq!( - SubtensorModule::get_coldkey_balance(&coldkey_account_id), - (balance_to_add - burn_cost) - ); // funds drained on reg. 
- // Check if neuron has added to the specified network(netuid) - assert_eq!(SubtensorModule::get_subnetwork_n(netuid), 1); - // Check if hotkey is added to the Hotkeys - assert_eq!( - SubtensorModule::get_owning_coldkey_for_hotkey(&hotkey_account_id), - coldkey_account_id - ); - - // Should *NOT* be a member of the senate - assert!(!Senate::is_member(&hotkey_account_id)); - - // root register - assert_ok!(SubtensorModule::root_register( - <::RuntimeOrigin>::signed(coldkey_account_id), - hotkey_account_id - )); // Has no stake, but is now a senate member - - // Check if they are a member of the senate - assert!(Senate::is_member(&hotkey_account_id)); - - // Register MaxMembers - 1 more hotkeys, add stake and join the senate - for i in 0..(max_senate_size - 1) { - let new_hotkey_account_id = U256::from(8 + i); - - assert_ok!(SubtensorModule::burned_register( - <::RuntimeOrigin>::signed(coldkey_account_id), - netuid, - new_hotkey_account_id - )); - // Check if this hotkey is added to the Hotkeys - assert_eq!( - SubtensorModule::get_owning_coldkey_for_hotkey(&new_hotkey_account_id), - coldkey_account_id - ); - // Add/delegate enough stake to join the senate - // +1 to be above hotkey_account_id - assert_ok!(SubtensorModule::add_stake( - <::RuntimeOrigin>::signed(coldkey_account_id), - new_hotkey_account_id, - netuid, - (stake_threshold + 1 + i as u64).into() // Increasing with i to make them ordered - )); - // Join senate - assert_ok!(SubtensorModule::root_register( - <::RuntimeOrigin>::signed(coldkey_account_id), - new_hotkey_account_id - )); - // Check if they are a member of the senate - assert!(Senate::is_member(&new_hotkey_account_id)); - } - - // Verify we are at max senate size - assert_eq!(Senate::members().len(), max_senate_size as usize); - - // Verify the replacement hotkey is not a member of the senate - assert!(!Senate::is_member(&replacement_hotkey_account_id)); - - // Register - assert_ok!(SubtensorModule::burned_register( - <::RuntimeOrigin>::signed(coldkey_account_id), - netuid, - replacement_hotkey_account_id - )); - - // Register in root network - assert_ok!(SubtensorModule::root_register( - <::RuntimeOrigin>::signed(coldkey_account_id), - replacement_hotkey_account_id - )); - - // Check if they are a member of the senate, should not be, - // as they have no stake - assert!(!Senate::is_member(&replacement_hotkey_account_id)); - // Add/delegate enough stake to join the senate - let stake = DefaultMinStake::::get() * 10.into(); - - let reserve = 100_000_000_000_000; - mock::setup_reserves(NetUid::ROOT, reserve.into(), reserve.into()); - - let (_, fee) = mock::swap_tao_to_alpha(NetUid::ROOT, stake); - - assert_ok!(SubtensorModule::add_stake( - <::RuntimeOrigin>::signed(coldkey_account_id), - replacement_hotkey_account_id, - NetUid::ROOT, - stake // Will be more than the last one in the senate by stake (has 0 stake) - )); - assert_abs_diff_eq!( - SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet( - &replacement_hotkey_account_id, - &coldkey_account_id, - NetUid::ROOT - ), - AlphaCurrency::from(stake.to_u64() - fee), - epsilon = (stake.to_u64() / 1000).into() - ); - assert_abs_diff_eq!( - SubtensorModule::get_stake_for_hotkey_on_subnet( - &replacement_hotkey_account_id, - NetUid::ROOT - ), - AlphaCurrency::from(stake.to_u64() - fee), - epsilon = (stake.to_u64() / 1000).into() - ); - - System::reset_events(); - - // We can call now to adjust the senate - assert_ok!(SubtensorModule::adjust_senate( - <::RuntimeOrigin>::signed(coldkey_account_id), - 
replacement_hotkey_account_id - )); - - // This should make the hotkey a member of the senate - assert!(Senate::is_member(&replacement_hotkey_account_id)); - - // Check the events - assert!( - System::events().contains(&record(RuntimeEvent::SubtensorModule( - SubtensorEvent::SenateAdjusted { - old_member: None, - new_member: replacement_hotkey_account_id - } - ))) - ); - }); -} diff --git a/pallets/subtensor/src/tests/serving.rs b/pallets/subtensor/src/tests/serving.rs index b4173a8ebb..d8a9b866d9 100644 --- a/pallets/subtensor/src/tests/serving.rs +++ b/pallets/subtensor/src/tests/serving.rs @@ -1,4 +1,4 @@ -#![allow(clippy::unwrap_used)] +#![allow(clippy::expect_used, clippy::unwrap_used)] use super::mock::*; use crate::Error; diff --git a/pallets/subtensor/src/tests/staking.rs b/pallets/subtensor/src/tests/staking.rs index b36788fa31..27c5b5c16d 100644 --- a/pallets/subtensor/src/tests/staking.rs +++ b/pallets/subtensor/src/tests/staking.rs @@ -16,7 +16,7 @@ use substrate_fixed::types::{I96F32, I110F18, U64F64, U96F32}; use subtensor_runtime_common::{ AlphaCurrency, Currency as CurrencyT, NetUid, NetUidStorageIndex, TaoCurrency, }; -use subtensor_swap_interface::{OrderType, SwapHandler}; +use subtensor_swap_interface::{Order, SwapHandler}; use super::mock; use super::mock::*; @@ -84,12 +84,14 @@ fn test_add_stake_ok_no_emission() { )); let (tao_expected, _) = mock::swap_alpha_to_tao(netuid, alpha_staked); - let approx_fee = - ::SwapInterface::approx_fee_amount(netuid.into(), amount); + let approx_fee = ::SwapInterface::approx_fee_amount( + netuid.into(), + TaoCurrency::from(amount), + ); assert_abs_diff_eq!( SubtensorModule::get_total_stake_for_hotkey(&hotkey_account_id), - tao_expected + approx_fee.into(), // swap returns value after fee, so we need to compensate it + tao_expected + approx_fee, // swap returns value after fee, so we need to compensate it epsilon = 10000.into(), ); @@ -729,7 +731,11 @@ fn test_remove_stake_total_balance_no_change() { amount.into() )); - let fee = ::SwapInterface::approx_fee_amount(netuid.into(), amount); + let fee = ::SwapInterface::approx_fee_amount( + netuid.into(), + TaoCurrency::from(amount), + ) + .to_u64(); assert_abs_diff_eq!( SubtensorModule::get_coldkey_balance(&coldkey_account_id), amount - fee, @@ -864,7 +870,7 @@ fn test_remove_stake_insufficient_liquidity() { &coldkey, netuid, amount_staked.into(), - ::SwapInterface::max_price().into(), + ::SwapInterface::max_price(), false, false, ) @@ -2723,13 +2729,13 @@ fn test_max_amount_add_root() { // 0 price on root => max is 0 assert_eq!( SubtensorModule::get_max_amount_add(NetUid::ROOT, TaoCurrency::ZERO), - Err(Error::::ZeroMaxStakeAmount) + Err(Error::::ZeroMaxStakeAmount.into()) ); // 0.999999... price on root => max is 0 assert_eq!( SubtensorModule::get_max_amount_add(NetUid::ROOT, TaoCurrency::from(999_999_999)), - Err(Error::::ZeroMaxStakeAmount) + Err(Error::::ZeroMaxStakeAmount.into()) ); // 1.0 price on root => max is u64::MAX @@ -2761,13 +2767,13 @@ fn test_max_amount_add_stable() { // 0 price => max is 0 assert_eq!( SubtensorModule::get_max_amount_add(netuid, TaoCurrency::ZERO), - Err(Error::::ZeroMaxStakeAmount) + Err(Error::::ZeroMaxStakeAmount.into()) ); // 0.999999... 
price => max is 0 assert_eq!( SubtensorModule::get_max_amount_add(netuid, TaoCurrency::from(999_999_999)), - Err(Error::::ZeroMaxStakeAmount) + Err(Error::::ZeroMaxStakeAmount.into()) ); // 1.0 price => max is u64::MAX @@ -2800,7 +2806,9 @@ fn test_max_amount_add_dynamic() { 1_000_000_000, 1_000_000_000, 0, - Err(Error::::ZeroMaxStakeAmount), + Err(DispatchError::from( + pallet_subtensor_swap::Error::::PriceLimitExceeded, + )), ), // Low bounds (100, 100, 1_100_000_000, Ok(4)), @@ -2821,25 +2829,33 @@ fn test_max_amount_add_dynamic() { 150_000_000_000, 100_000_000_000, 0, - Err(Error::::ZeroMaxStakeAmount), + Err(DispatchError::from( + pallet_subtensor_swap::Error::::PriceLimitExceeded, + )), ), ( 150_000_000_000, 100_000_000_000, 100_000_000, - Err(Error::::ZeroMaxStakeAmount), + Err(DispatchError::from( + pallet_subtensor_swap::Error::::PriceLimitExceeded, + )), ), ( 150_000_000_000, 100_000_000_000, 500_000_000, - Err(Error::::ZeroMaxStakeAmount), + Err(DispatchError::from( + pallet_subtensor_swap::Error::::PriceLimitExceeded, + )), ), ( 150_000_000_000, 100_000_000_000, 1_499_999_999, - Err(Error::::ZeroMaxStakeAmount), + Err(DispatchError::from( + pallet_subtensor_swap::Error::::PriceLimitExceeded, + )), ), (150_000_000_000, 100_000_000_000, 1_500_000_000, Ok(5)), (150_000_000_000, 100_000_000_000, 1_500_000_001, Ok(51)), @@ -2928,13 +2944,13 @@ fn test_max_amount_remove_root() { // 1.000...001 price on root => max is 0 assert_eq!( SubtensorModule::get_max_amount_remove(NetUid::ROOT, TaoCurrency::from(1_000_000_001)), - Err(Error::::ZeroMaxStakeAmount) + Err(Error::::ZeroMaxStakeAmount.into()) ); // 2.0 price on root => max is 0 assert_eq!( SubtensorModule::get_max_amount_remove(NetUid::ROOT, TaoCurrency::from(2_000_000_000)), - Err(Error::::ZeroMaxStakeAmount) + Err(Error::::ZeroMaxStakeAmount.into()) ); }); } @@ -2966,13 +2982,13 @@ fn test_max_amount_remove_stable() { // 1.000...001 price => max is 0 assert_eq!( SubtensorModule::get_max_amount_remove(netuid, TaoCurrency::from(1_000_000_001)), - Err(Error::::ZeroMaxStakeAmount) + Err(Error::::ZeroMaxStakeAmount.into()) ); // 2.0 price => max is 0 assert_eq!( SubtensorModule::get_max_amount_remove(netuid, TaoCurrency::from(2_000_000_000)), - Err(Error::::ZeroMaxStakeAmount) + Err(Error::::ZeroMaxStakeAmount.into()) ); }); } @@ -2992,13 +3008,17 @@ fn test_max_amount_remove_dynamic() { 0, 1_000_000_000, 100, - Err(Error::::ZeroMaxStakeAmount), + Err(DispatchError::from( + pallet_subtensor_swap::Error::::ReservesTooLow, + )), ), ( 1_000_000_000, 0, 100, - Err(Error::::ZeroMaxStakeAmount), + Err(DispatchError::from( + pallet_subtensor_swap::Error::::PriceLimitExceeded, + )), ), (10_000_000_000, 10_000_000_000, 0, Ok(u64::MAX)), // Low bounds (numbers are empirical, it is only important that result @@ -3037,13 +3057,17 @@ fn test_max_amount_remove_dynamic() { 200_000_000_000, 100_000_000_000, 2_000_000_000, - Err(Error::::ZeroMaxStakeAmount), + Err(DispatchError::from( + pallet_subtensor_swap::Error::::PriceLimitExceeded, + )), ), ( 200_000_000_000, 100_000_000_000, 2_000_000_001, - Err(Error::::ZeroMaxStakeAmount), + Err(DispatchError::from( + pallet_subtensor_swap::Error::::PriceLimitExceeded, + )), ), (200_000_000_000, 100_000_000_000, 1_999_999_999, Ok(24)), (200_000_000_000, 100_000_000_000, 1_999_999_990, Ok(252)), @@ -3059,7 +3083,9 @@ fn test_max_amount_remove_dynamic() { 21_000_000_000_000_000, 1_000_000_000_000_000_000, u64::MAX, - Err(Error::::ZeroMaxStakeAmount), + Err(DispatchError::from( + 
pallet_subtensor_swap::Error::::PriceLimitExceeded, + )), ), ( 21_000_000_000_000_000, @@ -3080,39 +3106,36 @@ fn test_max_amount_remove_dynamic() { Ok(u64::MAX), ), ] - .iter() - .for_each( - |&(tao_in, alpha_in, limit_price, ref expected_max_swappable)| { - let alpha_in = AlphaCurrency::from(alpha_in); - // Forse-set alpha in and tao reserve to achieve relative price of subnets - SubnetTAO::::insert(netuid, TaoCurrency::from(tao_in)); - SubnetAlphaIn::::insert(netuid, alpha_in); - - if !alpha_in.is_zero() { - let expected_price = I96F32::from_num(tao_in) / I96F32::from_num(alpha_in); - assert_eq!( - ::SwapInterface::current_alpha_price(netuid.into()), - expected_price - ); - } + .into_iter() + .for_each(|(tao_in, alpha_in, limit_price, expected_max_swappable)| { + let alpha_in = AlphaCurrency::from(alpha_in); + // Forse-set alpha in and tao reserve to achieve relative price of subnets + SubnetTAO::::insert(netuid, TaoCurrency::from(tao_in)); + SubnetAlphaIn::::insert(netuid, alpha_in); - match expected_max_swappable { - Err(_) => assert_err!( - SubtensorModule::get_max_amount_remove(netuid, limit_price.into()), - Error::::ZeroMaxStakeAmount - ), - Ok(v) => { - let v = AlphaCurrency::from(*v); - assert_abs_diff_eq!( - SubtensorModule::get_max_amount_remove(netuid, limit_price.into()) - .unwrap(), - v, - epsilon = v / 100.into() - ); - } + if !alpha_in.is_zero() { + let expected_price = I96F32::from_num(tao_in) / I96F32::from_num(alpha_in); + assert_eq!( + ::SwapInterface::current_alpha_price(netuid.into()), + expected_price + ); + } + + match expected_max_swappable { + Err(e) => assert_err!( + SubtensorModule::get_max_amount_remove(netuid, limit_price.into()), + DispatchError::from(e) + ), + Ok(v) => { + let v = AlphaCurrency::from(v); + assert_abs_diff_eq!( + SubtensorModule::get_max_amount_remove(netuid, limit_price.into()).unwrap(), + v, + epsilon = v / 100.into() + ); } - }, - ); + } + }); }); } @@ -3163,7 +3186,7 @@ fn test_max_amount_move_root_root() { NetUid::ROOT, TaoCurrency::from(1_000_000_001) ), - Err(Error::::ZeroMaxStakeAmount) + Err(Error::::ZeroMaxStakeAmount.into()) ); // 2.0 price on (root, root) => max is 0 @@ -3173,7 +3196,7 @@ fn test_max_amount_move_root_root() { NetUid::ROOT, TaoCurrency::from(2_000_000_000) ), - Err(Error::::ZeroMaxStakeAmount) + Err(Error::::ZeroMaxStakeAmount.into()) ); }); } @@ -3228,7 +3251,7 @@ fn test_max_amount_move_root_stable() { netuid, TaoCurrency::from(1_000_000_001) ), - Err(Error::::ZeroMaxStakeAmount) + Err(Error::::ZeroMaxStakeAmount.into()) ); // 2.0 price on (root, stable) => max is 0 @@ -3238,7 +3261,7 @@ fn test_max_amount_move_root_stable() { netuid, TaoCurrency::from(2_000_000_000) ), - Err(Error::::ZeroMaxStakeAmount) + Err(Error::::ZeroMaxStakeAmount.into()) ); }); } @@ -3280,7 +3303,7 @@ fn test_max_amount_move_stable_dynamic() { dynamic_netuid, TaoCurrency::from(2_000_000_000) ), - Err(Error::::ZeroMaxStakeAmount) + Err(Error::::ZeroMaxStakeAmount.into()) ); // 3.0 price => max is 0 @@ -3290,7 +3313,7 @@ fn test_max_amount_move_stable_dynamic() { dynamic_netuid, TaoCurrency::from(3_000_000_000) ), - Err(Error::::ZeroMaxStakeAmount) + Err(pallet_subtensor_swap::Error::::PriceLimitExceeded.into()) ); // 2x price => max is 1x TAO @@ -3322,7 +3345,7 @@ fn test_max_amount_move_stable_dynamic() { // Max price doesn't panic and returns something meaningful assert_eq!( SubtensorModule::get_max_amount_move(stable_netuid, dynamic_netuid, TaoCurrency::MAX), - Err(Error::::ZeroMaxStakeAmount) + 
Err(pallet_subtensor_swap::Error::::PriceLimitExceeded.into()) ); assert_eq!( SubtensorModule::get_max_amount_move( @@ -3330,7 +3353,7 @@ fn test_max_amount_move_stable_dynamic() { dynamic_netuid, TaoCurrency::MAX - 1.into() ), - Err(Error::::ZeroMaxStakeAmount) + Err(pallet_subtensor_swap::Error::::PriceLimitExceeded.into()) ); assert_eq!( SubtensorModule::get_max_amount_move( @@ -3338,7 +3361,7 @@ fn test_max_amount_move_stable_dynamic() { dynamic_netuid, TaoCurrency::MAX / 2.into() ), - Err(Error::::ZeroMaxStakeAmount) + Err(pallet_subtensor_swap::Error::::PriceLimitExceeded.into()) ); }); } @@ -3394,7 +3417,7 @@ fn test_max_amount_move_dynamic_stable() { stable_netuid, 1_500_000_001.into() ), - Err(Error::::ZeroMaxStakeAmount) + Err(pallet_subtensor_swap::Error::::PriceLimitExceeded.into()) ); // 1.5 price => max is 0 because of non-zero slippage @@ -3743,8 +3766,9 @@ fn test_add_stake_limit_ok() { // Check that 450 TAO less fees balance still remains free on coldkey let fee = ::SwapInterface::approx_fee_amount( netuid.into(), - amount / 2, - ) as f64; + TaoCurrency::from(amount / 2), + ) + .to_u64() as f64; assert_abs_diff_eq!( SubtensorModule::get_coldkey_balance(&coldkey_account_id), amount / 2 - fee as u64, @@ -3846,7 +3870,7 @@ fn test_add_stake_limit_partial_zero_max_stake_amount_error() { limit_price, true ), - Error::::ZeroMaxStakeAmount + DispatchError::from(pallet_subtensor_swap::Error::::PriceLimitExceeded) ); }); } @@ -4026,18 +4050,16 @@ fn test_add_stake_specific_stake_into_subnet_fail() { ); // Add stake as new hotkey - let expected_alpha = AlphaCurrency::from( - ::SwapInterface::swap( - netuid.into(), - OrderType::Buy, - tao_staked.into(), - ::SwapInterface::max_price(), - false, - true, - ) - .map(|v| v.amount_paid_out) - .unwrap_or_default(), - ); + let order = GetAlphaForTao::::with_amount(tao_staked); + let expected_alpha = ::SwapInterface::swap( + netuid.into(), + order, + ::SwapInterface::max_price(), + false, + true, + ) + .map(|v| v.amount_paid_out) + .unwrap_or_default(); assert_ok!(SubtensorModule::add_stake( RuntimeOrigin::signed(coldkey_account_id), hotkey_account_id, @@ -4467,11 +4489,11 @@ fn test_stake_into_subnet_ok() { .to_num::(); // Initialize swap v3 + let order = GetAlphaForTao::::with_amount(0); assert_ok!(::SwapInterface::swap( netuid.into(), - OrderType::Buy, - 0, - u64::MAX, + order, + TaoCurrency::MAX, false, true )); @@ -4521,11 +4543,11 @@ fn test_stake_into_subnet_low_amount() { .to_num::(); // Initialize swap v3 + let order = GetAlphaForTao::::with_amount(0); assert_ok!(::SwapInterface::swap( netuid.into(), - OrderType::Buy, - 0, - u64::MAX, + order, + TaoCurrency::MAX, false, true )); @@ -4569,11 +4591,11 @@ fn test_unstake_from_subnet_low_amount() { mock::setup_reserves(netuid, tao_reserve, alpha_in); // Initialize swap v3 + let order = GetAlphaForTao::::with_amount(0); assert_ok!(::SwapInterface::swap( netuid.into(), - OrderType::Buy, - 0, - u64::MAX, + order, + TaoCurrency::MAX, false, true )); @@ -4627,11 +4649,11 @@ fn test_stake_into_subnet_prohibitive_limit() { mock::setup_reserves(netuid, tao_reserve, alpha_in); // Initialize swap v3 + let order = GetAlphaForTao::::with_amount(0); assert_ok!(::SwapInterface::swap( netuid.into(), - OrderType::Buy, - 0, - u64::MAX, + order, + TaoCurrency::MAX, false, true )); @@ -4647,7 +4669,7 @@ fn test_stake_into_subnet_prohibitive_limit() { TaoCurrency::ZERO, true, ), - Error::::ZeroMaxStakeAmount + DispatchError::from(pallet_subtensor_swap::Error::::PriceLimitExceeded) ); // Check if stake has 
NOT increased @@ -4683,11 +4705,11 @@ fn test_unstake_from_subnet_prohibitive_limit() { mock::setup_reserves(netuid, tao_reserve, alpha_in); // Initialize swap v3 + let order = GetAlphaForTao::::with_amount(0); assert_ok!(::SwapInterface::swap( netuid.into(), - OrderType::Buy, - 0, - u64::MAX, + order, + TaoCurrency::MAX, false, true )); @@ -4720,7 +4742,7 @@ fn test_unstake_from_subnet_prohibitive_limit() { TaoCurrency::MAX, true, ), - Error::<Test>::ZeroMaxStakeAmount + DispatchError::from(pallet_subtensor_swap::Error::<Test>::PriceLimitExceeded) ); // Check if stake has NOT decreased @@ -4759,11 +4781,11 @@ fn test_unstake_full_amount() { mock::setup_reserves(netuid, tao_reserve, alpha_in); // Initialize swap v3 + let order = GetAlphaForTao::::with_amount(0); assert_ok!(::SwapInterface::swap( netuid.into(), - OrderType::Buy, - 0, - u64::MAX, + order, + TaoCurrency::MAX, false, true )); @@ -5554,3 +5576,74 @@ fn test_remove_root_updates_counters() { ); }); } + +// cargo test --package pallet-subtensor --lib -- tests::staking::test_staking_records_flow --exact --show-output +#[test] +fn test_staking_records_flow() { + new_test_ext(1).execute_with(|| { + let owner_hotkey = U256::from(1); + let owner_coldkey = U256::from(2); + let hotkey = U256::from(3); + let coldkey = U256::from(4); + let amount = 100_000_000; + + // add network + let netuid = add_dynamic_network(&owner_hotkey, &owner_coldkey); + + // Force-set alpha in and tao reserve to make price equal 0.01 + let tao_reserve = TaoCurrency::from(100_000_000_000); + let alpha_in = AlphaCurrency::from(1_000_000_000_000); + mock::setup_reserves(netuid, tao_reserve, alpha_in); + + // Initialize swap v3 + let order = GetAlphaForTao::::with_amount(0); + assert_ok!(::SwapInterface::swap( + netuid.into(), + order, + TaoCurrency::MAX, + false, + true + )); + + // Add stake with slippage safety and check if the result is ok + assert_ok!(SubtensorModule::stake_into_subnet( + &hotkey, + &coldkey, + netuid, + amount.into(), + TaoCurrency::MAX, + false, + false, + )); + let fee_rate = pallet_subtensor_swap::FeeRate::<Test>::get(NetUid::from(netuid)) as f64 + / u16::MAX as f64; + let expected_flow = (amount as f64) * (1.
- fee_rate); + + // Check that flow has been recorded (less swap fees) + assert_abs_diff_eq!( + SubnetTaoFlow::<Test>::get(netuid), + expected_flow as i64, + epsilon = 1_i64 + ); + + // Remove stake + let alpha = + SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet(&hotkey, &coldkey, netuid); + assert_ok!(SubtensorModule::unstake_from_subnet( + &hotkey, + &coldkey, + netuid, + alpha, + TaoCurrency::ZERO, + false, + )); + + // Check that outflow has been recorded (less unstaking fees) + let expected_unstake_fee = expected_flow * fee_rate; + assert_abs_diff_eq!( + SubnetTaoFlow::<Test>::get(netuid), + expected_unstake_fee as i64, + epsilon = (expected_unstake_fee / 100.0) as i64 + ); + }); +} diff --git a/pallets/subtensor/src/tests/staking2.rs b/pallets/subtensor/src/tests/staking2.rs index f5d57d02d3..7d6cc6ad3c 100644 --- a/pallets/subtensor/src/tests/staking2.rs +++ b/pallets/subtensor/src/tests/staking2.rs @@ -38,7 +38,7 @@ fn test_stake_base_case() { SubtensorModule::swap_tao_for_alpha( netuid, tao_to_swap, - ::SwapInterface::max_price().into(), + ::SwapInterface::max_price(), false, ) .unwrap() @@ -698,8 +698,11 @@ fn test_stake_fee_api() { coldkey1, stake_amount, ); - let dynamic_fee_0 = - ::SwapInterface::approx_fee_amount(netuid0.into(), stake_amount); + let dynamic_fee_0 = ::SwapInterface::approx_fee_amount( + netuid0.into(), + TaoCurrency::from(stake_amount), + ) + .to_u64(); assert_eq!(stake_fee_0, dynamic_fee_0); // Test stake fee for remove on root @@ -710,8 +713,11 @@ fn test_stake_fee_api() { coldkey1, stake_amount, ); - let dynamic_fee_1 = - ::SwapInterface::approx_fee_amount(root_netuid.into(), stake_amount); + let dynamic_fee_1 = ::SwapInterface::approx_fee_amount( + root_netuid.into(), + TaoCurrency::from(stake_amount), + ) + .to_u64(); assert_eq!(stake_fee_1, dynamic_fee_1); // Test stake fee for move from root to non-root @@ -722,8 +728,11 @@ fn test_stake_fee_api() { coldkey1, stake_amount, ); - let dynamic_fee_2 = - ::SwapInterface::approx_fee_amount(netuid0.into(), stake_amount); + let dynamic_fee_2 = ::SwapInterface::approx_fee_amount( + netuid0.into(), + TaoCurrency::from(stake_amount), + ) + .to_u64(); assert_eq!(stake_fee_2, dynamic_fee_2); // Test stake fee for move between hotkeys on root @@ -734,8 +743,11 @@ fn test_stake_fee_api() { coldkey1, stake_amount, ); - let dynamic_fee_3 = - ::SwapInterface::approx_fee_amount(root_netuid.into(), stake_amount); + let dynamic_fee_3 = ::SwapInterface::approx_fee_amount( + root_netuid.into(), + TaoCurrency::from(stake_amount), + ) + .to_u64(); assert_eq!(stake_fee_3, dynamic_fee_3); // Test stake fee for move between coldkeys on root @@ -746,8 +758,11 @@ fn test_stake_fee_api() { coldkey2, stake_amount, ); - let dynamic_fee_4 = - ::SwapInterface::approx_fee_amount(root_netuid.into(), stake_amount); + let dynamic_fee_4 = ::SwapInterface::approx_fee_amount( + root_netuid.into(), + TaoCurrency::from(stake_amount), + ) + .to_u64(); assert_eq!(stake_fee_4, dynamic_fee_4); // Test stake fee for *swap* from non-root to root @@ -758,8 +773,11 @@ fn test_stake_fee_api() { coldkey1, stake_amount, ); - let dynamic_fee_5 = - ::SwapInterface::approx_fee_amount(root_netuid.into(), stake_amount); + let dynamic_fee_5 = ::SwapInterface::approx_fee_amount( + root_netuid.into(), + TaoCurrency::from(stake_amount), + ) + .to_u64(); assert_eq!(stake_fee_5, dynamic_fee_5); // Test stake fee for move between hotkeys on non-root @@ -770,8 +788,11 @@ fn test_stake_fee_api() { coldkey1, stake_amount, ); - let dynamic_fee_6 = -
::SwapInterface::approx_fee_amount(netuid0.into(), stake_amount); + let dynamic_fee_6 = ::SwapInterface::approx_fee_amount( + netuid0.into(), + TaoCurrency::from(stake_amount), + ) + .to_u64(); assert_eq!(stake_fee_6, dynamic_fee_6); // Test stake fee for move between coldkeys on non-root @@ -782,8 +803,11 @@ fn test_stake_fee_api() { coldkey2, stake_amount, ); - let dynamic_fee_7 = - ::SwapInterface::approx_fee_amount(netuid0.into(), stake_amount); + let dynamic_fee_7 = ::SwapInterface::approx_fee_amount( + netuid0.into(), + TaoCurrency::from(stake_amount), + ) + .to_u64(); assert_eq!(stake_fee_7, dynamic_fee_7); // Test stake fee for *swap* from non-root to non-root @@ -794,8 +818,11 @@ fn test_stake_fee_api() { coldkey1, stake_amount, ); - let dynamic_fee_8 = - ::SwapInterface::approx_fee_amount(netuid1.into(), stake_amount); + let dynamic_fee_8 = ::SwapInterface::approx_fee_amount( + netuid1.into(), + TaoCurrency::from(stake_amount), + ) + .to_u64(); assert_eq!(stake_fee_8, dynamic_fee_8); }); } @@ -817,9 +844,9 @@ fn test_stake_fee_calculation() { let total_hotkey_alpha = AlphaCurrency::from(100_000_000_000); let tao_in = TaoCurrency::from(100_000_000_000); // 100 TAO let reciprocal_price = 2; // 1 / price - let stake_amount = 100_000_000_000_u64; + let stake_amount = TaoCurrency::from(100_000_000_000); - let default_fee = 0; // FIXME: DefaultStakingFee is deprecated + let default_fee = TaoCurrency::ZERO; // FIXME: DefaultStakingFee is deprecated // Setup alpha out SubnetAlphaOut::::insert(netuid0, AlphaCurrency::from(100_000_000_000)); diff --git a/pallets/subtensor/src/tests/subnet_emissions.rs b/pallets/subtensor/src/tests/subnet_emissions.rs new file mode 100644 index 0000000000..9f29b3fee2 --- /dev/null +++ b/pallets/subtensor/src/tests/subnet_emissions.rs @@ -0,0 +1,490 @@ +#![allow(unused, clippy::indexing_slicing, clippy::panic, clippy::unwrap_used)] +use super::mock::*; +use crate::*; +use alloc::collections::BTreeMap; +use approx::assert_abs_diff_eq; +use sp_core::U256; +use substrate_fixed::types::{I64F64, I96F32, U64F64, U96F32}; +use subtensor_runtime_common::NetUid; + +fn u64f64(x: f64) -> U64F64 { + U64F64::from_num(x) +} + +fn i64f64(x: f64) -> I64F64 { + I64F64::from_num(x) +} + +fn i96f32(x: f64) -> I96F32 { + I96F32::from_num(x) +} + +#[test] +fn inplace_pow_normalize_all_zero_inputs_no_panic_and_unchanged() { + let mut m: BTreeMap = BTreeMap::new(); + m.insert(NetUid::from(1), u64f64(0.0)); + m.insert(NetUid::from(2), u64f64(0.0)); + m.insert(NetUid::from(3), u64f64(0.0)); + + let before = m.clone(); + // p = 1.0 (doesn't matter here) + SubtensorModule::inplace_pow_normalize(&mut m, u64f64(1.0)); + + // Expect unchanged (sum becomes 0 → safe_div handles, or branch skips) + for (k, v_before) in before { + let v_after = m.get(&k).copied().unwrap(); + assert_abs_diff_eq!( + v_after.to_num::(), + v_before.to_num::(), + epsilon = 1e-18 + ); + } +} + +#[test] +fn inplace_pow_normalize_tiny_values_no_panic() { + use alloc::collections::BTreeMap; + + // Very small inputs so that scaling branch is skipped in inplace_pow_normalize + let mut m: BTreeMap = BTreeMap::new(); + m.insert(NetUid::from(10), u64f64(1e-9)); + m.insert(NetUid::from(11), u64f64(2e-9)); + m.insert(NetUid::from(12), u64f64(3e-9)); + + let before = m.clone(); + SubtensorModule::inplace_pow_normalize(&mut m, u64f64(2.0)); // p = 2 + + let sum = (1 + 4 + 9) as f64; + for (k, v_before) in before { + let v_after = m.get(&k).copied().unwrap(); + let mut expected = v_before.to_num::(); + expected *= 1e18 * 
expected / sum; + assert_abs_diff_eq!( + v_after.to_num::(), + expected, + epsilon = expected / 100.0 + ); + } +} + +#[test] +fn inplace_pow_normalize_large_values_no_overflow_and_sum_to_one() { + use alloc::collections::BTreeMap; + + let mut m: BTreeMap = BTreeMap::new(); + m.insert(NetUid::from(1), u64f64(1e9)); + m.insert(NetUid::from(2), u64f64(5e9)); + m.insert(NetUid::from(3), u64f64(1e10)); + + SubtensorModule::inplace_pow_normalize(&mut m, u64f64(2.0)); // p = 2 + + // Sum ≈ 1 + let sum: f64 = m.values().map(|v| v.to_num::()).sum(); + assert_abs_diff_eq!(sum, 1.0_f64, epsilon = 1e-9); + + // Each value is finite and within [0, 1] + for (k, v) in &m { + let f = v.to_num::(); + assert!(f.is_finite(), "value for {k:?} not finite"); + assert!( + (0.0..=1.0).contains(&f), + "value for {k:?} out of [0,1]: {f}" + ); + } +} + +#[test] +fn inplace_pow_normalize_regular_case_relative_proportions_preserved() { + use alloc::collections::BTreeMap; + + // With p = 1, normalization should yield roughly same proportions + let mut m: BTreeMap = BTreeMap::new(); + m.insert(NetUid::from(7), u64f64(2.0)); + m.insert(NetUid::from(8), u64f64(3.0)); + m.insert(NetUid::from(9), u64f64(5.0)); + + SubtensorModule::inplace_pow_normalize(&mut m, u64f64(1.0)); // p = 1 + + let a = m.get(&NetUid::from(7)).copied().unwrap().to_num::(); + let b = m.get(&NetUid::from(8)).copied().unwrap().to_num::(); + let c = m.get(&NetUid::from(9)).copied().unwrap().to_num::(); + + assert_abs_diff_eq!(a, 0.2_f64, epsilon = 0.001); + assert_abs_diff_eq!(b, 0.3_f64, epsilon = 0.001); + assert_abs_diff_eq!(c, 0.5_f64, epsilon = 0.001); + + // The sum of shares is 1.0 with good precision + let sum = a + b + c; + assert_abs_diff_eq!(sum, 1.0_f64, epsilon = 1e-12); +} + +#[test] +fn inplace_pow_normalize_fractional_exponent() { + use alloc::collections::BTreeMap; + + [1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9, 2.0] + .into_iter() + .for_each(|p| { + let mut m: BTreeMap = BTreeMap::new(); + m.insert(NetUid::from(7), u64f64(2.0)); + m.insert(NetUid::from(8), u64f64(3.0)); + m.insert(NetUid::from(9), u64f64(5.0)); + + SubtensorModule::inplace_pow_normalize(&mut m, u64f64(p)); + + let a = m.get(&NetUid::from(7)).copied().unwrap().to_num::(); + let b = m.get(&NetUid::from(8)).copied().unwrap().to_num::(); + let c = m.get(&NetUid::from(9)).copied().unwrap().to_num::(); + + let sum = (2.0_f64).powf(p) + (3.0_f64).powf(p) + (5.0_f64).powf(p); + let expected_a = (2.0_f64).powf(p) / sum; + let expected_b = (3.0_f64).powf(p) / sum; + let expected_c = (5.0_f64).powf(p) / sum; + + assert_abs_diff_eq!(a, expected_a, epsilon = expected_a / 100.0); + assert_abs_diff_eq!(b, expected_b, epsilon = expected_b / 100.0); + assert_abs_diff_eq!(c, expected_c, epsilon = expected_c / 100.0); + + // The sum of shares is 1.0 with good precision + let sum = a + b + c; + assert_abs_diff_eq!(sum, 1.0_f64, epsilon = 1e-12); + }) +} + +/// Normal (moderate, non-zero) EMA flows across 3 subnets. +/// Expect: shares sum to ~1 and are monotonic with flows. 
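+/// Flows are seeded directly into SubnetEmaTaoFlow at the FlowHalfLife block, so no staking activity is needed for this check.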
+#[test] +fn get_shares_normal_flows_three_subnets() { + new_test_ext(1).execute_with(|| { + let owner_hotkey = U256::from(10); + let owner_coldkey = U256::from(20); + + let n1 = add_dynamic_network(&owner_hotkey, &owner_coldkey); + let n2 = add_dynamic_network(&owner_hotkey, &owner_coldkey); + let n3 = add_dynamic_network(&owner_hotkey, &owner_coldkey); + + let block_num = FlowHalfLife::::get(); + System::set_block_number(block_num); + + // Set (block_number, flow) with reasonable positive flows + SubnetEmaTaoFlow::::insert(n1, (block_num, i64f64(1_000.0))); + SubnetEmaTaoFlow::::insert(n2, (block_num, i64f64(3_000.0))); + SubnetEmaTaoFlow::::insert(n3, (block_num, i64f64(6_000.0))); + + let subnets = vec![n1, n2, n3]; + let shares = SubtensorModule::get_shares(&subnets); + + // Sum ≈ 1 + let sum: f64 = shares.values().map(|v| v.to_num::()).sum(); + assert_abs_diff_eq!(sum, 1.0_f64, epsilon = 1e-9); + + // Each share in [0,1] and finite + for (k, v) in &shares { + let f = v.to_num::(); + assert!(f.is_finite(), "share for {k:?} not finite"); + assert!( + (0.0..=1.0).contains(&f), + "share for {k:?} out of [0,1]: {f}" + ); + } + + // Monotonicity with the flows: share(n3) > share(n2) > share(n1) + let s1 = shares.get(&n1).unwrap().to_num::(); + let s2 = shares.get(&n2).unwrap().to_num::(); + let s3 = shares.get(&n3).unwrap().to_num::(); + assert!( + s3 > s2 && s2 > s1, + "expected s3 > s2 > s1; got {s1}, {s2}, {s3}" + ); + }); +} + +/// Very low (but non-zero) EMA flows across 2 subnets. +/// Expect: shares sum to ~1 and higher-flow subnet gets higher share. +#[test] +fn get_shares_low_flows_sum_one_and_ordering() { + new_test_ext(1).execute_with(|| { + let owner_hotkey = U256::from(11); + let owner_coldkey = U256::from(21); + + let n1 = add_dynamic_network(&owner_hotkey, &owner_coldkey); + let n2 = add_dynamic_network(&owner_hotkey, &owner_coldkey); + + let block_num = FlowHalfLife::::get(); + System::set_block_number(block_num); + + // Tiny flows to exercise precision/scaling path + SubnetEmaTaoFlow::::insert(n1, (block_num, i64f64(1e-9))); + SubnetEmaTaoFlow::::insert(n2, (block_num, i64f64(2e-9))); + + let subnets = vec![n1, n2]; + let shares = SubtensorModule::get_shares(&subnets); + + let sum: f64 = shares.values().map(|v| v.to_num::()).sum(); + assert_abs_diff_eq!(sum, 1.0_f64, epsilon = 1e-8); + + for (k, v) in &shares { + let f = v.to_num::(); + assert!(f.is_finite(), "share for {k:?} not finite"); + assert!( + (0.0..=1.0).contains(&f), + "share for {k:?} out of [0,1]: {f}" + ); + } + + let s1 = shares.get(&n1).unwrap().to_num::(); + let s2 = shares.get(&n2).unwrap().to_num::(); + assert!( + s2 > s1, + "expected s2 > s1 with higher flow; got s1={s1}, s2={s2}" + ); + }); +} + +/// High EMA flows across 2 subnets. +/// Expect: no overflow, shares sum to ~1, and ordering follows flows. 
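+/// Flows are on the order of 1e12, large but still well inside the I64F64 range used for EMA flow.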
+#[test] +fn get_shares_high_flows_sum_one_and_ordering() { + new_test_ext(1).execute_with(|| { + let owner_hotkey = U256::from(12); + let owner_coldkey = U256::from(22); + + let n1 = add_dynamic_network(&owner_hotkey, &owner_coldkey); + let n2 = add_dynamic_network(&owner_hotkey, &owner_coldkey); + + let block_num = FlowHalfLife::::get(); + System::set_block_number(block_num); + + // Large but safe flows for I64F64 + SubnetEmaTaoFlow::::insert(n1, (block_num, i64f64(9.0e11))); + SubnetEmaTaoFlow::::insert(n2, (block_num, i64f64(1.8e12))); + + let subnets = vec![n1, n2]; + let shares = SubtensorModule::get_shares(&subnets); + + let sum: f64 = shares.values().map(|v| v.to_num::()).sum(); + assert_abs_diff_eq!(sum, 1.0_f64, epsilon = 1e-9); + + for (k, v) in &shares { + let f = v.to_num::(); + assert!(f.is_finite(), "share for {k:?} not finite"); + assert!( + (0.0..=1.0).contains(&f), + "share for {k:?} out of [0,1]: {f}" + ); + } + + let s1 = shares.get(&n1).unwrap().to_num::(); + let s2 = shares.get(&n2).unwrap().to_num::(); + assert!( + s2 > s1, + "expected s2 > s1 with higher flow; got s1={s1}, s2={s2}" + ); + }); +} + +/// Helper to (re)seed EMA price & flow at the *current* block. +fn seed_price_and_flow(n1: NetUid, n2: NetUid, price1: f64, price2: f64, flow1: f64, flow2: f64) { + let now = frame_system::Pallet::::block_number(); + SubnetMovingPrice::::insert(n1, i96f32(price1)); + SubnetMovingPrice::::insert(n2, i96f32(price2)); + SubnetEmaTaoFlow::::insert(n1, (now, i64f64(flow1))); + SubnetEmaTaoFlow::::insert(n2, (now, i64f64(flow2))); +} + +/// If one subnet has a negative EMA flow and the other positive, +/// the negative one should contribute no weight (treated as zero), +/// so the positive-flow subnet gets the full share. +#[test] +fn get_shares_negative_vs_positive_flow() { + new_test_ext(1).execute_with(|| { + // 2 subnets + let owner_hotkey = U256::from(60); + let owner_coldkey = U256::from(61); + let n1 = add_dynamic_network(&owner_hotkey, &owner_coldkey); + let n2 = add_dynamic_network(&owner_hotkey, &owner_coldkey); + + // Configure blending window and current block + let half_life: u64 = FlowHalfLife::::get(); + FlowNormExponent::::set(u64f64(1.0)); + frame_system::Pallet::::set_block_number(half_life); + TaoFlowCutoff::::set(I64F64::from_num(0)); + + // Equal EMA prices so price side doesn't bias + SubnetMovingPrice::::insert(n1, i96f32(1.0)); + SubnetMovingPrice::::insert(n2, i96f32(1.0)); + + // Set flows: n1 negative, n2 positive + let now = frame_system::Pallet::::block_number(); + SubnetEmaTaoFlow::::insert(n1, (now, i64f64(-100.0))); + SubnetEmaTaoFlow::::insert(n2, (now, i64f64(500.0))); + + let shares = SubtensorModule::get_shares(&[n1, n2]); + let s1 = shares.get(&n1).unwrap().to_num::(); + let s2 = shares.get(&n2).unwrap().to_num::(); + + // Sum ~ 1 + assert_abs_diff_eq!(s1 + s2, 1.0_f64, epsilon = 1e-9); + // Negative flow subnet should not get weight from flow; with equal prices mid-window, + // positive-flow subnet should dominate and get all the allocation. 
+        assert!(
+            s2 > 0.999_999 && s1 < 1e-6,
+            "expected s2≈1, s1≈0; got s1={s1}, s2={s2}"
+        );
+    });
+}
+
+/// If both subnets have negative EMA flows, flows should contribute zero weight
+#[test]
+fn get_shares_both_negative_flows_zero_emission() {
+    new_test_ext(1).execute_with(|| {
+        // 2 subnets
+        let owner_hotkey = U256::from(60);
+        let owner_coldkey = U256::from(61);
+        let n1 = add_dynamic_network(&owner_hotkey, &owner_coldkey);
+        let n2 = add_dynamic_network(&owner_hotkey, &owner_coldkey);
+
+        // Configure blending window and current block
+        let half_life: u64 = FlowHalfLife::<Test>::get();
+        FlowNormExponent::<Test>::set(u64f64(1.0));
+        frame_system::Pallet::<Test>::set_block_number(half_life);
+        TaoFlowCutoff::<Test>::set(I64F64::from_num(0));
+
+        // Equal EMA prices so price side doesn't bias
+        SubnetMovingPrice::<Test>::insert(n1, i96f32(1.0));
+        SubnetMovingPrice::<Test>::insert(n2, i96f32(1.0));
+
+        // Set flows
+        let now = frame_system::Pallet::<Test>::block_number();
+        SubnetEmaTaoFlow::<Test>::insert(n1, (now, i64f64(-100.0)));
+        SubnetEmaTaoFlow::<Test>::insert(n2, (now, i64f64(-200.0)));
+
+        let shares = SubtensorModule::get_shares(&[n1, n2]);
+        let s1 = shares.get(&n1).unwrap().to_num::<f64>();
+        let s2 = shares.get(&n2).unwrap().to_num::<f64>();
+
+        assert!(
+            s1 < 1e-20 && s2 < 1e-20,
+            "expected s2≈0, s1≈0; got s1={s1}, s2={s2}"
+        );
+    });
+}
+
+/// If both subnets have positive EMA flows lower than or equal to cutoff, flows should contribute zero weight
+#[test]
+fn get_shares_both_below_cutoff_zero_emission() {
+    new_test_ext(1).execute_with(|| {
+        // 2 subnets
+        let owner_hotkey = U256::from(60);
+        let owner_coldkey = U256::from(61);
+        let n1 = add_dynamic_network(&owner_hotkey, &owner_coldkey);
+        let n2 = add_dynamic_network(&owner_hotkey, &owner_coldkey);
+
+        // Configure blending window and current block
+        let half_life: u64 = FlowHalfLife::<Test>::get();
+        FlowNormExponent::<Test>::set(u64f64(1.0));
+        frame_system::Pallet::<Test>::set_block_number(half_life);
+        TaoFlowCutoff::<Test>::set(I64F64::from_num(2_000));
+
+        // Equal EMA prices so price side doesn't bias
+        SubnetMovingPrice::<Test>::insert(n1, i96f32(1.0));
+        SubnetMovingPrice::<Test>::insert(n2, i96f32(1.0));
+
+        // Set flows
+        let now = frame_system::Pallet::<Test>::block_number();
+        SubnetEmaTaoFlow::<Test>::insert(n1, (now, i64f64(1000.0)));
+        SubnetEmaTaoFlow::<Test>::insert(n2, (now, i64f64(2000.0)));
+
+        let shares = SubtensorModule::get_shares(&[n1, n2]);
+        let s1 = shares.get(&n1).unwrap().to_num::<f64>();
+        let s2 = shares.get(&n2).unwrap().to_num::<f64>();
+
+        assert!(
+            s1 < 1e-20 && s2 < 1e-20,
+            "expected s2≈0, s1≈0; got s1={s1}, s2={s2}"
+        );
+    });
+}
+
+/// If one subnet has positive EMA flow lower than cutoff, the other gets full emission
+#[test]
+fn get_shares_one_below_cutoff_other_full_emission() {
+    new_test_ext(1).execute_with(|| {
+        [(1000.0, 2000.00001), (1000.0, 2000.001), (1000.0, 5000.0)]
+            .into_iter()
+            .for_each(|(flow1, flow2)| {
+                // 2 subnets
+                let owner_hotkey = U256::from(60);
+                let owner_coldkey = U256::from(61);
+                let n1 = add_dynamic_network(&owner_hotkey, &owner_coldkey);
+                let n2 = add_dynamic_network(&owner_hotkey, &owner_coldkey);
+
+                // Configure blending window and current block
+                let half_life: u64 = FlowHalfLife::<Test>::get();
+                FlowNormExponent::<Test>::set(u64f64(1.0));
+                frame_system::Pallet::<Test>::set_block_number(half_life);
+                TaoFlowCutoff::<Test>::set(I64F64::from_num(2_000));
+
+                // Equal EMA prices (price side doesn't bias)
+                SubnetMovingPrice::<Test>::insert(n1, i96f32(1.0));
+                SubnetMovingPrice::<Test>::insert(n2, i96f32(1.0));
+
+                // Set flows
+                let now = frame_system::Pallet::<Test>::block_number();
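+                // Note: TaoFlowCutoff is 2_000 here, so flow1 (1_000) always stays at or below
+                // the cutoff while flow2 exceeds it by margins ranging from ~1e-5 up to 3_000.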
SubnetEmaTaoFlow::::insert(n1, (now, i64f64(flow1))); + SubnetEmaTaoFlow::::insert(n2, (now, i64f64(flow2))); + + let shares = SubtensorModule::get_shares(&[n1, n2]); + let s1 = shares.get(&n1).unwrap().to_num::(); + let s2 = shares.get(&n2).unwrap().to_num::(); + + // Sum ~ 1 + assert_abs_diff_eq!(s1 + s2, 1.0_f64, epsilon = 1e-9); + assert!( + s2 > 0.999_999 && s1 < 1e-6, + "expected s2≈1, s1≈0; got s1={s1}, s2={s2}" + ); + }); + }); +} + +/// If subnets have negative EMA flows, but they are above the cut-off, emissions are proportional +/// for all except the bottom one, which gets nothing +#[test] +fn get_shares_both_negative_above_cutoff() { + new_test_ext(1).execute_with(|| { + // 2 subnets + let owner_hotkey = U256::from(60); + let owner_coldkey = U256::from(61); + let n1 = add_dynamic_network(&owner_hotkey, &owner_coldkey); + let n2 = add_dynamic_network(&owner_hotkey, &owner_coldkey); + let n3 = add_dynamic_network(&owner_hotkey, &owner_coldkey); + + // Configure blending window and current block + let half_life: u64 = FlowHalfLife::::get(); + FlowNormExponent::::set(u64f64(1.0)); + frame_system::Pallet::::set_block_number(half_life); + TaoFlowCutoff::::set(I64F64::from_num(-1000.0)); + + // Equal EMA prices so price side doesn't bias + SubnetMovingPrice::::insert(n1, i96f32(1.0)); + SubnetMovingPrice::::insert(n2, i96f32(1.0)); + SubnetMovingPrice::::insert(n3, i96f32(1.0)); + + // Set flows + let now = frame_system::Pallet::::block_number(); + SubnetEmaTaoFlow::::insert(n1, (now, i64f64(-100.0))); + SubnetEmaTaoFlow::::insert(n2, (now, i64f64(-300.0))); + SubnetEmaTaoFlow::::insert(n3, (now, i64f64(-400.0))); + + let shares = SubtensorModule::get_shares(&[n1, n2, n3]); + let s1 = shares.get(&n1).unwrap().to_num::(); + let s2 = shares.get(&n2).unwrap().to_num::(); + let s3 = shares.get(&n3).unwrap().to_num::(); + + assert_abs_diff_eq!(s1, 0.75, epsilon = s1 / 100.0); + assert_abs_diff_eq!(s2, 0.25, epsilon = s2 / 100.0); + assert_abs_diff_eq!(s3, 0.0, epsilon = 1e-9); + assert_abs_diff_eq!(s1 + s2 + s3, 1.0, epsilon = 1e-9); + }); +} diff --git a/pallets/subtensor/src/tests/swap_coldkey.rs b/pallets/subtensor/src/tests/swap_coldkey.rs index 7fb77c895e..9d3bdbfc62 100644 --- a/pallets/subtensor/src/tests/swap_coldkey.rs +++ b/pallets/subtensor/src/tests/swap_coldkey.rs @@ -1,4 +1,10 @@ -#![allow(unused, clippy::indexing_slicing, clippy::panic, clippy::unwrap_used)] +#![allow( + unused, + clippy::expect_used, + clippy::indexing_slicing, + clippy::panic, + clippy::unwrap_used +)] use approx::assert_abs_diff_eq; use codec::Encode; @@ -15,7 +21,7 @@ use sp_runtime::traits::{DispatchInfoOf, TransactionExtension}; use sp_runtime::{DispatchError, traits::TxBaseImplication}; use substrate_fixed::types::U96F32; use subtensor_runtime_common::{AlphaCurrency, Currency, SubnetInfo, TaoCurrency}; -use subtensor_swap_interface::{OrderType, SwapHandler}; +use subtensor_swap_interface::{SwapEngine, SwapHandler}; use super::mock; use super::mock::*; @@ -1610,51 +1616,6 @@ fn test_coldkey_swap_total() { ); }); } -// SKIP_WASM_BUILD=1 RUST_LOG=info cargo test --test swap_coldkey -- test_swap_senate_member --exact --nocapture -#[test] -fn test_swap_senate_member() { - new_test_ext(1).execute_with(|| { - let old_hotkey = U256::from(1); - let new_hotkey = U256::from(2); - let non_member_hotkey = U256::from(3); - let mut weight = Weight::zero(); - - // Setup: Add old_hotkey as a Senate member - assert_ok!(SenateMembers::add_member( - RawOrigin::Root.into(), - old_hotkey - )); - - // Test 1: Successful 
swap - assert_ok!(SubtensorModule::swap_senate_member( - &old_hotkey, - &new_hotkey, - &mut weight - )); - assert!(Senate::is_member(&new_hotkey)); - assert!(!Senate::is_member(&old_hotkey)); - - // Verify weight update - let expected_weight = ::DbWeight::get().reads_writes(2, 2); - assert_eq!(weight, expected_weight); - - // Reset weight for next test - weight = Weight::zero(); - - // Test 2: Swap with non-member (should not change anything) - assert_ok!(SubtensorModule::swap_senate_member( - &non_member_hotkey, - &new_hotkey, - &mut weight - )); - assert!(Senate::is_member(&new_hotkey)); - assert!(!Senate::is_member(&non_member_hotkey)); - - // Verify weight update (should only have read operations) - let expected_weight = ::DbWeight::get().reads(1); - assert_eq!(weight, expected_weight); - }); -} // SKIP_WASM_BUILD=1 RUST_LOG=info cargo test --package pallet-subtensor --lib -- tests::swap_coldkey::test_coldkey_delegations --exact --show-output #[test] diff --git a/pallets/subtensor/src/tests/swap_hotkey.rs b/pallets/subtensor/src/tests/swap_hotkey.rs index b75d7967f3..71191d1951 100644 --- a/pallets/subtensor/src/tests/swap_hotkey.rs +++ b/pallets/subtensor/src/tests/swap_hotkey.rs @@ -9,7 +9,7 @@ use sp_core::{Get, H160, H256, U256}; use sp_runtime::SaturatedConversion; use substrate_fixed::types::U64F64; use subtensor_runtime_common::{AlphaCurrency, NetUidStorageIndex, TaoCurrency}; -use subtensor_swap_interface::SwapHandler; +use subtensor_swap_interface::{SwapEngine, SwapHandler}; use super::mock; use super::mock::*; @@ -120,34 +120,6 @@ fn test_swap_total_hotkey_stake() { }); } -// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --test swap_hotkey -- test_swap_senate_members --exact --nocapture -#[test] -fn test_swap_senate_members() { - new_test_ext(1).execute_with(|| { - let old_hotkey = U256::from(1); - let new_hotkey = U256::from(2); - let coldkey = U256::from(3); - let mut weight = Weight::zero(); - - assert_ok!(SenateMembers::add_member(RuntimeOrigin::root(), old_hotkey)); - let members = SenateMembers::members(); - assert!(members.contains(&old_hotkey)); - assert!(!members.contains(&new_hotkey)); - - assert_ok!(SubtensorModule::perform_hotkey_swap_on_all_subnets( - &old_hotkey, - &new_hotkey, - &coldkey, - &mut weight - )); - - // Assert that the old_hotkey is no longer a member and new_hotkey is now a member - let members = SenateMembers::members(); - assert!(!members.contains(&old_hotkey)); - assert!(members.contains(&new_hotkey)); - }); -} - // SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --test swap_hotkey -- test_swap_delegates --exact --nocapture #[test] fn test_swap_delegates() { @@ -904,7 +876,6 @@ fn test_swap_stake_success() { TotalHotkeyShares::::insert(old_hotkey, netuid, shares); Alpha::::insert((old_hotkey, coldkey, netuid), U64F64::from_num(amount)); AlphaDividendsPerSubnet::::insert(netuid, old_hotkey, AlphaCurrency::from(amount)); - TaoDividendsPerSubnet::::insert(netuid, old_hotkey, TaoCurrency::from(amount)); // Perform the swap SubtensorModule::perform_hotkey_swap_on_all_subnets( @@ -955,14 +926,6 @@ fn test_swap_stake_success() { AlphaDividendsPerSubnet::::get(netuid, new_hotkey), amount.into() ); - assert_eq!( - TaoDividendsPerSubnet::::get(netuid, old_hotkey), - TaoCurrency::ZERO - ); - assert_eq!( - TaoDividendsPerSubnet::::get(netuid, new_hotkey), - amount.into() - ); }); } @@ -1273,14 +1236,10 @@ fn test_swap_complex_parent_child_structure() { assert!(ChildKeys::::get(old_hotkey, netuid).is_empty()); // Verify parent's ChildKeys update - assert_eq!( - 
ChildKeys::::get(parent1, netuid), - vec![(100u64, new_hotkey), (500u64, U256::from(8))] - ); - assert_eq!( - ChildKeys::::get(parent2, netuid), - vec![(200u64, new_hotkey), (600u64, U256::from(9))] - ); + assert!(ChildKeys::::get(parent1, netuid).contains(&(500u64, U256::from(8))),); + assert!(ChildKeys::::get(parent1, netuid).contains(&(100u64, new_hotkey)),); + assert!(ChildKeys::::get(parent2, netuid).contains(&(600u64, U256::from(9))),); + assert!(ChildKeys::::get(parent2, netuid).contains(&(200u64, new_hotkey)),); }); } @@ -1292,7 +1251,7 @@ fn test_swap_parent_hotkey_childkey_maps() { let coldkey = U256::from(2); let child = U256::from(3); let child_other = U256::from(4); - let parent_new = U256::from(4); + let parent_new = U256::from(5); add_network(netuid, 1, 0); SubtensorModule::create_account_if_non_existent(&coldkey, &parent_old); @@ -1466,6 +1425,52 @@ fn test_swap_hotkey_swap_rate_limits() { }); } +#[test] +fn test_swap_parent_hotkey_self_loops_in_pending() { + new_test_ext(1).execute_with(|| { + let netuid = NetUid::from(1); + let parent_old = U256::from(1); + let coldkey = U256::from(2); + let child = U256::from(3); + let child_other = U256::from(4); + + // Same as child_other, so it will self-loop when pending is set. Should fail. + let parent_new = U256::from(4); + add_network(netuid, 1, 0); + SubtensorModule::create_account_if_non_existent(&coldkey, &parent_old); + + // Set child and verify state maps + mock_set_children(&coldkey, &parent_old, netuid, &[(u64::MAX, child)]); + // Wait rate limit + step_rate_limit(&TransactionType::SetChildren, netuid); + // Schedule some pending child keys. + mock_schedule_children(&coldkey, &parent_old, netuid, &[(u64::MAX, child_other)]); + + assert_eq!( + ParentKeys::::get(child, netuid), + vec![(u64::MAX, parent_old)] + ); + assert_eq!( + ChildKeys::::get(parent_old, netuid), + vec![(u64::MAX, child)] + ); + let existing_pending_child_keys = PendingChildKeys::::get(netuid, parent_old); + assert_eq!(existing_pending_child_keys.0, vec![(u64::MAX, child_other)]); + + // Swap + let mut weight = Weight::zero(); + assert_err!( + SubtensorModule::perform_hotkey_swap_on_all_subnets( + &parent_old, + &parent_new, + &coldkey, + &mut weight + ), + Error::::InvalidChild + ); + }) +} + #[test] fn test_swap_auto_stake_destination_coldkeys() { new_test_ext(1).execute_with(|| { diff --git a/pallets/subtensor/src/tests/swap_hotkey_with_subnet.rs b/pallets/subtensor/src/tests/swap_hotkey_with_subnet.rs index c7baa55387..6e423c1269 100644 --- a/pallets/subtensor/src/tests/swap_hotkey_with_subnet.rs +++ b/pallets/subtensor/src/tests/swap_hotkey_with_subnet.rs @@ -122,33 +122,6 @@ fn test_swap_total_hotkey_stake() { }); } -// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --test swap_hotkey_with_subnet -- test_swap_senate_members --exact --nocapture -#[test] -fn test_swap_senate_members() { - new_test_ext(1).execute_with(|| { - let old_hotkey = U256::from(1); - let new_hotkey = U256::from(2); - let coldkey = U256::from(3); - - let netuid = add_dynamic_network(&old_hotkey, &coldkey); - SubtensorModule::add_balance_to_coldkey_account(&coldkey, u64::MAX); - - assert_ok!(SenateMembers::add_member(RuntimeOrigin::root(), old_hotkey)); - - System::set_block_number(System::block_number() + HotkeySwapOnSubnetInterval::get()); - assert_ok!(SubtensorModule::do_swap_hotkey( - RuntimeOrigin::signed(coldkey), - &old_hotkey, - &new_hotkey, - Some(netuid) - )); - - let members = SenateMembers::members(); - assert!(members.contains(&old_hotkey)); - 
assert!(!members.contains(&new_hotkey)); - }); -} - // SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --test swap_hotkey_with_subnet -- test_swap_delegates --exact --nocapture #[test] fn test_swap_delegates() { @@ -957,7 +930,6 @@ fn test_swap_stake_success() { TotalHotkeyShares::::insert(old_hotkey, netuid, U64F64::from_num(shares)); Alpha::::insert((old_hotkey, coldkey, netuid), U64F64::from_num(amount)); AlphaDividendsPerSubnet::::insert(netuid, old_hotkey, AlphaCurrency::from(amount)); - TaoDividendsPerSubnet::::insert(netuid, old_hotkey, TaoCurrency::from(amount)); // Perform the swap System::set_block_number(System::block_number() + HotkeySwapOnSubnetInterval::get()); @@ -1009,14 +981,6 @@ fn test_swap_stake_success() { AlphaDividendsPerSubnet::::get(netuid, new_hotkey), AlphaCurrency::from(amount) ); - assert_eq!( - TaoDividendsPerSubnet::::get(netuid, old_hotkey), - TaoCurrency::ZERO - ); - assert_eq!( - TaoDividendsPerSubnet::::get(netuid, new_hotkey), - amount.into() - ); }); } @@ -1129,6 +1093,48 @@ fn test_swap_child_keys() { }); } +// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --package pallet-subtensor --lib -- tests::swap_hotkey_with_subnet::test_swap_child_keys_self_loop --exact --show-output +#[test] +fn test_swap_child_keys_self_loop() { + new_test_ext(1).execute_with(|| { + let old_hotkey = U256::from(1); + let new_hotkey = U256::from(2); + let coldkey = U256::from(3); + let netuid = add_dynamic_network(&old_hotkey, &coldkey); + let amount = AlphaCurrency::from(12345); + SubtensorModule::add_balance_to_coldkey_account(&coldkey, u64::MAX); + + // Only for checking + TotalHotkeyAlpha::::insert(old_hotkey, netuid, AlphaCurrency::from(amount)); + + let children = vec![(200u64, new_hotkey)]; + + // Initialize ChildKeys for old_hotkey + ChildKeys::::insert(old_hotkey, netuid, children.clone()); + + // Perform the swap extrinsic + System::set_block_number(System::block_number() + HotkeySwapOnSubnetInterval::get()); + assert_err!( + SubtensorModule::swap_hotkey( + RuntimeOrigin::signed(coldkey), + old_hotkey, + new_hotkey, + Some(netuid) + ), + Error::::InvalidChild + ); + + // Verify the swap didn't happen + assert_eq!(ChildKeys::::get(old_hotkey, netuid), children); + assert!(ChildKeys::::get(new_hotkey, netuid).is_empty()); + assert_eq!(TotalHotkeyAlpha::::get(old_hotkey, netuid), amount); + assert_eq!( + TotalHotkeyAlpha::::get(new_hotkey, netuid), + AlphaCurrency::from(0) + ); + }); +} + // SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --test swap_hotkey_with_subnet -- test_swap_parent_keys --exact --nocapture #[test] fn test_swap_parent_keys() { @@ -1273,14 +1279,10 @@ fn test_swap_complex_parent_child_structure() { assert!(ChildKeys::::get(old_hotkey, netuid).is_empty()); // Verify parent's ChildKeys update - assert_eq!( - ChildKeys::::get(parent1, netuid), - vec![(100u64, new_hotkey), (500u64, U256::from(8))] - ); - assert_eq!( - ChildKeys::::get(parent2, netuid), - vec![(200u64, new_hotkey), (600u64, U256::from(9))] - ); + assert!(ChildKeys::::get(parent1, netuid).contains(&(500u64, U256::from(8))),); + assert!(ChildKeys::::get(parent1, netuid).contains(&(100u64, new_hotkey)),); + assert!(ChildKeys::::get(parent2, netuid).contains(&(600u64, U256::from(9))),); + assert!(ChildKeys::::get(parent2, netuid).contains(&(200u64, new_hotkey)),); }); } @@ -1291,7 +1293,7 @@ fn test_swap_parent_hotkey_childkey_maps() { let coldkey = U256::from(2); let child = U256::from(3); let child_other = U256::from(4); - let parent_new = U256::from(4); + let parent_new = U256::from(5); let netuid 
= add_dynamic_network(&parent_old, &coldkey); SubtensorModule::add_balance_to_coldkey_account(&coldkey, u64::MAX); diff --git a/pallets/subtensor/src/tests/uids.rs b/pallets/subtensor/src/tests/uids.rs index 4317337ffd..8fee5f7507 100644 --- a/pallets/subtensor/src/tests/uids.rs +++ b/pallets/subtensor/src/tests/uids.rs @@ -1,4 +1,4 @@ -#![allow(clippy::unwrap_used)] +#![allow(clippy::expect_used, clippy::unwrap_used)] use super::mock::*; use crate::*; diff --git a/pallets/subtensor/src/tests/weights.rs b/pallets/subtensor/src/tests/weights.rs index bc9af5cf07..a71a225dc7 100644 --- a/pallets/subtensor/src/tests/weights.rs +++ b/pallets/subtensor/src/tests/weights.rs @@ -1,4 +1,4 @@ -#![allow(clippy::indexing_slicing, clippy::unwrap_used)] +#![allow(clippy::expect_used, clippy::indexing_slicing, clippy::unwrap_used)] use ark_serialize::CanonicalDeserialize; use ark_serialize::CanonicalSerialize; @@ -281,15 +281,13 @@ fn test_set_weights_validate() { ); // Increase the stake and make it to be equal to the minimum threshold - let fee = ::SwapInterface::approx_fee_amount( - netuid.into(), - min_stake.into(), - ); + let fee = + ::SwapInterface::approx_fee_amount(netuid.into(), min_stake); assert_ok!(SubtensorModule::do_add_stake( RuntimeOrigin::signed(hotkey), hotkey, netuid, - min_stake + fee.into() + min_stake + fee )); let min_stake_with_slippage = SubtensorModule::get_total_stake_for_hotkey(&hotkey); @@ -733,7 +731,6 @@ fn test_weights_err_no_validator_permit() { add_network_disable_commit_reveal(netuid, tempo, 0); SubtensorModule::set_min_allowed_weights(netuid, 0); SubtensorModule::set_max_allowed_uids(netuid, 3); - SubtensorModule::set_max_weight_limit(netuid, u16::MAX); register_ok_neuron(netuid, hotkey_account_id, U256::from(66), 0); register_ok_neuron(netuid, U256::from(1), U256::from(1), 65555); register_ok_neuron(netuid, U256::from(2), U256::from(2), 75555); @@ -919,7 +916,6 @@ fn test_weights_err_setting_weights_too_fast() { add_network_disable_commit_reveal(netuid, tempo, 0); SubtensorModule::set_min_allowed_weights(netuid, 0); SubtensorModule::set_max_allowed_uids(netuid, 3); - SubtensorModule::set_max_weight_limit(netuid, u16::MAX); register_ok_neuron(netuid, hotkey_account_id, U256::from(66), 0); register_ok_neuron(netuid, U256::from(1), U256::from(1), 65555); register_ok_neuron(netuid, U256::from(2), U256::from(2), 75555); @@ -1062,95 +1058,6 @@ fn test_weights_err_has_duplicate_ids() { // SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --package pallet-subtensor --lib -- tests::weights::test_weights_err_max_weight_limit --exact --show-output --nocapture // Test ensures weights cannot exceed max weight limit. -#[test] -fn test_weights_err_max_weight_limit() { - //TO DO SAM: uncomment when we implement run_to_block fn - new_test_ext(0).execute_with(|| { - // Add network. - let netuid = NetUid::from(1); - let tempo: u16 = 100; - add_network_disable_commit_reveal(netuid, tempo, 0); - - // Set params. - SubtensorModule::set_max_allowed_uids(netuid, 5); - SubtensorModule::set_target_registrations_per_interval(netuid, 5); - SubtensorModule::set_max_weight_limit(netuid, u16::MAX / 5); - SubtensorModule::set_min_allowed_weights(netuid, 0); - - // Add 5 accounts. 
- println!("+Registering: net:{:?}, cold:{:?}, hot:{:?}", netuid, 0, 0); - register_ok_neuron(netuid, U256::from(0), U256::from(0), 55555); - let neuron_uid: u16 = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &U256::from(0)) - .expect("Not registered."); - SubtensorModule::set_validator_permit_for_uid(netuid, neuron_uid, true); - assert_eq!(SubtensorModule::get_subnetwork_n(netuid), 1); - assert!(SubtensorModule::is_hotkey_registered_on_network( - netuid, - &U256::from(0) - )); - step_block(1); - - println!("+Registering: net:{:?}, cold:{:?}, hot:{:?}", netuid, 1, 1); - register_ok_neuron(netuid, U256::from(1), U256::from(1), 65555); - assert!(SubtensorModule::is_hotkey_registered_on_network( - netuid, - &U256::from(1) - )); - assert_eq!(SubtensorModule::get_subnetwork_n(netuid), 2); - step_block(1); - - println!("+Registering: net:{:?}, cold:{:?}, hot:{:?}", netuid, 2, 2); - register_ok_neuron(netuid, U256::from(2), U256::from(2), 75555); - assert!(SubtensorModule::is_hotkey_registered_on_network( - netuid, - &U256::from(2) - )); - assert_eq!(SubtensorModule::get_subnetwork_n(netuid), 3); - step_block(1); - - println!("+Registering: net:{:?}, cold:{:?}, hot:{:?}", netuid, 3, 3); - register_ok_neuron(netuid, U256::from(3), U256::from(3), 95555); - assert!(SubtensorModule::is_hotkey_registered_on_network( - netuid, - &U256::from(3) - )); - assert_eq!(SubtensorModule::get_subnetwork_n(netuid), 4); - step_block(1); - - println!("+Registering: net:{:?}, cold:{:?}, hot:{:?}", netuid, 4, 4); - register_ok_neuron(netuid, U256::from(4), U256::from(4), 35555); - assert!(SubtensorModule::is_hotkey_registered_on_network( - netuid, - &U256::from(4) - )); - assert_eq!(SubtensorModule::get_subnetwork_n(netuid), 5); - step_block(1); - - // Non self-weight fails. - let uids: Vec = vec![1, 2, 3, 4]; - let values: Vec = vec![u16::MAX / 4, u16::MAX / 4, u16::MAX / 54, u16::MAX / 4]; - let result = SubtensorModule::set_weights( - RuntimeOrigin::signed(U256::from(0)), - 1.into(), - uids, - values, - 0, - ); - assert_eq!(result, Err(Error::::MaxWeightExceeded.into())); - - // Self-weight is a success. - let uids: Vec = vec![0]; // Self. - let values: Vec = vec![u16::MAX]; // normalizes to u32::MAX - assert_ok!(SubtensorModule::set_weights( - RuntimeOrigin::signed(U256::from(0)), - 1.into(), - uids, - values, - 0 - )); - }); -} - // SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --package pallet-subtensor --lib -- tests::weights::test_no_signature --exact --show-output --nocapture // Tests the call requires a valid origin. #[test] @@ -1249,7 +1156,6 @@ fn test_set_weight_not_enough_values() { let neuron_uid: u16 = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &U256::from(1)) .expect("Not registered."); SubtensorModule::set_validator_permit_for_uid(netuid, neuron_uid, true); - SubtensorModule::set_max_weight_limit(netuid, u16::MAX); SubtensorModule::add_balance_to_coldkey_account(&U256::from(2), 1); SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( &account_id, @@ -1315,8 +1221,6 @@ fn test_set_weight_too_many_uids() { register_ok_neuron(1.into(), U256::from(3), U256::from(4), 300_000); SubtensorModule::set_min_allowed_weights(1.into(), 2); - SubtensorModule::set_max_weight_limit(netuid, u16::MAX); - // Should fail because we are setting more weights than there are neurons. let weight_keys: Vec = vec![0, 1, 2, 3, 4]; // more uids than neurons in subnet. let weight_values: Vec = vec![88, 102, 303, 1212, 11]; // random value. 
@@ -1360,7 +1264,6 @@ fn test_set_weights_sum_larger_than_u16_max() { .expect("Not registered."); SubtensorModule::set_stake_threshold(0); SubtensorModule::set_validator_permit_for_uid(netuid, neuron_uid, true); - SubtensorModule::set_max_weight_limit(netuid, u16::MAX); SubtensorModule::add_balance_to_coldkey_account(&U256::from(2), 1); SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( &(U256::from(1)), @@ -1548,52 +1451,16 @@ fn test_max_weight_limited_when_weight_limit_is_u16_max() { }); } -// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --package pallet-subtensor --lib -- tests::weights::test_max_weight_limited_when_max_weight_is_within_limit --exact --show-output --nocapture -/// Check _truthy_ path for max weight limit #[test] -fn test_max_weight_limited_when_max_weight_is_within_limit() { +fn test_get_max_weight_limit_is_constant() { new_test_ext(0).execute_with(|| { - let max_allowed: u16 = 1; - let max_weight_limit = u16::MAX / 5; - - let netuid = NetUid::from(1); - let uids: Vec = Vec::from_iter((0..max_allowed).map(|id| id + 1)); - let uid: u16 = uids[0]; - let weights: Vec = Vec::from_iter((0..max_allowed).map(|id| max_weight_limit - id)); - - SubtensorModule::set_max_weight_limit(netuid, max_weight_limit); - - let expected = true; - let result = SubtensorModule::max_weight_limited(netuid, uid, &uids, &weights); - assert_eq!( - expected, result, - "Failed get expected result when everything _should_ be fine" + SubtensorModule::get_max_weight_limit(NetUid::from(1)), + u16::MAX ); - }); -} - -// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --package pallet-subtensor --lib -- tests::weights::test_max_weight_limited_when_guard_checks_are_not_triggered --exact --show-output --nocapture -/// Check _falsey_ path -#[test] -fn test_max_weight_limited_when_guard_checks_are_not_triggered() { - new_test_ext(0).execute_with(|| { - let max_allowed: u16 = 3; - let max_weight_limit = u16::MAX / 5; - - let netuid = NetUid::from(1); - let uids: Vec = Vec::from_iter((0..max_allowed).map(|id| id + 1)); - let uid: u16 = uids[0]; - let weights: Vec = Vec::from_iter((0..max_allowed).map(|id| max_weight_limit + id)); - - SubtensorModule::set_max_weight_limit(netuid, max_weight_limit); - - let expected = false; - let result = SubtensorModule::max_weight_limited(netuid, uid, &uids, &weights); - assert_eq!( - expected, result, - "Failed get expected result when guard-checks were not triggered" + SubtensorModule::get_max_weight_limit(NetUid::ROOT), + u16::MAX ); }); } diff --git a/pallets/subtensor/src/utils/misc.rs b/pallets/subtensor/src/utils/misc.rs index a4d4755e5d..0ba3df1103 100644 --- a/pallets/subtensor/src/utils/misc.rs +++ b/pallets/subtensor/src/utils/misc.rs @@ -7,7 +7,7 @@ use safe_math::*; use sp_core::Get; use sp_core::U256; use sp_runtime::Saturating; -use substrate_fixed::types::{I32F32, U96F32}; +use substrate_fixed::types::{I32F32, I64F64, U64F64, U96F32}; use subtensor_runtime_common::{AlphaCurrency, NetUid, NetUidStorageIndex, TaoCurrency}; impl Pallet { @@ -549,12 +549,9 @@ impl Pallet { Self::deposit_event(Event::ScalingLawPowerSet(netuid, scaling_law_power)); } - pub fn get_max_weight_limit(netuid: NetUid) -> u16 { - MaxWeightsLimit::::get(netuid) - } - pub fn set_max_weight_limit(netuid: NetUid, max_weight_limit: u16) { - MaxWeightsLimit::::insert(netuid, max_weight_limit); - Self::deposit_event(Event::MaxWeightLimitSet(netuid, max_weight_limit)); + #[inline(always)] + pub const fn get_max_weight_limit(_netuid: NetUid) -> u16 { + u16::MAX } pub fn 
get_immunity_period(netuid: NetUid) -> u16 { @@ -779,25 +776,6 @@ impl Pallet { Self::set_rao_recycled(netuid, rao_recycled); } - pub fn set_senate_required_stake_perc(required_percent: u64) { - SenateRequiredStakePercentage::::put(required_percent); - } - - pub fn is_senate_member(hotkey: &T::AccountId) -> bool { - T::SenateMembers::is_member(hotkey) - } - - pub fn do_set_senate_required_stake_perc( - origin: T::RuntimeOrigin, - required_percent: u64, - ) -> DispatchResult { - ensure_root(origin)?; - - Self::set_senate_required_stake_perc(required_percent); - Self::deposit_event(Event::SenateRequiredStakePercentSet(required_percent)); - Ok(()) - } - pub fn is_subnet_owner(address: &T::AccountId) -> bool { SubnetOwner::::iter_values().any(|owner| *address == owner) } @@ -973,4 +951,19 @@ impl Pallet { SubnetLimit::::put(limit); Self::deposit_event(Event::SubnetLimitSet(limit)); } + + /// Sets TAO flow cutoff value (A) + pub fn set_tao_flow_cutoff(flow_cutoff: I64F64) { + TaoFlowCutoff::::set(flow_cutoff); + } + + /// Sets TAO flow normalization exponent (p) + pub fn set_tao_flow_normalization_exponent(exponent: U64F64) { + FlowNormExponent::::set(exponent); + } + + /// Sets TAO flow smoothing factor (alpha) + pub fn set_tao_flow_smoothing_factor(smoothing_factor: u64) { + FlowEmaSmoothingFactor::::set(smoothing_factor); + } } diff --git a/pallets/subtensor/src/utils/try_state.rs b/pallets/subtensor/src/utils/try_state.rs index 4ade47eeef..ad5a8e9dc8 100644 --- a/pallets/subtensor/src/utils/try_state.rs +++ b/pallets/subtensor/src/utils/try_state.rs @@ -5,6 +5,7 @@ use super::*; impl Pallet { /// Checks [`TotalIssuance`] equals the sum of currency issuance, total stake, and total subnet /// locked. + #[allow(clippy::expect_used)] pub(crate) fn check_total_issuance() -> Result<(), sp_runtime::TryRuntimeError> { // Get the total currency issuance let currency_issuance = ::Currency::total_issuance(); diff --git a/pallets/swap-interface/Cargo.toml b/pallets/swap-interface/Cargo.toml index a5ae9ac75f..e4392c6d67 100644 --- a/pallets/swap-interface/Cargo.toml +++ b/pallets/swap-interface/Cargo.toml @@ -9,6 +9,7 @@ frame-support.workspace = true scale-info.workspace = true substrate-fixed.workspace = true subtensor-runtime-common.workspace = true +subtensor-macros.workspace = true [lints] workspace = true diff --git a/pallets/swap-interface/src/lib.rs b/pallets/swap-interface/src/lib.rs index 4998bbe379..ae7d375f97 100644 --- a/pallets/swap-interface/src/lib.rs +++ b/pallets/swap-interface/src/lib.rs @@ -1,33 +1,46 @@ #![cfg_attr(not(feature = "std"), no_std)] +use core::ops::Neg; use frame_support::pallet_prelude::*; use substrate_fixed::types::U96F32; -use subtensor_runtime_common::{AlphaCurrency, NetUid, TaoCurrency}; +use subtensor_macros::freeze_struct; +use subtensor_runtime_common::{AlphaCurrency, Currency, NetUid, TaoCurrency}; -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub enum OrderType { - Sell, - Buy, -} +pub use order::*; + +mod order; -pub trait SwapHandler { +pub trait SwapEngine: DefaultPriceLimit { fn swap( netuid: NetUid, - order_t: OrderType, - amount: u64, - price_limit: u64, + order: O, + price_limit: TaoCurrency, + drop_fees: bool, + should_rollback: bool, + ) -> Result, DispatchError>; +} + +pub trait SwapHandler { + fn swap( + netuid: NetUid, + order: O, + price_limit: TaoCurrency, drop_fees: bool, should_rollback: bool, - ) -> Result; - fn sim_swap( + ) -> Result, DispatchError> + where + Self: SwapEngine; + fn sim_swap( netuid: NetUid, - order_t: OrderType, - amount: 
u64, - ) -> Result; - fn approx_fee_amount(netuid: NetUid, amount: u64) -> u64; + order: O, + ) -> Result, DispatchError> + where + Self: SwapEngine; + + fn approx_fee_amount(netuid: NetUid, amount: T) -> T; fn current_alpha_price(netuid: NetUid) -> U96F32; - fn max_price() -> u64; - fn min_price() -> u64; + fn max_price() -> C; + fn min_price() -> C; fn adjust_protocol_liquidity( netuid: NetUid, tao_delta: TaoCurrency, @@ -39,12 +52,47 @@ pub trait SwapHandler { fn clear_protocol_liquidity(netuid: NetUid) -> DispatchResult; } -#[derive(Debug, PartialEq)] -pub struct SwapResult { - pub amount_paid_in: u64, - pub amount_paid_out: u64, - pub fee_paid: u64, - // For calculation of new tao/alpha reserves - pub tao_reserve_delta: i64, - pub alpha_reserve_delta: i64, +pub trait DefaultPriceLimit +where + PaidIn: Currency, + PaidOut: Currency, +{ + fn default_price_limit() -> C; +} + +#[freeze_struct("d3d0b124fe5a97c8")] +#[derive(Decode, Encode, PartialEq, Eq, Clone, Debug, TypeInfo)] +pub struct SwapResult +where + PaidIn: Currency, + PaidOut: Currency, +{ + pub amount_paid_in: PaidIn, + pub amount_paid_out: PaidOut, + pub fee_paid: PaidIn, +} + +impl SwapResult +where + PaidIn: Currency, + PaidOut: Currency, +{ + pub fn paid_in_reserve_delta(&self) -> i128 { + self.amount_paid_in.to_u64() as i128 + } + + pub fn paid_in_reserve_delta_i64(&self) -> i64 { + self.paid_in_reserve_delta() + .clamp(i64::MIN as i128, i64::MAX as i128) as i64 + } + + pub fn paid_out_reserve_delta(&self) -> i128 { + (self.amount_paid_out.to_u64() as i128).neg() + } + + pub fn paid_out_reserve_delta_i64(&self) -> i64 { + (self.amount_paid_out.to_u64() as i128) + .neg() + .clamp(i64::MIN as i128, i64::MAX as i128) as i64 + } } diff --git a/pallets/swap-interface/src/order.rs b/pallets/swap-interface/src/order.rs new file mode 100644 index 0000000000..1576283fd5 --- /dev/null +++ b/pallets/swap-interface/src/order.rs @@ -0,0 +1,87 @@ +use core::marker::PhantomData; + +use substrate_fixed::types::U64F64; +use subtensor_runtime_common::{AlphaCurrency, Currency, CurrencyReserve, TaoCurrency}; + +pub trait Order: Clone { + type PaidIn: Currency; + type PaidOut: Currency; + type ReserveIn: CurrencyReserve; + type ReserveOut: CurrencyReserve; + + fn with_amount(amount: impl Into) -> Self; + fn amount(&self) -> Self::PaidIn; + fn is_beyond_price_limit(&self, alpha_sqrt_price: U64F64, limit_sqrt_price: U64F64) -> bool; +} + +#[derive(Clone, Default)] +pub struct GetAlphaForTao +where + ReserveIn: CurrencyReserve, + ReserveOut: CurrencyReserve, +{ + amount: TaoCurrency, + _phantom: PhantomData<(ReserveIn, ReserveOut)>, +} + +impl Order for GetAlphaForTao +where + ReserveIn: CurrencyReserve + Clone, + ReserveOut: CurrencyReserve + Clone, +{ + type PaidIn = TaoCurrency; + type PaidOut = AlphaCurrency; + type ReserveIn = ReserveIn; + type ReserveOut = ReserveOut; + + fn with_amount(amount: impl Into) -> Self { + Self { + amount: amount.into(), + _phantom: PhantomData, + } + } + + fn amount(&self) -> TaoCurrency { + self.amount + } + + fn is_beyond_price_limit(&self, alpha_sqrt_price: U64F64, limit_sqrt_price: U64F64) -> bool { + alpha_sqrt_price < limit_sqrt_price + } +} + +#[derive(Clone, Default)] +pub struct GetTaoForAlpha +where + ReserveIn: CurrencyReserve, + ReserveOut: CurrencyReserve, +{ + amount: AlphaCurrency, + _phantom: PhantomData<(ReserveIn, ReserveOut)>, +} + +impl Order for GetTaoForAlpha +where + ReserveIn: CurrencyReserve + Clone, + ReserveOut: CurrencyReserve + Clone, +{ + type PaidIn = AlphaCurrency; + type PaidOut 
= TaoCurrency; + type ReserveIn = ReserveIn; + type ReserveOut = ReserveOut; + + fn with_amount(amount: impl Into) -> Self { + Self { + amount: amount.into(), + _phantom: PhantomData, + } + } + + fn amount(&self) -> AlphaCurrency { + self.amount + } + + fn is_beyond_price_limit(&self, alpha_sqrt_price: U64F64, limit_sqrt_price: U64F64) -> bool { + alpha_sqrt_price > limit_sqrt_price + } +} diff --git a/pallets/swap/Cargo.toml b/pallets/swap/Cargo.toml index a013b8468e..7de8e49c1d 100644 --- a/pallets/swap/Cargo.toml +++ b/pallets/swap/Cargo.toml @@ -23,7 +23,7 @@ substrate-fixed.workspace = true pallet-subtensor-swap-runtime-api.workspace = true subtensor-macros.workspace = true -subtensor-runtime-common.workspace = true +subtensor-runtime-common = { workspace = true, features = ["approx"] } subtensor-swap-interface.workspace = true [dev-dependencies] diff --git a/pallets/swap/src/lib.rs b/pallets/swap/src/lib.rs index b9d05bd435..6257df852b 100644 --- a/pallets/swap/src/lib.rs +++ b/pallets/swap/src/lib.rs @@ -1,7 +1,6 @@ #![cfg_attr(not(feature = "std"), no_std)] use substrate_fixed::types::U64F64; -use subtensor_swap_interface::OrderType; pub mod pallet; pub mod position; diff --git a/pallets/swap/src/mock.rs b/pallets/swap/src/mock.rs index cbe71ec020..aacdf90835 100644 --- a/pallets/swap/src/mock.rs +++ b/pallets/swap/src/mock.rs @@ -14,9 +14,13 @@ use sp_runtime::{ BuildStorage, Vec, traits::{BlakeTwo256, IdentityLookup}, }; -use subtensor_runtime_common::{AlphaCurrency, BalanceOps, NetUid, SubnetInfo, TaoCurrency}; +use substrate_fixed::types::U64F64; +use subtensor_runtime_common::{ + AlphaCurrency, BalanceOps, Currency, CurrencyReserve, NetUid, SubnetInfo, TaoCurrency, +}; +use subtensor_swap_interface::Order; -use crate::pallet::EnabledUserLiquidity; +use crate::pallet::{EnabledUserLiquidity, FeeGlobalAlpha, FeeGlobalTao}; construct_runtime!( pub enum Test { @@ -83,11 +87,11 @@ parameter_types! 
{ pub const MinimumReserves: NonZeroU64 = NonZeroU64::new(1).unwrap(); } -// Mock implementor of SubnetInfo trait -pub struct MockLiquidityProvider; +#[derive(Clone)] +pub struct TaoReserve; -impl SubnetInfo for MockLiquidityProvider { - fn tao_reserve(netuid: NetUid) -> TaoCurrency { +impl CurrencyReserve for TaoReserve { + fn reserve(netuid: NetUid) -> TaoCurrency { match netuid.into() { 123u16 => 10_000, WRAPPING_FEES_NETUID => 100_000_000_000, @@ -96,7 +100,15 @@ impl SubnetInfo for MockLiquidityProvider { .into() } - fn alpha_reserve(netuid: NetUid) -> AlphaCurrency { + fn increase_provided(_: NetUid, _: TaoCurrency) {} + fn decrease_provided(_: NetUid, _: TaoCurrency) {} +} + +#[derive(Clone)] +pub struct AlphaReserve; + +impl CurrencyReserve for AlphaReserve { + fn reserve(netuid: NetUid) -> AlphaCurrency { match netuid.into() { 123u16 => 10_000.into(), WRAPPING_FEES_NETUID => 400_000_000_000.into(), @@ -104,6 +116,65 @@ impl SubnetInfo for MockLiquidityProvider { } } + fn increase_provided(_: NetUid, _: AlphaCurrency) {} + fn decrease_provided(_: NetUid, _: AlphaCurrency) {} +} + +pub type GetAlphaForTao = subtensor_swap_interface::GetAlphaForTao; +pub type GetTaoForAlpha = subtensor_swap_interface::GetTaoForAlpha; + +pub(crate) trait GlobalFeeInfo: Currency { + fn global_fee(&self, netuid: NetUid) -> U64F64; +} + +impl GlobalFeeInfo for TaoCurrency { + fn global_fee(&self, netuid: NetUid) -> U64F64 { + FeeGlobalTao::::get(netuid) + } +} + +impl GlobalFeeInfo for AlphaCurrency { + fn global_fee(&self, netuid: NetUid) -> U64F64 { + FeeGlobalAlpha::::get(netuid) + } +} + +pub(crate) trait TestExt { + fn approx_expected_swap_output( + sqrt_current_price: f64, + liquidity_before: f64, + order_liquidity: f64, + ) -> f64; +} + +impl TestExt for Test { + fn approx_expected_swap_output( + sqrt_current_price: f64, + liquidity_before: f64, + order_liquidity: f64, + ) -> f64 { + let denom = sqrt_current_price * (sqrt_current_price * liquidity_before + order_liquidity); + let per_order_liq = liquidity_before / denom; + per_order_liq * order_liquidity + } +} + +impl TestExt for Test { + fn approx_expected_swap_output( + sqrt_current_price: f64, + liquidity_before: f64, + order_liquidity: f64, + ) -> f64 { + let denom = liquidity_before / sqrt_current_price + order_liquidity; + let per_order_liq = sqrt_current_price * liquidity_before / denom; + per_order_liq * order_liquidity + } +} + +// Mock implementor of SubnetInfo trait +pub struct MockLiquidityProvider; + +impl SubnetInfo for MockLiquidityProvider { fn exists(netuid: NetUid) -> bool { netuid != NON_EXISTENT_NETUID.into() } @@ -197,15 +268,12 @@ impl BalanceOps for MockBalanceOps { ) -> Result { Ok(alpha) } - - fn increase_provided_tao_reserve(_netuid: NetUid, _tao: TaoCurrency) {} - fn decrease_provided_tao_reserve(_netuid: NetUid, _tao: TaoCurrency) {} - fn increase_provided_alpha_reserve(_netuid: NetUid, _alpha: AlphaCurrency) {} - fn decrease_provided_alpha_reserve(_netuid: NetUid, _alpha: AlphaCurrency) {} } impl crate::pallet::Config for Test { type SubnetInfo = MockLiquidityProvider; + type TaoReserve = TaoReserve; + type AlphaReserve = AlphaReserve; type BalanceOps = MockBalanceOps; type ProtocolId = SwapProtocolId; type MaxFeeRate = MaxFeeRate; diff --git a/pallets/swap/src/pallet/impls.rs b/pallets/swap/src/pallet/impls.rs index 9a41283426..34b5e624e6 100644 --- a/pallets/swap/src/pallet/impls.rs +++ b/pallets/swap/src/pallet/impls.rs @@ -1,4 +1,3 @@ -use core::marker::PhantomData; use core::ops::Neg; use 
frame_support::storage::{TransactionOutcome, transactional}; @@ -8,13 +7,16 @@ use sp_arithmetic::helpers_128bit; use sp_runtime::{DispatchResult, Vec, traits::AccountIdConversion}; use substrate_fixed::types::{I64F64, U64F64, U96F32}; use subtensor_runtime_common::{ - AlphaCurrency, BalanceOps, Currency, NetUid, SubnetInfo, TaoCurrency, + AlphaCurrency, BalanceOps, Currency, CurrencyReserve, NetUid, SubnetInfo, TaoCurrency, +}; +use subtensor_swap_interface::{ + DefaultPriceLimit, Order as OrderT, SwapEngine, SwapHandler, SwapResult, }; -use subtensor_swap_interface::{SwapHandler, SwapResult}; use super::pallet::*; +use super::swap_step::{BasicSwapStep, SwapStep, SwapStepAction}; use crate::{ - OrderType, SqrtPrice, + SqrtPrice, position::{Position, PositionId}, tick::{ActiveTickIndexManager, Tick, TickIndex}, }; @@ -42,247 +44,6 @@ pub struct RemoveLiquidityResult { pub tick_high: TickIndex, pub liquidity: u64, } -/// A struct representing a single swap step with all its parameters and state -struct SwapStep { - // Input parameters - netuid: NetUid, - order_type: OrderType, - drop_fees: bool, - - // Computed values - current_liquidity: U64F64, - possible_delta_in: u64, - - // Ticks and prices (current, limit, edge, target) - target_sqrt_price: SqrtPrice, - limit_sqrt_price: SqrtPrice, - current_sqrt_price: SqrtPrice, - edge_sqrt_price: SqrtPrice, - edge_tick: TickIndex, - - // Result values - action: SwapStepAction, - delta_in: u64, - final_price: SqrtPrice, - fee: u64, - - // Phantom data to use T - _phantom: PhantomData, -} - -impl SwapStep { - /// Creates and initializes a new swap step - fn new( - netuid: NetUid, - order_type: OrderType, - amount_remaining: u64, - limit_sqrt_price: SqrtPrice, - drop_fees: bool, - ) -> Self { - // Calculate prices and ticks - let current_tick = CurrentTick::::get(netuid); - let current_sqrt_price = Pallet::::current_price_sqrt(netuid); - let edge_tick = Pallet::::tick_edge(netuid, current_tick, order_type); - let edge_sqrt_price = edge_tick.as_sqrt_price_bounded(); - - let fee = Pallet::::calculate_fee_amount(netuid, amount_remaining, drop_fees); - let possible_delta_in = amount_remaining.saturating_sub(fee); - - // Target price and quantities - let current_liquidity = U64F64::saturating_from_num(CurrentLiquidity::::get(netuid)); - let target_sqrt_price = Pallet::::sqrt_price_target( - order_type, - current_liquidity, - current_sqrt_price, - possible_delta_in, - ); - - Self { - netuid, - order_type, - drop_fees, - target_sqrt_price, - limit_sqrt_price, - current_sqrt_price, - edge_sqrt_price, - edge_tick, - possible_delta_in, - current_liquidity, - action: SwapStepAction::Stop, - delta_in: 0, - final_price: target_sqrt_price, - fee, - _phantom: PhantomData, - } - } - - /// Execute the swap step and return the result - fn execute(&mut self) -> Result> { - self.determine_action(); - self.process_swap() - } - - /// Returns True if sq_price1 is closer to the current price than sq_price2 - /// in terms of order direction. 
- /// For buying: sq_price1 <= sq_price2 - /// For selling: sq_price1 >= sq_price2 - /// - fn price_is_closer(&self, sq_price1: &SqrtPrice, sq_price2: &SqrtPrice) -> bool { - match self.order_type { - OrderType::Buy => sq_price1 <= sq_price2, - OrderType::Sell => sq_price1 >= sq_price2, - } - } - - /// Determine the appropriate action for this swap step - fn determine_action(&mut self) { - let mut recalculate_fee = false; - - // Calculate the stopping price: The price at which we either reach the limit price, - // exchange the full amount, or reach the edge price. - if self.price_is_closer(&self.target_sqrt_price, &self.limit_sqrt_price) - && self.price_is_closer(&self.target_sqrt_price, &self.edge_sqrt_price) - { - // Case 1. target_quantity is the lowest - // The trade completely happens within one tick, no tick crossing happens. - self.action = SwapStepAction::Stop; - self.final_price = self.target_sqrt_price; - self.delta_in = self.possible_delta_in; - } else if self.price_is_closer(&self.limit_sqrt_price, &self.target_sqrt_price) - && self.price_is_closer(&self.limit_sqrt_price, &self.edge_sqrt_price) - { - // Case 2. lim_quantity is the lowest - // The trade also completely happens within one tick, no tick crossing happens. - self.action = SwapStepAction::Stop; - self.final_price = self.limit_sqrt_price; - self.delta_in = Self::delta_in( - self.order_type, - self.current_liquidity, - self.current_sqrt_price, - self.limit_sqrt_price, - ); - recalculate_fee = true; - } else { - // Case 3. edge_quantity is the lowest - // Tick crossing is likely - self.action = SwapStepAction::Crossing; - self.delta_in = Self::delta_in( - self.order_type, - self.current_liquidity, - self.current_sqrt_price, - self.edge_sqrt_price, - ); - self.final_price = self.edge_sqrt_price; - recalculate_fee = true; - } - - log::trace!("\tAction : {:?}", self.action); - log::trace!( - "\tCurrent Price : {}", - self.current_sqrt_price - .saturating_mul(self.current_sqrt_price) - ); - log::trace!( - "\tTarget Price : {}", - self.target_sqrt_price - .saturating_mul(self.target_sqrt_price) - ); - log::trace!( - "\tLimit Price : {}", - self.limit_sqrt_price.saturating_mul(self.limit_sqrt_price) - ); - log::trace!( - "\tEdge Price : {}", - self.edge_sqrt_price.saturating_mul(self.edge_sqrt_price) - ); - log::trace!("\tDelta In : {}", self.delta_in); - - // Because on step creation we calculate fee off the total amount, we might need to recalculate it - // in case if we hit the limit price or the edge price. - if recalculate_fee { - let u16_max = U64F64::saturating_from_num(u16::MAX); - let fee_rate = if self.drop_fees { - U64F64::saturating_from_num(0) - } else { - U64F64::saturating_from_num(FeeRate::::get(self.netuid)) - }; - let delta_fixed = U64F64::saturating_from_num(self.delta_in); - self.fee = delta_fixed - .saturating_mul(fee_rate.safe_div(u16_max.saturating_sub(fee_rate))) - .saturating_to_num::(); - } - - // Now correct the action if we stopped exactly at the edge no matter what was the case above - // Because order type buy moves the price up and tick semi-open interval doesn't include its right - // point, we cross on buys and stop on sells. 
- let natural_reason_stop_price = - if self.price_is_closer(&self.limit_sqrt_price, &self.target_sqrt_price) { - self.limit_sqrt_price - } else { - self.target_sqrt_price - }; - if natural_reason_stop_price == self.edge_sqrt_price { - self.action = match self.order_type { - OrderType::Buy => SwapStepAction::Crossing, - OrderType::Sell => SwapStepAction::Stop, - }; - } - } - - /// Process a single step of a swap - fn process_swap(&self) -> Result> { - // Hold the fees - Pallet::::add_fees(self.netuid, self.order_type, self.fee); - let delta_out = Pallet::::convert_deltas(self.netuid, self.order_type, self.delta_in); - log::trace!("\tDelta Out : {delta_out:?}"); - - if self.action == SwapStepAction::Crossing { - let mut tick = Ticks::::get(self.netuid, self.edge_tick).unwrap_or_default(); - tick.fees_out_tao = I64F64::saturating_from_num(FeeGlobalTao::::get(self.netuid)) - .saturating_sub(tick.fees_out_tao); - tick.fees_out_alpha = - I64F64::saturating_from_num(FeeGlobalAlpha::::get(self.netuid)) - .saturating_sub(tick.fees_out_alpha); - Pallet::::update_liquidity_at_crossing(self.netuid, self.order_type)?; - Ticks::::insert(self.netuid, self.edge_tick, tick); - } - - // Update current price - AlphaSqrtPrice::::set(self.netuid, self.final_price); - - // Update current tick - let new_current_tick = TickIndex::from_sqrt_price_bounded(self.final_price); - CurrentTick::::set(self.netuid, new_current_tick); - - Ok(SwapStepResult { - amount_to_take: self.delta_in.saturating_add(self.fee), - fee_paid: self.fee, - delta_in: self.delta_in, - delta_out, - }) - } - - /// Get the input amount needed to reach the target price - fn delta_in( - order_type: OrderType, - liquidity_curr: U64F64, - sqrt_price_curr: SqrtPrice, - sqrt_price_target: SqrtPrice, - ) -> u64 { - let one = U64F64::saturating_from_num(1); - - (match order_type { - OrderType::Sell => liquidity_curr.saturating_mul( - one.safe_div(sqrt_price_target.into()) - .saturating_sub(one.safe_div(sqrt_price_curr)), - ), - OrderType::Buy => { - liquidity_curr.saturating_mul(sqrt_price_target.saturating_sub(sqrt_price_curr)) - } - }) - .saturating_to_num::() - } -} impl Pallet { pub fn current_price(netuid: NetUid) -> U96F32 { @@ -292,8 +53,8 @@ impl Pallet { let sqrt_price = AlphaSqrtPrice::::get(netuid); U96F32::saturating_from_num(sqrt_price.saturating_mul(sqrt_price)) } else { - let tao_reserve = T::SubnetInfo::tao_reserve(netuid.into()); - let alpha_reserve = T::SubnetInfo::alpha_reserve(netuid.into()); + let tao_reserve = T::TaoReserve::reserve(netuid.into()); + let alpha_reserve = T::AlphaReserve::reserve(netuid.into()); if !alpha_reserve.is_zero() { U96F32::saturating_from_num(tao_reserve) .saturating_div(U96F32::saturating_from_num(alpha_reserve)) @@ -306,10 +67,6 @@ impl Pallet { } } - pub fn current_price_sqrt(netuid: NetUid) -> SqrtPrice { - AlphaSqrtPrice::::get(netuid) - } - // initializes V3 swap for a subnet if needed pub(super) fn maybe_initialize_v3(netuid: NetUid) -> Result<(), Error> { if SwapV3Initialized::::get(netuid) { @@ -317,9 +74,10 @@ impl Pallet { } // Initialize the v3: - // Reserves are re-purposed, nothing to set, just query values for liquidity and price calculation - let tao_reserve = ::SubnetInfo::tao_reserve(netuid.into()); - let alpha_reserve = ::SubnetInfo::alpha_reserve(netuid.into()); + // Reserves are re-purposed, nothing to set, just query values for liquidity and price + // calculation + let tao_reserve = T::TaoReserve::reserve(netuid.into()); + let alpha_reserve = T::AlphaReserve::reserve(netuid.into()); // 
Set price let price = U64F64::saturating_from_num(tao_reserve) @@ -355,6 +113,41 @@ impl Pallet { Ok(()) } + pub(crate) fn get_proportional_alpha_tao_and_remainders( + sqrt_alpha_price: U64F64, + amount_tao: TaoCurrency, + amount_alpha: AlphaCurrency, + ) -> (TaoCurrency, AlphaCurrency, TaoCurrency, AlphaCurrency) { + let price = sqrt_alpha_price.saturating_mul(sqrt_alpha_price); + let tao_equivalent: u64 = U64F64::saturating_from_num(u64::from(amount_alpha)) + .saturating_mul(price) + .saturating_to_num(); + let amount_tao_u64 = u64::from(amount_tao); + + if tao_equivalent <= amount_tao_u64 { + // Too much or just enough TAO + ( + tao_equivalent.into(), + amount_alpha, + amount_tao.saturating_sub(TaoCurrency::from(tao_equivalent)), + 0.into(), + ) + } else { + // Too much Alpha + let alpha_equivalent: u64 = U64F64::saturating_from_num(u64::from(amount_tao)) + .safe_div(price) + .saturating_to_num(); + ( + amount_tao, + alpha_equivalent.into(), + 0.into(), + u64::from(amount_alpha) + .saturating_sub(alpha_equivalent) + .into(), + ) + } + } + /// Adjusts protocol liquidity with new values of TAO and Alpha reserve pub(super) fn adjust_protocol_liquidity( netuid: NetUid, @@ -371,17 +164,31 @@ impl Pallet { // Claim protocol fees and add them to liquidity let (tao_fees, alpha_fees) = position.collect_fees(); + // Add fee reservoirs and get proportional amounts + let current_sqrt_price = AlphaSqrtPrice::::get(netuid); + let tao_reservoir = ScrapReservoirTao::::get(netuid); + let alpha_reservoir = ScrapReservoirAlpha::::get(netuid); + let (corrected_tao_delta, corrected_alpha_delta, tao_scrap, alpha_scrap) = + Self::get_proportional_alpha_tao_and_remainders( + current_sqrt_price, + tao_delta + .saturating_add(TaoCurrency::from(tao_fees)) + .saturating_add(tao_reservoir), + alpha_delta + .saturating_add(AlphaCurrency::from(alpha_fees)) + .saturating_add(alpha_reservoir), + ); + + // Update scrap reservoirs + ScrapReservoirTao::::insert(netuid, tao_scrap); + ScrapReservoirAlpha::::insert(netuid, alpha_scrap); + // Adjust liquidity - let current_sqrt_price = Pallet::::current_price_sqrt(netuid); let maybe_token_amounts = position.to_token_amounts(current_sqrt_price); if let Ok((tao, alpha)) = maybe_token_amounts { // Get updated reserves, calculate liquidity - let new_tao_reserve = tao - .saturating_add(tao_delta.to_u64()) - .saturating_add(tao_fees); - let new_alpha_reserve = alpha - .saturating_add(alpha_delta.to_u64()) - .saturating_add(alpha_fees); + let new_tao_reserve = tao.saturating_add(corrected_tao_delta.to_u64()); + let new_alpha_reserve = alpha.saturating_add(corrected_alpha_delta.to_u64()); let new_liquidity = helpers_128bit::sqrt( (new_tao_reserve as u128).saturating_mul(new_alpha_reserve as u128), ) as u64; @@ -413,7 +220,8 @@ impl Pallet { /// - `order_type`: The type of the swap (e.g., Buy or Sell). /// - `amount`: The amount of tokens to swap. /// - `limit_sqrt_price`: A price limit (expressed as a square root) to bound the swap. - /// - `simulate`: If `true`, the function runs in simulation mode and does not persist any changes. + /// - `simulate`: If `true`, the function runs in simulation mode and does not persist any + /// changes. /// /// # Returns /// Returns a [`Result`] with a [`SwapResult`] on success, or a [`DispatchError`] on failure. @@ -425,25 +233,26 @@ impl Pallet { /// # Simulation Mode /// When `simulate` is set to `true`, the function: /// 1. Executes all logic without persisting any state changes (i.e., performs a dry run). - /// 2. 
Skips reserve checks — it may return an `amount_paid_out` greater than the available reserve. + /// 2. Skips reserve checks — it may return an `amount_paid_out` greater than the available + /// reserve. /// /// Use simulation mode to preview the outcome of a swap without modifying the blockchain state. - pub fn do_swap( + pub(crate) fn do_swap( netuid: NetUid, - order_type: OrderType, - amount: u64, + order: Order, limit_sqrt_price: SqrtPrice, drop_fees: bool, simulate: bool, - ) -> Result { + ) -> Result, DispatchError> + where + Order: OrderT, + BasicSwapStep: SwapStep, + { transactional::with_transaction(|| { - // Read alpha and tao reserves before transaction - let tao_reserve = T::SubnetInfo::tao_reserve(netuid.into()); - let alpha_reserve = T::SubnetInfo::alpha_reserve(netuid.into()); + let reserve = Order::ReserveOut::reserve(netuid.into()); - let mut result = - Self::swap_inner(netuid, order_type, amount, limit_sqrt_price, drop_fees) - .map_err(Into::into); + let result = Self::swap_inner::(netuid, order, limit_sqrt_price, drop_fees) + .map_err(Into::into); if simulate || result.is_err() { // Simulation only @@ -453,13 +262,10 @@ impl Pallet { // Check if reserves are overused if let Ok(ref swap_result) = result { - let checked_reserve = match order_type { - OrderType::Buy => alpha_reserve.to_u64(), - OrderType::Sell => tao_reserve.to_u64(), - }; - - if checked_reserve < swap_result.amount_paid_out { - result = Err(Error::::InsufficientLiquidity.into()); + if reserve < swap_result.amount_paid_out { + return TransactionOutcome::Commit(Err( + Error::::InsufficientLiquidity.into() + )); } } @@ -468,51 +274,40 @@ impl Pallet { }) } - fn swap_inner( + fn swap_inner( netuid: NetUid, - order_type: OrderType, - amount: u64, + order: Order, limit_sqrt_price: SqrtPrice, drop_fees: bool, - ) -> Result> { - match order_type { - OrderType::Buy => ensure!( - T::SubnetInfo::alpha_reserve(netuid.into()).to_u64() - >= T::MinimumReserve::get().get(), - Error::::ReservesTooLow - ), - OrderType::Sell => ensure!( - T::SubnetInfo::tao_reserve(netuid.into()).to_u64() - >= T::MinimumReserve::get().get(), - Error::::ReservesTooLow - ), - } + ) -> Result, Error> + where + Order: OrderT, + BasicSwapStep: SwapStep, + { + ensure!( + Order::ReserveOut::reserve(netuid).to_u64() >= T::MinimumReserve::get().get(), + Error::::ReservesTooLow + ); Self::maybe_initialize_v3(netuid)?; // Because user specifies the limit price, check that it is in fact beoynd the current one - match order_type { - OrderType::Buy => ensure!( - AlphaSqrtPrice::::get(netuid) < limit_sqrt_price, - Error::::PriceLimitExceeded - ), - OrderType::Sell => ensure!( - AlphaSqrtPrice::::get(netuid) > limit_sqrt_price, - Error::::PriceLimitExceeded - ), - }; + ensure!( + order.is_beyond_price_limit(AlphaSqrtPrice::::get(netuid), limit_sqrt_price), + Error::::PriceLimitExceeded + ); - let mut amount_remaining = amount; - let mut amount_paid_out: u64 = 0; + let mut amount_remaining = order.amount(); + let mut amount_paid_out = Order::PaidOut::ZERO; let mut iteration_counter: u16 = 0; - let mut in_acc: u64 = 0; - let mut fee_acc: u64 = 0; + let mut in_acc = Order::PaidIn::ZERO; + let mut fee_acc = Order::PaidIn::ZERO; log::trace!("======== Start Swap ========"); log::trace!("Amount Remaining: {amount_remaining}"); // Swap one tick at a time until we reach one of the stop conditions - while amount_remaining > 0 { + while !amount_remaining.is_zero() { log::trace!("\nIteration: {iteration_counter}"); log::trace!( "\tCurrent Liquidity: {}", @@ -520,9 
+315,8 @@ impl Pallet { ); // Create and execute a swap step - let mut swap_step = SwapStep::::new( + let mut swap_step = BasicSwapStep::::new( netuid, - order_type, amount_remaining, limit_sqrt_price, drop_fees, @@ -535,13 +329,13 @@ impl Pallet { amount_remaining = amount_remaining.saturating_sub(swap_result.amount_to_take); amount_paid_out = amount_paid_out.saturating_add(swap_result.delta_out); - if swap_step.action == SwapStepAction::Stop { - amount_remaining = 0; + if swap_step.action() == SwapStepAction::Stop { + amount_remaining = Order::PaidIn::ZERO; } // The swap step didn't exchange anything - if swap_result.amount_to_take == 0 { - amount_remaining = 0; + if swap_result.amount_to_take.is_zero() { + amount_remaining = Order::PaidIn::ZERO; } iteration_counter = iteration_counter.saturating_add(1); @@ -555,233 +349,36 @@ impl Pallet { log::trace!("\nAmount Paid Out: {amount_paid_out}"); log::trace!("======== End Swap ========"); - let (tao_reserve_delta, alpha_reserve_delta) = match order_type { - OrderType::Buy => (in_acc as i64, (amount_paid_out as i64).neg()), - OrderType::Sell => ((amount_paid_out as i64).neg(), in_acc as i64), - }; - Ok(SwapResult { amount_paid_in: in_acc, amount_paid_out, fee_paid: fee_acc, - tao_reserve_delta, - alpha_reserve_delta, }) } - /// Get the tick at the current tick edge for the given direction (order type) If - /// order type is Buy, then edge tick is the high tick, otherwise it is the low - /// tick. - /// - /// If anything is wrong with tick math and it returns Err, we just abort the deal, i.e. return - /// the edge that is impossible to execute - fn tick_edge(netuid: NetUid, current_tick: TickIndex, order_type: OrderType) -> TickIndex { - match order_type { - OrderType::Buy => ActiveTickIndexManager::::find_closest_higher( - netuid, - current_tick.next().unwrap_or(TickIndex::MAX), - ) - .unwrap_or(TickIndex::MAX), - OrderType::Sell => { - let current_price = Pallet::::current_price_sqrt(netuid); - let current_tick_price = current_tick.as_sqrt_price_bounded(); - let is_active = ActiveTickIndexManager::::tick_is_active(netuid, current_tick); - - if is_active && current_price > current_tick_price { - ActiveTickIndexManager::::find_closest_lower(netuid, current_tick) - .unwrap_or(TickIndex::MIN) - } else { - ActiveTickIndexManager::::find_closest_lower( - netuid, - current_tick.prev().unwrap_or(TickIndex::MIN), - ) - .unwrap_or(TickIndex::MIN) - } - } - } - } - /// Calculate fee amount /// /// Fee is provided by state ops as u16-normalized value. 
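As a quick illustration of the u16 normalization that `calculate_fee_amount` below relies on, here is a standalone sketch in plain integer/float arithmetic; the helper name and the f64 representation are illustrative only, since the pallet itself does this with U64F64 fixed-point math:

```rust
/// Hypothetical helper mirroring the normalization described above:
/// the stored u16 fee rate is interpreted as a fraction of u16::MAX.
fn approx_fee(amount: u64, fee_rate_u16: u16) -> u64 {
    let rate = fee_rate_u16 as f64 / u16::MAX as f64;
    (amount as f64 * rate) as u64
}

fn main() {
    // A stored rate of 196 corresponds to roughly 196 / 65_535 ~= 0.3%,
    // so a 1_000_000 unit order pays about 2_990 units in fees.
    assert_eq!(approx_fee(1_000_000, 196), 2_990);
}
```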
- fn calculate_fee_amount(netuid: NetUid, amount: u64, drop_fees: bool) -> u64 { + pub(crate) fn calculate_fee_amount( + netuid: NetUid, + amount: C, + drop_fees: bool, + ) -> C { if drop_fees { - 0 - } else { - match T::SubnetInfo::mechanism(netuid) { - 1 => { - let fee_rate = U64F64::saturating_from_num(FeeRate::::get(netuid)) - .safe_div(U64F64::saturating_from_num(u16::MAX)); - U64F64::saturating_from_num(amount) - .saturating_mul(fee_rate) - .saturating_to_num::() - } - _ => 0, - } - } - } - - /// Add fees to the global fee counters - fn add_fees(netuid: NetUid, order_type: OrderType, fee: u64) { - let liquidity_curr = Self::current_liquidity_safe(netuid); - - if liquidity_curr == 0 { - return; - } - - let fee_global_tao = FeeGlobalTao::::get(netuid); - let fee_global_alpha = FeeGlobalAlpha::::get(netuid); - let fee_fixed = U64F64::saturating_from_num(fee); - - match order_type { - OrderType::Sell => { - FeeGlobalAlpha::::set( - netuid, - fee_global_alpha.saturating_add(fee_fixed.safe_div(liquidity_curr)), - ); - } - OrderType::Buy => { - FeeGlobalTao::::set( - netuid, - fee_global_tao.saturating_add(fee_fixed.safe_div(liquidity_curr)), - ); - } - } - } - - /// Convert input amount (delta_in) to output amount (delta_out) - /// - /// This is the core method of uniswap V3 that tells how much output token is given for an - /// amount of input token within one price tick. - pub(super) fn convert_deltas(netuid: NetUid, order_type: OrderType, delta_in: u64) -> u64 { - // Skip conversion if delta_in is zero - if delta_in == 0 { - return 0; - } - - let liquidity_curr = SqrtPrice::saturating_from_num(CurrentLiquidity::::get(netuid)); - let sqrt_price_curr = Pallet::::current_price_sqrt(netuid); - let delta_fixed = SqrtPrice::saturating_from_num(delta_in); - - // Calculate result based on order type with proper fixed-point math - // Using safe math operations throughout to prevent overflows - let result = match order_type { - OrderType::Sell => { - // liquidity_curr / (liquidity_curr / sqrt_price_curr + delta_fixed); - let denom = liquidity_curr - .safe_div(sqrt_price_curr) - .saturating_add(delta_fixed); - let a = liquidity_curr.safe_div(denom); - // a * sqrt_price_curr; - let b = a.saturating_mul(sqrt_price_curr); - - // delta_fixed * b; - delta_fixed.saturating_mul(b) - } - OrderType::Buy => { - // (liquidity_curr * sqrt_price_curr + delta_fixed) * sqrt_price_curr; - let a = liquidity_curr - .saturating_mul(sqrt_price_curr) - .saturating_add(delta_fixed) - .saturating_mul(sqrt_price_curr); - // liquidity_curr / a; - let b = liquidity_curr.safe_div(a); - // b * delta_fixed; - b.saturating_mul(delta_fixed) - } - }; - - result.saturating_to_num::() - } - - /// Get the target square root price based on the input amount - /// - /// This is the price that would be reached if - /// - There are no liquidity positions other than protocol liquidity - /// - Full delta_in amount is executed - /// - fn sqrt_price_target( - order_type: OrderType, - liquidity_curr: U64F64, - sqrt_price_curr: SqrtPrice, - delta_in: u64, - ) -> SqrtPrice { - let delta_fixed = U64F64::saturating_from_num(delta_in); - let one = U64F64::saturating_from_num(1); - - // No liquidity means that price should go to the limit - if liquidity_curr == 0 { - return match order_type { - OrderType::Sell => SqrtPrice::saturating_from_num(Self::min_price()), - OrderType::Buy => SqrtPrice::saturating_from_num(Self::max_price()), - }; + return C::ZERO; } - match order_type { - OrderType::Sell => one.safe_div( - delta_fixed - 
.safe_div(liquidity_curr) - .saturating_add(one.safe_div(sqrt_price_curr)), - ), - OrderType::Buy => delta_fixed - .safe_div(liquidity_curr) - .saturating_add(sqrt_price_curr), - } - } - - /// Update liquidity when crossing a tick - fn update_liquidity_at_crossing(netuid: NetUid, order_type: OrderType) -> Result<(), Error> { - let mut liquidity_curr = CurrentLiquidity::::get(netuid); - let current_tick_index = TickIndex::current_bounded::(netuid); - - // Find the appropriate tick based on order type - let tick = match order_type { - OrderType::Sell => { - // Self::find_closest_lower_active_tick(netuid, current_tick_index) - let current_price = Pallet::::current_price_sqrt(netuid); - let current_tick_price = current_tick_index.as_sqrt_price_bounded(); - let is_active = - ActiveTickIndexManager::::tick_is_active(netuid, current_tick_index); - - let lower_tick = if is_active && current_price > current_tick_price { - ActiveTickIndexManager::::find_closest_lower(netuid, current_tick_index) - .unwrap_or(TickIndex::MIN) - } else { - ActiveTickIndexManager::::find_closest_lower( - netuid, - current_tick_index.prev().unwrap_or(TickIndex::MIN), - ) - .unwrap_or(TickIndex::MIN) - }; - Ticks::::get(netuid, lower_tick) - } - OrderType::Buy => { - // Self::find_closest_higher_active_tick(netuid, current_tick_index), - let upper_tick = ActiveTickIndexManager::::find_closest_higher( - netuid, - current_tick_index.next().unwrap_or(TickIndex::MAX), - ) - .unwrap_or(TickIndex::MAX); - Ticks::::get(netuid, upper_tick) + match T::SubnetInfo::mechanism(netuid) { + 1 => { + let fee_rate = U64F64::saturating_from_num(FeeRate::::get(netuid)) + .safe_div(U64F64::saturating_from_num(u16::MAX)); + U64F64::saturating_from_num(amount) + .saturating_mul(fee_rate) + .saturating_to_num::() + .into() } + _ => C::ZERO, } - .ok_or(Error::::InsufficientLiquidity)?; - - let liquidity_update_abs_u64 = tick.liquidity_net_as_u64(); - - // Update liquidity based on the sign of liquidity_net and the order type - liquidity_curr = match (order_type, tick.liquidity_net >= 0) { - (OrderType::Sell, true) | (OrderType::Buy, false) => { - liquidity_curr.saturating_sub(liquidity_update_abs_u64) - } - (OrderType::Sell, false) | (OrderType::Buy, true) => { - liquidity_curr.saturating_add(liquidity_update_abs_u64) - } - }; - - CurrentLiquidity::::set(netuid, liquidity_curr); - - Ok(()) } pub fn find_closest_lower_active_tick(netuid: NetUid, index: TickIndex) -> Option { @@ -795,7 +392,7 @@ impl Pallet { } /// Here we subtract minimum safe liquidity from current liquidity to stay in the safe range - fn current_liquidity_safe(netuid: NetUid) -> U64F64 { + pub(crate) fn current_liquidity_safe(netuid: NetUid) -> U64F64 { U64F64::saturating_from_num( CurrentLiquidity::::get(netuid).saturating_sub(T::MinimumLiquidity::get()), ) @@ -906,7 +503,7 @@ impl Pallet { let position_id = PositionId::new::(); let position = Position::new(position_id, netuid, tick_low, tick_high, liquidity); - let current_price_sqrt = Pallet::::current_price_sqrt(netuid); + let current_price_sqrt = AlphaSqrtPrice::::get(netuid); let (tao, alpha) = position.to_token_amounts(current_price_sqrt)?; SwapV3Initialized::::set(netuid, true); @@ -985,7 +582,7 @@ impl Pallet { let mut delta_liquidity_abs = liquidity_delta.unsigned_abs(); // Determine the effective price for token calculations - let current_price_sqrt = Pallet::::current_price_sqrt(netuid); + let current_price_sqrt = AlphaSqrtPrice::::get(netuid); let sqrt_pa: SqrtPrice = position .tick_low .try_to_sqrt_price() @@ 
-1213,6 +810,23 @@ impl Pallet { T::ProtocolId::get().into_account_truncating() } + pub(crate) fn min_price_inner() -> C { + TickIndex::min_sqrt_price() + .saturating_mul(TickIndex::min_sqrt_price()) + .saturating_mul(SqrtPrice::saturating_from_num(1_000_000_000)) + .saturating_to_num::() + .into() + } + + pub(crate) fn max_price_inner() -> C { + TickIndex::max_sqrt_price() + .saturating_mul(TickIndex::max_sqrt_price()) + .saturating_mul(SqrtPrice::saturating_from_num(1_000_000_000)) + .saturating_round() + .saturating_to_num::() + .into() + } + /// Dissolve all LPs and clean state. pub fn do_dissolve_all_liquidity_providers(netuid: NetUid) -> DispatchResult { if SwapV3Initialized::::get(netuid) { @@ -1269,7 +883,7 @@ impl Pallet { if rm.tao > TaoCurrency::ZERO { T::BalanceOps::increase_balance(&owner, rm.tao); user_refunded_tao = user_refunded_tao.saturating_add(rm.tao); - T::BalanceOps::decrease_provided_tao_reserve(netuid, rm.tao); + T::TaoReserve::decrease_provided(netuid, rm.tao); } // 2) Stake ALL withdrawn α (principal + fees) to the best permitted validator. @@ -1303,10 +917,7 @@ impl Pallet { ); } - T::BalanceOps::decrease_provided_alpha_reserve( - netuid, - alpha_total_from_pool, - ); + T::AlphaReserve::decrease_provided(netuid, alpha_total_from_pool); } } Err(e) => { @@ -1407,83 +1018,106 @@ impl Pallet { } } -impl SwapHandler for Pallet { +impl DefaultPriceLimit for Pallet { + fn default_price_limit() -> C { + Self::max_price_inner::() + } +} + +impl DefaultPriceLimit for Pallet { + fn default_price_limit() -> C { + Self::min_price_inner::() + } +} + +impl SwapEngine for Pallet +where + T: Config, + O: OrderT, + BasicSwapStep: SwapStep, + Self: DefaultPriceLimit, +{ fn swap( netuid: NetUid, - order_t: OrderType, - amount: u64, - price_limit: u64, + order: O, + price_limit: TaoCurrency, drop_fees: bool, should_rollback: bool, - ) -> Result { - let limit_sqrt_price = SqrtPrice::saturating_from_num(price_limit) + ) -> Result, DispatchError> { + let limit_sqrt_price = SqrtPrice::saturating_from_num(price_limit.to_u64()) .safe_div(SqrtPrice::saturating_from_num(1_000_000_000)) .checked_sqrt(SqrtPrice::saturating_from_num(0.0000000001)) .ok_or(Error::::PriceLimitExceeded)?; - Self::do_swap( + Self::do_swap::( NetUid::from(netuid), - order_t, - amount, + order, limit_sqrt_price, drop_fees, should_rollback, ) .map_err(Into::into) } +} - fn sim_swap( +impl SwapHandler for Pallet { + fn swap( netuid: NetUid, - order_t: OrderType, - amount: u64, - ) -> Result { + order: O, + price_limit: TaoCurrency, + drop_fees: bool, + should_rollback: bool, + ) -> Result, DispatchError> + where + O: OrderT, + Self: SwapEngine, + { + >::swap(netuid, order, price_limit, drop_fees, should_rollback) + } + + fn sim_swap( + netuid: NetUid, + order: O, + ) -> Result, DispatchError> + where + O: OrderT, + Self: SwapEngine, + { match T::SubnetInfo::mechanism(netuid) { 1 => { - let price_limit = match order_t { - OrderType::Buy => Self::max_price(), - OrderType::Sell => Self::min_price(), - }; + let price_limit = Self::default_price_limit::(); - Self::swap(netuid, order_t, amount, price_limit, false, true) + >::swap(netuid, order, price_limit, false, true) } _ => { let actual_amount = if T::SubnetInfo::exists(netuid) { - amount + order.amount() } else { - 0 + O::PaidIn::ZERO }; Ok(SwapResult { amount_paid_in: actual_amount, - amount_paid_out: actual_amount, - fee_paid: 0, - tao_reserve_delta: 0, - alpha_reserve_delta: 0, + amount_paid_out: actual_amount.to_u64().into(), + fee_paid: 0.into(), }) } } } - fn 
approx_fee_amount(netuid: NetUid, amount: u64) -> u64 { - Self::calculate_fee_amount(netuid.into(), amount, false) + fn approx_fee_amount(netuid: NetUid, amount: C) -> C { + Self::calculate_fee_amount(netuid, amount, false) } fn current_alpha_price(netuid: NetUid) -> U96F32 { Self::current_price(netuid.into()) } - fn min_price() -> u64 { - TickIndex::min_sqrt_price() - .saturating_mul(TickIndex::min_sqrt_price()) - .saturating_mul(SqrtPrice::saturating_from_num(1_000_000_000)) - .saturating_to_num() + fn min_price() -> C { + Self::min_price_inner() } - fn max_price() -> u64 { - TickIndex::max_sqrt_price() - .saturating_mul(TickIndex::max_sqrt_price()) - .saturating_mul(SqrtPrice::saturating_from_num(1_000_000_000)) - .saturating_round() - .saturating_to_num() + fn max_price() -> C { + Self::max_price_inner() } fn adjust_protocol_liquidity( @@ -1507,17 +1141,3 @@ impl SwapHandler for Pallet { Self::do_clear_protocol_liquidity(netuid) } } - -#[derive(Debug, PartialEq)] -struct SwapStepResult { - amount_to_take: u64, - fee_paid: u64, - delta_in: u64, - delta_out: u64, -} - -#[derive(Clone, Copy, Debug, PartialEq)] -pub enum SwapStepAction { - Crossing, - Stop, -} diff --git a/pallets/swap/src/pallet/mod.rs b/pallets/swap/src/pallet/mod.rs index af4cbdc3cb..1501f9cb37 100644 --- a/pallets/swap/src/pallet/mod.rs +++ b/pallets/swap/src/pallet/mod.rs @@ -5,7 +5,7 @@ use frame_support::{PalletId, pallet_prelude::*, traits::Get}; use frame_system::pallet_prelude::*; use substrate_fixed::types::U64F64; use subtensor_runtime_common::{ - AlphaCurrency, BalanceOps, Currency, NetUid, SubnetInfo, TaoCurrency, + AlphaCurrency, BalanceOps, Currency, CurrencyReserve, NetUid, SubnetInfo, TaoCurrency, }; use crate::{ @@ -17,11 +17,13 @@ use crate::{ pub use pallet::*; mod impls; +mod swap_step; #[cfg(test)] mod tests; #[allow(clippy::module_inception)] #[frame_support::pallet] +#[allow(clippy::expect_used)] mod pallet { use super::*; use frame_system::{ensure_root, ensure_signed}; @@ -36,6 +38,12 @@ mod pallet { /// [`SubnetInfo`](subtensor_swap_interface::SubnetInfo). type SubnetInfo: SubnetInfo; + /// Tao reserves info. + type TaoReserve: CurrencyReserve; + + /// Alpha reserves info. + type AlphaReserve: CurrencyReserve; + /// Implementor of /// [`BalanceOps`](subtensor_swap_interface::BalanceOps). type BalanceOps: BalanceOps; @@ -139,6 +147,15 @@ mod pallet { ValueQuery, >; + /// TAO reservoir for scraps of protocol claimed fees. + #[pallet::storage] + pub type ScrapReservoirTao = StorageMap<_, Twox64Concat, NetUid, TaoCurrency, ValueQuery>; + + /// Alpha reservoir for scraps of protocol claimed fees. + #[pallet::storage] + pub type ScrapReservoirAlpha = + StorageMap<_, Twox64Concat, NetUid, AlphaCurrency, ValueQuery>; + #[pallet::event] #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event { @@ -273,6 +290,8 @@ mod pallet { #[pallet::call] impl Pallet { + #![deny(clippy::expect_used)] + /// Set the fee rate for swaps on a specific subnet (normalized value). /// For example, 0.3% is approximately 196. 
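For context on the `ScrapReservoirTao` / `ScrapReservoirAlpha` storages introduced above: protocol liquidity can only absorb TAO and Alpha in the ratio implied by the current price, so `get_proportional_alpha_tao_and_remainders` splits the claimed fee amounts into a price-matched pair plus leftovers, and the leftovers are parked in these reservoirs until the next adjustment. A rough sketch of that split, with plain u64/f64 arithmetic standing in for the pallet's U64F64 and currency wrapper types:

```rust
/// Illustrative version of the proportional split: returns
/// (tao_used, alpha_used, tao_scrap, alpha_scrap) with tao_used ~= alpha_used * price.
fn proportional_split(price: f64, tao: u64, alpha: u64) -> (u64, u64, u64, u64) {
    let tao_equivalent = (alpha as f64 * price) as u64;
    if tao_equivalent <= tao {
        // More TAO than needed: use all Alpha, keep the TAO remainder as scrap.
        (tao_equivalent, alpha, tao - tao_equivalent, 0)
    } else {
        // More Alpha than needed: use all TAO, keep the Alpha remainder as scrap.
        let alpha_equivalent = (tao as f64 / price) as u64;
        (tao, alpha_equivalent, 0, alpha - alpha_equivalent)
    }
}

fn main() {
    // At 0.25 TAO per Alpha, 1_000 TAO and 2_000 Alpha need only 500 TAO to
    // match the Alpha side; the remaining 500 TAO becomes scrap.
    assert_eq!(proportional_split(0.25, 1_000, 2_000), (500, 2_000, 500, 0));
    // With 100 TAO and 2_000 Alpha, only 400 Alpha can be matched.
    assert_eq!(proportional_split(0.25, 100, 2_000), (100, 400, 0, 1_600));
}
```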
/// @@ -391,8 +410,8 @@ mod pallet { ensure!(alpha_provided == alpha, Error::::InsufficientBalance); // Add provided liquidity to user-provided reserves - T::BalanceOps::increase_provided_tao_reserve(netuid.into(), tao_provided); - T::BalanceOps::increase_provided_alpha_reserve(netuid.into(), alpha_provided); + T::TaoReserve::increase_provided(netuid.into(), tao_provided); + T::AlphaReserve::increase_provided(netuid.into(), alpha_provided); // Emit an event Self::deposit_event(Event::LiquidityAdded { @@ -447,8 +466,8 @@ mod pallet { )?; // Remove withdrawn liquidity from user-provided reserves - T::BalanceOps::decrease_provided_tao_reserve(netuid.into(), result.tao); - T::BalanceOps::decrease_provided_alpha_reserve(netuid.into(), result.alpha); + T::TaoReserve::decrease_provided(netuid.into(), result.tao); + T::AlphaReserve::decrease_provided(netuid.into(), result.alpha); // Emit an event Self::deposit_event(Event::LiquidityRemoved { diff --git a/pallets/swap/src/pallet/swap_step.rs b/pallets/swap/src/pallet/swap_step.rs new file mode 100644 index 0000000000..6791835b1a --- /dev/null +++ b/pallets/swap/src/pallet/swap_step.rs @@ -0,0 +1,562 @@ +use core::marker::PhantomData; + +use safe_math::*; +use substrate_fixed::types::{I64F64, U64F64}; +use subtensor_runtime_common::{AlphaCurrency, Currency, NetUid, TaoCurrency}; + +use super::pallet::*; +use crate::{ + SqrtPrice, + tick::{ActiveTickIndexManager, TickIndex}, +}; + +/// A struct representing a single swap step with all its parameters and state +pub(crate) struct BasicSwapStep +where + T: Config, + PaidIn: Currency, + PaidOut: Currency, +{ + // Input parameters + netuid: NetUid, + drop_fees: bool, + + // Computed values + current_liquidity: U64F64, + possible_delta_in: PaidIn, + + // Ticks and prices (current, limit, edge, target) + target_sqrt_price: SqrtPrice, + limit_sqrt_price: SqrtPrice, + current_sqrt_price: SqrtPrice, + edge_sqrt_price: SqrtPrice, + edge_tick: TickIndex, + + // Result values + action: SwapStepAction, + delta_in: PaidIn, + final_price: SqrtPrice, + fee: PaidIn, + + _phantom: PhantomData<(T, PaidIn, PaidOut)>, +} + +impl BasicSwapStep +where + T: Config, + PaidIn: Currency, + PaidOut: Currency, + Self: SwapStep, +{ + /// Creates and initializes a new swap step + pub(crate) fn new( + netuid: NetUid, + amount_remaining: PaidIn, + limit_sqrt_price: SqrtPrice, + drop_fees: bool, + ) -> Self { + // Calculate prices and ticks + let current_tick = CurrentTick::::get(netuid); + let current_sqrt_price = AlphaSqrtPrice::::get(netuid); + let edge_tick = Self::tick_edge(netuid, current_tick); + let edge_sqrt_price = edge_tick.as_sqrt_price_bounded(); + + let fee = Pallet::::calculate_fee_amount(netuid, amount_remaining, drop_fees); + let possible_delta_in = amount_remaining.saturating_sub(fee); + + // Target price and quantities + let current_liquidity = U64F64::saturating_from_num(CurrentLiquidity::::get(netuid)); + let target_sqrt_price = + Self::sqrt_price_target(current_liquidity, current_sqrt_price, possible_delta_in); + + Self { + netuid, + drop_fees, + target_sqrt_price, + limit_sqrt_price, + current_sqrt_price, + edge_sqrt_price, + edge_tick, + possible_delta_in, + current_liquidity, + action: SwapStepAction::Stop, + delta_in: PaidIn::ZERO, + final_price: target_sqrt_price, + fee, + _phantom: PhantomData, + } + } + + /// Execute the swap step and return the result + pub(crate) fn execute(&mut self) -> Result, Error> { + self.determine_action(); + self.process_swap() + } + + /// Determine the appropriate action for 
this swap step + fn determine_action(&mut self) { + let mut recalculate_fee = false; + + // Calculate the stopping price: The price at which we either reach the limit price, + // exchange the full amount, or reach the edge price. + if Self::price_is_closer(&self.target_sqrt_price, &self.limit_sqrt_price) + && Self::price_is_closer(&self.target_sqrt_price, &self.edge_sqrt_price) + { + // Case 1. target_quantity is the lowest + // The trade completely happens within one tick, no tick crossing happens. + self.action = SwapStepAction::Stop; + self.final_price = self.target_sqrt_price; + self.delta_in = self.possible_delta_in; + } else if Self::price_is_closer(&self.limit_sqrt_price, &self.target_sqrt_price) + && Self::price_is_closer(&self.limit_sqrt_price, &self.edge_sqrt_price) + { + // Case 2. lim_quantity is the lowest + // The trade also completely happens within one tick, no tick crossing happens. + self.action = SwapStepAction::Stop; + self.final_price = self.limit_sqrt_price; + self.delta_in = Self::delta_in( + self.current_liquidity, + self.current_sqrt_price, + self.limit_sqrt_price, + ); + recalculate_fee = true; + } else { + // Case 3. edge_quantity is the lowest + // Tick crossing is likely + self.action = SwapStepAction::Crossing; + self.delta_in = Self::delta_in( + self.current_liquidity, + self.current_sqrt_price, + self.edge_sqrt_price, + ); + self.final_price = self.edge_sqrt_price; + recalculate_fee = true; + } + + log::trace!("\tAction        : {:?}", self.action); + log::trace!( + "\tCurrent Price : {}", + self.current_sqrt_price + .saturating_mul(self.current_sqrt_price) + ); + log::trace!( + "\tTarget Price  : {}", + self.target_sqrt_price + .saturating_mul(self.target_sqrt_price) + ); + log::trace!( + "\tLimit Price   : {}", + self.limit_sqrt_price.saturating_mul(self.limit_sqrt_price) + ); + log::trace!( + "\tEdge Price    : {}", + self.edge_sqrt_price.saturating_mul(self.edge_sqrt_price) + ); + log::trace!("\tDelta In      : {}", self.delta_in); + + // Because on step creation we calculate the fee off the total amount, we might need to + // recalculate it in case we hit the limit price or the edge price. + if recalculate_fee { + let u16_max = U64F64::saturating_from_num(u16::MAX); + let fee_rate = if self.drop_fees { + U64F64::saturating_from_num(0) + } else { + U64F64::saturating_from_num(FeeRate::::get(self.netuid)) + }; + let delta_fixed = U64F64::saturating_from_num(self.delta_in); + self.fee = delta_fixed + .saturating_mul(fee_rate.safe_div(u16_max.saturating_sub(fee_rate))) + .saturating_to_num::() + .into(); + } + + // Now correct the action if we stopped exactly at the edge, regardless of which case + // applied above. Because a buy moves the price up and a tick's semi-open interval doesn't + // include its right endpoint, we cross on buys and stop on sells. 
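// In interval terms: each tick owns a semi-open price range [lower, upper). A buy that
// stops exactly on the edge has reached the upper bound, which is excluded from the
// current tick and already belongs to the next one, so the step must be recorded as a
// crossing. A sell that stops exactly on the edge lands on a lower bound, which is still
// included in the current tick's range, so the step can simply stop without crossing.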
+ let natural_reason_stop_price = + if Self::price_is_closer(&self.limit_sqrt_price, &self.target_sqrt_price) { + self.limit_sqrt_price + } else { + self.target_sqrt_price + }; + if natural_reason_stop_price == self.edge_sqrt_price { + self.action = Self::action_on_edge_sqrt_price(); + } + } + + /// Process a single step of a swap + fn process_swap(&self) -> Result, Error> { + // Hold the fees + Self::add_fees( + self.netuid, + Pallet::::current_liquidity_safe(self.netuid), + self.fee, + ); + let delta_out = Self::convert_deltas(self.netuid, self.delta_in); + // log::trace!("\tDelta Out : {delta_out:?}"); + + if self.action == SwapStepAction::Crossing { + let mut tick = Ticks::::get(self.netuid, self.edge_tick).unwrap_or_default(); + tick.fees_out_tao = I64F64::saturating_from_num(FeeGlobalTao::::get(self.netuid)) + .saturating_sub(tick.fees_out_tao); + tick.fees_out_alpha = + I64F64::saturating_from_num(FeeGlobalAlpha::::get(self.netuid)) + .saturating_sub(tick.fees_out_alpha); + Self::update_liquidity_at_crossing(self.netuid)?; + Ticks::::insert(self.netuid, self.edge_tick, tick); + } + + // Update current price + AlphaSqrtPrice::::set(self.netuid, self.final_price); + + // Update current tick + let new_current_tick = TickIndex::from_sqrt_price_bounded(self.final_price); + CurrentTick::::set(self.netuid, new_current_tick); + + Ok(SwapStepResult { + amount_to_take: self.delta_in.saturating_add(self.fee), + fee_paid: self.fee, + delta_in: self.delta_in, + delta_out, + }) + } + + pub(crate) fn action(&self) -> SwapStepAction { + self.action + } +} + +impl SwapStep + for BasicSwapStep +{ + fn delta_in( + liquidity_curr: U64F64, + sqrt_price_curr: SqrtPrice, + sqrt_price_target: SqrtPrice, + ) -> TaoCurrency { + liquidity_curr + .saturating_mul(sqrt_price_target.saturating_sub(sqrt_price_curr)) + .saturating_to_num::() + .into() + } + + fn tick_edge(netuid: NetUid, current_tick: TickIndex) -> TickIndex { + ActiveTickIndexManager::::find_closest_higher( + netuid, + current_tick.next().unwrap_or(TickIndex::MAX), + ) + .unwrap_or(TickIndex::MAX) + } + + fn sqrt_price_target( + liquidity_curr: U64F64, + sqrt_price_curr: SqrtPrice, + delta_in: TaoCurrency, + ) -> SqrtPrice { + let delta_fixed = U64F64::saturating_from_num(delta_in); + + // No liquidity means that price should go to the limit + if liquidity_curr == 0 { + return SqrtPrice::saturating_from_num( + Pallet::::max_price_inner::().to_u64(), + ); + } + + delta_fixed + .safe_div(liquidity_curr) + .saturating_add(sqrt_price_curr) + } + + fn price_is_closer(sq_price1: &SqrtPrice, sq_price2: &SqrtPrice) -> bool { + sq_price1 <= sq_price2 + } + + fn action_on_edge_sqrt_price() -> SwapStepAction { + SwapStepAction::Crossing + } + + fn add_fees(netuid: NetUid, current_liquidity: U64F64, fee: TaoCurrency) { + if current_liquidity == 0 { + return; + } + + let fee_fixed = U64F64::saturating_from_num(fee.to_u64()); + + FeeGlobalTao::::mutate(netuid, |value| { + *value = value.saturating_add(fee_fixed.safe_div(current_liquidity)) + }); + } + + fn convert_deltas(netuid: NetUid, delta_in: TaoCurrency) -> AlphaCurrency { + // Skip conversion if delta_in is zero + if delta_in.is_zero() { + return AlphaCurrency::ZERO; + } + + let liquidity_curr = SqrtPrice::saturating_from_num(CurrentLiquidity::::get(netuid)); + let sqrt_price_curr = AlphaSqrtPrice::::get(netuid); + let delta_fixed = SqrtPrice::saturating_from_num(delta_in.to_u64()); + + // Calculate result based on order type with proper fixed-point math + // Using safe math operations throughout to 
prevent overflows + let result = { + // (liquidity_curr * sqrt_price_curr + delta_fixed) * sqrt_price_curr; + let a = liquidity_curr + .saturating_mul(sqrt_price_curr) + .saturating_add(delta_fixed) + .saturating_mul(sqrt_price_curr); + // liquidity_curr / a; + let b = liquidity_curr.safe_div(a); + // b * delta_fixed; + b.saturating_mul(delta_fixed) + }; + + result.saturating_to_num::().into() + } + + fn update_liquidity_at_crossing(netuid: NetUid) -> Result<(), Error> { + let mut liquidity_curr = CurrentLiquidity::::get(netuid); + let current_tick_index = TickIndex::current_bounded::(netuid); + + // Find the appropriate tick based on order type + let tick = { + // Self::find_closest_higher_active_tick(netuid, current_tick_index), + let upper_tick = ActiveTickIndexManager::::find_closest_higher( + netuid, + current_tick_index.next().unwrap_or(TickIndex::MAX), + ) + .unwrap_or(TickIndex::MAX); + Ticks::::get(netuid, upper_tick) + } + .ok_or(Error::::InsufficientLiquidity)?; + + let liquidity_update_abs_u64 = tick.liquidity_net_as_u64(); + + // Update liquidity based on the sign of liquidity_net and the order type + liquidity_curr = if tick.liquidity_net >= 0 { + liquidity_curr.saturating_add(liquidity_update_abs_u64) + } else { + liquidity_curr.saturating_sub(liquidity_update_abs_u64) + }; + + CurrentLiquidity::::set(netuid, liquidity_curr); + + Ok(()) + } +} + +impl SwapStep + for BasicSwapStep +{ + fn delta_in( + liquidity_curr: U64F64, + sqrt_price_curr: SqrtPrice, + sqrt_price_target: SqrtPrice, + ) -> AlphaCurrency { + let one = U64F64::saturating_from_num(1); + + liquidity_curr + .saturating_mul( + one.safe_div(sqrt_price_target.into()) + .saturating_sub(one.safe_div(sqrt_price_curr)), + ) + .saturating_to_num::() + .into() + } + + fn tick_edge(netuid: NetUid, current_tick: TickIndex) -> TickIndex { + let current_price: SqrtPrice = AlphaSqrtPrice::::get(netuid); + let current_tick_price = current_tick.as_sqrt_price_bounded(); + let is_active = ActiveTickIndexManager::::tick_is_active(netuid, current_tick); + + if is_active && current_price > current_tick_price { + return ActiveTickIndexManager::::find_closest_lower(netuid, current_tick) + .unwrap_or(TickIndex::MIN); + } + + ActiveTickIndexManager::::find_closest_lower( + netuid, + current_tick.prev().unwrap_or(TickIndex::MIN), + ) + .unwrap_or(TickIndex::MIN) + } + + fn sqrt_price_target( + liquidity_curr: U64F64, + sqrt_price_curr: SqrtPrice, + delta_in: AlphaCurrency, + ) -> SqrtPrice { + let delta_fixed = U64F64::saturating_from_num(delta_in); + let one = U64F64::saturating_from_num(1); + + // No liquidity means that price should go to the limit + if liquidity_curr == 0 { + return SqrtPrice::saturating_from_num( + Pallet::::min_price_inner::().to_u64(), + ); + } + + one.safe_div( + delta_fixed + .safe_div(liquidity_curr) + .saturating_add(one.safe_div(sqrt_price_curr)), + ) + } + + fn price_is_closer(sq_price1: &SqrtPrice, sq_price2: &SqrtPrice) -> bool { + sq_price1 >= sq_price2 + } + + fn action_on_edge_sqrt_price() -> SwapStepAction { + SwapStepAction::Stop + } + + fn add_fees(netuid: NetUid, current_liquidity: U64F64, fee: AlphaCurrency) { + if current_liquidity == 0 { + return; + } + + let fee_fixed = U64F64::saturating_from_num(fee.to_u64()); + + FeeGlobalAlpha::::mutate(netuid, |value| { + *value = value.saturating_add(fee_fixed.safe_div(current_liquidity)) + }); + } + + fn convert_deltas(netuid: NetUid, delta_in: AlphaCurrency) -> TaoCurrency { + // Skip conversion if delta_in is zero + if delta_in.is_zero() { + return 
TaoCurrency::ZERO; + } + + let liquidity_curr = SqrtPrice::saturating_from_num(CurrentLiquidity::::get(netuid)); + let sqrt_price_curr = AlphaSqrtPrice::::get(netuid); + let delta_fixed = SqrtPrice::saturating_from_num(delta_in.to_u64()); + + // Calculate result based on order type with proper fixed-point math + // Using safe math operations throughout to prevent overflows + let result = { + // liquidity_curr / (liquidity_curr / sqrt_price_curr + delta_fixed); + let denom = liquidity_curr + .safe_div(sqrt_price_curr) + .saturating_add(delta_fixed); + let a = liquidity_curr.safe_div(denom); + // a * sqrt_price_curr; + let b = a.saturating_mul(sqrt_price_curr); + + // delta_fixed * b; + delta_fixed.saturating_mul(b) + }; + + result.saturating_to_num::().into() + } + + fn update_liquidity_at_crossing(netuid: NetUid) -> Result<(), Error> { + let mut liquidity_curr = CurrentLiquidity::::get(netuid); + let current_tick_index = TickIndex::current_bounded::(netuid); + + // Find the appropriate tick based on order type + let tick = { + // Self::find_closest_lower_active_tick(netuid, current_tick_index) + let current_price = AlphaSqrtPrice::::get(netuid); + let current_tick_price = current_tick_index.as_sqrt_price_bounded(); + let is_active = ActiveTickIndexManager::::tick_is_active(netuid, current_tick_index); + + let lower_tick = if is_active && current_price > current_tick_price { + ActiveTickIndexManager::::find_closest_lower(netuid, current_tick_index) + .unwrap_or(TickIndex::MIN) + } else { + ActiveTickIndexManager::::find_closest_lower( + netuid, + current_tick_index.prev().unwrap_or(TickIndex::MIN), + ) + .unwrap_or(TickIndex::MIN) + }; + Ticks::::get(netuid, lower_tick) + } + .ok_or(Error::::InsufficientLiquidity)?; + + let liquidity_update_abs_u64 = tick.liquidity_net_as_u64(); + + // Update liquidity based on the sign of liquidity_net and the order type + liquidity_curr = if tick.liquidity_net >= 0 { + liquidity_curr.saturating_sub(liquidity_update_abs_u64) + } else { + liquidity_curr.saturating_add(liquidity_update_abs_u64) + }; + + CurrentLiquidity::::set(netuid, liquidity_curr); + + Ok(()) + } +} + +pub(crate) trait SwapStep +where + T: Config, + PaidIn: Currency, + PaidOut: Currency, +{ + /// Get the input amount needed to reach the target price + fn delta_in( + liquidity_curr: U64F64, + sqrt_price_curr: SqrtPrice, + sqrt_price_target: SqrtPrice, + ) -> PaidIn; + + /// Get the tick at the current tick edge. + /// + /// If anything is wrong with tick math and it returns Err, we just abort the deal, i.e. return + /// the edge that is impossible to execute + fn tick_edge(netuid: NetUid, current_tick: TickIndex) -> TickIndex; + + /// Get the target square root price based on the input amount + /// + /// This is the price that would be reached if + /// - There are no liquidity positions other than protocol liquidity + /// - Full delta_in amount is executed + fn sqrt_price_target( + liquidity_curr: U64F64, + sqrt_price_curr: SqrtPrice, + delta_in: PaidIn, + ) -> SqrtPrice; + + /// Returns True if sq_price1 is closer to the current price than sq_price2 + /// in terms of order direction. + /// For buying: sq_price1 <= sq_price2 + /// For selling: sq_price1 >= sq_price2 + fn price_is_closer(sq_price1: &SqrtPrice, sq_price2: &SqrtPrice) -> bool; + + /// Get swap step action on the edge sqrt price. 
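For reference, the two directional `convert_deltas` implementations above are the standard constant-product within-tick formulas, with virtual reserves tao = L·sqrt(P) and alpha = L/sqrt(P) (price quoted as TAO per Alpha). A rough f64 sketch of both directions, illustrative only since the pallet works in SqrtPrice/U64F64 fixed point:

```rust
/// TAO in -> Alpha out: l * d_in / (sqrt_p * (l * sqrt_p + d_in))
fn alpha_out_for_tao_in(l: f64, sqrt_p: f64, d_in: f64) -> f64 {
    l * d_in / (sqrt_p * (l * sqrt_p + d_in))
}

/// Alpha in -> TAO out: l * sqrt_p * d_in / (l / sqrt_p + d_in)
fn tao_out_for_alpha_in(l: f64, sqrt_p: f64, d_in: f64) -> f64 {
    l * sqrt_p * d_in / (l / sqrt_p + d_in)
}

fn main() {
    // At price 0.25 (sqrt_p = 0.5) with liquidity 1_000_000, a small TAO input
    // converts at roughly 4 Alpha per TAO and a small Alpha input at roughly
    // 0.25 TAO per Alpha, matching the spot price used in the tests below.
    let (l, sqrt_p) = (1_000_000.0_f64, 0.5_f64);
    assert!((alpha_out_for_tao_in(l, sqrt_p, 1.0) - 4.0).abs() < 0.01);
    assert!((tao_out_for_alpha_in(l, sqrt_p, 1.0) - 0.25).abs() < 0.01);
}
```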
+ fn action_on_edge_sqrt_price() -> SwapStepAction; + + /// Add fees to the global fee counters + fn add_fees(netuid: NetUid, current_liquidity: U64F64, fee: PaidIn); + + /// Convert input amount (delta_in) to output amount (delta_out) + /// + /// This is the core method of uniswap V3 that tells how much output token is given for an + /// amount of input token within one price tick. + fn convert_deltas(netuid: NetUid, delta_in: PaidIn) -> PaidOut; + + /// Update liquidity when crossing a tick + fn update_liquidity_at_crossing(netuid: NetUid) -> Result<(), Error>; +} + +#[derive(Debug, PartialEq)] +pub(crate) struct SwapStepResult +where + PaidIn: Currency, + PaidOut: Currency, +{ + pub(crate) amount_to_take: PaidIn, + pub(crate) fee_paid: PaidIn, + pub(crate) delta_in: PaidIn, + pub(crate) delta_out: PaidOut, +} + +#[derive(Clone, Copy, Debug, PartialEq)] +pub enum SwapStepAction { + Crossing, + Stop, +} diff --git a/pallets/swap/src/pallet/tests.rs b/pallets/swap/src/pallet/tests.rs index 72c33d698f..5b8cca643f 100644 --- a/pallets/swap/src/pallet/tests.rs +++ b/pallets/swap/src/pallet/tests.rs @@ -1,6 +1,9 @@ -#![allow(clippy::unwrap_used)] -#![allow(clippy::indexing_slicing)] -#![allow(clippy::arithmetic_side_effects)] +#![allow( + clippy::arithmetic_side_effects, + clippy::expect_used, + clippy::indexing_slicing, + clippy::unwrap_used +)] use approx::assert_abs_diff_eq; use frame_support::{assert_err, assert_noop, assert_ok}; @@ -8,9 +11,11 @@ use sp_arithmetic::helpers_128bit; use sp_runtime::DispatchError; use substrate_fixed::types::U96F32; use subtensor_runtime_common::NetUid; +use subtensor_swap_interface::Order as OrderT; use super::*; -use crate::{OrderType, SqrtPrice, mock::*}; +use crate::pallet::swap_step::*; +use crate::{SqrtPrice, mock::*}; // this function is used to convert price (NON-SQRT price!) to TickIndex. 
it's only utility for // testing, all the implementation logic is based on sqrt prices @@ -153,8 +158,8 @@ fn test_swap_initialization() { let netuid = NetUid::from(1); // Get reserves from the mock provider - let tao = MockLiquidityProvider::tao_reserve(netuid.into()); - let alpha = MockLiquidityProvider::alpha_reserve(netuid.into()); + let tao = TaoReserve::reserve(netuid.into()); + let alpha = AlphaReserve::reserve(netuid.into()); assert_ok!(Pallet::::maybe_initialize_v3(netuid)); @@ -664,15 +669,8 @@ fn test_modify_position_basic() { // Swap to create fees on the position let sqrt_limit_price = SqrtPrice::from_num((limit_price).sqrt()); - Pallet::::do_swap( - netuid, - OrderType::Buy, - liquidity / 10, - sqrt_limit_price, - false, - false, - ) - .unwrap(); + let order = GetAlphaForTao::with_amount(liquidity / 10); + Pallet::::do_swap(netuid, order, sqrt_limit_price, false, false).unwrap(); // Modify liquidity (also causes claiming of fees) let liquidity_before = CurrentLiquidity::::get(netuid); @@ -754,68 +752,272 @@ fn test_modify_position_basic() { #[test] fn test_swap_basic() { new_test_ext().execute_with(|| { + fn perform_test( + netuid: NetUid, + order: Order, + limit_price: f64, + output_amount: u64, + price_should_grow: bool, + ) where + Order: OrderT, + Order::PaidIn: GlobalFeeInfo, + BasicSwapStep: + SwapStep, + { + // Consumed liquidity ticks + let tick_low = TickIndex::MIN; + let tick_high = TickIndex::MAX; + let liquidity = order.amount().to_u64(); + + // Setup swap + assert_ok!(Pallet::::maybe_initialize_v3(netuid)); + + // Get tick infos before the swap + let tick_low_info_before = Ticks::::get(netuid, tick_low).unwrap_or_default(); + let tick_high_info_before = Ticks::::get(netuid, tick_high).unwrap_or_default(); + let liquidity_before = CurrentLiquidity::::get(netuid); + + // Get current price + let current_price = Pallet::::current_price(netuid); + + // Swap + let sqrt_limit_price = SqrtPrice::from_num((limit_price).sqrt()); + let swap_result = + Pallet::::do_swap(netuid, order.clone(), sqrt_limit_price, false, false) + .unwrap(); + assert_abs_diff_eq!( + swap_result.amount_paid_out.to_u64(), + output_amount, + epsilon = output_amount / 100 + ); + + assert_abs_diff_eq!( + swap_result.paid_in_reserve_delta() as u64, + liquidity, + epsilon = liquidity / 10 + ); + assert_abs_diff_eq!( + swap_result.paid_out_reserve_delta() as i64, + -(output_amount as i64), + epsilon = output_amount as i64 / 10 + ); + + // Check that low and high ticks' fees were updated properly, and liquidity values were not updated + let tick_low_info = Ticks::::get(netuid, tick_low).unwrap(); + let tick_high_info = Ticks::::get(netuid, tick_high).unwrap(); + let expected_liquidity_net_low = tick_low_info_before.liquidity_net; + let expected_liquidity_gross_low = tick_low_info_before.liquidity_gross; + let expected_liquidity_net_high = tick_high_info_before.liquidity_net; + let expected_liquidity_gross_high = tick_high_info_before.liquidity_gross; + assert_eq!(tick_low_info.liquidity_net, expected_liquidity_net_low,); + assert_eq!(tick_low_info.liquidity_gross, expected_liquidity_gross_low,); + assert_eq!(tick_high_info.liquidity_net, expected_liquidity_net_high,); + assert_eq!( + tick_high_info.liquidity_gross, + expected_liquidity_gross_high, + ); + + // Expected fee amount + let fee_rate = FeeRate::::get(netuid) as f64 / u16::MAX as f64; + let expected_fee = (liquidity as f64 * fee_rate) as u64; + + // Global fees should be updated + let actual_global_fee = 
(order.amount().global_fee(netuid).to_num::() + * (liquidity_before as f64)) as u64; + + assert!((swap_result.fee_paid.to_u64() as i64 - expected_fee as i64).abs() <= 1); + assert!((actual_global_fee as i64 - expected_fee as i64).abs() <= 1); + + // Tick fees should be updated + + // Liquidity position should not be updated + let protocol_id = Pallet::::protocol_account_id(); + let positions = + Positions::::iter_prefix_values((netuid, protocol_id)).collect::>(); + let position = positions.first().unwrap(); + + assert_eq!( + position.liquidity, + helpers_128bit::sqrt( + TaoReserve::reserve(netuid.into()).to_u64() as u128 + * AlphaReserve::reserve(netuid.into()).to_u64() as u128 + ) as u64 + ); + assert_eq!(position.tick_low, tick_low); + assert_eq!(position.tick_high, tick_high); + assert_eq!(position.fees_alpha, 0); + assert_eq!(position.fees_tao, 0); + + // Current liquidity is not updated + assert_eq!(CurrentLiquidity::::get(netuid), liquidity_before); + + // Assert that price movement is in correct direction + let sqrt_current_price_after = AlphaSqrtPrice::::get(netuid); + let current_price_after = Pallet::::current_price(netuid); + assert_eq!(current_price_after >= current_price, price_should_grow); + + // Assert that current tick is updated + let current_tick = CurrentTick::::get(netuid); + let expected_current_tick = + TickIndex::from_sqrt_price_bounded(sqrt_current_price_after); + assert_eq!(current_tick, expected_current_tick); + } + // Current price is 0.25 // Test case is (order_type, liquidity, limit_price, output_amount) - [ - (OrderType::Buy, 1_000u64, 1000.0_f64, 3990_u64), - (OrderType::Sell, 1_000u64, 0.0001_f64, 250_u64), - (OrderType::Buy, 500_000_000, 1000.0, 2_000_000_000), - ] - .into_iter() - .enumerate() - .map(|(n, v)| (NetUid::from(n as u16 + 1), v.0, v.1, v.2, v.3)) - .for_each( - |(netuid, order_type, liquidity, limit_price, output_amount)| { - // Consumed liquidity ticks - let tick_low = TickIndex::MIN; - let tick_high = TickIndex::MAX; + perform_test( + 1.into(), + GetAlphaForTao::with_amount(1_000), + 1000.0, + 3990, + true, + ); + perform_test( + 2.into(), + GetTaoForAlpha::with_amount(1_000), + 0.0001, + 250, + false, + ); + perform_test( + 3.into(), + GetAlphaForTao::with_amount(500_000_000), + 1000.0, + 2_000_000_000, + true, + ); + }); +} - // Setup swap +// In this test the swap starts and ends within one (large liquidity) position +// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --package pallet-subtensor-swap --lib -- pallet::tests::test_swap_single_position --exact --show-output +#[test] +fn test_swap_single_position() { + let min_price = tick_to_price(TickIndex::MIN); + let max_price = tick_to_price(TickIndex::MAX); + let max_tick = price_to_tick(max_price); + let netuid = NetUid::from(1); + assert_eq!(max_tick, TickIndex::MAX); + + let mut current_price_low = 0_f64; + let mut current_price_high = 0_f64; + let mut current_price = 0_f64; + new_test_ext().execute_with(|| { + let (low, high) = get_ticked_prices_around_current_price(); + current_price_low = low; + current_price_high = high; + current_price = Pallet::::current_price(netuid).to_num::(); + }); + + macro_rules! 
perform_test { + ($order_t:ident, + $price_low_offset:expr, + $price_high_offset:expr, + $position_liquidity:expr, + $liquidity_fraction:expr, + $limit_price:expr, + $price_should_grow:expr + ) => { + new_test_ext().execute_with(|| { + let price_low_offset = $price_low_offset; + let price_high_offset = $price_high_offset; + let position_liquidity = $position_liquidity; + let order_liquidity_fraction = $liquidity_fraction; + let limit_price = $limit_price; + let price_should_grow = $price_should_grow; + + ////////////////////////////////////////////// + // Initialize pool and add the user position assert_ok!(Pallet::::maybe_initialize_v3(netuid)); + let tao_reserve = TaoReserve::reserve(netuid.into()).to_u64(); + let alpha_reserve = AlphaReserve::reserve(netuid.into()).to_u64(); + let protocol_liquidity = (tao_reserve as f64 * alpha_reserve as f64).sqrt(); + + // Add liquidity + let current_price = Pallet::::current_price(netuid).to_num::(); + let sqrt_current_price = AlphaSqrtPrice::::get(netuid).to_num::(); + + let price_low = price_low_offset + current_price; + let price_high = price_high_offset + current_price; + let tick_low = price_to_tick(price_low); + let tick_high = price_to_tick(price_high); + let (_position_id, _tao, _alpha) = Pallet::::do_add_liquidity( + netuid, + &OK_COLDKEY_ACCOUNT_ID, + &OK_HOTKEY_ACCOUNT_ID, + tick_low, + tick_high, + position_liquidity, + ) + .unwrap(); + + // Liquidity position at correct ticks + assert_eq!( + Pallet::::count_positions(netuid, &OK_COLDKEY_ACCOUNT_ID), + 1 + ); // Get tick infos before the swap let tick_low_info_before = Ticks::::get(netuid, tick_low).unwrap_or_default(); let tick_high_info_before = Ticks::::get(netuid, tick_high).unwrap_or_default(); let liquidity_before = CurrentLiquidity::::get(netuid); + assert_abs_diff_eq!( + liquidity_before as f64, + protocol_liquidity + position_liquidity as f64, + epsilon = liquidity_before as f64 / 1000. + ); - // Get current price - let current_price = Pallet::::current_price(netuid); - + ////////////////////////////////////////////// // Swap + + // Calculate the expected output amount for the cornercase of one step + let order_liquidity = order_liquidity_fraction * position_liquidity as f64; + + let output_amount = >::approx_expected_swap_output( + sqrt_current_price, + liquidity_before as f64, + order_liquidity, + ); + + // Do the swap let sqrt_limit_price = SqrtPrice::from_num((limit_price).sqrt()); - let swap_result = Pallet::::do_swap( - netuid, - order_type, - liquidity, - sqrt_limit_price, - false, - false, - ) - .unwrap(); + let order = $order_t::with_amount(order_liquidity as u64); + let swap_result = + Pallet::::do_swap(netuid, order, sqrt_limit_price, false, false).unwrap(); assert_abs_diff_eq!( - swap_result.amount_paid_out, + swap_result.amount_paid_out.to_u64() as f64, output_amount, - epsilon = output_amount / 100 + epsilon = output_amount / 10. 
); - let (tao_delta_expected, alpha_delta_expected) = match order_type { - OrderType::Buy => (liquidity as i64, -(output_amount as i64)), - OrderType::Sell => (-(output_amount as i64), liquidity as i64), - }; + if order_liquidity_fraction <= 0.001 { + assert_abs_diff_eq!( + swap_result.paid_in_reserve_delta() as i64, + order_liquidity as i64, + epsilon = order_liquidity as i64 / 10 + ); + assert_abs_diff_eq!( + swap_result.paid_out_reserve_delta() as i64, + -(output_amount as i64), + epsilon = output_amount as i64 / 10 + ); + } - assert_abs_diff_eq!( - swap_result.alpha_reserve_delta, - alpha_delta_expected, - epsilon = alpha_delta_expected.abs() / 10 - ); - assert_abs_diff_eq!( - swap_result.tao_reserve_delta, - tao_delta_expected, - epsilon = tao_delta_expected.abs() / 10 - ); + // Assert that price movement is in correct direction + let current_price_after = Pallet::::current_price(netuid); + assert_eq!(price_should_grow, current_price_after > current_price); + + // Assert that for small amounts price stays within the user position + if (order_liquidity_fraction <= 0.001) + && (price_low_offset > 0.0001) + && (price_high_offset > 0.0001) + { + assert!(current_price_after <= price_high); + assert!(current_price_after >= price_low); + } - // Check that low and high ticks' fees were updated properly, and liquidity values were not updated + // Check that low and high ticks' fees were updated properly let tick_low_info = Ticks::::get(netuid, tick_low).unwrap(); let tick_high_info = Ticks::::get(netuid, tick_high).unwrap(); let expected_liquidity_net_low = tick_low_info_before.liquidity_net; @@ -832,79 +1034,38 @@ fn test_swap_basic() { // Expected fee amount let fee_rate = FeeRate::::get(netuid) as f64 / u16::MAX as f64; - let expected_fee = (liquidity as f64 * fee_rate) as u64; - - // Global fees should be updated - let actual_global_fee = ((match order_type { - OrderType::Buy => FeeGlobalTao::::get(netuid), - OrderType::Sell => FeeGlobalAlpha::::get(netuid), - }) - .to_num::() + let expected_fee = (order_liquidity - order_liquidity / (1.0 + fee_rate)) as u64; + + // // Global fees should be updated + let actual_global_fee = ($order_t::with_amount(0) + .amount() + .global_fee(netuid) + .to_num::() * (liquidity_before as f64)) as u64; - assert!((swap_result.fee_paid as i64 - expected_fee as i64).abs() <= 1); - assert!((actual_global_fee as i64 - expected_fee as i64).abs() <= 1); + assert_abs_diff_eq!( + swap_result.fee_paid.to_u64(), + expected_fee, + epsilon = expected_fee / 10 + ); + assert_abs_diff_eq!(actual_global_fee, expected_fee, epsilon = expected_fee / 10); // Tick fees should be updated // Liquidity position should not be updated - let protocol_id = Pallet::::protocol_account_id(); - let positions = Positions::::iter_prefix_values((netuid, protocol_id)) - .collect::>(); + let positions = + Positions::::iter_prefix_values((netuid, OK_COLDKEY_ACCOUNT_ID)) + .collect::>(); let position = positions.first().unwrap(); - assert_eq!( - position.liquidity, - helpers_128bit::sqrt( - MockLiquidityProvider::tao_reserve(netuid.into()).to_u64() as u128 - * MockLiquidityProvider::alpha_reserve(netuid.into()).to_u64() as u128 - ) as u64 - ); + assert_eq!(position.liquidity, position_liquidity,); assert_eq!(position.tick_low, tick_low); assert_eq!(position.tick_high, tick_high); assert_eq!(position.fees_alpha, 0); assert_eq!(position.fees_tao, 0); - - // Current liquidity is not updated - assert_eq!(CurrentLiquidity::::get(netuid), liquidity_before); - - // Assert that price movement is in correct 
direction - let sqrt_current_price_after = Pallet::::current_price_sqrt(netuid); - let current_price_after = Pallet::::current_price(netuid); - match order_type { - OrderType::Buy => assert!(current_price_after >= current_price), - OrderType::Sell => assert!(current_price_after <= current_price), - } - - // Assert that current tick is updated - let current_tick = CurrentTick::::get(netuid); - let expected_current_tick = - TickIndex::from_sqrt_price_bounded(sqrt_current_price_after); - assert_eq!(current_tick, expected_current_tick); - }, - ); - }); -} - -// In this test the swap starts and ends within one (large liquidity) position -// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --package pallet-subtensor-swap --lib -- pallet::tests::test_swap_single_position --exact --show-output -#[test] -fn test_swap_single_position() { - let min_price = tick_to_price(TickIndex::MIN); - let max_price = tick_to_price(TickIndex::MAX); - let max_tick = price_to_tick(max_price); - let netuid = NetUid::from(1); - assert_eq!(max_tick, TickIndex::MAX); - - let mut current_price_low = 0_f64; - let mut current_price_high = 0_f64; - let mut current_price = 0_f64; - new_test_ext().execute_with(|| { - let (low, high) = get_ticked_prices_around_current_price(); - current_price_low = low; - current_price_high = high; - current_price = Pallet::::current_price(netuid).to_num::(); - }); + }); + }; + } // Current price is 0.25 // The test case is based on the current price and position prices are defined as a price @@ -945,195 +1106,26 @@ fn test_swap_single_position() { |(price_low_offset, price_high_offset, position_liquidity)| { // Inner part of test case is Order: (order_type, order_liquidity, limit_price) // order_liquidity is represented as a fraction of position_liquidity - [ - (OrderType::Buy, 0.0001, 1000.0_f64), - (OrderType::Sell, 0.0001, 0.0001_f64), - (OrderType::Buy, 0.001, 1000.0_f64), - (OrderType::Sell, 0.001, 0.0001_f64), - (OrderType::Buy, 0.01, 1000.0_f64), - (OrderType::Sell, 0.01, 0.0001_f64), - (OrderType::Buy, 0.1, 1000.0_f64), - (OrderType::Sell, 0.1, 0.0001), - (OrderType::Buy, 0.2, 1000.0_f64), - (OrderType::Sell, 0.2, 0.0001), - (OrderType::Buy, 0.5, 1000.0), - (OrderType::Sell, 0.5, 0.0001), - ] - .into_iter() - .for_each(|(order_type, order_liquidity_fraction, limit_price)| { - new_test_ext().execute_with(|| { - ////////////////////////////////////////////// - // Initialize pool and add the user position - assert_ok!(Pallet::::maybe_initialize_v3(netuid)); - let tao_reserve = MockLiquidityProvider::tao_reserve(netuid.into()).to_u64(); - let alpha_reserve = - MockLiquidityProvider::alpha_reserve(netuid.into()).to_u64(); - let protocol_liquidity = (tao_reserve as f64 * alpha_reserve as f64).sqrt(); - - // Add liquidity - let current_price = Pallet::::current_price(netuid).to_num::(); - let sqrt_current_price = - Pallet::::current_price_sqrt(netuid).to_num::(); - - let price_low = price_low_offset + current_price; - let price_high = price_high_offset + current_price; - let tick_low = price_to_tick(price_low); - let tick_high = price_to_tick(price_high); - let (_position_id, _tao, _alpha) = Pallet::::do_add_liquidity( - netuid, - &OK_COLDKEY_ACCOUNT_ID, - &OK_HOTKEY_ACCOUNT_ID, - tick_low, - tick_high, - position_liquidity, - ) - .unwrap(); - - // Liquidity position at correct ticks - assert_eq!( - Pallet::::count_positions(netuid, &OK_COLDKEY_ACCOUNT_ID), - 1 - ); - - // Get tick infos before the swap - let tick_low_info_before = - Ticks::::get(netuid, tick_low).unwrap_or_default(); - let 
tick_high_info_before = - Ticks::::get(netuid, tick_high).unwrap_or_default(); - let liquidity_before = CurrentLiquidity::::get(netuid); - assert_abs_diff_eq!( - liquidity_before as f64, - protocol_liquidity + position_liquidity as f64, - epsilon = liquidity_before as f64 / 1000. - ); - - ////////////////////////////////////////////// - // Swap - - // Calculate the expected output amount for the cornercase of one step - let order_liquidity = order_liquidity_fraction * position_liquidity as f64; - - let output_amount = match order_type { - OrderType::Buy => { - let denom = sqrt_current_price - * (sqrt_current_price * liquidity_before as f64 + order_liquidity); - let per_order_liq = liquidity_before as f64 / denom; - per_order_liq * order_liquidity - } - OrderType::Sell => { - let denom = - liquidity_before as f64 / sqrt_current_price + order_liquidity; - let per_order_liq = - sqrt_current_price * liquidity_before as f64 / denom; - per_order_liq * order_liquidity - } - }; - - // Do the swap - let sqrt_limit_price = SqrtPrice::from_num((limit_price).sqrt()); - let swap_result = Pallet::::do_swap( - netuid, - order_type, - order_liquidity as u64, - sqrt_limit_price, - false, - false, - ) - .unwrap(); - assert_abs_diff_eq!( - swap_result.amount_paid_out as f64, - output_amount, - epsilon = output_amount / 10. - ); - - if order_liquidity_fraction <= 0.001 { - let (tao_delta_expected, alpha_delta_expected) = match order_type { - OrderType::Buy => (order_liquidity as i64, -(output_amount as i64)), - OrderType::Sell => (-(output_amount as i64), order_liquidity as i64), - }; - assert_abs_diff_eq!( - swap_result.alpha_reserve_delta, - alpha_delta_expected, - epsilon = alpha_delta_expected.abs() / 10 - ); - assert_abs_diff_eq!( - swap_result.tao_reserve_delta, - tao_delta_expected, - epsilon = tao_delta_expected.abs() / 10 - ); - } - - // Assert that price movement is in correct direction - let current_price_after = Pallet::::current_price(netuid); - match order_type { - OrderType::Buy => assert!(current_price_after > current_price), - OrderType::Sell => assert!(current_price_after < current_price), - } - - // Assert that for small amounts price stays within the user position - if (order_liquidity_fraction <= 0.001) - && (price_low_offset > 0.0001) - && (price_high_offset > 0.0001) - { - assert!(current_price_after <= price_high); - assert!(current_price_after >= price_low); - } - - // Check that low and high ticks' fees were updated properly - let tick_low_info = Ticks::::get(netuid, tick_low).unwrap(); - let tick_high_info = Ticks::::get(netuid, tick_high).unwrap(); - let expected_liquidity_net_low = tick_low_info_before.liquidity_net; - let expected_liquidity_gross_low = tick_low_info_before.liquidity_gross; - let expected_liquidity_net_high = tick_high_info_before.liquidity_net; - let expected_liquidity_gross_high = tick_high_info_before.liquidity_gross; - assert_eq!(tick_low_info.liquidity_net, expected_liquidity_net_low,); - assert_eq!(tick_low_info.liquidity_gross, expected_liquidity_gross_low,); - assert_eq!(tick_high_info.liquidity_net, expected_liquidity_net_high,); - assert_eq!( - tick_high_info.liquidity_gross, - expected_liquidity_gross_high, - ); - - // Expected fee amount - let fee_rate = FeeRate::::get(netuid) as f64 / u16::MAX as f64; - let expected_fee = - (order_liquidity - order_liquidity / (1.0 + fee_rate)) as u64; - - // Global fees should be updated - let actual_global_fee = ((match order_type { - OrderType::Buy => FeeGlobalTao::::get(netuid), - OrderType::Sell => 
FeeGlobalAlpha::::get(netuid), - }) - .to_num::() - * (liquidity_before as f64)) - as u64; - - assert_abs_diff_eq!( - swap_result.fee_paid, - expected_fee, - epsilon = expected_fee / 10 - ); - assert_abs_diff_eq!( - actual_global_fee, - expected_fee, - epsilon = expected_fee / 10 - ); - - // Tick fees should be updated - - // Liquidity position should not be updated - let positions = - Positions::::iter_prefix_values((netuid, OK_COLDKEY_ACCOUNT_ID)) - .collect::>(); - let position = positions.first().unwrap(); - - assert_eq!(position.liquidity, position_liquidity,); - assert_eq!(position.tick_low, tick_low); - assert_eq!(position.tick_high, tick_high); - assert_eq!(position.fees_alpha, 0); - assert_eq!(position.fees_tao, 0); - }); - }); + for liquidity_fraction in [0.0001, 0.001, 0.01, 0.1, 0.2, 0.5] { + perform_test!( + GetAlphaForTao, + price_low_offset, + price_high_offset, + position_liquidity, + liquidity_fraction, + 1000.0_f64, + true + ); + perform_test!( + GetTaoForAlpha, + price_low_offset, + price_high_offset, + position_liquidity, + liquidity_fraction, + 0.0001_f64, + false + ); + } }, ); } @@ -1209,102 +1201,78 @@ fn test_swap_multiple_positions() { }, ); - // All these orders are executed without swap reset - [ - (OrderType::Buy, 100_000_u64, 1000.0_f64), - (OrderType::Sell, 100_000, 0.0001_f64), - (OrderType::Buy, 1_000_000, 1000.0_f64), - (OrderType::Sell, 1_000_000, 0.0001_f64), - (OrderType::Buy, 10_000_000, 1000.0_f64), - (OrderType::Sell, 10_000_000, 0.0001_f64), - (OrderType::Buy, 100_000_000, 1000.0), - (OrderType::Sell, 100_000_000, 0.0001), - (OrderType::Buy, 200_000_000, 1000.0_f64), - (OrderType::Sell, 200_000_000, 0.0001), - (OrderType::Buy, 500_000_000, 1000.0), - (OrderType::Sell, 500_000_000, 0.0001), - (OrderType::Buy, 1_000_000_000, 1000.0), - (OrderType::Sell, 1_000_000_000, 0.0001), - (OrderType::Buy, 10_000_000_000, 1000.0), - (OrderType::Sell, 10_000_000_000, 0.0001), - ] - .into_iter() - .for_each(|(order_type, order_liquidity, limit_price)| { - ////////////////////////////////////////////// - // Swap - let sqrt_current_price = Pallet::::current_price_sqrt(netuid); - let current_price = (sqrt_current_price * sqrt_current_price).to_num::(); - let liquidity_before = CurrentLiquidity::::get(netuid); + macro_rules! 
perform_test { + ($order_t:ident, $order_liquidity:expr, $limit_price:expr, $should_price_grow:expr) => { + ////////////////////////////////////////////// + // Swap + let order_liquidity = $order_liquidity; + let limit_price = $limit_price; + let should_price_grow = $should_price_grow; - let output_amount = match order_type { - OrderType::Buy => { - let denom = sqrt_current_price.to_num::() - * (sqrt_current_price.to_num::() * liquidity_before as f64 - + order_liquidity as f64); - let per_order_liq = liquidity_before as f64 / denom; - per_order_liq * order_liquidity as f64 - } - OrderType::Sell => { - let denom = liquidity_before as f64 / sqrt_current_price.to_num::() - + order_liquidity as f64; - let per_order_liq = - sqrt_current_price.to_num::() * liquidity_before as f64 / denom; - per_order_liq * order_liquidity as f64 - } - }; + let sqrt_current_price = AlphaSqrtPrice::::get(netuid); + let current_price = (sqrt_current_price * sqrt_current_price).to_num::(); + let liquidity_before = CurrentLiquidity::::get(netuid); + let output_amount = >::approx_expected_swap_output( + sqrt_current_price.to_num(), + liquidity_before as f64, + order_liquidity as f64, + ); - // Do the swap - let sqrt_limit_price = SqrtPrice::from_num((limit_price).sqrt()); - let swap_result = Pallet::::do_swap( - netuid, - order_type, - order_liquidity, - sqrt_limit_price, - false, - false, - ) - .unwrap(); - assert_abs_diff_eq!( - swap_result.amount_paid_out as f64, - output_amount, - epsilon = output_amount / 10. - ); + // Do the swap + let sqrt_limit_price = SqrtPrice::from_num((limit_price).sqrt()); + let order = $order_t::with_amount(order_liquidity); + let swap_result = + Pallet::::do_swap(netuid, order, sqrt_limit_price, false, false).unwrap(); + assert_abs_diff_eq!( + swap_result.amount_paid_out.to_u64() as f64, + output_amount, + epsilon = output_amount / 10. 
+ ); - let tao_reserve = MockLiquidityProvider::tao_reserve(netuid.into()).to_u64(); - let alpha_reserve = MockLiquidityProvider::alpha_reserve(netuid.into()).to_u64(); - let output_amount = output_amount as u64; + let tao_reserve = TaoReserve::reserve(netuid.into()).to_u64(); + let alpha_reserve = AlphaReserve::reserve(netuid.into()).to_u64(); + let output_amount = output_amount as u64; - assert!(output_amount > 0); + assert!(output_amount > 0); - if alpha_reserve > order_liquidity && tao_reserve > order_liquidity { - let (tao_delta_expected, alpha_delta_expected) = match order_type { - OrderType::Buy => (order_liquidity as i64, -(output_amount as i64)), - OrderType::Sell => (-(output_amount as i64), order_liquidity as i64), - }; - assert_abs_diff_eq!( - swap_result.alpha_reserve_delta, - alpha_delta_expected, - epsilon = alpha_delta_expected.abs() / 100 - ); - assert_abs_diff_eq!( - swap_result.tao_reserve_delta, - tao_delta_expected, - epsilon = tao_delta_expected.abs() / 100 - ); - } + if alpha_reserve > order_liquidity && tao_reserve > order_liquidity { + assert_abs_diff_eq!( + swap_result.paid_in_reserve_delta() as i64, + order_liquidity as i64, + epsilon = order_liquidity as i64 / 100 + ); + assert_abs_diff_eq!( + swap_result.paid_out_reserve_delta() as i64, + -(output_amount as i64), + epsilon = output_amount as i64 / 100 + ); + } - // Assert that price movement is in correct direction - let sqrt_current_price_after = Pallet::::current_price_sqrt(netuid); - let current_price_after = - (sqrt_current_price_after * sqrt_current_price_after).to_num::(); - match order_type { - OrderType::Buy => assert!(current_price_after > current_price), - OrderType::Sell => assert!(current_price_after < current_price), - } - }); + // Assert that price movement is in correct direction + let sqrt_current_price_after = AlphaSqrtPrice::::get(netuid); + let current_price_after = + (sqrt_current_price_after * sqrt_current_price_after).to_num::(); + assert_eq!(should_price_grow, current_price_after > current_price); + }; + } + + // All these orders are executed without swap reset + for order_liquidity in [ + (100_000_u64), + (1_000_000), + (10_000_000), + (100_000_000), + (200_000_000), + (500_000_000), + (1_000_000_000), + (10_000_000_000), + ] { + perform_test!(GetAlphaForTao, order_liquidity, 1000.0_f64, true); + perform_test!(GetTaoForAlpha, order_liquidity, 0.0001_f64, false); + } // Current price shouldn't be much different from the original - let sqrt_current_price_after = Pallet::::current_price_sqrt(netuid); + let sqrt_current_price_after = AlphaSqrtPrice::::get(netuid); let current_price_after = (sqrt_current_price_after * sqrt_current_price_after).to_num::(); assert_abs_diff_eq!( @@ -1320,8 +1288,7 @@ fn test_swap_multiple_positions() { fn test_swap_precision_edge_case() { new_test_ext().execute_with(|| { let netuid = NetUid::from(123); // 123 is netuid with low edge case liquidity - let order_type = OrderType::Sell; - let liquidity = 1_000_000_000_000_000_000; + let order = GetTaoForAlpha::with_amount(1_000_000_000_000_000_000); let tick_low = TickIndex::MIN; let sqrt_limit_price: SqrtPrice = tick_low.try_to_sqrt_price().unwrap(); @@ -1331,10 +1298,9 @@ fn test_swap_precision_edge_case() { // Swap let swap_result = - Pallet::::do_swap(netuid, order_type, liquidity, sqrt_limit_price, false, true) - .unwrap(); + Pallet::::do_swap(netuid, order, sqrt_limit_price, false, true).unwrap(); - assert!(swap_result.amount_paid_out > 0); + assert!(swap_result.amount_paid_out > TaoCurrency::ZERO); }); } @@ 
-1412,14 +1378,20 @@ fn test_convert_deltas() { AlphaSqrtPrice::::insert(netuid, sqrt_price); assert_abs_diff_eq!( - Pallet::::convert_deltas(netuid, OrderType::Sell, delta_in), - expected_sell, - epsilon = 2 + BasicSwapStep::::convert_deltas( + netuid, + delta_in.into() + ), + expected_sell.into(), + epsilon = 2.into() ); assert_abs_diff_eq!( - Pallet::::convert_deltas(netuid, OrderType::Buy, delta_in), - expected_buy, - epsilon = 2 + BasicSwapStep::::convert_deltas( + netuid, + delta_in.into() + ), + expected_buy.into(), + epsilon = 2.into() ); } } @@ -1533,8 +1505,7 @@ fn test_swap_fee_correctness() { // Swap buy and swap sell Pallet::::do_swap( netuid, - OrderType::Buy, - liquidity / 10, + GetAlphaForTao::with_amount(liquidity / 10), u64::MAX.into(), false, false, @@ -1542,8 +1513,7 @@ fn test_swap_fee_correctness() { .unwrap(); Pallet::::do_swap( netuid, - OrderType::Sell, - liquidity / 10, + GetTaoForAlpha::with_amount(liquidity / 10), 0_u64.into(), false, false, @@ -1640,8 +1610,7 @@ fn test_rollback_works() { assert_eq!( Pallet::::do_swap( netuid, - OrderType::Buy, - 1_000_000, + GetAlphaForTao::with_amount(1_000_000), u64::MAX.into(), false, true @@ -1649,8 +1618,7 @@ fn test_rollback_works() { .unwrap(), Pallet::::do_swap( netuid, - OrderType::Buy, - 1_000_000, + GetAlphaForTao::with_amount(1_000_000), u64::MAX.into(), false, false @@ -1694,8 +1662,7 @@ fn test_new_lp_doesnt_get_old_fees() { // Swap buy and swap sell Pallet::::do_swap( netuid, - OrderType::Buy, - liquidity / 10, + GetAlphaForTao::with_amount(liquidity / 10), u64::MAX.into(), false, false, @@ -1703,8 +1670,7 @@ fn test_new_lp_doesnt_get_old_fees() { .unwrap(); Pallet::::do_swap( netuid, - OrderType::Sell, - liquidity / 10, + GetTaoForAlpha::with_amount(liquidity / 10), 0_u64.into(), false, false, @@ -1747,7 +1713,7 @@ fn bbox(t: U64F64, a: U64F64, b: U64F64) -> U64F64 { } fn print_current_price(netuid: NetUid) { - let current_sqrt_price = Pallet::::current_price_sqrt(netuid).to_num::(); + let current_sqrt_price = AlphaSqrtPrice::::get(netuid).to_num::(); let current_price = current_sqrt_price * current_sqrt_price; log::trace!("Current price: {current_price:.6}"); } @@ -1775,20 +1741,16 @@ fn test_wrapping_fees() { print_current_price(netuid); - let swap_amt = 800_000_000_u64; - let order_type = OrderType::Sell; + let order = GetTaoForAlpha::with_amount(800_000_000); let sqrt_limit_price = SqrtPrice::from_num(0.000001); - Pallet::::do_swap(netuid, order_type, swap_amt, sqrt_limit_price, false, false) - .unwrap(); + Pallet::::do_swap(netuid, order, sqrt_limit_price, false, false).unwrap(); - let swap_amt = 1_850_000_000_u64; - let order_type = OrderType::Buy; + let order = GetAlphaForTao::with_amount(1_850_000_000); let sqrt_limit_price = SqrtPrice::from_num(1_000_000.0); print_current_price(netuid); - Pallet::::do_swap(netuid, order_type, swap_amt, sqrt_limit_price, false, false) - .unwrap(); + Pallet::::do_swap(netuid, order, sqrt_limit_price, false, false).unwrap(); print_current_price(netuid); @@ -1802,14 +1764,12 @@ fn test_wrapping_fees() { ) .unwrap(); - let swap_amt = 1_800_000_000_u64; - let order_type = OrderType::Sell; + let order = GetTaoForAlpha::with_amount(1_800_000_000); let sqrt_limit_price = SqrtPrice::from_num(0.000001); - let initial_sqrt_price = Pallet::::current_price_sqrt(netuid); - Pallet::::do_swap(netuid, order_type, swap_amt, sqrt_limit_price, false, false) - .unwrap(); - let final_sqrt_price = Pallet::::current_price_sqrt(netuid); + let initial_sqrt_price = AlphaSqrtPrice::::get(netuid); 
+ Pallet::::do_swap(netuid, order, sqrt_limit_price, false, false).unwrap(); + let final_sqrt_price = AlphaSqrtPrice::::get(netuid); print_current_price(netuid); @@ -1875,74 +1835,70 @@ fn test_less_price_movement() { // - Provide liquidity if iteration provides lq // - Buy or sell // - Save end price if iteration doesn't provide lq - [ - (OrderType::Buy, 0_u64), - (OrderType::Buy, 1_000_000_000_000_u64), - (OrderType::Sell, 0_u64), - (OrderType::Sell, 1_000_000_000_000_u64), - ] - .into_iter() - .for_each(|(order_type, provided_liquidity)| { - new_test_ext().execute_with(|| { - // Setup swap - assert_ok!(Pallet::::maybe_initialize_v3(netuid)); + macro_rules! perform_test { + ($order_t:ident, $provided_liquidity:expr, $limit_price:expr, $should_price_shrink:expr) => { + let provided_liquidity = $provided_liquidity; + let should_price_shrink = $should_price_shrink; + let limit_price = $limit_price; + new_test_ext().execute_with(|| { + // Setup swap + assert_ok!(Pallet::::maybe_initialize_v3(netuid)); - // Buy Alpha - assert_ok!(Pallet::::do_swap( - netuid, - OrderType::Buy, - initial_stake_liquidity, - SqrtPrice::from_num(10_000_000_000_u64), - false, - false - )); + // Buy Alpha + assert_ok!(Pallet::::do_swap( + netuid, + GetAlphaForTao::with_amount(initial_stake_liquidity), + SqrtPrice::from_num(10_000_000_000_u64), + false, + false + )); - // Get current price - let start_price = Pallet::::current_price(netuid); + // Get current price + let start_price = Pallet::::current_price(netuid); - // Add liquidity if this test iteration provides - if provided_liquidity > 0 { - let tick_low = price_to_tick(start_price.to_num::() * 0.5); - let tick_high = price_to_tick(start_price.to_num::() * 1.5); - assert_ok!(Pallet::::do_add_liquidity( + // Add liquidity if this test iteration provides + if provided_liquidity > 0 { + let tick_low = price_to_tick(start_price.to_num::() * 0.5); + let tick_high = price_to_tick(start_price.to_num::() * 1.5); + assert_ok!(Pallet::::do_add_liquidity( + netuid, + &OK_COLDKEY_ACCOUNT_ID, + &OK_HOTKEY_ACCOUNT_ID, + tick_low, + tick_high, + provided_liquidity, + )); + } + + // Swap + let sqrt_limit_price = SqrtPrice::from_num(limit_price); + assert_ok!(Pallet::::do_swap( netuid, - &OK_COLDKEY_ACCOUNT_ID, - &OK_HOTKEY_ACCOUNT_ID, - tick_low, - tick_high, - provided_liquidity, + $order_t::with_amount(swapped_liquidity), + sqrt_limit_price, + false, + false )); - } - - // Swap - let sqrt_limit_price = if order_type == OrderType::Buy { - SqrtPrice::from_num(1000.) 
- } else { - SqrtPrice::from_num(0.001) - }; - assert_ok!(Pallet::::do_swap( - netuid, - order_type, - swapped_liquidity, - sqrt_limit_price, - false, - false - )); - let end_price = Pallet::::current_price(netuid); + let end_price = Pallet::::current_price(netuid); - // Save end price if iteration doesn't provide or compare with previous end price if it does - if provided_liquidity > 0 { - if order_type == OrderType::Buy { - assert!(end_price < last_end_price); + // Save end price if iteration doesn't provide or compare with previous end price if + // it does + if provided_liquidity > 0 { + assert_eq!(should_price_shrink, end_price < last_end_price); } else { - assert!(end_price > last_end_price); + last_end_price = end_price; } - } else { - last_end_price = end_price; - } - }); - }); + }); + }; + } + + for provided_liquidity in [0, 1_000_000_000_000_u64] { + perform_test!(GetAlphaForTao, provided_liquidity, 1000.0_f64, true); + } + for provided_liquidity in [0, 1_000_000_000_000_u64] { + perform_test!(GetTaoForAlpha, provided_liquidity, 0.001_f64, false); + } } #[test] @@ -2019,8 +1975,7 @@ fn test_liquidate_v3_removes_positions_ticks_and_state() { let sqrt_limit_price = SqrtPrice::from_num(1_000_000.0); assert_ok!(Pallet::::do_swap( netuid, - OrderType::Buy, - 1_000_000, + GetAlphaForTao::with_amount(1_000_000), sqrt_limit_price, false, false @@ -2320,8 +2275,8 @@ fn liquidate_v3_refunds_user_funds_and_clears_state() { need_alpha.into(), ) .expect("decrease ALPHA"); - ::BalanceOps::increase_provided_tao_reserve(netuid.into(), tao_taken); - ::BalanceOps::increase_provided_alpha_reserve(netuid.into(), alpha_taken); + TaoReserve::increase_provided(netuid.into(), tao_taken); + AlphaReserve::increase_provided(netuid.into(), alpha_taken); // Users‑only liquidation. assert_ok!(Pallet::::do_dissolve_all_liquidity_providers(netuid)); @@ -2387,7 +2342,7 @@ fn refund_alpha_single_provider_exact() { alpha_needed.into(), ) .expect("decrease ALPHA"); - ::BalanceOps::increase_provided_alpha_reserve(netuid.into(), alpha_taken); + AlphaReserve::increase_provided(netuid.into(), alpha_taken); // --- Act: users‑only dissolve. 
assert_ok!(Pallet::::do_dissolve_all_liquidity_providers(netuid)); @@ -2458,12 +2413,12 @@ fn refund_alpha_multiple_providers_proportional_to_principal() { let a1_taken = ::BalanceOps::decrease_stake(&c1, &h1, netuid.into(), a1.into()) .expect("decrease α #1"); - ::BalanceOps::increase_provided_alpha_reserve(netuid.into(), a1_taken); + AlphaReserve::increase_provided(netuid.into(), a1_taken); let a2_taken = ::BalanceOps::decrease_stake(&c2, &h2, netuid.into(), a2.into()) .expect("decrease α #2"); - ::BalanceOps::increase_provided_alpha_reserve(netuid.into(), a2_taken); + AlphaReserve::increase_provided(netuid.into(), a2_taken); // Act assert_ok!(Pallet::::do_dissolve_all_liquidity_providers(netuid)); @@ -2520,12 +2475,12 @@ fn refund_alpha_same_cold_multiple_hotkeys_conserved_to_owner() { let t1 = ::BalanceOps::decrease_stake(&cold, &hot1, netuid.into(), a1.into()) .expect("decr α #hot1"); - ::BalanceOps::increase_provided_alpha_reserve(netuid.into(), t1); + AlphaReserve::increase_provided(netuid.into(), t1); let t2 = ::BalanceOps::decrease_stake(&cold, &hot2, netuid.into(), a2.into()) .expect("decr α #hot2"); - ::BalanceOps::increase_provided_alpha_reserve(netuid.into(), t2); + AlphaReserve::increase_provided(netuid.into(), t2); // Act assert_ok!(Pallet::::do_dissolve_all_liquidity_providers(netuid)); @@ -2615,8 +2570,8 @@ fn test_dissolve_v3_green_path_refund_tao_stake_alpha_and_clear_state() { ) .expect("decrease ALPHA"); - ::BalanceOps::increase_provided_tao_reserve(netuid.into(), tao_taken); - ::BalanceOps::increase_provided_alpha_reserve(netuid.into(), alpha_taken); + TaoReserve::increase_provided(netuid.into(), tao_taken); + AlphaReserve::increase_provided(netuid.into(), alpha_taken); // --- Act: dissolve (GREEN PATH: permitted validators exist) --- assert_ok!(Pallet::::do_dissolve_all_liquidity_providers(netuid)); @@ -2789,3 +2744,172 @@ fn test_clear_protocol_liquidity_green_path() { assert!(!SwapV3Initialized::::contains_key(netuid)); }); } + +fn as_tuple( + (t_used, a_used, t_rem, a_rem): (TaoCurrency, AlphaCurrency, TaoCurrency, AlphaCurrency), +) -> (u64, u64, u64, u64) { + ( + u64::from(t_used), + u64::from(a_used), + u64::from(t_rem), + u64::from(a_rem), + ) +} + +#[test] +fn proportional_when_price_is_one_and_tao_is_plenty() { + // sqrt_price = 1.0 => price = 1.0 + let sqrt = U64F64::from_num(1u64); + let amount_tao: TaoCurrency = 10u64.into(); + let amount_alpha: AlphaCurrency = 3u64.into(); + + // alpha * price = 3 * 1 = 3 <= amount_tao(10) + let out = + Pallet::::get_proportional_alpha_tao_and_remainders(sqrt, amount_tao, amount_alpha); + assert_eq!(as_tuple(out), (3, 3, 7, 0)); +} + +#[test] +fn proportional_when_price_is_one_and_alpha_is_excess() { + // sqrt_price = 1.0 => price = 1.0 + let sqrt = U64F64::from_num(1u64); + let amount_tao: TaoCurrency = 5u64.into(); + let amount_alpha: AlphaCurrency = 10u64.into(); + + // tao is limiting: alpha_equiv = floor(5 / 1) = 5 + let out = + Pallet::::get_proportional_alpha_tao_and_remainders(sqrt, amount_tao, amount_alpha); + assert_eq!(as_tuple(out), (5, 5, 0, 5)); +} + +#[test] +fn proportional_with_higher_price_and_alpha_limiting() { + // Choose sqrt_price = 2.0 => price = 4.0 (since implementation squares it) + let sqrt = U64F64::from_num(2u64); + let amount_tao: TaoCurrency = 85u64.into(); + let amount_alpha: AlphaCurrency = 20u64.into(); + + // tao_equivalent = alpha * price = 20 * 4 = 80 < 85 => alpha limits tao + // remainders: tao 5, alpha 0 + let out = + Pallet::::get_proportional_alpha_tao_and_remainders(sqrt, 
amount_tao, amount_alpha);
+    assert_eq!(as_tuple(out), (80, 20, 5, 0));
+}
+
+#[test]
+fn proportional_with_higher_price_and_tao_limiting() {
+    // Choose sqrt_price = 2.0 => price = 4.0 (since implementation squares it)
+    let sqrt = U64F64::from_num(2u64);
+    let amount_tao: TaoCurrency = 50u64.into();
+    let amount_alpha: AlphaCurrency = 20u64.into();
+
+    // tao_equivalent = alpha * price = 20 * 4 = 80 > 50 => tao limits alpha
+    // alpha_equivalent = floor(50 / 4) = 12
+    // remainders: tao 0, alpha 20 - 12 = 8
+    let out =
+        Pallet::<Test>::get_proportional_alpha_tao_and_remainders(sqrt, amount_tao, amount_alpha);
+    assert_eq!(as_tuple(out), (50, 12, 0, 8));
+}
+
+#[test]
+fn zero_price_uses_no_tao_and_all_alpha() {
+    // sqrt_price = 0 => price = 0
+    let sqrt = U64F64::from_num(0u64);
+    let amount_tao: TaoCurrency = 42u64.into();
+    let amount_alpha: AlphaCurrency = 17u64.into();
+
+    // tao_equivalent = 17 * 0 = 0 <= 42
+    let out =
+        Pallet::<Test>::get_proportional_alpha_tao_and_remainders(sqrt, amount_tao, amount_alpha);
+    assert_eq!(as_tuple(out), (0, 17, 42, 0));
+}
+
+#[test]
+fn rounding_down_behavior_when_dividing_by_price() {
+    // sqrt_price = 2.0 => price = 4.0
+    let sqrt = U64F64::from_num(2u64);
+    let amount_tao: TaoCurrency = 13u64.into();
+    let amount_alpha: AlphaCurrency = 100u64.into();
+
+    // tao is limiting; alpha_equiv = floor(13 / 4) = 3
+    // remainders: tao 0, alpha 100 - 3 = 97
+    let out =
+        Pallet::<Test>::get_proportional_alpha_tao_and_remainders(sqrt, amount_tao, amount_alpha);
+    assert_eq!(as_tuple(out), (13, 3, 0, 97));
+}
+
+#[test]
+fn exact_fit_when_tao_matches_alpha_times_price() {
+    // sqrt_price = 1.0 => price = 1.0
+    let sqrt = U64F64::from_num(1u64);
+    let amount_tao: TaoCurrency = 9u64.into();
+    let amount_alpha: AlphaCurrency = 9u64.into();
+
+    let out =
+        Pallet::<Test>::get_proportional_alpha_tao_and_remainders(sqrt, amount_tao, amount_alpha);
+    assert_eq!(as_tuple(out), (9, 9, 0, 0));
+}
+
+#[test]
+fn handles_zero_balances() {
+    let sqrt = U64F64::from_num(1u64);
+
+    // Zero TAO, some alpha
+    let out =
+        Pallet::<Test>::get_proportional_alpha_tao_and_remainders(sqrt, 0u64.into(), 7u64.into());
+    // tao limits; alpha_equiv = floor(0 / 1) = 0
+    assert_eq!(as_tuple(out), (0, 0, 0, 7));
+
+    // Some TAO, zero alpha
+    let out =
+        Pallet::<Test>::get_proportional_alpha_tao_and_remainders(sqrt, 7u64.into(), 0u64.into());
+    // tao_equiv = 0 * 1 = 0 <= 7
+    assert_eq!(as_tuple(out), (0, 0, 7, 0));
+
+    // Both zero
+    let out =
+        Pallet::<Test>::get_proportional_alpha_tao_and_remainders(sqrt, 0u64.into(), 0u64.into());
+    assert_eq!(as_tuple(out), (0, 0, 0, 0));
+}
+
+#[test]
+fn adjust_protocol_liquidity_uses_and_sets_scrap_reservoirs() {
+    new_test_ext().execute_with(|| {
+        // --- Arrange
+        let netuid: NetUid = 1u16.into();
+        // Price = 1.0 (since sqrt_price^2 = 1), so proportional match is 1:1
+        AlphaSqrtPrice::<Test>::insert(netuid, U64F64::saturating_from_num(1u64));
+
+        // Start with some non-zero scrap reservoirs
+        ScrapReservoirTao::<Test>::insert(netuid, TaoCurrency::from(7u64));
+        ScrapReservoirAlpha::<Test>::insert(netuid, AlphaCurrency::from(5u64));
+
+        // Create a minimal protocol position so the function’s body executes.
+        let protocol = Pallet::<Test>::protocol_account_id();
+        let position = Position::new(
+            PositionId::from(0),
+            netuid,
+            TickIndex::MIN,
+            TickIndex::MAX,
+            0,
+        );
+        // Ensure collect_fees() returns (0,0) via zeroed fees in `position` (default).
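+        // Stored under the protocol account's prefix so adjust_protocol_liquidity (called below) picks it up.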
+        Positions::<Test>::insert((netuid, protocol, position.id), position.clone());
+
+        // --- Act
+        // No external deltas or fees; only reservoirs should be considered.
+        // With price=1, the exact proportional pair uses 5 alpha and 5 tao,
+        // leaving tao scrap = 7 - 5 = 2, alpha scrap = 5 - 5 = 0.
+        Pallet::<Test>::adjust_protocol_liquidity(netuid, 0u64.into(), 0u64.into());
+
+        // --- Assert: reservoirs were READ (used in proportional calc) and then SET (updated)
+        assert_eq!(
+            ScrapReservoirTao::<Test>::get(netuid),
+            TaoCurrency::from(2u64)
+        );
+        assert_eq!(
+            ScrapReservoirAlpha::<Test>::get(netuid),
+            AlphaCurrency::from(0u64)
+        );
+    });
+}
diff --git a/pallets/transaction-fee/src/lib.rs b/pallets/transaction-fee/src/lib.rs
index 42ba69c841..89326457f6 100644
--- a/pallets/transaction-fee/src/lib.rs
+++ b/pallets/transaction-fee/src/lib.rs
@@ -127,7 +127,7 @@ where
     ///
     /// If this function returns true, but at the time of execution the Alpha price
     /// changes and it becomes impossible to pay tx fee with the Alpha balance,
-    /// the transaction still executes and all Alpha is withdrawn from the account.
+    /// the transaction still executes and all Alpha is withdrawn from the account.
     fn can_withdraw_in_alpha(
         coldkey: &AccountIdOf,
         alpha_vec: &[(AccountIdOf, NetUid)],
diff --git a/pallets/transaction-fee/src/tests/mock.rs b/pallets/transaction-fee/src/tests/mock.rs
index 0f690bfd82..ee5b1693ba 100644
--- a/pallets/transaction-fee/src/tests/mock.rs
+++ b/pallets/transaction-fee/src/tests/mock.rs
@@ -9,7 +9,7 @@ use frame_support::{
     weights::IdentityFee,
 };
 use frame_system::{
-    self as system, EnsureNever, EnsureRoot, RawOrigin, limits, offchain::CreateTransactionBase,
+    self as system, EnsureRoot, RawOrigin, limits, offchain::CreateTransactionBase,
 };
 pub use pallet_subtensor::*;
 pub use sp_core::U256;
@@ -21,8 +21,8 @@ use sp_runtime::{
 };
 use sp_std::cmp::Ordering;
 use sp_weights::Weight;
-pub use subtensor_runtime_common::{AlphaCurrency, NetUid, TaoCurrency};
-use subtensor_swap_interface::{OrderType, SwapHandler};
+pub use subtensor_runtime_common::{AlphaCurrency, Currency, NetUid, TaoCurrency};
+use subtensor_swap_interface::{Order, SwapHandler};
 use crate::SubtensorTxFeeHandler;
 use pallet_transaction_payment::{ConstFeeMultiplier, Multiplier};
@@ -142,7 +142,6 @@ impl pallet_transaction_payment::Config for Test {
 parameter_types! {
     pub const InitialMinAllowedWeights: u16 = 0;
     pub const InitialEmissionValue: u16 = 0;
-    pub const InitialMaxWeightsLimit: u16 = u16::MAX;
     pub BlockWeights: limits::BlockWeights = limits::BlockWeights::with_sensible_defaults(
         Weight::from_parts(2_000_000_000_000, u64::MAX),
         Perbill::from_percent(75),
     );
 }
@@ -193,7 +192,6 @@ parameter_types! {
     pub const InitialMinDifficulty: u64 = 1;
     pub const InitialMaxDifficulty: u64 = u64::MAX;
     pub const InitialRAORecycledForRegistration: u64 = 0;
-    pub const InitialSenateRequiredStakePercentage: u64 = 2; // 2 percent of total stake
     pub const InitialNetworkImmunityPeriod: u64 = 7200 * 7;
     pub const InitialNetworkMinLockCost: u64 = 100_000_000_000;
     pub const InitialSubnetOwnerCut: u16 = 0; // 0%. 100% of rewards go to validators + miners.
@@ -225,13 +223,9 @@ impl pallet_subtensor::Config for Test { type Currency = Balances; type InitialIssuance = InitialIssuance; type SudoRuntimeCall = RuntimeCall; - type CouncilOrigin = EnsureNever; - type SenateMembers = (); - type TriumvirateInterface = (); type Scheduler = Scheduler; type InitialMinAllowedWeights = InitialMinAllowedWeights; type InitialEmissionValue = InitialEmissionValue; - type InitialMaxWeightsLimit = InitialMaxWeightsLimit; type InitialTempo = InitialTempo; type InitialDifficulty = InitialDifficulty; type InitialAdjustmentInterval = InitialAdjustmentInterval; @@ -270,7 +264,6 @@ impl pallet_subtensor::Config for Test { type MinBurnUpperBound = MinBurnUpperBound; type MaxBurnLowerBound = MaxBurnLowerBound; type InitialRAORecycledForRegistration = InitialRAORecycledForRegistration; - type InitialSenateRequiredStakePercentage = InitialSenateRequiredStakePercentage; type InitialNetworkImmunityPeriod = InitialNetworkImmunityPeriod; type InitialNetworkMinLockCost = InitialNetworkMinLockCost; type InitialSubnetOwnerCut = InitialSubnetOwnerCut; @@ -406,6 +399,8 @@ impl pallet_subtensor_swap::Config for Test { type SubnetInfo = SubtensorModule; type BalanceOps = SubtensorModule; type ProtocolId = SwapProtocolId; + type TaoReserve = pallet_subtensor::TaoCurrencyReserve; + type AlphaReserve = pallet_subtensor::AlphaCurrencyReserve; type MaxFeeRate = SwapMaxFeeRate; type MaxPositions = SwapMaxPositions; type MinimumLiquidity = SwapMinimumLiquidity; @@ -611,10 +606,10 @@ pub(crate) fn swap_alpha_to_tao_ext( return (alpha.into(), 0); } + let order = GetTaoForAlpha::::with_amount(alpha); let result = ::SwapInterface::swap( netuid.into(), - OrderType::Sell, - alpha.into(), + order, ::SwapInterface::min_price(), drop_fees, true, @@ -624,10 +619,10 @@ pub(crate) fn swap_alpha_to_tao_ext( let result = result.unwrap(); - // we don't want to have silent 0 comparissons in tests - assert!(result.amount_paid_out > 0); + // we don't want to have silent 0 comparisons in tests + assert!(!result.amount_paid_out.is_zero()); - (result.amount_paid_out, result.fee_paid) + (result.amount_paid_out.to_u64(), result.fee_paid.to_u64()) } pub(crate) fn swap_alpha_to_tao(netuid: NetUid, alpha: AlphaCurrency) -> (u64, u64) { diff --git a/pallets/utility/Cargo.toml b/pallets/utility/Cargo.toml index a61327e4b7..5bb38fcf82 100644 --- a/pallets/utility/Cargo.toml +++ b/pallets/utility/Cargo.toml @@ -25,7 +25,6 @@ subtensor-macros.workspace = true [dev-dependencies] pallet-balances = { workspace = true, default-features = true } -pallet-subtensor-collective = { workspace = true, default-features = true } pallet-root-testing = { workspace = true, default-features = true } pallet-timestamp = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } @@ -47,7 +46,6 @@ runtime-benchmarks = [ "frame-support/runtime-benchmarks", "frame-system/runtime-benchmarks", "pallet-balances/runtime-benchmarks", - "pallet-subtensor-collective/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", "sp-runtime/runtime-benchmarks", ] @@ -55,7 +53,6 @@ try-runtime = [ "frame-support/try-runtime", "frame-system/try-runtime", "pallet-balances/try-runtime", - "pallet-subtensor-collective/try-runtime", "pallet-root-testing/try-runtime", "pallet-timestamp/try-runtime", "sp-runtime/try-runtime", diff --git a/pallets/utility/src/lib.rs b/pallets/utility/src/lib.rs index cac7b0a1b1..c54413c2ea 100644 --- a/pallets/utility/src/lib.rs +++ b/pallets/utility/src/lib.rs @@ -69,7 +69,10 @@ use 
frame_support::{ }; use sp_core::TypeId; use sp_io::hashing::blake2_256; -use sp_runtime::traits::{BadOrigin, Dispatchable, TrailingZeroInput}; +use sp_runtime::{ + DispatchError, + traits::{BadOrigin, Dispatchable, TrailingZeroInput}, +}; pub use weights::WeightInfo; use subtensor_macros::freeze_struct; @@ -77,6 +80,7 @@ use subtensor_macros::freeze_struct; pub use pallet::*; #[frame_support::pallet] +#[allow(clippy::expect_used)] pub mod pallet { use super::*; use frame_support::{dispatch::DispatchClass, pallet_prelude::*}; @@ -167,10 +171,14 @@ pub mod pallet { pub enum Error { /// Too many calls batched. TooManyCalls, + /// Bad input data for derived account ID + InvalidDerivedAccount, } #[pallet::call] impl Pallet { + #![deny(clippy::expect_used)] + /// Send a batch of dispatch calls. /// /// May be called from any origin except `None`. @@ -271,7 +279,7 @@ pub mod pallet { ) -> DispatchResultWithPostInfo { let mut origin = origin; let who = ensure_signed(origin.clone())?; - let pseudonym = Self::derivative_account_id(who, index); + let pseudonym = Self::derivative_account_id(who, index)?; origin.set_caller_from(frame_system::RawOrigin::Signed(pseudonym)); let info = call.get_dispatch_info(); let result = call.dispatch(origin); @@ -448,7 +456,7 @@ pub mod pallet { } else { Self::deposit_event(Event::BatchCompleted); } - let base_weight = T::WeightInfo::batch(calls_len as u32); + let base_weight = T::WeightInfo::force_batch(calls_len as u32); Ok(Some(base_weight.saturating_add(weight)).into()) } @@ -638,9 +646,12 @@ impl TypeId for IndexedUtilityPalletId { impl Pallet { /// Derive a derivative account ID from the owner account and the sub-account index. - pub fn derivative_account_id(who: T::AccountId, index: u16) -> T::AccountId { + pub fn derivative_account_id( + who: T::AccountId, + index: u16, + ) -> Result { let entropy = (b"modlpy/utilisuba", who, index).using_encoded(blake2_256); - Decode::decode(&mut TrailingZeroInput::new(entropy.as_ref())) - .expect("infinite length input; no invalid inputs for type; qed") + T::AccountId::decode(&mut TrailingZeroInput::new(entropy.as_ref())) + .map_err(|_| Error::::InvalidDerivedAccount.into()) } } diff --git a/pallets/utility/src/tests.rs b/pallets/utility/src/tests.rs index 09bb7192bc..8b0a9274c2 100644 --- a/pallets/utility/src/tests.rs +++ b/pallets/utility/src/tests.rs @@ -18,7 +18,11 @@ // Tests for Utility Pallet #![cfg(test)] -#![allow(clippy::arithmetic_side_effects, clippy::unwrap_used)] +#![allow( + clippy::arithmetic_side_effects, + clippy::expect_used, + clippy::unwrap_used +)] use super::*; @@ -30,11 +34,9 @@ use frame_support::{ traits::{ConstU64, Contains}, weights::Weight, }; -use pallet_subtensor_collective as pallet_collective; -use pallet_subtensor_collective::{EnsureProportionAtLeast, Instance1}; use sp_runtime::{ BuildStorage, DispatchError, TokenError, - traits::{BadOrigin, BlakeTwo256, Dispatchable, Hash}, + traits::{BadOrigin, Dispatchable}, }; type BlockNumber = u64; @@ -89,40 +91,6 @@ pub mod example { } } -mod mock_democracy { - pub use pallet::*; - #[frame_support::pallet(dev_mode)] - pub mod pallet { - use frame_support::pallet_prelude::*; - use frame_system::pallet_prelude::*; - - #[pallet::pallet] - pub struct Pallet(_); - - #[pallet::config] - pub trait Config: frame_system::Config + Sized { - type ExternalMajorityOrigin: EnsureOrigin; - } - - #[pallet::call] - impl Pallet { - #[pallet::call_index(3)] - #[pallet::weight(0)] - pub fn external_propose_majority(origin: OriginFor) -> DispatchResult { - 
T::ExternalMajorityOrigin::ensure_origin(origin)?; - Self::deposit_event(Event::::ExternalProposed); - Ok(()) - } - } - - #[pallet::event] - #[pallet::generate_deposit(pub(super) fn deposit_event)] - pub enum Event { - ExternalProposed, - } - } -} - type Block = frame_system::mocking::MockBlock; frame_support::construct_runtime!( @@ -132,10 +100,8 @@ frame_support::construct_runtime!( Timestamp: pallet_timestamp = 2, Balances: pallet_balances = 3, RootTesting: pallet_root_testing = 4, - Council: pallet_collective:: = 5, Utility: utility = 6, Example: example = 7, - Democracy: mock_democracy = 8, } ); @@ -178,42 +144,6 @@ parameter_types! { pub MaxProposalWeight: Weight = BlockWeights::get().max_block.saturating_div(2); } -pub struct MemberProposals; -impl pallet_collective::CanPropose for MemberProposals { - fn can_propose(who: &u64) -> bool { - [1, 2, 3].contains(who) - } -} - -pub struct MemberVotes; -impl pallet_collective::CanVote for MemberVotes { - fn can_vote(who: &u64) -> bool { - [1, 2, 3].contains(who) - } -} - -pub struct StoredVotingMembers; -impl pallet_collective::GetVotingMembers for StoredVotingMembers { - fn get_count() -> u32 { - 3 - } -} - -type CouncilCollective = pallet_collective::Instance1; -impl pallet_collective::Config for Test { - type RuntimeOrigin = RuntimeOrigin; - type Proposal = RuntimeCall; - type MotionDuration = MotionDuration; - type MaxProposals = MaxProposals; - type MaxMembers = MaxMembers; - type DefaultVote = pallet_collective::PrimeDefaultVote; - type WeightInfo = (); - type SetMembersOrigin = frame_system::EnsureRoot; - type CanPropose = MemberProposals; - type CanVote = MemberVotes; - type GetVotingMembers = StoredVotingMembers; -} - impl example::Config for Test {} pub struct TestBaseCallFilter; @@ -227,15 +157,11 @@ impl Contains for TestBaseCallFilter { RuntimeCall::System(frame_system::Call::remark { .. }) => true, // For tests RuntimeCall::Example(_) => true, - // For council origin tests. 
- RuntimeCall::Democracy(_) => true, _ => false, } } } -impl mock_democracy::Config for Test { - type ExternalMajorityOrigin = EnsureProportionAtLeast; -} + impl Config for Test { type RuntimeCall = RuntimeCall; type PalletsOrigin = OriginCaller; @@ -261,13 +187,6 @@ pub fn new_test_ext() -> sp_io::TestExternalities { .assimilate_storage(&mut t) .unwrap(); - pallet_collective::GenesisConfig:: { - members: vec![1, 2, 3], - phantom: Default::default(), - } - .assimilate_storage(&mut t) - .unwrap(); - let mut ext = sp_io::TestExternalities::new(t); ext.execute_with(|| System::set_block_number(1)); ext @@ -302,7 +221,7 @@ fn utility_events() -> Vec { #[test] fn as_derivative_works() { new_test_ext().execute_with(|| { - let sub_1_0 = Utility::derivative_account_id(1, 0); + let sub_1_0 = Utility::derivative_account_id(1, 0).unwrap(); assert_ok!(Balances::transfer_allow_death( RuntimeOrigin::signed(1), sub_1_0, @@ -893,97 +812,6 @@ fn batch_all_doesnt_work_with_inherents() { }) } -#[test] -fn batch_works_with_council_origin() { - new_test_ext().execute_with(|| { - let proposal = RuntimeCall::Utility(UtilityCall::batch { - calls: vec![RuntimeCall::Democracy( - mock_democracy::Call::external_propose_majority {}, - )], - }); - let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); - let proposal_weight = proposal.get_dispatch_info().call_weight; - let hash = BlakeTwo256::hash_of(&proposal); - - assert_ok!(Council::propose( - RuntimeOrigin::signed(1), - Box::new(proposal.clone()), - proposal_len, - 3, - )); - - assert_ok!(Council::vote(RuntimeOrigin::signed(1), hash, 0, true)); - assert_ok!(Council::vote(RuntimeOrigin::signed(2), hash, 0, true)); - assert_ok!(Council::vote(RuntimeOrigin::signed(3), hash, 0, true)); - - System::set_block_number(4); - - assert_ok!(Council::close( - RuntimeOrigin::root(), - hash, - 0, - proposal_weight, - proposal_len - )); - - System::assert_last_event(RuntimeEvent::Council(pallet_collective::Event::Executed { - proposal_hash: hash, - result: Ok(()), - })); - }) -} - -#[test] -fn force_batch_works_with_council_origin() { - new_test_ext().execute_with(|| { - let proposal = RuntimeCall::Utility(UtilityCall::force_batch { - calls: vec![RuntimeCall::Democracy( - mock_democracy::Call::external_propose_majority {}, - )], - }); - let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); - let proposal_weight = proposal.get_dispatch_info().call_weight; - let hash = BlakeTwo256::hash_of(&proposal); - - assert_ok!(Council::propose( - RuntimeOrigin::signed(1), - Box::new(proposal.clone()), - proposal_len, - 3, - )); - - assert_ok!(Council::vote(RuntimeOrigin::signed(1), hash, 0, true)); - assert_ok!(Council::vote(RuntimeOrigin::signed(2), hash, 0, true)); - assert_ok!(Council::vote(RuntimeOrigin::signed(3), hash, 0, true)); - - System::set_block_number(4); - assert_ok!(Council::close( - RuntimeOrigin::root(), - hash, - 0, - proposal_weight, - proposal_len - )); - - System::assert_last_event(RuntimeEvent::Council(pallet_collective::Event::Executed { - proposal_hash: hash, - result: Ok(()), - })); - }) -} - -#[test] -fn batch_all_works_with_council_origin() { - new_test_ext().execute_with(|| { - assert_ok!(Utility::batch_all( - RuntimeOrigin::from(pallet_collective::RawOrigin::Members(3, 3)), - vec![RuntimeCall::Democracy( - mock_democracy::Call::external_propose_majority {} - )] - )); - }) -} - #[test] fn with_weight_works() { new_test_ext().execute_with(|| { diff --git a/precompiles/src/alpha.rs b/precompiles/src/alpha.rs index 29f7cab568..9dd0379f10 100644 
--- a/precompiles/src/alpha.rs +++ b/precompiles/src/alpha.rs @@ -7,7 +7,7 @@ use sp_core::U256; use sp_std::vec::Vec; use substrate_fixed::types::U96F32; use subtensor_runtime_common::{Currency, NetUid}; -use subtensor_swap_interface::{OrderType, SwapHandler}; +use subtensor_swap_interface::{Order, SwapHandler}; use crate::PrecompileExt; @@ -36,9 +36,7 @@ where #[precompile::view] fn get_alpha_price(_handle: &mut impl PrecompileHandle, netuid: u16) -> EvmResult { let price = - as SwapHandler>::current_alpha_price( - netuid.into(), - ); + as SwapHandler>::current_alpha_price(netuid.into()); let price: SubstrateBalance = price.saturating_to_num::().into(); let price_eth = ::BalanceConverter::into_evm_balance(price) .map(|amount| amount.into_u256()) @@ -104,16 +102,13 @@ where netuid: u16, tao: u64, ) -> EvmResult { + let order = pallet_subtensor::GetAlphaForTao::::with_amount(tao); let swap_result = - as SwapHandler>::sim_swap( - netuid.into(), - OrderType::Buy, - tao, - ) - .map_err(|e| PrecompileFailure::Error { - exit_status: ExitError::Other(Into::<&'static str>::into(e).into()), - })?; - Ok(U256::from(swap_result.amount_paid_out)) + as SwapHandler>::sim_swap(netuid.into(), order) + .map_err(|e| PrecompileFailure::Error { + exit_status: ExitError::Other(Into::<&'static str>::into(e).into()), + })?; + Ok(U256::from(swap_result.amount_paid_out.to_u64())) } #[precompile::public("simSwapAlphaForTao(uint16,uint64)")] @@ -123,16 +118,13 @@ where netuid: u16, alpha: u64, ) -> EvmResult { + let order = pallet_subtensor::GetTaoForAlpha::::with_amount(alpha); let swap_result = - as SwapHandler>::sim_swap( - netuid.into(), - OrderType::Sell, - alpha, - ) - .map_err(|e| PrecompileFailure::Error { - exit_status: ExitError::Other(Into::<&'static str>::into(e).into()), - })?; - Ok(U256::from(swap_result.amount_paid_out)) + as SwapHandler>::sim_swap(netuid.into(), order) + .map_err(|e| PrecompileFailure::Error { + exit_status: ExitError::Other(Into::<&'static str>::into(e).into()), + })?; + Ok(U256::from(swap_result.amount_paid_out.to_u64())) } #[precompile::public("getSubnetMechanism(uint16)")] @@ -201,8 +193,7 @@ where let mut sum_alpha_price: U96F32 = U96F32::from_num(0); for (netuid, _) in netuids { - let price = - as SwapHandler>::current_alpha_price( + let price = as SwapHandler>::current_alpha_price( netuid.into(), ); diff --git a/precompiles/src/solidity/stakingV2.abi b/precompiles/src/solidity/stakingV2.abi index 2c936898ab..40a5acc1d9 100644 --- a/precompiles/src/solidity/stakingV2.abi +++ b/precompiles/src/solidity/stakingV2.abi @@ -9,7 +9,7 @@ ], "name": "addProxy", "outputs": [], - "stateMutability": "nonpayable", + "stateMutability": "payable", "type": "function" }, { @@ -226,7 +226,7 @@ ], "name": "moveStake", "outputs": [], - "stateMutability": "nonpayable", + "stateMutability": "payable", "type": "function" }, { @@ -239,7 +239,7 @@ ], "name": "removeProxy", "outputs": [], - "stateMutability": "nonpayable", + "stateMutability": "payable", "type": "function" }, { @@ -262,7 +262,7 @@ ], "name": "removeStake", "outputs": [], - "stateMutability": "nonpayable", + "stateMutability": "payable", "type": "function" }, { @@ -280,7 +280,7 @@ ], "name": "removeStakeFull", "outputs": [], - "stateMutability": "nonpayable", + "stateMutability": "payable", "type": "function" }, { @@ -303,7 +303,7 @@ ], "name": "removeStakeFullLimit", "outputs": [], - "stateMutability": "nonpayable", + "stateMutability": "payable", "type": "function" }, { @@ -336,7 +336,7 @@ ], "name": "removeStakeLimit", "outputs": [], 
- "stateMutability": "nonpayable", + "stateMutability": "payable", "type": "function" }, { @@ -369,7 +369,30 @@ ], "name": "transferStake", "outputs": [], - "stateMutability": "nonpayable", + "stateMutability": "payable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "bytes32", + "name": "hotkey", + "type": "bytes32" + }, + { + "internalType": "uint256", + "name": "amount", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "netuid", + "type": "uint256" + } + ], + "name": "burnAlpha", + "outputs": [], + "stateMutability": "payable", "type": "function" } ] diff --git a/precompiles/src/solidity/stakingV2.sol b/precompiles/src/solidity/stakingV2.sol index c8c39761ad..fefca82fd9 100644 --- a/precompiles/src/solidity/stakingV2.sol +++ b/precompiles/src/solidity/stakingV2.sol @@ -48,7 +48,7 @@ interface IStaking { bytes32 hotkey, uint256 amount, uint256 netuid - ) external; + ) external payable; /** * @dev Moves a subtensor stake `amount` associated with the `hotkey` to a different hotkey @@ -76,7 +76,7 @@ interface IStaking { uint256 origin_netuid, uint256 destination_netuid, uint256 amount - ) external; + ) external payable; /** * @dev Transfer a subtensor stake `amount` associated with the transaction signer to a different coldkey @@ -104,7 +104,7 @@ interface IStaking { uint256 origin_netuid, uint256 destination_netuid, uint256 amount - ) external; + ) external payable; /** * @dev Returns the amount of RAO staked by the coldkey. @@ -156,14 +156,14 @@ interface IStaking { * * @param delegate The public key (32 bytes) of the delegate. */ - function addProxy(bytes32 delegate) external; + function addProxy(bytes32 delegate) external payable; /** * @dev Removes staking proxy account. * * @param delegate The public key (32 bytes) of the delegate. */ - function removeProxy(bytes32 delegate) external; + function removeProxy(bytes32 delegate) external payable; /** * @dev Returns the validators that have staked alpha under a hotkey. @@ -258,7 +258,7 @@ interface IStaking { uint256 limit_price, bool allow_partial, uint256 netuid - ) external; + ) external payable; /** * @dev Removes all stake from a hotkey on a subnet with a price limit. @@ -270,7 +270,7 @@ interface IStaking { * @param hotkey The hotkey public key (32 bytes). * @param netuid The subnet to remove stake from (uint256). */ - function removeStakeFull(bytes32 hotkey, uint256 netuid) external; + function removeStakeFull(bytes32 hotkey, uint256 netuid) external payable; /** * @dev Removes all stake from a hotkey on a subnet with a price limit. @@ -287,5 +287,27 @@ interface IStaking { bytes32 hotkey, uint256 netuid, uint256 limitPrice - ) external; + ) external payable; + + /** + * @dev Burns alpha tokens from the specified hotkey's stake on a subnet. + * + * This function allows external accounts and contracts to permanently burn (destroy) alpha tokens + * from their stake on a specified hotkey and subnet. The burned tokens are removed from circulation + * and cannot be recovered. + * + * @param hotkey The hotkey public key (32 bytes). + * @param amount The amount of alpha to burn (uint256). + * @param netuid The subnet to burn from (uint256). + * + * Requirements: + * - `hotkey` must be a valid hotkey registered on the network. + * - The caller must have sufficient alpha staked to the specified hotkey on the subnet. + * - `amount` must be greater than zero and not exceed the staked amount. 
+ */ + function burnAlpha( + bytes32 hotkey, + uint256 amount, + uint256 netuid + ) external payable; } diff --git a/precompiles/src/solidity/subnet.abi b/precompiles/src/solidity/subnet.abi index f2d97e1f90..4531f59246 100644 --- a/precompiles/src/solidity/subnet.abi +++ b/precompiles/src/solidity/subnet.abi @@ -831,24 +831,6 @@ "stateMutability": "payable", "type": "function" }, - { - "inputs": [ - { - "internalType": "uint16", - "name": "netuid", - "type": "uint16" - }, - { - "internalType": "uint16", - "name": "maxWeightLimit", - "type": "uint16" - } - ], - "name": "setMaxWeightLimit", - "outputs": [], - "stateMutability": "payable", - "type": "function" - }, { "inputs": [ { diff --git a/precompiles/src/solidity/subnet.sol b/precompiles/src/solidity/subnet.sol index 7517db3019..4e78708d62 100644 --- a/precompiles/src/solidity/subnet.sol +++ b/precompiles/src/solidity/subnet.sol @@ -75,11 +75,6 @@ interface ISubnet { function getMaxWeightLimit(uint16 netuid) external view returns (uint16); - function setMaxWeightLimit( - uint16 netuid, - uint16 maxWeightLimit - ) external payable; - function getImmunityPeriod(uint16) external view returns (uint16); function setImmunityPeriod( diff --git a/precompiles/src/staking.rs b/precompiles/src/staking.rs index dff6caae60..35daaf4f47 100644 --- a/precompiles/src/staking.rs +++ b/precompiles/src/staking.rs @@ -103,6 +103,7 @@ where } #[precompile::public("removeStake(bytes32,uint256,uint256)")] + #[precompile::payable] fn remove_stake( handle: &mut impl PrecompileHandle, address: H256, @@ -141,6 +142,7 @@ where } #[precompile::public("removeStakeFull(bytes32,uint256)")] + #[precompile::payable] fn remove_stake_full( handle: &mut impl PrecompileHandle, hotkey: H256, @@ -150,6 +152,7 @@ where } #[precompile::public("removeStakeFullLimit(bytes32,uint256,uint256)")] + #[precompile::payable] fn remove_stake_full_limit( handle: &mut impl PrecompileHandle, hotkey: H256, @@ -161,6 +164,7 @@ where } #[precompile::public("moveStake(bytes32,bytes32,uint256,uint256,uint256)")] + #[precompile::payable] fn move_stake( handle: &mut impl PrecompileHandle, origin_hotkey: H256, @@ -187,6 +191,7 @@ where } #[precompile::public("transferStake(bytes32,bytes32,uint256,uint256,uint256)")] + #[precompile::payable] fn transfer_stake( handle: &mut impl PrecompileHandle, destination_coldkey: H256, @@ -212,6 +217,27 @@ where handle.try_dispatch_runtime_call::(call, RawOrigin::Signed(account_id)) } + #[precompile::public("burnAlpha(bytes32,uint256,uint256)")] + #[precompile::payable] + fn burn_alpha( + handle: &mut impl PrecompileHandle, + hotkey: H256, + amount: U256, + netuid: U256, + ) -> EvmResult<()> { + let account_id = handle.caller_account_id::(); + let hotkey = R::AccountId::from(hotkey.0); + let netuid = try_u16_from_u256(netuid)?; + let amount: u64 = amount.unique_saturated_into(); + let call = pallet_subtensor::Call::::burn_alpha { + hotkey, + amount: amount.into(), + netuid: netuid.into(), + }; + + handle.try_dispatch_runtime_call::(call, RawOrigin::Signed(account_id)) + } + #[precompile::public("getTotalColdkeyStake(bytes32)")] #[precompile::view] fn get_total_coldkey_stake( @@ -301,6 +327,7 @@ where } #[precompile::public("addProxy(bytes32)")] + #[precompile::payable] fn add_proxy(handle: &mut impl PrecompileHandle, delegate: H256) -> EvmResult<()> { let account_id = handle.caller_account_id::(); let delegate = R::AccountId::from(delegate.0); @@ -315,6 +342,7 @@ where } #[precompile::public("removeProxy(bytes32)")] + #[precompile::payable] fn remove_proxy(handle: 
&mut impl PrecompileHandle, delegate: H256) -> EvmResult<()> { let account_id = handle.caller_account_id::(); let delegate = R::AccountId::from(delegate.0); @@ -329,6 +357,7 @@ where } #[precompile::public("addStakeLimit(bytes32,uint256,uint256,bool,uint256)")] + #[precompile::payable] fn add_stake_limit( handle: &mut impl PrecompileHandle, address: H256, @@ -354,6 +383,7 @@ where } #[precompile::public("removeStakeLimit(bytes32,uint256,uint256,bool,uint256)")] + #[precompile::payable] fn remove_stake_limit( handle: &mut impl PrecompileHandle, address: H256, @@ -444,6 +474,7 @@ where } #[precompile::public("removeStake(bytes32,uint256,uint256)")] + #[precompile::payable] fn remove_stake( handle: &mut impl PrecompileHandle, address: H256, @@ -532,6 +563,7 @@ where } #[precompile::public("addProxy(bytes32)")] + #[precompile::payable] fn add_proxy(handle: &mut impl PrecompileHandle, delegate: H256) -> EvmResult<()> { let account_id = handle.caller_account_id::(); let delegate = R::AccountId::from(delegate.0); @@ -546,6 +578,7 @@ where } #[precompile::public("removeProxy(bytes32)")] + #[precompile::payable] fn remove_proxy(handle: &mut impl PrecompileHandle, delegate: H256) -> EvmResult<()> { let account_id = handle.caller_account_id::(); let delegate = R::AccountId::from(delegate.0); diff --git a/precompiles/src/subnet.rs b/precompiles/src/subnet.rs index 8b4d0eff88..b7f5cdb098 100644 --- a/precompiles/src/subnet.rs +++ b/precompiles/src/subnet.rs @@ -290,27 +290,9 @@ where #[precompile::public("getMaxWeightLimit(uint16)")] #[precompile::view] fn get_max_weight_limit(_: &mut impl PrecompileHandle, netuid: u16) -> EvmResult { - Ok(pallet_subtensor::MaxWeightsLimit::::get(NetUid::from( - netuid, - ))) - } - - #[precompile::public("setMaxWeightLimit(uint16,uint16)")] - #[precompile::payable] - fn set_max_weight_limit( - handle: &mut impl PrecompileHandle, - netuid: u16, - max_weight_limit: u16, - ) -> EvmResult<()> { - let call = pallet_admin_utils::Call::::sudo_set_max_weight_limit { - netuid: netuid.into(), - max_weight_limit, - }; - - handle.try_dispatch_runtime_call::( - call, - RawOrigin::Signed(handle.caller_account_id::()), - ) + Ok(pallet_subtensor::Pallet::::get_max_weight_limit( + NetUid::from(netuid), + )) } #[precompile::public("getImmunityPeriod(uint16)")] diff --git a/runtime/Cargo.toml b/runtime/Cargo.toml index 0c4353d630..9760ac1b53 100644 --- a/runtime/Cargo.toml +++ b/runtime/Cargo.toml @@ -64,10 +64,6 @@ pallet-sudo.workspace = true pallet-admin-utils.workspace = true -# Used for sudo decentralization -pallet-subtensor-collective.workspace = true -pallet-membership.workspace = true - # Multisig pallet-multisig.workspace = true @@ -97,6 +93,11 @@ pallet-commitments.workspace = true # for prod_or_fast! 
macro runtime-common.workspace = true + +# Wasm smart contracts support +pallet-contracts.workspace = true +subtensor-chain-extensions.workspace = true + # NPoS frame-election-provider-support = { workspace = true } pallet-authority-discovery = { workspace = true } @@ -205,8 +206,6 @@ std = [ "sp-transaction-pool/std", "sp-version/std", "substrate-wasm-builder", - "pallet-subtensor-collective/std", - "pallet-membership/std", "pallet-registry/std", "pallet-admin-utils/std", "subtensor-custom-rpc-runtime-api/std", @@ -269,6 +268,8 @@ std = [ "pallet-subtensor-swap/std", "pallet-subtensor-swap-runtime-api/std", "subtensor-swap-interface/std", + "pallet-contracts/std", + "subtensor-chain-extensions/std", "ethereum/std", ] runtime-benchmarks = [ @@ -283,8 +284,6 @@ runtime-benchmarks = [ "sp-runtime/runtime-benchmarks", "pallet-safe-mode/runtime-benchmarks", "pallet-subtensor/runtime-benchmarks", - "pallet-subtensor-collective/runtime-benchmarks", - "pallet-membership/runtime-benchmarks", "pallet-subtensor-proxy/runtime-benchmarks", "pallet-registry/runtime-benchmarks", "pallet-commitments/runtime-benchmarks", @@ -304,6 +303,7 @@ runtime-benchmarks = [ "pallet-nomination-pools/runtime-benchmarks", "pallet-offences/runtime-benchmarks", "sp-staking/runtime-benchmarks", + "pallet-contracts/runtime-benchmarks", # EVM + Frontier "pallet-ethereum/runtime-benchmarks", @@ -331,8 +331,6 @@ try-runtime = [ "pallet-subtensor-utility/try-runtime", "pallet-safe-mode/try-runtime", "pallet-subtensor/try-runtime", - "pallet-subtensor-collective/try-runtime", - "pallet-membership/try-runtime", "pallet-subtensor-proxy/try-runtime", "pallet-multisig/try-runtime", "pallet-scheduler/try-runtime", @@ -345,6 +343,7 @@ try-runtime = [ "pallet-babe/try-runtime", "pallet-session/try-runtime", "pallet-staking/try-runtime", + "pallet-contracts/try-runtime", "pallet-election-provider-multi-phase/try-runtime", "frame-election-provider-support/try-runtime", "pallet-authority-discovery/try-runtime", diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index 51932583d3..d8277164e2 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -20,12 +20,12 @@ use codec::{Compact, Decode, Encode}; use ethereum::AuthorizationList; use frame_support::{ PalletId, - dispatch::{DispatchResult, DispatchResultWithPostInfo}, + dispatch::DispatchResult, genesis_builder_helper::{build_state, get_preset}, pallet_prelude::Get, traits::{Contains, InsideBoth, LinearStoragePrice, fungible::HoldConsideration}, }; -use frame_system::{EnsureNever, EnsureRoot, EnsureRootWithSuccess, RawOrigin}; +use frame_system::{EnsureRoot, EnsureRootWithSuccess, EnsureSigned}; use pallet_commitments::{CanCommit, OnMetadataCommitment}; use pallet_grandpa::{AuthorityId as GrandpaId, fg_primitives}; use pallet_registry::CanRegisterIdentity; @@ -38,7 +38,7 @@ use pallet_subtensor::rpc_info::{ stake_info::StakeInfo, subnet_info::{SubnetHyperparams, SubnetHyperparamsV2, SubnetInfo, SubnetInfov2}, }; -use pallet_subtensor_collective as pallet_collective; +use pallet_subtensor::{CommitmentsInterface, ProxyInterface}; use pallet_subtensor_proxy as pallet_proxy; use pallet_subtensor_swap_runtime_api::SimSwapResult; use pallet_subtensor_utility as pallet_utility; @@ -68,7 +68,7 @@ use sp_version::NativeVersion; use sp_version::RuntimeVersion; use subtensor_precompiles::Precompiles; use subtensor_runtime_common::{AlphaCurrency, TaoCurrency, time::*, *}; -use subtensor_swap_interface::{OrderType, SwapHandler}; +use subtensor_swap_interface::{Order, SwapHandler}; // A few 
exports that help ease life for downstream crates. pub use frame_support::{ @@ -175,9 +175,6 @@ impl frame_system::offchain::CreateSignedTransaction pub use pallet_scheduler; pub use pallet_subtensor; -// Member type for membership -type MemberCount = u32; - // Method used to calculate the fee of an extrinsic pub const fn deposit(items: u32, bytes: u32) -> Balance { pub const ITEMS_FEE: Balance = 2_000 * 10_000; @@ -223,7 +220,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { // `spec_version`, and `authoring_version` are the same between Wasm and native. // This value is set to 100 to notify Polkadot-JS App (https://polkadot.js.org/apps) to use // the compatible custom types. - spec_version: 326, + spec_version: 334, impl_version: 1, apis: RUNTIME_API_VERSIONS, transaction_version: 1, @@ -489,149 +486,6 @@ impl pallet_transaction_payment::Config for Runtime { type WeightInfo = pallet_transaction_payment::weights::SubstrateWeight; } -// Configure collective pallet for council -parameter_types! { - pub const CouncilMotionDuration: BlockNumber = 12 * HOURS; - pub const CouncilMaxProposals: u32 = 10; - pub const CouncilMaxMembers: u32 = 3; -} - -// Configure collective pallet for Senate -parameter_types! { - pub const SenateMaxMembers: u32 = 12; -} - -use pallet_collective::{CanPropose, CanVote, GetVotingMembers}; -pub struct CanProposeToTriumvirate; -impl CanPropose for CanProposeToTriumvirate { - fn can_propose(account: &AccountId) -> bool { - Triumvirate::is_member(account) - } -} - -pub struct CanVoteToTriumvirate; -impl CanVote for CanVoteToTriumvirate { - fn can_vote(_: &AccountId) -> bool { - //Senate::is_member(account) - false // Disable voting from pallet_collective::vote - } -} - -use pallet_subtensor::{ - CollectiveInterface, CommitmentsInterface, MemberManagement, ProxyInterface, -}; -pub struct ManageSenateMembers; -impl MemberManagement for ManageSenateMembers { - fn add_member(account: &AccountId) -> DispatchResultWithPostInfo { - let who = Address::Id(account.clone()); - SenateMembers::add_member(RawOrigin::Root.into(), who) - } - - fn remove_member(account: &AccountId) -> DispatchResultWithPostInfo { - let who = Address::Id(account.clone()); - SenateMembers::remove_member(RawOrigin::Root.into(), who) - } - - fn swap_member(rm: &AccountId, add: &AccountId) -> DispatchResultWithPostInfo { - let remove = Address::Id(rm.clone()); - let add = Address::Id(add.clone()); - - Triumvirate::remove_votes(rm)?; - SenateMembers::swap_member(RawOrigin::Root.into(), remove, add) - } - - fn is_member(account: &AccountId) -> bool { - SenateMembers::members().contains(account) - } - - fn members() -> Vec { - SenateMembers::members().into() - } - - fn max_members() -> u32 { - SenateMaxMembers::get() - } -} - -pub struct GetSenateMemberCount; -impl GetVotingMembers for GetSenateMemberCount { - fn get_count() -> MemberCount { - SenateMembers::members().len() as u32 - } -} -impl Get for GetSenateMemberCount { - fn get() -> MemberCount { - SenateMaxMembers::get() - } -} - -pub struct TriumvirateVotes; -impl CollectiveInterface for TriumvirateVotes { - fn remove_votes(hotkey: &AccountId) -> Result { - Triumvirate::remove_votes(hotkey) - } - - fn add_vote( - hotkey: &AccountId, - proposal: Hash, - index: u32, - approve: bool, - ) -> Result { - Triumvirate::do_vote(hotkey.clone(), proposal, index, approve) - } -} - -type EnsureMajoritySenate = - pallet_collective::EnsureProportionMoreThan; - -// We call pallet_collective TriumvirateCollective -type TriumvirateCollective = 
pallet_collective::Instance1; -impl pallet_collective::Config for Runtime { - type RuntimeOrigin = RuntimeOrigin; - type Proposal = RuntimeCall; - type MotionDuration = CouncilMotionDuration; - type MaxProposals = CouncilMaxProposals; - type MaxMembers = GetSenateMemberCount; - type DefaultVote = pallet_collective::PrimeDefaultVote; - type WeightInfo = pallet_collective::weights::SubstrateWeight; - type SetMembersOrigin = EnsureNever; - type CanPropose = CanProposeToTriumvirate; - type CanVote = CanVoteToTriumvirate; - type GetVotingMembers = GetSenateMemberCount; -} - -// We call council members Triumvirate -#[allow(dead_code)] -type TriumvirateMembership = pallet_membership::Instance1; -impl pallet_membership::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type AddOrigin = EnsureRoot; - type RemoveOrigin = EnsureRoot; - type SwapOrigin = EnsureRoot; - type ResetOrigin = EnsureRoot; - type PrimeOrigin = EnsureRoot; - type MembershipInitialized = Triumvirate; - type MembershipChanged = Triumvirate; - type MaxMembers = CouncilMaxMembers; - type WeightInfo = pallet_membership::weights::SubstrateWeight; -} - -// We call our top K delegates membership Senate -#[allow(dead_code)] -type SenateMembership = pallet_membership::Instance2; -impl pallet_membership::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type AddOrigin = EnsureRoot; - type RemoveOrigin = EnsureRoot; - type SwapOrigin = EnsureRoot; - type ResetOrigin = EnsureRoot; - type PrimeOrigin = EnsureRoot; - type MembershipInitialized = (); - type MembershipChanged = (); - type MaxMembers = SenateMaxMembers; - type WeightInfo = pallet_membership::weights::SubstrateWeight; -} - impl pallet_sudo::Config for Runtime { type RuntimeEvent = RuntimeEvent; type RuntimeCall = RuntimeCall; @@ -688,7 +542,7 @@ impl InstanceFilter for ProxyType { ) | RuntimeCall::SubtensorModule(pallet_subtensor::Call::swap_coldkey { .. }) ), - ProxyType::NonFungibile => !matches!( + ProxyType::NonFungible => !matches!( c, RuntimeCall::Balances(..) | RuntimeCall::SubtensorModule(pallet_subtensor::Call::add_stake { .. }) @@ -759,20 +613,11 @@ impl InstanceFilter for ProxyType { RuntimeCall::SubtensorModule(pallet_subtensor::Call::dissolve_network { .. }) | RuntimeCall::SubtensorModule(pallet_subtensor::Call::root_register { .. }) | RuntimeCall::SubtensorModule(pallet_subtensor::Call::burned_register { .. }) - | RuntimeCall::Triumvirate(..) | RuntimeCall::Sudo(..) ), - ProxyType::Triumvirate => matches!( - c, - RuntimeCall::Triumvirate(..) | RuntimeCall::TriumvirateMembers(..) - ), - ProxyType::Senate => matches!(c, RuntimeCall::SenateMembers(..)), - ProxyType::Governance => matches!( - c, - RuntimeCall::SenateMembers(..) - | RuntimeCall::Triumvirate(..) - | RuntimeCall::TriumvirateMembers(..) - ), + ProxyType::Triumvirate => false, // deprecated + ProxyType::Senate => false, // deprecated + ProxyType::Governance => false, // deprecated ProxyType::Staking => matches!( c, RuntimeCall::SubtensorModule(pallet_subtensor::Call::add_stake { .. }) @@ -838,9 +683,6 @@ impl InstanceFilter for ProxyType { | RuntimeCall::AdminUtils( pallet_admin_utils::Call::sudo_set_adjustment_alpha { .. } ) - | RuntimeCall::AdminUtils( - pallet_admin_utils::Call::sudo_set_max_weight_limit { .. } - ) | RuntimeCall::AdminUtils( pallet_admin_utils::Call::sudo_set_immunity_period { .. } ) @@ -881,6 +723,10 @@ impl InstanceFilter for ProxyType { pallet_admin_utils::Call::sudo_set_toggle_transfer { .. 
} ) ), + ProxyType::RootClaim => matches!( + c, + RuntimeCall::SubtensorModule(pallet_subtensor::Call::claim_root { .. }) + ), } } fn is_superset(&self, o: &Self) -> bool { @@ -892,7 +738,6 @@ impl InstanceFilter for ProxyType { // NonTransfer is NOT a superset of Transfer or SmallTransfer !matches!(o, ProxyType::Transfer | ProxyType::SmallTransfer) } - (ProxyType::Governance, ProxyType::Triumvirate | ProxyType::Senate) => true, (ProxyType::Transfer, ProxyType::SmallTransfer) => true, _ => false, } @@ -964,21 +809,6 @@ impl PrivilegeCmp for OriginPrivilegeCmp { match (left, right) { // Root is greater than anything. (OriginCaller::system(frame_system::RawOrigin::Root), _) => Some(Ordering::Greater), - // Check which one has more yes votes. - ( - OriginCaller::Triumvirate(pallet_collective::RawOrigin::Members( - l_yes_votes, - l_count, - )), - OriginCaller::Triumvirate(pallet_collective::RawOrigin::Members( - r_yes_votes, - r_count, - )), // Equivalent to (l_yes_votes / l_count).cmp(&(r_yes_votes / r_count)) - ) => Some( - l_yes_votes - .saturating_mul(*r_count) - .cmp(&r_yes_votes.saturating_mul(*l_count)), - ), // For every other origin we don't care, as they are not used for `ScheduleOrigin`. _ => None, } @@ -1151,7 +981,6 @@ parameter_types! { pub const SubtensorInitialIssuance: u64 = 0; pub const SubtensorInitialMinAllowedWeights: u16 = 1024; pub const SubtensorInitialEmissionValue: u16 = 0; - pub const SubtensorInitialMaxWeightsLimit: u16 = 1000; // 1000/2^16 = 0.015 pub const SubtensorInitialValidatorPruneLen: u64 = 1; pub const SubtensorInitialScalingLawPower: u16 = 50; // 0.5 pub const SubtensorInitialMaxAllowedValidators: u16 = 128; @@ -1185,7 +1014,7 @@ parameter_types! { pub const SubtensorInitialTxDelegateTakeRateLimit: u64 = 216000; // 30 days at 12 seconds per block pub const SubtensorInitialTxChildKeyTakeRateLimit: u64 = INITIAL_CHILDKEY_TAKE_RATELIMIT; pub const SubtensorInitialRAORecycledForRegistration: u64 = 0; // 0 rao - pub const SubtensorInitialSenateRequiredStakePercentage: u64 = 1; // 1 percent of total stake + pub const SubtensorInitialRequiredStakePercentage: u64 = 1; // 1 percent of total stake pub const SubtensorInitialNetworkImmunity: u64 = 1_296_000; pub const SubtensorInitialMinAllowedUids: u16 = 64; pub const SubtensorInitialMinLockCost: u64 = 1_000_000_000_000; // 1000 TAO @@ -1217,9 +1046,6 @@ impl pallet_subtensor::Config for Runtime { type RuntimeCall = RuntimeCall; type SudoRuntimeCall = RuntimeCall; type Currency = Balances; - type CouncilOrigin = EnsureMajoritySenate; - type SenateMembers = ManageSenateMembers; - type TriumvirateInterface = TriumvirateVotes; type Scheduler = Scheduler; type InitialRho = SubtensorInitialRho; type InitialAlphaSigmoidSteepness = SubtensorInitialAlphaSigmoidSteepness; @@ -1232,7 +1058,6 @@ impl pallet_subtensor::Config for Runtime { type InitialIssuance = SubtensorInitialIssuance; type InitialMinAllowedWeights = SubtensorInitialMinAllowedWeights; type InitialEmissionValue = SubtensorInitialEmissionValue; - type InitialMaxWeightsLimit = SubtensorInitialMaxWeightsLimit; type InitialValidatorPruneLen = SubtensorInitialValidatorPruneLen; type InitialScalingLawPower = SubtensorInitialScalingLawPower; type InitialTempo = SubtensorInitialTempo; @@ -1263,7 +1088,6 @@ impl pallet_subtensor::Config for Runtime { type InitialTxChildKeyTakeRateLimit = SubtensorInitialTxChildKeyTakeRateLimit; type InitialMaxChildKeyTake = SubtensorInitialMaxChildKeyTake; type InitialRAORecycledForRegistration = 
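
Among the lines removed above is the `OriginPrivilegeCmp` arm that ranked two proportional Triumvirate origins by their yes-vote share. What it computed is worth spelling out, since the cross-multiplication is easy to misread: `l_yes/l_count` is compared against `r_yes/r_count` without any division, and `saturating_mul` keeps the comparison panic-free. A standalone restatement of that arithmetic:

```rust
use std::cmp::Ordering;

// Compare l_yes/l_count with r_yes/r_count by cross-multiplying,
// mirroring the arm deleted from OriginPrivilegeCmp above.
fn cmp_vote_ratio(l_yes: u32, l_count: u32, r_yes: u32, r_count: u32) -> Ordering {
    l_yes.saturating_mul(r_count).cmp(&r_yes.saturating_mul(l_count))
}

fn main() {
    // 2/3 approval outranks 5/12 approval: 2 * 12 = 24 > 5 * 3 = 15.
    assert_eq!(cmp_vote_ratio(2, 3, 5, 12), Ordering::Greater);
    // Equal ratios compare equal: 1/2 == 2/4 because 1 * 4 == 2 * 2.
    assert_eq!(cmp_vote_ratio(1, 2, 2, 4), Ordering::Equal);
}
```

With the Triumvirate origin gone, `Root` remains the only origin the scheduler needs to rank, which is why the match now falls straight through to `None` for everything else.
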
SubtensorInitialRAORecycledForRegistration; - type InitialSenateRequiredStakePercentage = SubtensorInitialSenateRequiredStakePercentage; type InitialNetworkImmunityPeriod = SubtensorInitialNetworkImmunity; type InitialNetworkMinLockCost = SubtensorInitialMinLockCost; type InitialNetworkLockReductionInterval = SubtensorInitialNetworkLockReductionInterval; @@ -1297,14 +1121,15 @@ parameter_types! { pub const SwapMaxFeeRate: u16 = 10000; // 15.26% pub const SwapMaxPositions: u32 = 100; pub const SwapMinimumLiquidity: u64 = 1_000; - pub const SwapMinimumReserve: NonZeroU64 = NonZeroU64::new(1_000_000) - .expect("1_000_000 fits NonZeroU64"); + pub const SwapMinimumReserve: NonZeroU64 = unsafe { NonZeroU64::new_unchecked(1_000_000) }; } impl pallet_subtensor_swap::Config for Runtime { type SubnetInfo = SubtensorModule; type BalanceOps = SubtensorModule; type ProtocolId = SwapProtocolId; + type TaoReserve = pallet_subtensor::TaoCurrencyReserve; + type AlphaReserve = pallet_subtensor::AlphaCurrencyReserve; type MaxFeeRate = SwapMaxFeeRate; type MaxPositions = SwapMaxPositions; type MinimumLiquidity = SwapMinimumLiquidity; @@ -1524,6 +1349,7 @@ impl Default for TransactionConverter { } } +#[allow(clippy::expect_used)] impl fp_rpc::ConvertTransaction<::Extrinsic> for TransactionConverter { fn convert_transaction( &self, @@ -1623,6 +1449,89 @@ impl pallet_crowdloan::Config for Runtime { type MaxContributors = MaxContributors; } +fn contracts_schedule() -> pallet_contracts::Schedule { + pallet_contracts::Schedule { + limits: pallet_contracts::Limits { + runtime_memory: 1024 * 1024 * 1024, + validator_runtime_memory: 1024 * 1024 * 1024 * 2, + ..Default::default() + }, + ..Default::default() + } +} + +const CONTRACT_STORAGE_KEY_PERCENT: Balance = 15; +const CONTRACT_STORAGE_BYTE_PERCENT: Balance = 6; + +/// Contracts deposits charged at 15% of the existential deposit per key, 6% per byte. +pub const fn contract_deposit(items: u32, bytes: u32) -> Balance { + let key_fee = + (EXISTENTIAL_DEPOSIT as Balance).saturating_mul(CONTRACT_STORAGE_KEY_PERCENT) / 100; + let byte_fee = + (EXISTENTIAL_DEPOSIT as Balance).saturating_mul(CONTRACT_STORAGE_BYTE_PERCENT) / 100; + + (items as Balance) + .saturating_mul(key_fee) + .saturating_add((bytes as Balance).saturating_mul(byte_fee)) +} + +parameter_types! { + pub const ContractDepositPerItem: Balance = contract_deposit(1, 0); + pub const ContractDepositPerByte: Balance = contract_deposit(0, 1); + pub const ContractDefaultDepositLimit: Balance = contract_deposit(1024, 1024 * 1024); + pub ContractsSchedule: pallet_contracts::Schedule = contracts_schedule::(); + pub const CodeHashLockupDepositPercent: Perbill = Perbill::from_percent(30); + pub const ContractMaxDelegateDependencies: u32 = 32; +} + +pub struct ContractCallFilter; + +/// Whitelist dispatchables that are allowed to be called from contracts +impl Contains for ContractCallFilter { + fn contains(call: &RuntimeCall) -> bool { + match call { + RuntimeCall::Proxy(inner) => matches!(inner, pallet_proxy::Call::proxy { .. 
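
The new `contract_deposit` helper above charges 15% of the existential deposit per storage key and 6% per byte, using only saturating arithmetic. A standalone restatement with a placeholder existential deposit so the numbers are concrete (the runtime's real `EXISTENTIAL_DEPOSIT` is defined elsewhere and may differ):

```rust
type Balance = u64;

// Placeholder value purely for this example; not the runtime's constant.
const EXISTENTIAL_DEPOSIT: Balance = 500;
const CONTRACT_STORAGE_KEY_PERCENT: Balance = 15;
const CONTRACT_STORAGE_BYTE_PERCENT: Balance = 6;

const fn contract_deposit(items: u32, bytes: u32) -> Balance {
    let key_fee = EXISTENTIAL_DEPOSIT.saturating_mul(CONTRACT_STORAGE_KEY_PERCENT) / 100;
    let byte_fee = EXISTENTIAL_DEPOSIT.saturating_mul(CONTRACT_STORAGE_BYTE_PERCENT) / 100;

    (items as Balance)
        .saturating_mul(key_fee)
        .saturating_add((bytes as Balance).saturating_mul(byte_fee))
}

fn main() {
    // With ED = 500: one key costs 75 (15%), one byte costs 30 (6%).
    assert_eq!(contract_deposit(1, 0), 75);
    assert_eq!(contract_deposit(0, 1), 30);
    // DefaultDepositLimit in the hunk corresponds to 1024 keys plus 1 MiB.
    assert_eq!(contract_deposit(1024, 1024 * 1024), 1024 * 75 + 1024 * 1024 * 30);
}
```

Deriving `ContractDepositPerItem` and `ContractDepositPerByte` from the same function keeps the two parameters consistent if the percentages are ever retuned.
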
}), + _ => false, + } + } +} + +impl pallet_contracts::Config for Runtime { + type Time = Timestamp; + type Randomness = RandomnessCollectiveFlip; + type Currency = Balances; + type RuntimeEvent = RuntimeEvent; + type RuntimeCall = RuntimeCall; + type CallFilter = ContractCallFilter; + type DepositPerItem = ContractDepositPerItem; + type DepositPerByte = ContractDepositPerByte; + type DefaultDepositLimit = ContractDefaultDepositLimit; + type CallStack = [pallet_contracts::Frame; 5]; + type WeightPrice = pallet_transaction_payment::Pallet; + type WeightInfo = pallet_contracts::weights::SubstrateWeight; + type ChainExtension = subtensor_chain_extensions::SubtensorChainExtension; + type Schedule = ContractsSchedule; + type AddressGenerator = pallet_contracts::DefaultAddressGenerator; + type MaxCodeLen = ConstU32<{ 128 * 1024 }>; + type MaxStorageKeyLen = ConstU32<128>; + type UnsafeUnstableInterface = ConstBool; + type MaxDebugBufferLen = ConstU32<{ 2 * 1024 * 1024 }>; + type RuntimeHoldReason = RuntimeHoldReason; + #[cfg(not(feature = "runtime-benchmarks"))] + type Migrations = (); + #[cfg(feature = "runtime-benchmarks")] + type Migrations = pallet_contracts::migration::codegen::BenchMigrations; + type MaxDelegateDependencies = ContractMaxDelegateDependencies; + type CodeHashLockupDepositPercent = CodeHashLockupDepositPercent; + type Debug = (); + type Environment = (); + type Xcm = (); + type MaxTransientStorageSize = ConstU32<{ 1024 * 1024 }>; + type UploadOrigin = EnsureSigned; + type InstantiateOrigin = EnsureSigned; + type ApiVersion = (); +} + // Create the runtime by composing the FRAME pallets that were previously configured. construct_runtime!( pub struct Runtime @@ -1635,9 +1544,9 @@ construct_runtime!( Balances: pallet_balances = 5, TransactionPayment: pallet_transaction_payment = 6, SubtensorModule: pallet_subtensor = 7, - Triumvirate: pallet_collective::::{Pallet, Call, Storage, Origin, Event, Config} = 8, - TriumvirateMembers: pallet_membership::::{Pallet, Call, Storage, Event, Config} = 9, - SenateMembers: pallet_membership::::{Pallet, Call, Storage, Event, Config} = 10, + // pallet_collective:: (triumvirate) was 8 + // pallet_membership:: (triumvirate members) was 9 + // pallet_membership:: (senate members) was 10 Utility: pallet_utility = 11, Sudo: pallet_sudo = 12, Multisig: pallet_multisig = 13, @@ -1659,6 +1568,7 @@ construct_runtime!( Drand: pallet_drand = 26, Crowdloan: pallet_crowdloan = 27, Swap: pallet_subtensor_swap = 28, + Contracts: pallet_contracts = 29, } ); @@ -1683,12 +1593,22 @@ pub type TransactionExtensions = ( frame_metadata_hash_extension::CheckMetadataHash, ); +parameter_types! { + pub const TriumviratePalletStr: &'static str = "Triumvirate"; + pub const TriumvirateMembersPalletStr: &'static str = "TriumvirateMembers"; + pub const SenateMembersPalletStr: &'static str = "SenateMembers"; +} + type Migrations = ( // Leave this migration in the runtime, so every runtime upgrade tiny rounding errors (fractions of fractions // of a cent) are cleaned up. These tiny rounding errors occur due to floating point coversion. pallet_subtensor::migrations::migrate_init_total_issuance::initialise_total_issuance::Migration< Runtime, >, + // Remove storage from removed governance pallets + frame_support::migrations::RemovePallet, + frame_support::migrations::RemovePallet, + frame_support::migrations::RemovePallet, ); // Unchecked extrinsic type as expected by this runtime. 
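
The migration tuple above gains three `frame_support::migrations::RemovePallet` entries keyed by the `*PalletStr` parameters, but the generic arguments did not survive in this rendering of the diff. Going by the upstream signature, `RemovePallet<P: Get<&'static str>, DbWeight: Get<RuntimeDbWeight>>`, the intended form is presumably close to the fragment below; treat it as a guess at the shape, not a copy of the code:

```rust
use frame_support::migrations::RemovePallet;

// Presumed spelling of the three cleanup migrations; the DbWeight source in
// the real runtime may be RocksDbWeight or the system config's DbWeight.
type DbWeight = <Runtime as frame_system::Config>::DbWeight;

type GovernanceCleanup = (
    RemovePallet<TriumviratePalletStr, DbWeight>,
    RemovePallet<TriumvirateMembersPalletStr, DbWeight>,
    RemovePallet<SenateMembersPalletStr, DbWeight>,
);
```

Each entry clears every storage key under the named pallet prefix on upgrade, which is what actually reclaims the state of the pallets retired from `construct_runtime!` (their old indices 8–10 are left unoccupied rather than reused).
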
@@ -1772,6 +1692,8 @@ fn generate_genesis_json() -> Vec { json_str.as_bytes().to_vec() } +type EventRecord = frame_system::EventRecord; + impl_runtime_apis! { impl sp_api::Core for Runtime { fn version() -> RuntimeVersion { @@ -2235,6 +2157,77 @@ impl_runtime_apis! { } } + impl pallet_contracts::ContractsApi + for Runtime + { + fn call( + origin: AccountId, + dest: AccountId, + value: Balance, + gas_limit: Option, + storage_deposit_limit: Option, + input_data: Vec, + ) -> pallet_contracts::ContractExecResult { + let gas_limit = gas_limit.unwrap_or(BlockWeights::get().max_block); + Contracts::bare_call( + origin, + dest, + value, + gas_limit, + storage_deposit_limit, + input_data, + pallet_contracts::DebugInfo::Skip, + pallet_contracts::CollectEvents::Skip, + pallet_contracts::Determinism::Enforced, + ) + } + + fn instantiate( + origin: AccountId, + value: Balance, + gas_limit: Option, + storage_deposit_limit: Option, + code: pallet_contracts::Code, + data: Vec, + salt: Vec, + ) -> pallet_contracts::ContractInstantiateResult + { + let gas_limit = gas_limit.unwrap_or(BlockWeights::get().max_block); + Contracts::bare_instantiate( + origin, + value, + gas_limit, + storage_deposit_limit, + code, + data, + salt, + pallet_contracts::DebugInfo::Skip, + pallet_contracts::CollectEvents::Skip, + ) + } + + fn upload_code( + origin: AccountId, + code: Vec, + storage_deposit_limit: Option, + determinism: pallet_contracts::Determinism, + ) -> pallet_contracts::CodeUploadResult { + Contracts::bare_upload_code( + origin, + code, + storage_deposit_limit, + determinism, + ) + } + + fn get_storage( + address: AccountId, + key: Vec, + ) -> pallet_contracts::GetStorageResult { + Contracts::get_storage(address, key) + } + } + #[cfg(feature = "runtime-benchmarks")] impl frame_benchmarking::Benchmark for Runtime { fn benchmark_metadata(extra: bool) -> ( @@ -2291,6 +2284,7 @@ impl_runtime_apis! { (weight, BlockWeights::get().max_block) } + #[allow(clippy::expect_used)] fn execute_block( block: Block, state_root_check: bool, @@ -2493,10 +2487,10 @@ impl_runtime_apis! { } fn sim_swap_tao_for_alpha(netuid: NetUid, tao: TaoCurrency) -> SimSwapResult { + let order = pallet_subtensor::GetAlphaForTao::::with_amount(tao); pallet_subtensor_swap::Pallet::::sim_swap( netuid.into(), - OrderType::Buy, - tao.into(), + order, ) .map_or_else( |_| SimSwapResult { @@ -2515,10 +2509,10 @@ impl_runtime_apis! 
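
In the hunk above, both the `EventRecord` alias and the `ContractsApi` implementation header have lost their angle-bracketed parameters in this rendering. Based on the upstream pallet-contracts runtime API, they presumably read roughly as follows (a reconstruction, not the exact code):

```rust
// Event record type handed to the contracts runtime API.
type EventRecord = frame_system::EventRecord<
    <Runtime as frame_system::Config>::RuntimeEvent,
    <Runtime as frame_system::Config>::Hash,
>;

// Inside the existing impl_runtime_apis! block:
impl pallet_contracts::ContractsApi<Block, AccountId, Balance, BlockNumber, Hash, EventRecord>
    for Runtime
{
    // call / instantiate / upload_code / get_storage as in the hunk above;
    // call and instantiate default gas_limit to BlockWeights::get().max_block
    // when the caller passes None, and skip debug info and event collection,
    // since these entry points back read-only dry runs over RPC.
}
```

Defaulting a missing gas limit to the maximum block weight lets an RPC dry run estimate weight without the caller having to guess a limit first.
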
{ } fn sim_swap_alpha_for_tao(netuid: NetUid, alpha: AlphaCurrency) -> SimSwapResult { + let order = pallet_subtensor::GetTaoForAlpha::::with_amount(alpha); pallet_subtensor_swap::Pallet::::sim_swap( netuid.into(), - OrderType::Sell, - alpha.into(), + order, ) .map_or_else( |_| SimSwapResult { diff --git a/runtime/tests/pallet_proxy.rs b/runtime/tests/pallet_proxy.rs index e2a9fea415..422885eaba 100644 --- a/runtime/tests/pallet_proxy.rs +++ b/runtime/tests/pallet_proxy.rs @@ -1,12 +1,10 @@ #![allow(clippy::unwrap_used)] -use codec::Encode; -use frame_support::{BoundedVec, assert_ok, traits::InstanceFilter}; +use frame_support::{assert_ok, traits::InstanceFilter}; use node_subtensor_runtime::{ BalancesCall, BuildStorage, Proxy, Runtime, RuntimeCall, RuntimeEvent, RuntimeGenesisConfig, RuntimeOrigin, SubtensorModule, System, SystemCall, }; -use pallet_subtensor_collective as pallet_collective; use pallet_subtensor_proxy as pallet_proxy; use subtensor_runtime_common::{AccountId, NetUid, ProxyType}; @@ -28,15 +26,6 @@ pub fn new_test_ext() -> sp_io::TestExternalities { ], dev_accounts: None, }, - - triumvirate: pallet_collective::GenesisConfig { - members: vec![AccountId::from(ACCOUNT)], - phantom: Default::default(), - }, - senate_members: pallet_membership::GenesisConfig { - members: BoundedVec::try_from(vec![AccountId::from(ACCOUNT)]).unwrap(), - phantom: Default::default(), - }, ..Default::default() } .build_storage() @@ -105,18 +94,6 @@ fn call_update_symbol() -> RuntimeCall { }) } -// critical call for Subtensor -fn call_propose() -> RuntimeCall { - let proposal = call_remark(); - let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); - - RuntimeCall::Triumvirate(pallet_collective::Call::propose { - proposal: Box::new(call_remark()), - length_bound: proposal_len, - duration: 100_000_000_u32, - }) -} - // critical call for Subtensor fn call_root_register() -> RuntimeCall { RuntimeCall::SubtensorModule(pallet_subtensor::Call::root_register { @@ -124,20 +101,6 @@ fn call_root_register() -> RuntimeCall { }) } -// triumvirate call -fn call_triumvirate() -> RuntimeCall { - RuntimeCall::TriumvirateMembers(pallet_membership::Call::change_key { - new: AccountId::from(ACCOUNT).into(), - }) -} - -// senate call -fn call_senate() -> RuntimeCall { - RuntimeCall::SenateMembers(pallet_membership::Call::change_key { - new: AccountId::from(ACCOUNT).into(), - }) -} - // staking call fn call_add_stake() -> RuntimeCall { let netuid = NetUid::from(1); @@ -205,10 +168,7 @@ fn test_proxy_pallet() { ProxyType::Owner, ProxyType::NonCritical, ProxyType::NonTransfer, - ProxyType::Senate, - ProxyType::NonFungibile, - ProxyType::Triumvirate, - ProxyType::Governance, + ProxyType::NonFungible, ProxyType::Staking, ProxyType::Registration, ]; @@ -217,10 +177,7 @@ fn test_proxy_pallet() { call_transfer, call_remark, call_owner_util, - call_propose, call_root_register, - call_triumvirate, - call_senate, call_add_stake, call_register, ]; diff --git a/scripts/srtool/build-srtool-image.sh b/scripts/srtool/build-srtool-image.sh new file mode 100755 index 0000000000..d736fe8a1e --- /dev/null +++ b/scripts/srtool/build-srtool-image.sh @@ -0,0 +1,3 @@ +#!/bin/bash + +docker build --build-arg RUSTC_VERSION="1.85.0" -t srtool https://github.com/paritytech/srtool.git#refs/tags/v0.17.0 \ No newline at end of file diff --git a/scripts/srtool/run-srtool.sh b/scripts/srtool/run-srtool.sh new file mode 100755 index 0000000000..4719f7a4bc --- /dev/null +++ b/scripts/srtool/run-srtool.sh @@ -0,0 +1,26 @@ +#!/bin/bash +if [ 
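
The two `sim_swap_*` hunks above replace the `(OrderType, amount)` pair with typed orders: `GetAlphaForTao::with_amount(tao)` for what used to be `OrderType::Buy`, and `GetTaoForAlpha::with_amount(alpha)` for `OrderType::Sell`, so the swap direction travels with the value's type. A minimal, self-contained illustration of the pattern (the trait and its methods below are invented for this sketch; the real order types live in pallet_subtensor and carry more than an amount):

```rust
// Two order types: the direction of the swap is fixed by which one you build.
struct GetAlphaForTao(u64); // spend TAO, receive alpha (previously OrderType::Buy)
struct GetTaoForAlpha(u64); // spend alpha, receive TAO (previously OrderType::Sell)

impl GetAlphaForTao {
    fn with_amount(tao: u64) -> Self {
        Self(tao)
    }
}

impl GetTaoForAlpha {
    fn with_amount(alpha: u64) -> Self {
        Self(alpha)
    }
}

trait Order {
    fn amount(&self) -> u64;
    fn is_buy(&self) -> bool;
}

impl Order for GetAlphaForTao {
    fn amount(&self) -> u64 {
        self.0
    }
    fn is_buy(&self) -> bool {
        true
    }
}

impl Order for GetTaoForAlpha {
    fn amount(&self) -> u64 {
        self.0
    }
    fn is_buy(&self) -> bool {
        false
    }
}

// A sim_swap-like entry point: no runtime flag to keep in sync with the amount.
fn sim_swap<O: Order>(order: O) -> (u64, bool) {
    (order.amount(), order.is_buy())
}

fn main() {
    assert_eq!(sim_swap(GetAlphaForTao::with_amount(1_000)), (1_000, true));
    assert_eq!(sim_swap(GetTaoForAlpha::with_amount(250)), (250, false));
}
```

The practical win is that a caller can no longer pair a buy flag with an alpha amount or vice versa; the mismatch simply does not type-check.
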
-z "$CARGO_HOME" ]; then + echo "CARGO_HOME is not set" + exit 1 +fi + +cd runtime # check for symlink +if [ ! -L node-subtensor ]; then + ln -s . node-subtensor +fi +cd .. + +docker run --rm --user root --platform=linux/amd64 \ + -e PACKAGE=node-subtensor-runtime \ + -e BUILD_OPTS="--features=metadata-hash" \ + -e PROFILE=production \ + -v $HOME/.cargo:/cargo-home \ + -v $(pwd):/build \ + -it srtool bash -c "git config --global --add safe.directory /build && \ + /srtool/build --app > /build/runtime/node-subtensor/srtool-output.log; \ + BUILD_EXIT_CODE=\$?; \ + if [ \"\$BUILD_EXIT_CODE\" -ne 0 ]; then \ + cat /build/runtime/node-subtensor/srtool-output.log; \ + exit \$BUILD_EXIT_CODE; \ + fi && \ + tail -n 1 /build/runtime/node-subtensor/srtool-output.log > /build/runtime/node-subtensor/subtensor-digest.json" \ No newline at end of file diff --git a/support/linting/src/forbid_as_primitive.rs b/support/linting/src/forbid_as_primitive.rs index 228b978ef2..5e01d0741c 100644 --- a/support/linting/src/forbid_as_primitive.rs +++ b/support/linting/src/forbid_as_primitive.rs @@ -49,6 +49,7 @@ mod tests { fn lint(input: proc_macro2::TokenStream) -> Result { let mut visitor = AsPrimitiveVisitor::default(); + #[allow(clippy::expect_used)] let expr: ExprMethodCall = syn::parse2(input).expect("should be a valid method call"); visitor.visit_expr_method_call(&expr); if !visitor.errors.is_empty() { diff --git a/support/linting/src/forbid_keys_remove.rs b/support/linting/src/forbid_keys_remove.rs index 204020fa22..163eeb703a 100644 --- a/support/linting/src/forbid_keys_remove.rs +++ b/support/linting/src/forbid_keys_remove.rs @@ -57,6 +57,7 @@ fn is_keys_remove_call(func: &Expr, args: &Punctuated) -> bool { #[cfg(test)] mod tests { + #![allow(clippy::expect_used)] use super::*; use quote::quote; diff --git a/support/linting/src/forbid_saturating_math.rs b/support/linting/src/forbid_saturating_math.rs index 9ad5385b36..02f99c0bcd 100644 --- a/support/linting/src/forbid_saturating_math.rs +++ b/support/linting/src/forbid_saturating_math.rs @@ -56,6 +56,7 @@ fn is_saturating_math_call(func: &Expr) -> bool { #[cfg(test)] mod tests { + #![allow(clippy::expect_used)] use super::*; use quote::quote; diff --git a/support/linting/src/pallet_index.rs b/support/linting/src/pallet_index.rs index 069c96f628..e14617be24 100644 --- a/support/linting/src/pallet_index.rs +++ b/support/linting/src/pallet_index.rs @@ -168,9 +168,6 @@ mod tests { Balances : pallet_balances = 5, TransactionPayment : pallet_transaction_payment = 6, SubtensorModule : pallet_subtensor = 7, - Triumvirate : pallet_subtensor_collective::::{ Pallet, Call, Storage, Origin, Event, Config } = 8, - TriumvirateMembers : pallet_membership::::{ Pallet, Call, Storage, Event, Config } = 9, - SenateMembers : pallet_membership::::{ Pallet, Call, Storage, Event, Config } = 10, Utility : pallet_subtensor_utility = 11, Sudo : pallet_sudo = 12, Multisig : pallet_multisig = 13, @@ -195,10 +192,6 @@ mod tests { pub enum Test { System: frame_system::{Pallet, Call, Config, Storage, Event}, Balances: pallet_balances::{Pallet, Call, Config, Storage, Event}, - Triumvirate: pallet_subtensor_collective::::{Pallet, Call, Storage, Origin, Event, Config}, - TriumvirateMembers: pallet_membership::::{Pallet, Call, Storage, Event, Config}, - Senate: pallet_subtensor_collective::::{Pallet, Call, Storage, Origin, Event, Config}, - SenateMembers: pallet_membership::::{Pallet, Call, Storage, Event, Config}, SubtensorModule: pallet_subtensor::{Pallet, Call, Storage, Event}, Utility: 
pallet_utility::{Pallet, Call, Storage, Event}, Scheduler: pallet_scheduler::{Pallet, Call, Storage, Event}, @@ -284,10 +277,6 @@ mod tests { pub enum Test { System: frame_system::{Pallet, Call, Config, Storage, Event} = 1, Balances: pallet_balances::{Pallet, Call, Config, Storage, Event}, - Triumvirate: pallet_subtensor_collective::::{Pallet, Call, Storage, Origin, Event, Config} = 3, - TriumvirateMembers: pallet_membership::::{Pallet, Call, Storage, Event, Config} = 4, - Senate: pallet_subtensor_collective::::{Pallet, Call, Storage, Origin, Event, Config} = 5, - SenateMembers: pallet_membership::::{Pallet, Call, Storage, Event, Config} = 6, SubtensorModule: pallet_subtensor::{Pallet, Call, Storage, Event} = 7, Utility: pallet_utility::{Pallet, Call, Storage, Event} = 8, Scheduler: pallet_scheduler::{Pallet, Call, Storage, Event} = 9, diff --git a/support/linting/src/require_freeze_struct.rs b/support/linting/src/require_freeze_struct.rs index a1d86109e4..b697c5b824 100644 --- a/support/linting/src/require_freeze_struct.rs +++ b/support/linting/src/require_freeze_struct.rs @@ -68,6 +68,7 @@ fn is_derive_encode_or_decode(attr: &Attribute) -> bool { #[cfg(test)] mod tests { + #![allow(clippy::expect_used)] use super::*; fn lint_struct(input: &str) -> Result { diff --git a/support/procedural-fork/src/runtime/parse/pallet_decl.rs b/support/procedural-fork/src/runtime/parse/pallet_decl.rs index fab826eee7..ddc9a7ffe7 100644 --- a/support/procedural-fork/src/runtime/parse/pallet_decl.rs +++ b/support/procedural-fork/src/runtime/parse/pallet_decl.rs @@ -29,8 +29,7 @@ pub struct PalletDeclaration { /// The runtime parameter of the pallet, e.g. `Runtime` in /// `pub type System = frame_system::Pallet`. pub runtime_param: Option, - /// The instance of the pallet, e.g. `Instance1` in `pub type Council = - /// pallet_collective`. + /// The instance of the pallet, e.g. `Instance1` in `pub type Council = pallet_collective`. pub instance: Option, } diff --git a/support/tools/src/bump_version.rs b/support/tools/src/bump_version.rs index a16293c304..24e267f234 100644 --- a/support/tools/src/bump_version.rs +++ b/support/tools/src/bump_version.rs @@ -7,10 +7,9 @@ use std::{ }; use toml_edit::{DocumentMut, Item, Value}; -const TOML_PATHS: [&str; 9] = [ +const TOML_PATHS: [&str; 8] = [ "support/macros", "pallets/commitments", - "pallets/collective", "pallets/registry", "pallets/subtensor", "pallets/subtensor/runtime-api",