diff --git a/.changelog/unreleased/breaking-changes/ibc-relayer-cli/1075-change-key-name-flag.md b/.changelog/unreleased/breaking-changes/ibc-relayer-cli/1075-change-key-name-flag.md new file mode 100644 index 0000000000..28e9d3a4d8 --- /dev/null +++ b/.changelog/unreleased/breaking-changes/ibc-relayer-cli/1075-change-key-name-flag.md @@ -0,0 +1 @@ +- Merged commands `keys add` and `keys restore` into a single command `keys add`. The flag to specify the key name for the CLI command `keys add` has been changed from `-n` to `-k`. Restoring a key now takes a file containing the mnemonic as input instead of directly taking the mnemonic. ([#1075](https://github.com/informalsystems/ibc-rs/issues/1075)) \ No newline at end of file diff --git a/.changelog/unreleased/bug-fixes/ibc-relayer/1153-fix-execute-schedule-leaky-pipeline.md b/.changelog/unreleased/bug-fixes/ibc-relayer/1153-fix-execute-schedule-leaky-pipeline.md new file mode 100644 index 0000000000..41a113d0e8 --- /dev/null +++ b/.changelog/unreleased/bug-fixes/ibc-relayer/1153-fix-execute-schedule-leaky-pipeline.md @@ -0,0 +1,2 @@ +- Fix `execute_schedule` method dropping operational data due to improper + handling of errors. ([#1153](https://github.com/informalsystems/ibc-rs/issues/1153)) diff --git a/.changelog/unreleased/features/ibc-relayer-cli/912-balance-subcommand-cli.md b/.changelog/unreleased/features/ibc-relayer-cli/912-balance-subcommand-cli.md new file mode 100644 index 0000000000..29b56f825b --- /dev/null +++ b/.changelog/unreleased/features/ibc-relayer-cli/912-balance-subcommand-cli.md @@ -0,0 +1,2 @@ +- Added CLI command `keys balance` which outputs the balance of an account associated with a + key. ([#912](https://github.com/informalsystems/ibc-rs/issues/912)) \ No newline at end of file diff --git a/.changelog/unreleased/features/ibc-relayer-cli/999-channel-client-subcommand-cli.md b/.changelog/unreleased/features/ibc-relayer-cli/999-channel-client-subcommand-cli.md new file mode 100644 index 0000000000..6daf2f0af0 --- /dev/null +++ b/.changelog/unreleased/features/ibc-relayer-cli/999-channel-client-subcommand-cli.md @@ -0,0 +1,2 @@ +- Added CLI command `query channel client` which outputs the channel's client state.
+ ([#999](https://github.com/informalsystems/ibc-rs/issues/999)) \ No newline at end of file diff --git a/.changelog/unreleased/improvements/ibc/1759-complete-ics20.md b/.changelog/unreleased/improvements/ibc/1759-complete-ics20.md new file mode 100644 index 0000000000..8d7cbbbbad --- /dev/null +++ b/.changelog/unreleased/improvements/ibc/1759-complete-ics20.md @@ -0,0 +1 @@ +- Complete ICS20 implementation ([#1759](https://github.com/informalsystems/ibc-rs/issues/1759)) diff --git a/.changelog/unreleased/2181-update-codeowners.md b/.changelog/v0.15.0/2181-update-codeowners.md similarity index 100% rename from .changelog/unreleased/2181-update-codeowners.md rename to .changelog/v0.15.0/2181-update-codeowners.md diff --git a/.changelog/unreleased/bug-fixes/ibc-relayer/1971-non-batch-fix.md b/.changelog/v0.15.0/bug-fixes/ibc-relayer/1971-non-batch-fix.md similarity index 100% rename from .changelog/unreleased/bug-fixes/ibc-relayer/1971-non-batch-fix.md rename to .changelog/v0.15.0/bug-fixes/ibc-relayer/1971-non-batch-fix.md diff --git a/.changelog/unreleased/bug-fixes/relayer/2180-client-expiry-time.md b/.changelog/v0.15.0/bug-fixes/ibc-relayer/2180-client-expiry-time.md similarity index 100% rename from .changelog/unreleased/bug-fixes/relayer/2180-client-expiry-time.md rename to .changelog/v0.15.0/bug-fixes/ibc-relayer/2180-client-expiry-time.md diff --git a/.changelog/unreleased/bug-fixes/ibc/2104-fix-commitment-computation.md b/.changelog/v0.15.0/bug-fixes/ibc/2104-fix-commitment-computation.md similarity index 100% rename from .changelog/unreleased/bug-fixes/ibc/2104-fix-commitment-computation.md rename to .changelog/v0.15.0/bug-fixes/ibc/2104-fix-commitment-computation.md diff --git a/.changelog/unreleased/bug-fixes/ibc/2114-fix-ack-verification.md b/.changelog/v0.15.0/bug-fixes/ibc/2114-fix-ack-verification.md similarity index 100% rename from .changelog/unreleased/bug-fixes/ibc/2114-fix-ack-verification.md rename to .changelog/v0.15.0/bug-fixes/ibc/2114-fix-ack-verification.md diff --git a/.changelog/unreleased/bug-fixes/ibc/2178-conn-ack-bug-fix.md b/.changelog/v0.15.0/bug-fixes/ibc/2178-conn-ack-bug-fix.md similarity index 100% rename from .changelog/unreleased/bug-fixes/ibc/2178-conn-ack-bug-fix.md rename to .changelog/v0.15.0/bug-fixes/ibc/2178-conn-ack-bug-fix.md diff --git a/.changelog/unreleased/features/1986-gaia-e2e-tests.md b/.changelog/v0.15.0/features/1986-gaia-e2e-tests.md similarity index 100% rename from .changelog/unreleased/features/1986-gaia-e2e-tests.md rename to .changelog/v0.15.0/features/1986-gaia-e2e-tests.md diff --git a/.changelog/unreleased/features/ibc-relayer/2112-new-metrics.md b/.changelog/v0.15.0/features/ibc-relayer/2112-new-metrics.md similarity index 100% rename from .changelog/unreleased/features/ibc-relayer/2112-new-metrics.md rename to .changelog/v0.15.0/features/ibc-relayer/2112-new-metrics.md diff --git a/.changelog/unreleased/improvements/ibc-relayer/1971-max-msg-num-min-bound.md b/.changelog/v0.15.0/improvements/ibc-relayer/1971-max-msg-num-min-bound.md similarity index 100% rename from .changelog/unreleased/improvements/ibc-relayer/1971-max-msg-num-min-bound.md rename to .changelog/v0.15.0/improvements/ibc-relayer/1971-max-msg-num-min-bound.md diff --git a/.changelog/unreleased/improvements/ibc-relayer/2031-misleading-misbehavior-error.md b/.changelog/v0.15.0/improvements/ibc-relayer/2031-misleading-misbehavior-error.md similarity index 100% rename from .changelog/unreleased/improvements/ibc-relayer/2031-misleading-misbehavior-error.md rename 
to .changelog/v0.15.0/improvements/ibc-relayer/2031-misleading-misbehavior-error.md diff --git a/.changelog/unreleased/improvements/relayer/2087-incremental-packet-clearing.md b/.changelog/v0.15.0/improvements/ibc-relayer/2087-incremental-packet-clearing.md similarity index 100% rename from .changelog/unreleased/improvements/relayer/2087-incremental-packet-clearing.md rename to .changelog/v0.15.0/improvements/ibc-relayer/2087-incremental-packet-clearing.md diff --git a/.changelog/unreleased/improvements/relayer/2192-adr009-impl.md b/.changelog/v0.15.0/improvements/ibc-relayer/2192-adr009-impl.md similarity index 100% rename from .changelog/unreleased/improvements/relayer/2192-adr009-impl.md rename to .changelog/v0.15.0/improvements/ibc-relayer/2192-adr009-impl.md diff --git a/.changelog/unreleased/improvements/ibc/2159-remove-ocaps.md b/.changelog/v0.15.0/improvements/ibc/2159-remove-ocaps.md similarity index 100% rename from .changelog/unreleased/improvements/ibc/2159-remove-ocaps.md rename to .changelog/v0.15.0/improvements/ibc/2159-remove-ocaps.md diff --git a/.changelog/v0.15.0/summary.md b/.changelog/v0.15.0/summary.md new file mode 100644 index 0000000000..5fb579018a --- /dev/null +++ b/.changelog/v0.15.0/summary.md @@ -0,0 +1,4 @@ +This release brings a number of bug fixes, some performance improvements, +notably when [clearing packets](//github.com/informalsystems/ibc-rs/issues/2087), +as well as [new metrics](https://github.com/informalsystems/ibc-rs/issues/2112) +for better observability of the relayer's operations. diff --git a/.github/workflows/integration.yaml b/.github/workflows/integration.yaml index fb3a68656e..c0b92677b9 100644 --- a/.github/workflows/integration.yaml +++ b/.github/workflows/integration.yaml @@ -173,3 +173,42 @@ jobs: nix shell .#python .#ica -c cargo \ test -p ibc-integration-test --features ica --no-fail-fast -- \ --nocapture --test-threads=1 test_ica_filter + + model-based-test: + runs-on: ubuntu-latest + strategy: + matrix: + gaiad: + - gaia6 + steps: + - uses: actions/checkout@v2 + - uses: cachix/install-nix-action@v15 + with: + install_url: https://nixos-nix-install-tests.cachix.org/serve/vij683ly7sl95nnhb67bdjjfabclr85m/install + install_options: '--tarball-url-prefix https://nixos-nix-install-tests.cachix.org/serve' + extra_nix_config: | + experimental-features = nix-command flakes + - uses: cachix/cachix-action@v10 + with: + name: cosmos + - uses: actions-rs/toolchain@v1 + with: + toolchain: stable + override: true + - uses: Swatinem/rust-cache@v1 + - uses: actions-rs/cargo@v1 + with: + command: test + args: -p ibc-integration-test --features mbt --no-fail-fast --no-run + # Disable running MBT tests until flakiness is addressed + # - env: + # RUST_LOG: debug + # RUST_BACKTRACE: 1 + # NO_COLOR_LOG: 1 + # run: | + # nix shell \ + # .#${{ matrix.gaiad }} \ + # .#apalache \ + # -c cargo \ + # test -p ibc-integration-test --features mbt --no-fail-fast -- \ + # --nocapture --test-threads=1 mbt diff --git a/CHANGELOG.md b/CHANGELOG.md index 5396203c31..a69bdcb182 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,54 @@ # CHANGELOG +## v0.15.0 + +*May 23rd, 2022* + +This release brings a number of bug fixes, some performance improvements, +notably when [clearing packets](//github.com/informalsystems/ibc-rs/issues/2087), +as well as [new metrics](https://github.com/informalsystems/ibc-rs/issues/2112) +for better observability of the relayer's operations. 
+ +### BUG FIXES + +- [IBC Modules](modules) + - Fix packet commitment calculation to match ibc-go + ([#2104](https://github.com/informalsystems/ibc-rs/issues/2104)) + - Fix incorrect acknowledgement verification + ([#2114](https://github.com/informalsystems/ibc-rs/issues/2114)) + - Fix connection ID mix-up in connection acknowledgement processing + ([#2178](https://github.com/informalsystems/ibc-rs/issues/2178)) +- [Relayer Library](relayer) + - Fix a bug where connection and channel handshakes would fail with non-batching transactions + ([#1971](https://github.com/informalsystems/ibc-rs/issues/1971)) + - Fixed client expiry computation to avoid using local time. + ([#2180](https://github.com/informalsystems/ibc-rs/issues/2180)) + +### FEATURES + +- General + - Replaced gaia v5 with v7 in E2E tests. + ([#1986](https://github.com/informalsystems/ibc-rs/issues/1986)) +- [Relayer Library](relayer) + - Add six new metrics: `wallet_balance`, `ws_events`, `ws_reconnect`, + `tx_latency_submitted`, `tx_latency_confirmed`, `msg_num` + ([#2112](https://github.com/informalsystems/ibc-rs/issues/2112)) + +### IMPROVEMENTS + +- [IBC Modules](modules) + - Remove object capabilities from the modules + ([#2159](https://github.com/informalsystems/ibc-rs/issues/2159)) +- [Relayer Library](relayer) + - Ensure `max_msg_num` is between 1 and 100 with a default of 30 + ([#1971](https://github.com/informalsystems/ibc-rs/issues/1971)) + - Fixed misleading error message leaking from the misbehavior detection task. + ([#2031](https://github.com/informalsystems/ibc-rs/issues/2031)) + - Added support for incremental processing of packet clearing commands. + ([#2087](https://github.com/informalsystems/ibc-rs/issues/2087)) + - Implement ADR 9: add domain type for request messages that are passed to query + functions ([#2192](https://github.com/informalsystems/ibc-rs/issues/2192)) + ## v0.14.1 *May 2nd, 2022* @@ -562,7 +611,7 @@ then on top of the changes above, `mode.connections.enabled` and `mode.channels. *October 29th, 2021* This is the final release of version 0.8.0, which now depends on the official releases of the `prost` and `tonic` crates. -In addition to everything that's included in v0.8.0-pre.1, this release updates the minimum supported Rust version to 1.56, +In addition to everything that's included in v0.8.0-pre.1, this release updates the minimum supported Rust version to 1.56, and contains various bug fixes and performance improvements which make the relayer more reliable.
#### Notice for operators @@ -592,7 +641,7 @@ For Cosmos-SDK chains a good approximation is `timeout_propose` + `timeout_commi - Update to official releases of `prost` 0.9 and `tonic` 0.6 ([#1502](https://github.com/informalsystems/ibc-rs/issues/1502)) - [IBC Modules](modules) - - Support for converting `ibc::events::IbcEvent` into `tendermint::abci::Event` + - Support for converting `ibc::events::IbcEvent` into `tendermint::abci::Event` ([#838](https://github.com/informalsystems/ibc-rs/issues/838)) - Restructure the layout of the `ibc` crate to match `ibc-go`'s [layout](https://github.com/cosmos/ibc-go#contents) ([#1436](https://github.com/informalsystems/ibc-rs/issues/1436)) diff --git a/Cargo.lock b/Cargo.lock index 3f19924294..9a86776e58 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -165,9 +165,9 @@ checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "axum" -version = "0.5.3" +version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f523b4e98ba6897ae90994bc18423d9877c54f9047b06a00ddc8122a957b1c70" +checksum = "ab2504b827a8bef941ba3dd64bdffe9cf56ca182908a147edd6189c95fbcae7d" dependencies = [ "async-trait", "axum-core", @@ -177,7 +177,7 @@ dependencies = [ "http", "http-body", "hyper", - "itoa 1.0.1", + "itoa 1.0.2", "matchit", "memchr", "mime", @@ -194,9 +194,9 @@ dependencies = [ [[package]] name = "axum-core" -version = "0.2.2" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3ddbd16eabff8b45f21b98671fddcc93daaa7ac4c84f8473693437226040de5" +checksum = "da31c0ed7b4690e2c78fe4b880d21cd7db04a346ebc658b4270251b695437f17" dependencies = [ "async-trait", "bytes", @@ -889,9 +889,9 @@ dependencies = [ [[package]] name = "ed25519" -version = "1.4.1" +version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d5c4b5e5959dc2c2b89918d8e2cc40fcdd623cef026ed09d2f0ee05199dc8e4" +checksum = "1e9c280362032ea4203659fc489832d0204ef09f247a0506f170dafcac08c369" dependencies = [ "signature", ] @@ -980,9 +980,9 @@ dependencies = [ [[package]] name = "ff" -version = "0.11.0" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2958d04124b9f27f175eaeb9a9f383d026098aa837eadd8ba22c11f13a05b9e" +checksum = "131655483be284720a17d74ff97592b8e76576dc25563148601df2d7c9080924" dependencies = [ "rand_core 0.6.3", "subtle", @@ -1493,14 +1493,14 @@ checksum = "ff8670570af52249509a86f5e3e18a08c60b177071826898fde8997cf5f6bfbb" dependencies = [ "bytes", "fnv", - "itoa 1.0.1", + "itoa 1.0.2", ] [[package]] name = "http-body" -version = "0.4.4" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ff4f84919677303da5f147645dbea6b1881f368d03ac84e1dc09031ebd7b2c6" +checksum = "d5f38f16d184e36f2408a55281cd658ecbd3ca05cce6d6510a176eca393e26d1" dependencies = [ "bytes", "http", @@ -1546,7 +1546,7 @@ dependencies = [ "http-body", "httparse", "httpdate", - "itoa 1.0.1", + "itoa 1.0.2", "pin-project-lite", "socket2", "tokio", @@ -1606,7 +1606,7 @@ dependencies = [ [[package]] name = "ibc" -version = "0.14.1" +version = "0.15.0" dependencies = [ "beefy-generic-client", "beefy-primitives", @@ -1645,11 +1645,12 @@ dependencies = [ "tokio", "tracing", "tracing-subscriber 0.3.11", + "uint", ] [[package]] name = "ibc-proto" -version = "0.17.1" +version = "0.18.0" dependencies = [ "base64", "bytes", @@ -1783,9 +1784,9 @@ checksum = 
"b71991ff56294aa922b450139ee08b3bfc70982c6b2c7562771375cf73542dd4" [[package]] name = "itoa" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1aab8fc367588b89dcee83ab0fd66b72b50b72fa1904d7095045ace2b0c81c35" +checksum = "112c678d4050afce233f4f2852bb2eb519230b3cf12f33585275537d7e41578d" [[package]] name = "js-sys" @@ -1835,7 +1836,7 @@ dependencies = [ "soketto", "thiserror", "tokio", - "tokio-rustls 0.23.3", + "tokio-rustls 0.23.4", "tokio-util", "tracing", "webpki-roots 0.22.3", @@ -2001,9 +2002,9 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = "0.2.125" +version = "0.2.126" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5916d2ae698f6de9bfb891ad7a8d65c09d232dc58cc4ac433c7da3b2fd84bc2b" +checksum = "349d5a591cd28b49e1d1037471617a32ddcda5731b99419008085f72d5a53836" [[package]] name = "libm" @@ -2081,9 +2082,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.16" +version = "0.4.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6389c490849ff5bc16be905ae24bc913a9c8892e19b2341dbc175e14c341c2b8" +checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e" dependencies = [ "cfg-if 1.0.0", ] @@ -2209,25 +2210,14 @@ dependencies = [ [[package]] name = "mio" -version = "0.8.2" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52da4364ffb0e4fe33a9841a98a3f3014fb964045ce4f7a45a398243c8d6b0c9" +checksum = "713d550d9b44d89174e066b7a6217ae06234c10cb47819a88290d2b353c31799" dependencies = [ "libc", "log", - "miow", - "ntapi", "wasi 0.11.0+wasi-snapshot-preview1", - "winapi", -] - -[[package]] -name = "miow" -version = "0.3.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9f1c5b025cda876f66ef43a113f91ebc9f4ccef34843000e0adf6ebbab84e21" -dependencies = [ - "winapi", + "windows-sys", ] [[package]] @@ -2300,15 +2290,6 @@ dependencies = [ "minimal-lexical", ] -[[package]] -name = "ntapi" -version = "0.3.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c28774a7fd2fbb4f0babd8237ce554b73af68021b5f695a3cebd6c59bac0980f" -dependencies = [ - "winapi", -] - [[package]] name = "num-bigint" version = "0.2.6" @@ -2405,27 +2386,27 @@ dependencies = [ [[package]] name = "num_threads" -version = "0.1.5" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aba1801fb138d8e85e11d0fc70baf4fe1cdfffda7c6cd34a854905df588e5ed0" +checksum = "2819ce041d2ee131036f4fc9d6ae7ae125a3a40e97ba64d04fe799ad9dabbb44" dependencies = [ "libc", ] [[package]] name = "object" -version = "0.28.3" +version = "0.28.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40bec70ba014595f99f7aa110b84331ffe1ee9aece7fe6f387cc7e3ecda4d456" +checksum = "e42c982f2d955fac81dd7e1d0e1426a7d702acd9c98d19ab01083a6a0328c424" dependencies = [ "memchr", ] [[package]] name = "once_cell" -version = "1.10.0" +version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87f3e037eac156d1775da914196f0f37741a274155e34a0b7e427c35d2a2ecb9" +checksum = "7709cef83f0c1f58f666e746a08b21e0085f7440fa6a29cc194d68aac97a4225" [[package]] name = "opaque-debug" @@ -2447,9 +2428,9 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "os_str_bytes" -version = "6.0.0" +version = "6.0.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e22443d1643a904602595ba1cd8f7d896afe56d26712531c5ff73a15b2fbf64" +checksum = "029d8d0b2f198229de29dca79676f2738ff952edf3fde542eb8bf94d8c21b435" [[package]] name = "pallet-beefy" @@ -2795,18 +2776,18 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.37" +version = "1.0.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec757218438d5fda206afc041538b2f6d889286160d649a86a24d37e1235afd1" +checksum = "c54b25569025b7fc9651de43004ae593a75ad88543b17178aa5e1b9c4f15f56f" dependencies = [ - "unicode-xid", + "unicode-ident", ] [[package]] name = "prost" -version = "0.10.1" +version = "0.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a07b0857a71a8cb765763950499cae2413c3f9cede1133478c43600d9e146890" +checksum = "bc03e116981ff7d8da8e5c220e374587b98d294af7ba7dd7fda761158f00086f" dependencies = [ "bytes", "prost-derive", @@ -2949,9 +2930,9 @@ checksum = "60a357793950651c4ed0f3f52338f53b2f809f32d83a07f72909fa13e4c6c1e3" [[package]] name = "rayon" -version = "1.5.2" +version = "1.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd249e82c21598a9a426a4e00dd7adc1d640b22445ec8545feef801d1a74c221" +checksum = "bd99e5772ead8baa5215278c9b15bf92087709e9c1b2d1f97cdb5a183c933a7d" dependencies = [ "autocfg", "crossbeam-deque", @@ -2961,9 +2942,9 @@ dependencies = [ [[package]] name = "rayon-core" -version = "1.9.2" +version = "1.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f51245e1e62e1f1629cbfec37b5793bbabcaeb90f30e94d2ba03564687353e4" +checksum = "258bcdb5ac6dad48491bb2992db6b7cf74878b0384908af124823d118c99683f" dependencies = [ "crossbeam-channel", "crossbeam-deque", @@ -3013,9 +2994,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.5.5" +version = "1.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a11647b6b25ff05a515cb92c365cec08801e83423a235b51e231e1808747286" +checksum = "d83f127d94bdbcda4c8cc2e50f6f84f4b611f69c902699ca385a39c3a75f9ff1" dependencies = [ "aho-corasick", "memchr", @@ -3033,9 +3014,9 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.6.25" +version = "0.6.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f497285884f3fcff424ffc933e56d7cbca511def0c9831a7f9b5f6153e3cc89b" +checksum = "49b3de9ec5dc0a3417da371aab17d729997c15010e7fd24ff707773a33bddb64" [[package]] name = "remove_dir_all" @@ -3126,9 +3107,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.20.4" +version = "0.20.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fbfeb8d0ddb84706bc597a5574ab8912817c52a397f819e5b614e2265206921" +checksum = "5aab8ee6c7097ed6057f43c187a62418d0c05a4bd5f18b3571db50ee0f9ce033" dependencies = [ "log", "ring", @@ -3171,9 +3152,9 @@ dependencies = [ [[package]] name = "ryu" -version = "1.0.9" +version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73b4b750c782965c211b42f022f59af1fbceabdd026623714f104152f1ec149f" +checksum = "f3f6f92acf49d1b98f7a81226834412ada05458b7364277387724a237f062695" [[package]] name = "safe-proc-macro2" @@ -3259,19 +3240,19 @@ dependencies = [ [[package]] name = "schannel" -version = "0.1.19" +version = "0.1.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f05ba609c234e60bee0d547fe94a4c7e9da733d1c962cf6e59efa4cd9c8bc75" +checksum = 
"88d6731146462ea25d9244b2ed5fd1d716d25c52e4d54aa4fb0f3c4e9854dbe2" dependencies = [ "lazy_static", - "winapi", + "windows-sys", ] [[package]] name = "schemars" -version = "0.8.8" +version = "0.8.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6b5a3c80cea1ab61f4260238409510e814e38b4b563c06044edf91e7dc070e3" +checksum = "1847b767a3d62d95cbf3d8a9f0e421cf57a0d8aa4f411d4b16525afb0284d4ed" dependencies = [ "dyn-clone", "schemars_derive", @@ -3281,9 +3262,9 @@ dependencies = [ [[package]] name = "schemars_derive" -version = "0.8.8" +version = "0.8.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41ae4dce13e8614c46ac3c38ef1c0d668b101df6ac39817aebdaa26642ddae9b" +checksum = "af4d7e1b012cb3d9129567661a63755ea4b8a7386d339dc945ae187e403c6743" dependencies = [ "proc-macro2", "quote", @@ -3428,9 +3409,9 @@ dependencies = [ [[package]] name = "serde_derive_internals" -version = "0.25.0" +version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1dbab34ca63057a1f15280bdf3c39f2b1eb1b54c17e98360e511637aef7418c6" +checksum = "85bf8229e7920a9f636479437026331ce11aa132b4dde37d121944a44d6e5f3c" dependencies = [ "proc-macro2", "quote", @@ -3443,7 +3424,7 @@ version = "1.0.81" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9b7ce2b32a1aed03c558dc61a5cd328f15aff2dbc17daad8fb8af04d2100e15c" dependencies = [ - "itoa 1.0.1", + "itoa 1.0.2", "ryu", "serde", ] @@ -4556,13 +4537,13 @@ dependencies = [ [[package]] name = "syn" -version = "1.0.92" +version = "1.0.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ff7c592601f11445996a06f8ad0c27f094a58857c2f89e97974ab9235b92c52" +checksum = "fbaf6116ab8924f39d52792136fb74fd60a80194cf1b1c6ffa6453eef1c3f942" dependencies = [ "proc-macro2", "quote", - "unicode-xid", + "unicode-ident", ] [[package]] @@ -4844,9 +4825,9 @@ checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] name = "tokio" -version = "1.18.0" +version = "1.18.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f48b6d60512a392e34dbf7fd456249fd2de3c83669ab642e021903f4015185b" +checksum = "4903bf0427cf68dddd5aa6a93220756f8be0c34fcfa9f5e6191e103e15a31395" dependencies = [ "bytes", "libc", @@ -4896,11 +4877,11 @@ dependencies = [ [[package]] name = "tokio-rustls" -version = "0.23.3" +version = "0.23.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4151fda0cf2798550ad0b34bcfc9b9dcc2a9d2471c895c68f3a8818e54f2389e" +checksum = "c43ee83903113e03984cb9e5cebe6c04a5116269e900e3ddba8f068a62adda59" dependencies = [ - "rustls 0.20.4", + "rustls 0.20.6", "tokio", "webpki 0.22.0", ] @@ -4918,9 +4899,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.1" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0edfdeb067411dba2044da6d1cb2df793dd35add7888d73c16e3381ded401764" +checksum = "f988a1a1adc2fb21f9c12aa96441da33a1728193ae0b95d2be22dbd17fcb4e5c" dependencies = [ "bytes", "futures-core", @@ -4994,9 +4975,9 @@ dependencies = [ [[package]] name = "tower-http" -version = "0.2.5" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aba3f3efabf7fb41fae8534fc20a817013dd1c12cb45441efb6c82e6556b4cd8" +checksum = "7d342c6d58709c0a6d48d48dabbb62d4ef955cf5f0f3bbfd845838e7ae88dbae" dependencies = [ "bitflags", "bytes", @@ -5229,6 +5210,12 @@ version = "0.3.8" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "099b7128301d285f79ddd55b9a83d5e6b9e97c92e0ea0daebee7263e932de992" +[[package]] +name = "unicode-ident" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d22af068fba1eb5edcb4aea19d382b2a3deb4c8f9d475c589b6ada9e0fd493ee" + [[package]] name = "unicode-normalization" version = "0.1.19" @@ -5240,9 +5227,9 @@ dependencies = [ [[package]] name = "unicode-xid" -version = "0.2.2" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ccb82d61f80a663efe1f787a51b16b5a51e3314d6ac365b08639f52387b33f3" +checksum = "957e51f3646910546462e67d5f7599b9e4fb8acdd304b087a6494730f9eebf04" [[package]] name = "untrusted" @@ -5261,7 +5248,7 @@ dependencies = [ "flate2", "log", "once_cell", - "rustls 0.20.4", + "rustls 0.20.6", "url", "webpki 0.22.0", "webpki-roots 0.22.3", diff --git a/README.md b/README.md index cdfdac0d81..8b90bbc69a 100644 --- a/README.md +++ b/README.md @@ -24,19 +24,21 @@ This project comprises primarily four crates: and its [IBC structs](https://github.com/cosmos/ibc-go/tree/main/proto/ibc). - The [`ibc-telemetry`][ibc-telemetry-crate-link] crate is a library for use in the `hermes` CLI, for gathering telemetry data and exposing that in a Prometheus endpoint. +- The [`ibc-test-framework`][ibc-test-framework-crate-link] crate provides the infrastructure and framework for writing end-to-end (E2E) tests that include the spawning of the relayer together with Cosmos full nodes. See the table below for more details. Includes [TLA+ specifications](docs/spec). -| Crate name | Type | Version | Docs | +| Crate name | Type | Version | Docs | |:-------------:|:------:|:-------------:|:-----:| -| [ibc](./modules) (modules) | lib| [![IBC Crate][ibc-crate-image]][ibc-crate-link] | [![IBC Docs][ibc-docs-image]][ibc-docs-link] | -| [ibc-relayer](./relayer) | lib | [![IBC Relayer Crate][relayer-crate-image]][relayer-crate-link] | [![IBC Relayer Docs][relayer-docs-image]][relayer-docs-link] | -| [ibc-relayer-cli](./relayer-cli) | bin: [hermes](relayer-cli/) | [![IBC Relayer CLI Crate][relayer-cli-crate-image]][relayer-cli-crate-link] | [![IBC Relayer CLI Docs][relayer-cli-docs-image]][relayer-cli-docs-link] | -| [ibc-relayer-rest](./relayer-rest) | lib | [![IBC Relayer REST Crate][relayer-rest-crate-image]][relayer-rest-crate-link] | [![IBC Relayer REST Docs][relayer-rest-docs-image]][relayer-rest-docs-link] | -| [ibc-proto](./proto) | lib | [![IBC Proto Crate][ibc-proto-crate-image]][ibc-proto-crate-link] | [![IBC Proto Docs][ibc-proto-docs-image]][ibc-proto-docs-link] | -| [ibc-telemetry](./telemetry) | lib | [![IBC Telemetry Crate][ibc-telemetry-crate-image]][ibc-telemetry-crate-link] | [![IBC Telemetry Docs][ibc-telemetry-docs-image]][ibc-telemetry-docs-link] | +| [ibc](./modules) | lib | [![IBC Crate][ibc-crate-image]][ibc-crate-link] | [![IBC Docs][ibc-docs-image]][ibc-docs-link] | +| [ibc-relayer](./relayer) | lib | [![IBC Relayer Crate][relayer-crate-image]][relayer-crate-link] | [![IBC Relayer Docs][relayer-docs-image]][relayer-docs-link] | +| [ibc-relayer-cli](./relayer-cli) | bin: [hermes](relayer-cli/) | [![IBC Relayer CLI Crate][relayer-cli-crate-image]][relayer-cli-crate-link] | [![IBC Relayer CLI Docs][relayer-cli-docs-image]][relayer-cli-docs-link] | +| [ibc-relayer-rest](./relayer-rest) | lib | [![IBC Relayer REST Crate][relayer-rest-crate-image]][relayer-rest-crate-link] | [![IBC Relayer REST 
Docs][relayer-rest-docs-image]][relayer-rest-docs-link] | +| [ibc-proto](./proto) | lib | [![IBC Proto Crate][ibc-proto-crate-image]][ibc-proto-crate-link] | [![IBC Proto Docs][ibc-proto-docs-image]][ibc-proto-docs-link] | +| [ibc-telemetry](./telemetry) | lib | [![IBC Telemetry Crate][ibc-telemetry-crate-image]][ibc-telemetry-crate-link] | [![IBC Telemetry Docs][ibc-telemetry-docs-image]][ibc-telemetry-docs-link] | +| [ibc-test-framework](./tools/test-framework) | lib | [![IBC Test Framework Crate][ibc-test-framework-crate-image]][ibc-test-framework-crate-link] | [![IBC Test Framework Docs][ibc-test-framework-docs-image]][ibc-test-framework-docs-link] | ## Requirements @@ -105,6 +107,10 @@ Unless required by applicable law or agreed to in writing, software distributed [ibc-telemetry-crate-link]: https://crates.io/crates/ibc-telemetry [ibc-telemetry-docs-image]: https://docs.rs/ibc-telemetry/badge.svg [ibc-telemetry-docs-link]: https://docs.rs/ibc-telemetry/ +[ibc-test-framework-crate-image]: https://img.shields.io/crates/v/ibc-test-framework.svg +[ibc-test-framework-crate-link]: https://crates.io/crates/ibc-test-framework +[ibc-test-framework-docs-image]: https://docs.rs/ibc-test-framework/badge.svg +[ibc-test-framework-docs-link]: https://docs.rs/ibc-test-framework/ [build-image]: https://github.com/informalsystems/ibc-rs/workflows/Rust/badge.svg [build-link]: https://github.com/informalsystems/ibc-rs/actions?query=workflow%3ARust diff --git a/ci/e2e.sh b/ci/e2e.sh index f1bdc06dce..cd8b26a387 100755 --- a/ci/e2e.sh +++ b/ci/e2e.sh @@ -36,8 +36,8 @@ echo "Add keys for chains" echo "-----------------------------------------------------------------------------------------------------------------" hermes -c "$CONFIG_PATH" keys add "$CHAIN_A" -f user_seed_"$CHAIN_A".json hermes -c "$CONFIG_PATH" keys add "$CHAIN_B" -f user_seed_"$CHAIN_B".json -hermes -c "$CONFIG_PATH" keys add "$CHAIN_A" -f user2_seed_"$CHAIN_A".json -n user2 -hermes -c "$CONFIG_PATH" keys add "$CHAIN_B" -f user2_seed_"$CHAIN_B".json -n user2 +hermes -c "$CONFIG_PATH" keys add "$CHAIN_A" -f user2_seed_"$CHAIN_A".json -k user2 +hermes -c "$CONFIG_PATH" keys add "$CHAIN_B" -f user2_seed_"$CHAIN_B".json -k user2 echo "=================================================================================================================" echo " END-TO-END TESTS " diff --git a/ci/no-std-check/Cargo.lock b/ci/no-std-check/Cargo.lock index 7ee0f45b85..0ff13be31b 100644 --- a/ci/no-std-check/Cargo.lock +++ b/ci/no-std-check/Cargo.lock @@ -723,7 +723,7 @@ dependencies = [ [[package]] name = "ibc-proto" -version = "0.17.1" +version = "0.18.0" dependencies = [ "base64", "bytes", diff --git a/flake.nix b/flake.nix index 2bb84d56bc..d8a412d92e 100644 --- a/flake.nix +++ b/flake.nix @@ -36,6 +36,7 @@ gaia6-ordered ibc-go-v2-simapp ibc-go-v3-simapp + apalache ; python = nixpkgs.python3.withPackages (p: [ diff --git a/guide/README.md b/guide/README.md index a56ec22258..c5aa9dd797 100644 --- a/guide/README.md +++ b/guide/README.md @@ -11,7 +11,7 @@ mdBook is a utility to create modern online books from Markdown files. This guide should be permanently deployed at its latest stable version at [hermes.informal.systems](https://hermes.informal.systems). -Current version: `0.14.1`. +Current version: `0.15.0`. The version of this guide is aligned with the [versioning of the ibc crates](../README.md). 
diff --git a/guide/src/SUMMARY.md b/guide/src/SUMMARY.md index 97f24cde37..d6acbed04a 100644 --- a/guide/src/SUMMARY.md +++ b/guide/src/SUMMARY.md @@ -1,6 +1,6 @@ # Summary -# Hermes (v0.14.1) +# Hermes (v0.15.0) --- - [Introduction](./index.md) diff --git a/guide/src/commands/global.md b/guide/src/commands/global.md index 162842b9d3..6c302987f0 100644 --- a/guide/src/commands/global.md +++ b/guide/src/commands/global.md @@ -3,7 +3,7 @@ Hermes accepts global options which affect all commands. ```shell -hermes 0.14.1 +hermes 0.15.0 Informal Systems Implementation of `hermes`, an IBC Relayer developed in Rust. diff --git a/guide/src/commands/keys/index.md b/guide/src/commands/keys/index.md index e84200573e..e246d0ef4a 100644 --- a/guide/src/commands/keys/index.md +++ b/guide/src/commands/keys/index.md @@ -4,9 +4,8 @@ > store the private key file. The key file will be stored on the local file system > in the user __$HOME__ folder under `$HOME/.hermes/keys/` -> __BREAKING__: As of Hermes v0.2.0, the format of the keys stored on disk has changed, and -> keys which had been previously configured must now be re-imported using either the `keys add` -> or the `keys restore` commands. +> __BREAKING__: As of Hermes v1.0.0, the sub-command `keys restore` has been removed. +> Please use the sub-command `keys add` in order to restore a key. --- @@ -20,7 +19,7 @@ To see the available sub-commands for the `keys` command run: hermes help keys ``` -Currently there are two sub-commands supported `add` and `list`: +The available sub-commands are the following: ```shell USAGE: @@ -31,10 +30,12 @@ DESCRIPTION: SUBCOMMANDS: help Get usage information - add Adds a key to a configured chain + add Adds key to a configured chain or restores a key to a configured chain + using a mnemonic + balance Query balance for a key from a configured chain. If no key is given, the + key is retrieved from the configuration file delete Delete key(s) from a configured chain list List keys configured on a chain - restore restore a key to a configured chain using a mnemonic ``` ### Key Seed file (Private Key) @@ -63,23 +64,55 @@ The command outputs a JSON similar to the one below. You can save this to a file (e.g. `key_seed.json`) and use it to add to the relayer with `hermes keys add -f key_seed.json`. See the `Adding Keys` section for more details. -### Adding Keys +### Adding and restoring Keys + +The command `keys add` has two mutually exclusive flags, `--key-file` and `--mnemonic-file`, which are used to add and to restore a key, respectively.
+ +```shell + hermes keys add [OPTIONS] --key-file --mnemonic-file + +DESCRIPTION: + Adds key to a configured chain or restores a key to a configured chain using a mnemonic + +ARGS: + chain_id identifier of the chain + +FLAGS: + -f, --key-file + path to the key file + + -m, --mnemonic-file + path to file containing mnemonic to restore the key from + +OPTIONS: + -k, --key-name + name of the key (defaults to the `key_name` defined in the config) + + -p, --hd-path + derivation path for this key [default: m/44'/118'/0'/0/0] +``` #### Add a private key to a chain from a key file ```shell - hermes keys add + hermes keys add [OPTIONS] --key-file DESCRIPTION: - Adds a key to a configured chain + Adds key to a configured chain or restores a key to a configured chain using a mnemonic -POSITIONAL ARGUMENTS: +ARGS: chain_id identifier of the chain FLAGS: - -f, --file FILE path to the key file - -n, --name NAME name of the key (defaults to the `key_name` defined in the config) - -p, --hd-path HD-PATH derivation path for this key (default: m/44'/118'/0'/0/0) + -f, --key-file + path to the key file + +OPTIONS: + -k, --key-name + name of the key (defaults to the `key_name` defined in the config) + + -p, --hd-path + derivation path for this key [default: m/44'/118'/0'/0/0] ``` To add a private key file to a chain: @@ -88,6 +121,18 @@ To add a private key file to a chain: hermes -c config.toml keys add [CHAIN_ID] -f [PRIVATE_KEY_FILE] ``` +The content of the key file should have the same format as the output of the `gaiad keys add` command: + +```json +{ + "name": "testkey", + "type": "local", + "address": "cosmos1tc3vcuxyyac0dmayf887t95tdg7qpyql48w7gj", + "pubkey": "cosmospub1addwnpepqgg7ng4ycm60pdxfzdfh4hjvkwcr3da59mr8k883vsstx60ruv7kur4525u", + "mnemonic": "[24 words mnemonic]" +} +``` + If the command is successful a message similar to the one below will be displayed: ```json @@ -96,34 +141,39 @@ Success: Added key testkey ([ADDRESS]) on [CHAIN ID] chain > **Key name:** > By default, the key will be named after the `key_name` property specified in the configuration file. -> To use a different key name, specify the `--name` option when invoking `keys add`. +> To use a different key name, specify the `--key-name` option when invoking `keys add`.
> > ``` -> hermes -c config.toml keys add [CHAINID] -f [PRIVATE_KEY_FILE] -n [KEY_NAME] +> hermes -c config.toml keys add [CHAINID] -f [PRIVATE_KEY_FILE] -k [KEY_NAME] > ``` #### Restore a private key to a chain from a mnemonic ```shell -USAGE: - hermes keys restore + hermes keys add [OPTIONS] --mnemonic-file DESCRIPTION: - restore a key to a configured chain using a mnemonic + Adds key to a configured chain or restores a key to a configured chain using a mnemonic -POSITIONAL ARGUMENTS: +ARGS: chain_id identifier of the chain FLAGS: - -m, --mnemonic MNEMONIC mnemonic to restore the key from - -n, --name NAME name of the key (defaults to the `key_name` defined in the config) - -p, --hd-path HD-PATH derivation path for this key (default: m/44'/118'/0'/0/0) + -m, --mnemonic-file + path to file containing mnemonic to restore the key from + +OPTIONS: + -k, --key-name + name of the key (defaults to the `key_name` defined in the config) + + -p, --hd-path + derivation path for this key [default: m/44'/118'/0'/0/0] ``` To restore a key from its mnemonic: ```shell -hermes -c config.toml keys restore [CHAIN_ID] -m "[MNEMONIC]" +hermes -c config.toml keys add [CHAIN_ID] -m "[MNEMONIC_FILE]" ``` or using an explicit [derivation path](https://github.com/satoshilabs/slips/blob/master/slip-0044.md), for example @@ -131,9 +181,13 @@ an Ethereum coin type (used for Evmos, Injective, Umee, Cronos, and possibly other networks): ```shell -hermes -c config.toml keys restore --mnemonic --hd-path "m/44'/60'/0'/0/0" +hermes -c config.toml keys add --mnemonic-file --hd-path "m/44'/60'/0'/0/0" ``` +The mnemonic file must contain the 24 mnemonic words on a single line, separated by spaces, in the following format: +``` +word1 word2 word3 ... word24 +``` If the command is successful a message similar to the one below will be displayed: ```json @@ -143,10 +197,10 @@ Success: Restore key testkey ([ADDRESS]) on [CHAIN ID] chain > **Key name:** > By default, the key will be named after the `key_name` property specified in the configuration file. -> To use a different key name, specify the `--name` option when invoking `keys restore`. +> To use a different key name, specify the `--key-name` option when invoking `keys add`. > > ``` -> hermes -c config.toml keys restore [CHAINID] -m "[MNEMONIC]" -n [KEY_NAME] +> hermes -c config.toml keys add [CHAINID] -m "[MNEMONIC_FILE]" -k [KEY_NAME] > ``` ### Delete keys @@ -242,3 +296,49 @@ If the command is successful a message similar to the one below will be displaye "status": "success" } ``` +### Query balance + +To retrieve the balance of an account associated with a key, use the `keys balance` command: + +```shell +USAGE: + hermes keys balance [OPTIONS] + +DESCRIPTION: + Query balance for a key from a configured chain.
If no key is given, the key is retrieved from the configuration file + +ARGS: + chain_id identifier of the chain + +OPTIONS: + -k, --key-name (optional) name of the key (defaults to the `key_name` defined in the config) +``` + +If the command is successful a message with the following format will be displayed: + +``` +Success: balance for key `KEY_NAME`: 100000000000 stake +``` + +**JSON:** + +```shell + hermes --json keys balance [OPTIONS] +``` +or + +```shell + hermes -j keys balance [OPTIONS] +``` + +If the command is successful a message with the following format will be displayed: + +```json +{ + "result": { + "amount": "99989207", + "denom": "stake" + }, + "status": "success" +} +``` \ No newline at end of file diff --git a/guide/src/commands/queries/channel.md b/guide/src/commands/queries/channel.md index 54c731f12b..fed5deb4ea 100644 --- a/guide/src/commands/queries/channel.md +++ b/guide/src/commands/queries/channel.md @@ -58,6 +58,7 @@ DESCRIPTION: Query information about channels SUBCOMMANDS: + client Query channel's client state end Query channel end ends Query channel ends and underlying connection and client objects ``` @@ -182,3 +183,231 @@ Success: ChannelEndsSummary { Passing the `-v` flag will additionally print all the details of the channel, connection, and client on both ends. + +## Query the channel client state + +Use the `query channel client` command to obtain the channel's client state: + +```shell +USAGE: + hermes query channel client --port-id --channel-id + +DESCRIPTION: + Query channel's client state + +ARGS: + identifier of the chain to query + +FLAGS: + --channel-id identifier of the channel to query + --port-id identifier of the port to query +``` + +If the command is successful a message with the following format will be displayed: +``` +Success: Some( + IdentifiedAnyClientState { + client_id: ClientId( + "07-tendermint-0", + ), + client_state: Tendermint( + ClientState { + chain_id: ChainId { + id: "network2", + version: 0, + }, + trust_level: TrustThreshold { + numerator: 1, + denominator: 3, + }, + trusting_period: 1209600s, + unbonding_period: 1814400s, + max_clock_drift: 40s, + latest_height: Height { + revision: 0, + height: 2775, + }, + proof_specs: ProofSpecs( + [ + ProofSpec( + ProofSpec { + leaf_spec: Some( + LeafOp { + hash: Sha256, + prehash_key: NoHash, + prehash_value: Sha256, + length: VarProto, + prefix: [ + 0, + ], + }, + ), + inner_spec: Some( + InnerSpec { + child_order: [ + 0, + 1, + ], + child_size: 33, + min_prefix_length: 4, + max_prefix_length: 12, + empty_child: [], + hash: Sha256, + }, + ), + max_depth: 0, + min_depth: 0, + }, + ), + ProofSpec( + ProofSpec { + leaf_spec: Some( + LeafOp { + hash: Sha256, + prehash_key: NoHash, + prehash_value: Sha256, + length: VarProto, + prefix: [ + 0, + ], + }, + ), + inner_spec: Some( + InnerSpec { + child_order: [ + 0, + 1, + ], + child_size: 32, + min_prefix_length: 1, + max_prefix_length: 1, + empty_child: [], + hash: Sha256, + }, + ), + max_depth: 0, + min_depth: 0, + }, + ), + ], + ), + upgrade_path: [ + "upgrade", + "upgradedIBCState", + ], + allow_update: AllowUpdate { + after_expiry: true, + after_misbehaviour: true, + }, + frozen_height: None, + }, + ), + }, +) +``` + +**JSON:** + +```shell + hermes --json query channel client --port-id --channel-id +``` +or + +```shell + hermes -j query channel client --port-id --channel-id +``` + +If the command is successful a message with the following format will be displayed: + +```json +{ + "result": + { + "client_id":"07-tendermint-0", + 
"client_state": + { + "allow_update": + { + "after_expiry":true, + "after_misbehaviour":true + }, + "chain_id":"network2", + "frozen_height":null, + "latest_height": + { + "revision_height":2775, + "revision_number":0 + }, + "max_clock_drift": + { + "nanos":0, + "secs":40 + }, + "proof_specs": + [ + { + "inner_spec": + { + "child_order":[0,1], + "child_size":33, + "empty_child":"", + "hash":1, + "max_prefix_length":12, + "min_prefix_length":4 + }, + "leaf_spec": + { + "hash":1, + "length":1, + "prefix":"AA==", + "prehash_key":0, + "prehash_value":1 + }, + "max_depth":0, + "min_depth":0 + }, + { + "inner_spec": + { + "child_order":[0,1], + "child_size":32, + "empty_child":"", + "hash":1, + "max_prefix_length":1, + "min_prefix_length":1 + }, + "leaf_spec": + { + "hash":1, + "length":1, + "prefix":"AA==", + "prehash_key":0, + "prehash_value":1 + }, + "max_depth":0, + "min_depth":0 + } + ], + "trust_level": + { + "denominator":3, + "numerator":1 + }, + "trusting_period": + { + "nanos":0, + "secs":1209600 + }, + "type":"Tendermint", + "unbonding_period": + { + "nanos":0, + "secs":1814400 + }, + "upgrade_path":["upgrade","upgradedIBCState"] + }, + "type":"IdentifiedAnyClientState" + }, + "status":"success" +} +``` \ No newline at end of file diff --git a/guide/src/config.md b/guide/src/config.md index b0c0f7e633..cdde0904bd 100644 --- a/guide/src/config.md +++ b/guide/src/config.md @@ -25,7 +25,7 @@ hermes [-c CONFIG_FILE] COMMAND The configuration file must have one `global` section, and one `chains` section for each chain. > **Note:** As of 0.6.0, the Hermes configuration file is self-documented. -> Please read the configuration file [`config.toml`](https://github.com/informalsystems/ibc-rs/blob/v0.14.1/config.toml) +> Please read the configuration file [`config.toml`](https://github.com/informalsystems/ibc-rs/blob/v0.15.0/config.toml) > itself for the most up-to-date documentation of parameters. By default, Hermes will relay on all channels available between all the configured chains. @@ -36,7 +36,7 @@ For example, if there are only two chains configured, then Hermes will only rela i.e. the two chains will serve as a source for each other, and likewise as a destination for each other's relevant events. Hermes will ignore all events that pertain to chains which are unknown (ie. not present in config.toml). -To restrict relaying on specific channels, or uni-directionally, you can use [packet filtering policies](https://github.com/informalsystems/ibc-rs/blob/v0.14.1/config.toml#L207-L224). +To restrict relaying on specific channels, or uni-directionally, you can use [packet filtering policies](https://github.com/informalsystems/ibc-rs/blob/v0.15.0/config.toml#L207-L224). ## Adding private keys diff --git a/guide/src/index.md b/guide/src/index.md index a10fa35ba3..52806e83ee 100644 --- a/guide/src/index.md +++ b/guide/src/index.md @@ -1,4 +1,4 @@ -# Hermes Guide (v0.14.1) +# Hermes Guide (v0.15.0) Hermes is a an open-source Rust implementation of a relayer for the diff --git a/guide/src/installation.md b/guide/src/installation.md index 9471abf421..5a9bfe166f 100644 --- a/guide/src/installation.md +++ b/guide/src/installation.md @@ -14,8 +14,8 @@ There are two main approaches for obtaining Hermes: Simply head to the GitHub [Releases][releases] page and download the latest version of Hermes binary matching your platform: -- MacOS: `hermes-v0.14.1-x86_64-apple-darwin.tar.gz` (or .zip), -- Linux: `hermes-v0.14.1-x86_64-unknown-linux-gnu.tar.gz` (or .zip). 
+- MacOS: `hermes-v0.15.0-x86_64-apple-darwin.tar.gz` (or .zip), +- Linux: `hermes-v0.15.0-x86_64-unknown-linux-gnu.tar.gz` (or .zip). The step-by-step instruction below should carry you through the whole process: @@ -47,7 +47,7 @@ hermes version ``` ``` -hermes 0.14.1 +hermes 0.15.0 ``` ## Install via Cargo @@ -81,7 +81,7 @@ hermes version ``` ``` -hermes 0.14.1 +hermes 0.15.0 ``` ## Build from source @@ -103,10 +103,10 @@ cd ibc-rs Go to the [ibc-rs releases](https://github.com/informalsystems/ibc-rs/releases) page to see what is the most recent release. -Then checkout the release, for example if the most recent release is `v0.14.1` then execute the command: +Then checkout the release, for example if the most recent release is `v0.15.0` then execute the command: ```shell -git checkout v0.14.1 +git checkout v0.15.0 ``` ### Building with `cargo build` @@ -151,7 +151,7 @@ If you run the `hermes` without any additional parameters you should see the usa ``` ``` -hermes 0.14.1 +hermes 0.15.0 Informal Systems USAGE: diff --git a/guide/src/rest-api.md b/guide/src/rest-api.md index 1b5d58224b..3593b1f2ae 100644 --- a/guide/src/rest-api.md +++ b/guide/src/rest-api.md @@ -39,7 +39,7 @@ as the version of the REST server itself (under the `ibc-relayer-rest` key). [ { "name": "ibc-relayer", - "version": "0.14.1" + "version": "0.15.0" }, { "name": "ibc-relayer-rest", diff --git a/guide/src/tutorials/local-chains/relay-paths/create-new-path.md b/guide/src/tutorials/local-chains/relay-paths/create-new-path.md index 035af4fda2..1ddc70699e 100644 --- a/guide/src/tutorials/local-chains/relay-paths/create-new-path.md +++ b/guide/src/tutorials/local-chains/relay-paths/create-new-path.md @@ -3,7 +3,7 @@ Perform client creation, connection and channel handshake to establish a new path between the `transfer` ports on `ibc-0` and `ibc-1` chains. ```shell -hermes create channel ibc-0 ibc-1 --port-a transfer --port-b transfer +hermes create channel ibc-0 -c ibc-1 --port-a transfer --port-b transfer --new-client-connection ``` If all the handshakes are performed successfully you should see a message similar to the one below: diff --git a/guide/src/tutorials/local-chains/start.md b/guide/src/tutorials/local-chains/start.md index edf9b61f5a..791d977fb3 100644 --- a/guide/src/tutorials/local-chains/start.md +++ b/guide/src/tutorials/local-chains/start.md @@ -8,7 +8,7 @@ To this end, clone the `ibc-rs` repository and check out the current version: ```bash git clone git@github.com:informalsystems/ibc-rs.git cd ibc-rs -git checkout v0.14.1 +git checkout v0.15.0 ``` ### Stop existing `gaiad` processes diff --git a/modules/Cargo.toml b/modules/Cargo.toml index 758ec0241d..ab11162874 100644 --- a/modules/Cargo.toml +++ b/modules/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ibc" -version = "0.14.1" +version = "0.15.0" edition = "2021" license = "Apache-2.0" readme = "README.md" @@ -21,6 +21,7 @@ std = [ "flex-error/std", "flex-error/eyre_tracer", "ibc-proto/std", + "ics23/std", "clock", "beefy-client/std", "sp-runtime/std", @@ -39,7 +40,7 @@ mocks = ["tendermint-testgen", "clock", "std", "sp-io", "sp-io/std"] [dependencies] # Proto definitions for all IBC-related interfaces, e.g., connections or channels. 
borsh = { version = "0.9.3", default-features = false } -ibc-proto = { version = "0.17.1", path = "../proto", default-features = false } +ibc-proto = { version = "0.18.0", path = "../proto", default-features = false } ics23 = { version = "=0.8.0-alpha", default-features = false } time = { version = "0.3", default-features = false } serde_derive = { version = "1.0.104", default-features = false } @@ -54,7 +55,8 @@ subtle-encoding = { version = "0.5", default-features = false } sha2 = { version = "0.10.2", default-features = false } flex-error = { version = "0.4.4", default-features = false } num-traits = { version = "0.2.15", default-features = false } -derive_more = { version = "0.99.17", default-features = false, features = ["from", "display"] } +derive_more = { version = "0.99.17", default-features = false, features = ["from", "into", "display"] } +uint = { version = "0.9", default-features = false } beefy-client = { package = "beefy-generic-client", git = "https://github.com/ComposableFi/beefy-client", branch = "master", default-features = false } sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.22", default-features = false } sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.22", default-features = false } diff --git a/modules/src/applications/ics20_fungible_token_transfer/context.rs b/modules/src/applications/ics20_fungible_token_transfer/context.rs deleted file mode 100644 index 58b7e6aaca..0000000000 --- a/modules/src/applications/ics20_fungible_token_transfer/context.rs +++ /dev/null @@ -1,5 +0,0 @@ -use crate::core::ics04_channel::context::{ChannelKeeper, ChannelReader}; - -/// Captures all the dependencies which the ICS20 module requires to be able to dispatch and -/// process IBC messages. -pub trait Ics20Context: ChannelReader + ChannelKeeper {} diff --git a/modules/src/applications/ics20_fungible_token_transfer/denom.rs b/modules/src/applications/ics20_fungible_token_transfer/denom.rs deleted file mode 100644 index 542e25f00a..0000000000 --- a/modules/src/applications/ics20_fungible_token_transfer/denom.rs +++ /dev/null @@ -1,28 +0,0 @@ -use sha2::{Digest, Sha256}; -use subtle_encoding::hex; - -use crate::core::ics24_host::identifier::{ChannelId, PortId}; -use crate::prelude::*; - -use super::error::Error; - -pub fn derive_ibc_denom( - port_id: &PortId, - channel_id: &ChannelId, - denom: &str, -) -> Result { - let transfer_path = format!("{}/{}/{}", port_id, channel_id, denom); - derive_ibc_denom_with_path(&transfer_path) -} - -/// Derive the transferred token denomination using -/// -pub fn derive_ibc_denom_with_path(transfer_path: &str) -> Result { - let mut hasher = Sha256::new(); - hasher.update(transfer_path.as_bytes()); - - let denom_bytes = hasher.finalize(); - let denom_hex = String::from_utf8(hex::encode_upper(denom_bytes)).map_err(Error::utf8)?; - - Ok(format!("ibc/{}", denom_hex)) -} diff --git a/modules/src/applications/ics20_fungible_token_transfer/error.rs b/modules/src/applications/ics20_fungible_token_transfer/error.rs deleted file mode 100644 index a7cb6a81b6..0000000000 --- a/modules/src/applications/ics20_fungible_token_transfer/error.rs +++ /dev/null @@ -1,46 +0,0 @@ -use crate::core::ics04_channel::error as channel_error; -use crate::core::ics24_host::error::ValidationError; -use crate::core::ics24_host::identifier::{ChannelId, PortId}; -use crate::prelude::*; - -use alloc::string::FromUtf8Error; -use flex_error::{define_error, DisplayOnly}; - -define_error! 
{ - #[derive(Debug, PartialEq, Eq)] - Error { - UnknowMessageTypeUrl - { url: String } - | e | { format_args!("unrecognized ICS-20 transfer message type URL {0}", e.url) }, - - Ics04Channel - [ channel_error::Error ] - |_ | { "Ics04 channel error" }, - - DestinationChannelNotFound - { port_id: PortId, channel_id: ChannelId } - | e | { format_args!("destination channel not found in the counterparty of port_id {0} and channel_id {1} ", e.port_id, e.channel_id) }, - - InvalidPortId - { context: String } - [ ValidationError ] - | _ | { "invalid port identifier" }, - - InvalidChannelId - { context: String } - [ ValidationError ] - | _ | { "invalid channel identifier" }, - - InvalidPacketTimeoutHeight - { context: String } - | _ | { "invalid packet timeout height value" }, - - InvalidPacketTimeoutTimestamp - { timestamp: u64 } - | _ | { "invalid packet timeout timestamp value" }, - - Utf8 - [ DisplayOnly ] - | _ | { "utf8 decoding error" }, - } -} diff --git a/modules/src/applications/ics20_fungible_token_transfer/relay_application_logic.rs b/modules/src/applications/ics20_fungible_token_transfer/relay_application_logic.rs deleted file mode 100644 index eb612947c2..0000000000 --- a/modules/src/applications/ics20_fungible_token_transfer/relay_application_logic.rs +++ /dev/null @@ -1,3 +0,0 @@ -//! This module implements the processing logic for ICS20 (token transfer) message. - -pub mod send_transfer; diff --git a/modules/src/applications/ics20_fungible_token_transfer/relay_application_logic/send_transfer.rs b/modules/src/applications/ics20_fungible_token_transfer/relay_application_logic/send_transfer.rs deleted file mode 100644 index aae23629a2..0000000000 --- a/modules/src/applications/ics20_fungible_token_transfer/relay_application_logic/send_transfer.rs +++ /dev/null @@ -1,52 +0,0 @@ -use crate::applications::ics20_fungible_token_transfer::error::Error; -use crate::applications::ics20_fungible_token_transfer::msgs::transfer::MsgTransfer; -use crate::clients::host_functions::HostFunctionsProvider; -use crate::core::ics04_channel::handler::send_packet::send_packet; -use crate::core::ics04_channel::packet::Packet; -use crate::core::ics04_channel::packet::PacketResult; -use crate::core::ics26_routing::context::LightClientContext; -use crate::handler::HandlerOutput; -use crate::prelude::*; - -pub(crate) fn send_transfer( - ctx: &Ctx, - msg: MsgTransfer, -) -> Result, Error> -where - Ctx: LightClientContext, -{ - let source_channel_end = ctx - .channel_end(&(msg.source_port.clone(), msg.source_channel)) - .map_err(Error::ics04_channel)?; - - let destination_port = source_channel_end.counterparty().port_id().clone(); - let destination_channel = source_channel_end - .counterparty() - .channel_id() - .ok_or_else(|| { - Error::destination_channel_not_found(msg.source_port.clone(), msg.source_channel) - })?; - - // get the next sequence - let sequence = ctx - .get_next_sequence_send(&(msg.source_port.clone(), msg.source_channel)) - .map_err(Error::ics04_channel)?; - - //TODO: Application LOGIC. - - let packet = Packet { - sequence, - source_port: msg.source_port, - source_channel: msg.source_channel, - destination_port, - destination_channel: *destination_channel, - data: vec![0], - timeout_height: msg.timeout_height, - timeout_timestamp: msg.timeout_timestamp, - }; - - let handler_output = send_packet(ctx, packet).map_err(Error::ics04_channel)?; - - //TODO: add event/atributes and writes to the store issued by the application logic for packet sending. 
- Ok(handler_output) -} diff --git a/modules/src/applications/mod.rs b/modules/src/applications/mod.rs index 9e6905bb39..6de19dfe0d 100644 --- a/modules/src/applications/mod.rs +++ b/modules/src/applications/mod.rs @@ -1,3 +1,3 @@ //! Various packet encoding semantics which underpin the various types of transactions. -pub mod ics20_fungible_token_transfer; +pub mod transfer; diff --git a/modules/src/applications/transfer/acknowledgement.rs b/modules/src/applications/transfer/acknowledgement.rs new file mode 100644 index 0000000000..cd7a6272dd --- /dev/null +++ b/modules/src/applications/transfer/acknowledgement.rs @@ -0,0 +1,61 @@ +use super::error::Error; +use crate::core::ics26_routing::context::Acknowledgement as AckTrait; +use crate::prelude::*; +use core::fmt::{Display, Formatter}; + +use serde::{Deserialize, Deserializer}; + +/// A string constant included in error acknowledgements. +/// NOTE: Changing this const is state machine breaking as acknowledgements are written into state +pub const ACK_ERR_STR: &str = "error handling packet on destination chain: see events for details"; +pub const ACK_SUCCESS_B64: &[u8] = b"AQ=="; + +#[derive(Clone, Debug)] +pub enum Acknowledgement { + /// Equivalent to b"AQ==" (i.e. `base64::encode(0x01)`) + Success(Vec), + /// Error Acknowledgement + Error(String), +} + +impl Acknowledgement { + pub fn success() -> Self { + Self::Success(ACK_SUCCESS_B64.to_vec()) + } + + pub fn from_error(err: Error) -> Self { + Self::Error(format!("{}: {}", ACK_ERR_STR, err)) + } +} + +impl AsRef<[u8]> for Acknowledgement { + fn as_ref(&self) -> &[u8] { + match self { + Acknowledgement::Success(b) => b.as_slice(), + Acknowledgement::Error(s) => s.as_bytes(), + } + } +} + +impl<'de> Deserialize<'de> for Acknowledgement { + fn deserialize>(deserializer: D) -> Result { + let s = String::deserialize(deserializer)?; + let ack = if s.as_bytes() == ACK_SUCCESS_B64 { + Self::Success(ACK_SUCCESS_B64.to_vec()) + } else { + Self::Error(s) + }; + Ok(ack) + } +} + +impl Display for Acknowledgement { + fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result { + match self { + Acknowledgement::Success(_) => write!(f, "AQ=="), + Acknowledgement::Error(err_str) => write!(f, "{}", err_str), + } + } +} + +impl AckTrait for Acknowledgement {} diff --git a/modules/src/applications/transfer/context.rs b/modules/src/applications/transfer/context.rs new file mode 100644 index 0000000000..ab9c1afdb8 --- /dev/null +++ b/modules/src/applications/transfer/context.rs @@ -0,0 +1,360 @@ +use subtle_encoding::hex; + +use super::error::Error as Ics20Error; +use crate::applications::transfer::acknowledgement::Acknowledgement; +use crate::applications::transfer::events::{AckEvent, AckStatusEvent, RecvEvent, TimeoutEvent}; +use crate::applications::transfer::packet::PacketData; +use crate::applications::transfer::relay::on_ack_packet::process_ack_packet; +use crate::applications::transfer::relay::on_recv_packet::process_recv_packet; +use crate::applications::transfer::relay::on_timeout_packet::process_timeout_packet; +use crate::applications::transfer::{PrefixedCoin, PrefixedDenom, VERSION}; +use crate::core::ics04_channel::channel::{Counterparty, Order}; +use crate::core::ics04_channel::context::{ChannelKeeper, ChannelReader}; +use crate::core::ics04_channel::msgs::acknowledgement::Acknowledgement as GenericAcknowledgement; +use crate::core::ics04_channel::packet::Packet; +use crate::core::ics04_channel::Version; +use crate::core::ics05_port::context::PortReader; +use 
crate::core::ics24_host::identifier::{ChannelId, ConnectionId, PortId}; +use crate::core::ics26_routing::context::{ModuleOutputBuilder, OnRecvPacketAck, ReaderContext}; +use crate::prelude::*; +use crate::signer::Signer; + +pub trait Ics20Keeper: + ChannelKeeper + BankKeeper::AccountId> +{ + type AccountId; +} + +pub trait Ics20Reader: ChannelReader + PortReader +where + Self: Sized, +{ + type AccountId: TryFrom; + + /// get_port returns the portID for the transfer module. + fn get_port(&self) -> Result; + + /// Returns the escrow account id for a port and channel combination + fn get_channel_escrow_address( + &self, + port_id: &PortId, + channel_id: ChannelId, + ) -> Result<::AccountId, Ics20Error> { + let hash = cosmos_adr028_escrow_address(self, port_id, channel_id); + + String::from_utf8(hex::encode_upper(hash)) + .expect("hex encoded bytes are not valid UTF8") + .parse::() + .map_err(Ics20Error::signer)? + .try_into() + .map_err(|_| Ics20Error::parse_account_failure()) + } + + /// Returns true iff send is enabled. + fn is_send_enabled(&self) -> bool; + + /// Returns true iff receive is enabled. + fn is_receive_enabled(&self) -> bool; + + /// Returns a hash of the prefixed denom. + /// Implement only if the host chain supports hashed denominations. + fn denom_hash_string(&self, _denom: &PrefixedDenom) -> Option { + None + } +} + +// https://github.com/cosmos/cosmos-sdk/blob/master/docs/architecture/adr-028-public-key-addresses.md +fn cosmos_adr028_escrow_address( + ctx: &dyn ChannelReader, + port_id: &PortId, + channel_id: ChannelId, +) -> Vec { + let contents = format!("{}/{}", port_id, channel_id); + let mut data = VERSION.as_bytes().to_vec(); + data.extend_from_slice(&[0]); + data.extend_from_slice(contents.as_bytes()); + + let mut hash = ctx.hash(data); + hash.truncate(20); + hash +} + +pub trait BankKeeper { + type AccountId; + + /// This function should enable sending ibc fungible tokens from one account to another + fn send_coins( + &mut self, + from: &Self::AccountId, + to: &Self::AccountId, + amt: &PrefixedCoin, + ) -> Result<(), Ics20Error>; + + /// This function to enable minting ibc tokens to a user account + fn mint_coins( + &mut self, + account: &Self::AccountId, + amt: &PrefixedCoin, + ) -> Result<(), Ics20Error>; + + /// This function should enable burning of minted tokens in a user account + fn burn_coins( + &mut self, + account: &Self::AccountId, + amt: &PrefixedCoin, + ) -> Result<(), Ics20Error>; +} + +/// Captures all the dependencies which the ICS20 module requires to be able to dispatch and +/// process IBC messages. 
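A minimal standalone sketch (not part of this change set) of the ADR-028 derivation that `cosmos_adr028_escrow_address` above performs, assuming the host's `hash` implementation is SHA-256 as on Cosmos SDK chains; the port/channel pair and the helper name are illustrative only:

use sha2::{Digest, Sha256};

// Escrow account for ("transfer", "channel-0"):
// sha256("ics20-1" ++ 0x00 ++ "transfer/channel-0"), truncated to its first 20 bytes.
fn example_escrow_account() -> Vec<u8> {
    let contents = "transfer/channel-0";
    let mut data = b"ics20-1".to_vec(); // the ICS20 VERSION string
    data.push(0);
    data.extend_from_slice(contents.as_bytes());
    let mut hash = Sha256::digest(&data).to_vec();
    hash.truncate(20);
    hash // hex- or bech32-encode these bytes to obtain the address string
}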
+pub trait Ics20Context: + Ics20Keeper::AccountId> + + Ics20Reader::AccountId> + + ReaderContext +{ + type AccountId: TryFrom; +} + +fn validate_transfer_channel_params( + ctx: &mut impl Ics20Context, + order: Order, + port_id: &PortId, + channel_id: &ChannelId, + version: &Version, +) -> Result<(), Ics20Error> { + if channel_id.sequence() > (u32::MAX as u64) { + return Err(Ics20Error::chan_seq_exceeds_limit(channel_id.sequence())); + } + + if order != Order::Unordered { + return Err(Ics20Error::channel_not_unordered(order)); + } + + let bound_port = ctx.get_port()?; + if port_id != &bound_port { + return Err(Ics20Error::invalid_port(port_id.clone(), bound_port)); + } + + if version != &Version::ics20() { + return Err(Ics20Error::invalid_version(version.clone())); + } + + Ok(()) +} + +fn validate_counterparty_version(counterparty_version: &Version) -> Result<(), Ics20Error> { + if counterparty_version == &Version::ics20() { + Ok(()) + } else { + Err(Ics20Error::invalid_counterparty_version( + counterparty_version.clone(), + )) + } +} + +#[allow(clippy::too_many_arguments)] +pub fn on_chan_open_init( + ctx: &mut impl Ics20Context, + _output: &mut ModuleOutputBuilder, + order: Order, + _connection_hops: &[ConnectionId], + port_id: &PortId, + channel_id: &ChannelId, + _counterparty: &Counterparty, + version: &Version, +) -> Result<(), Ics20Error> { + validate_transfer_channel_params(ctx, order, port_id, channel_id, version) +} + +#[allow(clippy::too_many_arguments)] +pub fn on_chan_open_try( + ctx: &mut impl Ics20Context, + _output: &mut ModuleOutputBuilder, + order: Order, + _connection_hops: &[ConnectionId], + port_id: &PortId, + channel_id: &ChannelId, + _counterparty: &Counterparty, + version: &Version, + counterparty_version: &Version, +) -> Result { + validate_transfer_channel_params(ctx, order, port_id, channel_id, version)?; + validate_counterparty_version(counterparty_version)?; + Ok(Version::ics20()) +} + +pub fn on_chan_open_ack( + _ctx: &mut impl Ics20Context, + _output: &mut ModuleOutputBuilder, + _port_id: &PortId, + _channel_id: &ChannelId, + counterparty_version: &Version, +) -> Result<(), Ics20Error> { + validate_counterparty_version(counterparty_version)?; + Ok(()) +} + +pub fn on_chan_open_confirm( + _ctx: &mut impl Ics20Context, + _output: &mut ModuleOutputBuilder, + _port_id: &PortId, + _channel_id: &ChannelId, +) -> Result<(), Ics20Error> { + Ok(()) +} + +pub fn on_chan_close_init( + _ctx: &mut impl Ics20Context, + _output: &mut ModuleOutputBuilder, + _port_id: &PortId, + _channel_id: &ChannelId, +) -> Result<(), Ics20Error> { + Err(Ics20Error::cant_close_channel()) +} + +pub fn on_chan_close_confirm( + _ctx: &mut impl Ics20Context, + _output: &mut ModuleOutputBuilder, + _port_id: &PortId, + _channel_id: &ChannelId, +) -> Result<(), Ics20Error> { + Ok(()) +} + +pub fn on_recv_packet( + ctx: &Ctx, + output: &mut ModuleOutputBuilder, + packet: &Packet, + _relayer: &Signer, +) -> OnRecvPacketAck { + let data = match serde_json::from_slice::(&packet.data) { + Ok(data) => data, + Err(_) => { + return OnRecvPacketAck::Failed(Box::new(Acknowledgement::Error( + Ics20Error::packet_data_deserialization().to_string(), + ))) + } + }; + + let ack = match process_recv_packet(ctx, output, packet, data.clone()) { + Ok(write_fn) => OnRecvPacketAck::Successful(Box::new(Acknowledgement::success()), write_fn), + Err(e) => OnRecvPacketAck::Failed(Box::new(Acknowledgement::from_error(e))), + }; + + let recv_event = RecvEvent { + receiver: data.receiver, + denom: data.token.denom, + amount: 
data.token.amount, + success: ack.is_successful(), + }; + output.emit(recv_event.into()); + + ack +} + +pub fn on_acknowledgement_packet( + ctx: &mut impl Ics20Context, + output: &mut ModuleOutputBuilder, + packet: &Packet, + acknowledgement: &GenericAcknowledgement, + _relayer: &Signer, +) -> Result<(), Ics20Error> { + let data = serde_json::from_slice::(&packet.data) + .map_err(|_| Ics20Error::packet_data_deserialization())?; + + let acknowledgement = serde_json::from_slice::(acknowledgement.as_ref()) + .map_err(|_| Ics20Error::ack_deserialization())?; + + process_ack_packet(ctx, packet, &data, &acknowledgement)?; + + let ack_event = AckEvent { + receiver: data.receiver, + denom: data.token.denom, + amount: data.token.amount, + acknowledgement: acknowledgement.clone(), + }; + output.emit(ack_event.into()); + output.emit(AckStatusEvent { acknowledgement }.into()); + + Ok(()) +} + +pub fn on_timeout_packet( + ctx: &mut impl Ics20Context, + output: &mut ModuleOutputBuilder, + packet: &Packet, + _relayer: &Signer, +) -> Result<(), Ics20Error> { + let data = serde_json::from_slice::(&packet.data) + .map_err(|_| Ics20Error::packet_data_deserialization())?; + + process_timeout_packet(ctx, packet, &data)?; + + let timeout_event = TimeoutEvent { + refund_receiver: data.sender, + refund_denom: data.token.denom, + refund_amount: data.token.amount, + }; + output.emit(timeout_event.into()); + + Ok(()) +} + +#[cfg(test)] +pub(crate) mod test { + use std::sync::Mutex; + + use std::sync::Arc; + use subtle_encoding::bech32; + + use crate::applications::transfer::context::cosmos_adr028_escrow_address; + use crate::applications::transfer::error::Error as Ics20Error; + use crate::applications::transfer::msgs::transfer::MsgTransfer; + use crate::applications::transfer::relay::send_transfer::send_transfer; + use crate::applications::transfer::PrefixedCoin; + use crate::core::ics04_channel::error::Error; + use crate::handler::HandlerOutputBuilder; + use crate::mock::context::MockIbcStore; + use crate::prelude::*; + use crate::test_utils::DummyTransferModule; + + pub(crate) fn deliver( + ctx: &mut DummyTransferModule, + output: &mut HandlerOutputBuilder<()>, + msg: MsgTransfer, + ) -> Result<(), Error> { + send_transfer(ctx, output, msg).map_err(|e: Ics20Error| Error::app_module(e.to_string())) + } + + #[test] + fn test_cosmos_escrow_address() { + fn assert_eq_escrow_address(port_id: &str, channel_id: &str, address: &str) { + let port_id = port_id.parse().unwrap(); + let channel_id = channel_id.parse().unwrap(); + let gen_address = { + let ibc_store = MockIbcStore::default(); + let ctx = DummyTransferModule::new(Arc::new(Mutex::new(ibc_store))); + let addr = cosmos_adr028_escrow_address(&ctx, &port_id, channel_id); + bech32::encode("cosmos", addr) + }; + assert_eq!(gen_address, address.to_owned()) + } + + // addresses obtained using `gaiad query ibc-transfer escrow-address [port-id] [channel-id]` + assert_eq_escrow_address( + "transfer", + "channel-141", + "cosmos1x54ltnyg88k0ejmk8ytwrhd3ltm84xehrnlslf", + ); + assert_eq_escrow_address( + "transfer", + "channel-207", + "cosmos1ju6tlfclulxumtt2kglvnxduj5d93a64r5czge", + ); + assert_eq_escrow_address( + "transfer", + "channel-187", + "cosmos177x69sver58mcfs74x6dg0tv6ls4s3xmmcaw53", + ); + } +} diff --git a/modules/src/applications/transfer/denom.rs b/modules/src/applications/transfer/denom.rs new file mode 100644 index 0000000000..998f731a40 --- /dev/null +++ b/modules/src/applications/transfer/denom.rs @@ -0,0 +1,473 @@ +use core::fmt; +use core::str::FromStr; + 
+use derive_more::{Display, From, Into}; +use ibc_proto::cosmos::base::v1beta1::Coin as RawCoin; +use ibc_proto::ibc::applications::transfer::v1::DenomTrace as RawDenomTrace; +use serde::{Deserialize, Serialize}; + +use super::error::Error; +use crate::bigint::U256; +use crate::core::ics24_host::identifier::{ChannelId, PortId}; +use crate::prelude::*; +use crate::serializers::serde_string; + +/// A `Coin` type with fully qualified `PrefixedDenom`. +pub type PrefixedCoin = Coin; + +/// A `Coin` type with an unprefixed denomination. +pub type BaseCoin = Coin; + +/// Base denomination type +#[derive(Clone, Debug, Eq, PartialEq, PartialOrd, Ord, Serialize, Deserialize, Display)] +#[serde(transparent)] +pub struct BaseDenom(String); + +impl FromStr for BaseDenom { + type Err = Error; + + fn from_str(s: &str) -> Result { + if s.trim().is_empty() { + Err(Error::empty_base_denom()) + } else { + Ok(BaseDenom(s.to_owned())) + } + } +} + +#[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq)] +pub struct TracePrefix { + port_id: PortId, + channel_id: ChannelId, +} + +impl TracePrefix { + pub fn new(port_id: PortId, channel_id: ChannelId) -> Self { + Self { + port_id, + channel_id, + } + } +} + +impl fmt::Display for TracePrefix { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}/{}", self.port_id, self.channel_id) + } +} + +/// A full trace path modelled as a collection of `TracePrefix`s. +// Internally, the `TracePath` is modelled as a `Vec` but with the order reversed, i.e. +// "transfer/channel-0/transfer/channel-1/uatom" => `["transfer/channel-1", "transfer/channel-0"]` +// This is done for ease of addition/removal of prefixes. +#[derive(Clone, Debug, Default, Eq, PartialEq, PartialOrd, Ord, From)] +pub struct TracePath(Vec); + +impl TracePath { + /// Returns true iff this path starts with the specified prefix + pub fn starts_with(&self, prefix: &TracePrefix) -> bool { + self.0.last().map(|p| p == prefix).unwrap_or(false) + } + + /// Removes the specified prefix from the path if there is a match, otherwise does nothing. + pub fn remove_prefix(&mut self, prefix: &TracePrefix) { + if self.starts_with(prefix) { + self.0.pop(); + } + } + + /// Adds the specified prefix to the path. + pub fn add_prefix(&mut self, prefix: TracePrefix) { + self.0.push(prefix) + } + + /// Returns true if the path is empty and false otherwise. 
+ pub fn is_empty(&self) -> bool { + self.0.is_empty() + } +} + +impl<'a> TryFrom> for TracePath { + type Error = Error; + + fn try_from(v: Vec<&'a str>) -> Result { + if v.len() % 2 != 0 { + return Err(Error::invalid_trace_length(v.len())); + } + + let mut trace = vec![]; + let id_pairs = v.chunks_exact(2).map(|paths| (paths[0], paths[1])); + for (pos, (port_id, channel_id)) in id_pairs.rev().enumerate() { + let port_id = + PortId::from_str(port_id).map_err(|e| Error::invalid_trace_port_id(pos, e))?; + let channel_id = ChannelId::from_str(channel_id) + .map_err(|e| Error::invalid_trace_channel_id(pos, e))?; + trace.push(TracePrefix { + port_id, + channel_id, + }); + } + + Ok(trace.into()) + } +} + +impl FromStr for TracePath { + type Err = Error; + + fn from_str(s: &str) -> Result { + let parts = { + let parts: Vec<&str> = s.split('/').collect(); + if parts.len() == 1 && parts[0].trim().is_empty() { + vec![] + } else { + parts + } + }; + parts.try_into() + } +} + +impl fmt::Display for TracePath { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let path = self + .0 + .iter() + .rev() + .map(|prefix| prefix.to_string()) + .collect::>() + .join("/"); + write!(f, "{}", path) + } +} + +/// A type that contains the base denomination for ICS20 and the source tracing information path. +#[derive(Clone, Debug, Eq, PartialEq, PartialOrd, Ord, Serialize, Deserialize)] +pub struct PrefixedDenom { + /// A series of `{port-id}/{channel-id}`s for tracing the source of the token. + #[serde(with = "serde_string")] + trace_path: TracePath, + /// Base denomination of the relayed fungible token. + base_denom: BaseDenom, +} + +impl PrefixedDenom { + /// Removes the specified prefix from the trace path if there is a match, otherwise does nothing. + pub fn remove_trace_prefix(&mut self, prefix: &TracePrefix) { + self.trace_path.remove_prefix(prefix) + } + + /// Adds the specified prefix to the trace path. + pub fn add_trace_prefix(&mut self, prefix: TracePrefix) { + self.trace_path.add_prefix(prefix) + } +} + +/// Returns true if the denomination originally came from the sender chain and +/// false otherwise. +/// +/// Note: It is better to think of the "source" chain as the chain that +/// escrows/unescrows the token, while the other chain mints/burns the tokens, +/// respectively. A chain being the "source" of a token does NOT mean it is the +/// original creator of the token (e.g. "uatom"), as "source" might suggest. +/// +/// This means that in any given transfer, a chain can very well be the source +/// of a token of which it is not the creator. For example, let +/// +/// A: sender chain in this transfer, port "transfer" and channel "c2b" (to B) +/// B: receiver chain in this transfer, port "transfer" and channel "c2a" (to A) +/// token denom: "transfer/someOtherChannel/someDenom" +/// +/// A, initiator of the transfer, needs to figure out if it should escrow the +/// tokens, or burn them. If B had originally sent the token to A in a previous +/// transfer, then A would have stored the token as "transfer/c2b/someDenom". +/// Now, A is sending to B, so to check if B is the source of the token, we need +/// to check if the token starts with "transfer/c2b". In this example, it +/// doesn't, so the token doesn't originate from B. A is considered the source, +/// even though it is not the creator of the token. Specifically, the token was +/// created by the chain at the other end of A's port "transfer" and channel +/// "someOtherChannel". 
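As a concrete application of the source/sink rule described above (a sketch only; the channel identifier and denominations are made up):

fn source_sink_example() {
    // A voucher previously received over ("transfer", "channel-3"): the counterparty
    // chain is the source, so sending it back means the voucher is burned here.
    let voucher: PrefixedDenom = "transfer/channel-3/uatom".parse().unwrap();
    assert!(is_receiver_chain_source(
        "transfer".parse().unwrap(),
        "channel-3".parse().unwrap(),
        &voucher
    ));

    // A token without that prefix: this chain is the source and escrows it instead.
    let native: PrefixedDenom = "uatom".parse().unwrap();
    assert!(is_sender_chain_source(
        "transfer".parse().unwrap(),
        "channel-3".parse().unwrap(),
        &native
    ));
}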
+pub fn is_sender_chain_source( + source_port: PortId, + source_channel: ChannelId, + denom: &PrefixedDenom, +) -> bool { + !is_receiver_chain_source(source_port, source_channel, denom) +} + +/// Returns true if the denomination originally came from the receiving chain and false otherwise. +pub fn is_receiver_chain_source( + source_port: PortId, + source_channel: ChannelId, + denom: &PrefixedDenom, +) -> bool { + // For example, let + // A: sender chain in this transfer, port "transfer" and channel "c2b" (to B) + // B: receiver chain in this transfer, port "transfer" and channel "c2a" (to A) + // + // If B had originally sent the token in a previous tranfer, then A would have stored the token as + // "transfer/c2b/{token_denom}". Now, A is sending to B, so to check if B is the source of the token, + // we need to check if the token starts with "transfer/c2b". + let prefix = TracePrefix::new(source_port, source_channel); + denom.trace_path.starts_with(&prefix) +} + +impl FromStr for PrefixedDenom { + type Err = Error; + + fn from_str(s: &str) -> Result { + let mut parts: Vec<&str> = s.split('/').collect(); + let last_part = parts.pop().expect("split() returned an empty iterator"); + + let (base_denom, trace_path) = { + if last_part == s { + (BaseDenom::from_str(s)?, TracePath::default()) + } else { + let base_denom = BaseDenom::from_str(last_part)?; + let trace_path = TracePath::try_from(parts)?; + (base_denom, trace_path) + } + }; + + Ok(Self { + trace_path, + base_denom, + }) + } +} + +impl TryFrom for PrefixedDenom { + type Error = Error; + + fn try_from(value: RawDenomTrace) -> Result { + let base_denom = BaseDenom::from_str(&value.base_denom)?; + let trace_path = TracePath::from_str(&value.path)?; + Ok(Self { + trace_path, + base_denom, + }) + } +} + +impl From for RawDenomTrace { + fn from(value: PrefixedDenom) -> Self { + Self { + path: value.trace_path.to_string(), + base_denom: value.base_denom.to_string(), + } + } +} + +impl From for PrefixedDenom { + fn from(denom: BaseDenom) -> Self { + Self { + trace_path: Default::default(), + base_denom: denom, + } + } +} + +impl fmt::Display for PrefixedDenom { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + if self.trace_path.0.is_empty() { + write!(f, "{}", self.base_denom) + } else { + write!(f, "{}/{}", self.trace_path, self.base_denom) + } + } +} + +/// A type for representing token transfer amounts. +#[derive(Copy, Clone, Debug, Eq, PartialEq, PartialOrd, Ord, Display, From, Into)] +pub struct Amount(U256); + +impl Amount { + pub fn checked_add(self, rhs: Self) -> Option { + self.0.checked_add(rhs.0).map(Self) + } + + pub fn checked_sub(self, rhs: Self) -> Option { + self.0.checked_sub(rhs.0).map(Self) + } +} + +impl FromStr for Amount { + type Err = Error; + + fn from_str(s: &str) -> Result { + let amount = U256::from_str_radix(s, 10).map_err(Error::invalid_amount)?; + Ok(Self(amount)) + } +} + +impl From for Amount { + fn from(v: u64) -> Self { + Self(v.into()) + } +} + +/// Coin defines a token with a denomination and an amount. 
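A small illustrative check (not part of the diff) of the `Amount` type defined above: it parses from base-10 strings into a `U256` and exposes only checked arithmetic, so overflow and underflow surface as `None` rather than panicking:

fn amount_example() {
    let a: Amount = "1000000".parse().unwrap();
    let b = Amount::from(1u64);
    assert!(a.checked_add(b).is_some());
    // Subtracting the larger amount from the smaller one underflows.
    assert!(b.checked_sub(a).is_none());
}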
+#[derive(Clone, Debug, Eq, PartialEq, PartialOrd, Ord, Serialize, Deserialize)] +pub struct Coin { + /// Denomination + pub denom: D, + /// Amount + #[serde(with = "serde_string")] + pub amount: Amount, +} + +impl TryFrom for Coin +where + Error: From<::Err>, +{ + type Error = Error; + + fn try_from(proto: RawCoin) -> Result, Self::Error> { + let denom = D::from_str(&proto.denom)?; + let amount = Amount::from_str(&proto.amount)?; + Ok(Self { denom, amount }) + } +} + +impl From> for RawCoin { + fn from(coin: Coin) -> RawCoin { + RawCoin { + denom: coin.denom.to_string(), + amount: coin.amount.to_string(), + } + } +} + +impl From for PrefixedCoin { + fn from(coin: BaseCoin) -> PrefixedCoin { + PrefixedCoin { + denom: coin.denom.into(), + amount: coin.amount, + } + } +} + +impl fmt::Display for PrefixedCoin { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}-{}", self.amount, self.denom) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_denom_validation() -> Result<(), Error> { + assert!(BaseDenom::from_str("").is_err(), "empty base denom"); + assert!(BaseDenom::from_str("uatom").is_ok(), "valid base denom"); + assert!(PrefixedDenom::from_str("").is_err(), "empty denom trace"); + assert!( + PrefixedDenom::from_str("transfer/channel-0/").is_err(), + "empty base denom with trace" + ); + assert!(PrefixedDenom::from_str("/uatom").is_err(), "empty prefix"); + assert!(PrefixedDenom::from_str("//uatom").is_err(), "empty ids"); + assert!( + PrefixedDenom::from_str("transfer/").is_err(), + "single trace" + ); + assert!( + PrefixedDenom::from_str("transfer/atom").is_err(), + "single trace with base denom" + ); + assert!( + PrefixedDenom::from_str("transfer/channel-0/uatom").is_ok(), + "valid single trace info" + ); + assert!( + PrefixedDenom::from_str("transfer/channel-0/transfer/channel-1/uatom").is_ok(), + "valid multiple trace info" + ); + assert!( + PrefixedDenom::from_str("(transfer)/channel-0/uatom").is_err(), + "invalid port" + ); + assert!( + PrefixedDenom::from_str("transfer/(channel-0)/uatom").is_err(), + "invalid channel" + ); + + Ok(()) + } + + #[test] + fn test_denom_trace() -> Result<(), Error> { + assert_eq!( + PrefixedDenom::from_str("transfer/channel-0/uatom")?, + PrefixedDenom { + trace_path: "transfer/channel-0".parse()?, + base_denom: "uatom".parse()? + }, + "valid single trace info" + ); + assert_eq!( + PrefixedDenom::from_str("transfer/channel-0/transfer/channel-1/uatom")?, + PrefixedDenom { + trace_path: "transfer/channel-0/transfer/channel-1".parse()?, + base_denom: "uatom".parse()? 
+ }, + "valid multiple trace info" + ); + + Ok(()) + } + + #[test] + fn test_denom_serde() -> Result<(), Error> { + let dt_str = "transfer/channel-0/uatom"; + let dt = PrefixedDenom::from_str(dt_str)?; + assert_eq!(dt.to_string(), dt_str, "valid single trace info"); + + let dt_str = "transfer/channel-0/transfer/channel-1/uatom"; + let dt = PrefixedDenom::from_str(dt_str)?; + assert_eq!(dt.to_string(), dt_str, "valid multiple trace info"); + + Ok(()) + } + + #[test] + fn test_trace_path() -> Result<(), Error> { + assert!(TracePath::from_str("").is_ok(), "empty trace path"); + assert!( + TracePath::from_str("transfer/uatom").is_err(), + "invalid trace path: bad ChannelId" + ); + assert!( + TracePath::from_str("transfer//uatom").is_err(), + "malformed trace path: missing ChannelId" + ); + assert!( + TracePath::from_str("transfer/channel-0/").is_err(), + "malformed trace path: trailing delimiter" + ); + + let prefix_1 = TracePrefix::new("transfer".parse().unwrap(), "channel-1".parse().unwrap()); + let prefix_2 = TracePrefix::new("transfer".parse().unwrap(), "channel-0".parse().unwrap()); + let mut trace_path = TracePath(vec![prefix_1.clone()]); + + trace_path.add_prefix(prefix_2.clone()); + assert_eq!( + TracePath::from_str("transfer/channel-0/transfer/channel-1")?, + trace_path + ); + assert_eq!( + TracePath(vec![prefix_1.clone(), prefix_2.clone()]), + trace_path + ); + + trace_path.remove_prefix(&prefix_2); + assert_eq!(TracePath::from_str("transfer/channel-1")?, trace_path); + assert_eq!(TracePath(vec![prefix_1.clone()]), trace_path); + + trace_path.remove_prefix(&prefix_1); + assert!(trace_path.is_empty()); + + Ok(()) + } +} diff --git a/modules/src/applications/transfer/error.rs b/modules/src/applications/transfer/error.rs new file mode 100644 index 0000000000..c9f71fdc33 --- /dev/null +++ b/modules/src/applications/transfer/error.rs @@ -0,0 +1,140 @@ +use alloc::string::FromUtf8Error; + +use flex_error::{define_error, DisplayOnly, TraceError}; +use subtle_encoding::Error as EncodingError; +use tendermint_proto::Error as TendermintProtoError; +use uint::FromStrRadixErr; + +use crate::core::ics04_channel::channel::Order; +use crate::core::ics04_channel::error as channel_error; +use crate::core::ics04_channel::Version; +use crate::core::ics24_host::error::ValidationError; +use crate::core::ics24_host::identifier::{ChannelId, PortId}; +use crate::prelude::*; +use crate::signer::SignerError; + +define_error! 
{ + #[derive(Debug, PartialEq, Eq)] + Error { + UnknowMessageTypeUrl + { url: String } + | e | { format_args!("unrecognized ICS-20 transfer message type URL {0}", e.url) }, + + Ics04Channel + [ channel_error::Error ] + |_ | { "Ics04 channel error" }, + + DestinationChannelNotFound + { port_id: PortId, channel_id: ChannelId } + | e | { format_args!("destination channel not found in the counterparty of port_id {0} and channel_id {1} ", e.port_id, e.channel_id) }, + + InvalidPortId + { context: String } + [ ValidationError ] + | _ | { "invalid port identifier" }, + + InvalidChannelId + { context: String } + [ ValidationError ] + | _ | { "invalid channel identifier" }, + + InvalidPacketTimeoutHeight + { context: String } + | _ | { "invalid packet timeout height value" }, + + InvalidPacketTimeoutTimestamp + { timestamp: u64 } + | _ | { "invalid packet timeout timestamp value" }, + + Utf8 + [ DisplayOnly ] + | _ | { "utf8 decoding error" }, + + EmptyBaseDenom + |_| { "base denomination is empty" }, + + InvalidTracePortId + { pos: usize } + [ ValidationError ] + | e | { format_args!("invalid port id in trace at position: {0}", e.pos) }, + + InvalidTraceChannelId + { pos: usize } + [ ValidationError ] + | e | { format_args!("invalid channel id in trace at position: {0}", e.pos) }, + + InvalidTraceLength + { len: usize } + | e | { format_args!("trace length must be even but got: {0}", e.len) }, + + InvalidAmount + [ TraceError ] + | _ | { "invalid amount" }, + + InvalidToken + | _ | { "invalid token" }, + + Signer + [ SignerError ] + | _ | { "failed to parse signer" }, + + MissingDenomIbcPrefix + | _ | { "missing 'ibc/' prefix in denomination" }, + + MalformedHashDenom + | _ | { "hashed denom must be of the form 'ibc/{Hash}'" }, + + ParseHex + [ TraceError ] + | _ | { "invalid hex string" }, + + ChanSeqExceedsLimit + { sequence: u64 } + | e | { format_args!("channel sequence ({0}) exceeds limit of {1}", e.sequence, u32::MAX) }, + + ChannelNotUnordered + { order: Order } + | e | { format_args!("expected '{0}' channel, got '{1}'", Order::Unordered, e.order) }, + + InvalidVersion + { version: Version } + | e | { format_args!("expected version '{0}', got '{1}'", Version::ics20(), e.version) }, + + InvalidCounterpartyVersion + { version: Version } + | e | { format_args!("expected counterparty version '{0}', got '{1}'", Version::ics20(), e.version) }, + + CantCloseChannel + | _ | { "channel cannot be closed" }, + + PacketDataDeserialization + | _ | { "failed to deserialize packet data" }, + + AckDeserialization + | _ | { "failed to deserialize acknowledgement" }, + + ReceiveDisabled + | _ | { "receive is not enabled" }, + + SendDisabled + | _ | { "send is not enabled" }, + + ParseAccountFailure + | _ | { "failed to parse as AccountId" }, + + InvalidPort + { port_id: PortId, exp_port_id: PortId } + | e | { format_args!("invalid port: '{0}', expected '{1}'", e.port_id, e.exp_port_id) }, + + TraceNotFound + | _ | { "no trace associated with specified hash" }, + + DecodeRawMsg + [ TraceError ] + | _ | { "error decoding raw msg" }, + + UnknownMsgType + { msg_type: String } + | e | { format_args!("unknown msg type: {0}", e.msg_type) }, + } +} diff --git a/modules/src/applications/transfer/events.rs b/modules/src/applications/transfer/events.rs new file mode 100644 index 0000000000..590638f448 --- /dev/null +++ b/modules/src/applications/transfer/events.rs @@ -0,0 +1,172 @@ +use crate::applications::transfer::acknowledgement::Acknowledgement; +use crate::applications::transfer::{Amount, PrefixedDenom, 
MODULE_ID_STR}; +use crate::events::ModuleEvent; +use crate::prelude::*; +use crate::signer::Signer; + +const EVENT_TYPE_PACKET: &str = "fungible_token_packet"; +const EVENT_TYPE_TIMEOUT: &str = "timeout"; +const EVENT_TYPE_DENOM_TRACE: &str = "denomination_trace"; +const EVENT_TYPE_TRANSFER: &str = "ibc_transfer"; + +pub enum Event { + Recv(RecvEvent), + Ack(AckEvent), + AckStatus(AckStatusEvent), + Timeout(TimeoutEvent), + DenomTrace(DenomTraceEvent), + Transfer(TransferEvent), +} + +pub struct RecvEvent { + pub receiver: Signer, + pub denom: PrefixedDenom, + pub amount: Amount, + pub success: bool, +} + +impl From for ModuleEvent { + fn from(ev: RecvEvent) -> Self { + let RecvEvent { + receiver, + denom, + amount, + success, + } = ev; + Self { + kind: EVENT_TYPE_PACKET.to_string(), + module_name: MODULE_ID_STR.parse().expect("invalid ModuleId"), + attributes: vec![ + ("receiver", receiver).into(), + ("denom", denom).into(), + ("amount", amount).into(), + ("success", success).into(), + ], + } + } +} + +pub struct AckEvent { + pub receiver: Signer, + pub denom: PrefixedDenom, + pub amount: Amount, + pub acknowledgement: Acknowledgement, +} + +impl From for ModuleEvent { + fn from(ev: AckEvent) -> Self { + let AckEvent { + receiver, + denom, + amount, + acknowledgement, + } = ev; + Self { + kind: EVENT_TYPE_PACKET.to_string(), + module_name: MODULE_ID_STR.parse().expect("invalid ModuleId"), + attributes: vec![ + ("receiver", receiver).into(), + ("denom", denom).into(), + ("amount", amount).into(), + ("acknowledgement", acknowledgement).into(), + ], + } + } +} + +pub struct AckStatusEvent { + pub acknowledgement: Acknowledgement, +} + +impl From for ModuleEvent { + fn from(ev: AckStatusEvent) -> Self { + let AckStatusEvent { acknowledgement } = ev; + let mut event = Self { + kind: EVENT_TYPE_PACKET.to_string(), + module_name: MODULE_ID_STR.parse().expect("invalid ModuleId"), + attributes: vec![], + }; + let attr_label = match acknowledgement { + Acknowledgement::Success(_) => "success", + Acknowledgement::Error(_) => "error", + }; + event + .attributes + .push((attr_label, acknowledgement.to_string()).into()); + event + } +} + +pub struct TimeoutEvent { + pub refund_receiver: Signer, + pub refund_denom: PrefixedDenom, + pub refund_amount: Amount, +} + +impl From for ModuleEvent { + fn from(ev: TimeoutEvent) -> Self { + let TimeoutEvent { + refund_receiver, + refund_denom, + refund_amount, + } = ev; + Self { + kind: EVENT_TYPE_TIMEOUT.to_string(), + module_name: MODULE_ID_STR.parse().expect("invalid ModuleId"), + attributes: vec![ + ("refund_receiver", refund_receiver).into(), + ("refund_denom", refund_denom).into(), + ("refund_amount", refund_amount).into(), + ], + } + } +} + +pub struct DenomTraceEvent { + pub trace_hash: Option, + pub denom: PrefixedDenom, +} + +impl From for ModuleEvent { + fn from(ev: DenomTraceEvent) -> Self { + let DenomTraceEvent { trace_hash, denom } = ev; + let mut ev = Self { + kind: EVENT_TYPE_DENOM_TRACE.to_string(), + module_name: MODULE_ID_STR.parse().expect("invalid ModuleId"), + attributes: vec![("denom", denom).into()], + }; + if let Some(hash) = trace_hash { + ev.attributes.push(("trace_hash", hash).into()); + } + ev + } +} + +pub struct TransferEvent { + pub sender: Signer, + pub receiver: Signer, +} + +impl From for ModuleEvent { + fn from(ev: TransferEvent) -> Self { + let TransferEvent { sender, receiver } = ev; + Self { + kind: EVENT_TYPE_TRANSFER.to_string(), + module_name: MODULE_ID_STR.parse().expect("invalid ModuleId"), + attributes: 
vec![("sender", sender).into(), ("receiver", receiver).into()], + } + } +} + +impl From for ModuleEvent { + fn from(ev: Event) -> Self { + match ev { + Event::Recv(ev) => ev.into(), + Event::Ack(ev) => ev.into(), + Event::AckStatus(ev) => ev.into(), + Event::Timeout(ev) => ev.into(), + Event::DenomTrace(ev) => ev.into(), + Event::Transfer(ev) => ev.into(), + } + } +} diff --git a/modules/src/applications/ics20_fungible_token_transfer/mod.rs b/modules/src/applications/transfer/mod.rs similarity index 66% rename from modules/src/applications/ics20_fungible_token_transfer/mod.rs rename to modules/src/applications/transfer/mod.rs index e28b4e7c55..1a8a87a89a 100644 --- a/modules/src/applications/ics20_fungible_token_transfer/mod.rs +++ b/modules/src/applications/transfer/mod.rs @@ -1,17 +1,23 @@ //! ICS 20: Token Transfer implementation allows for multi-chain denomination handling, which //! constitutes a "fungible token transfer bridge module" between the IBC routing module and an //! asset tracking module. +pub mod acknowledgement; pub mod context; +pub mod denom; pub mod error; +pub mod events; pub mod msgs; -pub mod relay_application_logic; +pub mod packet; +pub mod relay; -mod denom; pub use denom::*; +/// Module identifier for the ICS20 application. +pub const MODULE_ID_STR: &str = "transfer"; + /// The port identifier that the ICS20 applications /// typically bind with. -pub const PORT_ID: &str = "transfer"; +pub const PORT_ID_STR: &str = "transfer"; /// ICS20 application current version. pub const VERSION: &str = "ics20-1"; diff --git a/modules/src/applications/ics20_fungible_token_transfer/msgs.rs b/modules/src/applications/transfer/msgs.rs similarity index 100% rename from modules/src/applications/ics20_fungible_token_transfer/msgs.rs rename to modules/src/applications/transfer/msgs.rs diff --git a/modules/src/applications/ics20_fungible_token_transfer/msgs/transfer.rs b/modules/src/applications/transfer/msgs/transfer.rs similarity index 62% rename from modules/src/applications/ics20_fungible_token_transfer/msgs/transfer.rs rename to modules/src/applications/transfer/msgs/transfer.rs index 60170d2c73..ee9c8afd1f 100644 --- a/modules/src/applications/ics20_fungible_token_transfer/msgs/transfer.rs +++ b/modules/src/applications/transfer/msgs/transfer.rs @@ -2,11 +2,12 @@ use crate::prelude::*; +use ibc_proto::cosmos::base::v1beta1::Coin; +use ibc_proto::google::protobuf::Any; +use ibc_proto::ibc::applications::transfer::v1::MsgTransfer as RawMsgTransfer; use tendermint_proto::Protobuf; -use ibc_proto::ibc::apps::transfer::v1::MsgTransfer as RawMsgTransfer; - -use crate::applications::ics20_fungible_token_transfer::error::Error; +use crate::applications::transfer::error::Error; use crate::core::ics02_client::height::Height; use crate::core::ics24_host::identifier::{ChannelId, PortId}; use crate::signer::Signer; @@ -15,15 +16,21 @@ use crate::tx_msg::Msg; pub const TYPE_URL: &str = "/ibc.applications.transfer.v1.MsgTransfer"; -/// Message definition for the "packet receiving" datagram. +/// Message used to build an ICS20 token transfer packet. +/// +/// Note that this message is not a packet yet, as it lacks the proper sequence +/// number, and destination port/channel. This is by design. The sender of the +/// packet, which might be the user of a command line application, should only +/// have to specify the information related to the transfer of the token, and +/// let the library figure out how to build the packet properly. 
#[derive(Clone, Debug, PartialEq)] -pub struct MsgTransfer { +pub struct MsgTransfer { /// the port on which the packet will be sent pub source_port: PortId, /// the channel by which the packet will be sent pub source_channel: ChannelId, /// the tokens to be transferred - pub token: Option, + pub token: C, /// the sender address pub sender: Signer, /// the recipient address on the destination chain @@ -49,8 +56,6 @@ impl Msg for MsgTransfer { } } -impl Protobuf for MsgTransfer {} - impl TryFrom for MsgTransfer { type Error = Error; @@ -74,9 +79,9 @@ impl TryFrom for MsgTransfer { .source_channel .parse() .map_err(|e| Error::invalid_channel_id(raw_msg.source_channel.clone(), e))?, - token: raw_msg.token, - sender: raw_msg.sender.into(), - receiver: raw_msg.receiver.into(), + token: raw_msg.token.ok_or_else(Error::invalid_token)?, + sender: raw_msg.sender.parse().map_err(Error::signer)?, + receiver: raw_msg.receiver.parse().map_err(Error::signer)?, timeout_height, timeout_timestamp, }) @@ -88,7 +93,7 @@ impl From for RawMsgTransfer { RawMsgTransfer { source_port: domain_msg.source_port.to_string(), source_channel: domain_msg.source_channel.to_string(), - token: domain_msg.token, + token: Some(domain_msg.token), sender: domain_msg.sender.to_string(), receiver: domain_msg.receiver.to_string(), timeout_height: Some(domain_msg.timeout_height.into()), @@ -97,30 +102,59 @@ impl From for RawMsgTransfer { } } +impl Protobuf for MsgTransfer {} + +impl TryFrom for MsgTransfer { + type Error = Error; + + fn try_from(raw: Any) -> Result { + match raw.type_url.as_str() { + TYPE_URL => MsgTransfer::decode_vec(&raw.value).map_err(Error::decode_raw_msg), + _ => Err(Error::unknown_msg_type(raw.type_url)), + } + } +} + +impl From for Any { + fn from(msg: MsgTransfer) -> Self { + Self { + type_url: TYPE_URL.to_string(), + value: msg + .encode_vec() + .expect("encoding to `Any` from `MsgTranfer`"), + } + } +} + #[cfg(test)] pub mod test_util { use core::ops::Add; use core::time::Duration; + use super::MsgTransfer; + use crate::bigint::U256; + use crate::signer::Signer; use crate::{ + applications::transfer::{BaseCoin, PrefixedCoin}, core::ics24_host::identifier::{ChannelId, PortId}, - test_utils::get_dummy_account_id, + test_utils::get_dummy_bech32_account, timestamp::Timestamp, Height, }; - use super::MsgTransfer; - - // Returns a dummy `RawMsgTransfer`, for testing only! - pub fn get_dummy_msg_transfer(height: u64) -> MsgTransfer { - let id = get_dummy_account_id(); - + // Returns a dummy ICS20 `MsgTransfer`, for testing only! 
+ pub fn get_dummy_msg_transfer(height: u64) -> MsgTransfer { + let address: Signer = get_dummy_bech32_account().as_str().parse().unwrap(); MsgTransfer { source_port: PortId::default(), source_channel: ChannelId::default(), - token: None, - sender: id.clone(), - receiver: id, + token: BaseCoin { + denom: "uatom".parse().unwrap(), + amount: U256::from(10).into(), + } + .into(), + sender: address.clone(), + receiver: address, timeout_timestamp: Timestamp::now().add(Duration::from_secs(10)).unwrap(), timeout_height: Height { revision_number: 0, diff --git a/modules/src/applications/transfer/packet.rs b/modules/src/applications/transfer/packet.rs new file mode 100644 index 0000000000..643ee5e465 --- /dev/null +++ b/modules/src/applications/transfer/packet.rs @@ -0,0 +1,43 @@ +use alloc::string::ToString; +use core::convert::TryFrom; +use core::str::FromStr; + +use ibc_proto::ibc::applications::transfer::v2::FungibleTokenPacketData as RawPacketData; +use serde::{Deserialize, Serialize}; + +use super::error::Error; +use super::{Amount, PrefixedCoin, PrefixedDenom}; +use crate::signer::Signer; + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct PacketData { + pub token: PrefixedCoin, + pub sender: Signer, + pub receiver: Signer, +} + +impl TryFrom for PacketData { + type Error = Error; + + fn try_from(raw_pkt_data: RawPacketData) -> Result { + // This denom may be prefixed or unprefixed. + let denom = PrefixedDenom::from_str(&raw_pkt_data.denom)?; + let amount = Amount::from_str(&raw_pkt_data.amount)?; + Ok(Self { + token: PrefixedCoin { denom, amount }, + sender: raw_pkt_data.sender.parse().map_err(Error::signer)?, + receiver: raw_pkt_data.receiver.parse().map_err(Error::signer)?, + }) + } +} + +impl From for RawPacketData { + fn from(pkt_data: PacketData) -> Self { + Self { + denom: pkt_data.token.denom.to_string(), + amount: pkt_data.token.amount.to_string(), + sender: pkt_data.sender.to_string(), + receiver: pkt_data.receiver.to_string(), + } + } +} diff --git a/modules/src/applications/transfer/relay.rs b/modules/src/applications/transfer/relay.rs new file mode 100644 index 0000000000..63fb15103b --- /dev/null +++ b/modules/src/applications/transfer/relay.rs @@ -0,0 +1,40 @@ +//! This module implements the processing logic for ICS20 (token transfer) message. 
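For illustration only (the bech32 addresses are placeholders, and the names follow the imports in packet.rs above): the wire-level `FungibleTokenPacketData` converts into the domain `PacketData` via `TryFrom`, with the denomination parsed as a possibly-prefixed trace and the amount as a base-10 string:

fn packet_data_example() -> Result<PacketData, Error> {
    let raw = RawPacketData {
        denom: "transfer/channel-0/uatom".to_string(),
        amount: "100".to_string(),
        sender: "cosmos1aaa...".to_string(),   // placeholder sender address
        receiver: "cosmos1bbb...".to_string(), // placeholder receiver address
    };
    PacketData::try_from(raw)
}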
+use crate::applications::transfer::context::Ics20Context; +use crate::applications::transfer::error::Error as Ics20Error; +use crate::applications::transfer::is_sender_chain_source; +use crate::applications::transfer::packet::PacketData; +use crate::core::ics04_channel::packet::Packet; +use crate::prelude::*; + +pub mod on_ack_packet; +pub mod on_recv_packet; +pub mod on_timeout_packet; +pub mod send_transfer; + +fn refund_packet_token( + ctx: &mut impl Ics20Context, + packet: &Packet, + data: &PacketData, +) -> Result<(), Ics20Error> { + let sender = data + .sender + .clone() + .try_into() + .map_err(|_| Ics20Error::parse_account_failure())?; + + if is_sender_chain_source( + packet.source_port.clone(), + packet.source_channel, + &data.token.denom, + ) { + // unescrow tokens back to sender + let escrow_address = + ctx.get_channel_escrow_address(&packet.source_port, packet.source_channel)?; + + ctx.send_coins(&escrow_address, &sender, &data.token) + } + // mint vouchers back to sender + else { + ctx.mint_coins(&sender, &data.token) + } +} diff --git a/modules/src/applications/transfer/relay/on_ack_packet.rs b/modules/src/applications/transfer/relay/on_ack_packet.rs new file mode 100644 index 0000000000..a254de1fc9 --- /dev/null +++ b/modules/src/applications/transfer/relay/on_ack_packet.rs @@ -0,0 +1,19 @@ +use crate::applications::transfer::acknowledgement::Acknowledgement; +use crate::applications::transfer::context::Ics20Context; +use crate::applications::transfer::error::Error as Ics20Error; +use crate::applications::transfer::packet::PacketData; +use crate::applications::transfer::relay::refund_packet_token; +use crate::core::ics04_channel::packet::Packet; + +pub fn process_ack_packet( + ctx: &mut impl Ics20Context, + packet: &Packet, + data: &PacketData, + ack: &Acknowledgement, +) -> Result<(), Ics20Error> { + if matches!(ack, Acknowledgement::Error(_)) { + refund_packet_token(ctx, packet, data)?; + } + + Ok(()) +} diff --git a/modules/src/applications/transfer/relay/on_recv_packet.rs b/modules/src/applications/transfer/relay/on_recv_packet.rs new file mode 100644 index 0000000000..f6069ee10a --- /dev/null +++ b/modules/src/applications/transfer/relay/on_recv_packet.rs @@ -0,0 +1,68 @@ +use crate::applications::transfer::context::Ics20Context; +use crate::applications::transfer::error::Error as Ics20Error; +use crate::applications::transfer::events::DenomTraceEvent; +use crate::applications::transfer::packet::PacketData; +use crate::applications::transfer::{is_receiver_chain_source, TracePrefix}; +use crate::core::ics04_channel::packet::Packet; +use crate::core::ics26_routing::context::{ModuleOutputBuilder, WriteFn}; +use crate::prelude::*; + +pub fn process_recv_packet( + ctx: &Ctx, + output: &mut ModuleOutputBuilder, + packet: &Packet, + data: PacketData, +) -> Result, Ics20Error> { + if !ctx.is_receive_enabled() { + return Err(Ics20Error::receive_disabled()); + } + + let receiver_account = data + .receiver + .clone() + .try_into() + .map_err(|_| Ics20Error::parse_account_failure())?; + + if is_receiver_chain_source( + packet.source_port.clone(), + packet.source_channel, + &data.token.denom, + ) { + // sender chain is not the source, unescrow tokens + let prefix = TracePrefix::new(packet.source_port.clone(), packet.source_channel); + let coin = { + let mut c = data.token; + c.denom.remove_trace_prefix(&prefix); + c + }; + + let escrow_address = + ctx.get_channel_escrow_address(&packet.destination_port, packet.destination_channel)?; + + Ok(Box::new(move |ctx| { + let ctx = 
ctx.downcast_mut::().unwrap(); + ctx.send_coins(&escrow_address, &receiver_account, &coin) + .map_err(|e| e.to_string()) + })) + } else { + // sender chain is the source, mint vouchers + let prefix = TracePrefix::new(packet.destination_port.clone(), packet.destination_channel); + let coin = { + let mut c = data.token; + c.denom.add_trace_prefix(prefix); + c + }; + + let denom_trace_event = DenomTraceEvent { + trace_hash: ctx.denom_hash_string(&coin.denom), + denom: coin.denom.clone(), + }; + output.emit(denom_trace_event.into()); + + Ok(Box::new(move |ctx| { + let ctx = ctx.downcast_mut::().unwrap(); + ctx.mint_coins(&receiver_account, &coin) + .map_err(|e| e.to_string()) + })) + } +} diff --git a/modules/src/applications/transfer/relay/on_timeout_packet.rs b/modules/src/applications/transfer/relay/on_timeout_packet.rs new file mode 100644 index 0000000000..192a3dd9b6 --- /dev/null +++ b/modules/src/applications/transfer/relay/on_timeout_packet.rs @@ -0,0 +1,13 @@ +use crate::applications::transfer::context::Ics20Context; +use crate::applications::transfer::error::Error as Ics20Error; +use crate::applications::transfer::packet::PacketData; +use crate::applications::transfer::relay::refund_packet_token; +use crate::core::ics04_channel::packet::Packet; + +pub fn process_timeout_packet( + ctx: &mut impl Ics20Context, + packet: &Packet, + data: &PacketData, +) -> Result<(), Ics20Error> { + refund_packet_token(ctx, packet, data) +} diff --git a/modules/src/applications/transfer/relay/send_transfer.rs b/modules/src/applications/transfer/relay/send_transfer.rs new file mode 100644 index 0000000000..6925c6776c --- /dev/null +++ b/modules/src/applications/transfer/relay/send_transfer.rs @@ -0,0 +1,115 @@ +use crate::applications::transfer::context::Ics20Context; +use crate::applications::transfer::error::Error; +use crate::applications::transfer::events::TransferEvent; +use crate::applications::transfer::msgs::transfer::MsgTransfer; +use crate::applications::transfer::packet::PacketData; +use crate::applications::transfer::{is_sender_chain_source, Coin, PrefixedCoin}; +use crate::core::ics04_channel::handler::send_packet::send_packet; +use crate::core::ics04_channel::packet::Packet; +use crate::events::ModuleEvent; +use crate::handler::{HandlerOutput, HandlerOutputBuilder}; +use crate::prelude::*; + +/// This function handles the transfer sending logic. +/// If this method returns an error, the runtime is expected to rollback all state modifications to +/// the `Ctx` caused by all messages from the transaction that this `msg` is a part of. 
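A sketch (not part of the change) of how an embedding runtime might drive the `send_transfer` handler that follows, mirroring the `deliver` test helper earlier in this diff; `ctx` is any `Ics20Context` implementation and the message's coin type must convert into a `PrefixedCoin`:

fn deliver_transfer<Ctx: Ics20Context>(
    ctx: &mut Ctx,
    msg: MsgTransfer<PrefixedCoin>,
) -> Result<(), Error> {
    // Collects the logs and events emitted while the transfer packet is built.
    let mut output: HandlerOutputBuilder<()> = HandlerOutput::builder();
    send_transfer(ctx, &mut output, msg)
    // On success the escrow/burn has already been applied and the packet result stored;
    // on error the runtime is expected to roll back any state changes.
}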
+pub fn send_transfer( + ctx: &mut Ctx, + output: &mut HandlerOutputBuilder<()>, + msg: MsgTransfer, +) -> Result<(), Error> +where + Ctx: Ics20Context, + C: TryInto, +{ + if !ctx.is_send_enabled() { + return Err(Error::send_disabled()); + } + + let source_channel_end = ctx + .channel_end(&(msg.source_port.clone(), msg.source_channel)) + .map_err(Error::ics04_channel)?; + + let destination_port = source_channel_end.counterparty().port_id().clone(); + let destination_channel = *source_channel_end + .counterparty() + .channel_id() + .ok_or_else(|| { + Error::destination_channel_not_found(msg.source_port.clone(), msg.source_channel) + })?; + + // get the next sequence + let sequence = ctx + .get_next_sequence_send(&(msg.source_port.clone(), msg.source_channel)) + .map_err(Error::ics04_channel)?; + + let token = msg.token.try_into().map_err(|_| Error::invalid_token())?; + let denom = token.denom.clone(); + let coin = Coin { + denom: denom.clone(), + amount: token.amount, + }; + + let sender = msg + .sender + .clone() + .try_into() + .map_err(|_| Error::parse_account_failure())?; + + if is_sender_chain_source(msg.source_port.clone(), msg.source_channel, &denom) { + let escrow_address = + ctx.get_channel_escrow_address(&msg.source_port, msg.source_channel)?; + ctx.send_coins(&sender, &escrow_address, &coin)?; + } else { + ctx.burn_coins(&sender, &coin)?; + } + + let data = { + let data = PacketData { + token: coin, + sender: msg.sender.clone(), + receiver: msg.receiver.clone(), + }; + serde_json::to_vec(&data).expect("PacketData's infallible Serialize impl failed") + }; + + let packet = Packet { + sequence, + source_port: msg.source_port, + source_channel: msg.source_channel, + destination_port, + destination_channel, + data, + timeout_height: msg.timeout_height, + timeout_timestamp: msg.timeout_timestamp, + }; + + let HandlerOutput { + result, + log, + events, + } = send_packet(ctx, packet).map_err(Error::ics04_channel)?; + + ctx.store_packet_result(result) + .map_err(Error::ics04_channel)?; + + output.merge_output( + HandlerOutput::builder() + .with_log(log) + .with_events(events) + .with_result(()), + ); + + output.log(format!( + "IBC fungible token transfer: {} --({})--> {}", + msg.sender, token, msg.receiver + )); + + let transfer_event = TransferEvent { + sender: msg.sender, + receiver: msg.receiver, + }; + output.emit(ModuleEvent::from(transfer_event).into()); + + Ok(()) +} diff --git a/relayer/src/util/bigint.rs b/modules/src/bigint.rs similarity index 100% rename from relayer/src/util/bigint.rs rename to modules/src/bigint.rs diff --git a/modules/src/clients/ics07_tendermint/client_def.rs b/modules/src/clients/ics07_tendermint/client_def.rs index 8311d361af..8f3250d57f 100644 --- a/modules/src/clients/ics07_tendermint/client_def.rs +++ b/modules/src/clients/ics07_tendermint/client_def.rs @@ -31,7 +31,7 @@ use crate::core::ics24_host::path::{ ConnectionsPath, ReceiptsPath, SeqRecvsPath, }; use crate::core::ics24_host::Path; -use crate::core::ics26_routing::context::LightClientContext; +use crate::core::ics26_routing::context::ReaderContext; use crate::downcast; use crate::prelude::*; use crate::Height; @@ -48,7 +48,7 @@ impl ClientDef for TendermintClient { fn verify_header( &self, - ctx: &dyn LightClientContext, + ctx: &dyn ReaderContext, client_id: ClientId, client_state: Self::ClientState, header: Self::Header, @@ -137,7 +137,7 @@ impl ClientDef for TendermintClient { fn update_state( &self, - _ctx: &dyn LightClientContext, + _ctx: &dyn ReaderContext, _client_id: ClientId, 
client_state: Self::ClientState, header: Self::Header, @@ -161,7 +161,7 @@ impl ClientDef for TendermintClient { fn check_for_misbehaviour( &self, - ctx: &dyn LightClientContext, + ctx: &dyn ReaderContext, client_id: ClientId, client_state: Self::ClientState, header: Self::Header, @@ -242,7 +242,7 @@ impl ClientDef for TendermintClient { fn verify_client_consensus_state( &self, - _ctx: &dyn LightClientContext, + _ctx: &dyn ReaderContext, client_state: &Self::ClientState, height: Height, prefix: &CommitmentPrefix, @@ -267,7 +267,7 @@ impl ClientDef for TendermintClient { fn verify_connection_state( &self, - _ctx: &dyn LightClientContext, + _ctx: &dyn ReaderContext, _client_id: &ClientId, client_state: &Self::ClientState, height: Height, @@ -288,7 +288,7 @@ impl ClientDef for TendermintClient { fn verify_channel_state( &self, - _ctx: &dyn LightClientContext, + _ctx: &dyn ReaderContext, _client_id: &ClientId, client_state: &Self::ClientState, height: Height, @@ -310,7 +310,7 @@ impl ClientDef for TendermintClient { fn verify_client_full_state( &self, - _ctx: &dyn LightClientContext, + _ctx: &dyn ReaderContext, client_state: &Self::ClientState, height: Height, prefix: &CommitmentPrefix, @@ -330,7 +330,7 @@ impl ClientDef for TendermintClient { fn verify_packet_data( &self, - ctx: &dyn LightClientContext, + ctx: &dyn ReaderContext, _client_id: &ClientId, client_state: &Self::ClientState, height: Height, @@ -363,7 +363,7 @@ impl ClientDef for TendermintClient { fn verify_packet_acknowledgement( &self, - ctx: &dyn LightClientContext, + ctx: &dyn ReaderContext, _client_id: &ClientId, client_state: &Self::ClientState, height: Height, @@ -396,7 +396,7 @@ impl ClientDef for TendermintClient { fn verify_next_sequence_recv( &self, - ctx: &dyn LightClientContext, + ctx: &dyn ReaderContext, _client_id: &ClientId, client_state: &Self::ClientState, height: Height, @@ -428,7 +428,7 @@ impl ClientDef for TendermintClient { fn verify_packet_receipt_absence( &self, - ctx: &dyn LightClientContext, + ctx: &dyn ReaderContext, _client_id: &ClientId, client_state: &Self::ClientState, height: Height, @@ -512,7 +512,7 @@ fn verify_non_membership( } fn verify_delay_passed( - ctx: &dyn LightClientContext, + ctx: &dyn ReaderContext, height: Height, connection_end: &ConnectionEnd, ) -> Result<(), Ics02Error> { diff --git a/modules/src/clients/ics11_beefy/client_def.rs b/modules/src/clients/ics11_beefy/client_def.rs index 8f1d948a1e..d734b1cf3b 100644 --- a/modules/src/clients/ics11_beefy/client_def.rs +++ b/modules/src/clients/ics11_beefy/client_def.rs @@ -29,7 +29,7 @@ use crate::core::ics23_commitment::commitment::{ use crate::core::ics24_host::identifier::ConnectionId; use crate::core::ics24_host::identifier::{ChannelId, ClientId, PortId}; use crate::core::ics24_host::Path; -use crate::core::ics26_routing::context::LightClientContext; +use crate::core::ics26_routing::context::ReaderContext; use crate::prelude::*; use crate::Height; use core::marker::PhantomData; @@ -56,7 +56,7 @@ impl ClientDef for BeefyClient ClientDef for BeefyClient ClientDef for BeefyClient ClientDef for BeefyClient ClientDef for BeefyClient ClientDef for BeefyClient ClientDef for BeefyClient ClientDef for BeefyClient ClientDef for BeefyClient ClientDef for BeefyClient ClientDef for BeefyClient>( } fn verify_delay_passed( - ctx: &dyn LightClientContext, + ctx: &dyn ReaderContext, height: Height, connection_end: &ConnectionEnd, ) -> Result<(), Error> { diff --git a/modules/src/clients/ics13_near/client_def.rs 
b/modules/src/clients/ics13_near/client_def.rs index b8523d6450..a59f8e93bf 100644 --- a/modules/src/clients/ics13_near/client_def.rs +++ b/modules/src/clients/ics13_near/client_def.rs @@ -10,7 +10,7 @@ use crate::core::ics23_commitment::commitment::{ CommitmentPrefix, CommitmentProofBytes, CommitmentRoot, }; use crate::core::ics24_host::identifier::{ChannelId, ClientId, ConnectionId, PortId}; -use crate::core::ics26_routing::context::LightClientContext; +use crate::core::ics26_routing::context::ReaderContext; use crate::Height; use core::marker::PhantomData; @@ -54,7 +54,7 @@ impl ClientDef for NearClient { // rehydrate client from its own storage, then call this function fn verify_header( &self, - _ctx: &dyn LightClientContext, + _ctx: &dyn ReaderContext, _client_id: ClientId, client_state: Self::ClientState, header: Self::Header, @@ -65,7 +65,7 @@ impl ClientDef for NearClient { fn update_state( &self, - _ctx: &dyn LightClientContext, + _ctx: &dyn ReaderContext, _client_id: ClientId, _client_state: Self::ClientState, _header: Self::Header, @@ -93,7 +93,7 @@ impl ClientDef for NearClient { fn check_for_misbehaviour( &self, - _ctx: &dyn LightClientContext, + _ctx: &dyn ReaderContext, _client_id: ClientId, _client_state: Self::ClientState, _header: Self::Header, @@ -113,7 +113,7 @@ impl ClientDef for NearClient { fn verify_client_consensus_state( &self, - _ctx: &dyn LightClientContext, + _ctx: &dyn ReaderContext, _client_state: &Self::ClientState, _height: Height, _prefix: &CommitmentPrefix, @@ -129,7 +129,7 @@ impl ClientDef for NearClient { // Consensus state will be verified in the verification functions before these are called fn verify_connection_state( &self, - _ctx: &dyn LightClientContext, + _ctx: &dyn ReaderContext, _client_id: &ClientId, _client_state: &Self::ClientState, _height: Height, @@ -144,7 +144,7 @@ impl ClientDef for NearClient { fn verify_channel_state( &self, - _ctx: &dyn LightClientContext, + _ctx: &dyn ReaderContext, _client_id: &ClientId, _client_state: &Self::ClientState, _height: Height, @@ -160,7 +160,7 @@ impl ClientDef for NearClient { fn verify_client_full_state( &self, - _ctx: &dyn LightClientContext, + _ctx: &dyn ReaderContext, _client_state: &Self::ClientState, _height: Height, _prefix: &CommitmentPrefix, @@ -174,7 +174,7 @@ impl ClientDef for NearClient { fn verify_packet_data( &self, - _ctx: &dyn LightClientContext, + _ctx: &dyn ReaderContext, _client_id: &ClientId, _client_state: &Self::ClientState, _height: Height, @@ -191,7 +191,7 @@ impl ClientDef for NearClient { fn verify_packet_acknowledgement( &self, - _ctx: &dyn LightClientContext, + _ctx: &dyn ReaderContext, _client_id: &ClientId, _client_state: &Self::ClientState, _height: Height, @@ -208,7 +208,7 @@ impl ClientDef for NearClient { fn verify_next_sequence_recv( &self, - _ctx: &dyn LightClientContext, + _ctx: &dyn ReaderContext, _client_id: &ClientId, _client_state: &Self::ClientState, _height: Height, @@ -224,7 +224,7 @@ impl ClientDef for NearClient { fn verify_packet_receipt_absence( &self, - _ctx: &dyn LightClientContext, + _ctx: &dyn ReaderContext, _client_id: &ClientId, _client_state: &Self::ClientState, _height: Height, diff --git a/modules/src/core/ics02_client/client_def.rs b/modules/src/core/ics02_client/client_def.rs index ffe6c067ec..08470e7a29 100644 --- a/modules/src/core/ics02_client/client_def.rs +++ b/modules/src/core/ics02_client/client_def.rs @@ -14,7 +14,7 @@ use crate::core::ics23_commitment::commitment::{ CommitmentPrefix, CommitmentProofBytes, CommitmentRoot, }; use 
crate::core::ics24_host::identifier::{ChannelId, ClientId, ConnectionId, PortId}; -use crate::core::ics26_routing::context::LightClientContext; +use crate::core::ics26_routing::context::ReaderContext; use crate::downcast; use crate::prelude::*; use crate::Height; @@ -36,7 +36,7 @@ pub trait ClientDef: Clone { fn verify_header( &self, - ctx: &dyn LightClientContext, + ctx: &dyn ReaderContext, client_id: ClientId, client_state: Self::ClientState, header: Self::Header, @@ -44,7 +44,7 @@ pub trait ClientDef: Clone { fn update_state( &self, - ctx: &dyn LightClientContext, + ctx: &dyn ReaderContext, client_id: ClientId, client_state: Self::ClientState, header: Self::Header, @@ -58,7 +58,7 @@ pub trait ClientDef: Clone { fn check_for_misbehaviour( &self, - ctx: &dyn LightClientContext, + ctx: &dyn ReaderContext, client_id: ClientId, client_state: Self::ClientState, header: Self::Header, @@ -83,7 +83,7 @@ pub trait ClientDef: Clone { #[allow(clippy::too_many_arguments)] fn verify_client_consensus_state( &self, - ctx: &dyn LightClientContext, + ctx: &dyn ReaderContext, client_state: &Self::ClientState, height: Height, prefix: &CommitmentPrefix, @@ -98,7 +98,7 @@ pub trait ClientDef: Clone { #[allow(clippy::too_many_arguments)] fn verify_connection_state( &self, - ctx: &dyn LightClientContext, + ctx: &dyn ReaderContext, client_id: &ClientId, client_state: &Self::ClientState, height: Height, @@ -113,7 +113,7 @@ pub trait ClientDef: Clone { #[allow(clippy::too_many_arguments)] fn verify_channel_state( &self, - ctx: &dyn LightClientContext, + ctx: &dyn ReaderContext, client_id: &ClientId, client_state: &Self::ClientState, height: Height, @@ -129,7 +129,7 @@ pub trait ClientDef: Clone { #[allow(clippy::too_many_arguments)] fn verify_client_full_state( &self, - ctx: &dyn LightClientContext, + ctx: &dyn ReaderContext, client_state: &Self::ClientState, height: Height, prefix: &CommitmentPrefix, @@ -143,7 +143,7 @@ pub trait ClientDef: Clone { #[allow(clippy::too_many_arguments)] fn verify_packet_data( &self, - ctx: &dyn LightClientContext, + ctx: &dyn ReaderContext, client_id: &ClientId, client_state: &Self::ClientState, height: Height, @@ -160,7 +160,7 @@ pub trait ClientDef: Clone { #[allow(clippy::too_many_arguments)] fn verify_packet_acknowledgement( &self, - ctx: &dyn LightClientContext, + ctx: &dyn ReaderContext, client_id: &ClientId, client_state: &Self::ClientState, height: Height, @@ -177,7 +177,7 @@ pub trait ClientDef: Clone { #[allow(clippy::too_many_arguments)] fn verify_next_sequence_recv( &self, - ctx: &dyn LightClientContext, + ctx: &dyn ReaderContext, client_id: &ClientId, client_state: &Self::ClientState, height: Height, @@ -193,7 +193,7 @@ pub trait ClientDef: Clone { #[allow(clippy::too_many_arguments)] fn verify_packet_receipt_absence( &self, - ctx: &dyn LightClientContext, + ctx: &dyn ReaderContext, client_id: &ClientId, client_state: &Self::ClientState, height: Height, @@ -236,7 +236,7 @@ impl ClientDef for AnyClient ClientDef for AnyClient ClientDef for AnyClient ClientDef for AnyClient ClientDef for AnyClient ClientDef for AnyClient ClientDef for AnyClient ClientDef for AnyClient ClientDef for AnyClient ClientDef for AnyClient ClientDef for AnyClient ] | _ | { "invalid any client consensus state" }, + + Signer + [ SignerError ] + | _ | { "failed to parse signer" }, } } diff --git a/modules/src/core/ics02_client/handler.rs b/modules/src/core/ics02_client/handler.rs index 34fad217db..455bd51d3d 100644 --- a/modules/src/core/ics02_client/handler.rs +++ 
b/modules/src/core/ics02_client/handler.rs @@ -2,7 +2,7 @@ use crate::clients::host_functions::HostFunctionsProvider; use crate::core::ics02_client::error::Error; use crate::core::ics02_client::msgs::ClientMsg; -use crate::core::ics26_routing::context::LightClientContext; +use crate::core::ics26_routing::context::ReaderContext; use crate::handler::HandlerOutput; use core::fmt::Debug; @@ -23,7 +23,7 @@ pub fn dispatch( msg: ClientMsg, ) -> Result, Error> where - Ctx: LightClientContext, + Ctx: ReaderContext, HostFunctions: HostFunctionsProvider, { match msg { diff --git a/modules/src/core/ics02_client/handler/create_client.rs b/modules/src/core/ics02_client/handler/create_client.rs index 169864be05..41cb2bf136 100644 --- a/modules/src/core/ics02_client/handler/create_client.rs +++ b/modules/src/core/ics02_client/handler/create_client.rs @@ -1,6 +1,6 @@ //! Protocol logic specific to processing ICS2 messages of type `MsgCreateAnyClient`. -use crate::core::ics26_routing::context::LightClientContext; +use crate::core::ics26_routing::context::ReaderContext; use crate::prelude::*; use core::fmt::Debug; @@ -30,7 +30,7 @@ pub struct Result { } pub fn process( - ctx: &dyn LightClientContext, + ctx: &dyn ReaderContext, msg: MsgCreateAnyClient, ) -> HandlerResult { let mut output = HandlerOutput::builder(); diff --git a/modules/src/core/ics02_client/handler/update_client.rs b/modules/src/core/ics02_client/handler/update_client.rs index 3f1d3b98ac..472ebc9054 100644 --- a/modules/src/core/ics02_client/handler/update_client.rs +++ b/modules/src/core/ics02_client/handler/update_client.rs @@ -12,7 +12,7 @@ use crate::core::ics02_client::handler::ClientResult; use crate::core::ics02_client::height::Height; use crate::core::ics02_client::msgs::update_client::MsgUpdateAnyClient; use crate::core::ics24_host::identifier::ClientId; -use crate::core::ics26_routing::context::LightClientContext; +use crate::core::ics26_routing::context::ReaderContext; use crate::events::IbcEvent; use crate::handler::{HandlerOutput, HandlerResult}; use crate::prelude::*; @@ -30,7 +30,7 @@ pub struct Result { } pub fn process( - ctx: &dyn LightClientContext, + ctx: &dyn ReaderContext, msg: MsgUpdateAnyClient, ) -> HandlerResult { let mut output = HandlerOutput::builder(); @@ -473,9 +473,7 @@ mod tests { let block_ref = ctx_b.host_block(client_height); let latest_header: AnyHeader = match block_ref.cloned().map(Into::into).unwrap() { AnyHeader::Tendermint(mut theader) => { - let cons_state = ctx - .latest_consensus_states(&client_id, &client_height) - .clone(); + let cons_state = ctx.latest_consensus_states(&client_id, &client_height); if let AnyConsensusState::Tendermint(tcs) = cons_state { theader.signed_header.header.time = tcs.timestamp; theader.trusted_height = Height::new(1, 11) @@ -512,10 +510,7 @@ mod tests { Update(upd_res) => { assert_eq!(upd_res.client_id, client_id); assert!(!upd_res.client_state.is_frozen()); - assert_eq!( - upd_res.client_state, - ctx.latest_client_states(&client_id).clone() - ); + assert_eq!(upd_res.client_state, ctx.latest_client_states(&client_id)); assert_eq!(upd_res.client_state.latest_height(), msg.header.height(),) } _ => panic!("update handler result has incorrect type"), diff --git a/modules/src/core/ics02_client/handler/upgrade_client.rs b/modules/src/core/ics02_client/handler/upgrade_client.rs index 3cc7722c43..0ef3738593 100644 --- a/modules/src/core/ics02_client/handler/upgrade_client.rs +++ b/modules/src/core/ics02_client/handler/upgrade_client.rs @@ -8,7 +8,7 @@ use 
crate::core::ics02_client::events::Attributes; use crate::core::ics02_client::handler::ClientResult; use crate::core::ics02_client::msgs::upgrade_client::MsgUpgradeAnyClient; use crate::core::ics24_host::identifier::ClientId; -use crate::core::ics26_routing::context::LightClientContext; +use crate::core::ics26_routing::context::ReaderContext; use crate::events::IbcEvent; use crate::handler::{HandlerOutput, HandlerResult}; use crate::prelude::*; @@ -24,7 +24,7 @@ pub struct Result { } pub fn process( - ctx: &dyn LightClientContext, + ctx: &dyn ReaderContext, msg: MsgUpgradeAnyClient, ) -> HandlerResult { let mut output = HandlerOutput::builder(); diff --git a/modules/src/core/ics02_client/msgs/create_client.rs b/modules/src/core/ics02_client/msgs/create_client.rs index effd6bec72..c3018f4e47 100644 --- a/modules/src/core/ics02_client/msgs/create_client.rs +++ b/modules/src/core/ics02_client/msgs/create_client.rs @@ -78,7 +78,7 @@ impl TryFrom for MsgCreateAnyClient { MsgCreateAnyClient::new( AnyClientState::try_from(raw_client_state)?, consensus_state, - raw.signer.into(), + raw.signer.parse().map_err(Error::signer)?, ) } } diff --git a/modules/src/core/ics02_client/msgs/misbehavior.rs b/modules/src/core/ics02_client/msgs/misbehavior.rs index 6f0ff000e6..648aaf9d2f 100644 --- a/modules/src/core/ics02_client/msgs/misbehavior.rs +++ b/modules/src/core/ics02_client/msgs/misbehavior.rs @@ -1,8 +1,7 @@ use crate::prelude::*; -use tendermint_proto::Protobuf; - use ibc_proto::ibc::core::client::v1::MsgSubmitMisbehaviour as RawMsgSubmitMisbehaviour; +use tendermint_proto::Protobuf; use crate::core::ics02_client::error::Error; use crate::core::ics02_client::misbehaviour::AnyMisbehaviour; @@ -52,7 +51,7 @@ impl TryFrom for MsgSubmitAnyMisbehaviour { .parse() .map_err(Error::invalid_raw_misbehaviour)?, misbehaviour: AnyMisbehaviour::try_from(raw_misbehaviour)?, - signer: raw.signer.into(), + signer: raw.signer.parse().map_err(Error::signer)?, }) } } diff --git a/modules/src/core/ics02_client/msgs/update_client.rs b/modules/src/core/ics02_client/msgs/update_client.rs index b38875e5ae..b7cdc4c53d 100644 --- a/modules/src/core/ics02_client/msgs/update_client.rs +++ b/modules/src/core/ics02_client/msgs/update_client.rs @@ -60,7 +60,7 @@ impl TryFrom for MsgUpdateAnyClient { .parse() .map_err(Error::invalid_msg_update_client_id)?, header: AnyHeader::try_from(raw_header)?, - signer: raw.signer.into(), + signer: raw.signer.parse().map_err(Error::signer)?, }) } } diff --git a/modules/src/core/ics02_client/msgs/upgrade_client.rs b/modules/src/core/ics02_client/msgs/upgrade_client.rs index db7907dfed..fe5b94347c 100644 --- a/modules/src/core/ics02_client/msgs/upgrade_client.rs +++ b/modules/src/core/ics02_client/msgs/upgrade_client.rs @@ -93,7 +93,7 @@ impl TryFrom for MsgUpgradeAnyClient { consensus_state: AnyConsensusState::try_from(raw_consensus_state)?, proof_upgrade_client: proto_msg.proof_upgrade_client, proof_upgrade_consensus_state: proto_msg.proof_upgrade_consensus_state, - signer: proto_msg.signer.into(), + signer: Signer::from_str(proto_msg.signer.as_str()).map_err(Error::signer)?, }) } } diff --git a/modules/src/core/ics03_connection/error.rs b/modules/src/core/ics03_connection/error.rs index 689cc026c0..4c90f06c41 100644 --- a/modules/src/core/ics03_connection/error.rs +++ b/modules/src/core/ics03_connection/error.rs @@ -4,7 +4,9 @@ use crate::core::ics24_host::error::ValidationError; use crate::core::ics24_host::identifier::{ClientId, ConnectionId}; use crate::prelude::*; use 
crate::proofs::ProofError; +use crate::signer::SignerError; use crate::Height; + use flex_error::define_error; define_error! { @@ -91,7 +93,8 @@ define_error! { [ client_error::Error ] | _ | { "error verifying connnection state" }, - InvalidSigner + Signer + [ SignerError ] | _ | { "invalid signer" }, ConnectionNotFound diff --git a/modules/src/core/ics03_connection/handler.rs b/modules/src/core/ics03_connection/handler.rs index e3d596b3c7..154c1a3e6f 100644 --- a/modules/src/core/ics03_connection/handler.rs +++ b/modules/src/core/ics03_connection/handler.rs @@ -4,7 +4,7 @@ use crate::core::ics03_connection::connection::ConnectionEnd; use crate::core::ics03_connection::error::Error; use crate::core::ics03_connection::msgs::ConnectionMsg; use crate::core::ics24_host::identifier::ConnectionId; -use crate::core::ics26_routing::context::LightClientContext; +use crate::core::ics26_routing::context::ReaderContext; use crate::handler::HandlerOutput; use core::fmt::Debug; @@ -47,7 +47,7 @@ pub fn dispatch( msg: ConnectionMsg, ) -> Result, Error> where - Ctx: LightClientContext, + Ctx: ReaderContext, HostFunctions: HostFunctionsProvider, { match msg { diff --git a/modules/src/core/ics03_connection/handler/conn_open_ack.rs b/modules/src/core/ics03_connection/handler/conn_open_ack.rs index 1fd4fca6bd..7f1dd4a9da 100644 --- a/modules/src/core/ics03_connection/handler/conn_open_ack.rs +++ b/modules/src/core/ics03_connection/handler/conn_open_ack.rs @@ -9,13 +9,13 @@ use crate::core::ics03_connection::handler::verify::{ }; use crate::core::ics03_connection::handler::{ConnectionIdState, ConnectionResult}; use crate::core::ics03_connection::msgs::conn_open_ack::MsgConnectionOpenAck; -use crate::core::ics26_routing::context::LightClientContext; +use crate::core::ics26_routing::context::ReaderContext; use crate::events::IbcEvent; use crate::handler::{HandlerOutput, HandlerResult}; use crate::prelude::*; pub(crate) fn process( - ctx: &dyn LightClientContext, + ctx: &dyn ReaderContext, msg: MsgConnectionOpenAck, ) -> HandlerResult { let mut output = HandlerOutput::builder(); diff --git a/modules/src/core/ics03_connection/handler/conn_open_confirm.rs b/modules/src/core/ics03_connection/handler/conn_open_confirm.rs index aa196d60a5..d331820a37 100644 --- a/modules/src/core/ics03_connection/handler/conn_open_confirm.rs +++ b/modules/src/core/ics03_connection/handler/conn_open_confirm.rs @@ -7,13 +7,13 @@ use crate::core::ics03_connection::events::Attributes; use crate::core::ics03_connection::handler::verify::verify_proofs; use crate::core::ics03_connection::handler::{ConnectionIdState, ConnectionResult}; use crate::core::ics03_connection::msgs::conn_open_confirm::MsgConnectionOpenConfirm; -use crate::core::ics26_routing::context::LightClientContext; +use crate::core::ics26_routing::context::ReaderContext; use crate::events::IbcEvent; use crate::handler::{HandlerOutput, HandlerResult}; use crate::prelude::*; pub(crate) fn process( - ctx: &dyn LightClientContext, + ctx: &dyn ReaderContext, msg: MsgConnectionOpenConfirm, ) -> HandlerResult { let mut output = HandlerOutput::builder(); diff --git a/modules/src/core/ics03_connection/handler/conn_open_init.rs b/modules/src/core/ics03_connection/handler/conn_open_init.rs index 89b8efbb68..f2c2f386d7 100644 --- a/modules/src/core/ics03_connection/handler/conn_open_init.rs +++ b/modules/src/core/ics03_connection/handler/conn_open_init.rs @@ -6,13 +6,13 @@ use crate::core::ics03_connection::events::Attributes; use 
crate::core::ics03_connection::handler::{ConnectionIdState, ConnectionResult}; use crate::core::ics03_connection::msgs::conn_open_init::MsgConnectionOpenInit; use crate::core::ics24_host::identifier::ConnectionId; -use crate::core::ics26_routing::context::LightClientContext; +use crate::core::ics26_routing::context::ReaderContext; use crate::events::IbcEvent; use crate::handler::{HandlerOutput, HandlerResult}; use crate::prelude::*; pub(crate) fn process( - ctx: &dyn LightClientContext, + ctx: &dyn ReaderContext, msg: MsgConnectionOpenInit, ) -> HandlerResult { let mut output = HandlerOutput::builder(); diff --git a/modules/src/core/ics03_connection/handler/conn_open_try.rs b/modules/src/core/ics03_connection/handler/conn_open_try.rs index dfc0c9301a..e5f556c780 100644 --- a/modules/src/core/ics03_connection/handler/conn_open_try.rs +++ b/modules/src/core/ics03_connection/handler/conn_open_try.rs @@ -10,13 +10,13 @@ use crate::core::ics03_connection::handler::verify::{ use crate::core::ics03_connection::handler::{ConnectionIdState, ConnectionResult}; use crate::core::ics03_connection::msgs::conn_open_try::MsgConnectionOpenTry; use crate::core::ics24_host::identifier::ConnectionId; -use crate::core::ics26_routing::context::LightClientContext; +use crate::core::ics26_routing::context::ReaderContext; use crate::events::IbcEvent; use crate::handler::{HandlerOutput, HandlerResult}; use crate::prelude::*; pub(crate) fn process( - ctx: &dyn LightClientContext, + ctx: &dyn ReaderContext, msg: MsgConnectionOpenTry, ) -> HandlerResult { let mut output = HandlerOutput::builder(); diff --git a/modules/src/core/ics03_connection/handler/verify.rs b/modules/src/core/ics03_connection/handler/verify.rs index 2acc9544e3..66f9404f70 100644 --- a/modules/src/core/ics03_connection/handler/verify.rs +++ b/modules/src/core/ics03_connection/handler/verify.rs @@ -6,13 +6,13 @@ use crate::core::ics02_client::{client_def::AnyClient, client_def::ClientDef}; use crate::core::ics03_connection::connection::ConnectionEnd; use crate::core::ics03_connection::error::Error; use crate::core::ics23_commitment::commitment::CommitmentProofBytes; -use crate::core::ics26_routing::context::LightClientContext; +use crate::core::ics26_routing::context::ReaderContext; use crate::proofs::{ConsensusProof, Proofs}; use crate::Height; /// Entry point for verifying all proofs bundled in any ICS3 message. pub fn verify_proofs( - ctx: &dyn LightClientContext, + ctx: &dyn ReaderContext, client_state: Option, height: Height, connection_end: &ConnectionEnd, @@ -60,7 +60,7 @@ pub fn verify_proofs( /// claims to prove that an object of type connection exists on the source chain (i.e., the chain /// which created this proof). This object must match the state of `expected_conn`. pub fn verify_connection_proof( - ctx: &dyn LightClientContext, + ctx: &dyn ReaderContext, height: Height, connection_end: &ConnectionEnd, expected_conn: &ConnectionEnd, @@ -115,7 +115,7 @@ pub fn verify_connection_proof( /// at the same revision as the current chain, with matching chain identifiers, etc) and that the /// `proof` is correct. 
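Note on the recurring rename in these hunks: every handler and proof-verification helper that previously took `&dyn LightClientContext` now takes `&dyn ReaderContext`, which (per the `ics26_routing::context` hunk later in this diff) is simply a supertrait over the three reader traits. The sketch below is a minimal, self-contained illustration with simplified stand-in traits; the real `ClientReader`/`ConnectionReader`/`ChannelReader` expose many more methods, `host_height()` returns a `Height` rather than a `u64`, and the blanket impl is only for illustration.

```rust
// Simplified stand-ins for the crate's reader traits (assumed, not the real definitions).
trait ClientReader {
    fn host_height(&self) -> u64;
}
trait ConnectionReader {}
trait ChannelReader {}

// The rename applied throughout this diff: `LightClientContext` becomes
// `ReaderContext`, a supertrait bundling read-only access to host state.
trait ReaderContext: ClientReader + ConnectionReader + ChannelReader {}

// Illustrative blanket impl: anything implementing the three readers can be
// passed wherever a `&dyn ReaderContext` parameter appears.
impl<T: ClientReader + ConnectionReader + ChannelReader> ReaderContext for T {}

// Hypothetical helper mirroring the shape of `check_client_consensus_height`.
fn check_claimed_height(ctx: &dyn ReaderContext, claimed: u64) -> Result<(), String> {
    if claimed > ctx.host_height() {
        return Err("claimed consensus height is ahead of the host chain".into());
    }
    Ok(())
}
```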
pub fn verify_client_proof( - ctx: &dyn LightClientContext, + ctx: &dyn ReaderContext, height: Height, connection_end: &ConnectionEnd, expected_client_state: AnyClientState, @@ -154,7 +154,7 @@ pub fn verify_client_proof( } pub fn verify_consensus_proof( - ctx: &dyn LightClientContext, + ctx: &dyn ReaderContext, height: Height, connection_end: &ConnectionEnd, proof: &ConsensusProof, @@ -197,7 +197,7 @@ pub fn verify_consensus_proof( /// Checks that `claimed_height` is within normal bounds, i.e., fresh enough so that the chain has /// not pruned it yet, but not newer than the current (actual) height of the local chain. pub fn check_client_consensus_height( - ctx: &dyn LightClientContext, + ctx: &dyn ReaderContext, claimed_height: Height, ) -> Result<(), Error> { if claimed_height > ctx.host_height() { diff --git a/modules/src/core/ics03_connection/msgs/conn_open_ack.rs b/modules/src/core/ics03_connection/msgs/conn_open_ack.rs index 0e70132e4a..60a42712ff 100644 --- a/modules/src/core/ics03_connection/msgs/conn_open_ack.rs +++ b/modules/src/core/ics03_connection/msgs/conn_open_ack.rs @@ -99,7 +99,7 @@ impl TryFrom for MsgConnectionOpenAck { proof_height, ) .map_err(Error::invalid_proof)?, - signer: msg.signer.into(), + signer: msg.signer.parse().map_err(Error::signer)?, }) } } diff --git a/modules/src/core/ics03_connection/msgs/conn_open_confirm.rs b/modules/src/core/ics03_connection/msgs/conn_open_confirm.rs index 3044a6b8f2..8a15be9860 100644 --- a/modules/src/core/ics03_connection/msgs/conn_open_confirm.rs +++ b/modules/src/core/ics03_connection/msgs/conn_open_confirm.rs @@ -59,7 +59,7 @@ impl TryFrom for MsgConnectionOpenConfirm { proof_height, ) .map_err(Error::invalid_proof)?, - signer: msg.signer.into(), + signer: msg.signer.parse().map_err(Error::signer)?, }) } } diff --git a/modules/src/core/ics03_connection/msgs/conn_open_init.rs b/modules/src/core/ics03_connection/msgs/conn_open_init.rs index 7ce701e946..196fbcf119 100644 --- a/modules/src/core/ics03_connection/msgs/conn_open_init.rs +++ b/modules/src/core/ics03_connection/msgs/conn_open_init.rs @@ -53,7 +53,7 @@ impl TryFrom for MsgConnectionOpenInit { .try_into()?, version: msg.version.map(|version| version.try_into()).transpose()?, delay_period: Duration::from_nanos(msg.delay_period), - signer: msg.signer.into(), + signer: msg.signer.parse().map_err(Error::signer)?, }) } } diff --git a/modules/src/core/ics03_connection/msgs/conn_open_try.rs b/modules/src/core/ics03_connection/msgs/conn_open_try.rs index 3bdad32bba..4709733489 100644 --- a/modules/src/core/ics03_connection/msgs/conn_open_try.rs +++ b/modules/src/core/ics03_connection/msgs/conn_open_try.rs @@ -126,7 +126,7 @@ impl TryFrom for MsgConnectionOpenTry { ) .map_err(Error::invalid_proof)?, delay_period: Duration::from_nanos(msg.delay_period), - signer: msg.signer.into(), + signer: msg.signer.parse().map_err(Error::signer)?, }) } } diff --git a/modules/src/core/ics04_channel/commitment.rs b/modules/src/core/ics04_channel/commitment.rs index a6e981eb4e..100a989627 100644 --- a/modules/src/core/ics04_channel/commitment.rs +++ b/modules/src/core/ics04_channel/commitment.rs @@ -1,7 +1,9 @@ use crate::prelude::*; +use serde_derive::{Deserialize, Serialize}; + /// Packet commitment -#[derive(Clone, Debug, PartialEq, Eq)] +#[derive(Clone, Debug, PartialEq, Eq, Deserialize, Serialize)] pub struct PacketCommitment(Vec); impl PacketCommitment { @@ -17,7 +19,7 @@ impl From> for PacketCommitment { } /// Acknowledgement commitment to be stored -#[derive(Clone, Debug, PartialEq, Eq)] 
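The signer changes in the message decoders follow one pattern: the raw proto string is parsed into a `Signer` (a fallible conversion mapped through the new `Error::signer` variant) instead of being converted infallibly with `.into()`. The sketch below uses assumed, simplified stand-ins for `crate::signer::{Signer, SignerError}`; the real types may validate differently, though the switch of the "Empty signer" test fixtures to `get_dummy_bech32_account()` later in this diff suggests empty input is now rejected.

```rust
use core::str::FromStr;

// Assumed stand-ins for `crate::signer::{Signer, SignerError}`.
#[derive(Clone, Debug, PartialEq, Eq)]
struct Signer(String);

#[derive(Debug)]
struct SignerError(&'static str);

impl FromStr for Signer {
    type Err = SignerError;

    // Illustrative validation only: reject empty/whitespace input.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        if s.trim().is_empty() {
            Err(SignerError("signer cannot be empty"))
        } else {
            Ok(Signer(s.to_string()))
        }
    }
}

fn decode_signer(raw_signer: String) -> Result<Signer, SignerError> {
    // The pattern used by the `TryFrom<Raw*>` impls in this diff:
    // `raw.signer.parse().map_err(Error::signer)?` instead of `raw.signer.into()`.
    raw_signer.parse()
}
```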
+#[derive(Clone, Debug, PartialEq, Eq, Deserialize, Serialize)] pub struct AcknowledgementCommitment(Vec); impl AcknowledgementCommitment { diff --git a/modules/src/core/ics04_channel/context.rs b/modules/src/core/ics04_channel/context.rs index 13f2df1ffc..0cd1aff12f 100644 --- a/modules/src/core/ics04_channel/context.rs +++ b/modules/src/core/ics04_channel/context.rs @@ -15,7 +15,7 @@ use crate::prelude::*; use crate::timestamp::Timestamp; use crate::Height; -use super::packet::{PacketResult, Sequence}; +use super::packet::{Packet, PacketResult, Sequence}; /// A context supplying all the necessary read-only dependencies for processing any `ChannelMsg`. pub trait ChannelReader { @@ -71,7 +71,7 @@ pub trait ChannelReader { self.hash(ack.into_bytes()).into() } - /// A hashing function for packet commitments + /// A Sha2_256 hashing function fn hash(&self, value: Vec) -> Vec; /// Returns the time when the client state for the given [`ClientId`] was updated with a header for the given [`Height`] @@ -137,6 +137,8 @@ pub trait ChannelKeeper { (res.port_id.clone(), res.channel_id, res.seq), res.commitment, )?; + + self.store_packet((res.port_id.clone(), res.channel_id, res.seq), res.packet)?; } PacketResult::Recv(res) => { let res = match res { @@ -199,6 +201,13 @@ pub trait ChannelKeeper { commitment: PacketCommitment, ) -> Result<(), Error>; + /// Allow implementers to optionally store packet in storage + fn store_packet( + &mut self, + key: (PortId, ChannelId, Sequence), + packet: Packet, + ) -> Result<(), Error>; + fn delete_packet_commitment(&mut self, key: (PortId, ChannelId, Sequence)) -> Result<(), Error>; diff --git a/modules/src/core/ics04_channel/error.rs b/modules/src/core/ics04_channel/error.rs index edd14f54b7..b4473966e3 100644 --- a/modules/src/core/ics04_channel/error.rs +++ b/modules/src/core/ics04_channel/error.rs @@ -7,6 +7,7 @@ use crate::core::ics24_host::error::ValidationError; use crate::core::ics24_host::identifier::{ChannelId, ClientId, ConnectionId, PortId}; use crate::prelude::*; use crate::proofs::ProofError; +use crate::signer::SignerError; use crate::timestamp::Timestamp; use crate::Height; @@ -60,7 +61,8 @@ define_error! { [ TraceError ] | _ | { "invalid version" }, - InvalidSigner + Signer + [ SignerError ] | _ | { "invalid signer address" }, InvalidProof @@ -342,6 +344,14 @@ define_error! 
{ ImplementationSpecific { reason: String } | e | { format_args!("implementation specific error: {}", e.reason) }, + + AppModule + { description: String } + | e | { + format_args!( + "application module error: {0}", + e.description) + }, } } diff --git a/modules/src/core/ics04_channel/events.rs b/modules/src/core/ics04_channel/events.rs index 63a8c81cae..35b4865ef6 100644 --- a/modules/src/core/ics04_channel/events.rs +++ b/modules/src/core/ics04_channel/events.rs @@ -850,6 +850,18 @@ impl From for IbcEvent { } } +impl TryFrom for AbciEvent { + type Error = Error; + + fn try_from(v: ReceivePacket) -> Result { + let attributes = Vec::::try_from(v.packet)?; + Ok(AbciEvent { + type_str: IbcEventType::ReceivePacket.as_str().to_string(), + attributes, + }) + } +} + impl core::fmt::Display for ReceivePacket { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { write!(f, "ReceivePacket - h:{}, {}", self.height, self.packet) @@ -1066,6 +1078,18 @@ impl From for IbcEvent { } } +impl TryFrom for AbciEvent { + type Error = Error; + + fn try_from(v: TimeoutOnClosePacket) -> Result { + let attributes = Vec::::try_from(v.packet)?; + Ok(AbciEvent { + type_str: IbcEventType::TimeoutOnClose.as_str().to_string(), + attributes, + }) + } +} + impl core::fmt::Display for TimeoutOnClosePacket { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { write!( diff --git a/modules/src/core/ics04_channel/handler.rs b/modules/src/core/ics04_channel/handler.rs index 3f69cdbf49..bb0a789a23 100644 --- a/modules/src/core/ics04_channel/handler.rs +++ b/modules/src/core/ics04_channel/handler.rs @@ -7,7 +7,7 @@ use crate::core::ics04_channel::msgs::ChannelMsg; use crate::core::ics04_channel::{msgs::PacketMsg, packet::PacketResult}; use crate::core::ics24_host::identifier::{ChannelId, PortId}; use crate::core::ics26_routing::context::{ - Ics26Context, LightClientContext, ModuleId, ModuleOutput, OnRecvPacketAck, Router, + Ics26Context, ModuleId, ModuleOutputBuilder, OnRecvPacketAck, ReaderContext, Router, }; use crate::handler::{HandlerOutput, HandlerOutputBuilder}; use core::fmt::Debug; @@ -64,7 +64,7 @@ pub fn channel_dispatch( msg: &ChannelMsg, ) -> Result<(HandlerOutputBuilder<()>, ChannelResult), Error> where - Ctx: LightClientContext, + Ctx: ReaderContext, HostFunctions: HostFunctionsProvider, { let output = match msg { @@ -93,7 +93,7 @@ pub fn channel_callback( module_id: &ModuleId, msg: &ChannelMsg, mut result: ChannelResult, - module_output: &mut ModuleOutput, + module_output: &mut ModuleOutputBuilder, ) -> Result where Ctx: Ics26Context, @@ -121,6 +121,7 @@ where &msg.port_id, &result.channel_id, msg.channel.counterparty(), + msg.channel.version(), &msg.counterparty_version, )?; result.channel_end.version = version; @@ -176,7 +177,7 @@ pub fn packet_dispatch( msg: &PacketMsg, ) -> Result<(HandlerOutputBuilder<()>, PacketResult), Error> where - Ctx: LightClientContext, + Ctx: ReaderContext, HostFunctions: HostFunctionsProvider, { let output = match msg { @@ -198,7 +199,7 @@ pub fn packet_callback( ctx: &mut Ctx, module_id: &ModuleId, msg: &PacketMsg, - module_output: &mut ModuleOutput, + module_output: &mut ModuleOutputBuilder, ) -> Result<(), Error> where Ctx: Ics26Context, @@ -213,7 +214,7 @@ where let result = cb.on_recv_packet(module_output, &msg.packet, &msg.signer); match result { OnRecvPacketAck::Nil(write_fn) | OnRecvPacketAck::Successful(_, write_fn) => { - write_fn(cb.as_any_mut()); + write_fn(cb.as_any_mut()).map_err(Error::app_module)?; } 
OnRecvPacketAck::Failed(_) => {} } diff --git a/modules/src/core/ics04_channel/handler/acknowledgement.rs b/modules/src/core/ics04_channel/handler/acknowledgement.rs index d316e2a703..ccdc305a55 100644 --- a/modules/src/core/ics04_channel/handler/acknowledgement.rs +++ b/modules/src/core/ics04_channel/handler/acknowledgement.rs @@ -8,7 +8,7 @@ use crate::core::ics04_channel::handler::verify::verify_packet_acknowledgement_p use crate::core::ics04_channel::msgs::acknowledgement::MsgAcknowledgement; use crate::core::ics04_channel::packet::{PacketResult, Sequence}; use crate::core::ics24_host::identifier::{ChannelId, PortId}; -use crate::core::ics26_routing::context::LightClientContext; +use crate::core::ics26_routing::context::ReaderContext; use crate::events::IbcEvent; use crate::handler::{HandlerOutput, HandlerResult}; use crate::prelude::*; @@ -23,7 +23,7 @@ pub struct AckPacketResult { } pub fn process( - ctx: &dyn LightClientContext, + ctx: &dyn ReaderContext, msg: &MsgAcknowledgement, ) -> HandlerResult { let mut output = HandlerOutput::builder(); diff --git a/modules/src/core/ics04_channel/handler/chan_close_confirm.rs b/modules/src/core/ics04_channel/handler/chan_close_confirm.rs index 308d1e90bb..54bdcbef43 100644 --- a/modules/src/core/ics04_channel/handler/chan_close_confirm.rs +++ b/modules/src/core/ics04_channel/handler/chan_close_confirm.rs @@ -8,13 +8,13 @@ use crate::core::ics04_channel::events::Attributes; use crate::core::ics04_channel::handler::verify::verify_channel_proofs; use crate::core::ics04_channel::handler::{ChannelIdState, ChannelResult}; use crate::core::ics04_channel::msgs::chan_close_confirm::MsgChannelCloseConfirm; -use crate::core::ics26_routing::context::LightClientContext; +use crate::core::ics26_routing::context::ReaderContext; use crate::events::IbcEvent; use crate::handler::{HandlerOutput, HandlerResult}; use crate::prelude::*; pub(crate) fn process( - ctx: &dyn LightClientContext, + ctx: &dyn ReaderContext, msg: &MsgChannelCloseConfirm, ) -> HandlerResult { let mut output = HandlerOutput::builder(); diff --git a/modules/src/core/ics04_channel/handler/chan_close_init.rs b/modules/src/core/ics04_channel/handler/chan_close_init.rs index 1cfd416536..8abfd0e339 100644 --- a/modules/src/core/ics04_channel/handler/chan_close_init.rs +++ b/modules/src/core/ics04_channel/handler/chan_close_init.rs @@ -6,12 +6,12 @@ use crate::core::ics04_channel::error::Error; use crate::core::ics04_channel::events::Attributes; use crate::core::ics04_channel::handler::{ChannelIdState, ChannelResult}; use crate::core::ics04_channel::msgs::chan_close_init::MsgChannelCloseInit; -use crate::core::ics26_routing::context::LightClientContext; +use crate::core::ics26_routing::context::ReaderContext; use crate::events::IbcEvent; use crate::handler::{HandlerOutput, HandlerResult}; pub(crate) fn process( - ctx: &dyn LightClientContext, + ctx: &dyn ReaderContext, msg: &MsgChannelCloseInit, ) -> HandlerResult { let mut output = HandlerOutput::builder(); diff --git a/modules/src/core/ics04_channel/handler/chan_open_ack.rs b/modules/src/core/ics04_channel/handler/chan_open_ack.rs index d98316303a..7138a55a66 100644 --- a/modules/src/core/ics04_channel/handler/chan_open_ack.rs +++ b/modules/src/core/ics04_channel/handler/chan_open_ack.rs @@ -7,13 +7,13 @@ use crate::core::ics04_channel::events::Attributes; use crate::core::ics04_channel::handler::verify::verify_channel_proofs; use crate::core::ics04_channel::handler::{ChannelIdState, ChannelResult}; use 
crate::core::ics04_channel::msgs::chan_open_ack::MsgChannelOpenAck; -use crate::core::ics26_routing::context::LightClientContext; +use crate::core::ics26_routing::context::ReaderContext; use crate::events::IbcEvent; use crate::handler::{HandlerOutput, HandlerResult}; use crate::prelude::*; pub(crate) fn process( - ctx: &dyn LightClientContext, + ctx: &dyn ReaderContext, msg: &MsgChannelOpenAck, ) -> HandlerResult { let mut output = HandlerOutput::builder(); diff --git a/modules/src/core/ics04_channel/handler/chan_open_confirm.rs b/modules/src/core/ics04_channel/handler/chan_open_confirm.rs index 7704959d67..f605500d50 100644 --- a/modules/src/core/ics04_channel/handler/chan_open_confirm.rs +++ b/modules/src/core/ics04_channel/handler/chan_open_confirm.rs @@ -7,13 +7,13 @@ use crate::core::ics04_channel::events::Attributes; use crate::core::ics04_channel::handler::verify::verify_channel_proofs; use crate::core::ics04_channel::handler::{ChannelIdState, ChannelResult}; use crate::core::ics04_channel::msgs::chan_open_confirm::MsgChannelOpenConfirm; -use crate::core::ics26_routing::context::LightClientContext; +use crate::core::ics26_routing::context::ReaderContext; use crate::events::IbcEvent; use crate::handler::{HandlerOutput, HandlerResult}; use crate::prelude::*; pub(crate) fn process( - ctx: &dyn LightClientContext, + ctx: &dyn ReaderContext, msg: &MsgChannelOpenConfirm, ) -> HandlerResult { let mut output = HandlerOutput::builder(); diff --git a/modules/src/core/ics04_channel/handler/chan_open_init.rs b/modules/src/core/ics04_channel/handler/chan_open_init.rs index 4a1e9ecc7a..4c7af9e743 100644 --- a/modules/src/core/ics04_channel/handler/chan_open_init.rs +++ b/modules/src/core/ics04_channel/handler/chan_open_init.rs @@ -6,13 +6,13 @@ use crate::core::ics04_channel::events::Attributes; use crate::core::ics04_channel::handler::{ChannelIdState, ChannelResult}; use crate::core::ics04_channel::msgs::chan_open_init::MsgChannelOpenInit; use crate::core::ics24_host::identifier::ChannelId; -use crate::core::ics26_routing::context::LightClientContext; +use crate::core::ics26_routing::context::ReaderContext; use crate::events::IbcEvent; use crate::handler::{HandlerOutput, HandlerResult}; use crate::prelude::*; pub(crate) fn process( - ctx: &dyn LightClientContext, + ctx: &dyn ReaderContext, msg: &MsgChannelOpenInit, ) -> HandlerResult { let mut output = HandlerOutput::builder(); diff --git a/modules/src/core/ics04_channel/handler/chan_open_try.rs b/modules/src/core/ics04_channel/handler/chan_open_try.rs index 810cc4c3bc..2564486364 100644 --- a/modules/src/core/ics04_channel/handler/chan_open_try.rs +++ b/modules/src/core/ics04_channel/handler/chan_open_try.rs @@ -9,13 +9,13 @@ use crate::core::ics04_channel::handler::verify::verify_channel_proofs; use crate::core::ics04_channel::handler::{ChannelIdState, ChannelResult}; use crate::core::ics04_channel::msgs::chan_open_try::MsgChannelOpenTry; use crate::core::ics24_host::identifier::ChannelId; -use crate::core::ics26_routing::context::LightClientContext; +use crate::core::ics26_routing::context::ReaderContext; use crate::events::IbcEvent; use crate::handler::{HandlerOutput, HandlerResult}; use crate::prelude::*; pub(crate) fn process( - ctx: &dyn LightClientContext, + ctx: &dyn ReaderContext, msg: &MsgChannelOpenTry, ) -> HandlerResult { let mut output = HandlerOutput::builder(); diff --git a/modules/src/core/ics04_channel/handler/recv_packet.rs b/modules/src/core/ics04_channel/handler/recv_packet.rs index 8d04445504..8ac7ae0c71 100644 --- 
a/modules/src/core/ics04_channel/handler/recv_packet.rs +++ b/modules/src/core/ics04_channel/handler/recv_packet.rs @@ -7,7 +7,7 @@ use crate::core::ics04_channel::handler::verify::verify_packet_recv_proofs; use crate::core::ics04_channel::msgs::recv_packet::MsgRecvPacket; use crate::core::ics04_channel::packet::{PacketResult, Receipt, Sequence}; use crate::core::ics24_host::identifier::{ChannelId, PortId}; -use crate::core::ics26_routing::context::LightClientContext; +use crate::core::ics26_routing::context::ReaderContext; use crate::events::IbcEvent; use crate::handler::{HandlerOutput, HandlerResult}; use crate::prelude::*; @@ -31,7 +31,7 @@ pub enum RecvPacketResult { } pub fn process( - ctx: &dyn LightClientContext, + ctx: &dyn ReaderContext, msg: &MsgRecvPacket, ) -> HandlerResult { let mut output = HandlerOutput::builder(); diff --git a/modules/src/core/ics04_channel/handler/send_packet.rs b/modules/src/core/ics04_channel/handler/send_packet.rs index bf0b34436d..ca484371f8 100644 --- a/modules/src/core/ics04_channel/handler/send_packet.rs +++ b/modules/src/core/ics04_channel/handler/send_packet.rs @@ -6,7 +6,7 @@ use crate::core::ics04_channel::events::SendPacket; use crate::core::ics04_channel::packet::{PacketResult, Sequence}; use crate::core::ics04_channel::{error::Error, packet::Packet}; use crate::core::ics24_host::identifier::{ChannelId, PortId}; -use crate::core::ics26_routing::context::LightClientContext; +use crate::core::ics26_routing::context::ReaderContext; use crate::events::IbcEvent; use crate::handler::{HandlerOutput, HandlerResult}; use crate::prelude::*; @@ -19,12 +19,10 @@ pub struct SendPacketResult { pub seq: Sequence, pub seq_number: Sequence, pub commitment: PacketCommitment, + pub packet: Packet, } -pub fn send_packet( - ctx: &dyn LightClientContext, - packet: Packet, -) -> HandlerResult { +pub fn send_packet(ctx: &dyn ReaderContext, packet: Packet) -> HandlerResult { let mut output = HandlerOutput::builder(); let source_channel_end = @@ -96,6 +94,7 @@ pub fn send_packet( channel_id: packet.source_channel, seq: packet.sequence, seq_number: next_seq_send.increment(), + packet: packet.clone(), commitment: ctx.packet_commitment( packet.data.clone(), packet.timeout_height, diff --git a/modules/src/core/ics04_channel/handler/timeout.rs b/modules/src/core/ics04_channel/handler/timeout.rs index cf7c08b015..5f2e7b4480 100644 --- a/modules/src/core/ics04_channel/handler/timeout.rs +++ b/modules/src/core/ics04_channel/handler/timeout.rs @@ -9,7 +9,7 @@ use crate::core::ics04_channel::handler::verify::{ use crate::core::ics04_channel::msgs::timeout::MsgTimeout; use crate::core::ics04_channel::packet::{PacketResult, Sequence}; use crate::core::ics24_host::identifier::{ChannelId, PortId}; -use crate::core::ics26_routing::context::LightClientContext; +use crate::core::ics26_routing::context::ReaderContext; use crate::events::IbcEvent; use crate::handler::{HandlerOutput, HandlerResult}; use crate::prelude::*; @@ -25,7 +25,7 @@ pub struct TimeoutPacketResult { } pub fn process( - ctx: &dyn LightClientContext, + ctx: &dyn ReaderContext, msg: &MsgTimeout, ) -> HandlerResult { let mut output = HandlerOutput::builder(); diff --git a/modules/src/core/ics04_channel/handler/timeout_on_close.rs b/modules/src/core/ics04_channel/handler/timeout_on_close.rs index 2be21ade6c..3b194a395f 100644 --- a/modules/src/core/ics04_channel/handler/timeout_on_close.rs +++ b/modules/src/core/ics04_channel/handler/timeout_on_close.rs @@ -9,13 +9,13 @@ use crate::core::ics04_channel::handler::verify::{ 
use crate::core::ics04_channel::msgs::timeout_on_close::MsgTimeoutOnClose; use crate::core::ics04_channel::packet::PacketResult; use crate::core::ics04_channel::{error::Error, handler::timeout::TimeoutPacketResult}; -use crate::core::ics26_routing::context::LightClientContext; +use crate::core::ics26_routing::context::ReaderContext; use crate::events::IbcEvent; use crate::handler::{HandlerOutput, HandlerResult}; use crate::prelude::*; pub fn process( - ctx: &dyn LightClientContext, + ctx: &dyn ReaderContext, msg: &MsgTimeoutOnClose, ) -> HandlerResult { let mut output = HandlerOutput::builder(); diff --git a/modules/src/core/ics04_channel/handler/verify.rs b/modules/src/core/ics04_channel/handler/verify.rs index 086cf591c8..7e2f2e9ea9 100644 --- a/modules/src/core/ics04_channel/handler/verify.rs +++ b/modules/src/core/ics04_channel/handler/verify.rs @@ -7,14 +7,14 @@ use crate::core::ics04_channel::channel::ChannelEnd; use crate::core::ics04_channel::error::Error; use crate::core::ics04_channel::msgs::acknowledgement::Acknowledgement; use crate::core::ics04_channel::packet::{Packet, Sequence}; -use crate::core::ics26_routing::context::LightClientContext; +use crate::core::ics26_routing::context::ReaderContext; use crate::prelude::*; use crate::proofs::Proofs; use crate::Height; /// Entry point for verifying all proofs bundled in any ICS4 message for channel protocols. pub fn verify_channel_proofs( - ctx: &dyn LightClientContext, + ctx: &dyn ReaderContext, height: Height, channel_end: &ChannelEnd, connection_end: &ConnectionEnd, @@ -57,7 +57,7 @@ pub fn verify_channel_proofs( /// Entry point for verifying all proofs bundled in a ICS4 packet recv. message. pub fn verify_packet_recv_proofs( - ctx: &dyn LightClientContext, + ctx: &dyn ReaderContext, height: Height, packet: &Packet, connection_end: &ConnectionEnd, @@ -105,7 +105,7 @@ pub fn verify_packet_recv_proofs( /// Entry point for verifying all proofs bundled in an ICS4 packet ack message. 
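Related to the `send_packet` hunk above: `SendPacketResult` now carries the full `Packet` next to its commitment, and `ChannelKeeper` (see the `context.rs` hunk earlier) gains a `store_packet` hook so implementers can optionally persist the whole packet. The sketch below shows how a keeper might wire the two together, using assumed, heavily simplified stand-ins for the crate's identifier and packet types.

```rust
use std::collections::BTreeMap;

// Assumed, simplified stand-ins for the crate's types.
type PortId = String;
type ChannelId = u64;
type Sequence = u64;

#[derive(Clone, Debug)]
struct Packet {
    data: Vec<u8>,
}

#[derive(Clone, Debug)]
struct PacketCommitment(Vec<u8>);

struct SendPacketResult {
    port_id: PortId,
    channel_id: ChannelId,
    seq: Sequence,
    commitment: PacketCommitment,
    // New in this diff: the handler hands the full packet back to the keeper.
    packet: Packet,
}

#[derive(Default)]
struct InMemoryKeeper {
    commitments: BTreeMap<(PortId, ChannelId, Sequence), PacketCommitment>,
    packets: BTreeMap<(PortId, ChannelId, Sequence), Packet>,
}

impl InMemoryKeeper {
    // Mirrors `ChannelKeeper::store_packet_result` for the `Send` case:
    // store the commitment as before, then the packet via the new hook.
    fn store_send_result(&mut self, res: SendPacketResult) {
        let key = (res.port_id.clone(), res.channel_id, res.seq);
        self.commitments.insert(key.clone(), res.commitment);
        self.packets.insert(key, res.packet);
    }
}
```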
pub fn verify_packet_acknowledgement_proofs( - ctx: &dyn LightClientContext, + ctx: &dyn ReaderContext, height: Height, packet: &Packet, acknowledgement: Acknowledgement, @@ -150,7 +150,7 @@ pub fn verify_packet_acknowledgement_proofs( - ctx: &dyn LightClientContext, + ctx: &dyn ReaderContext, height: Height, connection_end: &ConnectionEnd, packet: Packet, @@ -191,7 +191,7 @@ pub fn verify_next_sequence_recv( } pub fn verify_packet_receipt_absence( - ctx: &dyn LightClientContext, + ctx: &dyn ReaderContext, height: Height, connection_end: &ConnectionEnd, packet: Packet, diff --git a/modules/src/core/ics04_channel/handler/write_acknowledgement.rs b/modules/src/core/ics04_channel/handler/write_acknowledgement.rs index e86f1f5821..299bd68f9d 100644 --- a/modules/src/core/ics04_channel/handler/write_acknowledgement.rs +++ b/modules/src/core/ics04_channel/handler/write_acknowledgement.rs @@ -4,7 +4,7 @@ use crate::core::ics04_channel::error::Error; use crate::core::ics04_channel::events::WriteAcknowledgement; use crate::core::ics04_channel::packet::{Packet, PacketResult, Sequence}; use crate::core::ics24_host::identifier::{ChannelId, PortId}; -use crate::core::ics26_routing::context::LightClientContext; +use crate::core::ics26_routing::context::ReaderContext; use crate::prelude::*; use crate::{ events::IbcEvent, @@ -20,7 +20,7 @@ pub struct WriteAckPacketResult { } pub fn process( - ctx: &dyn LightClientContext, + ctx: &dyn ReaderContext, packet: Packet, ack: Vec, ) -> HandlerResult { diff --git a/modules/src/core/ics04_channel/msgs/acknowledgement.rs b/modules/src/core/ics04_channel/msgs/acknowledgement.rs index a5806f096b..95bf93279f 100644 --- a/modules/src/core/ics04_channel/msgs/acknowledgement.rs +++ b/modules/src/core/ics04_channel/msgs/acknowledgement.rs @@ -32,6 +32,12 @@ impl From> for Acknowledgement { } } +impl AsRef<[u8]> for Acknowledgement { + fn as_ref(&self) -> &[u8] { + self.0.as_slice() + } +} + /// /// Message definition for packet acknowledgements. /// @@ -107,7 +113,7 @@ impl TryFrom for MsgAcknowledgement { .ok_or_else(Error::missing_packet)? 
.try_into()?, acknowledgement: raw_msg.acknowledgement.into(), - signer: raw_msg.signer.into(), + signer: raw_msg.signer.parse().map_err(Error::signer)?, proofs, }) } @@ -160,6 +166,7 @@ mod test { use crate::core::ics04_channel::error::Error; use crate::core::ics04_channel::msgs::acknowledgement::test_util::get_dummy_raw_msg_acknowledgement; use crate::core::ics04_channel::msgs::acknowledgement::MsgAcknowledgement; + use crate::test_utils::get_dummy_bech32_account; #[test] fn msg_acknowledgment_try_from_raw() { @@ -197,7 +204,7 @@ mod test { Test { name: "Empty signer".to_string(), raw: RawMsgAcknowledgement { - signer: "".to_string(), + signer: get_dummy_bech32_account(), ..default_raw_msg.clone() }, want_pass: true, diff --git a/modules/src/core/ics04_channel/msgs/chan_close_confirm.rs b/modules/src/core/ics04_channel/msgs/chan_close_confirm.rs index f5ca89607e..2dc6bd27dc 100644 --- a/modules/src/core/ics04_channel/msgs/chan_close_confirm.rs +++ b/modules/src/core/ics04_channel/msgs/chan_close_confirm.rs @@ -73,7 +73,7 @@ impl TryFrom for MsgChannelCloseConfirm { port_id: raw_msg.port_id.parse().map_err(Error::identifier)?, channel_id: raw_msg.channel_id.parse().map_err(Error::identifier)?, proofs, - signer: raw_msg.signer.into(), + signer: raw_msg.signer.parse().map_err(Error::signer)?, }) } } diff --git a/modules/src/core/ics04_channel/msgs/chan_close_init.rs b/modules/src/core/ics04_channel/msgs/chan_close_init.rs index 3dfac2565f..ceb85b5421 100644 --- a/modules/src/core/ics04_channel/msgs/chan_close_init.rs +++ b/modules/src/core/ics04_channel/msgs/chan_close_init.rs @@ -53,7 +53,7 @@ impl TryFrom for MsgChannelCloseInit { Ok(MsgChannelCloseInit { port_id: raw_msg.port_id.parse().map_err(Error::identifier)?, channel_id: raw_msg.channel_id.parse().map_err(Error::identifier)?, - signer: raw_msg.signer.into(), + signer: raw_msg.signer.parse().map_err(Error::signer)?, }) } } diff --git a/modules/src/core/ics04_channel/msgs/chan_open_ack.rs b/modules/src/core/ics04_channel/msgs/chan_open_ack.rs index 46161037af..14cd6d85c6 100644 --- a/modules/src/core/ics04_channel/msgs/chan_open_ack.rs +++ b/modules/src/core/ics04_channel/msgs/chan_open_ack.rs @@ -84,7 +84,7 @@ impl TryFrom for MsgChannelOpenAck { .map_err(Error::identifier)?, counterparty_version: raw_msg.counterparty_version.into(), proofs, - signer: raw_msg.signer.into(), + signer: raw_msg.signer.parse().map_err(Error::signer)?, }) } } diff --git a/modules/src/core/ics04_channel/msgs/chan_open_confirm.rs b/modules/src/core/ics04_channel/msgs/chan_open_confirm.rs index a283e516d7..7ad004adb3 100644 --- a/modules/src/core/ics04_channel/msgs/chan_open_confirm.rs +++ b/modules/src/core/ics04_channel/msgs/chan_open_confirm.rs @@ -68,7 +68,7 @@ impl TryFrom for MsgChannelOpenConfirm { port_id: raw_msg.port_id.parse().map_err(Error::identifier)?, channel_id: raw_msg.channel_id.parse().map_err(Error::identifier)?, proofs, - signer: raw_msg.signer.into(), + signer: raw_msg.signer.parse().map_err(Error::signer)?, }) } } diff --git a/modules/src/core/ics04_channel/msgs/chan_open_init.rs b/modules/src/core/ics04_channel/msgs/chan_open_init.rs index c95dd2cbb2..b5b4130a90 100644 --- a/modules/src/core/ics04_channel/msgs/chan_open_init.rs +++ b/modules/src/core/ics04_channel/msgs/chan_open_init.rs @@ -55,7 +55,7 @@ impl TryFrom for MsgChannelOpenInit { .channel .ok_or_else(Error::missing_channel)? 
.try_into()?, - signer: raw_msg.signer.into(), + signer: raw_msg.signer.parse().map_err(Error::signer)?, }) } } diff --git a/modules/src/core/ics04_channel/msgs/chan_open_try.rs b/modules/src/core/ics04_channel/msgs/chan_open_try.rs index 68122e2d44..5af004d529 100644 --- a/modules/src/core/ics04_channel/msgs/chan_open_try.rs +++ b/modules/src/core/ics04_channel/msgs/chan_open_try.rs @@ -104,7 +104,7 @@ impl TryFrom for MsgChannelOpenTry { .try_into()?, counterparty_version: raw_msg.counterparty_version.into(), proofs, - signer: raw_msg.signer.into(), + signer: raw_msg.signer.parse().map_err(ChannelError::signer)?, }; msg.validate_basic() diff --git a/modules/src/core/ics04_channel/msgs/recv_packet.rs b/modules/src/core/ics04_channel/msgs/recv_packet.rs index 076fd079f0..7aad7c6eec 100644 --- a/modules/src/core/ics04_channel/msgs/recv_packet.rs +++ b/modules/src/core/ics04_channel/msgs/recv_packet.rs @@ -72,7 +72,7 @@ impl TryFrom for MsgRecvPacket { .ok_or_else(Error::missing_packet)? .try_into()?, proofs, - signer: raw_msg.signer.into(), + signer: raw_msg.signer.parse().map_err(Error::signer)?, }) } } @@ -129,6 +129,7 @@ mod test { use crate::core::ics04_channel::error::Error; use crate::core::ics04_channel::msgs::recv_packet::test_util::get_dummy_raw_msg_recv_packet; use crate::core::ics04_channel::msgs::recv_packet::MsgRecvPacket; + use crate::test_utils::get_dummy_bech32_account; #[test] fn msg_recv_packet_try_from_raw() { @@ -165,7 +166,7 @@ mod test { Test { name: "Empty signer".to_string(), raw: RawMsgRecvPacket { - signer: "".to_string(), + signer: get_dummy_bech32_account(), ..default_raw_msg }, want_pass: true, diff --git a/modules/src/core/ics04_channel/msgs/timeout.rs b/modules/src/core/ics04_channel/msgs/timeout.rs index 74efc8dd07..a2887e1013 100644 --- a/modules/src/core/ics04_channel/msgs/timeout.rs +++ b/modules/src/core/ics04_channel/msgs/timeout.rs @@ -81,7 +81,7 @@ impl TryFrom for MsgTimeout { .ok_or_else(Error::missing_packet)? .try_into()?, next_sequence_recv: Sequence::from(raw_msg.next_sequence_recv), - signer: raw_msg.signer.into(), + signer: raw_msg.signer.parse().map_err(Error::signer)?, proofs, }) } @@ -134,6 +134,7 @@ mod test { use crate::core::ics04_channel::error::Error; use crate::core::ics04_channel::msgs::timeout::test_util::get_dummy_raw_msg_timeout; use crate::core::ics04_channel::msgs::timeout::MsgTimeout; + use crate::test_utils::get_dummy_bech32_account; #[test] fn msg_timeout_try_from_raw() { @@ -180,7 +181,7 @@ mod test { Test { name: "Empty signer".to_string(), raw: RawMsgTimeout { - signer: "".to_string(), + signer: get_dummy_bech32_account(), ..default_raw_msg }, want_pass: true, diff --git a/modules/src/core/ics04_channel/msgs/timeout_on_close.rs b/modules/src/core/ics04_channel/msgs/timeout_on_close.rs index 64ef629cf4..05fda53a54 100644 --- a/modules/src/core/ics04_channel/msgs/timeout_on_close.rs +++ b/modules/src/core/ics04_channel/msgs/timeout_on_close.rs @@ -80,7 +80,7 @@ impl TryFrom for MsgTimeoutOnClose { .ok_or_else(Error::missing_packet)? 
.try_into()?, next_sequence_recv: Sequence::from(raw_msg.next_sequence_recv), - signer: raw_msg.signer.into(), + signer: raw_msg.signer.parse().map_err(Error::signer)?, proofs, }) } diff --git a/modules/src/core/ics04_channel/version.rs b/modules/src/core/ics04_channel/version.rs index a5c611bf7a..b0f7918dd0 100644 --- a/modules/src/core/ics04_channel/version.rs +++ b/modules/src/core/ics04_channel/version.rs @@ -7,7 +7,7 @@ use core::fmt; use core::str::FromStr; use serde_derive::{Deserialize, Serialize}; -use crate::applications::ics20_fungible_token_transfer; +use crate::applications::transfer; use crate::prelude::*; /// The version field for a `ChannelEnd`. @@ -24,7 +24,7 @@ impl Version { } pub fn ics20() -> Self { - Self::new(ics20_fungible_token_transfer::VERSION.to_string()) + Self::new(transfer::VERSION.to_string()) } pub fn empty() -> Self { diff --git a/modules/src/core/ics05_port/context.rs b/modules/src/core/ics05_port/context.rs index 87a4390b24..2c59d4a592 100644 --- a/modules/src/core/ics05_port/context.rs +++ b/modules/src/core/ics05_port/context.rs @@ -8,8 +8,3 @@ pub trait PortReader { /// Return the module_id associated with a given port_id fn lookup_module_by_port(&self, port_id: &PortId) -> Result; } - -pub trait PortKeeper: PortReader { - /// Binds a module to a port. - fn bind_module_to_port(&mut self, module_id: ModuleId, port_id: PortId) -> Result<(), Error>; -} diff --git a/modules/src/core/ics24_host/identifier.rs b/modules/src/core/ics24_host/identifier.rs index 560ee8a973..db69345c61 100644 --- a/modules/src/core/ics24_host/identifier.rs +++ b/modules/src/core/ics24_host/identifier.rs @@ -355,7 +355,7 @@ impl ChannelId { Self(counter) } - pub fn counter(&self) -> u64 { + pub fn sequence(&self) -> u64 { self.0 } diff --git a/modules/src/core/ics26_routing/context.rs b/modules/src/core/ics26_routing/context.rs index 90cbe915c0..0582bd9673 100644 --- a/modules/src/core/ics26_routing/context.rs +++ b/modules/src/core/ics26_routing/context.rs @@ -5,7 +5,8 @@ use core::any::Any; use core::fmt::Debug; use core::{fmt, str::FromStr}; -use crate::applications::ics20_fungible_token_transfer::context::Ics20Context; +use serde::{Deserialize, Serialize}; + use crate::core::ics02_client::context::{ClientKeeper, ClientReader}; use crate::core::ics03_connection::context::{ConnectionKeeper, ConnectionReader}; use crate::core::ics04_channel::channel::{Counterparty, Order}; @@ -16,12 +17,12 @@ use crate::core::ics04_channel::packet::Packet; use crate::core::ics04_channel::Version; use crate::core::ics05_port::context::PortReader; use crate::core::ics24_host::identifier::{ChannelId, ConnectionId, PortId}; -use crate::events::IbcEvent; -use crate::handler::HandlerOutput; +use crate::events::ModuleEvent; +use crate::handler::HandlerOutputBuilder; use crate::signer::Signer; /// This trait captures all the functional dependencies of needed in light client implementations -pub trait LightClientContext: ClientReader + ConnectionReader + ChannelReader {} +pub trait ReaderContext: ClientReader + ConnectionReader + ChannelReader {} /// This trait captures all the functional dependencies (i.e., context) which the ICS26 module /// requires to be able to dispatch and process IBC messages. 
In other words, this is the @@ -34,8 +35,7 @@ pub trait Ics26Context: + ChannelKeeper + ChannelReader + PortReader - + Ics20Context - + LightClientContext + + ReaderContext { type Router: Router; @@ -47,7 +47,7 @@ pub trait Ics26Context: #[derive(Debug, PartialEq)] pub struct InvalidModuleId; -#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord)] +#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Deserialize, Serialize)] pub struct ModuleId(String); impl ModuleId { @@ -83,7 +83,7 @@ impl Borrow for ModuleId { /// Types implementing this trait are expected to implement `From` pub trait Acknowledgement: AsRef<[u8]> {} -pub type WriteFn = dyn FnOnce(&mut dyn Any); +pub type WriteFn = dyn FnOnce(&mut dyn Any) -> Result<(), String>; pub enum OnRecvPacketAck { Nil(Box), @@ -91,15 +91,19 @@ pub enum OnRecvPacketAck { Failed(Box), } -pub type ModuleEvent = IbcEvent; +impl OnRecvPacketAck { + pub fn is_successful(&self) -> bool { + matches!(self, OnRecvPacketAck::Successful(_, _)) + } +} -pub type ModuleOutput = HandlerOutput<(), ModuleEvent>; +pub type ModuleOutputBuilder = HandlerOutputBuilder<(), ModuleEvent>; pub trait Module: Debug + Send + Sync + AsAnyMut + 'static { #[allow(clippy::too_many_arguments)] fn on_chan_open_init( &mut self, - _output: &mut ModuleOutput, + _output: &mut ModuleOutputBuilder, _order: Order, _connection_hops: &[ConnectionId], _port_id: &PortId, @@ -113,18 +117,19 @@ pub trait Module: Debug + Send + Sync + AsAnyMut + 'static { #[allow(clippy::too_many_arguments)] fn on_chan_open_try( &mut self, - _output: &mut ModuleOutput, + _output: &mut ModuleOutputBuilder, _order: Order, _connection_hops: &[ConnectionId], _port_id: &PortId, _channel_id: &ChannelId, _counterparty: &Counterparty, + _version: &Version, _counterparty_version: &Version, ) -> Result; fn on_chan_open_ack( &mut self, - _output: &mut ModuleOutput, + _output: &mut ModuleOutputBuilder, _port_id: &PortId, _channel_id: &ChannelId, _counterparty_version: &Version, @@ -134,7 +139,7 @@ pub trait Module: Debug + Send + Sync + AsAnyMut + 'static { fn on_chan_open_confirm( &mut self, - _output: &mut ModuleOutput, + _output: &mut ModuleOutputBuilder, _port_id: &PortId, _channel_id: &ChannelId, ) -> Result<(), Error> { @@ -143,7 +148,7 @@ pub trait Module: Debug + Send + Sync + AsAnyMut + 'static { fn on_chan_close_init( &mut self, - _output: &mut ModuleOutput, + _output: &mut ModuleOutputBuilder, _port_id: &PortId, _channel_id: &ChannelId, ) -> Result<(), Error> { @@ -152,7 +157,7 @@ pub trait Module: Debug + Send + Sync + AsAnyMut + 'static { fn on_chan_close_confirm( &mut self, - _output: &mut ModuleOutput, + _output: &mut ModuleOutputBuilder, _port_id: &PortId, _channel_id: &ChannelId, ) -> Result<(), Error> { @@ -161,16 +166,16 @@ pub trait Module: Debug + Send + Sync + AsAnyMut + 'static { fn on_recv_packet( &self, - _output: &mut ModuleOutput, + _output: &mut ModuleOutputBuilder, _packet: &Packet, _relayer: &Signer, ) -> OnRecvPacketAck { - OnRecvPacketAck::Nil(Box::new(|_| {})) + OnRecvPacketAck::Nil(Box::new(|_| Ok(()))) } fn on_acknowledgement_packet( &mut self, - _output: &mut ModuleOutput, + _output: &mut ModuleOutputBuilder, _packet: &Packet, _acknowledgement: &GenericAcknowledgement, _relayer: &Signer, @@ -180,7 +185,7 @@ pub trait Module: Debug + Send + Sync + AsAnyMut + 'static { fn on_timeout_packet( &mut self, - _output: &mut ModuleOutput, + _output: &mut ModuleOutputBuilder, _packet: &Packet, _relayer: &Signer, ) -> Result<(), Error> { diff --git a/modules/src/core/ics26_routing/error.rs 
b/modules/src/core/ics26_routing/error.rs index 2c4af31ae6..260a688cdd 100644 --- a/modules/src/core/ics26_routing/error.rs +++ b/modules/src/core/ics26_routing/error.rs @@ -1,7 +1,7 @@ use crate::prelude::*; use flex_error::{define_error, TraceError}; -use crate::applications::ics20_fungible_token_transfer; +use crate::applications::transfer; use crate::core::ics02_client; use crate::core::ics03_connection; use crate::core::ics04_channel; @@ -22,7 +22,7 @@ define_error! { | _ | { "ICS04 channel error" }, Ics20FungibleTokenTransfer - [ ics20_fungible_token_transfer::error::Error ] + [ transfer::error::Error ] | _ | { "ICS20 fungible token transfer error" }, UnknownMessageTypeUrl diff --git a/modules/src/core/ics26_routing/handler.rs b/modules/src/core/ics26_routing/handler.rs index d9d92d8a4b..c2f0765b88 100644 --- a/modules/src/core/ics26_routing/handler.rs +++ b/modules/src/core/ics26_routing/handler.rs @@ -3,7 +3,6 @@ use crate::prelude::*; use ibc_proto::google::protobuf::Any; -use crate::applications::ics20_fungible_token_transfer::relay_application_logic::send_transfer::send_transfer as ics20_msg_dispatcher; use crate::core::ics02_client::handler::dispatch as ics2_msg_dispatcher; use crate::core::ics03_connection::handler::dispatch as ics3_msg_dispatcher; use crate::core::ics04_channel::handler::{ @@ -15,20 +14,24 @@ use crate::core::ics04_channel::handler::{ packet_dispatch as ics4_packet_msg_dispatcher, }; use crate::core::ics04_channel::packet::PacketResult; -use crate::core::ics26_routing::context::Ics26Context; +use crate::core::ics26_routing::context::{Ics26Context, ModuleOutputBuilder}; use crate::core::ics26_routing::error::Error; use crate::core::ics26_routing::msgs::Ics26Envelope::{ - self, Ics20Msg, Ics2Msg, Ics3Msg, Ics4ChannelMsg, Ics4PacketMsg, + self, Ics2Msg, Ics3Msg, Ics4ChannelMsg, Ics4PacketMsg, }; use crate::{events::IbcEvent, handler::HandlerOutput}; +/// Result of message execution - comprises of events emitted and logs entries created during the +/// execution of a transaction message. +pub struct MsgReceipt { + pub events: Vec, + pub log: Vec, +} + /// Mimics the DeliverTx ABCI interface, but for a single message and at a slightly lower level. /// No need for authentication info or signature checks here. /// Returns a vector of all events that got generated as a byproduct of processing `message`. -pub fn deliver( - ctx: &mut Ctx, - message: Any, -) -> Result<(Vec, Vec), Error> +pub fn deliver(ctx: &mut Ctx, message: Any) -> Result where Ctx: Ics26Context, HostFunctions: HostFunctionsProvider, @@ -37,9 +40,9 @@ where let envelope = decode(message)?; // Process the envelope, and accumulate any events that were generated. - let output = dispatch::<_, HostFunctions>(ctx, envelope)?; + let HandlerOutput { log, events, .. 
} = dispatch::<_, HostFunctions>(ctx, envelope)?; - Ok((output.events, output.log)) + Ok(MsgReceipt { events, log }) } /// Attempts to convert a message into a [Ics26Envelope] message @@ -94,7 +97,7 @@ where let (mut handler_builder, channel_result) = ics4_msg_dispatcher::<_, HostFunctions>(ctx, &msg).map_err(Error::ics04_channel)?; - let mut module_output = HandlerOutput::builder().with_result(()); + let mut module_output = ModuleOutputBuilder::new(); let cb_result = ics4_callback(ctx, &module_id, &msg, channel_result, &mut module_output); handler_builder.merge(module_output); @@ -107,20 +110,6 @@ where handler_builder.with_result(()) } - Ics20Msg(msg) => { - let handler_output = ics20_msg_dispatcher::<_, HostFunctions>(ctx, msg) - .map_err(Error::ics20_fungible_token_transfer)?; - - // Apply any results to the host chain store. - ctx.store_packet_result(handler_output.result) - .map_err(Error::ics04_channel)?; - - HandlerOutput::builder() - .with_log(handler_output.log) - .with_events(handler_output.events) - .with_result(()) - } - Ics4PacketMsg(msg) => { let module_id = get_module_for_packet_msg(ctx, &msg).map_err(Error::ics04_channel)?; let (mut handler_builder, packet_result) = @@ -131,7 +120,7 @@ where return Ok(handler_builder.with_result(())); } - let mut module_output = HandlerOutput::builder().with_result(()); + let mut module_output = ModuleOutputBuilder::new(); let cb_result = ics4_packet_callback(ctx, &module_id, &msg, &mut module_output); handler_builder.merge(module_output); cb_result.map_err(Error::ics04_channel)?; @@ -153,12 +142,10 @@ mod tests { use test_log::test; - use crate::applications::ics20_fungible_token_transfer::msgs::transfer::test_util::get_dummy_msg_transfer; + use crate::applications::transfer::context::test::deliver as ics20_deliver; + use crate::applications::transfer::PrefixedCoin; use crate::core::ics02_client::client_consensus::AnyConsensusState; use crate::core::ics02_client::client_state::AnyClientState; - use crate::events::IbcEvent; - use crate::test_utils::Crypto; - use crate::core::ics02_client::msgs::{ create_client::MsgCreateAnyClient, update_client::MsgUpdateAnyClient, upgrade_client::MsgUpgradeAnyClient, ClientMsg, @@ -181,15 +168,24 @@ mod tests { timeout_on_close::{test_util::get_dummy_raw_msg_timeout_on_close, MsgTimeoutOnClose}, ChannelMsg, PacketMsg, }; + use crate::events::IbcEvent; + use crate::test_utils::Crypto; + use crate::{ + applications::transfer::msgs::transfer::test_util::get_dummy_msg_transfer, + applications::transfer::msgs::transfer::MsgTransfer, + applications::transfer::packet::PacketData, applications::transfer::MODULE_ID_STR, + }; use crate::core::ics24_host::identifier::ConnectionId; - use crate::core::ics26_routing::context::{ModuleId, RouterBuilder}; + use crate::core::ics26_routing::context::{Ics26Context, ModuleId, Router, RouterBuilder}; + use crate::core::ics26_routing::error::Error; use crate::core::ics26_routing::handler::dispatch; use crate::core::ics26_routing::msgs::Ics26Envelope; + use crate::handler::HandlerOutputBuilder; use crate::mock::client_state::{MockClientState, MockConsensusState}; use crate::mock::context::{MockContext, MockRouterBuilder}; use crate::mock::header::MockHeader; - use crate::test_utils::{get_dummy_account_id, DummyModule}; + use crate::test_utils::{get_dummy_account_id, DummyTransferModule}; use crate::timestamp::Timestamp; use crate::Height; @@ -199,10 +195,28 @@ mod tests { /// to work with the context and correctly store results (i.e., the `ClientKeeper`, /// `ConnectionKeeper`, 
and `ChannelKeeper` traits). fn routing_module_and_keepers() { + #[derive(Clone, Debug)] + enum TestMsg { + Ics26(Ics26Envelope), + Ics20(MsgTransfer), + } + + impl From for TestMsg { + fn from(msg: Ics26Envelope) -> Self { + Self::Ics26(msg) + } + } + + impl From> for TestMsg { + fn from(msg: MsgTransfer) -> Self { + Self::Ics20(msg) + } + } + // Test parameters struct Test { name: String, - msg: Ics26Envelope, + msg: TestMsg, want_pass: bool, } let default_signer = get_dummy_account_id(); @@ -217,16 +231,18 @@ mod tests { let upgrade_client_height_second = Height::new(1, 1); - let module = DummyModule::default(); - let module_id: ModuleId = "dummymodule".parse().unwrap(); - - let router = MockRouterBuilder::default() - .add_route(module_id.clone(), module) - .unwrap() - .build(); + let transfer_module_id: ModuleId = MODULE_ID_STR.parse().unwrap(); // We reuse this same context across all tests. Nothing in particular needs parametrizing. - let mut ctx = MockContext::default().with_router(router); + let mut ctx = { + let ctx = MockContext::default(); + let module = DummyTransferModule::new(ctx.ibc_store_share()); + let router = MockRouterBuilder::default() + .add_route(transfer_module_id.clone(), module) + .unwrap() + .build(); + ctx.with_router(router) + }; let create_client_msg = MsgCreateAnyClient::new( AnyClientState::from(MockClientState::new(MockHeader::new(start_client_height))), @@ -293,6 +309,20 @@ mod tests { msg_to_on_close.packet.timeout_height = msg_transfer_two.timeout_height; msg_to_on_close.packet.timeout_timestamp = msg_transfer_two.timeout_timestamp; + let denom = msg_transfer_two.token.denom.clone(); + let packet_data = { + let data = PacketData { + token: PrefixedCoin { + denom, + amount: msg_transfer_two.token.amount, + }, + sender: msg_transfer_two.sender.clone(), + receiver: msg_transfer_two.receiver.clone(), + }; + serde_json::to_vec(&data).expect("PacketData's infallible Serialize impl failed") + }; + msg_to_on_close.packet.data = packet_data; + let msg_recv_packet = MsgRecvPacket::try_from(get_dummy_raw_msg_recv_packet(35)).unwrap(); // First, create a client.. @@ -308,7 +338,7 @@ mod tests { res ); - ctx.scope_port_to_module(msg_chan_init.port_id.clone(), module_id); + ctx.scope_port_to_module(msg_chan_init.port_id.clone(), transfer_module_id.clone()); // Figure out the ID of the client that was just created. 
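In the test setup above, the ICS20 packet payload is now built by JSON-encoding a `PacketData` value with `serde_json::to_vec` before assigning it to `packet.data`. The following self-contained sketch shows just that serialization step, with a flattened stand-in struct (plain strings instead of the crate's `PrefixedCoin` and `Signer`).

```rust
use serde::{Deserialize, Serialize};

// Simplified stand-in for the ICS20 `PacketData` (assumption: flat string fields).
#[derive(Debug, Serialize, Deserialize, PartialEq)]
struct PacketData {
    denom: String,
    amount: String,
    sender: String,
    receiver: String,
}

fn main() {
    let data = PacketData {
        denom: "transfer/channel-0/uatom".to_string(),
        amount: "1000".to_string(),
        sender: "cosmos1sender".to_string(),
        receiver: "cosmos1receiver".to_string(),
    };

    // This is what ends up in `packet.data`: the JSON-encoded transfer payload.
    let bytes = serde_json::to_vec(&data).expect("serializing plain strings cannot fail");

    // Round-trip to show the receiving module can decode the same shape.
    let decoded: PacketData = serde_json::from_slice(&bytes).unwrap();
    assert_eq!(decoded, data);
}
```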
let mut events = res.unwrap().events; @@ -332,7 +362,8 @@ mod tests { .with_timestamp(Timestamp::now()) .into(), signer: default_signer.clone(), - })), + })) + .into(), want_pass: true, }, Test { @@ -341,14 +372,16 @@ mod tests { client_id: client_id.clone(), header: MockHeader::new(update_client_height).into(), signer: default_signer.clone(), - })), + })) + .into(), want_pass: false, }, Test { name: "Connection open init succeeds".to_string(), msg: Ics26Envelope::Ics3Msg(ConnectionMsg::ConnectionOpenInit( msg_conn_init.with_client_id(client_id.clone()), - )), + )) + .into(), want_pass: true, }, Test { @@ -356,50 +389,54 @@ mod tests { .to_string(), msg: Ics26Envelope::Ics3Msg(ConnectionMsg::ConnectionOpenTry(Box::new( incorrect_msg_conn_try, - ))), + ))) + .into(), want_pass: false, }, Test { name: "Connection open try succeeds".to_string(), msg: Ics26Envelope::Ics3Msg(ConnectionMsg::ConnectionOpenTry(Box::new( correct_msg_conn_try.with_client_id(client_id.clone()), - ))), + ))) + .into(), want_pass: true, }, Test { name: "Connection open ack succeeds".to_string(), msg: Ics26Envelope::Ics3Msg(ConnectionMsg::ConnectionOpenAck(Box::new( msg_conn_ack, - ))), + ))) + .into(), want_pass: true, }, // ICS04 Test { name: "Channel open init succeeds".to_string(), - msg: Ics26Envelope::Ics4ChannelMsg(ChannelMsg::ChannelOpenInit(msg_chan_init)), + msg: Ics26Envelope::Ics4ChannelMsg(ChannelMsg::ChannelOpenInit(msg_chan_init)) + .into(), want_pass: true, }, Test { name: "Channel open init fail due to missing connection".to_string(), msg: Ics26Envelope::Ics4ChannelMsg(ChannelMsg::ChannelOpenInit( incorrect_msg_chan_init, - )), + )) + .into(), want_pass: false, }, Test { name: "Channel open try succeeds".to_string(), - msg: Ics26Envelope::Ics4ChannelMsg(ChannelMsg::ChannelOpenTry(msg_chan_try)), + msg: Ics26Envelope::Ics4ChannelMsg(ChannelMsg::ChannelOpenTry(msg_chan_try)).into(), want_pass: true, }, Test { name: "Channel open ack succeeds".to_string(), - msg: Ics26Envelope::Ics4ChannelMsg(ChannelMsg::ChannelOpenAck(msg_chan_ack)), + msg: Ics26Envelope::Ics4ChannelMsg(ChannelMsg::ChannelOpenAck(msg_chan_ack)).into(), want_pass: true, }, - //ICS20-04-packet Test { name: "Packet send".to_string(), - msg: Ics26Envelope::Ics20Msg(msg_transfer), + msg: msg_transfer.into(), want_pass: true, }, // The client update is required in this test, because the proof associated with @@ -412,22 +449,24 @@ mod tests { .with_timestamp(Timestamp::now()) .into(), signer: default_signer.clone(), - })), + })) + .into(), want_pass: true, }, Test { name: "Receive packet".to_string(), - msg: Ics26Envelope::Ics4PacketMsg(PacketMsg::RecvPacket(msg_recv_packet.clone())), + msg: Ics26Envelope::Ics4PacketMsg(PacketMsg::RecvPacket(msg_recv_packet.clone())) + .into(), want_pass: true, }, Test { name: "Re-Receive packet".to_string(), - msg: Ics26Envelope::Ics4PacketMsg(PacketMsg::RecvPacket(msg_recv_packet)), + msg: Ics26Envelope::Ics4PacketMsg(PacketMsg::RecvPacket(msg_recv_packet)).into(), want_pass: true, }, Test { name: "Packet send".to_string(), - msg: Ics26Envelope::Ics20Msg(msg_transfer_two), + msg: msg_transfer_two.into(), want_pass: true, }, Test { @@ -436,7 +475,8 @@ mod tests { client_id: client_id.clone(), header: MockHeader::new(update_client_height_after_second_send).into(), signer: default_signer.clone(), - })), + })) + .into(), want_pass: true, }, //ICS04-close channel @@ -444,20 +484,22 @@ mod tests { name: "Channel close init succeeds".to_string(), msg: Ics26Envelope::Ics4ChannelMsg(ChannelMsg::ChannelCloseInit( 
msg_chan_close_init, - )), + )) + .into(), want_pass: true, }, Test { name: "Channel close confirm fails cause channel is already closed".to_string(), msg: Ics26Envelope::Ics4ChannelMsg(ChannelMsg::ChannelCloseConfirm( msg_chan_close_confirm, - )), + )) + .into(), want_pass: false, }, //ICS04-to_on_close Test { name: "Timeout on close".to_string(), - msg: Ics26Envelope::Ics4PacketMsg(PacketMsg::ToClosePacket(msg_to_on_close)), + msg: Ics26Envelope::Ics4PacketMsg(PacketMsg::ToClosePacket(msg_to_on_close)).into(), want_pass: true, }, Test { @@ -473,7 +515,8 @@ mod tests { Vec::new(), Vec::new(), default_signer.clone(), - ))), + ))) + .into(), want_pass: true, }, Test { @@ -489,7 +532,8 @@ mod tests { Vec::new(), Vec::new(), default_signer, - ))), + ))) + .into(), want_pass: false, }, ] @@ -497,7 +541,23 @@ mod tests { .collect(); for test in tests { - let res = dispatch::<_, Crypto>(&mut ctx, test.msg.clone()); + let res = match test.msg.clone() { + TestMsg::Ics26(msg) => dispatch::<_, Crypto>(&mut ctx, msg).map(|_| ()), + TestMsg::Ics20(msg) => { + let transfer_module = + ctx.router_mut().get_route_mut(&transfer_module_id).unwrap(); + ics20_deliver( + transfer_module + .as_any_mut() + .downcast_mut::() + .unwrap(), + &mut HandlerOutputBuilder::new(), + msg, + ) + .map(|_| ()) + .map_err(Error::ics04_channel) + } + }; assert_eq!( test.want_pass, diff --git a/modules/src/core/ics26_routing/msgs.rs b/modules/src/core/ics26_routing/msgs.rs index 362f88144d..3f2306e6e9 100644 --- a/modules/src/core/ics26_routing/msgs.rs +++ b/modules/src/core/ics26_routing/msgs.rs @@ -2,7 +2,6 @@ use crate::prelude::*; use ibc_proto::google::protobuf::Any; -use crate::applications::ics20_fungible_token_transfer::msgs::{transfer, transfer::MsgTransfer}; use crate::core::ics02_client::msgs::{create_client, update_client, upgrade_client, ClientMsg}; use crate::core::ics03_connection::msgs::{ conn_open_ack, conn_open_confirm, conn_open_init, conn_open_try, ConnectionMsg, @@ -21,7 +20,6 @@ pub enum Ics26Envelope { Ics3Msg(ConnectionMsg), Ics4ChannelMsg(ChannelMsg), Ics4PacketMsg(PacketMsg), - Ics20Msg(MsgTransfer), } impl TryFrom for Ics26Envelope { @@ -123,12 +121,6 @@ impl TryFrom for Ics26Envelope { ChannelMsg::ChannelCloseConfirm(domain_msg), )) } - // ICS20 - 04 - Send packet - transfer::TYPE_URL => { - let domain_msg = transfer::MsgTransfer::decode_vec(&any_msg.value) - .map_err(Error::malformed_message_bytes)?; - Ok(Ics26Envelope::Ics20Msg(domain_msg)) - } // ICS04 packet messages recv_packet::TYPE_URL => { let domain_msg = recv_packet::MsgRecvPacket::decode_vec(&any_msg.value) diff --git a/modules/src/events.rs b/modules/src/events.rs index 3093f70c05..58e6d54e2b 100644 --- a/modules/src/events.rs +++ b/modules/src/events.rs @@ -6,6 +6,7 @@ use core::str::FromStr; use flex_error::{define_error, TraceError}; use prost::alloc::fmt::Formatter; use serde_derive::{Deserialize, Serialize}; +use tendermint::abci::tag::Tag; use tendermint::abci::Event as AbciEvent; use crate::core::ics02_client::error as client_error; @@ -19,6 +20,7 @@ use crate::core::ics04_channel::events as ChannelEvents; use crate::core::ics04_channel::events::Attributes as ChannelAttributes; use crate::core::ics04_channel::packet::Packet; use crate::core::ics24_host::error::ValidationError; +use crate::core::ics26_routing::context::ModuleId; use crate::timestamp::ParseTimestampError; use crate::Height; @@ -62,6 +64,10 @@ define_error! 
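Since the `Ics20Msg` envelope variant is gone, the test's dispatch loop above reaches the transfer application by looking up its route and downcasting the `&mut dyn Module` back to the concrete module type. A minimal standalone sketch of that route-and-downcast pattern follows; the `Router` and `TransferModule` here are hypothetical simplifications of the crate's router machinery.

```rust
use std::any::Any;
use std::collections::BTreeMap;

// Minimal module abstraction: modules expose themselves as `Any` so callers can
// recover the concrete type behind the trait object.
trait Module: Any {
    fn as_any_mut(&mut self) -> &mut dyn Any;
}

#[derive(Default)]
struct TransferModule {
    delivered: u32,
}

impl Module for TransferModule {
    fn as_any_mut(&mut self) -> &mut dyn Any {
        self
    }
}

#[derive(Default)]
struct Router {
    routes: BTreeMap<String, Box<dyn Module>>,
}

impl Router {
    fn add_route(&mut self, id: &str, module: impl Module + 'static) {
        self.routes.insert(id.to_string(), Box::new(module));
    }

    fn get_route_mut(&mut self, id: &str) -> Option<&mut dyn Module> {
        self.routes.get_mut(id).map(|m| m.as_mut())
    }
}

fn main() {
    let mut router = Router::default();
    router.add_route("transfer", TransferModule::default());

    // Fetch the route, then downcast to the concrete module to drive app-specific logic.
    let module = router.get_route_mut("transfer").unwrap();
    let transfer = module
        .as_any_mut()
        .downcast_mut::<TransferModule>()
        .expect("route was registered with a TransferModule");
    transfer.delivered += 1;

    assert_eq!(transfer.delivered, 1);
}
```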
{ IncorrectEventType { event: String } | e | { format_args!("incorrect event type: {}", e.event) }, + + MalformedModuleEvent + { event: ModuleEvent } + | e | { format_args!("module event cannot use core event types: {:?}", e.event) }, } } @@ -89,6 +95,7 @@ impl WithBlockDataType { const NEW_BLOCK_EVENT: &str = "new_block"; const EMPTY_EVENT: &str = "empty"; const CHAIN_ERROR_EVENT: &str = "chain_error"; +const APP_MODULE_EVENT: &str = "app_module"; /// Client event types const CREATE_CLIENT_EVENT: &str = "create_client"; const UPDATE_CLIENT_EVENT: &str = "update_client"; @@ -138,6 +145,7 @@ pub enum IbcEventType { AckPacket, Timeout, TimeoutOnClose, + AppModule, Empty, ChainError, } @@ -166,6 +174,7 @@ impl IbcEventType { IbcEventType::AckPacket => ACK_PACKET_EVENT, IbcEventType::Timeout => TIMEOUT_EVENT, IbcEventType::TimeoutOnClose => TIMEOUT_ON_CLOSE_EVENT, + IbcEventType::AppModule => APP_MODULE_EVENT, IbcEventType::Empty => EMPTY_EVENT, IbcEventType::ChainError => CHAIN_ERROR_EVENT, } @@ -200,6 +209,7 @@ impl FromStr for IbcEventType { TIMEOUT_ON_CLOSE_EVENT => Ok(IbcEventType::TimeoutOnClose), EMPTY_EVENT => Ok(IbcEventType::Empty), CHAIN_ERROR_EVENT => Ok(IbcEventType::ChainError), + // from_str() for `APP_MODULE_EVENT` MUST fail because a `ModuleEvent`'s type isn't constant _ => Err(Error::incorrect_event_type(s.to_string())), } } @@ -234,6 +244,8 @@ pub enum IbcEvent { TimeoutPacket(ChannelEvents::TimeoutPacket), TimeoutOnClosePacket(ChannelEvents::TimeoutOnClosePacket), + AppModule(ModuleEvent), + Empty(String), // Special event, signifying empty response ChainError(String), // Special event, signifying an error on CheckTx or DeliverTx } @@ -285,6 +297,8 @@ impl fmt::Display for IbcEvent { IbcEvent::TimeoutPacket(ev) => write!(f, "TimeoutPacketEv({})", ev), IbcEvent::TimeoutOnClosePacket(ev) => write!(f, "TimeoutOnClosePacketEv({})", ev), + IbcEvent::AppModule(ev) => write!(f, "AppModuleEv({:?})", ev), + IbcEvent::Empty(ev) => write!(f, "EmptyEv({})", ev), IbcEvent::ChainError(ev) => write!(f, "ChainErrorEv({})", ev), } @@ -311,10 +325,15 @@ impl TryFrom for AbciEvent { IbcEvent::CloseInitChannel(event) => event.into(), IbcEvent::CloseConfirmChannel(event) => event.into(), IbcEvent::SendPacket(event) => event.try_into().map_err(Error::channel)?, + IbcEvent::ReceivePacket(event) => event.try_into().map_err(Error::channel)?, IbcEvent::WriteAcknowledgement(event) => event.try_into().map_err(Error::channel)?, IbcEvent::AcknowledgePacket(event) => event.try_into().map_err(Error::channel)?, IbcEvent::TimeoutPacket(event) => event.try_into().map_err(Error::channel)?, - _ => return Err(Error::incorrect_event_type(event.to_string())), + IbcEvent::TimeoutOnClosePacket(event) => event.try_into().map_err(Error::channel)?, + IbcEvent::AppModule(event) => event.try_into()?, + IbcEvent::NewBlock(_) | IbcEvent::Empty(_) | IbcEvent::ChainError(_) => { + return Err(Error::incorrect_event_type(event.to_string())) + } }) } } @@ -420,6 +439,7 @@ impl IbcEvent { IbcEvent::AcknowledgePacket(_) => IbcEventType::AckPacket, IbcEvent::TimeoutPacket(_) => IbcEventType::Timeout, IbcEvent::TimeoutOnClosePacket(_) => IbcEventType::TimeoutOnClose, + IbcEvent::AppModule(_) => IbcEventType::AppModule, IbcEvent::Empty(_) => IbcEventType::Empty, IbcEvent::ChainError(_) => IbcEventType::ChainError, } @@ -465,6 +485,65 @@ impl IbcEvent { } } +#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)] +pub struct ModuleEvent { + pub kind: String, + pub module_name: ModuleId, + pub attributes: Vec, +} + +impl 
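The new `MalformedModuleEvent` error and the note that `from_str` must fail for `APP_MODULE_EVENT` both guard the same invariant: a module event's `kind` must not collide with a core IBC event type string. A standalone sketch of that guard is shown below, with a small hard-coded reserved list standing in for `IbcEventType::from_str` and tuples standing in for ABCI attributes.

```rust
// A subset of the reserved core event type strings (assumption: illustrative only).
const CORE_EVENT_TYPES: &[&str] = &[
    "create_client",
    "update_client",
    "send_packet",
    "recv_packet",
    "acknowledge_packet",
    "timeout_packet",
];

struct ModuleEvent {
    kind: String,
    attributes: Vec<(String, String)>,
}

/// Refuse to emit a module event whose type string would shadow a core IBC event.
fn module_event_to_abci(event: ModuleEvent) -> Result<(String, Vec<(String, String)>), String> {
    if CORE_EVENT_TYPES.contains(&event.kind.as_str()) {
        return Err(format!(
            "module event cannot use core event type: {}",
            event.kind
        ));
    }
    Ok((event.kind, event.attributes))
}

fn main() {
    let ok = module_event_to_abci(ModuleEvent {
        kind: "fungible_token_packet".to_string(),
        attributes: vec![("success".to_string(), "true".to_string())],
    });
    assert!(ok.is_ok());

    let clash = module_event_to_abci(ModuleEvent {
        kind: "send_packet".to_string(),
        attributes: vec![],
    });
    assert!(clash.is_err());
}
```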
TryFrom for AbciEvent { + type Error = Error; + + fn try_from(event: ModuleEvent) -> Result { + if IbcEventType::from_str(event.kind.as_str()).is_ok() { + return Err(Error::malformed_module_event(event)); + } + + let attributes = event.attributes.into_iter().map(Into::into).collect(); + Ok(AbciEvent { + type_str: event.kind, + attributes, + }) + } +} + +impl From for IbcEvent { + fn from(e: ModuleEvent) -> Self { + IbcEvent::AppModule(e) + } +} + +#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)] +pub struct ModuleEventAttribute { + pub key: String, + pub value: String, +} + +impl From<(K, V)> for ModuleEventAttribute { + fn from((k, v): (K, V)) -> Self { + Self { + key: k.to_string(), + value: v.to_string(), + } + } +} + +impl From for Tag { + fn from(attr: ModuleEventAttribute) -> Self { + Self { + key: attr + .key + .parse() + .expect("Key::from_str() impl is infallible"), + value: attr + .key + .parse() + .expect("Value::from_str() impl is infallible"), + } + } +} + #[derive(Debug, Clone, Serialize)] pub struct RawObject<'a> { pub height: Height, diff --git a/modules/src/handler.rs b/modules/src/handler.rs index bb52bce2d0..f8bf203ebb 100644 --- a/modules/src/handler.rs +++ b/modules/src/handler.rs @@ -11,20 +11,20 @@ pub struct HandlerOutput { pub events: Vec, } -impl HandlerOutput { - pub fn builder() -> HandlerOutputBuilder { +impl HandlerOutput { + pub fn builder() -> HandlerOutputBuilder { HandlerOutputBuilder::new() } } #[derive(Clone, Debug, Default)] -pub struct HandlerOutputBuilder { +pub struct HandlerOutputBuilder { log: Vec, - events: Vec, + events: Vec, marker: PhantomData, } -impl HandlerOutputBuilder { +impl HandlerOutputBuilder { pub fn new() -> Self { Self { log: Vec::new(), @@ -42,16 +42,16 @@ impl HandlerOutputBuilder { self.log.push(log.into()); } - pub fn with_events(mut self, mut events: Vec) -> Self { + pub fn with_events(mut self, mut events: Vec) -> Self { self.events.append(&mut events); self } - pub fn emit(&mut self, event: IbcEvent) { + pub fn emit(&mut self, event: E) { self.events.push(event); } - pub fn with_result(self, result: T) -> HandlerOutput { + pub fn with_result(self, result: T) -> HandlerOutput { HandlerOutput { result, log: self.log, @@ -59,13 +59,21 @@ impl HandlerOutputBuilder { } } - pub fn merge(&mut self, other: HandlerOutput<()>) { + pub fn merge>(&mut self, other: HandlerOutputBuilder<(), Event>) { + let HandlerOutputBuilder { + mut log, events, .. + } = other; + self.log.append(&mut log); + self.events + .append(&mut events.into_iter().map(Into::into).collect()); + } + + pub fn merge_output>(&mut self, other: HandlerOutput<(), Event>) { let HandlerOutput { - mut log, - mut events, - .. + mut log, events, .. 
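The `HandlerOutputBuilder` changes in this hunk make the builder generic over its event type, so a module's output can be merged into the core handler's output through an `Into` bound on the event. Below is a self-contained sketch of that generic merge, with a simplified builder (no result type parameter) and toy `ModuleEvent`/`CoreEvent` types standing in for the crate's `ModuleEvent` and `IbcEvent`.

```rust
// Simplified stand-in: the crate's builder also carries a result type parameter.
#[derive(Debug)]
struct OutputBuilder<E> {
    log: Vec<String>,
    events: Vec<E>,
}

impl<E> OutputBuilder<E> {
    fn new() -> Self {
        Self { log: Vec::new(), events: Vec::new() }
    }

    fn emit(&mut self, event: E) {
        self.events.push(event);
    }

    fn push_log(&mut self, line: impl Into<String>) {
        self.log.push(line.into());
    }

    /// Fold another builder into this one, converting its events on the way in.
    fn merge<F: Into<E>>(&mut self, other: OutputBuilder<F>) {
        let OutputBuilder { mut log, events } = other;
        self.log.append(&mut log);
        self.events.extend(events.into_iter().map(Into::into));
    }
}

// A "module" event that converts into the "core" event type.
#[derive(Debug)]
struct ModuleEvent(String);

#[derive(Debug)]
enum CoreEvent {
    AppModule(ModuleEvent),
}

impl From<ModuleEvent> for CoreEvent {
    fn from(e: ModuleEvent) -> Self {
        CoreEvent::AppModule(e)
    }
}

fn main() {
    let mut core: OutputBuilder<CoreEvent> = OutputBuilder::new();
    core.push_log("dispatching packet message");

    let mut module: OutputBuilder<ModuleEvent> = OutputBuilder::new();
    module.emit(ModuleEvent("fungible_token_packet".to_string()));
    module.push_log("transfer module handled packet");

    // Module output flows back into the core handler output.
    core.merge(module);
    assert_eq!(core.events.len(), 1);
    assert_eq!(core.log.len(), 2);
}
```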
} = other; self.log.append(&mut log); - self.events.append(&mut events); + self.events + .append(&mut events.into_iter().map(Into::into).collect()); } } diff --git a/modules/src/lib.rs b/modules/src/lib.rs index 2f17fcfed7..3a0655460c 100644 --- a/modules/src/lib.rs +++ b/modules/src/lib.rs @@ -52,6 +52,7 @@ extern crate std; mod prelude; pub mod applications; +pub mod bigint; pub mod clients; pub mod core; pub mod events; diff --git a/modules/src/mock/client_def.rs b/modules/src/mock/client_def.rs index 155666847e..728d5be17a 100644 --- a/modules/src/mock/client_def.rs +++ b/modules/src/mock/client_def.rs @@ -13,7 +13,7 @@ use crate::core::ics23_commitment::merkle::apply_prefix; use crate::core::ics24_host::identifier::{ChannelId, ClientId, ConnectionId, PortId}; use crate::core::ics24_host::path::ClientConsensusStatePath; use crate::core::ics24_host::Path; -use crate::core::ics26_routing::context::LightClientContext; +use crate::core::ics26_routing::context::ReaderContext; use crate::mock::client_state::{MockClientState, MockConsensusState}; use crate::mock::header::MockHeader; use crate::prelude::*; @@ -30,7 +30,7 @@ impl ClientDef for MockClient { fn update_state( &self, - _ctx: &dyn LightClientContext, + _ctx: &dyn ReaderContext, _client_id: ClientId, client_state: Self::ClientState, header: Self::Header, @@ -50,7 +50,7 @@ impl ClientDef for MockClient { fn verify_client_consensus_state( &self, - _ctx: &dyn LightClientContext, + _ctx: &dyn ReaderContext, _client_state: &Self::ClientState, _height: Height, prefix: &CommitmentPrefix, @@ -74,7 +74,7 @@ impl ClientDef for MockClient { fn verify_connection_state( &self, - _ctx: &dyn LightClientContext, + _ctx: &dyn ReaderContext, _client_id: &ClientId, _client_state: &Self::ClientState, _height: Height, @@ -89,7 +89,7 @@ impl ClientDef for MockClient { fn verify_channel_state( &self, - _ctx: &dyn LightClientContext, + _ctx: &dyn ReaderContext, _client_id: &ClientId, _client_state: &Self::ClientState, _height: Height, @@ -105,7 +105,7 @@ impl ClientDef for MockClient { fn verify_client_full_state( &self, - _ctx: &dyn LightClientContext, + _ctx: &dyn ReaderContext, _client_state: &Self::ClientState, _height: Height, _prefix: &CommitmentPrefix, @@ -119,7 +119,7 @@ impl ClientDef for MockClient { fn verify_packet_data( &self, - _ctx: &dyn LightClientContext, + _ctx: &dyn ReaderContext, _client_id: &ClientId, _client_state: &Self::ClientState, _height: Height, @@ -136,7 +136,7 @@ impl ClientDef for MockClient { fn verify_packet_acknowledgement( &self, - _ctx: &dyn LightClientContext, + _ctx: &dyn ReaderContext, _client_id: &ClientId, _client_state: &Self::ClientState, _height: Height, @@ -153,7 +153,7 @@ impl ClientDef for MockClient { fn verify_next_sequence_recv( &self, - _ctx: &dyn LightClientContext, + _ctx: &dyn ReaderContext, _client_id: &ClientId, _client_state: &Self::ClientState, _height: Height, @@ -169,7 +169,7 @@ impl ClientDef for MockClient { fn verify_packet_receipt_absence( &self, - _ctx: &dyn LightClientContext, + _ctx: &dyn ReaderContext, _client_id: &ClientId, _client_state: &Self::ClientState, _height: Height, @@ -198,7 +198,7 @@ impl ClientDef for MockClient { fn verify_header( &self, - _ctx: &dyn LightClientContext, + _ctx: &dyn ReaderContext, _client_id: ClientId, _client_state: Self::ClientState, _header: Self::Header, @@ -216,7 +216,7 @@ impl ClientDef for MockClient { fn check_for_misbehaviour( &self, - _ctx: &dyn LightClientContext, + _ctx: &dyn ReaderContext, _client_id: ClientId, _client_state: Self::ClientState, 
_header: Self::Header, diff --git a/modules/src/mock/context.rs b/modules/src/mock/context.rs index 4b47a5594c..777aaa7c7b 100644 --- a/modules/src/mock/context.rs +++ b/modules/src/mock/context.rs @@ -8,12 +8,12 @@ use core::cmp::min; use core::fmt::Debug; use core::ops::{Add, Sub}; use core::time::Duration; +use std::sync::Mutex; use ibc_proto::google::protobuf::Any; use sha2::Digest; use tracing::debug; -use crate::applications::ics20_fungible_token_transfer::context::Ics20Context; use crate::clients::ics07_tendermint::client_state::test_util::get_dummy_tendermint_client_state; use crate::clients::ics11_beefy::client_state::test_util::get_dummy_beefy_state; use crate::clients::ics11_beefy::consensus_state::test_util::get_dummy_beefy_consensus_state; @@ -37,9 +37,9 @@ use crate::core::ics05_port::error::Error; use crate::core::ics23_commitment::commitment::CommitmentPrefix; use crate::core::ics24_host::identifier::{ChainId, ChannelId, ClientId, ConnectionId, PortId}; use crate::core::ics26_routing::context::{ - Ics26Context, LightClientContext, Module, ModuleId, Router, RouterBuilder, + Ics26Context, Module, ModuleId, ReaderContext, Router, RouterBuilder, }; -use crate::core::ics26_routing::handler::{deliver, dispatch}; +use crate::core::ics26_routing::handler::{deliver, dispatch, MsgReceipt}; use crate::core::ics26_routing::msgs::Ics26Envelope; use crate::events::IbcEvent; use crate::mock::client_state::{MockClientRecord, MockClientState, MockConsensusState}; @@ -55,7 +55,7 @@ use crate::Height; pub const DEFAULT_BLOCK_TIME_SECS: u64 = 3; /// A context implementing the dependencies necessary for testing any IBC module. -#[derive(Clone, Debug)] +#[derive(Debug)] pub struct MockContext { /// The type of host chain underlying this mock context. host_chain_type: HostType, @@ -70,60 +70,12 @@ pub struct MockContext { /// blocks, ascending order by their height (latest block is on the last position). history: Vec, - /// The set of all clients, indexed by their id. - clients: BTreeMap, - - /// Tracks the processed time for clients header updates - client_processed_times: BTreeMap<(ClientId, Height), Timestamp>, - - /// Tracks the processed height for the clients - client_processed_heights: BTreeMap<(ClientId, Height), Height>, - - /// Counter for the client identifiers, necessary for `increase_client_counter` and the - /// `client_counter` methods. - client_ids_counter: u64, - - /// Association between client ids and connection ids. - client_connections: BTreeMap, - - /// All the connections in the store. - connections: BTreeMap, - - /// Counter for connection identifiers (see `increase_connection_counter`). - connection_ids_counter: u64, - - /// Association between connection ids and channel ids. - connection_channels: BTreeMap>, - - /// Counter for channel identifiers (see `increase_channel_counter`). - channel_ids_counter: u64, - - /// All the channels in the store. TODO Make new key PortId X ChanneId - channels: BTreeMap<(PortId, ChannelId), ChannelEnd>, - - /// Tracks the sequence number for the next packet to be sent. - next_sequence_send: BTreeMap<(PortId, ChannelId), Sequence>, - - /// Tracks the sequence number for the next packet to be received. - next_sequence_recv: BTreeMap<(PortId, ChannelId), Sequence>, - - /// Tracks the sequence number for the next packet to be acknowledged. 
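Because the mock context now keeps all IBC state behind a shared `Arc<Mutex<...>>` (so a test module can hold the same store), `Clone` is written by hand to deep-copy the store rather than just bumping the `Arc` refcount, as noted in the diff comment. The following standalone sketch shows that pattern with hypothetical `Context`/`Store` names.

```rust
use std::sync::{Arc, Mutex};

#[derive(Clone, Debug, Default)]
struct Store {
    counters: Vec<u64>,
}

#[derive(Debug)]
struct Context {
    chain_id: String,
    store: Arc<Mutex<Store>>,
}

// A derived Clone would make the copy share the same store as the original;
// tests expect value semantics, so clone the data behind the lock instead.
impl Clone for Context {
    fn clone(&self) -> Self {
        let store = {
            let store = self.store.lock().unwrap().clone();
            Arc::new(Mutex::new(store))
        };
        Self {
            chain_id: self.chain_id.clone(),
            store,
        }
    }
}

fn main() {
    let ctx = Context {
        chain_id: "mockgaia-0".to_string(),
        store: Arc::new(Mutex::new(Store::default())),
    };
    let copy = ctx.clone();

    // Mutating the original is not visible through the copy: the stores are independent.
    ctx.store.lock().unwrap().counters.push(1);
    assert!(copy.store.lock().unwrap().counters.is_empty());
}
```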
- next_sequence_ack: BTreeMap<(PortId, ChannelId), Sequence>, - - packet_acknowledgement: BTreeMap<(PortId, ChannelId, Sequence), AcknowledgementCommitment>, - - /// Maps ports to the the module that owns it - port_to_module: BTreeMap, - - /// Constant-size commitments to packets data fields - packet_commitment: BTreeMap<(PortId, ChannelId, Sequence), PacketCommitment>, - - // Used by unordered channel - packet_receipt: BTreeMap<(PortId, ChannelId, Sequence), Receipt>, - /// Average time duration between blocks block_time: Duration, + /// An object that stores all IBC related data. + pub ibc_store: Arc>, + /// ICS26 router impl router: MockRouter, } @@ -142,6 +94,26 @@ impl Default for MockContext { } } +/// A manual clone impl is provided because the tests are oblivious to the fact that the `ibc_store` +/// is a shared ptr. +impl Clone for MockContext { + fn clone(&self) -> Self { + let ibc_store = { + let ibc_store = self.ibc_store.lock().unwrap().clone(); + Arc::new(Mutex::new(ibc_store)) + }; + Self { + host_chain_type: self.host_chain_type, + host_chain_id: self.host_chain_id.clone(), + max_history_size: self.max_history_size, + history: self.history.clone(), + block_time: self.block_time, + ibc_store, + router: self.router.clone(), + } + } +} + /// Implementation of internal interface for use in testing. The methods in this interface should /// _not_ be accessible to any Ics handler. impl MockContext { @@ -195,24 +167,8 @@ impl MockContext { ) }) .collect(), - connections: Default::default(), - client_ids_counter: 0, - clients: Default::default(), - client_processed_times: Default::default(), - client_processed_heights: Default::default(), - client_connections: Default::default(), - channels: Default::default(), - connection_channels: Default::default(), - next_sequence_send: Default::default(), - next_sequence_recv: Default::default(), - next_sequence_ack: Default::default(), - port_to_module: Default::default(), - packet_commitment: Default::default(), - packet_receipt: Default::default(), - packet_acknowledgement: Default::default(), - connection_ids_counter: 0, - channel_ids_counter: 0, block_time, + ibc_store: Arc::new(Mutex::new(MockIbcStore::default())), router: Default::default(), } } @@ -231,7 +187,7 @@ impl MockContext { /// `consensus_state_height` is None, then the client will be initialized with a consensus /// state matching the same height as the client state (`client_state_height`). pub fn with_client_parametrized( - mut self, + self, client_id: &ClientId, client_state_height: Height, client_type: Option, @@ -276,12 +232,16 @@ impl MockContext { client_state, consensus_states, }; - self.clients.insert(client_id.clone(), client_record); + self.ibc_store + .lock() + .unwrap() + .clients + .insert(client_id.clone(), client_record); self } pub fn with_client_parametrized_history( - mut self, + self, client_id: &ClientId, client_state_height: Height, client_type: Option, @@ -353,17 +313,25 @@ impl MockContext { consensus_states, }; - self.clients.insert(client_id.clone(), client_record); + self.ibc_store + .lock() + .unwrap() + .clients + .insert(client_id.clone(), client_record); self } /// Associates a connection to this context. 
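The `with_*` constructors in this hunk no longer take `mut self` and rebuild fields; they lock the shared store, insert the record, and hand `self` back unchanged. A minimal sketch of one such builder method over a shared map follows (hypothetical names, string-typed records for brevity).

```rust
use std::collections::BTreeMap;
use std::sync::{Arc, Mutex};

#[derive(Default)]
struct Store {
    connections: BTreeMap<String, String>,
}

#[derive(Default)]
struct Context {
    store: Arc<Mutex<Store>>,
}

impl Context {
    /// Builder-style association: write through the shared store, then return `self`.
    fn with_connection(self, connection_id: &str, connection_end: &str) -> Self {
        self.store
            .lock()
            .unwrap()
            .connections
            .insert(connection_id.to_string(), connection_end.to_string());
        self
    }
}

fn main() {
    let ctx = Context::default()
        .with_connection("connection-0", "OPEN")
        .with_connection("connection-1", "INIT");

    assert_eq!(ctx.store.lock().unwrap().connections.len(), 2);
}
```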
pub fn with_connection( - mut self, + self, connection_id: ConnectionId, connection_end: ConnectionEnd, ) -> Self { - self.connections.insert(connection_id, connection_end); + self.ibc_store + .lock() + .unwrap() + .connections + .insert(connection_id, connection_end); self } @@ -374,9 +342,10 @@ impl MockContext { chan_id: ChannelId, channel_end: ChannelEnd, ) -> Self { - let mut channels = self.channels.clone(); + let mut channels = self.ibc_store.lock().unwrap().channels.clone(); channels.insert((port_id, chan_id), channel_end); - Self { channels, ..self } + self.ibc_store.lock().unwrap().channels = channels; + self } pub fn with_send_sequence( @@ -385,12 +354,10 @@ impl MockContext { chan_id: ChannelId, seq_number: Sequence, ) -> Self { - let mut next_sequence_send = self.next_sequence_send.clone(); + let mut next_sequence_send = self.ibc_store.lock().unwrap().next_sequence_send.clone(); next_sequence_send.insert((port_id, chan_id), seq_number); - Self { - next_sequence_send, - ..self - } + self.ibc_store.lock().unwrap().next_sequence_send = next_sequence_send; + self } pub fn with_recv_sequence( @@ -399,12 +366,10 @@ impl MockContext { chan_id: ChannelId, seq_number: Sequence, ) -> Self { - let mut next_sequence_recv = self.next_sequence_recv.clone(); + let mut next_sequence_recv = self.ibc_store.lock().unwrap().next_sequence_recv.clone(); next_sequence_recv.insert((port_id, chan_id), seq_number); - Self { - next_sequence_recv, - ..self - } + self.ibc_store.lock().unwrap().next_sequence_recv = next_sequence_recv; + self } pub fn with_ack_sequence( @@ -413,12 +378,10 @@ impl MockContext { chan_id: ChannelId, seq_number: Sequence, ) -> Self { - let mut next_sequence_ack = self.next_sequence_send.clone(); + let mut next_sequence_ack = self.ibc_store.lock().unwrap().next_sequence_send.clone(); next_sequence_ack.insert((port_id, chan_id), seq_number); - Self { - next_sequence_ack, - ..self - } + self.ibc_store.lock().unwrap().next_sequence_ack = next_sequence_ack; + self } pub fn with_height(self, target_height: Height) -> Self { @@ -449,12 +412,10 @@ impl MockContext { seq: Sequence, data: PacketCommitment, ) -> Self { - let mut packet_commitment = self.packet_commitment.clone(); + let mut packet_commitment = self.ibc_store.lock().unwrap().packet_commitment.clone(); packet_commitment.insert((port_id, chan_id, seq), data); - Self { - packet_commitment, - ..self - } + self.ibc_store.lock().unwrap().packet_commitment = packet_commitment; + self } pub fn with_router(self, router: MockRouter) -> Self { @@ -536,15 +497,23 @@ impl MockContext { pub fn add_port(&mut self, port_id: PortId) { let module_id = ModuleId::new(format!("module{}", port_id).into()).unwrap(); - self.port_to_module.insert(port_id, module_id); + self.ibc_store + .lock() + .unwrap() + .port_to_module + .insert(port_id, module_id); } pub fn scope_port_to_module(&mut self, port_id: PortId, module_id: ModuleId) { - self.port_to_module.insert(port_id, module_id); + self.ibc_store + .lock() + .unwrap() + .port_to_module + .insert(port_id, module_id); } pub fn consensus_states(&self, client_id: &ClientId) -> Vec { - self.clients[client_id] + self.ibc_store.lock().unwrap().clients[client_id] .consensus_states .iter() .map(|(k, v)| AnyConsensusStateWithHeight { @@ -554,19 +523,24 @@ impl MockContext { .collect() } - pub fn latest_client_states(&self, client_id: &ClientId) -> &AnyClientState { - self.clients[client_id].client_state.as_ref().unwrap() + pub fn latest_client_states(&self, client_id: &ClientId) -> AnyClientState { + 
self.ibc_store.lock().unwrap().clients[client_id] + .client_state + .as_ref() + .unwrap() + .clone() } pub fn latest_consensus_states( &self, client_id: &ClientId, height: &Height, - ) -> &AnyConsensusState { - self.clients[client_id] + ) -> AnyConsensusState { + self.ibc_store.lock().unwrap().clients[client_id] .consensus_states .get(height) .unwrap() + .clone() } #[inline] @@ -576,6 +550,65 @@ impl MockContext { .expect("history cannot be empty") .height() } + + pub fn ibc_store_share(&self) -> Arc> { + self.ibc_store.clone() + } +} + +/// An object that stores all IBC related data. +#[derive(Clone, Debug, Default)] +pub struct MockIbcStore { + /// The set of all clients, indexed by their id. + pub clients: BTreeMap, + + /// Tracks the processed time for clients header updates + pub client_processed_times: BTreeMap<(ClientId, Height), Timestamp>, + + /// Tracks the processed height for the clients + pub client_processed_heights: BTreeMap<(ClientId, Height), Height>, + + /// Counter for the client identifiers, necessary for `increase_client_counter` and the + /// `client_counter` methods. + pub client_ids_counter: u64, + + /// Association between client ids and connection ids. + pub client_connections: BTreeMap, + + /// All the connections in the store. + pub connections: BTreeMap, + + /// Counter for connection identifiers (see `increase_connection_counter`). + pub connection_ids_counter: u64, + + /// Association between connection ids and channel ids. + pub connection_channels: BTreeMap>, + + /// Counter for channel identifiers (see `increase_channel_counter`). + pub channel_ids_counter: u64, + + /// All the channels in the store. TODO Make new key PortId X ChanneId + pub channels: BTreeMap<(PortId, ChannelId), ChannelEnd>, + + /// Tracks the sequence number for the next packet to be sent. + pub next_sequence_send: BTreeMap<(PortId, ChannelId), Sequence>, + + /// Tracks the sequence number for the next packet to be received. + pub next_sequence_recv: BTreeMap<(PortId, ChannelId), Sequence>, + + /// Tracks the sequence number for the next packet to be acknowledged. 
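Accessors such as `latest_client_states` now return owned values: a reference into the store cannot outlive the `MutexGuard`, so the record is cloned out before the guard is dropped. A standalone illustration with simplified types:

```rust
use std::collections::BTreeMap;
use std::sync::{Arc, Mutex};

#[derive(Clone, Debug, PartialEq)]
struct ClientState {
    latest_height: u64,
}

struct Context {
    clients: Arc<Mutex<BTreeMap<String, ClientState>>>,
}

impl Context {
    // Returning `&ClientState` here would not compile: the reference would point
    // into data guarded by the MutexGuard, which is dropped when this fn returns.
    fn latest_client_state(&self, client_id: &str) -> ClientState {
        self.clients.lock().unwrap()[client_id].clone()
    }
}

fn main() {
    let ctx = Context {
        clients: Arc::new(Mutex::new(BTreeMap::from([(
            "07-tendermint-0".to_string(),
            ClientState { latest_height: 42 },
        )]))),
    };

    let state = ctx.latest_client_state("07-tendermint-0");
    assert_eq!(state, ClientState { latest_height: 42 });
}
```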
+ pub next_sequence_ack: BTreeMap<(PortId, ChannelId), Sequence>, + + pub packet_acknowledgement: BTreeMap<(PortId, ChannelId, Sequence), AcknowledgementCommitment>, + + /// Maps ports to the the module that owns it + pub port_to_module: BTreeMap, + + /// Constant-size commitments to packets data fields + pub packet_commitment: BTreeMap<(PortId, ChannelId, Sequence), PacketCommitment>, + + // Used by unordered channel + pub packet_receipt: BTreeMap<(PortId, ChannelId, Sequence), Receipt>, } #[derive(Default)] @@ -609,7 +642,7 @@ impl Router for MockRouter { } } -impl LightClientContext for MockContext {} +impl ReaderContext for MockContext {} impl Ics26Context for MockContext { type Router = MockRouter; @@ -623,11 +656,9 @@ impl Ics26Context for MockContext { } } -impl Ics20Context for MockContext {} - impl PortReader for MockContext { fn lookup_module_by_port(&self, port_id: &PortId) -> Result { - match self.port_to_module.get(port_id) { + match self.ibc_store.lock().unwrap().port_to_module.get(port_id) { Some(mod_id) => Ok(mod_id.clone()), None => Err(Ics05Error::unknown_port(port_id.clone())), } @@ -636,7 +667,7 @@ impl PortReader for MockContext { impl ChannelReader for MockContext { fn channel_end(&self, pcid: &(PortId, ChannelId)) -> Result { - match self.channels.get(pcid) { + match self.ibc_store.lock().unwrap().channels.get(pcid) { Some(channel_end) => Ok(channel_end.clone()), None => Err(Ics04Error::channel_not_found(pcid.0.clone(), pcid.1)), } @@ -646,7 +677,7 @@ impl ChannelReader for MockContext { &self, cid: &ConnectionId, ) -> Result, Ics04Error> { - match self.connection_channels.get(cid) { + match self.ibc_store.lock().unwrap().connection_channels.get(cid) { Some(pcid) => Ok(pcid.clone()), None => Err(Ics04Error::missing_channel()), } @@ -656,7 +687,13 @@ impl ChannelReader for MockContext { &self, port_channel_id: &(PortId, ChannelId), ) -> Result { - match self.next_sequence_send.get(port_channel_id) { + match self + .ibc_store + .lock() + .unwrap() + .next_sequence_send + .get(port_channel_id) + { Some(sequence) => Ok(*sequence), None => Err(Ics04Error::missing_next_send_seq(port_channel_id.clone())), } @@ -666,7 +703,13 @@ impl ChannelReader for MockContext { &self, port_channel_id: &(PortId, ChannelId), ) -> Result { - match self.next_sequence_recv.get(port_channel_id) { + match self + .ibc_store + .lock() + .unwrap() + .next_sequence_recv + .get(port_channel_id) + { Some(sequence) => Ok(*sequence), None => Err(Ics04Error::missing_next_recv_seq(port_channel_id.clone())), } @@ -676,7 +719,13 @@ impl ChannelReader for MockContext { &self, port_channel_id: &(PortId, ChannelId), ) -> Result { - match self.next_sequence_ack.get(port_channel_id) { + match self + .ibc_store + .lock() + .unwrap() + .next_sequence_ack + .get(port_channel_id) + { Some(sequence) => Ok(*sequence), None => Err(Ics04Error::missing_next_ack_seq(port_channel_id.clone())), } @@ -686,7 +735,7 @@ impl ChannelReader for MockContext { &self, key: &(PortId, ChannelId, Sequence), ) -> Result { - match self.packet_commitment.get(key) { + match self.ibc_store.lock().unwrap().packet_commitment.get(key) { Some(commitment) => Ok(commitment.clone()), None => Err(Ics04Error::packet_commitment_not_found(key.2)), } @@ -696,7 +745,7 @@ impl ChannelReader for MockContext { &self, key: &(PortId, ChannelId, Sequence), ) -> Result { - match self.packet_receipt.get(key) { + match self.ibc_store.lock().unwrap().packet_receipt.get(key) { Some(receipt) => Ok(receipt.clone()), None => 
Err(Ics04Error::packet_receipt_not_found(key.2)), } @@ -706,7 +755,13 @@ impl ChannelReader for MockContext { &self, key: &(PortId, ChannelId, Sequence), ) -> Result { - match self.packet_acknowledgement.get(key) { + match self + .ibc_store + .lock() + .unwrap() + .packet_acknowledgement + .get(key) + { Some(ack) => Ok(ack.clone()), None => Err(Ics04Error::packet_acknowledgement_not_found(key.2)), } @@ -722,6 +777,9 @@ impl ChannelReader for MockContext { height: Height, ) -> Result { match self + .ibc_store + .lock() + .unwrap() .client_processed_times .get(&(client_id.clone(), height)) { @@ -739,6 +797,9 @@ impl ChannelReader for MockContext { height: Height, ) -> Result { match self + .ibc_store + .lock() + .unwrap() .client_processed_heights .get(&(client_id.clone(), height)) { @@ -751,7 +812,7 @@ impl ChannelReader for MockContext { } fn channel_counter(&self) -> Result { - Ok(self.channel_ids_counter) + Ok(self.ibc_store.lock().unwrap().channel_ids_counter) } fn max_expected_time_per_block(&self) -> Duration { @@ -765,7 +826,11 @@ impl ChannelKeeper for MockContext { key: (PortId, ChannelId, Sequence), commitment: PacketCommitment, ) -> Result<(), Ics04Error> { - self.packet_commitment.insert(key, commitment); + self.ibc_store + .lock() + .unwrap() + .packet_commitment + .insert(key, commitment); Ok(()) } @@ -774,7 +839,11 @@ impl ChannelKeeper for MockContext { key: (PortId, ChannelId, Sequence), ack_commitment: AcknowledgementCommitment, ) -> Result<(), Ics04Error> { - self.packet_acknowledgement.insert(key, ack_commitment); + self.ibc_store + .lock() + .unwrap() + .packet_acknowledgement + .insert(key, ack_commitment); Ok(()) } @@ -782,7 +851,11 @@ impl ChannelKeeper for MockContext { &mut self, key: (PortId, ChannelId, Sequence), ) -> Result<(), Ics04Error> { - self.packet_acknowledgement.remove(&key); + self.ibc_store + .lock() + .unwrap() + .packet_acknowledgement + .remove(&key); Ok(()) } @@ -791,7 +864,10 @@ impl ChannelKeeper for MockContext { cid: ConnectionId, port_channel_id: &(PortId, ChannelId), ) -> Result<(), Ics04Error> { - self.connection_channels + self.ibc_store + .lock() + .unwrap() + .connection_channels .entry(cid) .or_insert_with(Vec::new) .push(port_channel_id.clone()); @@ -803,7 +879,11 @@ impl ChannelKeeper for MockContext { port_channel_id: (PortId, ChannelId), channel_end: &ChannelEnd, ) -> Result<(), Ics04Error> { - self.channels.insert(port_channel_id, channel_end.clone()); + self.ibc_store + .lock() + .unwrap() + .channels + .insert(port_channel_id, channel_end.clone()); Ok(()) } @@ -812,7 +892,11 @@ impl ChannelKeeper for MockContext { port_channel_id: (PortId, ChannelId), seq: Sequence, ) -> Result<(), Ics04Error> { - self.next_sequence_send.insert(port_channel_id, seq); + self.ibc_store + .lock() + .unwrap() + .next_sequence_send + .insert(port_channel_id, seq); Ok(()) } @@ -821,7 +905,11 @@ impl ChannelKeeper for MockContext { port_channel_id: (PortId, ChannelId), seq: Sequence, ) -> Result<(), Ics04Error> { - self.next_sequence_recv.insert(port_channel_id, seq); + self.ibc_store + .lock() + .unwrap() + .next_sequence_recv + .insert(port_channel_id, seq); Ok(()) } @@ -830,19 +918,27 @@ impl ChannelKeeper for MockContext { port_channel_id: (PortId, ChannelId), seq: Sequence, ) -> Result<(), Ics04Error> { - self.next_sequence_ack.insert(port_channel_id, seq); + self.ibc_store + .lock() + .unwrap() + .next_sequence_ack + .insert(port_channel_id, seq); Ok(()) } fn increase_channel_counter(&mut self) { - self.channel_ids_counter += 1; + 
self.ibc_store.lock().unwrap().channel_ids_counter += 1; } fn delete_packet_commitment( &mut self, key: (PortId, ChannelId, Sequence), ) -> Result<(), Ics04Error> { - self.packet_commitment.remove(&key); + self.ibc_store + .lock() + .unwrap() + .packet_commitment + .remove(&key); Ok(()) } @@ -851,14 +947,26 @@ impl ChannelKeeper for MockContext { key: (PortId, ChannelId, Sequence), receipt: Receipt, ) -> Result<(), Ics04Error> { - self.packet_receipt.insert(key, receipt); + self.ibc_store + .lock() + .unwrap() + .packet_receipt + .insert(key, receipt); + Ok(()) + } + + fn store_packet( + &mut self, + _key: (PortId, ChannelId, Sequence), + _packet: crate::core::ics04_channel::packet::Packet, + ) -> Result<(), Ics04Error> { Ok(()) } } impl ConnectionReader for MockContext { fn connection_end(&self, cid: &ConnectionId) -> Result { - match self.connections.get(cid) { + match self.ibc_store.lock().unwrap().connections.get(cid) { Some(connection_end) => Ok(connection_end.clone()), None => Err(Ics03Error::connection_not_found(cid.clone())), } @@ -874,7 +982,7 @@ impl ConnectionReader for MockContext { } fn connection_counter(&self) -> Result { - Ok(self.connection_ids_counter) + Ok(self.ibc_store.lock().unwrap().connection_ids_counter) } } @@ -884,7 +992,10 @@ impl ConnectionKeeper for MockContext { connection_id: ConnectionId, connection_end: &ConnectionEnd, ) -> Result<(), Ics03Error> { - self.connections + self.ibc_store + .lock() + .unwrap() + .connections .insert(connection_id, connection_end.clone()); Ok(()) } @@ -894,26 +1005,29 @@ impl ConnectionKeeper for MockContext { connection_id: ConnectionId, client_id: &ClientId, ) -> Result<(), Ics03Error> { - self.client_connections + self.ibc_store + .lock() + .unwrap() + .client_connections .insert(client_id.clone(), connection_id); Ok(()) } fn increase_connection_counter(&mut self) { - self.connection_ids_counter += 1; + self.ibc_store.lock().unwrap().connection_ids_counter += 1; } } impl ClientReader for MockContext { fn client_type(&self, client_id: &ClientId) -> Result { - match self.clients.get(client_id) { + match self.ibc_store.lock().unwrap().clients.get(client_id) { Some(client_record) => Ok(client_record.client_type), None => Err(Ics02Error::client_not_found(client_id.clone())), } } fn client_state(&self, client_id: &ClientId) -> Result { - match self.clients.get(client_id) { + match self.ibc_store.lock().unwrap().clients.get(client_id) { Some(client_record) => client_record .client_state .clone() @@ -927,7 +1041,7 @@ impl ClientReader for MockContext { client_id: &ClientId, height: Height, ) -> Result { - match self.clients.get(client_id) { + match self.ibc_store.lock().unwrap().clients.get(client_id) { Some(client_record) => match client_record.consensus_states.get(&height) { Some(consensus_state) => Ok(consensus_state.clone()), None => Err(Ics02Error::consensus_state_not_found( @@ -948,7 +1062,8 @@ impl ClientReader for MockContext { client_id: &ClientId, height: Height, ) -> Result, Ics02Error> { - let client_record = self + let ibc_store = self.ibc_store.lock().unwrap(); + let client_record = ibc_store .clients .get(client_id) .ok_or_else(|| Ics02Error::client_not_found(client_id.clone()))?; @@ -975,7 +1090,8 @@ impl ClientReader for MockContext { client_id: &ClientId, height: Height, ) -> Result, Ics02Error> { - let client_record = self + let ibc_store = self.ibc_store.lock().unwrap(); + let client_record = ibc_store .clients .get(client_id) .ok_or_else(|| Ics02Error::client_not_found(client_id.clone()))?; @@ -1017,7 +1133,7 @@ 
impl ClientReader for MockContext { } fn client_counter(&self) -> Result { - Ok(self.client_ids_counter) + Ok(self.ibc_store.lock().unwrap().client_ids_counter) } } @@ -1027,11 +1143,15 @@ impl ClientKeeper for MockContext { client_id: ClientId, client_type: ClientType, ) -> Result<(), Ics02Error> { - let mut client_record = self.clients.entry(client_id).or_insert(MockClientRecord { - client_type, - consensus_states: Default::default(), - client_state: Default::default(), - }); + let mut ibc_store = self.ibc_store.lock().unwrap(); + let client_record = ibc_store + .clients + .entry(client_id) + .or_insert(MockClientRecord { + client_type, + consensus_states: Default::default(), + client_state: Default::default(), + }); client_record.client_type = client_type; Ok(()) @@ -1042,11 +1162,15 @@ impl ClientKeeper for MockContext { client_id: ClientId, client_state: AnyClientState, ) -> Result<(), Ics02Error> { - let mut client_record = self.clients.entry(client_id).or_insert(MockClientRecord { - client_type: client_state.client_type(), - consensus_states: Default::default(), - client_state: Default::default(), - }); + let mut ibc_store = self.ibc_store.lock().unwrap(); + let client_record = ibc_store + .clients + .entry(client_id) + .or_insert(MockClientRecord { + client_type: client_state.client_type(), + consensus_states: Default::default(), + client_state: Default::default(), + }); client_record.client_state = Some(client_state); Ok(()) @@ -1058,11 +1182,15 @@ impl ClientKeeper for MockContext { height: Height, consensus_state: AnyConsensusState, ) -> Result<(), Ics02Error> { - let client_record = self.clients.entry(client_id).or_insert(MockClientRecord { - client_type: ClientType::Mock, - consensus_states: Default::default(), - client_state: Default::default(), - }); + let mut ibc_store = self.ibc_store.lock().unwrap(); + let client_record = ibc_store + .clients + .entry(client_id) + .or_insert(MockClientRecord { + client_type: ClientType::Mock, + consensus_states: Default::default(), + client_state: Default::default(), + }); client_record .consensus_states @@ -1071,7 +1199,7 @@ impl ClientKeeper for MockContext { } fn increase_client_counter(&mut self) { - self.client_ids_counter += 1 + self.ibc_store.lock().unwrap().client_ids_counter += 1 } fn store_update_time( @@ -1081,6 +1209,9 @@ impl ClientKeeper for MockContext { timestamp: Timestamp, ) -> Result<(), Ics02Error> { let _ = self + .ibc_store + .lock() + .unwrap() .client_processed_times .insert((client_id, height), timestamp); Ok(()) @@ -1093,6 +1224,9 @@ impl ClientKeeper for MockContext { host_height: Height, ) -> Result<(), Ics02Error> { let _ = self + .ibc_store + .lock() + .unwrap() .client_processed_heights .insert((client_id, height), host_height); Ok(()) @@ -1118,7 +1252,7 @@ impl Ics18Context for MockContext { // Forward call to Ics26 delivery method. let mut all_events = vec![]; for msg in msgs { - let (mut events, _) = + let MsgReceipt { mut events, .. 
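The `ClientKeeper` methods above now take the lock once, upsert the client record with the `entry` API, and then update the relevant field on the returned record. A small standalone sketch of that upsert-then-update pattern (string-typed records standing in for `MockClientRecord`):

```rust
use std::collections::BTreeMap;
use std::sync::{Arc, Mutex};

#[derive(Debug, Default, Clone)]
struct ClientRecord {
    client_type: String,
    consensus_states: BTreeMap<u64, String>,
}

struct Context {
    clients: Arc<Mutex<BTreeMap<String, ClientRecord>>>,
}

impl Context {
    fn store_consensus_state(&mut self, client_id: &str, height: u64, state: &str) {
        // Hold the guard in a binding so the borrowed record stays valid while we update it.
        let mut store = self.clients.lock().unwrap();
        let record = store
            .entry(client_id.to_string())
            .or_insert_with(ClientRecord::default);
        record.consensus_states.insert(height, state.to_string());
    }
}

fn main() {
    let mut ctx = Context {
        clients: Arc::new(Mutex::new(BTreeMap::new())),
    };

    // The first call creates the record, the second reuses it.
    ctx.store_consensus_state("07-tendermint-0", 1, "cs-1");
    ctx.store_consensus_state("07-tendermint-0", 2, "cs-2");

    let store = ctx.clients.lock().unwrap();
    assert_eq!(store["07-tendermint-0"].consensus_states.len(), 2);
}
```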
} = deliver::<_, Crypto>(self, msg).map_err(Ics18Error::transaction_failed)?; all_events.append(&mut events); } @@ -1144,13 +1278,15 @@ mod tests { use crate::core::ics24_host::identifier::ChainId; use crate::core::ics24_host::identifier::{ChannelId, ConnectionId, PortId}; use crate::core::ics26_routing::context::{ - Acknowledgement, Module, ModuleId, ModuleOutput, OnRecvPacketAck, Router, RouterBuilder, + Acknowledgement, Module, ModuleId, ModuleOutputBuilder, OnRecvPacketAck, Router, + RouterBuilder, }; use crate::mock::context::MockContext; use crate::mock::context::MockRouterBuilder; use crate::mock::host::HostType; use crate::prelude::*; use crate::signer::Signer; + use crate::test_utils::get_dummy_bech32_account; use crate::Height; #[test] @@ -1314,12 +1450,13 @@ mod tests { impl Module for FooModule { fn on_chan_open_try( &mut self, - _output: &mut ModuleOutput, + _output: &mut ModuleOutputBuilder, _order: Order, _connection_hops: &[ConnectionId], _port_id: &PortId, _channel_id: &ChannelId, _counterparty: &Counterparty, + _version: &Version, counterparty_version: &Version, ) -> Result { Ok(counterparty_version.clone()) @@ -1327,7 +1464,7 @@ mod tests { fn on_recv_packet( &self, - _output: &mut ModuleOutput, + _output: &mut ModuleOutputBuilder, _packet: &Packet, _relayer: &Signer, ) -> OnRecvPacketAck { @@ -1336,6 +1473,7 @@ mod tests { Box::new(|module| { let module = module.downcast_mut::().unwrap(); module.counter += 1; + Ok(()) }), ) } @@ -1347,12 +1485,13 @@ mod tests { impl Module for BarModule { fn on_chan_open_try( &mut self, - _output: &mut ModuleOutput, + _output: &mut ModuleOutputBuilder, _order: Order, _connection_hops: &[ConnectionId], _port_id: &PortId, _channel_id: &ChannelId, _counterparty: &Counterparty, + _version: &Version, counterparty_version: &Version, ) -> Result { Ok(counterparty_version.clone()) @@ -1378,9 +1517,9 @@ mod tests { let module_id = ModuleId::from_str(module_id).unwrap(); let m = ctx.router.get_route_mut(&module_id).unwrap(); let result = m.on_recv_packet( - &mut ModuleOutput::builder().with_result(()), + &mut ModuleOutputBuilder::new(), &Packet::default(), - &Signer::new(""), + &get_dummy_bech32_account().parse().unwrap(), ); (module_id, result) }; @@ -1398,7 +1537,7 @@ mod tests { _ => None, }) .for_each(|(mid, write_fn)| { - write_fn(ctx.router.get_route_mut(&mid).unwrap().as_any_mut()) + write_fn(ctx.router.get_route_mut(&mid).unwrap().as_any_mut()).unwrap() }); } } diff --git a/modules/src/serializers.rs b/modules/src/serializers.rs index be74530812..bd6f346336 100644 --- a/modules/src/serializers.rs +++ b/modules/src/serializers.rs @@ -9,3 +9,30 @@ where let hex = Hex::upper_case().encode_to_string(data).unwrap(); hex.serialize(serializer) } + +pub mod serde_string { + use alloc::string::String; + use core::fmt::Display; + use core::str::FromStr; + + use serde::{de, Deserialize, Deserializer, Serializer}; + + pub fn serialize(value: &T, serializer: S) -> Result + where + T: Display, + S: Serializer, + { + serializer.collect_str(value) + } + + pub fn deserialize<'de, T, D>(deserializer: D) -> Result + where + T: FromStr, + T::Err: Display, + D: Deserializer<'de>, + { + String::deserialize(deserializer)? 
+ .parse() + .map_err(de::Error::custom) + } +} diff --git a/modules/src/signer.rs b/modules/src/signer.rs index a5e4ea2432..21c62bf116 100644 --- a/modules/src/signer.rs +++ b/modules/src/signer.rs @@ -1,36 +1,36 @@ -use crate::prelude::*; -use core::{convert::Infallible, fmt::Display, str::FromStr}; -use serde::{Deserialize, Serialize}; - -#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)] -pub struct Signer(String); +use core::str::FromStr; -impl Signer { - pub fn new(s: impl ToString) -> Self { - Self(s.to_string()) - } +use crate::prelude::*; - pub fn as_str(&self) -> &str { - &self.0 - } -} +use derive_more::Display; +use flex_error::define_error; +use serde::{Deserialize, Serialize}; -impl Display for Signer { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - write!(f, "{}", self.0) +define_error! { + #[derive(Debug, PartialEq, Eq)] + SignerError { + EmptySigner + | _ | { "signer cannot be empty" }, } } -impl From for Signer { - fn from(s: String) -> Self { - Self(s) - } -} +#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize, Display)] +pub struct Signer(String); impl FromStr for Signer { - type Err = Infallible; + type Err = SignerError; fn from_str(s: &str) -> Result { - Ok(Self(s.to_string())) + let s = s.to_string(); + if s.trim().is_empty() { + return Err(SignerError::empty_signer()); + } + Ok(Self(s)) + } +} + +impl AsRef for Signer { + fn as_ref(&self) -> &str { + self.0.as_str() } } diff --git a/modules/src/test_utils.rs b/modules/src/test_utils.rs index 4038cd3e72..23bc3df164 100644 --- a/modules/src/test_utils.rs +++ b/modules/src/test_utils.rs @@ -1,6 +1,9 @@ -#![allow(dead_code)] +use std::sync::{Arc, Mutex}; +use std::time::Duration; use crate::clients::host_functions::HostFunctionsProvider; +use crate::core::ics02_client::context::ClientReader; +use crate::core::ics03_connection::context::ConnectionReader; use crate::prelude::*; use sp_core::keccak_256; use sp_trie::LayoutV0; @@ -8,12 +11,27 @@ use tendermint::{block, consensus, evidence, public_key::Algorithm}; use crate::clients::ics11_beefy::error::Error as BeefyError; use crate::core::ics02_client::error::Error as Ics02Error; -use crate::core::ics04_channel::channel::{Counterparty, Order}; + +use crate::applications::transfer::context::{BankKeeper, Ics20Context, Ics20Keeper, Ics20Reader}; +use crate::applications::transfer::{error::Error as Ics20Error, PrefixedCoin}; +use crate::core::ics02_client::client_consensus::AnyConsensusState; +use crate::core::ics02_client::client_state::AnyClientState; +use crate::core::ics03_connection::connection::ConnectionEnd; +use crate::core::ics03_connection::error::Error as Ics03Error; +use crate::core::ics04_channel::channel::{ChannelEnd, Counterparty, Order}; +use crate::core::ics04_channel::commitment::{AcknowledgementCommitment, PacketCommitment}; +use crate::core::ics04_channel::context::{ChannelKeeper, ChannelReader}; use crate::core::ics04_channel::error::Error; +use crate::core::ics04_channel::packet::{Receipt, Sequence}; use crate::core::ics04_channel::Version; -use crate::core::ics24_host::identifier::{ChannelId, ConnectionId, PortId}; -use crate::core::ics26_routing::context::{Module, ModuleOutput}; +use crate::core::ics05_port::context::PortReader; +use crate::core::ics05_port::error::Error as PortError; +use crate::core::ics24_host::identifier::{ChannelId, ClientId, ConnectionId, PortId}; +use crate::core::ics26_routing::context::{Module, ModuleId, ModuleOutputBuilder, 
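The new `serde_string` helper (de)serializes a value through its `Display`/`FromStr` impls, which pairs naturally with the stricter `Signer` whose `FromStr` now rejects empty strings. The sketch below reproduces both ideas in a self-contained form: a local `serde_string`-style module, a simplified `Signer` with a `String` error instead of the crate's `SignerError`, and a hypothetical `Payload` struct using `#[serde(with = "...")]`.

```rust
use core::fmt::{self, Display};
use core::str::FromStr;
use serde::{Deserialize, Serialize};

// Serialize/deserialize a field via its Display/FromStr impls.
mod serde_string {
    use core::fmt::Display;
    use core::str::FromStr;
    use serde::{de, Deserialize, Deserializer, Serializer};

    pub fn serialize<T: Display, S: Serializer>(value: &T, s: S) -> Result<S::Ok, S::Error> {
        s.collect_str(value)
    }

    pub fn deserialize<'de, T, D>(deserializer: D) -> Result<T, D::Error>
    where
        T: FromStr,
        T::Err: Display,
        D: Deserializer<'de>,
    {
        String::deserialize(deserializer)?
            .parse()
            .map_err(de::Error::custom)
    }
}

// Simplified Signer: construction goes through FromStr, which rejects blank input.
#[derive(Debug, Clone, PartialEq)]
struct Signer(String);

impl FromStr for Signer {
    type Err = String;
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        if s.trim().is_empty() {
            return Err("signer cannot be empty".to_string());
        }
        Ok(Signer(s.to_string()))
    }
}

impl Display for Signer {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str(&self.0)
    }
}

// Hypothetical payload showing the field attribute in use.
#[derive(Debug, Serialize, Deserialize)]
struct Payload {
    #[serde(with = "serde_string")]
    sender: Signer,
}

fn main() {
    assert!("".parse::<Signer>().is_err());

    let data = Payload {
        sender: "cosmos1wxeyh7zgn4tctjzs0vtqpc6p5cxq5t2muzl7ng".parse().unwrap(),
    };
    let json = serde_json::to_string(&data).unwrap();
    assert_eq!(json, r#"{"sender":"cosmos1wxeyh7zgn4tctjzs0vtqpc6p5cxq5t2muzl7ng"}"#);

    let back: Payload = serde_json::from_str(&json).unwrap();
    assert_eq!(back.sender, data.sender);
}
```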
ReaderContext}; +use crate::mock::context::MockIbcStore; use crate::signer::Signer; +use crate::timestamp::Timestamp; +use crate::Height; // Needed in mocks. pub fn default_consensus_params() -> consensus::Params { @@ -49,18 +67,27 @@ pub fn get_dummy_bech32_account() -> String { "cosmos1wxeyh7zgn4tctjzs0vtqpc6p5cxq5t2muzl7ng".to_string() } -#[derive(Debug, Default)] -pub struct DummyModule; +#[derive(Debug)] +pub struct DummyTransferModule { + ibc_store: Arc>, +} + +impl DummyTransferModule { + pub fn new(ibc_store: Arc>) -> Self { + Self { ibc_store } + } +} -impl Module for DummyModule { +impl Module for DummyTransferModule { fn on_chan_open_try( &mut self, - _output: &mut ModuleOutput, + _output: &mut ModuleOutputBuilder, _order: Order, _connection_hops: &[ConnectionId], _port_id: &PortId, _channel_id: &ChannelId, _counterparty: &Counterparty, + _version: &Version, counterparty_version: &Version, ) -> Result { Ok(counterparty_version.clone()) @@ -84,7 +111,7 @@ impl HostFunctionsProvider for Crypto { .map(|val| val.to_vec()) } - fn ed25519_verify(_signature: &[u8; 64], _value: &[u8; 32], public_key: &[u8; 32]) -> bool { + fn ed25519_verify(_signature: &[u8; 64], _value: &[u8; 32], _public_key: &[u8]) -> bool { true } @@ -117,3 +144,349 @@ impl HostFunctionsProvider for Crypto { sp_io::hashing::sha2_256(data) } } + +impl Ics20Keeper for DummyTransferModule { + type AccountId = Signer; +} + +impl ChannelKeeper for DummyTransferModule { + fn store_packet_commitment( + &mut self, + key: (PortId, ChannelId, Sequence), + commitment: PacketCommitment, + ) -> Result<(), Error> { + self.ibc_store + .lock() + .unwrap() + .packet_commitment + .insert(key, commitment); + Ok(()) + } + + fn delete_packet_commitment( + &mut self, + _key: (PortId, ChannelId, Sequence), + ) -> Result<(), Error> { + unimplemented!() + } + + fn store_packet_receipt( + &mut self, + _key: (PortId, ChannelId, Sequence), + _receipt: Receipt, + ) -> Result<(), Error> { + unimplemented!() + } + + fn store_packet_acknowledgement( + &mut self, + _key: (PortId, ChannelId, Sequence), + _ack: AcknowledgementCommitment, + ) -> Result<(), Error> { + unimplemented!() + } + + fn delete_packet_acknowledgement( + &mut self, + _key: (PortId, ChannelId, Sequence), + ) -> Result<(), Error> { + unimplemented!() + } + + fn store_connection_channels( + &mut self, + _conn_id: ConnectionId, + _port_channel_id: &(PortId, ChannelId), + ) -> Result<(), Error> { + unimplemented!() + } + + fn store_channel( + &mut self, + _port_channel_id: (PortId, ChannelId), + _channel_end: &ChannelEnd, + ) -> Result<(), Error> { + unimplemented!() + } + + fn store_next_sequence_send( + &mut self, + port_channel_id: (PortId, ChannelId), + seq: Sequence, + ) -> Result<(), Error> { + self.ibc_store + .lock() + .unwrap() + .next_sequence_send + .insert(port_channel_id, seq); + Ok(()) + } + + fn store_next_sequence_recv( + &mut self, + _port_channel_id: (PortId, ChannelId), + _seq: Sequence, + ) -> Result<(), Error> { + unimplemented!() + } + + fn store_next_sequence_ack( + &mut self, + _port_channel_id: (PortId, ChannelId), + _seq: Sequence, + ) -> Result<(), Error> { + unimplemented!() + } + + fn increase_channel_counter(&mut self) { + unimplemented!() + } + + fn store_packet( + &mut self, + _key: (PortId, ChannelId, Sequence), + _packet: crate::core::ics04_channel::packet::Packet, + ) -> Result<(), Error> { + Ok(()) + } +} + +impl PortReader for DummyTransferModule { + fn lookup_module_by_port(&self, _port_id: &PortId) -> Result { + unimplemented!() + } +} + +impl 
BankKeeper for DummyTransferModule { + type AccountId = Signer; + + fn send_coins( + &mut self, + _from: &Self::AccountId, + _to: &Self::AccountId, + _amt: &PrefixedCoin, + ) -> Result<(), Ics20Error> { + Ok(()) + } + + fn mint_coins( + &mut self, + _account: &Self::AccountId, + _amt: &PrefixedCoin, + ) -> Result<(), Ics20Error> { + Ok(()) + } + + fn burn_coins( + &mut self, + _account: &Self::AccountId, + _amt: &PrefixedCoin, + ) -> Result<(), Ics20Error> { + Ok(()) + } +} + +impl Ics20Reader for DummyTransferModule { + type AccountId = Signer; + + fn get_port(&self) -> Result { + Ok(PortId::transfer()) + } + + fn is_send_enabled(&self) -> bool { + true + } + + fn is_receive_enabled(&self) -> bool { + true + } +} + +impl ConnectionReader for DummyTransferModule { + fn connection_end(&self, cid: &ConnectionId) -> Result { + match self.ibc_store.lock().unwrap().connections.get(cid) { + Some(connection_end) => Ok(connection_end.clone()), + None => Err(Ics03Error::connection_not_found(cid.clone())), + } + } + + fn host_oldest_height(&self) -> Height { + todo!() + } + + fn commitment_prefix(&self) -> crate::core::ics23_commitment::commitment::CommitmentPrefix { + todo!() + } + + fn connection_counter(&self) -> Result { + todo!() + } +} + +impl ClientReader for DummyTransferModule { + fn client_state(&self, client_id: &ClientId) -> Result { + match self.ibc_store.lock().unwrap().clients.get(client_id) { + Some(client_record) => client_record + .client_state + .clone() + .ok_or_else(|| Ics02Error::client_not_found(client_id.clone())), + None => Err(Ics02Error::client_not_found(client_id.clone())), + } + } + + fn host_height(&self) -> Height { + Height::zero() + } + + fn host_consensus_state(&self, _height: Height) -> Result { + unimplemented!() + } + + fn consensus_state( + &self, + client_id: &ClientId, + height: Height, + ) -> Result { + match self.ibc_store.lock().unwrap().clients.get(client_id) { + Some(client_record) => match client_record.consensus_states.get(&height) { + Some(consensus_state) => Ok(consensus_state.clone()), + None => Err(Ics02Error::consensus_state_not_found( + client_id.clone(), + height, + )), + }, + None => Err(Ics02Error::consensus_state_not_found( + client_id.clone(), + height, + )), + } + } + + fn client_type( + &self, + _client_id: &ClientId, + ) -> Result { + todo!() + } + + fn next_consensus_state( + &self, + _client_id: &ClientId, + _height: Height, + ) -> Result, Ics02Error> { + todo!() + } + + fn prev_consensus_state( + &self, + _client_id: &ClientId, + _height: Height, + ) -> Result, Ics02Error> { + todo!() + } + + fn host_timestamp(&self) -> Timestamp { + todo!() + } + + fn client_counter(&self) -> Result { + todo!() + } +} + +impl ChannelReader for DummyTransferModule { + fn channel_end(&self, pcid: &(PortId, ChannelId)) -> Result { + match self.ibc_store.lock().unwrap().channels.get(pcid) { + Some(channel_end) => Ok(channel_end.clone()), + None => Err(Error::channel_not_found(pcid.0.clone(), pcid.1)), + } + } + + fn connection_channels(&self, _cid: &ConnectionId) -> Result, Error> { + unimplemented!() + } + + fn get_next_sequence_send( + &self, + port_channel_id: &(PortId, ChannelId), + ) -> Result { + match self + .ibc_store + .lock() + .unwrap() + .next_sequence_send + .get(port_channel_id) + { + Some(sequence) => Ok(*sequence), + None => Err(Error::missing_next_send_seq(port_channel_id.clone())), + } + } + + fn get_next_sequence_recv( + &self, + _port_channel_id: &(PortId, ChannelId), + ) -> Result { + unimplemented!() + } + + fn get_next_sequence_ack( 
+ &self, + _port_channel_id: &(PortId, ChannelId), + ) -> Result { + unimplemented!() + } + + fn get_packet_commitment( + &self, + _key: &(PortId, ChannelId, Sequence), + ) -> Result { + unimplemented!() + } + + fn get_packet_receipt(&self, _key: &(PortId, ChannelId, Sequence)) -> Result { + unimplemented!() + } + + fn get_packet_acknowledgement( + &self, + _key: &(PortId, ChannelId, Sequence), + ) -> Result { + unimplemented!() + } + + fn hash(&self, value: Vec) -> Vec { + use sha2::Digest; + + sha2::Sha256::digest(value).to_vec() + } + + fn client_update_time( + &self, + _client_id: &ClientId, + _height: Height, + ) -> Result { + unimplemented!() + } + + fn client_update_height( + &self, + _client_id: &ClientId, + _height: Height, + ) -> Result { + unimplemented!() + } + + fn channel_counter(&self) -> Result { + unimplemented!() + } + + fn max_expected_time_per_block(&self) -> Duration { + unimplemented!() + } +} + +impl Ics20Context for DummyTransferModule { + type AccountId = Signer; +} + +impl ReaderContext for DummyTransferModule {} diff --git a/modules/tests/runner/mod.rs b/modules/tests/runner/mod.rs index 47b03c4af8..8cf9d2f3cd 100644 --- a/modules/tests/runner/mod.rs +++ b/modules/tests/runner/mod.rs @@ -164,7 +164,9 @@ impl IbcTestRunner { } fn signer() -> Signer { - Signer::new("") + "cosmos1wxeyh7zgn4tctjzs0vtqpc6p5cxq5t2muzl7ng" + .parse() + .unwrap() } pub fn counterparty(client_id: u64, connection_id: Option) -> Counterparty { diff --git a/proto-compiler/Cargo.lock b/proto-compiler/Cargo.lock index d1f12cd84d..676f49a61a 100644 --- a/proto-compiler/Cargo.lock +++ b/proto-compiler/Cargo.lock @@ -342,7 +342,7 @@ checksum = "6456b8a6c8f33fee7d958fcd1b60d55b11940a79e63ae87013e6d22e26034440" [[package]] name = "hyper" -version = "0.14.14" +version = "0.15.04" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2b91bb1f221b6ea1f1e4371216b70f40748774c2fb5971b450c07773fb92d26b" dependencies = [ diff --git a/proto/Cargo.toml b/proto/Cargo.toml index 148acce52c..ef2904cb95 100644 --- a/proto/Cargo.toml +++ b/proto/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ibc-proto" -version = "0.17.1" +version = "0.18.0" authors = ["Informal Systems "] edition = "2021" license = "Apache-2.0" diff --git a/proto/src/lib.rs b/proto/src/lib.rs index e09f2e765d..91255dae08 100644 --- a/proto/src/lib.rs +++ b/proto/src/lib.rs @@ -8,7 +8,7 @@ #![allow(clippy::large_enum_variant)] #![allow(rustdoc::bare_urls)] #![forbid(unsafe_code)] -#![doc(html_root_url = "https://docs.rs/ibc-proto/0.17.1")] +#![doc(html_root_url = "https://docs.rs/ibc-proto/0.18.0")] pub mod google; @@ -153,6 +153,9 @@ pub mod ibc { pub mod v1 { include_proto!("ibc.applications.transfer.v1.rs"); } + pub mod v2 { + include_proto!("ibc.applications.transfer.v2.rs"); + } } pub mod interchain_accounts { pub mod v1 { diff --git a/relayer-cli/Cargo.toml b/relayer-cli/Cargo.toml index 7b9f4c0a42..0e61126001 100644 --- a/relayer-cli/Cargo.toml +++ b/relayer-cli/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ibc-relayer-cli" -version = "0.14.1" +version = "0.15.0" edition = "2021" license = "Apache-2.0" readme = "README.md" @@ -26,11 +26,11 @@ telemetry = ["ibc-relayer/telemetry", "ibc-telemetry"] rest-server = ["ibc-relayer-rest"] [dependencies] -ibc = { version = "0.14.1", path = "../modules", features = ["std", "clock"] } -ibc-relayer = { version = "0.14.1", path = "../relayer" } -ibc-proto = { version = "0.17.1", path = "../proto" } -ibc-telemetry = { version = "0.14.1", path = "../telemetry", optional = true } 
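As an aside on the `Signer` change earlier in this diff (modules/src/signer.rs) and the test-runner update just above: parsing a `Signer` is now fallible and blank input is rejected. A minimal sketch, assuming the `ibc` crate from this repository:

```rust
use ibc::signer::Signer;

fn main() {
    // A non-empty string parses as before.
    let signer: Signer = "cosmos1wxeyh7zgn4tctjzs0vtqpc6p5cxq5t2muzl7ng"
        .parse()
        .expect("non-empty bech32 string parses");
    assert_eq!(signer.as_ref(), "cosmos1wxeyh7zgn4tctjzs0vtqpc6p5cxq5t2muzl7ng");

    // Whitespace-only input now fails with `SignerError::empty_signer()`.
    assert!("   ".parse::<Signer>().is_err());
}
```

This is why call sites such as `get_signer` in the Cosmos chain endpoint now propagate a signer error instead of using the old infallible `Signer::new`.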
-ibc-relayer-rest = { version = "0.14.1", path = "../relayer-rest", optional = true } +ibc = { version = "0.15.0", path = "../modules", features = ["std", "clock"] } +ibc-relayer = { version = "0.15.0", path = "../relayer" } +ibc-proto = { version = "0.18.0", path = "../proto" } +ibc-telemetry = { version = "0.15.0", path = "../telemetry", optional = true } +ibc-relayer-rest = { version = "0.15.0", path = "../relayer-rest", optional = true } clap = { version = "3.1", features = ["cargo"] } clap_complete = "3.1" @@ -81,5 +81,5 @@ features = ["options"] [dev-dependencies] abscissa_core = { version = "=0.6.0", features = ["testing"] } -once_cell = "1.9" +once_cell = "1.12" regex = "1.5" diff --git a/relayer-cli/src/commands/keys.rs b/relayer-cli/src/commands/keys.rs index 8f0b5db16d..20a67429b1 100644 --- a/relayer-cli/src/commands/keys.rs +++ b/relayer-cli/src/commands/keys.rs @@ -3,14 +3,14 @@ use abscissa_core::clap::Parser; use abscissa_core::{Command, Runnable}; mod add; +mod balance; mod delete; mod list; -mod restore; /// `keys` subcommand #[derive(Command, Debug, Parser, Runnable)] pub enum KeysCmd { - /// Adds a key to a configured chain + /// Adds key to a configured chain or restores a key to a configured chain using a mnemonic Add(add::KeysAddCmd), /// Delete key(s) from a configured chain @@ -19,6 +19,6 @@ pub enum KeysCmd { /// List keys configured on a chain List(list::KeysListCmd), - /// Restore a key to a configured chain using a mnemonic - Restore(restore::KeyRestoreCmd), + /// Query balance for a key from a configured chain. If no key is given, the key is retrieved from the configuration file. + Balance(balance::KeyBalanceCmd), } diff --git a/relayer-cli/src/commands/keys/add.rs b/relayer-cli/src/commands/keys/add.rs index 1e128b97e5..30e4844e89 100644 --- a/relayer-cli/src/commands/keys/add.rs +++ b/relayer-cli/src/commands/keys/add.rs @@ -16,20 +16,49 @@ use ibc_relayer::{ use crate::application::app_config; use crate::conclude::Output; +/// The data structure that represents the arguments when invoking the `keys add` CLI command. +/// +/// The command has one argument and two exclusive flags: +/// +/// The command to add a key from a file: +/// +/// `keys add [OPTIONS] --key-file ` +/// +/// The command to restore a key from a file containing mnemonic: +/// +/// `keys add [OPTIONS] --mnemonic-file ` +/// +/// The key-file and mnemonic-file flags can't be given at the same time, this will cause a terminating error. +/// If successful the key will be created or restored, depending on which flag was given. 
#[derive(Clone, Command, Debug, Parser)] pub struct KeysAddCmd { #[clap(required = true, help = "identifier of the chain")] chain_id: ChainId, - #[clap(short = 'f', long, required = true, help = "path to the key file")] - file: PathBuf, + #[clap( + short = 'f', + long, + required = true, + help = "path to the key file", + group = "add-restore" + )] + key_file: Option, + + #[clap( + short, + long, + required = true, + help = "path to file containing mnemonic to restore the key from", + group = "add-restore" + )] + mnemonic_file: Option, #[clap( - short = 'n', + short, long, help = "name of the key (defaults to the `key_name` defined in the config)" )] - name: Option, + key_name: Option, #[clap( short = 'p', @@ -47,7 +76,7 @@ impl KeysAddCmd { .ok_or_else(|| format!("chain '{}' not found in configuration file", self.chain_id))?; let name = self - .name + .key_name .clone() .unwrap_or_else(|| chain_config.key_name.clone()); @@ -56,7 +85,6 @@ impl KeysAddCmd { Ok(KeysAddOptions { config: chain_config.clone(), - file: self.file.clone(), name, hd_path, }) @@ -67,7 +95,6 @@ impl KeysAddCmd { pub struct KeysAddOptions { pub name: String, pub config: ChainConfig, - pub file: PathBuf, pub hd_path: HDPath, } @@ -80,15 +107,43 @@ impl Runnable for KeysAddCmd { Ok(result) => result, }; - let key = add_key(&opts.config, &opts.name, &opts.file, &opts.hd_path); - - match key { - Ok(key) => Output::success_msg(format!( - "Added key '{}' ({}) on chain {}", - opts.name, key.account, opts.config.id - )) - .exit(), - Err(e) => Output::error(format!("{}", e)).exit(), + // Check if --file or --mnemonic was given as input. + match (self.key_file.clone(), self.mnemonic_file.clone()) { + (Some(key_file), _) => { + let key = add_key(&opts.config, &opts.name, &key_file, &opts.hd_path); + match key { + Ok(key) => Output::success_msg(format!( + "Added key '{}' ({}) on chain {}", + opts.name, key.account, opts.config.id + )) + .exit(), + Err(e) => Output::error(format!( + "An error occurred adding the key on chain {} from file {:?}: {}", + self.chain_id, key_file, e + )) + .exit(), + } + } + (_, Some(mnemonic_file)) => { + let key = restore_key(&mnemonic_file, &opts.name, &opts.hd_path, &opts.config); + + match key { + Ok(key) => Output::success_msg(format!( + "Restored key '{}' ({}) on chain {}", + opts.name, key.account, opts.config.id + )) + .exit(), + Err(e) => Output::error(format!( + "An error occurred restoring the key on chain {} from file {:?}: {}", + self.chain_id, mnemonic_file, e + )) + .exit(), + } + } + // This case should never trigger. + // The 'required' parameter for the flags will trigger an error if both flags have not been given. + // And the 'group' parameter for the flags will trigger an error if both flags are given. 
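A standalone, hypothetical sketch of the mutually exclusive flag pattern described in the comments above, assuming clap 3 with the `derive` feature; the field names mirror the `KeysAddCmd` definition, but the binary itself is made up:

```rust
use std::path::PathBuf;

use clap::Parser;

#[derive(Debug, Parser)]
struct AddKey {
    /// Path to the key file.
    #[clap(short = 'f', long, required = true, group = "add-restore")]
    key_file: Option<PathBuf>,

    /// Path to a file containing the mnemonic to restore the key from.
    #[clap(short, long, required = true, group = "add-restore")]
    mnemonic_file: Option<PathBuf>,
}

fn main() {
    // Exactly one of --key-file / --mnemonic-file must be given: `required`
    // rejects the "neither" case and the shared `group` rejects "both".
    let args = AddKey::parse();
    println!("{:?}", args);
}
```

With `required = true` on both flags and a shared `group`, clap rejects both the "no flag" and the "both flags" invocations before `run` is reached, which is why the final match arm below is unreachable in practice.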
+ _ => Output::error(format!("--mnemonic-file and --key-file can't both be None")).exit(), } } } @@ -107,3 +162,19 @@ pub fn add_key( keyring.add_key(key_name, key.clone())?; Ok(key) } + +pub fn restore_key( + mnemonic: &Path, + key_name: &str, + hdpath: &HDPath, + config: &ChainConfig, +) -> Result> { + let mnemonic_content = + fs::read_to_string(mnemonic).map_err(|_| "error reading the mnemonic file")?; + + let mut keyring = KeyRing::new(Store::Test, &config.account_prefix, &config.id)?; + let key_entry = keyring.key_from_mnemonic(&mnemonic_content, hdpath, &config.address_type)?; + + keyring.add_key(key_name, key_entry.clone())?; + Ok(key_entry) +} diff --git a/relayer-cli/src/commands/keys/balance.rs b/relayer-cli/src/commands/keys/balance.rs new file mode 100644 index 0000000000..2b0558f27e --- /dev/null +++ b/relayer-cli/src/commands/keys/balance.rs @@ -0,0 +1,66 @@ +use abscissa_core::clap::Parser; +use abscissa_core::{Command, Runnable}; + +use ibc::core::ics24_host::identifier::ChainId; +use ibc_relayer::chain::handle::ChainHandle; + +use crate::application::app_config; +use crate::cli_utils::spawn_chain_runtime; +use crate::conclude::{exit_with_unrecoverable_error, json, Output}; + +/// The data structure that represents the arguments when invoking the `keys balance` CLI command. +/// +/// The command has one argument and one optional flag: +/// +/// `keys balance --key-name ` +/// +/// If no key name is given, it will be taken from the configuration file. +/// If successful the balance and denominator of the account, associated with the key name +/// on the given chain, will be displayed. +#[derive(Clone, Command, Debug, Parser)] +pub struct KeyBalanceCmd { + #[clap(required = true, help = "identifier of the chain")] + chain_id: ChainId, + + #[clap( + long, + short, + help = "(optional) name of the key (defaults to the `key_name` defined in the config)" + )] + key_name: Option, +} + +impl Runnable for KeyBalanceCmd { + fn run(&self) { + let config = app_config(); + + let chain = spawn_chain_runtime(&config, &self.chain_id) + .unwrap_or_else(exit_with_unrecoverable_error); + let key_name = self.key_name.clone(); + + match chain.query_balance(key_name.clone()) { + Ok(balance) if json() => Output::success(balance).exit(), + Ok(balance) => { + // Retrieve the key name string to output. 
+ let key_name_str = match key_name { + Some(name) => name, + None => { + let chain_config = + chain.config().unwrap_or_else(exit_with_unrecoverable_error); + chain_config.key_name + } + }; + Output::success_msg(format!( + "balance for key `{}`: {} {}", + key_name_str, balance.amount, balance.denom + )) + .exit() + } + Err(e) => Output::error(format!( + "there was a problem querying the chain balance: {}", + e + )) + .exit(), + } + } +} diff --git a/relayer-cli/src/commands/keys/restore.rs b/relayer-cli/src/commands/keys/restore.rs deleted file mode 100644 index 2b33031368..0000000000 --- a/relayer-cli/src/commands/keys/restore.rs +++ /dev/null @@ -1,108 +0,0 @@ -use core::str::FromStr; - -use abscissa_core::clap::Parser; -use abscissa_core::{Command, Runnable}; - -use ibc::core::ics24_host::identifier::ChainId; -use ibc_relayer::{ - config::{ChainConfig, Config}, - keyring::{HDPath, KeyEntry, KeyRing, Store}, -}; - -use crate::application::app_config; -use crate::conclude::Output; - -#[derive(Clone, Command, Debug, Parser)] -pub struct KeyRestoreCmd { - #[clap(required = true, help = "identifier of the chain")] - chain_id: ChainId, - - #[clap( - short = 'm', - long, - required = true, - help = "mnemonic to restore the key from" - )] - mnemonic: String, - - #[clap( - short = 'n', - long, - help = "name of the key (defaults to the `key_name` defined in the config)" - )] - name: Option, - - #[clap( - short = 'p', - long, - help = "derivation path for this key", - default_value = "m/44'/118'/0'/0/0" - )] - hd_path: String, -} - -#[derive(Clone, Debug)] -pub struct KeysRestoreOptions { - pub mnemonic: String, - pub config: ChainConfig, - pub hd_path: HDPath, - pub key_name: String, -} - -impl KeyRestoreCmd { - fn validate_options(&self, config: &Config) -> Result { - let chain_config = config - .find_chain(&self.chain_id) - .ok_or_else(|| format!("chain '{}' not found in configuration file", self.chain_id))?; - - let hd_path = HDPath::from_str(&self.hd_path) - .map_err(|_| format!("invalid derivation path: {}", self.hd_path))?; - - let key_name = self - .name - .clone() - .unwrap_or_else(|| chain_config.key_name.clone()); - - Ok(KeysRestoreOptions { - mnemonic: self.mnemonic.clone(), - config: chain_config.clone(), - hd_path, - key_name, - }) - } -} - -impl Runnable for KeyRestoreCmd { - fn run(&self) { - let config = app_config(); - - let opts = match self.validate_options(&config) { - Err(err) => Output::error(err).exit(), - Ok(result) => result, - }; - - let key = restore_key(&opts.mnemonic, &opts.key_name, &opts.hd_path, &opts.config); - - match key { - Ok(key) => Output::success_msg(format!( - "Restored key '{}' ({}) on chain {}", - opts.key_name, key.account, opts.config.id - )) - .exit(), - Err(e) => Output::error(format!("{}", e)).exit(), - } - } -} - -pub fn restore_key( - mnemonic: &str, - key_name: &str, - hdpath: &HDPath, - config: &ChainConfig, -) -> Result> { - let mut keyring = KeyRing::new(Store::Test, &config.account_prefix, &config.id)?; - let key_entry = keyring.key_from_mnemonic(mnemonic, hdpath, &config.address_type)?; - - keyring.add_key(key_name, key_entry.clone())?; - Ok(key_entry) -} diff --git a/relayer-cli/src/commands/query.rs b/relayer-cli/src/commands/query.rs index 6b56eb11cf..e8d0580da5 100644 --- a/relayer-cli/src/commands/query.rs +++ b/relayer-cli/src/commands/query.rs @@ -3,11 +3,13 @@ use abscissa_core::clap::Parser; use abscissa_core::{Command, Runnable}; +use crate::commands::query::channel_client::QueryChannelClientCmd; use 
crate::commands::query::channel_ends::QueryChannelEndsCmd; use crate::commands::query::channels::QueryChannelsCmd; use crate::commands::query::packet::QueryPacketCmds; mod channel; +mod channel_client; mod channel_ends; mod channels; mod client; @@ -76,6 +78,9 @@ pub enum QueryConnectionCmds { #[derive(Command, Debug, Parser, Runnable)] pub enum QueryChannelCmds { + /// Query channel's client state + Client(QueryChannelClientCmd), + /// Query channel end End(channel::QueryChannelEndCmd), diff --git a/relayer-cli/src/commands/query/channel_client.rs b/relayer-cli/src/commands/query/channel_client.rs new file mode 100644 index 0000000000..fef376e3a7 --- /dev/null +++ b/relayer-cli/src/commands/query/channel_client.rs @@ -0,0 +1,44 @@ +use abscissa_core::clap::Parser; +use abscissa_core::{Command, Runnable}; + +use ibc::core::ics24_host::identifier::{ChainId, ChannelId, PortId}; +use ibc_relayer::chain::handle::ChainHandle; +use ibc_relayer::chain::requests::QueryChannelClientStateRequest; + +use crate::application::app_config; +use crate::cli_utils::spawn_chain_runtime; +use crate::conclude::{exit_with_unrecoverable_error, Output}; + +/// The data structure that represents the arguments when invoking the `query channel client` CLI command. +/// +/// `query channel client --port-id --channel-id ` +/// +/// If successful the channel's client state is displayed. +#[derive(Clone, Command, Debug, Parser)] +pub struct QueryChannelClientCmd { + #[clap(required = true, help = "identifier of the chain to query")] + chain_id: ChainId, + + #[clap(required = true, long, help = "identifier of the port to query")] + port_id: PortId, + + #[clap(required = true, long, help = "identifier of the channel to query")] + channel_id: ChannelId, +} + +impl Runnable for QueryChannelClientCmd { + fn run(&self) { + let config = app_config(); + + let chain = spawn_chain_runtime(&config, &self.chain_id) + .unwrap_or_else(exit_with_unrecoverable_error); + + match chain.query_channel_client_state(QueryChannelClientStateRequest { + port_id: self.port_id.clone(), + channel_id: self.channel_id, + }) { + Ok(cs) => Output::success(cs).exit(), + Err(e) => Output::error(format!("{}", e)).exit(), + } + } +} diff --git a/relayer-cli/src/commands/tx/transfer.rs b/relayer-cli/src/commands/tx/transfer.rs index 8875e19172..860a7c51e2 100644 --- a/relayer-cli/src/commands/tx/transfer.rs +++ b/relayer-cli/src/commands/tx/transfer.rs @@ -3,6 +3,7 @@ use abscissa_core::{config::Override, Command, FrameworkErrorKind, Runnable}; use core::time::Duration; use ibc::{ + applications::transfer::Amount, core::{ ics02_client::client_state::ClientState, ics02_client::height::Height, @@ -14,7 +15,6 @@ use ibc_relayer::chain::handle::ChainHandle; use ibc_relayer::chain::requests::{ QueryChannelRequest, QueryClientStateRequest, QueryConnectionRequest, }; -use ibc_relayer::transfer::Amount; use ibc_relayer::{ config::Config, transfer::{build_and_send_transfer_messages, TransferOptions}, diff --git a/relayer-rest/Cargo.toml b/relayer-rest/Cargo.toml index 0029c0f089..60e4d9bc7e 100644 --- a/relayer-rest/Cargo.toml +++ b/relayer-rest/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ibc-relayer-rest" -version = "0.14.1" +version = "0.15.0" authors = ["Informal Systems "] edition = "2021" license = "Apache-2.0" @@ -14,8 +14,8 @@ description = """ """ [dependencies] -ibc = { version = "0.14.1", path = "../modules" } -ibc-relayer = { version = "0.14.1", path = "../relayer" } +ibc = { version = "0.15.0", path = "../modules" } +ibc-relayer = { version = "0.15.0", 
path = "../relayer" } crossbeam-channel = "0.5" rouille = "3.5" diff --git a/relayer-rest/tests/mock.rs b/relayer-rest/tests/mock.rs index 034aaa30aa..7fc6e891ec 100644 --- a/relayer-rest/tests/mock.rs +++ b/relayer-rest/tests/mock.rs @@ -63,7 +63,7 @@ fn version() { let rest_api_version = VersionInfo { name: "ibc-relayer-rest".to_string(), - version: "0.14.1".to_string(), + version: "0.15.0".to_string(), }; let result = vec![version.clone(), rest_api_version]; diff --git a/relayer/Cargo.toml b/relayer/Cargo.toml index 227d3548e3..f4452687fd 100644 --- a/relayer/Cargo.toml +++ b/relayer/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ibc-relayer" -version = "0.14.1" +version = "0.15.0" edition = "2021" license = "Apache-2.0" readme = "README.md" @@ -21,9 +21,9 @@ profiling = [] telemetry = ["ibc-telemetry"] [dependencies] -ibc = { version = "0.14.1", path = "../modules" } -ibc-proto = { version = "0.17.1", path = "../proto" } -ibc-telemetry = { version = "0.14.1", path = "../telemetry", optional = true } +ibc = { version = "0.15.0", path = "../modules" } +ibc-proto = { version = "0.18.0", path = "../proto" } +ibc-telemetry = { version = "0.15.0", path = "../telemetry", optional = true } subtle-encoding = "0.5" humantime-serde = "1.1.1" @@ -37,7 +37,7 @@ serde_json = { version = "1" } bytes = "1.1.0" prost = { version = "0.10" } prost-types = { version = "0.10" } -tonic = { version = "0.7", features = ["tls", "tls-roots"] } +tonic = { version = "0.7.2", features = ["tls", "tls-roots"] } futures = "0.3.21" crossbeam-channel = "0.5.4" k256 = { version = "0.10.4", features = ["ecdsa-core", "ecdsa", "sha256"]} @@ -58,12 +58,11 @@ flex-error = { version = "0.4.4", default-features = false } signature = "1.4.0" anyhow = "1.0" semver = "1.0" -uint = "0.9" humantime = "2.1.0" nanoid = "0.4.0" regex = "1.5.5" -moka = "0.8.3" -uuid = { version = "1.0.0", features = ["v4"] } +moka = "0.8.5" +uuid = { version = "1.1.0", features = ["v4"] } [dependencies.num-bigint] version = "0.4" @@ -94,7 +93,7 @@ default-features = false version = "=0.23.7" [dev-dependencies] -ibc = { version = "0.14.1", path = "../modules", features = ["mocks"] } +ibc = { version = "0.15.0", path = "../modules", features = ["mocks"] } serial_test = "0.6.0" env_logger = "0.9.0" tracing-subscriber = { version = "0.3.11", features = ["fmt", "env-filter", "json"] } diff --git a/relayer/src/chain.rs b/relayer/src/chain.rs index 271cc224fc..959e2f00ee 100644 --- a/relayer/src/chain.rs +++ b/relayer/src/chain.rs @@ -153,8 +153,9 @@ pub trait ChainEndpoint: Sized { // Queries - /// Query the balance of the current account for the denom used to pay tx fees. - fn query_balance(&self) -> Result; + /// Query the balance of the given account for the denom used to pay tx fees. + /// If no account is given, behavior must be specified, e.g. retrieve it from configuration file. 
+ fn query_balance(&self, key_name: Option) -> Result; fn query_commitment_prefix(&self) -> Result; diff --git a/relayer/src/chain/cosmos.rs b/relayer/src/chain/cosmos.rs index 05812c5607..34b931dae7 100644 --- a/relayer/src/chain/cosmos.rs +++ b/relayer/src/chain/cosmos.rs @@ -603,7 +603,9 @@ impl ChainEndpoint for CosmosSdkChain { .map_err(|e| Error::key_not_found(self.config.key_name.clone(), e))?; let bech32 = encode_to_bech32(&key.address.to_hex(), &self.config.account_prefix)?; - Ok(Signer::new(bech32)) + bech32 + .parse() + .map_err(|e| Error::ics02(ClientError::signer(e))) } /// Get the chain configuration @@ -637,12 +639,23 @@ impl ChainEndpoint for CosmosSdkChain { Ok(version_specs.ibc_go_version) } - fn query_balance(&self) -> Result { - let key = self.key()?; + fn query_balance(&self, key_name: Option) -> Result { + // If a key_name is given, extract the account hash. + // Else retrieve the account from the configuration file. + let account = match key_name { + Some(account) => { + let key = self.keybase().get_key(&account).map_err(Error::key_base)?; + key.account + } + _ => { + let key = self.key()?; + key.account + } + }; let balance = self.block_on(query_balance( &self.grpc_addr, - &key.account, + &account, &self.config.gas_price.denom, ))?; diff --git a/relayer/src/chain/handle.rs b/relayer/src/chain/handle.rs index 01b2c86d5e..370714076b 100644 --- a/relayer/src/chain/handle.rs +++ b/relayer/src/chain/handle.rs @@ -150,6 +150,7 @@ pub enum ChainRequest { }, QueryBalance { + key_name: Option, reply_to: ReplyTo, }, @@ -382,8 +383,9 @@ pub trait ChainHandle: Clone + Send + Sync + Serialize + Debug + 'static { /// Return the version of the IBC protocol that this chain is running, if known. fn ibc_version(&self) -> Result, Error>; - /// Query the balance of the current account for the denom used to pay tx fees. - fn query_balance(&self) -> Result; + /// Query the balance of the given account for the denom used to pay tx fees. + /// If no account is given, behavior must be specified, e.g. retrieve it from configuration file. 
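A caller-side sketch of the new signature, assuming a spawned `ChainHandle` and the `Balance` fields printed by the CLI above (`amount`, `denom`); the second key name is made up for illustration:

```rust
use ibc_relayer::chain::handle::ChainHandle;
use ibc_relayer::error::Error;

fn print_balances<Chain: ChainHandle>(chain: &Chain) -> Result<(), Error> {
    // `None` falls back to the `key_name` configured for the chain.
    let default_balance = chain.query_balance(None)?;
    println!("default key: {} {}", default_balance.amount, default_balance.denom);

    // `Some(..)` looks the named key up in the keyring instead.
    let other_balance = chain.query_balance(Some("my-other-key".to_string()))?;
    println!("my-other-key: {} {}", other_balance.amount, other_balance.denom);

    Ok(())
}
```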
+ fn query_balance(&self, key_name: Option) -> Result; fn query_application_status(&self) -> Result; diff --git a/relayer/src/chain/handle/base.rs b/relayer/src/chain/handle/base.rs index a9e3a77f67..d557518098 100644 --- a/relayer/src/chain/handle/base.rs +++ b/relayer/src/chain/handle/base.rs @@ -149,8 +149,8 @@ impl ChainHandle for BaseChainHandle { self.send(|reply_to| ChainRequest::IbcVersion { reply_to }) } - fn query_balance(&self) -> Result { - self.send(|reply_to| ChainRequest::QueryBalance { reply_to }) + fn query_balance(&self, key_name: Option) -> Result { + self.send(|reply_to| ChainRequest::QueryBalance { key_name, reply_to }) } fn query_application_status(&self) -> Result { diff --git a/relayer/src/chain/handle/cache.rs b/relayer/src/chain/handle/cache.rs index 34199ea717..ffc114dc5e 100644 --- a/relayer/src/chain/handle/cache.rs +++ b/relayer/src/chain/handle/cache.rs @@ -130,8 +130,8 @@ impl ChainHandle for CachingChainHandle { self.inner().ibc_version() } - fn query_balance(&self) -> Result { - self.inner().query_balance() + fn query_balance(&self, key_name: Option) -> Result { + self.inner().query_balance(key_name) } fn query_application_status(&self) -> Result { diff --git a/relayer/src/chain/handle/counting.rs b/relayer/src/chain/handle/counting.rs index df10b5790a..ebe05f5d06 100644 --- a/relayer/src/chain/handle/counting.rs +++ b/relayer/src/chain/handle/counting.rs @@ -156,9 +156,9 @@ impl ChainHandle for CountingChainHandle { self.inner().ibc_version() } - fn query_balance(&self) -> Result { + fn query_balance(&self, key_name: Option) -> Result { self.inc_metric("query_balance"); - self.inner().query_balance() + self.inner().query_balance(key_name) } fn query_application_status(&self) -> Result { diff --git a/relayer/src/chain/mock.rs b/relayer/src/chain/mock.rs index 79ee256e8c..357723fbd8 100644 --- a/relayer/src/chain/mock.rs +++ b/relayer/src/chain/mock.rs @@ -165,7 +165,7 @@ impl ChainEndpoint for MockChain { Ok(Some(semver::Version::new(3, 0, 0))) } - fn query_balance(&self) -> Result { + fn query_balance(&self, _key_name: Option) -> Result { unimplemented!() } diff --git a/relayer/src/chain/runtime.rs b/relayer/src/chain/runtime.rs index d142a0e256..e974ffb00e 100644 --- a/relayer/src/chain/runtime.rs +++ b/relayer/src/chain/runtime.rs @@ -301,8 +301,8 @@ where self.build_channel_proofs(port_id, channel_id, height, reply_to)? }, - Ok(ChainRequest::QueryBalance { reply_to }) => { - self.query_balance(reply_to)? + Ok(ChainRequest::QueryBalance { key_name, reply_to }) => { + self.query_balance(key_name, reply_to)? } Ok(ChainRequest::QueryApplicationStatus { reply_to }) => { @@ -467,8 +467,12 @@ where reply_to.send(result).map_err(Error::send) } - fn query_balance(&self, reply_to: ReplyTo) -> Result<(), Error> { - let balance = self.chain.query_balance(); + fn query_balance( + &self, + key_name: Option, + reply_to: ReplyTo, + ) -> Result<(), Error> { + let balance = self.chain.query_balance(key_name); reply_to.send(balance).map_err(Error::send) } diff --git a/relayer/src/channel/version.rs b/relayer/src/channel/version.rs index 56218a267e..dd2d74cb4d 100644 --- a/relayer/src/channel/version.rs +++ b/relayer/src/channel/version.rs @@ -5,13 +5,13 @@ //! handshake. use ibc::{ - applications::ics20_fungible_token_transfer, + applications::transfer, core::{ics04_channel::Version, ics24_host::identifier::PortId}, }; /// Returns the default channel version, depending on the the given [`PortId`]. 
pub fn default_by_port(port_id: &PortId) -> Option { - if port_id.as_str() == ics20_fungible_token_transfer::PORT_ID { + if port_id.as_str() == transfer::PORT_ID_STR { // https://github.com/cosmos/ibc/tree/master/spec/app/ics-020-fungible-token-transfer#forwards-compatibility Some(Version::ics20()) } else { diff --git a/relayer/src/link/operational_data.rs b/relayer/src/link/operational_data.rs index 34bd663e52..c78fa61379 100644 --- a/relayer/src/link/operational_data.rs +++ b/relayer/src/link/operational_data.rs @@ -17,9 +17,12 @@ use crate::chain::tracking::TrackingId; use crate::link::error::LinkError; use crate::link::RelayPath; +/// The chain that the events associated with a piece of [`OperationalData`] are bound for. #[derive(Clone, Copy, PartialEq)] pub enum OperationalDataTarget { + /// The chain which generated the events associated with the `OperationalData`. Source, + /// The chain receiving the events associated with the `OperationalData``. Destination, } @@ -75,18 +78,22 @@ pub struct TransitMessage { pub msg: Any, } -/// Holds all the necessary information for handling a set of in-transit messages. -/// -/// Each `OperationalData` item is uniquely identified by the combination of two attributes: -/// - `target`: represents the target of the packet messages, either source or destination chain, -/// - `proofs_height`: represents the height for the proofs in all the messages. -/// Note: this is the height at which the proofs are queried. A client consensus state at -/// `proofs_height + 1` must exist on-chain in order to verify the proofs. +/// Holds all the necessary information for handling a batch of in-transit messages. This includes +/// an event received from a chain along with any other packets related to the event (i.e. +/// 'receive' or 'timeout' packets) that the relayer has to submit in response to the event. #[derive(Clone)] pub struct OperationalData { + /// Represents the height for the proofs in all the messages. Note that this is the height + /// at which the proofs are queried. For example, for Tendermint chains, a client consensus + /// state at `proofs_height + 1` must exist on-chain in order to verify the proofs. pub proofs_height: Height, + /// The batch of messages associated with this piece of operational data. pub batch: Vec, + /// Represents the target of the packet messages, either the source or the destination + /// chain. pub target: OperationalDataTarget, + /// A unique ID for tracking this batch of events starting from when they were received + /// until the transactions corresponding to those events is submitted. pub tracking_id: TrackingId, /// Stores `Some(ConnectionDelay)` if the delay is non-zero and `None` otherwise connection_delay: Option, diff --git a/relayer/src/link/relay_path.rs b/relayer/src/link/relay_path.rs index 96fce59f1f..1bb03c5565 100644 --- a/relayer/src/link/relay_path.rs +++ b/relayer/src/link/relay_path.rs @@ -98,8 +98,8 @@ pub struct RelayPath { // mostly timeout packet messages. // The operational data targeting the destination chain // comprises mostly RecvPacket and Ack msgs. - pub(crate) src_operational_data: Queue, - pub(crate) dst_operational_data: Queue, + pub src_operational_data: Queue, + pub dst_operational_data: Queue, // Toggle for the transaction confirmation mechanism. 
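A small illustration of the `proofs_height + 1` relationship documented in the `OperationalData` comments above, assuming the `Height` type from the `ibc` crate and its `increment` helper:

```rust
use ibc::Height;

// Proofs queried at `proofs_height` are verified against the client consensus
// state installed at the next height on the counterparty.
fn consensus_height_for_proofs(proofs_height: Height) -> Height {
    proofs_height.increment()
}
```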
confirm_txes: bool, @@ -391,7 +391,7 @@ impl RelayPath { for i in 1..=MAX_RETRIES { let cleared = self .schedule_recv_packet_and_timeout_msgs(height, tracking_id) - .and_then(|()| self.schedule_packet_ack_msgs(height, tracking_id)); + .and_then(|_| self.schedule_packet_ack_msgs(height, tracking_id)); match cleared { Ok(()) => return Ok(()), @@ -406,6 +406,7 @@ impl RelayPath { } /// Clears any packets that were sent before `height`. + /// If no height is passed in, then the latest height of the source chain is used. pub fn schedule_packet_clearing(&self, height: Option) -> Result<(), LinkError> { let span = span!(Level::DEBUG, "clear"); let _enter = span.enter(); @@ -1278,27 +1279,120 @@ impl RelayPath { } } - /// Checks if there are any operational data items ready, - /// and if so performs the relaying of corresponding packets - /// to the target chain. + /// Drives the relaying of elapsed operational data items meant for + /// a specified target chain forward. /// - /// This method performs relaying using the asynchronous sender. - /// Retains the operational data as pending, and associates it - /// with one or more transaction hash(es). - pub fn execute_schedule(&self) -> Result<(), LinkError> { - let (src_ods, dst_ods) = self.try_fetch_scheduled_operational_data()?; + /// Given an iterator of `OperationalData` elements, this function + /// first determines whether the current piece of operational data + /// has elapsed. + /// + /// A piece of operational data is considered 'elapsed' if it has been waiting + /// for an amount of time that surpasses both of the following: + /// 1. The time duration specified in the connection delay + /// 2. The number of blocks specified in the connection delay + /// + /// If the current piece of operational data has elapsed, then relaying + /// is performed using the asynchronous sender. Operational data is + /// retained as pending and is associated with one or more transaction + /// hash(es). + /// + /// Should an error occur when attempting to relay a piece of operational + /// data, this function returns all subsequent unprocessed pieces of + /// operational data back to the caller so that they can be re-queued + /// for processing; the operational data that failed to send is dropped. + /// + /// Note that pieces of operational data that have not elapsed yet are + /// also placed in the 'unprocessed' bucket. + fn execute_schedule_for_target_chain>( + &mut self, + mut operations: I, + target_chain: OperationalDataTarget, + ) -> Result, (VecDeque, LinkError)> { + let mut unprocessed = VecDeque::new(); + + while let Some(od) = operations.next() { + let elapsed_result = match target_chain { + OperationalDataTarget::Source => od.has_conn_delay_elapsed( + &|| self.src_time_latest(), + &|| self.src_max_block_time(), + &|| self.src_latest_height(), + ), + OperationalDataTarget::Destination => od.has_conn_delay_elapsed( + &|| self.dst_time_latest(), + &|| self.dst_max_block_time(), + &|| self.dst_latest_height(), + ), + }; - for od in dst_ods { - let reply = - self.relay_from_operational_data::(od.clone())?; + match elapsed_result { + Ok(elapsed) => { + if elapsed { + // The current piece of operational data has elapsed; we can go ahead and + // attempt to relay it. + match self + .relay_from_operational_data::(od.clone()) + { + // The operational data was successfully relayed; enqueue the associated tx. 
+ Ok(reply) => self.enqueue_pending_tx(reply, od), + // The relaying process failed; return all of the subsequent pieces of operational + // data along with the underlying error that occurred. + Err(e) => { + unprocessed.extend(operations); + + return Err((unprocessed, e)); + } + } + } else { + // The current piece of operational data has not elapsed; add it to the bucket + // of unprocessed operational data and continue processing subsequent pieces + // of operational data. + unprocessed.push_back(od); + } + } + Err(e) => { + // An error occurred when attempting to determine whether the current piece of + // operational data has elapsed or not. Add the current piece of data, along with + // all of the subsequent pieces of data, to the unprocessed bucket and return it + // along with the error that resulted. + unprocessed.push_back(od); + unprocessed.extend(operations); + + return Err((unprocessed, e)); + } + } + } - self.enqueue_pending_tx(reply, od); + Ok(unprocessed) + } + + /// While there are pending operational data items, this function + /// performs the relaying of packets corresponding to those + /// operational data items to both the source and destination chains. + /// + /// Any operational data items that do not get successfully relayed are + /// dropped. Subsequent pending operational data items that went unprocessed + /// are queued up again for re-submission. + pub fn execute_schedule(&mut self) -> Result<(), LinkError> { + let src_od_iter = self.src_operational_data.take().into_iter(); + + match self.execute_schedule_for_target_chain(src_od_iter, OperationalDataTarget::Source) { + Ok(unprocessed_src_data) => self.src_operational_data = unprocessed_src_data.into(), + Err((unprocessed_src_data, e)) => { + self.src_operational_data = unprocessed_src_data.into(); + return Err(e); + } } - for od in src_ods { - let reply = - self.relay_from_operational_data::(od.clone())?; - self.enqueue_pending_tx(reply, od); + let dst_od_iter = self.dst_operational_data.take().into_iter(); + + match self + .execute_schedule_for_target_chain(dst_od_iter, OperationalDataTarget::Destination) + { + Ok(unprocessed_dst_data) => self.dst_operational_data = unprocessed_dst_data.into(), + Err((unprocessed_dst_data, e)) => { + self.dst_operational_data = unprocessed_dst_data.into(); + return Err(e); + } } Ok(()) diff --git a/relayer/src/transfer.rs b/relayer/src/transfer.rs index 45b5930c5d..b21f4dd73b 100644 --- a/relayer/src/transfer.rs +++ b/relayer/src/transfer.rs @@ -1,23 +1,22 @@ -use core::fmt::{Display, Formatter}; -use core::str::FromStr; use core::time::Duration; use flex_error::{define_error, DetailOnly}; -use ibc::applications::ics20_fungible_token_transfer::msgs::transfer::MsgTransfer; +use ibc::applications::transfer::error::Error as Ics20Error; +use ibc::applications::transfer::msgs::transfer::MsgTransfer; +use ibc::applications::transfer::Amount; use ibc::core::ics24_host::identifier::{ChainId, ChannelId, PortId}; use ibc::events::IbcEvent; use ibc::signer::Signer; use ibc::timestamp::{Timestamp, TimestampOverflowError}; use ibc::tx_msg::Msg; use ibc::Height; +use ibc_proto::cosmos::base::v1beta1::Coin; use ibc_proto::google::protobuf::Any; -use uint::FromStrRadixErr; use crate::chain::handle::ChainHandle; use crate::chain::tracking::TrackedMsgs; use crate::chain::ChainStatus; use crate::error::Error; -use crate::util::bigint::U256; define_error! { TransferError { @@ -55,34 +54,15 @@ define_error! 
{ e.event) }, + TokenTransfer + [ Ics20Error ] + |_| { "Token transfer error" }, + ZeroTimeout | _ | { "packet timeout height and packet timeout timestamp cannot both be 0" }, } } -#[derive(Clone, Copy, Debug, Default)] -pub struct Amount(pub U256); - -impl Display for Amount { - fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { - write!(f, "{}", self.0) - } -} - -impl FromStr for Amount { - type Err = FromStrRadixErr; - - fn from_str(s: &str) -> Result { - Ok(Self(U256::from_str_radix(s, 10)?)) - } -} - -impl From for Amount { - fn from(amount: u64) -> Self { - Self(amount.into()) - } -} - #[derive(Copy, Clone)] pub struct TransferTimeout { pub timeout_height: Height, @@ -150,10 +130,10 @@ pub fn build_transfer_message( let msg = MsgTransfer { source_port: packet_src_port_id, source_channel: packet_src_channel_id, - token: Some(ibc_proto::cosmos::base::v1beta1::Coin { + token: Coin { denom, amount: amount.to_string(), - }), + }, sender, receiver, timeout_height, @@ -168,10 +148,7 @@ pub fn build_and_send_transfer_messages Result, TransferError> { - let receiver = match &opts.receiver { - None => packet_dst_chain.get_signer().map_err(TransferError::key)?, - Some(r) => r.clone().into(), - }; + let receiver = packet_dst_chain.get_signer().map_err(TransferError::key)?; let sender = packet_src_chain.get_signer().map_err(TransferError::key)?; @@ -188,10 +165,10 @@ pub fn build_and_send_transfer_messages Default for Queue { Self::new() } } + +impl From> for Queue { + fn from(deque: VecDeque) -> Self { + Queue(Arc::new(RwLock::new(deque))) + } +} diff --git a/relayer/src/worker/packet.rs b/relayer/src/worker/packet.rs index 5c090bde32..38dbeeaf55 100644 --- a/relayer/src/worker/packet.rs +++ b/relayer/src/worker/packet.rs @@ -73,7 +73,7 @@ pub fn spawn_packet_worker( }; spawn_background_task(span, Some(Duration::from_millis(1000)), move || { - let relay_path = &link.lock().unwrap().a_to_b; + let relay_path = &mut link.lock().unwrap().a_to_b; relay_path .refresh_schedule() @@ -120,7 +120,7 @@ pub fn spawn_packet_cmd_worker( retry_with_index(retry_strategy::worker_stubborn_strategy(), |index| { handle_packet_cmd( &mut is_first_run, - &link.lock().unwrap(), + &mut link.lock().unwrap(), clear_on_start, clear_interval, &path, @@ -145,7 +145,7 @@ pub fn spawn_packet_cmd_worker( /// data that is ready. 
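The reworked `execute_schedule` path in relay_path.rs above hands any unprocessed operational data back to the caller when a send fails, dropping only the item that failed. A toy model of that contract, using plain standard-library types rather than Hermes' `OperationalData` and `Queue`:

```rust
use std::collections::VecDeque;

// `relay` reports Ok(true) when an item was sent, Ok(false) when its connection
// delay has not elapsed yet, and Err(_) when sending failed.
fn drain_or_requeue<T, E>(
    mut items: impl Iterator<Item = T>,
    mut relay: impl FnMut(&T) -> Result<bool, E>,
) -> Result<VecDeque<T>, (VecDeque<T>, E)> {
    let mut unprocessed = VecDeque::new();

    while let Some(item) = items.next() {
        match relay(&item) {
            // Sent successfully: nothing to keep for this item.
            Ok(true) => {}
            // Not elapsed yet: keep it for the next scheduling round.
            Ok(false) => unprocessed.push_back(item),
            // Failed: drop this item, hand everything that remains back to the caller.
            Err(e) => {
                unprocessed.extend(items);
                return Err((unprocessed, e));
            }
        }
    }

    Ok(unprocessed)
}

fn main() {
    // Pretend item 3 fails: 1 and 2 are sent, 3 is dropped, 4 is re-queued.
    let result = drain_or_requeue(vec![1, 2, 3, 4].into_iter(), |&i| {
        if i == 3 {
            Err("send failed")
        } else {
            Ok(true)
        }
    });
    assert_eq!(result.unwrap_err().0, VecDeque::from(vec![4]));
}
```

Items that simply have not elapsed yet take the `Ok(false)` path and are retried on the next invocation.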
fn handle_packet_cmd( is_first_run: &mut bool, - link: &Link, + link: &mut Link, clear_on_start: bool, clear_interval: u64, path: &Packet, diff --git a/relayer/src/worker/wallet.rs b/relayer/src/worker/wallet.rs index b046eb34c5..6bf068adef 100644 --- a/relayer/src/worker/wallet.rs +++ b/relayer/src/worker/wallet.rs @@ -4,12 +4,11 @@ use tracing::{error_span, trace}; use crate::{ chain::handle::ChainHandle, - util::task::{spawn_background_task, Next, TaskHandle}, + telemetry, + util::task::{spawn_background_task, Next, TaskError, TaskHandle}, }; pub fn spawn_wallet_worker(chain: Chain) -> TaskHandle { - use crate::{telemetry, util::task::TaskError}; - let span = error_span!("wallet", chain = %chain.id()); spawn_background_task(span, Some(Duration::from_secs(5)), move || { @@ -17,11 +16,11 @@ pub fn spawn_wallet_worker(chain: Chain) -> TaskHandle { TaskError::Fatal(format!("failed to get key in use by the relayer: {e}")) })?; - let balance = chain.query_balance().map_err(|e| { + let balance = chain.query_balance(None).map_err(|e| { TaskError::Ignore(format!("failed to query balance for the account: {e}")) })?; - let amount = balance.amount.parse().map_err(|_| { + let amount: u64 = balance.amount.parse().map_err(|_| { TaskError::Ignore(format!( "failed to parse amount into u64: {}", balance.amount diff --git a/scripts/release.sh b/scripts/release.sh index 3f17d4a909..d400c3fef5 100755 --- a/scripts/release.sh +++ b/scripts/release.sh @@ -18,7 +18,7 @@ set -e # A space-separated list of all the crates we want to publish, in the order in # which they must be published. It's important to respect this order, since # each subsequent crate depends on one or more of the preceding ones. -DEFAULT_CRATES="ibc-proto ibc ibc-telemetry ibc-relayer ibc-relayer-rest ibc-relayer-cli" +DEFAULT_CRATES="ibc-proto ibc ibc-telemetry ibc-relayer ibc-relayer-rest ibc-relayer-cli ibc-test-framework" # Allows us to override the crates we want to publish. 
CRATES=${*:-${DEFAULT_CRATES}} diff --git a/telemetry/Cargo.toml b/telemetry/Cargo.toml index 200e54699f..c56d9f1354 100644 --- a/telemetry/Cargo.toml +++ b/telemetry/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ibc-telemetry" -version = "0.14.1" +version = "0.15.0" edition = "2021" license = "Apache-2.0" readme = "README.md" @@ -13,13 +13,14 @@ description = """ """ [dependencies] -ibc = { version = "0.14.1", path = "../modules" } +ibc = { version = "0.15.0", path = "../modules" } crossbeam-channel = "0.5.4" -once_cell = "1.9.0" +once_cell = "1.12.0" opentelemetry = "0.17.0" opentelemetry-prometheus = "0.10.0" prometheus = "0.13.0" rouille = "3.5.0" -uuid = { version = "1.0.0", features = ["v4"] } -moka = "0.8.2" + +moka = "0.8.5" +uuid = { version = "1.1.0", features = ["v4"] } diff --git a/tools/integration-test/Cargo.toml b/tools/integration-test/Cargo.toml index b01057166f..bf00cb6a80 100644 --- a/tools/integration-test/Cargo.toml +++ b/tools/integration-test/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ibc-integration-test" -version = "0.14.1" +version = "0.15.0" edition = "2021" license = "Apache-2.0" readme = "README.md" @@ -23,6 +23,7 @@ tendermint = { version = "=0.23.7" } tendermint-rpc = { version = "=0.23.7", features = ["http-client", "websocket-client"] } serde_json = "1" +modelator = { git = "https://github.com/informalsystems/modelator", optional = true } time = "0.3" serde = "1.0.136" @@ -33,7 +34,11 @@ manual = [] ordered = [] ica = [] experimental = [] +mbt = ["modelator"] [[bin]] name = "test_setup_with_binary_channel" doc = true + +[dev-dependencies] +tempfile = "3.3.0" diff --git a/tools/integration-test/spec/.gitignore b/tools/integration-test/spec/.gitignore new file mode 100644 index 0000000000..86ee001101 --- /dev/null +++ b/tools/integration-test/spec/.gitignore @@ -0,0 +1,3 @@ +states/ +_apalache-out/ +run/ diff --git a/tools/integration-test/spec/MC_Transfer.cfg b/tools/integration-test/spec/MC_Transfer.cfg new file mode 100644 index 0000000000..e3e8c3d250 --- /dev/null +++ b/tools/integration-test/spec/MC_Transfer.cfg @@ -0,0 +1,2 @@ +INIT Init +NEXT Next diff --git a/tools/integration-test/spec/MC_Transfer.tla b/tools/integration-test/spec/MC_Transfer.tla new file mode 100644 index 0000000000..3af3909d55 --- /dev/null +++ b/tools/integration-test/spec/MC_Transfer.tla @@ -0,0 +1,49 @@ +---- MODULE MC_Transfer ---- +EXTENDS Transfer_typedefs + +CHAIN_IDS == {1, 2} +N_INITIAL_ACCOUNTS == 2 +GENESIS_AMOUNT == 3 + +VARIABLES + \* Interchain state + \* @type: CHAIN_ID -> CHAIN; + chains, + \* @type: Bool; + relayerRunning, + \* Action performed at current step + \* @type: [ name: Str ]; + action, + \* Outcome after action performed at current step + \* @type: [ name: Str ]; + outcome + +INSTANCE Transfer + +\* Trace with a LocalTransfer action +LocalTransferTest == action.name = LocalTransferAction + +\* Trace with a RestoreRelay action +RestoreRelayTest == action.name = RestoreRelayAction +\* Trace with an InterruptRelay action +InterruptRelayTest == action.name = InterruptRelayAction + +\* Trace with an IBCTransferSendPacket action +IBCTransferSendPacketTest == action.name = IBCTransferSendPacketAction +\* Trace with an IBCTransferReceivePacket action +IBCTransferReceivePacketTest == action.name = IBCTransferReceivePacketAction +\* Trace with an IBCTransferAcknowledgePacket action +IBCTransferAcknowledgePacketTest == action.name = IBCTransferAcknowledgePacketAction +\* Trace with an IBCTransferTimeoutPacket action +IBCTransferTimeoutPacketTest == action.name = 
IBCTransferTimeoutPacketAction + +\* Negate the trace predicate to find counter-example +LocalTransferInv == ~LocalTransferTest +RestoreRelayInv == ~RestoreRelayTest +InterruptRelayInv == ~InterruptRelayTest +IBCTransferSendPacketInv == ~IBCTransferSendPacketTest +IBCTransferReceivePacketInv == ~IBCTransferReceivePacketTest +IBCTransferAcknowledgePacketInv == ~IBCTransferAcknowledgePacketTest +IBCTransferTimeoutPacketInv == ~IBCTransferTimeoutPacketTest + +==== diff --git a/tools/integration-test/spec/README.md b/tools/integration-test/spec/README.md new file mode 100644 index 0000000000..8d94669151 --- /dev/null +++ b/tools/integration-test/spec/README.md @@ -0,0 +1,23 @@ +# ICS20 Specification + +Add desired `Invariant` predicate in `MC_Transfer.tla`. Then execute, + +```sh +apalache check --inv=Invariant --run-dir=run MC_Transfer.tla +``` + +Provided invariants to pass, + +``` +LocalTransferInv +RestoreRelayInv +InterruptRelayInv +IBCTransferSendPacketInv +IBCTransferReceivePacketInv +IBCTransferAcknowledgePacketInv +IBCTransferTimeoutPacketInv +``` + +```sh +apalache check --inv=IBCTransferAcknowledgePacketInv --run-dir=run MC_Transfer.tla +``` diff --git a/tools/integration-test/spec/Transfer.tla b/tools/integration-test/spec/Transfer.tla new file mode 100644 index 0000000000..bb9067868d --- /dev/null +++ b/tools/integration-test/spec/Transfer.tla @@ -0,0 +1,398 @@ +---- MODULE Transfer ---- +EXTENDS Apalache, Sequences, Integers, Transfer_typedefs + +CONSTANTS + \* Set of blockchain names + \* @type: Set(CHAIN_ID); + CHAIN_IDS, + \* Number of accounts per blockchain + \* @type: ACCOUNT_ID; + N_INITIAL_ACCOUNTS, + \* Genesis balance per account + \* @type: Int; + GENESIS_AMOUNT + +VARIABLES + \* Interchain state + \* @type: CHAIN_ID -> CHAIN; + chains, + \* @type: Bool; + relayerRunning, + \* Action performed at current step + \* @type: [ name: Str ]; + action, + \* Outcome after action performed at current step + \* @type: [ name: Str ]; + outcome + +\* Account IDs starts from 1 +\* @type: () => Set(ACCOUNT_ID); +ACCOUNT_IDS == 1..N_INITIAL_ACCOUNTS + +\* Actions +NullAction == "Null" +LocalTransferAction == "LocalTransfer" +RestoreRelayAction == "RestoreRelay" +InterruptRelayAction == "InterruptRelay" +IBCTransferSendPacketAction == "IBCTransferSendPacket" +IBCTransferReceivePacketAction == "IBCTransferReceivePacket" +IBCTransferAcknowledgePacketAction == "IBCTransferAcknowledgePacket" +IBCTransferTimeoutPacketAction == "IBCTransferTimeoutPacket" + +\* Outcomes +SuccessOutcome == "Success" +ErrorOutcome == "Error" + +\* @type: (CHAIN_ID) => CHAIN; +Genesis(chainId) == + LET nativeDenom == chainId IN [ + \* Name of the chain + id |-> chainId, + + \* Bank data for this chain + \* To support different cross-chain(ibc) denoms, it is a 2D map. + \* `accountId` has `bank[accountId][denomId]` many `denomId`. 
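An illustrative Rust analogue of the two-level bank map described in the comment above (accounts first, then denominations); this is only to clarify the TLA+ record shape and is not part of the tests:

```rust
use std::collections::HashMap;

type AccountId = u64;
type DenomId = u64;
// `bank[account][denom]` holds how much of `denom` the account owns.
type Bank = HashMap<AccountId, HashMap<DenomId, u64>>;

// Mirrors `Genesis`: every account starts with the genesis amount of the
// chain's native denomination.
fn genesis_bank(n_accounts: u64, native_denom: DenomId, genesis_amount: u64) -> Bank {
    (1..=n_accounts)
        .map(|account| (account, HashMap::from([(native_denom, genesis_amount)])))
        .collect()
}
```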
+ bank |-> [accountId \in ACCOUNT_IDS |-> [denom \in {nativeDenom} |-> GENESIS_AMOUNT]], + \* Record of circulating native and cross-chain(ibc) token sourced at this chain + supply |-> [denom \in {nativeDenom} |-> GENESIS_AMOUNT * N_INITIAL_ACCOUNTS ], + + \* Record of packets originated from this chain + localPackets |-> [ + \* A table of packets with packetId + list |-> SetAsFun({}), + \* Set of packetIds of packets which are not yet acknowledged by destination chain + pending |-> {}, + \* Set of packetIds of packets which are not delivered to destrination chain within timeout block height + expired |-> {}, + \* Set of packetIds of packets which are acknowledged by destination chain + success |-> {} + ], + + \* Escrow balance per chain + escrow |-> [cId \in CHAIN_IDS \ {chainId} |-> SetAsFun({})], + + \* Record of packets receiveed from other chains + \* Packets are maintained using the channelId, it was received at. + \* Note: A pair of chain may have multiple channels in the past. + remotePackets |-> SetAsFun({}), + + nextPacketId |-> 0 +] + +\* Get balance of denom in a bank +\* @type: (BANK, DENOM_ID) => Int; +GetDenomFromBank(bank, denom) == + IF denom \in DOMAIN bank THEN bank[denom] + ELSE 0 + +\* Add an entry to a map if its key does not exists +\* Else update the existing entry +\* @type: (k -> v, k, v) => (k -> v); +AddOrUpdateEntry(func, key, value) == + IF key \in DOMAIN func THEN [func EXCEPT ![key] = value] + ELSE [x \in {key} \union DOMAIN func |-> IF x = key THEN value ELSE func[x]] + + +(* +We will model TokenTransfer using following actions. + +LocalTransfer : on-chain transfer + +InterruptRelay : Interrupt relaying +RestoreRelay : Restore relaying + +IBCTransferSendPacket : account in source chain tries to send denom to an account in target chain +IBCTransferReceivePacket : account in target chain receives the denom sent from account in source chain +IBCTransferAcknowledgePacket : the transaction is acknowledged. source chain completes the transaction. +IBCTransferTimeoutPacket : the transfer is timed-out. balance is refunded to source account. 
+*) + +\* Checks if the source account has enough balance +\* @type: (CHAIN, ACCOUNT_ID, DENOM_ID, Int) => Bool; +HasEnoughBalance(chain, source, denom, amount) == + /\ source \in DOMAIN chain.bank + /\ denom \in DOMAIN chain.bank[source] + /\ chain.bank[source][denom] >= amount + +\* Updated bank after local transfer +\* @type: (CHAIN, ACCOUNT_ID, ACCOUNT_ID, DENOM_ID, Int) => CHAIN; +LocalTransfer(chain, source, target, denom, amount) == + [ + chain EXCEPT + !.bank = [ + @ EXCEPT + ![source] = AddOrUpdateEntry(@, denom, GetDenomFromBank(@, denom) - amount), + ![target] = AddOrUpdateEntry(@, denom, GetDenomFromBank(@, denom) + amount) + ] + ] + +\* Next operator for LocalTransfer +\* @type: () => Bool; +LocalTransferNext == + \E chainId \in CHAIN_IDS: + \E source, target \in ACCOUNT_IDS: + source /= target /\ + \E amount \in 1..10: + LET + chain == chains[chainId] + denom == chain.id IN + /\ HasEnoughBalance(chain, source, denom, amount) + /\ chains' = [ + chains EXCEPT + ![chainId] = LocalTransfer(@, source, target, chain.id, amount) + ] + /\ action' = [ + name |-> LocalTransferAction, + chainId |-> chainId, + source |-> source, + target |-> target, + denom |-> denom, + amount |-> amount + ] + /\ outcome' = [ name |-> SuccessOutcome ] + /\ UNCHANGED relayerRunning + +\* Next operator for RestoreRelay +\* @type: () => Bool; +RestoreRelayNext == + /\ relayerRunning = FALSE + /\ relayerRunning' = TRUE + /\ UNCHANGED chains + /\ action' = [name |-> RestoreRelayAction] + /\ outcome' = [name |-> SuccessOutcome] + +\* Next operator for InterruptRelay +\* @type: () => Bool; +InterruptRelayNext == + /\ relayerRunning = TRUE + /\ relayerRunning' = FALSE + /\ UNCHANGED chains + /\ action' = [name |-> InterruptRelayAction] + /\ outcome' = [name |-> SuccessOutcome] + +\* Checks if there exists a channel between two chains +\* @type: () => Bool; +IBCTransferSendPacketCondition == + relayerRunning + +\* Creates an IBCPacket with the necessary information and adds it to pending packets +\* @type: (CHAIN, ACCOUNT_ID, CHAIN, ACCOUNT_ID, DENOM_ID, Int) => CHAIN; +IBCTransferSendPacket(sourceChain, source, targetChain, target, denom, amount) == + [ + sourceChain EXCEPT + !.bank = [@ EXCEPT + ![source] = AddOrUpdateEntry(@, denom, GetDenomFromBank(@, denom) - amount) + ], + !.escrow = [@ EXCEPT + ![targetChain.id] = AddOrUpdateEntry(@, denom, GetDenomFromBank(@, denom) + amount) + ], + !.localPackets = [@ EXCEPT + !.list = AddOrUpdateEntry(@, + sourceChain.nextPacketId, + [ + id |-> sourceChain.nextPacketId, + from |-> source, + sourceChainId |-> sourceChain.id, + to |-> target, + targetChainId |-> targetChain.id, + denom |-> denom, + amount |-> amount + ] + ), + !.pending = @ \union {sourceChain.nextPacketId} + ], + !.nextPacketId = @ + 1 + ] + +\* Next operator for IBCTransferSendPacket +IBCTransferSendPacketNext == + \E chainId1, chainId2 \in CHAIN_IDS: + chainId1 /= chainId2 /\ + \E acc1, acc2 \in ACCOUNT_IDS: + \E denom \in DOMAIN chains[chainId1].supply: + \E amount \in 1..10: + /\ IBCTransferSendPacketCondition + /\ HasEnoughBalance(chains[chainId1], acc1, denom, amount) + /\ chains' = [chains EXCEPT + ![chainId1] = IBCTransferSendPacket(chains[chainId1], acc1, chains[chainId2], acc2, denom, amount) + ] + /\ action' = [ + name |-> IBCTransferSendPacketAction, + packet |-> chains'[chainId1].localPackets.list[chains[chainId1].nextPacketId] + ] + /\ outcome' = [name |-> SuccessOutcome] + /\ UNCHANGED relayerRunning + +\* TODO: +\* append CHANNEL_ID/PORT_ID for source zone +\* trim CHANNEL_ID/PORT_ID for 
sink zone +\* @type: (DENOM_ID, CHAIN_ID) => DENOM_ID; +TransformDenom(denom, targetChainId) == + denom + +\* Process an IBC packet at targetChain +\* @type: (PACKET) => CHAIN; +IBCTransferReceivePacket(packet) == + LET + targetChainId == packet.targetChainId + sourceChainId == packet.sourceChainId + destination == packet.to + denom == TransformDenom(packet.denom, targetChainId) + amount == packet.amount + targetChain == chains[targetChainId] + IN + [ + targetChain EXCEPT + !.bank = [@ EXCEPT + ![destination] = AddOrUpdateEntry(@, denom, GetDenomFromBank(@, denom) + amount) + ], + !.supply = AddOrUpdateEntry(@, denom, GetDenomFromBank(@, denom) + amount), + !.remotePackets = AddOrUpdateEntry( + @, + sourceChainId, + AddOrUpdateEntry( + IF sourceChainId \in DOMAIN @ THEN @[sourceChainId] ELSE SetAsFun({}), + packet.id, + packet + ) + ) + ] + +\* Checks if the packet is not processed by the targetChain +\* @type: (PACKET, CHAIN) => Bool; +IBCTransferReceivePacketCondition(packet, targetChain) == + /\ relayerRunning + /\ \/ packet.sourceChainId \notin DOMAIN targetChain.remotePackets + \/ packet.id \notin DOMAIN targetChain.remotePackets[packet.sourceChainId] + +\* Next operator for IBCTransferReceivePacket +IBCTransferReceivePacketNext == + \E chainId \in CHAIN_IDS: + \E packetId \in chains[chainId].localPackets.pending: + LET + packet == chains[chainId].localPackets.list[packetId] + targetChain == chains[packet.targetChainId] + IN + /\ IBCTransferReceivePacketCondition(packet, targetChain) + /\ chains' = [chains EXCEPT + ![packet.targetChainId] = IBCTransferReceivePacket(packet) + ] + /\ action' = [ + name |-> IBCTransferReceivePacketAction, + packet |-> packet + ] + /\ outcome' = [name |-> SuccessOutcome] + /\ UNCHANGED relayerRunning + + +\* Picks an IBCPacket from sourceChain to timeout +\* Refunds balance to source account +\* Moves the packet from pending to expired +\* @type: (PACKET) => CHAIN; +IBCTransferTimeoutPacket(packet) == + LET + from == packet.from + denom == packet.denom + amount == packet.amount + sourceChain == chains[packet.sourceChainId] + targetChain == chains[packet.targetChainId] + escrowAccount == sourceChain.escrow[packet.targetChainId] + IN + [ + sourceChain EXCEPT + !.bank = [@ EXCEPT + ![from] = AddOrUpdateEntry(@, denom, GetDenomFromBank(@, denom) + amount) + ], + !.escrow = [@ EXCEPT + ![packet.targetChainId] = AddOrUpdateEntry(@, denom, GetDenomFromBank(@, denom) - amount) + ], + !.localPackets = [@ EXCEPT + !.pending = @ \ {packet.id}, + !.expired = @ \union {packet.id} + ] + ] + +\* Checks if the packet is not processed by the targetChain +\* @type: (PACKET, CHAIN) => Bool; +IBCTransferTimeoutPacketCondition(packet, targetChain) == + /\ ~relayerRunning + /\ packet.id \notin DOMAIN targetChain.remotePackets[packet.sourceChainId] + +\* Next operator for IBCTransferTimeoutPacket +IBCTransferTimeoutPacketNext == + \E chainId \in CHAIN_IDS: + \E packetId \in chains[chainId].localPackets.pending: + LET + packet == chains[chainId].localPackets.list[packetId] + sourceChain == chains[packet.sourceChainId] + targetChain == chains[packet.targetChainId] + IN + /\ IBCTransferTimeoutPacketCondition(packet, targetChain) + /\ chains' = [chains EXCEPT + ![sourceChain.id] = IBCTransferTimeoutPacket(packet) + ] + /\ action' = [ + name |-> IBCTransferTimeoutPacketAction, + packet |-> packet + ] + /\ outcome' = [name |-> SuccessOutcome] + /\ UNCHANGED relayerRunning + + +\* Mark an IBC packet at sourceChain as success which is processed at targetChain +\* @type: (PACKET) => 
CHAIN; +IBCTransferAcknowledgePacket(packet) == + LET sourceChain == chains[packet.sourceChainId] IN + [ + sourceChain EXCEPT + !.localPackets = [@ EXCEPT + !.pending = @ \ {packet.id}, + !.success = @ \union {packet.id} + ] + ] + +\* Checks if the packet is already processed by the targetChain +\* @type: (PACKET, CHAIN) => Bool; +IBCTransferAcknowledgePacketCondition(packet, targetChain) == + /\ relayerRunning + /\ packet.sourceChainId \in DOMAIN targetChain.remotePackets + /\ packet.id \in DOMAIN targetChain.remotePackets[packet.sourceChainId] + +\* Next operator for IBCTransferAcknowledgePacket +IBCTransferAcknowledgePacketNext == + \E chainId \in CHAIN_IDS: + \E packetId \in chains[chainId].localPackets.pending: + LET + packet == chains[chainId].localPackets.list[packetId] + sourceChain == chains[packet.sourceChainId] + targetChain == chains[packet.targetChainId] + IN + /\ IBCTransferAcknowledgePacketCondition(packet, targetChain) + /\ chains' = [chains EXCEPT + ![sourceChain.id] = IBCTransferAcknowledgePacket(packet) + ] + /\ action' = [ + name |-> IBCTransferAcknowledgePacketAction, + packet |-> packet + ] + /\ outcome' = [name |-> SuccessOutcome] + /\ UNCHANGED relayerRunning + +\* Init predicate +Init == + /\ chains = [chainId \in CHAIN_IDS |-> Genesis(chainId)] + /\ relayerRunning = TRUE + /\ action = [ name |-> NullAction ] + /\ outcome = [ name |-> SuccessOutcome ] + +\* Complete Next predicate +Next == + \/ LocalTransferNext + \/ InterruptRelayNext + \/ RestoreRelayNext + \/ IBCTransferSendPacketNext + \/ IBCTransferReceivePacketNext + \/ IBCTransferTimeoutPacketNext + \/ IBCTransferAcknowledgePacketNext + +==== diff --git a/tools/integration-test/spec/Transfer_typedefs.tla b/tools/integration-test/spec/Transfer_typedefs.tla new file mode 100644 index 0000000000..f352ccfb76 --- /dev/null +++ b/tools/integration-test/spec/Transfer_typedefs.tla @@ -0,0 +1,45 @@ +---- MODULE Transfer_typedefs ---- + +(* + @typeAlias: ACCOUNT_ID = Int; + @typeAlias: CHAIN_ID = Int; + @typeAlias: PACKET_ID = Int; + + TODO: Fix when to transfer back money to sink zone + @typeAlias: DENOM_ID = CHAIN_ID; + + @typeAlias: PACKET = [ + id: PACKET_ID, + from: ACCOUNT_ID, + sourceChainId: CHAIN_ID, + to: ACCOUNT_ID, + targetChainId: CHAIN_ID, + denom: DENOM_ID, + amount: Int + ]; + + @typeAlias: BANK = DENOM_ID -> Int; + + @typeAlias: CHAIN = [ + id: CHAIN_ID, + + bank: ACCOUNT_ID -> BANK, + supply: BANK, + + localPackets: [ + list: PACKET_ID -> PACKET, + pending: Set(PACKET_ID), + expired: Set(PACKET_ID), + success: Set(PACKET_ID) + ], + + remotePackets: CHAIN_ID -> PACKET_ID -> PACKET, + + escrow: CHAIN_ID -> BANK, + + nextPacketId: PACKET_ID + ]; +*) +typedefs == TRUE + +==== diff --git a/tools/integration-test/src/lib.rs b/tools/integration-test/src/lib.rs index 75d580024e..f4b83a25c4 100644 --- a/tools/integration-test/src/lib.rs +++ b/tools/integration-test/src/lib.rs @@ -1,3 +1,7 @@ #[allow(clippy::too_many_arguments)] #[cfg(test)] pub mod tests; + +#[cfg(any(all(test, feature = "mbt"), doc))] +#[macro_use] +pub mod mbt; diff --git a/tools/integration-test/src/mbt/README.md b/tools/integration-test/src/mbt/README.md new file mode 100644 index 0000000000..f2c0311b28 --- /dev/null +++ b/tools/integration-test/src/mbt/README.md @@ -0,0 +1,7 @@ +# MBT for Hermes Integration Test + +Make sure [`apalache-mc`](https://github.com/informalsystems/apalache) is installed and setup properly. Check `apalache-mc version`. 
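+
+The harness reads two optional compile-time environment variables: `APALACHE` (path to the `apalache-mc` binary, default `apalache-mc`) and `MBT_TRACES` (number of counterexample traces generated per invariant, default `2`). Each generated trace is an ITF JSON file, copied to `data/mbt/success` or `data/mbt/failure` depending on whether replaying it against the running chains succeeded. The sketch below is a minimal illustration, assuming it runs inside the crate's `mbt` test module, of how a single ITF action maps onto the `Action` type in `src/mbt/state.rs`:
+
+```rust
+// Minimal sketch: decoding one ITF action into the `Action` type from
+// `src/mbt/state.rs`. Assumes Apalache writes small integers as plain JSON
+// numbers, which the `u64` fields in `state.rs` expect.
+use crate::mbt::state::Action;
+
+fn decode_local_transfer_action() {
+    let raw = r#"{ "name": "LocalTransfer", "chainId": 1, "source": 1, "target": 2, "denom": 1, "amount": 3 }"#;
+    // `#[serde(tag = "name")]` selects the variant; the remaining fields are
+    // camelCase because of `#[serde(rename_all = "camelCase")]`.
+    let action: Action = serde_json::from_str(raw).expect("valid ITF action");
+    assert!(matches!(action, Action::LocalTransfer { amount: 3, .. }));
+}
+```
+
+To run the MBT tests: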
+ +```bash +cargo test -p ibc-integration-test --features mbt mbt::transfer +``` diff --git a/tools/integration-test/src/mbt/handlers.rs b/tools/integration-test/src/mbt/handlers.rs new file mode 100644 index 0000000000..1ad10c628b --- /dev/null +++ b/tools/integration-test/src/mbt/handlers.rs @@ -0,0 +1,296 @@ +use ibc_relayer::util::task::TaskHandle; +use ibc_relayer::worker::client::spawn_refresh_client; + +use ibc_test_framework::bootstrap::binary::chain::bootstrap_foreign_client_pair; +use ibc_test_framework::bootstrap::binary::connection::bootstrap_connection; +use ibc_test_framework::chain::tagged::TaggedChainDriverExt; +use ibc_test_framework::ibc::denom::derive_ibc_denom; +use ibc_test_framework::prelude::*; +use ibc_test_framework::relayer::channel::{assert_eventually_channel_established, init_channel}; +use ibc_test_framework::relayer::connection::{ + assert_eventually_connection_established, init_connection, +}; +use ibc_test_framework::types::binary::client::ClientIdPair; +use ibc_test_framework::types::binary::connection::ConnectedConnection; +use ibc_test_framework::types::tagged::mono::Tagged; + +use super::state::Packet; + +use super::utils::{get_denom, get_wallet, wait_for_client}; + +pub fn setup_chains( + chains: &ConnectedChains, +) -> Result<(), Error> { + { + let _refresh_task_a = spawn_refresh_client(chains.foreign_clients.client_b_to_a.clone()) + .ok_or_else(|| eyre!("expect refresh task spawned"))?; + + let _refresh_task_b = spawn_refresh_client(chains.foreign_clients.client_a_to_b.clone()) + .ok_or_else(|| eyre!("expect refresh task spawned"))?; + + bootstrap_connection(&chains.foreign_clients, Default::default())?; + }; + + wait_for_client(); + + Ok(()) +} + +pub fn local_transfer_handler( + node: Tagged, + source: u64, + target: u64, + denom: u64, + amount: u64, +) -> Result<(), Error> { + let wallets = node.wallets(); + + let source_wallet = get_wallet(&wallets, source); + let target_wallet = get_wallet(&wallets, target); + let denom = get_denom(&node, denom); + + node.chain_driver().local_transfer_token( + &source_wallet, + &target_wallet.address(), + amount, + &denom, + )?; + + Ok(()) +} + +pub fn create_channel( + chain_handle_a: &ChainA, + chain_handle_b: &ChainB, + channel: &mut Option>, + refresh_task_a: &mut Option, + refresh_task_b: &mut Option, +) -> Result<(), Error> { + let port_a = tagged_transfer_port(); + let port_b = tagged_transfer_port(); + + let clients2 = + bootstrap_foreign_client_pair(chain_handle_a, chain_handle_b, Default::default())?; + + *refresh_task_a = Some( + spawn_refresh_client(clients2.client_b_to_a.clone()) + .ok_or_else(|| eyre!("expect refresh task spawned"))?, + ); + + *refresh_task_b = Some( + spawn_refresh_client(clients2.client_a_to_b.clone()) + .ok_or_else(|| eyre!("expect refresh task spawned"))?, + ); + + let (connection_id_b, new_connection_b) = init_connection( + chain_handle_a, + chain_handle_b, + &clients2.client_b_to_a.tagged_client_id(), + &clients2.client_a_to_b.tagged_client_id(), + )?; + + let connection_id_a = assert_eventually_connection_established( + chain_handle_b, + chain_handle_a, + &connection_id_b.as_ref(), + )?; + + let (channel_id_b_2, channel_b_2) = init_channel( + chain_handle_a, + chain_handle_b, + &clients2.client_b_to_a.tagged_client_id(), + &clients2.client_a_to_b.tagged_client_id(), + &connection_id_a.as_ref(), + &connection_id_b.as_ref(), + &port_a.as_ref(), + &port_b.as_ref(), + )?; + + let channel_id_a_2 = assert_eventually_channel_established( + chain_handle_b, + chain_handle_a, + 
&channel_id_b_2.as_ref(), + &port_b.as_ref(), + )?; + + let client_ids = ClientIdPair::new( + clients2.client_b_to_a.tagged_client_id().cloned(), + clients2.client_a_to_b.tagged_client_id().cloned(), + ); + + let new_connected_connection = ConnectedConnection::new( + client_ids, + new_connection_b.flipped(), + connection_id_a, + connection_id_b, + ); + + let connected_channel = ConnectedChannel { + connection: new_connected_connection, + channel: channel_b_2.flipped(), + channel_id_a: channel_id_a_2, + channel_id_b: channel_id_b_2, + port_a, + port_b, + }; + + *channel = Some(connected_channel); + + info!("Channel is created"); + + Ok(()) +} + +pub fn expire_channel( + channel: &mut Option>, + refresh_task_a: &mut Option, + refresh_task_b: &mut Option, +) -> Result<(), Error> { + // dropping the client handler to expire the clients + super::utils::drop(refresh_task_a.take()); + super::utils::drop(refresh_task_b.take()); + + wait_for_client(); + + super::utils::drop(channel.take()); + + info!("Channel expired"); + + Ok(()) +} + +pub fn ibc_transfer_send_packet( + node_source: Tagged, + node_target: Tagged, + channels: &ConnectedChannel, + packet: &Packet, +) -> Result<(), Error> { + let wallets_source = node_source.wallets(); + let wallets_target = node_target.wallets(); + + let wallet_source = get_wallet(&wallets_source, packet.from); + let wallet_target = get_wallet(&wallets_target, packet.to); + let denom_source = get_denom(&node_source, packet.denom); + let amount_source_to_target = packet.amount; + + let (port_source, channel_id_source) = ( + DualTagged::new(channels.port_a.value()), + DualTagged::new(channels.channel_id_a.value()), + ); + + let balance_source = node_source + .chain_driver() + .query_balance(&wallet_source.address(), &denom_source)?; + + info!( + "Sending IBC transfer from chain {} to chain {} with amount of {} {}", + node_source.chain_id(), + node_target.chain_id(), + amount_source_to_target, + denom_source, + ); + + node_source.chain_driver().ibc_transfer_token( + &port_source, + &channel_id_source, + &wallet_source, + &wallet_target.address(), + &denom_source, + amount_source_to_target, + )?; + + node_source.chain_driver().assert_eventual_wallet_amount( + &wallet_source.address(), + balance_source - amount_source_to_target, + &denom_source, + )?; + + Ok(()) +} + +pub fn ibc_transfer_receive_packet( + node_source: Tagged, + node_target: Tagged, + channels: &ConnectedChannel, + packet: &Packet, +) -> Result<(), Error> { + let wallets_target = node_target.wallets(); + + let wallet_target = get_wallet(&wallets_target, packet.to); + let denom_source = get_denom(&node_source, packet.denom); + let amount_source_to_target = packet.amount; + + let (port_target, channel_id_target) = ( + DualTagged::new(channels.port_b.value()), + DualTagged::new(channels.channel_id_b.value()), + ); + + let denom_target = derive_ibc_denom(&port_target, &channel_id_target, &denom_source)?; + + info!( + "Waiting for user on chain {} to receive IBC transferred amount of {} {} (chain {}/{})", + node_target.chain_id(), + amount_source_to_target, + denom_target, + node_source.chain_id(), + denom_source + ); + + node_target.chain_driver().assert_eventual_wallet_amount( + &wallet_target.address(), + amount_source_to_target, + &denom_target.as_ref(), + )?; + + Ok(()) +} + +pub fn ibc_transfer_acknowledge_packet( + node_source: Tagged, + node_target: Tagged, + _channels: &Option>, + packet: &Packet, +) -> Result<(), Error> { + let denom_source = get_denom(&node_source, packet.denom); + let 
amount_source_to_target = packet.amount; + + info!( + "Waiting for user on chain {} to confirm IBC transferred amount of {} {}", + node_source.chain_id(), + amount_source_to_target, + denom_source + ); + + info!( + "Successfully performed IBC transfer from chain {} to chain {}", + node_source.chain_id(), + node_target.chain_id(), + ); + + Ok(()) +} + +pub fn ibc_transfer_expire_packet( + node_source: Tagged, + node_target: Tagged, + _channels: &Option>, + packet: &Packet, +) -> Result<(), Error> { + let denom_source = get_denom(&node_source, packet.denom); + let amount_source_to_target = packet.amount; + + info!( + "Waiting for user on chain {} to get refund of previously IBC transferred amount of {} {}", + node_source.chain_id(), + amount_source_to_target, + denom_source + ); + + info!( + "Successfully performed IBC packet expiry intended from chain {} to chain {}", + node_source.chain_id(), + node_target.chain_id(), + ); + + Ok(()) +} diff --git a/tools/integration-test/src/mbt/itf.rs b/tools/integration-test/src/mbt/itf.rs new file mode 100644 index 0000000000..44bb354ed9 --- /dev/null +++ b/tools/integration-test/src/mbt/itf.rs @@ -0,0 +1,132 @@ +use serde::{Deserialize, Deserializer, Serialize}; + +#[derive(Debug, Serialize, Deserialize)] +pub struct Meta { + pub format: String, + #[serde(rename = "format-description")] + pub format_description: String, + pub description: String, +} + +#[derive(Debug, Serialize, Deserialize)] +pub struct InformalTrace { + #[serde(rename = "#meta")] + pub meta: Meta, + pub vars: Vec, + pub states: Vec, +} + +#[derive(Debug, Serialize)] +pub struct Map(pub Vec<(K, V)>); + +impl<'de, K, V> Deserialize<'de> for Map +where + K: Deserialize<'de>, + V: Deserialize<'de>, +{ + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + #[derive(Debug, Deserialize)] + struct Meta { + #[serde(rename = "#map")] + map: Vec<(K, V)>, + } + let s: Meta<_, _> = Deserialize::deserialize(deserializer)?; + Ok(Self(s.map)) + } +} + +#[derive(Debug, Serialize)] +pub struct Set(pub Vec); + +impl<'de, E> Deserialize<'de> for Set +where + E: Deserialize<'de>, +{ + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + #[derive(Debug, Deserialize)] + pub struct Meta { + #[serde(rename = "#set")] + set: Vec, + } + let s: Meta<_> = Deserialize::deserialize(deserializer)?; + Ok(Self(s.set)) + } +} + +mod test { + use super::{Map, Set}; + + #[test] + fn test_empty_set() { + let itf = r##"{ "#set": [] }"##; + let s: Set = serde_json::from_str(itf).unwrap(); + assert!(s.0.is_empty()); + } + + #[test] + fn test_set() { + let itf = r##"{ "#set": [1,2,3] }"##; + let s: Set = serde_json::from_str(itf).unwrap(); + assert_eq!(s.0, vec![1, 2, 3]); + } + + #[test] + fn test_empty_map() { + let itf = r##"{ "#map": [ ] }"##; + let m: Map = serde_json::from_str(itf).unwrap(); + assert!(m.0.is_empty()); + } + + #[test] + #[should_panic] + fn test_singleton_map() { + let itf = r##"{ "#map": [1, 11] }"##; + let m: Map = serde_json::from_str(itf).unwrap(); + assert_eq!(m.0, vec![(1, 11)]); + } + + #[test] + fn test_normal_map() { + let itf = r##"{ "#map": [[1, 11], [2, 22]] }"##; + let m: Map = serde_json::from_str(itf).unwrap(); + assert_eq!(m.0, vec![(1, 11), (2, 22)]); + } + + #[test] + #[cfg(feature = "manual")] + fn parse_itf() { + use super::super::itf::InformalTrace; + use super::super::state::State; + + let itf_path = concat!( + env!("CARGO_MANIFEST_DIR"), + "/spec/example/counterexample.itf.json" + ); + + let itf_json = 
std::fs::read_to_string(itf_path).expect("itf file does not exist"); + + let t: InformalTrace = + serde_json::from_str(&itf_json).expect("deserialization error"); + + for state in t.states { + println!( + "action: {}", + serde_json::to_string_pretty(&state.action).unwrap() + ); + println!( + "outcome: {}", + serde_json::to_string_pretty(&state.outcome).unwrap() + ); + println!( + "chains: {}", + serde_json::to_string_pretty(&state.chains).unwrap() + ); + } + } +} diff --git a/tools/integration-test/src/mbt/mod.rs b/tools/integration-test/src/mbt/mod.rs new file mode 100644 index 0000000000..173f132abb --- /dev/null +++ b/tools/integration-test/src/mbt/mod.rs @@ -0,0 +1,8 @@ +pub mod utils; + +pub mod itf; +pub mod state; + +pub mod handlers; + +pub mod transfer; diff --git a/tools/integration-test/src/mbt/state.rs b/tools/integration-test/src/mbt/state.rs new file mode 100644 index 0000000000..129983c67d --- /dev/null +++ b/tools/integration-test/src/mbt/state.rs @@ -0,0 +1,85 @@ +use serde::{Deserialize, Serialize}; + +use super::itf::{Map, Set}; + +pub type ChainId = u64; +pub type DenomId = ChainId; +pub type AccountId = u64; +pub type PacketId = u64; +pub type Balance = u64; + +#[derive(Debug, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct Packet { + pub id: PacketId, + pub from: AccountId, + pub source_chain_id: ChainId, + pub to: AccountId, + pub target_chain_id: ChainId, + pub denom: DenomId, + pub amount: Balance, +} + +#[derive(Debug, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct LocalPackets { + pub list: Map, + pub pending: Set, + pub expired: Set, + pub success: Set, +} + +#[derive(Debug, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct Chain { + pub id: ChainId, + pub bank: Map>, + pub supply: Map, + pub local_packets: LocalPackets, + pub remote_packets: Map>, + pub escrow: Map>, + pub next_packet_id: PacketId, +} + +#[derive(Debug, Serialize, Deserialize)] +#[serde(tag = "name")] +pub enum Action { + Null, + #[serde(rename_all = "camelCase")] + LocalTransfer { + chain_id: ChainId, + source: AccountId, + target: AccountId, + denom: DenomId, + amount: Balance, + }, + RestoreRelay, + InterruptRelay, + IBCTransferSendPacket { + packet: Packet, + }, + IBCTransferReceivePacket { + packet: Packet, + }, + IBCTransferAcknowledgePacket { + packet: Packet, + }, + IBCTransferTimeoutPacket { + packet: Packet, + }, +} + +#[derive(Debug, Serialize, Deserialize)] +#[serde(tag = "name")] +pub enum Outcome { + Success, + Error, +} + +#[derive(Debug, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct State { + pub chains: Map, + pub action: Action, + pub outcome: Outcome, +} diff --git a/tools/integration-test/src/mbt/transfer.rs b/tools/integration-test/src/mbt/transfer.rs new file mode 100644 index 0000000000..10b62fdee5 --- /dev/null +++ b/tools/integration-test/src/mbt/transfer.rs @@ -0,0 +1,365 @@ +use std::io::Write; +use std::panic::{RefUnwindSafe, UnwindSafe}; + +use ibc_relayer::config::{ + Channels as ConfigChannels, Clients as ConfigClients, Connections as ConfigConnections, + ModeConfig, Packets as ConfigPackets, +}; + +use ibc_test_framework::prelude::*; +use ibc_test_framework::types::tagged::mono::Tagged; + +use super::state::{Action, State}; + +use super::itf::InformalTrace; +use super::utils::{get_chain, CLIENT_EXPIRY}; + +const TEST_NAMES: &[&str] = &[ + "LocalTransferInv", + "IBCTransferAcknowledgePacketInv", + "IBCTransferTimeoutPacketInv", +]; +const NUM_TRACES: Option<&str> = 
option_env!("MBT_TRACES"); +const APALACHE: Option<&str> = option_env!("APALACHE"); + +const ITF_TRACE_DIRECTORY: &str = concat!(env!("CARGO_MANIFEST_DIR"), "/data/mbt"); + +fn generate_mbt_traces( + apalache_path: &str, + test_name: &str, + num_traces: usize, +) -> Result, Error> { + let temp_dir = tempfile::TempDir::new()?; + let run_dir = temp_dir.path().join("run"); + let tla_path = concat!(env!("CARGO_MANIFEST_DIR"), "/spec/MC_Transfer.tla"); + let mut cmd = std::process::Command::new(apalache_path); + cmd.arg("check") + .arg("--init=Init") + .arg("--next=Next") + .arg(&format!("--inv={test_name}")) + .arg(&format!("--max-error={num_traces}")) + .arg(&format!( + "--run-dir={}", + run_dir.to_str().expect("no panic") + )) + .arg(&format!( + "--out-dir={}", + temp_dir.path().to_str().expect("no panic") + )) + .arg(tla_path); + let _ = cmd.status().expect("failed to execute process"); + + std::fs::read_dir(run_dir)? + .flatten() + .map(|entry| entry.path()) + .filter(|file_path| file_path.is_file()) + .flat_map(|file_path| { + file_path + .file_name() + .and_then(|file_name| file_name.to_str()) + .and_then(|file_name| { + (file_name != "counterexample.itf.json" + && file_name.starts_with("counterexample") + && file_name.ends_with(".itf.json")) + .then(|| { + let name = format!("{test_name}_{file_name}"); + Ok(( + name, + std::fs::read_to_string(file_path.to_str().expect("should not panic")) + .expect("error while reading counterexample.itf.json"), + )) + }) + }) + }) + .collect() +} + +fn execute_mbt(f: F) -> Result<(), Error> +where + F: FnOnce(Vec) -> Result<(), Error> + UnwindSafe + RefUnwindSafe + Copy, +{ + let apalache = APALACHE.unwrap_or("apalache-mc"); + let num_traces = NUM_TRACES + .unwrap_or("2") + .parse() + .expect("an number for number of traces per test"); + + let success_traces = &format!("{ITF_TRACE_DIRECTORY}/success"); + let failure_traces = &format!("{ITF_TRACE_DIRECTORY}/failure"); + + std::fs::create_dir_all(success_traces)?; + std::fs::create_dir_all(failure_traces)?; + + for test_name in TEST_NAMES { + for (itf_name, itf_json) in generate_mbt_traces(apalache, test_name, num_traces)? { + let itf: InformalTrace = + serde_json::from_str(&itf_json).expect("deserialization error"); + + let result = std::panic::catch_unwind(|| f(itf.states).expect("to fail")); + + let unique_itf_trace_path = if result.is_ok() { + format!("{success_traces}/{itf_name}") + } else { + format!("{failure_traces}/{itf_name}") + }; + + let mut file = std::fs::File::create(unique_itf_trace_path)?; + file.write_all(itf_json.as_bytes())?; + + if let Err(err) = result { + std::panic::resume_unwind(err); + } + } + } + Ok(()) +} + +#[test] +fn test_ibc_transfer() -> Result<(), Error> { + execute_mbt(|trace| run_binary_channel_test(&IbcTransferMBT(trace))) +} + +/** + Test that IBC token transfer can still work with a single + chain that is connected to itself. 
+*/ +#[test] +#[cfg(feature = "manual")] +fn test_self_connected_ibc_transfer() -> Result<(), Error> { + use ibc_test_framework::framework::binary::chain::run_self_connected_binary_chain_test; + use ibc_test_framework::framework::binary::channel::RunBinaryChannelTest; + + execute_mbt(|trace| { + run_self_connected_binary_chain_test(&RunBinaryConnectionTest::new( + &RunBinaryChannelTest::new(&IbcTransferMBT(trace)), + )) + }) +} + +pub struct IbcTransferMBT(Vec); + +impl TestOverrides for IbcTransferMBT { + fn modify_test_config(&self, config: &mut TestConfig) { + config.bootstrap_with_random_ids = false; + } + + fn modify_relayer_config(&self, config: &mut Config) { + config.mode = ModeConfig { + clients: ConfigClients { + enabled: true, + refresh: true, + misbehaviour: true, + }, + connections: ConfigConnections { enabled: true }, + channels: ConfigChannels { enabled: true }, + packets: ConfigPackets { + enabled: true, + clear_interval: 10, + clear_on_start: true, + tx_confirmation: true, + }, + }; + + for mut chain_config in config.chains.iter_mut() { + chain_config.trusting_period = Some(CLIENT_EXPIRY); + } + } + + fn should_spawn_supervisor(&self) -> bool { + false + } +} + +impl BinaryChannelTest for IbcTransferMBT { + fn run( + &self, + _config: &TestConfig, + relayer: RelayerDriver, + chains: ConnectedChains, + channels: ConnectedChannel, + ) -> Result<(), Error> { + // relayer is spawned + let mut supervisor = Some(relayer.spawn_supervisor()?); + + for state in &self.0 { + match &state.action { + Action::Null => { + info!("[Init] Done"); + } + Action::LocalTransfer { + chain_id, + source, + target, + denom, + amount, + } => { + info!("[LocalTransfer] Init"); + let node: Tagged = get_chain(&chains, *chain_id); + super::handlers::local_transfer_handler( + node, *source, *target, *denom, *amount, + )?; + info!("[LocalTransfer] Done"); + } + Action::RestoreRelay => { + if supervisor.is_none() { + supervisor = Some(relayer.spawn_supervisor()?); + } + + info!("[RestoreRelay] Done"); + } + Action::InterruptRelay => { + supervisor.take().expect("one").shutdown(); + + info!("[InterruptRelay] Done"); + } + Action::IBCTransferSendPacket { packet } => { + info!("[IBCTransferSendPacket] {:?}", packet); + + match (packet.source_chain_id, packet.target_chain_id) { + (1, 2) => { + assert!( + super::utils::get_committed_packets_at_src( + &chains.handle_a, + &channels + )? + .is_empty(), + "no packets present" + ); + + super::handlers::ibc_transfer_send_packet( + chains.node_a.as_ref(), + chains.node_b.as_ref(), + &channels, + packet, + )?; + + assert_eq!( + super::utils::get_committed_packets_at_src( + &chains.handle_a, + &channels, + )? + .len(), + 1, + "one packet is sent" + ); + } + (2, 1) => { + assert!( + super::utils::get_committed_packets_at_src( + &chains.handle_b, + &channels.clone().flip() + )? + .is_empty(), + "no packets present" + ); + + super::handlers::ibc_transfer_send_packet( + chains.node_b.as_ref(), + chains.node_a.as_ref(), + &channels.clone().flip(), + packet, + )?; + + assert_eq!( + super::utils::get_committed_packets_at_src( + &chains.handle_b, + &channels.clone().flip() + )? 
+ .len(), + 1, + "one packet is present" + ); + } + _ => unreachable!(), + } + + info!("[IBCTransferSendPacket] Done"); + } + Action::IBCTransferReceivePacket { packet } => { + info!("[IBCTransferReceivePacket] {:?}", packet); + match (packet.source_chain_id, packet.target_chain_id) { + (1, 2) => { + super::handlers::ibc_transfer_receive_packet( + chains.node_a.as_ref(), + chains.node_b.as_ref(), + &channels, + packet, + )?; + assert_eq!( + super::utils::get_acknowledged_packets_at_dst( + &chains.handle_b, + &channels.clone().flip() + )? + .len(), + 1, + "one packet is received and sent acknowledgement" + ); + } + (2, 1) => { + super::handlers::ibc_transfer_receive_packet( + chains.node_b.as_ref(), + chains.node_a.as_ref(), + &channels.clone().flip(), + packet, + )?; + assert_eq!( + super::utils::get_acknowledged_packets_at_dst( + &chains.handle_a, + &channels + )? + .len(), + 1, + "one packet is received and sent acknowledgement" + ); + } + _ => unreachable!(), + } + + info!("[IBCTransferReceivePacket] Done"); + } + Action::IBCTransferAcknowledgePacket { packet } => { + info!("[IBCTransferAcknowledgePacket] {:?}", packet); + super::utils::wait_for_client(); + match (packet.source_chain_id, packet.target_chain_id) { + (1, 2) => { + assert!( + super::utils::get_committed_packets_at_src( + &chains.handle_a, + &channels + )? + .is_empty(), + "commitment is completed" + ); + } + (2, 1) => { + assert!( + super::utils::get_committed_packets_at_src( + &chains.handle_b, + &channels.clone().flip() + )? + .is_empty(), + "commitment is completed" + ); + } + _ => unreachable!(), + } + + info!("[IBCTransferAcknowledgePacket] Done"); + } + Action::IBCTransferTimeoutPacket { packet } => { + info!("[IBCTransferTimeoutPacket] {:?}", packet); + + match (packet.source_chain_id, packet.target_chain_id) { + (1, 2) => {} + (2, 1) => {} + _ => unreachable!(), + } + + info!("[IBCTransferTimeoutPacket] Done") + } + } + } + + Ok(()) + } +} diff --git a/tools/integration-test/src/mbt/utils.rs b/tools/integration-test/src/mbt/utils.rs new file mode 100644 index 0000000000..97af0c01cc --- /dev/null +++ b/tools/integration-test/src/mbt/utils.rs @@ -0,0 +1,136 @@ +use std::thread::sleep; +use std::time::Duration; + +use ibc::core::ics04_channel::packet::Sequence; +use ibc_relayer::chain::requests::{ + QueryPacketAcknowledgementsRequest, QueryPacketCommitmentsRequest, QueryUnreceivedAcksRequest, + QueryUnreceivedPacketsRequest, +}; +use ibc_test_framework::ibc::denom::Denom; +use ibc_test_framework::prelude::*; +use ibc_test_framework::types::tagged::mono::Tagged; + +use super::{ + itf::InformalTrace, + state::{DenomId, State}, +}; + +pub const CLIENT_EXPIRY: Duration = Duration::from_secs(15); + +pub fn get_chain( + chains: &ConnectedChains, + chain_id: u64, +) -> Tagged +where + ChainA: ChainHandle, + ChainB: ChainHandle, + ChainX: ChainHandle, +{ + Tagged::new(match chain_id { + 1 => chains.node_a.value(), + 2 => chains.node_b.value(), + _ => unreachable!(), + }) +} + +pub fn get_wallet<'a, ChainX>( + wallets: &'a Tagged, + user: u64, +) -> Tagged { + match user { + 1 => wallets.user1(), + 2 => wallets.user2(), + _ => unreachable!(), + } +} + +pub fn get_denom<'a, ChainX>( + chain: &'a Tagged, + denom: DenomId, +) -> Tagged { + match denom { + 1 => chain.denom(), + 2 => chain.denom(), + _ => unreachable!(), + } +} + +pub fn wait_for_client() { + let sleep_time = CLIENT_EXPIRY + Duration::from_secs(5); + + info!( + "Sleeping for {} seconds to wait for IBC client to expire", + sleep_time.as_secs() + ); + + 
sleep(sleep_time); +} + +pub fn parse_itf_from_json(itf_path: &str) -> Vec { + let itf_json = std::fs::read_to_string(itf_path).expect("itf file does not exist. did you run `apalache check --inv=Invariant --run-dir=run main.tla` first?"); + + let trace: InformalTrace = + serde_json::from_str(&itf_json).expect("deserialization error"); + + trace.states +} + +pub fn get_unreceived_packets_at_dst( + chain: &ChainA, + channel: &ConnectedChannel, +) -> Result, Error> { + let port_id_a = channel.port_a.value(); + let channel_id_a = channel.channel_id_a.value(); + let request = QueryUnreceivedPacketsRequest { + port_id: port_id_a.clone(), + channel_id: *channel_id_a, + packet_commitment_sequences: Vec::new(), + }; + Ok(chain.query_unreceived_packets(request)?) +} + +pub fn get_committed_packets_at_src( + chain: &ChainA, + channel: &ConnectedChannel, +) -> Result, Error> { + let port_id_a = channel.port_a.value(); + let channel_id_a = channel.channel_id_a.value(); + let request = QueryPacketCommitmentsRequest { + port_id: port_id_a.clone(), + channel_id: *channel_id_a, + pagination: None, + }; + let (sequences, _) = chain.query_packet_commitments(request)?; + Ok(sequences) +} + +pub fn get_unacknowledged_packets_at_src( + chain: &ChainA, + channel: &ConnectedChannel, +) -> Result, Error> { + let port_id_a = channel.port_a.value(); + let channel_id_a = channel.channel_id_a.value(); + let request = QueryUnreceivedAcksRequest { + port_id: port_id_a.clone(), + channel_id: *channel_id_a, + packet_ack_sequences: Vec::new(), + }; + Ok(chain.query_unreceived_acknowledgement(request)?) +} + +pub fn get_acknowledged_packets_at_dst( + chain: &ChainA, + channel: &ConnectedChannel, +) -> Result, Error> { + let port_id_a = channel.port_a.value(); + let channel_id_a = channel.channel_id_a.value(); + let request = QueryPacketAcknowledgementsRequest { + port_id: port_id_a.clone(), + channel_id: *channel_id_a, + pagination: None, + packet_commitment_sequences: Vec::new(), + }; + Ok(chain.query_packet_acknowledgements(request)?.0) +} + +pub fn drop(_: X) {} diff --git a/tools/integration-test/src/tests/execute_schedule.rs b/tools/integration-test/src/tests/execute_schedule.rs new file mode 100644 index 0000000000..96f374e609 --- /dev/null +++ b/tools/integration-test/src/tests/execute_schedule.rs @@ -0,0 +1,92 @@ +//! This test ensures that the `RelayPath::execute_schedule` method does not +//! drop any scheduled `OperationalData` when events associated with a prior +//! piece of operational data fails to send. Subsequent pieces of operational +//! data that were scheduled should be re-queued and not dropped. +//! +//! In order to test this behavior, the test manually relays a batch (i.e. at least +//! 2) IBC transfers from chain A to chain B. Chain B is then shut down in order to +//! force the batch of messages (in the form of their associated pieces of operational +//! data) to be queued up again for re-submission. +//! +//! It is expected that the first message of the batch gets dropped (i.e. it is not +//! later found in the pending queue), but all of the subsequent messages should +//! exist in the pending queue. + +use ibc_test_framework::prelude::*; +use ibc_test_framework::util::random::random_u64_range; + +use ibc_relayer::link::{Link, LinkParameters}; + +/// The number of messages to be sent in a batch contained in a piece of operational data. 
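+/// At least two messages are needed: the first one is expected to be dropped once chain B
+/// goes down, and the remaining ones are what the test observes being re-queued.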
+const BATCH_SIZE: usize = 10; + +#[test] +fn test_execute_schedule() -> Result<(), Error> { + run_binary_channel_test(&ExecuteScheduleTest) +} + +pub struct ExecuteScheduleTest; + +impl TestOverrides for ExecuteScheduleTest { + fn should_spawn_supervisor(&self) -> bool { + false + } +} + +impl BinaryChannelTest for ExecuteScheduleTest { + fn run( + &self, + _config: &TestConfig, + _relayer: RelayerDriver, + chains: ConnectedChains, + channel: ConnectedChannel, + ) -> Result<(), Error> { + let amount1 = random_u64_range(1000, 5000); + + let chain_a_link_opts = LinkParameters { + src_port_id: channel.port_a.clone().into_value(), + src_channel_id: channel.channel_id_a.into_value(), + }; + + let chain_a_link = Link::new_from_opts( + chains.handle_a().clone(), + chains.handle_b().clone(), + chain_a_link_opts, + true, + )?; + + let mut relay_path_a_to_b = chain_a_link.a_to_b; + + // Construct `BATCH_SIZE` pieces of operational data and queue them up to be sent to chain B. + for i in 0..BATCH_SIZE { + chains.node_a.chain_driver().ibc_transfer_token( + &channel.port_a.as_ref(), + &channel.channel_id_a.as_ref(), + &chains.node_a.wallets().user1(), + &chains.node_b.wallets().user1().address(), + &chains.node_a.denom(), + amount1, + )?; + + relay_path_a_to_b.schedule_packet_clearing(None)?; + + info!("Performing IBC send packet with a token transfer #{} from chain A to be received by chain B", i); + } + + // We should see that all of the events in the batch are queued up to be sent to chain B. + assert_eq!(relay_path_a_to_b.dst_operational_data.len(), BATCH_SIZE); + + chains.node_b.value().kill()?; + + // With chain B inactive, if we attempt to send the batch of messages, we expect to see + // `BATCH_SIZE` - 1 messages in the batch since the initial event should have failed to + // be relayed and was thus dropped. The subsequent messages in the batch should have all + // been re-added to the pending queue. 
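+        // (`dst_operational_data` is the relay path's queue of operational data scheduled for
+        // submission to chain B; its length is what the assertions in this test observe.)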
+ match relay_path_a_to_b.execute_schedule() { + Ok(_) => panic!("Expected an error when relaying tx from A to B"), + Err(_) => assert_eq!(relay_path_a_to_b.dst_operational_data.len(), BATCH_SIZE - 1), + } + + Ok(()) + } +} diff --git a/tools/integration-test/src/tests/manual/simulation.rs b/tools/integration-test/src/tests/manual/simulation.rs index cac7b3bedb..6419ab37a4 100644 --- a/tools/integration-test/src/tests/manual/simulation.rs +++ b/tools/integration-test/src/tests/manual/simulation.rs @@ -14,7 +14,7 @@ use core::time::Duration; use ibc::events::IbcEvent; use ibc_relayer::config::{types::MaxMsgNum, Config}; -use ibc_relayer::transfer::{build_and_send_transfer_messages, Amount, TransferOptions}; +use ibc_relayer::transfer::{build_and_send_transfer_messages, TransferOptions}; use ibc_test_framework::prelude::*; #[test] @@ -84,7 +84,7 @@ fn tx_raw_ft_transfer( let transfer_options = TransferOptions { packet_src_port_id: channel.port_a.value().clone(), packet_src_channel_id: *channel.channel_id_a.value(), - amount: Amount(amount.into()), + amount: amount.into(), denom: denom.value().to_string(), receiver: Some(recipient.value().0.clone()), timeout_height_offset, diff --git a/tools/integration-test/src/tests/mod.rs b/tools/integration-test/src/tests/mod.rs index f6aaf4ad1e..09e21d5252 100644 --- a/tools/integration-test/src/tests/mod.rs +++ b/tools/integration-test/src/tests/mod.rs @@ -9,6 +9,7 @@ pub mod clear_packet; pub mod client_expiration; mod client_settings; pub mod connection_delay; +pub mod execute_schedule; pub mod memo; pub mod python; mod query_packet; diff --git a/tools/test-framework/Cargo.toml b/tools/test-framework/Cargo.toml index a1729b95f3..d244c9431e 100644 --- a/tools/test-framework/Cargo.toml +++ b/tools/test-framework/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ibc-test-framework" -version = "0.14.1" +version = "0.15.0" edition = "2021" license = "Apache-2.0" readme = "README.md" @@ -10,14 +10,14 @@ repository = "https://github.com/informalsystems/ibc-rs" authors = ["Informal Systems "] description = """ - Integration tests for IBC Relayer + Framework for writing integration tests for IBC relayers """ [dependencies] -ibc = { path = "../../modules" } -ibc-relayer = { path = "../../relayer" } -ibc-relayer-cli = { path = "../../relayer-cli" } -ibc-proto = { path = "../../proto" } +ibc = { version = "=0.15.0", path = "../../modules" } +ibc-relayer = { version = "=0.15.0", path = "../../relayer" } +ibc-relayer-cli = { version = "=0.15.0", path = "../../relayer-cli" } +ibc-proto = { version = "=0.18.0", path = "../../proto" } tendermint = { version = "=0.23.7" } tendermint-rpc = { version = "=0.23.7", features = ["http-client", "websocket-client"] } diff --git a/tools/test-framework/README.md b/tools/test-framework/README.md index ad3cbf955a..8856e9d34f 100644 --- a/tools/test-framework/README.md +++ b/tools/test-framework/README.md @@ -1,25 +1,23 @@ -# IBC Relayer Integration Test Suite +# IBC Relayer Integration Test Framework ## Overview -The `ibc-relayer-test` crate provides the infrastructure and framework for writing end-to-end (E2E) tests that include the spawning of the relayer together with Cosmos full nodes running as child processes inside the tests. - -## Build Documentation - -This documentation is best viewed as Rustdoc HTML pages. 
You can run the following command to build and view the documentation using `cargo doc`: - -```bash -cargo doc -p ibc-integration-test --open -``` +The `ibc-test-framework` crate provides the infrastructure and framework for writing end-to-end (E2E) tests that include the spawning of the relayer together with Cosmos full nodes running as child processes inside the tests. ## Installation Other than Rust, the test suite assumes the `gaiad` binary is present in `$PATH`. You can install Gaia by either [building from source](https://github.com/cosmos/gaia), or load it using [Cosmos.nix](https://github.com/informalsystems/cosmos.nix/): ```text -nix shell github:informalsystems/cosmos.nix#gaia5 +nix shell github:informalsystems/cosmos.nix#gaia7 ``` +Alternatively, you can use `$CHAIN_COMMAND_PATH` to override with a different executable that is compatible with `gaiad`. + +## Examples + +Example tests written using `ibc-test-framework` can be found in the [`ibc-rs` project repository](https://github.com/informalsystems/ibc-rs/tree/master/tools/integration-test) + ## Diagrams Some diagrams have been prepared to ease the understanding of the test framework: diff --git a/tools/test-framework/src/ibc/denom.rs b/tools/test-framework/src/ibc/denom.rs index e0059c3887..ec4bcabc6c 100644 --- a/tools/test-framework/src/ibc/denom.rs +++ b/tools/test-framework/src/ibc/denom.rs @@ -4,7 +4,9 @@ use core::fmt::{self, Display}; use eyre::Report as Error; -use ibc::applications::ics20_fungible_token_transfer as token_transfer; +use ibc::core::ics24_host::identifier::{ChannelId, PortId}; +use sha2::{Digest, Sha256}; +use subtle_encoding::hex; use crate::types::id::{TaggedChannelIdRef, TaggedPortIdRef}; use crate::types::tagged::*; @@ -56,10 +58,30 @@ pub fn derive_ibc_denom( channel_id: &TaggedChannelIdRef, denom: &TaggedDenomRef, ) -> Result, Error> { + fn derive_denom( + port_id: &PortId, + channel_id: &ChannelId, + denom: &str, + ) -> Result { + let transfer_path = format!("{}/{}/{}", port_id, channel_id, denom); + derive_denom_with_path(&transfer_path) + } + + /// Derive the transferred token denomination using + /// + fn derive_denom_with_path(transfer_path: &str) -> Result { + let mut hasher = Sha256::new(); + hasher.update(transfer_path.as_bytes()); + + let denom_bytes = hasher.finalize(); + let denom_hex = String::from_utf8(hex::encode_upper(denom_bytes))?; + + Ok(format!("ibc/{}", denom_hex)) + } + match denom.value() { Denom::Base(denom) => { - let hashed = - token_transfer::derive_ibc_denom(port_id.value(), channel_id.value(), denom)?; + let hashed = derive_denom(port_id.value(), channel_id.value(), denom)?; Ok(MonoTagged::new(Denom::Ibc { path: format!("{}/{}", port_id, channel_id), @@ -69,8 +91,7 @@ pub fn derive_ibc_denom( } Denom::Ibc { path, denom, .. 
} => { let new_path = format!("{}/{}/{}", port_id, channel_id, path); - let hashed = - token_transfer::derive_ibc_denom_with_path(&format!("{}/{}", new_path, denom))?; + let hashed = derive_denom_with_path(&format!("{}/{}", new_path, denom))?; Ok(MonoTagged::new(Denom::Ibc { path: new_path, diff --git a/tools/test-framework/src/relayer/chain.rs b/tools/test-framework/src/relayer/chain.rs index ad69229cbd..579069d6df 100644 --- a/tools/test-framework/src/relayer/chain.rs +++ b/tools/test-framework/src/relayer/chain.rs @@ -384,7 +384,7 @@ where self.value().query_host_consensus_state(request) } - fn query_balance(&self) -> Result { - self.value().query_balance() + fn query_balance(&self, key_name: Option) -> Result { + self.value().query_balance(key_name) } } diff --git a/tools/test-framework/src/relayer/transfer.rs b/tools/test-framework/src/relayer/transfer.rs index d4ac86aff0..3af20851a5 100644 --- a/tools/test-framework/src/relayer/transfer.rs +++ b/tools/test-framework/src/relayer/transfer.rs @@ -5,12 +5,14 @@ use core::ops::Add; use core::time::Duration; -use ibc::signer::Signer; + +use ibc::applications::transfer::error::Error as Ics20Error; use ibc::timestamp::Timestamp; use ibc::Height; use ibc_proto::google::protobuf::Any; use ibc_relayer::chain::cosmos::types::config::TxConfig; use ibc_relayer::transfer::build_transfer_message as raw_build_transfer_message; +use ibc_relayer::transfer::TransferError; use crate::error::{handle_generic_error, Error}; use crate::ibc::denom::Denom; @@ -31,13 +33,26 @@ pub fn build_transfer_message( .add(Duration::from_secs(60)) .map_err(handle_generic_error)?; + let sender = sender + .value() + .address + .0 + .parse() + .map_err(|e| TransferError::token_transfer(Ics20Error::signer(e)))?; + + let receiver = recipient + .value() + .0 + .parse() + .map_err(|e| TransferError::token_transfer(Ics20Error::signer(e)))?; + Ok(raw_build_transfer_message( (*port_id.value()).clone(), **channel_id.value(), amount.into(), - denom.value().to_string(), - Signer::new(sender.value().address.0.clone()), - Signer::new(recipient.value().0.clone()), + denom.to_string(), + sender, + receiver, Height::zero(), timeout_timestamp, ))