From 042480e3f3d38d4c1f8e40d0b50aea7555764b15 Mon Sep 17 00:00:00 2001 From: Sean Chen Date: Fri, 22 Apr 2022 09:48:20 -0500 Subject: [PATCH 01/36] Pull in upstream changes --- ...x-hermes-retrying-not-regenerating-msgs.md | 1 + .../1288-upgrade-chain-confirmation.md | 2 + .../relayer/2097-misbehavior-height.md | 2 + .../improvements/1936-missing-chain-warn.md | 2 + Cargo.lock | 38 +- config.toml | 4 +- docs/architecture/README.md | 17 +- .../adr-007-ics20-implementation.md | 235 +++ guide/src/SUMMARY.md | 1 + guide/src/commands/upgrade/test.md | 4 +- guide/src/config.md | 15 +- guide/src/example-config.md | 7 + modules/Cargo.toml | 2 +- relayer-cli/Cargo.toml | 4 +- relayer-cli/src/cli_utils.rs | 2 +- relayer-cli/src/commands/tx/transfer.rs | 5 +- relayer-cli/src/commands/tx/upgrade.rs | 6 +- relayer-cli/src/error.rs | 14 +- relayer-rest/Cargo.toml | 2 +- relayer/Cargo.toml | 4 +- relayer/src/chain.rs | 4 +- relayer/src/chain/cosmos.rs | 1279 ++--------------- relayer/src/chain/cosmos/account.rs | 108 -- relayer/src/chain/cosmos/batch.rs | 166 +++ relayer/src/chain/cosmos/encode.rs | 179 +++ relayer/src/chain/cosmos/estimate.rs | 155 ++ relayer/src/chain/cosmos/gas.rs | 74 + relayer/src/chain/cosmos/query.rs | 149 ++ relayer/src/chain/cosmos/query/account.rs | 80 ++ relayer/src/chain/cosmos/query/status.rs | 40 + relayer/src/chain/cosmos/query/tx.rs | 250 ++++ relayer/src/chain/cosmos/retry.rs | 199 +++ relayer/src/chain/cosmos/simulate.rs | 33 + relayer/src/chain/cosmos/tx.rs | 68 + relayer/src/chain/cosmos/types/account.rs | 70 + relayer/src/chain/cosmos/types/gas.rs | 79 + relayer/src/chain/cosmos/types/mod.rs | 3 + relayer/src/chain/cosmos/types/tx.rs | 18 + relayer/src/chain/cosmos/wait.rs | 114 ++ relayer/src/chain/handle.rs | 6 +- relayer/src/chain/handle/base.rs | 4 +- relayer/src/chain/handle/cache.rs | 4 +- relayer/src/chain/handle/counting.rs | 4 +- relayer/src/chain/mock.rs | 6 +- relayer/src/chain/runtime.rs | 4 +- relayer/src/chain/tx.rs | 4 +- relayer/src/event/rpc.rs | 6 +- relayer/src/foreign_client.rs | 2 +- relayer/src/keyring.rs | 47 +- relayer/src/lib.rs | 1 + relayer/src/link.rs | 4 +- relayer/src/link/error.rs | 4 +- relayer/src/link/pending.rs | 52 +- relayer/src/link/relay_path.rs | 68 +- relayer/src/link/relay_summary.rs | 12 + relayer/src/registry.rs | 18 +- relayer/src/supervisor.rs | 18 +- relayer/src/supervisor/client_state_filter.rs | 6 + relayer/src/supervisor/error.rs | 8 +- relayer/src/transfer.rs | 85 +- relayer/src/upgrade_chain.rs | 31 +- relayer/src/util/queue.rs | 12 +- relayer/src/worker.rs | 5 +- relayer/src/worker/cmd.rs | 20 + relayer/src/worker/packet.rs | 13 +- .../src/tests/client_expiration.rs | 28 +- tools/test-framework/Cargo.toml | 2 +- .../src/bootstrap/binary/chain.rs | 303 ++-- .../src/bootstrap/binary/channel.rs | 71 +- .../src/bootstrap/binary/connection.rs | 34 +- .../src/bootstrap/nary/chain.rs | 7 +- .../src/bootstrap/nary/channel.rs | 13 +- .../src/bootstrap/nary/connection.rs | 12 +- tools/test-framework/src/error.rs | 16 +- .../src/framework/binary/chain.rs | 18 +- .../src/framework/binary/channel.rs | 15 +- .../src/framework/binary/connection.rs | 12 +- tools/test-framework/src/relayer/chain.rs | 4 +- tools/test-framework/src/relayer/transfer.rs | 4 +- 79 files changed, 2684 insertions(+), 1734 deletions(-) create mode 100644 .changelog/unreleased/bug-fixes/ibc-relayer/1792-fix-hermes-retrying-not-regenerating-msgs.md create mode 100644 .changelog/unreleased/bug-fixes/relayer-cli/1288-upgrade-chain-confirmation.md create mode 
100644 .changelog/unreleased/bug-fixes/relayer/2097-misbehavior-height.md create mode 100644 .changelog/unreleased/improvements/1936-missing-chain-warn.md create mode 100644 docs/architecture/adr-007-ics20-implementation.md create mode 100644 guide/src/example-config.md delete mode 100644 relayer/src/chain/cosmos/account.rs create mode 100644 relayer/src/chain/cosmos/batch.rs create mode 100644 relayer/src/chain/cosmos/encode.rs create mode 100644 relayer/src/chain/cosmos/estimate.rs create mode 100644 relayer/src/chain/cosmos/gas.rs create mode 100644 relayer/src/chain/cosmos/query.rs create mode 100644 relayer/src/chain/cosmos/query/account.rs create mode 100644 relayer/src/chain/cosmos/query/status.rs create mode 100644 relayer/src/chain/cosmos/query/tx.rs create mode 100644 relayer/src/chain/cosmos/retry.rs create mode 100644 relayer/src/chain/cosmos/simulate.rs create mode 100644 relayer/src/chain/cosmos/tx.rs create mode 100644 relayer/src/chain/cosmos/types/account.rs create mode 100644 relayer/src/chain/cosmos/types/gas.rs create mode 100644 relayer/src/chain/cosmos/types/mod.rs create mode 100644 relayer/src/chain/cosmos/types/tx.rs create mode 100644 relayer/src/chain/cosmos/wait.rs diff --git a/.changelog/unreleased/bug-fixes/ibc-relayer/1792-fix-hermes-retrying-not-regenerating-msgs.md b/.changelog/unreleased/bug-fixes/ibc-relayer/1792-fix-hermes-retrying-not-regenerating-msgs.md new file mode 100644 index 0000000000..7e01a0fb21 --- /dev/null +++ b/.changelog/unreleased/bug-fixes/ibc-relayer/1792-fix-hermes-retrying-not-regenerating-msgs.md @@ -0,0 +1 @@ +- Fixed Hermes retrying mechanism not regenerating operational data for messages ([#1792](https://github.com/informalsystems/ibc-rs/pull/1951)) diff --git a/.changelog/unreleased/bug-fixes/relayer-cli/1288-upgrade-chain-confirmation.md b/.changelog/unreleased/bug-fixes/relayer-cli/1288-upgrade-chain-confirmation.md new file mode 100644 index 0000000000..656ed75bde --- /dev/null +++ b/.changelog/unreleased/bug-fixes/relayer-cli/1288-upgrade-chain-confirmation.md @@ -0,0 +1,2 @@ +- Skip waiting for confirmation events on tx raw upgrade-chain + ([#1288](https://github.com/informalsystems/ibc-rs/issues/1288)) \ No newline at end of file diff --git a/.changelog/unreleased/bug-fixes/relayer/2097-misbehavior-height.md b/.changelog/unreleased/bug-fixes/relayer/2097-misbehavior-height.md new file mode 100644 index 0000000000..ef2ed9d7ea --- /dev/null +++ b/.changelog/unreleased/bug-fixes/relayer/2097-misbehavior-height.md @@ -0,0 +1,2 @@ +- Fixed target height used in misbehavior detection. 
+ ([#2097](https://github.com/informalsystems/ibc-rs/issues/2097)) \ No newline at end of file diff --git a/.changelog/unreleased/improvements/1936-missing-chain-warn.md b/.changelog/unreleased/improvements/1936-missing-chain-warn.md new file mode 100644 index 0000000000..eb510e28df --- /dev/null +++ b/.changelog/unreleased/improvements/1936-missing-chain-warn.md @@ -0,0 +1,2 @@ +- Log `missing chain in configuration` errors emitted during event processing at + debug level ([#1936](https://github.com/informalsystems/ibc-rs/issues/1936)) diff --git a/Cargo.lock b/Cargo.lock index a4adcf62a7..36ead3c506 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -399,16 +399,16 @@ checksum = "fff857943da45f546682664a79488be82e69e43c1a7a2307679ab9afb3a66d2e" [[package]] name = "clap" -version = "3.1.8" +version = "3.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71c47df61d9e16dc010b55dba1952a57d8c215dbb533fd13cdd13369aac73b1c" +checksum = "6aad2534fad53df1cc12519c5cda696dd3e20e6118a027e24054aea14a0bdcbe" dependencies = [ "atty", "bitflags", "clap_derive", + "clap_lex", "indexmap", "lazy_static", - "os_str_bytes", "strsim", "termcolor", "textwrap", @@ -436,6 +436,15 @@ dependencies = [ "syn", ] +[[package]] +name = "clap_lex" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "189ddd3b5d32a70b35e7686054371742a937b0d99128e76dde6340210e966669" +dependencies = [ + "os_str_bytes", +] + [[package]] name = "color-eyre" version = "0.6.1" @@ -1880,9 +1889,9 @@ dependencies = [ [[package]] name = "moka" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2bb1ccadd67a5fda33bf5fbfbf2372ee49e02c3e662095b270eccef6dd94cb63" +checksum = "ef9d038ced38770a867f2300ef21e1193b5e26d8e8e060fa5c034d1dddc57452" dependencies = [ "crossbeam-channel 0.5.4", "crossbeam-epoch 0.8.2", @@ -2094,9 +2103,6 @@ name = "os_str_bytes" version = "6.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e22443d1643a904602595ba1cd8f7d896afe56d26712531c5ff73a15b2fbf64" -dependencies = [ - "memchr", -] [[package]] name = "owo-colors" @@ -3157,12 +3163,6 @@ dependencies = [ "der", ] -[[package]] -name = "stable_deref_trait" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" - [[package]] name = "static_assertions" version = "1.1.0" @@ -3621,9 +3621,9 @@ dependencies = [ [[package]] name = "toml" -version = "0.5.8" +version = "0.5.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a31142970826733df8241ef35dc040ef98c679ab14d7c3e54d827099b3acecaa" +checksum = "8d82e1a7758622a465f8cee077614c73484dac5b836c02ff6a40d5d1010324d7" dependencies = [ "serde", ] @@ -3695,9 +3695,9 @@ checksum = "360dfd1d6d30e05fda32ace2c8c70e9c0a9da713275777f5a4dbb8a1893930c6" [[package]] name = "tracing" -version = "0.1.33" +version = "0.1.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80b9fa4360528139bc96100c160b7ae879f5567f49f1782b0b02035b0358ebf3" +checksum = "5d0ecdcb44a79f0fe9844f0c4f33a342cbcbb5117de8001e6ba0dc2351327d09" dependencies = [ "cfg-if 1.0.0", "log", @@ -3796,8 +3796,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c45e322b26410d7260e00f64234810c2f17d7ece356182af4df8f7ff07890f09" dependencies = [ "memoffset 0.6.5", - "serde", - "stable_deref_trait", ] [[package]] diff --git 
a/config.toml b/config.toml index a0fa87f0e6..9319b1cdc2 100644 --- a/config.toml +++ b/config.toml @@ -56,7 +56,9 @@ clear_on_start = true # Toggle the transaction confirmation mechanism. # The tx confirmation mechanism periodically queries the `/tx_search` RPC # endpoint to check that previously-submitted transactions -# (to any chain in this config file) have delivered successfully. +# (to any chain in this config file) have been successfully delivered. +# If they have not been, and `clear_interval = 0`, then those packets are +# queued up for re-submission. # Experimental feature. Affects telemetry if set to false. # [Default: true] tx_confirmation = true diff --git a/docs/architecture/README.md b/docs/architecture/README.md index 247fee3520..c64eb3c19f 100644 --- a/docs/architecture/README.md +++ b/docs/architecture/README.md @@ -25,11 +25,12 @@ To suggest an ADR, please make use of the [ADR template](./adr-template.md) prov ## Table of Contents -| ADR \# | Description | Status | -| ------ | ----------- | ------ | -| [001](./adr-001-repo.md) | Repository structure for `ibc-rs` | Accepted | -| [002](./adr-002-ibc-relayer.md) | IBC Relayer in Rust | Accepted | -| [003](./adr-003-handler-implementation.md) | IBC handlers implementation | Accepted | -| [004](./adr-004-relayer-domain-decomposition.md) | Relayer domain decomposition | Accepted | -| [005](./adr-005-relayer-v0-implementation.md) | Relayer v0 implementation | Accepted | -| [006](./adr-006-hermes-v0.2-usecases.md) | Hermes v0.2.0 Use-Cases | Proposed | +| ADR \# | Description | Status | +|--------------------------------------------------|-----------------------------------| ------ | +| [001](./adr-001-repo.md) | Repository structure for `ibc-rs` | Accepted | +| [002](./adr-002-ibc-relayer.md) | IBC Relayer in Rust | Accepted | +| [003](./adr-003-handler-implementation.md) | IBC handlers implementation | Accepted | +| [004](./adr-004-relayer-domain-decomposition.md) | Relayer domain decomposition | Accepted | +| [005](./adr-005-relayer-v0-implementation.md) | Relayer v0 implementation | Accepted | +| [006](./adr-006-hermes-v0.2-usecases.md) | Hermes v0.2.0 Use-Cases | Proposed | +| [007](./adr-007-ics20-implementation.md) | ICS20 implementation | Proposed | diff --git a/docs/architecture/adr-007-ics20-implementation.md b/docs/architecture/adr-007-ics20-implementation.md new file mode 100644 index 0000000000..be8f7cc2b3 --- /dev/null +++ b/docs/architecture/adr-007-ics20-implementation.md @@ -0,0 +1,235 @@ +# ADR 007: ICS20 Implementation Proposal + +## Changelog + +* 21.04.2022: Draft Proposed + +## Context + +The goal of this ADR is to provide recommendations and a guide for implementing the ICS20 application. + +## Decision + +The proposal is broken down into traits that should be implemented by the ICS20 module. It also defines some primitives +that would help in building a module compliant with the ICS20 spec. + +#### Types + +The implementation must provide a base denom type that is serializable to string. Additionally, the following denom +types must also be provided: + +* `HashedDenom`: A denom type that can be serialized to a string of the form `'ibc/{Hash(trace_path/base_denom)}'`. +* `PrefixedDenom`: A denom type with a base denom which is prefixed with a trace. The trace itself consists + of `'{PortId}/{ChannelId}'` pairs and enables coin source tracing[^1]. 
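+For illustration, the hashed form described above could be computed the way
+ibc-go does it[^1]: SHA-256 over the full `{trace_path}/{base_denom}` string,
+rendered as uppercase hex. The `sha2` and `hex` crates and the helper below are
+assumptions made for this sketch, not part of the proposed API:
+
+```rust
+use sha2::{Digest, Sha256};
+
+/// Illustrative helper (not part of this proposal): derive the
+/// `ibc/{Hash(trace_path/base_denom)}` representation of a prefixed denom,
+/// assuming ibc-go's SHA-256 / uppercase-hex convention.
+fn hashed_denom_str(trace_path: &str, base_denom: &str) -> String {
+    let digest = Sha256::digest(format!("{}/{}", trace_path, base_denom).as_bytes());
+    format!("ibc/{}", hex::encode_upper(digest))
+}
+```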
+
+The base denom itself can be modelled as a simple newtype over `String`:
+
+```rust
+/// Base denomination type
+pub struct Denom(String);
+```
+
+A `Coin` defines a token with a denomination and an amount, where the denomination may be any one of the denom types
+described above.
+
+```rust
+#[derive(Clone, Debug, Eq, PartialEq, PartialOrd, Ord, Serialize, Deserialize)]
+pub struct Coin {
+    /// Denomination
+    pub denom: Denom,
+    /// Amount
+    pub amount: U256,
+}
+```
+
+The ICS20 acknowledgement type and packet data type are defined in the spec[^2] and may be modelled as follows. Note that
+these types must be (de)serializable from/to JSON.
+
+```rust
+pub enum ICS20Acknowledgement {
+    /// Equivalent to b"AQ=="
+    Success,
+    /// Error Acknowledgement
+    Error(String)
+}
+
+pub struct FungibleTokenPacketData {
+    denomination: Denom,
+    amount: U256,
+    sender: String,
+    receiver: String,
+}
+```
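+
+As a minimal sketch of the JSON round-trip, the packet data could be wired up
+with `serde` as below. The `String`-typed fields, the `denom` wire name, and
+the string-encoded amount are assumptions modelled on ibc-go; the spec[^2]
+remains authoritative for the exact wire format:
+
+```rust
+use serde::{Deserialize, Serialize};
+
+/// Simplified, illustrative stand-in for the packet data type above,
+/// with `String` fields so that the example is self-contained.
+#[derive(Serialize, Deserialize)]
+pub struct FungibleTokenPacketData {
+    #[serde(rename = "denom")]
+    pub denomination: String,
+    pub amount: String,
+    pub sender: String,
+    pub receiver: String,
+}
+
+/// Encode the packet data to the JSON bytes carried in the packet.
+fn encode(data: &FungibleTokenPacketData) -> serde_json::Result<Vec<u8>> {
+    serde_json::to_vec(data)
+}
+```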
+
+#### Keepers and readers
+
+```rust
+pub trait ICS20Keeper: ChannelKeeper
+    + PortKeeper
+    + BankKeeper<AccountId = <Self as ICS20Keeper>::AccountId>
+    + AccountKeeper<AccountId = <Self as ICS20Keeper>::AccountId>
+{
+    /// The account identifier type.
+    type AccountId: Into<String>;
+
+    /// Set channel escrow address
+    fn set_channel_escrow_address(&mut self, port_id: &PortId, channel_id: &ChannelId) -> Result<(), ICS20Error>;
+    /// Sets a new {trace hash -> denom trace} pair to the store.
+    fn set_denom_trace(&mut self, denom_trace: DenomTrace) -> Result<(), ICS20Error>;
+}
+
+pub trait ICS20Reader: ChannelReader
+    + PortReader
+    + AccountReader<AccountId = <Self as ICS20Reader>::AccountId>
+    + BankReader<AccountId = <Self as ICS20Reader>::AccountId>
+{
+    /// The account identifier type.
+    type AccountId: Into<String> + FromStr;
+
+    /// Returns true iff sending is allowed in the module params
+    fn is_send_enabled(&self) -> bool;
+    /// Returns true iff receiving is allowed in the module params
+    fn is_receive_enabled(&self) -> bool;
+    /// Returns the ICS20 transfers module's AccountId.
+    fn get_transfer_account(&self) -> Self::AccountId;
+    /// Returns the escrow account id for a port and channel combination
+    fn get_channel_escrow_address(&self, port_id: &PortId, channel_id: &ChannelId) -> Result<Self::AccountId, ICS20Error>;
+    /// Returns true iff the store contains a `DenomTrace` entry for the specified `HashedDenom`.
+    fn has_denom_trace(&self, hashed_denom: HashedDenom) -> bool;
+    /// Gets the denom trace associated with the specified hash in the store.
+    fn get_denom_trace(&self, denom_hash: HashedDenom) -> Option<DenomTrace>;
+}
+
+pub trait BankKeeper {
+    /// The account identifier type.
+    type AccountId: Into<String>;
+
+    /// This function should enable sending ibc fungible tokens from one account to another
+    fn send_coins(&mut self, from: &Self::AccountId, to: &Self::AccountId, amt: Coin) -> Result<(), ICS20Error>;
+    /// This function should enable minting tokens (vouchers) in a module
+    fn mint_coins(&mut self, amt: Coin) -> Result<(), ICS20Error>;
+    /// This function should enable burning of minted tokens or vouchers
+    fn burn_coins(&mut self, module: &Self::AccountId, amt: Coin) -> Result<(), ICS20Error>;
+    /// This function should enable transfer of tokens from the ibc module to an account
+    fn send_coins_from_module_to_account(
+        &mut self,
+        module: Self::AccountId,
+        to: Self::AccountId,
+        amt: Coin,
+    ) -> Result<(), ICS20Error>;
+    /// This function should enable transfer of tokens from an account to the ibc module
+    fn send_coins_from_account_to_module(
+        &mut self,
+        from: Self::AccountId,
+        module: Self::AccountId,
+        amt: Coin,
+    ) -> Result<(), ICS20Error>;
+}
+
+pub trait BankReader {
+    /// The account identifier type.
+    type AccountId: Into<String> + FromStr;
+
+    /// Returns true if the specified account is not allowed to receive funds and false otherwise.
+    fn is_blocked_account(&self, account: &Self::AccountId) -> bool;
+}
+
+pub trait AccountReader {
+    /// The account identifier type.
+    type AccountId: Into<String> + FromStr;
+
+    /// This function should return the account of the ibc module
+    fn get_module_account(&self) -> Self::AccountId;
+}
+
+pub trait ICS20Context:
+    ICS20Keeper<AccountId = <Self as ICS20Context>::AccountId>
+    + ICS20Reader<AccountId = <Self as ICS20Context>::AccountId>
+{
+    type AccountId: Into<String> + FromStr;
+}
+```
+
+## Handling ICS20 Packets
+
+ICS20 messages are still a subset of channel packets, so they should be handled as such.
+
+The following handlers are recommended to be implemented in the `ics20_fungible_token_transfer` application in the `ibc`
+crate. These handlers will be executed in the module callbacks of any third-party IBC module that is implementing an
+ICS20 application on-chain.
+
+```rust
+/// Should be used in the transaction that initiates the ICS20 token transfer.
+/// Performs all logic related to token transfer and returns a SendTransferPacket type
+/// for the calling module to create the actual packet and register it in the ibc module.
+pub fn send_transfer<Ctx>(ctx: &Ctx, _msg: MsgTransfer) -> Result<SendTransferPacket, ICS20Error>
+    where Ctx: ICS20Context
+{
+    if !ctx.is_send_enabled() {
+        return Err(ICS20Error::send_disabled());
+    }
+
+    // implementation details, see ICS 20 for reference
+}
+
+/// Handles incoming packets with ICS20 data.
+/// To be called inside the on_recv_packet callback
+pub fn on_recv_packet<Ctx>(ctx: &Ctx, _packet: &Packet, _data: &FungibleTokenPacketData) -> ICS20Acknowledgement
+    where Ctx: ICS20Context
+{
+    if !ctx.is_receive_enabled() {
+        return ICS20Acknowledgement::Error("receiving is disabled".to_string());
+    }
+
+    // implementation details, see ICS 20 for reference
+}
+
+/// on_timeout_packet refunds the sender since the original packet sent was
+/// never received and has been timed out.
+/// To be called inside the on_timeout_packet callback
+pub fn on_timeout_packet<Ctx>(ctx: &Ctx, data: &FungibleTokenPacketData) -> Result<(), ICS20Error>
+    where Ctx: ICS20Context
+{
+    refund_packet_token(ctx, data)
+}
+
+/// Responds to the success or failure of a packet acknowledgement
+/// written on the receiving chain. If the acknowledgement was a success
+/// then nothing occurs. If the acknowledgement failed, then the sender
+/// is refunded their tokens.
+/// To be called inside the on_acknowledgement_packet callback
+pub fn on_acknowledgement_packet<Ctx>(ctx: &Ctx, ack: ICS20Acknowledgement, data: &FungibleTokenPacketData) -> Result<(), ICS20Error>
+    where Ctx: ICS20Context
+{
+    match ack {
+        ICS20Acknowledgement::Success => Ok(()),
+        _ => refund_packet_token(ctx, data)
+    }
+}
+
+/// Implements logic for refunding a sender on packet timeout or acknowledgement error
+pub fn refund_packet_token<Ctx>(_ctx: &Ctx, _data: &FungibleTokenPacketData) -> Result<(), ICS20Error>
+    where Ctx: ICS20Context
+{
+    //...
+}
+```
+
+## Status
+
+Proposed
+
+## Consequences
+
+### Positive
+
+- Provides more clarity on the details of implementing the ICS20 application in the `ibc` crate.
+- Helps align closer with the ibc-go implementation[^3].
+ +### Negative + +### Neutral + +## References + +[^1]: [ibc-go ADR 001: Coin Source Tracing](https://github.com/cosmos/ibc-go/blob/4271027a5ab1e6faaa2edbc2b9840209c315afab/docs/architecture/adr-001-coin-source-tracing.md) +[^2]: [ICS20 spec](https://github.com/cosmos/ibc/tree/master/spec/app/ics-020-fungible-token-transfer) +[^3]: [ibc-go's transfer module implementation](https://github.com/cosmos/ibc-go/tree/d31f92d9bf709f5550b75db5c70a3b44314d9781/modules/apps/transfer) diff --git a/guide/src/SUMMARY.md b/guide/src/SUMMARY.md index 3e7dbb8621..4f9be3ad34 100644 --- a/guide/src/SUMMARY.md +++ b/guide/src/SUMMARY.md @@ -11,6 +11,7 @@ - [Pre-requisites](./pre_requisites.md) - [Installation](./installation.md) - [Configuration](./config.md) + - [Example Configuration](./example-config.md) - [Telemetry](./telemetry.md) - [REST API](./rest-api.md) - [Tutorials](./tutorials/index.md) diff --git a/guide/src/commands/upgrade/test.md b/guide/src/commands/upgrade/test.md index b3d0615990..56a2f3ccb3 100644 --- a/guide/src/commands/upgrade/test.md +++ b/guide/src/commands/upgrade/test.md @@ -54,8 +54,8 @@ commit: 535be14a8bdbfeb0d950914b5baa2dc72c6b081c hermes tx raw upgrade-chain ibc-0 ibc-1 07-tendermint-0 10000000 300 ``` - ```shell - Success: [] + ```text + Success: transaction::Hash(CE98D8D98091BA8016BD852D18056E54C4CB3C4525E7F40DD3C40B4FD0F2482B) ``` Note that the height offset should be picked such that the proposal plan height is reached after the `200s` voting period. diff --git a/guide/src/config.md b/guide/src/config.md index 10327960af..192f3f98f6 100644 --- a/guide/src/config.md +++ b/guide/src/config.md @@ -43,12 +43,17 @@ To restrict relaying on specific channels, or uni-directionally, you can use [pa For each chain configured you need to add a private key for that chain in order to submit [transactions](./commands/raw/index.md), please refer to the [Keys](./commands/keys/index.md) sections in order to learn how to add the private keys that are used by the relayer. -## Example configuration file +## Connecting via TLS -Here is a full example of a configuration file with two chains configured: - -```toml -{{#include ../../config.toml}} +Hermes supports connection via TLS for use-cases such as connecting from behind +a proxy or a load balancer. 
In order to enable this, set the
+`rpc_addr`, `grpc_addr`, or `websocket_addr` parameters to URLs that use the
+`https` scheme (`wss` for the websocket endpoint), as in the following example,
+where the port number 443 is just a placeholder:
+```
+rpc_addr = 'https://domain.com:443'
+grpc_addr = 'https://domain.com:443'
+websocket_addr = 'wss://domain.com:443/websocket'
 ```
 
 ## Support for Interchain Accounts
diff --git a/guide/src/example-config.md b/guide/src/example-config.md
new file mode 100644
index 0000000000..a6a1095f3a
--- /dev/null
+++ b/guide/src/example-config.md
@@ -0,0 +1,7 @@
+# Example Configuration File
+
+Here is a full example of a configuration file with two chains configured:
+
+```toml
+{{#include ../../config.toml}}
+```
diff --git a/modules/Cargo.toml b/modules/Cargo.toml
index 99135b0335..53b083149e 100644
--- a/modules/Cargo.toml
+++ b/modules/Cargo.toml
@@ -33,7 +33,7 @@ time = { version = "0.3", default-features = false }
 serde_derive = { version = "1.0.104", default-features = false }
 serde = { version = "1.0", default-features = false }
 serde_json = { version = "1", default-features = false }
-tracing = { version = "0.1.33", default-features = false }
+tracing = { version = "0.1.34", default-features = false }
 prost = { version = "0.9", default-features = false }
 prost-types = { version = "0.9", default-features = false }
 bytes = { version = "1.1.0", default-features = false }
diff --git a/relayer-cli/Cargo.toml b/relayer-cli/Cargo.toml
index 5af959de0a..1478f89eae 100644
--- a/relayer-cli/Cargo.toml
+++ b/relayer-cli/Cargo.toml
@@ -37,13 +37,13 @@ clap_complete = "3.1"
 humantime = "2.1"
 serde = { version = "1.0", features = ["serde_derive"] }
 tokio = { version = "1.0", features = ["full"] }
-tracing = "0.1.33"
+tracing = "0.1.34"
 tracing-subscriber = { version = "0.3.11", features = ["fmt", "env-filter", "json"]}
 eyre = "0.6.8"
 color-eyre = "0.6"
 oneline-eyre = "0.1"
 futures = "0.3.21"
-toml = "0.5.8"
+toml = "0.5.9"
 serde_derive = "1.0.116"
 serde_json = "1"
 prost = { version = "0.9" }
diff --git a/relayer-cli/src/cli_utils.rs b/relayer-cli/src/cli_utils.rs
index 5e59a954a9..9a4d0e408e 100644
--- a/relayer-cli/src/cli_utils.rs
+++ b/relayer-cli/src/cli_utils.rs
@@ -63,7 +63,7 @@ pub fn spawn_chain_runtime_generic(
     let chain_config = config
         .find_chain(chain_id)
         .cloned()
-        .ok_or_else(|| Error::missing_config(chain_id.clone()))?;
+        .ok_or_else(|| Error::missing_chain_config(chain_id.clone()))?;
 
     let rt = Arc::new(TokioRuntime::new().unwrap());
     let handle =
diff --git a/relayer-cli/src/commands/tx/transfer.rs b/relayer-cli/src/commands/tx/transfer.rs
index 40a44006a8..973a87b800 100644
--- a/relayer-cli/src/commands/tx/transfer.rs
+++ b/relayer-cli/src/commands/tx/transfer.rs
@@ -1,6 +1,7 @@
 use abscissa_core::clap::Parser;
 use abscissa_core::{config::Override, Command, FrameworkErrorKind, Runnable};
 
+use core::time::Duration;
 use ibc::{
     core::{
         ics02_client::client_state::ClientState,
@@ -141,7 +142,7 @@ impl TxIcs20MsgTransferCmd {
             denom,
             receiver: self.receiver.clone(),
             timeout_height_offset: self.timeout_height_offset,
-            timeout_seconds: core::time::Duration::from_secs(self.timeout_seconds),
+            timeout_duration: Duration::from_secs(self.timeout_seconds),
             number_msgs,
         };
 
@@ -226,7 +227,7 @@ impl Runnable for TxIcs20MsgTransferCmd {
         // Checks pass, build and send the tx
         let res: Result<Vec<IbcEvent>, Error> =
             build_and_send_transfer_messages(&chains.src, &chains.dst, &opts)
-                .map_err(Error::packet);
+                .map_err(Error::transfer);
 
         match res {
             Ok(ev) => Output::success(ev).exit(),
diff
--git a/relayer-cli/src/commands/tx/upgrade.rs b/relayer-cli/src/commands/tx/upgrade.rs
index 0678efa33d..14e66ac571 100644
--- a/relayer-cli/src/commands/tx/upgrade.rs
+++ b/relayer-cli/src/commands/tx/upgrade.rs
@@ -6,7 +6,6 @@ use abscissa_core::{Command, Runnable};
 use tokio::runtime::Runtime as TokioRuntime;
 
 use ibc::core::ics24_host::identifier::{ChainId, ClientId};
-use ibc::events::IbcEvent;
 use ibc_relayer::upgrade_chain::{build_and_send_ibc_upgrade_proposal, UpgradePlanOptions};
 use ibc_relayer::{
     chain::{ChainEndpoint, CosmosSdkChain},
@@ -144,9 +143,8 @@ impl Runnable for TxIbcUpgradeChainCmd {
             Err(e) => Output::error(format!("{}", e)).exit(),
         };
 
-        let res: Result<Vec<IbcEvent>, Error> =
-            build_and_send_ibc_upgrade_proposal(dst_chain, src_chain, &opts)
-                .map_err(Error::upgrade_chain);
+        let res = build_and_send_ibc_upgrade_proposal(dst_chain, src_chain, &opts)
+            .map_err(Error::upgrade_chain);
 
         match res {
             Ok(ev) => Output::success(ev).exit(),
diff --git a/relayer-cli/src/error.rs b/relayer-cli/src/error.rs
index fdec0e4287..99f5456d18 100644
--- a/relayer-cli/src/error.rs
+++ b/relayer-cli/src/error.rs
@@ -7,7 +7,7 @@ use ibc_relayer::error::Error as RelayerError;
 use ibc_relayer::foreign_client::ForeignClientError;
 use ibc_relayer::link::error::LinkError;
 use ibc_relayer::supervisor::Error as SupervisorError;
-use ibc_relayer::transfer::PacketError;
+use ibc_relayer::transfer::TransferError;
 use ibc_relayer::upgrade_chain::UpgradeChainError;
 use tendermint::Error as TendermintError;
 
@@ -47,17 +47,17 @@ define_error! {
         Keys
             |_| { "keys error" },
 
-        MissingConfig
+        MissingChainConfig
             { chain_id: ChainId }
             | e | {
-                format_args!("missing chain for id ({}) in configuration file",
+                format_args!("missing chain with id '{}' in configuration file",
                     e.chain_id)
             },
 
         MissingCounterpartyChannelId
             { channel_end: IdentifiedChannelEnd }
             | e | {
-                format_args!("the channel {:?} counterparty has no channel id",
+                format_args!("this channel's counterparty has no channel id: {:?}",
                     e.channel_end)
             },
 
         Connection
             [ ConnectionError ]
             |_| { "connection error" },
 
-        Packet
-            [ PacketError ]
-            |_| { "packet error" },
+        Transfer
+            [ TransferError ]
+            |_| { "transfer error" },
 
         Channel
             [ ChannelError ]
diff --git a/relayer-rest/Cargo.toml b/relayer-rest/Cargo.toml
index 0cfac3ff50..53039c943a 100644
--- a/relayer-rest/Cargo.toml
+++ b/relayer-rest/Cargo.toml
@@ -24,5 +24,5 @@ tracing = "0.1"
 
 [dev-dependencies]
 serde_json = "1.0.79"
-toml = "0.5.8"
+toml = "0.5.9"
 ureq = "2.4.0"
diff --git a/relayer/Cargo.toml b/relayer/Cargo.toml
index 4ae18f30f3..694fe2ebb6 100644
--- a/relayer/Cargo.toml
+++ b/relayer/Cargo.toml
@@ -31,7 +31,7 @@ serde = "1.0"
 serde_derive = "1.0"
 thiserror = "1.0.30"
 toml = "0.5"
-tracing = "0.1.33"
+tracing = "0.1.34"
 tokio = { version = "1.0", features = ["rt-multi-thread", "time", "sync"] }
 serde_json = { version = "1" }
 bytes = "1.1.0"
@@ -62,7 +62,7 @@ uint = "0.9"
 humantime = "2.1.0"
 nanoid = "0.4.0"
 regex = "1.5.5"
-moka = "0.8.1"
+moka = "0.8.2"
 
 [dependencies.num-bigint]
 version = "0.4"
diff --git a/relayer/src/chain.rs b/relayer/src/chain.rs
index 2b7237396b..f94a45fa77 100644
--- a/relayer/src/chain.rs
+++ b/relayer/src/chain.rs
@@ -67,7 +67,7 @@ pub enum HealthCheck {
 
 /// The result of a chain status query.
 #[derive(Clone, Debug)]
-pub struct StatusResponse {
+pub struct ChainStatus {
     pub height: ICSHeight,
     pub timestamp: Timestamp,
 }
@@ -160,7 +160,7 @@ pub trait ChainEndpoint: Sized {
     }
 
     /// Query the latest height and timestamp the chain is at
-    fn query_status(&self) -> Result<StatusResponse, Error>;
+    fn query_status(&self) -> Result<ChainStatus, Error>;
 
     /// Performs a query to retrieve the state of all clients that a chain hosts.
     fn query_clients(
diff --git a/relayer/src/chain/cosmos.rs b/relayer/src/chain/cosmos.rs
index ba9ff59da7..2e16815ff5 100644
--- a/relayer/src/chain/cosmos.rs
+++ b/relayer/src/chain/cosmos.rs
@@ -1,45 +1,35 @@
 use alloc::sync::Arc;
 use core::{
-    cmp::min,
     convert::{TryFrom, TryInto},
     future::Future,
     str::FromStr,
     time::Duration,
 };
 
 use num_bigint::BigInt;
-use num_rational::BigRational;
-use std::{fmt, thread, time::Instant};
+use std::thread;
 
-use bech32::{ToBase32, Variant};
 use bitcoin::hashes::hex::ToHex;
-use ibc_proto::google::protobuf::Any;
-use itertools::Itertools;
-use tendermint::abci::{Code, Event, Path as TendermintABCIPath};
-use tendermint::account::Id as AccountId;
 use tendermint::block::Height;
-use tendermint::node::info::TxIndexStatus;
+use tendermint::{
+    abci::{Event, Path as TendermintABCIPath},
+    node::info::TxIndexStatus,
+};
 use tendermint_light_client_verifier::types::LightBlock as TMLightBlock;
 use tendermint_proto::Protobuf;
-use tendermint_rpc::endpoint::tx::Response as ResultTx;
-use tendermint_rpc::query::Query;
-use tendermint_rpc::Url;
 use tendermint_rpc::{
     endpoint::broadcast::tx_sync::Response, endpoint::status, Client, HttpClient, Order,
 };
 use tokio::runtime::Runtime as TokioRuntime;
 use tonic::codegen::http::Uri;
-use tracing::{debug, error, info, span, trace, warn, Level};
+use tracing::{span, warn, Level};
 
 use ibc::clients::ics07_tendermint::client_state::{AllowUpdate, ClientState};
 use ibc::clients::ics07_tendermint::consensus_state::ConsensusState as TMConsensusState;
 use ibc::clients::ics07_tendermint::header::Header as TmHeader;
-use ibc::core::ics02_client::client_consensus::{
-    AnyConsensusState, AnyConsensusStateWithHeight, QueryClientEventRequest,
-};
+use ibc::core::ics02_client::client_consensus::{AnyConsensusState, AnyConsensusStateWithHeight};
 use ibc::core::ics02_client::client_state::{AnyClientState, IdentifiedAnyClientState};
 use ibc::core::ics02_client::client_type::ClientType;
 use ibc::core::ics02_client::error::Error as ClientError;
-use ibc::core::ics02_client::events as ClientEvents;
 use ibc::core::ics03_connection::connection::{ConnectionEnd, IdentifiedConnectionEnd};
 use ibc::core::ics04_channel::channel::{
     ChannelEnd, IdentifiedChannelEnd, QueryPacketEventDataRequest,
@@ -47,23 +37,18 @@ use ibc::core::ics04_channel::channel::{
 use ibc::core::ics04_channel::events as ChannelEvents;
 use ibc::core::ics04_channel::packet::{Packet, PacketMsgType, Sequence};
 use ibc::core::ics23_commitment::commitment::CommitmentPrefix;
-use ibc::core::ics23_commitment::merkle::convert_tm_to_ics_merkle_proof;
 use ibc::core::ics24_host::identifier::{ChainId, ChannelId, ClientId, ConnectionId, PortId};
+use ibc::core::ics24_host::path::{
+    AcksPath, ChannelEndsPath, ClientConsensusStatePath, ClientStatePath, CommitmentsPath,
+    ConnectionsPath, ReceiptsPath, SeqRecvsPath,
+};
 use ibc::core::ics24_host::{ClientUpgradePath, Path, IBC_QUERY_PATH, SDK_UPGRADE_QUERY_PATH};
-use ibc::events::{from_tx_response_event, IbcEvent};
+use ibc::events::IbcEvent;
 use ibc::query::QueryBlockRequest;
-use ibc::query::{QueryTxHash, QueryTxRequest};
+use ibc::query::QueryTxRequest;
 use
ibc::signer::Signer; use ibc::Height as ICSHeight; -use ibc_proto::cosmos::base::tendermint::v1beta1::service_client::ServiceClient; -use ibc_proto::cosmos::base::tendermint::v1beta1::GetNodeInfoRequest; -use ibc_proto::cosmos::base::v1beta1::Coin; use ibc_proto::cosmos::staking::v1beta1::Params as StakingParams; -use ibc_proto::cosmos::tx::v1beta1::mode_info::{Single, Sum}; -use ibc_proto::cosmos::tx::v1beta1::{ - AuthInfo, Fee, ModeInfo, SignDoc, SignerInfo, SimulateRequest, SimulateResponse, Tx, TxBody, - TxRaw, -}; use ibc_proto::ibc::core::channel::v1::{ PacketState, QueryChannelClientStateRequest, QueryChannelsRequest, QueryConnectionChannelsRequest, QueryNextSequenceReceiveRequest, @@ -76,65 +61,45 @@ use ibc_proto::ibc::core::connection::v1::{ QueryClientConnectionsRequest, QueryConnectionsRequest, }; -use crate::config::types::Memo; -use crate::config::{AddressType, ChainConfig, GasPrice}; +use crate::chain::client::ClientSettings; +use crate::chain::cosmos::batch::{ + send_batched_messages_and_wait_check_tx, send_batched_messages_and_wait_commit, +}; +use crate::chain::cosmos::encode::encode_to_bech32; +use crate::chain::cosmos::gas::{calculate_fee, mul_ceil}; +use crate::chain::cosmos::query::account::get_or_fetch_account; +use crate::chain::cosmos::query::status::query_status; +use crate::chain::cosmos::query::tx::query_txs; +use crate::chain::cosmos::query::{abci_query, fetch_version_specs, packet_query}; +use crate::chain::cosmos::types::account::Account; +use crate::chain::cosmos::types::gas::{default_gas_from_config, max_gas_from_config}; +use crate::chain::tx::TrackedMsgs; +use crate::chain::{ChainEndpoint, HealthCheck}; +use crate::chain::{ChainStatus, QueryResponse}; +use crate::config::ChainConfig; use crate::error::Error; use crate::event::monitor::{EventMonitor, EventReceiver, TxMonitorCmd}; use crate::keyring::{KeyEntry, KeyRing}; use crate::light_client::tendermint::LightClient as TmLightClient; use crate::light_client::{LightClient, Verified}; -use crate::sdk_error::sdk_error_from_tx_sync_error_code; -use crate::util::retry::{retry_with_index, RetryResult}; - -use ibc::core::ics24_host::path::{ - AcksPath, ChannelEndsPath, ClientConsensusStatePath, ClientStatePath, CommitmentsPath, - ConnectionsPath, ReceiptsPath, SeqRecvsPath, -}; -use self::account::{query_account, Account, AccountNumber, AccountSequence}; -use super::client::ClientSettings; -use super::tx::TrackedMsgs; -use super::{ChainEndpoint, HealthCheck, QueryResponse, StatusResponse}; - -pub mod account; +pub mod batch; pub mod client; pub mod compatibility; +pub mod encode; +pub mod estimate; +pub mod gas; +pub mod query; +pub mod retry; +pub mod simulate; +pub mod tx; +pub mod types; pub mod version; +pub mod wait; -/// Default gas limit when submitting a transaction. -const DEFAULT_MAX_GAS: u64 = 400_000; - -/// Fraction of the estimated gas to add to the estimated gas amount when submitting a transaction. -const DEFAULT_GAS_PRICE_ADJUSTMENT: f64 = 0.1; - -const DEFAULT_FEE_GRANTER: &str = ""; - -/// Upper limit on the size of transactions submitted by Hermes, expressed as a /// fraction of the maximum block size defined in the Tendermint core consensus parameters. 
pub const GENESIS_MAX_BYTES_MAX_FRACTION: f64 = 0.9; - -// The error "incorrect account sequence" is defined as the unique error code 32 in cosmos-sdk: // https://github.com/cosmos/cosmos-sdk/blob/v0.44.0/types/errors/errors.go#L115-L117 -pub const INCORRECT_ACCOUNT_SEQUENCE_ERR: u32 = 32; - -mod retry_strategy { - use crate::util::retry::Fixed; - use core::time::Duration; - - // Maximum number of retries for send_tx in the case of - // an account sequence mismatch at broadcast step. - pub const MAX_ACCOUNT_SEQUENCE_RETRY: u32 = 1; - - // Backoff multiplier to apply while retrying in the case - // of account sequence mismatch. - pub const BACKOFF_MULTIPLIER_ACCOUNT_SEQUENCE_RETRY: u64 = 300; - - pub fn wait_for_block_commits(max_total_wait: Duration) -> impl Iterator { - let backoff_millis = 300; // The periodic backoff - let count: usize = (max_total_wait.as_millis() / backoff_millis as u128) as usize; - Fixed::from_millis(backoff_millis).take(count) - } -} pub struct CosmosSdkChain { config: ChainConfig, @@ -186,14 +151,17 @@ impl CosmosSdkChain { ); } + let max_gas = max_gas_from_config(&self.config); + let default_gas = default_gas_from_config(&self.config); + // If the default gas is strictly greater than the max gas and the tx simulation fails, // Hermes won't be able to ever submit that tx because the gas amount wanted will be // greater than the max gas. - if self.default_gas() > self.max_gas() { + if default_gas > max_gas { return Err(Error::config_validation_default_gas_too_high( self.id().clone(), - self.default_gas(), - self.max_gas(), + default_gas, + max_gas, )); } @@ -234,10 +202,12 @@ impl CosmosSdkChain { .try_into() .expect("cannot over or underflow because it is positive"); - if self.max_gas() > consensus_max_gas { + let max_gas = max_gas_from_config(&self.config); + + if max_gas > consensus_max_gas { return Err(Error::config_validation_max_gas_too_high( self.id().clone(), - self.max_gas(), + max_gas, result.consensus_params.block.max_gas, )); } @@ -301,296 +271,6 @@ impl CosmosSdkChain { self.rt.block_on(f) } - fn send_tx_with_account_sequence( - &mut self, - proto_msgs: Vec, - account_seq: AccountSequence, - ) -> Result { - debug!( - "sending {} messages using account sequence {}", - proto_msgs.len(), - account_seq, - ); - - let signer_info = self.signer(account_seq)?; - let max_fee = self.max_fee(); - - debug!("max fee, for use in tx simulation: {}", PrettyFee(&max_fee)); - - let (body, body_buf) = tx_body_and_bytes(proto_msgs, self.tx_memo())?; - let account_number = self.account_number()?; - - let (auth_info, auth_buf) = auth_info_and_bytes(signer_info.clone(), max_fee)?; - let signed_doc = self.signed_doc(body_buf.clone(), auth_buf, account_number)?; - - let simulate_tx = Tx { - body: Some(body), - auth_info: Some(auth_info), - signatures: vec![signed_doc], - }; - - // This may result in an account sequence mismatch error - let estimated_gas = self.estimate_gas(simulate_tx)?; - - if estimated_gas > self.max_gas() { - debug!( - id = %self.id(), estimated = ?estimated_gas, max = ?self.max_gas(), - "send_tx: estimated gas is higher than max gas" - ); - - return Err(Error::tx_simulate_gas_estimate_exceeded( - self.id().clone(), - estimated_gas, - self.max_gas(), - )); - } - - let adjusted_fee = self.fee_with_gas(estimated_gas); - - debug!( - id = %self.id(), - "send_tx: using {} gas, fee {}", - estimated_gas, - PrettyFee(&adjusted_fee) - ); - - let (_auth_adjusted, auth_buf_adjusted) = auth_info_and_bytes(signer_info, adjusted_fee)?; - let signed_doc = - 
self.signed_doc(body_buf.clone(), auth_buf_adjusted.clone(), account_number)?; - - let tx_raw = TxRaw { - body_bytes: body_buf, - auth_info_bytes: auth_buf_adjusted, - signatures: vec![signed_doc], - }; - - let mut tx_bytes = Vec::new(); - prost::Message::encode(&tx_raw, &mut tx_bytes) - .map_err(|e| Error::protobuf_encode(String::from("Transaction"), e))?; - - self.block_on(broadcast_tx_sync( - &self.rpc_client, - &self.config.rpc_addr, - tx_bytes, - )) - } - - /// Try to `send_tx` with retry on account sequence error. - /// An account sequence error can occur if the account sequence that - /// the relayer caches becomes outdated. This may happen if the relayer - /// wallet is used concurrently elsewhere, or when tx fees are mis-configured - /// leading to transactions hanging in the mempool. - /// - /// Account sequence mismatch error can occur at two separate steps: - /// 1. as Err variant, propagated from the `estimate_gas` step. - /// 2. as an Ok variant, with an Code::Err response, propagated from - /// the `broadcast_tx_sync` step. - /// - /// We treat both cases by re-fetching the account sequence number - /// from the full node. - /// Upon case #1, we do not retry submitting the same tx (retry happens - /// nonetheless at the worker `step` level). Upon case #2, we retry - /// submitting the same transaction. - fn send_tx_with_account_sequence_retry( - &mut self, - proto_msgs: Vec, - retry_counter: u32, - ) -> Result { - let account_sequence = self.account_sequence()?; - - match self.send_tx_with_account_sequence(proto_msgs.clone(), account_sequence) { - // Gas estimation failed with acct. s.n. mismatch at estimate gas step. - // This indicates that the full node did not yet push the previous tx out of its - // mempool. Possible explanations: fees too low, network congested, or full node - // congested. Whichever the case, it is more expedient in production to drop the tx - // and refresh the s.n., to allow proceeding to the other transactions. A separate - // retry at the worker-level will handle retrying. - Err(e) if mismatching_account_sequence_number(&e) => { - warn!("failed at estimate_gas step mismatching account sequence: dropping the tx & refreshing account sequence number"); - self.refresh_account()?; - // Note: propagating error here can lead to bug & dropped packets: - // https://github.com/informalsystems/ibc-rs/issues/1153 - // But periodic packet clearing will catch any dropped packets. - Err(e) - } - - // Gas estimation succeeded. Broadcasting failed with a retry-able error. - Ok(response) if response.code == Code::Err(INCORRECT_ACCOUNT_SEQUENCE_ERR) => { - if retry_counter < retry_strategy::MAX_ACCOUNT_SEQUENCE_RETRY { - let retry_counter = retry_counter + 1; - warn!("failed at broadcast step with incorrect account sequence. retrying ({}/{})", - retry_counter, retry_strategy::MAX_ACCOUNT_SEQUENCE_RETRY); - // Backoff & re-fetch the account s.n. - let backoff = (retry_counter as u64) - * retry_strategy::BACKOFF_MULTIPLIER_ACCOUNT_SEQUENCE_RETRY; - thread::sleep(Duration::from_millis(backoff)); - self.refresh_account()?; - - // Now retry. - self.send_tx_with_account_sequence_retry(proto_msgs, retry_counter + 1) - } else { - // If after the max retry we still get an account sequence mismatch error, - // we ignore the error and return the original response to downstream. - // We do not return an error here, because the current convention - // let the caller handle error responses separately. - error!("failed due to account sequence errors. 
the relayer wallet may be used elsewhere concurrently."); - Ok(response) - } - } - - // Catch-all arm for the Ok variant. - // This is the case when gas estimation succeeded. - Ok(response) => { - // Complete success. - match response.code { - tendermint::abci::Code::Ok => { - debug!("broadcast_tx_sync: {:?}", response); - - self.incr_account_sequence(); - Ok(response) - } - // Gas estimation succeeded, but broadcasting failed with unrecoverable error. - tendermint::abci::Code::Err(code) => { - // Avoid increasing the account s.n. if CheckTx failed - // Log the error - error!( - "broadcast_tx_sync: {:?}: diagnostic: {:?}", - response, - sdk_error_from_tx_sync_error_code(code) - ); - Ok(response) - } - } - } - - // Catch-all case for the Err variant. - // Gas estimation failure or other unrecoverable error, propagate. - Err(e) => Err(e), - } - } - - fn send_tx(&mut self, proto_msgs: Vec) -> Result { - crate::time!("send_tx"); - let _span = span!(Level::ERROR, "send_tx", id = %self.id()).entered(); - - self.send_tx_with_account_sequence_retry(proto_msgs, 0) - } - - /// Try to simulate the given tx in order to estimate how much gas will be needed to submit it. - /// - /// It is possible that a batch of messages are fragmented by the caller (`send_msgs`) such that - /// they do not individually verify. For example for the following batch: - /// [`MsgUpdateClient`, `MsgRecvPacket`, ..., `MsgRecvPacket`] - /// - /// If the batch is split in two TX-es, the second one will fail the simulation in `deliverTx` check. - /// In this case we use the `default_gas` param. - fn estimate_gas(&mut self, tx: Tx) -> Result { - let simulated_gas = self.send_tx_simulate(tx).map(|sr| sr.gas_info); - let _span = span!(Level::ERROR, "estimate_gas").entered(); - - match simulated_gas { - Ok(Some(gas_info)) => { - debug!( - "tx simulation successful, gas amount used: {:?}", - gas_info.gas_used - ); - - Ok(gas_info.gas_used) - } - - Ok(None) => { - warn!( - "tx simulation successful but no gas amount used was returned, falling back on default gas: {}", - self.default_gas() - ); - - Ok(self.default_gas()) - } - - // If there is a chance that the tx will be accepted once actually submitted, we fall - // back on the max gas and will attempt to send it anyway. - // See `can_recover_from_simulation_failure` for more info. - Err(e) if can_recover_from_simulation_failure(&e) => { - warn!( - "failed to simulate tx, falling back on default gas because the error is potentially recoverable: {}", - e.detail() - ); - - Ok(self.default_gas()) - } - - Err(e) => { - error!( - "failed to simulate tx. propagating error to caller: {}", - e.detail() - ); - // Propagate the error, the retrying mechanism at caller may catch & retry. - Err(e) - } - } - } - - /// The default amount of gas the relayer is willing to pay for a transaction, - /// when it cannot simulate the tx and therefore estimate the gas amount needed. 
- fn default_gas(&self) -> u64 { - self.config.default_gas.unwrap_or_else(|| self.max_gas()) - } - - /// The maximum amount of gas the relayer is willing to pay for a transaction - fn max_gas(&self) -> u64 { - self.config.max_gas.unwrap_or(DEFAULT_MAX_GAS) - } - - /// Get the fee granter address - fn fee_granter(&self) -> &str { - self.config - .fee_granter - .as_deref() - .unwrap_or(DEFAULT_FEE_GRANTER) - } - - /// The gas price - fn gas_price(&self) -> &GasPrice { - &self.config.gas_price - } - - /// The gas price adjustment - fn gas_adjustment(&self) -> f64 { - self.config - .gas_adjustment - .unwrap_or(DEFAULT_GAS_PRICE_ADJUSTMENT) - } - - /// Adjusts the fee based on the configured `gas_adjustment` to prevent out of gas errors. - /// The actual gas cost, when a transaction is executed, may be slightly higher than the - /// one returned by the simulation. - fn apply_adjustment_to_gas(&self, gas_amount: u64) -> u64 { - assert!(self.gas_adjustment() <= 1.0); - - let (_, digits) = mul_ceil(gas_amount, self.gas_adjustment()).to_u64_digits(); - assert!(digits.len() == 1); - - let adjustment = digits[0]; - let gas = gas_amount.checked_add(adjustment).unwrap_or(u64::MAX); - - min(gas, self.max_gas()) - } - - /// The maximum fee the relayer pays for a transaction - fn max_fee_in_coins(&self) -> Coin { - calculate_fee(self.max_gas(), self.gas_price()) - } - - /// The fee in coins based on gas amount - fn fee_from_gas_in_coins(&self, gas: u64) -> Coin { - calculate_fee(gas, self.gas_price()) - } - - /// The maximum number of messages included in a transaction - fn max_msg_num(&self) -> usize { - self.config.max_msg_num.into() - } - /// The maximum size of any transaction sent by the relayer to this chain fn max_tx_size(&self) -> usize { self.config.max_tx_size.into() @@ -615,7 +295,14 @@ impl CosmosSdkChain { return Err(Error::private_store()); } - let response = self.block_on(abci_query(self, path, data.to_string(), height, prove))?; + let response = self.block_on(abci_query( + &self.rpc_client, + &self.config.rpc_addr, + path, + data.to_string(), + height, + prove, + ))?; // TODO - Verify response proof, if requested. if prove {} @@ -640,7 +327,8 @@ impl CosmosSdkChain { let path = TendermintABCIPath::from_str(SDK_UPGRADE_QUERY_PATH) .expect("Turning SDK upgrade query path constant into a Tendermint ABCI path"); let response: QueryResponse = self.block_on(abci_query( - self, + &self.rpc_client, + &self.config.rpc_addr, path, Path::Upgrade(data).to_string(), prev_height, @@ -652,253 +340,18 @@ impl CosmosSdkChain { Ok((response.value, proof)) } - fn send_tx_simulate(&self, tx: Tx) -> Result { - crate::time!("send_tx_simulate"); - - use ibc_proto::cosmos::tx::v1beta1::service_client::ServiceClient; - - // The `tx` field of `SimulateRequest` was deprecated in Cosmos SDK 0.43 in favor of `tx_bytes`. - let mut tx_bytes = vec![]; - prost::Message::encode(&tx, &mut tx_bytes) - .map_err(|e| Error::protobuf_encode(String::from("Transaction"), e))?; - - #[allow(deprecated)] - let req = SimulateRequest { - tx: Some(tx), // needed for simulation to go through with Cosmos SDK < 0.43 - tx_bytes, // needed for simulation to go through with Cosmos SDk >= 0.43 - }; - - let mut client = self - .block_on(ServiceClient::connect(self.grpc_addr.clone())) - .map_err(Error::grpc_transport)?; - - let request = tonic::Request::new(req); - let response = self - .block_on(client.simulate(request)) - .map_err(Error::grpc_status)? 
- .into_inner(); - - Ok(response) - } - fn key(&self) -> Result { self.keybase() .get_key(&self.config.key_name) .map_err(Error::key_base) } - fn key_bytes(&self, key: &KeyEntry) -> Result, Error> { - let mut pk_buf = Vec::new(); - prost::Message::encode(&key.public_key.public_key.to_bytes(), &mut pk_buf) - .map_err(|e| Error::protobuf_encode(String::from("Key bytes"), e))?; - Ok(pk_buf) - } - - fn key_and_bytes(&self) -> Result<(KeyEntry, Vec), Error> { - let key = self.key()?; - let key_bytes = self.key_bytes(&key)?; - Ok((key, key_bytes)) - } - - fn refresh_account(&mut self) -> Result<(), Error> { - let account = self.block_on(query_account(self, self.key()?.account))?; - - info!( - sequence = %account.sequence, - number = %account.account_number, - "refresh: retrieved account", - ); - - self.account = Some(account.into()); - - Ok(()) - } - - fn account(&mut self) -> Result<&Account, Error> { - if self.account == None { - self.refresh_account()?; - } - - Ok(self - .account - .as_ref() - .expect("account was supposedly just cached")) - } - - fn account_number(&mut self) -> Result { - Ok(self.account()?.number) - } - - fn account_sequence(&mut self) -> Result { - Ok(self.account()?.sequence) - } - - fn incr_account_sequence(&mut self) { - if let Some(account) = &mut self.account { - account.sequence = account.sequence.increment(); - } - } - - fn signer(&self, sequence: AccountSequence) -> Result { - let (_key, pk_buf) = self.key_and_bytes()?; - let pk_type = match &self.config.address_type { - AddressType::Cosmos => "/cosmos.crypto.secp256k1.PubKey".to_string(), - AddressType::Ethermint { pk_type } => pk_type.clone(), - }; - // Create a MsgSend proto Any message - let pk_any = Any { - type_url: pk_type, - value: pk_buf, - }; - - let single = Single { mode: 1 }; - let sum_single = Some(Sum::Single(single)); - let mode = Some(ModeInfo { sum: sum_single }); - let signer_info = SignerInfo { - public_key: Some(pk_any), - mode_info: mode, - sequence: sequence.to_u64(), - }; - Ok(signer_info) - } - - fn max_fee(&self) -> Fee { - Fee { - amount: vec![self.max_fee_in_coins()], - gas_limit: self.max_gas(), - payer: "".to_string(), - granter: self.fee_granter().to_string(), - } - } - - fn fee_with_gas(&self, gas_limit: u64) -> Fee { - let adjusted_gas_limit = self.apply_adjustment_to_gas(gas_limit); - - Fee { - amount: vec![self.fee_from_gas_in_coins(adjusted_gas_limit)], - gas_limit: adjusted_gas_limit, - payer: "".to_string(), - granter: self.fee_granter().to_string(), - } - } - - fn signed_doc( - &self, - body_bytes: Vec, - auth_info_bytes: Vec, - account_number: AccountNumber, - ) -> Result, Error> { - let sign_doc = SignDoc { - body_bytes, - auth_info_bytes, - chain_id: self.config.clone().id.to_string(), - account_number: account_number.to_u64(), - }; - - // A protobuf serialization of a SignDoc - let mut signdoc_buf = Vec::new(); - prost::Message::encode(&sign_doc, &mut signdoc_buf) - .map_err(|e| Error::protobuf_encode(String::from("SignDoc"), e))?; - - // Sign doc - let signed = self - .keybase - .sign_msg( - &self.config.key_name, - signdoc_buf, - &self.config.address_type, - ) - .map_err(Error::key_base)?; - - Ok(signed) - } - - /// Given a vector of `TxSyncResult` elements, - /// each including a transaction response hash for one or more messages, periodically queries the chain - /// with the transaction hashes to get the list of IbcEvents included in those transactions. 
- pub fn wait_for_block_commits( - &self, - mut tx_sync_results: Vec, - ) -> Result, Error> { - let hashes = tx_sync_results - .iter() - .map(|res| res.response.hash.to_string()) - .join(", "); - - info!( - id = %self.id(), - "wait_for_block_commits: waiting for commit of tx hashes(s) {}", - hashes - ); - - // Wait a little bit initially - thread::sleep(Duration::from_millis(200)); - - let start = Instant::now(); - let result = retry_with_index( - retry_strategy::wait_for_block_commits(self.config.rpc_timeout), - |index| { - if all_tx_results_found(&tx_sync_results) { - trace!( - id = %self.id(), - "wait_for_block_commits: retrieved {} tx results after {} tries ({}ms)", - tx_sync_results.len(), - index, - start.elapsed().as_millis() - ); - - // All transactions confirmed - return RetryResult::Ok(()); - } - - for TxSyncResult { response, events } in tx_sync_results.iter_mut() { - // If this transaction was not committed, determine whether it was because it failed - // or because it hasn't been committed yet. - if empty_event_present(events) { - // If the transaction failed, replace the events with an error, - // so that we don't attempt to resolve the transaction later on. - if response.code.value() != 0 { - *events = vec![IbcEvent::ChainError(format!( - "deliver_tx on chain {} for Tx hash {} reports error: code={:?}, log={:?}", - self.id(), response.hash, response.code, response.log - ))]; - - // Otherwise, try to resolve transaction hash to the corresponding events. - } else if let Ok(events_per_tx) = - self.query_txs(QueryTxRequest::Transaction(QueryTxHash(response.hash))) - { - // If we get events back, progress was made, so we replace the events - // with the new ones. in both cases we will check in the next iteration - // whether or not the transaction was fully committed. - if !events_per_tx.is_empty() { - *events = events_per_tx; - } - } - } - } - RetryResult::Retry(index) - }, - ); - - match result { - // All transactions confirmed - Ok(()) => Ok(tx_sync_results), - // Did not find confirmation - Err(_) => Err(Error::tx_no_confirmation()), - } - } - fn trusting_period(&self, unbonding_period: Duration) -> Duration { self.config .trusting_period .unwrap_or(2 * unbonding_period / 3) } - /// Returns the preconfigured memo to be used for every submitted transaction - fn tx_memo(&self) -> &Memo { - &self.config.memo_prefix - } - /// Query the chain status via an RPC query. 
/// /// Returns an error if the node is still syncing and has not caught up, @@ -923,23 +376,71 @@ impl CosmosSdkChain { crate::time!("query_latest_height"); crate::telemetry!(query, self.id(), "query_latest_height"); - let status = self.status()?; + let status = self.rt.block_on(query_status( + self.id(), + &self.rpc_client, + &self.config.rpc_addr, + ))?; - Ok(ICSHeight { - revision_number: ChainId::chain_version(status.node_info.network.as_str()), - revision_height: u64::from(status.sync_info.latest_block_height), - }) + Ok(status.height) } -} -fn empty_event_present(events: &[IbcEvent]) -> bool { - events.iter().any(|ev| matches!(ev, IbcEvent::Empty(_))) -} + async fn do_send_messages_and_wait_commit( + &mut self, + tracked_msgs: TrackedMsgs, + ) -> Result, Error> { + crate::time!("send_messages_and_wait_commit"); + + let _span = + span!(Level::DEBUG, "send_tx_commit", id = %tracked_msgs.tracking_id()).entered(); + + let proto_msgs = tracked_msgs.msgs; + + let key_entry = self.key()?; + + let account = + get_or_fetch_account(&self.grpc_addr, &key_entry.account, &mut self.account).await?; + + send_batched_messages_and_wait_commit( + &self.config, + &self.rpc_client, + &self.config.rpc_addr, + &self.grpc_addr, + &key_entry, + account, + &self.config.memo_prefix, + proto_msgs, + ) + .await + } + + async fn do_send_messages_and_wait_check_tx( + &mut self, + tracked_msgs: TrackedMsgs, + ) -> Result, Error> { + crate::time!("send_messages_and_wait_check_tx"); + + let span = span!(Level::DEBUG, "send_tx_check", id = %tracked_msgs.tracking_id()); + let _enter = span.enter(); + + let proto_msgs = tracked_msgs.msgs; + + let key_entry = self.key()?; -fn all_tx_results_found(tx_sync_results: &[TxSyncResult]) -> bool { - tx_sync_results - .iter() - .all(|r| !empty_event_present(&r.events)) + let account = + get_or_fetch_account(&self.grpc_addr, &key_entry.account, &mut self.account).await?; + + send_batched_messages_and_wait_check_tx( + &self.config, + &self.rpc_client, + &self.grpc_addr, + &key_entry, + account, + &self.config.memo_prefix, + proto_msgs, + ) + .await + } } impl ChainEndpoint for CosmosSdkChain { @@ -1069,98 +570,18 @@ impl ChainEndpoint for CosmosSdkChain { &mut self, tracked_msgs: TrackedMsgs, ) -> Result, Error> { - crate::time!("send_messages_and_wait_commit"); + let runtime = self.rt.clone(); - let _span = - span!(Level::DEBUG, "send_tx_commit", id = %tracked_msgs.tracking_id()).entered(); - - let proto_msgs = tracked_msgs.messages(); - - if proto_msgs.is_empty() { - return Ok(vec![]); - } - let mut tx_sync_results = vec![]; - - let mut n = 0; - let mut size = 0; - let mut msg_batch = vec![]; - for msg in proto_msgs.iter() { - msg_batch.push(msg.clone()); - let mut buf = Vec::new(); - prost::Message::encode(msg, &mut buf) - .map_err(|e| Error::protobuf_encode(String::from("Message"), e))?; - n += 1; - size += buf.len(); - if n >= self.max_msg_num() || size >= self.max_tx_size() { - let events_per_tx = vec![IbcEvent::default(); msg_batch.len()]; - let tx_sync_result = self.send_tx(msg_batch)?; - tx_sync_results.push(TxSyncResult { - response: tx_sync_result, - events: events_per_tx, - }); - n = 0; - size = 0; - msg_batch = vec![]; - } - } - if !msg_batch.is_empty() { - let events_per_tx = vec![IbcEvent::default(); msg_batch.len()]; - let tx_sync_result = self.send_tx(msg_batch)?; - tx_sync_results.push(TxSyncResult { - response: tx_sync_result, - events: events_per_tx, - }); - } - - let tx_sync_results = self.wait_for_block_commits(tx_sync_results)?; - - let events = 
tx_sync_results - .into_iter() - .flat_map(|el| el.events) - .collect(); - - Ok(events) + runtime.block_on(self.do_send_messages_and_wait_commit(tracked_msgs)) } fn send_messages_and_wait_check_tx( &mut self, tracked_msgs: TrackedMsgs, ) -> Result, Error> { - crate::time!("send_messages_and_wait_check_tx"); + let runtime = self.rt.clone(); - let span = span!(Level::DEBUG, "send_tx_check", id = %tracked_msgs.tracking_id()); - let _enter = span.enter(); - - let proto_msgs = tracked_msgs.messages(); - - if proto_msgs.is_empty() { - return Ok(vec![]); - } - let mut responses = vec![]; - - let mut n = 0; - let mut size = 0; - let mut msg_batch = vec![]; - for msg in proto_msgs.iter() { - msg_batch.push(msg.clone()); - let mut buf = Vec::new(); - prost::Message::encode(msg, &mut buf) - .map_err(|e| Error::protobuf_encode(String::from("Messages"), e))?; - n += 1; - size += buf.len(); - if n >= self.max_msg_num() || size >= self.max_tx_size() { - // Send the tx and enqueue the resulting response - responses.push(self.send_tx(msg_batch)?); - n = 0; - size = 0; - msg_batch = vec![]; - } - } - if !msg_batch.is_empty() { - responses.push(self.send_tx(msg_batch)?); - } - - Ok(responses) + runtime.block_on(self.do_send_messages_and_wait_check_tx(tracked_msgs)) } /// Get the account for the signer @@ -1218,22 +639,15 @@ impl ChainEndpoint for CosmosSdkChain { } /// Query the chain status - fn query_status(&self) -> Result { + fn query_status(&self) -> Result { crate::time!("query_status"); crate::telemetry!(query, self.id(), "query_status"); - let status = self.status()?; - - let time = status.sync_info.latest_block_time; - let height = ICSHeight { - revision_number: ChainId::chain_version(status.node_info.network.as_str()), - revision_height: u64::from(status.sync_info.latest_block_height), - }; - - Ok(StatusResponse { - height, - timestamp: time.into(), - }) + self.rt.block_on(query_status( + self.id(), + &self.rpc_client, + &self.config.rpc_addr, + )) } fn query_clients( @@ -1770,99 +1184,12 @@ impl ChainEndpoint for CosmosSdkChain { crate::time!("query_txs"); crate::telemetry!(query, self.id(), "query_txs"); - match request { - QueryTxRequest::Packet(request) => { - crate::time!("query_txs: query packet events"); - - let mut result: Vec = vec![]; - - for seq in &request.sequences { - // query first (and only) Tx that includes the event specified in the query request - let response = self - .block_on(self.rpc_client.tx_search( - packet_query(&request, *seq), - false, - 1, - 1, // get only the first Tx matching the query - Order::Ascending, - )) - .map_err(|e| Error::rpc(self.config.rpc_addr.clone(), e))?; - - assert!( - response.txs.len() <= 1, - "packet_from_tx_search_response: unexpected number of txs" - ); - - if response.txs.is_empty() { - continue; - } - - if let Some(event) = packet_from_tx_search_response( - self.id(), - &request, - *seq, - response.txs[0].clone(), - ) { - result.push(event); - } - } - Ok(result) - } - - QueryTxRequest::Client(request) => { - crate::time!("query_txs: single client update event"); - - // query the first Tx that includes the event matching the client request - // Note: it is possible to have multiple Tx-es for same client and consensus height. - // In this case it must be true that the client updates were performed with tha - // same header as the first one, otherwise a subsequent transaction would have - // failed on chain. Therefore only one Tx is of interest and current API returns - // the first one. 
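Both deleted loops above implement the same greedy batching policy: close a batch as soon as it reaches either the configured message count or the configured encoded size (the policy now lives in `batch_messages` in `batch.rs` below). A self-contained sketch of that policy, with plain byte vectors standing in for encoded protobuf messages:

/// Split encoded messages into batches, closing a batch once it reaches
/// either the maximum message count or the maximum total size in bytes.
fn batch_by_count_and_size(
    msgs: Vec<Vec<u8>>,
    max_msg_num: usize,
    max_tx_size: usize,
) -> Vec<Vec<Vec<u8>>> {
    let mut batches = Vec::new();
    let mut current = Vec::new();
    let mut size = 0;

    for msg in msgs {
        size += msg.len();
        current.push(msg);
        if current.len() >= max_msg_num || size >= max_tx_size {
            batches.push(std::mem::take(&mut current));
            size = 0;
        }
    }
    if !current.is_empty() {
        batches.push(current);
    }
    batches
}

fn main() {
    let msgs = vec![vec![0u8; 10], vec![0u8; 10], vec![0u8; 10]];
    let batches = batch_by_count_and_size(msgs, 2, 1000);
    assert_eq!(batches.len(), 2); // two messages in the first batch, one left over
}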
- let mut response = self - .block_on(self.rpc_client.tx_search( - header_query(&request), - false, - 1, - 1, // get only the first Tx matching the query - Order::Ascending, - )) - .map_err(|e| Error::rpc(self.config.rpc_addr.clone(), e))?; - - if response.txs.is_empty() { - return Ok(vec![]); - } - - // the response must include a single Tx as specified in the query. - assert!( - response.txs.len() <= 1, - "packet_from_tx_search_response: unexpected number of txs" - ); - - let tx = response.txs.remove(0); - let event = update_client_from_tx_search_response(self.id(), &request, tx); - - Ok(event.into_iter().collect()) - } - - QueryTxRequest::Transaction(tx) => { - let mut response = self - .block_on(self.rpc_client.tx_search( - tx_hash_query(&tx), - false, - 1, - 1, // get only the first Tx matching the query - Order::Ascending, - )) - .map_err(|e| Error::rpc(self.config.rpc_addr.clone(), e))?; - - if response.txs.is_empty() { - Ok(vec![]) - } else { - let tx = response.txs.remove(0); - Ok(all_ibc_events_from_tx_search_response(self.id(), tx)) - } - } - } + self.block_on(query_txs( + self.id(), + &self.rpc_client, + &self.config.rpc_addr, + request, + )) } fn query_blocks( @@ -2121,129 +1448,6 @@ impl ChainEndpoint for CosmosSdkChain { } } -fn packet_query(request: &QueryPacketEventDataRequest, seq: Sequence) -> Query { - tendermint_rpc::query::Query::eq( - format!("{}.packet_src_channel", request.event_id.as_str()), - request.source_channel_id.to_string(), - ) - .and_eq( - format!("{}.packet_src_port", request.event_id.as_str()), - request.source_port_id.to_string(), - ) - .and_eq( - format!("{}.packet_dst_channel", request.event_id.as_str()), - request.destination_channel_id.to_string(), - ) - .and_eq( - format!("{}.packet_dst_port", request.event_id.as_str()), - request.destination_port_id.to_string(), - ) - .and_eq( - format!("{}.packet_sequence", request.event_id.as_str()), - seq.to_string(), - ) -} - -fn header_query(request: &QueryClientEventRequest) -> Query { - tendermint_rpc::query::Query::eq( - format!("{}.client_id", request.event_id.as_str()), - request.client_id.to_string(), - ) - .and_eq( - format!("{}.consensus_height", request.event_id.as_str()), - format!( - "{}-{}", - request.consensus_height.revision_number, request.consensus_height.revision_height - ), - ) -} - -fn tx_hash_query(request: &QueryTxHash) -> Query { - tendermint_rpc::query::Query::eq("tx.hash", request.0.to_string()) -} - -// Extract the packet events from the query_txs RPC response. For any given -// packet query, there is at most one Tx matching such query. Moreover, a Tx may -// contain several events, but a single one must match the packet query. -// For example, if we're querying for the packet with sequence 3 and this packet -// was committed in some Tx along with the packet with sequence 4, the response -// will include both packets. For this reason, we iterate all packets in the Tx, -// searching for those that match (which must be a single one). -fn packet_from_tx_search_response( - chain_id: &ChainId, - request: &QueryPacketEventDataRequest, - seq: Sequence, - response: ResultTx, -) -> Option { - let height = ICSHeight::new(chain_id.version(), u64::from(response.height)); - if request.height != ICSHeight::zero() && height > request.height { - return None; - } - - response - .tx_result - .events - .into_iter() - .find_map(|ev| filter_matching_event(ev, request, seq)) -} - -// Extracts from the Tx the update client event for the requested client and height. 
-// Note: in the Tx, there may have been multiple events, some of them may be -// for update of other clients that are not relevant to the request. -// For example, if we're querying for a transaction that includes the update for client X at -// consensus height H, it is possible that the transaction also includes an update client -// for client Y at consensus height H'. This is the reason the code iterates all event fields in the -// returned Tx to retrieve the relevant ones. -// Returns `None` if no matching event was found. -fn update_client_from_tx_search_response( - chain_id: &ChainId, - request: &QueryClientEventRequest, - response: ResultTx, -) -> Option { - let height = ICSHeight::new(chain_id.version(), u64::from(response.height)); - if request.height != ICSHeight::zero() && height > request.height { - return None; - } - - response - .tx_result - .events - .into_iter() - .filter(|event| event.type_str == request.event_id.as_str()) - .flat_map(|event| ClientEvents::try_from_tx(&event)) - .flat_map(|event| match event { - IbcEvent::UpdateClient(mut update) => { - update.common.height = height; - Some(update) - } - _ => None, - }) - .find(|update| { - update.common.client_id == request.client_id - && update.common.consensus_height == request.consensus_height - }) - .map(IbcEvent::UpdateClient) -} - -fn all_ibc_events_from_tx_search_response(chain_id: &ChainId, response: ResultTx) -> Vec { - let height = ICSHeight::new(chain_id.version(), u64::from(response.height)); - let deliver_tx_result = response.tx_result; - if deliver_tx_result.code.is_err() { - return vec![IbcEvent::ChainError(format!( - "deliver_tx for {} reports error: code={:?}, log={:?}", - response.hash, deliver_tx_result.code, deliver_tx_result.log - ))]; - } - - let mut result = vec![]; - for event in deliver_tx_result.events { - if let Some(ibc_ev) = from_tx_response_event(height, &event) { - result.push(ibc_ev); - } - } - result -} - fn filter_matching_event( event: Event, request: &QueryPacketEventDataRequest, @@ -2279,76 +1483,6 @@ fn filter_matching_event( } } -/// Perform a generic `abci_query`, and return the corresponding deserialized response data. -async fn abci_query( - chain: &CosmosSdkChain, - path: TendermintABCIPath, - data: String, - height: Height, - prove: bool, -) -> Result { - let height = if height.value() == 0 { - None - } else { - Some(height) - }; - - // Use the Tendermint-rs RPC client to do the query. - let response = chain - .rpc_client - .abci_query(Some(path), data.into_bytes(), height, prove) - .await - .map_err(|e| Error::rpc(chain.config.rpc_addr.clone(), e))?; - - if !response.code.is_ok() { - // Fail with response log. - return Err(Error::abci_query(response)); - } - - if prove && response.proof.is_none() { - // Fail due to empty proof - return Err(Error::empty_response_proof()); - } - - let proof = response - .proof - .map(|p| convert_tm_to_ics_merkle_proof(&p)) - .transpose() - .map_err(Error::ics23)?; - - let response = QueryResponse { - value: response.value, - height: response.height, - proof, - }; - - Ok(response) -} - -/// Perform a `broadcast_tx_sync`, and return the corresponding deserialized response data. 
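The deleted `abci_query` above (re-added in `query.rs` below) encodes a convention worth keeping in mind: the relayer uses a height of zero to mean "latest", which the Tendermint RPC API expresses as `None`. The conversion in isolation (assuming the `tendermint` crate's `block::Height`):

use tendermint::block::Height;

/// The relayer encodes "latest" as height zero; Tendermint RPC expects
/// `None` for "latest", so map zero to `None` before querying.
pub fn query_height(height: Height) -> Option<Height> {
    if height.value() == 0 {
        None
    } else {
        Some(height)
    }
}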
-pub async fn broadcast_tx_sync( - rpc_client: &HttpClient, - rpc_address: &Url, - data: Vec, -) -> Result { - let response = rpc_client - .broadcast_tx_sync(data.into()) - .await - .map_err(|e| Error::rpc(rpc_address.clone(), e))?; - - Ok(response) -} - -fn encode_to_bech32(address: &str, account_prefix: &str) -> Result { - let account = AccountId::from_str(address) - .map_err(|e| Error::invalid_key_address(address.to_string(), e))?; - - let encoded = bech32::encode(account_prefix, account.to_base32(), Variant::Bech32) - .map_err(Error::bech32_encoding)?; - - Ok(encoded) -} - /// Returns the suffix counter for a CosmosSDK client id. /// Returns `None` if the client identifier is malformed /// and the suffix could not be parsed. @@ -2360,46 +1494,6 @@ fn client_id_suffix(client_id: &ClientId) -> Option { .and_then(|e| e.parse::().ok()) } -pub struct TxSyncResult { - // the broadcast_tx_sync response - response: Response, - // the events generated by a Tx once executed - events: Vec, -} - -pub fn auth_info_and_bytes( - signer_info: SignerInfo, - fee: Fee, -) -> Result<(AuthInfo, Vec), Error> { - let auth_info = AuthInfo { - signer_infos: vec![signer_info], - fee: Some(fee), - }; - - // A protobuf serialization of a AuthInfo - let mut auth_buf = Vec::new(); - prost::Message::encode(&auth_info, &mut auth_buf) - .map_err(|e| Error::protobuf_encode(String::from("AuthInfo"), e))?; - Ok((auth_info, auth_buf)) -} - -pub fn tx_body_and_bytes(proto_msgs: Vec, memo: &Memo) -> Result<(TxBody, Vec), Error> { - // Create TxBody - let body = TxBody { - messages: proto_msgs.to_vec(), - memo: memo.to_string(), - timeout_height: 0_u64, - extension_options: Vec::::new(), - non_critical_extension_options: Vec::::new(), - }; - - // A protobuf serialization of a TxBody - let mut body_buf = Vec::new(); - prost::Message::encode(&body, &mut body_buf) - .map_err(|e| Error::protobuf_encode(String::from("TxBody"), e))?; - Ok((body, body_buf)) -} - fn do_health_check(chain: &CosmosSdkChain) -> Result<(), Error> { let chain_id = chain.id(); let grpc_address = chain.grpc_addr.to_string(); @@ -2443,107 +1537,6 @@ fn do_health_check(chain: &CosmosSdkChain) -> Result<(), Error> { Ok(()) } -/// Queries the chain to obtain the version information. -async fn fetch_version_specs( - chain_id: &ChainId, - grpc_address: &Uri, -) -> Result { - let grpc_addr_string = grpc_address.to_string(); - - // Construct a gRPC client - let mut client = ServiceClient::connect(grpc_address.clone()) - .await - .map_err(|e| { - Error::fetch_version_grpc_transport( - chain_id.clone(), - grpc_addr_string.clone(), - "tendermint::ServiceClient".to_string(), - e, - ) - })?; - - let request = tonic::Request::new(GetNodeInfoRequest {}); - - let response = client.get_node_info(request).await.map_err(|e| { - Error::fetch_version_grpc_status( - chain_id.clone(), - grpc_addr_string.clone(), - "tendermint::ServiceClient".to_string(), - e, - ) - })?; - - let version = response.into_inner().application_version.ok_or_else(|| { - Error::fetch_version_invalid_version_response( - chain_id.clone(), - grpc_addr_string.clone(), - "tendermint::GetNodeInfoRequest".to_string(), - ) - })?; - - // Parse the raw version info into a domain-type `version::Specs` - version - .try_into() - .map_err(|e| Error::fetch_version_parsing(chain_id.clone(), grpc_addr_string.clone(), e)) -} - -/// Determine whether the given error yielded by `tx_simulate` -/// can be recovered from by submitting the tx anyway. 
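`client_id_suffix` (kept unchanged above) assumes CosmosSDK-style client identifiers that end in a dash-separated counter, e.g. `07-tendermint-42`. The same parsing on plain strings, as a sketch without the `ClientId` domain type:

/// Extract the numeric suffix of an identifier such as "07-tendermint-42".
/// Returns `None` when there is no parsable trailing counter.
fn client_id_suffix(client_id: &str) -> Option<u64> {
    client_id.split('-').last().and_then(|s| s.parse::<u64>().ok())
}

fn main() {
    assert_eq!(client_id_suffix("07-tendermint-42"), Some(42));
    assert_eq!(client_id_suffix("malformed"), None);
}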
-fn can_recover_from_simulation_failure(e: &Error) -> bool { - use crate::error::ErrorDetail::*; - - match e.detail() { - GrpcStatus(detail) => detail.is_client_state_height_too_low(), - _ => false, - } -} - -/// Determine whether the given error yielded by `tx_simulate` -/// indicates that the current sequence number cached in Hermes -/// may be out-of-sync with the full node's version of the s.n. -fn mismatching_account_sequence_number(e: &Error) -> bool { - use crate::error::ErrorDetail::*; - - match e.detail() { - GrpcStatus(detail) => detail.is_account_sequence_mismatch(), - _ => false, - } -} - -struct PrettyFee<'a>(&'a Fee); - -impl fmt::Display for PrettyFee<'_> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let amount = match self.0.amount.get(0) { - Some(coin) => format!("{}{}", coin.amount, coin.denom), - None => "".to_string(), - }; - - f.debug_struct("Fee") - .field("amount", &amount) - .field("gas_limit", &self.0.gas_limit) - .finish() - } -} - -fn calculate_fee(adjusted_gas_amount: u64, gas_price: &GasPrice) -> Coin { - let fee_amount = mul_ceil(adjusted_gas_amount, gas_price.price); - - Coin { - denom: gas_price.denom.to_string(), - amount: fee_amount.to_string(), - } -} - -/// Multiply `a` with `f` and round the result up to the nearest integer. -fn mul_ceil(a: u64, f: f64) -> BigInt { - assert!(f.is_finite()); - - let a = BigInt::from(a); - let f = BigRational::from_float(f).expect("f is finite"); - (f * a).ceil().to_integer() -} - #[cfg(test)] mod tests { use ibc::{ diff --git a/relayer/src/chain/cosmos/account.rs b/relayer/src/chain/cosmos/account.rs deleted file mode 100644 index ee669d17f7..0000000000 --- a/relayer/src/chain/cosmos/account.rs +++ /dev/null @@ -1,108 +0,0 @@ -use core::fmt; - -use ibc_proto::cosmos::auth::v1beta1::BaseAccount; -use prost::Message; - -use crate::error::Error; - -use super::CosmosSdkChain; - -/// Wrapper for account number and sequence number. -/// -/// More fields may be added later. 
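`mul_ceil` (deleted here, re-added in `gas.rs` below) computes ceil(a * f) through exact rational arithmetic rather than floating point, so the only approximation left is the binary representation of `f` itself. A usage sketch, assuming the `num-bigint` and `num-rational` crates that the original imports:

use num_bigint::BigInt;
use num_rational::BigRational;

/// Multiply `a` with `f` and round the result up to the nearest integer.
fn mul_ceil(a: u64, f: f64) -> BigInt {
    assert!(f.is_finite());
    let a = BigInt::from(a);
    let f = BigRational::from_float(f).expect("f is finite");
    (f * a).ceil().to_integer()
}

fn main() {
    // 0.25 is exactly representable in binary, so these results are exact:
    assert_eq!(mul_ceil(1000, 0.25), BigInt::from(250));
    assert_eq!(mul_ceil(1001, 0.25), BigInt::from(251)); // 250.25 rounds up
}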
-#[derive(Clone, Debug, PartialEq)] -pub struct Account { - // pub address: String, - // pub pub_key: Option, - pub number: AccountNumber, - pub sequence: AccountSequence, -} - -impl From for Account { - fn from(value: BaseAccount) -> Self { - Self { - number: AccountNumber::new(value.account_number), - sequence: AccountSequence::new(value.sequence), - } - } -} - -/// Newtype for account numbers -#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)] -pub struct AccountNumber(u64); - -impl AccountNumber { - pub fn new(number: u64) -> Self { - Self(number) - } - - pub fn to_u64(self) -> u64 { - self.0 - } -} - -impl fmt::Display for AccountNumber { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{:?}", self) - } -} - -/// Newtype for account sequence numbers -#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)] -pub struct AccountSequence(u64); - -impl AccountSequence { - pub fn new(sequence: u64) -> Self { - Self(sequence) - } - - pub fn to_u64(self) -> u64 { - self.0 - } - - pub fn increment(self) -> Self { - Self(self.0 + 1) - } -} - -impl fmt::Display for AccountSequence { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{:?}", self) - } -} - -/// Uses the GRPC client to retrieve the account sequence -pub async fn query_account(chain: &CosmosSdkChain, address: String) -> Result { - use ibc_proto::cosmos::auth::v1beta1::query_client::QueryClient; - use ibc_proto::cosmos::auth::v1beta1::{EthAccount, QueryAccountRequest}; - - crate::telemetry!(query, &chain.config.id, "query_account"); - - let mut client = QueryClient::connect(chain.grpc_addr.clone()) - .await - .map_err(Error::grpc_transport)?; - - let request = tonic::Request::new(QueryAccountRequest { - address: address.clone(), - }); - - let response = client.account(request).await; - - // Querying for an account might fail, i.e. if the account doesn't actually exist - let resp_account = match response.map_err(Error::grpc_status)?.into_inner().account { - Some(account) => account, - None => return Err(Error::empty_query_account(address)), - }; - - if resp_account.type_url == "/cosmos.auth.v1beta1.BaseAccount" { - Ok(BaseAccount::decode(resp_account.value.as_slice()) - .map_err(|e| Error::protobuf_decode("BaseAccount".to_string(), e))?) - } else if resp_account.type_url.ends_with(".EthAccount") { - Ok(EthAccount::decode(resp_account.value.as_slice()) - .map_err(|e| Error::protobuf_decode("EthAccount".to_string(), e))? - .base_account - .ok_or_else(Error::empty_base_account)?) 
- } else { - Err(Error::unknown_account_type(resp_account.type_url)) - } -} diff --git a/relayer/src/chain/cosmos/batch.rs b/relayer/src/chain/cosmos/batch.rs new file mode 100644 index 0000000000..6a36fb8639 --- /dev/null +++ b/relayer/src/chain/cosmos/batch.rs @@ -0,0 +1,166 @@ +use ibc::events::IbcEvent; +use ibc_proto::google::protobuf::Any; +use prost::Message; +use tendermint_rpc::endpoint::broadcast::tx_sync::Response; +use tendermint_rpc::{HttpClient, Url}; +use tonic::codegen::http::Uri; + +use crate::chain::cosmos::retry::send_tx_with_account_sequence_retry; +use crate::chain::cosmos::types::account::Account; +use crate::chain::cosmos::types::tx::TxSyncResult; +use crate::chain::cosmos::wait::wait_for_block_commits; +use crate::config::types::Memo; +use crate::config::ChainConfig; +use crate::error::Error; +use crate::keyring::KeyEntry; + +pub async fn send_batched_messages_and_wait_commit( + config: &ChainConfig, + rpc_client: &HttpClient, + rpc_address: &Url, + grpc_address: &Uri, + key_entry: &KeyEntry, + account: &mut Account, + tx_memo: &Memo, + messages: Vec, +) -> Result, Error> { + if messages.is_empty() { + return Ok(Vec::new()); + } + + let mut tx_sync_results = send_messages_as_batches( + config, + rpc_client, + grpc_address, + key_entry, + account, + tx_memo, + messages, + ) + .await?; + + wait_for_block_commits( + &config.id, + rpc_client, + rpc_address, + &config.rpc_timeout, + &mut tx_sync_results, + ) + .await?; + + let events = tx_sync_results + .into_iter() + .flat_map(|el| el.events) + .collect(); + + Ok(events) +} + +pub async fn send_batched_messages_and_wait_check_tx( + config: &ChainConfig, + rpc_client: &HttpClient, + grpc_address: &Uri, + key_entry: &KeyEntry, + account: &mut Account, + tx_memo: &Memo, + messages: Vec, +) -> Result, Error> { + if messages.is_empty() { + return Ok(Vec::new()); + } + + let batches = batch_messages(config, messages)?; + + let mut responses = Vec::new(); + + for batch in batches { + let response = send_tx_with_account_sequence_retry( + config, + rpc_client, + grpc_address, + key_entry, + account, + tx_memo, + batch, + 0, + ) + .await?; + + responses.push(response); + } + + Ok(responses) +} + +async fn send_messages_as_batches( + config: &ChainConfig, + rpc_client: &HttpClient, + grpc_address: &Uri, + key_entry: &KeyEntry, + account: &mut Account, + tx_memo: &Memo, + messages: Vec, +) -> Result, Error> { + if messages.is_empty() { + return Ok(Vec::new()); + } + + let batches = batch_messages(config, messages)?; + + let mut tx_sync_results = Vec::new(); + + for batch in batches { + let events_per_tx = vec![IbcEvent::default(); batch.len()]; + + let response = send_tx_with_account_sequence_retry( + config, + rpc_client, + grpc_address, + key_entry, + account, + tx_memo, + batch, + 0, + ) + .await?; + + let tx_sync_result = TxSyncResult { + response, + events: events_per_tx, + }; + + tx_sync_results.push(tx_sync_result); + } + + Ok(tx_sync_results) +} + +fn batch_messages(config: &ChainConfig, messages: Vec) -> Result>, Error> { + let max_message_count = config.max_msg_num.0; + let max_tx_size = config.max_tx_size.into(); + + let mut batches = vec![]; + + let mut current_count = 0; + let mut current_size = 0; + let mut current_batch = vec![]; + + for message in messages.into_iter() { + current_count += 1; + current_size += message.encoded_len(); + current_batch.push(message); + + if current_count >= max_message_count || current_size >= max_tx_size { + let insert_batch = current_batch.drain(..).collect(); + 
batches.push(insert_batch); + current_count = 0; + current_size = 0; + } + } + + if !current_batch.is_empty() { + batches.push(current_batch); + } + + Ok(batches) +} diff --git a/relayer/src/chain/cosmos/encode.rs b/relayer/src/chain/cosmos/encode.rs new file mode 100644 index 0000000000..b66265c4bc --- /dev/null +++ b/relayer/src/chain/cosmos/encode.rs @@ -0,0 +1,179 @@ +use bech32::{ToBase32, Variant}; +use core::str::FromStr; +use ibc::core::ics24_host::identifier::ChainId; +use ibc_proto::cosmos::tx::v1beta1::mode_info::{Single, Sum}; +use ibc_proto::cosmos::tx::v1beta1::{AuthInfo, Fee, ModeInfo, SignDoc, SignerInfo, TxBody, TxRaw}; +use ibc_proto::google::protobuf::Any; +use tendermint::account::Id as AccountId; + +use crate::chain::cosmos::types::account::{Account, AccountNumber, AccountSequence}; +use crate::chain::cosmos::types::tx::SignedTx; +use crate::config::types::Memo; +use crate::config::AddressType; +use crate::config::ChainConfig; +use crate::error::Error; +use crate::keyring::{sign_message, KeyEntry}; + +pub fn sign_and_encode_tx( + config: &ChainConfig, + key_entry: &KeyEntry, + account: &Account, + tx_memo: &Memo, + messages: Vec, + fee: &Fee, +) -> Result, Error> { + let signed_tx = sign_tx(config, key_entry, account, tx_memo, messages, fee)?; + + let tx_raw = TxRaw { + body_bytes: signed_tx.body_bytes, + auth_info_bytes: signed_tx.auth_info_bytes, + signatures: signed_tx.signatures, + }; + + encode_tx_raw(tx_raw) +} + +pub fn sign_tx( + config: &ChainConfig, + key_entry: &KeyEntry, + account: &Account, + tx_memo: &Memo, + messages: Vec, + fee: &Fee, +) -> Result { + let key_bytes = encode_key_bytes(key_entry)?; + + let signer = encode_signer_info(&config.address_type, account.sequence, key_bytes)?; + + let (body, body_bytes) = tx_body_and_bytes(messages, tx_memo)?; + + let (auth_info, auth_info_bytes) = auth_info_and_bytes(signer, fee.clone())?; + + let signed_doc = encode_sign_doc( + &config.id, + key_entry, + &config.address_type, + account.number, + auth_info_bytes.clone(), + body_bytes.clone(), + )?; + + Ok(SignedTx { + body, + body_bytes, + auth_info, + auth_info_bytes, + signatures: vec![signed_doc], + }) +} + +fn encode_key_bytes(key: &KeyEntry) -> Result, Error> { + let mut pk_buf = Vec::new(); + + prost::Message::encode(&key.public_key.public_key.to_bytes(), &mut pk_buf) + .map_err(|e| Error::protobuf_encode("PublicKey".into(), e))?; + + Ok(pk_buf) +} + +fn encode_sign_doc( + chain_id: &ChainId, + key: &KeyEntry, + address_type: &AddressType, + account_number: AccountNumber, + auth_info_bytes: Vec, + body_bytes: Vec, +) -> Result, Error> { + let sign_doc = SignDoc { + body_bytes, + auth_info_bytes, + chain_id: chain_id.to_string(), + account_number: account_number.to_u64(), + }; + + // A protobuf serialization of a SignDoc + let mut signdoc_buf = Vec::new(); + prost::Message::encode(&sign_doc, &mut signdoc_buf).unwrap(); + + let signed = sign_message(key, signdoc_buf, address_type).map_err(Error::key_base)?; + + Ok(signed) +} + +fn encode_signer_info( + address_type: &AddressType, + sequence: AccountSequence, + key_bytes: Vec, +) -> Result { + let pk_type = match address_type { + AddressType::Cosmos => "/cosmos.crypto.secp256k1.PubKey".to_string(), + AddressType::Ethermint { pk_type } => pk_type.clone(), + }; + // Create a MsgSend proto Any message + let pk_any = Any { + type_url: pk_type, + value: key_bytes, + }; + + let single = Single { mode: 1 }; + let sum_single = Some(Sum::Single(single)); + let mode = Some(ModeInfo { sum: sum_single }); + let 
signer_info = SignerInfo { + public_key: Some(pk_any), + mode_info: mode, + sequence: sequence.to_u64(), + }; + Ok(signer_info) +} + +fn encode_tx_raw(tx_raw: TxRaw) -> Result, Error> { + let mut tx_bytes = Vec::new(); + prost::Message::encode(&tx_raw, &mut tx_bytes) + .map_err(|e| Error::protobuf_encode("Transaction".to_string(), e))?; + + Ok(tx_bytes) +} + +pub fn encode_to_bech32(address: &str, account_prefix: &str) -> Result { + let account = AccountId::from_str(address) + .map_err(|e| Error::invalid_key_address(address.to_string(), e))?; + + let encoded = bech32::encode(account_prefix, account.to_base32(), Variant::Bech32) + .map_err(Error::bech32_encoding)?; + + Ok(encoded) +} + +fn auth_info_and_bytes(signer_info: SignerInfo, fee: Fee) -> Result<(AuthInfo, Vec), Error> { + let auth_info = AuthInfo { + signer_infos: vec![signer_info], + fee: Some(fee), + }; + + // A protobuf serialization of a AuthInfo + let mut auth_buf = Vec::new(); + + prost::Message::encode(&auth_info, &mut auth_buf) + .map_err(|e| Error::protobuf_encode(String::from("AuthInfo"), e))?; + + Ok((auth_info, auth_buf)) +} + +fn tx_body_and_bytes(proto_msgs: Vec, memo: &Memo) -> Result<(TxBody, Vec), Error> { + // Create TxBody + let body = TxBody { + messages: proto_msgs.to_vec(), + memo: memo.to_string(), + timeout_height: 0_u64, + extension_options: Vec::::new(), + non_critical_extension_options: Vec::::new(), + }; + + // A protobuf serialization of a TxBody + let mut body_buf = Vec::new(); + + prost::Message::encode(&body, &mut body_buf) + .map_err(|e| Error::protobuf_encode(String::from("TxBody"), e))?; + + Ok((body, body_buf)) +} diff --git a/relayer/src/chain/cosmos/estimate.rs b/relayer/src/chain/cosmos/estimate.rs new file mode 100644 index 0000000000..22faf937ea --- /dev/null +++ b/relayer/src/chain/cosmos/estimate.rs @@ -0,0 +1,155 @@ +use ibc::core::ics24_host::identifier::ChainId; +use ibc_proto::cosmos::tx::v1beta1::{Fee, Tx}; +use ibc_proto::google::protobuf::Any; +use tonic::codegen::http::Uri; +use tracing::{debug, error, span, warn, Level}; + +use crate::chain::cosmos::encode::sign_tx; +use crate::chain::cosmos::gas::{gas_amount_to_fees, PrettyFee}; +use crate::chain::cosmos::simulate::send_tx_simulate; +use crate::chain::cosmos::types::account::Account; +use crate::chain::cosmos::types::gas::GasConfig; +use crate::config::types::Memo; +use crate::config::ChainConfig; +use crate::error::Error; +use crate::keyring::KeyEntry; + +pub async fn estimate_tx_fees( + config: &ChainConfig, + grpc_address: &Uri, + key_entry: &KeyEntry, + account: &Account, + tx_memo: &Memo, + messages: Vec, +) -> Result { + let gas_config = GasConfig::from_chain_config(config); + + debug!( + "max fee, for use in tx simulation: {}", + PrettyFee(&gas_config.max_fee) + ); + + let signed_tx = sign_tx( + config, + key_entry, + account, + tx_memo, + messages, + &gas_config.max_fee, + )?; + + let tx = Tx { + body: Some(signed_tx.body), + auth_info: Some(signed_tx.auth_info), + signatures: signed_tx.signatures, + }; + + let estimated_fee = estimate_fee_with_tx(&gas_config, grpc_address, &config.id, tx).await?; + + Ok(estimated_fee) +} + +async fn estimate_fee_with_tx( + gas_config: &GasConfig, + grpc_address: &Uri, + chain_id: &ChainId, + tx: Tx, +) -> Result { + let estimated_gas = estimate_gas_with_tx(gas_config, grpc_address, tx).await?; + + if estimated_gas > gas_config.max_gas { + debug!( + id = %chain_id, estimated = ?estimated_gas, max = ?gas_config.max_gas, + "send_tx: estimated gas is higher than max gas" + ); + + return 
Err(Error::tx_simulate_gas_estimate_exceeded( + chain_id.clone(), + estimated_gas, + gas_config.max_gas, + )); + } + + let adjusted_fee = gas_amount_to_fees(gas_config, estimated_gas); + + debug!( + id = %chain_id, + "send_tx: using {} gas, fee {}", + estimated_gas, + PrettyFee(&adjusted_fee) + ); + + Ok(adjusted_fee) +} + +/// Try to simulate the given tx in order to estimate how much gas will be needed to submit it. +/// +/// It is possible that a batch of messages are fragmented by the caller (`send_msgs`) such that +/// they do not individually verify. For example for the following batch: +/// [`MsgUpdateClient`, `MsgRecvPacket`, ..., `MsgRecvPacket`] +/// +/// If the batch is split in two TX-es, the second one will fail the simulation in `deliverTx` check. +/// In this case we use the `default_gas` param. +async fn estimate_gas_with_tx( + gas_config: &GasConfig, + grpc_address: &Uri, + tx: Tx, +) -> Result { + let simulated_gas = send_tx_simulate(grpc_address, tx) + .await + .map(|sr| sr.gas_info); + + let _span = span!(Level::ERROR, "estimate_gas").entered(); + + match simulated_gas { + Ok(Some(gas_info)) => { + debug!( + "tx simulation successful, gas amount used: {:?}", + gas_info.gas_used + ); + + Ok(gas_info.gas_used) + } + + Ok(None) => { + warn!( + "tx simulation successful but no gas amount used was returned, falling back on default gas: {}", + gas_config.default_gas + ); + + Ok(gas_config.default_gas) + } + + // If there is a chance that the tx will be accepted once actually submitted, we fall + // back on the max gas and will attempt to send it anyway. + // See `can_recover_from_simulation_failure` for more info. + Err(e) if can_recover_from_simulation_failure(&e) => { + warn!( + "failed to simulate tx, falling back on default gas because the error is potentially recoverable: {}", + e.detail() + ); + + Ok(gas_config.default_gas) + } + + Err(e) => { + error!( + "failed to simulate tx. propagating error to caller: {}", + e.detail() + ); + // Propagate the error, the retrying mechanism at caller may catch & retry. + Err(e) + } + } +} + +/// Determine whether the given error yielded by `tx_simulate` +/// can be recovered from by submitting the tx anyway. 
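The error handling in `estimate_gas_with_tx` above reduces to a four-way decision. A condensed sketch of just that policy (the `SimulateOutcome` enum is a hypothetical stand-in for the gRPC result plus the error classification):

/// Simplified view of the outcomes handled by `estimate_gas_with_tx`.
enum SimulateOutcome {
    GasUsed(u64),     // simulation succeeded and reported gas
    NoGasInfo,        // simulation succeeded but returned no gas info
    RecoverableError, // e.g. client state height too low
    FatalError,       // anything else
}

/// Returns the gas amount to use, or `None` when the error must propagate.
fn pick_gas(outcome: SimulateOutcome, default_gas: u64) -> Option<u64> {
    match outcome {
        SimulateOutcome::GasUsed(gas) => Some(gas),
        // Both fallback cases use the configured default gas.
        SimulateOutcome::NoGasInfo | SimulateOutcome::RecoverableError => Some(default_gas),
        SimulateOutcome::FatalError => None,
    }
}

fn main() {
    assert_eq!(pick_gas(SimulateOutcome::GasUsed(75_000), 100_000), Some(75_000));
    assert_eq!(pick_gas(SimulateOutcome::NoGasInfo, 100_000), Some(100_000));
    assert_eq!(pick_gas(SimulateOutcome::RecoverableError, 100_000), Some(100_000));
    assert_eq!(pick_gas(SimulateOutcome::FatalError, 100_000), None);
}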
+fn can_recover_from_simulation_failure(e: &Error) -> bool { + use crate::error::ErrorDetail::*; + + match e.detail() { + GrpcStatus(detail) => detail.is_client_state_height_too_low(), + _ => false, + } +} diff --git a/relayer/src/chain/cosmos/gas.rs b/relayer/src/chain/cosmos/gas.rs new file mode 100644 index 0000000000..9b881845db --- /dev/null +++ b/relayer/src/chain/cosmos/gas.rs @@ -0,0 +1,74 @@ +use core::cmp::min; +use core::fmt; +use ibc_proto::cosmos::base::v1beta1::Coin; +use ibc_proto::cosmos::tx::v1beta1::Fee; +use num_bigint::BigInt; +use num_rational::BigRational; + +use crate::chain::cosmos::types::gas::GasConfig; +use crate::config::GasPrice; + +pub struct PrettyFee<'a>(pub &'a Fee); + +pub fn gas_amount_to_fees(config: &GasConfig, gas_amount: u64) -> Fee { + let adjusted_gas_limit = adjust_gas_with_simulated_fees(config, gas_amount); + + // The fee in coins based on gas amount + let amount = calculate_fee(adjusted_gas_limit, &config.gas_price); + + Fee { + amount: vec![amount], + gas_limit: adjusted_gas_limit, + payer: "".to_string(), + granter: config.fee_granter.clone(), + } +} + +pub fn calculate_fee(adjusted_gas_amount: u64, gas_price: &GasPrice) -> Coin { + let fee_amount = mul_ceil(adjusted_gas_amount, gas_price.price); + + Coin { + denom: gas_price.denom.to_string(), + amount: fee_amount.to_string(), + } +} + +/// Multiply `a` with `f` and round the result up to the nearest integer. +pub fn mul_ceil(a: u64, f: f64) -> BigInt { + assert!(f.is_finite()); + + let a = BigInt::from(a); + let f = BigRational::from_float(f).expect("f is finite"); + (f * a).ceil().to_integer() +} + +/// Adjusts the fee based on the configured `gas_adjustment` to prevent out of gas errors. +/// The actual gas cost, when a transaction is executed, may be slightly higher than the +/// one returned by the simulation. 
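`adjust_gas_with_simulated_fees`, which follows, pads the simulated gas by the configured adjustment fraction (rounded up) and caps the sum at `max_gas`. The same arithmetic as a plain-`f64` sketch; the patch itself goes through `mul_ceil`'s exact rationals, but with an exactly-representable fraction such as 0.125 the two agree:

use core::cmp::min;

/// Pad `gas_amount` by `gas_adjustment` (a fraction in [0, 1]) and cap at `max_gas`.
fn adjusted_gas(gas_amount: u64, gas_adjustment: f64, max_gas: u64) -> u64 {
    assert!((0.0..=1.0).contains(&gas_adjustment));
    let padding = (gas_amount as f64 * gas_adjustment).ceil() as u64;
    min(gas_amount.saturating_add(padding), max_gas)
}

fn main() {
    // 12.5% adjustment, well under the cap: 100_000 -> 112_500.
    assert_eq!(adjusted_gas(100_000, 0.125, 400_000), 112_500);
    // Near the cap, max_gas wins: 390_000 + 48_750 is capped at 400_000.
    assert_eq!(adjusted_gas(390_000, 0.125, 400_000), 400_000);
}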
+fn adjust_gas_with_simulated_fees(config: &GasConfig, gas_amount: u64) -> u64 { + let gas_adjustment = config.gas_adjustment; + + assert!(gas_adjustment <= 1.0); + + let (_, digits) = mul_ceil(gas_amount, gas_adjustment).to_u64_digits(); + assert!(digits.len() == 1); + + let adjustment = digits[0]; + let gas = gas_amount.checked_add(adjustment).unwrap_or(u64::MAX); + + min(gas, config.max_gas) +} + +impl fmt::Display for PrettyFee<'_> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let amount = match self.0.amount.get(0) { + Some(coin) => format!("{}{}", coin.amount, coin.denom), + None => "".to_string(), + }; + + f.debug_struct("Fee") + .field("amount", &amount) + .field("gas_limit", &self.0.gas_limit) + .finish() + } +} diff --git a/relayer/src/chain/cosmos/query.rs b/relayer/src/chain/cosmos/query.rs new file mode 100644 index 0000000000..15d700b173 --- /dev/null +++ b/relayer/src/chain/cosmos/query.rs @@ -0,0 +1,149 @@ +use http::uri::Uri; +use ibc::core::ics02_client::client_consensus::QueryClientEventRequest; +use ibc::core::ics04_channel::channel::QueryPacketEventDataRequest; +use ibc::core::ics04_channel::packet::Sequence; +use ibc::core::ics23_commitment::merkle::convert_tm_to_ics_merkle_proof; +use ibc::core::ics24_host::identifier::ChainId; +use ibc::query::QueryTxHash; +use ibc_proto::cosmos::base::tendermint::v1beta1::service_client::ServiceClient; +use ibc_proto::cosmos::base::tendermint::v1beta1::GetNodeInfoRequest; +use tendermint::abci::Path as TendermintABCIPath; +use tendermint::block::Height; +use tendermint_rpc::query::Query; +use tendermint_rpc::{Client, HttpClient, Url}; + +use crate::chain::cosmos::version::Specs; +use crate::chain::QueryResponse; +use crate::error::Error; + +pub mod account; +pub mod status; +pub mod tx; + +pub fn packet_query(request: &QueryPacketEventDataRequest, seq: Sequence) -> Query { + Query::eq( + format!("{}.packet_src_channel", request.event_id.as_str()), + request.source_channel_id.to_string(), + ) + .and_eq( + format!("{}.packet_src_port", request.event_id.as_str()), + request.source_port_id.to_string(), + ) + .and_eq( + format!("{}.packet_dst_channel", request.event_id.as_str()), + request.destination_channel_id.to_string(), + ) + .and_eq( + format!("{}.packet_dst_port", request.event_id.as_str()), + request.destination_port_id.to_string(), + ) + .and_eq( + format!("{}.packet_sequence", request.event_id.as_str()), + seq.to_string(), + ) +} + +pub fn header_query(request: &QueryClientEventRequest) -> Query { + Query::eq( + format!("{}.client_id", request.event_id.as_str()), + request.client_id.to_string(), + ) + .and_eq( + format!("{}.consensus_height", request.event_id.as_str()), + format!( + "{}-{}", + request.consensus_height.revision_number, request.consensus_height.revision_height + ), + ) +} + +pub fn tx_hash_query(request: &QueryTxHash) -> Query { + Query::eq("tx.hash", request.0.to_string()) +} + +/// Perform a generic `abci_query`, and return the corresponding deserialized response data. +pub async fn abci_query( + rpc_client: &HttpClient, + rpc_address: &Url, + path: TendermintABCIPath, + data: String, + height: Height, + prove: bool, +) -> Result { + let height = if height.value() == 0 { + None + } else { + Some(height) + }; + + // Use the Tendermint-rs RPC client to do the query. + let response = rpc_client + .abci_query(Some(path), data.into_bytes(), height, prove) + .await + .map_err(|e| Error::rpc(rpc_address.clone(), e))?; + + if !response.code.is_ok() { + // Fail with response log. 
+ return Err(Error::abci_query(response)); + } + + if prove && response.proof.is_none() { + // Fail due to empty proof + return Err(Error::empty_response_proof()); + } + + let proof = response + .proof + .map(|p| convert_tm_to_ics_merkle_proof(&p)) + .transpose() + .map_err(Error::ics23)?; + + let response = QueryResponse { + value: response.value, + height: response.height, + proof, + }; + + Ok(response) +} + +/// Queries the chain to obtain the version information. +pub async fn fetch_version_specs(chain_id: &ChainId, grpc_address: &Uri) -> Result { + let grpc_addr_string = grpc_address.to_string(); + + // Construct a gRPC client + let mut client = ServiceClient::connect(grpc_address.clone()) + .await + .map_err(|e| { + Error::fetch_version_grpc_transport( + chain_id.clone(), + grpc_addr_string.clone(), + "tendermint::ServiceClient".to_string(), + e, + ) + })?; + + let request = tonic::Request::new(GetNodeInfoRequest {}); + + let response = client.get_node_info(request).await.map_err(|e| { + Error::fetch_version_grpc_status( + chain_id.clone(), + grpc_addr_string.clone(), + "tendermint::ServiceClient".to_string(), + e, + ) + })?; + + let version = response.into_inner().application_version.ok_or_else(|| { + Error::fetch_version_invalid_version_response( + chain_id.clone(), + grpc_addr_string.clone(), + "tendermint::GetNodeInfoRequest".to_string(), + ) + })?; + + // Parse the raw version info into a domain-type `version::Specs` + version + .try_into() + .map_err(|e| Error::fetch_version_parsing(chain_id.clone(), grpc_addr_string.clone(), e)) +} diff --git a/relayer/src/chain/cosmos/query/account.rs b/relayer/src/chain/cosmos/query/account.rs new file mode 100644 index 0000000000..2cc7579481 --- /dev/null +++ b/relayer/src/chain/cosmos/query/account.rs @@ -0,0 +1,80 @@ +use http::uri::Uri; +use ibc_proto::cosmos::auth::v1beta1::query_client::QueryClient; +use ibc_proto::cosmos::auth::v1beta1::{BaseAccount, EthAccount, QueryAccountRequest}; +use prost::Message; +use tracing::info; + +use crate::chain::cosmos::types::account::Account; +use crate::error::Error; + +/// Get a `&mut Account` from an `&mut Option` if it is `Some(Account)`. +/// Otherwise query for the account information, update the `Option` to `Some`, +/// and return the underlying `&mut` reference. +pub async fn get_or_fetch_account<'a>( + grpc_address: &Uri, + account_address: &str, + m_account: &'a mut Option, +) -> Result<&'a mut Account, Error> { + match m_account { + Some(account) => Ok(account), + None => { + let account = query_account(grpc_address, account_address).await?; + *m_account = Some(account.into()); + + Ok(m_account + .as_mut() + .expect("account was supposedly just cached")) + } + } +} + +/// Refresh the account sequence behind the `&mut Account` by refetching the +/// account and updating the `&mut` reference. 
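`get_or_fetch_account` above is a small read-through cache over `Option<Account>`; the slightly awkward `as_mut().expect(...)` at the end is how a `&mut` is recovered from the `Option` that was just filled. The borrow pattern in miniature (synchronous, with a stubbed fetch):

struct Account {
    sequence: u64,
}

/// Stub for the gRPC account query.
fn fetch_account() -> Account {
    Account { sequence: 7 }
}

/// Return the cached account, fetching and caching it on first use.
fn get_or_fetch(cache: &mut Option<Account>) -> &mut Account {
    if cache.is_none() {
        *cache = Some(fetch_account());
    }
    // Safe: the cache was populated just above if it was empty.
    cache.as_mut().expect("account was just cached")
}

fn main() {
    let mut cache = None;
    let account = get_or_fetch(&mut cache);
    account.sequence += 1; // mutate through the cached reference
    assert_eq!(cache.unwrap().sequence, 8);
}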
+pub async fn refresh_account<'a>( + grpc_address: &Uri, + account_address: &str, + m_account: &'a mut Account, +) -> Result<(), Error> { + let account = query_account(grpc_address, account_address).await?; + + info!( + sequence = %account.sequence, + number = %account.account_number, + "refresh: retrieved account", + ); + + *m_account = account.into(); + + Ok(()) +} + +/// Uses the GRPC client to retrieve the account sequence +async fn query_account(grpc_address: &Uri, account_address: &str) -> Result { + let mut client = QueryClient::connect(grpc_address.clone()) + .await + .map_err(Error::grpc_transport)?; + + let request = tonic::Request::new(QueryAccountRequest { + address: account_address.to_string(), + }); + + let response = client.account(request).await; + + // Querying for an account might fail, i.e. if the account doesn't actually exist + let resp_account = match response.map_err(Error::grpc_status)?.into_inner().account { + Some(account) => account, + None => return Err(Error::empty_query_account(account_address.to_string())), + }; + + if resp_account.type_url == "/cosmos.auth.v1beta1.BaseAccount" { + Ok(BaseAccount::decode(resp_account.value.as_slice()) + .map_err(|e| Error::protobuf_decode("BaseAccount".to_string(), e))?) + } else if resp_account.type_url.ends_with(".EthAccount") { + Ok(EthAccount::decode(resp_account.value.as_slice()) + .map_err(|e| Error::protobuf_decode("EthAccount".to_string(), e))? + .base_account + .ok_or_else(Error::empty_base_account)?) + } else { + Err(Error::unknown_account_type(resp_account.type_url)) + } +} diff --git a/relayer/src/chain/cosmos/query/status.rs b/relayer/src/chain/cosmos/query/status.rs new file mode 100644 index 0000000000..8efc5c57c4 --- /dev/null +++ b/relayer/src/chain/cosmos/query/status.rs @@ -0,0 +1,40 @@ +use ibc::core::ics24_host::identifier::ChainId; +use ibc::Height; +use tendermint_rpc::{Client, HttpClient, Url}; + +use crate::chain::ChainStatus; +use crate::error::Error; + +/// Query the chain status via an RPC query. +/// +/// Returns an error if the node is still syncing and has not caught up, +/// ie. if `sync_info.catching_up` is `true`. 
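`query_account` above dispatches on the `type_url` of the returned protobuf `Any`: an exact match for `BaseAccount`, a suffix match for the various `EthAccount` flavours (whose `base_account` field is then unwrapped), and an error otherwise. The dispatch shape in isolation (hypothetical minimal `Any`, decoding elided):

/// Minimal stand-in for a protobuf `Any`; only the type URL matters here.
struct Any {
    type_url: String,
}

#[derive(Debug, PartialEq)]
enum AccountKind {
    Base,
    Eth,
    Unknown,
}

/// Exact match for BaseAccount; suffix match so that, for example,
/// "/ethermint.types.v1.EthAccount" is also accepted.
fn classify(account: &Any) -> AccountKind {
    if account.type_url == "/cosmos.auth.v1beta1.BaseAccount" {
        AccountKind::Base
    } else if account.type_url.ends_with(".EthAccount") {
        AccountKind::Eth
    } else {
        AccountKind::Unknown
    }
}

fn main() {
    let eth = Any { type_url: "/ethermint.types.v1.EthAccount".to_string() };
    assert_eq!(classify(&eth), AccountKind::Eth);
}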
+pub async fn query_status( + chain_id: &ChainId, + rpc_client: &HttpClient, + rpc_address: &Url, +) -> Result { + let response = rpc_client + .status() + .await + .map_err(|e| Error::rpc(rpc_address.clone(), e))?; + + if response.sync_info.catching_up { + return Err(Error::chain_not_caught_up( + rpc_address.to_string(), + chain_id.clone(), + )); + } + + let time = response.sync_info.latest_block_time; + + let height = Height { + revision_number: ChainId::chain_version(response.node_info.network.as_str()), + revision_height: u64::from(response.sync_info.latest_block_height), + }; + + Ok(ChainStatus { + height, + timestamp: time.into(), + }) +} diff --git a/relayer/src/chain/cosmos/query/tx.rs b/relayer/src/chain/cosmos/query/tx.rs new file mode 100644 index 0000000000..dab694a53c --- /dev/null +++ b/relayer/src/chain/cosmos/query/tx.rs @@ -0,0 +1,250 @@ +use ibc::core::ics02_client::client_consensus::QueryClientEventRequest; +use ibc::core::ics02_client::events as ClientEvents; +use ibc::core::ics04_channel::channel::QueryPacketEventDataRequest; +use ibc::core::ics04_channel::events as ChannelEvents; +use ibc::core::ics04_channel::packet::{Packet, Sequence}; +use ibc::core::ics24_host::identifier::ChainId; +use ibc::events::{from_tx_response_event, IbcEvent}; +use ibc::query::QueryTxRequest; +use ibc::Height as ICSHeight; +use tendermint::abci::Event; +use tendermint_rpc::endpoint::tx::Response as ResultTx; +use tendermint_rpc::{Client, HttpClient, Order, Url}; + +use crate::chain::cosmos::query::{header_query, packet_query, tx_hash_query}; +use crate::error::Error; + +/// This function queries transactions for events matching certain criteria. +/// 1. Client Update request - returns a vector with at most one update client event +/// 2. Packet event request - returns at most one packet event for each sequence specified +/// in the request. +/// Note - there is no way to format the packet query such that it asks for Tx-es with either +/// sequence (the query conditions can only be AND-ed). +/// There is a possibility to include "<=" and ">=" conditions but it doesn't work with +/// string attributes (sequence is emmitted as a string). +/// Therefore, for packets we perform one tx_search for each sequence. +/// Alternatively, a single query for all packets could be performed but it would return all +/// packets ever sent. 
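To make the per-sequence search concrete: for a `send_packet` event, the `packet_query` helper from `query.rs` above produces one equality condition per packet attribute plus the sequence, so each `tx_search` call carries a query of roughly this shape (illustrative rendering with made-up identifiers):

send_packet.packet_src_channel = 'channel-0'
  AND send_packet.packet_src_port = 'transfer'
  AND send_packet.packet_dst_channel = 'channel-1'
  AND send_packet.packet_dst_port = 'transfer'
  AND send_packet.packet_sequence = '3'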
+pub async fn query_txs( + chain_id: &ChainId, + rpc_client: &HttpClient, + rpc_address: &Url, + request: QueryTxRequest, +) -> Result, Error> { + crate::time!("query_txs"); + crate::telemetry!(query, chain_id, "query_txs"); + + match request { + QueryTxRequest::Packet(request) => { + crate::time!("query_txs: query packet events"); + + let mut result: Vec = vec![]; + + for seq in &request.sequences { + // query first (and only) Tx that includes the event specified in the query request + let response = rpc_client + .tx_search( + packet_query(&request, *seq), + false, + 1, + 1, // get only the first Tx matching the query + Order::Ascending, + ) + .await + .map_err(|e| Error::rpc(rpc_address.clone(), e))?; + + assert!( + response.txs.len() <= 1, + "packet_from_tx_search_response: unexpected number of txs" + ); + + if response.txs.is_empty() { + continue; + } + + if let Some(event) = packet_from_tx_search_response( + chain_id, + &request, + *seq, + response.txs[0].clone(), + ) { + result.push(event); + } + } + Ok(result) + } + + QueryTxRequest::Client(request) => { + crate::time!("query_txs: single client update event"); + + // query the first Tx that includes the event matching the client request + // Note: it is possible to have multiple Tx-es for same client and consensus height. + // In this case it must be true that the client updates were performed with tha + // same header as the first one, otherwise a subsequent transaction would have + // failed on chain. Therefore only one Tx is of interest and current API returns + // the first one. + let mut response = rpc_client + .tx_search( + header_query(&request), + false, + 1, + 1, // get only the first Tx matching the query + Order::Ascending, + ) + .await + .map_err(|e| Error::rpc(rpc_address.clone(), e))?; + + if response.txs.is_empty() { + return Ok(vec![]); + } + + // the response must include a single Tx as specified in the query. + assert!( + response.txs.len() <= 1, + "packet_from_tx_search_response: unexpected number of txs" + ); + + let tx = response.txs.remove(0); + let event = update_client_from_tx_search_response(chain_id, &request, tx); + + Ok(event.into_iter().collect()) + } + + QueryTxRequest::Transaction(tx) => { + let mut response = rpc_client + .tx_search( + tx_hash_query(&tx), + false, + 1, + 1, // get only the first Tx matching the query + Order::Ascending, + ) + .await + .map_err(|e| Error::rpc(rpc_address.clone(), e))?; + + if response.txs.is_empty() { + Ok(vec![]) + } else { + let tx = response.txs.remove(0); + Ok(all_ibc_events_from_tx_search_response(chain_id, tx)) + } + } + } +} + +// Extracts from the Tx the update client event for the requested client and height. +// Note: in the Tx, there may have been multiple events, some of them may be +// for update of other clients that are not relevant to the request. +// For example, if we're querying for a transaction that includes the update for client X at +// consensus height H, it is possible that the transaction also includes an update client +// for client Y at consensus height H'. This is the reason the code iterates all event fields in the +// returned Tx to retrieve the relevant ones. +// Returns `None` if no matching event was found. 
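Both extraction helpers that follow start with the same guard: when the request pins a height, events coming from later blocks are ignored. The guard in isolation (plain `u64` heights for brevity; zero means "no height constraint", as `ICSHeight::zero()` does in the request):

/// Does an event from `event_height` satisfy a request pinned to `query_height`?
fn within_requested_height(event_height: u64, query_height: u64) -> bool {
    query_height == 0 || event_height <= query_height
}

fn main() {
    assert!(within_requested_height(10, 0)); // unconstrained query
    assert!(within_requested_height(10, 10)); // exactly at the requested height
    assert!(!within_requested_height(11, 10)); // newer than requested: filtered out
}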
+fn update_client_from_tx_search_response( + chain_id: &ChainId, + request: &QueryClientEventRequest, + response: ResultTx, +) -> Option { + let height = ICSHeight::new(chain_id.version(), u64::from(response.height)); + if request.height != ICSHeight::zero() && height > request.height { + return None; + } + + response + .tx_result + .events + .into_iter() + .filter(|event| event.type_str == request.event_id.as_str()) + .flat_map(|event| ClientEvents::try_from_tx(&event)) + .flat_map(|event| match event { + IbcEvent::UpdateClient(mut update) => { + update.common.height = height; + Some(update) + } + _ => None, + }) + .find(|update| { + update.common.client_id == request.client_id + && update.common.consensus_height == request.consensus_height + }) + .map(IbcEvent::UpdateClient) +} + +// Extract the packet events from the query_txs RPC response. For any given +// packet query, there is at most one Tx matching such query. Moreover, a Tx may +// contain several events, but a single one must match the packet query. +// For example, if we're querying for the packet with sequence 3 and this packet +// was committed in some Tx along with the packet with sequence 4, the response +// will include both packets. For this reason, we iterate all packets in the Tx, +// searching for those that match (which must be a single one). +fn packet_from_tx_search_response( + chain_id: &ChainId, + request: &QueryPacketEventDataRequest, + seq: Sequence, + response: ResultTx, +) -> Option { + let height = ICSHeight::new(chain_id.version(), u64::from(response.height)); + if request.height != ICSHeight::zero() && height > request.height { + return None; + } + + response + .tx_result + .events + .into_iter() + .find_map(|ev| filter_matching_event(ev, request, seq)) +} + +fn filter_matching_event( + event: Event, + request: &QueryPacketEventDataRequest, + seq: Sequence, +) -> Option { + fn matches_packet( + request: &QueryPacketEventDataRequest, + seq: Sequence, + packet: &Packet, + ) -> bool { + packet.source_port == request.source_port_id + && packet.source_channel == request.source_channel_id + && packet.destination_port == request.destination_port_id + && packet.destination_channel == request.destination_channel_id + && packet.sequence == seq + } + + if event.type_str != request.event_id.as_str() { + return None; + } + + let ibc_event = ChannelEvents::try_from_tx(&event)?; + match ibc_event { + IbcEvent::SendPacket(ref send_ev) if matches_packet(request, seq, &send_ev.packet) => { + Some(ibc_event) + } + IbcEvent::WriteAcknowledgement(ref ack_ev) + if matches_packet(request, seq, &ack_ev.packet) => + { + Some(ibc_event) + } + _ => None, + } +} + +fn all_ibc_events_from_tx_search_response(chain_id: &ChainId, response: ResultTx) -> Vec { + let height = ICSHeight::new(chain_id.version(), u64::from(response.height)); + let deliver_tx_result = response.tx_result; + if deliver_tx_result.code.is_err() { + return vec![IbcEvent::ChainError(format!( + "deliver_tx for {} reports error: code={:?}, log={:?}", + response.hash, deliver_tx_result.code, deliver_tx_result.log + ))]; + } + + let mut result = vec![]; + for event in deliver_tx_result.events { + if let Some(ibc_ev) = from_tx_response_event(height, &event) { + result.push(ibc_ev); + } + } + result +} diff --git a/relayer/src/chain/cosmos/retry.rs b/relayer/src/chain/cosmos/retry.rs new file mode 100644 index 0000000000..9960455969 --- /dev/null +++ b/relayer/src/chain/cosmos/retry.rs @@ -0,0 +1,199 @@ +use core::future::Future; +use core::pin::Pin; +use 
core::time::Duration; +use ibc_proto::google::protobuf::Any; +use std::thread; +use tendermint::abci::Code; +use tendermint_rpc::endpoint::broadcast::tx_sync::Response; +use tendermint_rpc::HttpClient; +use tonic::codegen::http::Uri; +use tracing::{debug, error, span, warn, Level}; + +use crate::chain::cosmos::query::account::refresh_account; +use crate::chain::cosmos::tx::estimate_fee_and_send_tx; +use crate::chain::cosmos::types::account::Account; +use crate::config::types::Memo; +use crate::config::ChainConfig; +use crate::error::Error; +use crate::keyring::KeyEntry; +use crate::sdk_error::sdk_error_from_tx_sync_error_code; + +// Maximum number of retries for send_tx in the case of +// an account sequence mismatch at broadcast step. +const MAX_ACCOUNT_SEQUENCE_RETRY: u64 = 1; + +// Backoff multiplier to apply while retrying in the case +// of account sequence mismatch. +const BACKOFF_MULTIPLIER_ACCOUNT_SEQUENCE_RETRY: u64 = 300; + +// The error "incorrect account sequence" is defined as the unique error code 32 in cosmos-sdk: +// https://github.com/cosmos/cosmos-sdk/blob/v0.44.0/types/errors/errors.go#L115-L117 +const INCORRECT_ACCOUNT_SEQUENCE_ERR: u32 = 32; + +/// Try to `send_tx` with retry on account sequence error. +/// An account sequence error can occur if the account sequence that +/// the relayer caches becomes outdated. This may happen if the relayer +/// wallet is used concurrently elsewhere, or when tx fees are mis-configured +/// leading to transactions hanging in the mempool. +/// +/// Account sequence mismatch error can occur at two separate steps: +/// 1. as Err variant, propagated from the `estimate_gas` step. +/// 2. as an Ok variant, with an Code::Err response, propagated from +/// the `broadcast_tx_sync` step. +/// +/// We treat both cases by re-fetching the account sequence number +/// from the full node. +/// Upon case #1, we do not retry submitting the same tx (retry happens +/// nonetheless at the worker `step` level). Upon case #2, we retry +/// submitting the same transaction. +pub async fn send_tx_with_account_sequence_retry( + config: &ChainConfig, + rpc_client: &HttpClient, + grpc_address: &Uri, + key_entry: &KeyEntry, + account: &mut Account, + tx_memo: &Memo, + messages: Vec, + retry_counter: u64, +) -> Result { + crate::time!("send_tx_with_account_sequence_retry"); + let _span = + span!(Level::ERROR, "send_tx_with_account_sequence_retry", id = %config.id).entered(); + + do_send_tx_with_account_sequence_retry( + config, + rpc_client, + grpc_address, + key_entry, + account, + tx_memo, + messages, + retry_counter, + ) + .await +} + +// We have to do explicit return of `Box` because Rust +// do not currently support recursive async functions behind the +// `async fn` syntactic sugar. +fn do_send_tx_with_account_sequence_retry<'a>( + config: &'a ChainConfig, + rpc_client: &'a HttpClient, + grpc_address: &'a Uri, + key_entry: &'a KeyEntry, + account: &'a mut Account, + tx_memo: &'a Memo, + messages: Vec, + retry_counter: u64, +) -> Pin> + 'a>> { + Box::pin(async move { + debug!( + "sending {} messages using account sequence {}", + messages.len(), + account.sequence, + ); + + let tx_result = estimate_fee_and_send_tx( + config, + rpc_client, + grpc_address, + key_entry, + account, + tx_memo, + messages.clone(), + ) + .await; + + match tx_result { + // Gas estimation failed with acct. s.n. mismatch at estimate gas step. + // This indicates that the full node did not yet push the previous tx out of its + // mempool. 
Possible explanations: fees too low, network congested, or full node + // congested. Whichever the case, it is more expedient in production to drop the tx + // and refresh the s.n., to allow proceeding to the other transactions. A separate + // retry at the worker-level will handle retrying. + Err(e) if mismatching_account_sequence_number(&e) => { + warn!("failed at estimate_gas step mismatching account sequence: dropping the tx & refreshing account sequence number"); + refresh_account(grpc_address, &key_entry.account, account).await?; + // Note: propagating error here can lead to bug & dropped packets: + // https://github.com/informalsystems/ibc-rs/issues/1153 + // But periodic packet clearing will catch any dropped packets. + Err(e) + } + + // Gas estimation succeeded. Broadcasting failed with a retry-able error. + Ok(response) if response.code == Code::Err(INCORRECT_ACCOUNT_SEQUENCE_ERR) => { + if retry_counter < MAX_ACCOUNT_SEQUENCE_RETRY { + let retry_counter = retry_counter + 1; + warn!("failed at broadcast step with incorrect account sequence. retrying ({}/{})", + retry_counter, MAX_ACCOUNT_SEQUENCE_RETRY); + // Backoff & re-fetch the account s.n. + let backoff = retry_counter * BACKOFF_MULTIPLIER_ACCOUNT_SEQUENCE_RETRY; + + thread::sleep(Duration::from_millis(backoff)); + refresh_account(grpc_address, &key_entry.account, account).await?; + + // Now retry. + do_send_tx_with_account_sequence_retry( + config, + rpc_client, + grpc_address, + key_entry, + account, + tx_memo, + messages, + retry_counter + 1, + ) + .await + } else { + // If after the max retry we still get an account sequence mismatch error, + // we ignore the error and return the original response to downstream. + // We do not return an error here, because the current convention + // let the caller handle error responses separately. + error!("failed due to account sequence errors. the relayer wallet may be used elsewhere concurrently."); + Ok(response) + } + } + + // Catch-all arm for the Ok variant. + // This is the case when gas estimation succeeded. + Ok(response) => { + // Complete success. + match response.code { + Code::Ok => { + debug!("broadcast_tx_sync: {:?}", response); + + account.sequence.increment_mut(); + Ok(response) + } + // Gas estimation succeeded, but broadcasting failed with unrecoverable error. + Code::Err(code) => { + // Avoid increasing the account s.n. if CheckTx failed + // Log the error + error!( + "broadcast_tx_sync: {:?}: diagnostic: {:?}", + response, + sdk_error_from_tx_sync_error_code(code) + ); + Ok(response) + } + } + } + + // Catch-all case for the Err variant. + // Gas estimation failure or other unrecoverable error, propagate. + Err(e) => Err(e), + } + }) +} + +/// Determine whether the given error yielded by `tx_simulate` +/// indicates hat the current sequence number cached in Hermes +/// may be out-of-sync with the full node's version of the s.n. 
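The success arm above advances the cached sequence locally via `account.sequence.increment_mut()` rather than re-querying the node after every broadcast; that cache is exactly what goes stale when the same wallet signs elsewhere, producing the error code 32 handled above. In miniature:

/// Cached account sequence, as the relayer tracks it between broadcasts.
struct AccountSequence(u64);

impl AccountSequence {
    /// Bump the local copy after a successfully-checked broadcast.
    fn increment_mut(&mut self) {
        self.0 += 1
    }
}

fn main() {
    let mut sequence = AccountSequence(41);
    sequence.increment_mut();
    assert_eq!(sequence.0, 42);
    // If another process uses the same wallet, the node's sequence moves
    // ahead of this cached value, and the next broadcast hits the
    // "incorrect account sequence" path and refreshes from the node.
}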
+/// Determine whether the given error yielded by `tx_simulate`
+/// indicates that the current sequence number cached in Hermes
+/// may be out-of-sync with the full node's version of the s.n.
+fn mismatching_account_sequence_number(e: &Error) -> bool {
+    use crate::error::ErrorDetail::*;
+
+    match e.detail() {
+        GrpcStatus(detail) => detail.is_account_sequence_mismatch(),
+        _ => false,
+    }
+}
diff --git a/relayer/src/chain/cosmos/simulate.rs b/relayer/src/chain/cosmos/simulate.rs
new file mode 100644
index 0000000000..ce249a44db
--- /dev/null
+++ b/relayer/src/chain/cosmos/simulate.rs
@@ -0,0 +1,33 @@
+use ibc_proto::cosmos::tx::v1beta1::service_client::ServiceClient;
+use ibc_proto::cosmos::tx::v1beta1::{SimulateRequest, SimulateResponse, Tx};
+use tonic::codegen::http::Uri;
+
+use crate::error::Error;
+
+pub async fn send_tx_simulate(grpc_address: &Uri, tx: Tx) -> Result<SimulateResponse, Error> {
+    crate::time!("send_tx_simulate");
+
+    // The `tx` field of `SimulateRequest` was deprecated in Cosmos SDK 0.43 in favor of `tx_bytes`.
+    let mut tx_bytes = vec![];
+    prost::Message::encode(&tx, &mut tx_bytes)
+        .map_err(|e| Error::protobuf_encode(String::from("Transaction"), e))?;
+
+    #[allow(deprecated)]
+    let req = SimulateRequest {
+        tx: Some(tx), // needed for simulation to go through with Cosmos SDK < 0.43
+        tx_bytes,     // needed for simulation to go through with Cosmos SDK >= 0.43
+    };
+
+    let mut client = ServiceClient::connect(grpc_address.clone())
+        .await
+        .map_err(Error::grpc_transport)?;
+
+    let request = tonic::Request::new(req);
+    let response = client
+        .simulate(request)
+        .await
+        .map_err(Error::grpc_status)?
+        .into_inner();
+
+    Ok(response)
+}
diff --git a/relayer/src/chain/cosmos/tx.rs b/relayer/src/chain/cosmos/tx.rs
new file mode 100644
index 0000000000..0ac651d270
--- /dev/null
+++ b/relayer/src/chain/cosmos/tx.rs
@@ -0,0 +1,68 @@
+use ibc_proto::cosmos::tx::v1beta1::Fee;
+use ibc_proto::google::protobuf::Any;
+use tendermint_rpc::endpoint::broadcast::tx_sync::Response;
+use tendermint_rpc::{Client, HttpClient, Url};
+use tonic::codegen::http::Uri;
+
+use crate::chain::cosmos::encode::sign_and_encode_tx;
+use crate::chain::cosmos::estimate::estimate_tx_fees;
+use crate::chain::cosmos::types::account::Account;
+use crate::config::types::Memo;
+use crate::config::ChainConfig;
+use crate::error::Error;
+use crate::keyring::KeyEntry;
+
+pub async fn estimate_fee_and_send_tx(
+    config: &ChainConfig,
+    rpc_client: &HttpClient,
+    grpc_address: &Uri,
+    key_entry: &KeyEntry,
+    account: &Account,
+    tx_memo: &Memo,
+    messages: Vec<Any>,
+) -> Result<Response, Error> {
+    let fee = estimate_tx_fees(
+        config,
+        grpc_address,
+        key_entry,
+        account,
+        tx_memo,
+        messages.clone(),
+    )
+    .await?;
+
+    send_tx_with_fee(
+        config, rpc_client, key_entry, account, tx_memo, messages, &fee,
+    )
+    .await
+}
+
+async fn send_tx_with_fee(
+    config: &ChainConfig,
+    rpc_client: &HttpClient,
+    key_entry: &KeyEntry,
+    account: &Account,
+    tx_memo: &Memo,
+    messages: Vec<Any>,
+    fee: &Fee,
+) -> Result<Response, Error> {
+    let tx_bytes = sign_and_encode_tx(config, key_entry, account, tx_memo, messages, fee)?;
+
+    let response = broadcast_tx_sync(rpc_client, &config.rpc_addr, tx_bytes).await?;
+
+    Ok(response)
+}
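Both `send_tx_simulate` above and the signing path in encode.rs hand protobuf-encoded bytes to the node. A self-contained sketch of that encoding step, mirroring the `prost::Message::encode` call in `send_tx_simulate` (the helper name is ours):

    use ibc_proto::cosmos::tx::v1beta1::Tx;

    /// Serialize a `Tx` the same way `send_tx_simulate` fills `tx_bytes`.
    fn encode_tx_bytes(tx: &Tx) -> Result<Vec<u8>, prost::EncodeError> {
        let mut buf = Vec::new();
        prost::Message::encode(tx, &mut buf)?;
        Ok(buf)
    }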
+/// Perform a `broadcast_tx_sync`, and return the corresponding deserialized response data.
+async fn broadcast_tx_sync(
+    rpc_client: &HttpClient,
+    rpc_address: &Url,
+    data: Vec<u8>,
+) -> Result<Response, Error> {
+    let response = rpc_client
+        .broadcast_tx_sync(data.into())
+        .await
+        .map_err(|e| Error::rpc(rpc_address.clone(), e))?;
+
+    Ok(response)
+}
diff --git a/relayer/src/chain/cosmos/types/account.rs b/relayer/src/chain/cosmos/types/account.rs
new file mode 100644
index 0000000000..5350c7b373
--- /dev/null
+++ b/relayer/src/chain/cosmos/types/account.rs
@@ -0,0 +1,70 @@
+use core::fmt;
+use ibc_proto::cosmos::auth::v1beta1::BaseAccount;
+
+/// Wrapper for account number and sequence number.
+///
+/// More fields may be added later.
+#[derive(Clone, Debug, PartialEq)]
+pub struct Account {
+    // pub address: String,
+    // pub pub_key: Option<Any>,
+    pub number: AccountNumber,
+    pub sequence: AccountSequence,
+}
+
+impl From<BaseAccount> for Account {
+    fn from(value: BaseAccount) -> Self {
+        Self {
+            number: AccountNumber::new(value.account_number),
+            sequence: AccountSequence::new(value.sequence),
+        }
+    }
+}
+
+/// Newtype for account numbers
+#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
+pub struct AccountNumber(u64);
+
+impl AccountNumber {
+    pub fn new(number: u64) -> Self {
+        Self(number)
+    }
+
+    pub fn to_u64(self) -> u64 {
+        self.0
+    }
+}
+
+impl fmt::Display for AccountNumber {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "{:?}", self)
+    }
+}
+
+/// Newtype for account sequence numbers
+#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
+pub struct AccountSequence(u64);
+
+impl AccountSequence {
+    pub fn new(sequence: u64) -> Self {
+        Self(sequence)
+    }
+
+    pub fn to_u64(self) -> u64 {
+        self.0
+    }
+
+    pub fn increment(self) -> Self {
+        Self(self.0 + 1)
+    }
+
+    pub fn increment_mut(&mut self) {
+        self.0 += 1
+    }
+}
+
+impl fmt::Display for AccountSequence {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "{:?}", self)
+    }
+}
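A usage sketch for the sequence newtype above, assuming `AccountSequence` is in scope; note the split between the by-value and in-place increments, the latter matching the `account.sequence.increment_mut()` call in the retry logic earlier in this patch:

    fn main() {
        let mut seq = AccountSequence::new(7);
        // `increment` is by-value and leaves `seq` itself untouched.
        assert_eq!(seq.increment().to_u64(), 8);
        assert_eq!(seq.to_u64(), 7);
        // `increment_mut` bumps the cached value in place.
        seq.increment_mut();
        assert_eq!(seq.to_u64(), 8);
    }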
diff --git a/relayer/src/chain/cosmos/types/gas.rs b/relayer/src/chain/cosmos/types/gas.rs
new file mode 100644
index 0000000000..55625dab3c
--- /dev/null
+++ b/relayer/src/chain/cosmos/types/gas.rs
@@ -0,0 +1,79 @@
+use ibc_proto::cosmos::tx::v1beta1::Fee;
+
+use crate::chain::cosmos::calculate_fee;
+use crate::config::{ChainConfig, GasPrice};
+
+/// Default gas limit when submitting a transaction.
+const DEFAULT_MAX_GAS: u64 = 400_000;
+
+/// Fraction of the estimated gas to add to the estimated gas amount when submitting a transaction.
+const DEFAULT_GAS_PRICE_ADJUSTMENT: f64 = 0.1;
+
+const DEFAULT_FEE_GRANTER: &str = "";
+
+pub struct GasConfig {
+    pub default_gas: u64,
+    pub max_gas: u64,
+    pub gas_adjustment: f64,
+    pub gas_price: GasPrice,
+    pub max_fee: Fee,
+    pub fee_granter: String,
+}
+
+impl GasConfig {
+    pub fn from_chain_config(config: &ChainConfig) -> GasConfig {
+        GasConfig {
+            default_gas: default_gas_from_config(config),
+            max_gas: max_gas_from_config(config),
+            gas_adjustment: gas_adjustment_from_config(config),
+            gas_price: config.gas_price.clone(),
+            max_fee: max_fee_from_config(config),
+            fee_granter: fee_granter_from_config(config),
+        }
+    }
+}
+
+/// The default amount of gas the relayer is willing to pay for a transaction,
+/// when it cannot simulate the tx and therefore estimate the gas amount needed.
+pub fn default_gas_from_config(config: &ChainConfig) -> u64 {
+    config
+        .default_gas
+        .unwrap_or_else(|| max_gas_from_config(config))
+}
+
+/// The maximum amount of gas the relayer is willing to pay for a transaction.
+pub fn max_gas_from_config(config: &ChainConfig) -> u64 {
+    config.max_gas.unwrap_or(DEFAULT_MAX_GAS)
+}
+
+/// The gas price adjustment.
+fn gas_adjustment_from_config(config: &ChainConfig) -> f64 {
+    config
+        .gas_adjustment
+        .unwrap_or(DEFAULT_GAS_PRICE_ADJUSTMENT)
+}
+
+/// Get the fee granter address.
+fn fee_granter_from_config(config: &ChainConfig) -> String {
+    config
+        .fee_granter
+        .as_deref()
+        .unwrap_or(DEFAULT_FEE_GRANTER)
+        .to_string()
+}
+
+fn max_fee_from_config(config: &ChainConfig) -> Fee {
+    let max_gas = max_gas_from_config(config);
+
+    // The maximum fee the relayer pays for a transaction.
+    let max_fee_in_coins = calculate_fee(max_gas, &config.gas_price);
+
+    let fee_granter = fee_granter_from_config(config);
+
+    Fee {
+        amount: vec![max_fee_in_coins],
+        gas_limit: max_gas,
+        payer: "".to_string(),
+        granter: fee_granter,
+    }
+}
diff --git a/relayer/src/chain/cosmos/types/mod.rs b/relayer/src/chain/cosmos/types/mod.rs
new file mode 100644
index 0000000000..4d91a59966
--- /dev/null
+++ b/relayer/src/chain/cosmos/types/mod.rs
@@ -0,0 +1,3 @@
+pub mod account;
+pub mod gas;
+pub mod tx;
diff --git a/relayer/src/chain/cosmos/types/tx.rs b/relayer/src/chain/cosmos/types/tx.rs
new file mode 100644
index 0000000000..9509c9826c
--- /dev/null
+++ b/relayer/src/chain/cosmos/types/tx.rs
@@ -0,0 +1,18 @@
+use ibc::events::IbcEvent;
+use ibc_proto::cosmos::tx::v1beta1::{AuthInfo, TxBody};
+use tendermint_rpc::endpoint::broadcast::tx_sync::Response;
+
+pub struct SignedTx {
+    pub body: TxBody,
+    pub body_bytes: Vec<u8>,
+    pub auth_info: AuthInfo,
+    pub auth_info_bytes: Vec<u8>,
+    pub signatures: Vec<Vec<u8>>,
+}
+
+pub struct TxSyncResult {
+    // the broadcast_tx_sync response
+    pub response: Response,
+    // the events generated by a Tx once executed
+    pub events: Vec<IbcEvent>,
+}
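`max_fee_from_config` in gas.rs above prices the entire `max_gas` budget at the configured gas price via `calculate_fee`. A worked sketch of that arithmetic with hypothetical numbers (not taken from the patch), assuming `calculate_fee` rounds the product up:

    /// ceil(gas_limit * price): the amount `calculate_fee` is expected to produce.
    fn max_fee_amount(max_gas: u64, gas_price: f64) -> u64 {
        (max_gas as f64 * gas_price).ceil() as u64
    }

    fn main() {
        // The default max_gas of 400_000 at a gas price of 0.025 caps the fee at 10_000 coins.
        assert_eq!(max_fee_amount(400_000, 0.025), 10_000);
    }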
diff --git a/relayer/src/chain/cosmos/wait.rs b/relayer/src/chain/cosmos/wait.rs
new file mode 100644
index 0000000000..953395fef2
--- /dev/null
+++ b/relayer/src/chain/cosmos/wait.rs
@@ -0,0 +1,114 @@
+use core::time::Duration;
+use ibc::core::ics24_host::identifier::ChainId;
+use ibc::events::IbcEvent;
+use ibc::query::{QueryTxHash, QueryTxRequest};
+use itertools::Itertools;
+use std::thread;
+use std::time::Instant;
+use tendermint_rpc::{HttpClient, Url};
+use tracing::{info, trace};
+
+use crate::chain::cosmos::query::tx::query_txs;
+use crate::chain::cosmos::types::tx::TxSyncResult;
+use crate::error::Error;
+
+const WAIT_BACKOFF: Duration = Duration::from_millis(300);
+
+/// Given a vector of `TxSyncResult` elements, each including a transaction
+/// response hash for one or more messages, periodically queries the chain
+/// with the transaction hashes to get the list of IbcEvents included in
+/// those transactions.
+pub async fn wait_for_block_commits(
+    chain_id: &ChainId,
+    rpc_client: &HttpClient,
+    rpc_address: &Url,
+    rpc_timeout: &Duration,
+    tx_sync_results: &mut [TxSyncResult],
+) -> Result<(), Error> {
+    let start_time = Instant::now();
+
+    let hashes = tx_sync_results
+        .iter()
+        .map(|res| res.response.hash.to_string())
+        .join(", ");
+
+    info!(
+        id = %chain_id,
+        "wait_for_block_commits: waiting for commit of tx hash(es) {}",
+        hashes
+    );
+
+    loop {
+        let elapsed = start_time.elapsed();
+
+        if all_tx_results_found(tx_sync_results) {
+            trace!(
+                id = %chain_id,
+                "wait_for_block_commits: retrieved {} tx results after {}ms",
+                tx_sync_results.len(),
+                elapsed.as_millis(),
+            );
+
+            return Ok(());
+        } else if &elapsed > rpc_timeout {
+            return Err(Error::tx_no_confirmation());
+        } else {
+            thread::sleep(WAIT_BACKOFF);
+
+            for tx_sync_result in tx_sync_results.iter_mut() {
+                // ignore error
+                let _ =
+                    update_tx_sync_result(chain_id, rpc_client, rpc_address, tx_sync_result).await;
+            }
+        }
+    }
+}
+
+async fn update_tx_sync_result(
+    chain_id: &ChainId,
+    rpc_client: &HttpClient,
+    rpc_address: &Url,
+    tx_sync_result: &mut TxSyncResult,
+) -> Result<(), Error> {
+    let TxSyncResult { response, events } = tx_sync_result;
+
+    // If this transaction was not committed, determine whether it was because it failed
+    // or because it hasn't been committed yet.
+    if empty_event_present(events) {
+        // If the transaction failed, replace the events with an error,
+        // so that we don't attempt to resolve the transaction later on.
+        if response.code.value() != 0 {
+            *events = vec![IbcEvent::ChainError(format!(
+                "deliver_tx on chain {} for Tx hash {} reports error: code={:?}, log={:?}",
+                chain_id, response.hash, response.code, response.log
+            ))];
+        }
+
+        // Otherwise, try to resolve transaction hash to the corresponding events.
+        let events_per_tx = query_txs(
+            chain_id,
+            rpc_client,
+            rpc_address,
+            QueryTxRequest::Transaction(QueryTxHash(response.hash)),
+        )
+        .await?;
+
+        // If we get events back, progress was made, so we replace the events
+        // with the new ones. In both cases we will check in the next iteration
+        // whether or not the transaction was fully committed.
+        if !events_per_tx.is_empty() {
+            *events = events_per_tx;
+        }
+    }
+
+    Ok(())
+}
+
+fn empty_event_present(events: &[IbcEvent]) -> bool {
+    events.iter().any(|ev| matches!(ev, IbcEvent::Empty(_)))
+}
+
+fn all_tx_results_found(tx_sync_results: &[TxSyncResult]) -> bool {
+    tx_sync_results
+        .iter()
+        .all(|r| !empty_event_present(&r.events))
+}
diff --git a/relayer/src/chain/handle.rs b/relayer/src/chain/handle.rs
index d88e7c0825..100a1c249c 100644
--- a/relayer/src/chain/handle.rs
+++ b/relayer/src/chain/handle.rs
@@ -53,7 +53,7 @@ use crate::{
 
 use super::client::ClientSettings;
 use super::tx::TrackedMsgs;
-use super::{HealthCheck, StatusResponse};
+use super::{ChainStatus, HealthCheck};
 
 mod base;
 mod cache;
@@ -150,7 +150,7 @@ pub enum ChainRequest {
     },
 
     QueryStatus {
-        reply_to: ReplyTo<StatusResponse>,
+        reply_to: ReplyTo<ChainStatus>,
     },
 
     QueryClients {
@@ -384,7 +384,7 @@ pub trait ChainHandle: Clone + Send + Sync + Serialize + Debug + 'static {
     /// Return the version of the IBC protocol that this chain is running, if known.
fn ibc_version(&self) -> Result, Error>; - fn query_status(&self) -> Result; + fn query_status(&self) -> Result; fn query_latest_height(&self) -> Result { Ok(self.query_status()?.height) diff --git a/relayer/src/chain/handle/base.rs b/relayer/src/chain/handle/base.rs index 9dbf33c6dc..984144c69c 100644 --- a/relayer/src/chain/handle/base.rs +++ b/relayer/src/chain/handle/base.rs @@ -37,7 +37,7 @@ use ibc_proto::ibc::core::connection::v1::QueryClientConnectionsRequest; use ibc_proto::ibc::core::connection::v1::QueryConnectionsRequest; use crate::{ - chain::{client::ClientSettings, tx::TrackedMsgs, StatusResponse}, + chain::{client::ClientSettings, tx::TrackedMsgs, ChainStatus}, config::ChainConfig, connection::ConnectionMsgType, error::Error, @@ -142,7 +142,7 @@ impl ChainHandle for BaseChainHandle { self.send(|reply_to| ChainRequest::IbcVersion { reply_to }) } - fn query_status(&self) -> Result { + fn query_status(&self) -> Result { self.send(|reply_to| ChainRequest::QueryStatus { reply_to }) } diff --git a/relayer/src/chain/handle/cache.rs b/relayer/src/chain/handle/cache.rs index 9a3870d7b7..d7e1b83654 100644 --- a/relayer/src/chain/handle/cache.rs +++ b/relayer/src/chain/handle/cache.rs @@ -38,7 +38,7 @@ use crate::cache::{Cache, CacheStatus}; use crate::chain::client::ClientSettings; use crate::chain::handle::{ChainHandle, ChainRequest, Subscription}; use crate::chain::tx::TrackedMsgs; -use crate::chain::{HealthCheck, StatusResponse}; +use crate::chain::{ChainStatus, HealthCheck}; use crate::config::ChainConfig; use crate::error::Error; use crate::telemetry; @@ -127,7 +127,7 @@ impl ChainHandle for CachingChainHandle { self.inner().ibc_version() } - fn query_status(&self) -> Result { + fn query_status(&self) -> Result { self.inner().query_status() } diff --git a/relayer/src/chain/handle/counting.rs b/relayer/src/chain/handle/counting.rs index 8a8b192c97..d094d04c84 100644 --- a/relayer/src/chain/handle/counting.rs +++ b/relayer/src/chain/handle/counting.rs @@ -38,7 +38,7 @@ use tracing::debug; use crate::chain::client::ClientSettings; use crate::chain::handle::{ChainHandle, ChainRequest, Subscription}; use crate::chain::tx::TrackedMsgs; -use crate::chain::{HealthCheck, StatusResponse}; +use crate::chain::{ChainStatus, HealthCheck}; use crate::config::ChainConfig; use crate::error::Error; use crate::util::lock::LockExt; @@ -155,7 +155,7 @@ impl ChainHandle for CountingChainHandle { self.inner().ibc_version() } - fn query_status(&self) -> Result { + fn query_status(&self) -> Result { self.inc_metric("query_status"); self.inner().query_status() } diff --git a/relayer/src/chain/mock.rs b/relayer/src/chain/mock.rs index 52627d2c9c..f26f60ce36 100644 --- a/relayer/src/chain/mock.rs +++ b/relayer/src/chain/mock.rs @@ -40,7 +40,7 @@ use ibc_proto::ibc::core::connection::v1::{ }; use crate::chain::client::ClientSettings; -use crate::chain::{ChainEndpoint, StatusResponse}; +use crate::chain::{ChainEndpoint, ChainStatus}; use crate::config::ChainConfig; use crate::error::Error; use crate::event::monitor::{EventReceiver, EventSender, TxMonitorCmd}; @@ -170,8 +170,8 @@ impl ChainEndpoint for MockChain { unimplemented!() } - fn query_status(&self) -> Result { - Ok(StatusResponse { + fn query_status(&self) -> Result { + Ok(ChainStatus { height: self.context.host_height(), timestamp: self.context.host_timestamp(), }) diff --git a/relayer/src/chain/runtime.rs b/relayer/src/chain/runtime.rs index 09224c241f..e4f74aba8c 100644 --- a/relayer/src/chain/runtime.rs +++ b/relayer/src/chain/runtime.rs @@ -44,7 
+44,7 @@ use ibc_proto::ibc::core::{ }; use crate::{ - chain::{client::ClientSettings, StatusResponse}, + chain::{client::ClientSettings, ChainStatus}, config::ChainConfig, connection::ConnectionMsgType, error::Error, @@ -465,7 +465,7 @@ where reply_to.send(result).map_err(Error::send) } - fn query_status(&self, reply_to: ReplyTo) -> Result<(), Error> { + fn query_status(&self, reply_to: ReplyTo) -> Result<(), Error> { let latest_timestamp = self.chain.query_status(); reply_to.send(latest_timestamp).map_err(Error::send) } diff --git a/relayer/src/chain/tx.rs b/relayer/src/chain/tx.rs index e234af229f..dc9d23f86e 100644 --- a/relayer/src/chain/tx.rs +++ b/relayer/src/chain/tx.rs @@ -9,8 +9,8 @@ use ibc_proto::google::protobuf::Any; /// by sharing the same `tracking_id`. #[derive(Debug, Clone)] pub struct TrackedMsgs { - msgs: Vec, - tracking_id: String, + pub msgs: Vec, + pub tracking_id: String, } impl TrackedMsgs { diff --git a/relayer/src/event/rpc.rs b/relayer/src/event/rpc.rs index 33877f9f16..561e078a44 100644 --- a/relayer/src/event/rpc.rs +++ b/relayer/src/event/rpc.rs @@ -143,14 +143,14 @@ pub fn get_all_events( if query == queries::ibc_client().to_string() { if let Some(mut client_event) = ClientEvents::try_from_tx(abci_event) { client_event.set_height(height); - tracing::trace!("extracted ibc_client event {:?}", client_event); + tracing::trace!("extracted ibc_client event {}", client_event); vals.push((height, client_event)); } } if query == queries::ibc_connection().to_string() { if let Some(mut conn_event) = ConnectionEvents::try_from_tx(abci_event) { conn_event.set_height(height); - tracing::trace!("extracted ibc_connection event {:?}", conn_event); + tracing::trace!("extracted ibc_connection event {}", conn_event); vals.push((height, conn_event)); } } @@ -158,7 +158,7 @@ pub fn get_all_events( if let Some(mut chan_event) = ChannelEvents::try_from_tx(abci_event) { chan_event.set_height(height); let _span = tracing::trace_span!("ibc_channel event").entered(); - tracing::trace!("extracted {:?}", chan_event); + tracing::trace!("extracted {}", chan_event); if matches!(chan_event, IbcEvent::SendPacket(_)) { // Should be the same as the hash of tx_result.tx? if let Some(hash) = diff --git a/relayer/src/foreign_client.rs b/relayer/src/foreign_client.rs index aacebd53fb..8812714f06 100644 --- a/relayer/src/foreign_client.rs +++ b/relayer/src/foreign_client.rs @@ -1164,7 +1164,7 @@ impl ForeignClient Result, Error> { let key = self.get_key(key_name)?; - let private_key_bytes = key.private_key.private_key.to_bytes(); - match address_type { - AddressType::Ethermint { ref pk_type } if pk_type.ends_with(".ethsecp256k1.PubKey") => { - let hash = keccak256_hash(msg.as_slice()); - let s = Secp256k1::signing_only(); - // SAFETY: hash is 32 bytes, as expected in `Message::from_slice` -- see `keccak256_hash`, hence `unwrap` - let sign_msg = Message::from_slice(hash.as_slice()).unwrap(); - let key = SecretKey::from_slice(private_key_bytes.as_slice()) - .map_err(Error::invalid_key_raw)?; - let (_, sig_bytes) = s.sign_recoverable(&sign_msg, &key).serialize_compact(); - Ok(sig_bytes.to_vec()) - } - AddressType::Cosmos | AddressType::Ethermint { .. 
} => { - let signing_key = SigningKey::from_bytes(private_key_bytes.as_slice()) - .map_err(Error::invalid_key)?; - let signature: Signature = signing_key.sign(&msg); - Ok(signature.as_ref().to_vec()) - } - } + sign_message(&key, msg, address_type) } pub fn account_prefix(&self) -> &str { @@ -389,6 +371,33 @@ impl KeyRing { } } +/// Sign a message +pub fn sign_message( + key: &KeyEntry, + msg: Vec, + address_type: &AddressType, +) -> Result, Error> { + let private_key_bytes = key.private_key.private_key.to_bytes(); + match address_type { + AddressType::Ethermint { ref pk_type } if pk_type.ends_with(".ethsecp256k1.PubKey") => { + let hash = keccak256_hash(msg.as_slice()); + let s = Secp256k1::signing_only(); + // SAFETY: hash is 32 bytes, as expected in `Message::from_slice` -- see `keccak256_hash`, hence `unwrap` + let sign_msg = Message::from_slice(hash.as_slice()).unwrap(); + let key = SecretKey::from_slice(private_key_bytes.as_slice()) + .map_err(Error::invalid_key_raw)?; + let (_, sig_bytes) = s.sign_recoverable(&sign_msg, &key).serialize_compact(); + Ok(sig_bytes.to_vec()) + } + AddressType::Cosmos | AddressType::Ethermint { .. } => { + let signing_key = + SigningKey::from_bytes(private_key_bytes.as_slice()).map_err(Error::invalid_key)?; + let signature: Signature = signing_key.sign(&msg); + Ok(signature.as_ref().to_vec()) + } + } +} + /// Decode an extended private key from a mnemonic fn private_key_from_mnemonic( mnemonic_words: &str, diff --git a/relayer/src/lib.rs b/relayer/src/lib.rs index 47552d930c..7d99879458 100644 --- a/relayer/src/lib.rs +++ b/relayer/src/lib.rs @@ -7,6 +7,7 @@ unused_qualifications, rust_2018_idioms )] +#![allow(clippy::too_many_arguments)] // TODO: disable unwraps: // https://github.com/informalsystems/ibc-rs/issues/987 // #![cfg_attr(not(test), deny(clippy::unwrap_used))] diff --git a/relayer/src/link.rs b/relayer/src/link.rs index dd0530fbe0..436e00a287 100644 --- a/relayer/src/link.rs +++ b/relayer/src/link.rs @@ -11,11 +11,11 @@ use crate::chain::counterparty::check_channel_counterparty; use crate::chain::handle::ChainHandle; use crate::channel::{Channel, ChannelSide}; use crate::link::error::LinkError; -use crate::link::relay_path::RelayPath; pub mod cli; pub mod error; pub mod operational_data; + mod pending; mod relay_path; mod relay_sender; @@ -26,6 +26,8 @@ use tx_hashes::TxHashes; // Re-export the telemetries summary pub use relay_summary::RelaySummary; +pub use relay_path::{RelayPath, Resubmit}; + #[derive(Clone, Debug)] pub struct LinkParameters { pub src_port_id: PortId, diff --git a/relayer/src/link/error.rs b/relayer/src/link/error.rs index 67b11869bc..bf2f0803b2 100644 --- a/relayer/src/link/error.rs +++ b/relayer/src/link/error.rs @@ -9,7 +9,7 @@ use crate::connection::ConnectionError; use crate::error::Error; use crate::foreign_client::{ForeignClientError, HasExpiredOrFrozenError}; use crate::supervisor::Error as SupervisorError; -use crate::transfer::PacketError; +use crate::transfer::TransferError; define_error! { LinkError { @@ -64,7 +64,7 @@ define_error! 
{ |_| { "failed during a client operation" },
 
         Packet
-            [ PacketError ]
+            [ TransferError ]
             |_| { "packet error" },
 
         OldPacketClearingFailed
diff --git a/relayer/src/link/pending.rs b/relayer/src/link/pending.rs
index 4aeea71823..3df931608a 100644
--- a/relayer/src/link/pending.rs
+++ b/relayer/src/link/pending.rs
@@ -9,7 +9,7 @@ use ibc::events::IbcEvent;
 use ibc::query::{QueryTxHash, QueryTxRequest};
 
 use crate::error::Error as RelayerError;
-use crate::link::error::LinkError;
+use crate::link::{error::LinkError, RelayPath};
 use crate::util::queue::Queue;
 use crate::{
     chain::handle::ChainHandle,
@@ -65,7 +65,7 @@ impl<Chain: ChainHandle> PendingTxs<Chain> {
         self.chain.id()
     }
 
-    // Insert new pending transaction to the back of the queue.
+    /// Insert a new pending transaction to the back of the queue.
     pub fn insert_new_pending_tx(&self, r: AsyncReply, od: OperationalData) {
         let mut tx_hashes = Vec::new();
         let mut error_events = Vec::new();
@@ -133,11 +133,17 @@ impl<Chain: ChainHandle> PendingTxs<Chain> {
         Ok(Some(all_events))
     }
 
-    /// Try to process one pending transaction if available.
-    pub fn process_pending(
+    /// Try to process one pending transaction within the given timeout duration, if one
+    /// is available.
+    ///
+    /// A `resubmit` closure is provided when the clear interval for packets is 0. If this closure
+    /// is provided, the pending transactions that fail to process within the given timeout duration
+    /// are resubmitted following the logic specified by the closure.
+    pub fn process_pending<ChainA: ChainHandle, ChainB: ChainHandle>(
         &self,
         timeout: Duration,
-        resubmit: impl FnOnce(OperationalData) -> Result<AsyncReply, LinkError>,
+        relay_path: &RelayPath<ChainA, ChainB>,
+        resubmit: Option<impl FnOnce(OperationalData) -> Result<AsyncReply, LinkError>>,
     ) -> Result<Option<RelaySummary>, LinkError> {
         // We process pending transactions in a FIFO manner, so take from
         // the front of the queue.
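The new `resubmit` parameter is an `Option` rather than a boolean flag: passing `None` disables resubmission outright, so the hunk below no longer needs a separate code path for the clear-interval case. A sketch of how a caller is expected to choose it, using the `Resubmit` enum introduced later in this patch (the helper is hypothetical):

    /// Wrap a resubmission closure according to the configured mode.
    fn select_resubmit<F>(mode: Resubmit, f: F) -> Option<F> {
        match mode {
            Resubmit::Yes => Some(f),
            Resubmit::No => None,
        }
    }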
@@ -177,16 +183,34 @@ impl<Chain: ChainHandle> PendingTxs<Chain> {
             // relayer to resubmit the transaction to the chain again.
             error!("timed out while confirming {}", tx_hashes);
 
-            let resubmit_res = resubmit(pending.original_od.clone());
-
-            match resubmit_res {
-                Ok(reply) => {
-                    self.insert_new_pending_tx(reply, pending.original_od);
-                    Ok(None)
+            match resubmit {
+                Some(f) => {
+                    // The pending tx needs to be resubmitted. This involves replacing the tx's
+                    // stale operational data with a fresh copy and then applying the `resubmit`
+                    // closure to it.
+                    let new_od = relay_path
+                        .regenerate_operational_data(pending.original_od.clone());
+
+                    trace!("regenerated operational data for {}", tx_hashes);
+
+                    match new_od.map(f) {
+                        Some(Ok(reply)) => {
+                            self.insert_new_pending_tx(reply, pending.original_od);
+                            Ok(None)
+                        }
+                        Some(Err(e)) => {
+                            self.pending_queue.push_back(pending);
+                            Err(e)
+                        }
+                        None => {
+                            // No operational data was regenerated; nothing to resubmit.
+                            Ok(None)
+                        }
+                    }
                 }
-                Err(e) => {
-                    self.pending_queue.push_back(pending);
-                    Err(e)
+                None => {
+                    // `clear_interval != 0`, so resubmission is disabled.
+                    Ok(None)
                 }
             }
         } else {
diff --git a/relayer/src/link/relay_path.rs b/relayer/src/link/relay_path.rs
index 0561d3ceae..f2f164450c 100644
--- a/relayer/src/link/relay_path.rs
+++ b/relayer/src/link/relay_path.rs
@@ -42,7 +42,7 @@ use crate::chain::counterparty::{
 };
 use crate::chain::handle::ChainHandle;
 use crate::chain::tx::TrackedMsgs;
-use crate::chain::StatusResponse;
+use crate::chain::ChainStatus;
 use crate::channel::error::ChannelError;
 use crate::channel::Channel;
 use crate::event::monitor::EventBatch;
@@ -59,6 +59,27 @@ use crate::util::queue::Queue;
 
 const MAX_RETRIES: usize = 5;
 
+/// Whether or not to resubmit packets when pending transactions
+/// fail to process within the given timeout duration.
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+pub enum Resubmit {
+    Yes,
+    No,
+}
+
+impl Resubmit {
+    /// Packet resubmission is enabled when the clear interval for packets is 0. Otherwise,
+    /// when the packet clear interval is > 0, the relayer will periodically clear unsent packets,
+    /// so resubmitting packets is not necessary.
+    pub fn from_clear_interval(clear_interval: u64) -> Self {
+        if clear_interval == 0 {
+            Self::Yes
+        } else {
+            Self::No
+        }
+    }
+}
+
 pub struct RelayPath<ChainA: ChainHandle, ChainB: ChainHandle> {
     channel: Channel<ChainA, ChainB>,
 
@@ -557,8 +578,8 @@ impl<ChainA: ChainHandle, ChainB: ChainHandle> RelayPath<ChainA, ChainB> {
         Ok(S::Reply::empty())
     }
 
-    /// Helper for managing retries of the `relay_from_operational_data` method.
-    /// Expects as input the initial operational data that failed to send.
+    /// Generates fresh operational data for a tx given the initial operational data
+    /// that failed to send.
     ///
     /// Return value:
     /// - `Some(..)`: a new operational data from which to retry sending,
@@ -567,7 +588,7 @@ impl<ChainA: ChainHandle, ChainB: ChainHandle> RelayPath<ChainA, ChainB> {
     ///
     /// Side effects: may schedule a new operational data targeting the source chain, comprising
     /// new timeout messages.
-    fn regenerate_operational_data(
+    pub(crate) fn regenerate_operational_data(
        &self,
        initial_odata: OperationalData,
    ) -> Option<OperationalData> {
@@ -1317,7 +1338,7 @@ impl<ChainA: ChainHandle, ChainB: ChainHandle> RelayPath<ChainA, ChainB> {
     fn build_timeout_from_send_packet_event(
         &self,
         event: &SendPacket,
-        dst_info: &StatusResponse,
+        dst_info: &ChainStatus,
     ) -> Result<Option<Any>, LinkError> {
         let packet = event.packet.clone();
         if self
@@ -1335,7 +1356,7 @@ impl<ChainA: ChainHandle, ChainB: ChainHandle> RelayPath<ChainA, ChainB> {
     fn build_recv_or_timeout_from_send_packet_event(
         &self,
         event: &SendPacket,
-        dst_info: &StatusResponse,
+        dst_info: &ChainStatus,
     ) -> Result<(Option<Any>, Option<Any>), LinkError> {
         let timeout = self.build_timeout_from_send_packet_event(event, dst_info)?;
         if timeout.is_some() {
@@ -1371,17 +1392,20 @@ impl<ChainA: ChainHandle, ChainB: ChainHandle> RelayPath<ChainA, ChainB> {
         Ok(())
     }
 
-    pub fn process_pending_txs(&self) -> RelaySummary {
+    /// Kicks off the process of relaying pending txs to the source and destination chains.
+    ///
+    /// See [`Resubmit::from_clear_interval`] for more info about the `resubmit` parameter.
+ pub fn process_pending_txs(&self, resubmit: Resubmit) -> RelaySummary { if !self.confirm_txes { return RelaySummary::empty(); } - let mut summary_src = self.process_pending_txs_src().unwrap_or_else(|e| { + let mut summary_src = self.process_pending_txs_src(resubmit).unwrap_or_else(|e| { error!("error processing pending events in source chain: {}", e); RelaySummary::empty() }); - let summary_dst = self.process_pending_txs_dst().unwrap_or_else(|e| { + let summary_dst = self.process_pending_txs_dst(resubmit).unwrap_or_else(|e| { error!( "error processing pending events in destination chain: {}", e @@ -1393,23 +1417,33 @@ impl RelayPath { summary_src } - fn process_pending_txs_src(&self) -> Result { + fn process_pending_txs_src(&self, resubmit: Resubmit) -> Result { + let do_resubmit = match resubmit { + Resubmit::Yes => { + Some(|odata| self.relay_from_operational_data::(odata)) + } + Resubmit::No => None, + }; + let res = self .pending_txs_src - .process_pending(pending::TIMEOUT, |odata| { - self.relay_from_operational_data::(odata) - })? + .process_pending(pending::TIMEOUT, self, do_resubmit)? .unwrap_or_else(RelaySummary::empty); Ok(res) } - fn process_pending_txs_dst(&self) -> Result { + fn process_pending_txs_dst(&self, resubmit: Resubmit) -> Result { + let do_resubmit = match resubmit { + Resubmit::Yes => { + Some(|odata| self.relay_from_operational_data::(odata)) + } + Resubmit::No => None, + }; + let res = self .pending_txs_dst - .process_pending(pending::TIMEOUT, |odata| { - self.relay_from_operational_data::(odata) - })? + .process_pending(pending::TIMEOUT, self, do_resubmit)? .unwrap_or_else(RelaySummary::empty); Ok(res) diff --git a/relayer/src/link/relay_summary.rs b/relayer/src/link/relay_summary.rs index 3332e65629..2dbcdd9d83 100644 --- a/relayer/src/link/relay_summary.rs +++ b/relayer/src/link/relay_summary.rs @@ -1,3 +1,5 @@ +use core::fmt; + use ibc::events::IbcEvent; #[derive(Clone, Debug)] @@ -24,3 +26,13 @@ impl RelaySummary { self.events.extend(other.events) } } + +impl fmt::Display for RelaySummary { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "RelaySummary events: ")?; + for e in &self.events { + write!(f, "{}; ", e)? + } + write!(f, "total events = {}", self.events.len()) + } +} diff --git a/relayer/src/registry.rs b/relayer/src/registry.rs index 5468db5ce3..26f857fa83 100644 --- a/relayer/src/registry.rs +++ b/relayer/src/registry.rs @@ -26,15 +26,27 @@ define_error! { RuntimeNotFound | _ | { "expected runtime to be found in registry" }, - MissingChain + MissingChainConfig { chain_id: ChainId } | e | { - format_args!("missing chain for id ({}) in configuration file", + format_args!("missing chain config for '{}' in configuration file", e.chain_id) } } } +impl SpawnError { + pub fn log_as_debug(&self) -> bool { + self.detail().log_as_debug() + } +} + +impl SpawnErrorDetail { + pub fn log_as_debug(&self) -> bool { + matches!(self, SpawnErrorDetail::MissingChainConfig(_)) + } +} + /// Registry for keeping track of [`ChainHandle`]s indexed by a `ChainId`. /// /// The purpose of this type is to avoid spawning multiple runtimes for a single `ChainId`. 
@@ -150,7 +162,7 @@ pub fn spawn_chain_runtime( let chain_config = config .find_chain(chain_id) .cloned() - .ok_or_else(|| SpawnError::missing_chain(chain_id.clone()))?; + .ok_or_else(|| SpawnError::missing_chain_config(chain_id.clone()))?; let handle = ChainRuntime::::spawn(chain_config, rt).map_err(SpawnError::relayer)?; diff --git a/relayer/src/supervisor.rs b/relayer/src/supervisor.rs index 689765eaa2..89b05af381 100644 --- a/relayer/src/supervisor.rs +++ b/relayer/src/supervisor.rs @@ -7,7 +7,7 @@ use std::sync::RwLock; use crossbeam_channel::{unbounded, Receiver, Sender}; use itertools::Itertools; -use tracing::{error, error_span, info, trace, warn}; +use tracing::{debug, error, error_span, info, trace, warn}; use ibc::{ core::ics24_host::identifier::{ChainId, ChannelId, PortId}, @@ -18,10 +18,7 @@ use ibc::{ use crate::{ chain::{handle::ChainHandle, HealthCheck}, config::Config, - event::{ - self, - monitor::{Error as EventError, ErrorDetail as EventErrorDetail, EventBatch}, - }, + event::monitor::{self, Error as EventError, ErrorDetail as EventErrorDetail, EventBatch}, object::Object, registry::{Registry, SharedRegistry}, rest, @@ -50,7 +47,7 @@ use cmd::SupervisorCmd; use self::{scan::ChainScanner, spawn::SpawnContext}; -type ArcBatch = Arc>; +type ArcBatch = Arc>; type Subscription = Receiver; /** @@ -332,6 +329,15 @@ fn relay_on_object( false } + Err(e) if e.log_as_debug() => { + debug!( + "denying relaying on object {}, caused by: {}", + object.short_name(), + e + ); + + false + } Err(e) => { warn!( "denying relaying on object {}, caused by: {}", diff --git a/relayer/src/supervisor/client_state_filter.rs b/relayer/src/supervisor/client_state_filter.rs index e34776dfed..46525ea75e 100644 --- a/relayer/src/supervisor/client_state_filter.rs +++ b/relayer/src/supervisor/client_state_filter.rs @@ -55,6 +55,12 @@ define_error! { } } +impl FilterError { + pub fn log_as_debug(&self) -> bool { + matches!(self.detail(), FilterErrorDetail::Spawn(e) if e.source.log_as_debug()) + } +} + /// A cache storing filtering status (allow or deny) for /// arbitrary identifiers. #[derive(Default, Debug)] diff --git a/relayer/src/supervisor/error.rs b/relayer/src/supervisor/error.rs index 0da9b6d48f..a05235712a 100644 --- a/relayer/src/supervisor/error.rs +++ b/relayer/src/supervisor/error.rs @@ -64,10 +64,16 @@ define_error! { Spawn [ SpawnError ] - |_| { "supervisor was not able to connect to any chains" }, + |_| { "supervisor was not able to spawn chain runtime" }, Scan [ ScanError ] |_| { "supervisor encountered an error when scanning chains" }, } } + +impl Error { + pub fn log_as_debug(&self) -> bool { + matches!(self.detail(), ErrorDetail::Spawn(e) if e.source.log_as_debug()) + } +} diff --git a/relayer/src/transfer.rs b/relayer/src/transfer.rs index f6e928431a..e9d950e1e5 100644 --- a/relayer/src/transfer.rs +++ b/relayer/src/transfer.rs @@ -13,11 +13,12 @@ use uint::FromStrRadixErr; use crate::chain::handle::ChainHandle; use crate::chain::tx::TrackedMsgs; +use crate::chain::ChainStatus; use crate::error::Error; use crate::util::bigint::U256; define_error! { - PacketError { + TransferError { Relayer [ Error ] |_| { "relayer error" }, @@ -51,6 +52,9 @@ define_error! 
{ format!("internal error, expected IBCEvent::ChainError, got {:?}", e.event) },
+
+        ZeroTimeout
+            | _ | { "packet timeout height and packet timeout timestamp cannot both be 0" },
     }
 }
 
@@ -71,6 +75,48 @@ impl FromStr for Amount {
     }
 }
 
+#[derive(Copy, Clone)]
+pub struct TransferTimeout {
+    pub timeout_height: Height,
+    pub timeout_timestamp: Timestamp,
+}
+
+impl TransferTimeout {
+    /**
+       Construct the transfer timeout parameters from the given timeout
+       height offset, timeout duration, and the latest chain status
+       containing the latest time of the destination chain.
+
+       The height offset and duration are optional, with zero indicating
+       that the packet does not expire at the given height or time.
+       If both height offset and duration are zero, then the packet will
+       never expire.
+    */
+    pub fn new(
+        timeout_height_offset: u64,
+        timeout_duration: Duration,
+        destination_chain_status: &ChainStatus,
+    ) -> Result<Self, TransferError> {
+        let timeout_height = if timeout_height_offset == 0 {
+            Height::zero()
+        } else {
+            destination_chain_status.height.add(timeout_height_offset)
+        };
+
+        let timeout_timestamp = if timeout_duration == Duration::ZERO {
+            Timestamp::none()
+        } else {
+            (destination_chain_status.timestamp + timeout_duration)
+                .map_err(TransferError::timestamp_overflow)?
+        };
+
+        Ok(TransferTimeout {
+            timeout_height,
+            timeout_timestamp,
+        })
+    }
+}
+
 #[derive(Clone, Debug)]
 pub struct TransferOptions {
     pub packet_src_port_id: PortId,
@@ -79,7 +125,7 @@ pub struct TransferOptions {
     pub denom: String,
     pub receiver: Option<String>,
     pub timeout_height_offset: u64,
-    pub timeout_seconds: Duration,
+    pub timeout_duration: Duration,
     pub number_msgs: usize,
 }
 
@@ -87,28 +133,23 @@ pub fn build_and_send_transfer_messages(
-) -> Result<Vec<IbcEvent>, PacketError> {
+) -> Result<Vec<IbcEvent>, TransferError> {
     let receiver = match &opts.receiver {
-        None => packet_dst_chain.get_signer().map_err(PacketError::key)?,
+        None => packet_dst_chain.get_signer().map_err(TransferError::key)?,
         Some(r) => r.clone().into(),
     };
 
-    let sender = packet_src_chain.get_signer().map_err(PacketError::key)?;
+    let sender = packet_src_chain.get_signer().map_err(TransferError::key)?;
 
-    let timeout_timestamp = if opts.timeout_seconds == Duration::from_secs(0) {
-        Timestamp::none()
-    } else {
-        (Timestamp::now() + opts.timeout_seconds).map_err(PacketError::timestamp_overflow)?
-    };
+    let chain_status = packet_dst_chain
+        .query_status()
+        .map_err(TransferError::relayer)?;
 
-    let timeout_height = if opts.timeout_height_offset == 0 {
-        Height::zero()
-    } else {
-        packet_dst_chain
-            .query_latest_height()
-            .map_err(PacketError::relayer)?
- .add(opts.timeout_height_offset) - }; + let timeout = TransferTimeout::new( + opts.timeout_height_offset, + opts.timeout_duration, + &chain_status, + )?; let msg = MsgTransfer { source_port: opts.packet_src_port_id.clone(), @@ -119,8 +160,8 @@ pub fn build_and_send_transfer_messages Ok(events), Some(err) => { if let IbcEvent::ChainError(err) = err { - Err(PacketError::tx_response(err.clone())) + Err(TransferError::tx_response(err.clone())) } else { panic!( "internal error, expected IBCEvent::ChainError, got {:?}", diff --git a/relayer/src/upgrade_chain.rs b/relayer/src/upgrade_chain.rs index e201e2f8ed..29fc2f7e79 100644 --- a/relayer/src/upgrade_chain.rs +++ b/relayer/src/upgrade_chain.rs @@ -5,16 +5,17 @@ use core::time::Duration; use bytes::BufMut; use flex_error::define_error; -use ibc::clients::ics07_tendermint::client_state::UpgradeOptions; -use ibc::downcast; -use ibc_proto::google::protobuf::Any; +use tendermint::abci::transaction::Hash as TxHash; + +use ibc::clients::ics07_tendermint::client_state::UpgradeOptions; use ibc::core::ics02_client::client_state::{AnyClientState, ClientState}; use ibc::core::ics02_client::height::Height; use ibc::core::ics24_host::identifier::{ChainId, ClientId}; -use ibc::events::IbcEvent; +use ibc::downcast; use ibc_proto::cosmos::gov::v1beta1::MsgSubmitProposal; use ibc_proto::cosmos::upgrade::v1beta1::{Plan, SoftwareUpgradeProposal}; +use ibc_proto::google::protobuf::Any; use ibc_proto::ibc::core::client::v1::UpgradeProposal; use crate::chain::tx::TrackedMsgs; @@ -68,7 +69,7 @@ pub fn build_and_send_ibc_upgrade_proposal( mut dst_chain: CosmosSdkChain, // the chain which will undergo an upgrade src_chain: CosmosSdkChain, // the source chain; supplies a client state for building the upgrade plan opts: &UpgradePlanOptions, -) -> Result, UpgradeChainError> { +) -> Result { let upgrade_height = dst_chain .query_latest_height() .map_err(UpgradeChainError::query)? @@ -140,20 +141,16 @@ pub fn build_and_send_ibc_upgrade_proposal( value: buf_msg, }; - let events = dst_chain - .send_messages_and_wait_commit(TrackedMsgs::new_single(any_msg, "upgrade")) - .map_err(|e| UpgradeChainError::submit(dst_chain.id().clone(), e))?; + // Can't use send_messages_and_wait_commit because no IBC events + // corresponding to the transaction can be recognized to confirm the + // upgrade. + // https://github.com/informalsystems/ibc-rs/issues/1288#issuecomment-1066884163 - // Check if the chain rejected the transaction - let result = events.iter().find_map(|event| match event { - IbcEvent::ChainError(reason) => Some(reason.clone()), - _ => None, - }); + let responses = dst_chain + .send_messages_and_wait_check_tx(TrackedMsgs::new_single(any_msg, "upgrade")) + .map_err(|e| UpgradeChainError::submit(dst_chain.id().clone(), e))?; - match result { - None => Ok(events), - Some(reason) => Err(UpgradeChainError::tx_response(reason)), - } + Ok(responses[0].hash) } enum Proposal { diff --git a/relayer/src/util/queue.rs b/relayer/src/util/queue.rs index 2fe5ab144a..27d501eb33 100644 --- a/relayer/src/util/queue.rs +++ b/relayer/src/util/queue.rs @@ -3,11 +3,11 @@ use std::sync::{Arc, RwLock}; use crate::util::lock::LockExt; -// A lightweight wrapper type to RefCell> so that -// we can safely mutate it with regular reference instead of -// mutable reference. We only expose subset of VecDeque methods -// that does not return any inner reference, so that the RefCell -// can never panic caused by simultaneous borrow and borrow_mut. 
+/// A lightweight wrapper type to `RefCell<VecDeque<T>>` so that
+/// we can safely mutate it with a regular reference instead of a
+/// mutable reference. We only expose a subset of `VecDeque`'s methods,
+/// those that do not return any inner reference, so that the `RefCell`
+/// can never panic from a simultaneous borrow and borrow_mut.
 pub struct Queue<T>(Arc<RwLock<VecDeque<T>>>);
 
 impl<T> Queue<T> {
@@ -24,7 +24,7 @@ impl<T> Queue<T> {
     }
 
     pub fn push_back(&self, val: T) {
-        self.0.acquire_write().push_front(val)
+        self.0.acquire_write().push_back(val)
     }
 
     pub fn push_front(&self, val: T) {
diff --git a/relayer/src/worker.rs b/relayer/src/worker.rs
index 176b209d1e..6681d58627 100644
--- a/relayer/src/worker.rs
+++ b/relayer/src/worker.rs
@@ -5,7 +5,7 @@ use std::sync::Mutex;
 use tracing::error;
 
 use crate::foreign_client::ForeignClient;
-use crate::link::{Link, LinkParameters};
+use crate::link::{Link, LinkParameters, Resubmit};
 use crate::{
     chain::handle::{ChainHandle, ChainHandlePair},
     config::Config,
@@ -125,6 +125,7 @@ pub fn spawn_worker_tasks(
         Ok(link) => {
             let (cmd_tx, cmd_rx) = crossbeam_channel::unbounded();
             let link = Arc::new(Mutex::new(link));
+            let resubmit = Resubmit::from_clear_interval(packets_config.clear_interval);
             let packet_task = packet::spawn_packet_cmd_worker(
                 cmd_rx,
                 link.clone(),
@@ -134,7 +135,7 @@ pub fn spawn_worker_tasks(
             );
             task_handles.push(packet_task);
 
-            let link_task = packet::spawn_packet_worker(path.clone(), link);
+            let link_task = packet::spawn_packet_worker(path.clone(), link, resubmit);
             task_handles.push(link_task);
 
             (Some(cmd_tx), None)
diff --git a/relayer/src/worker/cmd.rs b/relayer/src/worker/cmd.rs
index 2293384706..c809daa108 100644
--- a/relayer/src/worker/cmd.rs
+++ b/relayer/src/worker/cmd.rs
@@ -1,3 +1,5 @@
+use core::fmt;
+
 use ibc::{core::ics02_client::events::NewBlock, Height};
 
 use crate::event::monitor::EventBatch;
@@ -14,3 +16,21 @@ pub enum WorkerCmd {
     /// Trigger a pending packets clear
     ClearPendingPackets,
 }
+
+impl fmt::Display for WorkerCmd {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self {
+            WorkerCmd::IbcEvents { batch } => {
+                write!(f, "IbcEvents batch from {}: ", batch.chain_id)?;
+                for e in &batch.events {
+                    write!(f, "{}; ", e)?;
+                }
+                write!(f, "batch Height: {}", batch.height)
+            }
+            WorkerCmd::NewBlock { height, new_block } => {
+                write!(f, "NewBlock({}, {:?})", height, new_block)
+            }
+            WorkerCmd::ClearPendingPackets => write!(f, "ClearPendingPackets"),
+        }
+    }
+}
diff --git a/relayer/src/worker/packet.rs b/relayer/src/worker/packet.rs
index 6d41d92726..5c090bde32 100644
--- a/relayer/src/worker/packet.rs
+++ b/relayer/src/worker/packet.rs
@@ -9,6 +9,7 @@ use ibc::Height;
 
 use crate::chain::handle::ChainHandle;
 use crate::foreign_client::HasExpiredOrFrozenError;
+use crate::link::Resubmit;
 use crate::link::{error::LinkError, Link};
 use crate::object::Packet;
 use crate::telemetry;
@@ -52,10 +53,13 @@ fn handle_link_error_in_task(e: LinkError) -> TaskError {
     }
 }
 
+/// Spawns a packet worker task in the background that handles the work of
+/// processing pending txs between `ChainA` and `ChainB`.
pub fn spawn_packet_worker( path: Packet, // Mutex is used to prevent race condition between the packet workers link: Arc>>, + resubmit: Resubmit, ) -> TaskHandle { let span = { let relay_path = &link.lock().unwrap().a_to_b; @@ -79,10 +83,10 @@ pub fn spawn_packet_worker( .execute_schedule() .map_err(handle_link_error_in_task)?; - let summary = relay_path.process_pending_txs(); + let summary = relay_path.process_pending_txs(resubmit); if !summary.is_empty() { - trace!("Packet worker produced relay summary: {:?}", summary); + trace!("packet worker produced relay summary: {}", summary); } telemetry!(packet_metrics(&path, &summary)); @@ -148,7 +152,7 @@ fn handle_packet_cmd( cmd: WorkerCmd, index: u64, ) -> RetryResult<(), u64> { - trace!("handling command {:?}", cmd); + trace!("handling command {}", cmd); let result = match cmd { WorkerCmd::IbcEvents { batch } => link.a_to_b.update_schedule(batch), @@ -218,7 +222,8 @@ fn handle_packet_cmd( } } - let summary = link.a_to_b.process_pending_txs(); + let resubmit = Resubmit::from_clear_interval(clear_interval); + let summary = link.a_to_b.process_pending_txs(resubmit); if !summary.is_empty() { trace!("produced relay summary: {:?}", summary); diff --git a/tools/integration-test/src/tests/client_expiration.rs b/tools/integration-test/src/tests/client_expiration.rs index 57b57da9bb..10dcfe0d24 100644 --- a/tools/integration-test/src/tests/client_expiration.rs +++ b/tools/integration-test/src/tests/client_expiration.rs @@ -3,11 +3,9 @@ use std::thread::sleep; use ibc::core::ics03_connection::connection::State as ConnectionState; use ibc::core::ics04_channel::channel::State as ChannelState; -use ibc::core::ics04_channel::Version as ChannelVersion; -use ibc_relayer::config::default::connection_delay as default_connection_delay; use ibc_relayer::config::{self, Config, ModeConfig}; -use ibc_test_framework::bootstrap::binary::chain::ForeignClientBuilder; +use ibc_test_framework::bootstrap::binary::chain::bootstrap_foreign_client_pair; use ibc_test_framework::bootstrap::binary::channel::{ bootstrap_channel_with_chains, bootstrap_channel_with_connection, }; @@ -140,7 +138,7 @@ impl BinaryChainTest for ChannelExpirationTest { let connection = { let _refresh_tasks = spawn_refresh_client_tasks(&chains.foreign_clients)?; - bootstrap_connection(&chains.foreign_clients, default_connection_delay(), false)? + bootstrap_connection(&chains.foreign_clients, Default::default())? }; wait_for_client_expiry(); @@ -214,9 +212,11 @@ impl BinaryChainTest for ChannelExpirationTest { "Trying to create new channel and worker after previous connection worker failed" ); - let foreign_clients_2 = ForeignClientBuilder::new(&chains.handle_a, &chains.handle_b) - .pair() - .bootstrap()?; + let foreign_clients_2 = bootstrap_foreign_client_pair( + &chains.handle_a, + &chains.handle_b, + Default::default(), + )?; // Need to spawn refresh client for new clients to make sure they don't expire @@ -282,10 +282,8 @@ impl BinaryChainTest for PacketExpirationTest { &chains, &PortId::transfer(), &PortId::transfer(), - Order::Unordered, - ChannelVersion::ics20(), - default_connection_delay(), - false, + Default::default(), + Default::default(), )? }; @@ -367,14 +365,14 @@ impl BinaryChainTest for CreateOnExpiredClientTest { let connection = { let _refresh_tasks = spawn_refresh_client_tasks(&chains.foreign_clients)?; - bootstrap_connection(&chains.foreign_clients, default_connection_delay(), false)? + bootstrap_connection(&chains.foreign_clients, Default::default())? 
}; wait_for_client_expiry(); info!("trying to bootstrap connection after IBC client is expired"); - let res = bootstrap_connection(&chains.foreign_clients, default_connection_delay(), false); + let res = bootstrap_connection(&chains.foreign_clients, Default::default()); match res { Ok(_) => { @@ -397,9 +395,7 @@ impl BinaryChainTest for CreateOnExpiredClientTest { connection, &DualTagged::new(&PortId::transfer()), &DualTagged::new(&PortId::transfer()), - Order::Unordered, - ChannelVersion::ics20(), - false, + Default::default(), ); match res { diff --git a/tools/test-framework/Cargo.toml b/tools/test-framework/Cargo.toml index 208722acca..b05c582b60 100644 --- a/tools/test-framework/Cargo.toml +++ b/tools/test-framework/Cargo.toml @@ -22,7 +22,7 @@ tendermint = { version = "=0.23.6" } tendermint-rpc = { version = "=0.23.6", features = ["http-client", "websocket-client"] } tokio = { version = "1.0", features = ["full"] } -tracing = "0.1.33" +tracing = "0.1.34" tracing-subscriber = "0.3.11" eyre = "0.6.8" color-eyre = "0.6" diff --git a/tools/test-framework/src/bootstrap/binary/chain.rs b/tools/test-framework/src/bootstrap/binary/chain.rs index ff8991d2ee..ac85b78837 100644 --- a/tools/test-framework/src/bootstrap/binary/chain.rs +++ b/tools/test-framework/src/bootstrap/binary/chain.rs @@ -26,223 +26,95 @@ use crate::types::tagged::*; use crate::types::wallet::{TestWallets, Wallet}; use crate::util::random::random_u64_range; -/// A builder to bootstrap two relayer chain handles with connected -/// foreign clients. -pub struct Builder<'config> { - test_config: &'config TestConfig, - node_a: FullNode, - node_b: FullNode, - client_options_a_to_b: ClientOptions, - client_options_b_to_a: ClientOptions, +#[derive(Default)] +pub struct BootstrapClientOptions { + pub client_options_a_to_b: ClientOptions, + pub client_options_b_to_a: ClientOptions, } -impl<'config> Builder<'config> { - /// Initializes the builder with two [`FullNode`] values representing two different - /// running full nodes, to bootstrap chain A and chain B respectively. - pub fn with_node_pair( - test_config: &'config TestConfig, - node_a: FullNode, - node_b: FullNode, - ) -> Self { - Self { - test_config, - node_a, - node_b, - client_options_a_to_b: Default::default(), - client_options_b_to_a: Default::default(), - } - } - - /// Work similary to [`with_node_pair`][wnp], but bootstraps a - /// single chain to be connected with itself. - /// - /// Self-connected chains are in fact allowed in IBC. Although we do not - /// have a clear use case for it yet, it is important to verify that - /// tests that pass with two connected chains should also pass with - /// self-connected chains. - /// - /// [wnp]: Builder::with_node_pair - /// - pub fn self_connected(test_config: &'config TestConfig, node: FullNode) -> Self { - let node1 = node.clone(); - Self::with_node_pair(test_config, node, node1) - } - - /// Overrides options for the foreign client connecting chain A to chain B. - pub fn client_options_a_to_b(mut self, options: ClientOptions) -> Self { - self.client_options_a_to_b = options; - self - } - - /// Overrides options for the foreign client connecting chain B to chain A. - pub fn client_options_b_to_a(mut self, options: ClientOptions) -> Self { - self.client_options_b_to_a = options; - self - } - - /// Bootstraps two relayer chain handles with connected foreign clients. 
- /// - /// Returns a tuple consisting of the [`RelayerDriver`] and a - /// [`ConnectedChains`] object that contains the given - /// full nodes together with the corresponding two [`ChainHandle`]s and - /// [`ForeignClient`]s. - pub fn bootstrap( - self, - ) -> Result< - ( - RelayerDriver, - ConnectedChains, - ), - Error, - > { - self.bootstrap_with_config(|_| {}) - } - - /// Bootstraps two relayer chain handles with connected foreign clients. - /// - /// Returns a tuple consisting of the [`RelayerDriver`] and a - /// [`ConnectedChains`] object that contains the given - /// full nodes together with the corresponding two [`ChainHandle`]s and - /// [`ForeignClient`]s. - /// - /// This method gives the caller a way to modify the relayer configuration - /// that is pre-generated from the configurations of the full nodes. - pub fn bootstrap_with_config( - self, - config_modifier: impl FnOnce(&mut Config), - ) -> Result< - ( - RelayerDriver, - ConnectedChains, - ), - Error, - > { - let mut config = Config::default(); - - add_chain_config(&mut config, &self.node_a)?; - add_chain_config(&mut config, &self.node_b)?; - - config_modifier(&mut config); - - let config_path = self.test_config.chain_store_dir.join("relayer-config.toml"); - - save_relayer_config(&config, &config_path)?; - - let registry = new_registry(config.clone()); - - // Pass in unique closure expressions `||{}` as the first argument so that - // the returned chains are considered different types by Rust. - // See [`spawn_chain_handle`] for more details. - let handle_a = spawn_chain_handle(|| {}, ®istry, &self.node_a)?; - let handle_b = spawn_chain_handle(|| {}, ®istry, &self.node_b)?; - - if self.test_config.bootstrap_with_random_ids { - pad_client_ids(&handle_a, &handle_b)?; - pad_client_ids(&handle_b, &handle_a)?; - } - - let foreign_clients = ForeignClientBuilder::new(&handle_a, &handle_b) - .client_options(self.client_options_a_to_b) - .pair() - .client_options(self.client_options_b_to_a) - .bootstrap()?; - - let relayer = RelayerDriver { - config_path, - config, - registry, - hang_on_fail: self.test_config.hang_on_fail, - }; - - let chains = ConnectedChains::new( - handle_a, - handle_b, - MonoTagged::new(self.node_a), - MonoTagged::new(self.node_b), - foreign_clients, - ); - - Ok((relayer, chains)) - } -} +/// Bootstraps two relayer chain handles with connected foreign clients. +/// +/// Returns a tuple consisting of the [`RelayerDriver`] and a +/// [`ConnectedChains`] object that contains the given +/// full nodes together with the corresponding two [`ChainHandle`]s and +/// [`ForeignClient`]s. 
+pub fn bootstrap_chains_with_full_nodes( + test_config: &TestConfig, + node_a: FullNode, + node_b: FullNode, + options: BootstrapClientOptions, + config_modifier: impl FnOnce(&mut Config), +) -> Result< + ( + RelayerDriver, + ConnectedChains, + ), + Error, +> { + let mut config = Config::default(); -pub fn pad_client_ids( - chain_a: &ChainA, - chain_b: &ChainB, -) -> Result<(), Error> { - let foreign_client = - ForeignClient::restore(ClientId::default(), chain_b.clone(), chain_a.clone()); + add_chain_config(&mut config, &node_a)?; + add_chain_config(&mut config, &node_b)?; - for i in 0..random_u64_range(1, 6) { - debug!("creating new client id {} on chain {}", i + 1, chain_b.id()); - foreign_client.build_create_client_and_send(Default::default())?; - } + config_modifier(&mut config); - Ok(()) -} + let config_path = test_config.chain_store_dir.join("relayer-config.toml"); -pub struct ForeignClientBuilder<'a, ChainA: ChainHandle, ChainB: ChainHandle> { - chain_a: &'a ChainA, - chain_b: &'a ChainB, - client_options: ClientOptions, -} + save_relayer_config(&config, &config_path)?; -pub struct ForeignClientPairBuilder<'a, ChainA: ChainHandle, ChainB: ChainHandle> { - a_to_b: ForeignClientBuilder<'a, ChainA, ChainB>, - b_to_a_client_options: ClientOptions, -} + let registry = new_registry(config.clone()); -impl<'a, ChainA: ChainHandle, ChainB: ChainHandle> ForeignClientBuilder<'a, ChainA, ChainB> { - pub fn new(chain_a: &'a ChainA, chain_b: &'a ChainB) -> Self { - Self { - chain_a, - chain_b, - client_options: Default::default(), - } - } + // Pass in unique closure expressions `||{}` as the first argument so that + // the returned chains are considered different types by Rust. + // See [`spawn_chain_handle`] for more details. + let handle_a = spawn_chain_handle(|| {}, ®istry, &node_a)?; + let handle_b = spawn_chain_handle(|| {}, ®istry, &node_b)?; - pub fn client_options(mut self, settings: ClientOptions) -> Self { - self.client_options = settings; - self + if test_config.bootstrap_with_random_ids { + pad_client_ids(&handle_a, &handle_b)?; + pad_client_ids(&handle_b, &handle_a)?; } - /// Bootstrap a foreign client from `ChainA` to `ChainB`, i.e. the foreign - /// client collects information from `ChainA` and submits them as transactions - /// to `ChainB`. - /// - /// The returned `ForeignClient` is tagged in contravariant ordering, i.e. - /// `ChainB` then `ChainB`, because `ForeignClient` takes the the destination - /// chain in the first position. - pub fn bootstrap(self) -> Result, Error> { - bootstrap_foreign_client(self.chain_a, self.chain_b, self.client_options) - } + let foreign_clients = bootstrap_foreign_client_pair(&handle_a, &handle_b, options)?; + + let relayer = RelayerDriver { + config_path, + config, + registry, + hang_on_fail: test_config.hang_on_fail, + }; + + let chains = ConnectedChains::new( + handle_a, + handle_b, + MonoTagged::new(node_a), + MonoTagged::new(node_b), + foreign_clients, + ); - /// Continues the builder composition for a pair of clients in both directions. - pub fn pair(self) -> ForeignClientPairBuilder<'a, ChainA, ChainB> { - ForeignClientPairBuilder { - a_to_b: self, - b_to_a_client_options: Default::default(), - } - } + Ok((relayer, chains)) } -impl<'a, ChainA: ChainHandle, ChainB: ChainHandle> ForeignClientPairBuilder<'a, ChainA, ChainB> { - /// Overrides the settings for a client in the reverse direction (B to A). 
- pub fn client_options(mut self, settings: ClientOptions) -> Self { - self.b_to_a_client_options = settings; - self - } - - pub fn bootstrap(self) -> Result, Error> { - let chain_a = self.a_to_b.chain_a; - let chain_b = self.a_to_b.chain_b; - let client_a_to_b = bootstrap_foreign_client(chain_a, chain_b, self.a_to_b.client_options)?; - let client_b_to_a = bootstrap_foreign_client(chain_b, chain_a, self.b_to_a_client_options)?; - Ok(ForeignClientPair::new(client_a_to_b, client_b_to_a)) - } +/// Bootstraps two relayer chain handles with connected foreign clients. +/// +/// Returns a tuple consisting of the [`RelayerDriver`] and a +/// [`ConnectedChains`] object that contains the given +/// full nodes together with the corresponding two [`ChainHandle`]s and +/// [`ForeignClient`]s. +/// +/// This method gives the caller a way to modify the relayer configuration +/// that is pre-generated from the configurations of the full nodes. +pub fn bootstrap_foreign_client_pair( + chain_a: &ChainA, + chain_b: &ChainB, + options: BootstrapClientOptions, +) -> Result, Error> { + let client_a_to_b = bootstrap_foreign_client(chain_a, chain_b, options.client_options_a_to_b)?; + let client_b_to_a = bootstrap_foreign_client(chain_b, chain_a, options.client_options_b_to_a)?; + Ok(ForeignClientPair::new(client_a_to_b, client_b_to_a)) } -fn bootstrap_foreign_client( +pub fn bootstrap_foreign_client( chain_a: &ChainA, chain_b: &ChainB, client_options: ClientOptions, @@ -268,6 +140,21 @@ fn bootstrap_foreign_client( )) } +pub fn pad_client_ids( + chain_a: &ChainA, + chain_b: &ChainB, +) -> Result<(), Error> { + let foreign_client = + ForeignClient::restore(ClientId::default(), chain_b.clone(), chain_a.clone()); + + for i in 0..random_u64_range(1, 6) { + debug!("creating new client id {} on chain {}", i + 1, chain_b.id()); + foreign_client.build_create_client_and_send(Default::default())?; + } + + Ok(()) +} + /** Spawn a new chain handle using the given [`SharedRegistry`] and [`FullNode`]. @@ -385,3 +272,17 @@ pub fn save_relayer_config(config: &Config, config_path: &Path) -> Result<(), Er Ok(()) } + +impl BootstrapClientOptions { + /// Overrides options for the foreign client connecting chain A to chain B. + pub fn client_options_a_to_b(mut self, options: ClientOptions) -> Self { + self.client_options_a_to_b = options; + self + } + + /// Overrides options for the foreign client connecting chain B to chain A. + pub fn client_options_b_to_a(mut self, options: ClientOptions) -> Self { + self.client_options_b_to_a = options; + self + } +} diff --git a/tools/test-framework/src/bootstrap/binary/channel.rs b/tools/test-framework/src/bootstrap/binary/channel.rs index 6399c5852f..0a8b46b4bb 100644 --- a/tools/test-framework/src/bootstrap/binary/channel.rs +++ b/tools/test-framework/src/bootstrap/binary/channel.rs @@ -2,7 +2,6 @@ Helper functions for bootstrapping a channel between two chains. 
*/ -use core::time::Duration; use eyre::{eyre, Report as Error}; use ibc::core::ics04_channel::channel::Order; use ibc::core::ics04_channel::Version; @@ -11,7 +10,7 @@ use ibc_relayer::chain::handle::ChainHandle; use ibc_relayer::channel::{Channel, ChannelSide}; use tracing::{debug, info}; -use super::connection::bootstrap_connection; +use super::connection::{bootstrap_connection, BootstrapConnectionOptions}; use crate::types::binary::chains::ConnectedChains; use crate::types::binary::channel::ConnectedChannel; use crate::types::binary::connection::ConnectedConnection; @@ -20,6 +19,12 @@ use crate::types::id::TaggedPortIdRef; use crate::types::tagged::*; use crate::util::random::random_u64_range; +pub struct BootstrapChannelOptions { + pub order: Order, + pub version: Version, + pub bootstrap_with_random_ids: bool, +} + /** Create a new [`ConnectedChannel`] based on the provided [`ConnectedChains`]. @@ -30,19 +35,15 @@ pub fn bootstrap_channel_with_chains( chains: &ConnectedChains, port_a: &PortId, port_b: &PortId, - order: Order, - version: Version, - connection_delay: Duration, - bootstrap_with_random_ids: bool, + connection_options: BootstrapConnectionOptions, + channel_options: BootstrapChannelOptions, ) -> Result, Error> { let channel = bootstrap_channel( &chains.foreign_clients, &DualTagged::new(port_a), &DualTagged::new(port_b), - order, - version, - connection_delay, - bootstrap_with_random_ids, + connection_options, + channel_options, )?; Ok(channel) @@ -56,13 +57,10 @@ pub fn bootstrap_channel( foreign_clients: &ForeignClientPair, port_a: &TaggedPortIdRef, port_b: &TaggedPortIdRef, - order: Order, - version: Version, - connection_delay: Duration, - bootstrap_with_random_ids: bool, + connection_options: BootstrapConnectionOptions, + channel_options: BootstrapChannelOptions, ) -> Result, Error> { - let connection = - bootstrap_connection(foreign_clients, connection_delay, bootstrap_with_random_ids)?; + let connection = bootstrap_connection(foreign_clients, connection_options)?; bootstrap_channel_with_connection( &foreign_clients.handle_a(), @@ -70,9 +68,7 @@ pub fn bootstrap_channel( connection, port_a, port_b, - order, - version, - bootstrap_with_random_ids, + channel_options, ) } @@ -85,21 +81,19 @@ pub fn bootstrap_channel_with_connection, port_a: &TaggedPortIdRef, port_b: &TaggedPortIdRef, - order: Order, - version: Version, - bootstrap_with_random_ids: bool, + options: BootstrapChannelOptions, ) -> Result, Error> { - if bootstrap_with_random_ids { + if options.bootstrap_with_random_ids { pad_channel_id(chain_a, chain_b, &connection, port_a)?; pad_channel_id(chain_b, chain_a, &connection.clone().flip(), port_b)?; } let channel = Channel::new( connection.connection.clone(), - order, + options.order, port_a.0.clone(), port_b.0.clone(), - Some(version), + Some(options.version), )?; let channel_id_a = *channel @@ -188,3 +182,30 @@ pub fn pad_channel_id( Ok(()) } + +impl Default for BootstrapChannelOptions { + fn default() -> Self { + Self { + order: Order::Unordered, + version: Version::ics20(), + bootstrap_with_random_ids: false, + } + } +} + +impl BootstrapChannelOptions { + pub fn order(mut self, order: Order) -> Self { + self.order = order; + self + } + + pub fn version(mut self, version: Version) -> Self { + self.version = version; + self + } + + pub fn bootstrap_with_random_ids(mut self, bootstrap_with_random_ids: bool) -> Self { + self.bootstrap_with_random_ids = bootstrap_with_random_ids; + self + } +} diff --git a/tools/test-framework/src/bootstrap/binary/connection.rs 
b/tools/test-framework/src/bootstrap/binary/connection.rs index 858ce1bb73..942c1b068b 100644 --- a/tools/test-framework/src/bootstrap/binary/connection.rs +++ b/tools/test-framework/src/bootstrap/binary/connection.rs @@ -6,6 +6,7 @@ use core::time::Duration; use eyre::{eyre, Report as Error}; use ibc::timestamp::ZERO_DURATION; use ibc_relayer::chain::handle::ChainHandle; +use ibc_relayer::config::default::connection_delay as default_connection_delay; use ibc_relayer::connection::{Connection, ConnectionSide}; use tracing::{debug, info}; @@ -16,14 +17,18 @@ use crate::types::binary::foreign_client::ForeignClientPair; use crate::types::id::TaggedClientIdRef; use crate::util::random::random_u64_range; +pub struct BootstrapConnectionOptions { + pub connection_delay: Duration, + pub bootstrap_with_random_ids: bool, +} + /** Create a new [`ConnectedConnection`] using the foreign clients with initialized client IDs. */ pub fn bootstrap_connection( foreign_clients: &ForeignClientPair, - connection_delay: Duration, - bootstrap_with_random_ids: bool, + options: BootstrapConnectionOptions, ) -> Result, Error> { let chain_a = foreign_clients.handle_a(); let chain_b = foreign_clients.handle_b(); @@ -31,7 +36,7 @@ pub fn bootstrap_connection( let client_id_a = foreign_clients.client_id_a(); let client_id_b = foreign_clients.client_id_b(); - if bootstrap_with_random_ids { + if options.bootstrap_with_random_ids { pad_connection_id(&chain_a, &chain_b, &client_id_a, &client_id_b)?; pad_connection_id(&chain_b, &chain_a, &client_id_b, &client_id_a)?; } @@ -39,7 +44,7 @@ pub fn bootstrap_connection( let connection = Connection::new( foreign_clients.client_b_to_a.clone(), foreign_clients.client_a_to_b.clone(), - connection_delay, + options.connection_delay, )?; let connection_id_a = connection @@ -104,3 +109,24 @@ pub fn pad_connection_id( Ok(()) } + +impl Default for BootstrapConnectionOptions { + fn default() -> Self { + Self { + connection_delay: default_connection_delay(), + bootstrap_with_random_ids: false, + } + } +} + +impl BootstrapConnectionOptions { + pub fn connection_delay(mut self, connection_delay: Duration) -> Self { + self.connection_delay = connection_delay; + self + } + + pub fn bootstrap_with_random_ids(mut self, bootstrap_with_random_ids: bool) -> Self { + self.bootstrap_with_random_ids = bootstrap_with_random_ids; + self + } +} diff --git a/tools/test-framework/src/bootstrap/nary/chain.rs b/tools/test-framework/src/bootstrap/nary/chain.rs index 5b001ae319..30b2188266 100644 --- a/tools/test-framework/src/bootstrap/nary/chain.rs +++ b/tools/test-framework/src/bootstrap/nary/chain.rs @@ -9,8 +9,8 @@ use ibc_relayer::foreign_client::ForeignClient; use ibc_relayer::registry::SharedRegistry; use crate::bootstrap::binary::chain::{ - add_chain_config, add_keys_to_chain_handle, new_registry, save_relayer_config, - ForeignClientBuilder, + add_chain_config, add_keys_to_chain_handle, bootstrap_foreign_client, new_registry, + save_relayer_config, }; use crate::error::{handle_generic_error, Error}; use crate::relayer::driver::RelayerDriver; @@ -84,7 +84,8 @@ pub fn boostrap_chains_with_any_nodes( let mut foreign_clients_b = Vec::new(); for handle_b in chain_handles.iter() { - let foreign_client = ForeignClientBuilder::new(handle_a, handle_b).bootstrap()?; + let foreign_client = bootstrap_foreign_client(handle_a, handle_b, Default::default())?; + foreign_clients_b.push(foreign_client); } diff --git a/tools/test-framework/src/bootstrap/nary/channel.rs b/tools/test-framework/src/bootstrap/nary/channel.rs 
index ac3a58e36e..ff04920983 100644 --- a/tools/test-framework/src/bootstrap/nary/channel.rs +++ b/tools/test-framework/src/bootstrap/nary/channel.rs @@ -5,11 +5,12 @@ use core::convert::TryInto; use core::time::Duration; use ibc::core::ics04_channel::channel::Order; -use ibc::core::ics04_channel::Version; use ibc::core::ics24_host::identifier::PortId; use ibc_relayer::chain::handle::ChainHandle; -use crate::bootstrap::binary::channel::bootstrap_channel_with_connection; +use crate::bootstrap::binary::channel::{ + bootstrap_channel_with_connection, BootstrapChannelOptions, +}; use crate::bootstrap::nary::connection::bootstrap_connections_dynamic; use crate::error::{handle_generic_error, Error}; use crate::types::binary::channel::ConnectedChannel; @@ -48,15 +49,17 @@ pub fn bootstrap_channels_with_connections_dynamic( let port_a = &ports[i][j]; let port_b = &ports[j][i]; + let bootstrap_options = BootstrapChannelOptions::default() + .order(order) + .bootstrap_with_random_ids(bootstrap_with_random_ids); + let channel = bootstrap_channel_with_connection( chain_a, chain_b, connection.clone(), &DualTagged::new(port_a), &DualTagged::new(port_b), - order, - Version::ics20(), - bootstrap_with_random_ids, + bootstrap_options, )?; channels_b.push(channel); diff --git a/tools/test-framework/src/bootstrap/nary/connection.rs b/tools/test-framework/src/bootstrap/nary/connection.rs index 78390afee5..932f9fc9be 100644 --- a/tools/test-framework/src/bootstrap/nary/connection.rs +++ b/tools/test-framework/src/bootstrap/nary/connection.rs @@ -7,7 +7,7 @@ use core::time::Duration; use ibc_relayer::chain::handle::ChainHandle; use ibc_relayer::foreign_client::ForeignClient; -use crate::bootstrap::binary::connection::bootstrap_connection; +use crate::bootstrap::binary::connection::{bootstrap_connection, BootstrapConnectionOptions}; use crate::error::Error; use crate::types::binary::connection::ConnectedConnection; use crate::types::binary::foreign_client::ForeignClientPair; @@ -39,11 +39,11 @@ pub fn bootstrap_connections_dynamic( let foreign_clients = ForeignClientPair::new(foreign_client.clone(), counter_foreign_client.clone()); - let connection = bootstrap_connection( - &foreign_clients, - connection_delay, - bootstrap_with_random_ids, - )?; + let bootstrap_options = BootstrapConnectionOptions::default() + .connection_delay(connection_delay) + .bootstrap_with_random_ids(bootstrap_with_random_ids); + + let connection = bootstrap_connection(&foreign_clients, bootstrap_options)?; connections_b.push(connection); } else { diff --git a/tools/test-framework/src/error.rs b/tools/test-framework/src/error.rs index 2683989201..d8d8db24c3 100644 --- a/tools/test-framework/src/error.rs +++ b/tools/test-framework/src/error.rs @@ -7,7 +7,7 @@ use ibc_relayer::channel::error::ChannelError; use ibc_relayer::connection::ConnectionError; use ibc_relayer::error::Error as RelayerError; use ibc_relayer::supervisor::error::Error as SupervisorError; -use ibc_relayer::transfer::PacketError; +use ibc_relayer::transfer::TransferError; use std::io::{Error as IoError, ErrorKind as IoErrorKind}; define_error! { @@ -45,9 +45,9 @@ define_error! { [ ConnectionError ] | _ | { "connection error"}, - Packet - [ PacketError ] - | _ | { "packet error"}, + Transfer + [ TransferError ] + | _ | { "transfer error"}, Retry { @@ -60,7 +60,7 @@ define_error! 
{
                     e.attempts, e.task_name
                 )
-            }
+            },
     }
 }
@@ -111,8 +111,8 @@
     }
 }

-impl From<PacketError> for Error {
-    fn from(e: PacketError) -> Self {
-        Error::packet(e)
+impl From<TransferError> for Error {
+    fn from(e: TransferError) -> Self {
+        Error::transfer(e)
     }
 }
diff --git a/tools/test-framework/src/framework/binary/chain.rs b/tools/test-framework/src/framework/binary/chain.rs
index 9f299f0061..4a342a2c7f 100644
--- a/tools/test-framework/src/framework/binary/chain.rs
+++ b/tools/test-framework/src/framework/binary/chain.rs
@@ -8,7 +8,7 @@
 use ibc_relayer::config::Config;
 use ibc_relayer::foreign_client::CreateOptions as ClientOptions;
 use tracing::info;

-use crate::bootstrap::binary::chain::Builder;
+use crate::bootstrap::binary::chain::{bootstrap_chains_with_full_nodes, BootstrapClientOptions};
 use crate::error::Error;
 use crate::framework::base::{HasOverrides, TestConfigOverride};
 use crate::framework::binary::node::{
@@ -203,12 +203,20 @@ where
 {
     fn run(&self, config: &TestConfig, node_a: FullNode, node_b: FullNode) -> Result<(), Error> {
         let overrides = self.test.get_overrides();
-        let (relayer, chains) = Builder::with_node_pair(config, node_a, node_b)
+
+        let bootstrap_options = BootstrapClientOptions::default()
             .client_options_a_to_b(overrides.client_options_a_to_b())
-            .client_options_b_to_a(overrides.client_options_b_to_a())
-            .bootstrap_with_config(|config| {
+            .client_options_b_to_a(overrides.client_options_b_to_a());
+
+        let (relayer, chains) = bootstrap_chains_with_full_nodes(
+            config,
+            node_a,
+            node_b,
+            bootstrap_options,
+            |config| {
                 overrides.modify_relayer_config(config);
-            })?;
+            },
+        )?;

         let env_path = config.chain_store_dir.join("binary-chains.env");
diff --git a/tools/test-framework/src/framework/binary/channel.rs b/tools/test-framework/src/framework/binary/channel.rs
index 01f32b26ac..e25822c785 100644
--- a/tools/test-framework/src/framework/binary/channel.rs
+++ b/tools/test-framework/src/framework/binary/channel.rs
@@ -10,7 +10,9 @@
 use ibc::core::ics24_host::identifier::PortId;
 use ibc_relayer::chain::handle::ChainHandle;
 use tracing::info;

-use crate::bootstrap::binary::channel::bootstrap_channel_with_connection;
+use crate::bootstrap::binary::channel::{
+    bootstrap_channel_with_connection, BootstrapChannelOptions,
+};
 use crate::error::Error;
 use crate::framework::base::{HasOverrides, TestConfigOverride};
 use crate::framework::binary::chain::{
@@ -196,8 +198,11 @@ where
         let port_a = overrides.channel_port_a();
         let port_b = overrides.channel_port_b();
-        let order = overrides.channel_order();
-        let version = overrides.channel_version();
+
+        let bootstrap_options = BootstrapChannelOptions::default()
+            .order(overrides.channel_order())
+            .version(overrides.channel_version())
+            .bootstrap_with_random_ids(config.bootstrap_with_random_ids);

         let channels = bootstrap_channel_with_connection(
             &chains.handle_a,
@@ -205,9 +210,7 @@ where
             connection,
             &DualTagged::new(port_a).as_ref(),
             &DualTagged::new(port_b).as_ref(),
-            order,
-            version,
-            config.bootstrap_with_random_ids,
+            bootstrap_options,
         )?;

         let env_path = config.chain_store_dir.join("binary-channels.env");
diff --git a/tools/test-framework/src/framework/binary/connection.rs b/tools/test-framework/src/framework/binary/connection.rs
index 5534926f38..9e96a7665e 100644
--- a/tools/test-framework/src/framework/binary/connection.rs
+++ b/tools/test-framework/src/framework/binary/connection.rs
@@ -8,7 +8,7 @@
 use core::time::Duration;
 use ibc_relayer::chain::handle::ChainHandle;
 use tracing::info;
-use crate::bootstrap::binary::connection::bootstrap_connection;
+use crate::bootstrap::binary::connection::{bootstrap_connection, BootstrapConnectionOptions};
 use crate::error::Error;
 use crate::framework::base::HasOverrides;
 use crate::framework::base::TestConfigOverride;
@@ -154,13 +154,11 @@ where
         relayer: RelayerDriver,
         chains: ConnectedChains<ChainA, ChainB>,
     ) -> Result<(), Error> {
-        let connection_delay = self.get_overrides().connection_delay();
+        let bootstrap_options = BootstrapConnectionOptions::default()
+            .connection_delay(self.get_overrides().connection_delay())
+            .bootstrap_with_random_ids(config.bootstrap_with_random_ids);

-        let connection = bootstrap_connection(
-            &chains.foreign_clients,
-            connection_delay,
-            config.bootstrap_with_random_ids,
-        )?;
+        let connection = bootstrap_connection(&chains.foreign_clients, bootstrap_options)?;

         let env_path = config.chain_store_dir.join("binary-connections.env");
diff --git a/tools/test-framework/src/relayer/chain.rs b/tools/test-framework/src/relayer/chain.rs
index bca4a5bf34..3d2c00be6b 100644
--- a/tools/test-framework/src/relayer/chain.rs
+++ b/tools/test-framework/src/relayer/chain.rs
@@ -57,7 +57,7 @@
 use ibc_proto::ibc::core::connection::v1::QueryConnectionsRequest;
 use ibc_relayer::chain::client::ClientSettings;
 use ibc_relayer::chain::handle::{ChainHandle, ChainRequest, Subscription};
 use ibc_relayer::chain::tx::TrackedMsgs;
-use ibc_relayer::chain::{HealthCheck, StatusResponse};
+use ibc_relayer::chain::{ChainStatus, HealthCheck};
 use ibc_relayer::config::ChainConfig;
 use ibc_relayer::error::Error;
 use ibc_relayer::{connection::ConnectionMsgType, keyring::KeyEntry};
@@ -128,7 +128,7 @@ where
         self.value().ibc_version()
     }

-    fn query_status(&self) -> Result<StatusResponse, Error> {
+    fn query_status(&self) -> Result<ChainStatus, Error> {
         self.value().query_status()
     }
diff --git a/tools/test-framework/src/relayer/transfer.rs b/tools/test-framework/src/relayer/transfer.rs
index cd0575a76e..81643bd4da 100644
--- a/tools/test-framework/src/relayer/transfer.rs
+++ b/tools/test-framework/src/relayer/transfer.rs
@@ -46,7 +46,7 @@ pub fn tx_raw_ft_transfer(
     denom: &MonoTagged<ChainA, &Denom>,
     amount: u64,
     timeout_height_offset: u64,
-    timeout_seconds: Duration,
+    timeout_duration: Duration,
     number_messages: usize,
 ) -> Result<Vec<IbcEvent>, Error> {
     let transfer_options = TransferOptions {
@@ -56,7 +56,7 @@ pub fn tx_raw_ft_transfer(
         denom: denom.value().to_string(),
         receiver: Some(recipient.value().0.clone()),
         timeout_height_offset,
-        timeout_seconds,
+        timeout_duration,
         number_msgs: number_messages,
     };

From 83b60d709ebce850cac30cb35a4552287d67b3da Mon Sep 17 00:00:00 2001
From: Sean Chen
Date: Fri, 22 Apr 2022 10:05:24 -0500
Subject: [PATCH 02/36] Adding integration test for execute_schedule
---
 tools/integration-test/src/tests/mod.rs | 1 +
 tools/test-framework/src/error.rs | 11 +++++++++++
 2 files changed, 12 insertions(+)

diff --git a/tools/integration-test/src/tests/mod.rs b/tools/integration-test/src/tests/mod.rs
index 1cab6d91e0..eeddd632ab 100644
--- a/tools/integration-test/src/tests/mod.rs
+++ b/tools/integration-test/src/tests/mod.rs
@@ -9,6 +9,7 @@
 pub mod clear_packet;
 pub mod client_expiration;
 mod client_settings;
 pub mod connection_delay;
+pub mod execute_schedule;
 pub mod memo;
 pub mod supervisor;
 pub mod ternary_transfer;
diff --git a/tools/test-framework/src/error.rs b/tools/test-framework/src/error.rs
index d8d8db24c3..388f80b1dc 100644
--- a/tools/test-framework/src/error.rs
+++ b/tools/test-framework/src/error.rs
@@ -6,6 +6,7 @@ use flex_error::{define_error, TraceError};
 use ibc_relayer::channel::error::ChannelError;
 use ibc_relayer::connection::ConnectionError;
 use ibc_relayer::error::Error as RelayerError;
+use ibc_relayer::link::error::LinkError;
 use ibc_relayer::supervisor::error::Error as SupervisorError;
 use ibc_relayer::transfer::TransferError;
 use std::io::{Error as IoError, ErrorKind as IoErrorKind};
@@ -49,6 +50,10 @@ define_error! {
         [ TransferError ]
         | _ | { "transfer error"},

+        Link
+            [ LinkError ]
+            | _ | { "link error" },
+
         Retry
             {
                 task_name: String,
@@ -116,3 +121,9 @@ impl From<TransferError> for Error {
         Error::transfer(e)
     }
 }
+
+impl From<LinkError> for Error {
+    fn from(e: LinkError) -> Self {
+        Error::link(e)
+    }
+}

From 60edac7bb3b25b6dd667012a7a5bb84b4941c5ec Mon Sep 17 00:00:00 2001
From: Sean Chen
Date: Fri, 22 Apr 2022 10:06:04 -0500
Subject: [PATCH 03/36] Adding integration test for execute_schedule
---
 .../src/tests/execute_schedule.rs | 72 +++++++++++++++++++
 1 file changed, 72 insertions(+)
 create mode 100644 tools/integration-test/src/tests/execute_schedule.rs

diff --git a/tools/integration-test/src/tests/execute_schedule.rs b/tools/integration-test/src/tests/execute_schedule.rs
new file mode 100644
index 0000000000..a4e873de71
--- /dev/null
+++ b/tools/integration-test/src/tests/execute_schedule.rs
@@ -0,0 +1,72 @@
+use ibc_test_framework::prelude::*;
+use ibc_test_framework::util::random::random_u64_range;
+
+use ibc_relayer::link::{Link, LinkParameters};
+
+#[test]
+fn test_execute_schedule() -> Result<(), Error> {
+    run_binary_channel_test(&ExecuteScheduleTest)
+}
+
+pub struct ExecuteScheduleTest;
+
+impl TestOverrides for ExecuteScheduleTest {
+    fn should_spawn_supervisor(&self) -> bool {
+        false
+    }
+}
+
+impl BinaryChannelTest for ExecuteScheduleTest {
+    fn run<ChainA: ChainHandle, ChainB: ChainHandle>(
+        &self,
+        _config: &TestConfig,
+        relayer: RelayerDriver,
+        chains: ConnectedChains<ChainA, ChainB>,
+        channel: ConnectedChannel<ChainA, ChainB>,
+    ) -> Result<(), Error> {
+        let denom_a = chains.node_a.denom();
+
+        let wallet_a = chains.node_a.wallets().user1().cloned();
+        let wallet_b = chains.node_b.wallets().user1().cloned();
+
+        let balance_a = chains
+            .node_a
+            .chain_driver()
+            .query_balance(&wallet_a.address(), &denom_a)?;
+
+        let amount1 = random_u64_range(1000, 5000);
+
+        info!(
+            "Performing IBC transfer with amount {}, which should be relayed because it's an ordered channel",
+            amount1
+        );
+
+        chains.node_a.chain_driver().transfer_token(
+            &channel.port_a.as_ref(),
+            &channel.channel_id_a.as_ref(),
+            &wallet_a.address(),
+            &wallet_b.address(),
+            amount1,
+            &denom_a,
+        )?;
+
+        sleep(Duration::from_secs(2));
+
+        let link_opts = LinkParameters {
+            src_port_id: channel.port_a.clone().into_value(),
+            src_channel_id: channel.channel_id_a.clone().into_value(),
+        };
+        let link = Link::new_from_opts(
+            chains.handle_a().clone(),
+            chains.handle_b().clone(),
+            link_opts,
+            false,
+        )?;
+        let relay_path = link.a_to_b;
+
+        // relay pending packets
+        relay_path.schedule_packet_clearing(None)?;
+
+        Ok(())
+    }
+}

From 5b26c2fd6565ccda51b96d1ecd81bbb14936fdf2 Mon Sep 17 00:00:00 2001
From: Sean Chen
Date: Fri, 22 Apr 2022 11:20:27 -0500
Subject: [PATCH 04/36] Finish stubbing out execute_schedule test
---
 .../src/tests/execute_schedule.rs | 16 +++++++++++++++-
 1 file changed, 15 insertions(+), 1 deletion(-)

diff --git a/tools/integration-test/src/tests/execute_schedule.rs b/tools/integration-test/src/tests/execute_schedule.rs
index a4e873de71..c8cfe993fc 100644
--- a/tools/integration-test/src/tests/execute_schedule.rs
+++ b/tools/integration-test/src/tests/execute_schedule.rs
@@ -1,7 +1,7 @@
 use
ibc_test_framework::prelude::*; use ibc_test_framework::util::random::random_u64_range; -use ibc_relayer::link::{Link, LinkParameters}; +use ibc_relayer::link::{Link, LinkParameters, Resubmit}; #[test] fn test_execute_schedule() -> Result<(), Error> { @@ -66,6 +66,20 @@ impl BinaryChannelTest for ExecuteScheduleTest { // relay pending packets relay_path.schedule_packet_clearing(None)?; + relay_path.refresh_schedule()?; + relay_path.execute_schedule()?; + + let summary = relay_path.process_pending_txs(Resubmit::No); + + assert_eq!(summary.events.len(), 1); + + chains.shutdown(); + + relay_path.execute_schedule()?; + + let summary = relay_path.process_pending_txs(Resubmit::No); + + assert_eq!(summary.events.len(), 1); Ok(()) } From 37130a3949e992b61daa5504e3ff4ae1e4e0ca8c Mon Sep 17 00:00:00 2001 From: Sean Chen Date: Mon, 25 Apr 2022 10:28:27 -0500 Subject: [PATCH 05/36] Call `chains.shutdown` method --- tools/integration-test/src/tests/execute_schedule.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/integration-test/src/tests/execute_schedule.rs b/tools/integration-test/src/tests/execute_schedule.rs index c8cfe993fc..fe7b93bd8d 100644 --- a/tools/integration-test/src/tests/execute_schedule.rs +++ b/tools/integration-test/src/tests/execute_schedule.rs @@ -73,7 +73,7 @@ impl BinaryChannelTest for ExecuteScheduleTest { assert_eq!(summary.events.len(), 1); - chains.shutdown(); + chains.node_a.shutdown(); relay_path.execute_schedule()?; From c032b4f55fbe0f8991b7246d0dc2e6d31b435bc9 Mon Sep 17 00:00:00 2001 From: Sean Chen Date: Mon, 25 Apr 2022 11:50:02 -0500 Subject: [PATCH 06/36] Correctly shut down chain --- tools/integration-test/src/tests/execute_schedule.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tools/integration-test/src/tests/execute_schedule.rs b/tools/integration-test/src/tests/execute_schedule.rs index fe7b93bd8d..c459cea5a4 100644 --- a/tools/integration-test/src/tests/execute_schedule.rs +++ b/tools/integration-test/src/tests/execute_schedule.rs @@ -20,7 +20,7 @@ impl BinaryChannelTest for ExecuteScheduleTest { fn run( &self, _config: &TestConfig, - relayer: RelayerDriver, + _relayer: RelayerDriver, chains: ConnectedChains, channel: ConnectedChannel, ) -> Result<(), Error> { @@ -29,7 +29,7 @@ impl BinaryChannelTest for ExecuteScheduleTest { let wallet_a = chains.node_a.wallets().user1().cloned(); let wallet_b = chains.node_b.wallets().user1().cloned(); - let balance_a = chains + let _balance_a = chains .node_a .chain_driver() .query_balance(&wallet_a.address(), &denom_a)?; @@ -73,7 +73,7 @@ impl BinaryChannelTest for ExecuteScheduleTest { assert_eq!(summary.events.len(), 1); - chains.node_a.shutdown(); + chains.node_a.value().kill()?; relay_path.execute_schedule()?; From a5617153b34a42be54de7bee5f9492907af99edf Mon Sep 17 00:00:00 2001 From: Sean Chen Date: Tue, 26 Apr 2022 12:52:36 -0500 Subject: [PATCH 07/36] Shut down node b as well --- tools/integration-test/src/tests/execute_schedule.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/integration-test/src/tests/execute_schedule.rs b/tools/integration-test/src/tests/execute_schedule.rs index c459cea5a4..0fd53a6766 100644 --- a/tools/integration-test/src/tests/execute_schedule.rs +++ b/tools/integration-test/src/tests/execute_schedule.rs @@ -74,6 +74,7 @@ impl BinaryChannelTest for ExecuteScheduleTest { assert_eq!(summary.events.len(), 1); chains.node_a.value().kill()?; + chains.node_b.value().kill()?; relay_path.execute_schedule()?; From 
fe08ba3ce46a302c24775bd373d456688743f623 Mon Sep 17 00:00:00 2001 From: Sean Chen Date: Mon, 9 May 2022 09:15:39 -0500 Subject: [PATCH 08/36] Debugging execute_schedule test --- relayer/src/link/relay_path.rs | 4 +-- .../src/tests/execute_schedule.rs | 28 +++++++++++++++---- 2 files changed, 24 insertions(+), 8 deletions(-) diff --git a/relayer/src/link/relay_path.rs b/relayer/src/link/relay_path.rs index f2f164450c..c25b1a7d8f 100644 --- a/relayer/src/link/relay_path.rs +++ b/relayer/src/link/relay_path.rs @@ -96,8 +96,8 @@ pub struct RelayPath { // mostly timeout packet messages. // The operational data targeting the destination chain // comprises mostly RecvPacket and Ack msgs. - pub(crate) src_operational_data: Queue, - pub(crate) dst_operational_data: Queue, + pub src_operational_data: Queue, + pub dst_operational_data: Queue, // Toggle for the transaction confirmation mechanism. confirm_txes: bool, diff --git a/tools/integration-test/src/tests/execute_schedule.rs b/tools/integration-test/src/tests/execute_schedule.rs index 0fd53a6766..ff488902af 100644 --- a/tools/integration-test/src/tests/execute_schedule.rs +++ b/tools/integration-test/src/tests/execute_schedule.rs @@ -60,27 +60,43 @@ impl BinaryChannelTest for ExecuteScheduleTest { chains.handle_a().clone(), chains.handle_b().clone(), link_opts, - false, + true, )?; let relay_path = link.a_to_b; + info!("Calling schedule_packet_clearing"); + // relay pending packets relay_path.schedule_packet_clearing(None)?; + + info!("Calling refresh_schedule"); + relay_path.refresh_schedule()?; + + info!("Calling execute_schedule"); + relay_path.execute_schedule()?; - let summary = relay_path.process_pending_txs(Resubmit::No); + info!("Calling process_pending_txs"); - assert_eq!(summary.events.len(), 1); + assert_eventually_succeed("process_pending_tx", 10, Duration::from_secs(1), || { + let summary = relay_path.process_pending_txs(Resubmit::No); + info!("RELAY SUMMARY: {:?}", summary); + assert_gt("Summary should not be empty", &summary.events.len(), &0) + })?; chains.node_a.value().kill()?; chains.node_b.value().kill()?; - relay_path.execute_schedule()?; + match relay_path.execute_schedule() { + Ok(_) => panic!("Expected an error"), + Err(_e) => {} + } - let summary = relay_path.process_pending_txs(Resubmit::No); + assert!(!relay_path.src_operational_data.is_empty()); + assert!(!relay_path.dst_operational_data.is_empty()); - assert_eq!(summary.events.len(), 1); + // assert_eq!(summary.events.len(), 1); Ok(()) } From 37d96b7e774f62dce00e59f953b3456d4b2782e7 Mon Sep 17 00:00:00 2001 From: Sean Chen Date: Tue, 10 May 2022 08:49:21 -0500 Subject: [PATCH 09/36] Add Debug derivations --- relayer/src/link/operational_data.rs | 8 ++++---- relayer/src/link/relay_path.rs | 6 ++++++ relayer/src/util/queue.rs | 1 + tools/integration-test/src/tests/execute_schedule.rs | 9 +++++++-- 4 files changed, 18 insertions(+), 6 deletions(-) diff --git a/relayer/src/link/operational_data.rs b/relayer/src/link/operational_data.rs index 82c1cae72a..92f8de8c7d 100644 --- a/relayer/src/link/operational_data.rs +++ b/relayer/src/link/operational_data.rs @@ -17,7 +17,7 @@ use crate::chain::tx::TrackedMsgs; use crate::link::error::LinkError; use crate::link::RelayPath; -#[derive(Clone, Copy, PartialEq)] +#[derive(Clone, Copy, PartialEq, Debug)] pub enum OperationalDataTarget { Source, Destination, @@ -73,7 +73,7 @@ impl From> for TrackedEvents { /// /// Comprises the proto-encoded packet message, /// alongside the event which generated it. 
-#[derive(Clone)] +#[derive(Clone, Debug)] pub struct TransitMessage { pub event: IbcEvent, pub msg: Any, @@ -86,7 +86,7 @@ pub struct TransitMessage { /// - `proofs_height`: represents the height for the proofs in all the messages. /// Note: this is the height at which the proofs are queried. A client consensus state at /// `proofs_height + 1` must exist on-chain in order to verify the proofs. -#[derive(Clone)] +#[derive(Clone, Debug)] pub struct OperationalData { pub proofs_height: Height, pub batch: Vec, @@ -302,7 +302,7 @@ impl OperationalData { /// A struct that holds everything that is required to calculate and deal with the connection-delay /// feature. -#[derive(Clone)] +#[derive(Clone, Debug)] struct ConnectionDelay { delay: Duration, scheduled_time: Instant, diff --git a/relayer/src/link/relay_path.rs b/relayer/src/link/relay_path.rs index c25b1a7d8f..ebebabf7bb 100644 --- a/relayer/src/link/relay_path.rs +++ b/relayer/src/link/relay_path.rs @@ -1376,6 +1376,9 @@ impl RelayPath { pub fn execute_schedule(&self) -> Result<(), LinkError> { let (src_ods, dst_ods) = self.try_fetch_scheduled_operational_data()?; + // info!("Src op data: {:?}", src_ods); + // info!("Dst op data: {:?}", dst_ods); + for od in dst_ods { let reply = self.relay_from_operational_data::(od.clone())?; @@ -1606,6 +1609,9 @@ impl RelayPath { fn try_fetch_scheduled_operational_data( &self, ) -> Result<(VecDeque, VecDeque), LinkError> { + // info!("Src operational data: {:?}", self.src_operational_data); + // info!("Dst operational data: {:?}", self.dst_operational_data); + // Extracts elements from a Vec when the predicate returns true. // The mutable vector is then updated to the remaining unextracted elements. fn partition( diff --git a/relayer/src/util/queue.rs b/relayer/src/util/queue.rs index 27d501eb33..74f30d96b9 100644 --- a/relayer/src/util/queue.rs +++ b/relayer/src/util/queue.rs @@ -8,6 +8,7 @@ use crate::util::lock::LockExt; /// mutable reference. We only expose subset of VecDeque methods /// that does not return any inner reference, so that the RefCell /// can never panic caused by simultaneous borrow and borrow_mut. 
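+///
+/// A hedged usage sketch (`take` and `replace` are the accessors `RelayPath`
+/// uses; the queue only ever hands out owned values):
+///
+/// ```ignore
+/// let queue: Queue<u32> = Queue::new();
+/// queue.replace(VecDeque::from(vec![1, 2, 3]));
+/// let drained = queue.take(); // leaves the queue empty
+/// assert_eq!(drained.len(), 3);
+/// ```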
+#[derive(Debug)]
 pub struct Queue<T>(Arc<RwLock<VecDeque<T>>>);

 impl<T> Queue<T> {
diff --git a/tools/integration-test/src/tests/execute_schedule.rs b/tools/integration-test/src/tests/execute_schedule.rs
index ff488902af..80dcf22dc8 100644
--- a/tools/integration-test/src/tests/execute_schedule.rs
+++ b/tools/integration-test/src/tests/execute_schedule.rs
@@ -88,13 +88,18 @@ impl BinaryChannelTest for ExecuteScheduleTest {
         chains.node_a.value().kill()?;
         chains.node_b.value().kill()?;

+        // `execute_schedule` calls `try_fetch_scheduled_operational_data`,
+        // which returns no src and no dst operational data.
+        // `try_fetch_scheduled_operational_data` partitions the `src_operational_data` and
+        // `dst_operational_data` fields on `RelayPath` based on some predicates; however,
+        // both of these fields start off as empty before partitioning occurs.
         match relay_path.execute_schedule() {
             Ok(_) => panic!("Expected an error"),
             Err(_e) => {}
         }

-        assert!(!relay_path.src_operational_data.is_empty());
-        assert!(!relay_path.dst_operational_data.is_empty());
+        // assert!(!relay_path.src_operational_data.is_empty());
+        // assert!(!relay_path.dst_operational_data.is_empty());

         // assert_eq!(summary.events.len(), 1);

From a1267f46083166c07ee5bb8f566735afd01f34d0 Mon Sep 17 00:00:00 2001
From: Sean Chen
Date: Wed, 11 May 2022 12:09:38 -0500
Subject: [PATCH 10/36] Update integration test so that its flow correctly tests `execute_schedule`
---
 relayer/src/link/relay_path.rs | 16 ++++-----
 .../src/tests/execute_schedule.rs | 34 +++-------------
 2 files changed, 13 insertions(+), 37 deletions(-)

diff --git a/relayer/src/link/relay_path.rs b/relayer/src/link/relay_path.rs
index 071548daf1..2a9d33d9ea 100644
--- a/relayer/src/link/relay_path.rs
+++ b/relayer/src/link/relay_path.rs
@@ -352,7 +352,7 @@ impl<ChainA: ChainHandle, ChainB: ChainHandle> RelayPath<ChainA, ChainB> {
         for i in 1..=MAX_RETRIES {
             let cleared = self
                 .build_recv_packet_and_timeout_msgs(height)
-                .and_then(|()| self.build_packet_ack_msgs(height));
+                .and_then(|_| self.build_packet_ack_msgs(height));

             match cleared {
                 Ok(()) => return Ok(()),
@@ -367,6 +367,7 @@ impl<ChainA: ChainHandle, ChainB: ChainHandle> RelayPath<ChainA, ChainB> {
         }
     }

     /// Clears any packets that were sent before `height`.
+    /// If no height is passed in, then the latest height of the source chain is used.
     pub fn schedule_packet_clearing(&self, height: Option<Height>) -> Result<(), LinkError> {
         let span = span!(Level::DEBUG, "clear");
         let _enter = span.enter();
@@ -1601,16 +1602,15 @@ impl<ChainA: ChainHandle, ChainB: ChainHandle> RelayPath<ChainA, ChainB> {
         Ok(())
     }

-    /// Pulls out the operational elements with elapsed delay period and that can
-    /// now be processed.
+    /// Partitions the source and destination chains' operational data into 'elapsed'
+    /// and 'unelapsed' buckets, returning the elapsed operational data of both chains.
+    /// 'Elapsed' means that the data's connection delay has passed, judged against its
+    /// chain's latest timestamp and latest height, allowing for the chain's maximum
+    /// expected block time.
+    /// Operational data that has not yet elapsed is re-added to the pending queue to
+    /// be checked again at a later time.
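+    ///
+    /// A hedged sketch of the per-item check (accessor names as used elsewhere in
+    /// this file; the closures are lazy so that chain queries only happen when a
+    /// connection delay is actually configured):
+    ///
+    /// ```ignore
+    /// let elapsed = od.has_conn_delay_elapsed(
+    ///     &|| self.src_time_latest(),    // latest chain timestamp
+    ///     &|| self.src_max_block_time(), // max expected block time
+    ///     &|| self.src_latest_height(),  // latest chain height
+    /// )?;
+    /// ```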
     fn try_fetch_scheduled_operational_data(
         &self,
     ) -> Result<(VecDeque<OperationalData>, VecDeque<OperationalData>), LinkError> {
-        // info!("Src operational data: {:?}", self.src_operational_data);
-        // info!("Dst operational data: {:?}", self.dst_operational_data);
-
-        // Extracts elements from a Vec when the predicate returns true.
-        // The mutable vector is then updated to the remaining unextracted elements.
         fn partition<T>(
             queue: VecDeque<T>,
             pred: impl Fn(&T) -> Result<bool, LinkError>,
diff --git a/tools/integration-test/src/tests/execute_schedule.rs b/tools/integration-test/src/tests/execute_schedule.rs
index 80dcf22dc8..8962f01dda 100644
--- a/tools/integration-test/src/tests/execute_schedule.rs
+++ b/tools/integration-test/src/tests/execute_schedule.rs
@@ -1,7 +1,7 @@
 use ibc_test_framework::prelude::*;
 use ibc_test_framework::util::random::random_u64_range;

-use ibc_relayer::link::{Link, LinkParameters, Resubmit};
+use ibc_relayer::link::{Link, LinkParameters};

 #[test]
 fn test_execute_schedule() -> Result<(), Error> {
@@ -66,43 +66,19 @@ impl BinaryChannelTest for ExecuteScheduleTest {

         info!("Calling schedule_packet_clearing");

-        // relay pending packets
         relay_path.schedule_packet_clearing(None)?;

-        info!("Calling refresh_schedule");
+        assert_eq!(relay_path.dst_operational_data.len(), 1);

-        relay_path.refresh_schedule()?;
-
-        info!("Calling execute_schedule");
-
-        relay_path.execute_schedule()?;
-
-        info!("Calling process_pending_txs");
-
-        assert_eventually_succeed("process_pending_tx", 10, Duration::from_secs(1), || {
-            let summary = relay_path.process_pending_txs(Resubmit::No);
-            info!("RELAY SUMMARY: {:?}", summary);
-            assert_gt("Summary should not be empty", &summary.events.len(), &0)
-        })?;
-
-        chains.node_a.value().kill()?;
         chains.node_b.value().kill()?;

-        // `execute_schedule` calls `try_fetch_scheduled_operational_data`,
-        // which returns no src and no dst operational data.
-        // `try_fetch_scheduled_operational_data` partitions the `src_operational_data` and
-        // `dst_operational_data` fields on `RelayPath` based on some predicates; however,
-        // both of these fields start off as empty before partitioning occurs.
         match relay_path.execute_schedule() {
             Ok(_) => panic!("Expected an error"),
-            Err(_e) => {}
+            Err(_e) => {
+                assert_eq!(relay_path.dst_operational_data.len(), 1);
+            }
         }

-        // assert!(!relay_path.src_operational_data.is_empty());
-        // assert!(!relay_path.dst_operational_data.is_empty());
-
-        // assert_eq!(summary.events.len(), 1);
-
         Ok(())
     }
 }

From 1fc709deb711758c14a894e1a962db8e10955703 Mon Sep 17 00:00:00 2001
From: Sean Chen
Date: Wed, 11 May 2022 12:19:47 -0500
Subject: [PATCH 11/36] Attempt to perform a second IBC transfer
---
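Each transfer below leaves a packet pending on the channel; clearing then turns
pending packets into operational data queued on the relay path's destination
queue. A hedged sketch of the expectation (API names from this test; note that
packets cleared at the same height may be batched into a single operational-data
item, which is what the later "Get assertions passing for now" commit runs into):

    relay_path.schedule_packet_clearing(None)?; // None => source chain's latest height
    assert!(!relay_path.dst_operational_data.is_empty());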
 .../src/tests/execute_schedule.rs | 21 +++++++++++++++----
 1 file changed, 17 insertions(+), 4 deletions(-)

diff --git a/tools/integration-test/src/tests/execute_schedule.rs b/tools/integration-test/src/tests/execute_schedule.rs
index 8962f01dda..da4a02d6ee 100644
--- a/tools/integration-test/src/tests/execute_schedule.rs
+++ b/tools/integration-test/src/tests/execute_schedule.rs
@@ -35,9 +35,10 @@ impl BinaryChannelTest for ExecuteScheduleTest {
         let amount1 = random_u64_range(1000, 5000);
+        let amount2 = random_u64_range(1000, 5000);

         info!(
-            "Performing IBC transfer with amount {}, which should be relayed because it's an ordered channel",
+            "Performing first IBC transfer with amount {}, which should be relayed because it's an ordered channel",
             amount1
         );
@@ -50,6 +51,20 @@ impl BinaryChannelTest for ExecuteScheduleTest {
             &denom_a,
         )?;

+        info!(
+            "Performing second IBC transfer with amount {}, which should be relayed because it's an ordered channel",
+            amount2
+        );
+
+        chains.node_a.chain_driver().transfer_token(
+            &channel.port_a.as_ref(),
+            &channel.channel_id_a.as_ref(),
+            &wallet_a.address(),
+            &wallet_b.address(),
+            amount2,
+            &denom_a,
+        )?;
+
         sleep(Duration::from_secs(2));

         let link_opts = LinkParameters {
@@ -64,11 +79,9 @@ impl BinaryChannelTest for ExecuteScheduleTest {
         )?;
         let relay_path = link.a_to_b;

-        info!("Calling schedule_packet_clearing");
-
         relay_path.schedule_packet_clearing(None)?;

-        assert_eq!(relay_path.dst_operational_data.len(), 1);
+        assert_eq!(relay_path.dst_operational_data.len(), 2);

         chains.node_b.value().kill()?;

From cfc7c70a520c672fbbc962d58a74ac5255233dd5 Mon Sep 17 00:00:00 2001
From: Sean Chen
Date: Wed, 11 May 2022 12:32:12 -0500
Subject: [PATCH 12/36] Remove `info!` calls
---
 relayer/src/link/relay_path.rs | 4 ----
 1 file changed, 4 deletions(-)

diff --git a/relayer/src/link/relay_path.rs b/relayer/src/link/relay_path.rs
index 2a9d33d9ea..c18a7f14b3 100644
--- a/relayer/src/link/relay_path.rs
+++ b/relayer/src/link/relay_path.rs
@@ -1373,10 +1373,6 @@ impl<ChainA: ChainHandle, ChainB: ChainHandle> RelayPath<ChainA, ChainB> {
     /// with one or more transaction hash(es).
     pub fn execute_schedule(&self) -> Result<(), LinkError> {
         let (src_ods, dst_ods) = self.try_fetch_scheduled_operational_data()?;
-
-        // info!("Src op data: {:?}", src_ods);
-        // info!("Dst op data: {:?}", dst_ods);
-
         for od in dst_ods {
             let reply =
                 self.relay_from_operational_data::<relay_sender::AsyncSender>(od.clone())?;

From 4c327b29ab7ede00b6ef7e1041416287aae85e7c Mon Sep 17 00:00:00 2001
From: Sean Chen
Date: Wed, 11 May 2022 12:42:00 -0500
Subject: [PATCH 13/36] Increase sleep timeout duration
---
 tools/integration-test/src/tests/execute_schedule.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tools/integration-test/src/tests/execute_schedule.rs b/tools/integration-test/src/tests/execute_schedule.rs
index da4a02d6ee..b0acdba438 100644
--- a/tools/integration-test/src/tests/execute_schedule.rs
+++ b/tools/integration-test/src/tests/execute_schedule.rs
@@ -65,7 +65,7 @@ impl BinaryChannelTest for ExecuteScheduleTest {
             &denom_a,
         )?;

-        sleep(Duration::from_secs(2));
+        sleep(Duration::from_secs(4));

         let link_opts = LinkParameters {
             src_port_id: channel.port_a.clone().into_value(),

From dbe86caf9dc96d9a07c55560b9c51705abbe854c Mon Sep 17 00:00:00 2001
From: Sean Chen
Date: Wed, 11 May 2022 13:02:43 -0500
Subject: [PATCH 14/36] Incorporate new test framework features
---
 tools/integration-test/src/tests/execute_schedule.rs | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/tools/integration-test/src/tests/execute_schedule.rs b/tools/integration-test/src/tests/execute_schedule.rs
index b0acdba438..bd285b4b6c 100644
--- a/tools/integration-test/src/tests/execute_schedule.rs
+++ b/tools/integration-test/src/tests/execute_schedule.rs
@@ -42,7 +42,7 @@ impl BinaryChannelTest for ExecuteScheduleTest {
             amount1
         );

-        chains.node_a.chain_driver().transfer_token(
+        chains.node_a.chain_driver().ibc_token_transfer(
             &channel.port_a.as_ref(),
             &channel.channel_id_a.as_ref(),
             &wallet_a.address(),
@@ -56,7 +56,7 @@ impl BinaryChannelTest for ExecuteScheduleTest {
             amount2
         );

-        chains.node_a.chain_driver().transfer_token(
+        chains.node_a.chain_driver().ibc_token_transfer(
             &channel.port_a.as_ref(),
             &channel.channel_id_a.as_ref(),
             &wallet_a.address(),
@@ -65,7 +65,7 @@ impl BinaryChannelTest for ExecuteScheduleTest {
             &denom_a,
         )?;

-        sleep(Duration::from_secs(4));
+        sleep(Duration::from_secs(2));

         let link_opts = LinkParameters {
             src_port_id: channel.port_a.clone().into_value(),

From ac8532dfabb80c5e98faa47c398caeac2351647c Mon Sep 17 00:00:00 2001
From: Sean Chen
Date: Wed, 11 May 2022 13:03:55 -0500
Subject: [PATCH 15/36] Remove unnecessary `sleep` call
---
 tools/integration-test/src/tests/execute_schedule.rs | 2 --
 1 file changed, 2 deletions(-)

diff --git
a/tools/integration-test/src/tests/execute_schedule.rs b/tools/integration-test/src/tests/execute_schedule.rs index bd285b4b6c..c0dd13f74e 100644 --- a/tools/integration-test/src/tests/execute_schedule.rs +++ b/tools/integration-test/src/tests/execute_schedule.rs @@ -65,8 +65,6 @@ impl BinaryChannelTest for ExecuteScheduleTest { &denom_a, )?; - sleep(Duration::from_secs(2)); - let link_opts = LinkParameters { src_port_id: channel.port_a.clone().into_value(), src_channel_id: channel.channel_id_a.clone().into_value(), From c3b08586c4e2d64b7c36053f994685d747f9a31f Mon Sep 17 00:00:00 2001 From: Sean Chen Date: Wed, 11 May 2022 13:46:03 -0500 Subject: [PATCH 16/36] Correctly use new test framework features --- .../src/tests/execute_schedule.rs | 31 ++++++++----------- 1 file changed, 13 insertions(+), 18 deletions(-) diff --git a/tools/integration-test/src/tests/execute_schedule.rs b/tools/integration-test/src/tests/execute_schedule.rs index c0dd13f74e..a2d4f6cfb7 100644 --- a/tools/integration-test/src/tests/execute_schedule.rs +++ b/tools/integration-test/src/tests/execute_schedule.rs @@ -24,16 +24,6 @@ impl BinaryChannelTest for ExecuteScheduleTest { chains: ConnectedChains, channel: ConnectedChannel, ) -> Result<(), Error> { - let denom_a = chains.node_a.denom(); - - let wallet_a = chains.node_a.wallets().user1().cloned(); - let wallet_b = chains.node_b.wallets().user1().cloned(); - - let _balance_a = chains - .node_a - .chain_driver() - .query_balance(&wallet_a.address(), &denom_a)?; - let amount1 = random_u64_range(1000, 5000); let amount2 = random_u64_range(1000, 5000); @@ -42,13 +32,13 @@ impl BinaryChannelTest for ExecuteScheduleTest { amount1 ); - chains.node_a.chain_driver().ibc_token_transfer( + chains.node_a.chain_driver().ibc_transfer_token( &channel.port_a.as_ref(), &channel.channel_id_a.as_ref(), - &wallet_a.address(), - &wallet_b.address(), + &chains.node_a.wallets().user1(), + &chains.node_b.wallets().user1().address(), + &chains.node_a.denom(), amount1, - &denom_a, )?; info!( @@ -56,13 +46,13 @@ impl BinaryChannelTest for ExecuteScheduleTest { amount2 ); - chains.node_a.chain_driver().ibc_token_transfer( + chains.node_a.chain_driver().ibc_transfer_token( &channel.port_a.as_ref(), &channel.channel_id_a.as_ref(), - &wallet_a.address(), - &wallet_b.address(), + &chains.node_a.wallets().user1(), + &chains.node_b.wallets().user1().address(), + &chains.node_a.denom(), amount2, - &denom_a, )?; let link_opts = LinkParameters { @@ -79,6 +69,11 @@ impl BinaryChannelTest for ExecuteScheduleTest { relay_path.schedule_packet_clearing(None)?; + info!( + "Dst operational data: {:?}", + relay_path.dst_operational_data + ); + assert_eq!(relay_path.dst_operational_data.len(), 2); chains.node_b.value().kill()?; From 7b50b162c066def4ed07f240783efd82fb930099 Mon Sep 17 00:00:00 2001 From: Sean Chen Date: Wed, 11 May 2022 14:02:01 -0500 Subject: [PATCH 17/36] Get assertions passing for now --- tools/integration-test/src/tests/execute_schedule.rs | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/tools/integration-test/src/tests/execute_schedule.rs b/tools/integration-test/src/tests/execute_schedule.rs index a2d4f6cfb7..f15fcc00fb 100644 --- a/tools/integration-test/src/tests/execute_schedule.rs +++ b/tools/integration-test/src/tests/execute_schedule.rs @@ -69,19 +69,16 @@ impl BinaryChannelTest for ExecuteScheduleTest { relay_path.schedule_packet_clearing(None)?; - info!( - "Dst operational data: {:?}", - relay_path.dst_operational_data - ); - - 
assert_eq!(relay_path.dst_operational_data.len(), 2);
+        assert_eq!(relay_path.dst_operational_data.len(), 1);

         chains.node_b.value().kill()?;

         match relay_path.execute_schedule() {
             Ok(_) => panic!("Expected an error"),
             Err(_e) => {
-                assert_eq!(relay_path.dst_operational_data.len(), 1);
+                assert_eq!(relay_path.dst_operational_data.len(), 0);
             }
         }

From de2ea2578f0dc33e5f35d0425b26e178043e0973 Mon Sep 17 00:00:00 2001
From: Sean Chen
Date: Fri, 13 May 2022 10:55:45 -0500
Subject: [PATCH 18/36] Send two transactions, one in each direction
---
 .../src/tests/execute_schedule.rs | 50 ++++++++++++-------
 1 file changed, 32 insertions(+), 18 deletions(-)

diff --git a/tools/integration-test/src/tests/execute_schedule.rs b/tools/integration-test/src/tests/execute_schedule.rs
index f15fcc00fb..037dae5d6a 100644
--- a/tools/integration-test/src/tests/execute_schedule.rs
+++ b/tools/integration-test/src/tests/execute_schedule.rs
@@ -28,7 +28,7 @@ impl BinaryChannelTest for ExecuteScheduleTest {
         let amount2 = random_u64_range(1000, 5000);

         info!(
-            "Performing first IBC transfer with amount {}, which should be relayed because it's an ordered channel",
+            "Performing first IBC transfer from chain A to chain B with amount {}",
             amount1
         );
@@ -42,43 +42,57 @@ impl BinaryChannelTest for ExecuteScheduleTest {
         );

         info!(
-            "Performing second IBC transfer with amount {}, which should be relayed because it's an ordered channel",
+            "Performing second IBC transfer from chain B to chain A with amount {}",
             amount2
         );

-        chains.node_a.chain_driver().ibc_transfer_token(
-            &channel.port_a.as_ref(),
-            &channel.channel_id_a.as_ref(),
-            &chains.node_a.wallets().user1(),
-            &chains.node_b.wallets().user1().address(),
-            &chains.node_a.denom(),
+        chains.node_b.chain_driver().ibc_transfer_token(
+            &channel.port_b.as_ref(),
+            &channel.channel_id_b.as_ref(),
+            &chains.node_b.wallets().user1(),
+            &chains.node_a.wallets().user1().address(),
+            &chains.node_b.denom(),
             amount2,
         )?;

-        let link_opts = LinkParameters {
+        let chain_a_link_opts = LinkParameters {
             src_port_id: channel.port_a.clone().into_value(),
             src_channel_id: channel.channel_id_a.clone().into_value(),
         };
+        let chain_b_link_opts = LinkParameters {
+            src_port_id: channel.port_b.clone().into_value(),
+            src_channel_id: channel.channel_id_b.clone().into_value(),
+        };
+
-        let link = Link::new_from_opts(
+        let chain_a_link = Link::new_from_opts(
             chains.handle_a().clone(),
             chains.handle_b().clone(),
-            link_opts,
+            chain_a_link_opts,
             true,
         )?;
+        let chain_b_link = Link::new_from_opts(
+            chains.handle_b().clone(),
+            chains.handle_a().clone(),
+            chain_b_link_opts,
+            true,
+        )?;

-        let relay_path = link.a_to_b;
+        let relay_path_a_to_b = chain_a_link.a_to_b;
+        let relay_path_b_to_a = chain_b_link.a_to_b;

-        relay_path.schedule_packet_clearing(None)?;
+        relay_path_a_to_b.schedule_packet_clearing(None)?;
+        relay_path_b_to_a.schedule_packet_clearing(None)?;

-        assert_eq!(relay_path.dst_operational_data.len(), 1);
+        assert_eq!(relay_path_a_to_b.dst_operational_data.len(), 1);
+        assert_eq!(relay_path_b_to_a.dst_operational_data.len(), 1);

         chains.node_b.value().kill()?;

-        match relay_path.execute_schedule() {
+        match relay_path_a_to_b.execute_schedule() {
             Ok(_) => panic!("Expected an error"),
             Err(_e) => {
-
assert_eq!(relay_path_a_to_b.dst_operational_data.len(), 0); + assert_eq!(relay_path_b_to_a.dst_operational_data.len(), 1); } } From 543cfa77bd4e74db9ccd3588467923762a1bc39d Mon Sep 17 00:00:00 2001 From: Sean Chen Date: Fri, 13 May 2022 11:37:57 -0500 Subject: [PATCH 19/36] Add doc comment for test --- tools/integration-test/src/tests/execute_schedule.rs | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/tools/integration-test/src/tests/execute_schedule.rs b/tools/integration-test/src/tests/execute_schedule.rs index 037dae5d6a..e674594a47 100644 --- a/tools/integration-test/src/tests/execute_schedule.rs +++ b/tools/integration-test/src/tests/execute_schedule.rs @@ -1,3 +1,14 @@ +//! This test ensures that the `RelayPath::execute_schedule` method does not +//! drop any pending scheduled operational data when a prior transaction fails +//! to send. Subsequent pieces of operational data that were scheduled should +//! be re-queued and not dropped. +//! +//! In order to test this behavior, the test manually relays two IBC transfers +//! between two chains, one transaction going from chain A to chain B, the other +//! going from chain B to chain A. Chain B is then shut down and the transactions +//! queued up again. The first of the two transactions should fail because the +//! chain was shut down. The second transaction should still be queued up. + use ibc_test_framework::prelude::*; use ibc_test_framework::util::random::random_u64_range; From 8cb0aca7ff31c505367f0342e4a3e6855938cc09 Mon Sep 17 00:00:00 2001 From: Sean Chen Date: Fri, 13 May 2022 13:18:15 -0500 Subject: [PATCH 20/36] Improve panic messages --- tools/integration-test/src/tests/execute_schedule.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/integration-test/src/tests/execute_schedule.rs b/tools/integration-test/src/tests/execute_schedule.rs index e674594a47..99e7fb2adb 100644 --- a/tools/integration-test/src/tests/execute_schedule.rs +++ b/tools/integration-test/src/tests/execute_schedule.rs @@ -100,7 +100,7 @@ impl BinaryChannelTest for ExecuteScheduleTest { chains.node_b.value().kill()?; match relay_path_a_to_b.execute_schedule() { - Ok(_) => panic!("Expected an error"), + Ok(_) => panic!("Expected an error when relaying tx from A to B"), Err(_e) => { assert_eq!(relay_path_a_to_b.dst_operational_data.len(), 0); assert_eq!(relay_path_b_to_a.dst_operational_data.len(), 1); From 3ddc4ef0b6063b8b18bb906aeecf7c8276d76cd7 Mon Sep 17 00:00:00 2001 From: Sean Chen Date: Fri, 20 May 2022 09:37:23 -0500 Subject: [PATCH 21/36] Refactor test so that it is actually testing the desired behavior --- .../src/tests/execute_schedule.rs | 74 ++++++------------- 1 file changed, 23 insertions(+), 51 deletions(-) diff --git a/tools/integration-test/src/tests/execute_schedule.rs b/tools/integration-test/src/tests/execute_schedule.rs index 99e7fb2adb..8f64e2eb5b 100644 --- a/tools/integration-test/src/tests/execute_schedule.rs +++ b/tools/integration-test/src/tests/execute_schedule.rs @@ -3,17 +3,19 @@ //! to send. Subsequent pieces of operational data that were scheduled should //! be re-queued and not dropped. //! -//! In order to test this behavior, the test manually relays two IBC transfers -//! between two chains, one transaction going from chain A to chain B, the other -//! going from chain B to chain A. Chain B is then shut down and the transactions -//! queued up again. The first of the two transactions should fail because the -//! chain was shut down. The second transaction should still be queued up. 
+//! In order to test this behavior, the test manually relays at least 2 IBC transfers
+//! from chain A to chain B. Chain B is then shut down in order to force the transactions
+//! to be queued up again for re-submission. It is expected that the first transfer
+//! be dropped and not be present in the pending queue, but all of the subsequent
+//! transactions should exist in the pending queue.

 use ibc_test_framework::prelude::*;
 use ibc_test_framework::util::random::random_u64_range;

 use ibc_relayer::link::{Link, LinkParameters};

+const NUM_TXS: usize = 10;
+
 #[test]
 fn test_execute_schedule() -> Result<(), Error> {
     run_binary_channel_test(&ExecuteScheduleTest)
@@ -36,44 +38,11 @@ impl BinaryChannelTest for ExecuteScheduleTest {
         channel: ConnectedChannel<ChainA, ChainB>,
     ) -> Result<(), Error> {
         let amount1 = random_u64_range(1000, 5000);
-        let amount2 = random_u64_range(1000, 5000);
-
-        info!(
-            "Performing first IBC transfer from chain A to chain B with amount {}",
-            amount1
-        );
-
-        chains.node_a.chain_driver().ibc_transfer_token(
-            &channel.port_a.as_ref(),
-            &channel.channel_id_a.as_ref(),
-            &chains.node_a.wallets().user1(),
-            &chains.node_b.wallets().user1().address(),
-            &chains.node_a.denom(),
-            amount1,
-        )?;
-
-        info!(
-            "Performing second IBC transfer from chain B to chain A with amount {}",
-            amount2
-        );
-
-        chains.node_b.chain_driver().ibc_transfer_token(
-            &channel.port_b.as_ref(),
-            &channel.channel_id_b.as_ref(),
-            &chains.node_b.wallets().user1(),
-            &chains.node_a.wallets().user1().address(),
-            &chains.node_b.denom(),
-            amount2,
-        )?;

         let chain_a_link_opts = LinkParameters {
             src_port_id: channel.port_a.clone().into_value(),
             src_channel_id: channel.channel_id_a.clone().into_value(),
         };
-        let chain_b_link_opts = LinkParameters {
-            src_port_id: channel.port_b.clone().into_value(),
-            src_channel_id: channel.channel_id_b.clone().into_value(),
-        };

         let chain_a_link = Link::new_from_opts(
             chains.handle_a().clone(),
@@ -81,29 +50,32 @@ impl BinaryChannelTest for ExecuteScheduleTest {
             chain_a_link_opts,
             true,
         )?;
-        let chain_b_link = Link::new_from_opts(
-            chains.handle_b().clone(),
-            chains.handle_a().clone(),
-            chain_b_link_opts,
-            true,
-        )?;

         let relay_path_a_to_b = chain_a_link.a_to_b;
-        let relay_path_b_to_a = chain_b_link.a_to_b;

-        relay_path_a_to_b.schedule_packet_clearing(None)?;
-        relay_path_b_to_a.schedule_packet_clearing(None)?;
+        for i in 0..NUM_TXS {
+            chains.node_a.chain_driver().ibc_transfer_token(
+                &channel.port_a.as_ref(),
+                &channel.channel_id_a.as_ref(),
+                &chains.node_a.wallets().user1(),
+                &chains.node_b.wallets().user1().address(),
+                &chains.node_a.denom(),
+                amount1,
+            )?;
+
+            relay_path_a_to_b.schedule_packet_clearing(None)?;
+
+            info!("Performing IBC transfer #{} from chain A to chain B", i);
+        }

-        assert_eq!(relay_path_a_to_b.dst_operational_data.len(), 1);
-        assert_eq!(relay_path_b_to_a.dst_operational_data.len(), 1);
+        assert_eq!(relay_path_a_to_b.dst_operational_data.len(), NUM_TXS);

         chains.node_b.value().kill()?;

         match relay_path_a_to_b.execute_schedule() {
             Ok(_) => panic!("Expected an error when relaying tx from A to B"),
             Err(_e) => {
-                assert_eq!(relay_path_a_to_b.dst_operational_data.len(), 0);
-                assert_eq!(relay_path_b_to_a.dst_operational_data.len(), 1);
+                assert_eq!(relay_path_a_to_b.dst_operational_data.len(), NUM_TXS - 1);
             }
         }

From dedce23b1ac46e6f072fe7e7a1d2d17fe67e25a6 Mon Sep 17 00:00:00 2001
From: Sean Chen
Date: Fri, 20 May 2022 12:25:07 -0500
Subject: [PATCH 22/36] Attempt at fixing `execute_schedule` leaky logic
---
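The shape of this fix, as a minimal standalone model (illustrative only; the
real change below works on `OperationalData`, also checks connection delays,
and returns the unprocessed tail to the caller for re-queueing):

    use std::collections::VecDeque;

    fn drain_until_failure<T, E>(
        mut ops: VecDeque<T>,
        mut send: impl FnMut(&T) -> Result<(), E>,
    ) -> Result<(), (VecDeque<T>, E)> {
        while let Some(od) = ops.pop_front() {
            if let Err(e) = send(&od) {
                // Hand back everything that was not successfully sent,
                // including the failed item, so nothing is silently dropped.
                ops.push_front(od);
                return Err((ops, e));
            }
        }
        Ok(())
    }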
188 ++++++++++++------ relayer/src/util/queue.rs | 6 + relayer/src/worker/packet.rs | 6 +- .../src/tests/execute_schedule.rs | 10 +- 4 files changed, 138 insertions(+), 72 deletions(-) diff --git a/relayer/src/link/relay_path.rs b/relayer/src/link/relay_path.rs index c18a7f14b3..25572cc20b 100644 --- a/relayer/src/link/relay_path.rs +++ b/relayer/src/link/relay_path.rs @@ -1364,26 +1364,138 @@ impl RelayPath { } } - /// Checks if there are any operational data items ready, - /// and if so performs the relaying of corresponding packets - /// to the target chain. + /// Drives the relaying of elapsed operational data items meant for + /// the source chain forward. Should an error occur when attempting + /// to relay a piece of operational data, this function returns all + /// subsequent unprocessed pieces of operational data back to the caller + /// so that they can be re-queued. + /// + /// Pieces of operational data that have not elapsed yet are also placed + /// in the 'unprocessed' bucket. /// /// This method performs relaying using the asynchronous sender. /// Retains the operational data as pending, and associates it /// with one or more transaction hash(es). - pub fn execute_schedule(&self) -> Result<(), LinkError> { - let (src_ods, dst_ods) = self.try_fetch_scheduled_operational_data()?; - for od in dst_ods { - let reply = - self.relay_from_operational_data::(od.clone())?; + fn execute_schedule_with_src_operational_data>( + &mut self, + mut operations: I, + ) -> Result, (VecDeque, LinkError)> { + let mut unprocessed = VecDeque::new(); + + while let Some(od) = operations.next() { + match od.has_conn_delay_elapsed( + &|| self.src_time_latest(), + &|| self.src_max_block_time(), + &|| self.src_latest_height(), + ) { + Ok(elapsed) => { + if elapsed { + match self + .relay_from_operational_data::(od.clone()) + { + Ok(reply) => self.enqueue_pending_tx(reply, od), + Err(e) => { + unprocessed.push_back(od); + unprocessed.extend(operations); + + return Err((unprocessed, e)); + } + } + } else { + unprocessed.push_back(od); + } + } + Err(e) => { + unprocessed.push_back(od); + unprocessed.extend(operations); + + return Err((unprocessed, e)); + } + } + } + + Ok(unprocessed) + } + + /// Drives the relaying of elapsed operational data items meant for + /// the destination chain forward. Should an error occur when attempting + /// to relay a piece of operational data, this function returns all + /// subsequent unprocessed pieces of operational data back to the caller + /// so that they can be re-queued. + /// + /// Pieces of operational data that have not elapsed yet are also placed + /// in the 'unprocessed' bucket. + /// + /// This method performs relaying using the asynchronous sender. + /// Retains the operational data as pending, and associates it + /// with one or more transaction hash(es). 
+    fn execute_schedule_with_dst_operational_data<I: Iterator<Item = OperationalData>>(
+        &mut self,
+        mut operations: I,
+    ) -> Result<VecDeque<OperationalData>, (VecDeque<OperationalData>, LinkError)> {
+        let mut unprocessed = VecDeque::new();
+
+        while let Some(od) = operations.next() {
+            match od.has_conn_delay_elapsed(
+                &|| self.dst_time_latest(),
+                &|| self.dst_max_block_time(),
+                &|| self.dst_latest_height(),
+            ) {
+                Ok(elapsed) => {
+                    if elapsed {
+                        match self
+                            .relay_from_operational_data::<relay_sender::AsyncSender>(od.clone())
+                        {
+                            Ok(reply) => self.enqueue_pending_tx(reply, od),
+                            Err(e) => {
+                                unprocessed.push_back(od);
+                                unprocessed.extend(operations);
+
+                                return Err((unprocessed, e));
+                            }
+                        }
+                    } else {
+                        unprocessed.push_back(od);
+                    }
+                }
+                Err(e) => {
+                    unprocessed.push_back(od);
+                    unprocessed.extend(operations);
-            self.enqueue_pending_tx(reply, od);
+                    return Err((unprocessed, e));
+                }
+            }
         }
 
-        for od in src_ods {
-            let reply =
-                self.relay_from_operational_data::<relay_sender::AsyncSender>(od.clone())?;
-            self.enqueue_pending_tx(reply, od);
+        Ok(unprocessed)
+    }
+
+    /// While there are pending operational data items, this function
+    /// performs the relaying of packets corresponding to those
+    /// operational data items to both the source and destination chains.
+    ///
+    /// Any operational data items that do not get successfully relayed are
+    /// dropped. Subsequent pending operational data items that went unprocessed
+    /// are queued up again for re-submission.
+    pub fn execute_schedule(&mut self) -> Result<(), LinkError> {
+        let src_od_iter = self.src_operational_data.take().into_iter();
+
+        match self.execute_schedule_with_src_operational_data(src_od_iter) {
+            Ok(unprocessed_src_data) => self.src_operational_data = unprocessed_src_data.into(),
+            Err((unprocessed_src_data, e)) => {
+                self.src_operational_data = unprocessed_src_data.into();
+                return Err(e);
+            }
+        }
+
+        let dst_od_iter = self.dst_operational_data.take().into_iter();
+
+        match self.execute_schedule_with_dst_operational_data(dst_od_iter) {
+            Ok(unprocessed_dst_data) => self.dst_operational_data = unprocessed_dst_data.into(),
+            Err((unprocessed_dst_data, e)) => {
+                self.dst_operational_data = unprocessed_dst_data.into();
+                return Err(e);
+            }
         }
 
         Ok(())
@@ -1598,56 +1710,6 @@ impl<ChainA: ChainHandle, ChainB: ChainHandle> RelayPath<ChainA, ChainB> {
         Ok(())
     }
 
-    /// Partitions the source and destination chains' operational data into 'elapsed' and
-    /// 'unelapsed' buckets. The elapsed operational data of both chains is returned.
-    /// 'Elapsed' means the operational data has surpassed the latest timestamp of its chain,
-    /// the max block time of its chain, as well as its chain's latest height.
-    /// Any operational data that hasn't elapsed is considered unelapsed such that it is re-added
-    /// to the pending queue to be checked again at a later time.
-    fn try_fetch_scheduled_operational_data(
-        &self,
-    ) -> Result<(VecDeque<OperationalData>, VecDeque<OperationalData>), LinkError> {
-        fn partition<T>(
-            queue: VecDeque<T>,
-            pred: impl Fn(&T) -> Result<bool, LinkError>,
-        ) -> Result<(VecDeque<T>, VecDeque<T>), LinkError> {
-            let mut true_res = VecDeque::new();
-            let mut false_res = VecDeque::new();
-
-            for e in queue.into_iter() {
-                if pred(&e)? {
-                    true_res.push_back(e);
-                } else {
-                    false_res.push_back(e);
-                }
-            }
-
-            Ok((true_res, false_res))
-        }
-
-        let (elapsed_src_ods, unelapsed_src_ods) =
-            partition(self.src_operational_data.take(), |op| {
-                op.has_conn_delay_elapsed(
-                    &|| self.src_time_latest(),
-                    &|| self.src_max_block_time(),
-                    &|| self.src_latest_height(),
-                )
-            })?;
-
-        let (elapsed_dst_ods, unelapsed_dst_ods) =
-            partition(self.dst_operational_data.take(), |op| {
-                op.has_conn_delay_elapsed(
-                    &|| self.dst_time_latest(),
-                    &|| self.dst_max_block_time(),
-                    &|| self.dst_latest_height(),
-                )
-            })?;
-
-        self.src_operational_data.replace(unelapsed_src_ods);
-        self.dst_operational_data.replace(unelapsed_dst_ods);
-        Ok((elapsed_src_ods, elapsed_dst_ods))
-    }
-
     fn restore_src_client(&self) -> ForeignClient<ChainA, ChainB> {
         ForeignClient::restore(
             self.src_client_id().clone(),
diff --git a/relayer/src/util/queue.rs b/relayer/src/util/queue.rs
index 74f30d96b9..77b71eb1a6 100644
--- a/relayer/src/util/queue.rs
+++ b/relayer/src/util/queue.rs
@@ -64,3 +64,9 @@ impl<T> Default for Queue<T> {
         Self::new()
     }
 }
+
+impl<T> From<VecDeque<T>> for Queue<T> {
+    fn from(deque: VecDeque<T>) -> Self {
+        Queue(Arc::new(RwLock::new(deque)))
+    }
+}
diff --git a/relayer/src/worker/packet.rs b/relayer/src/worker/packet.rs
index 5c090bde32..38dbeeaf55 100644
--- a/relayer/src/worker/packet.rs
+++ b/relayer/src/worker/packet.rs
@@ -73,7 +73,7 @@ pub fn spawn_packet_worker<ChainA: ChainHandle, ChainB: ChainHandle>(
     };
 
     spawn_background_task(span, Some(Duration::from_millis(1000)), move || {
-        let relay_path = &link.lock().unwrap().a_to_b;
+        let relay_path = &mut link.lock().unwrap().a_to_b;
 
         relay_path
            .refresh_schedule()
@@ -120,7 +120,7 @@ pub fn spawn_packet_cmd_worker<ChainA: ChainHandle, ChainB: ChainHandle>(
         retry_with_index(retry_strategy::worker_stubborn_strategy(), |index| {
             handle_packet_cmd(
                 &mut is_first_run,
-                &link.lock().unwrap(),
+                &mut link.lock().unwrap(),
                 clear_on_start,
                 clear_interval,
                 &path,
@@ -145,7 +145,7 @@ pub fn spawn_packet_cmd_worker<ChainA: ChainHandle, ChainB: ChainHandle>(
 /// data that is ready.
 fn handle_packet_cmd<ChainA: ChainHandle, ChainB: ChainHandle>(
     is_first_run: &mut bool,
-    link: &Link<ChainA, ChainB>,
+    link: &mut Link<ChainA, ChainB>,
     clear_on_start: bool,
     clear_interval: u64,
     path: &Packet,
diff --git a/tools/integration-test/src/tests/execute_schedule.rs b/tools/integration-test/src/tests/execute_schedule.rs
index 8f64e2eb5b..9833b6d934 100644
--- a/tools/integration-test/src/tests/execute_schedule.rs
+++ b/tools/integration-test/src/tests/execute_schedule.rs
@@ -14,7 +14,7 @@ use ibc_test_framework::util::random::random_u64_range;
 
 use ibc_relayer::link::{Link, LinkParameters};
 
-const NUM_TXS: i32 = 10;
+const NUM_TXS: usize = 10;
 
 #[test]
 fn test_execute_schedule() -> Result<(), Error> {
@@ -51,7 +51,7 @@ impl BinaryChannelTest for ExecuteScheduleTest {
             true,
         )?;
 
-        let relay_path_a_to_b = chain_a_link.a_to_b;
+        let mut relay_path_a_to_b = chain_a_link.a_to_b;
 
         for i in 0..NUM_TXS {
             chains.node_a.chain_driver().ibc_transfer_token(
@@ -65,7 +65,7 @@ impl BinaryChannelTest for ExecuteScheduleTest {
 
             relay_path_a_to_b.schedule_packet_clearing(None)?;
 
-            info!("Performing IBC transfer #{} from chain A to chain B", i,);
+            info!("Performing IBC transfer #{} from chain A to chain B", i);
         }
 
         assert_eq!(relay_path_a_to_b.dst_operational_data.len(), NUM_TXS);
@@ -74,9 +74,7 @@ impl BinaryChannelTest for ExecuteScheduleTest {
 
         match relay_path_a_to_b.execute_schedule() {
             Ok(_) => panic!("Expected an error when relaying tx from A to B"),
-            Err(_e) => {
-                assert_eq!(relay_path_a_to_b.dst_operational_data.len(), NUM_TXS - 1);
-            }
+            Err(_) => assert_eq!(relay_path_a_to_b.dst_operational_data.len(), NUM_TXS - 1),
         }
 
         Ok(())

From a948c3f979e1174588783aa2c43f5b60fa06b048 Mon Sep 17 00:00:00 2001
From: Sean Chen
Date: Fri, 20 May 2022 12:29:29 -0500
Subject: [PATCH 23/36] Flesh out doc comments some more

---
 relayer/src/link/relay_path.rs | 12 ++++++++++++
 1 file changed, 12 insertions(+)

diff --git a/relayer/src/link/relay_path.rs b/relayer/src/link/relay_path.rs
index 25572cc20b..a4ccdc3013 100644
--- a/relayer/src/link/relay_path.rs
+++ b/relayer/src/link/relay_path.rs
@@ -1373,6 +1373,12 @@ impl<ChainA: ChainHandle, ChainB: ChainHandle> RelayPath<ChainA, ChainB> {
     /// Pieces of operational data that have not elapsed yet are also placed
     /// in the 'unprocessed' bucket.
     ///
+    /// A piece of operational data is considered 'elapsed' if it has surpassed
+    /// its target chain's:
+    /// 1. Latest timestamp
+    /// 2. Maximum block time
+    /// 3. Latest height
+    ///
     /// This method performs relaying using the asynchronous sender.
     /// Retains the operational data as pending, and associates it
     /// with one or more transaction hash(es).
@@ -1426,6 +1432,12 @@ impl<ChainA: ChainHandle, ChainB: ChainHandle> RelayPath<ChainA, ChainB> {
     /// Pieces of operational data that have not elapsed yet are also placed
     /// in the 'unprocessed' bucket.
     ///
+    /// A piece of operational data is considered 'elapsed' if it has surpassed
+    /// its target chain's:
+    /// 1. Latest timestamp
+    /// 2. Maximum block time
+    /// 3. Latest height
+    ///
     /// This method performs relaying using the asynchronous sender.
     /// Retains the operational data as pending, and associates it
     /// with one or more transaction hash(es).
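The re-queueing logic that patches 22 and 23 introduce boils down to a single shape: drain an iterator of work items in order, and on the first failure hand every unprocessed item back to the caller so it can be put back on the queue. The following is a minimal, self-contained Rust sketch of that shape, not code from this series; `drain_or_requeue` and `process` are hypothetical stand-ins for `execute_schedule_with_*_operational_data` and `relay_from_operational_data`:

use std::collections::VecDeque;

// Drain `items` in order. On the first failure, return the error together
// with the failed item and every item that was not yet processed, so the
// caller can re-queue them. (Patches 22 and 23 re-queue the failed item as
// well; patch 24 below changes this so the failed item is dropped instead.)
fn drain_or_requeue<T, E>(
    mut items: impl Iterator<Item = T>,
    mut process: impl FnMut(&T) -> Result<(), E>,
) -> Result<(), (VecDeque<T>, E)> {
    while let Some(item) = items.next() {
        if let Err(e) = process(&item) {
            let mut unprocessed = VecDeque::new();
            unprocessed.push_back(item);
            unprocessed.extend(items);
            return Err((unprocessed, e));
        }
    }
    Ok(())
}

fn main() {
    // Fail on item 3: items 3, 4 and 5 must survive for re-queueing.
    let outcome = drain_or_requeue([1, 2, 3, 4, 5].into_iter(), |&n| {
        if n == 3 {
            Err("send failed")
        } else {
            Ok(())
        }
    });
    match outcome {
        Err((unprocessed, _)) => assert_eq!(unprocessed, VecDeque::from([3, 4, 5])),
        Ok(()) => unreachable!(),
    }
}

Returning the unprocessed remainder inside the error variant is what lets the new `execute_schedule` restore `src_operational_data` and `dst_operational_data` before propagating the error, instead of silently losing the rest of the queue.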
From 0321f0e77a000b55a8dd8235d1cfa5d69fa5349d Mon Sep 17 00:00:00 2001
From: Sean Chen
Date: Fri, 20 May 2022 14:32:59 -0500
Subject: [PATCH 24/36] Remove a duplicate function

---
 relayer/src/link/relay_path.rs | 95 ++++++++++------------------
 1 file changed, 27 insertions(+), 68 deletions(-)

diff --git a/relayer/src/link/relay_path.rs b/relayer/src/link/relay_path.rs
index a4ccdc3013..65119c6592 100644
--- a/relayer/src/link/relay_path.rs
+++ b/relayer/src/link/relay_path.rs
@@ -81,6 +81,15 @@ impl Resubmit {
     }
 }
 
+/// Specifies the target chain, typically for a packet that needs to be relayed.
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+pub enum TargetChain {
+    /// The source chain.
+    Src,
+    /// The destination chain.
+    Dst,
+}
+
 pub struct RelayPath<ChainA: ChainHandle, ChainB: ChainHandle> {
     channel: Channel<ChainA, ChainB>,
 
@@ -1364,65 +1373,6 @@ impl<ChainA: ChainHandle, ChainB: ChainHandle> RelayPath<ChainA, ChainB> {
         }
     }
 
-    /// Drives the relaying of elapsed operational data items meant for
-    /// the source chain forward. Should an error occur when attempting
-    /// to relay a piece of operational data, this function returns all
-    /// subsequent unprocessed pieces of operational data back to the caller
-    /// so that they can be re-queued.
-    ///
-    /// Pieces of operational data that have not elapsed yet are also placed
-    /// in the 'unprocessed' bucket.
-    ///
-    /// A piece of operational data is considered 'elapsed' if it has surpassed
-    /// its target chain's:
-    /// 1. Latest timestamp
-    /// 2. Maximum block time
-    /// 3. Latest height
-    ///
-    /// This method performs relaying using the asynchronous sender.
-    /// Retains the operational data as pending, and associates it
-    /// with one or more transaction hash(es).
-    fn execute_schedule_with_src_operational_data<I: Iterator<Item = OperationalData>>(
-        &mut self,
-        mut operations: I,
-    ) -> Result<VecDeque<OperationalData>, (VecDeque<OperationalData>, LinkError)> {
-        let mut unprocessed = VecDeque::new();
-
-        while let Some(od) = operations.next() {
-            match od.has_conn_delay_elapsed(
-                &|| self.src_time_latest(),
-                &|| self.src_max_block_time(),
-                &|| self.src_latest_height(),
-            ) {
-                Ok(elapsed) => {
-                    if elapsed {
-                        match self
-                            .relay_from_operational_data::<relay_sender::AsyncSender>(od.clone())
-                        {
-                            Ok(reply) => self.enqueue_pending_tx(reply, od),
-                            Err(e) => {
-                                unprocessed.push_back(od);
-                                unprocessed.extend(operations);
-
-                                return Err((unprocessed, e));
-                            }
-                        }
-                    } else {
-                        unprocessed.push_back(od);
-                    }
-                }
-                Err(e) => {
-                    unprocessed.push_back(od);
-                    unprocessed.extend(operations);
-
-                    return Err((unprocessed, e));
-                }
-            }
-        }
-
-        Ok(unprocessed)
-    }
-
     /// Drives the relaying of elapsed operational data items meant for
     /// the destination chain forward. Should an error occur when attempting
     /// to relay a piece of operational data, this function returns all
     /// subsequent unprocessed pieces of operational data back to the caller
     /// so that they can be re-queued.
     ///
     /// Pieces of operational data that have not elapsed yet are also placed
     /// in the 'unprocessed' bucket.
     ///
     /// A piece of operational data is considered 'elapsed' if it has surpassed
     /// its target chain's:
     /// 1. Latest timestamp
     /// 2. Maximum block time
     /// 3. Latest height
     ///
     /// This method performs relaying using the asynchronous sender.
     /// Retains the operational data as pending, and associates it
     /// with one or more transaction hash(es).
-    fn execute_schedule_with_dst_operational_data<I: Iterator<Item = OperationalData>>(
+    fn do_execute_schedule<I: Iterator<Item = OperationalData>>(
         &mut self,
         mut operations: I,
+        target_chain: TargetChain,
     ) -> Result<VecDeque<OperationalData>, (VecDeque<OperationalData>, LinkError)> {
         let mut unprocessed = VecDeque::new();
 
         while let Some(od) = operations.next() {
-            match od.has_conn_delay_elapsed(
-                &|| self.dst_time_latest(),
-                &|| self.dst_max_block_time(),
-                &|| self.dst_latest_height(),
-            ) {
+            let elapsed_result = match target_chain {
+                TargetChain::Src => od.has_conn_delay_elapsed(
+                    &|| self.src_time_latest(),
+                    &|| self.src_max_block_time(),
+                    &|| self.src_latest_height(),
+                ),
+                TargetChain::Dst => od.has_conn_delay_elapsed(
+                    &|| self.dst_time_latest(),
+                    &|| self.dst_max_block_time(),
+                    &|| self.dst_latest_height(),
+                ),
+            };
+
+            match elapsed_result {
                 Ok(elapsed) => {
                     if elapsed {
                         match self
                             .relay_from_operational_data::<relay_sender::AsyncSender>(od.clone())
                         {
                             Ok(reply) => self.enqueue_pending_tx(reply, od),
                             Err(e) => {
-                                unprocessed.push_back(od);
                                 unprocessed.extend(operations);
 
                                 return Err((unprocessed, e));
@@ -1492,7 +1451,7 @@ impl<ChainA: ChainHandle, ChainB: ChainHandle> RelayPath<ChainA, ChainB> {
     pub fn execute_schedule(&mut self) -> Result<(), LinkError> {
         let src_od_iter = self.src_operational_data.take().into_iter();
 
-        match self.execute_schedule_with_src_operational_data(src_od_iter) {
+        match self.do_execute_schedule(src_od_iter, TargetChain::Src) {
             Ok(unprocessed_src_data) => self.src_operational_data = unprocessed_src_data.into(),
             Err((unprocessed_src_data, e)) => {
                 self.src_operational_data = unprocessed_src_data.into();
@@ -1502,7 +1461,7 @@ impl<ChainA: ChainHandle, ChainB: ChainHandle> RelayPath<ChainA, ChainB> {
 
         let dst_od_iter = self.dst_operational_data.take().into_iter();
 
-        match self.execute_schedule_with_dst_operational_data(dst_od_iter) {
+        match self.do_execute_schedule(dst_od_iter, TargetChain::Dst) {
             Ok(unprocessed_dst_data) => self.dst_operational_data = unprocessed_dst_data.into(),
             Err((unprocessed_dst_data, e)) => {
                 self.dst_operational_data = unprocessed_dst_data.into();

From 142feea214c8a4b65db51665613fede76a512c45 Mon Sep 17 00:00:00 2001
From: Sean Chen
Date: Fri, 20 May 2022 14:39:12 -0500
Subject: [PATCH 25/36] Make use of OperationalDataTarget enum

---
 relayer/src/link/operational_data.rs |  3 +++
 relayer/src/link/relay_path.rs       | 10 +++++-----
 2 files changed, 8 insertions(+), 5 deletions(-)

diff --git a/relayer/src/link/operational_data.rs b/relayer/src/link/operational_data.rs
index 92f8de8c7d..2fa258bbea 100644
--- a/relayer/src/link/operational_data.rs
+++ b/relayer/src/link/operational_data.rs
@@ -17,9 +17,12 @@ use crate::chain::tx::TrackedMsgs;
 use crate::link::error::LinkError;
 use crate::link::RelayPath;
 
+/// The chain that a piece of `OperationalData` is bound for.
 #[derive(Clone, Copy, PartialEq, Debug)]
 pub enum OperationalDataTarget {
+    /// The source chain.
     Source,
+    /// The destination chain.
     Destination,
 }
 
diff --git a/relayer/src/link/relay_path.rs b/relayer/src/link/relay_path.rs
index 65119c6592..e6a585287b 100644
--- a/relayer/src/link/relay_path.rs
+++ b/relayer/src/link/relay_path.rs
@@ -1394,18 +1394,18 @@ impl<ChainA: ChainHandle, ChainB: ChainHandle> RelayPath<ChainA, ChainB> {
     fn do_execute_schedule<I: Iterator<Item = OperationalData>>(
         &mut self,
         mut operations: I,
-        target_chain: TargetChain,
+        target_chain: OperationalDataTarget,
     ) -> Result<VecDeque<OperationalData>, (VecDeque<OperationalData>, LinkError)> {
         let mut unprocessed = VecDeque::new();
 
         while let Some(od) = operations.next() {
             let elapsed_result = match target_chain {
-                TargetChain::Src => od.has_conn_delay_elapsed(
+                OperationalDataTarget::Source => od.has_conn_delay_elapsed(
                     &|| self.src_time_latest(),
                     &|| self.src_max_block_time(),
                     &|| self.src_latest_height(),
                 ),
-                TargetChain::Dst => od.has_conn_delay_elapsed(
+                OperationalDataTarget::Destination => od.has_conn_delay_elapsed(
                     &|| self.dst_time_latest(),
                     &|| self.dst_max_block_time(),
                     &|| self.dst_latest_height(),
                 ),
             };
@@ -1451,7 +1451,7 @@ impl<ChainA: ChainHandle, ChainB: ChainHandle> RelayPath<ChainA, ChainB> {
     pub fn execute_schedule(&mut self) -> Result<(), LinkError> {
         let src_od_iter = self.src_operational_data.take().into_iter();
 
-        match self.do_execute_schedule(src_od_iter, TargetChain::Src) {
+        match self.do_execute_schedule(src_od_iter, OperationalDataTarget::Source) {
             Ok(unprocessed_src_data) => self.src_operational_data = unprocessed_src_data.into(),
             Err((unprocessed_src_data, e)) => {
                 self.src_operational_data = unprocessed_src_data.into();
@@ -1461,7 +1461,7 @@ impl<ChainA: ChainHandle, ChainB: ChainHandle> RelayPath<ChainA, ChainB> {
 
         let dst_od_iter = self.dst_operational_data.take().into_iter();
 
-        match self.do_execute_schedule(dst_od_iter, TargetChain::Dst) {
+        match self.do_execute_schedule(dst_od_iter, OperationalDataTarget::Destination) {
             Ok(unprocessed_dst_data) => self.dst_operational_data = unprocessed_dst_data.into(),
             Err((unprocessed_dst_data, e)) => {
                 self.dst_operational_data = unprocessed_dst_data.into();

From 0dec734089e57c1a92e77b88d2433ada7f55414b Mon Sep 17 00:00:00 2001
From: Sean Chen
Date: Fri, 20 May 2022 14:40:40 -0500
Subject: [PATCH 26/36] Remove redundant enum

---
 relayer/src/link/relay_path.rs | 9 ---------
 1 file changed, 9 deletions(-)

diff --git a/relayer/src/link/relay_path.rs b/relayer/src/link/relay_path.rs
index e6a585287b..21a0591bb6 100644
--- a/relayer/src/link/relay_path.rs
+++ b/relayer/src/link/relay_path.rs
@@ -81,15 +81,6 @@ impl Resubmit {
     }
 }
 
-/// Specifies the target chain, typically for a packet that needs to be relayed.
-#[derive(Copy, Clone, Debug, PartialEq, Eq)]
-pub enum TargetChain {
-    /// The source chain.
-    Src,
-    /// The destination chain.
-    Dst,
-}
-
 pub struct RelayPath<ChainA: ChainHandle, ChainB: ChainHandle> {
     channel: Channel<ChainA, ChainB>,
 
From ef2014cf5669cf930d13b0052b83fcefb0318674 Mon Sep 17 00:00:00 2001
From: Sean Chen
Date: Fri, 20 May 2022 14:47:53 -0500
Subject: [PATCH 27/36] Remove some Debug derives

---
 relayer/src/link/operational_data.rs | 6 +++---
 relayer/src/util/queue.rs            | 1 -
 2 files changed, 3 insertions(+), 4 deletions(-)

diff --git a/relayer/src/link/operational_data.rs b/relayer/src/link/operational_data.rs
index 2fa258bbea..a270a972cf 100644
--- a/relayer/src/link/operational_data.rs
+++ b/relayer/src/link/operational_data.rs
@@ -76,7 +76,7 @@ impl From<Vec<IbcEvent>> for TrackedEvents {
 ///
 /// Comprises the proto-encoded packet message,
 /// alongside the event which generated it.
-#[derive(Clone, Debug)]
+#[derive(Clone)]
 pub struct TransitMessage {
     pub event: IbcEvent,
     pub msg: Any,
@@ -89,7 +89,7 @@ pub struct TransitMessage {
 /// - `proofs_height`: represents the height for the proofs in all the messages.
 ///   Note: this is the height at which the proofs are queried. A client consensus state at
 ///   `proofs_height + 1` must exist on-chain in order to verify the proofs.
-#[derive(Clone, Debug)]
+#[derive(Clone)]
 pub struct OperationalData {
     pub proofs_height: Height,
     pub batch: Vec<TransitMessage>,
@@ -305,7 +305,7 @@ impl OperationalData {
 /// A struct that holds everything that is required to calculate and deal with the connection-delay
 /// feature.
-#[derive(Clone, Debug)]
+#[derive(Clone)]
 struct ConnectionDelay {
     delay: Duration,
     scheduled_time: Instant,
diff --git a/relayer/src/util/queue.rs b/relayer/src/util/queue.rs
index 77b71eb1a6..cf273dc837 100644
--- a/relayer/src/util/queue.rs
+++ b/relayer/src/util/queue.rs
@@ -8,7 +8,6 @@ use crate::util::lock::LockExt;
 /// mutable reference. We only expose subset of VecDeque methods
 /// that does not return any inner reference, so that the RefCell
 /// can never panic caused by simultaneous borrow and borrow_mut.
-#[derive(Debug)]
 pub struct Queue<T>(Arc<RwLock<VecDeque<T>>>);
 
 impl<T> Queue<T> {

From ef3f4984b2a00b512e187fbf0fcbd47b0d3a23e8 Mon Sep 17 00:00:00 2001
From: Sean Chen
Date: Fri, 20 May 2022 14:48:59 -0500
Subject: [PATCH 28/36] Remove one more debug derive

---
 relayer/src/link/operational_data.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/relayer/src/link/operational_data.rs b/relayer/src/link/operational_data.rs
index a270a972cf..2436646039 100644
--- a/relayer/src/link/operational_data.rs
+++ b/relayer/src/link/operational_data.rs
@@ -18,7 +18,7 @@ use crate::link::error::LinkError;
 use crate::link::RelayPath;
 
 /// The chain that a piece of `OperationalData` is bound for.
-#[derive(Clone, Copy, PartialEq, Debug)]
+#[derive(Clone, Copy, PartialEq)]
 pub enum OperationalDataTarget {
     /// The source chain.
     Source,

From 77040d8c772eb8652c8651676b2d6683980afd9d Mon Sep 17 00:00:00 2001
From: Sean Chen
Date: Fri, 20 May 2022 15:05:18 -0500
Subject: [PATCH 29/36] Add `try_fetch_scheduled_operational_data` back in

---
 relayer/src/link/relay_path.rs | 52 ++++++++++++++++++++++++++++++--
 1 file changed, 50 insertions(+), 2 deletions(-)

diff --git a/relayer/src/link/relay_path.rs b/relayer/src/link/relay_path.rs
index a115edf2e5..f54d0389f6 100644
--- a/relayer/src/link/relay_path.rs
+++ b/relayer/src/link/relay_path.rs
@@ -1327,7 +1327,7 @@ impl<ChainA: ChainHandle, ChainB: ChainHandle> RelayPath<ChainA, ChainB> {
                             Ok(reply) => self.enqueue_pending_tx(reply, od),
                             Err(e) => {
                                 unprocessed.extend(operations);
- 
+
                                 return Err((unprocessed, e));
                             }
                         }
@@ -1338,7 +1338,7 @@ impl<ChainA: ChainHandle, ChainB: ChainHandle> RelayPath<ChainA, ChainB> {
                 Err(e) => {
                     unprocessed.push_back(od);
                     unprocessed.extend(operations);
- 
+
                     return Err((unprocessed, e));
                 }
             }
@@ -1587,6 +1587,54 @@ impl<ChainA: ChainHandle, ChainB: ChainHandle> RelayPath<ChainA, ChainB> {
         Ok(())
     }
 
+    /// Pulls out the operational elements whose delay period has elapsed and that can
+    /// now be processed.
+    pub(crate) fn try_fetch_scheduled_operational_data(
+        &self,
+    ) -> Result<(VecDeque<OperationalData>, VecDeque<OperationalData>), LinkError> {
+        // Extracts elements from a Vec when the predicate returns true.
+        // The mutable vector is then updated to the remaining unextracted elements.
+        fn partition<T>(
+            queue: VecDeque<T>,
+            pred: impl Fn(&T) -> Result<bool, LinkError>,
+        ) -> Result<(VecDeque<T>, VecDeque<T>), LinkError> {
+            let mut true_res = VecDeque::new();
+            let mut false_res = VecDeque::new();
+
+            for e in queue.into_iter() {
+                if pred(&e)? {
+                    true_res.push_back(e);
+                } else {
+                    false_res.push_back(e);
+                }
+            }
+
+            Ok((true_res, false_res))
+        }
+
+        let (elapsed_src_ods, unelapsed_src_ods) =
+            partition(self.src_operational_data.take(), |op| {
+                op.has_conn_delay_elapsed(
+                    &|| self.src_time_latest(),
+                    &|| self.src_max_block_time(),
+                    &|| self.src_latest_height(),
+                )
+            })?;
+
+        let (elapsed_dst_ods, unelapsed_dst_ods) =
+            partition(self.dst_operational_data.take(), |op| {
+                op.has_conn_delay_elapsed(
+                    &|| self.dst_time_latest(),
+                    &|| self.dst_max_block_time(),
+                    &|| self.dst_latest_height(),
+                )
+            })?;
+
+        self.src_operational_data.replace(unelapsed_src_ods);
+        self.dst_operational_data.replace(unelapsed_dst_ods);
+        Ok((elapsed_src_ods, elapsed_dst_ods))
+    }
+
     fn restore_src_client(&self) -> ForeignClient<ChainA, ChainB> {
         ForeignClient::restore(
             self.src_client_id().clone(),

From 6a7bf0ef432b37b8132a042e9f3d6beac20b8c32 Mon Sep 17 00:00:00 2001
From: Sean Chen
Date: Fri, 20 May 2022 17:13:21 -0500
Subject: [PATCH 30/36] Give `do_execute_schedule` a more descriptive name

---
 relayer/src/link/relay_path.rs | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/relayer/src/link/relay_path.rs b/relayer/src/link/relay_path.rs
index f54d0389f6..2f006dd23d 100644
--- a/relayer/src/link/relay_path.rs
+++ b/relayer/src/link/relay_path.rs
@@ -1280,7 +1280,7 @@ impl<ChainA: ChainHandle, ChainB: ChainHandle> RelayPath<ChainA, ChainB> {
     }
 
     /// Drives the relaying of elapsed operational data items meant for
-    /// the destination chain forward. Should an error occur when attempting
+    /// a specified target chain forward. Should an error occur when attempting
     /// to relay a piece of operational data, this function returns all
     /// subsequent unprocessed pieces of operational data back to the caller
     /// so that they can be re-queued.
@@ -1297,7 +1297,7 @@ impl<ChainA: ChainHandle, ChainB: ChainHandle> RelayPath<ChainA, ChainB> {
     /// This method performs relaying using the asynchronous sender.
     /// Retains the operational data as pending, and associates it
     /// with one or more transaction hash(es).
-    fn do_execute_schedule<I: Iterator<Item = OperationalData>>(
+    fn execute_schedule_for_target_chain<I: Iterator<Item = OperationalData>>(
         &mut self,
         mut operations: I,
         target_chain: OperationalDataTarget,
@@ -1357,7 +1357,7 @@ impl<ChainA: ChainHandle, ChainB: ChainHandle> RelayPath<ChainA, ChainB> {
     pub fn execute_schedule(&mut self) -> Result<(), LinkError> {
         let src_od_iter = self.src_operational_data.take().into_iter();
 
-        match self.do_execute_schedule(src_od_iter, OperationalDataTarget::Source) {
+        match self.execute_schedule_for_target_chain(src_od_iter, OperationalDataTarget::Source) {
             Ok(unprocessed_src_data) => self.src_operational_data = unprocessed_src_data.into(),
             Err((unprocessed_src_data, e)) => {
                 self.src_operational_data = unprocessed_src_data.into();
@@ -1367,7 +1367,9 @@ impl<ChainA: ChainHandle, ChainB: ChainHandle> RelayPath<ChainA, ChainB> {
 
         let dst_od_iter = self.dst_operational_data.take().into_iter();
 
-        match self.do_execute_schedule(dst_od_iter, OperationalDataTarget::Destination) {
+        match self
+            .execute_schedule_for_target_chain(dst_od_iter, OperationalDataTarget::Destination)
+        {
             Ok(unprocessed_dst_data) => self.dst_operational_data = unprocessed_dst_data.into(),
             Err((unprocessed_dst_data, e)) => {
                 self.dst_operational_data = unprocessed_dst_data.into();

From 402b2c0d062c44436d057934781d5d41f14a7740 Mon Sep 17 00:00:00 2001
From: Sean Chen
Date: Mon, 23 May 2022 15:09:09 -0500
Subject: [PATCH 31/36] Improve `execute_schedule_for_target_chain` method's documentation

---
 relayer/src/link/relay_path.rs | 25 ++++++++++++++++---------
 1 file changed, 16 insertions(+), 9 deletions(-)

diff --git a/relayer/src/link/relay_path.rs b/relayer/src/link/relay_path.rs
index 2f006dd23d..ae5f33358f 100644
--- a/relayer/src/link/relay_path.rs
+++ b/relayer/src/link/relay_path.rs
@@ -1280,13 +1280,11 @@ impl<ChainA: ChainHandle, ChainB: ChainHandle> RelayPath<ChainA, ChainB> {
     }
 
     /// Drives the relaying of elapsed operational data items meant for
-    /// a specified target chain forward. Should an error occur when attempting
-    /// to relay a piece of operational data, this function returns all
-    /// subsequent unprocessed pieces of operational data back to the caller
-    /// so that they can be re-queued.
+    /// a specified target chain forward.
     ///
-    /// Pieces of operational data that have not elapsed yet are also placed
-    /// in the 'unprocessed' bucket.
+    /// Given an iterator of `OperationalData` elements, this function
+    /// first determines whether the current piece of operational data
+    /// has elapsed.
     ///
     /// A piece of operational data is considered 'elapsed' if it has surpassed
     /// its target chain's:
@@ -1294,9 +1292,18 @@ impl<ChainA: ChainHandle, ChainB: ChainHandle> RelayPath<ChainA, ChainB> {
     /// 2. Maximum block time
     /// 3. Latest height
     ///
-    /// This method performs relaying using the asynchronous sender.
-    /// Retains the operational data as pending, and associates it
-    /// with one or more transaction hash(es).
+    /// If the current piece of operational data has elapsed, then relaying
+    /// is performed using the asynchronous sender. Operational data is
+    /// retained as pending and is associated with one or more transaction
+    /// hash(es).
+    ///
+    /// Should an error occur when attempting to relay a piece of operational
+    /// data, this function returns all subsequent unprocessed pieces of
+    /// operational data back to the caller so that they can be re-queued
+    /// for processing; the operational data that failed to send is dropped.
+    ///
+    /// Note that pieces of operational data that have not elapsed yet are
+    /// also placed in the 'unprocessed' bucket.
     fn execute_schedule_for_target_chain<I: Iterator<Item = OperationalData>>(
         &mut self,
         mut operations: I,

From 26f61d02653e9f8858c47b1e39e459908068765a Mon Sep 17 00:00:00 2001
From: Sean Chen
Date: Tue, 24 May 2022 11:43:05 -0500
Subject: [PATCH 32/36] Add a bunch of clarifying comments

---
 relayer/src/link/operational_data.rs          |  6 ++--
 relayer/src/link/relay_path.rs                | 12 ++++++++
 .../src/tests/execute_schedule.rs             | 29 ++++++++++++-------
 3 files changed, 34 insertions(+), 13 deletions(-)

diff --git a/relayer/src/link/operational_data.rs b/relayer/src/link/operational_data.rs
index ac79ab98a8..a6a2380d61 100644
--- a/relayer/src/link/operational_data.rs
+++ b/relayer/src/link/operational_data.rs
@@ -17,12 +17,12 @@ use crate::chain::tracking::TrackingId;
 use crate::link::error::LinkError;
 use crate::link::RelayPath;
 
-/// The chain that a piece of `OperationalData` is bound for.
+/// The chain that a piece of [`OperationalData`] is bound for.
 #[derive(Clone, Copy, PartialEq)]
 pub enum OperationalDataTarget {
-    /// The source chain.
+    /// The chain which generated the events batch in the `OperationalData`.
     Source,
-    /// The destination chain.
+    /// The chain receiving the packets associated with the `OperationalData`.
     Destination,
 }
 
diff --git a/relayer/src/link/relay_path.rs b/relayer/src/link/relay_path.rs
index ae5f33358f..8c6cb70bf1 100644
--- a/relayer/src/link/relay_path.rs
+++ b/relayer/src/link/relay_path.rs
@@ -1328,10 +1328,15 @@ impl<ChainA: ChainHandle, ChainB: ChainHandle> RelayPath<ChainA, ChainB> {
             match elapsed_result {
                 Ok(elapsed) => {
                     if elapsed {
+                        // The current piece of operational data has elapsed; we can go ahead and
+                        // attempt to relay it.
                         match self
                             .relay_from_operational_data::<relay_sender::AsyncSender>(od.clone())
                         {
+                            // The operational data was successfully relayed; enqueue the associated tx.
                             Ok(reply) => self.enqueue_pending_tx(reply, od),
+                            // The relaying process failed; return all of the subsequent pieces of operational
+                            // data along with the underlying error that occurred.
                             Err(e) => {
                                 unprocessed.extend(operations);
@@ -1344,10 +1349,17 @@ impl<ChainA: ChainHandle, ChainB: ChainHandle> RelayPath<ChainA, ChainB> {
                             }
                         }
                     } else {
+                        // The current piece of operational data has not elapsed; add it to the bucket
+                        // of unprocessed operational data and continue processing subsequent pieces
+                        // of operational data.
                         unprocessed.push_back(od);
                     }
                 }
                 Err(e) => {
+                    // An error occurred when attempting to determine whether the current piece of
+                    // operational data has elapsed or not. Add the current piece of data, along with
+                    // all of the subsequent pieces of data, to the unprocessed bucket and return it
+                    // along with the error that resulted.
                     unprocessed.push_back(od);
                     unprocessed.extend(operations);
 
diff --git a/tools/integration-test/src/tests/execute_schedule.rs b/tools/integration-test/src/tests/execute_schedule.rs
index 9833b6d934..d25a848160 100644
--- a/tools/integration-test/src/tests/execute_schedule.rs
+++ b/tools/integration-test/src/tests/execute_schedule.rs
@@ -3,18 +3,21 @@
 //! to send. Subsequent pieces of operational data that were scheduled should
 //! be re-queued and not dropped.
 //!
-//! In order to test this behavior, the test manually relays at least 2 IBC transfers
-//! from chain A to chain B. Chain B is then shut down in order to force the transactions
-//! to be queued up again for re-submission. It is expected that the first transfer
-//! be dropped and not be present in the pending queue, but all of the subsequent
-//! transactions should exist in the pending queue.
+//! In order to test this behavior, the test manually relays a batch (i.e. at least
+//! 2) IBC transfers from chain A to chain B. Chain B is then shut down in order to
+//! force the batch of messages to be queued up again for re-submission.
+//!
+//! It is expected that the first message of the batch gets dropped (i.e. it is not
+//! present in the pending queue), but all of the subsequent messages should exist
+//! in the pending queue.
 
 use ibc_test_framework::prelude::*;
 use ibc_test_framework::util::random::random_u64_range;
 
 use ibc_relayer::link::{Link, LinkParameters};
 
-const NUM_TXS: usize = 10;
+/// The number of messages to be sent in a batch contained in a piece of operational data.
+const BATCH_SIZE: usize = 10;
 
 #[test]
 fn test_execute_schedule() -> Result<(), Error> {
@@ -53,7 +56,8 @@ impl BinaryChannelTest for ExecuteScheduleTest {
 
         let mut relay_path_a_to_b = chain_a_link.a_to_b;
 
-        for i in 0..NUM_TXS {
+        // Construct `BATCH_SIZE` pieces of operational data and queue them up to be sent to chain B.
+        for i in 0..BATCH_SIZE {
             chains.node_a.chain_driver().ibc_transfer_token(
                 &channel.port_a.as_ref(),
                 &channel.channel_id_a.as_ref(),
@@ -65,16 +69,21 @@ impl BinaryChannelTest for ExecuteScheduleTest {
 
             relay_path_a_to_b.schedule_packet_clearing(None)?;
 
-            info!("Performing IBC transfer #{} from chain A to chain B", i);
+            info!("Performing IBC send packet with a token transfer #{} from chain A to be received by chain B", i);
         }
 
-        assert_eq!(relay_path_a_to_b.dst_operational_data.len(), NUM_TXS);
+        // We should see that all of the events in the batch are queued up to be sent to chain B.
+        assert_eq!(relay_path_a_to_b.dst_operational_data.len(), BATCH_SIZE);
 
         chains.node_b.value().kill()?;
 
+        // With chain B inactive, if we attempt to send the batch of messages, we expect to see
+        // `BATCH_SIZE` - 1 messages in the batch since the initial event should have failed to
+        // be relayed and was thus dropped. The subsequent messages in the batch should have all
+        // been re-added to the pending queue.
         match relay_path_a_to_b.execute_schedule() {
             Ok(_) => panic!("Expected an error when relaying tx from A to B"),
-            Err(_) => assert_eq!(relay_path_a_to_b.dst_operational_data.len(), BATCH_SIZE - 1),
+            Err(_) => assert_eq!(relay_path_a_to_b.dst_operational_data.len(), BATCH_SIZE - 1),
         }
 
         Ok(())

From 58d78cf36f6b6ca8f909180c55dc613e9ebf9959 Mon Sep 17 00:00:00 2001
From: Sean Chen
Date: Tue, 24 May 2022 12:04:01 -0500
Subject: [PATCH 33/36] More clarification of comments

---
 relayer/src/link/operational_data.rs               |  6 +++---
 .../integration-test/src/tests/execute_schedule.rs | 13 +++++++------
 2 files changed, 10 insertions(+), 9 deletions(-)

diff --git a/relayer/src/link/operational_data.rs b/relayer/src/link/operational_data.rs
index a6a2380d61..235f3993dd 100644
--- a/relayer/src/link/operational_data.rs
+++ b/relayer/src/link/operational_data.rs
@@ -17,12 +17,12 @@ use crate::chain::tracking::TrackingId;
 use crate::link::error::LinkError;
 use crate::link::RelayPath;
 
-/// The chain that a piece of [`OperationalData`] is bound for.
+/// The chain that the events associated with a piece of [`OperationalData`] are bound for.
 #[derive(Clone, Copy, PartialEq)]
 pub enum OperationalDataTarget {
-    /// The chain which generated the events batch in the `OperationalData`.
+    /// The chain which generated the events associated with the `OperationalData`.
     Source,
-    /// The chain receiving the packets associated with the `OperationalData`.
+    /// The chain receiving the events associated with the `OperationalData`.
     Destination,
 }
 
diff --git a/tools/integration-test/src/tests/execute_schedule.rs b/tools/integration-test/src/tests/execute_schedule.rs
index d25a848160..0a75ca6d26 100644
--- a/tools/integration-test/src/tests/execute_schedule.rs
+++ b/tools/integration-test/src/tests/execute_schedule.rs
@@ -1,15 +1,16 @@
 //! This test ensures that the `RelayPath::execute_schedule` method does not
-//! drop any pending scheduled operational data when a prior transaction fails
-//! to send. Subsequent pieces of operational data that were scheduled should
-//! be re-queued and not dropped.
+//! drop any scheduled `OperationalData` when events associated with a prior
+//! piece of operational data fails to send. Subsequent pieces of operational
+//! data that were scheduled should be re-queued and not dropped.
 //!
 //! In order to test this behavior, the test manually relays a batch (i.e. at least
 //! 2) IBC transfers from chain A to chain B. Chain B is then shut down in order to
-//! force the batch of messages to be queued up again for re-submission.
+//! force the batch of messages (in the form of their associated pieces of operational
+//! data) to be queued up again for re-submission.
 //!
 //! It is expected that the first message of the batch gets dropped (i.e. it is not
-//! present in the pending queue), but all of the subsequent messages should exist
-//! in the pending queue.
+//! later found in the pending queue), but all of the subsequent messages should
+//! exist in the pending queue.
 
 use ibc_test_framework::prelude::*;
 use ibc_test_framework::util::random::random_u64_range;

From 746d990fe2140d5b85c560241ab0c0c4b2d0e064 Mon Sep 17 00:00:00 2001
From: Sean Chen
Date: Tue, 24 May 2022 12:16:42 -0500
Subject: [PATCH 34/36] Flesh out `OperationalData` docs

---
 relayer/src/link/operational_data.rs | 19 +++++++++++++------
 1 file changed, 13 insertions(+), 6 deletions(-)

diff --git a/relayer/src/link/operational_data.rs b/relayer/src/link/operational_data.rs
index 235f3993dd..fc6c69347a 100644
--- a/relayer/src/link/operational_data.rs
+++ b/relayer/src/link/operational_data.rs
@@ -78,18 +78,25 @@ pub struct TransitMessage {
     pub msg: Any,
 }
 
-/// Holds all the necessary information for handling a set of in-transit messages.
+/// Holds all the necessary information for handling a batch of in-transit messages. This includes
+/// an event received from a chain along with any other packets related to the event (i.e.
+/// 'receive' or 'timeout' packets) that the relayer has to submit in response to the event.
 ///
-/// Each `OperationalData` item is uniquely identified by the combination of two attributes:
-/// - `target`: represents the target of the packet messages, either source or destination chain,
-/// - `proofs_height`: represents the height for the proofs in all the messages.
-///   Note: this is the height at which the proofs are queried. A client consensus state at
-///   `proofs_height + 1` must exist on-chain in order to verify the proofs.
+/// Each `OperationalData` item is uniquely identified by the combination of its `target` chain
+/// and its `proofs_height` value.
 #[derive(Clone)]
 pub struct OperationalData {
+    /// Represents the height for the proofs in all the messages. Note that the
+    /// height at which the proofs are queried. For example, a client consensus
+    /// state at `proofs_height - 1` must exist on-chain in order to verify
+    /// the proofs.
     pub proofs_height: Height,
+    /// The batch of messages associated with this piece of operational data.
     pub batch: Vec<TransitMessage>,
+    /// Represents the target of the packet messages, either the source or the destination
+    /// chain.
     pub target: OperationalDataTarget,
+    /// A unique ID for tracking this piece of operational data along the relaying pipeline.
     pub tracking_id: TrackingId,
     /// Stores `Some(ConnectionDelay)` if the delay is non-zero and `None` otherwise
     connection_delay: Option<ConnectionDelay>,

From d4342feffd5bd62020bd5af8a2e6790c034351d5 Mon Sep 17 00:00:00 2001
From: Sean Chen
Date: Tue, 24 May 2022 12:33:29 -0500
Subject: [PATCH 35/36] Add changelog entry

---
 .../ibc-relayer/1153-fix-execute-schedule-leaky-pipeline.md | 2 ++
 1 file changed, 2 insertions(+)
 create mode 100644 .changelog/unreleased/bug-fixes/ibc-relayer/1153-fix-execute-schedule-leaky-pipeline.md

diff --git a/.changelog/unreleased/bug-fixes/ibc-relayer/1153-fix-execute-schedule-leaky-pipeline.md b/.changelog/unreleased/bug-fixes/ibc-relayer/1153-fix-execute-schedule-leaky-pipeline.md
new file mode 100644
index 0000000000..41a113d0e8
--- /dev/null
+++ b/.changelog/unreleased/bug-fixes/ibc-relayer/1153-fix-execute-schedule-leaky-pipeline.md
@@ -0,0 +1,2 @@
+- Fix `execute_schedule` method dropping operational data due to improper
+  handling of errors. ([#2118](https://github.com/informalsystems/ibc-rs/issues/1153))

From b9e2af377ba04704a92d2f598f9adf54e5006d64 Mon Sep 17 00:00:00 2001
From: Sean Chen
Date: Thu, 26 May 2022 10:01:49 -0500
Subject: [PATCH 36/36] Incorporate PR feedback

---
 relayer/src/link/operational_data.rs | 13 +++++--------
 relayer/src/link/relay_path.rs       |  9 ++++-----
 2 files changed, 9 insertions(+), 13 deletions(-)

diff --git a/relayer/src/link/operational_data.rs b/relayer/src/link/operational_data.rs
index fc6c69347a..c78fa61379 100644
--- a/relayer/src/link/operational_data.rs
+++ b/relayer/src/link/operational_data.rs
@@ -81,22 +81,19 @@ pub struct TransitMessage {
 /// Holds all the necessary information for handling a batch of in-transit messages. This includes
 /// an event received from a chain along with any other packets related to the event (i.e.
 /// 'receive' or 'timeout' packets) that the relayer has to submit in response to the event.
-///
-/// Each `OperationalData` item is uniquely identified by the combination of its `target` chain
-/// and its `proofs_height` value.
 #[derive(Clone)]
 pub struct OperationalData {
-    /// Represents the height for the proofs in all the messages. Note that the
-    /// height at which the proofs are queried. For example, a client consensus
-    /// state at `proofs_height - 1` must exist on-chain in order to verify
-    /// the proofs.
+    /// Represents the height for the proofs in all the messages. Note that this is the height
+    /// at which the proofs are queried. For example, for Tendermint chains, a client consensus
+    /// state at `proofs_height + 1` must exist on-chain in order to verify the proofs.
     pub proofs_height: Height,
     /// The batch of messages associated with this piece of operational data.
     pub batch: Vec<TransitMessage>,
     /// Represents the target of the packet messages, either the source or the destination
     /// chain.
     pub target: OperationalDataTarget,
-    /// A unique ID for tracking this piece of operational data along the relaying pipeline.
+    /// A unique ID for tracking this batch of events starting from when they were received
+    /// until the transactions corresponding to those events are submitted.
     pub tracking_id: TrackingId,
     /// Stores `Some(ConnectionDelay)` if the delay is non-zero and `None` otherwise
     connection_delay: Option<ConnectionDelay>,

diff --git a/relayer/src/link/relay_path.rs b/relayer/src/link/relay_path.rs
index 8c6cb70bf1..1bb03c5565 100644
--- a/relayer/src/link/relay_path.rs
+++ b/relayer/src/link/relay_path.rs
@@ -1286,11 +1286,10 @@ impl<ChainA: ChainHandle, ChainB: ChainHandle> RelayPath<ChainA, ChainB> {
     /// first determines whether the current piece of operational data
     /// has elapsed.
     ///
-    /// A piece of operational data is considered 'elapsed' if it has surpassed
-    /// its target chain's:
-    /// 1. Latest timestamp
-    /// 2. Maximum block time
-    /// 3. Latest height
+    /// A piece of operational data is considered 'elapsed' if it has been waiting
+    /// for an amount of time that surpasses both of the following:
+    /// 1. The time duration specified in the connection delay
+    /// 2. The number of blocks specified in the connection delay
     ///
     /// If the current piece of operational data has elapsed, then relaying
     /// is performed using the asynchronous sender. Operational data is
     /// retained as pending and is associated with one or more transaction
     /// hash(es).
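The connection-delay check described by the final version of this doc comment is two-dimensional: a piece of operational data only becomes 'elapsed' once both the wall-clock component and the block-count component of the connection delay have passed on the target chain. The following is a minimal illustration only; `DelaySketch` and its fields are hypothetical stand-ins for the crate's `ConnectionDelay` bookkeeping and `has_conn_delay_elapsed`, not the actual API:

use std::time::{Duration, Instant};

// Hypothetical stand-in for the connection-delay state attached to a piece
// of operational data when it is scheduled.
struct DelaySketch {
    scheduled_time: Instant,  // target-chain time when the data was scheduled
    scheduled_height: u64,    // target-chain height when the data was scheduled
    delay_duration: Duration, // connection delay, wall-clock component
    delay_blocks: u64,        // connection delay, block-count component
}

impl DelaySketch {
    // Elapsed only when BOTH components of the connection delay have passed
    // on the target chain.
    fn has_elapsed(&self, now: Instant, latest_height: u64) -> bool {
        let time_ok = now.duration_since(self.scheduled_time) >= self.delay_duration;
        let blocks_ok = latest_height >= self.scheduled_height + self.delay_blocks;
        time_ok && blocks_ok
    }
}

fn main() {
    let od = DelaySketch {
        scheduled_time: Instant::now(),
        scheduled_height: 100,
        delay_duration: Duration::from_secs(0),
        delay_blocks: 5,
    };
    // Height 104: the block-count component has not elapsed yet.
    assert!(!od.has_elapsed(Instant::now(), 104));
    // Height 105: both components have elapsed.
    assert!(od.has_elapsed(Instant::now(), 105));
}

Anything that fails this check lands in the 'unprocessed' bucket above and is re-queued for a later attempt rather than dropped.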