-
Notifications
You must be signed in to change notification settings - Fork 360
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Add hidden configuration for sending batched messages sequentially #2543
Changes from 9 commits
871cb5a
0dcec0a
d62be79
13d0b05
9860fe3
16e1887
095a8ba
a609c05
6b0817b
c9be571
07f2bc7
4575ecd
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,9 +1,11 @@ | ||
use core::mem; | ||
|
||
use ibc::core::ics24_host::identifier::ChainId; | ||
use ibc::events::IbcEvent; | ||
use ibc_proto::google::protobuf::Any; | ||
use prost::Message; | ||
use tendermint_rpc::endpoint::broadcast::tx_sync::Response; | ||
use tracing::debug; | ||
|
||
use crate::chain::cosmos::retry::send_tx_with_account_sequence_retry; | ||
use crate::chain::cosmos::types::account::Account; | ||
|
@@ -14,6 +16,13 @@ use crate::config::types::{MaxMsgNum, MaxTxSize, Memo}; | |
use crate::error::Error; | ||
use crate::keyring::KeyEntry; | ||
|
||
/** | ||
Broadcast messages as multiple batched transactions to the chain all at once, | ||
and then wait for all transactions to be committed. | ||
This may improve performance in case when multiple transactions are | ||
committed into the same block. However this approach may not work if | ||
priority mempool is enabled. | ||
*/ | ||
pub async fn send_batched_messages_and_wait_commit( | ||
config: &TxConfig, | ||
max_msg_num: MaxMsgNum, | ||
|
@@ -55,6 +64,43 @@ pub async fn send_batched_messages_and_wait_commit( | |
Ok(events) | ||
} | ||
|
||
/** | ||
Send batched messages one after another, only after the previous one | ||
has been committed. This is only used in case if parallel transactions | ||
are committed in the wrong order due to interference from priority mempool. | ||
*/ | ||
pub async fn sequential_send_batched_messages_and_wait_commit( | ||
config: &TxConfig, | ||
max_msg_num: MaxMsgNum, | ||
max_tx_size: MaxTxSize, | ||
key_entry: &KeyEntry, | ||
account: &mut Account, | ||
tx_memo: &Memo, | ||
messages: Vec<Any>, | ||
) -> Result<Vec<IbcEvent>, Error> { | ||
if messages.is_empty() { | ||
return Ok(Vec::new()); | ||
} | ||
|
||
let tx_sync_results = sequential_send_messages_as_batches( | ||
config, | ||
max_msg_num, | ||
max_tx_size, | ||
key_entry, | ||
account, | ||
tx_memo, | ||
messages, | ||
) | ||
.await?; | ||
|
||
let events = tx_sync_results | ||
.into_iter() | ||
.flat_map(|el| el.events) | ||
.collect(); | ||
|
||
Ok(events) | ||
} | ||
|
||
pub async fn send_batched_messages_and_wait_check_tx( | ||
config: &TxConfig, | ||
max_msg_num: MaxMsgNum, | ||
|
@@ -95,8 +141,17 @@ async fn send_messages_as_batches( | |
return Ok(Vec::new()); | ||
} | ||
|
||
let message_count = messages.len(); | ||
|
||
let batches = batch_messages(max_msg_num, max_tx_size, messages)?; | ||
|
||
debug!( | ||
"sending {} messages as {} batches to chain {} in parallel", | ||
message_count, | ||
batches.len(), | ||
config.chain_id | ||
); | ||
|
||
let mut tx_sync_results = Vec::new(); | ||
|
||
for batch in batches { | ||
|
@@ -105,33 +160,88 @@ async fn send_messages_as_batches( | |
let response = | ||
send_tx_with_account_sequence_retry(config, key_entry, account, tx_memo, batch).await?; | ||
|
||
if response.code.is_err() { | ||
let events_per_tx = vec![IbcEvent::ChainError(format!( | ||
"check_tx (broadcast_tx_sync) on chain {} for Tx hash {} reports error: code={:?}, log={:?}", | ||
config.chain_id, response.hash, response.code, response.log | ||
)); message_count]; | ||
|
||
let tx_sync_result = TxSyncResult { | ||
response, | ||
events: events_per_tx, | ||
status: TxStatus::ReceivedResponse, | ||
}; | ||
|
||
tx_sync_results.push(tx_sync_result); | ||
} else { | ||
let tx_sync_result = TxSyncResult { | ||
response, | ||
events: Vec::new(), | ||
status: TxStatus::Pending { message_count }, | ||
}; | ||
|
||
tx_sync_results.push(tx_sync_result); | ||
} | ||
let tx_sync_result = response_to_tx_sync_result(&config.chain_id, message_count, response); | ||
|
||
tx_sync_results.push(tx_sync_result); | ||
} | ||
|
||
Ok(tx_sync_results) | ||
} | ||
|
||
async fn sequential_send_messages_as_batches( | ||
config: &TxConfig, | ||
max_msg_num: MaxMsgNum, | ||
max_tx_size: MaxTxSize, | ||
key_entry: &KeyEntry, | ||
account: &mut Account, | ||
tx_memo: &Memo, | ||
messages: Vec<Any>, | ||
) -> Result<Vec<TxSyncResult>, Error> { | ||
if messages.is_empty() { | ||
return Ok(Vec::new()); | ||
} | ||
|
||
let message_count = messages.len(); | ||
|
||
let batches = batch_messages(max_msg_num, max_tx_size, messages)?; | ||
|
||
debug!( | ||
"sending {} messages as {} batches to chain {} in serial", | ||
message_count, | ||
batches.len(), | ||
config.chain_id | ||
); | ||
|
||
let mut tx_sync_results = Vec::new(); | ||
|
||
for batch in batches { | ||
let message_count = batch.len(); | ||
|
||
let response = | ||
send_tx_with_account_sequence_retry(config, key_entry, account, tx_memo, batch).await?; | ||
|
||
let tx_sync_result = response_to_tx_sync_result(&config.chain_id, message_count, response); | ||
|
||
tx_sync_results.push(tx_sync_result); | ||
|
||
wait_for_block_commits( | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. With sequential sending, we get the DeliverTX result before sending the next transaction. However it is a bit unclear here whether we should continue submitting the remaining batches to the chain, if DeliverTX or SendTX returns error. Supposingly, we should probably short circuit and return on the first encounter of error. However I keep the behavior the same as the parallel version, so that we do not observe unexpected difference in behavior when sequential batching is enabled. The anti-pattern behavior of converting errors to duplicate IBC error events also significantly complicates the design space here. So I'd rather leave the behavior untouched until the chain errors are handled in better ways. |
||
&config.chain_id, | ||
&config.rpc_client, | ||
&config.rpc_address, | ||
&config.rpc_timeout, | ||
&mut tx_sync_results, | ||
) | ||
.await?; | ||
} | ||
|
||
Ok(tx_sync_results) | ||
} | ||
|
||
fn response_to_tx_sync_result( | ||
chain_id: &ChainId, | ||
message_count: usize, | ||
response: Response, | ||
) -> TxSyncResult { | ||
if response.code.is_err() { | ||
let events_per_tx = vec![IbcEvent::ChainError(format!( | ||
"check_tx (broadcast_tx_sync) on chain {} for Tx hash {} reports error: code={:?}, log={:?}", | ||
chain_id, response.hash, response.code, response.log | ||
)); message_count]; | ||
|
||
TxSyncResult { | ||
response, | ||
events: events_per_tx, | ||
status: TxStatus::ReceivedResponse, | ||
} | ||
} else { | ||
TxSyncResult { | ||
response, | ||
events: Vec::new(), | ||
status: TxStatus::Pending { message_count }, | ||
} | ||
} | ||
} | ||
|
||
fn batch_messages( | ||
max_msg_num: MaxMsgNum, | ||
max_tx_size: MaxTxSize, | ||
|
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1 @@ | ||
pub mod sequential; |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
I'm concerned about more code duplication here.
When contemplating refactoring for batching fixes respecting actual tx size, already there are two functions to care about, this adds a third one.
Could this be incorporated in the regular `send_messages_as_batches` under a dynamic flag?

There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
The core logic has already been abstracted into helper functions like `batch_messages`, `send_tx_with_account_sequence_retry`, and `wait_for_block_commits`. So functions like `sequential_send_messages_as_batches` only act as higher-level functions that wire up the lower-level functions. Such low-level functions are already there to serve the code-deduplication purpose.

Using a dynamic flag can obscure the logic that differentiates `send_messages_as_batches` from `sequential_send_messages_as_batches`. With them being separate functions, it is much clearer how the behaviors differ from one another.