Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

feat(congestion_control) - relax congestion control #12241

Merged
merged 9 commits on Oct 21, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
11 changes: 11 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -2,12 +2,23 @@

## [unreleased]

### Protocol Changes

* Fixing invalid cost used for wasm_yield_resume_byte. #12192
* Relaxing Congestion Control to allow accepting and buffering more transactions. #12241

### Non-protocol Changes
**No Changes**

## [2.3.0]

### Protocol Changes
* Sets `chunk_validator_only_kickout_threshold` to 70. Uses this kickout threshold as a cutoff threshold for contribution of endorsement ratio in rewards calculation: if endorsement ratio is above 70%, the contribution of endorsement ratio in average uptime calculation is 100%, otherwise it is 0%. Endorsements received are now included in `BlockHeader` to improve kickout and reward calculation for chunk validators.

### Non-protocol Changes
* Added [documentation](./docs/misc/archival_data_recovery.md) and a [reference](./scripts/recover_missing_archival_data.sh) script to recover the data lost in archival nodes at the beginning of 2024.
* **Archival nodes only:** Stop saving partial chunks to `PartialChunks` column in the Cold DB. Instead, archival nodes will reconstruct partial chunks from the `Chunks` column.
* Decentralized state sync: Before, nodes that needed to download state (either because they're several epochs behind the chain or because they're going to start producing chunks for a shard they don't currently track) would download them from a centralized GCS bucket. Now, nodes will attempt to download pieces of the state from peers in the network, and only fallback to downloading from GCS if that fails. Please note that in order to participate in providing state parts to peers, your node may generate snapshots of the state. These snapshots should not take too much space, since they're hard links to database files that get cleaned up on every epoch.

### 2.2.0

Expand Down
4 changes: 2 additions & 2 deletions chain/chain/src/tests/simple_chain.rs
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,7 @@ fn build_chain() {
if cfg!(feature = "nightly") {
insta::assert_snapshot!(hash, @"Hc3bWEd7ikHf9BAe2SknvH2jAAakEtBRU1FBu6Udocm3");
} else {
insta::assert_snapshot!(hash, @"dY6Z6HdATLWK3wwxkNtUs8T1GaEQqpxUCXm7TectWW7");
insta::assert_snapshot!(hash, @"GHZFAFiMdGzAfnWTcS9u9wqFvxMrgFpyEr6Use7jk2Lo");
}

for i in 1..5 {
Expand All @@ -52,7 +52,7 @@ fn build_chain() {
if cfg!(feature = "nightly") {
insta::assert_snapshot!(hash, @"39R6bDFXkPfwdYs4crV3RyCde85ecycqP5DBwdtwyjcJ");
} else {
insta::assert_snapshot!(hash, @"6RnKeuiGmxkFxNYeEmAbK6NzwvKYuTcKCwqAmqJ6m3DG");
insta::assert_snapshot!(hash, @"3Pdm44L71Bk8EokPHF1pxakHojsriNadBdZZSpcoDv9q");
}
}

Expand Down
2 changes: 1 addition & 1 deletion chain/jsonrpc/jsonrpc-tests/res/genesis_config.json
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
{
"protocol_version": 72,
"protocol_version": 73,
"genesis_time": "1970-01-01T00:00:00.000000000Z",
"chain_id": "sample",
"genesis_height": 0,
Expand Down
14 changes: 14 additions & 0 deletions core/parameters/res/runtime_configs/73.yaml
Original file line number Diff line number Diff line change
@@ -1 +1,15 @@
wasm_yield_resume_byte: { old: 1_195_627_285_210 , new: 47_683_715 }

# Congestion Control

# 40 PGAS
max_congestion_incoming_gas: {
old : 20_000_000_000_000_000,
new : 40_000_000_000_000_000,
}

# 0.7
reject_tx_congestion_threshold: {
old : { numerator: 50, denominator: 100 },
new : { numerator: 80, denominator: 100 }
}
6 changes: 3 additions & 3 deletions core/parameters/res/runtime_configs/parameters.snap
Original file line number Diff line number Diff line change
Expand Up @@ -147,7 +147,7 @@ wasm_alt_bn128_g1_sum_element 5_000_000_000
wasm_yield_create_base 153_411_779_276
wasm_yield_create_byte 15_643_988
wasm_yield_resume_base 1_195_627_285_210
wasm_yield_resume_byte 1_195_627_285_210
wasm_yield_resume_byte 47_683_715
wasm_bls12381_p1_sum_base 16_500_000_000
wasm_bls12381_p1_sum_element 6_000_000_000
wasm_bls12381_p2_sum_base 18_600_000_000
Expand Down Expand Up @@ -208,7 +208,7 @@ vm_kind NearVm
eth_implicit_accounts true
yield_resume true
discard_custom_sections true
max_congestion_incoming_gas 20_000_000_000_000_000
max_congestion_incoming_gas 40_000_000_000_000_000
max_congestion_outgoing_gas 10_000_000_000_000_000
max_congestion_memory_consumption 1_000_000_000
max_congestion_missed_chunks 5
Expand All @@ -217,5 +217,5 @@ min_outgoing_gas 1_000_000_000_000_000
allowed_shard_outgoing_gas 1_000_000_000_000_000
max_tx_gas 500_000_000_000_000
min_tx_gas 20_000_000_000_000
reject_tx_congestion_threshold 50 / 100
reject_tx_congestion_threshold 80 / 100
use_state_stored_receipt true
8 changes: 6 additions & 2 deletions core/parameters/src/config.rs
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@ use crate::config_store::INITIAL_TESTNET_CONFIG;
use crate::cost::RuntimeFeesConfig;
use crate::parameter_table::ParameterTable;
use near_account_id::AccountId;
use near_primitives_core::types::{Balance, Gas};
use near_primitives_core::types::{Balance, Gas, ProtocolVersion};
use near_primitives_core::version::PROTOCOL_VERSION;
use std::sync::Arc;

Expand Down Expand Up @@ -49,8 +49,12 @@ impl RuntimeConfig {
}

pub fn test() -> Self {
Self::test_protocol_version(PROTOCOL_VERSION)
}

pub fn test_protocol_version(protocol_version: ProtocolVersion) -> Self {
let config_store = super::config_store::RuntimeConfigStore::new(None);
let runtime_config = config_store.get_config(PROTOCOL_VERSION);
let runtime_config = config_store.get_config(protocol_version);

let mut wasm_config = crate::vm::Config::clone(&runtime_config.wasm_config);
// Lower the yield timeout length so that we can observe timeouts in integration tests.
Expand Down
1 change: 1 addition & 0 deletions core/parameters/src/config_store.rs
Original file line number Diff line number Diff line change
Expand Up @@ -49,6 +49,7 @@ static CONFIG_DIFFS: &[(ProtocolVersion, &str)] = &[
(70, include_config!("70.yaml")),
// Increase main_storage_proof_size_soft_limit and introduces StateStoredReceipt
(72, include_config!("72.yaml")),
// Fix wasm_yield_resume_byte and relax congestion control.
(73, include_config!("73.yaml")),
(129, include_config!("129.yaml")),
];
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -246,7 +246,7 @@ expression: config_view
"registrar_account_id": "registrar"
},
"congestion_control_config": {
"max_congestion_incoming_gas": 20000000000000000,
"max_congestion_incoming_gas": 40000000000000000,
"max_congestion_outgoing_gas": 10000000000000000,
"max_congestion_memory_consumption": 1000000000,
"max_congestion_missed_chunks": 5,
Expand All @@ -255,7 +255,7 @@ expression: config_view
"allowed_shard_outgoing_gas": 1000000000000000,
"max_tx_gas": 500000000000000,
"min_tx_gas": 20000000000000,
"reject_tx_congestion_threshold": 0.5,
"reject_tx_congestion_threshold": 0.8,
"outgoing_receipts_usual_size_limit": 102400,
"outgoing_receipts_big_size_limit": 4718592
},
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -246,7 +246,7 @@ expression: config_view
"registrar_account_id": "registrar"
},
"congestion_control_config": {
"max_congestion_incoming_gas": 20000000000000000,
"max_congestion_incoming_gas": 40000000000000000,
"max_congestion_outgoing_gas": 10000000000000000,
"max_congestion_memory_consumption": 1000000000,
"max_congestion_missed_chunks": 5,
Expand All @@ -255,7 +255,7 @@ expression: config_view
"allowed_shard_outgoing_gas": 1000000000000000,
"max_tx_gas": 500000000000000,
"min_tx_gas": 20000000000000,
"reject_tx_congestion_threshold": 0.5,
"reject_tx_congestion_threshold": 0.8,
"outgoing_receipts_usual_size_limit": 102400,
"outgoing_receipts_big_size_limit": 4718592
},
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -246,7 +246,7 @@ expression: config_view
"registrar_account_id": "registrar"
},
"congestion_control_config": {
"max_congestion_incoming_gas": 20000000000000000,
"max_congestion_incoming_gas": 40000000000000000,
"max_congestion_outgoing_gas": 10000000000000000,
"max_congestion_memory_consumption": 1000000000,
"max_congestion_missed_chunks": 5,
Expand All @@ -255,7 +255,7 @@ expression: config_view
"allowed_shard_outgoing_gas": 1000000000000000,
"max_tx_gas": 500000000000000,
"min_tx_gas": 20000000000000,
"reject_tx_congestion_threshold": 0.5,
"reject_tx_congestion_threshold": 0.8,
"outgoing_receipts_usual_size_limit": 102400,
"outgoing_receipts_big_size_limit": 4718592
},
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -246,7 +246,7 @@ expression: config_view
"registrar_account_id": "registrar"
},
"congestion_control_config": {
"max_congestion_incoming_gas": 20000000000000000,
"max_congestion_incoming_gas": 40000000000000000,
"max_congestion_outgoing_gas": 10000000000000000,
"max_congestion_memory_consumption": 1000000000,
"max_congestion_missed_chunks": 5,
Expand All @@ -255,7 +255,7 @@ expression: config_view
"allowed_shard_outgoing_gas": 1000000000000000,
"max_tx_gas": 500000000000000,
"min_tx_gas": 20000000000000,
"reject_tx_congestion_threshold": 0.5,
"reject_tx_congestion_threshold": 0.8,
"outgoing_receipts_usual_size_limit": 102400,
"outgoing_receipts_big_size_limit": 4718592
},
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -174,7 +174,7 @@ expression: "&view"
"yield_create_base": 153411779276,
"yield_create_byte": 15643988,
"yield_resume_base": 1195627285210,
"yield_resume_byte": 1195627285210,
"yield_resume_byte": 47683715,
"bls12381_p1_sum_base": 16500000000,
"bls12381_p1_sum_element": 6000000000,
"bls12381_p2_sum_base": 18600000000,
Expand Down Expand Up @@ -246,7 +246,7 @@ expression: "&view"
"registrar_account_id": "registrar"
},
"congestion_control_config": {
"max_congestion_incoming_gas": 20000000000000000,
"max_congestion_incoming_gas": 40000000000000000,
"max_congestion_outgoing_gas": 10000000000000000,
"max_congestion_memory_consumption": 1000000000,
"max_congestion_missed_chunks": 5,
Expand All @@ -255,7 +255,7 @@ expression: "&view"
"allowed_shard_outgoing_gas": 1000000000000000,
"max_tx_gas": 500000000000000,
"min_tx_gas": 20000000000000,
"reject_tx_congestion_threshold": 0.5,
"reject_tx_congestion_threshold": 0.8,
"outgoing_receipts_usual_size_limit": 102400,
"outgoing_receipts_big_size_limit": 4718592
},
Expand Down
2 changes: 1 addition & 1 deletion core/primitives-core/src/version.rs
Original file line number Diff line number Diff line change
Expand Up @@ -267,7 +267,7 @@ impl ProtocolFeature {
}

/// Current protocol version used on the mainnet with all stable features.
const STABLE_PROTOCOL_VERSION: ProtocolVersion = 72;
const STABLE_PROTOCOL_VERSION: ProtocolVersion = 73;

// On nightly, pick big enough version to support all features.
const NIGHTLY_PROTOCOL_VERSION: ProtocolVersion = 145;
Expand Down
4 changes: 3 additions & 1 deletion core/primitives/src/congestion_info.rs
Original file line number Diff line number Diff line change
Expand Up @@ -507,8 +507,10 @@ mod tests {
use super::*;

fn get_config() -> CongestionControlConfig {
// Fix the initial configuration of congestion control for the tests.
let protocol_version = ProtocolFeature::CongestionControl.protocol_version();
let runtime_config_store = RuntimeConfigStore::new(None);
let runtime_config = runtime_config_store.get_config(PROTOCOL_VERSION);
let runtime_config = runtime_config_store.get_config(protocol_version);
runtime_config.congestion_control_config
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -174,7 +174,7 @@ expression: "&view"
"yield_create_base": 153411779276,
"yield_create_byte": 15643988,
"yield_resume_base": 1195627285210,
"yield_resume_byte": 1195627285210,
"yield_resume_byte": 47683715,
"bls12381_p1_sum_base": 16500000000,
"bls12381_p1_sum_element": 6000000000,
"bls12381_p2_sum_base": 18600000000,
Expand Down Expand Up @@ -246,7 +246,7 @@ expression: "&view"
"registrar_account_id": "registrar"
},
"congestion_control_config": {
"max_congestion_incoming_gas": 20000000000000000,
"max_congestion_incoming_gas": 40000000000000000,
"max_congestion_outgoing_gas": 10000000000000000,
"max_congestion_memory_consumption": 1000000000,
"max_congestion_missed_chunks": 5,
Expand All @@ -255,7 +255,7 @@ expression: "&view"
"allowed_shard_outgoing_gas": 1000000000000000,
"max_tx_gas": 500000000000000,
"min_tx_gas": 20000000000000,
"reject_tx_congestion_threshold": 0.5,
"reject_tx_congestion_threshold": 0.8,
"outgoing_receipts_usual_size_limit": 102400,
"outgoing_receipts_big_size_limit": 4718592
},
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -53,7 +53,7 @@ fn setup_test_runtime(sender_id: AccountId, protocol_version: ProtocolVersion) -
// Chain must be sharded to test cross-shard congestion control.
genesis.config.shard_layout = ShardLayout::v1_test();

let mut config = RuntimeConfig::test();
let mut config = RuntimeConfig::test_protocol_version(protocol_version);
adjust_runtime_config(&mut config);
let runtime_configs = vec![RuntimeConfigStore::with_one_config(config)];

Expand Down Expand Up @@ -515,11 +515,16 @@ fn submit_n_cheap_fns(
/// with remote traffic.
#[test]
fn test_transaction_limit_for_local_congestion() {
init_test_logger();

if !ProtocolFeature::CongestionControl.enabled(PROTOCOL_VERSION) {
return;
}
let runtime_config_store = RuntimeConfigStore::new(None);
let config = runtime_config_store.get_config(PROTOCOL_VERSION);

// Fix the initial configuration of congestion control for the tests.
let protocol_version = ProtocolFeature::CongestionControl.protocol_version();
let config = runtime_config_store.get_config(protocol_version);
// We don't want to go into the TX rejection limit in this test.
let upper_limit_congestion = config.congestion_control_config.reject_tx_congestion_threshold;

Expand All @@ -528,7 +533,7 @@ fn test_transaction_limit_for_local_congestion() {
let contract_id: AccountId = CONTRACT_ID.parse().unwrap();
let sender_id = contract_id.clone();
let dummy_receiver: AccountId = "a_dummy_receiver".parse().unwrap();
let env = setup_test_runtime("test0".parse().unwrap(), PROTOCOL_VERSION);
let env = setup_test_runtime("test0".parse().unwrap(), protocol_version);

let (
remote_tx_included_without_congestion,
Expand Down
8 changes: 4 additions & 4 deletions pytest/tests/sanity/congestion_control.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,6 @@
import unittest
import pathlib
import sys
import json
import time
import threading

Expand Down Expand Up @@ -141,7 +140,7 @@ def __run_under_congestion(self, node):

def __run_after_congestion(self, node):
logger.info("Checking the chain after congestion")
for height, hash in poll_blocks(node, __target=50):
for height, hash in poll_blocks(node, __target=100):
chunk = self.__get_chunk(node, hash, 0)

gas_used = chunk['header']['gas_used']
Expand Down Expand Up @@ -200,8 +199,10 @@ def __start_load(self, node: BaseNode, accounts):
self.finished = False
self.lock = threading.Lock()

self.txs = []
target_account = accounts[0]
for account in accounts:
# Spawn two threads per account to get more transactions in.
for account in accounts + accounts:
thread = threading.Thread(
target=self.__load,
args=[node, account, target_account],
Expand All @@ -221,7 +222,6 @@ def __load(self, node: BaseNode, sender_account, target_account):
logger.debug(
f"Starting load thread {sender_account.account_id} -> {target_account.account_id}"
)
self.txs = []
while not self.finished:
tx_hash = self.__call_contract(node, sender_account, target_account)
with self.lock:
Expand Down
Loading