feat: ⏫ upgrade to Polkadot SDK stable2407 (#222)
* refactor: 🎨 moved runtime apis into their own file

* fix: ⬆️ fix upgrade to polkadot-sdk v1.10.0

* fix: 🐛 add removed runtime api `get_worst_case_scenario_slashable_amount`

* fix: 🐛 add missing generic in storage providers runtime api

* chore: 🏷️ run typegen

* style: 🚨 run cargo fmt

* fix: 🎨 fix cargo clippy

* fix: 🚑 fix mocked relay chain randomness

* style: 🚨 run cargo fmt

* chore: 🚨 temporarily remove unused import (until v1.13.0)

* fix: 🩹 update `query_earliest_file_volunteer_tick` runtime api

* fix: 🚑 fix issues with merge from main

* style: 🚨 run cargo fmt

* feat: 📦 initial update to polkadot sdk v1.11.0

* feat: ⬆️ finish upgrade to polkadot sdk v1.11.0

* chore: 🏷️ run typegen

* chore: 🏷️ run typegen

* fix: ✅ fix node tests after rebenchmark of balances pallet in v1.11.0

* feat: ⬆️ update to Polkadot SDK v1.12.0

* feat: ⬆️ update to Polkadot SDK v1.13.0

* fix: ⬆️ finish upgrading to Polkadot SDK v1.13.0

* fix: 🐛 add missing imports (and run typegen)

* fix: ⬇️ rollback polkadotjs api dependencies

* chore: 🏷️ rerun pnpm typegen

* feat: 🚧 start upgrade to Polkadot SDK 1.14.0

* fix: 🚨 format and lint

* fix: 🚨 remove trailing whitespace

* feat: ⬆️ upgrade to Polkadot SDK v1.14.0

* chore: 🏷️ run typegen

* fix: 🚨 run cargo fmt

* docs: 🔥 remove wrong license docs

* feat: ⬆️ update Polkadot SDK to stable2407

* chore: 🏷️ run typegen

* chore: 🏷️ run typegen

* fix: ✅ fix bsp-threshold test

* fix: 💚 try to fix tests in CI

* fix: ✅ try again to fix the integration tests in the CI

* test: 🧪 fix batch file confirm storing test

* fix: ✅ remove unused imports from volunteer test

* fix: ✅ add wait for file in file storage success and fix volunteer test

* fix: ✅ wait more time for bsp volunteering

* test: 🧪 fix a few issues

* fix: ✅ maybe (hopefully) finish fixing tests

* fix: ✅ fix new bsps not catching up to the tip of the chain and missing events in tests

* test: ✅ disconnect api

* test: 🐛 fix timeout bug

* test: ✅ add initialization wait + fix single bsp initialized network

* chore: 🚨 removed unused variables

* test: 🐛 initialised bsp net now correctly waits for bsp to store file

* test: 🐛 add missing wait for chain tip sync on test

* fix: 🩹 Remove sleeps from single tests, in favour of putting the wait in setup

* fix: 🚨 Fix TS typecheck

* fix: 🐛 Wait for sync in onboard tests

* fix: 🚨 Remove unused `sleep` import

* test: ✅ Remove second sealBlock after file deletion extrinsics, which was causing a race condition

* fix: 🐛 Make blockchain service check pending forest root writes when releasing lock

* test: ✅ add `bspFileDeletionCompleted` wait using polling and use it in debt collection tests

* test: ✅ Fix volunteer test, which after a recent optimisation no longer requires an extra sealBlock

* fix: 🔥 remove leftover condition from payment stream deletion

* fix: 🐛 Delete payment stream when executing stop storing for insolvent user

---------

Co-authored-by: Facundo Farall <37149322+ffarall@users.noreply.github.com>
TDemeco and ffarall authored Oct 11, 2024
1 parent a72381d commit ca4749d
Showing 29 changed files with 3,572 additions and 3,722 deletions.
5,505 changes: 2,509 additions & 2,996 deletions Cargo.lock

Large diffs are not rendered by default.

206 changes: 103 additions & 103 deletions Cargo.toml

Large diffs are not rendered by default.

2 changes: 1 addition & 1 deletion api-augment/storagehub.json

Large diffs are not rendered by default.

23 changes: 23 additions & 0 deletions client/blockchain-service/src/commands.rs
@@ -183,6 +183,10 @@ pub enum BlockchainServiceCommand {
callback:
tokio::sync::oneshot::Sender<Result<MainStorageProviderId, QueryMspIdOfBucketIdError>>,
},
ReleaseForestRootWriteLock {
forest_root_write_tx: tokio::sync::oneshot::Sender<()>,
callback: tokio::sync::oneshot::Sender<Result<()>>,
},
}

/// Interface for interacting with the BlockchainService actor.
@@ -361,6 +365,12 @@
&self,
bucket_id: BucketId,
) -> Result<MainStorageProviderId, QueryMspIdOfBucketIdError>;

/// Helper function to release the forest root write lock.
async fn release_forest_root_write_lock(
&self,
forest_root_write_tx: tokio::sync::oneshot::Sender<()>,
) -> Result<()>;
}

/// Implement the BlockchainServiceInterface for the ActorHandle<BlockchainService>.
@@ -807,4 +817,17 @@ impl BlockchainServiceInterface for ActorHandle<BlockchainService> {
self.send(message).await;
rx.await.expect("Failed to receive response from BlockchainService. Probably means BlockchainService has crashed.")
}

async fn release_forest_root_write_lock(
&self,
forest_root_write_tx: tokio::sync::oneshot::Sender<()>,
) -> Result<()> {
let (callback, rx) = tokio::sync::oneshot::channel();
let message = BlockchainServiceCommand::ReleaseForestRootWriteLock {
forest_root_write_tx,
callback,
};
self.send(message).await;
rx.await.expect("Failed to receive response from BlockchainService. Probably means BlockchainService has crashed.")
}
}
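
The new `ReleaseForestRootWriteLock` command and the `release_forest_root_write_lock` helper centralise the release of the forest root write "lock", which in these tasks is represented by a `tokio::sync::oneshot::Sender<()>`. Below is a minimal, self-contained sketch of that oneshot-sender-as-lock pattern (names are illustrative only, not the StorageHub API, and it assumes the tokio crate with its full feature set): the grantor keeps the receiver, the holder keeps the sender, and sending `()` or simply dropping the sender signals the release.

```rust
use tokio::sync::oneshot;

#[tokio::main]
async fn main() {
    // One channel per grant: the task holding `lock_tx` effectively holds the lock.
    let (lock_tx, lock_rx) = oneshot::channel::<()>();

    // The holder performs its work, then releases the lock by sending ().
    let holder = tokio::spawn(async move {
        // ... do work that requires exclusive access to the forest root ...
        let _ = lock_tx.send(());
    });

    // The grantor learns of the release when the receiver resolves. A dropped
    // sender resolves the receiver with an error, which still counts as released.
    let _ = lock_rx.await;
    holder.await.expect("holder task panicked");
}
```

Routing the release through the BlockchainService actor, rather than having each task send on the channel directly, is what allows the service to immediately hand the lock to any pending write request (see the handler change below).
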
25 changes: 25 additions & 0 deletions client/blockchain-service/src/handler.rs
@@ -888,6 +888,31 @@ impl Actor for BlockchainService {
}
}
}
BlockchainServiceCommand::ReleaseForestRootWriteLock {
forest_root_write_tx,
callback,
} => {
// Release the forest root write "lock".
let forest_root_write_result = forest_root_write_tx.send(()).map_err(|e| {
error!(target: LOG_TARGET, "CRITICAL❗️❗️ This is a bug! Failed to release forest root write lock. This is a critical bug. Please report it to the StorageHub team. \nError while sending the release message: {:?}", e);
anyhow!(
"CRITICAL❗️❗️ This is a bug! Failed to release forest root write lock. This is a critical bug. Please report it to the StorageHub team."
)
});

// Check if there are any pending requests to use the forest root write lock.
// If so, we give them the lock right away.
if forest_root_write_result.is_ok() {
self.check_pending_forest_root_writes();
}

match callback.send(forest_root_write_result) {
Ok(_) => {}
Err(e) => {
error!(target: LOG_TARGET, "Failed to send forest write lock release result: {:?}", e);
}
}
}
}
}
}
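
The internals of `check_pending_forest_root_writes` are not part of this diff. The sketch below is only a hypothetical illustration of the behaviour described in the comment above (on release, hand the lock straight to the next waiter, if any); the struct and its fields are invented for the example and do not reflect the real BlockchainService state.

```rust
use std::collections::VecDeque;
use tokio::sync::oneshot;

/// Hypothetical state, not the real BlockchainService fields.
#[derive(Default)]
struct ForestRootWriteLockState {
    /// Waiters queued for the lock; each one receives the `Sender<()>` it must
    /// later fire (or drop) to release the lock.
    pending: VecDeque<oneshot::Sender<oneshot::Sender<()>>>,
    /// Receiver half of the currently granted lock, if any.
    granted: Option<oneshot::Receiver<()>>,
}

impl ForestRootWriteLockState {
    /// Called right after a release: grant the lock to the next live waiter.
    fn check_pending_forest_root_writes(&mut self) {
        while let Some(waiter) = self.pending.pop_front() {
            let (release_tx, release_rx) = oneshot::channel();
            if waiter.send(release_tx).is_ok() {
                // The waiter is alive and now holds the lock.
                self.granted = Some(release_rx);
                return;
            }
            // The waiter went away; try the next one.
        }
        // Nobody is waiting: the lock stays free.
        self.granted = None;
    }
}
```
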
1 change: 1 addition & 0 deletions client/blockchain-service/src/utils.rs
@@ -544,6 +544,7 @@ impl BlockchainService {
.pending_confirm_storing_request_deque()
.pop_front()
{
trace!(target: LOG_TARGET, "Processing confirm storing request for file [{:?}]", request.file_key);
confirm_storing_requests.push(request);
} else {
break;
11 changes: 3 additions & 8 deletions node/src/service.rs
@@ -48,7 +48,6 @@ use sc_network::{
config::IncomingRequest, service::traits::NetworkService, NetworkBackend, NetworkBlock,
ProtocolName,
};
use sc_network_sync::SyncingService;
use sc_service::{Configuration, PartialComponents, RpcHandlers, TFullBackend, TaskManager};
use sc_telemetry::{Telemetry, TelemetryHandle, TelemetryWorker, TelemetryWorkerHandle};
use sc_transaction_pool_api::OffchainTransactionPoolFactory;
@@ -914,7 +913,6 @@
&task_manager,
relay_chain_interface.clone(),
transaction_pool,
sync_service.clone(),
params.keystore_container.keystore(),
relay_chain_slot_duration,
para_id,
@@ -966,7 +964,6 @@ fn start_consensus(
task_manager: &TaskManager,
relay_chain_interface: Arc<dyn RelayChainInterface>,
transaction_pool: Arc<sc_transaction_pool::FullPool<Block, ParachainClient>>,
sync_oracle: Arc<SyncingService<Block>>,
keystore: KeystorePtr,
relay_chain_slot_duration: Duration,
para_id: ParaId,
@@ -1008,7 +1005,6 @@
.ok()
.map(|c| ValidationCode::from(c).hash())
},
sync_oracle,
keystore,
collator_key,
para_id,
@@ -1020,10 +1016,9 @@
reinitialize: false,
};

let fut =
aura::run::<Block, sp_consensus_aura::sr25519::AuthorityPair, _, _, _, _, _, _, _, _, _>(
params,
);
let fut = aura::run::<Block, sp_consensus_aura::sr25519::AuthorityPair, _, _, _, _, _, _, _, _>(
params,
);
task_manager
.spawn_essential_handle()
.spawn("aura", None, fut);
15 changes: 5 additions & 10 deletions node/src/tasks/bsp_charge_fees.rs
@@ -346,16 +346,11 @@
}
}

// Release the forest root write "lock".
let forest_root_write_result = forest_root_write_tx.send(());
if forest_root_write_result.is_err() {
error!(target: LOG_TARGET, "CRITICAL❗️❗️ This is a bug! Failed to release forest root write lock. This is a critical bug. Please report it to the StorageHub team.");
return Err(anyhow!(
"CRITICAL❗️❗️ This is a bug! Failed to release forest root write lock."
));
}

Ok(())
// Release the forest root write "lock" and finish the task.
self.storage_hub_handler
.blockchain
.release_forest_root_write_lock(forest_root_write_tx)
.await
}
}

15 changes: 5 additions & 10 deletions node/src/tasks/bsp_submit_proof.rs
@@ -295,16 +295,11 @@
self.check_provider_root(event.data.provider_id).await?;
}

// Release the forest root write "lock".
let forest_root_write_result = forest_root_write_tx.send(());
if forest_root_write_result.is_err() {
error!(target: LOG_TARGET, "CRITICAL❗️❗️ This is a bug! Failed to release forest root write lock. This is a critical bug. Please report it to the StorageHub team.");
return Err(anyhow!(
"CRITICAL❗️❗️ This is a bug! Failed to release forest root write lock."
));
}

Ok(())
// Release the forest root write "lock" and finish the task.
self.storage_hub_handler
.blockchain
.release_forest_root_write_lock(forest_root_write_tx)
.await
}
}

15 changes: 5 additions & 10 deletions node/src/tasks/bsp_upload_file.rs
@@ -402,16 +402,11 @@
.insert_files_metadata(file_metadatas.as_slice())?;
}

// Release the forest root write "lock".
let forest_root_write_result = forest_root_write_tx.send(());
if forest_root_write_result.is_err() {
error!(target: LOG_TARGET, "CRITICAL❗️❗️ This is a bug! Failed to release forest root write lock. This is a critical bug. Please report it to the StorageHub team.");
return Err(anyhow!(
"CRITICAL❗️❗️ This is a bug! Failed to release forest root write lock."
));
}

Ok(())
// Release the forest root write "lock" and finish the task.
self.storage_hub_handler
.blockchain
.release_forest_root_write_lock(forest_root_write_tx)
.await
}
}

8 changes: 5 additions & 3 deletions node/src/tasks/msp_upload_file.rs
@@ -566,9 +566,11 @@
.watch_for_success(&self.storage_hub_handler.blockchain)
.await?;

let _ = forest_root_write_tx.send(());

Ok(())
// Release the forest root write "lock" and finish the task.
self.storage_hub_handler
.blockchain
.release_forest_root_write_lock(forest_root_write_tx)
.await
}
}

24 changes: 24 additions & 0 deletions pallets/file-system/src/utils.rs
@@ -1734,6 +1734,18 @@
// Update root of the BSP.
<T::Providers as shp_traits::MutateProvidersInterface>::update_root(sp_id, new_root)?;

// Delete payment stream between this BSP and this user (also charge it for all the owed funds
// of all files that were stored by this BSP).
if <T::PaymentStreams as PaymentStreamsInterface>::get_dynamic_rate_payment_stream_info(
&sp_id, &owner,
)
.is_some()
{
<T::PaymentStreams as PaymentStreamsInterface>::delete_dynamic_rate_payment_stream(
&sp_id, &owner,
)?;
}

new_root
} else {
// If the Provider is a MSP, the proof is verified against the Bucket's root.
@@ -1780,6 +1792,18 @@
bucket_id, new_root,
)?;

// Delete payment stream between this MSP and this user (also charge it for all the owed funds
// of all files that were stored by this MSP).
if <T::PaymentStreams as PaymentStreamsInterface>::get_fixed_rate_payment_stream_info(
&sp_id, &owner,
)
.is_some()
{
<T::PaymentStreams as PaymentStreamsInterface>::delete_fixed_rate_payment_stream(
&sp_id, &owner,
)?;
}

new_root
};

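Both branches above apply the same guard: the payment stream is deleted only if one actually exists between the provider and the user, so providers that never opened a stream with that user do not make the call fail. A simplified, runnable sketch of that guard, using an in-memory map as a hypothetical stand-in for the pallet's `PaymentStreamsInterface`:

```rust
use std::collections::HashMap;

/// Hypothetical in-memory stand-in for the pallet's payment stream storage.
#[derive(Default)]
struct PaymentStreams {
    /// (provider, user) -> amount currently provided under the dynamic-rate stream.
    dynamic_rate: HashMap<(u32, u32), u128>,
}

impl PaymentStreams {
    fn get(&self, provider: u32, user: u32) -> Option<u128> {
        self.dynamic_rate.get(&(provider, user)).copied()
    }

    /// Charges the user for what is owed (elided here) and removes the stream.
    fn delete(&mut self, provider: u32, user: u32) -> Result<(), &'static str> {
        self.dynamic_rate
            .remove(&(provider, user))
            .map(|_| ())
            .ok_or("stream does not exist")
    }
}

/// The guard used above: only delete when a stream exists, otherwise do nothing.
fn delete_stream_if_present(
    streams: &mut PaymentStreams,
    provider: u32,
    user: u32,
) -> Result<(), &'static str> {
    if streams.get(provider, user).is_some() {
        streams.delete(provider, user)?;
    }
    Ok(())
}

fn main() {
    let mut streams = PaymentStreams::default();
    streams.dynamic_rate.insert((1, 42), 1_000);

    // Deleting an existing stream succeeds; a missing one is simply skipped.
    assert!(delete_stream_if_present(&mut streams, 1, 42).is_ok());
    assert!(delete_stream_if_present(&mut streams, 2, 42).is_ok());
}
```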