diff --git a/Cargo.lock b/Cargo.lock index 3b6c2288..f8cf6416 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5912,12 +5912,19 @@ dependencies = [ "frame-support 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.38)", "frame-system 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.38)", "log", + "orml-currencies", + "orml-tokens", + "orml-traits", "pallet-balances", "pallet-timestamp", "pallet-valve", "pallet-xcm", + "pallet-xcmp-handler", + "parachain-info", "parity-scale-codec", "polkadot-parachain", + "primitives", + "rand 0.7.3", "scale-info", "serde", "sp-core 7.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.38)", diff --git a/pallets/automation-price/Cargo.toml b/pallets/automation-price/Cargo.toml index a4fc67c8..4be6c08c 100644 --- a/pallets/automation-price/Cargo.toml +++ b/pallets/automation-price/Cargo.toml @@ -29,6 +29,11 @@ xcm = { git = "https://github.com/paritytech/polkadot", default-features = false cumulus-pallet-xcm = { git = 'https://github.com/paritytech/cumulus', default-features = false, branch = 'polkadot-v0.9.38' } cumulus-primitives-core = { git = 'https://github.com/paritytech/cumulus', default-features = false, branch = 'polkadot-v0.9.38' } +## ORML +orml-traits = { git = "https://github.com/open-web3-stack/open-runtime-module-library", default-features = false, branch = "polkadot-v0.9.38" } +orml-currencies = { git = "https://github.com/open-web3-stack/open-runtime-module-library", default-features = false, branch = "polkadot-v0.9.38" } + + # Substrate Dependencies ## Substrate Primitive Dependencies sp-runtime = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "polkadot-v0.9.38" } @@ -42,19 +47,31 @@ frame-system = { git = "https://github.com/paritytech/substrate", default-featur ## Substrate Pallet Dependencies pallet-timestamp = { git = "https://github.com/paritytech/substrate", default-features = false, branch = 
"polkadot-v0.9.38" } +## Polkdadot deps +xcm-builder = { git = "https://github.com/paritytech/polkadot", default-features = false, branch = "release-v0.9.38" } + ## Local pallet-valve = { path = "../valve", default-features = false } +pallet-xcmp-handler = { path = "../xcmp-handler", default-features = false } +primitives = { path = "../../primitives", default-features = false } [dev-dependencies] +rand = { version = "0.7.3" } serde = { version = "1.0.144" } sp-core = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "polkadot-v0.9.38" } sp-io = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "polkadot-v0.9.38" } pallet-balances = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.38" } pallet-xcm = { git = 'https://github.com/paritytech/polkadot', default-features = false, branch = "release-v0.9.38" } -xcm-builder = { git = 'https://github.com/paritytech/polkadot', default-features = false, branch = "release-v0.9.38" } xcm-executor = { git = 'https://github.com/paritytech/polkadot', default-features = false, branch = "release-v0.9.38" } +# Cumulus dependencies +parachain-info = { git = 'https://github.com/paritytech/cumulus', branch = 'polkadot-v0.9.38' } + +orml-currencies = { git = "https://github.com/open-web3-stack/open-runtime-module-library", default-features = false, branch = "polkadot-v0.9.38" } +orml-tokens = { git = "https://github.com/open-web3-stack/open-runtime-module-library", default-features = false, branch = "polkadot-v0.9.38" } + + [features] default = ["std"] runtime-benchmarks = ["frame-benchmarking"] @@ -66,6 +83,9 @@ std = [ "frame-benchmarking/std", "frame-support/std", "frame-system/std", + "orml-currencies/std", + "orml-tokens/std", + "orml-traits/std", "pallet-timestamp/std", "pallet-valve/std", "pallet-xcm/std", diff --git a/pallets/automation-price/src/fees.rs b/pallets/automation-price/src/fees.rs index f29efc3d..14ab9ef3 100644 --- 
a/pallets/automation-price/src/fees.rs +++ b/pallets/automation-price/src/fees.rs @@ -16,68 +16,147 @@ // limitations under the License. /// ! Traits and default implementation for paying execution fees. -use crate::{BalanceOf, Config}; +use crate::{AccountOf, Action, ActionOf, Config, Error, MultiBalanceOf}; +use orml_traits::MultiCurrency; +use pallet_xcmp_handler::{InstructionSequence, XcmpTransactor}; use sp_runtime::{ - traits::{CheckedSub, Zero}, - DispatchError, + traits::{CheckedSub, Convert, Saturating, Zero}, + DispatchError, DispatchResult, SaturatedConversion, TokenError::BelowMinimum, }; use sp_std::marker::PhantomData; +use xcm::latest::prelude::*; +use xcm_builder::TakeRevenue; -use frame_support::traits::{Currency, ExistenceRequirement, OnUnbalanced, WithdrawReasons}; - -type NegativeImbalanceOf = <::Currency as Currency< - ::AccountId, ->>::NegativeImbalance; - -/// Handle withdrawing, refunding and depositing of transaction fees. +/// Handle execution fee payments in the context of automation actions pub trait HandleFees { - /// Ensure the fee can be paid. - fn can_pay_fee(who: &T::AccountId, fee: BalanceOf) -> Result<(), DispatchError>; - - /// Once the task has been scheduled we need to charge for the execution cost. 
- fn withdraw_fee(who: &T::AccountId, fee: BalanceOf) -> Result<(), DispatchError>; + fn pay_checked_fees_for Result>( + owner: &AccountOf, + action: &ActionOf, + executions: u32, + prereq: F, + ) -> Result; +} +pub struct FeeHandler { + owner: T::AccountId, + pub schedule_fee_location: MultiLocation, + pub schedule_fee_amount: MultiBalanceOf, + pub execution_fee_amount: MultiBalanceOf, + _phantom_data: PhantomData, } -pub struct FeeHandler(PhantomData); +impl HandleFees for FeeHandler +where + T: Config, + TR: TakeRevenue, +{ + fn pay_checked_fees_for Result>( + owner: &AccountOf, + action: &ActionOf, + executions: u32, + prereq: F, + ) -> Result { + let fee_handler = Self::new(owner, action, executions)?; + fee_handler.can_pay_fee().map_err(|_| Error::::InsufficientBalance)?; + let outcome = prereq()?; + fee_handler.pay_fees()?; + Ok(outcome) + } +} -/// Implements the transaction payment for a pallet implementing the `Currency` -/// trait (eg. the pallet_balances) using an unbalance handler (implementing -/// `OnUnbalanced`). -impl HandleFees for FeeHandler +impl FeeHandler where T: Config, - OU: OnUnbalanced>, + TR: TakeRevenue, { - // Ensure the fee can be paid. - fn can_pay_fee(who: &T::AccountId, fee: BalanceOf) -> Result<(), DispatchError> { + /// Ensure the fee can be paid. 
+ fn can_pay_fee(&self) -> Result<(), DispatchError> { + let fee = self.schedule_fee_amount.saturating_add(self.execution_fee_amount); + if fee.is_zero() { return Ok(()) } - let free_balance = T::Currency::free_balance(who); - let new_amount = - free_balance.checked_sub(&fee).ok_or(DispatchError::Token(BelowMinimum))?; - T::Currency::ensure_can_withdraw(who, fee, WithdrawReasons::FEE, new_amount)?; + // Manually check for ExistenceRequirement since MultiCurrency doesn't currently support it + let currency_id = T::CurrencyIdConvert::convert(self.schedule_fee_location) + .ok_or("IncoveribleMultilocation")?; + let currency_id = currency_id.into(); + let free_balance = T::MultiCurrency::free_balance(currency_id, &self.owner); + free_balance + .checked_sub(&fee) + .ok_or(DispatchError::Token(BelowMinimum))? + .checked_sub(&T::MultiCurrency::minimum_balance(currency_id)) + .ok_or(DispatchError::Token(BelowMinimum))?; + T::MultiCurrency::ensure_can_withdraw(currency_id, &self.owner, fee)?; Ok(()) } /// Withdraw the fee. 
- fn withdraw_fee(who: &T::AccountId, fee: BalanceOf) -> Result<(), DispatchError> { + fn withdraw_fee(&self) -> Result<(), DispatchError> { + let fee = self.schedule_fee_amount.saturating_add(self.execution_fee_amount); + if fee.is_zero() { return Ok(()) } - let withdraw_reason = WithdrawReasons::FEE; + let currency_id = T::CurrencyIdConvert::convert(self.schedule_fee_location) + .ok_or("IncoveribleMultilocation")?; + let currency_id = currency_id.into(); + + match T::MultiCurrency::withdraw(currency_id, &self.owner, fee) { + Ok(_) => { + TR::take_revenue(MultiAsset { + id: AssetId::Concrete(self.schedule_fee_location), + fun: Fungibility::Fungible(self.schedule_fee_amount.saturated_into()), + }); + + if self.execution_fee_amount > MultiBalanceOf::::zero() { + T::XcmpTransactor::pay_xcm_fee( + self.owner.clone(), + self.execution_fee_amount.saturated_into(), + )?; + } - match T::Currency::withdraw(who, fee, withdraw_reason, ExistenceRequirement::KeepAlive) { - Ok(imbalance) => { - OU::on_unbalanceds(Some(imbalance).into_iter()); Ok(()) }, Err(_) => Err(DispatchError::Token(BelowMinimum)), } } + + /// Builds an instance of the struct + pub fn new( + owner: &AccountOf, + action: &ActionOf, + executions: u32, + ) -> Result { + let schedule_fee_location = action.schedule_fee_location::(); + + // TODO: FIX THIS BEFORE MERGE + let schedule_fee_amount: u128 = 1_000; + //Pallet::::calculate_schedule_fee_amount(action, executions)?.saturated_into(); + + let execution_fee_amount = match action.clone() { + Action::XCMP { execution_fee, instruction_sequence, .. 
} + if instruction_sequence == InstructionSequence::PayThroughSovereignAccount => + execution_fee.amount.saturating_mul(executions.into()).saturated_into(), + _ => 0u32.saturated_into(), + }; + + Ok(Self { + owner: owner.clone(), + schedule_fee_location, + schedule_fee_amount: schedule_fee_amount.saturated_into(), + execution_fee_amount, + _phantom_data: Default::default(), + }) + } + + /// Executes the fee handler + fn pay_fees(self) -> DispatchResult { + // This should never error if can_pay_fee passed. + self.withdraw_fee().map_err(|_| Error::::LiquidityRestrictions)?; + Ok(()) + } } diff --git a/pallets/automation-price/src/lib.rs b/pallets/automation-price/src/lib.rs index 909389b7..1f50ceca 100644 --- a/pallets/automation-price/src/lib.rs +++ b/pallets/automation-price/src/lib.rs @@ -33,28 +33,51 @@ pub use pallet::*; pub mod weights; +pub mod types; +pub use types::*; + mod fees; + +#[cfg(test)] +mod mock; +#[cfg(test)] +mod tests; + pub use fees::*; -use core::convert::TryInto; -use cumulus_pallet_xcm::Origin as CumulusOrigin; +use codec::Decode; +use core::convert::{TryFrom, TryInto}; +use cumulus_primitives_core::InteriorMultiLocation; + +use cumulus_primitives_core::ParaId; use frame_support::{ pallet_prelude::*, - sp_runtime::traits::Hash, traits::{Currency, ExistenceRequirement}, - transactional, BoundedVec, + transactional, }; -use frame_system::{pallet_prelude::*, Config as SystemConfig}; +use frame_system::pallet_prelude::*; +use orml_traits::{FixedConversionRateProvider, MultiCurrency}; use pallet_timestamp::{self as timestamp}; -use scale_info::TypeInfo; +use scale_info::{prelude::format, TypeInfo}; use sp_runtime::{ - traits::{SaturatedConversion, Saturating}, + traits::{Convert, SaturatedConversion, Saturating}, Perbill, }; -use sp_std::{vec, vec::Vec}; +use sp_std::{ + boxed::Box, + collections::btree_map::BTreeMap, + ops::Bound::{Excluded, Included}, + vec, + vec::Vec, +}; +pub use pallet_xcmp_handler::InstructionSequence; +use 
primitives::EnsureProxy; pub use weights::WeightInfo; +use pallet_xcmp_handler::XcmpTransactor; +use xcm::{latest::prelude::*, VersionedMultiLocation}; + #[frame_support::pallet] pub mod pallet { use super::*; @@ -62,85 +85,62 @@ pub mod pallet { pub type AccountOf = ::AccountId; pub type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; + pub type MultiBalanceOf = <::MultiCurrency as MultiCurrency< + ::AccountId, + >>::Balance; + pub type ActionOf = Action, BalanceOf>; + + pub type MultiCurrencyId = <::MultiCurrency as MultiCurrency< + ::AccountId, + >>::CurrencyId; + type UnixTime = u64; - type AssetName = Vec; - type AssetDirection = Direction; - type AssetPrice = u128; - type AssetPercentage = u128; + pub type TaskId = Vec; + pub type TaskIdList = Vec; - #[derive(Clone, Debug, Eq, PartialEq, Encode, Decode, TypeInfo)] - pub enum Direction { - Up, - Down, - } + // TODO: Cleanup before merge + type ChainName = Vec; + type Exchange = Vec; - /// The enum that stores all action specific data. - #[derive(Clone, Debug, Eq, PartialEq, Encode, Decode, TypeInfo)] - #[scale_info(skip_type_params(T))] - pub enum Action { - NativeTransfer { sender: AccountOf, recipient: AccountOf, amount: BalanceOf }, - } + type AssetName = Vec; + type AssetPair = (AssetName, AssetName); + type AssetPrice = u128; + type TriggerFunction = Vec; /// The struct that stores all information needed for a task. 
- #[derive(Debug, Eq, Encode, Decode, TypeInfo)] + #[derive(Debug, Eq, Encode, Decode, TypeInfo, Clone)] #[scale_info(skip_type_params(T))] pub struct Task { - owner_id: AccountOf, - provided_id: Vec, - asset: AssetName, - direction: AssetDirection, - trigger_percentage: AssetPercentage, - action: Action, + // origin data from the account schedule the tasks + pub owner_id: AccountOf, + + // generated data + pub task_id: TaskId, + + // user input data + pub chain: ChainName, + pub exchange: Exchange, + pub asset_pair: AssetPair, + pub expired_at: u128, + + // TODO: Maybe expose enum? + pub trigger_function: Vec, + pub trigger_params: Vec, + pub action: ActionOf, } /// Needed for assert_eq to compare Tasks in tests due to BoundedVec. impl PartialEq for Task { fn eq(&self, other: &Self) -> bool { + // TODO: correct this self.owner_id == other.owner_id && - self.provided_id == other.provided_id && - self.asset == other.asset && - self.direction == other.direction && - self.trigger_percentage == other.trigger_percentage - } - } - - impl Task { - pub fn create_event_task( - owner_id: AccountOf, - provided_id: Vec, - asset: AssetName, - direction: AssetDirection, - trigger_percentage: AssetPercentage, - recipient: AccountOf, - amount: BalanceOf, - ) -> Task { - let action = Action::NativeTransfer { sender: owner_id.clone(), recipient, amount }; - Task:: { owner_id, provided_id, asset, direction, trigger_percentage, action } + self.task_id == other.task_id && + self.asset_pair == other.asset_pair && + self.trigger_function == other.trigger_function && + self.trigger_params == other.trigger_params } } - #[derive(Debug, Encode, Decode, TypeInfo)] - #[scale_info(skip_type_params(T))] - pub struct TaskHashInput { - owner_id: AccountOf, - provided_id: Vec, - } - - impl TaskHashInput { - pub fn create_hash_input(owner_id: AccountOf, provided_id: Vec) -> TaskHashInput { - TaskHashInput:: { owner_id, provided_id } - } - } - - #[derive(Debug, Encode, Decode, TypeInfo)] - 
#[scale_info(skip_type_params(T))] - pub struct AssetMetadatum { - upper_bound: u16, - lower_bound: u8, - expiration_period: UnixTime, - asset_sudo: AccountOf, - } - #[pallet::config] pub trait Config: frame_system::Config + pallet_timestamp::Config { type RuntimeEvent: From> + IsType<::RuntimeEvent>; @@ -166,11 +166,45 @@ pub mod pallet { /// The Currency type for interacting with balances type Currency: Currency; + /// The MultiCurrency type for interacting with balances + type MultiCurrency: MultiCurrency; + + /// The currencyIds that our chain supports. + type CurrencyId: Parameter + + Member + + Copy + + MaybeSerializeDeserialize + + Ord + + TypeInfo + + MaxEncodedLen + + From> + + Into> + + From; + + /// Converts CurrencyId to Multiloc + type CurrencyIdConvert: Convert> + + Convert>; + /// Handler for fees type FeeHandler: HandleFees; - type Origin: From<::RuntimeOrigin> - + Into::Origin>>; + //type Origin: From<::RuntimeOrigin> + // + Into::Origin>>; + + /// Converts between comparable currencies + type FeeConversionRateProvider: FixedConversionRateProvider; + + /// This chain's Universal Location. + type UniversalLocation: Get; + + //The paraId of this chain. 
+ type SelfParaId: Get; + + /// Utility for sending XCM messages + type XcmpTransactor: XcmpTransactor; + + /// Ensure proxy + type EnsureProxy: primitives::EnsureProxy; } #[pallet::pallet] @@ -178,57 +212,105 @@ pub mod pallet { #[pallet::generate_store(pub(super) trait Store)] pub struct Pallet(_); + // TODO: Cleanup before merge + #[derive(Debug, Encode, Decode, TypeInfo)] + #[scale_info(skip_type_params(T))] + pub struct RegistryInfo { + round: u128, + decimal: u8, + last_update: u64, + oracle_providers: Vec>, + } + + // TODO: Use a ring buffer to also store last n history data effectively + #[derive(Debug, Encode, Decode, TypeInfo)] + #[scale_info(skip_type_params(T))] + pub struct PriceData { + pub round: u128, + pub nonce: u128, + pub amount: u128, + } + + // AssetRegistry holds information and metadata about the asset we support #[pallet::storage] - #[pallet::getter(fn get_scheduled_tasks)] - pub type ScheduledTasks = StorageNMap< + #[pallet::getter(fn get_asset_registry_info)] + pub type AssetRegistry = StorageNMap< _, ( - NMapKey, - NMapKey, - NMapKey, + NMapKey, + NMapKey, + NMapKey, ), - BoundedVec, + RegistryInfo, >; + // PriceRegistry holds price only information for the asset we support #[pallet::storage] - #[pallet::getter(fn get_scheduled_asset_period_reset)] - pub type ScheduledAssetDeletion = - StorageMap<_, Twox64Concat, UnixTime, Vec>; - - #[pallet::storage] - #[pallet::getter(fn get_asset_baseline_price)] - pub type AssetBaselinePrices = StorageMap<_, Twox64Concat, AssetName, AssetPrice>; - - #[pallet::storage] - #[pallet::getter(fn get_asset_price)] - pub type AssetPrices = StorageMap<_, Twox64Concat, AssetName, AssetPrice>; + #[pallet::getter(fn get_asset_price_data)] + pub type PriceRegistry = StorageNMap< + _, + ( + NMapKey, + NMapKey, + NMapKey, + ), + PriceData, + >; + // SortedTasksIndex is our sorted by price task shard + // Each task for a given asset is organized into a BTreeMap + // 
https://doc.rust-lang.org/std/collections/struct.BTreeMap.html#method.insert + // - key: Trigger Price + // - value: vector of task id + // TODO: move these to a trigger model + // TODO: handle task expiration #[pallet::storage] - #[pallet::getter(fn get_task)] - pub type Tasks = StorageNMap< + #[pallet::getter(fn get_sorted_tasks_index)] + pub type SortedTasksIndex = StorageNMap< _, ( - NMapKey, // asset name - NMapKey, // task ID + NMapKey, + NMapKey, + NMapKey, + NMapKey, ), - Task, + BTreeMap, >; #[pallet::storage] - #[pallet::getter(fn get_task_queue)] - pub type TaskQueue = StorageValue<_, Vec<(AssetName, T::Hash)>, ValueQuery>; + #[pallet::getter(fn get_scheduled_asset_period_reset)] + pub type ScheduledAssetDeletion = + StorageMap<_, Twox64Concat, UnixTime, Vec>; + // Tasks hold all active task, look up through (TaskId) #[pallet::storage] - #[pallet::getter(fn is_shutdown)] - pub type Shutdown = StorageValue<_, bool, ValueQuery>; + #[pallet::getter(fn get_task)] + pub type Tasks = StorageMap<_, Twox64Concat, TaskId, Task>; + // All active tasks, but organized by account + // In this storage, we only interested in returning task belong to an account, we also want to + // have fast lookup for task inserted/remove into the storage + // + // We also want to remove the expired task, so by leveraging this + #[pallet::storage] + #[pallet::getter(fn get_account_task_ids)] + pub type AccountTasks = + StorageDoubleMap<_, Twox64Concat, AccountOf, Twox64Concat, TaskId, u128>; + + // TaskQueue stores the task to be executed. 
To run any tasks, they need to be move into this + // queue, from there our task execution pick it up and run it + // + // When task is run, we check the price once more and if it fall out of range, we move the task + // back to the Tasks Registry + // + // If the task is expired, we also won't run #[pallet::storage] - #[pallet::getter(fn get_asset_metadata)] - pub type AssetMetadata = StorageMap<_, Twox64Concat, AssetName, AssetMetadatum>; + #[pallet::getter(fn get_task_queue)] + pub type TaskQueue = StorageValue<_, TaskIdList, ValueQuery>; #[pallet::storage] - #[pallet::getter(fn get_number_of_assets)] - pub type NumberOfAssets = StorageValue<_, u8>; + #[pallet::getter(fn is_shutdown)] + pub type Shutdown = StorageValue<_, bool, ValueQuery>; #[pallet::error] pub enum Error { @@ -240,12 +322,16 @@ pub mod pallet { DuplicateTask, /// Non existent asset AssetNotSupported, + AssetNotInitialized, /// Asset already supported AssetAlreadySupported, + AssetAlreadyInitialized, /// Asset cannot be updated by this account InvalidAssetSudo, + OracleNotAuthorized, /// Asset must be in triggerable range. AssetNotInTriggerableRange, + AssetUpdatePayloadMalform, /// Block Time not set BlockTimeNotSet, /// Invalid Expiration Window for new asset @@ -254,14 +340,18 @@ pub mod pallet { MaxTasksReached, /// Failed to insert task TaskInsertionFailure, + /// Failed to remove task + TaskRemoveFailure, /// Insufficient Balance InsufficientBalance, /// Restrictions on Liquidity in Account LiquidityRestrictions, /// Too Many Assets Created AssetLimitReached, - /// Direction Not Supported - DirectionNotSupported, + + /// The version of the `VersionedMultiLocation` value used is not able + /// to be interpreted. + BadVersion, } #[pallet::event] @@ -270,19 +360,28 @@ pub mod pallet { /// Schedule task success. 
TaskScheduled { who: AccountOf, - task_id: T::Hash, + task_id: TaskId, }, Notify { message: Vec, }, TaskNotFound { - task_id: T::Hash, + task_id: TaskId, }, AssetCreated { - asset: AssetName, + chain: ChainName, + exchange: Exchange, + asset1: AssetName, + asset2: AssetName, + decimal: u8, }, AssetUpdated { - asset: AssetName, + who: AccountOf, + chain: ChainName, + exchange: Exchange, + asset1: AssetName, + asset2: AssetName, + price: u128, }, AssetDeleted { asset: AssetName, @@ -292,11 +391,11 @@ pub mod pallet { }, /// Successfully transferred funds SuccessfullyTransferredFunds { - task_id: T::Hash, + task_id: TaskId, }, /// Transfer Failed TransferFailed { - task_id: T::Hash, + task_id: TaskId, error: DispatchError, }, } @@ -317,48 +416,6 @@ pub mod pallet { #[pallet::call] impl Pallet { - /// Schedule a task to fire an event with a custom message. - /// - /// Schedule a transfer task for price triggers - /// - /// # Parameters - /// * `provided_id`: An id provided by the user. This id must be unique for the user. 
- /// * `asset`: asset type - /// * `direction`: direction of trigger movement - /// * `trigger_percentage`: what percentage task should be triggered at - /// * `recipient`: person to transfer money to - /// * `amount`: amount to transfer - /// - /// # Errors - #[pallet::call_index(0)] - #[pallet::weight(::WeightInfo::schedule_transfer_task_extrinsic())] - #[transactional] - pub fn schedule_transfer_task( - origin: OriginFor, - provided_id: Vec, - asset: AssetName, - direction: AssetDirection, - trigger_percentage: AssetPercentage, - recipient: T::AccountId, - #[pallet::compact] amount: BalanceOf, - ) -> DispatchResult { - let who = ensure_signed(origin)?; - let fee = >::saturated_from(1_000_000_000_000u64); - T::FeeHandler::can_pay_fee(&who, fee).map_err(|_| Error::::InsufficientBalance)?; - Self::validate_and_schedule_task( - who.clone(), - provided_id, - asset, - direction, - trigger_percentage, - recipient, - amount, - )?; - T::FeeHandler::withdraw_fee(&who, fee) - .map_err(|_| Error::::LiquidityRestrictions)?; - Ok(()) - } - /// Initialize an asset /// /// Add a new asset @@ -373,121 +430,106 @@ pub mod pallet { /// /// # Errors #[pallet::call_index(1)] - #[pallet::weight(::WeightInfo::add_asset_extrinsic())] + #[pallet::weight(::WeightInfo::initialize_asset_extrinsic())] #[transactional] - pub fn add_asset( - origin: OriginFor, - asset: AssetName, - target_price: AssetPrice, - upper_bound: u16, - lower_bound: u8, - asset_owner: AccountOf, - expiration_period: UnixTime, + pub fn initialize_asset( + _origin: OriginFor, + chain: Vec, + exchange: Vec, + asset1: AssetName, + asset2: AssetName, + decimal: u8, + asset_owners: Vec>, ) -> DispatchResult { // TODO: needs fees if opened up to non-sudo - ensure_root(origin)?; - if expiration_period % 86400 != 0 { - Err(Error::::InvalidAssetExpirationWindow)? - } - if let Some(_asset_target_price) = Self::get_asset_baseline_price(asset.clone()) { - Err(Error::::AssetAlreadySupported)? 
- } - if let Some(number_of_assets) = Self::get_number_of_assets() { - // TODO: remove hardcoded 2 asset limit - if number_of_assets >= 2 { - Err(Error::::AssetLimitReached)? - } else { - Self::create_new_asset( - asset, - target_price, - upper_bound, - lower_bound, - asset_owner, - expiration_period, - number_of_assets, - )?; - } - } else { - Self::create_new_asset( - asset, - target_price, - upper_bound, - lower_bound, - asset_owner, - expiration_period, - 0, - )?; - } - Ok(()) + // temporary comment out for easiser development + //ensure_root(origin)?; + Self::create_new_asset(chain, exchange, asset1, asset2, decimal, asset_owners)?; + + Ok(().into()) } - /// Post asset update + /// Update prices of multiple asset pairs at the same time /// - /// Update the asset price + /// Only authorized origin can update the price. The authorized origin is set when + /// initializing an asset. /// - /// # Parameters - /// * `asset`: asset type - /// * `value`: value of asset + /// An asset is identified by this tuple: (chain, exchange, (asset1, asset2)). /// - /// # Errors + /// To support updating multiple pairs, each element of the tuple become a separate + /// argument to this function, where as each of these argument is a vector. + /// + /// Every element of each vector arguments, in the same position in the vector form the + /// above tuple. + /// + /// # Parameters + /// * `chains`: a vector of chain names + /// * `exchange`: a vector of exchange name + /// * `asset1`: a vector of asset1 name + /// * `asset2`: a vector of asset2 name + /// * `prices`: a vector of price of asset1, re-present in asset2 + /// * `submitted_at`: a vector of epoch. This epoch is the time when the price is recognized from the oracle provider + /// * `rounds`: a number to re-present which round of the asset price we're updating. 
Unused internally #[pallet::call_index(2)] #[pallet::weight(::WeightInfo::asset_price_update_extrinsic())] #[transactional] - pub fn asset_price_update( + pub fn update_asset_prices( origin: OriginFor, - asset: AssetName, - value: AssetPrice, + chains: Vec, + exchanges: Vec, + assets1: Vec, + assets2: Vec, + prices: Vec, + submitted_at: Vec, + rounds: Vec, ) -> DispatchResult { let who = ensure_signed(origin)?; - if let Some(asset_metadatum) = Self::get_asset_metadata(asset.clone()) { - let asset_sudo: AccountOf = asset_metadatum.asset_sudo; - if asset_sudo != who { - Err(Error::::InvalidAssetSudo)? - } + + if !(chains.len() == exchanges.len() && + exchanges.len() == assets1.len() && + assets1.len() == assets2.len() && + assets2.len() == prices.len() && + prices.len() == submitted_at.len() && + submitted_at.len() == rounds.len()) + { + Err(Error::::AssetUpdatePayloadMalform)? } - let fee = >::saturated_from(1_000_000_000_000u64); - T::FeeHandler::can_pay_fee(&who, fee).map_err(|_| Error::::InsufficientBalance)?; - if let Some(asset_target_price) = Self::get_asset_baseline_price(asset.clone()) { - let last_asset_price: AssetPrice = match Self::get_asset_price(asset.clone()) { - None => Err(Error::::AssetNotSupported)?, - Some(asset_price) => asset_price, - }; - let asset_update_percentage = - Self::calculate_asset_percentage(value, asset_target_price).saturating_add(1); - // NOTE: this is temporarily set to 0 for ease of calculation. Ideally, we can perform - // Self::calculate_asset_percentage(value, last_asset_price) and be able to compare the - // last percentage to the current one. However, calculate_asset_percentage does not return - // a direction. Therefore, let's say base price is 100. Last price is 95, current is 105. - // Since calculate_asset_percentage returns a u128, calculate_asset_percentage will return 5% - // for both, since there's no concept of positive/negative/direction (generic doesn't do just +/-). 
- // Therefore, this function will think 5% -> 5% means no change occurred, but instead we want to - // check 0% -> 5%. Therefore, we always check all the slots from 0% to x% where x is the updated - // percentage. This is less efficient, but more guaranteed. In the future, we will have to return - // direction for calculate_asset_percentage in the future. - let asset_last_percentage = 0; - if value > last_asset_price { - Self::move_scheduled_tasks( - asset.clone(), - asset_last_percentage, - asset_update_percentage, - Direction::Up, - )?; - } else { - Self::move_scheduled_tasks( - asset.clone(), - asset_last_percentage, - asset_update_percentage, - Direction::Down, - )?; + + for (index, price) in prices.clone().iter().enumerate() { + let index: usize = index.try_into().unwrap(); + + let chain = chains[index].clone(); + let exchange = exchanges[index].clone(); + let asset1 = assets1[index].clone(); + let asset2 = assets2[index].clone(); + let round = rounds[index].clone(); + + let key = (&chain, &exchange, (&asset1, &asset2)); + + if !AssetRegistry::::contains_key(&key) { + Err(Error::::AssetNotInitialized)? + } + + if let Some(asset_registry) = Self::get_asset_registry_info(key) { + let allow_wallets: Vec> = asset_registry.oracle_providers; + if !allow_wallets.contains(&who) { + Err(Error::::OracleNotAuthorized)? + } + + // TODO: Add round and nonce check logic + PriceRegistry::::insert(&key, PriceData { round, nonce: 1, amount: *price }); + + Self::deposit_event(Event::AssetUpdated { + who: who.clone(), + chain, + exchange, + asset1, + asset2, + price: *price, + }); } - AssetPrices::::insert(asset.clone(), value); - T::FeeHandler::withdraw_fee(&who, fee) - .map_err(|_| Error::::LiquidityRestrictions)?; - Self::deposit_event(Event::AssetUpdated { asset }); - } else { - Err(Error::::AssetNotSupported)? 
} - Ok(()) + Ok(().into()) } /// Delete an asset @@ -500,27 +542,299 @@ pub mod pallet { #[pallet::call_index(3)] #[pallet::weight(::WeightInfo::delete_asset_extrinsic())] #[transactional] - pub fn delete_asset(origin: OriginFor, asset: AssetName) -> DispatchResult { + pub fn delete_asset( + _origin: OriginFor, + chain: ChainName, + exchange: Exchange, + asset1: AssetName, + asset2: AssetName, + ) -> DispatchResult { // TODO: needs fees if opened up to non-sudo - ensure_root(origin)?; - if let Some(_asset_target_price) = Self::get_asset_baseline_price(asset.clone()) { - AssetBaselinePrices::::remove(asset.clone()); - AssetPrices::::remove(asset.clone()); - AssetMetadata::::remove(asset.clone()); - Self::delete_asset_tasks(asset.clone()); - if let Some(number_of_assets) = Self::get_number_of_assets() { - let new_number_of_assets = number_of_assets - 1; - NumberOfAssets::::put(new_number_of_assets); - } - Self::deposit_event(Event::AssetDeleted { asset }); + // TODO: add a feature flag so we can toggle in dev build without sudo + //ensure_root(origin)?; + + let key = (chain, exchange, (&asset1, &asset2)); + + // TODO: handle delete + if let Some(_asset_target_price) = Self::get_asset_registry_info(key) { + //Self::delete_asset_tasks(asset.clone()); + Self::deposit_event(Event::AssetDeleted { asset: asset1 }); } else { Err(Error::::AssetNotSupported)? 
} Ok(()) } + + // TODO: correct weight + #[pallet::call_index(4)] + #[pallet::weight(::WeightInfo::schedule_xcmp_task())] + #[transactional] + pub fn schedule_xcmp_task( + origin: OriginFor, + chain: ChainName, + exchange: Exchange, + asset1: AssetName, + asset2: AssetName, + expired_at: u128, + trigger_function: Vec, + trigger_param: Vec, + destination: Box, + schedule_fee: Box, + execution_fee: Box, + encoded_call: Vec, + encoded_call_weight: Weight, + overall_weight: Weight, + ) -> DispatchResult { + // Step 1: + // Build Task and put it into the task registry + // Step 2: + // Put task id on the index + // TODO: the value to be inserted into the BTree should come from a function that + // extract value from param + // + // TODO: HANDLE FEE to see user can pay fee + let who = ensure_signed(origin)?; + let task_id = Self::generate_task_id(); + + let destination = + MultiLocation::try_from(*destination).map_err(|()| Error::::BadVersion)?; + let schedule_fee = + MultiLocation::try_from(*schedule_fee).map_err(|()| Error::::BadVersion)?; + + let action = Action::XCMP { + destination, + schedule_fee, + execution_fee: *execution_fee, + encoded_call, + encoded_call_weight, + overall_weight, + schedule_as: None, + instruction_sequence: InstructionSequence::PayThroughSovereignAccount, + }; + + let task: Task = Task:: { + owner_id: who.clone(), + task_id: task_id.clone(), + chain, + exchange, + asset_pair: (asset1, asset2), + expired_at, + trigger_function, + trigger_params: trigger_param, + action, + }; + + Self::validate_and_schedule_task(task)?; + // TODO withdraw fee + //T::FeeHandler::withdraw_fee(&who, fee).map_err(|_| Error::::InsufficientBalance)?; + Ok(()) + } + + /// TODO: correct weight to use schedule_xcmp_task + /// Schedule a task through XCMP through proxy account to fire an XCMP message with a provided call. + /// + /// Before the task can be scheduled the task must past validation checks. 
+ /// * The transaction is signed + /// * The asset pair is already initialized + /// + /// # Parameters + /// * `chain`: The chain name where we will send the task over + /// * `exchange`: The exchange name on which the asset pair is traded. + /// * `asset1`: The first asset name of the pair whose price triggers the task. + /// * `asset2`: The second asset name of the pair whose price triggers the task. + /// * `expired_at`: the epoch when after that time we will remove the task if it has not been executed yet + /// * `trigger_function`: currently only support `gt` or `lt`. Essentially meaning greater than or less than. + /// * `trigger_params`: a list of parameter to feed into `trigger_function`. with `gt` and `lt` we only need to pass the target price as a single element vector + /// * `schedule_fee`: The payment asset location required for scheduling automation task. + /// * `execution_fee`: The fee will be paid for XCMP execution. + /// * `encoded_call`: Call that will be sent via XCMP to the parachain id provided. + /// * `encoded_call_weight`: Required weight at most the provided call will take. + /// * `overall_weight`: The overall weight in which fees will be paid for XCM instructions. + #[pallet::call_index(5)] + #[pallet::weight(::WeightInfo::schedule_xcmp_task_through_proxy())] + #[transactional] + pub fn schedule_xcmp_task_through_proxy( + origin: OriginFor, + chain: ChainName, + exchange: Exchange, + asset1: AssetName, + asset2: AssetName, + expired_at: u128, + trigger_function: Vec, + trigger_params: Vec, + + destination: Box, + schedule_fee: Box, + execution_fee: Box, + encoded_call: Vec, + encoded_call_weight: Weight, + overall_weight: Weight, + schedule_as: T::AccountId, + ) -> DispatchResult { + let who = ensure_signed(origin)?; + + // Make sure the owner is the proxy account of the user account.
+ T::EnsureProxy::ensure_ok(schedule_as.clone(), who.clone())?; + + let destination = + MultiLocation::try_from(*destination).map_err(|()| Error::::BadVersion)?; + let schedule_fee = + MultiLocation::try_from(*schedule_fee).map_err(|()| Error::::BadVersion)?; + + let action = Action::XCMP { + destination, + schedule_fee, + execution_fee: *execution_fee, + encoded_call, + encoded_call_weight, + overall_weight, + schedule_as: Some(schedule_as), + instruction_sequence: InstructionSequence::PayThroughRemoteDerivativeAccount, + }; + + let task_id = Self::generate_task_id(); + let task: Task = Task:: { + owner_id: who.clone(), + task_id: task_id.clone(), + chain, + exchange, + asset_pair: (asset1, asset2), + expired_at, + trigger_function, + trigger_params, + action, + }; + + Self::validate_and_schedule_task(task)?; + Ok(()) + } + + // When cancel task we removed it from: + // Task Registry + // SortedTasksIndex + // AccountTasks + #[pallet::call_index(6)] + #[pallet::weight(::WeightInfo::cancel_task())] + #[transactional] + pub fn cancel_task(origin: OriginFor, task_id: TaskId) -> DispatchResult { + let who = ensure_signed(origin)?; + + if let Some(task) = Self::get_task(task_id) { + if task.owner_id != who { + // TODO: Fine tune error + Err(Error::::TaskRemoveFailure)? 
+ } + + Tasks::::remove(&task.task_id); + let key = (&task.chain, &task.exchange, &task.asset_pair, &task.trigger_function); + SortedTasksIndex::::remove(&key); + Self::remove_task_from_account(&task); + }; + + Ok(()) + } } impl Pallet { + pub fn generate_task_id() -> TaskId { + let current_block_number = + match TryInto::::try_into(>::block_number()).ok() { + Some(i) => i, + None => 0, + }; + + let tx_id = match >::extrinsic_index() { + Some(i) => i, + None => 0, + }; + + let evt_index = >::event_count(); + + format!("{:}-{:}-{:}", current_block_number, tx_id, evt_index) + .as_bytes() + .to_vec() + } + + // Move task from the SortedTasksIndex into TaskQueue that are ready to be processed + pub fn shift_tasks(max_weight: Weight) -> Weight { + let weight_left: Weight = max_weight; + + // TODO: Look into asset that has price move instead + let ref mut task_to_process: TaskIdList = Vec::new(); + + for key in SortedTasksIndex::::iter_keys() { + let (chain, exchange, asset_pair, trigger_func) = key.clone(); + + // TODO: Swap asset to check pair + let current_price_wrap = + Self::get_asset_price_data((&chain, &exchange, &asset_pair)); + + if current_price_wrap.is_none() { + continue + }; + // Example: sell orders + // + // In the list we had tasks such as + // - task1: sell when price > 10 + // - task2: sell when price > 20 + // - task3: sell when price > 30 + // If price used to be 5, and now it's 15, task1 got run + // If price used to be 5, and now it's 25, task1 and task2 got run + // If price used to be 5, and now it's 35, all tasks are run + // + // Example: buy orders + // + // In the list we had tasks such as + // - task1: buy when price < 10 + // - task2: buy when price < 20 + // - task3: buy when price < 30 + // If price used to be 500, and now it's 25, task3 got run + // If price used to be 500, and now it's 15, task2 and task3 got run + // If price used to be 500, and now it's 5, all tasks are run + // + // TODO: handle atomic and transaction + if let Some(mut
tasks) = Self::get_sorted_tasks_index(&key) { + let current_price = current_price_wrap.unwrap(); + + //Eg sell order, sell when price > + let range; + // TODO: move magic number into a trigger.rs module + if trigger_func == vec![103_u8, 116_u8] { + range = (Excluded(&u128::MIN), Included(&current_price.amount)) + } else { + // Eg buy order, buy when price < + range = (Included(&current_price.amount), Excluded(&u128::MAX)) + }; + + for (&price, task_ids) in (tasks.clone()).range(range) { + // Remove because we map this into task queue + tasks.remove(&price); + let ref mut t = &mut (task_ids.clone()); + task_to_process.append(t); + } + + // all tasks are moved to process, delete the queue + if tasks.is_empty() { + SortedTasksIndex::::remove(&key); + } else { + SortedTasksIndex::::insert(&key, tasks); + } + } + } + + if !task_to_process.is_empty() { + if TaskQueue::::exists() { + let mut old_task = TaskQueue::::get(); + old_task.append(task_to_process); + TaskQueue::::put(old_task); + } else { + TaskQueue::::put(task_to_process); + }; + } + + return weight_left + } + /// Trigger tasks for the block time. /// /// Complete as many tasks as possible given the maximum weight. @@ -531,107 +845,55 @@ pub mod pallet { return weight_left } - // remove assets as necessary - let current_time_slot = match Self::get_current_time_slot() { - Ok(time_slot) => time_slot, - Err(_) => return weight_left, - }; - if let Some(scheduled_deletion_assets) = - Self::get_scheduled_asset_period_reset(current_time_slot) - { - // delete assets' tasks - let asset_reset_weight = ::WeightInfo::reset_asset( - scheduled_deletion_assets.len().saturated_into(), - ); - if weight_left.ref_time() < asset_reset_weight.ref_time() { - return weight_left - } - // TODO: this assumes that all assets that need to be reset in a period can all be done successfully in a block. - // in the future, we need to make sure to be able to break out of for loop if out of weight and continue - // in the next block.
Right now, we will not run out of weight - we will simply not execute anything if - // not all of the asset resets can be run at once. this may cause the asset reset triggers to not go off, - // but at least it should not brick the chain. - for asset in scheduled_deletion_assets { - if let Some(last_asset_price) = Self::get_asset_price(asset.clone()) { - AssetBaselinePrices::::insert(asset.clone(), last_asset_price); - Self::delete_asset_tasks(asset.clone()); - Self::update_asset_reset(asset.clone(), current_time_slot); - Self::deposit_event(Event::AssetPeriodReset { asset }); - }; - } - ScheduledAssetDeletion::::remove(current_time_slot); - weight_left -= asset_reset_weight; - } + Self::shift_tasks(weight_left); - // run as many scheduled tasks as we can + // Now we can run those tasks + // TODO: We need to calculate enough weight and balance the tasks so we won't be skew + // by a particular kind of task asset + // + // Now we run as much task as possible + // If weight is over, task will be picked up next time + // If the price is no longer matched, they will be put back into the TaskRegistry let task_queue = Self::get_task_queue(); + weight_left = weight_left + // for above read .saturating_sub(T::DbWeight::get().reads(1u64)) // For measuring the TaskQueue::::put(tasks_left); .saturating_sub(T::DbWeight::get().writes(1u64)); - if !task_queue.is_empty() { + if task_queue.len() > 0 { let (tasks_left, new_weight_left) = Self::run_tasks(task_queue, weight_left); weight_left = new_weight_left; TaskQueue::::put(tasks_left); } - weight_left - } - pub fn update_asset_reset(asset: AssetName, current_time_slot: u64) { - if let Some(metadata) = Self::get_asset_metadata(asset.clone()) { - let expiration_period: u64 = metadata.expiration_period; - // start new duration - // 1. 
schedule new deletion time - let new_time_slot = current_time_slot.saturating_add(expiration_period); - if let Some(mut future_scheduled_deletion_assets) = - Self::get_scheduled_asset_period_reset(new_time_slot) - { - future_scheduled_deletion_assets.push(asset); - >::insert( - new_time_slot, - future_scheduled_deletion_assets, - ); - } else { - let new_asset_list = vec![asset]; - >::insert(new_time_slot, new_asset_list); - } - }; + weight_left } pub fn create_new_asset( - asset: AssetName, - target_price: AssetPrice, - upper_bound: u16, - lower_bound: u8, - asset_owner: AccountOf, - expiration_period: UnixTime, - number_of_assets: u8, + chain: ChainName, + exchange: Exchange, + asset1: AssetName, + asset2: AssetName, + decimal: u8, + asset_owners: Vec>, ) -> Result<(), DispatchError> { - AssetBaselinePrices::::insert(asset.clone(), target_price); - let asset_metadatum = AssetMetadatum:: { - upper_bound, - lower_bound, - expiration_period, - asset_sudo: asset_owner, - }; - AssetMetadata::::insert(asset.clone(), asset_metadatum); - let new_time_slot = Self::get_current_time_slot()?.saturating_add(expiration_period); - if let Some(mut future_scheduled_deletion_assets) = - Self::get_scheduled_asset_period_reset(new_time_slot) - { - future_scheduled_deletion_assets.push(asset.clone()); - >::insert( - new_time_slot, - future_scheduled_deletion_assets, - ); - } else { - let new_asset_list = vec![asset.clone()]; - >::insert(new_time_slot, new_asset_list); + let key = (&chain, &exchange, (&asset1, &asset2)); + + if AssetRegistry::::contains_key(&key) { + Err(Error::::AssetAlreadyInitialized)? 
} - AssetPrices::::insert(asset.clone(), target_price); - let new_number_of_assets = number_of_assets + 1; - NumberOfAssets::::put(new_number_of_assets); - Self::deposit_event(Event::AssetCreated { asset }); + + let asset_info = RegistryInfo:: { + decimal, + round: 0, + last_update: 0, + oracle_providers: asset_owners, + }; + + AssetRegistry::::insert(key, asset_info); + + Self::deposit_event(Event::AssetCreated { chain, exchange, asset1, asset2, decimal }); Ok(()) } @@ -645,27 +907,11 @@ pub mod pallet { Ok(now.saturating_sub(diff_to_min)) } - pub fn delete_asset_tasks(asset: AssetName) { - // delete scheduled tasks - let _ = ScheduledTasks::::clear_prefix((asset.clone(),), u32::MAX, None); - // delete tasks from tasks table - let _ = Tasks::::clear_prefix((asset.clone(),), u32::MAX, None); - // delete tasks from task queue - let existing_task_queue: Vec<(AssetName, T::Hash)> = Self::get_task_queue(); - let mut updated_task_queue: Vec<(AssetName, T::Hash)> = vec![]; - for task in existing_task_queue { - if task.0 != asset { - updated_task_queue.push(task); - } - } - TaskQueue::::put(updated_task_queue); - } - pub fn run_native_transfer_task( sender: T::AccountId, recipient: T::AccountId, amount: BalanceOf, - task_id: T::Hash, + task_id: TaskId, ) -> Weight { match T::Currency::transfer( &sender, @@ -680,32 +926,99 @@ pub mod pallet { ::WeightInfo::run_native_transfer_task() } + pub fn run_xcmp_task( + destination: MultiLocation, + caller: T::AccountId, + fee: AssetPayment, + encoded_call: Vec, + encoded_call_weight: Weight, + overall_weight: Weight, + flow: InstructionSequence, + ) -> (Weight, Option) { + let fee_asset_location = MultiLocation::try_from(fee.asset_location); + if fee_asset_location.is_err() { + return ( + ::WeightInfo::run_xcmp_task(), + Some(Error::::BadVersion.into()), + ) + } + let fee_asset_location = fee_asset_location.unwrap(); + + match T::XcmpTransactor::transact_xcm( + destination, + fee_asset_location, + fee.amount, + caller, + 
encoded_call, + encoded_call_weight, + overall_weight, + flow, + ) { + Ok(()) => (::WeightInfo::run_xcmp_task(), None), + Err(e) => (::WeightInfo::run_xcmp_task(), Some(e)), + } + } + /// Runs as many tasks as the weight allows from the provided vec of task_ids. /// /// Returns a vec with the tasks that were not run and the remaining weight. pub fn run_tasks( - mut task_ids: Vec<(AssetName, T::Hash)>, + mut task_ids: Vec, mut weight_left: Weight, - ) -> (Vec<(AssetName, T::Hash)>, Weight) { + ) -> (Vec, Weight) { let mut consumed_task_index: usize = 0; for task_id in task_ids.iter() { consumed_task_index.saturating_inc(); + + // TODO: re-check condition here once more time because the price might have been + // more + // if the task is already expired, don't run them either + let action_weight = match Self::get_task(task_id) { None => { - Self::deposit_event(Event::TaskNotFound { task_id: task_id.1 }); + // TODO: add back signature when insert new task work + //Self::deposit_event(Event::TaskNotFound { task_id: task_id.clone() }); ::WeightInfo::emit_event() }, Some(task) => { let task_action_weight = match task.action.clone() { + // TODO: Run actual task later to return weight + // not just return weight for test to pass + Action::XCMP { + destination, + execution_fee, + schedule_as, + encoded_call, + encoded_call_weight, + overall_weight, + instruction_sequence, + .. 
+ } => { + let (w, _err) = Self::run_xcmp_task( + destination, + schedule_as.unwrap_or(task.owner_id.clone()), + execution_fee, + encoded_call, + encoded_call_weight, + overall_weight, + instruction_sequence, + ); + w + }, Action::NativeTransfer { sender, recipient, amount } => Self::run_native_transfer_task( sender, recipient, amount, - task_id.clone().1, + task_id.clone(), ), }; + Tasks::::remove(task_id); + + // TODO: add this weight + Self::remove_task_from_account(&task); + task_action_weight .saturating_add(T::DbWeight::get().writes(1u64)) .saturating_add(T::DbWeight::get().reads(1u64)) @@ -729,149 +1042,51 @@ pub mod pallet { } } - pub fn generate_task_id(owner_id: AccountOf, provided_id: Vec) -> T::Hash { - let task_hash_input = TaskHashInput:: { owner_id, provided_id }; - T::Hashing::hash_of(&task_hash_input) + fn push_task_to_account(task: &Task) { + AccountTasks::::insert(task.owner_id.clone(), task.task_id.clone(), task.expired_at); } - /// Schedule task and return it's task_id. - /// With transaction will protect against a partial success where N of M execution times might be full, - /// rolling back any successful insertions into the schedule task table. - pub fn schedule_task( - owner_id: AccountOf, - provided_id: Vec, - asset: AssetName, - direction: AssetDirection, - trigger_percentage: AssetPercentage, - ) -> Result> { - let task_id = Self::generate_task_id(owner_id, provided_id); - if let Some(_) = Self::get_task((asset.clone(), task_id)) { - Err(Error::::DuplicateTask)? - } - if let Some(mut asset_tasks) = - Self::get_scheduled_tasks((asset.clone(), direction.clone(), trigger_percentage)) - { - if asset_tasks.try_push(task_id).is_err() { - Err(Error::::MaxTasksReached)? 
- } - >::insert((asset, direction, trigger_percentage), asset_tasks); - } else { - let scheduled_tasks: BoundedVec = - vec![task_id].try_into().unwrap(); - >::insert( - (asset, direction, trigger_percentage), - scheduled_tasks, - ); - } - Ok(task_id) + fn remove_task_from_account(task: &Task) { + AccountTasks::::remove(task.owner_id.clone(), task.task_id.clone()); } + /// With transaction will protect against a partial success where N of M execution times might be full, + /// rolling back any successful insertions into the schedule task table. /// Validate and schedule task. /// This will also charge the execution fee. - pub fn validate_and_schedule_task( - who: T::AccountId, - provided_id: Vec, - asset: AssetName, - direction: AssetDirection, - trigger_percentage: AssetPercentage, - recipient: T::AccountId, - amount: BalanceOf, - ) -> Result<(), Error> { - if provided_id.is_empty() { + /// TODO: double check atomic + pub fn validate_and_schedule_task(task: Task) -> Result<(), Error> { + if task.task_id.is_empty() { Err(Error::::EmptyProvidedId)? } - let asset_target_price: AssetPrice = match Self::get_asset_baseline_price(asset.clone()) - { - None => Err(Error::::AssetNotSupported)?, - Some(asset_price) => asset_price, - }; - let last_asset_price: AssetPrice = match Self::get_asset_price(asset.clone()) { - None => Err(Error::::AssetNotSupported)?, - Some(asset_price) => asset_price, - }; - match direction { - Direction::Down => - if last_asset_price < asset_target_price { - let last_asset_percentage = - Self::calculate_asset_percentage(last_asset_price, asset_target_price); - if trigger_percentage < last_asset_percentage { - Err(Error::::AssetNotInTriggerableRange)? - } - }, - Direction::Up => - if last_asset_price > asset_target_price { - let last_asset_percentage = - Self::calculate_asset_percentage(last_asset_price, asset_target_price); - if trigger_percentage < last_asset_percentage { - Err(Error::::AssetNotInTriggerableRange)? 
- } - }, - } - let task_id = Self::schedule_task( - who.clone(), - provided_id.clone(), - asset.clone(), - direction.clone(), - trigger_percentage, - )?; - let action = Action::NativeTransfer { sender: who.clone(), recipient, amount }; - let task: Task = Task:: { - owner_id: who.clone(), - provided_id, - asset: asset.clone(), - direction, - trigger_percentage, - action, - }; - >::insert((asset, task_id), task); - Self::deposit_event(Event::TaskScheduled { who, task_id }); - Ok(()) - } + >::insert(task.task_id.clone(), &task); + Self::push_task_to_account(&task); - pub fn move_scheduled_tasks( - asset: AssetName, - lower: AssetPercentage, - higher: AssetPercentage, - direction: AssetDirection, - ) -> DispatchResult { - let mut existing_task_queue: Vec<(AssetName, T::Hash)> = Self::get_task_queue(); - // TODO: fix adjusted_higher to not peg to 20. Should move with the removal of 100 % increase. - let adjusted_higher = match higher > 20 { - true => 20, - false => higher, - }; - for percentage in lower..adjusted_higher { - // TODO: pull all and cycle through in memory - if let Some(asset_tasks) = - Self::get_scheduled_tasks((asset.clone(), direction.clone(), percentage)) - { - for task in asset_tasks { - existing_task_queue.push((asset.clone(), task)); - } - >::remove((asset.clone(), direction.clone(), percentage)); - } - } - TaskQueue::::put(existing_task_queue); - Ok(()) - } + let key = (&task.chain, &task.exchange, &task.asset_pair, &task.trigger_function); - pub fn calculate_asset_percentage( - asset_update_value: AssetPrice, - asset_target_price: AssetPrice, - ) -> AssetPercentage { - // TODO: fix 100 hardcode - if asset_target_price > asset_update_value { - asset_target_price - .saturating_sub(asset_update_value) - .saturating_mul(100) - .saturating_div(asset_target_price) + if let Some(mut sorted_task_index) = Self::get_sorted_tasks_index(key) { + // TODO: remove hard code and take right param + if let Some(tasks_by_price) = 
sorted_task_index.get_mut(&(task.trigger_params[0])) { + tasks_by_price.push(task.task_id.clone()); + } else { + sorted_task_index.insert(task.trigger_params[0], vec![task.task_id.clone()]); + } + SortedTasksIndex::::insert(key, sorted_task_index); } else { - asset_update_value - .saturating_sub(asset_target_price) - .saturating_mul(100) - .saturating_div(asset_target_price) + let mut sorted_task_index = BTreeMap::::new(); + sorted_task_index.insert(task.trigger_params[0], vec![task.task_id.clone()]); + + // TODO: sorted based on trigger_function comparison of the parameter + // then at the time of trigger we cut off all the left part of the tree + SortedTasksIndex::::insert(key, sorted_task_index); } + + Self::deposit_event(Event::TaskScheduled { + who: task.owner_id, + task_id: task.task_id.clone(), + }); + Ok(()) } } diff --git a/pallets/automation-price/src/mock.rs b/pallets/automation-price/src/mock.rs new file mode 100644 index 00000000..f88f1f29 --- /dev/null +++ b/pallets/automation-price/src/mock.rs @@ -0,0 +1,564 @@ +// This file is part of OAK Blockchain. + +// Copyright (C) 2022 OAK Network +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use super::*; +use crate as pallet_automation_price; +use crate::TaskId; + +use frame_support::{ + construct_runtime, parameter_types, + traits::{ConstU32, Contains, Everything}, + weights::Weight, + PalletId, +}; +use frame_system::{self as system, RawOrigin}; +use orml_traits::parameter_type_with_key; +use primitives::{EnsureProxy, TransferCallCreator}; +use sp_core::H256; +use sp_runtime::{ + testing::Header, + traits::{AccountIdConversion, BlakeTwo256, Convert, IdentityLookup}, + AccountId32, MultiAddress, Perbill, +}; +use sp_std::{marker::PhantomData, vec::Vec}; +use xcm::latest::prelude::*; + +type UncheckedExtrinsic = system::mocking::MockUncheckedExtrinsic; +type Block = system::mocking::MockBlock; + +use crate::weights::WeightInfo; + +pub type Balance = u128; +pub type AccountId = AccountId32; +pub type CurrencyId = u32; + +pub const START_BLOCK_TIME: u64 = 33198768000 * 1_000; + +pub const DEFAULT_SCHEDULE_FEE_LOCATION: MultiLocation = MOONBASE_ASSET_LOCATION; + +pub const ALICE: [u8; 32] = [1u8; 32]; +pub const DELEGATOR_ACCOUNT: [u8; 32] = [3u8; 32]; +pub const PROXY_ACCOUNT: [u8; 32] = [4u8; 32]; + +pub const PARA_ID: u32 = 2000; +pub const NATIVE: CurrencyId = 0; +pub const NATIVE_LOCATION: MultiLocation = MultiLocation { parents: 0, interior: Here }; +pub const NATIVE_EXECUTION_WEIGHT_FEE: u128 = 12; +pub const FOREIGN_CURRENCY_ID: CurrencyId = 1; + +const DOLLAR: u128 = 10_000_000_000; + +pub const MOONBASE_ASSET_LOCATION: MultiLocation = + MultiLocation { parents: 1, interior: X2(Parachain(1000), PalletInstance(3)) }; + +pub const exchange1: &[u8] = "exchange1".as_bytes(); +pub const exchange2: &[u8] = "exchange2".as_bytes(); + +pub const chain1: &[u8] = "KUSAMA".as_bytes(); +pub const chain2: &[u8] = "DOT".as_bytes(); + +pub const asset1: &[u8] = "TUR".as_bytes(); +pub const asset2: &[u8] = "USDC".as_bytes(); +pub const asset3: &[u8] = "KSM".as_bytes(); + +construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + 
UncheckedExtrinsic = UncheckedExtrinsic, + { + System: system::{Pallet, Call, Config, Storage, Event}, + Timestamp: pallet_timestamp::{Pallet, Call, Storage, Inherent}, + Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, + ParachainInfo: parachain_info::{Pallet, Storage, Config}, + Tokens: orml_tokens::{Pallet, Storage, Event, Config}, + Currencies: orml_currencies::{Pallet, Call}, + AutomationPrice: pallet_automation_price::{Pallet, Call, Storage, Event}, + } +); + +parameter_types! { + pub const BlockHashCount: u64 = 250; + pub const SS58Prefix: u8 = 51; +} + +impl system::Config for Test { + type BaseCallFilter = Everything; + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); + type RuntimeOrigin = RuntimeOrigin; + type RuntimeCall = RuntimeCall; + type Index = u64; + type BlockNumber = u64; + type Hash = H256; + type Hashing = BlakeTwo256; + type AccountId = AccountId32; + type Lookup = IdentityLookup; + type Header = Header; + type RuntimeEvent = RuntimeEvent; + //type RuntimeEvent = From> + IsType<::RuntimeEvent>; + type BlockHashCount = BlockHashCount; + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = pallet_balances::AccountData; + type OnNewAccount = (); + type OnKilledAccount = (); + type SystemWeightInfo = (); + type SS58Prefix = SS58Prefix; + type OnSetCode = (); + type MaxConsumers = frame_support::traits::ConstU32<16>; +} + +parameter_types! { + pub const ExistentialDeposit: u64 = 1; + pub const MaxLocks: u32 = 50; + pub const MaxReserves: u32 = 50; +} + +impl pallet_balances::Config for Test { + type MaxLocks = MaxLocks; + type Balance = Balance; + type RuntimeEvent = RuntimeEvent; + type DustRemoval = (); + type ExistentialDeposit = ExistentialDeposit; + type AccountStore = System; + type WeightInfo = (); + type MaxReserves = MaxReserves; + type ReserveIdentifier = [u8; 8]; +} + +impl parachain_info::Config for Test {} + +parameter_type_with_key! 
{ + pub ExistentialDeposits: |_currency_id: CurrencyId| -> Balance { + Default::default() + }; +} +parameter_types! { + pub DustAccount: AccountId = PalletId(*b"auto/dst").into_account_truncating(); +} + +impl orml_tokens::Config for Test { + type RuntimeEvent = RuntimeEvent; + type Balance = Balance; + type Amount = i64; + type CurrencyId = CurrencyId; + type WeightInfo = (); + type ExistentialDeposits = ExistentialDeposits; + type CurrencyHooks = (); + type MaxLocks = ConstU32<100_000>; + type MaxReserves = ConstU32<100_000>; + type ReserveIdentifier = [u8; 8]; + type DustRemovalWhitelist = frame_support::traits::Nothing; +} + +impl orml_currencies::Config for Test { + type MultiCurrency = Tokens; + type NativeCurrency = AdaptedBasicCurrency; + type GetNativeCurrencyId = GetNativeCurrencyId; + type WeightInfo = (); +} +pub type AdaptedBasicCurrency = orml_currencies::BasicCurrencyAdapter; + +parameter_types! { + /// Minimum stake required to become a collator + pub const MinCollatorStk: u128 = 400_000 * DOLLAR; + pub const MinimumPeriod: u64 = 1000; +} + +impl pallet_timestamp::Config for Test { + type Moment = u64; + type OnTimestampSet = (); + type MinimumPeriod = MinimumPeriod; + type WeightInfo = (); +} + +impl pallet_automation_price::Config for Test { + type RuntimeEvent = RuntimeEvent; + type MaxTasksPerSlot = MaxTasksPerSlot; + type MaxBlockWeight = MaxBlockWeight; + type MaxWeightPercentage = MaxWeightPercentage; + type WeightInfo = MockWeight; + type ExecutionWeightFee = ExecutionWeightFee; + type CurrencyId = CurrencyId; + type MultiCurrency = Currencies; + type Currency = Balances; + type CurrencyIdConvert = MockTokenIdConvert; + type FeeHandler = FeeHandler; + type FeeConversionRateProvider = MockConversionRateProvider; + type UniversalLocation = UniversalLocation; + type SelfParaId = parachain_info::Pallet; + type XcmpTransactor = MockXcmpTransactor; + + type EnsureProxy = MockEnsureProxy; +} + +parameter_types! 
{ + pub const MaxTasksPerSlot: u32 = 2; + #[derive(Debug)] + pub const MaxScheduleSeconds: u64 = 24 * 60 * 60; + pub const MaxBlockWeight: u64 = 20_000_000; + pub const MaxWeightPercentage: Perbill = Perbill::from_percent(40); + pub const ExecutionWeightFee: Balance = NATIVE_EXECUTION_WEIGHT_FEE; + + // When unit testing dynamic dispatch, we use the real weight value of the extrinsics call + // This is an external lib that we don't own so we try to not mock, follow the rule don't mock + // what you don't own + // One of test we do is Balances::transfer call, which has its weight define here: + // https://github.com/paritytech/substrate/blob/polkadot-v0.9.38/frame/balances/src/weights.rs#L61-L73 + // When logging the final calculated amount, its value is 73_314_000. + // + // in our unit test, we test a few transfers with dynamic dispatch. On top + // of that, there is also weight of our call such as fetching the tasks, + // move from schedule slot to tasks queue,.. so the weight of a schedule + // transfer with dynamic dispatch is even higher. 
+ // + // and because we test run a few of them so I set it to ~10x value of 73_314_000 + pub const MaxWeightPerSlot: u128 = 700_000_000; + pub const XmpFee: u128 = 1_000_000; + pub const GetNativeCurrencyId: CurrencyId = NATIVE; +} + +pub struct MockPalletBalanceWeight(PhantomData); +impl pallet_balances::WeightInfo for MockPalletBalanceWeight { + fn transfer() -> Weight { + Weight::from_ref_time(100_000) + } + + fn transfer_keep_alive() -> Weight { + Weight::zero() + } + fn set_balance_creating() -> Weight { + Weight::zero() + } + fn set_balance_killing() -> Weight { + Weight::zero() + } + fn force_transfer() -> Weight { + Weight::zero() + } + fn transfer_all() -> Weight { + Weight::zero() + } + fn force_unreserve() -> Weight { + Weight::zero() + } +} + +pub struct MockWeight(PhantomData); +impl pallet_automation_price::WeightInfo for MockWeight { + fn emit_event() -> Weight { + Weight::from_ref_time(20_000_000_u64) + } + fn run_native_transfer_task() -> Weight { + Weight::from_ref_time(230_000_000_u64) + } + fn reset_asset(_v: u32) -> Weight { + Weight::from_ref_time(200_000_000_u64) + } + fn update_asset_reset() -> Weight { + Weight::from_ref_time(200_000_000_u64) + } + fn delete_asset_tasks() -> Weight { + Weight::from_ref_time(200_000_000_u64) + } + fn delete_asset_extrinsic() -> Weight { + Weight::from_ref_time(220_000_000_u64) + } + fn asset_price_update_extrinsic() -> Weight { + Weight::from_ref_time(220_000_000_u64) + } + fn initialize_asset_extrinsic() -> Weight { + Weight::from_ref_time(220_000_000_u64) + } + fn schedule_transfer_task_extrinsic() -> Weight { + Weight::from_ref_time(200_000_000_u64) + } + + fn schedule_xcmp_task() -> Weight { + Weight::from_ref_time(200_000_000_u64) + } + + fn schedule_xcmp_task_through_proxy() -> Weight { + Weight::from_ref_time(200_000_000_u64) + } + + fn cancel_task() -> Weight { + Weight::from_ref_time(20_000_000_u64) + } + + fn run_xcmp_task() -> Weight { + Weight::from_ref_time(200_000_000_u64) + } +} + +pub struct 
MockXcmpTransactor(PhantomData<(T, C)>); +impl pallet_xcmp_handler::XcmpTransactor + for MockXcmpTransactor +where + T: Config + pallet::Config, + C: frame_support::traits::ReservableCurrency, +{ + fn transact_xcm( + _destination: MultiLocation, + _location: xcm::latest::MultiLocation, + _fee: u128, + _caller: T::AccountId, + _transact_encoded_call: sp_std::vec::Vec, + _transact_encoded_call_weight: Weight, + _overall_weight: Weight, + _flow: InstructionSequence, + ) -> Result<(), sp_runtime::DispatchError> { + Ok(()) + } + + fn pay_xcm_fee(_: T::AccountId, _: u128) -> Result<(), sp_runtime::DispatchError> { + Ok(()) + } +} + +pub struct ScheduleAllowList; +impl Contains for ScheduleAllowList { + fn contains(c: &RuntimeCall) -> bool { + match c { + RuntimeCall::System(_) => true, + RuntimeCall::Balances(_) => true, + _ => false, + } + } +} + +pub struct MockConversionRateProvider; +impl FixedConversionRateProvider for MockConversionRateProvider { + fn get_fee_per_second(location: &MultiLocation) -> Option { + get_fee_per_second(location) + } +} + +pub struct MockTokenIdConvert; +impl Convert> for MockTokenIdConvert { + fn convert(id: CurrencyId) -> Option { + if id == NATIVE { + Some(MultiLocation::new(0, Here)) + } else if id == FOREIGN_CURRENCY_ID { + Some(MultiLocation::new(1, X1(Parachain(PARA_ID)))) + } else { + None + } + } +} + +impl Convert> for MockTokenIdConvert { + fn convert(location: MultiLocation) -> Option { + if location == MultiLocation::new(0, Here) { + Some(NATIVE) + } else if location == MultiLocation::new(1, X1(Parachain(PARA_ID))) { + Some(FOREIGN_CURRENCY_ID) + } else { + None + } + } +} + +// TODO: We should extract this and share code with automation-time +pub struct MockEnsureProxy; +impl EnsureProxy for MockEnsureProxy { + fn ensure_ok(_delegator: AccountId, _delegatee: AccountId) -> Result<(), &'static str> { + if _delegator == DELEGATOR_ACCOUNT.into() && _delegatee == PROXY_ACCOUNT.into() { + Ok(()) + } else { + Err("proxy error: 
expected `ProxyType::Any`") + } + } +} + +pub struct MockTransferCallCreator; +impl TransferCallCreator, Balance, RuntimeCall> + for MockTransferCallCreator +{ + fn create_transfer_call(dest: MultiAddress, value: Balance) -> RuntimeCall { + let account_id = match dest { + MultiAddress::Id(i) => Some(i), + _ => None, + }; + + let call: RuntimeCall = + pallet_balances::Call::transfer { dest: account_id.unwrap(), value }.into(); + call + } +} + +parameter_types! { + pub const RelayNetwork: NetworkId = NetworkId::Rococo; + // The universal location within the global consensus system + pub UniversalLocation: InteriorMultiLocation = + X2(GlobalConsensus(RelayNetwork::get()), Parachain(ParachainInfo::parachain_id().into())); +} + +// Build genesis storage according to the mock runtime. +pub fn new_test_ext(state_block_time: u64) -> sp_io::TestExternalities { + let genesis_storage = system::GenesisConfig::default().build_storage::().unwrap(); + let mut ext = sp_io::TestExternalities::new(genesis_storage); + ext.execute_with(|| System::set_block_number(1)); + ext.execute_with(|| Timestamp::set_timestamp(state_block_time)); + ext +} + +pub fn events() -> Vec { + let events = System::events(); + let evt = events.into_iter().map(|evt| evt.event).collect::>(); + + System::reset_events(); + + evt +} + +// A utility test function to pluck out the task id from events, useful when dealing with multiple +// task scheduling +pub fn get_task_ids_from_events() -> Vec { + System::events() + .into_iter() + .filter_map(|e| match e.event { + RuntimeEvent::AutomationPrice(crate::Event::TaskScheduled { task_id, .. 
}) =>
+ Some(task_id),
+ _ => None,
+ })
+ .collect::<Vec<_>>()
+}
+
+pub fn get_funds(account: AccountId) {
+ let double_action_weight = Weight::from_ref_time(20_000_u64) * 2;
+
+ let action_fee = ExecutionWeightFee::get() * u128::from(double_action_weight.ref_time());
+ let max_execution_fee = action_fee;
+ Balances::set_balance(RawOrigin::Root.into(), account, max_execution_fee, 0).unwrap();
+}
+
+pub fn get_minimum_funds(account: AccountId, executions: u32) {
+ let double_action_weight = Weight::from_ref_time(20_000_u64) * 2;
+ let action_fee = ExecutionWeightFee::get() * u128::from(double_action_weight.ref_time());
+ let max_execution_fee = action_fee * u128::from(executions);
+ Balances::set_balance(RawOrigin::Root.into(), account, max_execution_fee, 0).unwrap();
+}
+
+pub fn get_xcmp_funds(account: AccountId) {
+ let double_action_weight = MockWeight::<Test>::run_xcmp_task() * 2;
+ let action_fee = ExecutionWeightFee::get() * u128::from(double_action_weight.ref_time());
+ let max_execution_fee = action_fee * u128::from(1u32);
+ let with_xcm_fees = max_execution_fee + XmpFee::get();
+ Balances::set_balance(RawOrigin::Root.into(), account, with_xcm_fees, 0).unwrap();
+}
+
+pub fn fund_account(
+ account: &AccountId,
+ action_weight: u64,
+ execution_count: usize,
+ additional_amount: Option<u128>,
+) {
+ let amount: u128 =
+ u128::from(action_weight) * ExecutionWeightFee::get() * execution_count as u128 +
+ additional_amount.unwrap_or(0) +
+ u128::from(ExistentialDeposit::get());
+ _ = <Test as Config>::Currency::deposit_creating(account, amount);
+}
+
+pub struct MockAssetFeePerSecond {
+ pub asset_location: MultiLocation,
+ pub fee_per_second: u128,
+}
+
+pub const ASSET_FEE_PER_SECOND: [MockAssetFeePerSecond; 3] = [
+ MockAssetFeePerSecond {
+ asset_location: MultiLocation { parents: 1, interior: X1(Parachain(2000)) },
+ fee_per_second: 416_000_000_000,
+ },
+ MockAssetFeePerSecond {
+ asset_location: MultiLocation {
+ parents: 1,
+ interior: X2(Parachain(2110), GeneralKey { length: 4, data: 
[0; 32] }), + }, + fee_per_second: 416_000_000_000, + }, + MockAssetFeePerSecond { + asset_location: MOONBASE_ASSET_LOCATION, + fee_per_second: 10_000_000_000_000_000_000, + }, +]; + +pub fn get_fee_per_second(location: &MultiLocation) -> Option { + let location = location + .reanchored( + &MultiLocation::new(1, X1(Parachain(::SelfParaId::get().into()))), + ::UniversalLocation::get(), + ) + .expect("Reanchor location failed"); + + let found_asset = ASSET_FEE_PER_SECOND.into_iter().find(|item| match item { + MockAssetFeePerSecond { asset_location, .. } => *asset_location == location, + }); + + if found_asset.is_some() { + Some(found_asset.unwrap().fee_per_second) + } else { + None + } +} + +pub fn setup_asset(sender: &AccountId32, chain: Vec) { + AutomationPrice::initialize_asset( + RawOrigin::Root.into(), + chain, + exchange1.to_vec(), + asset1.to_vec(), + asset2.to_vec(), + 10, + vec![sender.clone()], + ); +} + +pub fn setup_prices(sender: &AccountId32) { + AutomationPrice::initialize_asset( + RawOrigin::Root.into(), + chain1.to_vec(), + exchange1.to_vec(), + asset1.to_vec(), + asset2.to_vec(), + 10, + vec![sender.clone()], + ); + + AutomationPrice::initialize_asset( + RawOrigin::Root.into(), + chain2.to_vec(), + exchange1.to_vec(), + asset2.to_vec(), + asset3.to_vec(), + 10, + vec![sender.clone()], + ); + + AutomationPrice::initialize_asset( + RawOrigin::Root.into(), + chain2.to_vec(), + exchange1.to_vec(), + asset1.to_vec(), + asset3.to_vec(), + 10, + vec![sender.clone()], + ); +} diff --git a/pallets/automation-price/src/tests.rs b/pallets/automation-price/src/tests.rs new file mode 100644 index 00000000..b1d517c5 --- /dev/null +++ b/pallets/automation-price/src/tests.rs @@ -0,0 +1,550 @@ +// This file is part of OAK Blockchain. + +// Copyright (C) 2022 OAK Network +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::{mock::*, AssetPayment, Config, TaskIdList}; +use pallet_xcmp_handler::InstructionSequence; + +use frame_support::{ + assert_ok, + weights::{constants::WEIGHT_REF_TIME_PER_SECOND, Weight}, +}; +use frame_system::{self, RawOrigin}; +use sp_core::Get; +use sp_runtime::AccountId32; + +use xcm::latest::{prelude::*, Junction::Parachain, MultiLocation}; + +use crate::weights::WeightInfo; + +struct XcmpActionParams { + destination: MultiLocation, + schedule_fee: MultiLocation, + execution_fee: AssetPayment, + encoded_call: Vec, + encoded_call_weight: Weight, + overall_weight: Weight, + schedule_as: Option, + instruction_sequence: InstructionSequence, +} + +impl Default for XcmpActionParams { + fn default() -> Self { + let delegator_account = AccountId32::new(DELEGATOR_ACCOUNT); + XcmpActionParams { + destination: MultiLocation::new(1, X1(Parachain(PARA_ID))), + schedule_fee: DEFAULT_SCHEDULE_FEE_LOCATION, + execution_fee: AssetPayment { + asset_location: MOONBASE_ASSET_LOCATION.into(), + amount: 100, + }, + encoded_call: vec![3, 4, 5], + encoded_call_weight: Weight::from_ref_time(100_000), + overall_weight: Weight::from_ref_time(200_000), + schedule_as: Some(delegator_account), + instruction_sequence: InstructionSequence::PayThroughRemoteDerivativeAccount, + } + } +} + +fn calculate_local_action_schedule_fee(weight: Weight, num_of_execution: u32) -> u128 { + NATIVE_EXECUTION_WEIGHT_FEE * (weight.ref_time() as u128) * (num_of_execution as u128) +} + +fn calculate_expected_xcmp_action_schedule_fee( + schedule_fee_location: MultiLocation, + 
num_of_execution: u32, +) -> u128 { + let schedule_fee_location = schedule_fee_location + .reanchored( + &MultiLocation::new(1, X1(Parachain(::SelfParaId::get().into()))), + ::UniversalLocation::get(), + ) + .expect("Location reanchor failed"); + let weight = ::WeightInfo::run_xcmp_task(); + + if schedule_fee_location == MultiLocation::default() { + calculate_local_action_schedule_fee(weight, num_of_execution) + } else { + let fee_per_second = + get_fee_per_second(&schedule_fee_location).expect("Get fee per second should work"); + fee_per_second * (weight.ref_time() as u128) * (num_of_execution as u128) / + (WEIGHT_REF_TIME_PER_SECOND as u128) + } +} + +// Helper function to asset event easiser +/// Assert the given `event` exists. +#[cfg(any(feature = "std", feature = "runtime-benchmarks", test))] +pub fn assert_has_event(event: RuntimeEvent) { + let evts = System::events().into_iter().map(|evt| evt.event).collect::>(); + assert!(evts.iter().any(|record| record == &event)) +} + +#[allow(dead_code)] +#[cfg(any(feature = "std", feature = "runtime-benchmarks", test))] +pub fn assert_last_event(event: RuntimeEvent) { + assert_eq!(events().last().expect("events expected"), &event); +} + +/// Check that events appear in the emitted_events list in order, +fn contains_events(emitted_events: Vec, events: Vec) -> bool { + // If the target events list is empty, consider it satisfied as there are no specific order requirements + if events.is_empty() { + return true + } + + // Convert both lists to iterators + let mut emitted_iter = emitted_events.iter(); + let events_iter = events.iter(); + + // Iterate through the target events + for target_event in events_iter { + // Initialize a boolean variable to track whether the target event is found + let mut found = false; + + // Continue iterating through the emitted events until a match is found or there are no more emitted events + for emitted_event in emitted_iter.by_ref() { + // Compare event type and event data for a match + if 
emitted_event == target_event { + // Target event found, mark as found and advance the emitted iterator + found = true; + break + } + } + + // If the target event is not found, return false + if !found { + return false + } + } + + // If all target events are found in order, return true + true +} + +#[test] +fn test_initialize_asset_works() { + new_test_ext(START_BLOCK_TIME).execute_with(|| { + assert_ok!(AutomationPrice::initialize_asset( + RawOrigin::Root.into(), + chain1.to_vec(), + exchange1.to_vec(), + asset1.to_vec(), + asset2.to_vec(), + 10, + vec!(AccountId32::new(ALICE)) + )); + }) +} + +#[test] +fn test_update_asset_prices() { + new_test_ext(START_BLOCK_TIME).execute_with(|| { + let sender = AccountId32::new(ALICE); + + setup_asset(&sender, chain1.to_vec()); + + assert_ok!(AutomationPrice::update_asset_prices( + RuntimeOrigin::signed(sender.clone()), + vec!(chain1.to_vec()), + vec!(exchange1.to_vec()), + vec!(asset1.to_vec()), + vec!(asset2.to_vec()), + vec!(1005), + vec!(START_BLOCK_TIME as u128), + vec!(1), + )); + + let p = AutomationPrice::get_asset_price_data(( + chain1.to_vec(), + exchange1.to_vec(), + (asset1.to_vec(), asset2.to_vec()), + )) + .expect("cannot get price"); + + assert_eq!(p.round, 1); + assert_eq!(p.amount, 1005); + + assert_has_event(RuntimeEvent::AutomationPrice(crate::Event::AssetUpdated { + who: sender, + chain: chain1.to_vec(), + exchange: exchange1.to_vec(), + asset1: asset1.to_vec(), + asset2: asset2.to_vec(), + price: 1005, + })); + }) +} +#[test] +fn test_update_asset_prices_multi() { + new_test_ext(START_BLOCK_TIME).execute_with(|| { + let sender = AccountId32::new(ALICE); + + setup_asset(&sender, chain1.to_vec()); + setup_asset(&sender, chain2.to_vec()); + + assert_ok!(AutomationPrice::update_asset_prices( + RuntimeOrigin::signed(sender.clone()), + vec!(chain1.to_vec(), chain2.to_vec()), + vec!(exchange1.to_vec(), exchange1.to_vec()), + vec!(asset1.to_vec(), asset1.to_vec()), + vec!(asset2.to_vec(), asset2.to_vec()), + 
vec!(1005, 1009), + vec!(START_BLOCK_TIME as u128, START_BLOCK_TIME as u128), + vec!(1, 2), + )); + + let p1 = AutomationPrice::get_asset_price_data(( + chain1.to_vec(), + exchange1.to_vec(), + (asset1.to_vec(), asset2.to_vec()), + )) + .expect("cannot get price"); + + assert_eq!(p1.round, 1); + assert_eq!(p1.amount, 1005); + + let p2 = AutomationPrice::get_asset_price_data(( + chain2.to_vec(), + exchange1.to_vec(), + (asset1.to_vec(), asset2.to_vec()), + )) + .expect("cannot get price"); + + assert_eq!(p2.round, 2); + assert_eq!(p2.amount, 1009); + + assert_has_event(RuntimeEvent::AutomationPrice(crate::Event::AssetUpdated { + who: sender.clone(), + chain: chain1.to_vec(), + exchange: exchange1.to_vec(), + asset1: asset1.to_vec(), + asset2: asset2.to_vec(), + price: 1005, + })); + + assert_has_event(RuntimeEvent::AutomationPrice(crate::Event::AssetUpdated { + who: sender, + chain: chain2.to_vec(), + exchange: exchange1.to_vec(), + asset1: asset1.to_vec(), + asset2: asset2.to_vec(), + price: 1009, + })); + }) +} + +#[test] +fn test_schedule_xcmp_task_ok() { + new_test_ext(START_BLOCK_TIME).execute_with(|| { + // TODO: Setup fund once we add fund check and weight + let para_id: u32 = 1000; + let creator = AccountId32::new(ALICE); + let call: Vec = vec![2, 4, 5]; + let destination = MultiLocation::new(1, X1(Parachain(para_id))); + + setup_asset(&creator, chain1.to_vec()); + + assert_ok!(AutomationPrice::schedule_xcmp_task( + RuntimeOrigin::signed(creator.clone()), + chain1.to_vec(), + exchange1.to_vec(), + asset1.to_vec(), + asset2.to_vec(), + 1005u128, + "gt".as_bytes().to_vec(), + vec!(100), + Box::new(destination.into()), + Box::new(NATIVE_LOCATION.into()), + Box::new(AssetPayment { + asset_location: MultiLocation::new(0, Here).into(), + amount: 10000000000000 + }), + call.clone(), + Weight::from_ref_time(100_000), + Weight::from_ref_time(200_000) + )); + + // Upon schedule, task will be insert into 3 places + // 1. 
TaskRegistry: a fast hashmap look up using task id only + // 2. SortedTasksIndex: an ordering BTreeMap of the task, only task id and its price + // trigger + // 3. AccountTasks: hashmap to look up user task id + + let task_ids = get_task_ids_from_events(); + let task_id = task_ids.first().expect("task failed to schedule"); + + let task = AutomationPrice::get_task(task_id).expect("missing task in registry"); + assert_eq!( + task.trigger_function, + "gt".as_bytes().to_vec(), + "created task has wrong trigger function" + ); + assert_eq!(task.chain, chain1.to_vec(), "created task has different chain id"); + assert_eq!(task.asset_pair.0, asset1, "created task has wrong asset pair"); + + assert_eq!( + AutomationPrice::get_account_task_ids(&creator, task_id) + .expect("account task is missing"), + task.expired_at + ); + + // Ensure task is inserted into the right SortedIndex + + // Create second task, and make sure both are recorded + assert_ok!(AutomationPrice::schedule_xcmp_task( + RuntimeOrigin::signed(creator), + chain1.to_vec(), + exchange1.to_vec(), + asset1.to_vec(), + asset2.to_vec(), + 1005u128, + "gt".as_bytes().to_vec(), + vec!(100), + Box::new(destination.into()), + Box::new(NATIVE_LOCATION.into()), + Box::new(AssetPayment { + asset_location: MultiLocation::new(0, Here).into(), + amount: 10000000000000 + }), + call.clone(), + Weight::from_ref_time(100_000), + Weight::from_ref_time(200_000) + )); + let task_ids2 = get_task_ids_from_events(); + let task_id2 = task_ids2.last().expect("task failed to schedule"); + assert_ne!(task_id, task_id2, "task id dup"); + + let sorted_task_index = AutomationPrice::get_sorted_tasks_index(( + chain1.to_vec(), + exchange1.to_vec(), + (asset1.to_vec(), asset2.to_vec()), + "gt".as_bytes().to_vec(), + )) + .unwrap(); + let task_ids: Vec = sorted_task_index.into_values().collect(); + assert_eq!(task_ids, vec!(vec!(vec!(49, 45, 48, 45, 49), vec!(49, 45, 48, 45, 50)))); + }) +} + +// Test when price moves, the TaskQueue will be 
populated with the right task id +#[test] +fn test_shift_tasks_movement_through_price_changes() { + new_test_ext(START_BLOCK_TIME).execute_with(|| { + // TODO: Setup fund once we add fund check and weight + let para_id: u32 = 1000; + let creator = AccountId32::new(ALICE); + let call: Vec = vec![2, 4, 5]; + let destination = MultiLocation::new(1, X1(Parachain(para_id))); + + setup_prices(&creator); + + // Lets setup 3 tasks + assert_ok!(AutomationPrice::schedule_xcmp_task( + RuntimeOrigin::signed(creator.clone()), + chain1.to_vec(), + exchange1.to_vec(), + asset1.to_vec(), + asset2.to_vec(), + 1000u128, + "gt".as_bytes().to_vec(), + vec!(100), + Box::new(destination.into()), + Box::new(NATIVE_LOCATION.into()), + Box::new(AssetPayment { + asset_location: MultiLocation::new(0, Here).into(), + amount: 10000000000000 + }), + call.clone(), + Weight::from_ref_time(100_000), + Weight::from_ref_time(200_000) + )); + + assert_ok!(AutomationPrice::schedule_xcmp_task( + RuntimeOrigin::signed(creator.clone()), + chain2.to_vec(), + exchange1.to_vec(), + asset2.to_vec(), + asset3.to_vec(), + 3000u128, + "gt".as_bytes().to_vec(), + vec!(900), + Box::new(destination.into()), + Box::new(NATIVE_LOCATION.into()), + Box::new(AssetPayment { + asset_location: MultiLocation::new(0, Here).into(), + amount: 10000000000000 + }), + call.clone(), + Weight::from_ref_time(100_000), + Weight::from_ref_time(200_000) + )); + + assert_ok!(AutomationPrice::schedule_xcmp_task( + RuntimeOrigin::signed(creator.clone()), + chain2.to_vec(), + exchange1.to_vec(), + asset1.to_vec(), + asset3.to_vec(), + 6000u128, + "gt".as_bytes().to_vec(), + vec!(2000), + Box::new(destination.into()), + Box::new(NATIVE_LOCATION.into()), + Box::new(AssetPayment { + asset_location: MultiLocation::new(0, Here).into(), + amount: 10000000000000 + }), + call.clone(), + Weight::from_ref_time(100_000), + Weight::from_ref_time(200_000) + )); + + let task_ids = get_task_ids_from_events(); + let task_id1 = 
task_ids.get(task_ids.len().wrapping_sub(3)).unwrap(); + // let _task_id2 = task_ids.get(task_ids.len().wrapping_sub(2)).unwrap(); + let task_id3 = task_ids.get(task_ids.len().wrapping_sub(1)).unwrap(); + + // at this moment our task queue is empty + // There is schedule tasks, but no tasks in the queue at this moment + assert_eq!(AutomationPrice::get_task_queue().is_empty(), true); + + // shift_tasks move task from registry to the queue + // there is no price yet, so task won't move + AutomationPrice::shift_tasks(Weight::from_ref_time(1_000_000_000)); + // The price is too low so there is no change in our tasks + assert_eq!(AutomationPrice::get_task_queue().is_empty(), true); + let sorted_task_index = AutomationPrice::get_sorted_tasks_index(( + chain1.to_vec(), + exchange1.to_vec(), + (asset1.to_vec(), asset2.to_vec()), + "gt".as_bytes().to_vec(), + )); + assert_eq!(sorted_task_index.map_or_else(|| 0, |x| x.len()), 1); + + // + // now we update price, one task moved to the queue + // The target price for those respectively tasks are 100, 900, 2000 in their pair + // Therefore after running this price update, first task are moved + assert_ok!(AutomationPrice::update_asset_prices( + RuntimeOrigin::signed(creator.clone()), + vec!(chain1.to_vec(), chain2.to_vec(), chain2.to_vec()), + vec!(exchange1.to_vec(), exchange1.to_vec(), exchange1.to_vec()), + vec!(asset1.to_vec(), asset2.to_vec(), asset1.to_vec()), + vec!(asset2.to_vec(), asset3.to_vec(), asset3.to_vec()), + vec!(1005_u128, 10_u128, 300_u128), + vec!(START_BLOCK_TIME as u128, START_BLOCK_TIME as u128, START_BLOCK_TIME as u128), + vec!(1, 2, 3), + )); + AutomationPrice::shift_tasks(Weight::from_ref_time(1_000_000_000)); + assert_eq!(AutomationPrice::get_task_queue(), vec!(task_id1.clone())); + // The task are removed from SortedTasksIndex into the TaskQueue, therefore their length + // decrease to 0 + assert_eq!( + AutomationPrice::get_sorted_tasks_index(( + chain1.to_vec(), + exchange1.to_vec(), + 
(asset1.to_vec(), asset2.to_vec()), + "gt".as_bytes().to_vec(), + )) + .map_or_else(|| 0, |x| x.len()), + 0 + ); + + // Now when price meet trigger condition + AutomationPrice::update_asset_prices( + RuntimeOrigin::signed(creator.clone()), + vec![chain2.to_vec()], + vec![exchange1.to_vec()], + vec![asset1.to_vec()], + vec![asset3.to_vec()], + vec![9000_u128], + vec![START_BLOCK_TIME as u128], + vec![4], + ); + AutomationPrice::shift_tasks(Weight::from_ref_time(1_000_000_000)); + assert_eq!(AutomationPrice::get_task_queue(), vec!(task_id1.clone(), task_id3.clone())); + // The task are removed from SortedTasksIndex into the TaskQueue, therefore their length + // decrease to 0 + assert_eq!( + AutomationPrice::get_sorted_tasks_index(( + chain2.to_vec(), + exchange1.to_vec(), + (asset1.to_vec(), asset3.to_vec()), + "gt".as_bytes().to_vec(), + )) + .map_or_else(|| 0, |x| x.len()), + 0 + ); + + // + // Now if a task come with <, they can + assert_ok!(AutomationPrice::schedule_xcmp_task( + RuntimeOrigin::signed(creator.clone()), + chain2.to_vec(), + exchange1.to_vec(), + asset2.to_vec(), + asset3.to_vec(), + 3000u128, + "lt".as_bytes().to_vec(), + // price for this asset is 10 in our last update + vec!(20), + Box::new(destination.into()), + Box::new(NATIVE_LOCATION.into()), + Box::new(AssetPayment { + asset_location: MultiLocation::new(0, Here).into(), + amount: 10000000000000 + }), + call.clone(), + Weight::from_ref_time(100_000), + Weight::from_ref_time(200_000) + )); + // The task is now on the SortedTasksIndex + assert_eq!( + AutomationPrice::get_sorted_tasks_index(( + chain2.to_vec(), + exchange1.to_vec(), + (asset2.to_vec(), asset3.to_vec()), + "lt".as_bytes().to_vec(), + )) + .map_or_else(|| 0, |x| x.len()), + 1 + ); + + AutomationPrice::shift_tasks(Weight::from_ref_time(1_000_000_000)); + let task_id4 = { + let task_ids = get_task_ids_from_events(); + task_ids.last().unwrap().clone() + }; + + // Now the task is again, moved into the queue and be removed from 
SortedTasksIndex + assert_eq!( + AutomationPrice::get_task_queue(), + vec!(task_id1.clone(), task_id3.clone(), task_id4.clone()) + ); + assert_eq!( + AutomationPrice::get_sorted_tasks_index(( + chain2.to_vec(), + exchange1.to_vec(), + (asset2.to_vec(), asset3.to_vec()), + "lt".as_bytes().to_vec(), + )) + .map_or_else(|| 0, |x| x.len()), + 0 + ); + }) +} diff --git a/pallets/automation-price/src/types.rs b/pallets/automation-price/src/types.rs new file mode 100644 index 00000000..59cdcfa6 --- /dev/null +++ b/pallets/automation-price/src/types.rs @@ -0,0 +1,54 @@ +use crate::{weights::WeightInfo, Config, InstructionSequence}; + +use frame_support::pallet_prelude::*; + +use sp_std::prelude::*; + +use xcm::{latest::prelude::*, VersionedMultiLocation}; + +/// The struct that stores execution payment for a task. +#[derive(Debug, Encode, Eq, PartialEq, Decode, TypeInfo, Clone)] +pub struct AssetPayment { + pub asset_location: VersionedMultiLocation, + pub amount: u128, +} + +/// The enum that stores all action specific data. +#[derive(Clone, Debug, Eq, PartialEq, Encode, Decode, TypeInfo)] +#[scale_info(skip_type_params(T))] +pub enum Action { + XCMP { + destination: MultiLocation, + schedule_fee: MultiLocation, + execution_fee: AssetPayment, + encoded_call: Vec, + encoded_call_weight: Weight, + overall_weight: Weight, + schedule_as: Option, + instruction_sequence: InstructionSequence, + }, + + NativeTransfer { + sender: AccountId, + recipient: AccountId, + amount: BalanceOf, + }, +} + +impl Action { + pub fn execution_weight(&self) -> Result { + let weight = match self { + // TODO: correct with the right run/task function later on + Action::XCMP { .. } => ::WeightInfo::run_xcmp_task(), + Action::NativeTransfer { .. } => ::WeightInfo::run_xcmp_task(), + }; + Ok(weight.ref_time()) + } + + pub fn schedule_fee_location(&self) -> MultiLocation { + match self { + Action::XCMP { schedule_fee, .. 
} => *schedule_fee, + _ => MultiLocation::default(), + } + } +} diff --git a/pallets/automation-price/src/weights.rs b/pallets/automation-price/src/weights.rs index fc152e28..baacd74e 100644 --- a/pallets/automation-price/src/weights.rs +++ b/pallets/automation-price/src/weights.rs @@ -27,56 +27,86 @@ pub trait WeightInfo { fn delete_asset_extrinsic() -> Weight; fn asset_price_update_extrinsic() -> Weight; - fn add_asset_extrinsic() -> Weight; + fn initialize_asset_extrinsic() -> Weight; fn schedule_transfer_task_extrinsic() -> Weight; + fn schedule_xcmp_task() -> Weight; + fn schedule_xcmp_task_through_proxy() -> Weight; + fn cancel_task() -> Weight; + fn run_xcmp_task() -> Weight; } -impl WeightInfo for () { +/// TODO: these are hard code value, need to be change +/// Weights for pallet_automation_time using the Substrate node and recommended hardware. +pub struct SubstrateWeight(PhantomData); +impl WeightInfo for SubstrateWeight { fn emit_event() -> Weight { - Weight::from_ref_time(20_000_000 as u64) + Weight::from_ref_time(20_000_000_u64) } fn run_native_transfer_task() -> Weight { - Weight::from_ref_time(230_000_000 as u64) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) + Weight::from_ref_time(230_000_000_u64) + .saturating_add(RocksDbWeight::get().reads(2_u64)) + .saturating_add(RocksDbWeight::get().writes(2_u64)) } fn reset_asset(v: u32, ) -> Weight { - Weight::from_ref_time(200_000_000 as u64) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) - .saturating_add(Weight::from_ref_time(20_000_000 as u64).saturating_mul(v as u64)) - .saturating_add(RocksDbWeight::get().reads((4 as u64).saturating_mul(v as u64))) - .saturating_add(RocksDbWeight::get().writes((5 as u64).saturating_mul(v as u64))) + Weight::from_ref_time(200_000_000_u64) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + .saturating_add(Weight::from_ref_time(20_000_000_u64).saturating_mul(v as u64)) + 
.saturating_add(RocksDbWeight::get().reads(4_u64.saturating_mul(v as u64))) + .saturating_add(RocksDbWeight::get().writes(5_u64.saturating_mul(v as u64))) } fn update_asset_reset() -> Weight{ - Weight::from_ref_time(200_000_000 as u64) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + Weight::from_ref_time(200_000_000_u64) + .saturating_add(RocksDbWeight::get().reads(2_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) } fn delete_asset_tasks() -> Weight{ - Weight::from_ref_time(200_000_000 as u64) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) + Weight::from_ref_time(200_000_000_u64) + .saturating_add(RocksDbWeight::get().reads(1_u64)) + .saturating_add(RocksDbWeight::get().writes(3_u64)) } fn delete_asset_extrinsic() -> Weight{ - Weight::from_ref_time(220_000_000 as u64) - .saturating_add(RocksDbWeight::get().reads(3 as u64)) - .saturating_add(RocksDbWeight::get().writes(7 as u64)) + Weight::from_ref_time(220_000_000_u64) + .saturating_add(RocksDbWeight::get().reads(3_u64)) + .saturating_add(RocksDbWeight::get().writes(7_u64)) } fn asset_price_update_extrinsic() -> Weight{ - Weight::from_ref_time(220_000_000 as u64) - .saturating_add(RocksDbWeight::get().reads(5 as u64)) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) - .saturating_add(RocksDbWeight::get().reads(21 as u64)) - .saturating_add(RocksDbWeight::get().writes(21 as u64)) + Weight::from_ref_time(220_000_000_u64) + .saturating_add(RocksDbWeight::get().reads(5_u64)) + .saturating_add(RocksDbWeight::get().writes(3_u64)) + .saturating_add(RocksDbWeight::get().reads(21_u64)) + .saturating_add(RocksDbWeight::get().writes(21_u64)) } - fn add_asset_extrinsic() -> Weight{ - Weight::from_ref_time(220_000_000 as u64) - .saturating_add(RocksDbWeight::get().reads(4 as u64)) - .saturating_add(RocksDbWeight::get().writes(5 as u64)) + fn initialize_asset_extrinsic() -> 
Weight{ + Weight::from_ref_time(220_000_000_u64) + .saturating_add(RocksDbWeight::get().reads(4_u64)) + .saturating_add(RocksDbWeight::get().writes(5_u64)) } fn schedule_transfer_task_extrinsic() -> Weight{ - Weight::from_ref_time(200_000_000 as u64) - .saturating_add(RocksDbWeight::get().reads(6 as u64)) - .saturating_add(RocksDbWeight::get().writes(4 as u64)) + Weight::from_ref_time(200_000_000_u64) + .saturating_add(RocksDbWeight::get().reads(6_u64)) + .saturating_add(RocksDbWeight::get().writes(4_u64)) + } + + fn schedule_xcmp_task() -> Weight{ + Weight::from_ref_time(200_000_000_u64) + .saturating_add(RocksDbWeight::get().reads(6_u64)) + .saturating_add(RocksDbWeight::get().writes(4_u64)) + } + + fn schedule_xcmp_task_through_proxy() -> Weight{ + Weight::from_ref_time(200_000_000_u64) + .saturating_add(RocksDbWeight::get().reads(6_u64)) + .saturating_add(RocksDbWeight::get().writes(4_u64)) + } + +fn cancel_task() -> Weight { + Weight::from_ref_time(20_000_000_u64) + } + + // TODO: Re-generate + fn run_xcmp_task() -> Weight{ + Weight::from_ref_time(200_000_000_u64) + .saturating_add(RocksDbWeight::get().reads(6_u64)) + .saturating_add(RocksDbWeight::get().writes(4_u64)) } } diff --git a/runtime/neumann/src/lib.rs b/runtime/neumann/src/lib.rs index b4ff25d4..9d63640e 100644 --- a/runtime/neumann/src/lib.rs +++ b/runtime/neumann/src/lib.rs @@ -94,7 +94,7 @@ use common_runtime::{ SCHEDULED_TASKS_INITIALIZE_RATIO, }, }, - fees::{DealWithExecutionFees, DealWithInclusionFees}, + fees::DealWithInclusionFees, CurrencyHooks, }; use primitives::{ @@ -917,11 +917,18 @@ impl pallet_automation_price::Config for Runtime { type MaxTasksPerSlot = ConstU32<1>; type MaxBlockWeight = MaxBlockWeight; type MaxWeightPercentage = MaxWeightPercentage; - type WeightInfo = (); + type WeightInfo = pallet_automation_price::weights::SubstrateWeight; type ExecutionWeightFee = ExecutionWeightFee; type Currency = Balances; - type FeeHandler = pallet_automation_price::FeeHandler>; - type 
Origin = RuntimeOrigin; + type MultiCurrency = Currencies; + type CurrencyId = TokenId; + type XcmpTransactor = XcmpHandler; + type CurrencyIdConvert = TokenIdConvert; + type FeeConversionRateProvider = FeePerSecondProvider; + type FeeHandler = pallet_automation_price::FeeHandler; + type UniversalLocation = UniversalLocation; + type SelfParaId = parachain_info::Pallet; + type EnsureProxy = AutomationEnsureProxy; } pub struct ClosedCallFilter; diff --git a/runtime/turing/src/lib.rs b/runtime/turing/src/lib.rs index 1968a266..3bd88ec5 100644 --- a/runtime/turing/src/lib.rs +++ b/runtime/turing/src/lib.rs @@ -93,7 +93,7 @@ use common_runtime::{ SCHEDULED_TASKS_INITIALIZE_RATIO, }, }, - fees::{DealWithExecutionFees, DealWithInclusionFees}, + fees::DealWithInclusionFees, CurrencyHooks, }; use primitives::{ @@ -936,11 +936,18 @@ impl pallet_automation_price::Config for Runtime { type MaxTasksPerSlot = ConstU32<1>; type MaxBlockWeight = MaxBlockWeight; type MaxWeightPercentage = MaxWeightPercentage; - type WeightInfo = (); + type WeightInfo = pallet_automation_price::weights::SubstrateWeight; type ExecutionWeightFee = ExecutionWeightFee; type Currency = Balances; - type FeeHandler = pallet_automation_price::FeeHandler>; - type Origin = RuntimeOrigin; + type MultiCurrency = Currencies; + type CurrencyId = TokenId; + type XcmpTransactor = XcmpHandler; + type EnsureProxy = AutomationEnsureProxy; + type CurrencyIdConvert = TokenIdConvert; + type FeeConversionRateProvider = FeePerSecondProvider; + type FeeHandler = pallet_automation_price::FeeHandler; + type UniversalLocation = UniversalLocation; + type SelfParaId = parachain_info::Pallet; } pub struct ClosedCallFilter;