diff --git a/CHANGELOG.md b/CHANGELOG.md
index b04ebaa0f0..14bd582d0a 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,19 @@
# Changelog
+## 8.2.1 /2024-11-06
+
+## What's Changed
+
+* Expands the type registry to include all the available options by @thewhaleking in https://github.com/opentensor/bittensor/pull/2353
+* add `Subtensor.register`, `Subtensor.difficulty` and related stuff with tests by @roman-opentensor in https://github.com/opentensor/bittensor/pull/2352
+* added to Subtensor: `burned_register`, `get_subnet_burn_cost`, `recycle` and related extrinsics by @roman-opentensor in https://github.com/opentensor/bittensor/pull/2359
+* Poem "Risen from the Past". Act 3. by @roman-opentensor in https://github.com/opentensor/bittensor/pull/2363
+* default port from 9946 to 9944 by @roman-opentensor in https://github.com/opentensor/bittensor/pull/2376
+* remove unused prometheus extrinsic by @roman-opentensor in https://github.com/opentensor/bittensor/pull/2378
+* Replace rich.console with btlogging.logging by @roman-opentensor in https://github.com/opentensor/bittensor/pull/2377
+
+**Full Changelog**: https://github.com/opentensor/bittensor/compare/v8.2.0...v8.2.1
+
## 8.2.0 /2024-10-10
## What's Changed
diff --git a/bittensor/core/async_subtensor.py b/bittensor/core/async_subtensor.py
new file mode 100644
index 0000000000..e2c75d1086
--- /dev/null
+++ b/bittensor/core/async_subtensor.py
@@ -0,0 +1,1578 @@
+import asyncio
+import ssl
+from typing import Optional, Any, Union, TypedDict, Iterable
+
+import aiohttp
+import numpy as np
+import scalecodec
+from bittensor_wallet import Wallet
+from bittensor_wallet.utils import SS58_FORMAT
+from numpy.typing import NDArray
+from scalecodec import GenericCall
+from scalecodec.base import RuntimeConfiguration
+from scalecodec.type_registry import load_type_registry_preset
+from substrateinterface.exceptions import SubstrateRequestException
+
+from bittensor.core.chain_data import (
+ DelegateInfo,
+ custom_rpc_type_registry,
+ StakeInfo,
+ NeuronInfoLite,
+ NeuronInfo,
+ SubnetHyperparameters,
+ decode_account_id,
+)
+from bittensor.core.extrinsics.async_registration import register_extrinsic
+from bittensor.core.extrinsics.async_root import (
+ set_root_weights_extrinsic,
+ root_register_extrinsic,
+)
+from bittensor.core.extrinsics.async_transfer import transfer_extrinsic
+from bittensor.core.extrinsics.async_weights import (
+ commit_weights_extrinsic,
+ set_weights_extrinsic,
+)
+from bittensor.core.settings import (
+ TYPE_REGISTRY,
+ DEFAULTS,
+ NETWORK_MAP,
+ DELEGATES_DETAILS_URL,
+ DEFAULT_NETWORK,
+)
+from bittensor.core.settings import version_as_int
+from bittensor.utils import (
+ torch,
+ ss58_to_vec_u8,
+ format_error_message,
+ decode_hex_identity_dict,
+ validate_chain_endpoint,
+)
+from bittensor.utils.async_substrate_interface import (
+ AsyncSubstrateInterface,
+ TimeoutException,
+)
+from bittensor.utils.balance import Balance
+from bittensor.utils.btlogging import logging
+from bittensor.utils.delegates_details import DelegatesDetails
+from bittensor.utils.weight_utils import generate_weight_hash
+
+
+class ParamWithTypes(TypedDict):
+ name: str # Name of the parameter.
+ type: str # ScaleType string of the parameter.
+
+
+class ProposalVoteData:
+ index: int
+ threshold: int
+ ayes: list[str]
+ nays: list[str]
+ end: int
+
+ def __init__(self, proposal_dict: dict) -> None:
+ self.index = proposal_dict["index"]
+ self.threshold = proposal_dict["threshold"]
+ self.ayes = self.decode_ss58_tuples(proposal_dict["ayes"])
+ self.nays = self.decode_ss58_tuples(proposal_dict["nays"])
+ self.end = proposal_dict["end"]
+
+ @staticmethod
+ def decode_ss58_tuples(line: tuple):
+ """Decodes a tuple of ss58 addresses formatted as bytes tuples."""
+        return [decode_account_id(item[0]) for item in line]
+
+
+class AsyncSubtensor:
+ """Thin layer for interacting with Substrate Interface. Mostly a collection of frequently-used calls."""
+
+ def __init__(self, network: str = DEFAULT_NETWORK):
+ if network in NETWORK_MAP:
+ self.chain_endpoint = NETWORK_MAP[network]
+ self.network = network
+ if network == "local":
+ logging.warning(
+ "[yellow]Warning[/yellow]: Verify your local subtensor is running on port 9944."
+ )
+ else:
+ is_valid, _ = validate_chain_endpoint(network)
+ if is_valid:
+ self.chain_endpoint = network
+ if network in NETWORK_MAP.values():
+ self.network = next(
+ key for key, value in NETWORK_MAP.items() if value == network
+ )
+ else:
+ self.network = "custom"
+ else:
+ logging.info(
+ f"Network not specified or not valid. Using default chain endpoint: {NETWORK_MAP[DEFAULTS.subtensor.network]}."
+ )
+ logging.info(
+ "You can set this for commands with the --network flag, or by setting this in the config."
+ )
+ self.chain_endpoint = NETWORK_MAP[DEFAULTS.subtensor.network]
+ self.network = DEFAULTS.subtensor.network
+
+ self.substrate = AsyncSubstrateInterface(
+ chain_endpoint=self.chain_endpoint,
+ ss58_format=SS58_FORMAT,
+ type_registry=TYPE_REGISTRY,
+ chain_name="Bittensor",
+ )
+
+ def __str__(self):
+ return f"Network: {self.network}, Chain: {self.chain_endpoint}"
+
+ async def __aenter__(self):
+ logging.info(
+ f"Connecting to Substrate: {self}..."
+ )
+ try:
+ async with self.substrate:
+ return self
+ except TimeoutException:
+ logging.error(
+ f"Error: Timeout occurred connecting to substrate. Verify your chain and network settings: {self}"
+ )
+ raise ConnectionError
+ except (ConnectionRefusedError, ssl.SSLError) as error:
+ logging.error(
+ f"Error: Connection refused when connecting to substrate. "
+ f"Verify your chain and network settings: {self}. Error: {error}"
+ )
+ raise ConnectionError
+
+ async def __aexit__(self, exc_type, exc_val, exc_tb):
+ await self.substrate.close()
+
+ async def encode_params(
+ self,
+        call_definition: dict[str, Any],  # {"params": list[ParamWithTypes], "type": str}
+ params: Union[list[Any], dict[str, Any]],
+ ) -> str:
+ """Returns a hex encoded string of the params using their types."""
+ param_data = scalecodec.ScaleBytes(b"")
+
+        for i, param in enumerate(call_definition["params"]):
+ scale_obj = await self.substrate.create_scale_object(param["type"])
+ if isinstance(params, list):
+ param_data += scale_obj.encode(params[i])
+ else:
+ if param["name"] not in params:
+ raise ValueError(f"Missing param {param['name']} in params dict.")
+
+ param_data += scale_obj.encode(params[param["name"]])
+
+ return param_data.to_hex()
+
+ async def get_current_block(self) -> int:
+ """
+ Returns the current block number on the Bittensor blockchain. This function provides the latest block number, indicating the most recent state of the blockchain.
+
+ Returns:
+ int: The current chain block number.
+
+ Knowing the current block number is essential for querying real-time data and performing time-sensitive operations on the blockchain. It serves as a reference point for network activities and data synchronization.
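+
+        Example:
+            A minimal sketch, assuming an open ``subtensor`` connection (see the class-level example):
+
+                block = await subtensor.get_current_block()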
+ """
+ return await self.substrate.get_block_number()
+
+ async def get_block_hash(self, block_id: Optional[int] = None):
+ """
+ Retrieves the hash of a specific block on the Bittensor blockchain. The block hash is a unique identifier representing the cryptographic hash of the block's content, ensuring its integrity and immutability.
+
+ Args:
+            block_id (Optional[int]): The block number for which the hash is to be retrieved. If ``None``, the chain head hash is returned.
+
+ Returns:
+ str: The cryptographic hash of the specified block.
+
+ The block hash is a fundamental aspect of blockchain technology, providing a secure reference to each block's data. It is crucial for verifying transactions, ensuring data consistency, and maintaining the trustworthiness of the blockchain.
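+
+        Example:
+            Illustrative sketch; the block number is a placeholder:
+
+                head_hash = await subtensor.get_block_hash()  # chain head
+                past_hash = await subtensor.get_block_hash(block_id=1000)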
+ """
+        if block_id is not None:
+ return await self.substrate.get_block_hash(block_id)
+ else:
+ return await self.substrate.get_chain_head()
+
+ async def is_hotkey_registered_any(
+ self, hotkey_ss58: str, block_hash: Optional[str] = None
+ ) -> bool:
+ """
+ Checks if a neuron's hotkey is registered on any subnet within the Bittensor network.
+
+ Args:
+ hotkey_ss58 (str): The ``SS58`` address of the neuron's hotkey.
+            block_hash (Optional[str]): The hash of the block at which to perform the query. If ``None``, the chain head is used.
+
+ Returns:
+ bool: ``True`` if the hotkey is registered on any subnet, False otherwise.
+
+ This function is essential for determining the network-wide presence and participation of a neuron.
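+
+        Example:
+            Sketch with a placeholder hotkey address:
+
+                if await subtensor.is_hotkey_registered_any(hotkey_ss58):
+                    print("Hotkey is registered on at least one subnet.")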
+ """
+ return len(await self.get_netuids_for_hotkey(hotkey_ss58, block_hash)) > 0
+
+ async def get_subnet_burn_cost(
+ self, block_hash: Optional[str] = None
+ ) -> Optional[str]:
+ """
+ Retrieves the burn cost for registering a new subnet within the Bittensor network. This cost represents the amount of Tao that needs to be locked or burned to establish a new subnet.
+
+ Args:
+            block_hash (Optional[str]): The hash of the block at which to perform the query. If ``None``, the chain head is used.
+
+ Returns:
+ int: The burn cost for subnet registration.
+
+ The subnet burn cost is an important economic parameter, reflecting the network's mechanisms for controlling the proliferation of subnets and ensuring their commitment to the network's long-term viability.
+ """
+ lock_cost = await self.query_runtime_api(
+ runtime_api="SubnetRegistrationRuntimeApi",
+ method="get_network_registration_cost",
+ params=[],
+ block_hash=block_hash,
+ )
+
+ return lock_cost
+
+ async def get_total_subnets(
+ self, block_hash: Optional[str] = None
+ ) -> Optional[int]:
+ """
+ Retrieves the total number of subnets within the Bittensor network as of a specific blockchain block.
+
+ Args:
+            block_hash (Optional[str]): The hash of the block at which to perform the query. If ``None``, the chain head is used.
+
+ Returns:
+            Optional[int]: The total number of subnets in the network.
+
+ Understanding the total number of subnets is essential for assessing the network's growth and the extent of its decentralized infrastructure.
+ """
+ result = await self.substrate.query(
+ module="SubtensorModule",
+ storage_function="TotalNetworks",
+ params=[],
+ block_hash=block_hash,
+ )
+ return result
+
+ async def get_subnets(self, block_hash: Optional[str] = None) -> list[int]:
+ """
+ Retrieves the list of all subnet unique identifiers (netuids) currently present in the Bittensor network.
+
+ Args:
+ block_hash (Optional[str]): The hash of the block to retrieve the subnet unique identifiers from.
+
+ Returns:
+ A list of subnet netuids.
+
+ This function provides a comprehensive view of the subnets within the Bittensor network,
+ offering insights into its diversity and scale.
+ """
+ result = await self.substrate.query_map(
+ module="SubtensorModule",
+ storage_function="NetworksAdded",
+ block_hash=block_hash,
+ reuse_block_hash=True,
+ )
+ return (
+ []
+ if result is None or not hasattr(result, "records")
+ else [netuid async for netuid, exists in result if exists]
+ )
+
+ async def is_hotkey_delegate(
+ self,
+ hotkey_ss58: str,
+ block_hash: Optional[str] = None,
+ reuse_block: bool = False,
+ ) -> bool:
+ """
+ Determines whether a given hotkey (public key) is a delegate on the Bittensor network. This function checks if the neuron associated with the hotkey is part of the network's delegation system.
+
+ Args:
+ hotkey_ss58 (str): The SS58 address of the neuron's hotkey.
+ block_hash (Optional[str]): The hash of the blockchain block number for the query.
+ reuse_block (Optional[bool]): Whether to reuse the last-used block hash.
+
+ Returns:
+ `True` if the hotkey is a delegate, `False` otherwise.
+
+ Being a delegate is a significant status within the Bittensor network, indicating a neuron's involvement in consensus and governance processes.
+ """
+ delegates = await self.get_delegates(
+ block_hash=block_hash, reuse_block=reuse_block
+ )
+ return hotkey_ss58 in [info.hotkey_ss58 for info in delegates]
+
+ async def get_delegates(
+ self, block_hash: Optional[str] = None, reuse_block: bool = False
+ ) -> list[DelegateInfo]:
+ """
+ Fetches all delegates on the chain
+
+ Args:
+ block_hash (Optional[str]): hash of the blockchain block number for the query.
+ reuse_block (Optional[bool]): whether to reuse the last-used block hash.
+
+ Returns:
+ List of DelegateInfo objects, or an empty list if there are no delegates.
+ """
+ hex_bytes_result = await self.query_runtime_api(
+ runtime_api="DelegateInfoRuntimeApi",
+ method="get_delegates",
+ params=[],
+ block_hash=block_hash,
+ reuse_block=reuse_block,
+ )
+ if hex_bytes_result is not None:
+ try:
+ bytes_result = bytes.fromhex(hex_bytes_result[2:])
+ except ValueError:
+ bytes_result = bytes.fromhex(hex_bytes_result)
+
+ return DelegateInfo.list_from_vec_u8(bytes_result)
+ else:
+ return []
+
+ async def get_stake_info_for_coldkey(
+ self,
+ coldkey_ss58: str,
+ block_hash: Optional[str] = None,
+ reuse_block: bool = False,
+ ) -> list[StakeInfo]:
+ """
+ Retrieves stake information associated with a specific coldkey. This function provides details about the stakes held by an account, including the staked amounts and associated delegates.
+
+ Args:
+ coldkey_ss58 (str): The ``SS58`` address of the account's coldkey.
+ block_hash (Optional[str]): The hash of the blockchain block number for the query.
+ reuse_block (bool): Whether to reuse the last-used block hash.
+
+ Returns:
+ A list of StakeInfo objects detailing the stake allocations for the account.
+
+ Stake information is vital for account holders to assess their investment and participation in the network's delegation and consensus processes.
+ """
+ encoded_coldkey = ss58_to_vec_u8(coldkey_ss58)
+
+ hex_bytes_result = await self.query_runtime_api(
+ runtime_api="StakeInfoRuntimeApi",
+ method="get_stake_info_for_coldkey",
+ params=[encoded_coldkey],
+ block_hash=block_hash,
+ reuse_block=reuse_block,
+ )
+
+ if hex_bytes_result is None:
+ return []
+
+ try:
+ bytes_result = bytes.fromhex(hex_bytes_result[2:])
+ except ValueError:
+ bytes_result = bytes.fromhex(hex_bytes_result)
+
+ return StakeInfo.list_from_vec_u8(bytes_result)
+
+ async def get_stake_for_coldkey_and_hotkey(
+ self, hotkey_ss58: str, coldkey_ss58: str, block_hash: Optional[str] = None
+ ) -> Balance:
+ """
+ Retrieves stake information associated with a specific coldkey and hotkey.
+
+ Args:
+ hotkey_ss58 (str): the hotkey SS58 address to query
+ coldkey_ss58 (str): the coldkey SS58 address to query
+ block_hash (Optional[str]): the hash of the blockchain block number for the query.
+
+ Returns:
+ Stake Balance for the given coldkey and hotkey
+ """
+ _result = await self.substrate.query(
+ module="SubtensorModule",
+ storage_function="Stake",
+ params=[hotkey_ss58, coldkey_ss58],
+ block_hash=block_hash,
+ )
+ return Balance.from_rao(_result or 0)
+
+ async def query_runtime_api(
+ self,
+ runtime_api: str,
+ method: str,
+ params: Optional[Union[list[list[int]], dict[str, int], list[int]]],
+ block_hash: Optional[str] = None,
+ reuse_block: bool = False,
+ ) -> Optional[str]:
+ """
+ Queries the runtime API of the Bittensor blockchain, providing a way to interact with the underlying runtime and retrieve data encoded in Scale Bytes format. This function is essential for advanced users who need to interact with specific runtime methods and decode complex data types.
+
+ Args:
+ runtime_api (str): The name of the runtime API to query.
+ method (str): The specific method within the runtime API to call.
+ params (Optional[Union[list[list[int]], dict[str, int]]]): The parameters to pass to the method call.
+ block_hash (Optional[str]): The hash of the blockchain block number at which to perform the query.
+ reuse_block (bool): Whether to reuse the last-used block hash.
+
+ Returns:
+ The Scale Bytes encoded result from the runtime API call, or ``None`` if the call fails.
+
+ This function enables access to the deeper layers of the Bittensor blockchain, allowing for detailed and specific interactions with the network's runtime environment.
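+
+        Example:
+            A hedged sketch mirroring the internal call in ``get_delegates``; the runtime API and method names must exist in ``TYPE_REGISTRY``:
+
+                hex_bytes_result = await subtensor.query_runtime_api(
+                    runtime_api="DelegateInfoRuntimeApi",
+                    method="get_delegates",
+                    params=[],
+                )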
+ """
+ call_definition = TYPE_REGISTRY["runtime_api"][runtime_api]["methods"][method]
+
+ data = (
+ "0x"
+ if params is None
+ else await self.encode_params(
+ call_definition=call_definition, params=params
+ )
+ )
+ api_method = f"{runtime_api}_{method}"
+
+ json_result = await self.substrate.rpc_request(
+ method="state_call",
+ params=[api_method, data, block_hash] if block_hash else [api_method, data],
+ reuse_block_hash=reuse_block,
+ )
+
+ if json_result is None:
+ return None
+
+ return_type = call_definition["type"]
+
+ as_scale_bytes = scalecodec.ScaleBytes(json_result["result"]) # type: ignore
+
+ rpc_runtime_config = RuntimeConfiguration()
+ rpc_runtime_config.update_type_registry(load_type_registry_preset("legacy"))
+ rpc_runtime_config.update_type_registry(custom_rpc_type_registry)
+
+ obj = rpc_runtime_config.create_scale_object(return_type, as_scale_bytes)
+ if obj.data.to_hex() == "0x0400": # RPC returned None result
+ return None
+
+ return obj.decode()
+
+ async def get_balance(
+ self,
+ *addresses: str,
+ block_hash: Optional[str] = None,
+ ) -> dict[str, Balance]:
+ """
+ Retrieves the balance for given coldkey(s)
+
+ Args:
+ addresses (str): coldkey addresses(s).
+ block_hash (Optional[str]): the block hash, optional.
+
+ Returns:
+ Dict of {address: Balance objects}.
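+
+        Example:
+            Sketch with placeholder coldkey addresses:
+
+                balances = await subtensor.get_balance(coldkey_1, coldkey_2)
+                for address, balance in balances.items():
+                    print(address, balance)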
+ """
+ calls = [
+ (
+ await self.substrate.create_storage_key(
+ "System", "Account", [address], block_hash=block_hash
+ )
+ )
+ for address in addresses
+ ]
+ batch_call = await self.substrate.query_multi(calls, block_hash=block_hash)
+ results = {}
+ for item in batch_call:
+ value = item[1] or {"data": {"free": 0}}
+ results.update({item[0].params[0]: Balance(value["data"]["free"])})
+ return results
+
+ async def get_transfer_fee(
+ self, wallet: "Wallet", dest: str, value: Union["Balance", float, int]
+ ) -> "Balance":
+ """
+ Calculates the transaction fee for transferring tokens from a wallet to a specified destination address. This function simulates the transfer to estimate the associated cost, taking into account the current network conditions and transaction complexity.
+
+ Args:
+ wallet (bittensor_wallet.Wallet): The wallet from which the transfer is initiated.
+ dest (str): The ``SS58`` address of the destination account.
+ value (Union[bittensor.utils.balance.Balance, float, int]): The amount of tokens to be transferred, specified as a Balance object, or in Tao (float) or Rao (int) units.
+
+ Returns:
+ bittensor.utils.balance.Balance: The estimated transaction fee for the transfer, represented as a Balance object.
+
+ Estimating the transfer fee is essential for planning and executing token transactions, ensuring that the wallet has sufficient funds to cover both the transfer amount and the associated costs. This function provides a crucial tool for managing financial operations within the Bittensor network.
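+
+        Example:
+            Illustrative sketch; ``wallet`` and ``dest`` are placeholders:
+
+                fee = await subtensor.get_transfer_fee(wallet, dest, value=1.0)  # 1.0 TAO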
+ """
+ if isinstance(value, float):
+ value = Balance.from_tao(value)
+ elif isinstance(value, int):
+ value = Balance.from_rao(value)
+
+ if isinstance(value, Balance):
+ call = await self.substrate.compose_call(
+ call_module="Balances",
+ call_function="transfer_allow_death",
+ call_params={"dest": dest, "value": value.rao},
+ )
+
+ try:
+ payment_info = await self.substrate.get_payment_info(
+ call=call, keypair=wallet.coldkeypub
+ )
+ except Exception as e:
+ logging.error(
+ f":cross_mark: Failed to get payment info: {e}"
+ )
+ payment_info = {"partialFee": int(2e7)} # assume 0.02 Tao
+
+ fee = Balance.from_rao(payment_info["partialFee"])
+ return fee
+ else:
+ fee = Balance.from_rao(int(2e7))
+ logging.error(
+ "To calculate the transaction fee, the value must be Balance, float, or int. Received type: %s. Fee "
+ "is %s",
+ type(value),
+ 2e7,
+ )
+ return fee
+
+ async def get_total_stake_for_coldkey(
+ self,
+ *ss58_addresses,
+ block_hash: Optional[str] = None,
+ ) -> dict[str, Balance]:
+ """
+ Returns the total stake held on a coldkey.
+
+ Args:
+ ss58_addresses (tuple[str]): The SS58 address(es) of the coldkey(s)
+ block_hash (str): The hash of the block number to retrieve the stake from.
+
+ Returns:
+ Dict in view {address: Balance objects}.
+ """
+ calls = [
+ (
+ await self.substrate.create_storage_key(
+ "SubtensorModule",
+ "TotalColdkeyStake",
+ [address],
+ block_hash=block_hash,
+ )
+ )
+ for address in ss58_addresses
+ ]
+ batch_call = await self.substrate.query_multi(calls, block_hash=block_hash)
+ results = {}
+ for item in batch_call:
+ results.update({item[0].params[0]: Balance.from_rao(item[1] or 0)})
+ return results
+
+ async def get_total_stake_for_hotkey(
+ self,
+ *ss58_addresses,
+ block_hash: Optional[str] = None,
+ reuse_block: bool = False,
+ ) -> dict[str, Balance]:
+ """
+ Returns the total stake held on a hotkey.
+
+ Args:
+ ss58_addresses (tuple[str]): The SS58 address(es) of the hotkey(s)
+ block_hash (str): The hash of the block number to retrieve the stake from.
+ reuse_block (bool): Whether to reuse the last-used block hash when retrieving info.
+
+ Returns:
+ Dict {address: Balance objects}.
+ """
+ results = await self.substrate.query_multiple(
+            params=list(ss58_addresses),
+ module="SubtensorModule",
+ storage_function="TotalHotkeyStake",
+ block_hash=block_hash,
+ reuse_block_hash=reuse_block,
+ )
+ return {k: Balance.from_rao(r or 0) for (k, r) in results.items()}
+
+ async def get_netuids_for_hotkey(
+ self,
+ hotkey_ss58: str,
+ block_hash: Optional[str] = None,
+ reuse_block: bool = False,
+ ) -> list[int]:
+ """
+ Retrieves a list of subnet UIDs (netuids) for which a given hotkey is a member. This function identifies the specific subnets within the Bittensor network where the neuron associated with the hotkey is active.
+
+ Args:
+ hotkey_ss58 (str): The ``SS58`` address of the neuron's hotkey.
+ block_hash (Optional[str]): The hash of the blockchain block number at which to perform the query.
+ reuse_block (Optional[bool]): Whether to reuse the last-used block hash when retrieving info.
+
+ Returns:
+ A list of netuids where the neuron is a member.
+ """
+
+ result = await self.substrate.query_map(
+ module="SubtensorModule",
+ storage_function="IsNetworkMember",
+ params=[hotkey_ss58],
+ block_hash=block_hash,
+ reuse_block_hash=reuse_block,
+ )
+ return (
+ [record[0] async for record in result if record[1]]
+ if result and hasattr(result, "records")
+ else []
+ )
+
+ async def subnet_exists(
+ self, netuid: int, block_hash: Optional[str] = None, reuse_block: bool = False
+ ) -> bool:
+ """
+ Checks if a subnet with the specified unique identifier (netuid) exists within the Bittensor network.
+
+ Args:
+ netuid (int): The unique identifier of the subnet.
+ block_hash (Optional[str]): The hash of the blockchain block number at which to check the subnet existence.
+ reuse_block (bool): Whether to reuse the last-used block hash.
+
+ Returns:
+ `True` if the subnet exists, `False` otherwise.
+
+ This function is critical for verifying the presence of specific subnets in the network,
+ enabling a deeper understanding of the network's structure and composition.
+ """
+ result = await self.substrate.query(
+ module="SubtensorModule",
+ storage_function="NetworksAdded",
+ params=[netuid],
+ block_hash=block_hash,
+ reuse_block_hash=reuse_block,
+ )
+ return result
+
+ async def get_hyperparameter(
+ self,
+ param_name: str,
+ netuid: int,
+ block_hash: Optional[str] = None,
+ reuse_block: bool = False,
+ ) -> Optional[Any]:
+ """
+ Retrieves a specified hyperparameter for a specific subnet.
+
+ Args:
+ param_name (str): The name of the hyperparameter to retrieve.
+ netuid (int): The unique identifier of the subnet.
+ block_hash (Optional[str]): The hash of blockchain block number for the query.
+ reuse_block (bool): Whether to reuse the last-used block hash.
+
+ Returns:
+ The value of the specified hyperparameter if the subnet exists, or None
+ """
+ if not await self.subnet_exists(netuid, block_hash):
+ print("subnet does not exist")
+ return None
+
+ result = await self.substrate.query(
+ module="SubtensorModule",
+ storage_function=param_name,
+ params=[netuid],
+ block_hash=block_hash,
+ reuse_block_hash=reuse_block,
+ )
+
+ if result is None:
+ return None
+
+ return result
+
+ async def filter_netuids_by_registered_hotkeys(
+ self,
+ all_netuids: Iterable[int],
+ filter_for_netuids: Iterable[int],
+ all_hotkeys: Iterable[Wallet],
+ block_hash: Optional[str] = None,
+ reuse_block: bool = False,
+ ) -> list[int]:
+ """
+ Filters a given list of all netuids for certain specified netuids and hotkeys
+
+ Args:
+ all_netuids (Iterable[int]): A list of netuids to filter.
+ filter_for_netuids (Iterable[int]): A subset of all_netuids to filter from the main list
+ all_hotkeys (Iterable[Wallet]): Hotkeys to filter from the main list
+ block_hash (str): hash of the blockchain block number at which to perform the query.
+ reuse_block (bool): whether to reuse the last-used blockchain hash when retrieving info.
+
+ Returns:
+ The filtered list of netuids.
+ """
+ netuids_with_registered_hotkeys = [
+ item
+ for sublist in await asyncio.gather(
+ *[
+ self.get_netuids_for_hotkey(
+ wallet.hotkey.ss58_address,
+ reuse_block=reuse_block,
+ block_hash=block_hash,
+ )
+ for wallet in all_hotkeys
+ ]
+ )
+ for item in sublist
+ ]
+
+ if not filter_for_netuids:
+ all_netuids = netuids_with_registered_hotkeys
+
+ else:
+ filtered_netuids = [
+ netuid for netuid in all_netuids if netuid in filter_for_netuids
+ ]
+
+ registered_hotkeys_filtered = [
+ netuid
+ for netuid in netuids_with_registered_hotkeys
+ if netuid in filter_for_netuids
+ ]
+
+ # Combine both filtered lists
+ all_netuids = filtered_netuids + registered_hotkeys_filtered
+
+ return list(set(all_netuids))
+
+ async def get_existential_deposit(
+ self, block_hash: Optional[str] = None, reuse_block: bool = False
+ ) -> Balance:
+ """
+ Retrieves the existential deposit amount for the Bittensor blockchain.
+ The existential deposit is the minimum amount of TAO required for an account to exist on the blockchain.
+ Accounts with balances below this threshold can be reaped to conserve network resources.
+
+ Args:
+ block_hash (str): Block hash at which to query the deposit amount. If `None`, the current block is used.
+ reuse_block (bool): Whether to reuse the last-used blockchain block hash.
+
+ Returns:
+ The existential deposit amount.
+
+ The existential deposit is a fundamental economic parameter in the Bittensor network, ensuring efficient use of storage and preventing the proliferation of dust accounts.
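+
+        Example:
+            Minimal sketch:
+
+                deposit = await subtensor.get_existential_deposit()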
+ """
+ result = await self.substrate.get_constant(
+ module_name="Balances",
+ constant_name="ExistentialDeposit",
+ block_hash=block_hash,
+ reuse_block_hash=reuse_block,
+ )
+
+ if result is None:
+ raise Exception("Unable to retrieve existential deposit amount.")
+
+ return Balance.from_rao(result)
+
+ async def neurons(
+ self, netuid: int, block_hash: Optional[str] = None
+ ) -> list[NeuronInfo]:
+ """
+ Retrieves a list of all neurons within a specified subnet of the Bittensor network.
+ This function provides a snapshot of the subnet's neuron population, including each neuron's attributes and network interactions.
+
+ Args:
+ netuid (int): The unique identifier of the subnet.
+ block_hash (str): The hash of the blockchain block number for the query.
+
+ Returns:
+ A list of NeuronInfo objects detailing each neuron's characteristics in the subnet.
+
+ Understanding the distribution and status of neurons within a subnet is key to comprehending the network's decentralized structure and the dynamics of its consensus and governance processes.
+ """
+ neurons_lite, weights, bonds = await asyncio.gather(
+ self.neurons_lite(netuid=netuid, block_hash=block_hash),
+ self.weights(netuid=netuid, block_hash=block_hash),
+ self.bonds(netuid=netuid, block_hash=block_hash),
+ )
+
+ weights_as_dict = {uid: w for uid, w in weights}
+ bonds_as_dict = {uid: b for uid, b in bonds}
+
+ neurons = [
+ NeuronInfo.from_weights_bonds_and_neuron_lite(
+ neuron_lite, weights_as_dict, bonds_as_dict
+ )
+ for neuron_lite in neurons_lite
+ ]
+
+ return neurons
+
+ async def neurons_lite(
+ self, netuid: int, block_hash: Optional[str] = None, reuse_block: bool = False
+ ) -> list[NeuronInfoLite]:
+ """
+ Retrieves a list of neurons in a 'lite' format from a specific subnet of the Bittensor network.
+ This function provides a streamlined view of the neurons, focusing on key attributes such as stake and network participation.
+
+ Args:
+ netuid (int): The unique identifier of the subnet.
+ block_hash (str): The hash of the blockchain block number for the query.
+ reuse_block (bool): Whether to reuse the last-used blockchain block hash.
+
+ Returns:
+ A list of simplified neuron information for the subnet.
+
+ This function offers a quick overview of the neuron population within a subnet, facilitating efficient analysis of the network's decentralized structure and neuron dynamics.
+ """
+ hex_bytes_result = await self.query_runtime_api(
+ runtime_api="NeuronInfoRuntimeApi",
+ method="get_neurons_lite",
+ params=[
+ netuid
+ ], # TODO check to see if this can accept more than one at a time
+ block_hash=block_hash,
+ reuse_block=reuse_block,
+ )
+
+ if hex_bytes_result is None:
+ return []
+
+ try:
+ bytes_result = bytes.fromhex(hex_bytes_result[2:])
+ except ValueError:
+ bytes_result = bytes.fromhex(hex_bytes_result)
+
+ return NeuronInfoLite.list_from_vec_u8(bytes_result)
+
+ async def neuron_for_uid(
+ self, uid: Optional[int], netuid: int, block_hash: Optional[str] = None
+ ) -> NeuronInfo:
+ """
+ Retrieves detailed information about a specific neuron identified by its unique identifier (UID) within a specified subnet (netuid) of the Bittensor network. This function provides a comprehensive view of a neuron's attributes, including its stake, rank, and operational status.
+
+ Args:
+ uid (int): The unique identifier of the neuron.
+ netuid (int): The unique identifier of the subnet.
+ block_hash (str): The hash of the blockchain block number for the query.
+
+ Returns:
+ Detailed information about the neuron if found, a null neuron otherwise
+
+ This function is crucial for analyzing individual neurons' contributions and status within a specific subnet, offering insights into their roles in the network's consensus and validation mechanisms.
+ """
+ if uid is None:
+ return NeuronInfo.get_null_neuron()
+
+ params = [netuid, uid, block_hash] if block_hash else [netuid, uid]
+ json_body = await self.substrate.rpc_request(
+ method="neuronInfo_getNeuron",
+ params=params, # custom rpc method
+ )
+
+ if not (result := json_body.get("result", None)):
+ return NeuronInfo.get_null_neuron()
+
+ bytes_result = bytes(result)
+ return NeuronInfo.from_vec_u8(bytes_result)
+
+ async def get_delegated(
+ self,
+ coldkey_ss58: str,
+ block_hash: Optional[str] = None,
+ reuse_block: bool = False,
+ ) -> list[tuple[DelegateInfo, Balance]]:
+ """
+ Retrieves a list of delegates and their associated stakes for a given coldkey. This function identifies the delegates that a specific account has staked tokens on.
+
+ Args:
+ coldkey_ss58 (str): The `SS58` address of the account's coldkey.
+ block_hash (Optional[str]): The hash of the blockchain block number for the query.
+ reuse_block (bool): Whether to reuse the last-used blockchain block hash.
+
+ Returns:
+ A list of tuples, each containing a delegate's information and staked amount.
+
+ This function is important for account holders to understand their stake allocations and their involvement in the network's delegation and consensus mechanisms.
+ """
+
+ block_hash = (
+ block_hash
+ if block_hash
+ else (self.substrate.last_block_hash if reuse_block else None)
+ )
+ encoded_coldkey = ss58_to_vec_u8(coldkey_ss58)
+ json_body = await self.substrate.rpc_request(
+ method="delegateInfo_getDelegated",
+ params=([block_hash, encoded_coldkey] if block_hash else [encoded_coldkey]),
+ )
+
+ if not (result := json_body.get("result")):
+ return []
+
+ return DelegateInfo.delegated_list_from_vec_u8(bytes(result))
+
+ async def query_identity(
+ self,
+ key: str,
+ block_hash: Optional[str] = None,
+ reuse_block: bool = False,
+ ) -> dict:
+ """
+ Queries the identity of a neuron on the Bittensor blockchain using the given key. This function retrieves detailed identity information about a specific neuron, which is a crucial aspect of the network's decentralized identity and governance system.
+
+ Args:
+ key (str): The key used to query the neuron's identity, typically the neuron's SS58 address.
+ block_hash (str): The hash of the blockchain block number at which to perform the query.
+ reuse_block (bool): Whether to reuse the last-used blockchain block hash.
+
+ Returns:
+ An object containing the identity information of the neuron if found, ``None`` otherwise.
+
+ The identity information can include various attributes such as the neuron's stake, rank, and other network-specific details, providing insights into the neuron's role and status within the Bittensor network.
+
+ Note:
+            See the Bittensor CLI documentation for supported identity parameters.
+ """
+
+ def decode_hex_identity_dict_(info_dictionary):
+ for k, v in info_dictionary.items():
+ if isinstance(v, dict):
+ item = next(iter(v.values()))
+ else:
+ item = v
+ if isinstance(item, tuple) and item:
+ if len(item) > 1:
+ try:
+ info_dictionary[k] = (
+ bytes(item).hex(sep=" ", bytes_per_sep=2).upper()
+ )
+ except UnicodeDecodeError:
+ print(f"Could not decode: {k}: {item}")
+ else:
+ try:
+ info_dictionary[k] = bytes(item[0]).decode("utf-8")
+ except UnicodeDecodeError:
+ print(f"Could not decode: {k}: {item}")
+ else:
+ info_dictionary[k] = item
+
+ return info_dictionary
+
+ identity_info = await self.substrate.query(
+ module="Registry",
+ storage_function="IdentityOf",
+ params=[key],
+ block_hash=block_hash,
+ reuse_block_hash=reuse_block,
+ )
+ try:
+ return decode_hex_identity_dict_(identity_info["info"])
+ except TypeError:
+ return {}
+
+ async def weights(
+ self, netuid: int, block_hash: Optional[str] = None
+ ) -> list[tuple[int, list[tuple[int, int]]]]:
+ """
+ Retrieves the weight distribution set by neurons within a specific subnet of the Bittensor network.
+ This function maps each neuron's UID to the weights it assigns to other neurons, reflecting the network's trust and value assignment mechanisms.
+
+ Args:
+ netuid (int): The network UID of the subnet to query.
+ block_hash (str): The hash of the blockchain block for the query.
+
+ Returns:
+ A list of tuples mapping each neuron's UID to its assigned weights.
+
+ The weight distribution is a key factor in the network's consensus algorithm and the ranking of neurons, influencing their influence and reward allocation within the subnet.
+ """
+ # TODO look into seeing if we can speed this up with storage query
+ w_map_encoded = await self.substrate.query_map(
+ module="SubtensorModule",
+ storage_function="Weights",
+ params=[netuid],
+ block_hash=block_hash,
+ )
+ w_map = [(uid, w or []) async for uid, w in w_map_encoded]
+
+ return w_map
+
+ async def bonds(
+ self, netuid: int, block_hash: Optional[str] = None
+ ) -> list[tuple[int, list[tuple[int, int]]]]:
+ """
+ Retrieves the bond distribution set by neurons within a specific subnet of the Bittensor network.
+ Bonds represent the investments or commitments made by neurons in one another, indicating a level of trust and perceived value. This bonding mechanism is integral to the network's market-based approach to measuring and rewarding machine intelligence.
+
+ Args:
+ netuid (int): The network UID of the subnet to query.
+ block_hash (Optional[str]): The hash of the blockchain block number for the query.
+
+ Returns:
+ List of tuples mapping each neuron's UID to its bonds with other neurons.
+
+ Understanding bond distributions is crucial for analyzing the trust dynamics and market behavior within the subnet. It reflects how neurons recognize and invest in each other's intelligence and contributions, supporting diverse and niche systems within the Bittensor ecosystem.
+ """
+ b_map_encoded = await self.substrate.query_map(
+ module="SubtensorModule",
+ storage_function="Bonds",
+ params=[netuid],
+ block_hash=block_hash,
+ )
+ b_map = [(uid, b) async for uid, b in b_map_encoded]
+
+ return b_map
+
+ async def does_hotkey_exist(
+ self,
+ hotkey_ss58: str,
+ block_hash: Optional[str] = None,
+ reuse_block: bool = False,
+ ) -> bool:
+ """
+ Returns true if the hotkey is known by the chain and there are accounts.
+
+ Args:
+ hotkey_ss58 (str): The SS58 address of the hotkey.
+ block_hash (Optional[str]): The hash of the block number to check the hotkey against.
+ reuse_block (bool): Whether to reuse the last-used blockchain hash.
+
+ Returns:
+ `True` if the hotkey is known by the chain and there are accounts, `False` otherwise.
+ """
+ _result = await self.substrate.query(
+ module="SubtensorModule",
+ storage_function="Owner",
+ params=[hotkey_ss58],
+ block_hash=block_hash,
+ reuse_block_hash=reuse_block,
+ )
+ result = decode_account_id(_result[0])
+ return_val = (
+ False
+ if result is None
+ else result != "5C4hrfjw9DjXZTzV3MwzrrAr9P1MJhSrvWGWqi1eSuyUpnhM"
+ )
+ return return_val
+
+ async def get_hotkey_owner(
+ self, hotkey_ss58: str, block_hash: str
+ ) -> Optional[str]:
+ """
+ Retrieves the owner of the given hotkey at a specific block hash.
+ This function queries the blockchain for the owner of the provided hotkey. If the hotkey does not exist at the specified block hash, it returns None.
+
+ Args:
+ hotkey_ss58 (str): The SS58 address of the hotkey.
+ block_hash (str): The hash of the block at which to check the hotkey ownership.
+
+ Returns:
+ Optional[str]: The SS58 address of the owner if the hotkey exists, or None if it doesn't.
+ """
+ hk_owner_query = await self.substrate.query(
+ module="SubtensorModule",
+ storage_function="Owner",
+ params=[hotkey_ss58],
+ block_hash=block_hash,
+ )
+ val = decode_account_id(hk_owner_query[0])
+ if val:
+ exists = await self.does_hotkey_exist(hotkey_ss58, block_hash=block_hash)
+ else:
+ exists = False
+ hotkey_owner = val if exists else None
+ return hotkey_owner
+
+ async def sign_and_send_extrinsic(
+ self,
+ call: "GenericCall",
+ wallet: "Wallet",
+ wait_for_inclusion: bool = True,
+ wait_for_finalization: bool = False,
+ ) -> tuple[bool, str]:
+ """
+ Helper method to sign and submit an extrinsic call to chain.
+
+ Args:
+ call (scalecodec.types.GenericCall): a prepared Call object
+ wallet (bittensor_wallet.Wallet): the wallet whose coldkey will be used to sign the extrinsic
+ wait_for_inclusion (bool): whether to wait until the extrinsic call is included on the chain
+ wait_for_finalization (bool): whether to wait until the extrinsic call is finalized on the chain
+
+ Returns:
+ (success, error message)
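+
+        Example:
+            Hedged sketch; ``call`` is any ``GenericCall`` prepared via ``self.substrate.compose_call``:
+
+                success, error = await subtensor.sign_and_send_extrinsic(call, wallet)
+                if not success:
+                    logging.error(error)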
+ """
+ extrinsic = await self.substrate.create_signed_extrinsic(
+ call=call, keypair=wallet.coldkey
+ ) # sign with coldkey
+ try:
+ response = await self.substrate.submit_extrinsic(
+ extrinsic,
+ wait_for_inclusion=wait_for_inclusion,
+ wait_for_finalization=wait_for_finalization,
+ )
+            # If we wait for neither inclusion nor finalization, return success immediately.
+ if not wait_for_finalization and not wait_for_inclusion:
+ return True, ""
+ await response.process_events()
+ if await response.is_success:
+ return True, ""
+ else:
+ return False, format_error_message(
+ await response.error_message, substrate=self.substrate
+ )
+ except SubstrateRequestException as e:
+ return False, format_error_message(e, substrate=self.substrate)
+
+ async def get_children(self, hotkey: str, netuid: int) -> tuple[bool, list, str]:
+ """
+ This method retrieves the children of a given hotkey and netuid. It queries the SubtensorModule's ChildKeys storage function to get the children and formats them before returning as a tuple.
+
+ Args:
+ hotkey (str): The hotkey value.
+ netuid (int): The netuid value.
+
+ Returns:
+ A tuple containing a boolean indicating success or failure, a list of formatted children, and an error message (if applicable)
+ """
+ try:
+ children = await self.substrate.query(
+ module="SubtensorModule",
+ storage_function="ChildKeys",
+ params=[hotkey, netuid],
+ )
+ if children:
+ formatted_children = []
+ for proportion, child in children:
+ # Convert U64 to int
+ formatted_child = decode_account_id(child[0])
+ int_proportion = int(proportion)
+ formatted_children.append((int_proportion, formatted_child))
+ return True, formatted_children, ""
+ else:
+ return True, [], ""
+ except SubstrateRequestException as e:
+ return False, [], format_error_message(e, self.substrate)
+
+ async def get_subnet_hyperparameters(
+ self, netuid: int, block_hash: Optional[str] = None
+ ) -> Optional[Union[list, SubnetHyperparameters]]:
+ """
+ Retrieves the hyperparameters for a specific subnet within the Bittensor network. These hyperparameters define the operational settings and rules governing the subnet's behavior.
+
+ Args:
+ netuid (int): The network UID of the subnet to query.
+ block_hash (Optional[str]): The hash of the blockchain block number for the query.
+
+ Returns:
+ The subnet's hyperparameters, or `None` if not available.
+
+ Understanding the hyperparameters is crucial for comprehending how subnets are configured and managed, and how they interact with the network's consensus and incentive mechanisms.
+ """
+ hex_bytes_result = await self.query_runtime_api(
+ runtime_api="SubnetInfoRuntimeApi",
+ method="get_subnet_hyperparams",
+ params=[netuid],
+ block_hash=block_hash,
+ )
+
+ if hex_bytes_result is None:
+ return []
+
+ if hex_bytes_result.startswith("0x"):
+ bytes_result = bytes.fromhex(hex_bytes_result[2:])
+ else:
+ bytes_result = bytes.fromhex(hex_bytes_result)
+
+ return SubnetHyperparameters.from_vec_u8(bytes_result)
+
+ async def get_vote_data(
+ self,
+ proposal_hash: str,
+ block_hash: Optional[str] = None,
+ reuse_block: bool = False,
+ ) -> Optional["ProposalVoteData"]:
+ """
+ Retrieves the voting data for a specific proposal on the Bittensor blockchain. This data includes information about how senate members have voted on the proposal.
+
+ Args:
+ proposal_hash (str): The hash of the proposal for which voting data is requested.
+ block_hash (Optional[str]): The hash of the blockchain block number to query the voting data.
+ reuse_block (bool): Whether to reuse the last-used blockchain block hash.
+
+ Returns:
+ An object containing the proposal's voting data, or `None` if not found.
+
+ This function is important for tracking and understanding the decision-making processes within the Bittensor network, particularly how proposals are received and acted upon by the governing body.
+ """
+ vote_data = await self.substrate.query(
+ module="Triumvirate",
+ storage_function="Voting",
+ params=[proposal_hash],
+ block_hash=block_hash,
+ reuse_block_hash=reuse_block,
+ )
+ if vote_data is None:
+ return None
+ else:
+ return ProposalVoteData(vote_data)
+
+ async def get_delegate_identities(
+ self, block_hash: Optional[str] = None
+ ) -> dict[str, DelegatesDetails]:
+ """
+ Fetches delegates identities from the chain and GitHub. Preference is given to chain data, and missing info is filled-in by the info from GitHub. At some point, we want to totally move away from fetching this info from GitHub, but chain data is still limited in that regard.
+
+ Args:
+ block_hash (str): the hash of the blockchain block for the query
+
+ Returns:
+ Dict {ss58: DelegatesDetails, ...}
+
+ """
+ timeout = aiohttp.ClientTimeout(10.0)
+ async with aiohttp.ClientSession(timeout=timeout) as session:
+ identities_info, response = await asyncio.gather(
+ self.substrate.query_map(
+ module="Registry",
+ storage_function="IdentityOf",
+ block_hash=block_hash,
+ ),
+ session.get(DELEGATES_DETAILS_URL),
+ )
+
+ all_delegates_details = {
+ decode_account_id(ss58_address[0]): DelegatesDetails.from_chain_data(
+ decode_hex_identity_dict(identity["info"])
+ )
+ for ss58_address, identity in identities_info
+ }
+
+ if response.ok:
+ all_delegates: dict[str, Any] = await response.json(content_type=None)
+
+ for delegate_hotkey, delegate_details in all_delegates.items():
+ delegate_info = all_delegates_details.setdefault(
+ delegate_hotkey,
+ DelegatesDetails(
+ display=delegate_details.get("name", ""),
+ web=delegate_details.get("url", ""),
+ additional=delegate_details.get("description", ""),
+ pgp_fingerprint=delegate_details.get("fingerprint", ""),
+ ),
+ )
+ delegate_info.display = (
+ delegate_info.display or delegate_details.get("name", "")
+ )
+ delegate_info.web = delegate_info.web or delegate_details.get(
+ "url", ""
+ )
+ delegate_info.additional = (
+ delegate_info.additional
+ or delegate_details.get("description", "")
+ )
+ delegate_info.pgp_fingerprint = (
+ delegate_info.pgp_fingerprint
+ or delegate_details.get("fingerprint", "")
+ )
+
+ return all_delegates_details
+
+ async def is_hotkey_registered(self, netuid: int, hotkey_ss58: str) -> bool:
+ """Checks to see if the hotkey is registered on a given netuid"""
+ _result = await self.substrate.query(
+ module="SubtensorModule",
+ storage_function="Uids",
+ params=[netuid, hotkey_ss58],
+ )
+        return _result is not None
+
+ async def get_uid_for_hotkey_on_subnet(
+ self, hotkey_ss58: str, netuid: int, block_hash: Optional[str] = None
+ ):
+ """
+ Retrieves the unique identifier (UID) for a neuron's hotkey on a specific subnet.
+
+ Args:
+ hotkey_ss58 (str): The ``SS58`` address of the neuron's hotkey.
+ netuid (int): The unique identifier of the subnet.
+            block_hash (Optional[str]): The hash of the block at which to perform the query. If ``None``, the chain head is used.
+
+ Returns:
+ Optional[int]: The UID of the neuron if it is registered on the subnet, ``None`` otherwise.
+
+ The UID is a critical identifier within the network, linking the neuron's hotkey to its operational and governance activities on a particular subnet.
+ """
+        return await self.substrate.query(
+ module="SubtensorModule",
+ storage_function="Uids",
+ params=[netuid, hotkey_ss58],
+ block_hash=block_hash,
+ )
+
+ # extrinsics
+
+ async def transfer(
+ self,
+ wallet: "Wallet",
+ destination: str,
+ amount: float,
+ transfer_all: bool,
+ ) -> bool:
+ """
+        Transfer an amount of tokens to a destination address.
+
+ Args:
+ wallet (bittensor_wallet.Wallet): Source wallet for the transfer.
+ destination (str): Destination address for the transfer.
+ amount (float): Amount of tokens to transfer.
+ transfer_all (bool): Flag to transfer all tokens.
+
+ Returns:
+ `True` if the transferring was successful, otherwise `False`.
+ """
+ return await transfer_extrinsic(
+ self,
+ wallet,
+ destination,
+ Balance.from_tao(amount),
+ transfer_all,
+ )
+
+ async def register(
+ self,
+ wallet: "Wallet",
+ netuid: int,
+ block_hash: Optional[str] = None,
+ wait_for_inclusion: bool = True,
+ wait_for_finalization: bool = True,
+ ) -> bool:
+ """
+ Register neuron by recycling some TAO.
+
+ Args:
+ wallet (bittensor_wallet.Wallet): Bittensor wallet instance.
+            netuid (int): Subnet unique id.
+ block_hash (Optional[str]): The hash of the blockchain block for the query.
+            wait_for_inclusion (bool): Waits for the transaction to be included in a block. Default is ``True``.
+            wait_for_finalization (bool): Waits for the transaction to be finalized on the blockchain. Default is ``True``.
+
+ Returns:
+ `True` if registration was successful, otherwise `False`.
+ """
+ logging.info(
+ f"Registering on netuid 0 on network: {self.network}"
+ )
+
+ # Check current recycle amount
+ logging.info("Fetching recycle amount & balance.")
+ block_hash = block_hash if block_hash else await self.get_block_hash()
+ recycle_call, balance_ = await asyncio.gather(
+ self.get_hyperparameter(param_name="Burn", netuid=netuid, reuse_block=True),
+ self.get_balance(wallet.coldkeypub.ss58_address, block_hash=block_hash),
+ )
+        try:
+            current_recycle = Balance.from_rao(int(recycle_call))
+            balance: Balance = balance_[wallet.coldkeypub.ss58_address]
+ except TypeError as e:
+ logging.error(f"Unable to retrieve current recycle. {e}")
+ return False
+ except KeyError:
+ logging.error("Unable to retrieve current balance.")
+ return False
+
+ # Check balance is sufficient
+ if balance < current_recycle:
+ logging.error(
+ f"Insufficient balance {balance} to register neuron. Current recycle is {current_recycle} TAO."
+ )
+ return False
+
+ return await root_register_extrinsic(
+ subtensor=self,
+ wallet=wallet,
+ netuid=netuid,
+ wait_for_inclusion=wait_for_inclusion,
+ wait_for_finalization=wait_for_finalization,
+ )
+
+ async def pow_register(
+ self: "AsyncSubtensor",
+ wallet: Wallet,
+ netuid,
+ processors,
+ update_interval,
+ output_in_place,
+ verbose,
+ use_cuda,
+ dev_id,
+ threads_per_block,
+ ):
+ """Register neuron."""
+ return await register_extrinsic(
+ subtensor=self,
+ wallet=wallet,
+ netuid=netuid,
+ tpb=threads_per_block,
+ update_interval=update_interval,
+ num_processes=processors,
+ cuda=use_cuda,
+ dev_id=dev_id,
+ output_in_place=output_in_place,
+ log_verbose=verbose,
+ )
+
+ async def set_weights(
+ self,
+ wallet: "Wallet",
+ netuid: int,
+ uids: Union[NDArray[np.int64], "torch.LongTensor", list],
+ weights: Union[NDArray[np.float32], "torch.FloatTensor", list],
+ version_key: int = version_as_int,
+ wait_for_inclusion: bool = False,
+ wait_for_finalization: bool = False,
+ max_retries: int = 5,
+ ):
+ """
+ Sets the inter-neuronal weights for the specified neuron. This process involves specifying the influence or trust a neuron places on other neurons in the network, which is a fundamental aspect of Bittensor's decentralized learning architecture.
+
+ Args:
+ wallet (bittensor_wallet.Wallet): The wallet associated with the neuron setting the weights.
+ netuid (int): The unique identifier of the subnet.
+ uids (Union[NDArray[np.int64], torch.LongTensor, list]): The list of neuron UIDs that the weights are being set for.
+ weights (Union[NDArray[np.float32], torch.FloatTensor, list]): The corresponding weights to be set for each UID.
+ version_key (int): Version key for compatibility with the network. Default is ``int representation of Bittensor version.``.
+ wait_for_inclusion (bool): Waits for the transaction to be included in a block. Default is ``False``.
+ wait_for_finalization (bool): Waits for the transaction to be finalized on the blockchain. Default is ``False``.
+ max_retries (int): The number of maximum attempts to set weights. Default is ``5``.
+
+ Returns:
+            tuple[bool, str]: ``True`` if the setting of weights is successful, ``False`` otherwise, along with a message describing the success or the potential error.
+
+        This function is crucial in shaping the network's collective intelligence, where each neuron's learning and contribution are influenced by the weights it sets towards others.
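+
+        Example:
+            Illustrative sketch; the netuid, uids, and weights are placeholders, and ``uids`` and ``weights`` must have the same length:
+
+                success, message = await subtensor.set_weights(
+                    wallet=wallet,
+                    netuid=1,
+                    uids=[0, 1, 2],
+                    weights=[0.5, 0.3, 0.2],
+                )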
+ """
+        uid = await self.get_uid_for_hotkey_on_subnet(wallet.hotkey.ss58_address, netuid)
+ retries = 0
+ success = False
+ message = "No attempt made. Perhaps it is too soon to set weights!"
+ while (
+ self.blocks_since_last_update(netuid, uid) > self.weights_rate_limit(netuid) # type: ignore
+ and retries < max_retries
+ ):
+ try:
+ logging.info(
+ f"Setting weights for subnet #{netuid}. Attempt {retries + 1} of {max_retries}."
+ )
+ success, message = await set_weights_extrinsic(
+ subtensor=self,
+ wallet=wallet,
+ netuid=netuid,
+ uids=uids,
+ weights=weights,
+ version_key=version_key,
+ wait_for_inclusion=wait_for_inclusion,
+ wait_for_finalization=wait_for_finalization,
+ )
+ except Exception as e:
+ logging.error(f"Error setting weights: {e}")
+ finally:
+ retries += 1
+
+ return success, message
+
+ async def root_set_weights(
+ self,
+ wallet: "Wallet",
+ netuids: list[int],
+ weights: list[float],
+ ) -> bool:
+ """
+ Set weights for root network.
+
+ Args:
+ wallet (bittensor_wallet.Wallet): bittensor wallet instance.
+ netuids (list[int]): The list of subnet uids.
+ weights (list[float]): The list of weights to be set.
+
+ Returns:
+ `True` if the setting of weights is successful, `False` otherwise.
+ """
+ netuids_ = np.array(netuids, dtype=np.int64)
+ weights_ = np.array(weights, dtype=np.float32)
+ logging.info(f"Setting weights in network: {self.network}")
+ # Run the set weights operation.
+ return await set_root_weights_extrinsic(
+ subtensor=self,
+ wallet=wallet,
+ netuids=netuids_,
+ weights=weights_,
+ version_key=0,
+ wait_for_finalization=True,
+ wait_for_inclusion=True,
+ )
+
+ async def commit_weights(
+ self,
+ wallet: "Wallet",
+ netuid: int,
+ salt: list[int],
+ uids: Union[NDArray[np.int64], list],
+ weights: Union[NDArray[np.int64], list],
+ version_key: int = version_as_int,
+ wait_for_inclusion: bool = False,
+ wait_for_finalization: bool = False,
+ max_retries: int = 5,
+ ) -> tuple[bool, str]:
+ """
+ Commits a hash of the neuron's weights to the Bittensor blockchain using the provided wallet.
+ This action serves as a commitment or snapshot of the neuron's current weight distribution.
+
+ Args:
+ wallet (bittensor_wallet.Wallet): The wallet associated with the neuron committing the weights.
+ netuid (int): The unique identifier of the subnet.
+            salt (list[int]): List of randomly generated integers used as salt to generate the weight hash.
+ uids (np.ndarray): NumPy array of neuron UIDs for which weights are being committed.
+ weights (np.ndarray): NumPy array of weight values corresponding to each UID.
+ version_key (int): Version key for compatibility with the network. Default is ``int representation of Bittensor version.``.
+ wait_for_inclusion (bool): Waits for the transaction to be included in a block. Default is ``False``.
+ wait_for_finalization (bool): Waits for the transaction to be finalized on the blockchain. Default is ``False``.
+ max_retries (int): The number of maximum attempts to commit weights. Default is ``5``.
+
+ Returns:
+            tuple[bool, str]: ``True`` if the weight commitment is successful, ``False`` otherwise, along with a message describing the success or the potential error.
+
+ This function allows neurons to create a tamper-proof record of their weight distribution at a specific point in time, enhancing transparency and accountability within the Bittensor network.
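+
+        Example:
+            Hedged sketch; the salt is illustrative and should be freshly generated per commit:
+
+                import secrets
+
+                salt = [secrets.randbelow(2**16) for _ in range(8)]
+                success, message = await subtensor.commit_weights(
+                    wallet=wallet, netuid=1, salt=salt, uids=[0, 1], weights=[100, 200]
+                )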
+ """
+ retries = 0
+ success = False
+ message = "No attempt made. Perhaps it is too soon to commit weights!"
+
+ logging.info(
+ f"Committing weights with params: netuid={netuid}, uids={uids}, weights={weights}, version_key={version_key}"
+ )
+
+ # Generate the hash of the weights
+ commit_hash = generate_weight_hash(
+ address=wallet.hotkey.ss58_address,
+ netuid=netuid,
+ uids=list(uids),
+ values=list(weights),
+ salt=salt,
+ version_key=version_key,
+ )
+
+ while retries < max_retries:
+ try:
+ success, message = await commit_weights_extrinsic(
+ subtensor=self,
+ wallet=wallet,
+ netuid=netuid,
+ commit_hash=commit_hash,
+ wait_for_inclusion=wait_for_inclusion,
+ wait_for_finalization=wait_for_finalization,
+ )
+ if success:
+ break
+ except Exception as e:
+ logging.error(f"Error committing weights: {e}")
+ finally:
+ retries += 1
+
+ return success, message
diff --git a/bittensor/core/chain_data/__init__.py b/bittensor/core/chain_data/__init__.py
index 9ad1e38881..68936a6b5f 100644
--- a/bittensor/core/chain_data/__init__.py
+++ b/bittensor/core/chain_data/__init__.py
@@ -17,6 +17,6 @@
from .stake_info import StakeInfo
from .subnet_hyperparameters import SubnetHyperparameters
from .subnet_info import SubnetInfo
-from .utils import custom_rpc_type_registry
+from .utils import custom_rpc_type_registry, decode_account_id, process_stake_data
ProposalCallData = GenericCall
diff --git a/bittensor/core/chain_data/delegate_info.py b/bittensor/core/chain_data/delegate_info.py
index d77f1e1412..a840d1bb15 100644
--- a/bittensor/core/chain_data/delegate_info.py
+++ b/bittensor/core/chain_data/delegate_info.py
@@ -1,10 +1,9 @@
-from dataclasses import dataclass
-from typing import Optional, Any
+import bt_decode
-from scalecodec.utils.ss58 import ss58_encode
+from dataclasses import dataclass
+from typing import Optional
-from bittensor.core.chain_data.utils import from_scale_encoding, ChainDataType
-from bittensor.core.settings import SS58_FORMAT
+from bittensor.core.chain_data.utils import decode_account_id
from bittensor.utils import u16_normalized_float
from bittensor.utils.balance import Balance
@@ -24,7 +23,6 @@ class DelegateInfo:
validator_permits (list[int]): List of subnets that the delegate is allowed to validate on.
return_per_1000 (int): Return per 1000 TAO, for the delegate over a day.
total_daily_return (int): Total daily return of the delegate.
-
"""
hotkey_ss58: str # Hotkey of delegate
@@ -37,69 +35,78 @@ class DelegateInfo:
validator_permits: list[
int
] # List of subnets that the delegate is allowed to validate on
- registrations: tuple[int] # List of subnets that the delegate is registered on
+ registrations: list[int] # list of subnets that the delegate is registered on
return_per_1000: Balance # Return per 1000 tao of the delegate over a day
total_daily_return: Balance # Total daily return of the delegate
@classmethod
- def fix_decoded_values(cls, decoded: Any) -> "DelegateInfo":
- """Fixes the decoded values."""
-
- return cls(
- hotkey_ss58=ss58_encode(decoded["delegate_ss58"], SS58_FORMAT),
- owner_ss58=ss58_encode(decoded["owner_ss58"], SS58_FORMAT),
- take=u16_normalized_float(decoded["take"]),
- nominators=[
- (
- ss58_encode(nom[0], SS58_FORMAT),
- Balance.from_rao(nom[1]),
- )
- for nom in decoded["nominators"]
- ],
- total_stake=Balance.from_rao(
- sum([nom[1] for nom in decoded["nominators"]])
- ),
- validator_permits=decoded["validator_permits"],
- registrations=decoded["registrations"],
- return_per_1000=Balance.from_rao(decoded["return_per_1000"]),
- total_daily_return=Balance.from_rao(decoded["total_daily_return"]),
+ def from_vec_u8(cls, vec_u8: bytes) -> Optional["DelegateInfo"]:
+ decoded = bt_decode.DelegateInfo.decode(vec_u8)
+ hotkey = decode_account_id(decoded.delegate_ss58)
+ owner = decode_account_id(decoded.owner_ss58)
+ nominators = [
+ (decode_account_id(x), Balance.from_rao(y)) for x, y in decoded.nominators
+ ]
+ total_stake = sum((x[1] for x in nominators)) if nominators else Balance(0)
+ return DelegateInfo(
+ hotkey_ss58=hotkey,
+ total_stake=total_stake,
+ nominators=nominators,
+ owner_ss58=owner,
+ take=u16_normalized_float(decoded.take),
+ validator_permits=decoded.validator_permits,
+ registrations=decoded.registrations,
+ return_per_1000=Balance.from_rao(decoded.return_per_1000),
+ total_daily_return=Balance.from_rao(decoded.total_daily_return),
)
@classmethod
- def from_vec_u8(cls, vec_u8: list[int]) -> Optional["DelegateInfo"]:
- """Returns a DelegateInfo object from a ``vec_u8``."""
- if len(vec_u8) == 0:
- return None
-
- decoded = from_scale_encoding(vec_u8, ChainDataType.DelegateInfo)
- if decoded is None:
- return None
-
- return DelegateInfo.fix_decoded_values(decoded)
-
- @classmethod
- def list_from_vec_u8(cls, vec_u8: list[int]) -> list["DelegateInfo"]:
- """Returns a list of DelegateInfo objects from a ``vec_u8``."""
- decoded = from_scale_encoding(vec_u8, ChainDataType.DelegateInfo, is_vec=True)
-
- if decoded is None:
- return []
-
- return [DelegateInfo.fix_decoded_values(d) for d in decoded]
+ def list_from_vec_u8(cls, vec_u8: bytes) -> list["DelegateInfo"]:
+ decoded = bt_decode.DelegateInfo.decode_vec(vec_u8)
+ results = []
+ for d in decoded:
+ hotkey = decode_account_id(d.delegate_ss58)
+ owner = decode_account_id(d.owner_ss58)
+ nominators = [
+ (decode_account_id(x), Balance.from_rao(y)) for x, y in d.nominators
+ ]
+ total_stake = sum((x[1] for x in nominators)) if nominators else Balance(0)
+ results.append(
+ DelegateInfo(
+ hotkey_ss58=hotkey,
+ total_stake=total_stake,
+ nominators=nominators,
+ owner_ss58=owner,
+ take=u16_normalized_float(d.take),
+ validator_permits=d.validator_permits,
+ registrations=d.registrations,
+ return_per_1000=Balance.from_rao(d.return_per_1000),
+ total_daily_return=Balance.from_rao(d.total_daily_return),
+ )
+ )
+ return results
@classmethod
def delegated_list_from_vec_u8(
- cls, vec_u8: list[int]
- ) -> list[tuple["DelegateInfo", "Balance"]]:
- """Returns a list of Tuples of DelegateInfo objects, and Balance, from a ``vec_u8``.
-
- This is the list of delegates that the user has delegated to, and the amount of stake delegated.
- """
- decoded = from_scale_encoding(vec_u8, ChainDataType.DelegatedInfo, is_vec=True)
- if decoded is None:
- return []
-
- return [
- (DelegateInfo.fix_decoded_values(d), Balance.from_rao(s))
- for d, s in decoded
- ]
+ cls, vec_u8: bytes
+ ) -> list[tuple["DelegateInfo", Balance]]:
+ decoded = bt_decode.DelegateInfo.decode_delegated(vec_u8)
+ results = []
+ for d, b in decoded:
+ nominators = [
+ (decode_account_id(x), Balance.from_rao(y)) for x, y in d.nominators
+ ]
+ total_stake = sum((x[1] for x in nominators)) if nominators else Balance(0)
+ delegate = DelegateInfo(
+ hotkey_ss58=decode_account_id(d.delegate_ss58),
+ total_stake=total_stake,
+ nominators=nominators,
+ owner_ss58=decode_account_id(d.owner_ss58),
+ take=u16_normalized_float(d.take),
+ validator_permits=d.validator_permits,
+ registrations=d.registrations,
+ return_per_1000=Balance.from_rao(d.return_per_1000),
+ total_daily_return=Balance.from_rao(d.total_daily_return),
+ )
+ results.append((delegate, Balance.from_rao(b)))
+ return results
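+
+# Illustrative decoding sketch (assumed setup): `raw` stands for the
+# SCALE-encoded bytes blob returned by the delegates runtime call; the code
+# that produces it is not part of this diff.
+#
+#   delegates = DelegateInfo.list_from_vec_u8(raw)
+#   for d in delegates:
+#       print(d.hotkey_ss58, d.take, d.total_stake)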
diff --git a/bittensor/core/chain_data/subnet_info.py b/bittensor/core/chain_data/subnet_info.py
index f1ce151872..4169746a08 100644
--- a/bittensor/core/chain_data/subnet_info.py
+++ b/bittensor/core/chain_data/subnet_info.py
@@ -1,13 +1,10 @@
from dataclasses import dataclass
-from typing import Any, Optional, Union
-from scalecodec.utils.ss58 import ss58_encode
+import bt_decode
-from bittensor.core.chain_data.utils import from_scale_encoding, ChainDataType
-from bittensor.core.settings import SS58_FORMAT
+from bittensor.core.chain_data.utils import decode_account_id
from bittensor.utils import u16_normalized_float
from bittensor.utils.balance import Balance
-from bittensor.utils.registration import torch, use_torch
@dataclass
@@ -28,76 +25,39 @@ class SubnetInfo:
blocks_since_epoch: int
tempo: int
modality: int
- # netuid -> topk percentile prunning score requirement (u16:MAX normalized.)
connection_requirements: dict[str, float]
emission_value: float
burn: Balance
owner_ss58: str
@classmethod
- def from_vec_u8(cls, vec_u8: list[int]) -> Optional["SubnetInfo"]:
- """Returns a SubnetInfo object from a ``vec_u8``."""
- if len(vec_u8) == 0:
- return None
-
- decoded = from_scale_encoding(vec_u8, ChainDataType.SubnetInfo)
- if decoded is None:
- return None
-
- return SubnetInfo.fix_decoded_values(decoded)
-
- @classmethod
- def list_from_vec_u8(cls, vec_u8: list[int]) -> list["SubnetInfo"]:
- """Returns a list of SubnetInfo objects from a ``vec_u8``."""
- decoded = from_scale_encoding(
- vec_u8, ChainDataType.SubnetInfo, is_vec=True, is_option=True
- )
-
- if decoded is None:
- return []
-
- return [SubnetInfo.fix_decoded_values(d) for d in decoded]
-
- @classmethod
- def fix_decoded_values(cls, decoded: dict) -> "SubnetInfo":
- """Returns a SubnetInfo object from a decoded SubnetInfo dictionary."""
- return SubnetInfo(
- netuid=decoded["netuid"],
- rho=decoded["rho"],
- kappa=decoded["kappa"],
- difficulty=decoded["difficulty"],
- immunity_period=decoded["immunity_period"],
- max_allowed_validators=decoded["max_allowed_validators"],
- min_allowed_weights=decoded["min_allowed_weights"],
- max_weight_limit=decoded["max_weights_limit"],
- scaling_law_power=decoded["scaling_law_power"],
- subnetwork_n=decoded["subnetwork_n"],
- max_n=decoded["max_allowed_uids"],
- blocks_since_epoch=decoded["blocks_since_last_step"],
- tempo=decoded["tempo"],
- modality=decoded["network_modality"],
- connection_requirements={
- str(int(netuid)): u16_normalized_float(int(req))
- for netuid, req in decoded["network_connect"]
- },
- emission_value=decoded["emission_values"],
- burn=Balance.from_rao(decoded["burn"]),
- owner_ss58=ss58_encode(decoded["owner"], SS58_FORMAT),
- )
-
- def to_parameter_dict(self) -> Union[dict[str, Any], "torch.nn.ParameterDict"]:
- """Returns a torch tensor or dict of the subnet info."""
- if use_torch():
- return torch.nn.ParameterDict(self.__dict__)
- else:
- return self.__dict__
-
- @classmethod
- def from_parameter_dict(
- cls, parameter_dict: Union[dict[str, Any], "torch.nn.ParameterDict"]
- ) -> "SubnetInfo":
- """Creates a SubnetInfo instance from a parameter dictionary."""
- if use_torch():
- return cls(**dict(parameter_dict))
- else:
- return cls(**parameter_dict)
+ def list_from_vec_u8(cls, vec_u8: bytes) -> list["SubnetInfo"]:
+ decoded = bt_decode.SubnetInfo.decode_vec_option(vec_u8)
+ result = []
+ for d in decoded:
+ result.append(
+ SubnetInfo(
+ netuid=d.netuid,
+ rho=d.rho,
+ kappa=d.kappa,
+ difficulty=d.difficulty,
+ immunity_period=d.immunity_period,
+ max_allowed_validators=d.max_allowed_validators,
+ min_allowed_weights=d.min_allowed_weights,
+ max_weight_limit=d.max_weights_limit,
+ scaling_law_power=d.scaling_law_power,
+ subnetwork_n=d.subnetwork_n,
+ max_n=d.max_allowed_uids,
+ blocks_since_epoch=d.blocks_since_last_step,
+ tempo=d.tempo,
+ modality=d.network_modality,
+ connection_requirements={
+ str(int(netuid)): u16_normalized_float(int(req))
+ for (netuid, req) in d.network_connect
+ },
+ emission_value=d.emission_values,
+ burn=Balance.from_rao(d.burn),
+ owner_ss58=decode_account_id(d.owner),
+ )
+ )
+ return result
diff --git a/bittensor/core/chain_data/utils.py b/bittensor/core/chain_data/utils.py
index 0544ca85a2..9c21c9d22e 100644
--- a/bittensor/core/chain_data/utils.py
+++ b/bittensor/core/chain_data/utils.py
@@ -260,7 +260,7 @@ def from_scale_encoding_using_type_string(
}
-def decode_account_id(account_id_bytes: list) -> str:
+def decode_account_id(account_id_bytes: Union[bytes, str]) -> str:
"""
    Decodes an AccountId from bytes to an SS58 address string.
diff --git a/bittensor/core/config.py b/bittensor/core/config.py
index 5027bbecb5..f38aff20e6 100644
--- a/bittensor/core/config.py
+++ b/bittensor/core/config.py
@@ -97,18 +97,6 @@ def __init__(
# this can fail if --no_version_checking has already been added.
pass
- try:
- parser.add_argument(
- "--no_prompt",
- dest="no_prompt",
- action="store_true",
- help="Set ``true`` to stop cli from prompting the user.",
- default=False,
- )
- except Exception:
- # this can fail if --no_version_checking has already been added.
- pass
-
# Get args from argv if not passed in.
if args is None:
args = sys.argv[1:]
diff --git a/bittensor/core/extrinsics/async_registration.py b/bittensor/core/extrinsics/async_registration.py
new file mode 100644
index 0000000000..3739087b10
--- /dev/null
+++ b/bittensor/core/extrinsics/async_registration.py
@@ -0,0 +1,1562 @@
+import asyncio
+import binascii
+import functools
+import hashlib
+import io
+import math
+import multiprocessing as mp
+import os
+import random
+import time
+from contextlib import redirect_stdout
+from dataclasses import dataclass
+from datetime import timedelta
+from multiprocessing import Process, Event, Lock, Array, Value, Queue
+from multiprocessing.queues import Queue as Queue_Type
+from queue import Empty, Full
+from typing import Optional, Union, TYPE_CHECKING, Callable, Any
+
+import backoff
+import numpy as np
+from Crypto.Hash import keccak
+from bittensor_wallet import Wallet
+from bittensor_wallet.errors import KeyFileError
+from rich.console import Console
+from rich.status import Status
+from substrateinterface.exceptions import SubstrateRequestException
+
+from bittensor.core.chain_data import NeuronInfo
+from bittensor.utils import format_error_message
+from bittensor.utils.btlogging import logging
+from bittensor.utils.formatting import millify, get_human_readable
+
+if TYPE_CHECKING:
+ from bittensor.core.async_subtensor import AsyncSubtensor
+
+
+# TODO: compare with and remove existing code (bittensor.utils.registration)
+
+
+def use_torch() -> bool:
+ """Force the use of torch over numpy for certain operations."""
+    return os.getenv("USE_TORCH") == "1"
+
+
+def legacy_torch_api_compat(func: Callable):
+ """
+ Convert function operating on numpy Input&Output to legacy torch Input&Output API if `use_torch()` is True.
+
+ Args:
+ func: Function with numpy Input/Output to be decorated.
+
+ Returns:
+ Decorated function
+ """
+
+ @functools.wraps(func)
+ def decorated(*args, **kwargs):
+ if use_torch():
+ # if argument is a Torch tensor, convert it to numpy
+ args = [
+ arg.cpu().numpy() if isinstance(arg, torch.Tensor) else arg
+ for arg in args
+ ]
+ kwargs = {
+ key: value.cpu().numpy() if isinstance(value, torch.Tensor) else value
+ for key, value in kwargs.items()
+ }
+ ret = func(*args, **kwargs)
+ if use_torch():
+ # if return value is a numpy array, convert it to Torch tensor
+ if isinstance(ret, np.ndarray):
+ ret = torch.from_numpy(ret)
+ return ret
+
+ return decorated
+
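+# Illustrative example for `legacy_torch_api_compat` above (hypothetical
+# function): with USE_TORCH=1, a decorated numpy-based function transparently
+# accepts and returns torch tensors.
+#
+#   @legacy_torch_api_compat
+#   def normalize(x: np.ndarray) -> np.ndarray:
+#       return x / x.sum()
+#
+#   normalize(torch.ones(4))  # -> tensor([0.2500, 0.2500, 0.2500, 0.2500])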
+
+@functools.cache
+def _get_real_torch():
+ try:
+ import torch as _real_torch
+ except ImportError:
+ _real_torch = None
+ return _real_torch
+
+
+def log_no_torch_error():
+ logging.info(
+ "This command requires torch. You can install torch with `pip install torch` and run the command again."
+ )
+
+
+@dataclass
+class POWSolution:
+ """A solution to the registration PoW problem."""
+
+ nonce: int
+ block_number: int
+ difficulty: int
+ seal: bytes
+
+ async def is_stale(self, subtensor: "AsyncSubtensor") -> bool:
+ """
+ Returns True if the POW is stale.
+
+        This means the block the POW was solved for is more than 3 blocks behind the current block.
+ """
+ current_block = await subtensor.substrate.get_block_number(None)
+ return self.block_number < current_block - 3
+
+
+@dataclass
+class RegistrationStatistics:
+ """Statistics for a registration."""
+
+ time_spent_total: float
+ rounds_total: int
+ time_average: float
+ time_spent: float
+ hash_rate_perpetual: float
+ hash_rate: float
+ difficulty: int
+ block_number: int
+ block_hash: str
+
+
+class RegistrationStatisticsLogger:
+ """Logs statistics for a registration."""
+
+ console: Console
+ status: Optional[Status]
+
+ def __init__(
+ self, console_: Optional["Console"] = None, output_in_place: bool = True
+ ) -> None:
+ if console_ is None:
+ console_ = Console()
+ self.console = console_
+
+ if output_in_place:
+ self.status = self.console.status("Solving")
+ else:
+ self.status = None
+
+ def start(self) -> None:
+ if self.status is not None:
+ self.status.start()
+
+ def stop(self) -> None:
+ if self.status is not None:
+ self.status.stop()
+
+ @classmethod
+ def get_status_message(
+ cls, stats: RegistrationStatistics, verbose: bool = False
+ ) -> str:
+ """Provides a message of the current status of the block solving as a str for a logger or stdout."""
+ message = (
+ "Solving\n"
+ + f"Time Spent (total): [bold white]{timedelta(seconds=stats.time_spent_total)}[/bold white]\n"
+ + (
+ f"Time Spent This Round: {timedelta(seconds=stats.time_spent)}\n"
+ + f"Time Spent Average: {timedelta(seconds=stats.time_average)}\n"
+ if verbose
+ else ""
+ )
+ + f"Registration Difficulty: [bold white]{millify(stats.difficulty)}[/bold white]\n"
+ + f"Iters (Inst/Perp): [bold white]{get_human_readable(stats.hash_rate, 'H')}/s / "
+ + f"{get_human_readable(stats.hash_rate_perpetual, 'H')}/s[/bold white]\n"
+ + f"Block Number: [bold white]{stats.block_number}[/bold white]\n"
+ + f"Block Hash: [bold white]{stats.block_hash.encode('utf-8')}[/bold white]\n"
+ )
+ return message
+
+ def update(self, stats: RegistrationStatistics, verbose: bool = False) -> None:
+ """Passes the current status to the logger."""
+ if self.status is not None:
+ self.status.update(self.get_status_message(stats, verbose=verbose))
+ else:
+ self.console.log(self.get_status_message(stats, verbose=verbose))
+
+
+class _SolverBase(Process):
+ """
+ A process that solves the registration PoW problem.
+
+ Args:
+ proc_num: The number of the process being created.
+ num_proc: The total number of processes running.
+ update_interval: The number of nonces to try to solve before checking for a new block.
+ finished_queue: The queue to put the process number when a process finishes each update_interval. Used for calculating the average time per update_interval across all processes.
+ solution_queue: The queue to put the solution the process has found during the pow solve.
+ stop_event: The event to set by the main process when all the solver processes should stop. The solver process will check for the event after each update_interval. The solver process will stop when the event is set. Used to stop the solver processes when a solution is found.
+        curr_block: The array containing this process's current block hash. The main process will set the array to the new block hash when a new block is finalized in the network. The solver process will get the new block hash from this array when new_block_event is set.
+ curr_block_num: The value containing this process's current block number. The main process will set the value to the new block number when a new block is finalized in the network. The solver process will get the new block number from this value when new_block_event is set.
+        curr_diff: The array containing this process's current difficulty. The main process will set the array to the new difficulty when a new block is finalized in the network. The solver process will get the new difficulty from this array when new_block_event is set.
+ check_block: The lock to prevent this process from getting the new block data while the main process is updating the data.
+ limit: The limit of the pow solve for a valid solution.
+
+    Attributes:
+        new_block_event: The event set by the main process when a new block is finalized in the network. The solver process checks for the event after each update_interval and, when it is set, picks up the new block hash and difficulty and starts solving for a new nonce.
+ """
+
+ proc_num: int
+ num_proc: int
+ update_interval: int
+ finished_queue: Queue_Type
+ solution_queue: Queue_Type
+ new_block_event: Event
+ stop_event: Event
+ hotkey_bytes: bytes
+ curr_block: Array
+ curr_block_num: Value
+ curr_diff: Array
+ check_block: Lock
+ limit: int
+
+ def __init__(
+ self,
+ proc_num,
+ num_proc,
+ update_interval,
+ finished_queue,
+ solution_queue,
+ stop_event,
+ curr_block,
+ curr_block_num,
+ curr_diff,
+ check_block,
+ limit,
+ ):
+ Process.__init__(self, daemon=True)
+ self.proc_num = proc_num
+ self.num_proc = num_proc
+ self.update_interval = update_interval
+ self.finished_queue = finished_queue
+ self.solution_queue = solution_queue
+ self.new_block_event = Event()
+ self.new_block_event.clear()
+ self.curr_block = curr_block
+ self.curr_block_num = curr_block_num
+ self.curr_diff = curr_diff
+ self.check_block = check_block
+ self.stop_event = stop_event
+ self.limit = limit
+
+ def run(self):
+ raise NotImplementedError("_SolverBase is an abstract class")
+
+ @staticmethod
+ def create_shared_memory() -> tuple[Array, Value, Array]:
+ """Creates shared memory for the solver processes to use."""
+ curr_block = Array("h", 32, lock=True) # byte array
+ curr_block_num = Value("i", 0, lock=True) # int
+ curr_diff = Array("Q", [0, 0], lock=True) # [high, low]
+
+ return curr_block, curr_block_num, curr_diff
+
+
+class _Solver(_SolverBase):
+ """Performs POW Solution."""
+
+ def run(self):
+ block_number: int
+ block_and_hotkey_hash_bytes: bytes
+ block_difficulty: int
+ nonce_limit = int(math.pow(2, 64)) - 1
+
+ # Start at random nonce
+ nonce_start = random.randint(0, nonce_limit)
+ nonce_end = nonce_start + self.update_interval
+ while not self.stop_event.is_set():
+ if self.new_block_event.is_set():
+ with self.check_block:
+ block_number = self.curr_block_num.value
+ block_and_hotkey_hash_bytes = bytes(self.curr_block)
+ block_difficulty = _registration_diff_unpack(self.curr_diff)
+
+ self.new_block_event.clear()
+
+ # Do a block of nonces
+ solution = _solve_for_nonce_block(
+ nonce_start,
+ nonce_end,
+ block_and_hotkey_hash_bytes,
+ block_difficulty,
+ self.limit,
+ block_number,
+ )
+ if solution is not None:
+ self.solution_queue.put(solution)
+
+ try:
+ # Send time
+ self.finished_queue.put_nowait(self.proc_num)
+ except Full:
+ pass
+
+ nonce_start = random.randint(0, nonce_limit)
+ nonce_start = nonce_start % nonce_limit
+ nonce_end = nonce_start + self.update_interval
+
+
+class _CUDASolver(_SolverBase):
+ """Performs POW Solution using CUDA."""
+
+ dev_id: int
+ tpb: int
+
+ def __init__(
+ self,
+ proc_num,
+ num_proc,
+ update_interval,
+ finished_queue,
+ solution_queue,
+ stop_event,
+ curr_block,
+ curr_block_num,
+ curr_diff,
+ check_block,
+ limit,
+ dev_id: int,
+ tpb: int,
+ ):
+ super().__init__(
+ proc_num,
+ num_proc,
+ update_interval,
+ finished_queue,
+ solution_queue,
+ stop_event,
+ curr_block,
+ curr_block_num,
+ curr_diff,
+ check_block,
+ limit,
+ )
+ self.dev_id = dev_id
+ self.tpb = tpb
+
+ def run(self):
+ block_number: int = 0 # dummy value
+ block_and_hotkey_hash_bytes: bytes = b"0" * 32 # dummy value
+ block_difficulty: int = int(math.pow(2, 64)) - 1 # dummy value
+ nonce_limit = int(math.pow(2, 64)) - 1 # U64MAX
+
+ # Start at random nonce
+ nonce_start = random.randint(0, nonce_limit)
+ while not self.stop_event.is_set():
+ if self.new_block_event.is_set():
+ with self.check_block:
+ block_number = self.curr_block_num.value
+ block_and_hotkey_hash_bytes = bytes(self.curr_block)
+ block_difficulty = _registration_diff_unpack(self.curr_diff)
+
+ self.new_block_event.clear()
+
+ # Do a block of nonces
+ solution = _solve_for_nonce_block_cuda(
+ nonce_start,
+ self.update_interval,
+ block_and_hotkey_hash_bytes,
+ block_difficulty,
+ self.limit,
+ block_number,
+ self.dev_id,
+ self.tpb,
+ )
+ if solution is not None:
+ self.solution_queue.put(solution)
+
+ try:
+ # Signal that a nonce_block was finished using queue
+ # send our proc_num
+ self.finished_queue.put(self.proc_num)
+ except Full:
+ pass
+
+ # increase nonce by number of nonces processed
+ nonce_start += self.update_interval * self.tpb
+ nonce_start = nonce_start % nonce_limit
+
+
+class LazyLoadedTorch:
+ def __bool__(self):
+ return bool(_get_real_torch())
+
+ def __getattr__(self, name):
+ if real_torch := _get_real_torch():
+ return getattr(real_torch, name)
+ else:
+ log_no_torch_error()
+ raise ImportError("torch not installed")
+
+
+if TYPE_CHECKING:
+ import torch
+else:
+ torch = LazyLoadedTorch()
+
+
+class MaxSuccessException(Exception):
+ """Raised when the POW Solver has reached the max number of successful solutions."""
+
+
+class MaxAttemptsException(Exception):
+ """Raised when the POW Solver has reached the max number of attempts."""
+
+
+async def is_hotkey_registered(
+ subtensor: "AsyncSubtensor", netuid: int, hotkey_ss58: str
+) -> bool:
+ """Checks to see if the hotkey is registered on a given netuid"""
+ _result = await subtensor.substrate.query(
+ module="SubtensorModule",
+ storage_function="Uids",
+ params=[netuid, hotkey_ss58],
+ )
+    return _result is not None
+
+
+async def register_extrinsic(
+ subtensor: "AsyncSubtensor",
+ wallet: "Wallet",
+ netuid: int,
+ wait_for_inclusion: bool = False,
+ wait_for_finalization: bool = True,
+ max_allowed_attempts: int = 3,
+ output_in_place: bool = True,
+ cuda: bool = False,
+ dev_id: Union[list[int], int] = 0,
+ tpb: int = 256,
+ num_processes: Optional[int] = None,
+ update_interval: Optional[int] = None,
+ log_verbose: bool = False,
+) -> bool:
+ """Registers the wallet to the chain.
+
+ Args:
+ subtensor (bittensor.core.async_subtensor.AsyncSubtensor): initialized AsyncSubtensor object to use for chain interactions
+ wallet (bittensor_wallet.Wallet): Bittensor wallet object.
+ netuid (int): The ``netuid`` of the subnet to register on.
+ wait_for_inclusion (bool): If set, waits for the extrinsic to enter a block before returning `True`, or returns `False` if the extrinsic fails to enter the block within the timeout.
+ wait_for_finalization (bool): If set, waits for the extrinsic to be finalized on the chain before returning `True`, or returns `False` if the extrinsic fails to be finalized within the timeout.
+ max_allowed_attempts (int): Maximum number of attempts to register the wallet.
+ output_in_place (bool): Whether the POW solving should be outputted to the console as it goes along.
+ cuda (bool): If `True`, the wallet should be registered using CUDA device(s).
+ dev_id: The CUDA device id to use, or a list of device ids.
+ tpb: The number of threads per block (CUDA).
+ num_processes: The number of processes to use to register.
+ update_interval: The number of nonces to solve between updates.
+ log_verbose: If `True`, the registration process will log more information.
+
+ Returns:
+ `True` if extrinsic was finalized or included in the block. If we did not wait for finalization/inclusion, the response is `True`.
+ """
+
+ async def get_neuron_for_pubkey_and_subnet():
+ uid = await subtensor.substrate.query(
+ "SubtensorModule", "Uids", [netuid, wallet.hotkey.ss58_address]
+ )
+ if uid is None:
+ return NeuronInfo.get_null_neuron()
+
+ params = [netuid, uid]
+ json_body = await subtensor.substrate.rpc_request(
+ method="neuronInfo_getNeuron",
+ params=params,
+ )
+
+ if not (result := json_body.get("result", None)):
+ return NeuronInfo.get_null_neuron()
+
+ return NeuronInfo.from_vec_u8(bytes(result))
+
+ logging.debug("Checking subnet status")
+ if not await subtensor.subnet_exists(netuid):
+ logging.error(
+ f":cross_mark: Failed error: subnet {netuid} does not exist."
+ )
+ return False
+
+ logging.info(
+ f":satellite: Checking Account on subnet {netuid} ..."
+ )
+ neuron = await get_neuron_for_pubkey_and_subnet()
+ if not neuron.is_null:
+ logging.debug(
+ f"Wallet {wallet} is already registered on subnet {neuron.netuid} with uid{neuron.uid}."
+ )
+ return True
+
+ if not torch:
+ log_no_torch_error()
+ return False
+
+ # Attempt rolling registration.
+ attempts = 1
+ pow_result: Optional[POWSolution]
+ while True:
+ logging.info(
+ f":satellite: Registering... ({attempts}/{max_allowed_attempts})"
+ )
+ # Solve latest POW.
+ if cuda:
+ if not torch.cuda.is_available():
+ return False
+ pow_result = await create_pow(
+ subtensor,
+ wallet,
+ netuid,
+ output_in_place,
+ cuda=cuda,
+ dev_id=dev_id,
+ tpb=tpb,
+ num_processes=num_processes,
+ update_interval=update_interval,
+ log_verbose=log_verbose,
+ )
+ else:
+ pow_result = await create_pow(
+ subtensor,
+ wallet,
+ netuid,
+ output_in_place,
+ cuda=cuda,
+ num_processes=num_processes,
+ update_interval=update_interval,
+ log_verbose=log_verbose,
+ )
+
+ # pow failed
+ if not pow_result:
+ # might be registered already on this subnet
+ is_registered = await is_hotkey_registered(
+ subtensor, netuid=netuid, hotkey_ss58=wallet.hotkey.ss58_address
+ )
+ if is_registered:
+                logging.info(
+ f":white_heavy_check_mark: Already registered on netuid: {netuid}"
+ )
+ return True
+
+ # pow successful, proceed to submit pow to chain for registration
+ else:
+ logging.info(":satellite: Submitting POW...")
+ # check if pow result is still valid
+ while not await pow_result.is_stale(subtensor=subtensor):
+ call = await subtensor.substrate.compose_call(
+ call_module="SubtensorModule",
+ call_function="register",
+ call_params={
+ "netuid": netuid,
+ "block_number": pow_result.block_number,
+ "nonce": pow_result.nonce,
+ "work": [int(byte_) for byte_ in pow_result.seal],
+ "hotkey": wallet.hotkey.ss58_address,
+ "coldkey": wallet.coldkeypub.ss58_address,
+ },
+ )
+ extrinsic = await subtensor.substrate.create_signed_extrinsic(
+ call=call, keypair=wallet.hotkey
+ )
+ response = await subtensor.substrate.submit_extrinsic(
+ extrinsic,
+ wait_for_inclusion=wait_for_inclusion,
+ wait_for_finalization=wait_for_finalization,
+ )
+ if not wait_for_finalization and not wait_for_inclusion:
+ success, err_msg = True, ""
+ else:
+ await response.process_events()
+ success = await response.is_success
+ if not success:
+ success, err_msg = (
+ False,
+ format_error_message(
+ await response.error_message,
+ substrate=subtensor.substrate,
+ ),
+ )
+                    # Look up on-chain error codes here:
+ # https://github.com/opentensor/subtensor/blob/development/pallets/subtensor/src/errors.rs
+
+ if "HotKeyAlreadyRegisteredInSubNet" in err_msg:
+ logging.info(
+ f":white_heavy_check_mark: Already Registered on subnet: {netuid}."
+ )
+ return True
+ logging.error(f":cross_mark: Failed: {err_msg}")
+ await asyncio.sleep(0.5)
+
+ # Successful registration, final check for neuron and pubkey
+ if success:
+ logging.info(":satellite: Checking Registration status...")
+ is_registered = await is_hotkey_registered(
+ subtensor,
+ netuid=netuid,
+ hotkey_ss58=wallet.hotkey.ss58_address,
+ )
+ if is_registered:
+ logging.success(
+ ":white_heavy_check_mark: Registered"
+ )
+ return True
+ else:
+ # neuron not found, try again
+ logging.error(
+ ":cross_mark: Unknown error. Neuron not found."
+ )
+ continue
+ else:
+ # Exited loop because pow is no longer valid.
+ logging.error("POW is stale.")
+ # Try again.
+ continue
+
+ if attempts < max_allowed_attempts:
+ # Failed registration, retry pow
+ attempts += 1
+ logging.error(
+ f":satellite: Failed registration, retrying pow ... ({attempts}/{max_allowed_attempts})"
+ )
+ else:
+ # Failed to register after max attempts.
+ logging.error("No more attempts.")
+ return False
+
+
+async def run_faucet_extrinsic(
+ subtensor: "AsyncSubtensor",
+ wallet: Wallet,
+ wait_for_inclusion: bool = False,
+ wait_for_finalization: bool = True,
+ max_allowed_attempts: int = 3,
+ output_in_place: bool = True,
+ cuda: bool = False,
+ dev_id: int = 0,
+ tpb: int = 256,
+ num_processes: Optional[int] = None,
+ update_interval: Optional[int] = None,
+ log_verbose: bool = False,
+ max_successes: int = 3,
+) -> tuple[bool, str]:
+ """Runs a continual POW to get a faucet of TAO on the test net.
+
+ Args:
+ subtensor: The subtensor interface object used to run the extrinsic
+ wallet: Bittensor wallet object.
+ wait_for_inclusion: If set, waits for the extrinsic to enter a block before returning `True`, or returns `False` if the extrinsic fails to enter the block within the timeout.
+ wait_for_finalization: If set, waits for the extrinsic to be finalized on the chain before returning `True`, or returns `False` if the extrinsic fails to be finalized within the timeout.
+ max_allowed_attempts: Maximum number of attempts to register the wallet.
+ output_in_place: Whether to output logging data as the process runs.
+ cuda: If `True`, the wallet should be registered using CUDA device(s).
+ dev_id: The CUDA device id to use
+ tpb: The number of threads per block (CUDA).
+ num_processes: The number of processes to use to register.
+ update_interval: The number of nonces to solve between updates.
+ log_verbose: If `True`, the registration process will log more information.
+ max_successes: The maximum number of successful faucet runs for the wallet.
+
+ Returns:
+        tuple[bool, str]: `success` is `True` if the extrinsic was finalized or included in the block (also `True` if we did not wait for finalization/inclusion), along with a message describing the outcome.
+ """
+
+ if not torch:
+ log_no_torch_error()
+ return False, "Requires torch"
+
+ # Unlock coldkey
+ try:
+ wallet.unlock_coldkey()
+ except KeyFileError:
+ return False, "There was an error unlocking your coldkey"
+
+ # Get previous balance.
+ old_balance = await subtensor.get_balance(wallet.coldkeypub.ss58_address)
+
+ # Attempt rolling registration.
+ attempts = 1
+ successes = 1
+ while True:
+ try:
+ pow_result = None
+ while pow_result is None or await pow_result.is_stale(subtensor=subtensor):
+ # Solve latest POW.
+ if cuda:
+ if not torch.cuda.is_available():
+ return False, "CUDA is not available."
+ pow_result: Optional[POWSolution] = await create_pow(
+ subtensor,
+ wallet,
+ -1,
+ output_in_place,
+ cuda=cuda,
+ dev_id=dev_id,
+ tpb=tpb,
+ num_processes=num_processes,
+ update_interval=update_interval,
+ log_verbose=log_verbose,
+ )
+ else:
+ pow_result: Optional[POWSolution] = await create_pow(
+ subtensor,
+ wallet,
+ -1,
+ output_in_place,
+ cuda=cuda,
+ num_processes=num_processes,
+ update_interval=update_interval,
+ log_verbose=log_verbose,
+ )
+ call = await subtensor.substrate.compose_call(
+ call_module="SubtensorModule",
+ call_function="faucet",
+ call_params={
+ "block_number": pow_result.block_number,
+ "nonce": pow_result.nonce,
+ "work": [int(byte_) for byte_ in pow_result.seal],
+ },
+ )
+ extrinsic = await subtensor.substrate.create_signed_extrinsic(
+ call=call, keypair=wallet.coldkey
+ )
+ response = await subtensor.substrate.submit_extrinsic(
+ extrinsic,
+ wait_for_inclusion=wait_for_inclusion,
+ wait_for_finalization=wait_for_finalization,
+ )
+
+ # process if registration successful, try again if pow is still valid
+ await response.process_events()
+ if not await response.is_success:
+ logging.error(
+ f":cross_mark: Failed: {format_error_message(await response.error_message, subtensor.substrate)}"
+ )
+ if attempts == max_allowed_attempts:
+ raise MaxAttemptsException
+ attempts += 1
+ # Wait a bit before trying again
+ time.sleep(1)
+
+ # Successful registration
+ else:
+ new_balance = await subtensor.get_balance(
+ wallet.coldkeypub.ss58_address
+ )
+ logging.info(
+ f"Balance: {old_balance[wallet.coldkeypub.ss58_address]} :arrow_right: {new_balance[wallet.coldkeypub.ss58_address]}"
+ )
+ old_balance = new_balance
+
+ if successes == max_successes:
+ raise MaxSuccessException
+
+ attempts = 1 # Reset attempts on success
+ successes += 1
+
+ except KeyboardInterrupt:
+ return True, "Done"
+
+ except MaxSuccessException:
+ return True, f"Max successes reached: {3}"
+
+ except MaxAttemptsException:
+ return False, f"Max attempts reached: {max_allowed_attempts}"
+
+
+async def _check_for_newest_block_and_update(
+ subtensor: "AsyncSubtensor",
+ netuid: int,
+ old_block_number: int,
+ hotkey_bytes: bytes,
+ curr_diff: Array,
+ curr_block: Array,
+ curr_block_num: Value,
+ update_curr_block: "Callable",
+ check_block: Lock,
+ solvers: list[_Solver],
+ curr_stats: "RegistrationStatistics",
+) -> int:
+ """
+ Checks for a new block and updates the current block information if a new block is found.
+
+ Args:
+ subtensor: The subtensor object to use for getting the current block.
+ netuid: The netuid to use for retrieving the difficulty.
+ old_block_number: The old block number to check against.
+ hotkey_bytes: The bytes of the hotkey's pubkey.
+ curr_diff: The current difficulty as a multiprocessing array.
+ curr_block: Where the current block is stored as a multiprocessing array.
+ curr_block_num: Where the current block number is stored as a multiprocessing value.
+ update_curr_block: A function that updates the current block.
+ check_block: A mp lock that is used to check for a new block.
+ solvers: A list of solvers to update the current block for.
+ curr_stats: The current registration statistics to update.
+
+ Returns:
+ The current block number.
+ """
+ block_number = await subtensor.substrate.get_block_number(None)
+ if block_number != old_block_number:
+ old_block_number = block_number
+ # update block information
+ block_number, difficulty, block_hash = await _get_block_with_retry(
+ subtensor=subtensor, netuid=netuid
+ )
+ block_bytes = bytes.fromhex(block_hash[2:])
+
+ update_curr_block(
+ curr_diff,
+ curr_block,
+ curr_block_num,
+ block_number,
+ block_bytes,
+ difficulty,
+ hotkey_bytes,
+ check_block,
+ )
+ # Set new block events for each solver
+
+ for worker in solvers:
+ worker.new_block_event.set()
+
+ # update stats
+ curr_stats.block_number = block_number
+ curr_stats.block_hash = block_hash
+ curr_stats.difficulty = difficulty
+
+ return old_block_number
+
+
+async def _block_solver(
+ subtensor: "AsyncSubtensor",
+ wallet: Wallet,
+ num_processes: int,
+ netuid: int,
+ dev_id: list[int],
+ tpb: int,
+ update_interval: int,
+ curr_block,
+ curr_block_num,
+ curr_diff,
+ n_samples,
+ alpha_,
+ output_in_place,
+ log_verbose,
+ cuda: bool,
+):
+ """Shared code used by the Solvers to solve the POW solution."""
+ limit = int(math.pow(2, 256)) - 1
+
+ # Establish communication queues
+ # See the _Solver class for more information on the queues.
+ stop_event = Event()
+ stop_event.clear()
+
+ solution_queue = Queue()
+ finished_queues = [Queue() for _ in range(num_processes)]
+ check_block = Lock()
+
+ hotkey_bytes = (
+ wallet.coldkeypub.public_key if netuid == -1 else wallet.hotkey.public_key
+ )
+
+ if cuda:
+ # Create a worker per CUDA device
+ num_processes = len(dev_id)
+ solvers = [
+ _CUDASolver(
+ i,
+ num_processes,
+ update_interval,
+ finished_queues[i],
+ solution_queue,
+ stop_event,
+ curr_block,
+ curr_block_num,
+ curr_diff,
+ check_block,
+ limit,
+ dev_id[i],
+ tpb,
+ )
+ for i in range(num_processes)
+ ]
+ else:
+ # Start consumers
+ solvers = [
+ _Solver(
+ i,
+ num_processes,
+ update_interval,
+ finished_queues[i],
+ solution_queue,
+ stop_event,
+ curr_block,
+ curr_block_num,
+ curr_diff,
+ check_block,
+ limit,
+ )
+ for i in range(num_processes)
+ ]
+
+ # Get first block
+ block_number, difficulty, block_hash = await _get_block_with_retry(
+ subtensor=subtensor, netuid=netuid
+ )
+
+ block_bytes = bytes.fromhex(block_hash[2:])
+ old_block_number = block_number
+ # Set to current block
+ _update_curr_block(
+ curr_diff,
+ curr_block,
+ curr_block_num,
+ block_number,
+ block_bytes,
+ difficulty,
+ hotkey_bytes,
+ check_block,
+ )
+
+ # Set new block events for each solver to start at the initial block
+ for worker in solvers:
+ worker.new_block_event.set()
+
+ for worker in solvers:
+ worker.start() # start the solver processes
+
+ start_time = time.time() # time that the registration started
+ time_last = start_time # time that the last work blocks completed
+
+ curr_stats = RegistrationStatistics(
+ time_spent_total=0.0,
+ time_average=0.0,
+ rounds_total=0,
+ time_spent=0.0,
+ hash_rate_perpetual=0.0,
+ hash_rate=0.0,
+ difficulty=difficulty,
+ block_number=block_number,
+ block_hash=block_hash,
+ )
+
+ start_time_perpetual = time.time()
+
+ logger = RegistrationStatisticsLogger(output_in_place=output_in_place)
+ logger.start()
+
+ solution = None
+
+ hash_rates = [0] * n_samples # The last n true hash_rates
+ weights = [alpha_**i for i in range(n_samples)] # weights decay by alpha
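+    # e.g. n_samples=3, alpha_=0.5 gives weights [1.0, 0.5, 0.25]; the weighted
+    # mean computed below smooths spikes in the instantaneous hash rate.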
+
+    timeout = 0.15
+ while netuid == -1 or not await is_hotkey_registered(
+ subtensor, netuid, wallet.hotkey.ss58_address
+ ):
+ # Wait until a solver finds a solution
+ try:
+ solution = solution_queue.get(block=True, timeout=timeout)
+ if solution is not None:
+ break
+ except Empty:
+ # No solution found, try again
+ pass
+
+ # check for new block
+ old_block_number = await _check_for_newest_block_and_update(
+ subtensor=subtensor,
+ netuid=netuid,
+ hotkey_bytes=hotkey_bytes,
+ old_block_number=old_block_number,
+ curr_diff=curr_diff,
+ curr_block=curr_block,
+ curr_block_num=curr_block_num,
+ curr_stats=curr_stats,
+ update_curr_block=_update_curr_block,
+ check_block=check_block,
+ solvers=solvers,
+ )
+
+ num_time = 0
+ for finished_queue in finished_queues:
+ try:
+ finished_queue.get(timeout=0.1)
+ num_time += 1
+
+ except Empty:
+ continue
+
+ time_now = time.time() # get current time
+ time_since_last = time_now - time_last # get time since last work block(s)
+ if num_time > 0 and time_since_last > 0.0:
+ # create EWMA of the hash_rate to make measure more robust
+
+ if cuda:
+ hash_rate_ = (num_time * tpb * update_interval) / time_since_last
+ else:
+ hash_rate_ = (num_time * update_interval) / time_since_last
+ hash_rates.append(hash_rate_)
+ hash_rates.pop(0) # remove the 0th data point
+ curr_stats.hash_rate = sum(
+ [hash_rates[i] * weights[i] for i in range(n_samples)]
+ ) / (sum(weights))
+
+ # update time last to now
+ time_last = time_now
+
+ curr_stats.time_average = (
+ curr_stats.time_average * curr_stats.rounds_total
+ + curr_stats.time_spent
+ ) / (curr_stats.rounds_total + num_time)
+ curr_stats.rounds_total += num_time
+
+ # Update stats
+ curr_stats.time_spent = time_since_last
+ new_time_spent_total = time_now - start_time_perpetual
+ if cuda:
+ curr_stats.hash_rate_perpetual = (
+ curr_stats.rounds_total * (tpb * update_interval)
+ ) / new_time_spent_total
+ else:
+ curr_stats.hash_rate_perpetual = (
+ curr_stats.rounds_total * update_interval
+ ) / new_time_spent_total
+ curr_stats.time_spent_total = new_time_spent_total
+
+ # Update the logger
+ logger.update(curr_stats, verbose=log_verbose)
+
+ # exited while, solution contains the nonce or wallet is registered
+ stop_event.set() # stop all other processes
+ logger.stop()
+
+ # terminate and wait for all solvers to exit
+ _terminate_workers_and_wait_for_exit(solvers)
+
+ return solution
+
+
+async def _solve_for_difficulty_fast_cuda(
+ subtensor: "AsyncSubtensor",
+ wallet: Wallet,
+ netuid: int,
+ output_in_place: bool = True,
+ update_interval: int = 50_000,
+ tpb: int = 512,
+ dev_id: Union[list[int], int] = 0,
+ n_samples: int = 10,
+ alpha_: float = 0.80,
+ log_verbose: bool = False,
+) -> Optional["POWSolution"]:
+ """
+ Solves the registration fast using CUDA
+
+ Args:
+ subtensor: The subtensor node to grab blocks
+ wallet: The wallet to register
+ netuid: The netuid of the subnet to register to.
+ output_in_place: If true, prints the output in place, otherwise prints to new lines
+ update_interval: The number of nonces to try before checking for more blocks
+ tpb: The number of threads per block. CUDA param that should match the GPU capability
+ dev_id: The CUDA device IDs to execute the registration on, either a single device or a list of devices
+ n_samples: The number of samples of the hash_rate to keep for the EWMA
+ alpha_: The alpha for the EWMA for the hash_rate calculation
+ log_verbose: If true, prints more verbose logging of the registration metrics.
+
+ Note:
+ The hash rate is calculated as an exponentially weighted moving average in order to make the measure more robust.
+ """
+ if isinstance(dev_id, int):
+ dev_id = [dev_id]
+ elif dev_id is None:
+ dev_id = [0]
+
+ if update_interval is None:
+ update_interval = 50_000
+
+ if not torch.cuda.is_available():
+ raise Exception("CUDA not available")
+
+ # Set mp start to use spawn so CUDA doesn't complain
+ with _UsingSpawnStartMethod(force=True):
+ curr_block, curr_block_num, curr_diff = _CUDASolver.create_shared_memory()
+
+ solution = await _block_solver(
+ subtensor=subtensor,
+ wallet=wallet,
+ num_processes=None,
+ netuid=netuid,
+ dev_id=dev_id,
+ tpb=tpb,
+ update_interval=update_interval,
+ curr_block=curr_block,
+ curr_block_num=curr_block_num,
+ curr_diff=curr_diff,
+ n_samples=n_samples,
+ alpha_=alpha_,
+ output_in_place=output_in_place,
+ log_verbose=log_verbose,
+ cuda=True,
+ )
+
+ return solution
+
+
+async def _solve_for_difficulty_fast(
+ subtensor,
+ wallet: Wallet,
+ netuid: int,
+ output_in_place: bool = True,
+ num_processes: Optional[int] = None,
+ update_interval: Optional[int] = None,
+ n_samples: int = 10,
+ alpha_: float = 0.80,
+ log_verbose: bool = False,
+) -> Optional[POWSolution]:
+ """
+ Solves the POW for registration using multiprocessing.
+
+ Args:
+ subtensor: Subtensor to connect to for block information and to submit.
+ wallet: wallet to use for registration.
+ netuid: The netuid of the subnet to register to.
+ output_in_place: If true, prints the status in place. Otherwise, prints the status on a new line.
+ num_processes: Number of processes to use.
+ update_interval: Number of nonces to solve before updating block information.
+ n_samples: The number of samples of the hash_rate to keep for the EWMA
+ alpha_: The alpha for the EWMA for the hash_rate calculation
+ log_verbose: If true, prints more verbose logging of the registration metrics.
+
+ Notes:
+ The hash rate is calculated as an exponentially weighted moving average in order to make the measure more robust.
+ We can also modify the update interval to do smaller blocks of work, while still updating the block information after a different number of nonces, to increase the transparency of the process while still keeping the speed.
+ """
+ if not num_processes:
+ # get the number of allowed processes for this process
+        num_processes = max(1, get_cpu_count())
+
+ if update_interval is None:
+ update_interval = 50_000
+
+ curr_block, curr_block_num, curr_diff = _Solver.create_shared_memory()
+
+ solution = await _block_solver(
+ subtensor=subtensor,
+ wallet=wallet,
+ num_processes=num_processes,
+ netuid=netuid,
+ dev_id=None,
+ tpb=None,
+ update_interval=update_interval,
+ curr_block=curr_block,
+ curr_block_num=curr_block_num,
+ curr_diff=curr_diff,
+ n_samples=n_samples,
+ alpha_=alpha_,
+ output_in_place=output_in_place,
+ log_verbose=log_verbose,
+ cuda=False,
+ )
+
+ return solution
+
+
+def _terminate_workers_and_wait_for_exit(
+ workers: list[Union[Process, Queue_Type]],
+) -> None:
+    for worker in workers:
+        if isinstance(worker, Queue_Type):
+            worker.join_thread()
+        else:
+            # Process.join(timeout) does not raise on timeout; check liveness
+            # explicitly and terminate stragglers.
+            worker.join(3.0)
+            if worker.is_alive():
+                worker.terminate()
+            try:
+                worker.close()
+            except ValueError:
+                worker.terminate()
+
+
+# TODO verify this works with async
+@backoff.on_exception(backoff.constant, Exception, interval=1, max_tries=3)
+async def _get_block_with_retry(
+ subtensor: "AsyncSubtensor", netuid: int
+) -> tuple[int, int, str]:
+ """
+ Gets the current block number, difficulty, and block hash from the substrate node.
+
+ Args:
+ subtensor: The subtensor object to use to get the block number, difficulty, and block hash.
+ netuid: The netuid of the network to get the block number, difficulty, and block hash from.
+
+ Returns:
+ The current block number, difficulty of the subnet, block hash
+
+ Raises:
+ Exception: If the block hash is None.
+ ValueError: If the difficulty is None.
+ """
+ block_number = await subtensor.substrate.get_block_number(None)
+ block_hash = await subtensor.substrate.get_block_hash(
+ block_number
+ ) # TODO check if I need to do all this
+ try:
+ difficulty = (
+ 1_000_000
+ if netuid == -1
+ else int(
+ await subtensor.get_hyperparameter(
+ param_name="Difficulty", netuid=netuid, block_hash=block_hash
+ )
+ )
+ )
+ except TypeError:
+ raise ValueError("Chain error. Difficulty is None")
+ except SubstrateRequestException:
+ raise Exception(
+ "Network error. Could not connect to substrate to get block hash"
+ )
+ return block_number, difficulty, block_hash
+
+
+def _registration_diff_unpack(packed_diff: Array) -> int:
+ """Unpacks the packed two 32-bit integers into one 64-bit integer. Little endian."""
+ return int(packed_diff[0] << 32 | packed_diff[1])
+
+
+def _registration_diff_pack(diff: int, packed_diff: Array):
+ """Packs the difficulty into two 32-bit integers. Little endian."""
+ packed_diff[0] = diff >> 32
+ packed_diff[1] = diff & 0xFFFFFFFF # low 32 bits
+
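+# Round-trip example: diff = (1 << 32) | 2 packs to packed_diff == [1, 2], and
+# _registration_diff_unpack recovers the same 64-bit value.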
+
+class _UsingSpawnStartMethod:
+ def __init__(self, force: bool = False):
+ self._old_start_method = None
+ self._force = force
+
+ def __enter__(self):
+ self._old_start_method = mp.get_start_method(allow_none=True)
+ if self._old_start_method is None:
+ self._old_start_method = "spawn" # default to spawn
+
+ mp.set_start_method("spawn", force=self._force)
+
+ def __exit__(self, *args):
+ # restore the old start method
+ mp.set_start_method(self._old_start_method, force=True)
+
+
+async def create_pow(
+ subtensor: "AsyncSubtensor",
+ wallet: Wallet,
+ netuid: int,
+ output_in_place: bool = True,
+ cuda: bool = False,
+ dev_id: Union[list[int], int] = 0,
+ tpb: int = 256,
+ num_processes: int = None,
+ update_interval: int = None,
+ log_verbose: bool = False,
+) -> Optional[dict[str, Any]]:
+ """
+ Creates a proof of work for the given subtensor and wallet.
+
+ Args:
+ subtensor: The subtensor to create a proof of work for.
+ wallet: The wallet to create a proof of work for.
+ netuid: The netuid for the subnet to create a proof of work for.
+ output_in_place: If true, prints the progress of the proof of work to the console in-place. Meaning the progress is printed on the same lines.
+ cuda: If true, uses CUDA to solve the proof of work.
+ dev_id: The CUDA device id(s) to use. If cuda is true and dev_id is a list, then multiple CUDA devices will be used to solve the proof of work.
+ tpb: The number of threads per block to use when solving the proof of work. Should be a multiple of 32.
+ num_processes: The number of processes to use when solving the proof of work. If None, then the number of processes is equal to the number of CPU cores.
+ update_interval: The number of nonces to run before checking for a new block.
+ log_verbose: If true, prints the progress of the proof of work more verbosely.
+
+ Returns:
+ The proof of work solution or None if the wallet is already registered or there is a different error.
+
+ Raises:
+ ValueError: If the subnet does not exist.
+ """
+ if netuid != -1:
+ if not await subtensor.subnet_exists(netuid=netuid):
+ raise ValueError(f"Subnet {netuid} does not exist")
+
+ if cuda:
+ solution: Optional[POWSolution] = await _solve_for_difficulty_fast_cuda(
+ subtensor,
+ wallet,
+ netuid=netuid,
+ output_in_place=output_in_place,
+ dev_id=dev_id,
+ tpb=tpb,
+ update_interval=update_interval,
+ log_verbose=log_verbose,
+ )
+ else:
+ solution: Optional[POWSolution] = await _solve_for_difficulty_fast(
+ subtensor,
+ wallet,
+ netuid=netuid,
+ output_in_place=output_in_place,
+ num_processes=num_processes,
+ update_interval=update_interval,
+ log_verbose=log_verbose,
+ )
+
+ return solution
+
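+# Typical flow (mirroring register_extrinsic above): solve, then re-check
+# staleness before submitting, since the chain may have advanced meanwhile.
+#
+#   pow_result = await create_pow(subtensor, wallet, netuid, output_in_place=True)
+#   if pow_result and not await pow_result.is_stale(subtensor=subtensor):
+#       ...  # compose and submit the `register` extrinsic with nonce/seal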
+
+def _solve_for_nonce_block_cuda(
+ nonce_start: int,
+ update_interval: int,
+ block_and_hotkey_hash_bytes: bytes,
+ difficulty: int,
+ limit: int,
+ block_number: int,
+ dev_id: int,
+ tpb: int,
+) -> Optional[POWSolution]:
+ """
+ Tries to solve the POW on a CUDA device for a block of nonces (nonce_start, nonce_start + update_interval * tpb
+ """
+ solution, seal = solve_cuda(
+ nonce_start,
+ update_interval,
+ tpb,
+ block_and_hotkey_hash_bytes,
+ difficulty,
+ limit,
+ dev_id,
+ )
+
+ if solution != -1:
+ # Check if solution is valid (i.e. not -1)
+ return POWSolution(solution, block_number, difficulty, seal)
+
+ return None
+
+
+def _solve_for_nonce_block(
+ nonce_start: int,
+ nonce_end: int,
+ block_and_hotkey_hash_bytes: bytes,
+ difficulty: int,
+ limit: int,
+ block_number: int,
+) -> Optional[POWSolution]:
+ """
+ Tries to solve the POW for a block of nonces (nonce_start, nonce_end)
+ """
+ for nonce in range(nonce_start, nonce_end):
+ # Create seal.
+ seal = _create_seal_hash(block_and_hotkey_hash_bytes, nonce)
+
+ # Check if seal meets difficulty
+ if _seal_meets_difficulty(seal, difficulty, limit):
+ # Found a solution, save it.
+ return POWSolution(nonce, block_number, difficulty, seal)
+
+ return None
+
+
+class CUDAException(Exception):
+ """An exception raised when an error occurs in the CUDA environment."""
+
+
+def _hex_bytes_to_u8_list(hex_bytes: bytes) -> list[int]:
+ hex_chunks = [int(hex_bytes[i : i + 2], 16) for i in range(0, len(hex_bytes), 2)]
+ return hex_chunks
+
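+# Worked example: _hex_bytes_to_u8_list(b"0aff") == [10, 255].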
+
+def _create_seal_hash(block_and_hotkey_hash_bytes: bytes, nonce: int) -> bytes:
+ """
+ Create a cryptographic seal hash from the given block and hotkey hash bytes and nonce.
+
+ This function generates a seal hash by combining the given block and hotkey hash bytes with a nonce.
+ It first converts the nonce to a byte representation, then concatenates it with the first 64 hex characters of the block and hotkey hash bytes. The result is then hashed using SHA-256 followed by the Keccak-256 algorithm to produce the final seal hash.
+
+ Args:
+ block_and_hotkey_hash_bytes (bytes): The combined hash bytes of the block and hotkey.
+ nonce (int): The nonce value used for hashing.
+
+ Returns:
+ The resulting seal hash.
+ """
+ nonce_bytes = binascii.hexlify(nonce.to_bytes(8, "little"))
+ pre_seal = nonce_bytes + binascii.hexlify(block_and_hotkey_hash_bytes)[:64]
+ seal_sh256 = hashlib.sha256(bytearray(_hex_bytes_to_u8_list(pre_seal))).digest()
+ kec = keccak.new(digest_bits=256)
+ seal = kec.update(seal_sh256).digest()
+ return seal
+
+
+def _seal_meets_difficulty(seal: bytes, difficulty: int, limit: int) -> bool:
+ """Determines if a seal meets the specified difficulty"""
+ seal_number = int.from_bytes(seal, "big")
+ product = seal_number * difficulty
+ return product < limit
+
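+# Worked example: the check above is equivalent to seal_number < limit / difficulty,
+# so with limit = 2**256 - 1 a difficulty of 2 accepts roughly half of all seals,
+# and each doubling of the difficulty halves the acceptance probability again.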
+
+def _hash_block_with_hotkey(block_bytes: bytes, hotkey_bytes: bytes) -> bytes:
+ """Hashes the block with the hotkey using Keccak-256 to get 32 bytes"""
+ kec = keccak.new(digest_bits=256)
+ kec = kec.update(bytearray(block_bytes + hotkey_bytes))
+ block_and_hotkey_hash_bytes = kec.digest()
+ return block_and_hotkey_hash_bytes
+
+
+def _update_curr_block(
+ curr_diff: Array,
+ curr_block: Array,
+ curr_block_num: Value,
+ block_number: int,
+ block_bytes: bytes,
+ diff: int,
+ hotkey_bytes: bytes,
+ lock: Lock,
+):
+ """
+ Update the current block data with the provided block information and difficulty.
+
+ This function updates the current block and its difficulty in a thread-safe manner. It sets the current block
+ number, hashes the block with the hotkey, updates the current block bytes, and packs the difficulty.
+
+ curr_diff: Shared array to store the current difficulty.
+ curr_block: Shared array to store the current block data.
+ curr_block_num: Shared value to store the current block number.
+ block_number: The block number to set as the current block number.
+ block_bytes: The block data bytes to be hashed with the hotkey.
+ diff: The difficulty value to be packed into the current difficulty array.
+ hotkey_bytes: The hotkey bytes used for hashing the block.
+ lock: A lock to ensure thread-safe updates.
+ """
+ with lock:
+ curr_block_num.value = block_number
+ # Hash the block with the hotkey
+ block_and_hotkey_hash_bytes = _hash_block_with_hotkey(block_bytes, hotkey_bytes)
+ for i in range(32):
+ curr_block[i] = block_and_hotkey_hash_bytes[i]
+ _registration_diff_pack(diff, curr_diff)
+
+
+def get_cpu_count() -> int:
+ try:
+ return len(os.sched_getaffinity(0))
+ except AttributeError:
+ # macOS does not have sched_getaffinity
+ return os.cpu_count()
+
+
+
+def solve_cuda(
+ nonce_start: np.int64,
+ update_interval: np.int64,
+ tpb: int,
+ block_and_hotkey_hash_bytes: bytes,
+ difficulty: int,
+ limit: int,
+ dev_id: int = 0,
+) -> tuple[np.int64, bytes]:
+ """
+ Solves the PoW problem using CUDA.
+
+    Args:
+        nonce_start: Starting nonce.
+        update_interval: Number of nonces to solve before updating block information.
+        tpb: Threads per block.
+        block_and_hotkey_hash_bytes: Keccak(Bytes of the block hash + bytes of the hotkey) 64 bytes.
+        difficulty: Difficulty of the PoW problem.
+        limit: Upper limit of the nonce.
+        dev_id: The CUDA device ID.
+
+    Returns:
+        (nonce, seal) corresponding to the solution. Returns -1 for nonce if no solution is found.
+ """
+
+ try:
+ import cubit
+ except ImportError:
+ raise ImportError("Please install cubit")
+
+ upper = int(limit // difficulty)
+
+ upper_bytes = upper.to_bytes(32, byteorder="little", signed=False)
+
+ # Call cython function
+ # int blockSize, uint64 nonce_start, uint64 update_interval, const unsigned char[:] limit,
+ # const unsigned char[:] block_bytes, int dev_id
+ block_and_hotkey_hash_hex = binascii.hexlify(block_and_hotkey_hash_bytes)[:64]
+
+ solution = cubit.solve_cuda(
+ tpb,
+ nonce_start,
+ update_interval,
+ upper_bytes,
+ block_and_hotkey_hash_hex,
+ dev_id,
+ ) # 0 is first GPU
+ seal = None
+ if solution != -1:
+ seal = _create_seal_hash(block_and_hotkey_hash_hex, solution)
+ if _seal_meets_difficulty(seal, difficulty, limit):
+ return solution, seal
+ else:
+ return -1, b"\x00" * 32
+
+ return solution, seal
+
+
+def reset_cuda():
+ """
+ Resets the CUDA environment.
+ """
+ try:
+ import cubit
+ except ImportError:
+ raise ImportError("Please install cubit")
+
+ cubit.reset_cuda()
+
+
+def log_cuda_errors() -> str:
+ """
+ Logs any CUDA errors.
+ """
+ try:
+ import cubit
+ except ImportError:
+ raise ImportError("Please install cubit")
+
+ f = io.StringIO()
+ with redirect_stdout(f):
+ cubit.log_cuda_errors()
+
+ s = f.getvalue()
+
+ return s
diff --git a/bittensor/core/extrinsics/async_root.py b/bittensor/core/extrinsics/async_root.py
new file mode 100644
index 0000000000..8a92c06a57
--- /dev/null
+++ b/bittensor/core/extrinsics/async_root.py
@@ -0,0 +1,237 @@
+import asyncio
+import time
+from typing import Union, TYPE_CHECKING
+
+import numpy as np
+from bittensor_wallet import Wallet
+from bittensor_wallet.errors import KeyFileError
+from numpy.typing import NDArray
+from substrateinterface.exceptions import SubstrateRequestException
+
+from bittensor.utils import u16_normalized_float, format_error_message
+from bittensor.utils.btlogging import logging
+from bittensor.utils.weight_utils import (
+ normalize_max_weight,
+ convert_weights_and_uids_for_emit,
+)
+
+if TYPE_CHECKING:
+ from bittensor.core.async_subtensor import AsyncSubtensor
+
+
+async def get_limits(subtensor: "AsyncSubtensor") -> tuple[int, float]:
+ """
+    Retrieves the minimum allowed weights and maximum weight limit for the root network (netuid 0).
+
+ These values are fetched asynchronously using `asyncio.gather` to run both requests concurrently.
+
+ Args:
+ subtensor (AsyncSubtensor): The AsyncSubtensor object used to interface with the network's substrate node.
+
+ Returns:
+ tuple[int, float]: A tuple containing:
+ - `min_allowed_weights` (int): The minimum allowed weights.
+ - `max_weight_limit` (float): The maximum weight limit, normalized to a float value.
+ """
+ # Get weight restrictions.
+ maw, mwl = await asyncio.gather(
+ subtensor.get_hyperparameter("MinAllowedWeights", netuid=0),
+ subtensor.get_hyperparameter("MaxWeightsLimit", netuid=0),
+ )
+ min_allowed_weights = int(maw)
+ max_weight_limit = u16_normalized_float(int(mwl))
+ return min_allowed_weights, max_weight_limit
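A brief usage sketch (names illustrative; assumes an already-connected `AsyncSubtensor`):

    import asyncio

    async def show_root_limits(subtensor) -> None:
        # get_limits already runs its two hyperparameter queries concurrently.
        min_allowed, max_limit = await get_limits(subtensor)
        print(f"min allowed weights: {min_allowed}, max weight limit: {max_limit:.4f}")

    # asyncio.run(show_root_limits(subtensor))  # with an initialized AsyncSubtensor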
+
+
+async def root_register_extrinsic(
+ subtensor: "AsyncSubtensor",
+ wallet: "Wallet",
+ netuid: int,
+ wait_for_inclusion: bool = True,
+ wait_for_finalization: bool = True,
+) -> bool:
+ """Registers the wallet to root network.
+
+ Arguments:
+ subtensor (bittensor.core.async_subtensor.AsyncSubtensor): The AsyncSubtensor object
+ wallet (bittensor_wallet.Wallet): Bittensor wallet object.
+ netuid (int): Subnet uid.
+ wait_for_inclusion (bool): If set, waits for the extrinsic to enter a block before returning `True`, or returns `False` if the extrinsic fails to enter the block within the timeout.
+ wait_for_finalization (bool): If set, waits for the extrinsic to be finalized on the chain before returning `True`, or returns `False` if the extrinsic fails to be finalized within the timeout.
+
+ Returns:
+ `True` if extrinsic was finalized or included in the block. If we did not wait for finalization/inclusion, the response is `True`.
+ """
+
+ try:
+ wallet.unlock_coldkey()
+ except KeyFileError:
+ logging.error("Error decrypting coldkey (possibly incorrect password)")
+ return False
+
+ logging.debug(
+ f"Checking if hotkey ({wallet.hotkey_str}) is registered on root."
+ )
+ is_registered = await subtensor.is_hotkey_registered(
+ netuid=netuid, hotkey_ss58=wallet.hotkey.ss58_address
+ )
+ if is_registered:
+ logging.error(
+ ":white_heavy_check_mark: Already registered on root network."
+ )
+ return True
+
+ logging.info(":satellite: Registering to root network...")
+ call = await subtensor.substrate.compose_call(
+ call_module="SubtensorModule",
+ call_function="root_register",
+ call_params={"hotkey": wallet.hotkey.ss58_address},
+ )
+ success, err_msg = await subtensor.sign_and_send_extrinsic(
+ call,
+ wallet=wallet,
+ wait_for_inclusion=wait_for_inclusion,
+ wait_for_finalization=wait_for_finalization,
+ )
+
+ if not success:
+ logging.error(f":cross_mark: Failed error: {err_msg}")
+        await asyncio.sleep(0.5)
+ return False
+
+ # Successful registration, final check for neuron and pubkey
+ else:
+ uid = await subtensor.substrate.query(
+ module="SubtensorModule",
+ storage_function="Uids",
+ params=[netuid, wallet.hotkey.ss58_address],
+ )
+ if uid is not None:
+ logging.info(
+ f":white_heavy_check_mark: Registered with UID {uid}."
+ )
+ return True
+ else:
+ # neuron not found, try again
+ logging.error(":cross_mark: Unknown error. Neuron not found.")
+ return False
+
+
+async def set_root_weights_extrinsic(
+ subtensor: "AsyncSubtensor",
+ wallet: "Wallet",
+ netuids: Union[NDArray[np.int64], list[int]],
+ weights: Union[NDArray[np.float32], list[float]],
+ version_key: int = 0,
+ wait_for_inclusion: bool = False,
+ wait_for_finalization: bool = False,
+) -> bool:
+ """Sets the given weights and values on chain for wallet hotkey account.
+
+ Arguments:
+ subtensor (bittensor.core.async_subtensor.AsyncSubtensor): The AsyncSubtensor object
+ wallet (bittensor_wallet.Wallet): Bittensor wallet object.
+ netuids (Union[NDArray[np.int64], list[int]]): The `netuid` of the subnet to set weights for.
+ weights (Union[NDArray[np.float32], list[float]]): Weights to set. These must be `float` s and must correspond to the passed `netuid` s.
+ version_key (int): The version key of the validator.
+ wait_for_inclusion (bool): If set, waits for the extrinsic to enter a block before returning `True`, or returns `False` if the extrinsic fails to enter the block within the timeout.
+ wait_for_finalization (bool): If set, waits for the extrinsic to be finalized on the chain before returning `True`, or returns `False` if the extrinsic fails to be finalized within the timeout.
+
+ Returns:
+ `True` if extrinsic was finalized or included in the block. If we did not wait for finalization/inclusion, the response is `True`.
+ """
+
+ async def _do_set_weights():
+ call = await subtensor.substrate.compose_call(
+ call_module="SubtensorModule",
+ call_function="set_root_weights",
+ call_params={
+ "dests": weight_uids,
+ "weights": weight_vals,
+ "netuid": 0,
+ "version_key": version_key,
+ "hotkey": wallet.hotkey.ss58_address,
+ },
+ )
+ # Period dictates how long the extrinsic will stay as part of waiting pool
+ extrinsic = await subtensor.substrate.create_signed_extrinsic(
+ call=call,
+ keypair=wallet.coldkey,
+ era={"period": 5},
+ )
+ response = await subtensor.substrate.submit_extrinsic(
+ extrinsic,
+ wait_for_inclusion=wait_for_inclusion,
+ wait_for_finalization=wait_for_finalization,
+ )
+ # We only wait here if we expect finalization.
+ if not wait_for_finalization and not wait_for_inclusion:
+ return True, "Not waiting for finalization or inclusion."
+
+ await response.process_events()
+ if await response.is_success:
+ return True, "Successfully set weights."
+ else:
+ return False, await response.error_message
+
+ my_uid = await subtensor.substrate.query(
+ "SubtensorModule", "Uids", [0, wallet.hotkey.ss58_address]
+ )
+
+ if my_uid is None:
+ logging.error("Your hotkey is not registered to the root network.")
+ return False
+
+ try:
+ wallet.unlock_coldkey()
+ except KeyFileError:
+ logging.error("Error decrypting coldkey (possibly incorrect password).")
+ return False
+
+ # First convert types.
+ if isinstance(netuids, list):
+ netuids = np.array(netuids, dtype=np.int64)
+ if isinstance(weights, list):
+ weights = np.array(weights, dtype=np.float32)
+
+ logging.debug("Fetching weight limits")
+ min_allowed_weights, max_weight_limit = await get_limits(subtensor)
+
+ # Get non zero values.
+ non_zero_weight_idx = np.argwhere(weights > 0).squeeze(axis=1)
+ non_zero_weights = weights[non_zero_weight_idx]
+ if non_zero_weights.size < min_allowed_weights:
+ raise ValueError(
+ "The minimum number of weights required to set weights is {}, got {}".format(
+ min_allowed_weights, non_zero_weights.size
+ )
+ )
+
+ # Normalize the weights to max value.
+ logging.info("Normalizing weights")
+ formatted_weights = normalize_max_weight(x=weights, limit=max_weight_limit)
+ logging.info(
+ f"Raw weights -> Normalized weights: {weights} -> {formatted_weights}"
+ )
+
+ try:
+ logging.info(":satellite: Setting root weights...")
+        weight_uids, weight_vals = convert_weights_and_uids_for_emit(
+            netuids, formatted_weights
+        )
+
+ success, error_message = await _do_set_weights()
+
+ if not wait_for_finalization and not wait_for_inclusion:
+ return True
+
+ if success is True:
+ logging.info(":white_heavy_check_mark: Finalized")
+ return True
+ else:
+ fmt_err = format_error_message(error_message, subtensor.substrate)
+ logging.error(f":cross_mark: Failed error: {fmt_err}")
+ return False
+
+ except SubstrateRequestException as e:
+ fmt_err = format_error_message(e, subtensor.substrate)
+ logging.error(f":cross_mark: Failed error: {fmt_err}")
+ return False
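Taken together, a hedged sketch of the intended call pattern for this module; the wallet and subtensor objects are assumed to be constructed and connected elsewhere, and the subnet ids and weight values are illustrative:

    async def register_and_set_root_weights(subtensor, wallet) -> bool:
        # Register the hotkey on the root network first (netuid 0).
        if not await root_register_extrinsic(subtensor, wallet, netuid=0):
            return False
        # Then vote weights over subnets.
        return await set_root_weights_extrinsic(
            subtensor,
            wallet,
            netuids=[1, 2, 3],
            weights=[0.5, 0.3, 0.2],
            wait_for_inclusion=True,
        )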
diff --git a/bittensor/core/extrinsics/async_transfer.py b/bittensor/core/extrinsics/async_transfer.py
new file mode 100644
index 0000000000..81b8ea7137
--- /dev/null
+++ b/bittensor/core/extrinsics/async_transfer.py
@@ -0,0 +1,189 @@
+import asyncio
+from typing import TYPE_CHECKING
+
+from bittensor_wallet import Wallet
+from bittensor_wallet.errors import KeyFileError
+from substrateinterface.exceptions import SubstrateRequestException
+
+from bittensor.core.settings import NETWORK_EXPLORER_MAP
+from bittensor.utils import (
+ format_error_message,
+ get_explorer_url_for_network,
+ is_valid_bittensor_address_or_public_key,
+)
+from bittensor.utils.balance import Balance
+from bittensor.utils.btlogging import logging
+
+if TYPE_CHECKING:
+ from bittensor.core.async_subtensor import AsyncSubtensor
+
+
+async def transfer_extrinsic(
+ subtensor: "AsyncSubtensor",
+ wallet: Wallet,
+ destination: str,
+ amount: "Balance",
+ transfer_all: bool = False,
+ wait_for_inclusion: bool = True,
+ wait_for_finalization: bool = False,
+ keep_alive: bool = True,
+) -> bool:
+ """Transfers funds from this wallet to the destination public key address.
+
+ Args:
+ subtensor (bittensor.core.async_subtensor.AsyncSubtensor): initialized AsyncSubtensor object used for transfer
+ wallet (bittensor_wallet.Wallet): Bittensor wallet object to make transfer from.
+ destination (str): Destination public key address (ss58_address or ed25519) of recipient.
+ amount (bittensor.utils.balance.Balance): Amount to stake as Bittensor balance.
+ transfer_all (bool): Whether to transfer all funds from this wallet to the destination address.
+ wait_for_inclusion (bool): If set, waits for the extrinsic to enter a block before returning `True`, or returns `False` if the extrinsic fails to enter the block within the timeout.
+ wait_for_finalization (bool): If set, waits for the extrinsic to be finalized on the chain before returning `True`, or returns `False` if the extrinsic fails to be finalized within the timeout.
+ keep_alive (bool): If set, keeps the account alive by keeping the balance above the existential deposit.
+
+ Returns:
+ success (bool): Flag is `True` if extrinsic was finalized or included in the block. If we did not wait for finalization / inclusion, the response is `True`, regardless of its inclusion.
+ """
+
+ async def get_transfer_fee() -> Balance:
+ """
+ Calculates the transaction fee for transferring tokens from a wallet to a specified destination address.
+ This function simulates the transfer to estimate the associated cost, taking into account the current
+ network conditions and transaction complexity.
+ """
+ call = await subtensor.substrate.compose_call(
+ call_module="Balances",
+ call_function="transfer_allow_death",
+ call_params={"dest": destination, "value": amount.rao},
+ )
+
+ try:
+ payment_info = await subtensor.substrate.get_payment_info(
+ call=call, keypair=wallet.coldkeypub
+ )
+ except SubstrateRequestException as e:
+ payment_info = {"partialFee": int(2e7)} # assume 0.02 Tao
+ logging.error(f":cross_mark: Failed to get payment info:")
+ logging.error(f"\t\t{format_error_message(e, subtensor.substrate)}")
+ logging.error(
+ f"\t\tDefaulting to default transfer fee: {payment_info['partialFee']}"
+ )
+
+ return Balance.from_rao(payment_info["partialFee"])
+
+ async def do_transfer() -> tuple[bool, str, str]:
+ """
+ Makes transfer from wallet to destination public key address.
+
+ Returns:
+ success, block hash, formatted error message
+ """
+ call = await subtensor.substrate.compose_call(
+ call_module="Balances",
+ call_function="transfer_allow_death",
+ call_params={"dest": destination, "value": amount.rao},
+ )
+ extrinsic = await subtensor.substrate.create_signed_extrinsic(
+ call=call, keypair=wallet.coldkey
+ )
+ response = await subtensor.substrate.submit_extrinsic(
+ extrinsic,
+ wait_for_inclusion=wait_for_inclusion,
+ wait_for_finalization=wait_for_finalization,
+ )
+ # We only wait here if we expect finalization.
+ if not wait_for_finalization and not wait_for_inclusion:
+ return True, "", ""
+
+ # Otherwise continue with finalization.
+ await response.process_events()
+ if await response.is_success:
+ block_hash_ = response.block_hash
+ return True, block_hash_, ""
+ else:
+ return (
+ False,
+ "",
+ format_error_message(
+ await response.error_message, substrate=subtensor.substrate
+ ),
+ )
+
+ # Validate destination address.
+ if not is_valid_bittensor_address_or_public_key(destination):
+ logging.error(
+ f":cross_mark: Invalid destination SS58 address:[bold white]\n {destination}[/bold white]"
+ )
+ return False
+ logging.info(f"Initiating transfer on network: {subtensor.network}")
+ # Unlock wallet coldkey.
+ try:
+ wallet.unlock_coldkey()
+ except KeyFileError:
+ logging.error("Error decrypting coldkey (possibly incorrect password)")
+ return False
+
+ # Check balance.
+ logging.info(
+ f":satellite: Checking balance and fees on chain {subtensor.network}"
+ )
+ # check existential deposit and fee
+ logging.debug("Fetching existential and fee")
+ block_hash = await subtensor.substrate.get_chain_head()
+ account_balance_, existential_deposit = await asyncio.gather(
+ subtensor.get_balance(wallet.coldkeypub.ss58_address, block_hash=block_hash),
+ subtensor.get_existential_deposit(block_hash=block_hash),
+ )
+ account_balance = account_balance_[wallet.coldkeypub.ss58_address]
+ fee = await get_transfer_fee()
+
+ if not keep_alive:
+ # Check if the transfer should keep_alive the account
+ existential_deposit = Balance(0)
+
+ # Check if we have enough balance.
+ if transfer_all is True:
+ amount = account_balance - fee - existential_deposit
+ if amount < Balance(0):
+ logging.error("Not enough balance to transfer")
+ return False
+
+ if account_balance < (amount + fee + existential_deposit):
+ logging.error(":cross_mark: Not enough balance")
+ logging.error(f"\t\tBalance:\t{account_balance}")
+ logging.error(f"\t\tAmount:\t{amount}")
+ logging.error(f"\t\tFor fee:\t{fee}")
+ return False
+
+ logging.info(":satellite: Transferring...")
+ logging.info(f"[green]Block Hash: {block_hash}")
+
+ if subtensor.network == "finney":
+ logging.debug("Fetching explorer URLs")
+ explorer_urls = get_explorer_url_for_network(
+ subtensor.network, block_hash, NETWORK_EXPLORER_MAP
+ )
+ if explorer_urls != {} and explorer_urls:
+ logging.info(
+ f"[green]Opentensor Explorer Link: {explorer_urls.get('opentensor')}"
+ )
+ logging.info(
+ f"[green]Taostats Explorer Link: {explorer_urls.get('taostats')}"
+ )
+ else:
+ logging.error(f":cross_mark: Failed: {err_msg}")
+
+ if success:
+ logging.info(":satellite: Checking Balance...")
+ new_balance = await subtensor.get_balance(
+ wallet.coldkeypub.ss58_address, reuse_block=False
+ )
+ logging.info(
+ f"Balance: [blue]{account_balance} :arrow_right: [green]{new_balance[wallet.coldkeypub.ss58_address]}"
+ )
+ return True
+
+ return False
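A short, hypothetical usage sketch; `Balance.from_tao` is assumed available from `bittensor.utils.balance` (the module already imports `Balance` and uses `Balance.from_rao`):

    from bittensor.utils.balance import Balance

    async def send_one_tao(subtensor, wallet, dest_ss58: str) -> bool:
        # Transfers 1 TAO, waiting for inclusion (the default above).
        return await transfer_extrinsic(
            subtensor=subtensor,
            wallet=wallet,
            destination=dest_ss58,
            amount=Balance.from_tao(1.0),
        )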
diff --git a/bittensor/core/extrinsics/async_weights.py b/bittensor/core/extrinsics/async_weights.py
new file mode 100644
index 0000000000..926ce94c2c
--- /dev/null
+++ b/bittensor/core/extrinsics/async_weights.py
@@ -0,0 +1,256 @@
+"""This module provides functionality for setting weights on the Bittensor network."""
+
+from typing import Union, TYPE_CHECKING, Optional
+
+import numpy as np
+from numpy.typing import NDArray
+
+import bittensor.utils.weight_utils as weight_utils
+from bittensor.core.settings import version_as_int
+from bittensor.utils import format_error_message
+from bittensor.utils.btlogging import logging
+from bittensor.utils.registration import torch, use_torch
+
+if TYPE_CHECKING:
+ from bittensor_wallet import Wallet
+ from bittensor.core.async_subtensor import AsyncSubtensor
+
+
+async def _do_set_weights(
+ subtensor: "AsyncSubtensor",
+ wallet: "Wallet",
+ uids: list[int],
+ vals: list[int],
+ netuid: int,
+ version_key: int = version_as_int,
+ wait_for_inclusion: bool = False,
+ wait_for_finalization: bool = False,
+) -> tuple[bool, Optional[str]]: # (success, error_message)
+ """
+ Internal method to send a transaction to the Bittensor blockchain, setting weights
+ for specified neurons. This method constructs and submits the transaction, handling
+ retries and blockchain communication.
+
+ Args:
+        subtensor (bittensor.core.async_subtensor.AsyncSubtensor): Async Subtensor instance.
+        wallet (bittensor_wallet.Wallet): The wallet associated with the neuron setting the weights.
+ uids (List[int]): List of neuron UIDs for which weights are being set.
+ vals (List[int]): List of weight values corresponding to each UID.
+ netuid (int): Unique identifier for the network.
+ version_key (int, optional): Version key for compatibility with the network.
+ wait_for_inclusion (bool, optional): Waits for the transaction to be included in a block.
+ wait_for_finalization (bool, optional): Waits for the transaction to be finalized on the blockchain.
+
+ Returns:
+ Tuple[bool, Optional[str]]: A tuple containing a success flag and an optional error message.
+
+ This method is vital for the dynamic weighting mechanism in Bittensor, where neurons adjust their
+ trust in other neurons based on observed performance and contributions.
+ """
+
+ call = await subtensor.substrate.compose_call(
+ call_module="SubtensorModule",
+ call_function="set_weights",
+ call_params={
+ "dests": uids,
+ "weights": vals,
+ "netuid": netuid,
+ "version_key": version_key,
+ },
+ )
+ # Period dictates how long the extrinsic will stay as part of waiting pool
+ extrinsic = await subtensor.substrate.create_signed_extrinsic(
+ call=call,
+ keypair=wallet.hotkey,
+ era={"period": 5},
+ )
+ response = await subtensor.substrate.submit_extrinsic(
+ extrinsic,
+ wait_for_inclusion=wait_for_inclusion,
+ wait_for_finalization=wait_for_finalization,
+ )
+ # We only wait here if we expect finalization.
+ if not wait_for_finalization and not wait_for_inclusion:
+ return True, "Not waiting for finalization or inclusion."
+
+ await response.process_events()
+ if await response.is_success:
+ return True, "Successfully set weights."
+ else:
+        return False, format_error_message(
+            await response.error_message, substrate=subtensor.substrate
+        )
+
+
+async def set_weights_extrinsic(
+ subtensor: "AsyncSubtensor",
+ wallet: "Wallet",
+ netuid: int,
+ uids: Union[NDArray[np.int64], "torch.LongTensor", list],
+ weights: Union[NDArray[np.float32], "torch.FloatTensor", list],
+ version_key: int = 0,
+ wait_for_inclusion: bool = False,
+ wait_for_finalization: bool = False,
+) -> tuple[bool, str]:
+ """Sets the given weights and values on chain for wallet hotkey account.
+
+ Args:
+        subtensor (bittensor.core.async_subtensor.AsyncSubtensor): AsyncSubtensor instance.
+        wallet (bittensor_wallet.Wallet): Bittensor wallet object.
+ netuid (int): The ``netuid`` of the subnet to set weights for.
+ uids (Union[NDArray[np.int64], torch.LongTensor, list]): The ``uint64`` uids of destination neurons.
+ weights (Union[NDArray[np.float32], torch.FloatTensor, list]): The weights to set. These must be ``float`` s and correspond to the passed ``uid`` s.
+ version_key (int): The version key of the validator.
+ wait_for_inclusion (bool): If set, waits for the extrinsic to enter a block before returning ``true``, or returns ``false`` if the extrinsic fails to enter the block within the timeout.
+ wait_for_finalization (bool): If set, waits for the extrinsic to be finalized on the chain before returning ``true``, or returns ``false`` if the extrinsic fails to be finalized within the timeout.
+
+ Returns:
+ success (bool): Flag is ``true`` if extrinsic was finalized or included in the block. If we did not wait for finalization / inclusion, the response is ``true``.
+ """
+ # First convert types.
+ if use_torch():
+ if isinstance(uids, list):
+ uids = torch.tensor(uids, dtype=torch.int64)
+ if isinstance(weights, list):
+ weights = torch.tensor(weights, dtype=torch.float32)
+ else:
+ if isinstance(uids, list):
+ uids = np.array(uids, dtype=np.int64)
+ if isinstance(weights, list):
+ weights = np.array(weights, dtype=np.float32)
+
+ # Reformat and normalize.
+ weight_uids, weight_vals = weight_utils.convert_weights_and_uids_for_emit(
+ uids, weights
+ )
+
+    logging.info(
+        f":satellite: Setting weights on {subtensor.network} ..."
+    )
+ try:
+ success, error_message = await _do_set_weights(
+ subtensor=subtensor,
+ wallet=wallet,
+ netuid=netuid,
+ uids=weight_uids,
+ vals=weight_vals,
+ version_key=version_key,
+ wait_for_finalization=wait_for_finalization,
+ wait_for_inclusion=wait_for_inclusion,
+ )
+
+ if not wait_for_finalization and not wait_for_inclusion:
+ return True, "Not waiting for finalization or inclusion."
+
+ if success is True:
+ message = "Successfully set weights and Finalized."
+ logging.success(f":white_heavy_check_mark: {message}")
+ return True, message
+ else:
+ logging.error(f"Failed set weights. Error: {error_message}")
+ return False, error_message
+
+ except Exception as error:
+ logging.error(f":cross_mark: Failed set weights. Error: {error}")
+ return False, str(error)
+
+
+async def _do_commit_weights(
+ subtensor: "AsyncSubtensor",
+ wallet: "Wallet",
+ netuid: int,
+ commit_hash: str,
+ wait_for_inclusion: bool = False,
+ wait_for_finalization: bool = False,
+) -> tuple[bool, Optional[str]]:
+ """
+ Internal method to send a transaction to the Bittensor blockchain, committing the hash of a neuron's weights.
+ This method constructs and submits the transaction, handling retries and blockchain communication.
+
+ Args:
+        subtensor (bittensor.core.async_subtensor.AsyncSubtensor): The async subtensor instance used for blockchain interaction.
+ wallet (bittensor_wallet.Wallet): The wallet associated with the neuron committing the weights.
+ netuid (int): The unique identifier of the subnet.
+ commit_hash (str): The hash of the neuron's weights to be committed.
+ wait_for_inclusion (bool): Waits for the transaction to be included in a block.
+ wait_for_finalization (bool): Waits for the transaction to be finalized on the blockchain.
+
+ Returns:
+ tuple[bool, Optional[str]]: A tuple containing a success flag and an optional error message.
+
+ This method ensures that the weight commitment is securely recorded on the Bittensor blockchain, providing a verifiable record of the neuron's weight distribution at a specific point in time.
+ """
+ call = await subtensor.substrate.compose_call(
+ call_module="SubtensorModule",
+ call_function="commit_weights",
+ call_params={
+ "netuid": netuid,
+ "commit_hash": commit_hash,
+ },
+ )
+ extrinsic = await subtensor.substrate.create_signed_extrinsic(
+ call=call,
+ keypair=wallet.hotkey,
+ )
+    response = await subtensor.substrate.submit_extrinsic(
+        extrinsic,
+        wait_for_inclusion=wait_for_inclusion,
+        wait_for_finalization=wait_for_finalization,
+    )
+
+ if not wait_for_finalization and not wait_for_inclusion:
+ return True, None
+
+ await response.process_events()
+ if await response.is_success:
+ return True, None
+ else:
+        return False, format_error_message(
+            await response.error_message, substrate=subtensor.substrate
+        )
+
+
+async def commit_weights_extrinsic(
+ subtensor: "AsyncSubtensor",
+ wallet: "Wallet",
+ netuid: int,
+ commit_hash: str,
+ wait_for_inclusion: bool = False,
+ wait_for_finalization: bool = False,
+) -> tuple[bool, str]:
+ """
+ Commits a hash of the neuron's weights to the Bittensor blockchain using the provided wallet.
+    This function is a wrapper around the `_do_commit_weights` method.
+
+ Args:
+        subtensor (bittensor.core.async_subtensor.AsyncSubtensor): The async subtensor instance used for blockchain interaction.
+ wallet (bittensor_wallet.Wallet): The wallet associated with the neuron committing the weights.
+ netuid (int): The unique identifier of the subnet.
+ commit_hash (str): The hash of the neuron's weights to be committed.
+ wait_for_inclusion (bool): Waits for the transaction to be included in a block.
+ wait_for_finalization (bool): Waits for the transaction to be finalized on the blockchain.
+
+ Returns:
+        tuple[bool, str]: ``True`` if the weight commitment is successful, ``False`` otherwise, plus a message describing the success or potential error.
+
+ This function provides a user-friendly interface for committing weights to the Bittensor blockchain, ensuring proper error handling and user interaction when required.
+ """
+
+ success, error_message = await _do_commit_weights(
+ subtensor=subtensor,
+ wallet=wallet,
+ netuid=netuid,
+ commit_hash=commit_hash,
+ wait_for_inclusion=wait_for_inclusion,
+ wait_for_finalization=wait_for_finalization,
+ )
+
+ if success:
+ success_message = "Successfully committed weights."
+ logging.info(success_message)
+ return True, success_message
+ else:
+ logging.error(f"Failed to commit weights: {error_message}")
+ return False, error_message
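As a rough usage sketch for this module (illustrative uids and weights; the subtensor and wallet are assumed connected and unlocked):

    import numpy as np

    async def publish_weights(subtensor, wallet, netuid: int) -> tuple[bool, str]:
        uids = np.array([0, 1, 2], dtype=np.int64)              # illustrative neuron uids
        weights = np.array([0.2, 0.3, 0.5], dtype=np.float32)   # normalized on emit
        return await set_weights_extrinsic(
            subtensor=subtensor,
            wallet=wallet,
            netuid=netuid,
            uids=uids,
            weights=weights,
            wait_for_inclusion=True,
        )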
diff --git a/bittensor/core/extrinsics/commit_weights.py b/bittensor/core/extrinsics/commit_weights.py
index 5e9f2e9e19..c53a527ea2 100644
--- a/bittensor/core/extrinsics/commit_weights.py
+++ b/bittensor/core/extrinsics/commit_weights.py
@@ -20,7 +20,6 @@
from typing import Optional, TYPE_CHECKING
from retry import retry
-from rich.prompt import Confirm
from bittensor.core.extrinsics.utils import submit_extrinsic
from bittensor.utils import format_error_message
@@ -33,7 +32,7 @@
from bittensor.core.subtensor import Subtensor
-# # Chain call for `commit_weights_extrinsic`
+# Chain call for `commit_weights_extrinsic`
@ensure_connected
def do_commit_weights(
self: "Subtensor",
@@ -101,11 +100,10 @@ def commit_weights_extrinsic(
commit_hash: str,
wait_for_inclusion: bool = False,
wait_for_finalization: bool = False,
- prompt: bool = False,
) -> tuple[bool, str]:
"""
Commits a hash of the neuron's weights to the Bittensor blockchain using the provided wallet.
- This function is a wrapper around the `do_commit_weights` method, handling user prompts and error messages.
+ This function is a wrapper around the `do_commit_weights` method.
Args:
subtensor (bittensor.core.subtensor.Subtensor): The subtensor instance used for blockchain interaction.
@@ -114,16 +112,12 @@ def commit_weights_extrinsic(
commit_hash (str): The hash of the neuron's weights to be committed.
wait_for_inclusion (bool): Waits for the transaction to be included in a block.
wait_for_finalization (bool): Waits for the transaction to be finalized on the blockchain.
- prompt (bool): If ``True``, prompts for user confirmation before proceeding.
Returns:
- tuple[bool, str]: ``True`` if the weight commitment is successful, False otherwise. And `msg`, a string
- value describing the success or potential error.
+ tuple[bool, str]: ``True`` if the weight commitment is successful, ``False`` otherwise, plus a message describing the success or potential error.
This function provides a user-friendly interface for committing weights to the Bittensor blockchain, ensuring proper error handling and user interaction when required.
"""
- if prompt and not Confirm.ask(f"Would you like to commit weights?"):
- return False, "User cancelled the operation."
success, error_message = do_commit_weights(
self=subtensor,
@@ -139,7 +133,9 @@ def commit_weights_extrinsic(
logging.info(success_message)
return True, success_message
else:
- error_message = format_error_message(error_message)
+ error_message = format_error_message(
+ error_message, substrate=subtensor.substrate
+ )
logging.error(f"Failed to commit weights: {error_message}")
return False, error_message
@@ -224,11 +220,10 @@ def reveal_weights_extrinsic(
version_key: int,
wait_for_inclusion: bool = False,
wait_for_finalization: bool = False,
- prompt: bool = False,
) -> tuple[bool, str]:
"""
Reveals the weights for a specific subnet on the Bittensor blockchain using the provided wallet.
- This function is a wrapper around the `_do_reveal_weights` method, handling user prompts and error messages.
+ This function is a wrapper around the `_do_reveal_weights` method.
Args:
subtensor (bittensor.core.subtensor.Subtensor): The subtensor instance used for blockchain interaction.
@@ -240,18 +235,13 @@ def reveal_weights_extrinsic(
version_key (int): Version key for compatibility with the network.
wait_for_inclusion (bool): Waits for the transaction to be included in a block.
wait_for_finalization (bool): Waits for the transaction to be finalized on the blockchain.
- prompt (bool): If ``True``, prompts for user confirmation before proceeding.
Returns:
- tuple[bool, str]: ``True`` if the weight revelation is successful, False otherwise. And `msg`, a string
- value describing the success or potential error.
+ tuple[bool, str]: ``True`` if the weight revelation is successful, ``False`` otherwise, plus a message describing the success or potential error.
This function provides a user-friendly interface for revealing weights on the Bittensor blockchain, ensuring proper error handling and user interaction when required.
"""
- if prompt and not Confirm.ask(f"Would you like to reveal weights?"):
- return False, "User cancelled the operation."
-
success, error_message = do_reveal_weights(
self=subtensor,
wallet=wallet,
@@ -269,6 +259,8 @@ def reveal_weights_extrinsic(
logging.info(success_message)
return True, success_message
else:
- error_message = format_error_message(error_message)
+ error_message = format_error_message(
+ error_message, substrate=subtensor.substrate
+ )
logging.error(f"Failed to reveal weights: {error_message}")
return False, error_message
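With the `prompt` flag removed, a caller sketch for the sync commit path looks like the following; `subtensor`, `wallet`, and `my_commit_hash` are assumed to exist (the hash is produced elsewhere, e.g. from the weights being committed):

    ok, msg = commit_weights_extrinsic(
        subtensor=subtensor,
        wallet=wallet,
        netuid=1,                    # illustrative subnet id
        commit_hash=my_commit_hash,  # hash of the weights to commit
        wait_for_inclusion=True,
    )
    print(msg)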
diff --git a/bittensor/core/extrinsics/prometheus.py b/bittensor/core/extrinsics/prometheus.py
index a6ab1cfb16..e69de29bb2 100644
--- a/bittensor/core/extrinsics/prometheus.py
+++ b/bittensor/core/extrinsics/prometheus.py
@@ -1,187 +0,0 @@
-# The MIT License (MIT)
-# Copyright © 2024 Opentensor Foundation
-#
-# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
-# documentation files (the “Software”), to deal in the Software without restriction, including without limitation
-# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
-# and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
-#
-# The above copyright notice and this permission notice shall be included in all copies or substantial portions of
-# the Software.
-#
-# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
-# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
-# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-# DEALINGS IN THE SOFTWARE.
-
-import json
-from typing import Optional, TYPE_CHECKING
-
-from retry import retry
-
-from bittensor.core.extrinsics.utils import submit_extrinsic
-from bittensor.core.settings import version_as_int, bt_console
-from bittensor.utils import networking as net, format_error_message
-from bittensor.utils.btlogging import logging
-from bittensor.utils.networking import ensure_connected
-
-# For annotation purposes
-if TYPE_CHECKING:
- from bittensor_wallet import Wallet
- from bittensor.core.subtensor import Subtensor
- from bittensor.core.types import PrometheusServeCallParams
-
-
-# Chain call for `prometheus_extrinsic`
-@ensure_connected
-def do_serve_prometheus(
- self: "Subtensor",
- wallet: "Wallet",
- call_params: "PrometheusServeCallParams",
- wait_for_inclusion: bool = False,
- wait_for_finalization: bool = True,
-) -> tuple[bool, Optional[dict]]:
- """
- Sends a serve prometheus extrinsic to the chain.
-
- Args:
- self (bittensor.core.subtensor.Subtensor): Bittensor subtensor object
- wallet (bittensor_wallet.Wallet): Wallet object.
- call_params (bittensor.core.types.PrometheusServeCallParams): Prometheus serve call parameters.
- wait_for_inclusion (bool): If ``true``, waits for inclusion.
- wait_for_finalization (bool): If ``true``, waits for finalization.
-
- Returns:
- success (bool): ``True`` if serve prometheus was successful.
- error (Optional[str]): Error message if serve prometheus failed, ``None`` otherwise.
- """
-
- @retry(delay=1, tries=3, backoff=2, max_delay=4)
- def make_substrate_call_with_retry():
- call = self.substrate.compose_call(
- call_module="SubtensorModule",
- call_function="serve_prometheus",
- call_params=call_params,
- )
- extrinsic = self.substrate.create_signed_extrinsic(
- call=call, keypair=wallet.hotkey
- )
- response = submit_extrinsic(
- substrate=self.substrate,
- extrinsic=extrinsic,
- wait_for_inclusion=wait_for_inclusion,
- wait_for_finalization=wait_for_finalization,
- )
- if wait_for_inclusion or wait_for_finalization:
- response.process_events()
- if response.is_success:
- return True, None
- else:
- return False, response.error_message
- else:
- return True, None
-
- return make_substrate_call_with_retry()
-
-
-def prometheus_extrinsic(
- subtensor: "Subtensor",
- wallet: "Wallet",
- port: int,
- netuid: int,
- ip: int = None,
- wait_for_inclusion: bool = False,
- wait_for_finalization=True,
-) -> bool:
- """Subscribes a Bittensor endpoint to the Subtensor chain.
-
- Args:
- subtensor (bittensor.core.subtensor.Subtensor): Bittensor subtensor object.
- wallet (bittensor_wallet.Wallet): Bittensor wallet object.
- ip (str): Endpoint host port i.e., ``192.122.31.4``.
- port (int): Endpoint port number i.e., `9221`.
- netuid (int): Network `uid` to serve on.
- wait_for_inclusion (bool): If set, waits for the extrinsic to enter a block before returning ``true``, or returns ``false`` if the extrinsic fails to enter the block within the timeout.
- wait_for_finalization (bool): If set, waits for the extrinsic to be finalized on the chain before returning ``true``, or returns ``false`` if the extrinsic fails to be finalized within the timeout.
-
- Returns:
- success (bool): Flag is ``true`` if extrinsic was finalized or uncluded in the block. If we did not wait for finalization / inclusion, the response is ``true``.
- """
-
- # Get external ip
- if ip is None:
- try:
- external_ip = net.get_external_ip()
- bt_console.print(
- f":white_heavy_check_mark: [green]Found external ip: {external_ip}[/green]"
- )
- logging.success(prefix="External IP", suffix="{external_ip}")
- except Exception as e:
- raise RuntimeError(
- f"Unable to attain your external ip. Check your internet connection. error: {e}"
- ) from e
- else:
- external_ip = ip
-
- call_params: "PrometheusServeCallParams" = {
- "version": version_as_int,
- "ip": net.ip_to_int(external_ip),
- "port": port,
- "ip_type": net.ip_version(external_ip),
- }
-
- with bt_console.status(":satellite: Checking Prometheus..."):
- neuron = subtensor.get_neuron_for_pubkey_and_subnet(
- wallet.hotkey.ss58_address, netuid=netuid
- )
- neuron_up_to_date = not neuron.is_null and call_params == {
- "version": neuron.prometheus_info.version,
- "ip": net.ip_to_int(neuron.prometheus_info.ip),
- "port": neuron.prometheus_info.port,
- "ip_type": neuron.prometheus_info.ip_type,
- }
-
- if neuron_up_to_date:
- bt_console.print(
- f":white_heavy_check_mark: [green]Prometheus already Served[/green]\n"
- f"[green not bold]- Status: [/green not bold] |"
- f"[green not bold] ip: [/green not bold][white not bold]{neuron.prometheus_info.ip}[/white not bold] |"
- f"[green not bold] ip_type: [/green not bold][white not bold]{neuron.prometheus_info.ip_type}[/white not bold] |"
- f"[green not bold] port: [/green not bold][white not bold]{neuron.prometheus_info.port}[/white not bold] | "
- f"[green not bold] version: [/green not bold][white not bold]{neuron.prometheus_info.version}[/white not bold] |"
- )
-
- bt_console.print(
- f":white_heavy_check_mark: [white]Prometheus already served.[/white]"
- )
- return True
-
- # Add netuid, not in prometheus_info
- call_params["netuid"] = netuid
-
- with bt_console.status(
- f":satellite: Serving prometheus on: [white]{subtensor.network}:{netuid}[/white] ..."
- ):
- success, error_message = do_serve_prometheus(
- self=subtensor,
- wallet=wallet,
- call_params=call_params,
- wait_for_finalization=wait_for_finalization,
- wait_for_inclusion=wait_for_inclusion,
- )
-
- if wait_for_inclusion or wait_for_finalization:
- if success is True:
- json_ = json.dumps(call_params, indent=4, sort_keys=True)
- bt_console.print(
- f":white_heavy_check_mark: [green]Served prometheus[/green]\n [bold white]{json_}[/bold white]"
- )
- return True
- else:
- bt_console.print(
- f":cross_mark: [red]Failed[/red]: {format_error_message(error_message)}"
- )
- return False
- else:
- return True
diff --git a/bittensor/core/extrinsics/registration.py b/bittensor/core/extrinsics/registration.py
new file mode 100644
index 0000000000..de38869a80
--- /dev/null
+++ b/bittensor/core/extrinsics/registration.py
@@ -0,0 +1,421 @@
+# The MIT License (MIT)
+# Copyright © 2024 Opentensor Foundation
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
+# documentation files (the “Software”), to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
+# and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all copies or substantial portions of
+# the Software.
+#
+# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
+# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+# DEALINGS IN THE SOFTWARE.
+
+import time
+from typing import Union, Optional, TYPE_CHECKING
+
+from bittensor_wallet.errors import KeyFileError
+from retry import retry
+
+from bittensor.utils import format_error_message
+from bittensor.utils.btlogging import logging
+from bittensor.utils.networking import ensure_connected
+from bittensor.utils.registration import (
+ POWSolution,
+ create_pow,
+ torch,
+ log_no_torch_error,
+)
+
+# For annotation purposes
+if TYPE_CHECKING:
+ from bittensor.core.subtensor import Subtensor
+ from bittensor_wallet import Wallet
+
+
+@ensure_connected
+def _do_pow_register(
+ self: "Subtensor",
+ netuid: int,
+ wallet: "Wallet",
+ pow_result: "POWSolution",
+ wait_for_inclusion: bool = False,
+ wait_for_finalization: bool = True,
+) -> tuple[bool, Optional[str]]:
+ """Sends a (POW) register extrinsic to the chain.
+
+ Args:
+ netuid (int): The subnet to register on.
+ wallet (bittensor.wallet): The wallet to register.
+ pow_result (POWSolution): The PoW result to register.
+        wait_for_inclusion (bool): If ``True``, waits for the extrinsic to be included in a block. Defaults to ``False``.
+        wait_for_finalization (bool): If ``True``, waits for the extrinsic to be finalized. Defaults to ``True``.
+
+ Returns:
+ success (bool): ``True`` if the extrinsic was included in a block.
+ error (Optional[str]): ``None`` on success or not waiting for inclusion/finalization, otherwise the error message.
+ """
+
+ @retry(delay=1, tries=3, backoff=2, max_delay=4)
+ def make_substrate_call_with_retry():
+ # create extrinsic call
+ call = self.substrate.compose_call(
+ call_module="SubtensorModule",
+ call_function="register",
+ call_params={
+ "netuid": netuid,
+ "block_number": pow_result.block_number,
+ "nonce": pow_result.nonce,
+ "work": [int(byte_) for byte_ in pow_result.seal],
+ "hotkey": wallet.hotkey.ss58_address,
+ "coldkey": wallet.coldkeypub.ss58_address,
+ },
+ )
+ extrinsic = self.substrate.create_signed_extrinsic(
+ call=call, keypair=wallet.hotkey
+ )
+ response = self.substrate.submit_extrinsic(
+ extrinsic,
+ wait_for_inclusion=wait_for_inclusion,
+ wait_for_finalization=wait_for_finalization,
+ )
+
+ # We only wait here if we expect finalization.
+ if not wait_for_finalization and not wait_for_inclusion:
+ return True, None
+
+ # process if registration successful, try again if pow is still valid
+ response.process_events()
+ if not response.is_success:
+ return False, format_error_message(
+ response.error_message, substrate=self.substrate
+ )
+ # Successful registration
+ else:
+ return True, None
+
+ return make_substrate_call_with_retry()
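The `@retry(delay=1, tries=3, backoff=2, max_delay=4)` decorator used here gives three attempts with exponential backoff: roughly one second after the first failure, then two (the four-second cap is never reached with these settings). A standalone sketch of the same pattern, with a placeholder body:

    from retry import retry
    from substrateinterface.exceptions import SubstrateRequestException

    @retry(delay=1, tries=3, backoff=2, max_delay=4)
    def flaky_chain_call() -> None:
        # Compose, sign, and submit an extrinsic here; transient RPC
        # failures propagate and trigger the next retry attempt.
        raise SubstrateRequestException("illustrative transient failure")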
+
+
+def register_extrinsic(
+ subtensor: "Subtensor",
+ wallet: "Wallet",
+ netuid: int,
+ wait_for_inclusion: bool = False,
+ wait_for_finalization: bool = True,
+ max_allowed_attempts: int = 3,
+ output_in_place: bool = True,
+ cuda: bool = False,
+ dev_id: Union[list[int], int] = 0,
+ tpb: int = 256,
+ num_processes: Optional[int] = None,
+ update_interval: Optional[int] = None,
+ log_verbose: bool = False,
+) -> bool:
+ """Registers the wallet to the chain.
+
+ Args:
+ subtensor (bittensor.core.subtensor.Subtensor): Subtensor interface.
+ wallet (bittensor.wallet): Bittensor wallet object.
+ netuid (int): The ``netuid`` of the subnet to register on.
+ wait_for_inclusion (bool): If set, waits for the extrinsic to enter a block before returning ``true``, or returns ``false`` if the extrinsic fails to enter the block within the timeout.
+ wait_for_finalization (bool): If set, waits for the extrinsic to be finalized on the chain before returning ``true``, or returns ``false`` if the extrinsic fails to be finalized within the timeout.
+ max_allowed_attempts (int): Maximum number of attempts to register the wallet.
+ output_in_place (bool): If true, prints the progress of the proof of work to the console in-place. Meaning the progress is printed on the same lines. Defaults to `True`.
+ cuda (bool): If ``true``, the wallet should be registered using CUDA device(s).
+ dev_id (Union[List[int], int]): The CUDA device id to use, or a list of device ids.
+ tpb (int): The number of threads per block (CUDA).
+ num_processes (int): The number of processes to use to register.
+ update_interval (int): The number of nonces to solve between updates.
+ log_verbose (bool): If ``true``, the registration process will log more information.
+
+ Returns:
+        success (bool): Flag is ``true`` if extrinsic was finalized or included in the block. If we did not wait for finalization / inclusion, the response is ``true``.
+ """
+ if not subtensor.subnet_exists(netuid):
+ logging.error(
+ f":cross_mark: Failed: Subnet {netuid} does not exist."
+ )
+ return False
+
+ logging.info(
+ f":satellite: Checking Account on subnet {netuid}..."
+ )
+ neuron = subtensor.get_neuron_for_pubkey_and_subnet(
+ wallet.hotkey.ss58_address, netuid=netuid
+ )
+ if not neuron.is_null:
+ logging.debug(
+ f"Wallet {wallet} is already registered on {neuron.netuid} with {neuron.uid}."
+ )
+ return True
+
+ logging.debug(
+ f"Registration hotkey: {wallet.hotkey.ss58_address}, Public coldkey: {wallet.coldkey.ss58_address} in the network: {subtensor.network}."
+ )
+
+ if not torch:
+ log_no_torch_error()
+ return False
+
+ # Attempt rolling registration.
+ attempts = 1
+ while True:
+ logging.info(
+ f":satellite: Registering... ({attempts}/{max_allowed_attempts})"
+ )
+ # Solve latest POW.
+ if cuda:
+ if not torch.cuda.is_available():
+ return False
+ pow_result: Optional[POWSolution] = create_pow(
+ subtensor,
+ wallet,
+ netuid,
+ output_in_place,
+ cuda=cuda,
+ dev_id=dev_id,
+ tpb=tpb,
+ num_processes=num_processes,
+ update_interval=update_interval,
+ log_verbose=log_verbose,
+ )
+ else:
+ pow_result: Optional[POWSolution] = create_pow(
+ subtensor,
+ wallet,
+ netuid,
+ output_in_place,
+ cuda=cuda,
+ num_processes=num_processes,
+ update_interval=update_interval,
+ log_verbose=log_verbose,
+ )
+
+ # pow failed
+ if not pow_result:
+ # might be registered already on this subnet
+ is_registered = subtensor.is_hotkey_registered(
+ netuid=netuid, hotkey_ss58=wallet.hotkey.ss58_address
+ )
+ if is_registered:
+ logging.info(
+ f":white_heavy_check_mark: Already registered on netuid: {netuid}."
+ )
+ return True
+
+ # pow successful, proceed to submit pow to chain for registration
+ else:
+ logging.info(":satellite: Submitting POW...")
+ # check if pow result is still valid
+ while not pow_result.is_stale(subtensor=subtensor):
+ result: tuple[bool, Optional[str]] = _do_pow_register(
+ self=subtensor,
+ netuid=netuid,
+ wallet=wallet,
+ pow_result=pow_result,
+ wait_for_inclusion=wait_for_inclusion,
+ wait_for_finalization=wait_for_finalization,
+ )
+ success, err_msg = result
+
+ if not success:
+ # Look error here
+ # https://github.com/opentensor/subtensor/blob/development/pallets/subtensor/src/errors.rs
+ if "HotKeyAlreadyRegisteredInSubNet" in err_msg:
+ logging.info(
+ f":white_heavy_check_mark: Already Registered on subnet {netuid}."
+ )
+ return True
+
+ logging.error(f":cross_mark: Failed: {err_msg}")
+ time.sleep(0.5)
+
+ # Successful registration, final check for neuron and pubkey
+ else:
+ logging.info(":satellite: Checking Balance...")
+ is_registered = subtensor.is_hotkey_registered(
+ hotkey_ss58=wallet.hotkey.ss58_address,
+ netuid=netuid,
+ )
+ if is_registered:
+ logging.info(
+ ":white_heavy_check_mark: Registered"
+ )
+ return True
+ else:
+ # neuron not found, try again
+ logging.error(
+ ":cross_mark: Unknown error. Neuron not found."
+ )
+ continue
+ else:
+ # Exited loop because pow is no longer valid.
+ logging.error("POW is stale.")
+ # Try again.
+ continue
+
+ if attempts < max_allowed_attempts:
+ # Failed registration, retry pow
+ attempts += 1
+ logging.info(
+ f":satellite: Failed registration, retrying pow ... ({attempts}/{max_allowed_attempts})"
+ )
+ else:
+ # Failed to register after max attempts.
+ logging.error("No more attempts.")
+ return False
+
+
+@ensure_connected
+def _do_burned_register(
+ self,
+ netuid: int,
+ wallet: "Wallet",
+ wait_for_inclusion: bool = False,
+ wait_for_finalization: bool = True,
+) -> tuple[bool, Optional[str]]:
+ """
+ Performs a burned register extrinsic call to the Subtensor chain.
+
+ This method sends a registration transaction to the Subtensor blockchain using the burned register mechanism. It
+ retries the call up to three times with exponential backoff in case of failures.
+
+ Args:
+ self (bittensor.core.subtensor.Subtensor): Subtensor instance.
+ netuid (int): The network unique identifier to register on.
+ wallet (bittensor_wallet.Wallet): The wallet to be registered.
+ wait_for_inclusion (bool): Whether to wait for the transaction to be included in a block. Default is False.
+ wait_for_finalization (bool): Whether to wait for the transaction to be finalized. Default is True.
+
+ Returns:
+ Tuple[bool, Optional[str]]: A tuple containing a boolean indicating success or failure, and an optional error message.
+ """
+
+ @retry(delay=1, tries=3, backoff=2, max_delay=4)
+ def make_substrate_call_with_retry():
+ # create extrinsic call
+ call = self.substrate.compose_call(
+ call_module="SubtensorModule",
+ call_function="burned_register",
+ call_params={
+ "netuid": netuid,
+ "hotkey": wallet.hotkey.ss58_address,
+ },
+ )
+ extrinsic = self.substrate.create_signed_extrinsic(
+ call=call, keypair=wallet.coldkey
+ )
+ response = self.substrate.submit_extrinsic(
+ extrinsic,
+ wait_for_inclusion=wait_for_inclusion,
+ wait_for_finalization=wait_for_finalization,
+ )
+
+ # We only wait here if we expect finalization.
+ if not wait_for_finalization and not wait_for_inclusion:
+ return True, None
+
+        # process the registration result
+ response.process_events()
+ if not response.is_success:
+ return False, format_error_message(
+ response.error_message, substrate=self.substrate
+ )
+ # Successful registration
+ else:
+ return True, None
+
+ return make_substrate_call_with_retry()
+
+
+def burned_register_extrinsic(
+ subtensor: "Subtensor",
+ wallet: "Wallet",
+ netuid: int,
+ wait_for_inclusion: bool = False,
+ wait_for_finalization: bool = True,
+) -> bool:
+ """Registers the wallet to chain by recycling TAO.
+
+ Args:
+ subtensor (bittensor.core.subtensor.Subtensor): Subtensor instance.
+ wallet (bittensor.wallet): Bittensor wallet object.
+ netuid (int): The ``netuid`` of the subnet to register on.
+ wait_for_inclusion (bool): If set, waits for the extrinsic to enter a block before returning ``true``, or returns ``false`` if the extrinsic fails to enter the block within the timeout.
+ wait_for_finalization (bool): If set, waits for the extrinsic to be finalized on the chain before returning ``true``, or returns ``false`` if the extrinsic fails to be finalized within the timeout.
+
+ Returns:
+        success (bool): Flag is ``true`` if extrinsic was finalized or included in the block. If we did not wait for finalization / inclusion, the response is ``true``.
+ """
+ if not subtensor.subnet_exists(netuid):
+ logging.error(
+ f":cross_mark: Failed error: subnet {netuid} does not exist."
+ )
+ return False
+
+ try:
+ wallet.unlock_coldkey()
+ except KeyFileError:
+ logging.error(
+ ":cross_mark: Keyfile is corrupt, non-writable, non-readable or the password used to decrypt is invalid."
+ )
+ return False
+ logging.info(
+ f":satellite: Checking Account on subnet {netuid} ..."
+ )
+ neuron = subtensor.get_neuron_for_pubkey_and_subnet(
+ wallet.hotkey.ss58_address, netuid=netuid
+ )
+
+ old_balance = subtensor.get_balance(wallet.coldkeypub.ss58_address)
+
+ if not neuron.is_null:
+ logging.info(":white_heavy_check_mark: Already Registered")
+ logging.info(f"\t\tuid: {neuron.uid}")
+ logging.info(f"\t\tnetuid: {neuron.netuid}")
+ logging.info(f"\t\thotkey: {neuron.hotkey}")
+ logging.info(f"\t\tcoldkey: {neuron.coldkey}")
+ return True
+
+ logging.info(":satellite: Recycling TAO for Registration...")
+
+ recycle_amount = subtensor.recycle(netuid=netuid)
+ logging.info(f"Recycling {recycle_amount} to register on subnet:{netuid}")
+
+ success, err_msg = _do_burned_register(
+ self=subtensor,
+ netuid=netuid,
+ wallet=wallet,
+ wait_for_inclusion=wait_for_inclusion,
+ wait_for_finalization=wait_for_finalization,
+ )
+
+ if not success:
+ logging.error(f":cross_mark: Failed error: {err_msg}")
+ time.sleep(0.5)
+ return False
+ # Successful registration, final check for neuron and pubkey
+ else:
+ logging.info(":satellite: Checking Balance...")
+ block = subtensor.get_current_block()
+ new_balance = subtensor.get_balance(wallet.coldkeypub.ss58_address, block=block)
+
+ logging.info(
+ f"Balance: {old_balance} :arrow_right: {new_balance}"
+ )
+ is_registered = subtensor.is_hotkey_registered(
+ netuid=netuid, hotkey_ss58=wallet.hotkey.ss58_address
+ )
+ if is_registered:
+ logging.info(":white_heavy_check_mark: Registered")
+ return True
+ else:
+ # neuron not found, try again
+ logging.error(":cross_mark: Unknown error. Neuron not found.")
+ return False
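A hedged usage sketch for the recycle-based path above; the `Subtensor` and `Wallet` instances are assumed to exist, and the netuid is illustrative:

    if burned_register_extrinsic(subtensor, wallet, netuid=1, wait_for_inclusion=True):
        print("hotkey registered by recycling TAO")
    else:
        print("registration failed; check balance against get_subnet_burn_cost")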
diff --git a/bittensor/core/extrinsics/root.py b/bittensor/core/extrinsics/root.py
new file mode 100644
index 0000000000..de72212146
--- /dev/null
+++ b/bittensor/core/extrinsics/root.py
@@ -0,0 +1,281 @@
+import time
+from typing import Optional, Union, TYPE_CHECKING
+
+import numpy as np
+from bittensor_wallet.errors import KeyFileError
+from numpy.typing import NDArray
+from retry import retry
+
+from bittensor.core.settings import version_as_int
+from bittensor.utils import format_error_message, weight_utils
+from bittensor.utils.btlogging import logging
+from bittensor.utils.networking import ensure_connected
+from bittensor.utils.registration import torch, legacy_torch_api_compat
+
+if TYPE_CHECKING:
+ from bittensor_wallet import Wallet
+ from bittensor.core.subtensor import Subtensor
+
+
+@ensure_connected
+def _do_root_register(
+ self: "Subtensor",
+ wallet: "Wallet",
+ wait_for_inclusion: bool = False,
+ wait_for_finalization: bool = True,
+) -> tuple[bool, Optional[str]]:
+ @retry(delay=1, tries=3, backoff=2, max_delay=4)
+ def make_substrate_call_with_retry():
+ # create extrinsic call
+ call = self.substrate.compose_call(
+ call_module="SubtensorModule",
+ call_function="root_register",
+ call_params={"hotkey": wallet.hotkey.ss58_address},
+ )
+ extrinsic = self.substrate.create_signed_extrinsic(
+ call=call, keypair=wallet.coldkey
+ )
+ response = self.substrate.submit_extrinsic(
+ extrinsic,
+ wait_for_inclusion=wait_for_inclusion,
+ wait_for_finalization=wait_for_finalization,
+ )
+
+ # We only wait here if we expect finalization.
+ if not wait_for_finalization and not wait_for_inclusion:
+            return True, None
+
+        # process the registration result
+ response.process_events()
+ if not response.is_success:
+ return False, format_error_message(
+ response.error_message, substrate=self.substrate
+ )
+ # Successful registration
+ else:
+ return True, None
+
+ return make_substrate_call_with_retry()
+
+
+def root_register_extrinsic(
+ subtensor: "Subtensor",
+ wallet: "Wallet",
+ wait_for_inclusion: bool = False,
+ wait_for_finalization: bool = True,
+) -> bool:
+ """Registers the wallet to root network.
+
+ Args:
+ subtensor (bittensor.core.subtensor.Subtensor): Subtensor instance.
+ wallet (bittensor_wallet.Wallet): Bittensor wallet object.
+ wait_for_inclusion (bool): If set, waits for the extrinsic to enter a block before returning ``true``, or returns ``false`` if the extrinsic fails to enter the block within the timeout. Default is ``False``.
+ wait_for_finalization (bool): If set, waits for the extrinsic to be finalized on the chain before returning ``true``, or returns ``false`` if the extrinsic fails to be finalized within the timeout. Default is ``True``.
+
+ Returns:
+        success (bool): Flag is ``true`` if extrinsic was finalized or included in the block. If we did not wait for finalization / inclusion, the response is ``true``.
+ """
+
+ try:
+ wallet.unlock_coldkey()
+ except KeyFileError:
+ logging.error(
+ "Keyfile is corrupt, non-writable, non-readable or the password used to decrypt is invalid."
+ )
+ return False
+
+ is_registered = subtensor.is_hotkey_registered(
+ netuid=0, hotkey_ss58=wallet.hotkey.ss58_address
+ )
+ if is_registered:
+ logging.info(
+ ":white_heavy_check_mark: Already registered on root network."
+ )
+ return True
+
+ logging.info(":satellite: Registering to root network...")
+    success, err_msg = _do_root_register(
+        self=subtensor,
+        wallet=wallet,
+        wait_for_inclusion=wait_for_inclusion,
+        wait_for_finalization=wait_for_finalization,
+    )
+
+    if not success:
+        logging.error(f":cross_mark: Failed: {err_msg}")
+        time.sleep(0.5)
+        return False
+
+ # Successful registration, final check for neuron and pubkey
+ else:
+ is_registered = subtensor.is_hotkey_registered(
+ netuid=0, hotkey_ss58=wallet.hotkey.ss58_address
+ )
+ if is_registered:
+ logging.success(":white_heavy_check_mark: Registered")
+ return True
+        else:
+            # neuron not found, try again
+            logging.error(":cross_mark: Unknown error. Neuron not found.")
+            return False
+
+
+@ensure_connected
+def _do_set_root_weights(
+ self: "Subtensor",
+ wallet: "Wallet",
+ uids: list[int],
+ vals: list[int],
+ netuid: int = 0,
+ version_key: int = version_as_int,
+ wait_for_inclusion: bool = False,
+ wait_for_finalization: bool = False,
+) -> tuple[bool, Optional[str]]:
+ """
+ Internal method to send a transaction to the Bittensor blockchain, setting weights for specified neurons on root. This method constructs and submits the transaction, handling retries and blockchain communication.
+
+ Args:
+ self (bittensor.core.subtensor.Subtensor): Subtensor instance.
+ wallet (bittensor_wallet.Wallet): The wallet associated with the neuron setting the weights.
+ uids (List[int]): List of neuron UIDs for which weights are being set.
+ vals (List[int]): List of weight values corresponding to each UID.
+ netuid (int): Unique identifier for the network.
+        version_key (int, optional): Version key for compatibility with the network. Defaults to the current ``version_as_int``.
+        wait_for_inclusion (bool, optional): Waits for the transaction to be included in a block. Defaults to ``False``.
+        wait_for_finalization (bool, optional): Waits for the transaction to be finalized on the blockchain. Defaults to ``False``.
+
+ Returns:
+ Tuple[bool, Optional[str]]: A tuple containing a success flag and an optional error message.
+
+ This method is vital for the dynamic weighting mechanism in Bittensor, where neurons adjust their trust in other neurons based on observed performance and contributions on the root network.
+ """
+
+ @retry(delay=2, tries=3, backoff=2, max_delay=4)
+ def make_substrate_call_with_retry():
+ call = self.substrate.compose_call(
+ call_module="SubtensorModule",
+ call_function="set_root_weights",
+ call_params={
+ "dests": uids,
+ "weights": vals,
+ "netuid": netuid,
+ "version_key": version_key,
+ "hotkey": wallet.hotkey.ss58_address,
+ },
+ )
+ # Period dictates how long the extrinsic will stay as part of waiting pool
+ extrinsic = self.substrate.create_signed_extrinsic(
+ call=call,
+ keypair=wallet.coldkey,
+ era={"period": 5},
+ )
+ response = self.substrate.submit_extrinsic(
+ extrinsic,
+ wait_for_inclusion=wait_for_inclusion,
+ wait_for_finalization=wait_for_finalization,
+ )
+ # We only wait here if we expect finalization.
+ if not wait_for_finalization and not wait_for_inclusion:
+ return True, "Not waiting for finalziation or inclusion."
+
+ response.process_events()
+ if response.is_success:
+ return True, "Successfully set weights."
+ else:
+ return False, response.error_message
+
+ return make_substrate_call_with_retry()
+
+
+@legacy_torch_api_compat
+def set_root_weights_extrinsic(
+ subtensor: "Subtensor",
+ wallet: "Wallet",
+ netuids: Union[NDArray[np.int64], "torch.LongTensor", list[int]],
+ weights: Union[NDArray[np.float32], "torch.FloatTensor", list[float]],
+ version_key: int = 0,
+ wait_for_inclusion: bool = False,
+ wait_for_finalization: bool = False,
+) -> bool:
+ """Sets the given weights and values on chain for wallet hotkey account.
+
+ Args:
+ subtensor (bittensor.core.subtensor.Subtensor): Subtensor instance.
+ wallet (bittensor_wallet.Wallet): Bittensor wallet object.
+ netuids (Union[NDArray[np.int64], torch.LongTensor, list[int]]): The ``netuid``s of the subnets to set weights for.
+ weights (Union[NDArray[np.float32], torch.FloatTensor, list[float]]): Weights to set. These must be ``float``s and must correspond to the passed ``netuid``s.
+ version_key (int): The version key of the validator. Default is ``0``.
+ wait_for_inclusion (bool): If set, waits for the extrinsic to enter a block before returning ``true``, or returns ``false`` if the extrinsic fails to enter the block within the timeout. Default is ``False``.
+ wait_for_finalization (bool): If set, waits for the extrinsic to be finalized on the chain before returning ``true``, or returns ``false`` if the extrinsic fails to be finalized within the timeout. Default is ``False``.
+
+ Returns:
+ success (bool): Flag is ``true`` if extrinsic was finalized or included in the block. If we did not wait for finalization / inclusion, the response is ``true``.
+ """
+
+ try:
+ wallet.unlock_coldkey()
+ except KeyFileError:
+ logging.error(
+ ":cross_mark: Keyfile is corrupt, non-writable, non-readable or the password used to decrypt is invalid."
+ )
+ return False
+
+ # First convert types.
+ if isinstance(netuids, list):
+ netuids = np.array(netuids, dtype=np.int64)
+ if isinstance(weights, list):
+ weights = np.array(weights, dtype=np.float32)
+
+ # Get weight restrictions.
+ min_allowed_weights = subtensor.min_allowed_weights(netuid=0)
+ max_weight_limit = subtensor.max_weight_limit(netuid=0)
+
+ # Get non zero values.
+ non_zero_weight_idx = np.argwhere(weights > 0).squeeze(axis=1)
+ non_zero_weight_uids = netuids[non_zero_weight_idx]
+ non_zero_weights = weights[non_zero_weight_idx]
+ if non_zero_weights.size < min_allowed_weights:
+ raise ValueError(
+ "The minimum number of weights required to set weights is {}, got {}".format(
+ min_allowed_weights, non_zero_weights.size
+ )
+ )
+
+ # Normalize the weights to max value.
+ formatted_weights = weight_utils.normalize_max_weight(
+ x=weights, limit=max_weight_limit
+ )
+ logging.info(
+ f"Raw Weights -> Normalized weights: {weights} -> {formatted_weights}"
+ )
+
+ logging.info(
+ f":satellite: Setting root weights on {subtensor.network} ..."
+ )
+ try:
+ weight_uids, weight_vals = weight_utils.convert_weights_and_uids_for_emit(
+ netuids, formatted_weights
+ )
+ success, error_message = _do_set_root_weights(
+ wallet=wallet,
+ netuid=0,
+ uids=weight_uids,
+ vals=weight_vals,
+ version_key=version_key,
+ wait_for_finalization=wait_for_finalization,
+ wait_for_inclusion=wait_for_inclusion,
+ )
+
+ if not wait_for_finalization and not wait_for_inclusion:
+ return True
+
+ if success is True:
+ logging.info(":white_heavy_check_mark: Finalized")
+ logging.success(f"Set weights {str(success)}")
+ return True
+ else:
+ logging.error(
+ f":cross_mark: Failed set weights. {str(error_message)}"
+ )
+ return False
+
+ except Exception as e:
+ logging.error(f":cross_mark: Failed set weights. {str(e)}")
+ return False
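Usage note: a minimal sketch of calling the new synchronous `set_root_weights_extrinsic`, assuming an installed `bittensor`, a reachable endpoint, and an existing wallet (the network and wallet names here are illustrative, not part of this diff):

```python
import numpy as np
from bittensor_wallet import Wallet
from bittensor.core.subtensor import Subtensor
from bittensor.core.extrinsics.root import set_root_weights_extrinsic

subtensor = Subtensor(network="test")  # assumption: the public test network
wallet = Wallet(name="validator")      # hypothetical wallet name

# Float weights over root-network netuids; they are normalized against the
# max_weight_limit and converted to u16 values before submission.
ok = set_root_weights_extrinsic(
    subtensor=subtensor,
    wallet=wallet,
    netuids=np.array([1, 2, 3], dtype=np.int64),
    weights=np.array([0.5, 0.3, 0.2], dtype=np.float32),
    wait_for_inclusion=True,
)
```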
diff --git a/bittensor/core/extrinsics/serving.py b/bittensor/core/extrinsics/serving.py
index 490f9c268e..b4ce249719 100644
--- a/bittensor/core/extrinsics/serving.py
+++ b/bittensor/core/extrinsics/serving.py
@@ -15,15 +15,13 @@
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
-import json
from typing import Optional, TYPE_CHECKING
from retry import retry
-from rich.prompt import Confirm
from bittensor.core.errors import MetadataError
from bittensor.core.extrinsics.utils import submit_extrinsic
-from bittensor.core.settings import version_as_int, bt_console
+from bittensor.core.settings import version_as_int
from bittensor.utils import format_error_message, networking as net
from bittensor.utils.btlogging import logging
from bittensor.utils.networking import ensure_connected
@@ -100,7 +98,6 @@ def serve_extrinsic(
placeholder2: int = 0,
wait_for_inclusion: bool = False,
wait_for_finalization=True,
- prompt: bool = False,
) -> bool:
"""Subscribes a Bittensor endpoint to the subtensor chain.
@@ -115,7 +112,6 @@ def serve_extrinsic(
placeholder2 (int): A placeholder for future use.
wait_for_inclusion (bool): If set, waits for the extrinsic to enter a block before returning ``true``, or returns ``false`` if the extrinsic fails to enter the block within the timeout.
wait_for_finalization (bool): If set, waits for the extrinsic to be finalized on the chain before returning ``true``, or returns ``false`` if the extrinsic fails to be finalized within the timeout.
- prompt (bool): If ``true``, the call waits for confirmation from the user before proceeding.
Returns:
success (bool): Flag is ``true`` if extrinsic was finalized or included in the block. If we did not wait for finalization / inclusion, the response is ``true``.
@@ -159,15 +155,6 @@ def serve_extrinsic(
)
return True
- if prompt:
- output = params.copy()
- output["coldkey"] = wallet.coldkeypub.ss58_address
- output["hotkey"] = wallet.hotkey.ss58_address
- if not Confirm.ask(
- f"Do you want to serve axon:\n [bold white]{json.dumps(output, indent=4, sort_keys=True)}[/bold white]"
- ):
- return False
-
logging.debug(
f"Serving axon with: AxonInfo({wallet.hotkey.ss58_address},{ip}:{port}) -> {subtensor.network}:{netuid}"
)
@@ -186,7 +173,9 @@ def serve_extrinsic(
)
return True
else:
- logging.error(f"Failed: {format_error_message(error_message)}")
+ logging.error(
+ f"Failed: {format_error_message(error_message, substrate=subtensor.substrate)}"
+ )
return False
else:
return True
@@ -219,10 +208,9 @@ def serve_axon_extrinsic(
if axon.external_ip is None:
try:
external_ip = net.get_external_ip()
- bt_console.print(
- f":white_heavy_check_mark: [green]Found external ip: {external_ip}[/green]"
+ logging.success(
+ f":white_heavy_check_mark: Found external ip: {external_ip}"
)
- logging.success(prefix="External IP", suffix=f"{external_ip}")
except Exception as e:
raise RuntimeError(
f"Unable to attain your external ip. Check your internet connection. error: {e}"
@@ -299,7 +287,9 @@ def publish_metadata(
if response.is_success:
return True
else:
- raise MetadataError(format_error_message(response.error_message))
+ raise MetadataError(
+ format_error_message(response.error_message, substrate=self.substrate)
+ )
# Community uses this function directly
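With the `prompt` flag removed, `serve_extrinsic` now runs non-interactively; a hedged sketch under the new signature (the IP, port, protocol, netuid, and wallet values are placeholders):

```python
from bittensor_wallet import Wallet
from bittensor.core.subtensor import Subtensor
from bittensor.core.extrinsics.serving import serve_extrinsic

subtensor = Subtensor(network="test")  # assumption: test network
wallet = Wallet(name="miner")          # hypothetical wallet name

ok = serve_extrinsic(
    subtensor=subtensor,
    wallet=wallet,
    ip="203.0.113.7",  # placeholder external IP
    port=8091,
    protocol=4,
    netuid=1,
    wait_for_inclusion=True,
)
```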
diff --git a/bittensor/core/extrinsics/set_weights.py b/bittensor/core/extrinsics/set_weights.py
index 7680061c5b..0475b4222a 100644
--- a/bittensor/core/extrinsics/set_weights.py
+++ b/bittensor/core/extrinsics/set_weights.py
@@ -21,10 +21,9 @@
import numpy as np
from numpy.typing import NDArray
from retry import retry
-from rich.prompt import Confirm
from bittensor.core.extrinsics.utils import submit_extrinsic
-from bittensor.core.settings import bt_console, version_as_int
+from bittensor.core.settings import version_as_int
from bittensor.utils import format_error_message, weight_utils
from bittensor.utils.btlogging import logging
from bittensor.utils.networking import ensure_connected
@@ -47,7 +46,7 @@ def do_set_weights(
version_key: int = version_as_int,
wait_for_inclusion: bool = False,
wait_for_finalization: bool = False,
-) -> tuple[bool, Optional[dict]]: # (success, error_message)
+) -> tuple[bool, Optional[str]]: # (success, error_message)
"""
Internal method to send a transaction to the Bittensor blockchain, setting weights for specified neurons. This method constructs and submits the transaction, handling retries and blockchain communication.
@@ -99,7 +98,9 @@ def make_substrate_call_with_retry():
if response.is_success:
return True, "Successfully set weights."
else:
- return False, response.error_message
+ return False, format_error_message(
+ response.error_message, substrate=self.substrate
+ )
return make_substrate_call_with_retry()
@@ -114,7 +115,6 @@ def set_weights_extrinsic(
version_key: int = 0,
wait_for_inclusion: bool = False,
wait_for_finalization: bool = False,
- prompt: bool = False,
) -> tuple[bool, str]:
"""Sets the given weights and values on chain for wallet hotkey account.
@@ -127,7 +127,6 @@ def set_weights_extrinsic(
version_key (int): The version key of the validator.
wait_for_inclusion (bool): If set, waits for the extrinsic to enter a block before returning ``true``, or returns ``false`` if the extrinsic fails to enter the block within the timeout.
wait_for_finalization (bool): If set, waits for the extrinsic to be finalized on the chain before returning ``true``, or returns ``false`` if the extrinsic fails to be finalized within the timeout.
- prompt (bool): If ``true``, the call waits for confirmation from the user before proceeding.
Returns:
tuple[bool, str]: A tuple containing a success flag and an optional response message.
@@ -149,46 +148,34 @@ def set_weights_extrinsic(
uids, weights
)
- # Ask before moving on.
- if prompt:
- if not Confirm.ask(
- f"Do you want to set weights:\n[bold white] weights: {[float(v / 65535) for v in weight_vals]}\n"
- f"uids: {weight_uids}[/bold white ]?"
- ):
- return False, "Prompt refused."
-
- with bt_console.status(
- f":satellite: Setting weights on [white]{subtensor.network}[/white] ..."
- ):
- try:
- success, error_message = do_set_weights(
- self=subtensor,
- wallet=wallet,
- netuid=netuid,
- uids=weight_uids,
- vals=weight_vals,
- version_key=version_key,
- wait_for_finalization=wait_for_finalization,
- wait_for_inclusion=wait_for_inclusion,
- )
+ logging.info(
+ f":satellite: Setting weights on {subtensor.network} ..."
+ )
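+ # On chain, weights are u16 values in 0..65535; dividing by 65535 recovers
+ # the float weights for display.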
+ logging.debug(f"Weights: {[float(v / 65535) for v in weight_vals]}")
+
+ try:
+ success, error_message = do_set_weights(
+ self=subtensor,
+ wallet=wallet,
+ netuid=netuid,
+ uids=weight_uids,
+ vals=weight_vals,
+ version_key=version_key,
+ wait_for_finalization=wait_for_finalization,
+ wait_for_inclusion=wait_for_inclusion,
+ )
+
+ if not wait_for_finalization and not wait_for_inclusion:
+ return True, "Not waiting for finalization or inclusion."
+
+ if success is True:
+ logging.success(f"Finalized! Set weights: {str(success)}")
+ return True, "Successfully set weights and Finalized."
+ else:
+ logging.error(error_message)
+ return False, error_message
- if not wait_for_finalization and not wait_for_inclusion:
- return True, "Not waiting for finalization or inclusion."
-
- if success is True:
- bt_console.print(":white_heavy_check_mark: [green]Finalized[/green]")
- logging.success(
- msg=str(success),
- prefix="Set weights",
- suffix="Finalized: ",
- )
- return True, "Successfully set weights and Finalized."
- else:
- error_message = format_error_message(error_message)
- logging.error(error_message)
- return False, error_message
-
- except Exception as e:
- bt_console.print(f":cross_mark: [red]Failed[/red]: error:{e}")
- logging.debug(str(e))
- return False, str(e)
+ except Exception as e:
+ logging.error(f":cross_mark: Failed.: Error: {e}")
+ logging.debug(str(e))
+ return False, str(e)
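`do_set_weights` now reports its error as a formatted string, so `set_weights_extrinsic` returns a `(bool, str)` pair; a short sketch with illustrative uids and weights:

```python
import numpy as np
from bittensor_wallet import Wallet
from bittensor.core.subtensor import Subtensor
from bittensor.core.extrinsics.set_weights import set_weights_extrinsic

subtensor = Subtensor(network="test")  # assumption: test network
wallet = Wallet(name="validator")      # hypothetical wallet name

success, message = set_weights_extrinsic(
    subtensor=subtensor,
    wallet=wallet,
    netuid=1,
    uids=np.array([0, 1], dtype=np.int64),
    weights=np.array([0.7, 0.3], dtype=np.float32),
    wait_for_inclusion=True,
)
print(success, message)  # e.g. (True, "Successfully set weights and Finalized.")
```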
diff --git a/bittensor/core/extrinsics/transfer.py b/bittensor/core/extrinsics/transfer.py
index 896fecbf96..b2e0606064 100644
--- a/bittensor/core/extrinsics/transfer.py
+++ b/bittensor/core/extrinsics/transfer.py
@@ -18,16 +18,16 @@
from typing import Optional, Union, TYPE_CHECKING
from retry import retry
-from rich.prompt import Confirm
from bittensor.core.extrinsics.utils import submit_extrinsic
-from bittensor.core.settings import bt_console, NETWORK_EXPLORER_MAP
+from bittensor.core.settings import NETWORK_EXPLORER_MAP
from bittensor.utils import (
get_explorer_url_for_network,
format_error_message,
is_valid_bittensor_address_or_public_key,
)
from bittensor.utils.balance import Balance
+from bittensor.utils.btlogging import logging
from bittensor.utils.networking import ensure_connected
# For annotation purposes
@@ -102,7 +102,6 @@ def transfer_extrinsic(
wait_for_inclusion: bool = True,
wait_for_finalization: bool = False,
keep_alive: bool = True,
- prompt: bool = False,
) -> bool:
"""Transfers funds from this wallet to the destination public key address.
@@ -114,16 +113,13 @@ def transfer_extrinsic(
wait_for_inclusion (bool): If set, waits for the extrinsic to enter a block before returning ``true``, or returns ``false`` if the extrinsic fails to enter the block within the timeout.
wait_for_finalization (bool): If set, waits for the extrinsic to be finalized on the chain before returning ``true``, or returns ``false`` if the extrinsic fails to be finalized within the timeout.
keep_alive (bool): If set, keeps the account alive by keeping the balance above the existential deposit.
- prompt (bool): If ``true``, the call waits for confirmation from the user before proceeding.
Returns:
success (bool): Flag is ``true`` if extrinsic was finalized or included in the block. If we did not wait for finalization / inclusion, the response is ``true``.
"""
# Validate destination address.
if not is_valid_bittensor_address_or_public_key(dest):
- bt_console.print(
- f":cross_mark: [red]Invalid destination address[/red]:[bold white]\n {dest}[/bold white]"
- )
+ logging.error(f"Invalid destination address: {dest}")
return False
if isinstance(dest, bytes):
@@ -140,15 +136,15 @@ def transfer_extrinsic(
transfer_balance = amount
# Check balance.
- with bt_console.status(":satellite: Checking Balance..."):
- account_balance = subtensor.get_balance(wallet.coldkey.ss58_address)
- # check existential deposit.
- existential_deposit = subtensor.get_existential_deposit()
-
- with bt_console.status(":satellite: Transferring..."):
- fee = subtensor.get_transfer_fee(
- wallet=wallet, dest=dest, value=transfer_balance.rao
- )
+ logging.info(":satellite: Checking Balance...")
+ account_balance = subtensor.get_balance(wallet.coldkey.ss58_address)
+ # check existential deposit.
+ existential_deposit = subtensor.get_existential_deposit()
+
+ logging.info(":satellite: Transferring...")
+ fee = subtensor.get_transfer_fee(
+ wallet=wallet, dest=dest, value=transfer_balance.rao
+ )
if not keep_alive:
# Check if the transfer should keep_alive the account
@@ -156,60 +152,52 @@ def transfer_extrinsic(
# Check if we have enough balance.
if account_balance < (transfer_balance + fee + existential_deposit):
- bt_console.print(
- ":cross_mark: [red]Not enough balance[/red]:[bold white]\n"
- f" balance: {account_balance}\n"
- f" amount: {transfer_balance}\n"
- f" for fee: {fee}[/bold white]"
- )
+ logging.error(":cross_mark: Not enough balance:")
+ logging.info(f"\t\tBalance: \t{account_balance}")
+ logging.info(f"\t\tAmount: \t{transfer_balance}")
+ logging.info(f"\t\tFor fee: \t{fee}")
return False
- # Ask before moving on.
- if prompt:
- if not Confirm.ask(
- "Do you want to transfer:[bold white]\n"
- f" amount: {transfer_balance}\n"
- f" from: {wallet.name}:{wallet.coldkey.ss58_address}\n"
- f" to: {dest}\n"
- f" for fee: {fee}[/bold white]"
- ):
- return False
-
- with bt_console.status(":satellite: Transferring..."):
- success, block_hash, error_message = do_transfer(
- self=subtensor,
- wallet=wallet,
- dest=dest,
- transfer_balance=transfer_balance,
- wait_for_finalization=wait_for_finalization,
- wait_for_inclusion=wait_for_inclusion,
- )
+ logging.info(":satellite: Transferring...")
+ logging.info(f"\tAmount: {transfer_balance}")
+ logging.info(f"\tfrom: {wallet.name}:{wallet.coldkey.ss58_address}")
+ logging.info(f"\tTo: {dest}")
+ logging.info(f"\tFor fee: {fee}")
+
+ success, block_hash, error_message = do_transfer(
+ self=subtensor,
+ wallet=wallet,
+ dest=dest,
+ transfer_balance=transfer_balance,
+ wait_for_finalization=wait_for_finalization,
+ wait_for_inclusion=wait_for_inclusion,
+ )
- if success:
- bt_console.print(":white_heavy_check_mark: [green]Finalized[/green]")
- bt_console.print(f"[green]Block Hash: {block_hash}[/green]")
+ if success:
+ logging.success(":white_heavy_check_mark: Finalized")
+ logging.info(f"Block Hash: {block_hash}")
- explorer_urls = get_explorer_url_for_network(
- subtensor.network, block_hash, NETWORK_EXPLORER_MAP
+ explorer_urls = get_explorer_url_for_network(
+ subtensor.network, block_hash, NETWORK_EXPLORER_MAP
+ )
+ if explorer_urls:
+ logging.info(
+ f"Opentensor Explorer Link: {explorer_urls.get('opentensor')}"
)
- if explorer_urls != {} and explorer_urls:
- bt_console.print(
- f"[green]Opentensor Explorer Link: {explorer_urls.get('opentensor')}[/green]"
- )
- bt_console.print(
- f"[green]Taostats Explorer Link: {explorer_urls.get('taostats')}[/green]"
- )
- else:
- bt_console.print(
- f":cross_mark: [red]Failed[/red]: {format_error_message(error_message)}"
+ logging.info(
+ f"Taostats Explorer Link: {explorer_urls.get('taostats')}"
)
+ else:
+ logging.error(
+ f":cross_mark: Failed: {format_error_message(error_message, substrate=subtensor.substrate)}"
+ )
if success:
- with bt_console.status(":satellite: Checking Balance..."):
- new_balance = subtensor.get_balance(wallet.coldkey.ss58_address)
- bt_console.print(
- f"Balance:\n [blue]{account_balance}[/blue] :arrow_right: [green]{new_balance}[/green]"
- )
- return True
+ logging.info(":satellite: Checking Balance...")
+ new_balance = subtensor.get_balance(wallet.coldkey.ss58_address)
+ logging.success(
+ f"Balance: {account_balance} :arrow_right: {new_balance}"
+ )
+ return True
return False
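A sketch of the now non-interactive `transfer_extrinsic`, using a well-known dev address as a placeholder destination and a hypothetical wallet:

```python
from bittensor_wallet import Wallet
from bittensor.core.subtensor import Subtensor
from bittensor.core.extrinsics.transfer import transfer_extrinsic
from bittensor.utils.balance import Balance

subtensor = Subtensor(network="test")  # assumption: test network
wallet = Wallet(name="sender")         # hypothetical wallet name

ok = transfer_extrinsic(
    subtensor=subtensor,
    wallet=wallet,
    dest="5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY",  # placeholder SS58 address
    amount=Balance.from_tao(0.1),
    wait_for_inclusion=True,
    keep_alive=True,
)
```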
diff --git a/bittensor/core/metagraph.py b/bittensor/core/metagraph.py
index 208eaa6b9f..75e8d947c9 100644
--- a/bittensor/core/metagraph.py
+++ b/bittensor/core/metagraph.py
@@ -1249,12 +1249,11 @@ def load_from_path(self, dir_path: str) -> "Metagraph":
with open(graph_filename, "rb") as graph_file:
state_dict = pickle.load(graph_file)
except pickle.UnpicklingError:
- settings.bt_console.print(
+ logging.info(
"Unable to load file. Attempting to restore metagraph using torch."
)
- settings.bt_console.print(
- ":warning:[yellow]Warning:[/yellow] This functionality exists to load "
- "metagraph state from legacy saves, but will not be supported in the future."
+ logging.warning(
+ ":warning: This functionality exists to load metagraph state from legacy saves, but will not be supported in the future."
)
try:
import torch as real_torch
@@ -1264,7 +1263,7 @@ def load_from_path(self, dir_path: str) -> "Metagraph":
state_dict[key] = state_dict[key].detach().numpy()
del real_torch
except (RuntimeError, ImportError):
- settings.bt_console.print("Unable to load file. It may be corrupted.")
+ logging.error("Unable to load file. It may be corrupted.")
raise
self.n = state_dict["n"]
diff --git a/bittensor/core/settings.py b/bittensor/core/settings.py
index 36314c2b72..5de0d923d6 100644
--- a/bittensor/core/settings.py
+++ b/bittensor/core/settings.py
@@ -15,7 +15,7 @@
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
-__version__ = "8.2.0"
+__version__ = "8.2.1"
import os
import re
@@ -23,36 +23,6 @@
from pathlib import Path
from munch import munchify
-from rich.console import Console
-from rich.traceback import install
-
-# Rich console.
-__console__ = Console()
-__use_console__ = True
-
-# Remove overdue locals in debug training.
-install(show_locals=False)
-
-
-def turn_console_off():
- global __use_console__
- global __console__
- from io import StringIO
-
- __use_console__ = False
- __console__ = Console(file=StringIO(), stderr=False)
-
-
-def turn_console_on():
- global __use_console__
- global __console__
- __use_console__ = True
- __console__ = Console()
-
-
-turn_console_off()
-
-bt_console = __console__
HOME_DIR = Path.home()
@@ -60,22 +30,36 @@ def turn_console_on():
WALLETS_DIR = USER_BITTENSOR_DIR / "wallets"
MINERS_DIR = USER_BITTENSOR_DIR / "miners"
-# Bittensor networks name
-NETWORKS = ["local", "finney", "test", "archive"]
-
-DEFAULT_ENDPOINT = "wss://entrypoint-finney.opentensor.ai:443"
-DEFAULT_NETWORK = NETWORKS[1]
# Create dirs if they don't exist
WALLETS_DIR.mkdir(parents=True, exist_ok=True)
MINERS_DIR.mkdir(parents=True, exist_ok=True)
+# Bittensor networks name
+NETWORKS = ["finney", "test", "archive", "local"]
+
+DEFAULT_ENDPOINT = "wss://entrypoint-finney.opentensor.ai:443"
+DEFAULT_NETWORK = NETWORKS[0]
# Bittensor endpoints (Needs to use wss://)
FINNEY_ENTRYPOINT = "wss://entrypoint-finney.opentensor.ai:443"
FINNEY_TEST_ENTRYPOINT = "wss://test.finney.opentensor.ai:443/"
ARCHIVE_ENTRYPOINT = "wss://archive.chain.opentensor.ai:443/"
-LOCAL_ENTRYPOINT = os.getenv("BT_SUBTENSOR_CHAIN_ENDPOINT") or "ws://127.0.0.1:9946"
+LOCAL_ENTRYPOINT = os.getenv("BT_SUBTENSOR_CHAIN_ENDPOINT") or "ws://127.0.0.1:9944"
+
+NETWORK_MAP = {
+ NETWORKS[0]: FINNEY_ENTRYPOINT,
+ NETWORKS[1]: FINNEY_TEST_ENTRYPOINT,
+ NETWORKS[2]: ARCHIVE_ENTRYPOINT,
+ NETWORKS[3]: LOCAL_ENTRYPOINT,
+}
+
# Currency Symbols Bittensor
TAO_SYMBOL: str = chr(0x03C4)
@@ -112,11 +96,28 @@ def turn_console_on():
}
# --- Type Registry ---
-TYPE_REGISTRY: dict = {
+TYPE_REGISTRY: dict[str, dict] = {
"types": {
"Balance": "u64", # Need to override default u128
},
"runtime_api": {
+ "DelegateInfoRuntimeApi": {
+ "methods": {
+ "get_delegated": {
+ "params": [
+ {
+ "name": "coldkey",
+ "type": "Vec",
+ },
+ ],
+ "type": "Vec",
+ },
+ "get_delegates": {
+ "params": [],
+ "type": "Vec",
+ },
+ }
+ },
"NeuronInfoRuntimeApi": {
"methods": {
"get_neuron_lite": {
@@ -141,8 +142,65 @@ def turn_console_on():
],
"type": "Vec",
},
+ "get_neuron": {
+ "params": [
+ {
+ "name": "netuid",
+ "type": "u16",
+ },
+ {
+ "name": "uid",
+ "type": "u16",
+ },
+ ],
+ "type": "Vec",
+ },
+ "get_neurons": {
+ "params": [
+ {
+ "name": "netuid",
+ "type": "u16",
+ },
+ ],
+ "type": "Vec",
+ },
}
},
+ "StakeInfoRuntimeApi": {
+ "methods": {
+ "get_stake_info_for_coldkey": {
+ "params": [
+ {
+ "name": "coldkey_account_vec",
+ "type": "Vec",
+ },
+ ],
+ "type": "Vec",
+ },
+ "get_stake_info_for_coldkeys": {
+ "params": [
+ {
+ "name": "coldkey_account_vecs",
+ "type": "Vec>",
+ },
+ ],
+ "type": "Vec",
+ },
+ },
+ },
+ "ValidatorIPRuntimeApi": {
+ "methods": {
+ "get_associated_validator_ip_info_for_subnet": {
+ "params": [
+ {
+ "name": "netuid",
+ "type": "u16",
+ },
+ ],
+ "type": "Vec",
+ },
+ },
+ },
"SubnetInfoRuntimeApi": {
"methods": {
"get_subnet_hyperparams": {
@@ -153,12 +211,56 @@ def turn_console_on():
},
],
"type": "Vec",
- }
+ },
+ "get_subnet_info": {
+ "params": [
+ {
+ "name": "netuid",
+ "type": "u16",
+ },
+ ],
+ "type": "Vec",
+ },
+ "get_subnets_info": {
+ "params": [],
+ "type": "Vec",
+ },
}
},
"SubnetRegistrationRuntimeApi": {
"methods": {"get_network_registration_cost": {"params": [], "type": "u64"}}
},
+ "ColdkeySwapRuntimeApi": {
+ "methods": {
+ "get_scheduled_coldkey_swap": {
+ "params": [
+ {
+ "name": "coldkey_account_vec",
+ "type": "Vec",
+ },
+ ],
+ "type": "Vec",
+ },
+ "get_remaining_arbitration_period": {
+ "params": [
+ {
+ "name": "coldkey_account_vec",
+ "type": "Vec",
+ },
+ ],
+ "type": "Vec",
+ },
+ "get_coldkey_swap_destinations": {
+ "params": [
+ {
+ "name": "coldkey_account_vec",
+ "type": "Vec",
+ },
+ ],
+ "type": "Vec",
+ },
+ }
+ },
},
}
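After this change, network resolution is a plain dictionary lookup:

```python
from bittensor.core.settings import NETWORK_MAP, DEFAULT_NETWORK

assert DEFAULT_NETWORK == "finney"
endpoint = NETWORK_MAP[DEFAULT_NETWORK]  # "wss://entrypoint-finney.opentensor.ai:443"
local = NETWORK_MAP["local"]             # "ws://127.0.0.1:9944" unless BT_SUBTENSOR_CHAIN_ENDPOINT is set
```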
diff --git a/bittensor/core/subtensor.py b/bittensor/core/subtensor.py
index ca7397adb6..8f5d147f7a 100644
--- a/bittensor/core/subtensor.py
+++ b/bittensor/core/subtensor.py
@@ -16,13 +16,14 @@
# DEALINGS IN THE SOFTWARE.
"""
-The ``bittensor.core.subtensor`` module in Bittensor serves as a crucial interface for interacting with the Bittensor
+The ``bittensor.core.subtensor.Subtensor`` module in Bittensor serves as a crucial interface for interacting with the Bittensor
blockchain, facilitating a range of operations essential for the decentralized machine learning network.
"""
import argparse
import copy
import socket
+import ssl
from typing import Union, Optional, TypedDict, Any
import numpy as np
@@ -39,20 +40,26 @@
from bittensor.core import settings
from bittensor.core.axon import Axon
from bittensor.core.chain_data import (
+ custom_rpc_type_registry,
+ DelegateInfo,
NeuronInfo,
+ NeuronInfoLite,
PrometheusInfo,
SubnetHyperparameters,
- NeuronInfoLite,
- custom_rpc_type_registry,
+ SubnetInfo,
)
from bittensor.core.config import Config
from bittensor.core.extrinsics.commit_weights import (
commit_weights_extrinsic,
reveal_weights_extrinsic,
)
-from bittensor.core.extrinsics.prometheus import (
- do_serve_prometheus,
- prometheus_extrinsic,
+from bittensor.core.extrinsics.registration import (
+ burned_register_extrinsic,
+ register_extrinsic,
+)
+from bittensor.core.extrinsics.root import (
+ root_register_extrinsic,
+ set_root_weights_extrinsic,
)
from bittensor.core.extrinsics.serving import (
do_serve_axon,
@@ -65,10 +72,10 @@
transfer_extrinsic,
)
from bittensor.core.metagraph import Metagraph
-from bittensor.utils import torch
-from bittensor.utils import u16_normalized_float, networking
+from bittensor.utils import networking, torch, ss58_to_vec_u8, u16_normalized_float
from bittensor.utils.balance import Balance
from bittensor.utils.btlogging import logging
+from bittensor.utils.registration import legacy_torch_api_compat
from bittensor.utils.weight_utils import generate_weight_hash
KEY_NONCE: dict[str, int] = {}
@@ -225,7 +232,7 @@ def _get_substrate(self):
except (AttributeError, TypeError, socket.error, OSError) as e:
logging.warning(f"Error setting timeout: {e}")
- except ConnectionRefusedError as error:
+ except (ConnectionRefusedError, ssl.SSLError) as error:
logging.error(
f"Could not connect to {self.network} network with {self.chain_endpoint} chain endpoint.",
)
@@ -846,7 +853,6 @@ def set_weights(
version_key: int = settings.version_as_int,
wait_for_inclusion: bool = False,
wait_for_finalization: bool = False,
- prompt: bool = False,
max_retries: int = 5,
) -> tuple[bool, str]:
"""
@@ -860,7 +866,6 @@ def set_weights(
version_key (int): Version key for compatibility with the network. Default is ``int representation of Bittensor version.``.
wait_for_inclusion (bool): Waits for the transaction to be included in a block. Default is ``False``.
wait_for_finalization (bool): Waits for the transaction to be finalized on the blockchain. Default is ``False``.
- prompt (bool): If ``True``, prompts for user confirmation before proceeding. Default is ``False``.
max_retries (int): The number of maximum attempts to set weights. Default is ``5``.
Returns:
@@ -889,7 +894,6 @@ def set_weights(
version_key=version_key,
wait_for_inclusion=wait_for_inclusion,
wait_for_finalization=wait_for_finalization,
- prompt=prompt,
)
except Exception as e:
logging.error(f"Error setting weights: {e}")
@@ -898,6 +902,151 @@ def set_weights(
return success, message
+ @legacy_torch_api_compat
+ def root_set_weights(
+ self,
+ wallet: "Wallet",
+ netuids: Union[NDArray[np.int64], "torch.LongTensor", list],
+ weights: Union[NDArray[np.float32], "torch.FloatTensor", list],
+ version_key: int = 0,
+ wait_for_inclusion: bool = False,
+ wait_for_finalization: bool = False,
+ ) -> bool:
+ """
+ Sets the weights for neurons on the root network. This action is crucial for defining the influence and interactions of neurons at the root level of the Bittensor network.
+
+ Args:
+ wallet (bittensor_wallet.Wallet): The wallet associated with the neuron setting the weights.
+ netuids (Union[NDArray[np.int64], torch.LongTensor, list]): The list of neuron UIDs for which weights are being set.
+ weights (Union[NDArray[np.float32], torch.FloatTensor, list]): The corresponding weights to be set for each UID.
+ version_key (int, optional): Version key for compatibility with the network. Default is ``0``.
+ wait_for_inclusion (bool, optional): Waits for the transaction to be included in a block. Defaults to ``False``.
+ wait_for_finalization (bool, optional): Waits for the transaction to be finalized on the blockchain. Defaults to ``False``.
+
+ Returns:
+ bool: ``True`` if the setting of root-level weights is successful, ``False`` otherwise.
+
+ This function plays a pivotal role in shaping the root network's collective intelligence and decision-making processes, reflecting the principles of decentralized governance and collaborative learning in Bittensor.
+ """
+ return set_root_weights_extrinsic(
+ subtensor=self,
+ wallet=wallet,
+ netuids=netuids,
+ weights=weights,
+ version_key=version_key,
+ wait_for_inclusion=wait_for_inclusion,
+ wait_for_finalization=wait_for_finalization,
+ )
+
+ def register(
+ self,
+ wallet: "Wallet",
+ netuid: int,
+ wait_for_inclusion: bool = False,
+ wait_for_finalization: bool = True,
+ max_allowed_attempts: int = 3,
+ output_in_place: bool = True,
+ cuda: bool = False,
+ dev_id: Union[list[int], int] = 0,
+ tpb: int = 256,
+ num_processes: Optional[int] = None,
+ update_interval: Optional[int] = None,
+ log_verbose: bool = False,
+ ) -> bool:
+ """
+ Registers a neuron on the Bittensor network using the provided wallet.
+
+ Registration is a critical step for a neuron to become an active participant in the network, enabling it to stake, set weights, and receive incentives.
+
+ Args:
+ wallet (bittensor_wallet.Wallet): The wallet associated with the neuron to be registered.
+ netuid (int): The unique identifier of the subnet.
+ wait_for_inclusion (bool): Waits for the transaction to be included in a block. Defaults to `False`.
+ wait_for_finalization (bool): Waits for the transaction to be finalized on the blockchain. Defaults to `True`.
+ max_allowed_attempts (int): Maximum number of attempts to register the wallet.
+ output_in_place (bool): If true, prints the progress of the proof of work to the console in-place. Meaning the progress is printed on the same lines. Defaults to `True`.
+ cuda (bool): If ``true``, the wallet should be registered using CUDA device(s). Defaults to `False`.
+ dev_id (Union[List[int], int]): The CUDA device id to use, or a list of device ids. Defaults to `0` (zero).
+ tpb (int): The number of threads per block (CUDA). Defaults to `256`.
+ num_processes (Optional[int]): The number of processes to use to register. Defaults to `None`.
+ update_interval (Optional[int]): The number of nonces to solve between updates. Defaults to `None`.
+ log_verbose (bool): If ``true``, the registration process will log more information. Defaults to `False`.
+
+ Returns:
+ bool: ``True`` if the registration is successful, ``False`` otherwise.
+
+ This function facilitates the entry of new neurons into the network, supporting the decentralized
+ growth and scalability of the Bittensor ecosystem.
+ """
+ return register_extrinsic(
+ subtensor=self,
+ wallet=wallet,
+ netuid=netuid,
+ wait_for_inclusion=wait_for_inclusion,
+ wait_for_finalization=wait_for_finalization,
+ max_allowed_attempts=max_allowed_attempts,
+ output_in_place=output_in_place,
+ cuda=cuda,
+ dev_id=dev_id,
+ tpb=tpb,
+ num_processes=num_processes,
+ update_interval=update_interval,
+ log_verbose=log_verbose,
+ )
+
+ def root_register(
+ self,
+ wallet: "Wallet",
+ wait_for_inclusion: bool = False,
+ wait_for_finalization: bool = True,
+ ) -> bool:
+ """
+ Registers the neuron associated with the wallet on the root network. This process is integral for participating in the highest layer of decision-making and governance within the Bittensor network.
+
+ Args:
+ wallet (bittensor_wallet.Wallet): The wallet associated with the neuron to be registered on the root network.
+ wait_for_inclusion (bool): Waits for the transaction to be included in a block. Defaults to `False`.
+ wait_for_finalization (bool): Waits for the transaction to be finalized on the blockchain. Defaults to `True`.
+
+ Returns:
+ bool: ``True`` if the registration on the root network is successful, ``False`` otherwise.
+
+ This function enables neurons to engage in the most critical and influential aspects of the network's governance, signifying a high level of commitment and responsibility in the Bittensor ecosystem.
+ """
+ return root_register_extrinsic(
+ subtensor=self,
+ wallet=wallet,
+ wait_for_inclusion=wait_for_inclusion,
+ wait_for_finalization=wait_for_finalization,
+ )
+
+ def burned_register(
+ self,
+ wallet: "Wallet",
+ netuid: int,
+ wait_for_inclusion: bool = False,
+ wait_for_finalization: bool = True,
+ ) -> bool:
+ """
+ Registers a neuron on the Bittensor network by recycling TAO. This method of registration involves recycling TAO tokens, allowing them to be re-mined by performing work on the network.
+
+ Args:
+ wallet (bittensor_wallet.Wallet): The wallet associated with the neuron to be registered.
+ netuid (int): The unique identifier of the subnet.
+ wait_for_inclusion (bool, optional): Waits for the transaction to be included in a block. Defaults to `False`.
+ wait_for_finalization (bool, optional): Waits for the transaction to be finalized on the blockchain. Defaults to `True`.
+
+ Returns:
+ bool: ``True`` if the registration is successful, ``False`` otherwise.
+ """
+ return burned_register_extrinsic(
+ subtensor=self,
+ wallet=wallet,
+ netuid=netuid,
+ wait_for_inclusion=wait_for_inclusion,
+ wait_for_finalization=wait_for_finalization,
+ )
+
def serve_axon(
self,
netuid: int,
@@ -1013,7 +1162,6 @@ def transfer(
amount: Union["Balance", float],
wait_for_inclusion: bool = True,
wait_for_finalization: bool = False,
- prompt: bool = False,
) -> bool:
"""
Executes a transfer of funds from the provided wallet to the specified destination address. This function is used to move TAO tokens within the Bittensor network, facilitating transactions between neurons.
@@ -1024,7 +1172,6 @@ def transfer(
amount (Union[bittensor.utils.balance.Balance, float]): The amount of TAO to be transferred.
wait_for_inclusion (bool): Waits for the transaction to be included in a block. Default is ``True``.
wait_for_finalization (bool): Waits for the transaction to be finalized on the blockchain. Default is ``False``.
- prompt (bool): If ``True``, prompts for user confirmation before proceeding. Default is ``False``.
Returns:
transfer_extrinsic (bool): ``True`` if the transfer is successful, False otherwise.
@@ -1038,7 +1185,6 @@ def transfer(
amount=amount,
wait_for_inclusion=wait_for_inclusion,
wait_for_finalization=wait_for_finalization,
- prompt=prompt,
)
# Community uses this method via `bittensor.api.extrinsics.prometheus.prometheus_extrinsic`
@@ -1102,37 +1248,6 @@ def make_substrate_call_with_retry():
return NeuronInfo.from_vec_u8(result)
- # Community uses this method
- def serve_prometheus(
- self,
- wallet: "Wallet",
- port: int,
- netuid: int,
- wait_for_inclusion: bool = False,
- wait_for_finalization: bool = True,
- ) -> bool:
- """
- Serves Prometheus metrics by submitting an extrinsic to a blockchain network via the specified wallet. The function allows configuring whether to wait for the transaction's inclusion in a block and its finalization.
-
- Args:
- wallet (bittensor_wallet.Wallet): Bittensor wallet instance used for submitting the extrinsic.
- port (int): The port number on which Prometheus metrics are served.
- netuid (int): The unique identifier of the subnetwork.
- wait_for_inclusion (bool): If True, waits for the transaction to be included in a block. Defaults to ``False``.
- wait_for_finalization (bool): If True, waits for the transaction to be finalized. Defaults to ``True``.
-
- Returns:
- bool: Returns True if the Prometheus extrinsic is successfully processed, otherwise False.
- """
- return prometheus_extrinsic(
- self,
- wallet=wallet,
- port=port,
- netuid=netuid,
- wait_for_inclusion=wait_for_inclusion,
- wait_for_finalization=wait_for_finalization,
- )
-
# Community uses this method
def get_subnet_hyperparameters(
self, netuid: int, block: Optional[int] = None
@@ -1326,6 +1441,36 @@ def subnet_exists(self, netuid: int, block: Optional[int] = None) -> bool:
_result = self.query_subtensor("NetworksAdded", block, [netuid])
return getattr(_result, "value", False)
+ @networking.ensure_connected
+ def get_all_subnets_info(self, block: Optional[int] = None) -> list[SubnetInfo]:
+ """
+ Retrieves detailed information about all subnets within the Bittensor network. This function provides comprehensive data on each subnet, including its characteristics and operational parameters.
+
+ Args:
+ block (Optional[int]): The blockchain block number for the query.
+
+ Returns:
+ list[SubnetInfo]: A list of SubnetInfo objects, each containing detailed information about a subnet.
+
+ Gaining insights into the subnets' details assists in understanding the network's composition, the roles of different subnets, and their unique features.
+ """
+
+ @retry(delay=1, tries=3, backoff=2, max_delay=4)
+ def make_substrate_call_with_retry():
+ block_hash = None if block is None else self.substrate.get_block_hash(block)
+
+ return self.substrate.rpc_request(
+ method="subnetInfo_getSubnetsInfo", # custom rpc method
+ params=[block_hash] if block_hash else [],
+ )
+
+ json_body = make_substrate_call_with_retry()
+
+ if not (result := json_body.get("result", None)):
+ return []
+
+ return SubnetInfo.list_from_vec_u8(result)
+
# Metagraph uses this method
def bonds(
self, netuid: int, block: Optional[int] = None
@@ -1352,6 +1497,30 @@ def bonds(
return b_map
+ def get_subnet_burn_cost(self, block: Optional[int] = None) -> Optional[int]:
+ """
+ Retrieves the burn cost for registering a new subnet within the Bittensor network. This cost represents the amount of Tao that needs to be locked or burned to establish a new subnet.
+
+ Args:
+ block (Optional[int]): The blockchain block number for the query.
+
+ Returns:
+ Optional[int]: The burn cost for subnet registration, or ``None`` if unavailable.
+
+ The subnet burn cost is an important economic parameter, reflecting the network's mechanisms for controlling the proliferation of subnets and ensuring their commitment to the network's long-term viability.
+ """
+ lock_cost = self.query_runtime_api(
+ runtime_api="SubnetRegistrationRuntimeApi",
+ method="get_network_registration_cost",
+ params=[],
+ block=block,
+ )
+
+ if lock_cost is None:
+ return None
+
+ return lock_cost
+
# Metagraph uses this method
def neurons(self, netuid: int, block: Optional[int] = None) -> list["NeuronInfo"]:
"""
@@ -1549,9 +1718,7 @@ def get_transfer_fee(
call=call, keypair=wallet.coldkeypub
)
except Exception as e:
- settings.bt_console.print(
- f":cross_mark: [red]Failed to get payment info[/red]:[bold white]\n {e}[/bold white]"
- )
+ logging.error(f"Failed to get payment info. {e}")
payment_info = {"partialFee": int(2e7)} # assume 0.02 Tao
fee = Balance.from_rao(payment_info["partialFee"])
@@ -1599,7 +1766,6 @@ def commit_weights(
version_key: int = settings.version_as_int,
wait_for_inclusion: bool = False,
wait_for_finalization: bool = False,
- prompt: bool = False,
max_retries: int = 5,
) -> tuple[bool, str]:
"""
@@ -1615,7 +1781,6 @@ def commit_weights(
version_key (int): Version key for compatibility with the network. Default is ``int representation of Bittensor version.``.
wait_for_inclusion (bool): Waits for the transaction to be included in a block. Default is ``False``.
wait_for_finalization (bool): Waits for the transaction to be finalized on the blockchain. Default is ``False``.
- prompt (bool): If ``True``, prompts for user confirmation before proceeding. Default is ``False``.
max_retries (int): The number of maximum attempts to commit weights. Default is ``5``.
Returns:
@@ -1654,7 +1819,6 @@ def commit_weights(
commit_hash=commit_hash,
wait_for_inclusion=wait_for_inclusion,
wait_for_finalization=wait_for_finalization,
- prompt=prompt,
)
if success:
break
@@ -1676,7 +1840,6 @@ def reveal_weights(
version_key: int = settings.version_as_int,
wait_for_inclusion: bool = False,
wait_for_finalization: bool = False,
- prompt: bool = False,
max_retries: int = 5,
) -> tuple[bool, str]:
"""
@@ -1692,7 +1855,6 @@ def reveal_weights(
version_key (int): Version key for compatibility with the network. Default is ``int representation of Bittensor version``.
wait_for_inclusion (bool): Waits for the transaction to be included in a block. Default is ``False``.
wait_for_finalization (bool): Waits for the transaction to be finalized on the blockchain. Default is ``False``.
- prompt (bool): If ``True``, prompts for user confirmation before proceeding. Default is ``False``.
max_retries (int): The number of maximum attempts to reveal weights. Default is ``5``.
Returns:
@@ -1719,7 +1881,6 @@ def reveal_weights(
version_key=version_key,
wait_for_inclusion=wait_for_inclusion,
wait_for_finalization=wait_for_finalization,
- prompt=prompt,
)
if success:
break
@@ -1730,7 +1891,101 @@ def reveal_weights(
return success, message
- # Subnet 27 uses this method
- _do_serve_prometheus = do_serve_prometheus
+ def difficulty(self, netuid: int, block: Optional[int] = None) -> Optional[int]:
+ """
+ Retrieves the 'Difficulty' hyperparameter for a specified subnet in the Bittensor network.
+
+ This parameter is instrumental in determining the computational challenge required for neurons to participate in consensus and validation processes.
+
+ Args:
+ netuid (int): The unique identifier of the subnet.
+ block (Optional[int]): The blockchain block number for the query.
+
+ Returns:
+ Optional[int]: The value of the 'Difficulty' hyperparameter if the subnet exists, ``None`` otherwise.
+
+ The 'Difficulty' parameter directly impacts the network's security and integrity by setting the computational effort required for validating transactions and participating in the network's consensus mechanism.
+ """
+ call = self._get_hyperparameter(
+ param_name="Difficulty", netuid=netuid, block=block
+ )
+ if call is None:
+ return None
+ return int(call)
+
+ def recycle(self, netuid: int, block: Optional[int] = None) -> Optional["Balance"]:
+ """
+ Retrieves the 'Burn' hyperparameter for a specified subnet. The 'Burn' parameter represents the amount of Tao that is effectively recycled within the Bittensor network.
+
+ Args:
+ netuid (int): The unique identifier of the subnet.
+ block (Optional[int]): The blockchain block number for the query.
+
+ Returns:
+ Optional[Balance]: The value of the 'Burn' hyperparameter if the subnet exists, None otherwise.
+
+ Understanding the 'Burn' rate is essential for analyzing the network registration usage, particularly how it is correlated with user activity and the overall cost of participation in a given subnet.
+ """
+ call = self._get_hyperparameter(param_name="Burn", netuid=netuid, block=block)
+ return None if call is None else Balance.from_rao(int(call))
+
+ def get_delegate_take(
+ self, hotkey_ss58: str, block: Optional[int] = None
+ ) -> Optional[float]:
+ """
+ Retrieves the delegate 'take' percentage for a neuron identified by its hotkey. The 'take' represents the percentage of rewards that the delegate claims from its nominators' stakes.
+
+ Args:
+ hotkey_ss58 (str): The ``SS58`` address of the neuron's hotkey.
+ block (Optional[int]): The blockchain block number for the query.
+
+ Returns:
+ Optional[float]: The delegate take percentage, None if not available.
+
+ The delegate take is a critical parameter in the network's incentive structure, influencing the distribution of rewards among neurons and their nominators.
+ """
+ _result = self.query_subtensor("Delegates", block, [hotkey_ss58])
+ return (
+ None
+ if getattr(_result, "value", None) is None
+ else u16_normalized_float(_result.value)
+ )
+
+ @networking.ensure_connected
+ def get_delegate_by_hotkey(
+ self, hotkey_ss58: str, block: Optional[int] = None
+ ) -> Optional[DelegateInfo]:
+ """
+ Retrieves detailed information about a delegate neuron based on its hotkey. This function provides a comprehensive view of the delegate's status, including its stakes, nominators, and reward distribution.
+
+ Args:
+ hotkey_ss58 (str): The ``SS58`` address of the delegate's hotkey.
+ block (Optional[int]): The blockchain block number for the query. Default is ``None``.
+
+ Returns:
+ Optional[DelegateInfo]: Detailed information about the delegate neuron, ``None`` if not found.
+
+ This function is essential for understanding the roles and influence of delegate neurons within the Bittensor network's consensus and governance structures.
+ """
+
+ @retry(delay=1, tries=3, backoff=2, max_delay=4)
+ def make_substrate_call_with_retry(encoded_hotkey_: list[int]):
+ block_hash = None if block is None else self.substrate.get_block_hash(block)
+
+ return self.substrate.rpc_request(
+ method="delegateInfo_getDelegate", # custom rpc method
+ params=(
+ [encoded_hotkey_, block_hash] if block_hash else [encoded_hotkey_]
+ ),
+ )
+
+ encoded_hotkey = ss58_to_vec_u8(hotkey_ss58)
+ json_body = make_substrate_call_with_retry(encoded_hotkey)
+
+ if not (result := json_body.get("result", None)):
+ return None
+
+ return DelegateInfo.from_vec_u8(result)
+
# Subnet 27 uses this method name
_do_serve_axon = do_serve_axon
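The new `Subtensor` registration helpers wrap the corresponding extrinsics; a brief sketch (the network, wallet, and netuid are illustrative):

```python
from bittensor_wallet import Wallet
from bittensor.core.subtensor import Subtensor

subtensor = Subtensor(network="test")  # assumption: test network
wallet = Wallet(name="miner")          # hypothetical wallet name

cost = subtensor.get_subnet_burn_cost()  # lock cost for registering a new subnet
burn = subtensor.recycle(netuid=1)       # current Burn hyperparameter as a Balance
ok = subtensor.burned_register(wallet=wallet, netuid=1, wait_for_finalization=True)
```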
diff --git a/bittensor/utils/__init__.py b/bittensor/utils/__init__.py
index 6239d89808..745726c264 100644
--- a/bittensor/utils/__init__.py
+++ b/bittensor/utils/__init__.py
@@ -15,8 +15,10 @@
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
+from urllib.parse import urlparse
+import ast
import hashlib
-from typing import Literal, Union, Optional, TYPE_CHECKING
+from typing import Any, Literal, Union, Optional, TYPE_CHECKING
import scalecodec
from bittensor_wallet import Keypair
@@ -28,6 +30,7 @@
from .version import version_checking, check_version, VersionCheckError
if TYPE_CHECKING:
+ from bittensor.utils.async_substrate_interface import AsyncSubstrateInterface
from substrateinterface import SubstrateInterface
RAOPERTAO = 1e9
@@ -142,14 +145,16 @@ def get_hash(content, encoding="utf-8"):
def format_error_message(
- error_message: dict, substrate: "SubstrateInterface" = None
+ error_message: Union[dict, Exception],
+ substrate: Union["AsyncSubstrateInterface", "SubstrateInterface"],
) -> str:
"""
Formats an error message from the Subtensor error information for use in extrinsics.
Args:
- error_message (dict): A dictionary containing the error information from Subtensor.
- substrate (SubstrateInterface, optional): The substrate interface to use.
+ error_message: A dictionary containing the error information from Subtensor, or a SubstrateRequestException
+ containing dictionary literal args.
+ substrate: The initialised SubstrateInterface object to use.
Returns:
str: A formatted error message string.
@@ -158,6 +163,27 @@ def format_error_message(
err_type = "UnknownType"
err_description = "Unknown Description"
+ if isinstance(error_message, Exception):
+ # generally gotten through SubstrateRequestException args
+ new_error_message = None
+ for arg in error_message.args:
+ try:
+ d = ast.literal_eval(arg)
+ if isinstance(d, dict):
+ if "error" in d:
+ new_error_message = d["error"]
+ break
+ elif all(x in d for x in ["code", "message", "data"]):
+ new_error_message = d
+ break
+ except (ValueError, SyntaxError):
+ pass
+ if new_error_message is None:
+ return_val = " ".join(error_message.args)
+ return f"Subtensor returned: {return_val}"
+ else:
+ error_message = new_error_message
+
if isinstance(error_message, dict):
# subtensor error structure
if (
@@ -166,14 +192,11 @@ def format_error_message(
and error_message.get("data")
):
err_name = "SubstrateRequestException"
- err_type = error_message.get("message")
- err_data = error_message.get("data")
+ err_type = error_message.get("message", "")
+ err_data = error_message.get("data", "")
# subtensor custom error marker
if err_data.startswith("Custom error:") and substrate:
- if not substrate.metadata:
- substrate.get_metadata()
-
if substrate.metadata:
try:
pallet = substrate.metadata.get_metadata_pallet(
@@ -185,8 +208,10 @@ def format_error_message(
err_type = error_dict.get("message", err_type)
err_docs = error_dict.get("docs", [])
err_description = err_docs[0] if err_docs else err_description
- except Exception:
- logging.error("Substrate pallets data unavailable.")
+ except (AttributeError, IndexError):
+ logging.error(
+ "Substrate pallets data unavailable. This is usually caused by an uninitialized substrate."
+ )
else:
err_description = err_data
@@ -277,3 +302,71 @@ def is_valid_bittensor_address_or_public_key(address: Union[str, bytes]) -> bool
else:
# Invalid address type
return False
+
+
+def decode_hex_identity_dict(info_dictionary) -> dict[str, Any]:
+ """
+ Decodes hex-encoded strings in a dictionary.
+
+ This function traverses the given dictionary, identifies hex-encoded strings, and decodes them into readable strings. It handles nested dictionaries and lists within the dictionary.
+
+ Args:
+ info_dictionary (dict): The dictionary containing hex-encoded strings to decode.
+
+ Returns:
+ dict: The dictionary with decoded strings.
+
+ Examples:
+ >>> input_dict = {
+ ... "name": {"value": "0x6a6f686e"},
+ ... "additional": [
+ ... [{"data": "0x64617461"}]
+ ... ]
+ ... }
+ >>> decode_hex_identity_dict(input_dict)
+ {'name': 'john', 'additional': [('data', 'data')]}
+ """
+
+ def get_decoded(data: str) -> str:
+ """Decodes a hex-encoded string."""
+ try:
+ return bytes.fromhex(data[2:]).decode()
+ except UnicodeDecodeError:
+ print(f"Could not decode: {key}: {item}")
+
+ for key, value in info_dictionary.items():
+ if isinstance(value, dict):
+ item = list(value.values())[0]
+ if isinstance(item, str) and item.startswith("0x"):
+ try:
+ info_dictionary[key] = get_decoded(item)
+ except UnicodeDecodeError:
+ print(f"Could not decode: {key}: {item}")
+ else:
+ info_dictionary[key] = item
+ if key == "additional":
+ additional = []
+ for item in value:
+ additional.append(
+ tuple(
+ get_decoded(data=next(iter(sub_item.values())))
+ for sub_item in item
+ )
+ )
+ info_dictionary[key] = additional
+
+ return info_dictionary
+
+
+def validate_chain_endpoint(endpoint_url: str) -> tuple[bool, str]:
+ """Validates if the provided endpoint URL is a valid WebSocket URL."""
+ parsed = urlparse(endpoint_url)
+ if parsed.scheme not in ("ws", "wss"):
+ return False, (
+ f"Invalid URL or network name provided: [bright_cyan]({endpoint_url})[/bright_cyan].\n"
+ "Allowed network names are [bright_cyan]finney, test, local[/bright_cyan]. "
+ "Valid chain endpoints should use the scheme [bright_cyan]`ws` or `wss`[/bright_cyan].\n"
+ )
+ if not parsed.netloc:
+ return False, "Invalid URL passed as the endpoint"
+ return True, ""
diff --git a/bittensor/utils/async_substrate_interface.py b/bittensor/utils/async_substrate_interface.py
new file mode 100644
index 0000000000..de0547e7b5
--- /dev/null
+++ b/bittensor/utils/async_substrate_interface.py
@@ -0,0 +1,2742 @@
+import asyncio
+import json
+import random
+from collections import defaultdict
+from dataclasses import dataclass
+from hashlib import blake2b
+from typing import Optional, Any, Union, Callable, Awaitable, cast
+
+import websockets
+from async_property import async_property
+from bittensor_wallet import Keypair
+from bt_decode import PortableRegistry, decode as decode_by_type_string, MetadataV15
+from scalecodec import GenericExtrinsic
+from scalecodec.base import ScaleBytes, ScaleType, RuntimeConfigurationObject
+from scalecodec.type_registry import load_type_registry_preset
+from scalecodec.types import GenericCall
+from substrateinterface.exceptions import (
+ SubstrateRequestException,
+ ExtrinsicNotFound,
+ BlockNotFound,
+)
+from substrateinterface.storage import StorageKey
+
+ResultHandler = Callable[[dict, Any], Awaitable[tuple[dict, bool]]]
+
+
+class TimeoutException(Exception):
+ pass
+
+
+def timeout_handler(signum, frame):
+ raise TimeoutException("Operation timed out")
+
+
+class ExtrinsicReceipt:
+ """
+ Object containing information about a submitted extrinsic. The hash of the block in which the extrinsic
+ is included is required when retrieving triggered events or determining whether the extrinsic succeeded.
+ """
+
+ def __init__(
+ self,
+ substrate: "AsyncSubstrateInterface",
+ extrinsic_hash: Optional[str] = None,
+ block_hash: Optional[str] = None,
+ block_number: Optional[int] = None,
+ extrinsic_idx: Optional[int] = None,
+ finalized=None,
+ ):
+ """
+ Object containing information about a submitted extrinsic. The hash of the block in which the extrinsic
+ is included is required when retrieving triggered events or determining whether the extrinsic succeeded.
+
+ Parameters
+ ----------
+ substrate: the AsyncSubstrateInterface instance used for chain queries
+ extrinsic_hash: hash of the submitted extrinsic
+ block_hash: hash of the block in which the extrinsic was included
+ block_number: number of the block in which the extrinsic was included
+ extrinsic_idx: index of the extrinsic within the block
+ finalized: whether the containing block is finalized
+ """
+ self.substrate = substrate
+ self.extrinsic_hash = extrinsic_hash
+ self.block_hash = block_hash
+ self.block_number = block_number
+ self.finalized = finalized
+
+ self.__extrinsic_idx = extrinsic_idx
+ self.__extrinsic = None
+
+ self.__triggered_events: Optional[list] = None
+ self.__is_success: Optional[bool] = None
+ self.__error_message = None
+ self.__weight = None
+ self.__total_fee_amount = None
+
+ async def get_extrinsic_identifier(self) -> str:
+ """
+ Returns the on-chain identifier for this extrinsic in format "[block_number]-[extrinsic_idx]" e.g. 134324-2
+ Returns
+ -------
+ str
+ """
+ if self.block_number is None:
+ if self.block_hash is None:
+ raise ValueError(
+ "Cannot create extrinsic identifier: block_hash is not set"
+ )
+
+ self.block_number = await self.substrate.get_block_number(self.block_hash)
+
+ if self.block_number is None:
+ raise ValueError(
+ "Cannot create extrinsic identifier: unknown block_hash"
+ )
+
+ return f"{self.block_number}-{await self.extrinsic_idx}"
+
+ async def retrieve_extrinsic(self):
+ if not self.block_hash:
+ raise ValueError(
+ "ExtrinsicReceipt can't retrieve events because it's unknown which block_hash it is "
+ "included, manually set block_hash or use `wait_for_inclusion` when sending extrinsic"
+ )
+ # Determine extrinsic idx
+
+ block = await self.substrate.get_block(block_hash=self.block_hash)
+
+ extrinsics = block["extrinsics"]
+
+ if len(extrinsics) > 0:
+ if self.__extrinsic_idx is None:
+ self.__extrinsic_idx = self.__get_extrinsic_index(
+ block_extrinsics=extrinsics, extrinsic_hash=self.extrinsic_hash
+ )
+
+ if self.__extrinsic_idx >= len(extrinsics):
+ raise ExtrinsicNotFound()
+
+ self.__extrinsic = extrinsics[self.__extrinsic_idx]
+
+ @async_property
+ async def extrinsic_idx(self) -> int:
+ """
+ Retrieves the index of this extrinsic in containing block
+
+ Returns
+ -------
+ int
+ """
+ if self.__extrinsic_idx is None:
+ await self.retrieve_extrinsic()
+ return self.__extrinsic_idx
+
+ @async_property
+ async def triggered_events(self) -> list:
+ """
+ Gets the events triggered by the submitted extrinsic. The block_hash in which the extrinsic was included
+ is required; manually set block_hash or use `wait_for_inclusion` when submitting the extrinsic.
+
+ Returns
+ -------
+ list
+ """
+ if self.__triggered_events is None:
+ if not self.block_hash:
+ raise ValueError(
+ "ExtrinsicReceipt can't retrieve events because it's unknown which block_hash it is "
+ "included, manually set block_hash or use `wait_for_inclusion` when sending extrinsic"
+ )
+
+ if await self.extrinsic_idx is None:
+ await self.retrieve_extrinsic()
+
+ self.__triggered_events = []
+
+ for event in await self.substrate.get_events(block_hash=self.block_hash):
+ if event["extrinsic_idx"] == await self.extrinsic_idx:
+ self.__triggered_events.append(event)
+
+ return cast(list, self.__triggered_events)
+
+ async def process_events(self):
+ if await self.triggered_events:
+ self.__total_fee_amount = 0
+
+ # Process fees
+ has_transaction_fee_paid_event = False
+
+ for event in await self.triggered_events:
+ if (
+ event["event"]["module_id"] == "TransactionPayment"
+ and event["event"]["event_id"] == "TransactionFeePaid"
+ ):
+ self.__total_fee_amount = event["event"]["attributes"]["actual_fee"]
+ has_transaction_fee_paid_event = True
+
+ # Process other events
+ for event in await self.triggered_events:
+ # Check events
+ if (
+ event["event"]["module_id"] == "System"
+ and event["event"]["event_id"] == "ExtrinsicSuccess"
+ ):
+ self.__is_success = True
+ self.__error_message = None
+
+ if "dispatch_info" in event["event"]["attributes"]:
+ self.__weight = event["event"]["attributes"]["dispatch_info"][
+ "weight"
+ ]
+ else:
+ # Backwards compatibility
+ self.__weight = event["event"]["attributes"]["weight"]
+
+ elif (
+ event["event"]["module_id"] == "System"
+ and event["event"]["event_id"] == "ExtrinsicFailed"
+ ):
+ self.__is_success = False
+
+ dispatch_info = event["event"]["attributes"]["dispatch_info"]
+ dispatch_error = event["event"]["attributes"]["dispatch_error"]
+
+ self.__weight = dispatch_info["weight"]
+
+ if "Module" in dispatch_error:
+ module_index = dispatch_error["Module"][0]["index"]
+ error_index = int.from_bytes(
+ bytes(dispatch_error["Module"][0]["error"]),
+ byteorder="little",
+ signed=False,
+ )
+
+ if isinstance(error_index, str):
+ # Actual error index is first u8 in new [u8; 4] format
+ error_index = int(error_index[2:4], 16)
+ module_error = self.substrate.metadata.get_module_error(
+ module_index=module_index, error_index=error_index
+ )
+ self.__error_message = {
+ "type": "Module",
+ "name": module_error.name,
+ "docs": module_error.docs,
+ }
+ elif "BadOrigin" in dispatch_error:
+ self.__error_message = {
+ "type": "System",
+ "name": "BadOrigin",
+ "docs": "Bad origin",
+ }
+ elif "CannotLookup" in dispatch_error:
+ self.__error_message = {
+ "type": "System",
+ "name": "CannotLookup",
+ "docs": "Cannot lookup",
+ }
+ elif "Other" in dispatch_error:
+ self.__error_message = {
+ "type": "System",
+ "name": "Other",
+ "docs": "Unspecified error occurred",
+ }
+
+ elif not has_transaction_fee_paid_event:
+ if (
+ event["event"]["module_id"] == "Treasury"
+ and event["event"]["event_id"] == "Deposit"
+ ):
+ self.__total_fee_amount += event["event"]["attributes"]["value"]
+ elif (
+ event["event"]["module_id"] == "Balances"
+ and event["event"]["event_id"] == "Deposit"
+ ):
+                        self.__total_fee_amount += event["event"]["attributes"]["amount"]
+
+ @async_property
+ async def is_success(self) -> bool:
+ """
+        Returns `True` if the `ExtrinsicSuccess` event was triggered, `False` in case of `ExtrinsicFailed`.
+        When `False`, `error_message` will contain more details about the error
+
+ Returns
+ -------
+ bool
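+
+        Example (illustrative sketch; assumes `receipt` was obtained with `wait_for_inclusion`):
+
+        ```
+        if await receipt.is_success:
+            print(await receipt.total_fee_amount)
+        else:
+            print(await receipt.error_message)
+        ```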
+ """
+ if self.__is_success is None:
+ await self.process_events()
+
+ return cast(bool, self.__is_success)
+
+ @async_property
+ async def error_message(self) -> Optional[dict]:
+ """
+        Returns the error message if the extrinsic failed, in a format such as:
+
+ `{'type': 'System', 'name': 'BadOrigin', 'docs': 'Bad origin'}`
+
+ Returns
+ -------
+ dict
+ """
+ if self.__error_message is None:
+ if await self.is_success:
+ return None
+ await self.process_events()
+ return self.__error_message
+
+ @async_property
+ async def weight(self) -> Union[int, dict]:
+ """
+ Contains the actual weight when executing this extrinsic
+
+ Returns
+ -------
+ int (WeightV1) or dict (WeightV2)
+ """
+ if self.__weight is None:
+ await self.process_events()
+ return self.__weight
+
+ @async_property
+ async def total_fee_amount(self) -> int:
+ """
+        Contains the total fee costs deducted when executing this extrinsic. This includes the fee for the validator
+        (`Balances.Deposit` event) and the fee deposited for the treasury (`Treasury.Deposit` event)
+
+ Returns
+ -------
+ int
+ """
+ if self.__total_fee_amount is None:
+ await self.process_events()
+ return cast(int, self.__total_fee_amount)
+
+ # Helper functions
+ @staticmethod
+ def __get_extrinsic_index(block_extrinsics: list, extrinsic_hash: str) -> int:
+ """
+ Returns the index of a provided extrinsic
+ """
+ for idx, extrinsic in enumerate(block_extrinsics):
+ if (
+ extrinsic.extrinsic_hash
+ and f"0x{extrinsic.extrinsic_hash.hex()}" == extrinsic_hash
+ ):
+ return idx
+ raise ExtrinsicNotFound()
+
+ # Backwards compatibility methods
+ def __getitem__(self, item):
+ return getattr(self, item)
+
+ def __iter__(self):
+ for item in self.__dict__.items():
+ yield item
+
+ def get(self, name):
+ return self[name]
+
+
+class QueryMapResult:
+ def __init__(
+ self,
+ records: list,
+ page_size: int,
+ substrate: "AsyncSubstrateInterface",
+ module: Optional[str] = None,
+ storage_function: Optional[str] = None,
+ params: Optional[list] = None,
+ block_hash: Optional[str] = None,
+ last_key: Optional[str] = None,
+ max_results: Optional[int] = None,
+ ignore_decoding_errors: bool = False,
+ ):
+ self.records = records
+ self.page_size = page_size
+ self.module = module
+ self.storage_function = storage_function
+ self.block_hash = block_hash
+ self.substrate = substrate
+ self.last_key = last_key
+ self.max_results = max_results
+ self.params = params
+ self.ignore_decoding_errors = ignore_decoding_errors
+ self.loading_complete = False
+ self._buffer = iter(self.records) # Initialize the buffer with initial records
+
+ async def retrieve_next_page(self, start_key) -> list:
+ result = await self.substrate.query_map(
+ module=self.module,
+ storage_function=self.storage_function,
+ params=self.params,
+ page_size=self.page_size,
+ block_hash=self.block_hash,
+ start_key=start_key,
+ max_results=self.max_results,
+ ignore_decoding_errors=self.ignore_decoding_errors,
+ )
+
+ # Update last key from new result set to use as offset for next page
+ self.last_key = result.last_key
+ return result.records
+
+ def __aiter__(self):
+ return self
+
+ async def __anext__(self):
+ try:
+ # Try to get the next record from the buffer
+ return next(self._buffer)
+ except StopIteration:
+ # If no more records in the buffer, try to fetch the next page
+ if self.loading_complete:
+ raise StopAsyncIteration
+
+ next_page = await self.retrieve_next_page(self.last_key)
+ if not next_page:
+ self.loading_complete = True
+ raise StopAsyncIteration
+
+ # Update the buffer with the newly fetched records
+ self._buffer = iter(next_page)
+ return next(self._buffer)
+
+ def __getitem__(self, item):
+ return self.records[item]
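+
+# Usage sketch (illustrative): a QueryMapResult is returned by
+# `AsyncSubstrateInterface.query_map` (referenced in `retrieve_next_page` above)
+# and is consumed with `async for`, which transparently fetches further pages
+# once the initial buffer is exhausted:
+#
+#   result = await substrate.query_map(module="System", storage_function="Account")
+#   async for record in result:
+#       ...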
+
+
+@dataclass
+class Preprocessed:
+ queryable: str
+ method: str
+ params: list
+ value_scale_type: str
+ storage_item: ScaleType
+
+
+class RuntimeCache:
+ blocks: dict[int, "Runtime"]
+ block_hashes: dict[str, "Runtime"]
+
+ def __init__(self):
+ self.blocks = {}
+ self.block_hashes = {}
+
+ def add_item(
+ self, block: Optional[int], block_hash: Optional[str], runtime: "Runtime"
+ ):
+ if block is not None:
+ self.blocks[block] = runtime
+ if block_hash is not None:
+ self.block_hashes[block_hash] = runtime
+
+ def retrieve(
+ self, block: Optional[int] = None, block_hash: Optional[str] = None
+ ) -> Optional["Runtime"]:
+ if block is not None:
+ return self.blocks.get(block)
+ elif block_hash is not None:
+ return self.block_hashes.get(block_hash)
+ else:
+ return None
+
+
+class Runtime:
+ block_hash: str
+ block_id: int
+ runtime_version = None
+ transaction_version = None
+ cache_region = None
+ metadata = None
+ type_registry_preset = None
+
+ def __init__(self, chain, runtime_config, metadata, type_registry):
+ self.runtime_config = RuntimeConfigurationObject()
+ self.config = {}
+ self.chain = chain
+ self.type_registry = type_registry
+ self.runtime_config = runtime_config
+ self.metadata = metadata
+
+ @property
+ def implements_scaleinfo(self) -> bool:
+ """
+        Returns True if the current runtime implements a `PortableRegistry` (`MetadataV14` and higher)
+ """
+ if self.metadata:
+ return self.metadata.portable_registry is not None
+ else:
+ return False
+
+ def reload_type_registry(
+ self, use_remote_preset: bool = True, auto_discover: bool = True
+ ):
+ """
+        Reload the type registry and preset used to instantiate the SubstrateInterface object. Useful to periodically
+        apply changes in type definitions when a runtime upgrade occurred
+
+        Parameters
+        ----------
+        use_remote_preset: When True the preset is downloaded from Github master, otherwise files from the locally
+            installed scalecodec package are used
+        auto_discover: When True the type registry preset is auto discovered from the chain name
+ """
+ self.runtime_config.clear_type_registry()
+
+ self.runtime_config.implements_scale_info = self.implements_scaleinfo
+
+ # Load metadata types in runtime configuration
+ self.runtime_config.update_type_registry(load_type_registry_preset(name="core"))
+ self.apply_type_registry_presets(
+ use_remote_preset=use_remote_preset, auto_discover=auto_discover
+ )
+
+ def apply_type_registry_presets(
+ self,
+ use_remote_preset: bool = True,
+ auto_discover: bool = True,
+ ):
+ """
+ Applies type registry presets to the runtime
+        :param use_remote_preset: bool, whether to download presets from the remote Github master
+        :param auto_discover: bool, whether to auto discover the type registry preset from the chain name
+ """
+ if self.type_registry_preset is not None:
+ # Load type registry according to preset
+ type_registry_preset_dict = load_type_registry_preset(
+ name=self.type_registry_preset, use_remote_preset=use_remote_preset
+ )
+
+ if not type_registry_preset_dict:
+ raise ValueError(
+ f"Type registry preset '{self.type_registry_preset}' not found"
+ )
+
+ elif auto_discover:
+ # Try to auto discover type registry preset by chain name
+ type_registry_name = self.chain.lower().replace(" ", "-")
+ try:
+ type_registry_preset_dict = load_type_registry_preset(
+ type_registry_name
+ )
+ self.type_registry_preset = type_registry_name
+ except ValueError:
+ type_registry_preset_dict = None
+
+ else:
+ type_registry_preset_dict = None
+
+ if type_registry_preset_dict:
+ # Load type registries in runtime configuration
+ if self.implements_scaleinfo is False:
+ # Only runtime with no embedded types in metadata need the default set of explicit defined types
+ self.runtime_config.update_type_registry(
+ load_type_registry_preset(
+ "legacy", use_remote_preset=use_remote_preset
+ )
+ )
+
+ if self.type_registry_preset != "legacy":
+ self.runtime_config.update_type_registry(type_registry_preset_dict)
+
+ if self.type_registry:
+ # Load type registries in runtime configuration
+ self.runtime_config.update_type_registry(self.type_registry)
+
+
+class RequestManager:
+ RequestResults = dict[Union[str, int], list[Union[ScaleType, dict]]]
+
+ def __init__(self, payloads):
+ self.response_map = {}
+ self.responses = defaultdict(lambda: {"complete": False, "results": []})
+ self.payloads_count = len(payloads)
+
+ def add_request(self, item_id: int, request_id: Any):
+ """
+ Adds an outgoing request to the responses map for later retrieval
+ """
+ self.response_map[item_id] = request_id
+
+ def overwrite_request(self, item_id: int, request_id: Any):
+ """
+ Overwrites an existing request in the responses map with a new request_id. This is used
+ for multipart responses that generate a subscription id we need to watch, rather than the initial
+ request_id.
+ """
+ self.response_map[request_id] = self.response_map.pop(item_id)
+ return request_id
+
+ def add_response(self, item_id: int, response: dict, complete: bool):
+ """
+ Maps a response to the request for later retrieval
+ """
+ request_id = self.response_map[item_id]
+ self.responses[request_id]["results"].append(response)
+ self.responses[request_id]["complete"] = complete
+
+ @property
+ def is_complete(self) -> bool:
+ """
+ Returns whether all requests in the manager have completed
+ """
+ return (
+ all(info["complete"] for info in self.responses.values())
+ and len(self.responses) == self.payloads_count
+ )
+
+ def get_results(self) -> RequestResults:
+ """
+ Generates a dictionary mapping the requests initiated to the responses received.
+ """
+ return {
+ request_id: info["results"] for request_id, info in self.responses.items()
+ }
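+
+# Usage sketch (illustrative): RequestManager is internal plumbing for
+# `AsyncSubstrateInterface._make_rpc_request` (defined below). Requests are
+# registered before sending, responses are accumulated as they arrive, and
+# `is_complete` reports when every payload has finished:
+#
+#   manager = RequestManager(payloads)
+#   manager.add_request(item_id, payload_id)
+#   manager.add_response(item_id, response, complete=True)
+#   if manager.is_complete:
+#       results = manager.get_results()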
+
+
+class Websocket:
+ def __init__(
+ self,
+ ws_url: str,
+ max_subscriptions=1024,
+ max_connections=100,
+ shutdown_timer=5,
+ options: Optional[dict] = None,
+ ):
+ """
+ Websocket manager object. Allows for the use of a single websocket connection by multiple
+ calls.
+
+        :param ws_url: Websocket URL to connect to
+        :param max_subscriptions: Maximum number of subscriptions per websocket connection
+        :param max_connections: Maximum number of connections total
+        :param shutdown_timer: Number of seconds to shut down websocket connection after last use
+        :param options: Optional dict of connection options passed through to `websockets.connect`
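+
+        Example (illustrative sketch; the URL is a placeholder):
+
+        ```
+        async with Websocket("ws://127.0.0.1:9944") as ws:
+            item_id = await ws.send(
+                {"jsonrpc": "2.0", "method": "system_chain", "params": []}
+            )
+            response = await ws.retrieve(item_id)
+        ```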
+ """
+ # TODO allow setting max concurrent connections and rpc subscriptions per connection
+ # TODO reconnection logic
+ self.ws_url = ws_url
+ self.ws: Optional[websockets.WebSocketClientProtocol] = None
+ self.id = 0
+ self.max_subscriptions = max_subscriptions
+ self.max_connections = max_connections
+ self.shutdown_timer = shutdown_timer
+ self._received = {}
+ self._in_use = 0
+ self._receiving_task = None
+ self._attempts = 0
+ self._initialized = False
+ self._lock = asyncio.Lock()
+ self._exit_task = None
+ self._open_subscriptions = 0
+ self._options = options if options else {}
+
+ async def __aenter__(self):
+ async with self._lock:
+ self._in_use += 1
+ if self._exit_task:
+ self._exit_task.cancel()
+ if not self._initialized:
+ self._initialized = True
+ await self._connect()
+ self._receiving_task = asyncio.create_task(self._start_receiving())
+ return self
+
+ async def _connect(self):
+ self.ws = await asyncio.wait_for(
+ websockets.connect(self.ws_url, **self._options), timeout=10
+ )
+
+ async def __aexit__(self, exc_type, exc_val, exc_tb):
+ async with self._lock:
+ self._in_use -= 1
+ if self._exit_task is not None:
+ self._exit_task.cancel()
+ try:
+ await self._exit_task
+ except asyncio.CancelledError:
+ pass
+ if self._in_use == 0 and self.ws is not None:
+ self.id = 0
+ self._open_subscriptions = 0
+ self._exit_task = asyncio.create_task(self._exit_with_timer())
+
+ async def _exit_with_timer(self):
+ """
+ Allows for graceful shutdown of websocket connection after specified number of seconds, allowing
+ for reuse of the websocket connection.
+ """
+ try:
+ await asyncio.sleep(self.shutdown_timer)
+ await self.shutdown()
+ except asyncio.CancelledError:
+ pass
+
+ async def shutdown(self):
+ async with self._lock:
+ try:
+ self._receiving_task.cancel()
+ await self._receiving_task
+ await self.ws.close()
+ except (AttributeError, asyncio.CancelledError):
+ pass
+ self.ws = None
+ self._initialized = False
+ self._receiving_task = None
+ self.id = 0
+
+ async def _recv(self) -> None:
+ try:
+ response = json.loads(
+ await cast(websockets.WebSocketClientProtocol, self.ws).recv()
+ )
+ async with self._lock:
+ self._open_subscriptions -= 1
+ if "id" in response:
+ self._received[response["id"]] = response
+ elif "params" in response:
+ self._received[response["params"]["subscription"]] = response
+ else:
+ raise KeyError(response)
+ except websockets.ConnectionClosed:
+ raise
+ except KeyError as e:
+ raise e
+
+ async def _start_receiving(self):
+ try:
+ while True:
+ await self._recv()
+ except asyncio.CancelledError:
+ pass
+ except websockets.ConnectionClosed:
+ # TODO try reconnect, but only if it's needed
+ raise
+
+ async def send(self, payload: dict) -> int:
+ """
+ Sends a payload to the websocket connection.
+
+ :param payload: payload, generate a payload with the AsyncSubstrateInterface.make_payload method
+ """
+ async with self._lock:
+ original_id = self.id
+ self.id += 1
+ self._open_subscriptions += 1
+ try:
+ await self.ws.send(json.dumps({**payload, **{"id": original_id}}))
+ return original_id
+ except websockets.ConnectionClosed:
+ raise
+
+ async def retrieve(self, item_id: int) -> Optional[dict]:
+ """
+ Retrieves a single item from received responses dict queue
+
+ :param item_id: id of the item to retrieve
+
+ :return: retrieved item
+ """
+ while True:
+ async with self._lock:
+ if item_id in self._received:
+ return self._received.pop(item_id)
+ await asyncio.sleep(0.1)
+
+
+class AsyncSubstrateInterface:
+ runtime = None
+ registry: Optional[PortableRegistry] = None
+
+ def __init__(
+ self,
+ chain_endpoint: str,
+ use_remote_preset=False,
+ auto_discover=True,
+ auto_reconnect=True,
+ ss58_format=None,
+ type_registry=None,
+ chain_name=None,
+ ):
+ """
+ The asyncio-compatible version of the subtensor interface commands we use in bittensor
+ """
+ self.chain_endpoint = chain_endpoint
+ self.__chain = chain_name
+ self.ws = Websocket(
+ chain_endpoint,
+ options={
+ "max_size": 2**32,
+ "read_limit": 2**16,
+ "write_limit": 2**16,
+ },
+ )
+ self._lock = asyncio.Lock()
+ self.last_block_hash: Optional[str] = None
+ self.config = {
+ "use_remote_preset": use_remote_preset,
+ "auto_discover": auto_discover,
+ "auto_reconnect": auto_reconnect,
+ "rpc_methods": None,
+ "strict_scale_decode": True,
+ }
+ self.initialized = False
+ self._forgettable_task = None
+ self.ss58_format = ss58_format
+ self.type_registry = type_registry
+ self.runtime_cache = RuntimeCache()
+ self.block_id: Optional[int] = None
+ self.runtime_version = None
+ self.runtime_config = RuntimeConfigurationObject()
+ self.__metadata_cache = {}
+ self.type_registry_preset = None
+ self.transaction_version = None
+ self.metadata = None
+ self.metadata_version_hex = "0x0f000000" # v15
+
+ async def __aenter__(self):
+ await self.initialize()
+
+ async def initialize(self):
+ """
+ Initialize the connection to the chain.
+ """
+ async with self._lock:
+ if not self.initialized:
+ if not self.__chain:
+ chain = await self.rpc_request("system_chain", [])
+ self.__chain = chain.get("result")
+ self.reload_type_registry()
+ await asyncio.gather(self.load_registry(), self.init_runtime(None))
+ self.initialized = True
+
+ async def __aexit__(self, exc_type, exc_val, exc_tb):
+ pass
+
+ @property
+ def chain(self):
+ """
+ Returns the substrate chain currently associated with object
+ """
+ return self.__chain
+
+ async def get_storage_item(self, module: str, storage_function: str):
+ if not self.metadata:
+ await self.init_runtime()
+ metadata_pallet = self.metadata.get_metadata_pallet(module)
+ storage_item = metadata_pallet.get_storage_function(storage_function)
+ return storage_item
+
+ async def _get_current_block_hash(
+ self, block_hash: Optional[str], reuse: bool
+ ) -> Optional[str]:
+ if block_hash:
+ self.last_block_hash = block_hash
+ return block_hash
+ elif reuse:
+ if self.last_block_hash:
+ return self.last_block_hash
+ return block_hash
+
+ async def load_registry(self):
+ metadata_rpc_result = await self.rpc_request(
+ "state_call",
+ ["Metadata_metadata_at_version", self.metadata_version_hex],
+ )
+ metadata_option_hex_str = metadata_rpc_result["result"]
+ metadata_option_bytes = bytes.fromhex(metadata_option_hex_str[2:])
+ metadata_v15 = MetadataV15.decode_from_metadata_option(metadata_option_bytes)
+ self.registry = PortableRegistry.from_metadata_v15(metadata_v15)
+
+ async def decode_scale(
+ self, type_string, scale_bytes: bytes, return_scale_obj=False
+ ):
+ """
+        Helper function to decode arbitrary SCALE-bytes (e.g. 0x02000000) according to the given RUST type_string
+        (e.g. BlockNumber)
+
+        Parameters
+        ----------
+        type_string: RUST type string of the value to decode
+        scale_bytes: the SCALE-bytes to decode
+        return_scale_obj: if True the SCALE object itself is returned, otherwise the serialized dict value of the object
+
+        Returns
+        -------
+        The decoded object
+
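+        Example (illustrative sketch; assumes the registry has been loaded via `load_registry`):
+
+        ```
+        block_number = await substrate.decode_scale("u32", bytes.fromhex("02000000"))
+        ```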
+ """
+ if scale_bytes == b"\x00":
+ obj = None
+ else:
+ obj = decode_by_type_string(type_string, self.registry, scale_bytes)
+ return obj
+
+ async def init_runtime(
+ self, block_hash: Optional[str] = None, block_id: Optional[int] = None
+ ) -> Runtime:
+ """
+        This method is used by all other methods that deal with metadata and types defined in the type registry.
+ It optionally retrieves the block_hash when block_id is given and sets the applicable metadata for that
+ block_hash. Also, it applies all the versioned types at the time of the block_hash.
+
+ Because parsing of metadata and type registry is quite heavy, the result will be cached per runtime id.
+ In the future there could be support for caching backends like Redis to make this cache more persistent.
+
+ :param block_hash: optional block hash, should not be specified if block_id is
+ :param block_id: optional block id, should not be specified if block_hash is
+
+ :returns: Runtime object
+ """
+
+ async def get_runtime(block_hash, block_id) -> Runtime:
+ # Check if runtime state already set to current block
+ if (block_hash and block_hash == self.last_block_hash) or (
+ block_id and block_id == self.block_id
+ ):
+ return Runtime(
+ self.chain,
+ self.runtime_config,
+ self.metadata,
+ self.type_registry,
+ )
+
+ if block_id is not None:
+ block_hash = await self.get_block_hash(block_id)
+
+ if not block_hash:
+ block_hash = await self.get_chain_head()
+
+ self.last_block_hash = block_hash
+ self.block_id = block_id
+
+            # In fact calls and storage functions are decoded against the runtime of the previous block, therefore
+            # retrieve the metadata and apply the type registry of the runtime of the parent block
+ block_header = await self.rpc_request(
+ "chain_getHeader", [self.last_block_hash]
+ )
+
+ if block_header["result"] is None:
+ raise SubstrateRequestException(
+ f'Block not found for "{self.last_block_hash}"'
+ )
+
+ parent_block_hash: str = block_header["result"]["parentHash"]
+
+ if (
+ parent_block_hash
+ == "0x0000000000000000000000000000000000000000000000000000000000000000"
+ ):
+ runtime_block_hash = self.last_block_hash
+ else:
+ runtime_block_hash = parent_block_hash
+
+ runtime_info = await self.get_block_runtime_version(
+ block_hash=runtime_block_hash
+ )
+
+ if runtime_info is None:
+ raise SubstrateRequestException(
+ f"No runtime information for block '{block_hash}'"
+ )
+
+ # Check if runtime state already set to current block
+ if runtime_info.get("specVersion") == self.runtime_version:
+ return Runtime(
+ self.chain,
+ self.runtime_config,
+ self.metadata,
+ self.type_registry,
+ )
+
+ self.runtime_version = runtime_info.get("specVersion")
+ self.transaction_version = runtime_info.get("transactionVersion")
+
+ if not self.metadata:
+ if self.runtime_version in self.__metadata_cache:
+ # Get metadata from cache
+ # self.debug_message('Retrieved metadata for {} from memory'.format(self.runtime_version))
+ self.metadata = self.__metadata_cache[self.runtime_version]
+ else:
+ self.metadata = await self.get_block_metadata(
+ block_hash=runtime_block_hash, decode=True
+ )
+ # self.debug_message('Retrieved metadata for {} from Substrate node'.format(self.runtime_version))
+
+ # Update metadata cache
+ self.__metadata_cache[self.runtime_version] = self.metadata
+
+ # Update type registry
+ self.reload_type_registry(use_remote_preset=False, auto_discover=True)
+
+ if self.implements_scaleinfo:
+ # self.debug_message('Add PortableRegistry from metadata to type registry')
+ self.runtime_config.add_portable_registry(self.metadata)
+
+ # Set active runtime version
+ self.runtime_config.set_active_spec_version_id(self.runtime_version)
+
+ # Check and apply runtime constants
+ ss58_prefix_constant = await self.get_constant(
+ "System", "SS58Prefix", block_hash=block_hash
+ )
+
+ if ss58_prefix_constant:
+ self.ss58_format = ss58_prefix_constant
+
+ # Set runtime compatibility flags
+ try:
+ _ = self.runtime_config.create_scale_object(
+ "sp_weights::weight_v2::Weight"
+ )
+ self.config["is_weight_v2"] = True
+ self.runtime_config.update_type_registry_types(
+ {"Weight": "sp_weights::weight_v2::Weight"}
+ )
+ except NotImplementedError:
+ self.config["is_weight_v2"] = False
+ self.runtime_config.update_type_registry_types({"Weight": "WeightV1"})
+ return Runtime(
+ self.chain,
+ self.runtime_config,
+ self.metadata,
+ self.type_registry,
+ )
+
+ if block_id and block_hash:
+ raise ValueError("Cannot provide block_hash and block_id at the same time")
+
+ if not (runtime := self.runtime_cache.retrieve(block_id, block_hash)):
+ runtime = await get_runtime(block_hash, block_id)
+ self.runtime_cache.add_item(block_id, block_hash, runtime)
+ return runtime
+
+ def reload_type_registry(
+ self, use_remote_preset: bool = True, auto_discover: bool = True
+ ):
+ """
+        Reload the type registry and preset used to instantiate the SubstrateInterface object. Useful to periodically
+        apply changes in type definitions when a runtime upgrade occurred
+
+        Parameters
+        ----------
+        use_remote_preset: When True the preset is downloaded from Github master, otherwise files from the locally
+            installed scalecodec package are used
+        auto_discover: When True the type registry preset is auto discovered from the chain name
+ """
+ self.runtime_config.clear_type_registry()
+
+ self.runtime_config.implements_scale_info = self.implements_scaleinfo
+
+ # Load metadata types in runtime configuration
+ self.runtime_config.update_type_registry(load_type_registry_preset(name="core"))
+ self.apply_type_registry_presets(
+ use_remote_preset=use_remote_preset, auto_discover=auto_discover
+ )
+
+ def apply_type_registry_presets(
+ self, use_remote_preset: bool = True, auto_discover: bool = True
+ ):
+ if self.type_registry_preset is not None:
+ # Load type registry according to preset
+ type_registry_preset_dict = load_type_registry_preset(
+ name=self.type_registry_preset, use_remote_preset=use_remote_preset
+ )
+
+ if not type_registry_preset_dict:
+ raise ValueError(
+ f"Type registry preset '{self.type_registry_preset}' not found"
+ )
+
+ elif auto_discover:
+ # Try to auto discover type registry preset by chain name
+ type_registry_name = self.chain.lower().replace(" ", "-")
+ try:
+ type_registry_preset_dict = load_type_registry_preset(
+ type_registry_name
+ )
+ # self.debug_message(f"Auto set type_registry_preset to {type_registry_name} ...")
+ self.type_registry_preset = type_registry_name
+ except ValueError:
+ type_registry_preset_dict = None
+
+ else:
+ type_registry_preset_dict = None
+
+ if type_registry_preset_dict:
+ # Load type registries in runtime configuration
+ if self.implements_scaleinfo is False:
+ # Only runtime with no embedded types in metadata need the default set of explicit defined types
+ self.runtime_config.update_type_registry(
+ load_type_registry_preset(
+ "legacy", use_remote_preset=use_remote_preset
+ )
+ )
+
+ if self.type_registry_preset != "legacy":
+ self.runtime_config.update_type_registry(type_registry_preset_dict)
+
+ if self.type_registry:
+ # Load type registries in runtime configuration
+ self.runtime_config.update_type_registry(self.type_registry)
+
+ @property
+ def implements_scaleinfo(self) -> Optional[bool]:
+ """
+        Returns True if the current runtime implements a `PortableRegistry` (`MetadataV14` and higher)
+
+        Returns
+        -------
+        Optional[bool]
+ """
+ if self.metadata:
+ return self.metadata.portable_registry is not None
+ else:
+ return None
+
+ async def create_storage_key(
+ self,
+ pallet: str,
+ storage_function: str,
+ params: Optional[list] = None,
+ block_hash: str = None,
+ ) -> StorageKey:
+ """
+ Create a `StorageKey` instance providing storage function details. See `subscribe_storage()`.
+
+ Parameters
+ ----------
+ pallet: name of pallet
+ storage_function: name of storage function
+        params: Optional list of parameters in case of a Mapped storage function
+        block_hash: Optional block hash; the runtime at this block is initialized before creating the key
+
+ Returns
+ -------
+ StorageKey
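+
+        Example (illustrative; the address is the one from the `query_multi` docstring below):
+
+        ```
+        storage_key = await substrate.create_storage_key(
+            "System", "Account", ["F4xQKRUagnSGjFqafyhajLs94e7Vvzvr8ebwYJceKpr8R7T"]
+        )
+        ```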
+ """
+ await self.init_runtime(block_hash=block_hash)
+
+ return StorageKey.create_from_storage_function(
+ pallet,
+ storage_function,
+ params,
+ runtime_config=self.runtime_config,
+ metadata=self.metadata,
+ )
+
+ async def _get_block_handler(
+ self,
+ block_hash: str,
+ ignore_decoding_errors: bool = False,
+ include_author: bool = False,
+ header_only: bool = False,
+ finalized_only: bool = False,
+ subscription_handler: Optional[Callable] = None,
+ ):
+ try:
+ await self.init_runtime(block_hash=block_hash)
+ except BlockNotFound:
+ return None
+
+ async def decode_block(block_data, block_data_hash=None):
+ if block_data:
+ if block_data_hash:
+ block_data["header"]["hash"] = block_data_hash
+
+ if type(block_data["header"]["number"]) is str:
+ # Convert block number from hex (backwards compatibility)
+ block_data["header"]["number"] = int(
+ block_data["header"]["number"], 16
+ )
+
+ extrinsic_cls = self.runtime_config.get_decoder_class("Extrinsic")
+
+ if "extrinsics" in block_data:
+ for idx, extrinsic_data in enumerate(block_data["extrinsics"]):
+ extrinsic_decoder = extrinsic_cls(
+ data=ScaleBytes(extrinsic_data),
+ metadata=self.metadata,
+ runtime_config=self.runtime_config,
+ )
+ try:
+ extrinsic_decoder.decode(check_remaining=True)
+ block_data["extrinsics"][idx] = extrinsic_decoder
+
+                    except Exception:
+ if not ignore_decoding_errors:
+ raise
+ block_data["extrinsics"][idx] = None
+
+ for idx, log_data in enumerate(block_data["header"]["digest"]["logs"]):
+ if type(log_data) is str:
+ # Convert digest log from hex (backwards compatibility)
+ try:
+ log_digest_cls = self.runtime_config.get_decoder_class(
+ "sp_runtime::generic::digest::DigestItem"
+ )
+
+ if log_digest_cls is None:
+ raise NotImplementedError(
+ "No decoding class found for 'DigestItem'"
+ )
+
+ log_digest = log_digest_cls(data=ScaleBytes(log_data))
+ log_digest.decode(
+ check_remaining=self.config.get("strict_scale_decode")
+ )
+
+ block_data["header"]["digest"]["logs"][idx] = log_digest
+
+ if include_author and "PreRuntime" in log_digest.value:
+ if self.implements_scaleinfo:
+ engine = bytes(log_digest[1][0])
+ # Retrieve validator set
+ parent_hash = block_data["header"]["parentHash"]
+ validator_set = await self.query(
+ "Session", "Validators", block_hash=parent_hash
+ )
+
+ if engine == b"BABE":
+ babe_predigest = (
+ self.runtime_config.create_scale_object(
+ type_string="RawBabePreDigest",
+ data=ScaleBytes(
+ bytes(log_digest[1][1])
+ ),
+ )
+ )
+
+ babe_predigest.decode(
+ check_remaining=self.config.get(
+ "strict_scale_decode"
+ )
+ )
+
+ rank_validator = babe_predigest[1].value[
+ "authority_index"
+ ]
+
+ block_author = validator_set[rank_validator]
+ block_data["author"] = block_author.value
+
+ elif engine == b"aura":
+ aura_predigest = (
+ self.runtime_config.create_scale_object(
+ type_string="RawAuraPreDigest",
+ data=ScaleBytes(
+ bytes(log_digest[1][1])
+ ),
+ )
+ )
+
+ aura_predigest.decode(check_remaining=True)
+
+ rank_validator = aura_predigest.value[
+ "slot_number"
+ ] % len(validator_set)
+
+ block_author = validator_set[rank_validator]
+ block_data["author"] = block_author.value
+ else:
+ raise NotImplementedError(
+ f"Cannot extract author for engine {log_digest.value['PreRuntime'][0]}"
+ )
+ else:
+ if (
+ log_digest.value["PreRuntime"]["engine"]
+ == "BABE"
+ ):
+ validator_set = await self.query(
+ "Session",
+ "Validators",
+ block_hash=block_hash,
+ )
+ rank_validator = log_digest.value["PreRuntime"][
+ "data"
+ ]["authority_index"]
+
+ block_author = validator_set.elements[
+ rank_validator
+ ]
+ block_data["author"] = block_author.value
+ else:
+ raise NotImplementedError(
+ f"Cannot extract author for engine {log_digest.value['PreRuntime']['engine']}"
+ )
+
+ except Exception:
+ if not ignore_decoding_errors:
+ raise
+ block_data["header"]["digest"]["logs"][idx] = None
+
+ return block_data
+
+ if callable(subscription_handler):
+ rpc_method_prefix = "Finalized" if finalized_only else "New"
+
+ async def result_handler(message, update_nr, subscription_id):
+ new_block = await decode_block({"header": message["params"]["result"]})
+
+ subscription_result = subscription_handler(
+ new_block, update_nr, subscription_id
+ )
+
+ if subscription_result is not None:
+ # Handler returned end result: unsubscribe from further updates
+ self._forgettable_task = asyncio.create_task(
+ self.rpc_request(
+ f"chain_unsubscribe{rpc_method_prefix}Heads",
+ [subscription_id],
+ )
+ )
+
+ return subscription_result
+
+ result = await self._make_rpc_request(
+ [
+ self.make_payload(
+ "_get_block_handler",
+ f"chain_subscribe{rpc_method_prefix}Heads",
+ [],
+ )
+ ],
+ result_handler=result_handler,
+ )
+
+ return result
+
+ else:
+ if header_only:
+ response = await self.rpc_request("chain_getHeader", [block_hash])
+ return await decode_block(
+ {"header": response["result"]}, block_data_hash=block_hash
+ )
+
+ else:
+ response = await self.rpc_request("chain_getBlock", [block_hash])
+ return await decode_block(
+ response["result"]["block"], block_data_hash=block_hash
+ )
+
+ async def get_block(
+ self,
+ block_hash: Optional[str] = None,
+ block_number: Optional[int] = None,
+ ignore_decoding_errors: bool = False,
+ include_author: bool = False,
+ finalized_only: bool = False,
+ ) -> Optional[dict]:
+ """
+        Retrieves a block and decodes its containing extrinsics and log digest items. If `block_hash` and `block_number`
+        are omitted the chain tip will be retrieved, or the finalized head if `finalized_only` is set to true.
+
+ Either `block_hash` or `block_number` should be set, or both omitted.
+
+ Parameters
+ ----------
+ block_hash: the hash of the block to be retrieved
+        block_number: the block number to be retrieved
+ ignore_decoding_errors: When set this will catch all decoding errors, set the item to None and continue decoding
+ include_author: This will retrieve the block author from the validator set and add to the result
+ finalized_only: when no `block_hash` or `block_number` is set, this will retrieve the finalized head
+
+ Returns
+ -------
+ A dict containing the extrinsic and digest logs data
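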
+ """
+ if block_hash and block_number:
+ raise ValueError("Either block_hash or block_number should be be set")
+
+ if block_number is not None:
+ block_hash = await self.get_block_hash(block_number)
+
+ if block_hash is None:
+ return
+
+ if block_hash and finalized_only:
+ raise ValueError(
+ "finalized_only cannot be True when block_hash is provided"
+ )
+
+ if block_hash is None:
+ # Retrieve block hash
+ if finalized_only:
+ block_hash = await self.get_chain_finalised_head()
+ else:
+ block_hash = await self.get_chain_head()
+
+ return await self._get_block_handler(
+ block_hash=block_hash,
+ ignore_decoding_errors=ignore_decoding_errors,
+ header_only=False,
+ include_author=include_author,
+ )
+
+ async def get_events(self, block_hash: Optional[str] = None) -> list:
+ """
+ Convenience method to get events for a certain block (storage call for module 'System' and function 'Events')
+
+ Parameters
+ ----------
+ block_hash
+
+ Returns
+ -------
+ list
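+
+        Example (illustrative sketch):
+
+        ```
+        events = await substrate.get_events()
+        for event in events:
+            print(event["event"]["module_id"], event["event"]["event_id"])
+        ```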
+ """
+
+ def convert_event_data(data):
+ # Extract phase information
+ phase_key, phase_value = next(iter(data["phase"].items()))
+ try:
+ extrinsic_idx = phase_value[0]
+ except IndexError:
+ extrinsic_idx = None
+
+ # Extract event details
+ module_id, event_data = next(iter(data["event"].items()))
+ event_id, attributes_data = next(iter(event_data[0].items()))
+
+ # Convert class and pays_fee dictionaries to their string equivalents if they exist
+ attributes = attributes_data
+ if isinstance(attributes, dict):
+ for key, value in attributes.items():
+ if isinstance(value, dict):
+ # Convert nested single-key dictionaries to their keys as strings
+ sub_key = next(iter(value.keys()))
+ if value[sub_key] == ():
+ attributes[key] = sub_key
+
+ # Create the converted dictionary
+ converted = {
+ "phase": phase_key,
+ "extrinsic_idx": extrinsic_idx,
+ "event": {
+ "module_id": module_id,
+ "event_id": event_id,
+ "attributes": attributes,
+ },
+ "topics": list(data["topics"]), # Convert topics tuple to a list
+ }
+
+ return converted
+
+ events = []
+
+ if not block_hash:
+ block_hash = await self.get_chain_head()
+
+ storage_obj = await self.query(
+ module="System", storage_function="Events", block_hash=block_hash
+ )
+ if storage_obj:
+ for item in list(storage_obj):
+ events.append(convert_event_data(item))
+ return events
+
+ async def get_block_runtime_version(self, block_hash: str) -> dict:
+ """
+ Retrieve the runtime version id of given block_hash
+ """
+ response = await self.rpc_request("state_getRuntimeVersion", [block_hash])
+ return response.get("result")
+
+ async def get_block_metadata(
+ self, block_hash: Optional[str] = None, decode: bool = True
+ ) -> Union[dict, ScaleType]:
+ """
+        A pass-through to existing JSONRPC method `state_getMetadata`.
+
+ Parameters
+ ----------
+ block_hash
+ decode: True for decoded version
+
+        Returns
+        -------
+        The `MetadataVersioned` SCALE object if decode=True, otherwise the raw RPC response dict
+ """
+ params = None
+ if decode and not self.runtime_config:
+ raise ValueError(
+ "Cannot decode runtime configuration without a supplied runtime_config"
+ )
+
+ if block_hash:
+ params = [block_hash]
+ response = await self.rpc_request("state_getMetadata", params)
+
+ if "error" in response:
+ raise SubstrateRequestException(response["error"]["message"])
+
+ if response.get("result") and decode:
+ metadata_decoder = self.runtime_config.create_scale_object(
+ "MetadataVersioned", data=ScaleBytes(response.get("result"))
+ )
+ metadata_decoder.decode()
+
+ return metadata_decoder
+
+ return response
+
+ async def _preprocess(
+ self,
+ query_for: Optional[list],
+ block_hash: Optional[str],
+ storage_function: str,
+ module: str,
+ ) -> Preprocessed:
+ """
+ Creates a Preprocessed data object for passing to `_make_rpc_request`
+ """
+ params = query_for if query_for else []
+ # Search storage call in metadata
+ metadata_pallet = self.metadata.get_metadata_pallet(module)
+
+ if not metadata_pallet:
+ raise SubstrateRequestException(f'Pallet "{module}" not found')
+
+ storage_item = metadata_pallet.get_storage_function(storage_function)
+
+        if not storage_item:
+ raise SubstrateRequestException(
+ f'Storage function "{module}.{storage_function}" not found'
+ )
+
+ # SCALE type string of value
+ param_types = storage_item.get_params_type_string()
+ value_scale_type = storage_item.get_value_type_string()
+
+ if len(params) != len(param_types):
+ raise ValueError(
+ f"Storage function requires {len(param_types)} parameters, {len(params)} given"
+ )
+
+ storage_key = StorageKey.create_from_storage_function(
+ module,
+ storage_item.value["name"],
+ params,
+ runtime_config=self.runtime_config,
+ metadata=self.metadata,
+ )
+ method = "state_getStorageAt"
+ return Preprocessed(
+ str(query_for),
+ method,
+ [storage_key.to_hex(), block_hash],
+ value_scale_type,
+ storage_item,
+ )
+
+ async def _process_response(
+ self,
+ response: dict,
+ subscription_id: Union[int, str],
+ value_scale_type: Optional[str] = None,
+ storage_item: Optional[ScaleType] = None,
+ runtime: Optional[Runtime] = None,
+ result_handler: Optional[ResultHandler] = None,
+ ) -> tuple[Union[ScaleType, dict], bool]:
+ """
+ Processes the RPC call response by decoding it, returning it as is, or setting a handler for subscriptions,
+ depending on the specific call.
+
+ :param response: the RPC call response
+ :param subscription_id: the subscription id for subscriptions, used only for subscriptions with a result handler
+ :param value_scale_type: Scale Type string used for decoding ScaleBytes results
+ :param storage_item: The ScaleType object used for decoding ScaleBytes results
+ :param runtime: the runtime object, used for decoding ScaleBytes results
+ :param result_handler: the result handler coroutine used for handling longer-running subscriptions
+
+ :return: (decoded response, completion)
+ """
+ result: Union[dict, ScaleType] = response
+ if value_scale_type and isinstance(storage_item, ScaleType):
+ if not runtime:
+ async with self._lock:
+ runtime = Runtime(
+ self.chain,
+ self.runtime_config,
+ self.metadata,
+ self.type_registry,
+ )
+ if response.get("result") is not None:
+ query_value = response.get("result")
+ elif storage_item.value["modifier"] == "Default":
+ # Fallback to default value of storage function if no result
+ query_value = storage_item.value_object["default"].value_object
+ else:
+ # No result is interpreted as an Option<...> result
+ value_scale_type = f"Option<{value_scale_type}>"
+ query_value = storage_item.value_object["default"].value_object
+ if isinstance(query_value, str):
+ q = bytes.fromhex(query_value[2:])
+ elif isinstance(query_value, bytearray):
+ q = bytes(query_value)
+ else:
+ q = query_value
+ obj = await self.decode_scale(value_scale_type, q, True)
+ result = obj
+ if asyncio.iscoroutinefunction(result_handler):
+ # For multipart responses as a result of subscriptions.
+ message, bool_result = await result_handler(response, subscription_id)
+ return message, bool_result
+ return result, True
+
+ async def _make_rpc_request(
+ self,
+ payloads: list[dict],
+ value_scale_type: Optional[str] = None,
+ storage_item: Optional[ScaleType] = None,
+ runtime: Optional[Runtime] = None,
+ result_handler: Optional[ResultHandler] = None,
+ ) -> RequestManager.RequestResults:
+ request_manager = RequestManager(payloads)
+
+ subscription_added = False
+
+ async with self.ws as ws:
+ for item in payloads:
+ item_id = await ws.send(item["payload"])
+ request_manager.add_request(item_id, item["id"])
+
+ while True:
+ for item_id in request_manager.response_map.keys():
+ if (
+ item_id not in request_manager.responses
+ or asyncio.iscoroutinefunction(result_handler)
+ ):
+ if response := await ws.retrieve(item_id):
+ if (
+ asyncio.iscoroutinefunction(result_handler)
+ and not subscription_added
+ ):
+ # handles subscriptions, overwrites the previous mapping of {item_id : payload_id}
+ # with {subscription_id : payload_id}
+ try:
+ item_id = request_manager.overwrite_request(
+ item_id, response["result"]
+ )
+ except KeyError:
+ raise SubstrateRequestException(str(response))
+ decoded_response, complete = await self._process_response(
+ response,
+ item_id,
+ value_scale_type,
+ storage_item,
+ runtime,
+ result_handler,
+ )
+ request_manager.add_response(
+ item_id, decoded_response, complete
+ )
+ if (
+ asyncio.iscoroutinefunction(result_handler)
+ and not subscription_added
+ ):
+ subscription_added = True
+ break
+
+ if request_manager.is_complete:
+ break
+
+ return request_manager.get_results()
+
+ @staticmethod
+ def make_payload(id_: str, method: str, params: list) -> dict:
+ """
+ Creates a payload for making an rpc_request with _make_rpc_request
+
+ :param id_: a unique name you would like to give to this request
+ :param method: the method in the RPC request
+ :param params: the params in the RPC request
+
+ :return: the payload dict
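+
+        Example (illustrative):
+
+        ```
+        payload = AsyncSubstrateInterface.make_payload(
+            "block_hash_at_0", "chain_getBlockHash", [0]
+        )
+        # -> {"id": "block_hash_at_0",
+        #     "payload": {"jsonrpc": "2.0", "method": "chain_getBlockHash", "params": [0]}}
+        ```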
+ """
+ return {
+ "id": id_,
+ "payload": {"jsonrpc": "2.0", "method": method, "params": params},
+ }
+
+ async def rpc_request(
+ self,
+ method: str,
+ params: Optional[list],
+ block_hash: Optional[str] = None,
+ reuse_block_hash: bool = False,
+ ) -> Any:
+ """
+        Makes an RPC request to the subtensor. Use this only if ``self.query``, ``self.query_multiple``, and
+        ``self.query_map`` do not meet your needs.
+
+ :param method: str the method in the RPC request
+ :param params: list of the params in the RPC request
+ :param block_hash: optional str, the hash of the block — only supply this if not supplying the block
+ hash in the params, and not reusing the block hash
+ :param reuse_block_hash: optional bool, whether to reuse the block hash in the params — only mark as True
+ if not supplying the block hash in the params, or via the `block_hash` parameter
+
+ :return: the response from the RPC request
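+
+        Example (illustrative sketch; assumes an initialized interface `substrate`):
+
+        ```
+        response = await substrate.rpc_request("system_chain", [])
+        chain_name = response["result"]
+        ```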
+ """
+ block_hash = await self._get_current_block_hash(block_hash, reuse_block_hash)
+ params = params or []
+ payload_id = f"{method}{random.randint(0, 7000)}"
+ payloads = [
+ self.make_payload(
+ payload_id,
+ method,
+ params + [block_hash] if block_hash else params,
+ )
+ ]
+ runtime = Runtime(
+ self.chain,
+ self.runtime_config,
+ self.metadata,
+ self.type_registry,
+ )
+ result = await self._make_rpc_request(payloads, runtime=runtime)
+ if "error" in result[payload_id][0]:
+ raise SubstrateRequestException(result[payload_id][0]["error"]["message"])
+ if "result" in result[payload_id][0]:
+ return result[payload_id][0]
+ else:
+ raise SubstrateRequestException(result[payload_id][0])
+
+ async def get_block_hash(self, block_id: int) -> str:
+ return (await self.rpc_request("chain_getBlockHash", [block_id]))["result"]
+
+ async def get_chain_head(self) -> str:
+ result = await self._make_rpc_request(
+ [
+ self.make_payload(
+ "rpc_request",
+ "chain_getHead",
+ [],
+ )
+ ],
+ runtime=Runtime(
+ self.chain,
+ self.runtime_config,
+ self.metadata,
+ self.type_registry,
+ ),
+ )
+ self.last_block_hash = result["rpc_request"][0]["result"]
+ return result["rpc_request"][0]["result"]
+
+ async def compose_call(
+ self,
+ call_module: str,
+ call_function: str,
+ call_params: Optional[dict] = None,
+ block_hash: Optional[str] = None,
+ ) -> GenericCall:
+ """
+ Composes a call payload which can be used in an extrinsic.
+
+ :param call_module: Name of the runtime module e.g. Balances
+ :param call_function: Name of the call function e.g. transfer
+ :param call_params: This is a dict containing the params of the call. e.g.
+ `{'dest': 'EaG2CRhJWPb7qmdcJvy3LiWdh26Jreu9Dx6R1rXxPmYXoDk', 'value': 1000000000000}`
+ :param block_hash: Use metadata at given block_hash to compose call
+
+ :return: A composed call
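+
+        Example (illustrative; the address and value are the ones from the `call_params` description above):
+
+        ```
+        call = await substrate.compose_call(
+            call_module="Balances",
+            call_function="transfer",
+            call_params={
+                "dest": "EaG2CRhJWPb7qmdcJvy3LiWdh26Jreu9Dx6R1rXxPmYXoDk",
+                "value": 1000000000000,
+            },
+        )
+        ```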
+ """
+ if call_params is None:
+ call_params = {}
+
+ await self.init_runtime(block_hash=block_hash)
+
+ call = self.runtime_config.create_scale_object(
+ type_string="Call", metadata=self.metadata
+ )
+
+ call.encode(
+ {
+ "call_module": call_module,
+ "call_function": call_function,
+ "call_args": call_params,
+ }
+ )
+
+ return call
+
+ async def query_multiple(
+ self,
+ params: list,
+ storage_function: str,
+ module: str,
+ block_hash: Optional[str] = None,
+ reuse_block_hash: bool = False,
+ ) -> dict[str, ScaleType]:
+ """
+ Queries the subtensor. Only use this when making multiple queries, else use ``self.query``
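+
+        Example (illustrative sketch; assumes `addresses` is a list of SS58 addresses):
+
+        ```
+        results = await substrate.query_multiple(
+            params=addresses, storage_function="Account", module="System"
+        )
+        ```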
+ """
+ # By allowing for specifying the block hash, users, if they have multiple query types they want
+ # to do, can simply query the block hash first, and then pass multiple query_subtensor calls
+ # into an asyncio.gather, with the specified block hash
+ block_hash = await self._get_current_block_hash(block_hash, reuse_block_hash)
+ if block_hash:
+ self.last_block_hash = block_hash
+ runtime = await self.init_runtime(block_hash=block_hash)
+ preprocessed: tuple[Preprocessed] = await asyncio.gather(
+ *[
+ self._preprocess([x], block_hash, storage_function, module)
+ for x in params
+ ]
+ )
+ all_info = [
+ self.make_payload(item.queryable, item.method, item.params)
+ for item in preprocessed
+ ]
+ # These will always be the same throughout the preprocessed list, so we just grab the first one
+ value_scale_type = preprocessed[0].value_scale_type
+ storage_item = preprocessed[0].storage_item
+
+ responses = await self._make_rpc_request(
+ all_info, value_scale_type, storage_item, runtime
+ )
+ return {
+ param: responses[p.queryable][0] for (param, p) in zip(params, preprocessed)
+ }
+
+ async def query_multi(
+ self, storage_keys: list[StorageKey], block_hash: Optional[str] = None
+ ) -> list:
+ """
+ Query multiple storage keys in one request.
+
+ Example:
+
+ ```
+        storage_keys = [
+            await substrate.create_storage_key(
+                "System", "Account", ["F4xQKRUagnSGjFqafyhajLs94e7Vvzvr8ebwYJceKpr8R7T"]
+            ),
+            await substrate.create_storage_key(
+                "System", "Account", ["GSEX8kR4Kz5UZGhvRUCJG93D5hhTAoVZ5tAe6Zne7V42DSi"]
+            ),
+        ]
+
+        result = await substrate.query_multi(storage_keys)
+ ```
+
+ Parameters
+ ----------
+ storage_keys: list of StorageKey objects
+ block_hash: Optional block_hash of state snapshot
+
+ Returns
+ -------
+ list of `(storage_key, scale_obj)` tuples
+ """
+
+ await self.init_runtime(block_hash=block_hash)
+
+ # Retrieve corresponding value
+ response = await self.rpc_request(
+ "state_queryStorageAt", [[s.to_hex() for s in storage_keys], block_hash]
+ )
+
+ if "error" in response:
+ raise SubstrateRequestException(response["error"]["message"])
+
+ result = []
+
+ storage_key_map = {s.to_hex(): s for s in storage_keys}
+
+ for result_group in response["result"]:
+ for change_storage_key, change_data in result_group["changes"]:
+ # Decode result for specified storage_key
+ storage_key = storage_key_map[change_storage_key]
+ if change_data is None:
+ change_data = b"\x00"
+ else:
+ change_data = bytes.fromhex(change_data[2:])
+ result.append(
+ (
+ storage_key,
+ await self.decode_scale(
+ storage_key.value_scale_type, change_data
+ ),
+ )
+ )
+
+ return result
+
+ async def create_scale_object(
+ self,
+ type_string: str,
+ data: Optional[ScaleBytes] = None,
+ block_hash: Optional[str] = None,
+ **kwargs,
+ ) -> "ScaleType":
+ """
+        Convenience method to create a SCALE object of type `type_string`. This will initialize the runtime
+        automatically at the moment of `block_hash`, or the chain tip if omitted.
+
+ :param type_string: str Name of SCALE type to create
+ :param data: ScaleBytes Optional ScaleBytes to decode
+ :param block_hash: Optional block hash for moment of decoding, when omitted the chain tip will be used
+ :param kwargs: keyword args for the Scale Type constructor
+
+ :return: The created Scale Type object
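+
+        Example (illustrative sketch; "u32" is assumed to be present in the active type registry):
+
+        ```
+        scale_obj = await substrate.create_scale_object("u32", data=ScaleBytes("0x02000000"))
+        scale_obj.decode()
+        ```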
+ """
+ runtime = await self.init_runtime(block_hash=block_hash)
+ if "metadata" not in kwargs:
+ kwargs["metadata"] = runtime.metadata
+
+ return runtime.runtime_config.create_scale_object(
+ type_string, data=data, **kwargs
+ )
+
+ async def generate_signature_payload(
+ self,
+ call: GenericCall,
+ era=None,
+ nonce: int = 0,
+ tip: int = 0,
+ tip_asset_id: Optional[int] = None,
+ include_call_length: bool = False,
+ ) -> ScaleBytes:
+ # Retrieve genesis hash
+ genesis_hash = await self.get_block_hash(0)
+
+ if not era:
+ era = "00"
+
+ if era == "00":
+ # Immortal extrinsic
+ block_hash = genesis_hash
+ else:
+ # Determine mortality of extrinsic
+ era_obj = self.runtime_config.create_scale_object("Era")
+
+ if isinstance(era, dict) and "current" not in era and "phase" not in era:
+ raise ValueError(
+ 'The era dict must contain either "current" or "phase" element to encode a valid era'
+ )
+
+ era_obj.encode(era)
+ block_hash = await self.get_block_hash(
+ block_id=era_obj.birth(era.get("current"))
+ )
+
+ # Create signature payload
+ signature_payload = self.runtime_config.create_scale_object(
+ "ExtrinsicPayloadValue"
+ )
+
+ # Process signed extensions in metadata
+ if "signed_extensions" in self.metadata[1][1]["extrinsic"]:
+ # Base signature payload
+ signature_payload.type_mapping = [["call", "CallBytes"]]
+
+ # Add signed extensions to payload
+ signed_extensions = self.metadata.get_signed_extensions()
+
+ if "CheckMortality" in signed_extensions:
+ signature_payload.type_mapping.append(
+ ["era", signed_extensions["CheckMortality"]["extrinsic"]]
+ )
+
+ if "CheckEra" in signed_extensions:
+ signature_payload.type_mapping.append(
+ ["era", signed_extensions["CheckEra"]["extrinsic"]]
+ )
+
+ if "CheckNonce" in signed_extensions:
+ signature_payload.type_mapping.append(
+ ["nonce", signed_extensions["CheckNonce"]["extrinsic"]]
+ )
+
+ if "ChargeTransactionPayment" in signed_extensions:
+ signature_payload.type_mapping.append(
+ ["tip", signed_extensions["ChargeTransactionPayment"]["extrinsic"]]
+ )
+
+ if "ChargeAssetTxPayment" in signed_extensions:
+ signature_payload.type_mapping.append(
+ ["asset_id", signed_extensions["ChargeAssetTxPayment"]["extrinsic"]]
+ )
+
+ if "CheckMetadataHash" in signed_extensions:
+ signature_payload.type_mapping.append(
+ ["mode", signed_extensions["CheckMetadataHash"]["extrinsic"]]
+ )
+
+ if "CheckSpecVersion" in signed_extensions:
+ signature_payload.type_mapping.append(
+ [
+ "spec_version",
+ signed_extensions["CheckSpecVersion"]["additional_signed"],
+ ]
+ )
+
+ if "CheckTxVersion" in signed_extensions:
+ signature_payload.type_mapping.append(
+ [
+ "transaction_version",
+ signed_extensions["CheckTxVersion"]["additional_signed"],
+ ]
+ )
+
+ if "CheckGenesis" in signed_extensions:
+ signature_payload.type_mapping.append(
+ [
+ "genesis_hash",
+ signed_extensions["CheckGenesis"]["additional_signed"],
+ ]
+ )
+
+ if "CheckMortality" in signed_extensions:
+ signature_payload.type_mapping.append(
+ [
+ "block_hash",
+ signed_extensions["CheckMortality"]["additional_signed"],
+ ]
+ )
+
+ if "CheckEra" in signed_extensions:
+ signature_payload.type_mapping.append(
+ ["block_hash", signed_extensions["CheckEra"]["additional_signed"]]
+ )
+
+ if "CheckMetadataHash" in signed_extensions:
+ signature_payload.type_mapping.append(
+ [
+ "metadata_hash",
+ signed_extensions["CheckMetadataHash"]["additional_signed"],
+ ]
+ )
+
+ if include_call_length:
+ length_obj = self.runtime_config.create_scale_object("Bytes")
+ call_data = str(length_obj.encode(str(call.data)))
+
+ else:
+ call_data = str(call.data)
+
+ payload_dict = {
+ "call": call_data,
+ "era": era,
+ "nonce": nonce,
+ "tip": tip,
+ "spec_version": self.runtime_version,
+ "genesis_hash": genesis_hash,
+ "block_hash": block_hash,
+ "transaction_version": self.transaction_version,
+ "asset_id": {"tip": tip, "asset_id": tip_asset_id},
+ "metadata_hash": None,
+ "mode": "Disabled",
+ }
+
+ signature_payload.encode(payload_dict)
+
+ if signature_payload.data.length > 256:
+ return ScaleBytes(
+ data=blake2b(signature_payload.data.data, digest_size=32).digest()
+ )
+
+ return signature_payload.data
+
+ async def create_signed_extrinsic(
+ self,
+ call: GenericCall,
+ keypair: Keypair,
+ era: Optional[dict] = None,
+ nonce: Optional[int] = None,
+ tip: int = 0,
+ tip_asset_id: Optional[int] = None,
+ signature: Optional[Union[bytes, str]] = None,
+ ) -> "GenericExtrinsic":
+ """
+ Creates an extrinsic signed by given account details
+
+ :param call: GenericCall to create extrinsic for
+ :param keypair: Keypair used to sign the extrinsic
+        :param era: Specify mortality in blocks in the following format:
+            {'period': [amount_blocks]} If omitted the extrinsic is immortal
+ :param nonce: nonce to include in extrinsics, if omitted the current nonce is retrieved on-chain
+ :param tip: The tip for the block author to gain priority during network congestion
+ :param tip_asset_id: Optional asset ID with which to pay the tip
+ :param signature: Optionally provide signature if externally signed
+
+ :return: The signed Extrinsic
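+
+        Example (illustrative sketch; assumes `call` composed via `compose_call` and a funded `keypair`):
+
+        ```
+        extrinsic = await substrate.create_signed_extrinsic(
+            call=call, keypair=keypair, era={"period": 64}
+        )
+        ```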
+ """
+ await self.init_runtime()
+
+ # Check requirements
+ if not isinstance(call, GenericCall):
+ raise TypeError("'call' must be of type Call")
+
+ # Check if extrinsic version is supported
+ if self.metadata[1][1]["extrinsic"]["version"] != 4: # type: ignore
+ raise NotImplementedError(
+ f"Extrinsic version {self.metadata[1][1]['extrinsic']['version']} not supported" # type: ignore
+ )
+
+ # Retrieve nonce
+ if nonce is None:
+ nonce = await self.get_account_nonce(keypair.ss58_address) or 0
+
+ # Process era
+ if era is None:
+ era = "00"
+ else:
+ if isinstance(era, dict) and "current" not in era and "phase" not in era:
+ # Retrieve current block id
+ era["current"] = await self.get_block_number(
+ await self.get_chain_finalised_head()
+ )
+
+ if signature is not None:
+ if isinstance(signature, str) and signature[0:2] == "0x":
+ signature = bytes.fromhex(signature[2:])
+
+ # Check if signature is a MultiSignature and contains signature version
+ if len(signature) == 65:
+ signature_version = signature[0]
+ signature = signature[1:]
+ else:
+ signature_version = keypair.crypto_type
+
+ else:
+ # Create signature payload
+ signature_payload = await self.generate_signature_payload(
+ call=call, era=era, nonce=nonce, tip=tip, tip_asset_id=tip_asset_id
+ )
+
+ # Set Signature version to crypto type of keypair
+ signature_version = keypair.crypto_type
+
+ # Sign payload
+ signature = keypair.sign(signature_payload)
+
+ # Create extrinsic
+ extrinsic = self.runtime_config.create_scale_object(
+ type_string="Extrinsic", metadata=self.metadata
+ )
+
+ value = {
+ "account_id": f"0x{keypair.public_key.hex()}",
+ "signature": f"0x{signature.hex()}",
+ "call_function": call.value["call_function"],
+ "call_module": call.value["call_module"],
+ "call_args": call.value["call_args"],
+ "nonce": nonce,
+ "era": era,
+ "tip": tip,
+ "asset_id": {"tip": tip, "asset_id": tip_asset_id},
+ "mode": "Disabled",
+ }
+
+ # Check if ExtrinsicSignature is MultiSignature, otherwise omit signature_version
+ signature_cls = self.runtime_config.get_decoder_class("ExtrinsicSignature")
+ if issubclass(signature_cls, self.runtime_config.get_decoder_class("Enum")):
+ value["signature_version"] = signature_version
+
+ extrinsic.encode(value)
+
+ return extrinsic
+
+ async def get_chain_finalised_head(self):
+ """
+        A pass-through to existing JSONRPC method `chain_getFinalizedHead`
+
+        Returns
+        -------
+        Block hash of the finalized chain head
+ """
+ response = await self.rpc_request("chain_getFinalizedHead", [])
+
+ if response is not None:
+ if "error" in response:
+ raise SubstrateRequestException(response["error"]["message"])
+
+ return response.get("result")
+
+ async def runtime_call(
+ self,
+ api: str,
+ method: str,
+ params: Optional[Union[list, dict]] = None,
+ block_hash: Optional[str] = None,
+ ) -> ScaleType:
+ """
+ Calls a runtime API method
+
+ :param api: Name of the runtime API e.g. 'TransactionPaymentApi'
+ :param method: Name of the method e.g. 'query_fee_details'
+ :param params: List of parameters needed to call the runtime API
+ :param block_hash: Hash of the block at which to make the runtime API call
+
+ :return: ScaleType from the runtime call
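+
+        Example (illustrative; mirrors the `get_account_nonce` helper defined below):
+
+        ```
+        nonce_obj = await substrate.runtime_call(
+            "AccountNonceApi", "account_nonce", [keypair.ss58_address]
+        )
+        ```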
+ """
+ await self.init_runtime()
+
+ if params is None:
+ params = {}
+
+ try:
+ runtime_call_def = self.runtime_config.type_registry["runtime_api"][api][
+ "methods"
+ ][method]
+ runtime_api_types = self.runtime_config.type_registry["runtime_api"][
+ api
+ ].get("types", {})
+ except KeyError:
+ raise ValueError(f"Runtime API Call '{api}.{method}' not found in registry")
+
+ if isinstance(params, list) and len(params) != len(runtime_call_def["params"]):
+ raise ValueError(
+ f"Number of parameter provided ({len(params)}) does not "
+ f"match definition {len(runtime_call_def['params'])}"
+ )
+
+ # Add runtime API types to registry
+ self.runtime_config.update_type_registry_types(runtime_api_types)
+ runtime = Runtime(
+ self.chain,
+ self.runtime_config,
+ self.metadata,
+ self.type_registry,
+ )
+
+ # Encode params
+ param_data = ScaleBytes(bytes())
+ for idx, param in enumerate(runtime_call_def["params"]):
+ scale_obj = runtime.runtime_config.create_scale_object(param["type"])
+ if isinstance(params, list):
+ param_data += scale_obj.encode(params[idx])
+ else:
+ if param["name"] not in params:
+ raise ValueError(f"Runtime Call param '{param['name']}' is missing")
+
+ param_data += scale_obj.encode(params[param["name"]])
+
+ # RPC request
+ result_data = await self.rpc_request(
+ "state_call", [f"{api}_{method}", str(param_data), block_hash]
+ )
+
+ # Decode result
+ # TODO update this to use bt-decode
+ result_obj = runtime.runtime_config.create_scale_object(
+ runtime_call_def["type"]
+ )
+ result_obj.decode(
+ ScaleBytes(result_data["result"]),
+ check_remaining=self.config.get("strict_scale_decode"),
+ )
+
+ return result_obj
+
+ async def get_account_nonce(self, account_address: str) -> int:
+ """
+ Returns current nonce for given account address
+
+ :param account_address: SS58 formatted address
+
+ :return: Nonce for given account address
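+
+ Example (illustrative; assumes ``substrate`` is a connected instance of this class and ``keypair`` is a ``Keypair``):
+
+ ```
+ nonce = await substrate.get_account_nonce(keypair.ss58_address)
+ ```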
+ """
+ nonce_obj = await self.runtime_call(
+ "AccountNonceApi", "account_nonce", [account_address]
+ )
+ return nonce_obj.value
+
+ async def get_metadata_constant(self, module_name, constant_name, block_hash=None):
+ """
+ Retrieves the details of a constant for the given module name and constant name at the given
+ block_hash (or the chain tip if block_hash is omitted)
+
+ Parameters
+ ----------
+ module_name: name of the module that holds the constant
+ constant_name: name of the constant to retrieve
+ block_hash: optional hash of the block at which to read the metadata
+
+ Returns
+ -------
+ MetadataModuleConstants
+ """
+
+ # await self.init_runtime(block_hash=block_hash)
+
+ for module in self.metadata.pallets:
+ if module_name == module.name and module.constants:
+ for constant in module.constants:
+ if constant_name == constant.value["name"]:
+ return constant
+
+ async def get_constant(
+ self,
+ module_name: str,
+ constant_name: str,
+ block_hash: Optional[str] = None,
+ reuse_block_hash: bool = False,
+ ) -> "ScaleType":
+ """
+ Returns the decoded `ScaleType` object of the constant for the given module name and constant
+ name at the given block_hash (or the chain tip if block_hash is omitted)
+
+ :param module_name: Name of the module to query
+ :param constant_name: Name of the constant to query
+ :param block_hash: Hash of the block at which to make the runtime API call
+ :param reuse_block_hash: Reuse last-used block hash if set to true
+
+ :return: ScaleType from the runtime call
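+
+ Example:
+
+ An illustrative sketch, assuming ``substrate`` is a connected instance of this class; ``ExistentialDeposit`` is a constant conventionally exposed by the Balances pallet:
+
+ ```
+ constant = await substrate.get_constant("Balances", "ExistentialDeposit")
+ if constant is not None:
+ print(constant.value)
+ ```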
+ """
+ block_hash = await self._get_current_block_hash(block_hash, reuse_block_hash)
+ constant = await self.get_metadata_constant(
+ module_name, constant_name, block_hash=block_hash
+ )
+ if constant:
+ # Decode to ScaleType
+ return await self.decode_scale(
+ constant.type,
+ bytes(constant.constant_value),
+ return_scale_obj=True,
+ )
+ else:
+ return None
+
+ async def get_payment_info(
+ self, call: GenericCall, keypair: Keypair
+ ) -> dict[str, Any]:
+ """
+ Retrieves fee estimation via RPC for given extrinsic
+
+ Parameters
+ ----------
+ call: Call object to estimate fees for
+ keypair: Keypair of the sender, does not have to include private key because no valid signature is required
+
+ Returns
+ -------
+ Dict with payment info
+
+ E.g. `{'class': 'normal', 'partialFee': 151000000, 'weight': {'ref_time': 143322000}}`
+
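+ Example:
+
+ A sketch under the assumption that ``substrate`` is a connected instance of this class, ``call`` is a previously composed ``GenericCall``, and ``keypair`` is any ``Keypair`` (no private key required):
+
+ ```
+ payment_info = await substrate.get_payment_info(call=call, keypair=keypair)
+ print(payment_info["partialFee"])
+ ```
+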
+ """
+
+ # Check requirements
+ if not isinstance(call, GenericCall):
+ raise TypeError("'call' must be of type Call")
+
+ if not isinstance(keypair, Keypair):
+ raise TypeError("'keypair' must be of type Keypair")
+
+ # No valid signature is required for fee estimation
+ signature = "0x" + "00" * 64
+
+ # Create extrinsic
+ extrinsic = await self.create_signed_extrinsic(
+ call=call, keypair=keypair, signature=signature
+ )
+ extrinsic_len = self.runtime_config.create_scale_object("u32")
+ extrinsic_len.encode(len(extrinsic.data))
+
+ result = await self.runtime_call(
+ "TransactionPaymentApi", "query_info", [extrinsic, extrinsic_len]
+ )
+
+ return result.value
+
+ async def query(
+ self,
+ module: str,
+ storage_function: str,
+ params: Optional[list] = None,
+ block_hash: Optional[str] = None,
+ raw_storage_key: Optional[bytes] = None,
+ subscription_handler=None,
+ reuse_block_hash: bool = False,
+ ) -> "ScaleType":
+ """
+ Queries subtensor. This should only be used when making a single request. For multiple requests,
+ you should use ``self.query_multiple``
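+
+ Example:
+
+ An illustrative sketch, assuming ``substrate`` is a connected instance of this class and ``ss58_address`` is a valid account address:
+
+ ```
+ account = await substrate.query("System", "Account", [ss58_address])
+ print(account.value["data"]["free"])
+ ```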
+ """
+ block_hash = await self._get_current_block_hash(block_hash, reuse_block_hash)
+ if block_hash:
+ self.last_block_hash = block_hash
+ runtime = await self.init_runtime(block_hash=block_hash)
+ preprocessed: Preprocessed = await self._preprocess(
+ params, block_hash, storage_function, module
+ )
+ payload = [
+ self.make_payload(
+ preprocessed.queryable, preprocessed.method, preprocessed.params
+ )
+ ]
+ value_scale_type = preprocessed.value_scale_type
+ storage_item = preprocessed.storage_item
+
+ responses = await self._make_rpc_request(
+ payload,
+ value_scale_type,
+ storage_item,
+ runtime,
+ result_handler=subscription_handler,
+ )
+ return responses[preprocessed.queryable][0]
+
+ async def query_map(
+ self,
+ module: str,
+ storage_function: str,
+ params: Optional[list] = None,
+ block_hash: Optional[str] = None,
+ max_results: Optional[int] = None,
+ start_key: Optional[str] = None,
+ page_size: int = 100,
+ ignore_decoding_errors: bool = False,
+ reuse_block_hash: bool = False,
+ ) -> "QueryMapResult":
+ """
+ Iterates over all key-value pairs located at the given module and storage_function. The storage
+ item must be a map.
+
+ Example:
+
+ ```
+ result = await substrate.query_map('System', 'Account', max_results=100)
+
+ async for account, account_info in result:
+ print(f"Free balance of account '{account.value}': {account_info.value['data']['free']}")
+ ```
+
+ Note: it is important that you do not use `for x in result.records`, as this will sidestep possible
+ pagination. You must do `async for x in result`.
+
+ :param module: The module name in the metadata, e.g. System or Balances.
+ :param storage_function: The storage function name, e.g. Account or Locks.
+ :param params: The input parameters in case of for example a `DoubleMap` storage function
+ :param block_hash: Optional block hash for result at given block, when left to None the chain tip will be used.
+ :param max_results: the maximum of results required, if set the query will stop fetching results when number is
+ reached
+ :param start_key: The storage key used as offset for the results, for pagination purposes
+ :param page_size: The results are fetched from the node RPC in chunks of this size
+ :param ignore_decoding_errors: When set this will catch all decoding errors, set the item to None and continue
+ decoding
+ :param reuse_block_hash: use True if you wish to make the query using the last-used block hash. Do not mark True
+ if supplying a block_hash
+
+ :return: QueryMapResult object
+ """
+ params = params or []
+ block_hash = await self._get_current_block_hash(block_hash, reuse_block_hash)
+ if block_hash:
+ self.last_block_hash = block_hash
+ runtime = await self.init_runtime(block_hash=block_hash)
+
+ metadata_pallet = runtime.metadata.get_metadata_pallet(module)
+ if not metadata_pallet:
+ raise ValueError(f'Pallet "{module}" not found')
+ storage_item = metadata_pallet.get_storage_function(storage_function)
+
+ if not metadata_pallet or not storage_item:
+ raise ValueError(
+ f'Storage function "{module}.{storage_function}" not found'
+ )
+
+ value_type = storage_item.get_value_type_string()
+ param_types = storage_item.get_params_type_string()
+ key_hashers = storage_item.get_param_hashers()
+
+ # Check MapType conditions
+ if len(param_types) == 0:
+ raise ValueError("Given storage function is not a map")
+ if len(params) > len(param_types) - 1:
+ raise ValueError(
+ f"Storage function map can accept max {len(param_types) - 1} parameters, {len(params)} given"
+ )
+
+ # Generate storage key prefix
+ storage_key = StorageKey.create_from_storage_function(
+ module,
+ storage_item.value["name"],
+ params,
+ runtime_config=runtime.runtime_config,
+ metadata=runtime.metadata,
+ )
+ prefix = storage_key.to_hex()
+
+ if not start_key:
+ start_key = prefix
+
+ # If max_results is smaller than the page size, reduce the page size
+ if max_results is not None and max_results < page_size:
+ page_size = max_results
+
+ # Retrieve storage keys
+ response = await self.rpc_request(
+ method="state_getKeysPaged",
+ params=[prefix, page_size, start_key, block_hash],
+ )
+
+ if "error" in response:
+ raise SubstrateRequestException(response["error"]["message"])
+
+ result_keys = response.get("result")
+
+ result = []
+ last_key = None
+
+ def concat_hash_len(key_hasher: str) -> int:
+ """
+ Returns the length in bytes of the hash prefix that the given key hasher prepends to the raw key.
+ """
+ if key_hasher == "Blake2_128Concat":
+ return 16
+ elif key_hasher == "Twox64Concat":
+ return 8
+ elif key_hasher == "Identity":
+ return 0
+ else:
+ raise ValueError("Unsupported hash type")
+
+ if len(result_keys) > 0:
+ last_key = result_keys[-1]
+
+ # Retrieve corresponding value
+ response = await self.rpc_request(
+ method="state_queryStorageAt", params=[result_keys, block_hash]
+ )
+
+ if "error" in response:
+ raise SubstrateRequestException(response["error"]["message"])
+
+ for result_group in response["result"]:
+ for item in result_group["changes"]:
+ try:
+ # Determine type string
+ key_type_string = []
+ for n in range(len(params), len(param_types)):
+ key_type_string.append(
+ f"[u8; {concat_hash_len(key_hashers[n])}]"
+ )
+ key_type_string.append(param_types[n])
+
+ item_key_obj = await self.decode_scale(
+ type_string=f"({', '.join(key_type_string)})",
+ scale_bytes=bytes.fromhex(item[0][len(prefix) :]),
+ return_scale_obj=True,
+ )
+
+ # strip key_hashers to use as item key
+ if len(param_types) - len(params) == 1:
+ item_key = item_key_obj[1]
+ else:
+ item_key = tuple(
+ item_key_obj[key + 1]
+ for key in range(len(params), len(param_types) + 1, 2)
+ )
+
+ except Exception as _:
+ if not ignore_decoding_errors:
+ raise
+ item_key = None
+
+ try:
+ try:
+ item_bytes = bytes.fromhex(item[1][2:])
+ except ValueError:
+ item_bytes = bytes.fromhex(item[1])
+
+ item_value = await self.decode_scale(
+ type_string=value_type,
+ scale_bytes=item_bytes,
+ return_scale_obj=True,
+ )
+ except Exception as _:
+ if not ignore_decoding_errors:
+ raise
+ item_value = None
+
+ result.append([item_key, item_value])
+
+ return QueryMapResult(
+ records=result,
+ page_size=page_size,
+ module=module,
+ storage_function=storage_function,
+ params=params,
+ block_hash=block_hash,
+ substrate=self,
+ last_key=last_key,
+ max_results=max_results,
+ ignore_decoding_errors=ignore_decoding_errors,
+ )
+
+ async def submit_extrinsic(
+ self,
+ extrinsic: GenericExtrinsic,
+ wait_for_inclusion: bool = False,
+ wait_for_finalization: bool = False,
+ ) -> "ExtrinsicReceipt":
+ """
+ Submit an extrinsic to the connected node, with the possibility to wait until the extrinsic is included
+ in a block and/or the block is finalized. The returned receipt provides information about the block and
+ the triggered events
+
+ Parameters
+ ----------
+ extrinsic: Extrinsic The extrinsic to be sent to the network
+ wait_for_inclusion: wait until extrinsic is included in a block (only works for websocket connections)
+ wait_for_finalization: wait until extrinsic is finalized (only works for websocket connections)
+
+ Returns
+ -------
+ ExtrinsicReceipt
+
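+ Example:
+
+ An illustrative sketch, assuming ``substrate`` is a connected instance of this class and ``extrinsic`` was produced by ``create_signed_extrinsic``:
+
+ ```
+ receipt = await substrate.submit_extrinsic(extrinsic, wait_for_inclusion=True)
+ print(receipt.block_hash, receipt.finalized)
+ ```
+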
+ """
+
+ # Check requirements
+ if not isinstance(extrinsic, GenericExtrinsic):
+ raise TypeError("'extrinsic' must be of type Extrinsics")
+
+ async def result_handler(message: dict, subscription_id) -> tuple[dict, bool]:
+ """
+ Result handler function passed as an arg to _make_rpc_request as the result_handler
+ to handle the results of the extrinsic rpc call, which are multipart, and require
+ subscribing to the message
+
+ :param message: message received from the rpc call
+ :param subscription_id: subscription id received from the initial rpc call for the subscription
+
+ :returns: tuple containing the dict of the block info for the subscription, and bool for whether
+ the subscription is completed.
+ """
+ # Check if extrinsic is included and finalized
+ if "params" in message and isinstance(message["params"]["result"], dict):
+ # Convert result enum to lower for backwards compatibility
+ message_result = {
+ k.lower(): v for k, v in message["params"]["result"].items()
+ }
+
+ if "finalized" in message_result and wait_for_finalization:
+ # Created as a task because we don't actually care about the result
+ self._forgettable_task = asyncio.create_task(
+ self.rpc_request("author_unwatchExtrinsic", [subscription_id])
+ )
+ return {
+ "block_hash": message_result["finalized"],
+ "extrinsic_hash": "0x{}".format(extrinsic.extrinsic_hash.hex()),
+ "finalized": True,
+ }, True
+ elif (
+ "inblock" in message_result
+ and wait_for_inclusion
+ and not wait_for_finalization
+ ):
+ # Created as a task because we don't actually care about the result
+ self._forgettable_task = asyncio.create_task(
+ self.rpc_request("author_unwatchExtrinsic", [subscription_id])
+ )
+ return {
+ "block_hash": message_result["inblock"],
+ "extrinsic_hash": "0x{}".format(extrinsic.extrinsic_hash.hex()),
+ "finalized": False,
+ }, True
+ return message, False
+
+ if wait_for_inclusion or wait_for_finalization:
+ responses = (
+ await self._make_rpc_request(
+ [
+ self.make_payload(
+ "rpc_request",
+ "author_submitAndWatchExtrinsic",
+ [str(extrinsic.data)],
+ )
+ ],
+ result_handler=result_handler,
+ )
+ )["rpc_request"]
+ response = next(
+ (r for r in responses if "block_hash" in r and "extrinsic_hash" in r),
+ None,
+ )
+
+ if not response:
+ raise SubstrateRequestException(responses)
+
+ # Also, this will be a multipart response, so maybe should change to everything after the first response?
+ # The following code implies this will be a single response after the initial subscription id.
+ result = ExtrinsicReceipt(
+ substrate=self,
+ extrinsic_hash=response["extrinsic_hash"],
+ block_hash=response["block_hash"],
+ finalized=response["finalized"],
+ )
+
+ else:
+ response = await self.rpc_request(
+ "author_submitExtrinsic", [str(extrinsic.data)]
+ )
+
+ if "result" not in response:
+ raise SubstrateRequestException(response.get("error"))
+
+ result = ExtrinsicReceipt(substrate=self, extrinsic_hash=response["result"])
+
+ return result
+
+ async def get_metadata_call_function(
+ self,
+ module_name: str,
+ call_function_name: str,
+ block_hash: Optional[str] = None,
+ ) -> Optional[list]:
+ """
+ Retrieves a list of all call functions in metadata active for given block_hash (or chaintip if block_hash
+ is omitted)
+
+ :param module_name: name of the module
+ :param call_function_name: name of the call function
+ :param block_hash: optional block hash
+
+ :return: list of call functions
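+
+ Example (illustrative; assumes ``substrate`` is a connected instance of this class and that the Balances pallet exposes a ``transfer_keep_alive`` call):
+
+ ```
+ call_meta = await substrate.get_metadata_call_function("Balances", "transfer_keep_alive")
+ ```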
+ """
+ runtime = await self.init_runtime(block_hash=block_hash)
+
+ for pallet in runtime.metadata.pallets:
+ if pallet.name == module_name and pallet.calls:
+ for call in pallet.calls:
+ if call.name == call_function_name:
+ return call
+ return None
+
+ async def get_block_number(self, block_hash: Optional[str] = None) -> Optional[int]:
+ """Async version of `substrateinterface.base.get_block_number` method."""
+ response = await self.rpc_request("chain_getHeader", [block_hash])
+
+ if "error" in response:
+ raise SubstrateRequestException(response["error"]["message"])
+
+ elif "result" in response:
+ if response["result"]:
+ return int(response["result"]["number"], 16)
+
+ async def close(self):
+ """
+ Closes the substrate connection, and the websocket connection.
+ """
+ try:
+ await self.ws.shutdown()
+ except AttributeError:
+ pass
diff --git a/bittensor/utils/btlogging/format.py b/bittensor/utils/btlogging/format.py
index 1aa505c82c..9e279a3b26 100644
--- a/bittensor/utils/btlogging/format.py
+++ b/bittensor/utils/btlogging/format.py
@@ -54,6 +54,8 @@ def _success(self, message: str, *args, **kws):
":white_heavy_check_mark:": "✅",
":cross_mark:": "❌",
":satellite:": "🛰️",
+ ":warning:": "⚠️",
+ ":arrow_right:": "➡️",
}
@@ -64,6 +66,8 @@ def _success(self, message: str, *args, **kws):
"": Style.RESET_ALL,
"": Fore.GREEN,
"": Style.RESET_ALL,
+ "": Fore.MAGENTA,
+ "": Style.RESET_ALL,
}
diff --git a/bittensor/utils/btlogging/loggingmachine.py b/bittensor/utils/btlogging/loggingmachine.py
index abc4758bf8..66d7cc7595 100644
--- a/bittensor/utils/btlogging/loggingmachine.py
+++ b/bittensor/utils/btlogging/loggingmachine.py
@@ -49,7 +49,8 @@
def _concat_message(msg="", prefix="", suffix=""):
"""Concatenates a message with optional prefix and suffix."""
- msg = f"{f'{prefix} - ' if prefix else ''}{msg}{f' - {suffix}' if suffix else ''}"
+ empty_pref_suf = [None, ""]
+ msg = f"{f'{prefix} - ' if prefix not in empty_pref_suf else ''}{msg}{f' - {suffix}' if suffix not in empty_pref_suf else ''}"
return msg
@@ -443,27 +444,27 @@ def info(self, msg="", prefix="", suffix="", *args, **kwargs):
def success(self, msg="", prefix="", suffix="", *args, **kwargs):
"""Wraps success message with prefix and suffix."""
- msg = f"{prefix} - {msg} - {suffix}"
+ msg = _concat_message(msg, prefix, suffix)
self._logger.success(msg, *args, **kwargs)
def warning(self, msg="", prefix="", suffix="", *args, **kwargs):
"""Wraps warning message with prefix and suffix."""
- msg = f"{prefix} - {msg} - {suffix}"
+ msg = _concat_message(msg, prefix, suffix)
self._logger.warning(msg, *args, **kwargs)
def error(self, msg="", prefix="", suffix="", *args, **kwargs):
"""Wraps error message with prefix and suffix."""
- msg = f"{prefix} - {msg} - {suffix}"
+ msg = _concat_message(msg, prefix, suffix)
self._logger.error(msg, *args, **kwargs)
def critical(self, msg="", prefix="", suffix="", *args, **kwargs):
"""Wraps critical message with prefix and suffix."""
- msg = f"{prefix} - {msg} - {suffix}"
+ msg = _concat_message(msg, prefix, suffix)
self._logger.critical(msg, *args, **kwargs)
def exception(self, msg="", prefix="", suffix="", *args, **kwargs):
"""Wraps exception message with prefix and suffix."""
- msg = f"{prefix} - {msg} - {suffix}"
+ msg = _concat_message(msg, prefix, suffix)
self._logger.exception(msg, *args, **kwargs)
def on(self):
diff --git a/bittensor/utils/delegates_details.py b/bittensor/utils/delegates_details.py
new file mode 100644
index 0000000000..88a5633e76
--- /dev/null
+++ b/bittensor/utils/delegates_details.py
@@ -0,0 +1,43 @@
+from dataclasses import dataclass
+from typing import Any, Optional
+
+
+@dataclass
+class DelegatesDetails:
+ display: str
+ additional: list[tuple[str, str]]
+ web: str
+ legal: Optional[str] = None
+ riot: Optional[str] = None
+ email: Optional[str] = None
+ pgp_fingerprint: Optional[str] = None
+ image: Optional[str] = None
+ twitter: Optional[str] = None
+
+ @classmethod
+ def from_chain_data(cls, data: dict[str, Any]) -> "DelegatesDetails":
+ def decode(key: str, default: Optional[str] = ""):
+ try:
+ if isinstance(data.get(key), dict):
+ value = next(iter(data.get(key).values()))
+ return bytes(value[0]).decode("utf-8")
+ elif isinstance(data.get(key), int):
+ return data.get(key)
+ elif isinstance(data.get(key), tuple):
+ return bytes(data.get(key)[0]).decode("utf-8")
+ else:
+ return default
+ except (UnicodeDecodeError, TypeError):
+ return default
+
+ return cls(
+ display=decode("display"),
+ additional=decode("additional", []),
+ web=decode("web"),
+ legal=decode("legal"),
+ riot=decode("riot"),
+ email=decode("email"),
+ pgp_fingerprint=decode("pgp_fingerprint", None),
+ image=decode("image"),
+ twitter=decode("twitter"),
+ )
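+
+
+# Illustrative usage (assumes `raw_identity` is a decoded on-chain identity record, e.g. as
+# returned by an identity/registry storage query):
+#
+#   details = DelegatesDetails.from_chain_data(raw_identity)
+#   print(details.display, details.web)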
diff --git a/bittensor/utils/deprecated.py b/bittensor/utils/deprecated.py
index 146e8395d0..124c0daac9 100644
--- a/bittensor/utils/deprecated.py
+++ b/bittensor/utils/deprecated.py
@@ -45,6 +45,7 @@
from bittensor_wallet import Keypair # noqa: F401
from bittensor.core import settings
+from bittensor.core.async_subtensor import AsyncSubtensor
from bittensor.core.axon import Axon
from bittensor.core.chain_data import ( # noqa: F401
AxonInfo,
@@ -116,6 +117,7 @@
from bittensor.utils.subnets import SubnetsAPI # noqa: F401
# Backwards compatibility with previous bittensor versions.
+async_subtensor = AsyncSubtensor
axon = Axon
config = Config
dendrite = Dendrite
diff --git a/bittensor/utils/formatting.py b/bittensor/utils/formatting.py
new file mode 100644
index 0000000000..1ee3fd6671
--- /dev/null
+++ b/bittensor/utils/formatting.py
@@ -0,0 +1,41 @@
+# The MIT License (MIT)
+# Copyright © 2024 Opentensor Foundation
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
+# documentation files (the “Software”), to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
+# and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all copies or substantial portions of
+# the Software.
+#
+# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
+# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+# DEALINGS IN THE SOFTWARE.
+
+import math
+
+
+def get_human_readable(num, suffix="H"):
+ """Convert a number into a human-readable format with suffixes."""
+ for unit in ["", "K", "M", "G", "T", "P", "E", "Z"]:
+ if abs(num) < 1000.0:
+ return f"{num:3.1f}{unit}{suffix}"
+ num /= 1000.0
+ return f"{num:.1f}Y{suffix}"
+
+
+def millify(n: int):
+ """Converts a number into a more readable format with suffixes."""
+ mill_names = ["", " K", " M", " B", " T"]
+ n = float(n)
+ mill_idx = max(
+ 0,
+ min(
+ len(mill_names) - 1,
+ int(math.floor(0 if n == 0 else math.log10(abs(n)) / 3)),
+ ),
+ )
+ return "{:.2f}{}".format(n / 10 ** (3 * mill_idx), mill_names[mill_idx])
diff --git a/bittensor/utils/register_cuda.py b/bittensor/utils/register_cuda.py
new file mode 100644
index 0000000000..e0a77f19c9
--- /dev/null
+++ b/bittensor/utils/register_cuda.py
@@ -0,0 +1,130 @@
+# The MIT License (MIT)
+# Copyright © 2024 Opentensor Foundation
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
+# documentation files (the “Software”), to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
+# and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all copies or substantial portions of
+# the Software.
+#
+# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
+# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+# DEALINGS IN THE SOFTWARE.
+
+import binascii
+import hashlib
+import io
+import math
+from contextlib import redirect_stdout
+from typing import Any, Union
+
+import numpy as np
+from Crypto.Hash import keccak
+
+
+def solve_cuda(
+ nonce_start: "np.int64",
+ update_interval: "np.int64",
+ tpb: int,
+ block_and_hotkey_hash_bytes: bytes,
+ difficulty: int,
+ limit: int,
+ dev_id: int = 0,
+) -> Union[tuple[Any, bytes], tuple[int, bytes], tuple[Any, None]]:
+ """
+ Solves the PoW problem using CUDA.
+
+ Args:
+ nonce_start (numpy.int64): Starting nonce.
+ update_interval (numpy.int64): Number of nonces to solve before updating block information.
+ tpb (int): Threads per block.
+ block_and_hotkey_hash_bytes (bytes): Keccak(Bytes of the block hash + bytes of the hotkey) 64 bytes.
+ difficulty (int): Difficulty of the PoW problem.
+ limit (int): Upper limit of the nonce.
+ dev_id (int): The CUDA device ID. Defaults to ``0``.
+
+ Returns:
+ (Union[tuple[Any, bytes], tuple[int, bytes], tuple[Any, None]]): Tuple of the nonce and the seal corresponding to the solution. Returns -1 for nonce if no solution is found.
+ """
+
+ try:
+ import cubit
+ except ImportError:
+ raise ImportError(
+ "Please install cubit. See the instruction https://github.com/opentensor/cubit?tab=readme-ov-file#install."
+ )
+
+ upper = int(limit // difficulty)
+
+ upper_bytes = upper.to_bytes(32, byteorder="little", signed=False)
+
+ def _hex_bytes_to_u8_list(hex_bytes: bytes):
+ """Converts a sequence of hex bytes to a list of unsigned 8-bit integers."""
+ hex_chunks = [
+ int(hex_bytes[i : i + 2], 16) for i in range(0, len(hex_bytes), 2)
+ ]
+ return hex_chunks
+
+ def _create_seal_hash(block_and_hotkey_hash_hex_: bytes, nonce: int) -> bytes:
+ """Creates a seal hash from the block and hotkey hash and nonce."""
+ nonce_bytes = binascii.hexlify(nonce.to_bytes(8, "little"))
+ pre_seal = nonce_bytes + block_and_hotkey_hash_hex_
+ seal_sh256 = hashlib.sha256(bytearray(_hex_bytes_to_u8_list(pre_seal))).digest()
+ kec = keccak.new(digest_bits=256)
+ return kec.update(seal_sh256).digest()
+
+ def _seal_meets_difficulty(seal_: bytes, difficulty_: int):
+ """Checks if the seal meets the given difficulty."""
+ seal_number = int.from_bytes(seal_, "big")
+ product = seal_number * difficulty_
+ limit_ = int(math.pow(2, 256)) - 1
+
+ return product < limit_
+
+ # Call cython function
+ # int blockSize, uint64 nonce_start, uint64 update_interval, const unsigned char[:] limit,
+ # const unsigned char[:] block_bytes, int dev_id
+ block_and_hotkey_hash_hex = binascii.hexlify(block_and_hotkey_hash_bytes)[:64]
+
+ solution = cubit.solve_cuda(
+ tpb,
+ nonce_start,
+ update_interval,
+ upper_bytes,
+ block_and_hotkey_hash_hex,
+ dev_id,
+ ) # 0 is first GPU
+ seal = None
+ if solution != -1:
+ seal = _create_seal_hash(block_and_hotkey_hash_hex, solution)
+ if _seal_meets_difficulty(seal, difficulty):
+ return solution, seal
+ else:
+ return -1, b"\x00" * 32
+ return solution, seal
+
+
+def reset_cuda():
+ """Resets the CUDA environment."""
+ try:
+ import cubit
+ except ImportError:
+ raise ImportError("Please install cubit")
+ cubit.reset_cuda()
+
+
+def log_cuda_errors() -> str:
+ """Logs any CUDA errors."""
+ try:
+ import cubit
+ except ImportError:
+ raise ImportError("Please install cubit")
+
+ file = io.StringIO()
+ with redirect_stdout(file):
+ cubit.log_cuda_errors()
+ return file.getvalue()
diff --git a/bittensor/utils/registration.py b/bittensor/utils/registration.py
index 4d0cdb93d6..4dd6d8ec67 100644
--- a/bittensor/utils/registration.py
+++ b/bittensor/utils/registration.py
@@ -15,13 +15,30 @@
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
+import binascii
+import dataclasses
import functools
+import hashlib
+import math
+import multiprocessing
import os
-from typing import TYPE_CHECKING
+import random
+import subprocess
+import time
+from datetime import timedelta
+from multiprocessing.queues import Queue as QueueType
+from queue import Empty, Full
+from typing import Any, Callable, Optional, Union, TYPE_CHECKING
import numpy
+from Crypto.Hash import keccak
+from retry import retry
+from rich import console as rich_console, status as rich_status
+from rich.console import Console
from bittensor.utils.btlogging import logging
+from bittensor.utils.formatting import get_human_readable, millify
+from bittensor.utils.register_cuda import solve_cuda
def use_torch() -> bool:
@@ -95,5 +112,1011 @@ def __getattr__(self, name):
if TYPE_CHECKING:
import torch
+ from bittensor.core.subtensor import Subtensor
+ from bittensor_wallet import Wallet
else:
torch = LazyLoadedTorch()
+
+
+def _hex_bytes_to_u8_list(hex_bytes: bytes):
+ """Converts a sequence of hex bytes to a list of unsigned 8-bit integers."""
+ hex_chunks = [int(hex_bytes[i : i + 2], 16) for i in range(0, len(hex_bytes), 2)]
+ return hex_chunks
+
+
+def _create_seal_hash(block_and_hotkey_hash_bytes: bytes, nonce: int) -> bytes:
+ """Create a seal hash for a given block and nonce."""
+ nonce_bytes = binascii.hexlify(nonce.to_bytes(8, "little"))
+ pre_seal = nonce_bytes + binascii.hexlify(block_and_hotkey_hash_bytes)[:64]
+ seal_sh256 = hashlib.sha256(bytearray(_hex_bytes_to_u8_list(pre_seal))).digest()
+ kec = keccak.new(digest_bits=256)
+ seal = kec.update(seal_sh256).digest()
+ return seal
+
+
+def _seal_meets_difficulty(seal: bytes, difficulty: int, limit: int):
+ """Check if the seal meets the given difficulty criteria."""
+ seal_number = int.from_bytes(seal, "big")
+ product = seal_number * difficulty
+ return product < limit
+
+
+@dataclasses.dataclass
+class POWSolution:
+ """A solution to the registration PoW problem."""
+
+ nonce: int
+ block_number: int
+ difficulty: int
+ seal: bytes
+
+ def is_stale(self, subtensor: "Subtensor") -> bool:
+ """
+ Returns True if the POW is stale.
+
+ This means the block the POW was solved for is more than 3 blocks behind the current block.
+ """
+ return self.block_number < subtensor.get_current_block() - 3
+
+
+class _UsingSpawnStartMethod:
+ def __init__(self, force: bool = False):
+ self._old_start_method = None
+ self._force = force
+
+ def __enter__(self):
+ self._old_start_method = multiprocessing.get_start_method(allow_none=True)
+ if self._old_start_method is None:
+ self._old_start_method = "spawn" # default to spawn
+
+ multiprocessing.set_start_method("spawn", force=self._force)
+
+ def __exit__(self, *args):
+ # restore the old start method
+ multiprocessing.set_start_method(self._old_start_method, force=True)
+
+
+class _SolverBase(multiprocessing.Process):
+ """
+ A process that solves the registration PoW problem.
+
+ Args:
+ proc_num (int): The number of the process being created.
+ num_proc (int): The total number of processes running.
+ update_interval (int): The number of nonces to try to solve before checking for a new block.
+ finished_queue (multiprocessing.Queue): The queue to put the process number when a process finishes each update_interval. Used for calculating the average time per update_interval across all processes.
+ solution_queue (multiprocessing.Queue): The queue to put the solution the process has found during the pow solve.
+ newBlockEvent (multiprocessing.Event): The event to set by the main process when a new block is finalized in the network. The solver process will check for the event after each update_interval. The solver process will get the new block hash and difficulty and start solving for a new nonce.
+ stopEvent (multiprocessing.Event): The event to set by the main process when all the solver processes should stop. The solver process will check for the event after each update_interval. The solver process will stop when the event is set. Used to stop the solver processes when a solution is found.
+ curr_block (multiprocessing.Array): The array containing this process's current block hash. The main process will set the array to the new block hash when a new block is finalized in the network. The solver process will get the new block hash from this array when newBlockEvent is set.
+ curr_block_num (multiprocessing.Value): The value containing this process's current block number. The main process will set the value to the new block number when a new block is finalized in the network. The solver process will get the new block number from this value when newBlockEvent is set.
+ curr_diff (multiprocessing.Array): The array containing this process's current difficulty. The main process will set the array to the new difficulty when a new block is finalized in the network. The solver process will get the new difficulty from this array when newBlockEvent is set.
+ check_block (multiprocessing.Lock): The lock to prevent this process from getting the new block data while the main process is updating the data.
+ limit (int): The limit of the pow solve for a valid solution.
+ """
+
+ proc_num: int
+ num_proc: int
+ update_interval: int
+ finished_queue: "multiprocessing.Queue"
+ solution_queue: "multiprocessing.Queue"
+ newBlockEvent: "multiprocessing.Event"
+ stopEvent: "multiprocessing.Event"
+ hotkey_bytes: bytes
+ curr_block: "multiprocessing.Array"
+ curr_block_num: "multiprocessing.Value"
+ curr_diff: "multiprocessing.Array"
+ check_block: "multiprocessing.Lock"
+ limit: int
+
+ def __init__(
+ self,
+ proc_num,
+ num_proc,
+ update_interval,
+ finished_queue,
+ solution_queue,
+ stopEvent,
+ curr_block,
+ curr_block_num,
+ curr_diff,
+ check_block,
+ limit,
+ ):
+ multiprocessing.Process.__init__(self, daemon=True)
+ self.proc_num = proc_num
+ self.num_proc = num_proc
+ self.update_interval = update_interval
+ self.finished_queue = finished_queue
+ self.solution_queue = solution_queue
+ self.newBlockEvent = multiprocessing.Event()
+ self.newBlockEvent.clear()
+ self.curr_block = curr_block
+ self.curr_block_num = curr_block_num
+ self.curr_diff = curr_diff
+ self.check_block = check_block
+ self.stopEvent = stopEvent
+ self.limit = limit
+
+ def run(self):
+ raise NotImplementedError("_SolverBase is an abstract class")
+
+ @staticmethod
+ def create_shared_memory() -> (
+ tuple["multiprocessing.Array", "multiprocessing.Value", "multiprocessing.Array"]
+ ):
+ """Creates shared memory for the solver processes to use."""
+ curr_block = multiprocessing.Array("h", 32, lock=True) # byte array
+ curr_block_num = multiprocessing.Value("i", 0, lock=True) # int
+ curr_diff = multiprocessing.Array("Q", [0, 0], lock=True) # [high, low]
+
+ return curr_block, curr_block_num, curr_diff
+
+
+class _Solver(_SolverBase):
+ def run(self):
+ block_number: int
+ block_and_hotkey_hash_bytes: bytes
+ block_difficulty: int
+ nonce_limit = int(math.pow(2, 64)) - 1
+
+ # Start at random nonce
+ nonce_start = random.randint(0, nonce_limit)
+ nonce_end = nonce_start + self.update_interval
+ while not self.stopEvent.is_set():
+ if self.newBlockEvent.is_set():
+ with self.check_block:
+ block_number = self.curr_block_num.value
+ block_and_hotkey_hash_bytes = bytes(self.curr_block)
+ block_difficulty = _registration_diff_unpack(self.curr_diff)
+
+ self.newBlockEvent.clear()
+
+ # Do a block of nonces
+ solution = _solve_for_nonce_block(
+ nonce_start,
+ nonce_end,
+ block_and_hotkey_hash_bytes,
+ block_difficulty,
+ self.limit,
+ block_number,
+ )
+ if solution is not None:
+ self.solution_queue.put(solution)
+
+ try:
+ # Send time
+ self.finished_queue.put_nowait(self.proc_num)
+ except Full:
+ pass
+
+ nonce_start = random.randint(0, nonce_limit)
+ nonce_start = nonce_start % nonce_limit
+ nonce_end = nonce_start + self.update_interval
+
+
+class _CUDASolver(_SolverBase):
+ dev_id: int
+ tpb: int
+
+ def __init__(
+ self,
+ proc_num,
+ num_proc,
+ update_interval,
+ finished_queue,
+ solution_queue,
+ stopEvent,
+ curr_block,
+ curr_block_num,
+ curr_diff,
+ check_block,
+ limit,
+ dev_id: int,
+ tpb: int,
+ ):
+ super().__init__(
+ proc_num,
+ num_proc,
+ update_interval,
+ finished_queue,
+ solution_queue,
+ stopEvent,
+ curr_block,
+ curr_block_num,
+ curr_diff,
+ check_block,
+ limit,
+ )
+ self.dev_id = dev_id
+ self.tpb = tpb
+
+ def run(self):
+ block_number: int = 0 # dummy value
+ block_and_hotkey_hash_bytes: bytes = b"0" * 32 # dummy value
+ block_difficulty: int = int(math.pow(2, 64)) - 1 # dummy value
+ nonce_limit = int(math.pow(2, 64)) - 1 # U64MAX
+
+ # Start at random nonce
+ nonce_start = random.randint(0, nonce_limit)
+ while not self.stopEvent.is_set():
+ if self.newBlockEvent.is_set():
+ with self.check_block:
+ block_number = self.curr_block_num.value
+ block_and_hotkey_hash_bytes = bytes(self.curr_block)
+ block_difficulty = _registration_diff_unpack(self.curr_diff)
+
+ self.newBlockEvent.clear()
+
+ # Do a block of nonces
+ solution = _solve_for_nonce_block_cuda(
+ nonce_start,
+ self.update_interval,
+ block_and_hotkey_hash_bytes,
+ block_difficulty,
+ self.limit,
+ block_number,
+ self.dev_id,
+ self.tpb,
+ )
+ if solution is not None:
+ self.solution_queue.put(solution)
+
+ try:
+ # Signal that a nonce_block was finished using queue
+ # send our proc_num
+ self.finished_queue.put(self.proc_num)
+ except Full:
+ pass
+
+ # increase nonce by number of nonces processed
+ nonce_start += self.update_interval * self.tpb
+ nonce_start = nonce_start % nonce_limit
+
+
+def _solve_for_nonce_block_cuda(
+ nonce_start: int,
+ update_interval: int,
+ block_and_hotkey_hash_bytes: bytes,
+ difficulty: int,
+ limit: int,
+ block_number: int,
+ dev_id: int,
+ tpb: int,
+) -> Optional["POWSolution"]:
+ """Tries to solve the POW on a CUDA device for a block of nonces (nonce_start, nonce_start + update_interval * tpb"""
+ solution, seal = solve_cuda(
+ nonce_start,
+ update_interval,
+ tpb,
+ block_and_hotkey_hash_bytes,
+ difficulty,
+ limit,
+ dev_id,
+ )
+
+ if solution != -1:
+ # Check if solution is valid (i.e. not -1)
+ return POWSolution(solution, block_number, difficulty, seal)
+
+ return None
+
+
+def _solve_for_nonce_block(
+ nonce_start: int,
+ nonce_end: int,
+ block_and_hotkey_hash_bytes: bytes,
+ difficulty: int,
+ limit: int,
+ block_number: int,
+) -> Optional["POWSolution"]:
+ """Tries to solve the POW for a block of nonces (nonce_start, nonce_end)"""
+ for nonce in range(nonce_start, nonce_end):
+ # Create seal.
+ seal = _create_seal_hash(block_and_hotkey_hash_bytes, nonce)
+
+ # Check if seal meets difficulty
+ if _seal_meets_difficulty(seal, difficulty, limit):
+ # Found a solution, save it.
+ return POWSolution(nonce, block_number, difficulty, seal)
+
+ return None
+
+
+def _registration_diff_unpack(packed_diff: "multiprocessing.Array") -> int:
+ """Unpacks the packed two 32-bit integers into one 64-bit integer. Little endian."""
+ return int(packed_diff[0] << 32 | packed_diff[1])
+
+
+def _registration_diff_pack(diff: int, packed_diff: "multiprocessing.Array"):
+ """Packs the difficulty into two 32-bit integers. Little endian."""
+ packed_diff[0] = diff >> 32
+ packed_diff[1] = diff & 0xFFFFFFFF # low 32 bits
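+
+
+# Illustrative round-trip for the pack/unpack pair above (hypothetical difficulty value):
+#   packed = multiprocessing.Array("Q", [0, 0])
+#   _registration_diff_pack(4294967297, packed)   # 0x1_0000_0001 -> packed[:] == [1, 1]
+#   _registration_diff_unpack(packed)             # -> 4294967297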
+
+
+def _hash_block_with_hotkey(block_bytes: bytes, hotkey_bytes: bytes) -> bytes:
+ """Hashes the block with the hotkey using Keccak-256 to get 32 bytes"""
+ kec = keccak.new(digest_bits=256)
+ kec = kec.update(bytearray(block_bytes + hotkey_bytes))
+ block_and_hotkey_hash_bytes = kec.digest()
+ return block_and_hotkey_hash_bytes
+
+
+def _update_curr_block(
+ curr_diff: "multiprocessing.Array",
+ curr_block: "multiprocessing.Array",
+ curr_block_num: "multiprocessing.Value",
+ block_number: int,
+ block_bytes: bytes,
+ diff: int,
+ hotkey_bytes: bytes,
+ lock: "multiprocessing.Lock",
+):
+ """Updates the current block's information atomically using a lock."""
+ with lock:
+ curr_block_num.value = block_number
+ # Hash the block with the hotkey
+ block_and_hotkey_hash_bytes = _hash_block_with_hotkey(block_bytes, hotkey_bytes)
+ for i in range(32):
+ curr_block[i] = block_and_hotkey_hash_bytes[i]
+ _registration_diff_pack(diff, curr_diff)
+
+
+def get_cpu_count() -> int:
+ """Returns the number of CPUs in the system."""
+ try:
+ return len(os.sched_getaffinity(0))
+ except AttributeError:
+ # OSX does not have sched_getaffinity
+ return os.cpu_count()
+
+
+@dataclasses.dataclass
+class RegistrationStatistics:
+ """Statistics for a registration."""
+
+ time_spent_total: float
+ rounds_total: int
+ time_average: float
+ time_spent: float
+ hash_rate_perpetual: float
+ hash_rate: float
+ difficulty: int
+ block_number: int
+ block_hash: str
+
+
+class RegistrationStatisticsLogger:
+ """Logs statistics for a registration."""
+
+ status: Optional[rich_status.Status]
+
+ def __init__(
+ self,
+ console: Optional[rich_console.Console] = None,
+ output_in_place: bool = True,
+ ) -> None:
+ if console is None:
+ console = Console()
+
+ self.console = console
+
+ if output_in_place:
+ self.status = self.console.status("Solving")
+ else:
+ self.status = None
+
+ def start(self) -> None:
+ if self.status is not None:
+ self.status.start()
+
+ def stop(self) -> None:
+ if self.status is not None:
+ self.status.stop()
+
+ def get_status_message(
+ self, stats: RegistrationStatistics, verbose: bool = False
+ ) -> str:
+ """Generates the status message based on registration statistics."""
+ message = (
+ "Solving\n"
+ + f"Time Spent (total): [bold white]{timedelta(seconds=stats.time_spent_total)}[/bold white]\n"
+ + (
+ f"Time Spent This Round: {timedelta(seconds=stats.time_spent)}\n"
+ + f"Time Spent Average: {timedelta(seconds=stats.time_average)}\n"
+ if verbose
+ else ""
+ )
+ + f"Registration Difficulty: [bold white]{millify(stats.difficulty)}[/bold white]\n"
+ + f"Iters (Inst/Perp): [bold white]{get_human_readable(stats.hash_rate, 'H')}/s / "
+ + f"{get_human_readable(stats.hash_rate_perpetual, 'H')}/s[/bold white]\n"
+ + f"Block Number: [bold white]{stats.block_number}[/bold white]\n"
+ + f"Block Hash: [bold white]{stats.block_hash.encode('utf-8')}[/bold white]\n"
+ )
+ return message
+
+ def update(self, stats: RegistrationStatistics, verbose: bool = False) -> None:
+ if self.status is not None:
+ self.status.update(self.get_status_message(stats, verbose=verbose))
+ else:
+ self.console.log(self.get_status_message(stats, verbose=verbose))
+
+
+def _solve_for_difficulty_fast(
+ subtensor: "Subtensor",
+ wallet: "Wallet",
+ netuid: int,
+ output_in_place: bool = True,
+ num_processes: Optional[int] = None,
+ update_interval: Optional[int] = None,
+ n_samples: int = 10,
+ alpha_: float = 0.80,
+ log_verbose: bool = False,
+) -> Optional[POWSolution]:
+ """
+ Solves the POW for registration using multiprocessing.
+
+ Args:
+ subtensor (bittensor.core.subtensor.Subtensor): Subtensor instance to connect to for block information and to submit.
+ wallet (bittensor_wallet.Wallet): wallet to use for registration.
+ netuid (int): The netuid of the subnet to register to.
+ output_in_place (bool): If true, prints the status in place. Otherwise, prints the status on a new line.
+ num_processes (int): Number of processes to use.
+ update_interval (int): Number of nonces to solve before updating block information.
+ n_samples (int): The number of samples of the hash_rate to keep for the EWMA.
+ alpha_ (float): The alpha for the EWMA for the hash_rate calculation.
+ log_verbose (bool): If true, prints more verbose logging of the registration metrics.
+
+ Note: The hash rate is calculated as an exponentially weighted moving average in order to make the measure more robust.
+ Note: We can also modify the update interval to do smaller blocks of work, while still updating the block information after a different number of nonces, to increase the transparency of the process while still keeping the speed.
+ """
+ if num_processes is None:
+ # default to the number of allowed CPUs for this process (at least 1)
+ num_processes = max(1, get_cpu_count())
+
+ if update_interval is None:
+ update_interval = 50_000
+
+ limit = int(math.pow(2, 256)) - 1
+
+ curr_block, curr_block_num, curr_diff = _Solver.create_shared_memory()
+
+ # Establish communication queues
+ # See the _Solver class for more information on the queues.
+ stopEvent = multiprocessing.Event()
+ stopEvent.clear()
+
+ solution_queue = multiprocessing.Queue()
+ finished_queues = [multiprocessing.Queue() for _ in range(num_processes)]
+ check_block = multiprocessing.Lock()
+
+ hotkey_bytes = (
+ wallet.coldkeypub.public_key if netuid == -1 else wallet.hotkey.public_key
+ )
+ # Start consumers
+ solvers = [
+ _Solver(
+ i,
+ num_processes,
+ update_interval,
+ finished_queues[i],
+ solution_queue,
+ stopEvent,
+ curr_block,
+ curr_block_num,
+ curr_diff,
+ check_block,
+ limit,
+ )
+ for i in range(num_processes)
+ ]
+
+ # Get first block
+ block_number, difficulty, block_hash = _get_block_with_retry(
+ subtensor=subtensor, netuid=netuid
+ )
+
+ block_bytes = bytes.fromhex(block_hash[2:])
+ old_block_number = block_number
+ # Set to current block
+ _update_curr_block(
+ curr_diff,
+ curr_block,
+ curr_block_num,
+ block_number,
+ block_bytes,
+ difficulty,
+ hotkey_bytes,
+ check_block,
+ )
+
+ # Set new block events for each solver to start at the initial block
+ for worker in solvers:
+ worker.newBlockEvent.set()
+
+ for worker in solvers:
+ worker.start() # start the solver processes
+
+ start_time = time.time() # time that the registration started
+ time_last = start_time # time that the last work blocks completed
+
+ curr_stats = RegistrationStatistics(
+ time_spent_total=0.0,
+ time_average=0.0,
+ rounds_total=0,
+ time_spent=0.0,
+ hash_rate_perpetual=0.0,
+ hash_rate=0.0,
+ difficulty=difficulty,
+ block_number=block_number,
+ block_hash=block_hash,
+ )
+
+ start_time_perpetual = time.time()
+
+ logger = RegistrationStatisticsLogger(output_in_place=output_in_place)
+ logger.start()
+
+ solution = None
+
+ hash_rates = [0] * n_samples # The last n true hash_rates
+ weights = [alpha_**i for i in range(n_samples)] # weights decay by alpha
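+
+ # The loop below maintains an exponentially weighted average of the hash rate:
+ # sum(hash_rates[i] * alpha_**i) / sum(alpha_**i) over the last n_samples
+ # measurements, which smooths out noisy per-round readings.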
+
+ while netuid == -1 or not subtensor.is_hotkey_registered(
+ netuid=netuid, hotkey_ss58=wallet.hotkey.ss58_address
+ ):
+ # Wait until a solver finds a solution
+ try:
+ solution = solution_queue.get(block=True, timeout=0.25)
+ if solution is not None:
+ break
+ except Empty:
+ # No solution found, try again
+ pass
+
+ # check for new block
+ old_block_number = _check_for_newest_block_and_update(
+ subtensor=subtensor,
+ netuid=netuid,
+ hotkey_bytes=hotkey_bytes,
+ old_block_number=old_block_number,
+ curr_diff=curr_diff,
+ curr_block=curr_block,
+ curr_block_num=curr_block_num,
+ curr_stats=curr_stats,
+ update_curr_block=_update_curr_block,
+ check_block=check_block,
+ solvers=solvers,
+ )
+
+ num_time = 0
+ for finished_queue in finished_queues:
+ try:
+ proc_num = finished_queue.get(timeout=0.1)
+ num_time += 1
+
+ except Empty:
+ continue
+
+ time_now = time.time() # get current time
+ time_since_last = time_now - time_last # get time since last work block(s)
+ if num_time > 0 and time_since_last > 0.0:
+ # create EWMA of the hash_rate to make measure more robust
+
+ hash_rate_ = (num_time * update_interval) / time_since_last
+ hash_rates.append(hash_rate_)
+ hash_rates.pop(0) # remove the 0th data point
+ curr_stats.hash_rate = sum(
+ [hash_rates[i] * weights[i] for i in range(n_samples)]
+ ) / (sum(weights))
+
+ # update time last to now
+ time_last = time_now
+
+ curr_stats.time_average = (
+ curr_stats.time_average * curr_stats.rounds_total
+ + curr_stats.time_spent
+ ) / (curr_stats.rounds_total + num_time)
+ curr_stats.rounds_total += num_time
+
+ # Update stats
+ curr_stats.time_spent = time_since_last
+ new_time_spent_total = time_now - start_time_perpetual
+ curr_stats.hash_rate_perpetual = (
+ curr_stats.rounds_total * update_interval
+ ) / new_time_spent_total
+ curr_stats.time_spent_total = new_time_spent_total
+
+ # Update the logger
+ logger.update(curr_stats, verbose=log_verbose)
+
+ # exited while, solution contains the nonce or wallet is registered
+ stopEvent.set() # stop all other processes
+ logger.stop()
+
+ # terminate and wait for all solvers to exit
+ _terminate_workers_and_wait_for_exit(solvers)
+
+ return solution
+
+
+@retry(Exception, tries=3, delay=1)
+def _get_block_with_retry(
+ subtensor: "Subtensor", netuid: int
+) -> tuple[int, int, str]:
+ """
+ Gets the current block number, difficulty, and block hash from the substrate node.
+
+ Args:
+ subtensor (bittensor.core.subtensor.Subtensor): The subtensor object to use to get the block number, difficulty, and block hash.
+ netuid (int): The netuid of the network to get the block number, difficulty, and block hash from.
+
+ Returns:
+ tuple[int, int, str]
+ block_number (int): The current block number.
+ difficulty (int): The current difficulty of the subnet.
+ block_hash (str): The current block hash as a hex string.
+
+ Raises:
+ Exception: If the block hash is None.
+ ValueError: If the difficulty is None.
+ """
+ block_number = subtensor.get_current_block()
+ difficulty = 1_000_000 if netuid == -1 else subtensor.difficulty(netuid=netuid)
+ block_hash = subtensor.get_block_hash(block_number)
+ if block_hash is None:
+ raise Exception(
+ "Network error. Could not connect to substrate to get block hash"
+ )
+ if difficulty is None:
+ raise ValueError("Chain error. Difficulty is None")
+ return block_number, difficulty, block_hash
+
+
+def _check_for_newest_block_and_update(
+ subtensor: "Subtensor",
+ netuid: int,
+ old_block_number: int,
+ hotkey_bytes: bytes,
+ curr_diff: "multiprocessing.Array",
+ curr_block: "multiprocessing.Array",
+ curr_block_num: "multiprocessing.Value",
+ update_curr_block: "Callable",
+ check_block: "multiprocessing.Lock",
+ solvers: Union[list["_Solver"], list["_CUDASolver"]],
+ curr_stats: "RegistrationStatistics",
+) -> int:
+ """
+ Checks for a new block and updates the current block information if a new block is found.
+
+ Args:
+ subtensor (bittensor.core.subtensor.Subtensor): The subtensor object to use for getting the current block.
+ netuid (int): The netuid to use for retrieving the difficulty.
+ old_block_number (int): The old block number to check against.
+ hotkey_bytes (bytes): The bytes of the hotkey's pubkey.
+ curr_diff (multiprocessing.Array): The current difficulty as a multiprocessing array.
+ curr_block (multiprocessing.Array): Where the current block is stored as a multiprocessing array.
+ curr_block_num (multiprocessing.Value): Where the current block number is stored as a multiprocessing value.
+ update_curr_block (typing.Callable): A function that updates the current block.
+ check_block (multiprocessing.Lock): A mp lock that is used to check for a new block.
+ solvers (list[bittensor.utils.registration._Solver]): A list of solvers to update the current block for.
+ curr_stats (bittensor.utils.registration.RegistrationStatistics): The current registration statistics to update.
+
+ Returns:
+ (int) The current block number.
+ """
+ block_number = subtensor.get_current_block()
+ if block_number != old_block_number:
+ old_block_number = block_number
+ # update block information
+ block_number, difficulty, block_hash = _get_block_with_retry(
+ subtensor=subtensor, netuid=netuid
+ )
+ block_bytes = bytes.fromhex(block_hash[2:])
+
+ update_curr_block(
+ curr_diff,
+ curr_block,
+ curr_block_num,
+ block_number,
+ block_bytes,
+ difficulty,
+ hotkey_bytes,
+ check_block,
+ )
+ # Set new block events for each solver
+
+ for worker in solvers:
+ worker.newBlockEvent.set()
+
+ # update stats
+ curr_stats.block_number = block_number
+ curr_stats.block_hash = block_hash
+ curr_stats.difficulty = difficulty
+
+ return old_block_number
+
+
+def _solve_for_difficulty_fast_cuda(
+ subtensor: "Subtensor",
+ wallet: "Wallet",
+ netuid: int,
+ output_in_place: bool = True,
+ update_interval: int = 50_000,
+ tpb: int = 512,
+ dev_id: Union[list[int], int] = 0,
+ n_samples: int = 10,
+ alpha_: float = 0.80,
+ log_verbose: bool = False,
+) -> Optional["POWSolution"]:
+ """
+ Solves the registration fast using CUDA.
+
+ Args:
+ subtensor (bittensor.core.subtensor.Subtensor): The subtensor node to grab blocks.
+ wallet (bittensor_wallet.Wallet): The wallet to register.
+ netuid (int): The netuid of the subnet to register to.
+ output_in_place (bool): If true, prints the output in place, otherwise prints to new lines.
+ update_interval (int): The number of nonces to try before checking for more blocks.
+ tpb (int): The number of threads per block. CUDA param that should match the GPU capability.
+ dev_id (Union[list[int], int]): The CUDA device IDs to execute the registration on, either a single device or a list of devices.
+ n_samples (int): The number of samples of the hash_rate to keep for the EWMA.
+ alpha_ (float): The alpha for the EWMA for the hash_rate calculation.
+ log_verbose (bool): If true, prints more verbose logging of the registration metrics.
+
+ Note: The hash rate is calculated as an exponentially weighted moving average in order to make the measure more robust.
+ """
+ if isinstance(dev_id, int):
+ dev_id = [dev_id]
+ elif dev_id is None:
+ dev_id = [0]
+
+ if update_interval is None:
+ update_interval = 50_000
+
+ if not torch.cuda.is_available():
+ raise Exception("CUDA not available")
+
+ limit = int(math.pow(2, 256)) - 1
+
+ # Set mp start to use spawn so CUDA doesn't complain
+ with _UsingSpawnStartMethod(force=True):
+ curr_block, curr_block_num, curr_diff = _CUDASolver.create_shared_memory()
+
+ # Create a worker per CUDA device
+ num_processes = len(dev_id)
+
+ # Establish communication queues
+ stopEvent = multiprocessing.Event()
+ stopEvent.clear()
+ solution_queue = multiprocessing.Queue()
+ finished_queues = [multiprocessing.Queue() for _ in range(num_processes)]
+ check_block = multiprocessing.Lock()
+
+ hotkey_bytes = wallet.hotkey.public_key
+ # Start workers
+ solvers = [
+ _CUDASolver(
+ i,
+ num_processes,
+ update_interval,
+ finished_queues[i],
+ solution_queue,
+ stopEvent,
+ curr_block,
+ curr_block_num,
+ curr_diff,
+ check_block,
+ limit,
+ dev_id[i],
+ tpb,
+ )
+ for i in range(num_processes)
+ ]
+
+ # Get first block
+ block_number, difficulty, block_hash = _get_block_with_retry(
+ subtensor=subtensor, netuid=netuid
+ )
+
+ block_bytes = bytes.fromhex(block_hash[2:])
+ old_block_number = block_number
+
+ # Set to current block
+ _update_curr_block(
+ curr_diff,
+ curr_block,
+ curr_block_num,
+ block_number,
+ block_bytes,
+ difficulty,
+ hotkey_bytes,
+ check_block,
+ )
+
+ # Set new block events for each solver to start at the initial block
+ for worker in solvers:
+ worker.newBlockEvent.set()
+
+ for worker in solvers:
+ worker.start() # start the solver processes
+
+ start_time = time.time() # time that the registration started
+ time_last = start_time # time that the last work blocks completed
+
+ curr_stats = RegistrationStatistics(
+ time_spent_total=0.0,
+ time_average=0.0,
+ rounds_total=0,
+ time_spent=0.0,
+ hash_rate_perpetual=0.0,
+ hash_rate=0.0, # EWMA hash_rate (H/s)
+ difficulty=difficulty,
+ block_number=block_number,
+ block_hash=block_hash,
+ )
+
+ start_time_perpetual = time.time()
+
+ logger = RegistrationStatisticsLogger(output_in_place=output_in_place)
+ logger.start()
+
+ hash_rates = [0] * n_samples # The last n true hash_rates
+ weights = [alpha_**i for i in range(n_samples)] # weights decay by alpha
+
+ solution = None
+ while netuid == -1 or not subtensor.is_hotkey_registered(
+ netuid=netuid, hotkey_ss58=wallet.hotkey.ss58_address
+ ):
+ # Wait until a solver finds a solution
+ try:
+ solution = solution_queue.get(block=True, timeout=0.15)
+ if solution is not None:
+ break
+ except Empty:
+ # No solution found, try again
+ pass
+
+ # check for new block
+ old_block_number = _check_for_newest_block_and_update(
+ subtensor=subtensor,
+ netuid=netuid,
+ hotkey_bytes=hotkey_bytes,
+ curr_diff=curr_diff,
+ curr_block=curr_block,
+ curr_block_num=curr_block_num,
+ old_block_number=old_block_number,
+ curr_stats=curr_stats,
+ update_curr_block=_update_curr_block,
+ check_block=check_block,
+ solvers=solvers,
+ )
+
+ num_time = 0
+ # Get times for each solver
+ for finished_queue in finished_queues:
+ try:
+ proc_num = finished_queue.get(timeout=0.1)
+ num_time += 1
+
+ except Empty:
+ continue
+
+ time_now = time.time() # get current time
+ time_since_last = time_now - time_last # get time since last work block(s)
+ if num_time > 0 and time_since_last > 0.0:
+ # create EWMA of the hash_rate to make measure more robust
+
+ hash_rate_ = (num_time * tpb * update_interval) / time_since_last
+ hash_rates.append(hash_rate_)
+ hash_rates.pop(0) # remove the 0th data point
+ curr_stats.hash_rate = sum(
+ [hash_rates[i] * weights[i] for i in range(n_samples)]
+ ) / (sum(weights))
+
+ # update time last to now
+ time_last = time_now
+
+ curr_stats.time_average = (
+ curr_stats.time_average * curr_stats.rounds_total
+ + curr_stats.time_spent
+ ) / (curr_stats.rounds_total + num_time)
+ curr_stats.rounds_total += num_time
+
+ # Update stats
+ curr_stats.time_spent = time_since_last
+ new_time_spent_total = time_now - start_time_perpetual
+ curr_stats.hash_rate_perpetual = (
+ curr_stats.rounds_total * (tpb * update_interval)
+ ) / new_time_spent_total
+ curr_stats.time_spent_total = new_time_spent_total
+
+ # Update the logger
+ logger.update(curr_stats, verbose=log_verbose)
+
+ # exited while; solution contains the POW result or the wallet is registered
+
+ stopEvent.set() # stop all other processes
+ logger.stop()
+
+ # terminate and wait for all solvers to exit
+ _terminate_workers_and_wait_for_exit(solvers)
+
+ return solution
+
+
+def _terminate_workers_and_wait_for_exit(
+ workers: list[Union[multiprocessing.Process, QueueType]],
+) -> None:
+ for worker in workers:
+ if isinstance(worker, QueueType):
+ worker.join_thread()
+ else:
+ # Process.join() does not raise on timeout, so check liveness explicitly
+ worker.join(3.0)
+ if worker.is_alive():
+ worker.terminate()
+ try:
+ worker.close()
+ except ValueError:
+ # close() raises ValueError if the process is still running
+ worker.terminate()
+
+
+def create_pow(
+ subtensor: "Subtensor",
+ wallet: "Wallet",
+ netuid: int,
+ output_in_place: bool = True,
+ cuda: bool = False,
+ dev_id: Union[list[int], int] = 0,
+ tpb: int = 256,
+ num_processes: Optional[int] = None,
+ update_interval: Optional[int] = None,
+ log_verbose: bool = False,
+) -> Optional[dict[str, Any]]:
+ """
+ Creates a proof of work for the given subtensor and wallet.
+
+ Args:
+ subtensor (bittensor.core.subtensor.Subtensor): The subtensor to create a proof of work for.
+ wallet (bittensor_wallet.Wallet): The wallet to create a proof of work for.
+ netuid (int): The netuid for the subnet to create a proof of work for.
+        output_in_place (bool): If true, prints the progress of the proof of work to the console in-place, meaning the progress is updated on the same lines. Default is ``True``.
+ cuda (bool): If true, uses CUDA to solve the proof of work. Default is ``False``.
+ dev_id (Union[List[int], int]): The CUDA device id(s) to use. If cuda is true and dev_id is a list, then multiple CUDA devices will be used to solve the proof of work. Default is ``0``.
+ tpb (int): The number of threads per block to use when solving the proof of work. Should be a multiple of 32. Default is ``256``.
+ num_processes (Optional[int]): The number of processes to use when solving the proof of work. If None, then the number of processes is equal to the number of CPU cores. Default is None.
+ update_interval (Optional[int]): The number of nonces to run before checking for a new block. Default is ``None``.
+ log_verbose (bool): If true, prints the progress of the proof of work more verbosely. Default is ``False``.
+
+ Returns:
+        Optional[Dict[str, Any]]: The proof of work solution, or ``None`` if the wallet is already registered or another error occurs.
+
+ Raises:
+ ValueError: If the subnet does not exist.
+ """
+ if netuid != -1:
+ if not subtensor.subnet_exists(netuid=netuid):
+ raise ValueError(f"Subnet {netuid} does not exist.")
+
+ if cuda:
+ solution: Optional[POWSolution] = _solve_for_difficulty_fast_cuda(
+ subtensor,
+ wallet,
+ netuid=netuid,
+ output_in_place=output_in_place,
+ dev_id=dev_id,
+ tpb=tpb,
+ update_interval=update_interval,
+ log_verbose=log_verbose,
+ )
+ else:
+ solution: Optional[POWSolution] = _solve_for_difficulty_fast(
+ subtensor,
+ wallet,
+ netuid=netuid,
+ output_in_place=output_in_place,
+ num_processes=num_processes,
+ update_interval=update_interval,
+ log_verbose=log_verbose,
+ )
+ return solution
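+
+
+# Usage sketch (illustrative only, not part of the module): `create_pow` is
+# what `register_extrinsic` calls to obtain a solution before submitting the
+# registration extrinsic; the exact submission call below is an assumption.
+#
+#   pow_result = create_pow(subtensor, wallet, netuid=1, num_processes=4)
+#   if pow_result is not None and not pow_result.is_stale(subtensor=subtensor):
+#       success, error = subtensor._do_pow_register(
+#           netuid=1, wallet=wallet, pow_result=pow_result
+#       )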
diff --git a/requirements/prod.txt b/requirements/prod.txt
index 4a319c506c..bb8e243948 100644
--- a/requirements/prod.txt
+++ b/requirements/prod.txt
@@ -1,7 +1,9 @@
wheel
setuptools~=70.0.0
-bittensor-cli
aiohttp~=3.9
+async-property==0.2.2
+backoff
+bittensor-cli
bt-decode
colorama~=0.4.6
fastapi~=0.110.1
@@ -12,6 +14,7 @@ nest_asyncio
netaddr
packaging
python-statemachine~=2.1
+pycryptodome>=3.18.0,<4.0.0
pyyaml
retry
requests
diff --git a/scripts/environments/apple_m1_environment.yml b/scripts/environments/apple_m1_environment.yml
index 25824aa64e..7d949c7e4e 100644
--- a/scripts/environments/apple_m1_environment.yml
+++ b/scripts/environments/apple_m1_environment.yml
@@ -126,7 +126,6 @@ dependencies:
- argparse==1.4.0
- arrow==1.2.3
- async-timeout==4.0.2
- - backoff==2.1.0
- blinker==1.6.2
- cachetools==4.2.4
- certifi==2024.2.2
diff --git a/tests/e2e_tests/conftest.py b/tests/e2e_tests/conftest.py
index 59170c9512..4a7b2ccf62 100644
--- a/tests/e2e_tests/conftest.py
+++ b/tests/e2e_tests/conftest.py
@@ -8,7 +8,7 @@
import pytest
from substrateinterface import SubstrateInterface
-from bittensor import logging
+from bittensor.utils.btlogging import logging
from tests.e2e_tests.utils.e2e_test_utils import (
clone_or_update_templates,
install_templates,
diff --git a/tests/e2e_tests/test_axon.py b/tests/e2e_tests/test_axon.py
index 853719f85d..a21c4ae532 100644
--- a/tests/e2e_tests/test_axon.py
+++ b/tests/e2e_tests/test_axon.py
@@ -4,8 +4,8 @@
import pytest
import bittensor
-from bittensor import logging
from bittensor.utils import networking
+from bittensor.utils.btlogging import logging
from tests.e2e_tests.utils.chain_interactions import register_neuron, register_subnet
from tests.e2e_tests.utils.e2e_test_utils import (
setup_wallet,
@@ -65,7 +65,6 @@ async def test_axon(local_chain):
[
f"{sys.executable}",
f'"{template_path}{templates_repo}/neurons/miner.py"',
- "--no_prompt",
"--netuid",
str(netuid),
"--subtensor.network",
diff --git a/tests/e2e_tests/test_commit_weights.py b/tests/e2e_tests/test_commit_weights.py
index ca9b0a0a2c..5c03a3788b 100644
--- a/tests/e2e_tests/test_commit_weights.py
+++ b/tests/e2e_tests/test_commit_weights.py
@@ -3,8 +3,9 @@
import numpy as np
import pytest
-import bittensor
-from bittensor import logging
+from bittensor.core.subtensor import Subtensor
+from bittensor.utils.balance import Balance
+from bittensor.utils.btlogging import logging
from bittensor.utils.weight_utils import convert_weights_and_uids_for_emit
from tests.e2e_tests.utils.chain_interactions import (
add_stake,
@@ -20,7 +21,7 @@
@pytest.mark.asyncio
async def test_commit_and_reveal_weights(local_chain):
"""
- Tests the commit/reveal weights mechanism
+ Tests the commit/reveal weights mechanism with subprocess disabled (CR1.0)
Steps:
1. Register a subnet through Alice
@@ -48,7 +49,7 @@ async def test_commit_and_reveal_weights(local_chain):
), "Unable to register Alice as a neuron"
# Stake to become to top neuron after the first epoch
- add_stake(local_chain, alice_wallet, bittensor.Balance.from_tao(100_000))
+ add_stake(local_chain, alice_wallet, Balance.from_tao(100_000))
# Enable commit_reveal on the subnet
assert sudo_set_hyperparameter_bool(
@@ -59,9 +60,9 @@ async def test_commit_and_reveal_weights(local_chain):
netuid,
), "Unable to enable commit reveal on the subnet"
- subtensor = bittensor.Subtensor(network="ws://localhost:9945")
+ subtensor = Subtensor(network="ws://localhost:9945")
assert subtensor.get_subnet_hyperparameters(
- netuid=netuid
+ netuid=netuid,
).commit_reveal_weights_enabled, "Failed to enable commit/reveal"
# Lower the commit_reveal interval
@@ -69,17 +70,16 @@ async def test_commit_and_reveal_weights(local_chain):
local_chain,
alice_wallet,
call_function="sudo_set_commit_reveal_weights_interval",
- call_params={"netuid": netuid, "interval": "370"},
+ call_params={"netuid": netuid, "interval": "1"},
return_error_message=True,
)
- subtensor = bittensor.Subtensor(network="ws://localhost:9945")
assert (
subtensor.get_subnet_hyperparameters(
netuid=netuid
).commit_reveal_weights_interval
- == 370
- ), "Failed to set commit/reveal interval"
+ == 1
+ ), "Failed to set commit/reveal periods"
assert (
subtensor.weights_rate_limit(netuid=netuid) > 0
@@ -92,7 +92,7 @@ async def test_commit_and_reveal_weights(local_chain):
call_params={"netuid": netuid, "weights_set_rate_limit": "0"},
return_error_message=True,
)
- subtensor = bittensor.Subtensor(network="ws://localhost:9945")
+
assert (
subtensor.get_subnet_hyperparameters(netuid=netuid).weights_rate_limit == 0
), "Failed to set weights_rate_limit"
@@ -117,6 +117,8 @@ async def test_commit_and_reveal_weights(local_chain):
wait_for_finalization=True,
)
+ assert success is True
+
weight_commits = subtensor.query_module(
module="SubtensorModule",
name="WeightCommits",
@@ -124,18 +126,20 @@ async def test_commit_and_reveal_weights(local_chain):
)
# Assert that the committed weights are set correctly
assert weight_commits.value is not None, "Weight commit not found in storage"
- commit_hash, commit_block = weight_commits.value
+ commit_hash, commit_block, reveal_block, expire_block = weight_commits.value[0]
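+    # With CR enabled, WeightCommits stores a list of
+    # (commit_hash, commit_block, reveal_block, expire_block) tuples per
+    # (netuid, hotkey), hence the [0] to take the first pending commit.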
assert commit_block > 0, f"Invalid block number: {commit_block}"
# Query the WeightCommitRevealInterval storage map
- weight_commit_reveal_interval = subtensor.query_module(
- module="SubtensorModule", name="WeightCommitRevealInterval", params=[netuid]
+ reveal_periods = subtensor.query_module(
+ module="SubtensorModule", name="RevealPeriodEpochs", params=[netuid]
)
- interval = weight_commit_reveal_interval.value
- assert interval > 0, "Invalid WeightCommitRevealInterval"
+ periods = reveal_periods.value
+ assert periods > 0, "Invalid RevealPeriodEpochs"
# Wait until the reveal block range
- await wait_interval(interval, subtensor)
+ await wait_interval(
+ subtensor.get_subnet_hyperparameters(netuid=netuid).tempo, subtensor
+ )
# Reveal weights
success, message = subtensor.reveal_weights(
@@ -147,6 +151,9 @@ async def test_commit_and_reveal_weights(local_chain):
wait_for_inclusion=True,
wait_for_finalization=True,
)
+
+ assert success is True
+
time.sleep(10)
# Query the Weights storage map
diff --git a/tests/e2e_tests/test_dendrite.py b/tests/e2e_tests/test_dendrite.py
index e075326ca5..24484f68d3 100644
--- a/tests/e2e_tests/test_dendrite.py
+++ b/tests/e2e_tests/test_dendrite.py
@@ -3,20 +3,21 @@
import pytest
-import bittensor
-from bittensor import logging, Subtensor
-
-from tests.e2e_tests.utils.e2e_test_utils import (
- setup_wallet,
- template_path,
- templates_repo,
-)
+from bittensor.core.metagraph import Metagraph
+from bittensor.core.subtensor import Subtensor
+from bittensor.utils.balance import Balance
+from bittensor.utils.btlogging import logging
from tests.e2e_tests.utils.chain_interactions import (
register_neuron,
register_subnet,
add_stake,
wait_epoch,
)
+from tests.e2e_tests.utils.e2e_test_utils import (
+ setup_wallet,
+ template_path,
+ templates_repo,
+)
@pytest.mark.asyncio
@@ -56,7 +57,7 @@ async def test_dendrite(local_chain):
local_chain, bob_wallet, netuid
), f"Neuron wasn't registered to subnet {netuid}"
- metagraph = bittensor.Metagraph(netuid=netuid, network="ws://localhost:9945")
+ metagraph = Metagraph(netuid=netuid, network="ws://localhost:9945")
subtensor = Subtensor(network="ws://localhost:9945")
# Assert one neuron is Bob
@@ -69,10 +70,10 @@ async def test_dendrite(local_chain):
assert neuron.stake.tao == 0
# Stake to become to top neuron after the first epoch
- assert add_stake(local_chain, bob_wallet, bittensor.Balance.from_tao(10_000))
+ assert add_stake(local_chain, bob_wallet, Balance.from_tao(10_000))
# Refresh metagraph
- metagraph = bittensor.Metagraph(netuid=netuid, network="ws://localhost:9945")
+ metagraph = Metagraph(netuid=netuid, network="ws://localhost:9945")
old_neuron = metagraph.neurons[0]
# Assert stake is 10000
@@ -91,7 +92,6 @@ async def test_dendrite(local_chain):
[
f"{sys.executable}",
f'"{template_path}{templates_repo}/neurons/validator.py"',
- "--no_prompt",
"--netuid",
str(netuid),
"--subtensor.network",
@@ -121,7 +121,7 @@ async def test_dendrite(local_chain):
await wait_epoch(subtensor, netuid=netuid)
# Refresh metagraph
- metagraph = bittensor.Metagraph(netuid=netuid, network="ws://localhost:9945")
+ metagraph = Metagraph(netuid=netuid, network="ws://localhost:9945")
# Refresh validator neuron
updated_neuron = metagraph.neurons[0]
diff --git a/tests/e2e_tests/test_incentive.py b/tests/e2e_tests/test_incentive.py
index 3e309f4f64..a95cf37660 100644
--- a/tests/e2e_tests/test_incentive.py
+++ b/tests/e2e_tests/test_incentive.py
@@ -70,7 +70,6 @@ async def test_incentive(local_chain):
[
f"{sys.executable}",
f'"{template_path}{templates_repo}/neurons/miner.py"',
- "--no_prompt",
"--netuid",
str(netuid),
"--subtensor.network",
@@ -103,7 +102,6 @@ async def test_incentive(local_chain):
[
f"{sys.executable}",
f'"{template_path}{templates_repo}/neurons/validator.py"',
- "--no_prompt",
"--netuid",
str(netuid),
"--subtensor.network",
diff --git a/tests/e2e_tests/test_liquid_alpha.py b/tests/e2e_tests/test_liquid_alpha.py
index d73162fbb4..4725704f61 100644
--- a/tests/e2e_tests/test_liquid_alpha.py
+++ b/tests/e2e_tests/test_liquid_alpha.py
@@ -1,5 +1,6 @@
-import bittensor
-from bittensor import logging
+from bittensor.core.subtensor import Subtensor
+from bittensor.utils.balance import Balance
+from bittensor.utils.btlogging import logging
from tests.e2e_tests.utils.chain_interactions import (
add_stake,
register_neuron,
@@ -49,10 +50,10 @@ def test_liquid_alpha(local_chain):
), "Unable to register Alice as a neuron"
# Stake to become to top neuron after the first epoch
- add_stake(local_chain, alice_wallet, bittensor.Balance.from_tao(100_000))
+ add_stake(local_chain, alice_wallet, Balance.from_tao(100_000))
# Assert liquid alpha is disabled
- subtensor = bittensor.Subtensor(network="ws://localhost:9945")
+ subtensor = Subtensor(network="ws://localhost:9945")
assert (
subtensor.get_subnet_hyperparameters(netuid=netuid).liquid_alpha_enabled
is False
@@ -118,7 +119,7 @@ def test_liquid_alpha(local_chain):
alpha_high_too_high = u16_max + 1 # One more than the max acceptable value
call_params = liquid_alpha_call_params(netuid, f"6553, {alpha_high_too_high}")
try:
- result, error_message = sudo_set_hyperparameter_values(
+ sudo_set_hyperparameter_values(
local_chain,
alice_wallet,
call_function="sudo_set_alpha_values",
diff --git a/tests/e2e_tests/test_metagraph.py b/tests/e2e_tests/test_metagraph.py
index ff16dde369..8999b30358 100644
--- a/tests/e2e_tests/test_metagraph.py
+++ b/tests/e2e_tests/test_metagraph.py
@@ -1,7 +1,8 @@
import time
-import bittensor
-from bittensor import logging
+from bittensor.core.subtensor import Subtensor
+from bittensor.utils.balance import Balance
+from bittensor.utils.btlogging import logging
from tests.e2e_tests.utils.chain_interactions import (
add_stake,
register_neuron,
@@ -64,7 +65,7 @@ def test_metagraph(local_chain):
).serialize(), "Subnet wasn't created successfully"
# Initialize metagraph
- subtensor = bittensor.Subtensor(network="ws://localhost:9945")
+ subtensor = Subtensor(network="ws://localhost:9945")
metagraph = subtensor.metagraph(netuid=1)
# Assert metagraph is empty
@@ -129,17 +130,17 @@ def test_metagraph(local_chain):
# Test staking with low balance
assert not add_stake(
- local_chain, dave_wallet, bittensor.Balance.from_tao(10_000)
+ local_chain, dave_wallet, Balance.from_tao(10_000)
), "Low balance stake should fail"
# Add stake by Bob
assert add_stake(
- local_chain, bob_wallet, bittensor.Balance.from_tao(10_000)
+ local_chain, bob_wallet, Balance.from_tao(10_000)
), "Failed to add stake for Bob"
# Assert stake is added after updating metagraph
metagraph.sync(subtensor=subtensor)
- assert metagraph.neurons[0].stake == bittensor.Balance.from_tao(
+ assert metagraph.neurons[0].stake == Balance.from_tao(
10_000
), "Bob's stake not updated in metagraph"
diff --git a/tests/e2e_tests/test_subtensor_functions.py b/tests/e2e_tests/test_subtensor_functions.py
index 32d0f6e14d..d00e587fba 100644
--- a/tests/e2e_tests/test_subtensor_functions.py
+++ b/tests/e2e_tests/test_subtensor_functions.py
@@ -3,8 +3,8 @@
import pytest
-import bittensor
-from bittensor import logging
+from bittensor.core.subtensor import Subtensor
+from bittensor.utils.btlogging import logging
from tests.e2e_tests.utils.chain_interactions import (
register_neuron,
register_subnet,
@@ -31,7 +31,7 @@ async def test_subtensor_extrinsics(local_chain):
AssertionError: If any of the checks or verifications fail
"""
netuid = 1
- subtensor = bittensor.Subtensor(network="ws://localhost:9945")
+ subtensor = Subtensor(network="ws://localhost:9945")
# Subnets 0 and 3 are bootstrapped from the start
assert subtensor.get_subnets() == [0, 3]
@@ -111,7 +111,6 @@ async def test_subtensor_extrinsics(local_chain):
[
f"{sys.executable}",
f'"{template_path}{templates_repo}/neurons/validator.py"',
- "--no_prompt",
"--netuid",
str(netuid),
"--subtensor.network",
@@ -139,7 +138,7 @@ async def test_subtensor_extrinsics(local_chain):
await asyncio.sleep(
5
) # wait for 5 seconds for the metagraph and subtensor to refresh with latest data
- subtensor = bittensor.Subtensor(network="ws://localhost:9945")
+ subtensor = Subtensor(network="ws://localhost:9945")
# Verify neuron info is updated after running as a validator
neuron_info = subtensor.get_neuron_for_pubkey_and_subnet(
diff --git a/tests/e2e_tests/test_transfer.py b/tests/e2e_tests/test_transfer.py
index b6be1cd6ae..62cf9723cc 100644
--- a/tests/e2e_tests/test_transfer.py
+++ b/tests/e2e_tests/test_transfer.py
@@ -32,7 +32,6 @@ def test_transfer(local_chain):
amount=2,
wait_for_finalization=True,
wait_for_inclusion=True,
- prompt=False,
)
# Account details after transfer
diff --git a/tests/e2e_tests/utils/chain_interactions.py b/tests/e2e_tests/utils/chain_interactions.py
index aad53812c8..20e4a65dea 100644
--- a/tests/e2e_tests/utils/chain_interactions.py
+++ b/tests/e2e_tests/utils/chain_interactions.py
@@ -6,7 +6,7 @@
import asyncio
from typing import Union, Optional, TYPE_CHECKING
-from bittensor import logging
+from bittensor.utils.btlogging import logging
# for typing purposes
if TYPE_CHECKING:
diff --git a/tests/helpers/__init__.py b/tests/helpers/__init__.py
index f876d249bd..3c6badb91c 100644
--- a/tests/helpers/__init__.py
+++ b/tests/helpers/__init__.py
@@ -18,7 +18,6 @@
import os
from .helpers import ( # noqa: F401
CLOSE_IN_VALUE,
- MockConsole,
__mock_wallet_factory__,
)
from bittensor_wallet.mock.wallet_mock import ( # noqa: F401
diff --git a/tests/helpers/helpers.py b/tests/helpers/helpers.py
index 417bd643b3..41109ee5e6 100644
--- a/tests/helpers/helpers.py
+++ b/tests/helpers/helpers.py
@@ -22,14 +22,11 @@
from bittensor_wallet.mock.wallet_mock import get_mock_hotkey
from bittensor_wallet.mock.wallet_mock import get_mock_wallet
-from rich.console import Console
-from rich.text import Text
-
from bittensor.utils.balance import Balance
from bittensor.core.chain_data import AxonInfo, NeuronInfo, PrometheusInfo
-def __mock_wallet_factory__(*args, **kwargs) -> _MockWallet:
+def __mock_wallet_factory__(*_, **__) -> _MockWallet:
"""Returns a mock wallet object."""
mock_wallet = get_mock_wallet()
@@ -118,53 +115,3 @@ def get_mock_neuron_by_uid(uid: int, **kwargs) -> NeuronInfo:
return get_mock_neuron(
uid=uid, hotkey=get_mock_hotkey(uid), coldkey=get_mock_coldkey(uid), **kwargs
)
-
-
-class MockStatus:
- def __enter__(self):
- return self
-
- def __exit__(self, exc_type, exc_value, traceback):
- pass
-
- def start(self):
- pass
-
- def stop(self):
- pass
-
- def update(self, *args, **kwargs):
- MockConsole().print(*args, **kwargs)
-
-
-class MockConsole:
- """
- Mocks the console object for status and print.
- Captures the last print output as a string.
- """
-
- captured_print = None
-
- def status(self, *args, **kwargs):
- return MockStatus()
-
- def print(self, *args, **kwargs):
- console = Console(
- width=1000, no_color=True, markup=False
- ) # set width to 1000 to avoid truncation
- console.begin_capture()
- console.print(*args, **kwargs)
- self.captured_print = console.end_capture()
-
- def clear(self, *args, **kwargs):
- pass
-
- @staticmethod
- def remove_rich_syntax(text: str) -> str:
- """
- Removes rich syntax from the given text.
- Removes markup and ansi syntax.
- """
- output_no_syntax = Text.from_ansi(Text.from_markup(text).plain).plain
-
- return output_no_syntax
diff --git a/tests/integration_tests/test_subtensor_integration.py b/tests/integration_tests/test_subtensor_integration.py
index e252cb63f1..bacb340f2c 100644
--- a/tests/integration_tests/test_subtensor_integration.py
+++ b/tests/integration_tests/test_subtensor_integration.py
@@ -15,7 +15,9 @@
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
+import random
import unittest
+from queue import Empty as QueueEmpty
from unittest.mock import MagicMock, patch
import pytest
@@ -28,7 +30,6 @@
from bittensor.utils.mock import MockSubtensor
from tests.helpers import (
get_mock_coldkey,
- MockConsole,
get_mock_keypair,
get_mock_wallet,
)
@@ -50,12 +51,6 @@ def setUp(self):
@classmethod
def setUpClass(cls) -> None:
- # mock rich console status
- mock_console = MockConsole()
- cls._mock_console_patcher = patch(
- "bittensor.core.settings.bt_console", mock_console
- )
- cls._mock_console_patcher.start()
# Keeps the same mock network for all tests. This stops the network from being re-setup for each test.
cls._mock_subtensor = MockSubtensor()
cls._do_setup_subnet()
@@ -67,10 +62,6 @@ def _do_setup_subnet(cls):
# Setup the mock subnet 3
cls._mock_subtensor.create_subnet(netuid=3)
- @classmethod
- def tearDownClass(cls) -> None:
- cls._mock_console_patcher.stop()
-
def test_network_overrides(self):
"""Tests that the network overrides the chain_endpoint."""
# Argument importance: chain_endpoint (arg) > network (arg) > config.subtensor.chain_endpoint > config.subtensor.network
@@ -247,6 +238,170 @@ def test_defaults_to_finney(self):
assert sub.network == "finney"
assert sub.chain_endpoint == settings.FINNEY_ENTRYPOINT
+ def test_registration_multiprocessed_already_registered(self):
+ work_blocks_before_is_registered = random.randint(5, 10)
+        # return False for each work block, then True after a random number of blocks
+ is_registered_return_values = (
+ [False for _ in range(work_blocks_before_is_registered)]
+ + [True]
+ + [True, False]
+ )
+ # this should pass the initial False check in the subtensor class and then return True because the neuron is already registered
+
+ mock_neuron = MagicMock()
+ mock_neuron.is_null = True
+
+ # patch solution queue to return None
+ with patch(
+ "multiprocessing.queues.Queue.get", return_value=None
+ ) as mock_queue_get:
+ # patch time queue get to raise Empty exception
+ with patch(
+ "multiprocessing.queues.Queue.get_nowait", side_effect=QueueEmpty
+ ) as mock_queue_get_nowait:
+ wallet = get_mock_wallet(
+ hotkey=get_mock_keypair(0, self.id()),
+ coldkey=get_mock_keypair(1, self.id()),
+ )
+ self.subtensor.is_hotkey_registered = MagicMock(
+ side_effect=is_registered_return_values
+ )
+
+ self.subtensor.difficulty = MagicMock(return_value=1)
+ self.subtensor.get_neuron_for_pubkey_and_subnet = MagicMock(
+ side_effect=mock_neuron
+ )
+ self.subtensor._do_pow_register = MagicMock(return_value=(True, None))
+
+ # should return True
+ assert self.subtensor.register(
+ wallet=wallet, netuid=3, num_processes=3, update_interval=5
+ )
+
+                # called until it returns True, and once more before the subtensor class exits
+ # This assertion is currently broken when difficulty is too low
+ assert (
+ self.subtensor.is_hotkey_registered.call_count
+ == work_blocks_before_is_registered + 2
+ )
+
+ def test_registration_partly_failed(self):
+ do_pow_register_mock = MagicMock(
+ side_effect=[(False, "Failed"), (False, "Failed"), (True, None)]
+ )
+
+ def is_registered_side_effect(*args, **kwargs):
+ nonlocal do_pow_register_mock
+ return do_pow_register_mock.call_count < 3
+
+        current_block = list(range(100))
+
+ wallet = get_mock_wallet(
+ hotkey=get_mock_keypair(0, self.id()),
+ coldkey=get_mock_keypair(1, self.id()),
+ )
+
+ self.subtensor.get_neuron_for_pubkey_and_subnet = MagicMock(
+ return_value=bittensor.NeuronInfo.get_null_neuron()
+ )
+ self.subtensor.is_hotkey_registered = MagicMock(
+ side_effect=is_registered_side_effect
+ )
+
+ self.subtensor.difficulty = MagicMock(return_value=1)
+ self.subtensor.get_current_block = MagicMock(side_effect=current_block)
+ self.subtensor._do_pow_register = do_pow_register_mock
+
+ # should return True
+ self.assertTrue(
+ self.subtensor.register(
+ wallet=wallet, netuid=3, num_processes=3, update_interval=5
+ ),
+ msg="Registration should succeed",
+ )
+
+ def test_registration_failed(self):
+ is_registered_return_values = [False for _ in range(100)]
+        current_block = list(range(100))
+ mock_neuron = MagicMock()
+ mock_neuron.is_null = True
+
+ with patch(
+ "bittensor.core.extrinsics.registration.create_pow", return_value=None
+ ) as mock_create_pow:
+ wallet = get_mock_wallet(
+ hotkey=get_mock_keypair(0, self.id()),
+ coldkey=get_mock_keypair(1, self.id()),
+ )
+
+ self.subtensor.is_hotkey_registered = MagicMock(
+ side_effect=is_registered_return_values
+ )
+
+ self.subtensor.get_current_block = MagicMock(side_effect=current_block)
+ self.subtensor.get_neuron_for_pubkey_and_subnet = MagicMock(
+ return_value=mock_neuron
+ )
+ self.subtensor.substrate.get_block_hash = MagicMock(
+ return_value="0x" + "0" * 64
+ )
+ self.subtensor._do_pow_register = MagicMock(return_value=(False, "Failed"))
+
+            # should not return True
+ self.assertIsNot(
+ self.subtensor.register(wallet=wallet, netuid=3),
+ True,
+ msg="Registration should fail",
+ )
+ self.assertEqual(mock_create_pow.call_count, 3)
+
+ def test_registration_stale_then_continue(self):
+        # verify that after a stale solution the solver continues without exiting
+
+ class ExitEarly(Exception):
+ pass
+
+ mock_is_stale = MagicMock(side_effect=[True, False])
+
+ mock_do_pow_register = MagicMock(side_effect=ExitEarly())
+
+ mock_subtensor_self = MagicMock(
+ neuron_for_pubkey=MagicMock(
+ return_value=MagicMock(is_null=True)
+ ), # not registered
+ substrate=MagicMock(
+ get_block_hash=MagicMock(return_value="0x" + "0" * 64),
+ ),
+ )
+
+ mock_wallet = MagicMock()
+
+ mock_create_pow = MagicMock(return_value=MagicMock(is_stale=mock_is_stale))
+
+ with patch(
+ "bittensor.core.extrinsics.registration.create_pow", mock_create_pow
+ ), patch(
+ "bittensor.core.extrinsics.registration._do_pow_register",
+ mock_do_pow_register,
+ ):
+ # should create a pow and check if it is stale
+ # then should create a new pow and check if it is stale
+            # then should enter the substrate call and exit early via the test's ExitEarly mock
+ self.subtensor.get_neuron_for_pubkey_and_subnet = MagicMock(
+ return_value=bittensor.NeuronInfo.get_null_neuron()
+ )
+ with pytest.raises(ExitEarly):
+ bittensor.subtensor.register(mock_subtensor_self, mock_wallet, netuid=3)
+ self.assertEqual(
+ mock_create_pow.call_count, 2, msg="must try another pow after stale"
+ )
+ self.assertEqual(mock_is_stale.call_count, 2)
+ self.assertEqual(
+ mock_do_pow_register.call_count,
+ 1,
+ msg="only tries to submit once, then exits",
+ )
+
if __name__ == "__main__":
unittest.main()
diff --git a/tests/unit_tests/extrinsics/test_init.py b/tests/unit_tests/extrinsics/test_init.py
index 8a2480a9b9..8ff60d2de6 100644
--- a/tests/unit_tests/extrinsics/test_init.py
+++ b/tests/unit_tests/extrinsics/test_init.py
@@ -1,9 +1,10 @@
"""Tests for bittensor/extrinsics/__ini__ module."""
from bittensor.utils import format_error_message
+from tests.unit_tests.extrinsics.test_commit_weights import subtensor  # noqa: F401 (pytest fixture)
-def test_format_error_message_with_right_error_message():
+def test_format_error_message_with_right_error_message(mocker):
"""Verify that error message from extrinsic response parses correctly."""
# Prep
fake_error_message = {
@@ -13,7 +14,7 @@ def test_format_error_message_with_right_error_message():
}
# Call
- result = format_error_message(fake_error_message)
+ result = format_error_message(fake_error_message, substrate=mocker.MagicMock())
# Assertions
@@ -22,13 +23,13 @@ def test_format_error_message_with_right_error_message():
assert "Some error description." in result
-def test_format_error_message_with_empty_error_message():
+def test_format_error_message_with_empty_error_message(mocker):
"""Verify that empty error message from extrinsic response parses correctly."""
# Prep
fake_error_message = {}
# Call
- result = format_error_message(fake_error_message)
+ result = format_error_message(fake_error_message, substrate=mocker.MagicMock())
# Assertions
@@ -37,13 +38,13 @@ def test_format_error_message_with_empty_error_message():
assert "Unknown Description" in result
-def test_format_error_message_with_wrong_type_error_message():
+def test_format_error_message_with_wrong_type_error_message(mocker):
"""Verify that error message from extrinsic response with wrong type parses correctly."""
# Prep
fake_error_message = None
# Call
- result = format_error_message(fake_error_message)
+ result = format_error_message(fake_error_message, substrate=mocker.MagicMock())
# Assertions
diff --git a/tests/unit_tests/extrinsics/test_prometheus.py b/tests/unit_tests/extrinsics/test_prometheus.py
deleted file mode 100644
index dbcfed1e47..0000000000
--- a/tests/unit_tests/extrinsics/test_prometheus.py
+++ /dev/null
@@ -1,167 +0,0 @@
-# The MIT License (MIT)
-# Copyright © 2024 Opentensor Foundation
-#
-# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
-# documentation files (the “Software”), to deal in the Software without restriction, including without limitation
-# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
-# and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
-#
-# The above copyright notice and this permission notice shall be included in all copies or substantial portions of
-# the Software.
-#
-# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
-# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
-# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-# DEALINGS IN THE SOFTWARE.
-
-from unittest.mock import MagicMock, patch
-
-import pytest
-from bittensor_wallet import Wallet
-
-from bittensor.core.extrinsics.prometheus import (
- prometheus_extrinsic,
-)
-from bittensor.core.subtensor import Subtensor
-from bittensor.core.settings import version_as_int
-
-
-# Mocking the bittensor and networking modules
-@pytest.fixture
-def mock_bittensor():
- with patch("bittensor.core.subtensor.Subtensor") as mock:
- yield mock
-
-
-@pytest.fixture
-def mock_wallet():
- with patch("bittensor_wallet.Wallet") as mock:
- yield mock
-
-
-@pytest.fixture
-def mock_net():
- with patch("bittensor.utils.networking") as mock:
- yield mock
-
-
-@pytest.mark.parametrize(
- "ip, port, netuid, wait_for_inclusion, wait_for_finalization, expected_result, test_id",
- [
- (None, 9221, 0, False, True, True, "happy-path-default-ip"),
- ("192.168.0.1", 9221, 0, False, True, True, "happy-path-custom-ip"),
- (None, 9221, 0, True, False, True, "happy-path-wait-for-inclusion"),
- (None, 9221, 0, False, False, True, "happy-path-no-waiting"),
- ],
-)
-def test_prometheus_extrinsic_happy_path(
- mock_bittensor,
- mock_wallet,
- mock_net,
- ip,
- port,
- netuid,
- wait_for_inclusion,
- wait_for_finalization,
- expected_result,
- test_id,
-):
- # Arrange
- subtensor = MagicMock(spec=Subtensor)
- subtensor.network = "test_network"
- subtensor.substrate = MagicMock()
- wallet = MagicMock(spec=Wallet)
- mock_net.get_external_ip.return_value = "192.168.0.1"
- mock_net.ip_to_int.return_value = 3232235521 # IP in integer form
- mock_net.ip_version.return_value = 4
- neuron = MagicMock()
- neuron.is_null = False
- neuron.prometheus_info.version = version_as_int
- neuron.prometheus_info.ip = 3232235521
- neuron.prometheus_info.port = port
- neuron.prometheus_info.ip_type = 4
- subtensor.get_neuron_for_pubkey_and_subnet.return_value = neuron
- subtensor._do_serve_prometheus.return_value = (True, None)
-
- # Act
- result = prometheus_extrinsic(
- subtensor=subtensor,
- wallet=wallet,
- ip=ip,
- port=port,
- netuid=netuid,
- wait_for_inclusion=wait_for_inclusion,
- wait_for_finalization=wait_for_finalization,
- )
-
- # Assert
- assert result == expected_result, f"Test ID: {test_id}"
-
-
-# Edge cases
-@pytest.mark.parametrize(
- "ip, port, netuid, test_id",
- [
- ("0.0.0.0", 0, 0, "edge-case-min-values"),
- ("255.255.255.255", 65535, 2147483647, "edge-case-max-values"),
- ],
-)
-def test_prometheus_extrinsic_edge_cases(
- mock_bittensor, mock_wallet, mock_net, ip, port, netuid, test_id
-):
- # Arrange
- subtensor = MagicMock(spec=Subtensor)
- subtensor.network = "test_network"
- subtensor.substrate = MagicMock()
- wallet = MagicMock(spec=Wallet)
- mock_net.get_external_ip.return_value = ip
- mock_net.ip_to_int.return_value = 3232235521 # IP in integer form
- mock_net.ip_version.return_value = 4
- neuron = MagicMock()
- neuron.is_null = True
- subtensor.get_neuron_for_pubkey_and_subnet.return_value = neuron
- subtensor._do_serve_prometheus.return_value = (True, None)
-
- # Act
- result = prometheus_extrinsic(
- subtensor=subtensor,
- wallet=wallet,
- ip=ip,
- port=port,
- netuid=netuid,
- wait_for_inclusion=False,
- wait_for_finalization=True,
- )
-
- # Assert
- assert result is True, f"Test ID: {test_id}"
-
-
-# Error cases
-def test_prometheus_extrinsic_error_cases(mock_bittensor, mock_wallet, mocker):
- # Arrange
- subtensor = MagicMock(spec=Subtensor)
- subtensor.network = "test_network"
- subtensor.substrate = MagicMock()
- subtensor.substrate.websocket.sock.getsockopt.return_value = 0
- wallet = MagicMock(spec=Wallet)
- neuron = MagicMock()
- neuron.is_null = True
- subtensor.get_neuron_for_pubkey_and_subnet.return_value = neuron
- subtensor._do_serve_prometheus.return_value = (True,)
-
- with mocker.patch(
- "bittensor.utils.networking.get_external_ip", side_effect=RuntimeError
- ):
- # Act & Assert
- with pytest.raises(RuntimeError):
- prometheus_extrinsic(
- subtensor=subtensor,
- wallet=wallet,
- ip=None,
- port=9221,
- netuid=1,
- wait_for_inclusion=False,
- wait_for_finalization=True,
- )
diff --git a/tests/unit_tests/extrinsics/test_registration.py b/tests/unit_tests/extrinsics/test_registration.py
new file mode 100644
index 0000000000..18d14fac10
--- /dev/null
+++ b/tests/unit_tests/extrinsics/test_registration.py
@@ -0,0 +1,224 @@
+# The MIT License (MIT)
+# Copyright © 2024 Opentensor Foundation
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
+# documentation files (the “Software”), to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
+# and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all copies or substantial portions of
+# the Software.
+#
+# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
+# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+# DEALINGS IN THE SOFTWARE.
+
+import pytest
+from bittensor_wallet import Wallet
+
+from bittensor.core.extrinsics import registration
+from bittensor.core.subtensor import Subtensor
+from bittensor.utils.registration import POWSolution
+
+
+# Mocking external dependencies
+@pytest.fixture
+def mock_subtensor(mocker):
+ mock = mocker.MagicMock(spec=Subtensor)
+ mock.network = "mock_network"
+ mock.substrate = mocker.MagicMock()
+ return mock
+
+
+@pytest.fixture
+def mock_wallet(mocker):
+ mock = mocker.MagicMock(spec=Wallet)
+ mock.coldkeypub.ss58_address = "mock_address"
+ mock.coldkey = mocker.MagicMock()
+ mock.hotkey = mocker.MagicMock()
+ mock.hotkey.ss58_address = "fake_ss58_address"
+ return mock
+
+
+@pytest.fixture
+def mock_pow_solution(mocker):
+ mock = mocker.MagicMock(spec=POWSolution)
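+    # Field values are arbitrary; they mirror what a real POWSolution carries:
+    # the block the seal was computed against, the winning nonce, and the
+    # seal bytes submitted with the registration extrinsic.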
+ mock.block_number = 123
+ mock.nonce = 456
+ mock.seal = [0, 1, 2, 3]
+ mock.is_stale.return_value = False
+ return mock
+
+
+@pytest.fixture
+def mock_new_wallet(mocker):
+ mock = mocker.MagicMock(spec=Wallet)
+ mock.coldkeypub.ss58_address = "mock_address"
+ mock.coldkey = mocker.MagicMock()
+ mock.hotkey = mocker.MagicMock()
+ return mock
+
+
+@pytest.mark.parametrize(
+ "subnet_exists, neuron_is_null, cuda_available, expected_result, test_id",
+ [
+ (False, True, True, False, "subnet-does-not-exist"),
+ (True, False, True, True, "neuron-already-registered"),
+ (True, True, False, False, "cuda-unavailable"),
+ ],
+)
+def test_register_extrinsic_without_pow(
+ mock_subtensor,
+ mock_wallet,
+ subnet_exists,
+ neuron_is_null,
+ cuda_available,
+ expected_result,
+ test_id,
+ mocker,
+):
+ # Arrange
+ with (
+ mocker.patch.object(
+ mock_subtensor, "subnet_exists", return_value=subnet_exists
+ ),
+ mocker.patch.object(
+ mock_subtensor,
+ "get_neuron_for_pubkey_and_subnet",
+ return_value=mocker.MagicMock(is_null=neuron_is_null),
+ ),
+ mocker.patch("torch.cuda.is_available", return_value=cuda_available),
+ mocker.patch(
+ "bittensor.utils.registration._get_block_with_retry",
+ return_value=(0, 0, "00ff11ee"),
+ ),
+ ):
+ # Act
+ result = registration.register_extrinsic(
+ subtensor=mock_subtensor,
+ wallet=mock_wallet,
+ netuid=123,
+ wait_for_inclusion=True,
+ wait_for_finalization=True,
+ max_allowed_attempts=3,
+ output_in_place=True,
+ cuda=True,
+ dev_id=0,
+ tpb=256,
+ num_processes=None,
+ update_interval=None,
+ log_verbose=False,
+ )
+
+ # Assert
+ assert result == expected_result, f"Test failed for test_id: {test_id}"
+
+
+@pytest.mark.parametrize(
+ "pow_success, pow_stale, registration_success, cuda, hotkey_registered, expected_result, test_id",
+ [
+ (True, False, True, False, False, True, "successful-with-valid-pow"),
+ (True, False, True, True, False, True, "successful-with-valid-cuda-pow"),
+ # Pow failed but key was registered already
+ (False, False, False, False, True, True, "hotkey-registered"),
+ # Pow was a success but registration failed with error 'key already registered'
+ (True, False, False, False, False, True, "registration-fail-key-registered"),
+ ],
+)
+def test_register_extrinsic_with_pow(
+ mock_subtensor,
+ mock_wallet,
+ mock_pow_solution,
+ pow_success,
+ pow_stale,
+ registration_success,
+ cuda,
+ hotkey_registered,
+ expected_result,
+ test_id,
+ mocker,
+):
+ # Arrange
+ with mocker.patch(
+ "bittensor.utils.registration._solve_for_difficulty_fast",
+ return_value=mock_pow_solution if pow_success else None,
+ ), mocker.patch(
+ "bittensor.utils.registration._solve_for_difficulty_fast_cuda",
+ return_value=mock_pow_solution if pow_success else None,
+ ), mocker.patch(
+ "bittensor.core.extrinsics.registration._do_pow_register",
+ return_value=(registration_success, "HotKeyAlreadyRegisteredInSubNet"),
+ ), mocker.patch("torch.cuda.is_available", return_value=cuda):
+ # Act
+ if pow_success:
+ mock_pow_solution.is_stale.return_value = pow_stale
+
+ if not pow_success and hotkey_registered:
+ mock_subtensor.is_hotkey_registered = mocker.MagicMock(
+ return_value=hotkey_registered
+ )
+
+ result = registration.register_extrinsic(
+ subtensor=mock_subtensor,
+ wallet=mock_wallet,
+ netuid=123,
+ wait_for_inclusion=True,
+ wait_for_finalization=True,
+ max_allowed_attempts=3,
+ output_in_place=True,
+ cuda=cuda,
+ dev_id=0,
+ tpb=256,
+ num_processes=None,
+ update_interval=None,
+ log_verbose=False,
+ )
+
+ # Assert
+ assert result == expected_result, f"Test failed for test_id: {test_id}."
+
+
+@pytest.mark.parametrize(
+ "subnet_exists, neuron_is_null, recycle_success, is_registered, expected_result, test_id",
+ [
+ # Happy paths
+ (True, False, None, None, True, "neuron-not-null"),
+ (True, True, True, True, True, "happy-path-wallet-registered"),
+ # Error paths
+ (False, True, False, None, False, "subnet-non-existence"),
+ (True, True, False, False, False, "error-path-recycling-failed"),
+ (True, True, True, False, False, "error-path-not-registered"),
+ ],
+)
+def test_burned_register_extrinsic(
+ mock_subtensor,
+ mock_wallet,
+ subnet_exists,
+ neuron_is_null,
+ recycle_success,
+ is_registered,
+ expected_result,
+ test_id,
+ mocker,
+):
+ # Arrange
+ with mocker.patch.object(
+ mock_subtensor, "subnet_exists", return_value=subnet_exists
+ ), mocker.patch.object(
+ mock_subtensor,
+ "get_neuron_for_pubkey_and_subnet",
+ return_value=mocker.MagicMock(is_null=neuron_is_null),
+ ), mocker.patch(
+ "bittensor.core.extrinsics.registration._do_burned_register",
+ return_value=(recycle_success, "Mock error message"),
+ ), mocker.patch.object(
+ mock_subtensor, "is_hotkey_registered", return_value=is_registered
+ ):
+ # Act
+ result = registration.burned_register_extrinsic(
+ subtensor=mock_subtensor, wallet=mock_wallet, netuid=123
+ )
+ # Assert
+ assert result == expected_result, f"Test failed for test_id: {test_id}"
diff --git a/tests/unit_tests/extrinsics/test_root.py b/tests/unit_tests/extrinsics/test_root.py
new file mode 100644
index 0000000000..96d90fe09a
--- /dev/null
+++ b/tests/unit_tests/extrinsics/test_root.py
@@ -0,0 +1,242 @@
+import pytest
+from bittensor.core.subtensor import Subtensor
+from bittensor.core.extrinsics import root
+
+
+@pytest.fixture
+def mock_subtensor(mocker):
+ mock = mocker.MagicMock(spec=Subtensor)
+ mock.network = "magic_mock"
+ return mock
+
+
+@pytest.fixture
+def mock_wallet(mocker):
+ mock = mocker.MagicMock()
+ mock.hotkey.ss58_address = "fake_hotkey_address"
+ return mock
+
+
+@pytest.mark.parametrize(
+ "wait_for_inclusion, wait_for_finalization, hotkey_registered, registration_success, expected_result",
+ [
+ (
+ False,
+ True,
+ [True, None],
+ True,
+ True,
+ ), # Already registered after attempt
+ (
+ False,
+ True,
+ [False, True],
+ True,
+ True,
+        ),  # Registration attempt succeeds
+ (False, True, [False, False], False, None), # Registration fails
+ (
+ False,
+ True,
+ [False, False],
+ True,
+ None,
+ ), # Registration succeeds but neuron not found
+ ],
+ ids=[
+ "success-already-registered",
+ "success-registration-succeeds",
+ "failure-registration-failed",
+ "failure-neuron-not-found",
+ ],
+)
+def test_root_register_extrinsic(
+ mock_subtensor,
+ mock_wallet,
+ wait_for_inclusion,
+ wait_for_finalization,
+ hotkey_registered,
+ registration_success,
+ expected_result,
+ mocker,
+):
+ # Arrange
+ mock_subtensor.is_hotkey_registered.side_effect = hotkey_registered
+
+ # Preps
+ mock_register = mocker.Mock(
+ return_value=(registration_success, "Error registering")
+ )
+ root._do_root_register = mock_register
+
+ # Act
+ result = root.root_register_extrinsic(
+ subtensor=mock_subtensor,
+ wallet=mock_wallet,
+ wait_for_inclusion=wait_for_inclusion,
+ wait_for_finalization=wait_for_finalization,
+ )
+ # Assert
+ assert result == expected_result
+
+ if not hotkey_registered[0]:
+ mock_register.assert_called_once()
+
+
+@pytest.mark.parametrize(
+ "wait_for_inclusion, wait_for_finalization, netuids, weights, expected_success",
+ [
+ (True, False, [1, 2], [0.5, 0.5], True), # Success - weights set
+ (
+ False,
+ False,
+ [1, 2],
+ [0.5, 0.5],
+ True,
+ ), # Success - weights set no wait
+ (
+ True,
+ False,
+ [1, 2],
+ [2000, 20],
+ True,
+ ), # Success - large value to be normalized
+ (
+ True,
+ False,
+ [1, 2],
+ [2000, 0],
+ True,
+ ), # Success - single large value
+ (
+ True,
+ False,
+ [1, 2],
+ [0.5, 0.5],
+ False,
+ ), # Failure - setting weights failed
+ (
+ True,
+ False,
+ [],
+ [],
+ False,
+        ),  # Exception caught - ValueError 'min() arg is an empty sequence'
+ ],
+ ids=[
+ "success-weights-set",
+ "success-not-wait",
+ "success-large-value",
+ "success-single-value",
+ "failure-setting-weights",
+ "failure-value-error-exception",
+ ],
+)
+def test_set_root_weights_extrinsic(
+ mock_subtensor,
+ mock_wallet,
+ wait_for_inclusion,
+ wait_for_finalization,
+ netuids,
+ weights,
+ expected_success,
+ mocker,
+):
+ # Preps
+ root._do_set_root_weights = mocker.Mock(
+ return_value=(expected_success, "Mock error")
+ )
+ mock_subtensor.min_allowed_weights = mocker.Mock(return_value=0)
+ mock_subtensor.max_weight_limit = mocker.Mock(return_value=1)
+
+ # Call
+ result = root.set_root_weights_extrinsic(
+ subtensor=mock_subtensor,
+ wallet=mock_wallet,
+ netuids=netuids,
+ weights=weights,
+ version_key=0,
+ wait_for_inclusion=wait_for_inclusion,
+ wait_for_finalization=wait_for_finalization,
+ )
+
+ # Asserts
+ assert result == expected_success
+
+
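+# The "large value" cases above rely on set_root_weights_extrinsic normalizing
+# raw weights before submission. A rough sketch of the idea (the exact scheme
+# lives in bittensor.utils.weight_utils and may clip at max_weight_limit):
+#
+#   raw = [2000, 20]
+#   normalized = [w / sum(raw) for w in raw]  # -> approx [0.990, 0.010]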
+@pytest.mark.parametrize(
+ "wait_for_inclusion, wait_for_finalization, netuids, weights, user_response, expected_success",
+ [
+ (True, False, [1, 2], [0.5, 0.5], True, True), # Success - weights set
+ (
+ False,
+ False,
+ [1, 2],
+ [0.5, 0.5],
+ None,
+ True,
+ ), # Success - weights set no wait
+ (
+ True,
+ False,
+ [1, 2],
+ [2000, 20],
+ True,
+ True,
+ ), # Success - large value to be normalized
+ (
+ True,
+ False,
+ [1, 2],
+ [2000, 0],
+ True,
+ True,
+ ), # Success - single large value
+ (
+ True,
+ False,
+ [1, 2],
+ [0.5, 0.5],
+ None,
+ False,
+ ), # Failure - setting weights failed
+ (
+ True,
+ False,
+ [],
+ [],
+ False,
+ False,
+        ),  # Exception caught - ValueError 'min() arg is an empty sequence'
+ ],
+ ids=[
+ "success-weights-set",
+ "success-not-wait",
+ "success-large-value",
+ "success-single-value",
+ "failure-setting-weights",
+ "failure-value-error-exception",
+ ],
+)
+def test_set_root_weights_extrinsic_torch(
+ mock_subtensor,
+ mock_wallet,
+ wait_for_inclusion,
+ wait_for_finalization,
+ netuids,
+ weights,
+ user_response,
+ expected_success,
+ force_legacy_torch_compatible_api,
+ mocker,
+):
+ test_set_root_weights_extrinsic(
+ mock_subtensor,
+ mock_wallet,
+ wait_for_inclusion,
+ wait_for_finalization,
+ netuids,
+ weights,
+ expected_success,
+ mocker,
+ )
diff --git a/tests/unit_tests/extrinsics/test_serving.py b/tests/unit_tests/extrinsics/test_serving.py
index a57e32d01c..46eef17888 100644
--- a/tests/unit_tests/extrinsics/test_serving.py
+++ b/tests/unit_tests/extrinsics/test_serving.py
@@ -50,7 +50,7 @@ def mock_axon(mock_wallet, mocker):
@pytest.mark.parametrize(
- "ip,port,protocol,netuid,placeholder1,placeholder2,wait_for_inclusion,wait_for_finalization,prompt,expected,test_id,",
+ "ip,port,protocol,netuid,placeholder1,placeholder2,wait_for_inclusion,wait_for_finalization,expected,test_id,",
[
(
"192.168.1.1",
@@ -61,7 +61,6 @@ def mock_axon(mock_wallet, mocker):
0,
False,
True,
- False,
True,
"happy-path-no-wait",
),
@@ -74,7 +73,6 @@ def mock_axon(mock_wallet, mocker):
1,
True,
False,
- False,
True,
"happy-path-wait-for-inclusion",
),
@@ -88,14 +86,13 @@ def mock_axon(mock_wallet, mocker):
False,
True,
True,
- True,
- "happy-path-wait-for-finalization-and-prompt",
+ "happy-path-wait-for-finalization",
),
],
ids=[
"happy-path-no-wait",
"happy-path-wait-for-inclusion",
- "happy-path-wait-for-finalization-and-prompt",
+ "happy-path-wait-for-finalization",
],
)
def test_serve_extrinsic_happy_path(
@@ -109,39 +106,33 @@ def test_serve_extrinsic_happy_path(
placeholder2,
wait_for_inclusion,
wait_for_finalization,
- prompt,
expected,
test_id,
mocker,
):
# Arrange
serving.do_serve_axon = mocker.MagicMock(return_value=(True, ""))
- with patch(
- "bittensor.core.extrinsics.serving.Confirm.ask",
- return_value=True,
- ):
- # Act
- result = serving.serve_extrinsic(
- mock_subtensor,
- mock_wallet,
- ip,
- port,
- protocol,
- netuid,
- placeholder1,
- placeholder2,
- wait_for_inclusion,
- wait_for_finalization,
- prompt,
- )
+ # Act
+ result = serving.serve_extrinsic(
+ mock_subtensor,
+ mock_wallet,
+ ip,
+ port,
+ protocol,
+ netuid,
+ placeholder1,
+ placeholder2,
+ wait_for_inclusion,
+ wait_for_finalization,
+ )
- # Assert
- assert result == expected, f"Test ID: {test_id}"
+ # Assert
+ assert result == expected, f"Test ID: {test_id}"
# Various edge cases
@pytest.mark.parametrize(
- "ip,port,protocol,netuid,placeholder1,placeholder2,wait_for_inclusion,wait_for_finalization,prompt,expected,test_id,",
+ "ip,port,protocol,netuid,placeholder1,placeholder2,wait_for_inclusion,wait_for_finalization,expected,test_id,",
[
(
"192.168.1.4",
@@ -152,7 +143,6 @@ def test_serve_extrinsic_happy_path(
3,
True,
True,
- False,
True,
"edge_case_max_values",
),
@@ -170,39 +160,33 @@ def test_serve_extrinsic_edge_cases(
placeholder2,
wait_for_inclusion,
wait_for_finalization,
- prompt,
expected,
test_id,
mocker,
):
# Arrange
serving.do_serve_axon = mocker.MagicMock(return_value=(True, ""))
- with patch(
- "bittensor.core.extrinsics.serving.Confirm.ask",
- return_value=True,
- ):
- # Act
- result = serving.serve_extrinsic(
- mock_subtensor,
- mock_wallet,
- ip,
- port,
- protocol,
- netuid,
- placeholder1,
- placeholder2,
- wait_for_inclusion,
- wait_for_finalization,
- prompt,
- )
+ # Act
+ result = serving.serve_extrinsic(
+ mock_subtensor,
+ mock_wallet,
+ ip,
+ port,
+ protocol,
+ netuid,
+ placeholder1,
+ placeholder2,
+ wait_for_inclusion,
+ wait_for_finalization,
+ )
- # Assert
- assert result == expected, f"Test ID: {test_id}"
+ # Assert
+ assert result == expected, f"Test ID: {test_id}"
# Various error cases
@pytest.mark.parametrize(
- "ip,port,protocol,netuid,placeholder1,placeholder2,wait_for_inclusion,wait_for_finalization,prompt,expected_error_message,test_id,",
+ "ip,port,protocol,netuid,placeholder1,placeholder2,wait_for_inclusion,wait_for_finalization,expected_error_message,test_id,",
[
(
"192.168.1.5",
@@ -214,7 +198,6 @@ def test_serve_extrinsic_edge_cases(
True,
True,
False,
- False,
"error-case-failed-serve",
),
],
@@ -231,51 +214,44 @@ def test_serve_extrinsic_error_cases(
placeholder2,
wait_for_inclusion,
wait_for_finalization,
- prompt,
expected_error_message,
test_id,
mocker,
):
# Arrange
serving.do_serve_axon = mocker.MagicMock(return_value=(False, "Error serving axon"))
- with patch(
- "bittensor.core.extrinsics.serving.Confirm.ask",
- return_value=True,
- ):
- # Act
- result = serving.serve_extrinsic(
- mock_subtensor,
- mock_wallet,
- ip,
- port,
- protocol,
- netuid,
- placeholder1,
- placeholder2,
- wait_for_inclusion,
- wait_for_finalization,
- prompt,
- )
+ # Act
+ result = serving.serve_extrinsic(
+ mock_subtensor,
+ mock_wallet,
+ ip,
+ port,
+ protocol,
+ netuid,
+ placeholder1,
+ placeholder2,
+ wait_for_inclusion,
+ wait_for_finalization,
+ )
- # Assert
- assert result == expected_error_message, f"Test ID: {test_id}"
+ # Assert
+ assert result == expected_error_message, f"Test ID: {test_id}"
@pytest.mark.parametrize(
- "netuid, wait_for_inclusion, wait_for_finalization, prompt, external_ip, external_ip_success, serve_success, expected_result, test_id",
+ "netuid, wait_for_inclusion, wait_for_finalization, external_ip, external_ip_success, serve_success, expected_result, test_id",
[
# Happy path test
- (1, False, True, False, "192.168.1.1", True, True, True, "happy-ext-ip"),
- (1, False, True, True, None, True, True, True, "happy-net-external-ip"),
+ (1, False, True, "192.168.1.1", True, True, True, "happy-ext-ip"),
+ (1, False, True, None, True, True, True, "happy-net-external-ip"),
# Edge cases
- (1, True, True, False, "192.168.1.1", True, True, True, "edge-case-wait"),
+ (1, True, True, "192.168.1.1", True, True, True, "edge-case-wait"),
# Error cases
- (1, False, True, False, None, False, True, False, "error-fetching-external-ip"),
+ (1, False, True, None, False, True, False, "error-fetching-external-ip"),
(
1,
False,
True,
- False,
"192.168.1.1",
True,
False,
@@ -297,7 +273,6 @@ def test_serve_axon_extrinsic(
netuid,
wait_for_inclusion,
wait_for_finalization,
- prompt,
external_ip,
external_ip_success,
serve_success,
diff --git a/tests/unit_tests/extrinsics/test_set_weights.py b/tests/unit_tests/extrinsics/test_set_weights.py
index 9c32fc9bdf..f447915d2f 100644
--- a/tests/unit_tests/extrinsics/test_set_weights.py
+++ b/tests/unit_tests/extrinsics/test_set_weights.py
@@ -28,7 +28,7 @@ def mock_wallet():
@pytest.mark.parametrize(
- "uids, weights, version_key, wait_for_inclusion, wait_for_finalization, prompt, user_accepts, expected_success, expected_message",
+ "uids, weights, version_key, wait_for_inclusion, wait_for_finalization, expected_success, expected_message",
[
(
[1, 2],
@@ -37,8 +37,6 @@ def mock_wallet():
True,
False,
True,
- True,
- True,
"Successfully set weights and Finalized.",
),
(
@@ -47,8 +45,6 @@ def mock_wallet():
0,
False,
False,
- False,
- True,
True,
"Not waiting for finalization or inclusion.",
),
@@ -58,18 +54,14 @@ def mock_wallet():
0,
True,
False,
- True,
- True,
False,
- "Subtensor returned `UnknownError(UnknownType)` error. This means: `Unknown Description`.",
+ "Mock error message",
),
- ([1, 2], [0.5, 0.5], 0, True, True, True, False, False, "Prompt refused."),
],
ids=[
"happy-flow",
"not-waiting-finalization-inclusion",
"error-flow",
- "prompt-refused",
],
)
def test_set_weights_extrinsic(
@@ -80,8 +72,6 @@ def test_set_weights_extrinsic(
version_key,
wait_for_inclusion,
wait_for_finalization,
- prompt,
- user_accepts,
expected_success,
expected_message,
):
@@ -90,7 +80,7 @@ def test_set_weights_extrinsic(
with patch(
"bittensor.utils.weight_utils.convert_weights_and_uids_for_emit",
return_value=(uids_tensor, weights_tensor),
- ), patch("rich.prompt.Confirm.ask", return_value=user_accepts), patch(
+ ), patch(
"bittensor.core.extrinsics.set_weights.do_set_weights",
return_value=(expected_success, "Mock error message"),
) as mock_do_set_weights:
@@ -103,22 +93,10 @@ def test_set_weights_extrinsic(
version_key=version_key,
wait_for_inclusion=wait_for_inclusion,
wait_for_finalization=wait_for_finalization,
- prompt=prompt,
)
assert result == expected_success, f"Test {expected_message} failed."
assert message == expected_message, f"Test {expected_message} failed."
- if user_accepts is not False:
- mock_do_set_weights.assert_called_once_with(
- self=mock_subtensor,
- wallet=mock_wallet,
- netuid=123,
- uids=uids_tensor,
- vals=weights_tensor,
- version_key=version_key,
- wait_for_finalization=wait_for_finalization,
- wait_for_inclusion=wait_for_inclusion,
- )
def test_do_set_weights_is_success(mock_subtensor, mocker):
@@ -226,7 +204,7 @@ def test_do_set_weights_is_not_success(mock_subtensor, mocker):
mock_subtensor.substrate.submit_extrinsic.return_value.process_events.assert_called_once()
assert result == (
False,
- mock_subtensor.substrate.submit_extrinsic.return_value.error_message,
+ "Subtensor returned `UnknownError(UnknownType)` error. This means: `Unknown Description`.",
)
diff --git a/tests/unit_tests/test_chain_data.py b/tests/unit_tests/test_chain_data.py
index 353f697d46..65232e3382 100644
--- a/tests/unit_tests/test_chain_data.py
+++ b/tests/unit_tests/test_chain_data.py
@@ -364,116 +364,3 @@ def create_neuron_info_decoded(
"prometheus_info": prometheus_info,
"axon_info": axon_info,
}
-
-
-@pytest.fixture
-def mock_from_scale_encoding(mocker):
- return mocker.patch("bittensor.core.chain_data.delegate_info.from_scale_encoding")
-
-
-@pytest.fixture
-def mock_fix_decoded_values(mocker):
- return mocker.patch(
- "bittensor.core.chain_data.DelegateInfo.fix_decoded_values",
- side_effect=lambda x: x,
- )
-
-
-@pytest.mark.parametrize(
- "test_id, vec_u8, expected",
- [
- (
- "happy-path-1",
- [1, 2, 3],
- [
- DelegateInfo(
- hotkey_ss58="hotkey",
- total_stake=1000,
- nominators=[
- "nominator1",
- "nominator2",
- ],
- owner_ss58="owner",
- take=10.1,
- validator_permits=[1, 2, 3],
- registrations=[4, 5, 6],
- return_per_1000=100,
- total_daily_return=1000,
- )
- ],
- ),
- (
- "happy-path-2",
- [4, 5, 6],
- [
- DelegateInfo(
- hotkey_ss58="hotkey",
- total_stake=1000,
- nominators=[
- "nominator1",
- "nominator2",
- ],
- owner_ss58="owner",
- take=2.1,
- validator_permits=[1, 2, 3],
- registrations=[4, 5, 6],
- return_per_1000=100,
- total_daily_return=1000,
- )
- ],
- ),
- ],
-)
-def test_list_from_vec_u8_happy_path(
- mock_from_scale_encoding, mock_fix_decoded_values, test_id, vec_u8, expected
-):
- # Arrange
- mock_from_scale_encoding.return_value = expected
-
- # Act
- result = DelegateInfo.list_from_vec_u8(vec_u8)
-
- # Assert
- mock_from_scale_encoding.assert_called_once_with(
- vec_u8, ChainDataType.DelegateInfo, is_vec=True
- )
- assert result == expected, f"Failed {test_id}"
-
-
-@pytest.mark.parametrize(
- "test_id, vec_u8, expected",
- [
- ("edge_empty_list", [], []),
- ],
-)
-def test_list_from_vec_u8_edge_cases(
- mock_from_scale_encoding, mock_fix_decoded_values, test_id, vec_u8, expected
-):
- # Arrange
- mock_from_scale_encoding.return_value = None
-
- # Act
- result = DelegateInfo.list_from_vec_u8(vec_u8)
-
- # Assert
- mock_from_scale_encoding.assert_called_once_with(
- vec_u8, ChainDataType.DelegateInfo, is_vec=True
- )
- assert result == expected, f"Failed {test_id}"
-
-
-@pytest.mark.parametrize(
- "vec_u8, expected_exception",
- [
- ("not_a_list", TypeError),
- ],
-)
-def test_list_from_vec_u8_error_cases(
- vec_u8,
- expected_exception,
-):
- # No Arrange section needed as input values are provided via test parameters
-
- # Act & Assert
- with pytest.raises(expected_exception):
- _ = DelegateInfo.list_from_vec_u8(vec_u8)
diff --git a/tests/unit_tests/test_subtensor.py b/tests/unit_tests/test_subtensor.py
index d0783d20ff..c889903684 100644
--- a/tests/unit_tests/test_subtensor.py
+++ b/tests/unit_tests/test_subtensor.py
@@ -1132,7 +1132,6 @@ def test_set_weights(subtensor, mocker):
fake_weights = [0.4, 0.6]
fake_wait_for_inclusion = False
fake_wait_for_finalization = False
- fake_prompt = False
fake_max_retries = 5
expected_result = (True, None)
@@ -1159,7 +1158,6 @@ def test_set_weights(subtensor, mocker):
version_key=settings.version_as_int,
wait_for_inclusion=fake_wait_for_inclusion,
wait_for_finalization=fake_wait_for_finalization,
- prompt=fake_prompt,
max_retries=fake_max_retries,
)
@@ -1180,7 +1178,6 @@ def test_set_weights(subtensor, mocker):
version_key=settings.version_as_int,
wait_for_inclusion=fake_wait_for_inclusion,
wait_for_finalization=fake_wait_for_finalization,
- prompt=fake_prompt,
)
assert result == expected_result
@@ -1273,7 +1270,6 @@ def test_transfer(subtensor, mocker):
fake_amount = 1.1
fake_wait_for_inclusion = True
fake_wait_for_finalization = True
- fake_prompt = False
mocked_transfer_extrinsic = mocker.patch.object(
subtensor_module, "transfer_extrinsic"
)
@@ -1285,7 +1281,6 @@ def test_transfer(subtensor, mocker):
fake_amount,
fake_wait_for_inclusion,
fake_wait_for_finalization,
- fake_prompt,
)
# Asserts
@@ -1296,7 +1291,6 @@ def test_transfer(subtensor, mocker):
amount=fake_amount,
wait_for_inclusion=fake_wait_for_inclusion,
wait_for_finalization=fake_wait_for_finalization,
- prompt=fake_prompt,
)
assert result == mocked_transfer_extrinsic.return_value
@@ -1406,160 +1400,6 @@ def test_neuron_for_uid_success(subtensor, mocker):
assert result == mocked_neuron_from_vec_u8.return_value
-def test_do_serve_prometheus_is_success(subtensor, mocker):
- """Successful do_serve_prometheus call."""
- # Prep
- fake_wallet = mocker.MagicMock()
- fake_call_params = mocker.MagicMock()
- fake_wait_for_inclusion = True
- fake_wait_for_finalization = True
-
- subtensor.substrate.submit_extrinsic.return_value.is_success = True
-
- # Call
- result = subtensor._do_serve_prometheus(
- wallet=fake_wallet,
- call_params=fake_call_params,
- wait_for_inclusion=fake_wait_for_inclusion,
- wait_for_finalization=fake_wait_for_finalization,
- )
-
- # Asserts
- subtensor.substrate.compose_call.assert_called_once_with(
- call_module="SubtensorModule",
- call_function="serve_prometheus",
- call_params=fake_call_params,
- )
-
- subtensor.substrate.create_signed_extrinsic.assert_called_once_with(
- call=subtensor.substrate.compose_call.return_value,
- keypair=fake_wallet.hotkey,
- )
-
- subtensor.substrate.submit_extrinsic.assert_called_once_with(
- subtensor.substrate.create_signed_extrinsic.return_value,
- wait_for_inclusion=fake_wait_for_inclusion,
- wait_for_finalization=fake_wait_for_finalization,
- )
-
- subtensor.substrate.submit_extrinsic.return_value.process_events.assert_called_once()
- assert result == (True, None)
-
-
-def test_do_serve_prometheus_is_not_success(subtensor, mocker):
- """Unsuccessful do_serve_axon call."""
- # Prep
- fake_wallet = mocker.MagicMock()
- fake_call_params = mocker.MagicMock()
- fake_wait_for_inclusion = True
- fake_wait_for_finalization = True
-
- subtensor.substrate.submit_extrinsic.return_value.is_success = None
-
- # Call
- result = subtensor._do_serve_prometheus(
- wallet=fake_wallet,
- call_params=fake_call_params,
- wait_for_inclusion=fake_wait_for_inclusion,
- wait_for_finalization=fake_wait_for_finalization,
- )
-
- # Asserts
- subtensor.substrate.compose_call.assert_called_once_with(
- call_module="SubtensorModule",
- call_function="serve_prometheus",
- call_params=fake_call_params,
- )
-
- subtensor.substrate.create_signed_extrinsic.assert_called_once_with(
- call=subtensor.substrate.compose_call.return_value,
- keypair=fake_wallet.hotkey,
- )
-
- subtensor.substrate.submit_extrinsic.assert_called_once_with(
- subtensor.substrate.create_signed_extrinsic.return_value,
- wait_for_inclusion=fake_wait_for_inclusion,
- wait_for_finalization=fake_wait_for_finalization,
- )
-
- subtensor.substrate.submit_extrinsic.return_value.process_events.assert_called_once()
- assert result == (
- False,
- subtensor.substrate.submit_extrinsic.return_value.error_message,
- )
-
-
-def test_do_serve_prometheus_no_waits(subtensor, mocker):
- """Unsuccessful do_serve_axon call."""
- # Prep
- fake_wallet = mocker.MagicMock()
- fake_call_params = mocker.MagicMock()
- fake_wait_for_inclusion = False
- fake_wait_for_finalization = False
-
- # Call
- result = subtensor._do_serve_prometheus(
- wallet=fake_wallet,
- call_params=fake_call_params,
- wait_for_inclusion=fake_wait_for_inclusion,
- wait_for_finalization=fake_wait_for_finalization,
- )
-
- # Asserts
- subtensor.substrate.compose_call.assert_called_once_with(
- call_module="SubtensorModule",
- call_function="serve_prometheus",
- call_params=fake_call_params,
- )
-
- subtensor.substrate.create_signed_extrinsic.assert_called_once_with(
- call=subtensor.substrate.compose_call.return_value,
- keypair=fake_wallet.hotkey,
- )
-
- subtensor.substrate.submit_extrinsic.assert_called_once_with(
- subtensor.substrate.create_signed_extrinsic.return_value,
- wait_for_inclusion=fake_wait_for_inclusion,
- wait_for_finalization=fake_wait_for_finalization,
- )
- assert result == (True, None)
-
-
-def test_serve_prometheus(subtensor, mocker):
- """Test serve_prometheus function successful call."""
- # Preps
- fake_wallet = mocker.MagicMock()
- fake_port = 1234
- fake_netuid = 1
- wait_for_inclusion = True
- wait_for_finalization = False
-
- mocked_prometheus_extrinsic = mocker.patch.object(
- subtensor_module, "prometheus_extrinsic"
- )
-
- # Call
- result = subtensor.serve_prometheus(
- fake_wallet,
- fake_port,
- fake_netuid,
- wait_for_inclusion=wait_for_inclusion,
- wait_for_finalization=wait_for_finalization,
- )
-
- # Asserts
- mocked_prometheus_extrinsic.assert_called_once_with(
- subtensor,
- wallet=fake_wallet,
- port=fake_port,
- netuid=fake_netuid,
- wait_for_inclusion=wait_for_inclusion,
- wait_for_finalization=wait_for_finalization,
- )
-
- assert result == mocked_prometheus_extrinsic.return_value
-
-
def test_do_serve_axon_is_success(subtensor, mocker):
"""Successful do_serve_axon call."""
# Prep
@@ -1897,7 +1737,6 @@ def test_commit_weights(subtensor, mocker):
weights = [0.4, 0.6]
wait_for_inclusion = False
wait_for_finalization = False
- prompt = False
max_retries = 5
expected_result = (True, None)
@@ -1918,7 +1757,6 @@ def test_commit_weights(subtensor, mocker):
version_key=settings.version_as_int,
wait_for_inclusion=wait_for_inclusion,
wait_for_finalization=wait_for_finalization,
- prompt=prompt,
max_retries=max_retries,
)
@@ -1939,7 +1777,6 @@ def test_commit_weights(subtensor, mocker):
commit_hash=mocked_generate_weight_hash.return_value,
wait_for_inclusion=wait_for_inclusion,
wait_for_finalization=wait_for_finalization,
- prompt=prompt,
)
assert result == expected_result
@@ -1966,7 +1803,6 @@ def test_reveal_weights(subtensor, mocker):
salt=salt,
wait_for_inclusion=False,
wait_for_finalization=False,
- prompt=False,
)
# Assertions
@@ -1981,7 +1817,6 @@ def test_reveal_weights(subtensor, mocker):
salt=salt,
wait_for_inclusion=False,
wait_for_finalization=False,
- prompt=False,
)
@@ -2009,7 +1844,6 @@ def test_reveal_weights_false(subtensor, mocker):
salt=salt,
wait_for_inclusion=False,
wait_for_finalization=False,
- prompt=False,
)
# Assertion
@@ -2051,3 +1885,256 @@ def test_connect_with_substrate(mocker):
# Assertions
assert spy_get_substrate.call_count == 0
+
+
+def test_get_subnet_burn_cost_success(subtensor, mocker):
+    """Tests the get_subnet_burn_cost method with a successful result."""
+ # Preps
+ mocked_query_runtime_api = mocker.patch.object(subtensor, "query_runtime_api")
+ fake_block = 123
+
+ # Call
+ result = subtensor.get_subnet_burn_cost(fake_block)
+
+ # Asserts
+ mocked_query_runtime_api.assert_called_once_with(
+ runtime_api="SubnetRegistrationRuntimeApi",
+ method="get_network_registration_cost",
+ params=[],
+ block=fake_block,
+ )
+
+ assert result == mocked_query_runtime_api.return_value
+
+
+def test_get_subnet_burn_cost_none(subtensor, mocker):
+    """Tests the get_subnet_burn_cost method with a None result."""
+ # Preps
+ mocked_query_runtime_api = mocker.patch.object(
+ subtensor, "query_runtime_api", return_value=None
+ )
+ fake_block = 123
+
+ # Call
+ result = subtensor.get_subnet_burn_cost(fake_block)
+
+ # Asserts
+ mocked_query_runtime_api.assert_called_once_with(
+ runtime_api="SubnetRegistrationRuntimeApi",
+ method="get_network_registration_cost",
+ params=[],
+ block=fake_block,
+ )
+
+ assert result is None
+
+
+def test_difficulty_success(subtensor, mocker):
+    """Tests the difficulty method with a successful result."""
+ # Preps
+ mocked_get_hyperparameter = mocker.patch.object(subtensor, "_get_hyperparameter")
+ fake_netuid = 1
+ fake_block = 2
+
+ # Call
+ result = subtensor.difficulty(fake_netuid, fake_block)
+
+ # Asserts
+ mocked_get_hyperparameter.assert_called_once_with(
+ param_name="Difficulty",
+ netuid=fake_netuid,
+ block=fake_block,
+ )
+
+ assert result == int(mocked_get_hyperparameter.return_value)
+
+
+def test_difficulty_none(subtensor, mocker):
+    """Tests the difficulty method with a None result."""
+ # Preps
+ mocked_get_hyperparameter = mocker.patch.object(
+ subtensor, "_get_hyperparameter", return_value=None
+ )
+ fake_netuid = 1
+ fake_block = 2
+
+ # Call
+ result = subtensor.difficulty(fake_netuid, fake_block)
+
+ # Asserts
+ mocked_get_hyperparameter.assert_called_once_with(
+ param_name="Difficulty",
+ netuid=fake_netuid,
+ block=fake_block,
+ )
+
+ assert result is None
+
+
+def test_recycle_success(subtensor, mocker):
+    """Tests the recycle method with a successful result."""
+ # Preps
+ mocked_get_hyperparameter = mocker.patch.object(
+ subtensor, "_get_hyperparameter", return_value=0.1
+ )
+ fake_netuid = 1
+ fake_block = 2
+ mocked_balance = mocker.patch("bittensor.utils.balance.Balance")
+
+ # Call
+ result = subtensor.recycle(fake_netuid, fake_block)
+
+ # Asserts
+ mocked_get_hyperparameter.assert_called_once_with(
+ param_name="Burn",
+ netuid=fake_netuid,
+ block=fake_block,
+ )
+
+ mocked_balance.assert_called_once_with(int(mocked_get_hyperparameter.return_value))
+ assert result == mocked_balance.return_value
+
+
+def test_recycle_none(subtensor, mocker):
+    """Tests the recycle method with a None result."""
+ # Preps
+ mocked_get_hyperparameter = mocker.patch.object(
+ subtensor, "_get_hyperparameter", return_value=None
+ )
+ fake_netuid = 1
+ fake_block = 2
+
+ # Call
+ result = subtensor.recycle(fake_netuid, fake_block)
+
+ # Asserts
+ mocked_get_hyperparameter.assert_called_once_with(
+ param_name="Burn",
+ netuid=fake_netuid,
+ block=fake_block,
+ )
+
+ assert result is None
+
+
+# `get_all_subnets_info` tests
+def test_get_all_subnets_info_success(mocker, subtensor):
+ """Test get_all_subnets_info returns correct data when subnet information is found."""
+ # Prep
+ block = 123
+ subnet_data = [1, 2, 3] # Mocked response data
+ mocker.patch.object(
+ subtensor.substrate, "get_block_hash", return_value="mock_block_hash"
+ )
+ mock_response = {"result": subnet_data}
+ mocker.patch.object(subtensor.substrate, "rpc_request", return_value=mock_response)
+ mocker.patch.object(
+ subtensor_module.SubnetInfo,
+ "list_from_vec_u8",
+ return_value="list_from_vec_u80",
+ )
+
+ # Call
+ result = subtensor.get_all_subnets_info(block)
+
+ # Asserts
+ subtensor.substrate.get_block_hash.assert_called_once_with(block)
+ subtensor.substrate.rpc_request.assert_called_once_with(
+ method="subnetInfo_getSubnetsInfo", params=["mock_block_hash"]
+ )
+    subtensor_module.SubnetInfo.list_from_vec_u8.assert_called_once_with(subnet_data)
+    assert result == subtensor_module.SubnetInfo.list_from_vec_u8.return_value
+
+
+@pytest.mark.parametrize("result_", [[], None])
+def test_get_all_subnets_info_no_data(mocker, subtensor, result_):
+    """Test get_all_subnets_info returns an empty list when no subnet information is found."""
+ # Prep
+ block = 123
+ mocker.patch.object(
+ subtensor.substrate, "get_block_hash", return_value="mock_block_hash"
+ )
+ mock_response = {"result": result_}
+ mocker.patch.object(subtensor.substrate, "rpc_request", return_value=mock_response)
+ mocker.patch.object(subtensor_module.SubnetInfo, "list_from_vec_u8")
+
+ # Call
+ result = subtensor.get_all_subnets_info(block)
+
+ # Asserts
+ assert result == []
+ subtensor.substrate.get_block_hash.assert_called_once_with(block)
+ subtensor.substrate.rpc_request.assert_called_once_with(
+ method="subnetInfo_getSubnetsInfo", params=["mock_block_hash"]
+ )
+ subtensor_module.SubnetInfo.list_from_vec_u8.assert_not_called()
+
+
+def test_get_all_subnets_info_retry(mocker, subtensor):
+ """Test get_all_subnets_info retries on failure."""
+ # Prep
+ block = 123
+ subnet_data = [1, 2, 3]
+ mocker.patch.object(
+ subtensor.substrate, "get_block_hash", return_value="mock_block_hash"
+ )
+ mock_response = {"result": subnet_data}
+ mock_rpc_request = mocker.patch.object(
+ subtensor.substrate,
+ "rpc_request",
+ side_effect=[Exception, Exception, mock_response],
+ )
+ mocker.patch.object(
+ subtensor_module.SubnetInfo, "list_from_vec_u8", return_value=["some_data"]
+ )
+
+ # Call
+ result = subtensor.get_all_subnets_info(block)
+
+ # Asserts
+ subtensor.substrate.get_block_hash.assert_called_with(block)
+ assert mock_rpc_request.call_count == 3
+ subtensor_module.SubnetInfo.list_from_vec_u8.assert_called_once_with(subnet_data)
+ assert result == ["some_data"]
+
+
+def test_get_delegate_take_success(subtensor, mocker):
+    """Verify the successful path of the `get_delegate_take` method."""
+ # Preps
+ fake_hotkey_ss58 = "FAKE_SS58"
+ fake_block = 123
+
+ subtensor_module.u16_normalized_float = mocker.Mock()
+ subtensor.query_subtensor = mocker.Mock(return_value=mocker.Mock(value="value"))
+
+ # Call
+ result = subtensor.get_delegate_take(hotkey_ss58=fake_hotkey_ss58, block=fake_block)
+
+ # Asserts
+ subtensor.query_subtensor.assert_called_once_with(
+ "Delegates", fake_block, [fake_hotkey_ss58]
+ )
+ subtensor_module.u16_normalized_float.assert_called_once_with(
+ subtensor.query_subtensor.return_value.value
+ )
+ assert result == subtensor_module.u16_normalized_float.return_value
+
+
+def test_get_delegate_take_none(subtensor, mocker):
+    """Verify that the `get_delegate_take` method returns None."""
+ # Preps
+ fake_hotkey_ss58 = "FAKE_SS58"
+ fake_block = 123
+
+ subtensor.query_subtensor = mocker.Mock(return_value=mocker.Mock(value=None))
+ subtensor_module.u16_normalized_float = mocker.Mock()
+
+ # Call
+ result = subtensor.get_delegate_take(hotkey_ss58=fake_hotkey_ss58, block=fake_block)
+
+ # Asserts
+ subtensor.query_subtensor.assert_called_once_with(
+ "Delegates", fake_block, [fake_hotkey_ss58]
+ )
+
+ subtensor_module.u16_normalized_float.assert_not_called()
+ assert result is None
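
The `get_delegate_take` tests above mock `u16_normalized_float`, which implies the on-chain take is stored as a compact `u16` and scaled into a float. A minimal sketch of such a normalizer, assuming the conventional 16-bit maximum (the constant and the exact rounding behavior are assumptions):

```python
U16_MAX = 65535  # largest value representable in an unsigned 16-bit integer


def u16_normalized_float(x: int) -> float:
    """Map a compact on-chain u16 value onto the range [0.0, 1.0]."""
    return float(x) / float(U16_MAX)


# Example: a stored take of 11796 corresponds to roughly an 18% delegate take.
assert abs(u16_normalized_float(11796) - 0.18) < 1e-3
```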
diff --git a/tests/unit_tests/utils/test_formatting.py b/tests/unit_tests/utils/test_formatting.py
new file mode 100644
index 0000000000..3c223a48b3
--- /dev/null
+++ b/tests/unit_tests/utils/test_formatting.py
@@ -0,0 +1,80 @@
+# The MIT License (MIT)
+# Copyright © 2024 Opentensor Foundation
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
+# documentation files (the “Software”), to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
+# and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all copies or substantial portions of
+# the Software.
+#
+# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
+# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+# DEALINGS IN THE SOFTWARE.
+
+import math
+
+from bittensor.utils import formatting
+
+
+def test_get_human_readable():
+ """Tests the `get_human_readable` function in the `formatting` module."""
+ num1 = 1000
+ num2 = 1_000_000
+ num3 = 1_000_000_000
+ num4 = 150
+ negative_num = -1000
+
+ # Test for default output
+ assert formatting.get_human_readable(num1) == "1.0KH"
+
+ # Test for different quantities
+ assert formatting.get_human_readable(num2) == "1.0MH"
+ assert formatting.get_human_readable(num3) == "1.0GH"
+
+ # Test for numbers less than 1000
+ assert formatting.get_human_readable(num4) == "150.0H"
+
+ # Test for negative numbers
+ assert formatting.get_human_readable(negative_num) == "-1.0KH"
+
+ # Test for different suffix
+ assert formatting.get_human_readable(num1, suffix="B") == "1.0KB"
+ assert formatting.get_human_readable(num2, suffix="B") == "1.0MB"
+ assert formatting.get_human_readable(num3, suffix="B") == "1.0GB"
+ assert formatting.get_human_readable(num4, suffix="B") == "150.0B"
+ assert formatting.get_human_readable(negative_num, suffix="B") == "-1.0KB"
+
+
+def test_millify():
+ """Test millify function with various cases."""
+ # Testing with value 0
+ assert formatting.millify(0) == "0.00"
+ # Testing with a number in the tens
+ assert formatting.millify(10) == "10.00"
+ # Testing with a number in the hundreds
+ assert formatting.millify(100) == "100.00"
+ # Testing with a number in the thousands
+ assert formatting.millify(1000) == "1.00 K"
+ # Testing with a number in the millions
+ assert formatting.millify(1000000) == "1.00 M"
+ # Testing with a number in the billions
+ assert formatting.millify(1000000000) == "1.00 B"
+ # Testing with a number in the trillions
+ assert formatting.millify(1000000000000) == "1.00 T"
+ # Testing with maximum limit
+ mill_names = ["", " K", " M", " B", " T"]
+ n = 10 ** (3 * (len(mill_names) - 1) + 1)
+ mill_idx = max(
+ 0,
+ min(
+ len(mill_names) - 1,
+ int(math.floor(0 if n == 0 else math.log10(abs(n)) / 3)),
+ ),
+ )
+ assert formatting.millify(n) == "{:.2f}{}".format(
+ n / 10 ** (3 * mill_idx), mill_names[mill_idx]
+ )
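
The assertions in the new `test_formatting.py` pin the expected behavior down precisely. Implementations consistent with every check above could look like the sketch below; the real `bittensor.utils.formatting` module may differ in details:

```python
import math


def get_human_readable(num: float, suffix: str = "H") -> str:
    """Scale a number down by factors of 1000, appending a unit prefix and suffix."""
    for unit in ["", "K", "M", "G", "T", "P", "E", "Z"]:
        if abs(num) < 1000.0:
            return f"{num:3.1f}{unit}{suffix}"
        num /= 1000.0
    return f"{num:.1f}Y{suffix}"


def millify(n: int) -> str:
    """Format a number to two decimals with a thousands-group name (K/M/B/T)."""
    mill_names = ["", " K", " M", " B", " T"]
    value = float(n)
    mill_idx = max(
        0,
        min(
            len(mill_names) - 1,
            int(math.floor(0 if value == 0 else math.log10(abs(value)) / 3)),
        ),
    )
    return "{:.2f}{}".format(value / 10 ** (3 * mill_idx), mill_names[mill_idx])


# Spot-checks mirroring the tests above:
assert get_human_readable(1_000_000) == "1.0MH"
assert millify(1_000_000_000) == "1.00 B"
```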