diff --git a/CHANGELOG.md b/CHANGELOG.md
index b04ebaa0f0..23b0ed3eae 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,23 @@
# Changelog
+## 8.2.1 /2024-11-06
+
+## What's Changed
+
+* Expands the type registry to include all the available options by @thewhaleking in https://github.com/opentensor/bittensor/pull/2353
+* add `Subtensor.register`, `Subtensor.difficulty` and related stuff with tests by @roman-opentensor in https://github.com/opentensor/bittensor/pull/2352
+* added to Subtensor: `burned_register`, `get_subnet_burn_cost`, `recycle` and related extrinsics by @roman-opentensor in https://github.com/opentensor/bittensor/pull/2359
+* Poem "Risen from the Past". Act 3. by @roman-opentensor in https://github.com/opentensor/bittensor/pull/2363
+* default port from 9946 to 9944 by @roman-opentensor in https://github.com/opentensor/bittensor/pull/2376
+* remove unused prometheus extrinsic by @roman-opentensor in https://github.com/opentensor/bittensor/pull/2378
+* Replace rich.console with btlogging.logging by @roman-opentensor in https://github.com/opentensor/bittensor/pull/2377
+* Backmerge 8.2.0 by @ibraheem-opentensor in https://github.com/opentensor/bittensor/pull/2389
+* Add subvortex subnet and tests by @roman-opentensor in https://github.com/opentensor/bittensor/pull/2395
+* Handle SSL Error on Connection by @thewhaleking in https://github.com/opentensor/bittensor/pull/2384
+* Avoid using prompt in SDK by @roman-opentensor in https://github.com/opentensor/bittensor/pull/2382
+
+**Full Changelog**: https://github.com/opentensor/bittensor/compare/v8.2.0...v8.2.1
+
## 8.2.0 /2024-10-10
## What's Changed
diff --git a/bittensor/core/chain_data/delegate_info.py b/bittensor/core/chain_data/delegate_info.py
index d77f1e1412..a840d1bb15 100644
--- a/bittensor/core/chain_data/delegate_info.py
+++ b/bittensor/core/chain_data/delegate_info.py
@@ -1,10 +1,9 @@
-from dataclasses import dataclass
-from typing import Optional, Any
+import bt_decode
-from scalecodec.utils.ss58 import ss58_encode
+from dataclasses import dataclass
+from typing import Optional
-from bittensor.core.chain_data.utils import from_scale_encoding, ChainDataType
-from bittensor.core.settings import SS58_FORMAT
+from bittensor.core.chain_data.utils import decode_account_id
from bittensor.utils import u16_normalized_float
from bittensor.utils.balance import Balance
@@ -24,7 +23,6 @@ class DelegateInfo:
validator_permits (list[int]): List of subnets that the delegate is allowed to validate on.
return_per_1000 (int): Return per 1000 TAO, for the delegate over a day.
total_daily_return (int): Total daily return of the delegate.
-
"""
hotkey_ss58: str # Hotkey of delegate
@@ -37,69 +35,78 @@ class DelegateInfo:
validator_permits: list[
int
] # List of subnets that the delegate is allowed to validate on
- registrations: tuple[int] # List of subnets that the delegate is registered on
+ registrations: list[int] # list of subnets that the delegate is registered on
return_per_1000: Balance # Return per 1000 tao of the delegate over a day
total_daily_return: Balance # Total daily return of the delegate
@classmethod
- def fix_decoded_values(cls, decoded: Any) -> "DelegateInfo":
- """Fixes the decoded values."""
-
- return cls(
- hotkey_ss58=ss58_encode(decoded["delegate_ss58"], SS58_FORMAT),
- owner_ss58=ss58_encode(decoded["owner_ss58"], SS58_FORMAT),
- take=u16_normalized_float(decoded["take"]),
- nominators=[
- (
- ss58_encode(nom[0], SS58_FORMAT),
- Balance.from_rao(nom[1]),
- )
- for nom in decoded["nominators"]
- ],
- total_stake=Balance.from_rao(
- sum([nom[1] for nom in decoded["nominators"]])
- ),
- validator_permits=decoded["validator_permits"],
- registrations=decoded["registrations"],
- return_per_1000=Balance.from_rao(decoded["return_per_1000"]),
- total_daily_return=Balance.from_rao(decoded["total_daily_return"]),
+ def from_vec_u8(cls, vec_u8: bytes) -> Optional["DelegateInfo"]:
+ decoded = bt_decode.DelegateInfo.decode(vec_u8)
+ hotkey = decode_account_id(decoded.delegate_ss58)
+ owner = decode_account_id(decoded.owner_ss58)
+ nominators = [
+ (decode_account_id(x), Balance.from_rao(y)) for x, y in decoded.nominators
+ ]
+ total_stake = sum((x[1] for x in nominators)) if nominators else Balance(0)
+ return DelegateInfo(
+ hotkey_ss58=hotkey,
+ total_stake=total_stake,
+ nominators=nominators,
+ owner_ss58=owner,
+ take=u16_normalized_float(decoded.take),
+ validator_permits=decoded.validator_permits,
+ registrations=decoded.registrations,
+ return_per_1000=Balance.from_rao(decoded.return_per_1000),
+ total_daily_return=Balance.from_rao(decoded.total_daily_return),
)
@classmethod
- def from_vec_u8(cls, vec_u8: list[int]) -> Optional["DelegateInfo"]:
- """Returns a DelegateInfo object from a ``vec_u8``."""
- if len(vec_u8) == 0:
- return None
-
- decoded = from_scale_encoding(vec_u8, ChainDataType.DelegateInfo)
- if decoded is None:
- return None
-
- return DelegateInfo.fix_decoded_values(decoded)
-
- @classmethod
- def list_from_vec_u8(cls, vec_u8: list[int]) -> list["DelegateInfo"]:
- """Returns a list of DelegateInfo objects from a ``vec_u8``."""
- decoded = from_scale_encoding(vec_u8, ChainDataType.DelegateInfo, is_vec=True)
-
- if decoded is None:
- return []
-
- return [DelegateInfo.fix_decoded_values(d) for d in decoded]
+ def list_from_vec_u8(cls, vec_u8: bytes) -> list["DelegateInfo"]:
+ decoded = bt_decode.DelegateInfo.decode_vec(vec_u8)
+ results = []
+ for d in decoded:
+ hotkey = decode_account_id(d.delegate_ss58)
+ owner = decode_account_id(d.owner_ss58)
+ nominators = [
+ (decode_account_id(x), Balance.from_rao(y)) for x, y in d.nominators
+ ]
+ total_stake = sum((x[1] for x in nominators)) if nominators else Balance(0)
+ results.append(
+ DelegateInfo(
+ hotkey_ss58=hotkey,
+ total_stake=total_stake,
+ nominators=nominators,
+ owner_ss58=owner,
+ take=u16_normalized_float(d.take),
+ validator_permits=d.validator_permits,
+ registrations=d.registrations,
+ return_per_1000=Balance.from_rao(d.return_per_1000),
+ total_daily_return=Balance.from_rao(d.total_daily_return),
+ )
+ )
+ return results
@classmethod
def delegated_list_from_vec_u8(
- cls, vec_u8: list[int]
- ) -> list[tuple["DelegateInfo", "Balance"]]:
- """Returns a list of Tuples of DelegateInfo objects, and Balance, from a ``vec_u8``.
-
- This is the list of delegates that the user has delegated to, and the amount of stake delegated.
- """
- decoded = from_scale_encoding(vec_u8, ChainDataType.DelegatedInfo, is_vec=True)
- if decoded is None:
- return []
-
- return [
- (DelegateInfo.fix_decoded_values(d), Balance.from_rao(s))
- for d, s in decoded
- ]
+ cls, vec_u8: bytes
+ ) -> list[tuple["DelegateInfo", Balance]]:
+ decoded = bt_decode.DelegateInfo.decode_delegated(vec_u8)
+ results = []
+ for d, b in decoded:
+ nominators = [
+ (decode_account_id(x), Balance.from_rao(y)) for x, y in d.nominators
+ ]
+ total_stake = sum((x[1] for x in nominators)) if nominators else Balance(0)
+ delegate = DelegateInfo(
+ hotkey_ss58=decode_account_id(d.delegate_ss58),
+ total_stake=total_stake,
+ nominators=nominators,
+ owner_ss58=decode_account_id(d.owner_ss58),
+ take=u16_normalized_float(d.take),
+ validator_permits=d.validator_permits,
+ registrations=d.registrations,
+ return_per_1000=Balance.from_rao(d.return_per_1000),
+ total_daily_return=Balance.from_rao(d.total_daily_return),
+ )
+ results.append((delegate, Balance.from_rao(b)))
+ return results
diff --git a/bittensor/core/chain_data/subnet_info.py b/bittensor/core/chain_data/subnet_info.py
index f1ce151872..4169746a08 100644
--- a/bittensor/core/chain_data/subnet_info.py
+++ b/bittensor/core/chain_data/subnet_info.py
@@ -1,13 +1,10 @@
from dataclasses import dataclass
-from typing import Any, Optional, Union
-from scalecodec.utils.ss58 import ss58_encode
+import bt_decode
-from bittensor.core.chain_data.utils import from_scale_encoding, ChainDataType
-from bittensor.core.settings import SS58_FORMAT
+from bittensor.core.chain_data.utils import decode_account_id
from bittensor.utils import u16_normalized_float
from bittensor.utils.balance import Balance
-from bittensor.utils.registration import torch, use_torch
@dataclass
@@ -28,76 +25,39 @@ class SubnetInfo:
blocks_since_epoch: int
tempo: int
modality: int
- # netuid -> topk percentile prunning score requirement (u16:MAX normalized.)
connection_requirements: dict[str, float]
emission_value: float
burn: Balance
owner_ss58: str
@classmethod
- def from_vec_u8(cls, vec_u8: list[int]) -> Optional["SubnetInfo"]:
- """Returns a SubnetInfo object from a ``vec_u8``."""
- if len(vec_u8) == 0:
- return None
-
- decoded = from_scale_encoding(vec_u8, ChainDataType.SubnetInfo)
- if decoded is None:
- return None
-
- return SubnetInfo.fix_decoded_values(decoded)
-
- @classmethod
- def list_from_vec_u8(cls, vec_u8: list[int]) -> list["SubnetInfo"]:
- """Returns a list of SubnetInfo objects from a ``vec_u8``."""
- decoded = from_scale_encoding(
- vec_u8, ChainDataType.SubnetInfo, is_vec=True, is_option=True
- )
-
- if decoded is None:
- return []
-
- return [SubnetInfo.fix_decoded_values(d) for d in decoded]
-
- @classmethod
- def fix_decoded_values(cls, decoded: dict) -> "SubnetInfo":
- """Returns a SubnetInfo object from a decoded SubnetInfo dictionary."""
- return SubnetInfo(
- netuid=decoded["netuid"],
- rho=decoded["rho"],
- kappa=decoded["kappa"],
- difficulty=decoded["difficulty"],
- immunity_period=decoded["immunity_period"],
- max_allowed_validators=decoded["max_allowed_validators"],
- min_allowed_weights=decoded["min_allowed_weights"],
- max_weight_limit=decoded["max_weights_limit"],
- scaling_law_power=decoded["scaling_law_power"],
- subnetwork_n=decoded["subnetwork_n"],
- max_n=decoded["max_allowed_uids"],
- blocks_since_epoch=decoded["blocks_since_last_step"],
- tempo=decoded["tempo"],
- modality=decoded["network_modality"],
- connection_requirements={
- str(int(netuid)): u16_normalized_float(int(req))
- for netuid, req in decoded["network_connect"]
- },
- emission_value=decoded["emission_values"],
- burn=Balance.from_rao(decoded["burn"]),
- owner_ss58=ss58_encode(decoded["owner"], SS58_FORMAT),
- )
-
- def to_parameter_dict(self) -> Union[dict[str, Any], "torch.nn.ParameterDict"]:
- """Returns a torch tensor or dict of the subnet info."""
- if use_torch():
- return torch.nn.ParameterDict(self.__dict__)
- else:
- return self.__dict__
-
- @classmethod
- def from_parameter_dict(
- cls, parameter_dict: Union[dict[str, Any], "torch.nn.ParameterDict"]
- ) -> "SubnetInfo":
- """Creates a SubnetInfo instance from a parameter dictionary."""
- if use_torch():
- return cls(**dict(parameter_dict))
- else:
- return cls(**parameter_dict)
+ def list_from_vec_u8(cls, vec_u8: bytes) -> list["SubnetInfo"]:
+ decoded = bt_decode.SubnetInfo.decode_vec_option(vec_u8)
+ result = []
+ for d in decoded:
+ result.append(
+ SubnetInfo(
+ netuid=d.netuid,
+ rho=d.rho,
+ kappa=d.kappa,
+ difficulty=d.difficulty,
+ immunity_period=d.immunity_period,
+ max_allowed_validators=d.max_allowed_validators,
+ min_allowed_weights=d.min_allowed_weights,
+ max_weight_limit=d.max_weights_limit,
+ scaling_law_power=d.scaling_law_power,
+ subnetwork_n=d.subnetwork_n,
+ max_n=d.max_allowed_uids,
+ blocks_since_epoch=d.blocks_since_last_step,
+ tempo=d.tempo,
+ modality=d.network_modality,
+ connection_requirements={
+ str(int(netuid)): u16_normalized_float(int(req))
+ for (netuid, req) in d.network_connect
+ },
+ emission_value=d.emission_values,
+ burn=Balance.from_rao(d.burn),
+ owner_ss58=decode_account_id(d.owner),
+ )
+ )
+ return result
diff --git a/bittensor/core/chain_data/utils.py b/bittensor/core/chain_data/utils.py
index 0544ca85a2..9c21c9d22e 100644
--- a/bittensor/core/chain_data/utils.py
+++ b/bittensor/core/chain_data/utils.py
@@ -260,7 +260,7 @@ def from_scale_encoding_using_type_string(
}
-def decode_account_id(account_id_bytes: list) -> str:
+def decode_account_id(account_id_bytes: Union[bytes, str]) -> str:
"""
Decodes an AccountId from bytes to a Base64 string using SS58 encoding.
diff --git a/bittensor/core/config.py b/bittensor/core/config.py
index 5027bbecb5..f38aff20e6 100644
--- a/bittensor/core/config.py
+++ b/bittensor/core/config.py
@@ -97,18 +97,6 @@ def __init__(
# this can fail if --no_version_checking has already been added.
pass
- try:
- parser.add_argument(
- "--no_prompt",
- dest="no_prompt",
- action="store_true",
- help="Set ``true`` to stop cli from prompting the user.",
- default=False,
- )
- except Exception:
- # this can fail if --no_version_checking has already been added.
- pass
-
# Get args from argv if not passed in.
if args is None:
args = sys.argv[1:]
diff --git a/bittensor/core/extrinsics/commit_weights.py b/bittensor/core/extrinsics/commit_weights.py
index 5e9f2e9e19..3e69598c06 100644
--- a/bittensor/core/extrinsics/commit_weights.py
+++ b/bittensor/core/extrinsics/commit_weights.py
@@ -20,7 +20,6 @@
from typing import Optional, TYPE_CHECKING
from retry import retry
-from rich.prompt import Confirm
from bittensor.core.extrinsics.utils import submit_extrinsic
from bittensor.utils import format_error_message
@@ -33,7 +32,7 @@
from bittensor.core.subtensor import Subtensor
-# # Chain call for `commit_weights_extrinsic`
+# Chain call for `commit_weights_extrinsic`
@ensure_connected
def do_commit_weights(
self: "Subtensor",
@@ -101,11 +100,10 @@ def commit_weights_extrinsic(
commit_hash: str,
wait_for_inclusion: bool = False,
wait_for_finalization: bool = False,
- prompt: bool = False,
) -> tuple[bool, str]:
"""
Commits a hash of the neuron's weights to the Bittensor blockchain using the provided wallet.
- This function is a wrapper around the `do_commit_weights` method, handling user prompts and error messages.
+ This function is a wrapper around the `do_commit_weights` method.
Args:
subtensor (bittensor.core.subtensor.Subtensor): The subtensor instance used for blockchain interaction.
@@ -114,16 +112,12 @@ def commit_weights_extrinsic(
commit_hash (str): The hash of the neuron's weights to be committed.
wait_for_inclusion (bool): Waits for the transaction to be included in a block.
wait_for_finalization (bool): Waits for the transaction to be finalized on the blockchain.
- prompt (bool): If ``True``, prompts for user confirmation before proceeding.
Returns:
- tuple[bool, str]: ``True`` if the weight commitment is successful, False otherwise. And `msg`, a string
- value describing the success or potential error.
+ tuple[bool, str]: ``True`` if the weight commitment is successful, False otherwise. And `msg`, a string value describing the success or potential error.
This function provides a user-friendly interface for committing weights to the Bittensor blockchain, ensuring proper error handling and user interaction when required.
"""
- if prompt and not Confirm.ask(f"Would you like to commit weights?"):
- return False, "User cancelled the operation."
success, error_message = do_commit_weights(
self=subtensor,
@@ -224,11 +218,10 @@ def reveal_weights_extrinsic(
version_key: int,
wait_for_inclusion: bool = False,
wait_for_finalization: bool = False,
- prompt: bool = False,
) -> tuple[bool, str]:
"""
Reveals the weights for a specific subnet on the Bittensor blockchain using the provided wallet.
- This function is a wrapper around the `_do_reveal_weights` method, handling user prompts and error messages.
+ This function is a wrapper around the `_do_reveal_weights` method.
Args:
subtensor (bittensor.core.subtensor.Subtensor): The subtensor instance used for blockchain interaction.
@@ -240,18 +233,13 @@ def reveal_weights_extrinsic(
version_key (int): Version key for compatibility with the network.
wait_for_inclusion (bool): Waits for the transaction to be included in a block.
wait_for_finalization (bool): Waits for the transaction to be finalized on the blockchain.
- prompt (bool): If ``True``, prompts for user confirmation before proceeding.
Returns:
- tuple[bool, str]: ``True`` if the weight revelation is successful, False otherwise. And `msg`, a string
- value describing the success or potential error.
+ tuple[bool, str]: ``True`` if the weight revelation is successful, False otherwise. And `msg`, a string value describing the success or potential error.
This function provides a user-friendly interface for revealing weights on the Bittensor blockchain, ensuring proper error handling and user interaction when required.
"""
- if prompt and not Confirm.ask(f"Would you like to reveal weights?"):
- return False, "User cancelled the operation."
-
success, error_message = do_reveal_weights(
self=subtensor,
wallet=wallet,
diff --git a/bittensor/core/extrinsics/prometheus.py b/bittensor/core/extrinsics/prometheus.py
deleted file mode 100644
index a6ab1cfb16..0000000000
--- a/bittensor/core/extrinsics/prometheus.py
+++ /dev/null
@@ -1,187 +0,0 @@
-# The MIT License (MIT)
-# Copyright © 2024 Opentensor Foundation
-#
-# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
-# documentation files (the “Software”), to deal in the Software without restriction, including without limitation
-# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
-# and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
-#
-# The above copyright notice and this permission notice shall be included in all copies or substantial portions of
-# the Software.
-#
-# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
-# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
-# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-# DEALINGS IN THE SOFTWARE.
-
-import json
-from typing import Optional, TYPE_CHECKING
-
-from retry import retry
-
-from bittensor.core.extrinsics.utils import submit_extrinsic
-from bittensor.core.settings import version_as_int, bt_console
-from bittensor.utils import networking as net, format_error_message
-from bittensor.utils.btlogging import logging
-from bittensor.utils.networking import ensure_connected
-
-# For annotation purposes
-if TYPE_CHECKING:
- from bittensor_wallet import Wallet
- from bittensor.core.subtensor import Subtensor
- from bittensor.core.types import PrometheusServeCallParams
-
-
-# Chain call for `prometheus_extrinsic`
-@ensure_connected
-def do_serve_prometheus(
- self: "Subtensor",
- wallet: "Wallet",
- call_params: "PrometheusServeCallParams",
- wait_for_inclusion: bool = False,
- wait_for_finalization: bool = True,
-) -> tuple[bool, Optional[dict]]:
- """
- Sends a serve prometheus extrinsic to the chain.
-
- Args:
- self (bittensor.core.subtensor.Subtensor): Bittensor subtensor object
- wallet (bittensor_wallet.Wallet): Wallet object.
- call_params (bittensor.core.types.PrometheusServeCallParams): Prometheus serve call parameters.
- wait_for_inclusion (bool): If ``true``, waits for inclusion.
- wait_for_finalization (bool): If ``true``, waits for finalization.
-
- Returns:
- success (bool): ``True`` if serve prometheus was successful.
- error (Optional[str]): Error message if serve prometheus failed, ``None`` otherwise.
- """
-
- @retry(delay=1, tries=3, backoff=2, max_delay=4)
- def make_substrate_call_with_retry():
- call = self.substrate.compose_call(
- call_module="SubtensorModule",
- call_function="serve_prometheus",
- call_params=call_params,
- )
- extrinsic = self.substrate.create_signed_extrinsic(
- call=call, keypair=wallet.hotkey
- )
- response = submit_extrinsic(
- substrate=self.substrate,
- extrinsic=extrinsic,
- wait_for_inclusion=wait_for_inclusion,
- wait_for_finalization=wait_for_finalization,
- )
- if wait_for_inclusion or wait_for_finalization:
- response.process_events()
- if response.is_success:
- return True, None
- else:
- return False, response.error_message
- else:
- return True, None
-
- return make_substrate_call_with_retry()
-
-
-def prometheus_extrinsic(
- subtensor: "Subtensor",
- wallet: "Wallet",
- port: int,
- netuid: int,
- ip: int = None,
- wait_for_inclusion: bool = False,
- wait_for_finalization=True,
-) -> bool:
- """Subscribes a Bittensor endpoint to the Subtensor chain.
-
- Args:
- subtensor (bittensor.core.subtensor.Subtensor): Bittensor subtensor object.
- wallet (bittensor_wallet.Wallet): Bittensor wallet object.
- ip (str): Endpoint host port i.e., ``192.122.31.4``.
- port (int): Endpoint port number i.e., `9221`.
- netuid (int): Network `uid` to serve on.
- wait_for_inclusion (bool): If set, waits for the extrinsic to enter a block before returning ``true``, or returns ``false`` if the extrinsic fails to enter the block within the timeout.
- wait_for_finalization (bool): If set, waits for the extrinsic to be finalized on the chain before returning ``true``, or returns ``false`` if the extrinsic fails to be finalized within the timeout.
-
- Returns:
- success (bool): Flag is ``true`` if extrinsic was finalized or uncluded in the block. If we did not wait for finalization / inclusion, the response is ``true``.
- """
-
- # Get external ip
- if ip is None:
- try:
- external_ip = net.get_external_ip()
- bt_console.print(
- f":white_heavy_check_mark: [green]Found external ip: {external_ip}[/green]"
- )
- logging.success(prefix="External IP", suffix="{external_ip}")
- except Exception as e:
- raise RuntimeError(
- f"Unable to attain your external ip. Check your internet connection. error: {e}"
- ) from e
- else:
- external_ip = ip
-
- call_params: "PrometheusServeCallParams" = {
- "version": version_as_int,
- "ip": net.ip_to_int(external_ip),
- "port": port,
- "ip_type": net.ip_version(external_ip),
- }
-
- with bt_console.status(":satellite: Checking Prometheus..."):
- neuron = subtensor.get_neuron_for_pubkey_and_subnet(
- wallet.hotkey.ss58_address, netuid=netuid
- )
- neuron_up_to_date = not neuron.is_null and call_params == {
- "version": neuron.prometheus_info.version,
- "ip": net.ip_to_int(neuron.prometheus_info.ip),
- "port": neuron.prometheus_info.port,
- "ip_type": neuron.prometheus_info.ip_type,
- }
-
- if neuron_up_to_date:
- bt_console.print(
- f":white_heavy_check_mark: [green]Prometheus already Served[/green]\n"
- f"[green not bold]- Status: [/green not bold] |"
- f"[green not bold] ip: [/green not bold][white not bold]{neuron.prometheus_info.ip}[/white not bold] |"
- f"[green not bold] ip_type: [/green not bold][white not bold]{neuron.prometheus_info.ip_type}[/white not bold] |"
- f"[green not bold] port: [/green not bold][white not bold]{neuron.prometheus_info.port}[/white not bold] | "
- f"[green not bold] version: [/green not bold][white not bold]{neuron.prometheus_info.version}[/white not bold] |"
- )
-
- bt_console.print(
- f":white_heavy_check_mark: [white]Prometheus already served.[/white]"
- )
- return True
-
- # Add netuid, not in prometheus_info
- call_params["netuid"] = netuid
-
- with bt_console.status(
- f":satellite: Serving prometheus on: [white]{subtensor.network}:{netuid}[/white] ..."
- ):
- success, error_message = do_serve_prometheus(
- self=subtensor,
- wallet=wallet,
- call_params=call_params,
- wait_for_finalization=wait_for_finalization,
- wait_for_inclusion=wait_for_inclusion,
- )
-
- if wait_for_inclusion or wait_for_finalization:
- if success is True:
- json_ = json.dumps(call_params, indent=4, sort_keys=True)
- bt_console.print(
- f":white_heavy_check_mark: [green]Served prometheus[/green]\n [bold white]{json_}[/bold white]"
- )
- return True
- else:
- bt_console.print(
- f":cross_mark: [red]Failed[/red]: {format_error_message(error_message)}"
- )
- return False
- else:
- return True
diff --git a/bittensor/core/extrinsics/registration.py b/bittensor/core/extrinsics/registration.py
new file mode 100644
index 0000000000..97c7332074
--- /dev/null
+++ b/bittensor/core/extrinsics/registration.py
@@ -0,0 +1,417 @@
+# The MIT License (MIT)
+# Copyright © 2024 Opentensor Foundation
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
+# documentation files (the “Software”), to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
+# and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all copies or substantial portions of
+# the Software.
+#
+# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
+# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+# DEALINGS IN THE SOFTWARE.
+
+import time
+from typing import Union, Optional, TYPE_CHECKING
+
+from bittensor_wallet.errors import KeyFileError
+from retry import retry
+
+from bittensor.utils import format_error_message
+from bittensor.utils.btlogging import logging
+from bittensor.utils.networking import ensure_connected
+from bittensor.utils.registration import (
+ POWSolution,
+ create_pow,
+ torch,
+ log_no_torch_error,
+)
+
+# For annotation purposes
+if TYPE_CHECKING:
+ from bittensor.core.subtensor import Subtensor
+ from bittensor_wallet import Wallet
+
+
+@ensure_connected
+def _do_pow_register(
+ self: "Subtensor",
+ netuid: int,
+ wallet: "Wallet",
+ pow_result: "POWSolution",
+ wait_for_inclusion: bool = False,
+ wait_for_finalization: bool = True,
+) -> tuple[bool, Optional[str]]:
+ """Sends a (POW) register extrinsic to the chain.
+
+ Args:
+ netuid (int): The subnet to register on.
+ wallet (bittensor.wallet): The wallet to register.
+ pow_result (POWSolution): The PoW result to register.
+ wait_for_inclusion (bool): If ``True``, waits for the extrinsic to be included in a block. Default to `False`.
+ wait_for_finalization (bool): If ``True``, waits for the extrinsic to be finalized. Default to `True`.
+
+ Returns:
+ success (bool): ``True`` if the extrinsic was included in a block.
+ error (Optional[str]): ``None`` on success or not waiting for inclusion/finalization, otherwise the error message.
+ """
+
+ @retry(delay=1, tries=3, backoff=2, max_delay=4)
+ def make_substrate_call_with_retry():
+ # create extrinsic call
+ call = self.substrate.compose_call(
+ call_module="SubtensorModule",
+ call_function="register",
+ call_params={
+ "netuid": netuid,
+ "block_number": pow_result.block_number,
+ "nonce": pow_result.nonce,
+ "work": [int(byte_) for byte_ in pow_result.seal],
+ "hotkey": wallet.hotkey.ss58_address,
+ "coldkey": wallet.coldkeypub.ss58_address,
+ },
+ )
+ extrinsic = self.substrate.create_signed_extrinsic(
+ call=call, keypair=wallet.hotkey
+ )
+ response = self.substrate.submit_extrinsic(
+ extrinsic,
+ wait_for_inclusion=wait_for_inclusion,
+ wait_for_finalization=wait_for_finalization,
+ )
+
+ # We only wait here if we expect finalization.
+ if not wait_for_finalization and not wait_for_inclusion:
+ return True, None
+
+ # process if registration successful, try again if pow is still valid
+ response.process_events()
+ if not response.is_success:
+ return False, format_error_message(response.error_message)
+ # Successful registration
+ else:
+ return True, None
+
+ return make_substrate_call_with_retry()
+
+
+def register_extrinsic(
+ subtensor: "Subtensor",
+ wallet: "Wallet",
+ netuid: int,
+ wait_for_inclusion: bool = False,
+ wait_for_finalization: bool = True,
+ max_allowed_attempts: int = 3,
+ output_in_place: bool = True,
+ cuda: bool = False,
+ dev_id: Union[list[int], int] = 0,
+ tpb: int = 256,
+ num_processes: Optional[int] = None,
+ update_interval: Optional[int] = None,
+ log_verbose: bool = False,
+) -> bool:
+ """Registers the wallet to the chain.
+
+ Args:
+ subtensor (bittensor.core.subtensor.Subtensor): Subtensor interface.
+ wallet (bittensor.wallet): Bittensor wallet object.
+ netuid (int): The ``netuid`` of the subnet to register on.
+ wait_for_inclusion (bool): If set, waits for the extrinsic to enter a block before returning ``true``, or returns ``false`` if the extrinsic fails to enter the block within the timeout.
+ wait_for_finalization (bool): If set, waits for the extrinsic to be finalized on the chain before returning ``true``, or returns ``false`` if the extrinsic fails to be finalized within the timeout.
+ max_allowed_attempts (int): Maximum number of attempts to register the wallet.
+ output_in_place (bool): If true, prints the progress of the proof of work to the console in-place. Meaning the progress is printed on the same lines. Defaults to `True`.
+ cuda (bool): If ``true``, the wallet should be registered using CUDA device(s).
+ dev_id (Union[List[int], int]): The CUDA device id to use, or a list of device ids.
+ tpb (int): The number of threads per block (CUDA).
+ num_processes (int): The number of processes to use to register.
+ update_interval (int): The number of nonces to solve between updates.
+ log_verbose (bool): If ``true``, the registration process will log more information.
+
+ Returns:
+ success (bool):
+            Flag is ``true`` if extrinsic was finalized or included in the block. If we did not wait for finalization / inclusion, the response is ``true``.
+ """
+ if not subtensor.subnet_exists(netuid):
+ logging.error(
+ f":cross_mark: Failed: Subnet {netuid} does not exist."
+ )
+ return False
+
+ logging.info(
+ f":satellite: Checking Account on subnet {netuid}..."
+ )
+ neuron = subtensor.get_neuron_for_pubkey_and_subnet(
+ wallet.hotkey.ss58_address, netuid=netuid
+ )
+ if not neuron.is_null:
+ logging.debug(
+ f"Wallet {wallet} is already registered on {neuron.netuid} with {neuron.uid}."
+ )
+ return True
+
+ logging.debug(
+ f"Registration hotkey: {wallet.hotkey.ss58_address}, Public coldkey: {wallet.coldkey.ss58_address} in the network: {subtensor.network}."
+ )
+
+ if not torch:
+ log_no_torch_error()
+ return False
+
+ # Attempt rolling registration.
+ attempts = 1
+ while True:
+ logging.info(
+ f":satellite: Registering... ({attempts}/{max_allowed_attempts})"
+ )
+ # Solve latest POW.
+ if cuda:
+ if not torch.cuda.is_available():
+ return False
+ pow_result: Optional[POWSolution] = create_pow(
+ subtensor,
+ wallet,
+ netuid,
+ output_in_place,
+ cuda=cuda,
+ dev_id=dev_id,
+ tpb=tpb,
+ num_processes=num_processes,
+ update_interval=update_interval,
+ log_verbose=log_verbose,
+ )
+ else:
+ pow_result: Optional[POWSolution] = create_pow(
+ subtensor,
+ wallet,
+ netuid,
+ output_in_place,
+ cuda=cuda,
+ num_processes=num_processes,
+ update_interval=update_interval,
+ log_verbose=log_verbose,
+ )
+
+ # pow failed
+ if not pow_result:
+ # might be registered already on this subnet
+ is_registered = subtensor.is_hotkey_registered(
+ netuid=netuid, hotkey_ss58=wallet.hotkey.ss58_address
+ )
+ if is_registered:
+ logging.info(
+ f":white_heavy_check_mark: Already registered on netuid: {netuid}."
+ )
+ return True
+
+ # pow successful, proceed to submit pow to chain for registration
+ else:
+ logging.info(":satellite: Submitting POW...")
+ # check if pow result is still valid
+ while not pow_result.is_stale(subtensor=subtensor):
+ result: tuple[bool, Optional[str]] = _do_pow_register(
+ self=subtensor,
+ netuid=netuid,
+ wallet=wallet,
+ pow_result=pow_result,
+ wait_for_inclusion=wait_for_inclusion,
+ wait_for_finalization=wait_for_finalization,
+ )
+ success, err_msg = result
+
+ if not success:
+ # Look error here
+ # https://github.com/opentensor/subtensor/blob/development/pallets/subtensor/src/errors.rs
+ if "HotKeyAlreadyRegisteredInSubNet" in err_msg:
+ logging.info(
+ f":white_heavy_check_mark: Already Registered on subnet {netuid}."
+ )
+ return True
+
+ logging.error(f":cross_mark: Failed: {err_msg}")
+ time.sleep(0.5)
+
+ # Successful registration, final check for neuron and pubkey
+ else:
+ logging.info(":satellite: Checking Balance...")
+ is_registered = subtensor.is_hotkey_registered(
+ hotkey_ss58=wallet.hotkey.ss58_address,
+ netuid=netuid,
+ )
+ if is_registered:
+ logging.info(
+ ":white_heavy_check_mark: Registered"
+ )
+ return True
+ else:
+ # neuron not found, try again
+ logging.error(
+ ":cross_mark: Unknown error. Neuron not found."
+ )
+ continue
+ else:
+ # Exited loop because pow is no longer valid.
+ logging.error("POW is stale.")
+ # Try again.
+ continue
+
+ if attempts < max_allowed_attempts:
+ # Failed registration, retry pow
+ attempts += 1
+ logging.info(
+ f":satellite: Failed registration, retrying pow ... ({attempts}/{max_allowed_attempts})"
+ )
+ else:
+ # Failed to register after max attempts.
+ logging.error("No more attempts.")
+ return False
+
+
+@ensure_connected
+def _do_burned_register(
+ self,
+ netuid: int,
+ wallet: "Wallet",
+ wait_for_inclusion: bool = False,
+ wait_for_finalization: bool = True,
+) -> tuple[bool, Optional[str]]:
+ """
+ Performs a burned register extrinsic call to the Subtensor chain.
+
+ This method sends a registration transaction to the Subtensor blockchain using the burned register mechanism. It
+ retries the call up to three times with exponential backoff in case of failures.
+
+ Args:
+ self (bittensor.core.subtensor.Subtensor): Subtensor instance.
+ netuid (int): The network unique identifier to register on.
+ wallet (bittensor_wallet.Wallet): The wallet to be registered.
+ wait_for_inclusion (bool): Whether to wait for the transaction to be included in a block. Default is False.
+ wait_for_finalization (bool): Whether to wait for the transaction to be finalized. Default is True.
+
+ Returns:
+ Tuple[bool, Optional[str]]: A tuple containing a boolean indicating success or failure, and an optional error message.
+ """
+
+ @retry(delay=1, tries=3, backoff=2, max_delay=4)
+ def make_substrate_call_with_retry():
+ # create extrinsic call
+ call = self.substrate.compose_call(
+ call_module="SubtensorModule",
+ call_function="burned_register",
+ call_params={
+ "netuid": netuid,
+ "hotkey": wallet.hotkey.ss58_address,
+ },
+ )
+ extrinsic = self.substrate.create_signed_extrinsic(
+ call=call, keypair=wallet.coldkey
+ )
+ response = self.substrate.submit_extrinsic(
+ extrinsic,
+ wait_for_inclusion=wait_for_inclusion,
+ wait_for_finalization=wait_for_finalization,
+ )
+
+ # We only wait here if we expect finalization.
+ if not wait_for_finalization and not wait_for_inclusion:
+ return True, None
+
+ # process if registration successful, try again if pow is still valid
+ response.process_events()
+ if not response.is_success:
+ return False, format_error_message(response.error_message)
+ # Successful registration
+ else:
+ return True, None
+
+ return make_substrate_call_with_retry()
+
+
+def burned_register_extrinsic(
+ subtensor: "Subtensor",
+ wallet: "Wallet",
+ netuid: int,
+ wait_for_inclusion: bool = False,
+ wait_for_finalization: bool = True,
+) -> bool:
+ """Registers the wallet to chain by recycling TAO.
+
+ Args:
+ subtensor (bittensor.core.subtensor.Subtensor): Subtensor instance.
+ wallet (bittensor.wallet): Bittensor wallet object.
+ netuid (int): The ``netuid`` of the subnet to register on.
+ wait_for_inclusion (bool): If set, waits for the extrinsic to enter a block before returning ``true``, or returns ``false`` if the extrinsic fails to enter the block within the timeout.
+ wait_for_finalization (bool): If set, waits for the extrinsic to be finalized on the chain before returning ``true``, or returns ``false`` if the extrinsic fails to be finalized within the timeout.
+
+ Returns:
+ success (bool): Flag is ``true`` if extrinsic was finalized or included in the block. If we did not wait for finalization / inclusion, the response is ``true``.
+ """
+ if not subtensor.subnet_exists(netuid):
+ logging.error(
+ f":cross_mark: Failed error: subnet {netuid} does not exist."
+ )
+ return False
+
+ try:
+ wallet.unlock_coldkey()
+ except KeyFileError:
+ logging.error(
+ ":cross_mark: Keyfile is corrupt, non-writable, non-readable or the password used to decrypt is invalid."
+ )
+ return False
+ logging.info(
+ f":satellite: Checking Account on subnet {netuid} ..."
+ )
+ neuron = subtensor.get_neuron_for_pubkey_and_subnet(
+ wallet.hotkey.ss58_address, netuid=netuid
+ )
+
+ old_balance = subtensor.get_balance(wallet.coldkeypub.ss58_address)
+
+ if not neuron.is_null:
+ logging.info(":white_heavy_check_mark: Already Registered")
+ logging.info(f"\t\tuid: {neuron.uid}")
+ logging.info(f"\t\tnetuid: {neuron.netuid}")
+ logging.info(f"\t\thotkey: {neuron.hotkey}")
+ logging.info(f"\t\tcoldkey: {neuron.coldkey}")
+ return True
+
+ logging.info(":satellite: Recycling TAO for Registration...")
+
+ recycle_amount = subtensor.recycle(netuid=netuid)
+ logging.info(f"Recycling {recycle_amount} to register on subnet:{netuid}")
+
+ success, err_msg = _do_burned_register(
+ self=subtensor,
+ netuid=netuid,
+ wallet=wallet,
+ wait_for_inclusion=wait_for_inclusion,
+ wait_for_finalization=wait_for_finalization,
+ )
+
+ if not success:
+ logging.error(f":cross_mark: Failed error: {err_msg}")
+ time.sleep(0.5)
+ return False
+ # Successful registration, final check for neuron and pubkey
+ else:
+ logging.info(":satellite: Checking Balance...")
+ block = subtensor.get_current_block()
+ new_balance = subtensor.get_balance(wallet.coldkeypub.ss58_address, block=block)
+
+ logging.info(
+ f"Balance: {old_balance} :arrow_right: {new_balance}"
+ )
+ is_registered = subtensor.is_hotkey_registered(
+ netuid=netuid, hotkey_ss58=wallet.hotkey.ss58_address
+ )
+ if is_registered:
+ logging.info(":white_heavy_check_mark: Registered")
+ return True
+ else:
+ # neuron not found, try again
+ logging.error(":cross_mark: Unknown error. Neuron not found.")
+ return False
diff --git a/bittensor/core/extrinsics/root.py b/bittensor/core/extrinsics/root.py
new file mode 100644
index 0000000000..616c46f958
--- /dev/null
+++ b/bittensor/core/extrinsics/root.py
@@ -0,0 +1,279 @@
+import time
+from typing import Optional, Union, TYPE_CHECKING
+
+import numpy as np
+from bittensor_wallet.errors import KeyFileError
+from numpy.typing import NDArray
+from retry import retry
+
+from bittensor.core.settings import version_as_int
+from bittensor.utils import format_error_message, weight_utils
+from bittensor.utils.btlogging import logging
+from bittensor.utils.networking import ensure_connected
+from bittensor.utils.registration import torch, legacy_torch_api_compat
+
+if TYPE_CHECKING:
+ from bittensor_wallet import Wallet
+ from bittensor.core.subtensor import Subtensor
+
+
+@ensure_connected
+def _do_root_register(
+ self: "Subtensor",
+ wallet: "Wallet",
+ wait_for_inclusion: bool = False,
+ wait_for_finalization: bool = True,
+) -> tuple[bool, Optional[str]]:
+ @retry(delay=1, tries=3, backoff=2, max_delay=4)
+ def make_substrate_call_with_retry():
+ # create extrinsic call
+ call = self.substrate.compose_call(
+ call_module="SubtensorModule",
+ call_function="root_register",
+ call_params={"hotkey": wallet.hotkey.ss58_address},
+ )
+ extrinsic = self.substrate.create_signed_extrinsic(
+ call=call, keypair=wallet.coldkey
+ )
+ response = self.substrate.submit_extrinsic(
+ extrinsic,
+ wait_for_inclusion=wait_for_inclusion,
+ wait_for_finalization=wait_for_finalization,
+ )
+
+ # We only wait here if we expect finalization.
+ if not wait_for_finalization and not wait_for_inclusion:
+ return True, None
+
+ # process if registration successful, try again if pow is still valid
+ response.process_events()
+ if not response.is_success:
+ return False, format_error_message(response.error_message)
+ # Successful registration
+ else:
+ return True, None
+
+ return make_substrate_call_with_retry()
+
+
+def root_register_extrinsic(
+ subtensor: "Subtensor",
+ wallet: "Wallet",
+ wait_for_inclusion: bool = False,
+ wait_for_finalization: bool = True,
+) -> bool:
+ """Registers the wallet to root network.
+
+ Args:
+ subtensor (bittensor.core.subtensor.Subtensor): Subtensor instance.
+ wallet (bittensor_wallet.Wallet): Bittensor wallet object.
+ wait_for_inclusion (bool): If set, waits for the extrinsic to enter a block before returning ``true``, or returns ``false`` if the extrinsic fails to enter the block within the timeout. Default is ``False``.
+ wait_for_finalization (bool): If set, waits for the extrinsic to be finalized on the chain before returning ``true``, or returns ``false`` if the extrinsic fails to be finalized within the timeout. Default is ``True``.
+
+ Returns:
+ success (bool): Flag is ``true`` if extrinsic was finalized or included in the block. If we did not wait for finalization / inclusion, the response is ``true``.
+ """
+
+ try:
+ wallet.unlock_coldkey()
+ except KeyFileError:
+ logging.error(
+ "Keyfile is corrupt, non-writable, non-readable or the password used to decrypt is invalid."
+ )
+ return False
+
+ is_registered = subtensor.is_hotkey_registered(
+ netuid=0, hotkey_ss58=wallet.hotkey.ss58_address
+ )
+ if is_registered:
+ logging.info(
+ ":white_heavy_check_mark: Already registered on root network."
+ )
+ return True
+
+ logging.info(":satellite: Registering to root network...")
+ success, err_msg = _do_root_register(
+ self=subtensor, wallet=wallet,
+ wait_for_inclusion=wait_for_inclusion,
+ wait_for_finalization=wait_for_finalization,
+ )
+
+ if not success:
+ logging.error(f":cross_mark: Failed: {err_msg}")
+ time.sleep(0.5)
+
+ # Successful registration, final check for neuron and pubkey
+ else:
+ is_registered = subtensor.is_hotkey_registered(
+ netuid=0, hotkey_ss58=wallet.hotkey.ss58_address
+ )
+ if is_registered:
+ logging.success(":white_heavy_check_mark: Registered")
+ return True
+ else:
+ # neuron not found, try again
+ logging.error(":cross_mark: Unknown error. Neuron not found.")
+
+
+@ensure_connected
+def _do_set_root_weights(
+ self: "Subtensor",
+ wallet: "Wallet",
+ uids: list[int],
+ vals: list[int],
+ netuid: int = 0,
+ version_key: int = version_as_int,
+ wait_for_inclusion: bool = False,
+ wait_for_finalization: bool = False,
+) -> tuple[bool, Optional[str]]:
+ """
+ Internal method to send a transaction to the Bittensor blockchain, setting weights for specified neurons on root. This method constructs and submits the transaction, handling retries and blockchain communication.
+
+ Args:
+ self (bittensor.core.subtensor.Subtensor): Subtensor instance.
+ wallet (bittensor_wallet.Wallet): The wallet associated with the neuron setting the weights.
+ uids (List[int]): List of neuron UIDs for which weights are being set.
+ vals (List[int]): List of weight values corresponding to each UID.
+ netuid (int): Unique identifier for the network.
+ version_key (int, optional): Version key for compatibility with the network. Defaults is a current ``version_as_int``.
+ wait_for_inclusion (bool, optional): Waits for the transaction to be included in a block. Defaults is ``False``.
+ wait_for_finalization (bool, optional): Waits for the transaction to be finalized on the blockchain. Defaults is ``False``.
+
+ Returns:
+ Tuple[bool, Optional[str]]: A tuple containing a success flag and an optional error message.
+
+ This method is vital for the dynamic weighting mechanism in Bittensor, where neurons adjust their trust in other neurons based on observed performance and contributions on the root network.
+ """
+
+ @retry(delay=2, tries=3, backoff=2, max_delay=4)
+ def make_substrate_call_with_retry():
+ call = self.substrate.compose_call(
+ call_module="SubtensorModule",
+ call_function="set_root_weights",
+ call_params={
+ "dests": uids,
+ "weights": vals,
+ "netuid": netuid,
+ "version_key": version_key,
+ "hotkey": wallet.hotkey.ss58_address,
+ },
+ )
+ # Period dictates how long the extrinsic will stay as part of waiting pool
+ extrinsic = self.substrate.create_signed_extrinsic(
+ call=call,
+ keypair=wallet.coldkey,
+ era={"period": 5},
+ )
+ response = self.substrate.submit_extrinsic(
+ extrinsic,
+ wait_for_inclusion=wait_for_inclusion,
+ wait_for_finalization=wait_for_finalization,
+ )
+ # We only wait here if we expect finalization.
+ if not wait_for_finalization and not wait_for_inclusion:
+ return True, "Not waiting for finalization or inclusion."
+
+ response.process_events()
+ if response.is_success:
+ return True, "Successfully set weights."
+ else:
+ return False, response.error_message
+
+ return make_substrate_call_with_retry()
+
+
+@legacy_torch_api_compat
+def set_root_weights_extrinsic(
+ subtensor: "Subtensor",
+ wallet: "Wallet",
+ netuids: Union[NDArray[np.int64], "torch.LongTensor", list[int]],
+ weights: Union[NDArray[np.float32], "torch.FloatTensor", list[float]],
+ version_key: int = 0,
+ wait_for_inclusion: bool = False,
+ wait_for_finalization: bool = False,
+) -> bool:
+ """Sets the given weights and values on chain for wallet hotkey account.
+
+ Args:
+ subtensor (bittensor.core.subtensor.Subtensor): Subtensor instance.
+ wallet (bittensor_wallet.Wallet): Bittensor wallet object.
+ netuids (Union[NDArray[np.int64], torch.LongTensor, list[int]]): The ``netuid`` of the subnet to set weights for.
+ weights (Union[NDArray[np.float32], torch.FloatTensor, list[float]]): Weights to set. These must be ``float`` s and must correspond to the passed ``netuid`` s.
+ version_key (int): The version key of the validator. Default is ``0``.
+ wait_for_inclusion (bool): If set, waits for the extrinsic to enter a block before returning ``true``, or returns ``false`` if the extrinsic fails to enter the block within the timeout. Default is ``False``.
+ wait_for_finalization (bool): If set, waits for the extrinsic to be finalized on the chain before returning ``true``, or returns ``false`` if the extrinsic fails to be finalized within the timeout. Default is ``False``.
+
+ Returns:
+ success (bool): Flag is ``true`` if extrinsic was finalized or included in the block. If we did not wait for finalization / inclusion, the response is ``true``.
+ """
+
+ try:
+ wallet.unlock_coldkey()
+ except KeyFileError:
+ logging.error(
+ ":cross_mark: Keyfile is corrupt, non-writable, non-readable or the password used to decrypt is invalid."
+ )
+ return False
+
+ # First convert types.
+ if isinstance(netuids, list):
+ netuids = np.array(netuids, dtype=np.int64)
+ if isinstance(weights, list):
+ weights = np.array(weights, dtype=np.float32)
+
+ # Get weight restrictions.
+ min_allowed_weights = subtensor.min_allowed_weights(netuid=0)
+ max_weight_limit = subtensor.max_weight_limit(netuid=0)
+
+ # Get non zero values.
+ non_zero_weight_idx = np.argwhere(weights > 0).squeeze(axis=1)
+ non_zero_weight_uids = netuids[non_zero_weight_idx]
+ non_zero_weights = weights[non_zero_weight_idx]
+ if non_zero_weights.size < min_allowed_weights:
+ raise ValueError(
+ "The minimum number of weights required to set weights is {}, got {}".format(
+ min_allowed_weights, non_zero_weights.size
+ )
+ )
+
+ # Normalize the weights to max value.
+ formatted_weights = weight_utils.normalize_max_weight(
+ x=weights, limit=max_weight_limit
+ )
+ logging.info(
+ f"Raw Weights -> Normalized weights: {weights} -> {formatted_weights}"
+ )
+
+ logging.info(
+ f":satellite: Setting root weights on {subtensor.network} ..."
+ )
+ try:
+ weight_uids, weight_vals = weight_utils.convert_weights_and_uids_for_emit(
+ netuids, weights
+ )
+ success, error_message = _do_set_root_weights(
+ self=subtensor, wallet=wallet,
+ netuid=0,
+ uids=weight_uids,
+ vals=weight_vals,
+ version_key=version_key,
+ wait_for_finalization=wait_for_finalization,
+ wait_for_inclusion=wait_for_inclusion,
+ )
+
+ if not wait_for_finalization and not wait_for_inclusion:
+ return True
+
+ if success is True:
+ logging.info(":white_heavy_check_mark: Finalized")
+ logging.success(f"Set weights {str(success)}")
+ return True
+ else:
+ logging.error(
+ f":cross_mark: Failed set weights. {str(error_message)}"
+ )
+ return False
+
+ except Exception as e:
+ logging.error(f":cross_mark: Failed set weights. {str(e)}")
+ return False
diff --git a/bittensor/core/extrinsics/serving.py b/bittensor/core/extrinsics/serving.py
index 490f9c268e..f9cb788172 100644
--- a/bittensor/core/extrinsics/serving.py
+++ b/bittensor/core/extrinsics/serving.py
@@ -15,15 +15,13 @@
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
-import json
from typing import Optional, TYPE_CHECKING
from retry import retry
-from rich.prompt import Confirm
from bittensor.core.errors import MetadataError
from bittensor.core.extrinsics.utils import submit_extrinsic
-from bittensor.core.settings import version_as_int, bt_console
+from bittensor.core.settings import version_as_int
from bittensor.utils import format_error_message, networking as net
from bittensor.utils.btlogging import logging
from bittensor.utils.networking import ensure_connected
@@ -100,7 +98,6 @@ def serve_extrinsic(
placeholder2: int = 0,
wait_for_inclusion: bool = False,
wait_for_finalization=True,
- prompt: bool = False,
) -> bool:
"""Subscribes a Bittensor endpoint to the subtensor chain.
@@ -115,7 +112,6 @@ def serve_extrinsic(
placeholder2 (int): A placeholder for future use.
wait_for_inclusion (bool): If set, waits for the extrinsic to enter a block before returning ``true``, or returns ``false`` if the extrinsic fails to enter the block within the timeout.
wait_for_finalization (bool): If set, waits for the extrinsic to be finalized on the chain before returning ``true``, or returns ``false`` if the extrinsic fails to be finalized within the timeout.
- prompt (bool): If ``true``, the call waits for confirmation from the user before proceeding.
Returns:
success (bool): Flag is ``true`` if extrinsic was finalized or uncluded in the block. If we did not wait for finalization / inclusion, the response is ``true``.
@@ -159,15 +155,6 @@ def serve_extrinsic(
)
return True
- if prompt:
- output = params.copy()
- output["coldkey"] = wallet.coldkeypub.ss58_address
- output["hotkey"] = wallet.hotkey.ss58_address
- if not Confirm.ask(
- f"Do you want to serve axon:\n [bold white]{json.dumps(output, indent=4, sort_keys=True)}[/bold white]"
- ):
- return False
-
logging.debug(
f"Serving axon with: AxonInfo({wallet.hotkey.ss58_address},{ip}:{port}) -> {subtensor.network}:{netuid}"
)
@@ -219,10 +206,9 @@ def serve_axon_extrinsic(
if axon.external_ip is None:
try:
external_ip = net.get_external_ip()
- bt_console.print(
- f":white_heavy_check_mark: [green]Found external ip: {external_ip}[/green]"
+ logging.success(
+ f":white_heavy_check_mark: Found external ip: {external_ip}"
)
- logging.success(prefix="External IP", suffix=f"{external_ip}")
except Exception as e:
raise RuntimeError(
f"Unable to attain your external ip. Check your internet connection. error: {e}"
diff --git a/bittensor/core/extrinsics/set_weights.py b/bittensor/core/extrinsics/set_weights.py
index 7680061c5b..6de8e2338e 100644
--- a/bittensor/core/extrinsics/set_weights.py
+++ b/bittensor/core/extrinsics/set_weights.py
@@ -21,10 +21,9 @@
import numpy as np
from numpy.typing import NDArray
from retry import retry
-from rich.prompt import Confirm
from bittensor.core.extrinsics.utils import submit_extrinsic
-from bittensor.core.settings import bt_console, version_as_int
+from bittensor.core.settings import version_as_int
from bittensor.utils import format_error_message, weight_utils
from bittensor.utils.btlogging import logging
from bittensor.utils.networking import ensure_connected
@@ -114,7 +113,6 @@ def set_weights_extrinsic(
version_key: int = 0,
wait_for_inclusion: bool = False,
wait_for_finalization: bool = False,
- prompt: bool = False,
) -> tuple[bool, str]:
"""Sets the given weights and values on chain for wallet hotkey account.
@@ -127,7 +125,6 @@ def set_weights_extrinsic(
version_key (int): The version key of the validator.
wait_for_inclusion (bool): If set, waits for the extrinsic to enter a block before returning ``true``, or returns ``false`` if the extrinsic fails to enter the block within the timeout.
wait_for_finalization (bool): If set, waits for the extrinsic to be finalized on the chain before returning ``true``, or returns ``false`` if the extrinsic fails to be finalized within the timeout.
- prompt (bool): If ``true``, the call waits for confirmation from the user before proceeding.
Returns:
tuple[bool, str]: A tuple containing a success flag and an optional response message.
@@ -149,46 +146,35 @@ def set_weights_extrinsic(
uids, weights
)
- # Ask before moving on.
- if prompt:
- if not Confirm.ask(
- f"Do you want to set weights:\n[bold white] weights: {[float(v / 65535) for v in weight_vals]}\n"
- f"uids: {weight_uids}[/bold white ]?"
- ):
- return False, "Prompt refused."
-
- with bt_console.status(
- f":satellite: Setting weights on [white]{subtensor.network}[/white] ..."
- ):
- try:
- success, error_message = do_set_weights(
- self=subtensor,
- wallet=wallet,
- netuid=netuid,
- uids=weight_uids,
- vals=weight_vals,
- version_key=version_key,
- wait_for_finalization=wait_for_finalization,
- wait_for_inclusion=wait_for_inclusion,
- )
-
- if not wait_for_finalization and not wait_for_inclusion:
- return True, "Not waiting for finalization or inclusion."
-
- if success is True:
- bt_console.print(":white_heavy_check_mark: [green]Finalized[/green]")
- logging.success(
- msg=str(success),
- prefix="Set weights",
- suffix="Finalized: ",
- )
- return True, "Successfully set weights and Finalized."
- else:
- error_message = format_error_message(error_message)
- logging.error(error_message)
- return False, error_message
-
- except Exception as e:
- bt_console.print(f":cross_mark: [red]Failed[/red]: error:{e}")
- logging.debug(str(e))
- return False, str(e)
+ logging.info(
+ f":satellite: Setting weights on {subtensor.network} ..."
+ )
+ logging.debug(f"Weights: {[float(v / 65535) for v in weight_vals]}")
+
+ try:
+ success, error_message = do_set_weights(
+ self=subtensor,
+ wallet=wallet,
+ netuid=netuid,
+ uids=weight_uids,
+ vals=weight_vals,
+ version_key=version_key,
+ wait_for_finalization=wait_for_finalization,
+ wait_for_inclusion=wait_for_inclusion,
+ )
+
+ if not wait_for_finalization and not wait_for_inclusion:
+ return True, "Not waiting for finalization or inclusion."
+
+ if success is True:
+ logging.success(f"Finalized! Set weights: {str(success)}")
+ return True, "Successfully set weights and Finalized."
+ else:
+ error_message = format_error_message(error_message)
+ logging.error(error_message)
+ return False, error_message
+
+ except Exception as e:
+ logging.error(f":cross_mark: Failed.: Error: {e}")
+ logging.debug(str(e))
+ return False, str(e)
diff --git a/bittensor/core/extrinsics/transfer.py b/bittensor/core/extrinsics/transfer.py
index 896fecbf96..2fea50dd6c 100644
--- a/bittensor/core/extrinsics/transfer.py
+++ b/bittensor/core/extrinsics/transfer.py
@@ -18,16 +18,16 @@
from typing import Optional, Union, TYPE_CHECKING
from retry import retry
-from rich.prompt import Confirm
from bittensor.core.extrinsics.utils import submit_extrinsic
-from bittensor.core.settings import bt_console, NETWORK_EXPLORER_MAP
+from bittensor.core.settings import NETWORK_EXPLORER_MAP
from bittensor.utils import (
get_explorer_url_for_network,
format_error_message,
is_valid_bittensor_address_or_public_key,
)
from bittensor.utils.balance import Balance
+from bittensor.utils.btlogging import logging
from bittensor.utils.networking import ensure_connected
# For annotation purposes
@@ -102,7 +102,6 @@ def transfer_extrinsic(
wait_for_inclusion: bool = True,
wait_for_finalization: bool = False,
keep_alive: bool = True,
- prompt: bool = False,
) -> bool:
"""Transfers funds from this wallet to the destination public key address.
@@ -114,16 +113,13 @@ def transfer_extrinsic(
wait_for_inclusion (bool): If set, waits for the extrinsic to enter a block before returning ``true``, or returns ``false`` if the extrinsic fails to enter the block within the timeout.
wait_for_finalization (bool): If set, waits for the extrinsic to be finalized on the chain before returning ``true``, or returns ``false`` if the extrinsic fails to be finalized within the timeout.
keep_alive (bool): If set, keeps the account alive by keeping the balance above the existential deposit.
- prompt (bool): If ``true``, the call waits for confirmation from the user before proceeding.
Returns:
success (bool): Flag is ``true`` if extrinsic was finalized or uncluded in the block. If we did not wait for finalization / inclusion, the response is ``true``.
"""
# Validate destination address.
if not is_valid_bittensor_address_or_public_key(dest):
- bt_console.print(
- f":cross_mark: [red]Invalid destination address[/red]:[bold white]\n {dest}[/bold white]"
- )
+ logging.error(f"Invalid destination address: {dest}")
return False
if isinstance(dest, bytes):
@@ -140,15 +136,15 @@ def transfer_extrinsic(
transfer_balance = amount
# Check balance.
- with bt_console.status(":satellite: Checking Balance..."):
- account_balance = subtensor.get_balance(wallet.coldkey.ss58_address)
- # check existential deposit.
- existential_deposit = subtensor.get_existential_deposit()
-
- with bt_console.status(":satellite: Transferring..."):
- fee = subtensor.get_transfer_fee(
- wallet=wallet, dest=dest, value=transfer_balance.rao
- )
+ logging.info(":satellite: Checking Balance...")
+ account_balance = subtensor.get_balance(wallet.coldkey.ss58_address)
+ # check existential deposit.
+ existential_deposit = subtensor.get_existential_deposit()
+
+ logging.info(":satellite: Transferring...")
+ fee = subtensor.get_transfer_fee(
+ wallet=wallet, dest=dest, value=transfer_balance.rao
+ )
if not keep_alive:
# Check if the transfer should keep_alive the account
@@ -156,60 +152,52 @@ def transfer_extrinsic(
# Check if we have enough balance.
if account_balance < (transfer_balance + fee + existential_deposit):
- bt_console.print(
- ":cross_mark: [red]Not enough balance[/red]:[bold white]\n"
- f" balance: {account_balance}\n"
- f" amount: {transfer_balance}\n"
- f" for fee: {fee}[/bold white]"
- )
+ logging.error(":cross_mark: Not enough balance:")
+ logging.info(f"\t\tBalance: \t{account_balance}")
+ logging.info(f"\t\tAmount: \t{transfer_balance}")
+ logging.info(f"\t\tFor fee: \t{fee}")
return False
- # Ask before moving on.
- if prompt:
- if not Confirm.ask(
- "Do you want to transfer:[bold white]\n"
- f" amount: {transfer_balance}\n"
- f" from: {wallet.name}:{wallet.coldkey.ss58_address}\n"
- f" to: {dest}\n"
- f" for fee: {fee}[/bold white]"
- ):
- return False
-
- with bt_console.status(":satellite: Transferring..."):
- success, block_hash, error_message = do_transfer(
- self=subtensor,
- wallet=wallet,
- dest=dest,
- transfer_balance=transfer_balance,
- wait_for_finalization=wait_for_finalization,
- wait_for_inclusion=wait_for_inclusion,
- )
+ logging.info(":satellite: Transferring...")
+ logging.info(f"\tAmount: {transfer_balance}")
+ logging.info(f"\tfrom: {wallet.name}:{wallet.coldkey.ss58_address}")
+ logging.info(f"\tTo: {dest}")
+ logging.info(f"\tFor fee: {fee}")
+
+ success, block_hash, error_message = do_transfer(
+ self=subtensor,
+ wallet=wallet,
+ dest=dest,
+ transfer_balance=transfer_balance,
+ wait_for_finalization=wait_for_finalization,
+ wait_for_inclusion=wait_for_inclusion,
+ )
- if success:
- bt_console.print(":white_heavy_check_mark: [green]Finalized[/green]")
- bt_console.print(f"[green]Block Hash: {block_hash}[/green]")
+ if success:
+ logging.success(":white_heavy_check_mark: Finalized")
+ logging.info(f"Block Hash: {block_hash}")
- explorer_urls = get_explorer_url_for_network(
- subtensor.network, block_hash, NETWORK_EXPLORER_MAP
+ explorer_urls = get_explorer_url_for_network(
+ subtensor.network, block_hash, NETWORK_EXPLORER_MAP
+ )
+ if explorer_urls != {} and explorer_urls:
+ logging.info(
+ f"Opentensor Explorer Link: {explorer_urls.get('opentensor')}"
)
- if explorer_urls != {} and explorer_urls:
- bt_console.print(
- f"[green]Opentensor Explorer Link: {explorer_urls.get('opentensor')}[/green]"
- )
- bt_console.print(
- f"[green]Taostats Explorer Link: {explorer_urls.get('taostats')}[/green]"
- )
- else:
- bt_console.print(
- f":cross_mark: [red]Failed[/red]: {format_error_message(error_message)}"
+ logging.info(
+ f"Taostats Explorer Link: {explorer_urls.get('taostats')}"
)
+ else:
+ logging.error(
+ f":cross_mark: Failed: {format_error_message(error_message)}"
+ )
if success:
- with bt_console.status(":satellite: Checking Balance..."):
- new_balance = subtensor.get_balance(wallet.coldkey.ss58_address)
- bt_console.print(
- f"Balance:\n [blue]{account_balance}[/blue] :arrow_right: [green]{new_balance}[/green]"
- )
- return True
+ logging.info(":satellite: Checking Balance...")
+ new_balance = subtensor.get_balance(wallet.coldkey.ss58_address)
+ logging.success(
+ f"Balance: {account_balance} :arrow_right: {new_balance}"
+ )
+ return True
return False
diff --git a/bittensor/core/metagraph.py b/bittensor/core/metagraph.py
index 208eaa6b9f..75e8d947c9 100644
--- a/bittensor/core/metagraph.py
+++ b/bittensor/core/metagraph.py
@@ -1249,12 +1249,11 @@ def load_from_path(self, dir_path: str) -> "Metagraph":
with open(graph_filename, "rb") as graph_file:
state_dict = pickle.load(graph_file)
except pickle.UnpicklingError:
- settings.bt_console.print(
+ logging.info(
"Unable to load file. Attempting to restore metagraph using torch."
)
- settings.bt_console.print(
- ":warning:[yellow]Warning:[/yellow] This functionality exists to load "
- "metagraph state from legacy saves, but will not be supported in the future."
+ logging.warning(
+ ":warning: This functionality exists to load metagraph state from legacy saves, but will not be supported in the future."
)
try:
import torch as real_torch
@@ -1264,7 +1263,7 @@ def load_from_path(self, dir_path: str) -> "Metagraph":
state_dict[key] = state_dict[key].detach().numpy()
del real_torch
except (RuntimeError, ImportError):
- settings.bt_console.print("Unable to load file. It may be corrupted.")
+ logging.error("Unable to load file. It may be corrupted.")
raise
self.n = state_dict["n"]
diff --git a/bittensor/core/settings.py b/bittensor/core/settings.py
index 36314c2b72..48995c83e7 100644
--- a/bittensor/core/settings.py
+++ b/bittensor/core/settings.py
@@ -15,7 +15,7 @@
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
-__version__ = "8.2.0"
+__version__ = "8.2.1"
import os
import re
@@ -23,36 +23,6 @@
from pathlib import Path
from munch import munchify
-from rich.console import Console
-from rich.traceback import install
-
-# Rich console.
-__console__ = Console()
-__use_console__ = True
-
-# Remove overdue locals in debug training.
-install(show_locals=False)
-
-
-def turn_console_off():
- global __use_console__
- global __console__
- from io import StringIO
-
- __use_console__ = False
- __console__ = Console(file=StringIO(), stderr=False)
-
-
-def turn_console_on():
- global __use_console__
- global __console__
- __use_console__ = True
- __console__ = Console()
-
-
-turn_console_off()
-
-bt_console = __console__
HOME_DIR = Path.home()
@@ -70,12 +40,26 @@ def turn_console_on():
WALLETS_DIR.mkdir(parents=True, exist_ok=True)
MINERS_DIR.mkdir(parents=True, exist_ok=True)
+# Bittensor networks name
+NETWORKS = ["finney", "test", "archive", "local", "subvortex"]
+
+DEFAULT_ENDPOINT = "wss://entrypoint-finney.opentensor.ai:443"
+DEFAULT_NETWORK = NETWORKS[0]
# Bittensor endpoints (Needs to use wss://)
FINNEY_ENTRYPOINT = "wss://entrypoint-finney.opentensor.ai:443"
FINNEY_TEST_ENTRYPOINT = "wss://test.finney.opentensor.ai:443/"
ARCHIVE_ENTRYPOINT = "wss://archive.chain.opentensor.ai:443/"
-LOCAL_ENTRYPOINT = os.getenv("BT_SUBTENSOR_CHAIN_ENDPOINT") or "ws://127.0.0.1:9946"
+LOCAL_ENTRYPOINT = os.getenv("BT_SUBTENSOR_CHAIN_ENDPOINT") or "ws://127.0.0.1:9944"
+SUBVORTEX_ENTRYPOINT = "ws://subvortex.info:9944"
+
+NETWORK_MAP = {
+ NETWORKS[0]: FINNEY_ENTRYPOINT,
+ NETWORKS[1]: FINNEY_TEST_ENTRYPOINT,
+ NETWORKS[2]: ARCHIVE_ENTRYPOINT,
+ NETWORKS[3]: LOCAL_ENTRYPOINT,
+ NETWORKS[4]: SUBVORTEX_ENTRYPOINT,
+}
# Currency Symbols Bittensor
TAO_SYMBOL: str = chr(0x03C4)
@@ -112,11 +96,28 @@ def turn_console_on():
}
# --- Type Registry ---
-TYPE_REGISTRY: dict = {
+TYPE_REGISTRY: dict[str, dict] = {
"types": {
"Balance": "u64", # Need to override default u128
},
"runtime_api": {
+ "DelegateInfoRuntimeApi": {
+ "methods": {
+ "get_delegated": {
+ "params": [
+ {
+ "name": "coldkey",
+ "type": "Vec<u8>",
+ },
+ ],
+ "type": "Vec<u8>",
+ },
+ "get_delegates": {
+ "params": [],
+ "type": "Vec<u8>",
+ },
+ }
+ },
"NeuronInfoRuntimeApi": {
"methods": {
"get_neuron_lite": {
@@ -141,8 +142,65 @@ def turn_console_on():
],
"type": "Vec<u8>",
},
+ "get_neuron": {
+ "params": [
+ {
+ "name": "netuid",
+ "type": "u16",
+ },
+ {
+ "name": "uid",
+ "type": "u16",
+ },
+ ],
+ "type": "Vec<u8>",
+ },
+ "get_neurons": {
+ "params": [
+ {
+ "name": "netuid",
+ "type": "u16",
+ },
+ ],
+ "type": "Vec<u8>",
+ },
}
},
+ "StakeInfoRuntimeApi": {
+ "methods": {
+ "get_stake_info_for_coldkey": {
+ "params": [
+ {
+ "name": "coldkey_account_vec",
+ "type": "Vec<u8>",
+ },
+ ],
+ "type": "Vec<u8>",
+ },
+ "get_stake_info_for_coldkeys": {
+ "params": [
+ {
+ "name": "coldkey_account_vecs",
+ "type": "Vec<Vec<u8>>",
+ },
+ ],
+ "type": "Vec<u8>",
+ },
+ },
+ },
+ "ValidatorIPRuntimeApi": {
+ "methods": {
+ "get_associated_validator_ip_info_for_subnet": {
+ "params": [
+ {
+ "name": "netuid",
+ "type": "u16",
+ },
+ ],
+ "type": "Vec<u8>",
+ },
+ },
+ },
"SubnetInfoRuntimeApi": {
"methods": {
"get_subnet_hyperparams": {
@@ -153,12 +211,56 @@ def turn_console_on():
},
],
"type": "Vec<u8>",
- }
+ },
+ "get_subnet_info": {
+ "params": [
+ {
+ "name": "netuid",
+ "type": "u16",
+ },
+ ],
+ "type": "Vec<u8>",
+ },
+ "get_subnets_info": {
+ "params": [],
+ "type": "Vec<u8>",
+ },
}
},
"SubnetRegistrationRuntimeApi": {
"methods": {"get_network_registration_cost": {"params": [], "type": "u64"}}
},
+ "ColdkeySwapRuntimeApi": {
+ "methods": {
+ "get_scheduled_coldkey_swap": {
+ "params": [
+ {
+ "name": "coldkey_account_vec",
+ "type": "Vec<u8>",
+ },
+ ],
+ "type": "Vec<u8>",
+ },
+ "get_remaining_arbitration_period": {
+ "params": [
+ {
+ "name": "coldkey_account_vec",
+ "type": "Vec<u8>",
+ },
+ ],
+ "type": "Vec<u8>",
+ },
+ "get_coldkey_swap_destinations": {
+ "params": [
+ {
+ "name": "coldkey_account_vec",
+ "type": "Vec<u8>",
+ },
+ ],
+ "type": "Vec<u8>",
+ },
+ }
+ },
},
}
diff --git a/bittensor/core/subtensor.py b/bittensor/core/subtensor.py
index ca7397adb6..70da83f63a 100644
--- a/bittensor/core/subtensor.py
+++ b/bittensor/core/subtensor.py
@@ -23,6 +23,7 @@
import argparse
import copy
import socket
+import ssl
from typing import Union, Optional, TypedDict, Any
import numpy as np
@@ -39,20 +40,26 @@
from bittensor.core import settings
from bittensor.core.axon import Axon
from bittensor.core.chain_data import (
+ custom_rpc_type_registry,
+ DelegateInfo,
NeuronInfo,
+ NeuronInfoLite,
PrometheusInfo,
SubnetHyperparameters,
- NeuronInfoLite,
- custom_rpc_type_registry,
+ SubnetInfo,
)
from bittensor.core.config import Config
from bittensor.core.extrinsics.commit_weights import (
commit_weights_extrinsic,
reveal_weights_extrinsic,
)
-from bittensor.core.extrinsics.prometheus import (
- do_serve_prometheus,
- prometheus_extrinsic,
+from bittensor.core.extrinsics.registration import (
+ burned_register_extrinsic,
+ register_extrinsic,
+)
+from bittensor.core.extrinsics.root import (
+ root_register_extrinsic,
+ set_root_weights_extrinsic,
)
from bittensor.core.extrinsics.serving import (
do_serve_axon,
@@ -65,10 +72,10 @@
transfer_extrinsic,
)
from bittensor.core.metagraph import Metagraph
-from bittensor.utils import torch
-from bittensor.utils import u16_normalized_float, networking
+from bittensor.utils import networking, torch, ss58_to_vec_u8, u16_normalized_float
from bittensor.utils.balance import Balance
from bittensor.utils.btlogging import logging
+from bittensor.utils.registration import legacy_torch_api_compat
from bittensor.utils.weight_utils import generate_weight_hash
KEY_NONCE: dict[str, int] = {}
@@ -225,7 +232,7 @@ def _get_substrate(self):
except (AttributeError, TypeError, socket.error, OSError) as e:
logging.warning(f"Error setting timeout: {e}")
- except ConnectionRefusedError as error:
+ except (ConnectionRefusedError, ssl.SSLError) as error:
logging.error(
f"Could not connect to {self.network} network with {self.chain_endpoint} chain endpoint.",
)
@@ -708,16 +715,8 @@ def determine_chain_endpoint_and_network(
if network is None:
return None, None
- if network in ["finney", "local", "test", "archive"]:
- if network == "finney":
- # Kiru Finney staging network.
- return network, settings.FINNEY_ENTRYPOINT
- elif network == "local":
- return network, settings.LOCAL_ENTRYPOINT
- elif network == "test":
- return network, settings.FINNEY_TEST_ENTRYPOINT
- elif network == "archive":
- return network, settings.ARCHIVE_ENTRYPOINT
+ if network in settings.NETWORKS:
+ return network, settings.NETWORK_MAP[network]
else:
if (
network == settings.FINNEY_ENTRYPOINT
@@ -738,7 +737,6 @@ def determine_chain_endpoint_and_network(
return "local", network
else:
return "unknown", network
- return None, None
def get_netuids_for_hotkey(
self, hotkey_ss58: str, block: Optional[int] = None
@@ -846,7 +844,6 @@ def set_weights(
version_key: int = settings.version_as_int,
wait_for_inclusion: bool = False,
wait_for_finalization: bool = False,
- prompt: bool = False,
max_retries: int = 5,
) -> tuple[bool, str]:
"""
@@ -860,7 +857,6 @@ def set_weights(
version_key (int): Version key for compatibility with the network. Default is ``int representation of Bittensor version.``.
wait_for_inclusion (bool): Waits for the transaction to be included in a block. Default is ``False``.
wait_for_finalization (bool): Waits for the transaction to be finalized on the blockchain. Default is ``False``.
- prompt (bool): If ``True``, prompts for user confirmation before proceeding. Default is ``False``.
max_retries (int): The number of maximum attempts to set weights. Default is ``5``.
Returns:
@@ -889,7 +885,6 @@ def set_weights(
version_key=version_key,
wait_for_inclusion=wait_for_inclusion,
wait_for_finalization=wait_for_finalization,
- prompt=prompt,
)
except Exception as e:
logging.error(f"Error setting weights: {e}")
@@ -898,6 +893,151 @@ def set_weights(
return success, message
+ @legacy_torch_api_compat
+ def root_set_weights(
+ self,
+ wallet: "Wallet",
+ netuids: Union[NDArray[np.int64], "torch.LongTensor", list],
+ weights: Union[NDArray[np.float32], "torch.FloatTensor", list],
+ version_key: int = 0,
+ wait_for_inclusion: bool = False,
+ wait_for_finalization: bool = False,
+ ) -> bool:
+ """
+ Sets the weights for neurons on the root network. This action is crucial for defining the influence and interactions of neurons at the root level of the Bittensor network.
+
+ Args:
+ wallet (bittensor_wallet.Wallet): The wallet associated with the neuron setting the weights.
+ netuids (Union[NDArray[np.int64], torch.LongTensor, list]): The list of neuron UIDs for which weights are being set.
+ weights (Union[NDArray[np.float32], torch.FloatTensor, list]): The corresponding weights to be set for each UID.
+ version_key (int, optional): Version key for compatibility with the network. Default is ``0``.
+ wait_for_inclusion (bool, optional): Waits for the transaction to be included in a block. Defaults to ``False``.
+ wait_for_finalization (bool, optional): Waits for the transaction to be finalized on the blockchain. Defaults to ``False``.
+
+ Returns:
+ bool: ``True`` if the setting of root-level weights is successful, False otherwise.
+
+ This function plays a pivotal role in shaping the root network's collective intelligence and decision-making processes, reflecting the principles of decentralized governance and collaborative learning in Bittensor.
+ """
+ return set_root_weights_extrinsic(
+ subtensor=self,
+ wallet=wallet,
+ netuids=netuids,
+ weights=weights,
+ version_key=version_key,
+ wait_for_inclusion=wait_for_inclusion,
+ wait_for_finalization=wait_for_finalization,
+ )
+
+ def register(
+ self,
+ wallet: "Wallet",
+ netuid: int,
+ wait_for_inclusion: bool = False,
+ wait_for_finalization: bool = True,
+ max_allowed_attempts: int = 3,
+ output_in_place: bool = True,
+ cuda: bool = False,
+ dev_id: Union[list[int], int] = 0,
+ tpb: int = 256,
+ num_processes: Optional[int] = None,
+ update_interval: Optional[int] = None,
+ log_verbose: bool = False,
+ ) -> bool:
+ """
+ Registers a neuron on the Bittensor network using the provided wallet.
+
+ Registration is a critical step for a neuron to become an active participant in the network, enabling it to stake, set weights, and receive incentives.
+
+ Args:
+ wallet (bittensor_wallet.Wallet): The wallet associated with the neuron to be registered.
+ netuid (int): The unique identifier of the subnet.
+ wait_for_inclusion (bool): Waits for the transaction to be included in a block. Defaults to `False`.
+ wait_for_finalization (bool): Waits for the transaction to be finalized on the blockchain. Defaults to `True`.
+ max_allowed_attempts (int): Maximum number of attempts to register the wallet.
+ output_in_place (bool): If true, prints the progress of the proof of work to the console in-place. Meaning the progress is printed on the same lines. Defaults to `True`.
+ cuda (bool): If ``true``, the wallet should be registered using CUDA device(s). Defaults to `False`.
+ dev_id (Union[List[int], int]): The CUDA device id to use, or a list of device ids. Defaults to `0` (zero).
+ tpb (int): The number of threads per block (CUDA). Default to `256`.
+ num_processes (Optional[int]): The number of processes to use to register. Default to `None`.
+ update_interval (Optional[int]): The number of nonces to solve between updates. Default to `None`.
+ log_verbose (bool): If ``true``, the registration process will log more information. Default to `False`.
+
+ Returns:
+ bool: ``True`` if the registration is successful, False otherwise.
+
+ This function facilitates the entry of new neurons into the network, supporting the decentralized
+ growth and scalability of the Bittensor ecosystem.
+ """
+ return register_extrinsic(
+ subtensor=self,
+ wallet=wallet,
+ netuid=netuid,
+ wait_for_inclusion=wait_for_inclusion,
+ wait_for_finalization=wait_for_finalization,
+ max_allowed_attempts=max_allowed_attempts,
+ output_in_place=output_in_place,
+ cuda=cuda,
+ dev_id=dev_id,
+ tpb=tpb,
+ num_processes=num_processes,
+ update_interval=update_interval,
+ log_verbose=log_verbose,
+ )
+
+ def root_register(
+ self,
+ wallet: "Wallet",
+ wait_for_inclusion: bool = False,
+ wait_for_finalization: bool = True,
+ ) -> bool:
+ """
+ Registers the neuron associated with the wallet on the root network. This process is integral for participating in the highest layer of decision-making and governance within the Bittensor network.
+
+ Args:
+ wallet (bittensor_wallet.Wallet): The wallet associated with the neuron to be registered on the root network.
+ wait_for_inclusion (bool): Waits for the transaction to be included in a block. Defaults to `False`.
+ wait_for_finalization (bool): Waits for the transaction to be finalized on the blockchain. Defaults to `True`.
+
+ Returns:
+ bool: ``True`` if the registration on the root network is successful, False otherwise.
+
+ This function enables neurons to engage in the most critical and influential aspects of the network's governance, signifying a high level of commitment and responsibility in the Bittensor ecosystem.
+ """
+ return root_register_extrinsic(
+ subtensor=self,
+ wallet=wallet,
+ wait_for_inclusion=wait_for_inclusion,
+ wait_for_finalization=wait_for_finalization,
+ )
+
+ def burned_register(
+ self,
+ wallet: "Wallet",
+ netuid: int,
+ wait_for_inclusion: bool = False,
+ wait_for_finalization: bool = True,
+ ) -> bool:
+ """
+ Registers a neuron on the Bittensor network by recycling TAO. This method of registration involves recycling TAO tokens, allowing them to be re-mined by performing work on the network.
+
+ Args:
+ wallet (bittensor_wallet.Wallet): The wallet associated with the neuron to be registered.
+ netuid (int): The unique identifier of the subnet.
+ wait_for_inclusion (bool, optional): Waits for the transaction to be included in a block. Defaults to `False`.
+ wait_for_finalization (bool, optional): Waits for the transaction to be finalized on the blockchain. Defaults to `True`.
+
+ Returns:
+ bool: ``True`` if the registration is successful, False otherwise.
+ """
+ return burned_register_extrinsic(
+ subtensor=self,
+ wallet=wallet,
+ netuid=netuid,
+ wait_for_inclusion=wait_for_inclusion,
+ wait_for_finalization=wait_for_finalization,
+ )
+
def serve_axon(
self,
netuid: int,
@@ -1013,7 +1153,6 @@ def transfer(
amount: Union["Balance", float],
wait_for_inclusion: bool = True,
wait_for_finalization: bool = False,
- prompt: bool = False,
) -> bool:
"""
Executes a transfer of funds from the provided wallet to the specified destination address. This function is used to move TAO tokens within the Bittensor network, facilitating transactions between neurons.
@@ -1024,7 +1163,6 @@ def transfer(
amount (Union[bittensor.utils.balance.Balance, float]): The amount of TAO to be transferred.
wait_for_inclusion (bool): Waits for the transaction to be included in a block. Default is ``True``.
wait_for_finalization (bool): Waits for the transaction to be finalized on the blockchain. Default is ``False``.
- prompt (bool): If ``True``, prompts for user confirmation before proceeding. Default is ``False``.
Returns:
transfer_extrinsic (bool): ``True`` if the transfer is successful, False otherwise.
@@ -1038,7 +1176,6 @@ def transfer(
amount=amount,
wait_for_inclusion=wait_for_inclusion,
wait_for_finalization=wait_for_finalization,
- prompt=prompt,
)
# Community uses this method via `bittensor.api.extrinsics.prometheus.prometheus_extrinsic`
@@ -1102,37 +1239,6 @@ def make_substrate_call_with_retry():
return NeuronInfo.from_vec_u8(result)
- # Community uses this method
- def serve_prometheus(
- self,
- wallet: "Wallet",
- port: int,
- netuid: int,
- wait_for_inclusion: bool = False,
- wait_for_finalization: bool = True,
- ) -> bool:
- """
- Serves Prometheus metrics by submitting an extrinsic to a blockchain network via the specified wallet. The function allows configuring whether to wait for the transaction's inclusion in a block and its finalization.
-
- Args:
- wallet (bittensor_wallet.Wallet): Bittensor wallet instance used for submitting the extrinsic.
- port (int): The port number on which Prometheus metrics are served.
- netuid (int): The unique identifier of the subnetwork.
- wait_for_inclusion (bool): If True, waits for the transaction to be included in a block. Defaults to ``False``.
- wait_for_finalization (bool): If True, waits for the transaction to be finalized. Defaults to ``True``.
-
- Returns:
- bool: Returns True if the Prometheus extrinsic is successfully processed, otherwise False.
- """
- return prometheus_extrinsic(
- self,
- wallet=wallet,
- port=port,
- netuid=netuid,
- wait_for_inclusion=wait_for_inclusion,
- wait_for_finalization=wait_for_finalization,
- )
-
# Community uses this method
def get_subnet_hyperparameters(
self, netuid: int, block: Optional[int] = None
@@ -1326,6 +1432,36 @@ def subnet_exists(self, netuid: int, block: Optional[int] = None) -> bool:
_result = self.query_subtensor("NetworksAdded", block, [netuid])
return getattr(_result, "value", False)
+ @networking.ensure_connected
+ def get_all_subnets_info(self, block: Optional[int] = None) -> list[SubnetInfo]:
+ """
+ Retrieves detailed information about all subnets within the Bittensor network. This function provides comprehensive data on each subnet, including its characteristics and operational parameters.
+
+ Args:
+ block (Optional[int]): The blockchain block number for the query.
+
+ Returns:
+ list[SubnetInfo]: A list of SubnetInfo objects, each containing detailed information about a subnet.
+
+ Gaining insights into the subnets' details assists in understanding the network's composition, the roles of different subnets, and their unique features.
+ """
+
+ @retry(delay=1, tries=3, backoff=2, max_delay=4)
+ def make_substrate_call_with_retry():
+ block_hash = None if block is None else self.substrate.get_block_hash(block)
+
+ return self.substrate.rpc_request(
+ method="subnetInfo_getSubnetsInfo", # custom rpc method
+ params=[block_hash] if block_hash else [],
+ )
+
+ json_body = make_substrate_call_with_retry()
+
+ if not (result := json_body.get("result", None)):
+ return []
+
+ return SubnetInfo.list_from_vec_u8(result)
+
# Metagraph uses this method
def bonds(
self, netuid: int, block: Optional[int] = None
@@ -1352,6 +1488,30 @@ def bonds(
return b_map
+ def get_subnet_burn_cost(self, block: Optional[int] = None) -> Optional[str]:
+ """
+ Retrieves the burn cost for registering a new subnet within the Bittensor network. This cost represents the amount of Tao that needs to be locked or burned to establish a new subnet.
+
+ Args:
+ block (Optional[int]): The blockchain block number for the query.
+
+ Returns:
+ int: The burn cost for subnet registration.
+
+ The subnet burn cost is an important economic parameter, reflecting the network's mechanisms for controlling the proliferation of subnets and ensuring their commitment to the network's long-term viability.
+ """
+ lock_cost = self.query_runtime_api(
+ runtime_api="SubnetRegistrationRuntimeApi",
+ method="get_network_registration_cost",
+ params=[],
+ block=block,
+ )
+
+ if lock_cost is None:
+ return None
+
+ return lock_cost
+
# Metagraph uses this method
def neurons(self, netuid: int, block: Optional[int] = None) -> list["NeuronInfo"]:
"""
@@ -1549,9 +1709,7 @@ def get_transfer_fee(
call=call, keypair=wallet.coldkeypub
)
except Exception as e:
- settings.bt_console.print(
- f":cross_mark: [red]Failed to get payment info[/red]:[bold white]\n {e}[/bold white]"
- )
+ logging.error(f"Failed to get payment info. {e}")
payment_info = {"partialFee": int(2e7)} # assume 0.02 Tao
fee = Balance.from_rao(payment_info["partialFee"])
@@ -1599,7 +1757,6 @@ def commit_weights(
version_key: int = settings.version_as_int,
wait_for_inclusion: bool = False,
wait_for_finalization: bool = False,
- prompt: bool = False,
max_retries: int = 5,
) -> tuple[bool, str]:
"""
@@ -1615,7 +1772,6 @@ def commit_weights(
version_key (int): Version key for compatibility with the network. Default is ``int representation of Bittensor version.``.
wait_for_inclusion (bool): Waits for the transaction to be included in a block. Default is ``False``.
wait_for_finalization (bool): Waits for the transaction to be finalized on the blockchain. Default is ``False``.
- prompt (bool): If ``True``, prompts for user confirmation before proceeding. Default is ``False``.
max_retries (int): The number of maximum attempts to commit weights. Default is ``5``.
Returns:
@@ -1654,7 +1810,6 @@ def commit_weights(
commit_hash=commit_hash,
wait_for_inclusion=wait_for_inclusion,
wait_for_finalization=wait_for_finalization,
- prompt=prompt,
)
if success:
break
@@ -1676,7 +1831,6 @@ def reveal_weights(
version_key: int = settings.version_as_int,
wait_for_inclusion: bool = False,
wait_for_finalization: bool = False,
- prompt: bool = False,
max_retries: int = 5,
) -> tuple[bool, str]:
"""
@@ -1692,7 +1846,6 @@ def reveal_weights(
version_key (int): Version key for compatibility with the network. Default is ``int representation of Bittensor version``.
wait_for_inclusion (bool): Waits for the transaction to be included in a block. Default is ``False``.
wait_for_finalization (bool): Waits for the transaction to be finalized on the blockchain. Default is ``False``.
- prompt (bool): If ``True``, prompts for user confirmation before proceeding. Default is ``False``.
max_retries (int): The number of maximum attempts to reveal weights. Default is ``5``.
Returns:
@@ -1719,7 +1872,6 @@ def reveal_weights(
version_key=version_key,
wait_for_inclusion=wait_for_inclusion,
wait_for_finalization=wait_for_finalization,
- prompt=prompt,
)
if success:
break
@@ -1730,7 +1882,101 @@ def reveal_weights(
return success, message
- # Subnet 27 uses this method
- _do_serve_prometheus = do_serve_prometheus
+ def difficulty(self, netuid: int, block: Optional[int] = None) -> Optional[int]:
+ """
+ Retrieves the 'Difficulty' hyperparameter for a specified subnet in the Bittensor network.
+
+ This parameter is instrumental in determining the computational challenge required for neurons to participate in consensus and validation processes.
+
+ Args:
+ netuid (int): The unique identifier of the subnet.
+ block (Optional[int]): The blockchain block number for the query.
+
+ Returns:
+ Optional[int]: The value of the 'Difficulty' hyperparameter if the subnet exists, ``None`` otherwise.
+
+ The 'Difficulty' parameter directly impacts the network's security and integrity by setting the computational effort required for validating transactions and participating in the network's consensus mechanism.
+ """
+ call = self._get_hyperparameter(
+ param_name="Difficulty", netuid=netuid, block=block
+ )
+ if call is None:
+ return None
+ return int(call)
+
+ def recycle(self, netuid: int, block: Optional[int] = None) -> Optional["Balance"]:
+ """
+ Retrieves the 'Burn' hyperparameter for a specified subnet. The 'Burn' parameter represents the amount of Tao that is effectively recycled within the Bittensor network.
+
+ Args:
+ netuid (int): The unique identifier of the subnet.
+ block (Optional[int]): The blockchain block number for the query.
+
+ Returns:
+ Optional[Balance]: The value of the 'Burn' hyperparameter if the subnet exists, None otherwise.
+
+ Understanding the 'Burn' rate is essential for analyzing the network registration usage, particularly how it is correlated with user activity and the overall cost of participation in a given subnet.
+ """
+ call = self._get_hyperparameter(param_name="Burn", netuid=netuid, block=block)
+ return None if call is None else Balance.from_rao(int(call))
+
+ def get_delegate_take(
+ self, hotkey_ss58: str, block: Optional[int] = None
+ ) -> Optional[float]:
+ """
+ Retrieves the delegate 'take' percentage for a neuron identified by its hotkey. The 'take' represents the percentage of rewards that the delegate claims from its nominators' stakes.
+
+ Args:
+ hotkey_ss58 (str): The ``SS58`` address of the neuron's hotkey.
+ block (Optional[int]): The blockchain block number for the query.
+
+ Returns:
+ Optional[float]: The delegate take percentage, None if not available.
+
+ The delegate take is a critical parameter in the network's incentive structure, influencing the distribution of rewards among neurons and their nominators.
+ """
+ _result = self.query_subtensor("Delegates", block, [hotkey_ss58])
+ return (
+ None
+ if getattr(_result, "value", None) is None
+ else u16_normalized_float(_result.value)
+ )
+
+ @networking.ensure_connected
+ def get_delegate_by_hotkey(
+ self, hotkey_ss58: str, block: Optional[int] = None
+ ) -> Optional[DelegateInfo]:
+ """
+ Retrieves detailed information about a delegate neuron based on its hotkey. This function provides a comprehensive view of the delegate's status, including its stakes, nominators, and reward distribution.
+
+ Args:
+ hotkey_ss58 (str): The ``SS58`` address of the delegate's hotkey.
+ block (Optional[int]): The blockchain block number for the query. Default is ``None``.
+
+ Returns:
+ Optional[DelegateInfo]: Detailed information about the delegate neuron, ``None`` if not found.
+
+ This function is essential for understanding the roles and influence of delegate neurons within the Bittensor network's consensus and governance structures.
+ """
+
+ @retry(delay=1, tries=3, backoff=2, max_delay=4)
+ def make_substrate_call_with_retry(encoded_hotkey_: list[int]):
+ block_hash = None if block is None else self.substrate.get_block_hash(block)
+
+ return self.substrate.rpc_request(
+ method="delegateInfo_getDelegate", # custom rpc method
+ params=(
+ [encoded_hotkey_, block_hash] if block_hash else [encoded_hotkey_]
+ ),
+ )
+
+ encoded_hotkey = ss58_to_vec_u8(hotkey_ss58)
+ json_body = make_substrate_call_with_retry(encoded_hotkey)
+
+ if not (result := json_body.get("result", None)):
+ return None
+
+ return DelegateInfo.from_vec_u8(result)
+
# Subnet 27 uses this method name
_do_serve_axon = do_serve_axon
diff --git a/bittensor/utils/btlogging/format.py b/bittensor/utils/btlogging/format.py
index 1aa505c82c..9e279a3b26 100644
--- a/bittensor/utils/btlogging/format.py
+++ b/bittensor/utils/btlogging/format.py
@@ -54,6 +54,8 @@ def _success(self, message: str, *args, **kws):
":white_heavy_check_mark:": "✅",
":cross_mark:": "❌",
":satellite:": "🛰️",
+ ":warning:": "⚠️",
+ ":arrow_right:": "➡️",
}
@@ -64,6 +66,8 @@ def _success(self, message: str, *args, **kws):
"</blue>": Style.RESET_ALL,
"<green>": Fore.GREEN,
"</green>": Style.RESET_ALL,
+ "<magenta>": Fore.MAGENTA,
+ "</magenta>": Style.RESET_ALL,
}
diff --git a/bittensor/utils/btlogging/loggingmachine.py b/bittensor/utils/btlogging/loggingmachine.py
index abc4758bf8..66d7cc7595 100644
--- a/bittensor/utils/btlogging/loggingmachine.py
+++ b/bittensor/utils/btlogging/loggingmachine.py
@@ -49,7 +49,8 @@
def _concat_message(msg="", prefix="", suffix=""):
"""Concatenates a message with optional prefix and suffix."""
- msg = f"{f'{prefix} - ' if prefix else ''}{msg}{f' - {suffix}' if suffix else ''}"
+ empty_pref_suf = [None, ""]
+ msg = f"{f'{prefix} - ' if prefix not in empty_pref_suf else ''}{msg}{f' - {suffix}' if suffix not in empty_pref_suf else ''}"
return msg
@@ -443,27 +444,27 @@ def info(self, msg="", prefix="", suffix="", *args, **kwargs):
def success(self, msg="", prefix="", suffix="", *args, **kwargs):
"""Wraps success message with prefix and suffix."""
- msg = f"{prefix} - {msg} - {suffix}"
+ msg = _concat_message(msg, prefix, suffix)
self._logger.success(msg, *args, **kwargs)
def warning(self, msg="", prefix="", suffix="", *args, **kwargs):
"""Wraps warning message with prefix and suffix."""
- msg = f"{prefix} - {msg} - {suffix}"
+ msg = _concat_message(msg, prefix, suffix)
self._logger.warning(msg, *args, **kwargs)
def error(self, msg="", prefix="", suffix="", *args, **kwargs):
"""Wraps error message with prefix and suffix."""
- msg = f"{prefix} - {msg} - {suffix}"
+ msg = _concat_message(msg, prefix, suffix)
self._logger.error(msg, *args, **kwargs)
def critical(self, msg="", prefix="", suffix="", *args, **kwargs):
"""Wraps critical message with prefix and suffix."""
- msg = f"{prefix} - {msg} - {suffix}"
+ msg = _concat_message(msg, prefix, suffix)
self._logger.critical(msg, *args, **kwargs)
def exception(self, msg="", prefix="", suffix="", *args, **kwargs):
"""Wraps exception message with prefix and suffix."""
- msg = f"{prefix} - {msg} - {suffix}"
+ msg = _concat_message(msg, prefix, suffix)
self._logger.exception(msg, *args, **kwargs)
def on(self):
diff --git a/bittensor/utils/formatting.py b/bittensor/utils/formatting.py
new file mode 100644
index 0000000000..1ee3fd6671
--- /dev/null
+++ b/bittensor/utils/formatting.py
@@ -0,0 +1,41 @@
+# The MIT License (MIT)
+# Copyright © 2024 Opentensor Foundation
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
+# documentation files (the “Software”), to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
+# and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all copies or substantial portions of
+# the Software.
+#
+# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
+# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+# DEALINGS IN THE SOFTWARE.
+
+import math
+
+
+def get_human_readable(num, suffix="H"):
+ """Convert a number into a human-readable format with suffixes."""
+ for unit in ["", "K", "M", "G", "T", "P", "E", "Z"]:
+ if abs(num) < 1000.0:
+ return f"{num:3.1f}{unit}{suffix}"
+ num /= 1000.0
+ return f"{num:.1f}Y{suffix}"
+
+
+def millify(n: int):
+ """Converts a number into a more readable format with suffixes."""
+ mill_names = ["", " K", " M", " B", " T"]
+ n = float(n)
+ mill_idx = max(
+ 0,
+ min(
+ len(mill_names) - 1,
+ int(math.floor(0 if n == 0 else math.log10(abs(n)) / 3)),
+ ),
+ )
+ return "{:.2f}{}".format(n / 10 ** (3 * mill_idx), mill_names[mill_idx])
diff --git a/bittensor/utils/register_cuda.py b/bittensor/utils/register_cuda.py
new file mode 100644
index 0000000000..e0a77f19c9
--- /dev/null
+++ b/bittensor/utils/register_cuda.py
@@ -0,0 +1,130 @@
+# The MIT License (MIT)
+# Copyright © 2024 Opentensor Foundation
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
+# documentation files (the “Software”), to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
+# and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all copies or substantial portions of
+# the Software.
+#
+# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
+# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+# DEALINGS IN THE SOFTWARE.
+
+import binascii
+import hashlib
+import io
+import math
+from contextlib import redirect_stdout
+from typing import Any, Union
+
+import numpy as np
+from Crypto.Hash import keccak
+
+
+def solve_cuda(
+ nonce_start: "np.int64",
+ update_interval: "np.int64",
+ tpb: int,
+ block_and_hotkey_hash_bytes: bytes,
+ difficulty: int,
+ limit: int,
+ dev_id: int = 0,
+) -> Union[tuple[Any, bytes], tuple[int, bytes], tuple[Any, None]]:
+ """
+ Solves the PoW problem using CUDA.
+
+ Args:
+ nonce_start (numpy.int64): Starting nonce.
+ update_interval (numpy.int64): Number of nonces to solve before updating block information.
+ tpb (int): Threads per block.
+ block_and_hotkey_hash_bytes (bytes): Keccak(Bytes of the block hash + bytes of the hotkey) 64 bytes.
+ difficulty (int): Difficulty of the PoW problem.
+ limit (int): Upper limit of the nonce.
+ dev_id (int): The CUDA device ID. Defaults to ``0``.
+
+ Returns:
+ (Union[tuple[Any, bytes], tuple[int, bytes], tuple[Any, None]]): Tuple of the nonce and the seal corresponding to the solution. Returns -1 for nonce if no solution is found.
+ """
+
+ try:
+ import cubit
+ except ImportError:
+ raise ImportError(
+ "Please install cubit. See the instruction https://github.com/opentensor/cubit?tab=readme-ov-file#install."
+ )
+
+ upper = int(limit // difficulty)
+
+ upper_bytes = upper.to_bytes(32, byteorder="little", signed=False)
+
+ def _hex_bytes_to_u8_list(hex_bytes: bytes):
+ """Converts a sequence of hex bytes to a list of unsigned 8-bit integers."""
+ hex_chunks = [
+ int(hex_bytes[i : i + 2], 16) for i in range(0, len(hex_bytes), 2)
+ ]
+ return hex_chunks
+
+ def _create_seal_hash(block_and_hotkey_hash_hex_: bytes, nonce: int) -> bytes:
+ """Creates a seal hash from the block and hotkey hash and nonce."""
+ nonce_bytes = binascii.hexlify(nonce.to_bytes(8, "little"))
+ pre_seal = nonce_bytes + block_and_hotkey_hash_hex_
+ seal_sh256 = hashlib.sha256(bytearray(_hex_bytes_to_u8_list(pre_seal))).digest()
+ kec = keccak.new(digest_bits=256)
+ return kec.update(seal_sh256).digest()
+
+ def _seal_meets_difficulty(seal_: bytes, difficulty_: int):
+ """Checks if the seal meets the given difficulty."""
+ seal_number = int.from_bytes(seal_, "big")
+ product = seal_number * difficulty_
+ limit_ = int(math.pow(2, 256)) - 1
+
+ return product < limit_
+
+ # Call cython function
+ # int blockSize, uint64 nonce_start, uint64 update_interval, const unsigned char[:] limit,
+ # const unsigned char[:] block_bytes, int dev_id
+ block_and_hotkey_hash_hex = binascii.hexlify(block_and_hotkey_hash_bytes)[:64]
+
+ solution = cubit.solve_cuda(
+ tpb,
+ nonce_start,
+ update_interval,
+ upper_bytes,
+ block_and_hotkey_hash_hex,
+ dev_id,
+ ) # 0 is first GPU
+ seal = None
+ if solution != -1:
+ seal = _create_seal_hash(block_and_hotkey_hash_hex, solution)
+ if _seal_meets_difficulty(seal, difficulty):
+ return solution, seal
+ else:
+ return -1, b"\x00" * 32
+ return solution, seal
+
+
+def reset_cuda():
+ """Resets the CUDA environment."""
+ try:
+ import cubit
+ except ImportError:
+ raise ImportError("Please install cubit")
+ cubit.reset_cuda()
+
+
+def log_cuda_errors() -> str:
+ """Logs any CUDA errors."""
+ try:
+ import cubit
+ except ImportError:
+ raise ImportError("Please install cubit")
+
+ file = io.StringIO()
+ with redirect_stdout(file):
+ cubit.log_cuda_errors()
+ return file.getvalue()
diff --git a/bittensor/utils/registration.py b/bittensor/utils/registration.py
index 4d0cdb93d6..4dd6d8ec67 100644
--- a/bittensor/utils/registration.py
+++ b/bittensor/utils/registration.py
@@ -15,13 +15,30 @@
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
+import binascii
+import dataclasses
import functools
+import hashlib
+import math
+import multiprocessing
import os
-from typing import TYPE_CHECKING
+import random
+import subprocess
+import time
+from datetime import timedelta
+from multiprocessing.queues import Queue as QueueType
+from queue import Empty, Full
+from typing import Any, Callable, Optional, Union, TYPE_CHECKING
import numpy
+from Crypto.Hash import keccak
+from retry import retry
+from rich import console as rich_console, status as rich_status
+from rich.console import Console
from bittensor.utils.btlogging import logging
+from bittensor.utils.formatting import get_human_readable, millify
+from bittensor.utils.register_cuda import solve_cuda
def use_torch() -> bool:
@@ -95,5 +112,1011 @@ def __getattr__(self, name):
if TYPE_CHECKING:
import torch
+ from bittensor.core.subtensor import Subtensor
+ from bittensor_wallet import Wallet
else:
torch = LazyLoadedTorch()
+
+
+def _hex_bytes_to_u8_list(hex_bytes: bytes):
+ hex_chunks = [int(hex_bytes[i : i + 2], 16) for i in range(0, len(hex_bytes), 2)]
+ return hex_chunks
+
+
def _create_seal_hash(block_and_hotkey_hash_bytes: bytes, nonce: int) -> bytes:
    """Compute the registration seal for a (block/hotkey digest, nonce) pair.

    The pre-seal is the hex-encoded little-endian nonce followed by the first
    64 hex characters (32 bytes) of the block/hotkey digest; the seal is
    keccak-256 over the sha-256 of the pre-seal's byte values.
    """
    hex_digest = binascii.hexlify(block_and_hotkey_hash_bytes)[:64]
    pre_seal = binascii.hexlify(nonce.to_bytes(8, "little")) + hex_digest
    sha_digest = hashlib.sha256(bytearray(_hex_bytes_to_u8_list(pre_seal))).digest()
    return keccak.new(digest_bits=256).update(sha_digest).digest()
+
+
+def _seal_meets_difficulty(seal: bytes, difficulty: int, limit: int):
+ """Check if the seal meets the given difficulty criteria."""
+ seal_number = int.from_bytes(seal, "big")
+ product = seal_number * difficulty
+ return product < limit
+
+
@dataclasses.dataclass
class POWSolution:
    """A solution to the registration PoW problem."""

    nonce: int  # the winning nonce
    block_number: int  # block the seal was solved against
    difficulty: int  # difficulty the seal satisfied
    seal: bytes  # keccak-256 seal proving the work

    def is_stale(self, subtensor: "Subtensor") -> bool:
        """Return True if this solution is too old to submit.

        A solution is stale when the block it was solved for is more than
        3 blocks behind the chain's current block (the original docstring's
        "within 3 blocks" stated the opposite of what the code checks).
        """
        return self.block_number < subtensor.get_current_block() - 3
+
+
+class _UsingSpawnStartMethod:
+ def __init__(self, force: bool = False):
+ self._old_start_method = None
+ self._force = force
+
+ def __enter__(self):
+ self._old_start_method = multiprocessing.get_start_method(allow_none=True)
+ if self._old_start_method is None:
+ self._old_start_method = "spawn" # default to spawn
+
+ multiprocessing.set_start_method("spawn", force=self._force)
+
+ def __exit__(self, *args):
+ # restore the old start method
+ multiprocessing.set_start_method(self._old_start_method, force=True)
+
+
class _SolverBase(multiprocessing.Process):
    """
    A process that solves the registration PoW problem.

    Args:
        proc_num (int): The number of the process being created.
        num_proc (int): The total number of processes running.
        update_interval (int): The number of nonces to try to solve before checking for a new block.
        finished_queue (multiprocessing.Queue): The queue to put the process number when a process finishes each update_interval. Used for calculating the average time per update_interval across all processes.
        solution_queue (multiprocessing.Queue): The queue to put the solution the process has found during the pow solve.
        newBlockEvent (multiprocessing.Event): The event to set by the main process when a new block is finalized in the network. The solver process will check for the event after each update_interval. The solver process will get the new block hash and difficulty and start solving for a new nonce.
        stopEvent (multiprocessing.Event): The event to set by the main process when all the solver processes should stop. The solver process will check for the event after each update_interval. The solver process will stop when the event is set. Used to stop the solver processes when a solution is found.
        curr_block (multiprocessing.Array): The array containing this process's current block hash. The main process will set the array to the new block hash when a new block is finalized in the network. The solver process will get the new block hash from this array when newBlockEvent is set.
        curr_block_num (multiprocessing.Value): The value containing this process's current block number. The main process will set the value to the new block number when a new block is finalized in the network. The solver process will get the new block number from this value when newBlockEvent is set.
        curr_diff (multiprocessing.Array): The array containing this process's current difficulty. The main process will set the array to the new difficulty when a new block is finalized in the network. The solver process will get the new difficulty from this array when newBlockEvent is set.
        check_block (multiprocessing.Lock): The lock to prevent this process from getting the new block data while the main process is updating the data.
        limit (int): The limit of the pow solve for a valid solution.
    """

    proc_num: int
    num_proc: int
    update_interval: int
    finished_queue: "multiprocessing.Queue"
    solution_queue: "multiprocessing.Queue"
    newBlockEvent: "multiprocessing.Event"
    stopEvent: "multiprocessing.Event"
    hotkey_bytes: bytes
    curr_block: "multiprocessing.Array"
    curr_block_num: "multiprocessing.Value"
    curr_diff: "multiprocessing.Array"
    check_block: "multiprocessing.Lock"
    limit: int

    def __init__(
        self,
        proc_num,
        num_proc,
        update_interval,
        finished_queue,
        solution_queue,
        stopEvent,
        curr_block,
        curr_block_num,
        curr_diff,
        check_block,
        limit,
    ):
        # daemon=True so solvers die with the parent instead of orphaning.
        multiprocessing.Process.__init__(self, daemon=True)
        self.proc_num = proc_num
        self.num_proc = num_proc
        self.update_interval = update_interval
        self.finished_queue = finished_queue
        self.solution_queue = solution_queue
        # Each solver owns its own new-block event; the launcher sets it
        # per worker whenever fresh block data is published.
        self.newBlockEvent = multiprocessing.Event()
        self.newBlockEvent.clear()
        self.curr_block = curr_block
        self.curr_block_num = curr_block_num
        self.curr_diff = curr_diff
        self.check_block = check_block
        self.stopEvent = stopEvent
        self.limit = limit

    def run(self):
        raise NotImplementedError("_SolverBase is an abstract class")

    @staticmethod
    def create_shared_memory() -> (
        tuple["multiprocessing.Array", "multiprocessing.Value", "multiprocessing.Array"]
    ):
        """Creates shared memory for the solver processes to use."""
        # 'h' (signed short) per digest byte: each slot holds one 0..255 value
        # of the 32-byte block/hotkey hash.
        curr_block = multiprocessing.Array("h", 32, lock=True)  # byte array
        curr_block_num = multiprocessing.Value("i", 0, lock=True)  # int
        # Difficulty is stored as two 64-bit slots used as [high, low] 32-bit
        # halves — see _registration_diff_pack/_registration_diff_unpack.
        curr_diff = multiprocessing.Array("Q", [0, 0], lock=True)  # [high, low]

        return curr_block, curr_block_num, curr_diff
+
+
class _Solver(_SolverBase):
    """CPU solver: scans random nonce windows until a seal meets difficulty."""

    def run(self):
        block_number: int
        block_and_hotkey_hash_bytes: bytes
        block_difficulty: int
        nonce_limit = int(math.pow(2, 64)) - 1  # U64 max

        # Start at random nonce
        nonce_start = random.randint(0, nonce_limit)
        nonce_end = nonce_start + self.update_interval
        while not self.stopEvent.is_set():
            # NOTE(review): the block variables are only bound once
            # newBlockEvent fires; the launcher sets the event before
            # starting the workers, so the first iteration is covered —
            # confirm no code path starts a solver without doing so.
            if self.newBlockEvent.is_set():
                with self.check_block:
                    block_number = self.curr_block_num.value
                    # Rebuild the 32-byte digest from the shared short array.
                    block_and_hotkey_hash_bytes = bytes(self.curr_block)
                    block_difficulty = _registration_diff_unpack(self.curr_diff)

                self.newBlockEvent.clear()

            # Do a block of nonces
            solution = _solve_for_nonce_block(
                nonce_start,
                nonce_end,
                block_and_hotkey_hash_bytes,
                block_difficulty,
                self.limit,
                block_number,
            )
            if solution is not None:
                self.solution_queue.put(solution)

            try:
                # Send time
                self.finished_queue.put_nowait(self.proc_num)
            except Full:
                pass

            # A fresh random window is chosen each round (random restarts
            # rather than a sequential sweep); the modulo is a no-op given
            # randint's bounds but guards the window start against overflow.
            nonce_start = random.randint(0, nonce_limit)
            nonce_start = nonce_start % nonce_limit
            nonce_end = nonce_start + self.update_interval
+ nonce_end = nonce_start + self.update_interval
+
+
class _CUDASolver(_SolverBase):
    """GPU solver: offloads nonce windows to a CUDA device via solve_cuda."""

    dev_id: int  # CUDA device index this worker drives
    tpb: int  # threads per block for kernel launches

    def __init__(
        self,
        proc_num,
        num_proc,
        update_interval,
        finished_queue,
        solution_queue,
        stopEvent,
        curr_block,
        curr_block_num,
        curr_diff,
        check_block,
        limit,
        dev_id: int,
        tpb: int,
    ):
        super().__init__(
            proc_num,
            num_proc,
            update_interval,
            finished_queue,
            solution_queue,
            stopEvent,
            curr_block,
            curr_block_num,
            curr_diff,
            check_block,
            limit,
        )
        self.dev_id = dev_id
        self.tpb = tpb

    def run(self):
        # Dummy initial values; replaced on the first newBlockEvent, which the
        # launcher sets before starting the workers.
        block_number: int = 0  # dummy value
        block_and_hotkey_hash_bytes: bytes = b"0" * 32  # dummy value
        block_difficulty: int = int(math.pow(2, 64)) - 1  # dummy value
        nonce_limit = int(math.pow(2, 64)) - 1  # U64MAX

        # Start at random nonce
        nonce_start = random.randint(0, nonce_limit)
        while not self.stopEvent.is_set():
            if self.newBlockEvent.is_set():
                with self.check_block:
                    block_number = self.curr_block_num.value
                    block_and_hotkey_hash_bytes = bytes(self.curr_block)
                    block_difficulty = _registration_diff_unpack(self.curr_diff)

                self.newBlockEvent.clear()

            # Do a block of nonces
            solution = _solve_for_nonce_block_cuda(
                nonce_start,
                self.update_interval,
                block_and_hotkey_hash_bytes,
                block_difficulty,
                self.limit,
                block_number,
                self.dev_id,
                self.tpb,
            )
            if solution is not None:
                self.solution_queue.put(solution)

            try:
                # Signal that a nonce_block was finished using queue
                # send our proc_num
                self.finished_queue.put(self.proc_num)
            except Full:
                pass

            # increase nonce by number of nonces processed
            # (each kernel launch covers update_interval * tpb nonces)
            nonce_start += self.update_interval * self.tpb
            nonce_start = nonce_start % nonce_limit
+
+
def _solve_for_nonce_block_cuda(
    nonce_start: int,
    update_interval: int,
    block_and_hotkey_hash_bytes: bytes,
    difficulty: int,
    limit: int,
    block_number: int,
    dev_id: int,
    tpb: int,
) -> Optional["POWSolution"]:
    """Attempt the POW on a CUDA device over the nonce window
    [nonce_start, nonce_start + update_interval * tpb).

    Returns a POWSolution when the kernel finds a winning nonce, else None.
    """
    nonce, seal = solve_cuda(
        nonce_start,
        update_interval,
        tpb,
        block_and_hotkey_hash_bytes,
        difficulty,
        limit,
        dev_id,
    )

    # solve_cuda signals "no solution in this window" with -1.
    if nonce == -1:
        return None
    return POWSolution(nonce, block_number, difficulty, seal)
+
+
def _solve_for_nonce_block(
    nonce_start: int,
    nonce_end: int,
    block_and_hotkey_hash_bytes: bytes,
    difficulty: int,
    limit: int,
    block_number: int,
) -> Optional["POWSolution"]:
    """Scan nonces in [nonce_start, nonce_end) and return the first solution.

    Returns None when no nonce in the window produces a seal that meets the
    difficulty.
    """
    for candidate in range(nonce_start, nonce_end):
        candidate_seal = _create_seal_hash(block_and_hotkey_hash_bytes, candidate)
        if _seal_meets_difficulty(candidate_seal, difficulty, limit):
            return POWSolution(candidate, block_number, difficulty, candidate_seal)
    return None
+
+
+def _registration_diff_unpack(packed_diff: "multiprocessing.Array") -> int:
+ """Unpacks the packed two 32-bit integers into one 64-bit integer. Little endian."""
+ return int(packed_diff[0] << 32 | packed_diff[1])
+
+
+def _registration_diff_pack(diff: int, packed_diff: "multiprocessing.Array"):
+ """Packs the difficulty into two 32-bit integers. Little endian."""
+ packed_diff[0] = diff >> 32
+ packed_diff[1] = diff & 0xFFFFFFFF # low 32 bits
+
+
def _hash_block_with_hotkey(block_bytes: bytes, hotkey_bytes: bytes) -> bytes:
    """Return the 32-byte keccak-256 digest of block hash + hotkey pubkey."""
    hasher = keccak.new(digest_bits=256)
    hasher.update(bytearray(block_bytes + hotkey_bytes))
    return hasher.digest()
+
+
def _update_curr_block(
    curr_diff: "multiprocessing.Array",
    curr_block: "multiprocessing.Array",
    curr_block_num: "multiprocessing.Value",
    block_number: int,
    block_bytes: bytes,
    diff: int,
    hotkey_bytes: bytes,
    lock: "multiprocessing.Lock",
):
    """Atomically publish new block data to the shared solver state.

    Stores the block number, the keccak digest of (block hash + hotkey), and
    the packed difficulty under ``lock`` so solvers never read a torn update.
    """
    with lock:
        curr_block_num.value = block_number
        # Hash the block together with the hotkey before sharing it.
        digest = _hash_block_with_hotkey(block_bytes, hotkey_bytes)
        for idx, byte_val in enumerate(digest):
            curr_block[idx] = byte_val
        _registration_diff_pack(diff, curr_diff)
+
+
def get_cpu_count() -> int:
    """Return the number of CPUs available to this process.

    Uses CPU affinity where the platform supports it; falls back to the raw
    core count elsewhere (e.g. macOS has no ``sched_getaffinity``).
    """
    getaffinity = getattr(os, "sched_getaffinity", None)
    if getaffinity is None:
        # OSX does not have sched_getaffinity
        return os.cpu_count()
    return len(getaffinity(0))
+
+
@dataclasses.dataclass
class RegistrationStatistics:
    """Statistics for a registration."""

    time_spent_total: float  # seconds since solving started
    rounds_total: int  # completed update intervals across all workers
    time_average: float  # average seconds per round
    time_spent: float  # seconds spent on the most recent round(s)
    hash_rate_perpetual: float  # hashes/sec averaged over the whole run
    hash_rate: float  # EWMA hashes/sec
    difficulty: int  # current chain difficulty
    block_number: int  # block the solvers are working against
    # Hex block hash with "0x" prefix. This is a str, not bytes: consumers
    # call bytes.fromhex(block_hash[2:]) and block_hash.encode("utf-8") on it.
    block_hash: str
+
+
class RegistrationStatisticsLogger:
    """Renders registration progress, either in-place (rich spinner status)
    or as appended console log lines."""

    status: Optional[rich_status.Status]

    def __init__(
        self,
        console: Optional[rich_console.Console] = None,
        output_in_place: bool = True,
    ) -> None:
        # Lazily create a console when the caller did not supply one.
        self.console = console if console is not None else Console()
        # In-place mode uses a rich Status; otherwise fall back to .log().
        self.status = self.console.status("Solving") if output_in_place else None

    def start(self) -> None:
        if self.status is not None:
            self.status.start()

    def stop(self) -> None:
        if self.status is not None:
            self.status.stop()

    def get_status_message(
        self, stats: RegistrationStatistics, verbose: bool = False
    ) -> str:
        """Generates the status message based on registration statistics."""
        parts = [
            "Solving\n",
            f"Time Spent (total): [bold white]{timedelta(seconds=stats.time_spent_total)}[/bold white]\n",
        ]
        if verbose:
            parts.append(f"Time Spent This Round: {timedelta(seconds=stats.time_spent)}\n")
            parts.append(f"Time Spent Average: {timedelta(seconds=stats.time_average)}\n")
        parts.append(
            f"Registration Difficulty: [bold white]{millify(stats.difficulty)}[/bold white]\n"
        )
        parts.append(
            f"Iters (Inst/Perp): [bold white]{get_human_readable(stats.hash_rate, 'H')}/s / "
            f"{get_human_readable(stats.hash_rate_perpetual, 'H')}/s[/bold white]\n"
        )
        parts.append(f"Block Number: [bold white]{stats.block_number}[/bold white]\n")
        parts.append(
            f"Block Hash: [bold white]{stats.block_hash.encode('utf-8')}[/bold white]\n"
        )
        return "".join(parts)

    def update(self, stats: RegistrationStatistics, verbose: bool = False) -> None:
        if self.status is not None:
            self.status.update(self.get_status_message(stats, verbose=verbose))
        else:
            self.console.log(self.get_status_message(stats, verbose=verbose))
+
+
def _solve_for_difficulty_fast(
    subtensor: "Subtensor",
    wallet: "Wallet",
    netuid: int,
    output_in_place: bool = True,
    num_processes: Optional[int] = None,
    update_interval: Optional[int] = None,
    n_samples: int = 10,
    alpha_: float = 0.80,
    log_verbose: bool = False,
) -> Optional[POWSolution]:
    """
    Solves the POW for registration using multiprocessing.

    Args:
        subtensor (bittensor.core.subtensor.Subtensor): Subtensor instance to connect to for block information and to submit.
        wallet (bittensor_wallet.Wallet): wallet to use for registration.
        netuid (int): The netuid of the subnet to register to.
        output_in_place (bool): If true, prints the status in place. Otherwise, prints the status on a new line.
        num_processes (int): Number of processes to use. Defaults to one process per available CPU.
        update_interval (int): Number of nonces to solve before updating block information.
        n_samples (int): The number of samples of the hash_rate to keep for the EWMA.
        alpha_ (float): The alpha for the EWMA for the hash_rate calculation.
        log_verbose (bool): If true, prints more verbose logging of the registration metrics.

    Note: The hash rate is calculated as an exponentially weighted moving average in order to make the measure more robust.
    Note: We can also modify the update interval to do smaller blocks of work, while still updating the block information after a different number of nonces, to increase the transparency of the process while still keeping the speed.
    """
    if num_processes is None:
        # Use every CPU available to this process, with a floor of one.
        # BUGFIX: this was `min(1, get_cpu_count())`, which always pinned the
        # solver to a single process and contradicted the documented default
        # of one process per CPU core.
        num_processes = max(1, get_cpu_count())

    if update_interval is None:
        update_interval = 50_000

    # Upper bound of the 256-bit hash space; a seal is valid when
    # seal * difficulty < limit.
    limit = int(math.pow(2, 256)) - 1

    curr_block, curr_block_num, curr_diff = _Solver.create_shared_memory()

    # Establish communication queues
    # See the _Solver class for more information on the queues.
    stopEvent = multiprocessing.Event()
    stopEvent.clear()

    solution_queue = multiprocessing.Queue()
    finished_queues = [multiprocessing.Queue() for _ in range(num_processes)]
    check_block = multiprocessing.Lock()

    # netuid == -1 is the faucet path, which registers the coldkey pubkey.
    hotkey_bytes = (
        wallet.coldkeypub.public_key if netuid == -1 else wallet.hotkey.public_key
    )
    # Start consumers
    solvers = [
        _Solver(
            i,
            num_processes,
            update_interval,
            finished_queues[i],
            solution_queue,
            stopEvent,
            curr_block,
            curr_block_num,
            curr_diff,
            check_block,
            limit,
        )
        for i in range(num_processes)
    ]

    # Get first block
    block_number, difficulty, block_hash = _get_block_with_retry(
        subtensor=subtensor, netuid=netuid
    )

    # block_hash is a "0x"-prefixed hex string.
    block_bytes = bytes.fromhex(block_hash[2:])
    old_block_number = block_number
    # Set to current block
    _update_curr_block(
        curr_diff,
        curr_block,
        curr_block_num,
        block_number,
        block_bytes,
        difficulty,
        hotkey_bytes,
        check_block,
    )

    # Set new block events for each solver to start at the initial block
    # (solvers read shared block state only when their event fires).
    for worker in solvers:
        worker.newBlockEvent.set()

    for worker in solvers:
        worker.start()  # start the solver processes

    start_time = time.time()  # time that the registration started
    time_last = start_time  # time that the last work blocks completed

    curr_stats = RegistrationStatistics(
        time_spent_total=0.0,
        time_average=0.0,
        rounds_total=0,
        time_spent=0.0,
        hash_rate_perpetual=0.0,
        hash_rate=0.0,
        difficulty=difficulty,
        block_number=block_number,
        block_hash=block_hash,
    )

    start_time_perpetual = time.time()

    logger = RegistrationStatisticsLogger(output_in_place=output_in_place)
    logger.start()

    solution = None

    hash_rates = [0] * n_samples  # The last n true hash_rates
    weights = [alpha_**i for i in range(n_samples)]  # weights decay by alpha

    while netuid == -1 or not subtensor.is_hotkey_registered(
        netuid=netuid, hotkey_ss58=wallet.hotkey.ss58_address
    ):
        # Wait until a solver finds a solution
        try:
            solution = solution_queue.get(block=True, timeout=0.25)
            if solution is not None:
                break
        except Empty:
            # No solution found, try again
            pass

        # check for new block
        old_block_number = _check_for_newest_block_and_update(
            subtensor=subtensor,
            netuid=netuid,
            hotkey_bytes=hotkey_bytes,
            old_block_number=old_block_number,
            curr_diff=curr_diff,
            curr_block=curr_block,
            curr_block_num=curr_block_num,
            curr_stats=curr_stats,
            update_curr_block=_update_curr_block,
            check_block=check_block,
            solvers=solvers,
        )

        # Count how many workers completed an update_interval since last check.
        num_time = 0
        for finished_queue in finished_queues:
            try:
                finished_queue.get(timeout=0.1)
                num_time += 1

            except Empty:
                continue

        time_now = time.time()  # get current time
        time_since_last = time_now - time_last  # get time since last work block(s)
        if num_time > 0 and time_since_last > 0.0:
            # create EWMA of the hash_rate to make measure more robust

            hash_rate_ = (num_time * update_interval) / time_since_last
            hash_rates.append(hash_rate_)
            hash_rates.pop(0)  # remove the 0th data point
            curr_stats.hash_rate = sum(
                [hash_rates[i] * weights[i] for i in range(n_samples)]
            ) / (sum(weights))

            # update time last to now
            time_last = time_now

            curr_stats.time_average = (
                curr_stats.time_average * curr_stats.rounds_total
                + curr_stats.time_spent
            ) / (curr_stats.rounds_total + num_time)
            curr_stats.rounds_total += num_time

        # Update stats
        curr_stats.time_spent = time_since_last
        new_time_spent_total = time_now - start_time_perpetual
        curr_stats.hash_rate_perpetual = (
            curr_stats.rounds_total * update_interval
        ) / new_time_spent_total
        curr_stats.time_spent_total = new_time_spent_total

        # Update the logger
        logger.update(curr_stats, verbose=log_verbose)

    # exited while, solution contains the nonce or wallet is registered
    stopEvent.set()  # stop all other processes
    logger.stop()

    # terminate and wait for all solvers to exit
    _terminate_workers_and_wait_for_exit(solvers)

    return solution
+
+
@retry(Exception, tries=3, delay=1)
def _get_block_with_retry(
    subtensor: "Subtensor", netuid: int
) -> tuple[int, int, str]:
    """
    Gets the current block number, difficulty, and block hash from the substrate node.

    Retries up to 3 times (1s delay) on any exception via the ``retry`` decorator.

    Args:
        subtensor (bittensor.core.subtensor.Subtensor): The subtensor object to use to get the block number, difficulty, and block hash.
        netuid (int): The netuid of the network to get the block number, difficulty, and block hash from.

    Returns:
        tuple[int, int, str]
            block_number (int): The current block number.
            difficulty (int): The current difficulty of the subnet.
            block_hash (str): The current block hash as a "0x"-prefixed hex
                string. (The previous ``bytes`` annotation was wrong: callers
                run ``bytes.fromhex(block_hash[2:])`` and
                ``block_hash.encode('utf-8')`` on it.)

    Raises:
        Exception: If the block hash is None.
        ValueError: If the difficulty is None.
    """
    block_number = subtensor.get_current_block()
    # netuid == -1 (faucet) has no subnet difficulty; use a fixed one.
    difficulty = 1_000_000 if netuid == -1 else subtensor.difficulty(netuid=netuid)
    block_hash = subtensor.get_block_hash(block_number)
    if block_hash is None:
        raise Exception(
            "Network error. Could not connect to substrate to get block hash"
        )
    if difficulty is None:
        raise ValueError("Chain error. Difficulty is None")
    return block_number, difficulty, block_hash
+
+
def _check_for_newest_block_and_update(
    subtensor: "Subtensor",
    netuid: int,
    old_block_number: int,
    hotkey_bytes: bytes,
    curr_diff: "multiprocessing.Array",
    curr_block: "multiprocessing.Array",
    curr_block_num: "multiprocessing.Value",
    update_curr_block: "Callable",
    check_block: "multiprocessing.Lock",
    solvers: Union[list["_Solver"], list["_CUDASolver"]],
    curr_stats: "RegistrationStatistics",
) -> int:
    """
    Checks for a new block and updates the current block information if a new block is found.

    Args:
        subtensor (bittensor.core.subtensor.Subtensor): The subtensor object to use for getting the current block.
        netuid (int): The netuid to use for retrieving the difficulty.
        old_block_number (int): The old block number to check against.
        hotkey_bytes (bytes): The bytes of the hotkey's pubkey.
        curr_diff (multiprocessing.Array): The current difficulty as a multiprocessing array.
        curr_block (multiprocessing.Array): Where the current block is stored as a multiprocessing array.
        curr_block_num (multiprocessing.Value): Where the current block number is stored as a multiprocessing value.
        update_curr_block (typing.Callable): A function that updates the current block.
        check_block (multiprocessing.Lock): A mp lock that is used to check for a new block.
        solvers (list[bittensor.utils.registration._Solver]): A list of solvers to update the current block for.
        curr_stats (bittensor.utils.registration.RegistrationStatistics): The current registration statistics to update.

    Returns:
        (int) The current block number.
    """
    latest_block_number = subtensor.get_current_block()
    if latest_block_number == old_block_number:
        # Nothing changed; keep the solvers working on the current block.
        return old_block_number

    # A new block was finalized: refresh block data and notify the solvers.
    # Note: the retried fetch may return an even newer block; we still return
    # the number from the first query above, matching the original flow.
    block_number, difficulty, block_hash = _get_block_with_retry(
        subtensor=subtensor, netuid=netuid
    )
    block_bytes = bytes.fromhex(block_hash[2:])

    update_curr_block(
        curr_diff,
        curr_block,
        curr_block_num,
        block_number,
        block_bytes,
        difficulty,
        hotkey_bytes,
        check_block,
    )

    # Set new block events for each solver
    for solver in solvers:
        solver.newBlockEvent.set()

    # update stats
    curr_stats.block_number = block_number
    curr_stats.block_hash = block_hash
    curr_stats.difficulty = difficulty

    return latest_block_number
+
+
def _solve_for_difficulty_fast_cuda(
    subtensor: "Subtensor",
    wallet: "Wallet",
    netuid: int,
    output_in_place: bool = True,
    update_interval: int = 50_000,
    tpb: int = 512,
    dev_id: Union[list[int], int] = 0,
    n_samples: int = 10,
    alpha_: float = 0.80,
    log_verbose: bool = False,
) -> Optional["POWSolution"]:
    """
    Solves the registration fast using CUDA.

    Args:
        subtensor (bittensor.core.subtensor.Subtensor): The subtensor node to grab blocks.
        wallet (bittensor_wallet.Wallet): The wallet to register.
        netuid (int): The netuid of the subnet to register to.
        output_in_place (bool) If true, prints the output in place, otherwise prints to new lines.
        update_interval (int): The number of nonces to try before checking for more blocks.
        tpb (int): The number of threads per block. CUDA param that should match the GPU capability
        dev_id (Union[list[int], int]): The CUDA device IDs to execute the registration on, either a single device or a list of devices.
        n_samples (int): The number of samples of the hash_rate to keep for the EWMA.
        alpha_ (float): The alpha for the EWMA for the hash_rate calculation.
        log_verbose (bool): If true, prints more verbose logging of the registration metrics.

    Note: The hash rate is calculated as an exponentially weighted moving average in order to make the measure more robust.
    """
    # Normalize dev_id to a list: one solver process per CUDA device.
    if isinstance(dev_id, int):
        dev_id = [dev_id]
    elif dev_id is None:
        dev_id = [0]

    if update_interval is None:
        update_interval = 50_000

    # `torch` is the module-level LazyLoadedTorch; the real import happens here.
    if not torch.cuda.is_available():
        raise Exception("CUDA not available")

    # Upper bound of the 256-bit hash space; a seal is valid when
    # seal * difficulty < limit.
    limit = int(math.pow(2, 256)) - 1

    # Set mp start to use spawn so CUDA doesn't complain
    with _UsingSpawnStartMethod(force=True):
        curr_block, curr_block_num, curr_diff = _CUDASolver.create_shared_memory()

        # Create a worker per CUDA device
        num_processes = len(dev_id)

        # Establish communication queues
        stopEvent = multiprocessing.Event()
        stopEvent.clear()
        solution_queue = multiprocessing.Queue()
        finished_queues = [multiprocessing.Queue() for _ in range(num_processes)]
        check_block = multiprocessing.Lock()

        # NOTE(review): unlike _solve_for_difficulty_fast, this always uses the
        # hotkey pubkey — there is no netuid == -1 coldkey (faucet) branch here.
        # Confirm the faucet path is never routed through the CUDA solver.
        hotkey_bytes = wallet.hotkey.public_key
        # Start workers
        solvers = [
            _CUDASolver(
                i,
                num_processes,
                update_interval,
                finished_queues[i],
                solution_queue,
                stopEvent,
                curr_block,
                curr_block_num,
                curr_diff,
                check_block,
                limit,
                dev_id[i],
                tpb,
            )
            for i in range(num_processes)
        ]

        # Get first block
        block_number, difficulty, block_hash = _get_block_with_retry(
            subtensor=subtensor, netuid=netuid
        )

        # block_hash is a "0x"-prefixed hex string.
        block_bytes = bytes.fromhex(block_hash[2:])
        old_block_number = block_number

        # Set to current block
        _update_curr_block(
            curr_diff,
            curr_block,
            curr_block_num,
            block_number,
            block_bytes,
            difficulty,
            hotkey_bytes,
            check_block,
        )

        # Set new block events for each solver to start at the initial block
        for worker in solvers:
            worker.newBlockEvent.set()

        for worker in solvers:
            worker.start()  # start the solver processes

        start_time = time.time()  # time that the registration started
        time_last = start_time  # time that the last work blocks completed

        curr_stats = RegistrationStatistics(
            time_spent_total=0.0,
            time_average=0.0,
            rounds_total=0,
            time_spent=0.0,
            hash_rate_perpetual=0.0,
            hash_rate=0.0,  # EWMA hash_rate (H/s)
            difficulty=difficulty,
            block_number=block_number,
            block_hash=block_hash,
        )

        start_time_perpetual = time.time()

        logger = RegistrationStatisticsLogger(output_in_place=output_in_place)
        logger.start()

        hash_rates = [0] * n_samples  # The last n true hash_rates
        weights = [alpha_**i for i in range(n_samples)]  # weights decay by alpha

        solution = None
        while netuid == -1 or not subtensor.is_hotkey_registered(
            netuid=netuid, hotkey_ss58=wallet.hotkey.ss58_address
        ):
            # Wait until a solver finds a solution
            try:
                solution = solution_queue.get(block=True, timeout=0.15)
                if solution is not None:
                    break
            except Empty:
                # No solution found, try again
                pass

            # check for new block
            old_block_number = _check_for_newest_block_and_update(
                subtensor=subtensor,
                netuid=netuid,
                hotkey_bytes=hotkey_bytes,
                curr_diff=curr_diff,
                curr_block=curr_block,
                curr_block_num=curr_block_num,
                old_block_number=old_block_number,
                curr_stats=curr_stats,
                update_curr_block=_update_curr_block,
                check_block=check_block,
                solvers=solvers,
            )

            num_time = 0
            # Get times for each solver
            for finished_queue in finished_queues:
                try:
                    proc_num = finished_queue.get(timeout=0.1)
                    num_time += 1

                except Empty:
                    continue

            time_now = time.time()  # get current time
            time_since_last = time_now - time_last  # get time since last work block(s)
            if num_time > 0 and time_since_last > 0.0:
                # create EWMA of the hash_rate to make measure more robust

                # Each finished round covers tpb * update_interval nonces.
                hash_rate_ = (num_time * tpb * update_interval) / time_since_last
                hash_rates.append(hash_rate_)
                hash_rates.pop(0)  # remove the 0th data point
                curr_stats.hash_rate = sum(
                    [hash_rates[i] * weights[i] for i in range(n_samples)]
                ) / (sum(weights))

                # update time last to now
                time_last = time_now

                curr_stats.time_average = (
                    curr_stats.time_average * curr_stats.rounds_total
                    + curr_stats.time_spent
                ) / (curr_stats.rounds_total + num_time)
                curr_stats.rounds_total += num_time

            # Update stats
            curr_stats.time_spent = time_since_last
            new_time_spent_total = time_now - start_time_perpetual
            curr_stats.hash_rate_perpetual = (
                curr_stats.rounds_total * (tpb * update_interval)
            ) / new_time_spent_total
            curr_stats.time_spent_total = new_time_spent_total

            # Update the logger
            logger.update(curr_stats, verbose=log_verbose)

        # exited while, found_solution contains the nonce or wallet is registered

        stopEvent.set()  # stop all other processes
        logger.stop()

        # terminate and wait for all solvers to exit
        _terminate_workers_and_wait_for_exit(solvers)

        return solution
+
+
+def _terminate_workers_and_wait_for_exit(
+ workers: list[Union[multiprocessing.Process, QueueType]],
+) -> None:
+ for worker in workers:
+ if isinstance(worker, QueueType):
+ worker.join_thread()
+ else:
+ try:
+ worker.join(3.0)
+ except subprocess.TimeoutExpired:
+ worker.terminate()
+ try:
+ worker.close()
+ except ValueError:
+ worker.terminate()
+
+
def create_pow(
    subtensor: "Subtensor",
    wallet: "Wallet",
    netuid: int,
    output_in_place: bool = True,
    cuda: bool = False,
    dev_id: Union[list[int], int] = 0,
    tpb: int = 256,
    num_processes: Optional[int] = None,
    update_interval: Optional[int] = None,
    log_verbose: bool = False,
) -> Optional[POWSolution]:
    """
    Creates a proof of work for the given subtensor and wallet.

    Args:
        subtensor (bittensor.core.subtensor.Subtensor): The subtensor to create a proof of work for.
        wallet (bittensor_wallet.Wallet): The wallet to create a proof of work for.
        netuid (int): The netuid for the subnet to create a proof of work for.
        output_in_place (bool): If true, prints the progress of the proof of work to the console in-place. Meaning the progress is printed on the same lines. Default is ``True``.
        cuda (bool): If true, uses CUDA to solve the proof of work. Default is ``False``.
        dev_id (Union[List[int], int]): The CUDA device id(s) to use. If cuda is true and dev_id is a list, then multiple CUDA devices will be used to solve the proof of work. Default is ``0``.
        tpb (int): The number of threads per block to use when solving the proof of work. Should be a multiple of 32. Default is ``256``.
        num_processes (Optional[int]): The number of processes to use when solving the proof of work. If None, then the number of processes is equal to the number of CPU cores. Default is None.
        update_interval (Optional[int]): The number of nonces to run before checking for a new block. Default is ``None``.
        log_verbose (bool): If true, prints the progress of the proof of work more verbosely. Default is ``False``.

    Returns:
        Optional[POWSolution]: The proof of work solution or None if the wallet
            is already registered or there is a different error. (The previous
            ``Optional[dict[str, Any]]`` annotation was wrong: both solver
            paths return a ``POWSolution``.)

    Raises:
        ValueError: If the subnet does not exist.
    """
    # netuid == -1 is the faucet; it has no subnet to validate.
    if netuid != -1 and not subtensor.subnet_exists(netuid=netuid):
        raise ValueError(f"Subnet {netuid} does not exist.")

    if cuda:
        solution: Optional[POWSolution] = _solve_for_difficulty_fast_cuda(
            subtensor,
            wallet,
            netuid=netuid,
            output_in_place=output_in_place,
            dev_id=dev_id,
            tpb=tpb,
            update_interval=update_interval,
            log_verbose=log_verbose,
        )
    else:
        solution: Optional[POWSolution] = _solve_for_difficulty_fast(
            subtensor,
            wallet,
            netuid=netuid,
            output_in_place=output_in_place,
            num_processes=num_processes,
            update_interval=update_interval,
            log_verbose=log_verbose,
        )
    return solution
diff --git a/requirements/prod.txt b/requirements/prod.txt
index 4a319c506c..17c73f6f25 100644
--- a/requirements/prod.txt
+++ b/requirements/prod.txt
@@ -1,7 +1,7 @@
wheel
setuptools~=70.0.0
-bittensor-cli
aiohttp~=3.9
+bittensor-cli
bt-decode
colorama~=0.4.6
fastapi~=0.110.1
@@ -12,6 +12,7 @@ nest_asyncio
netaddr
packaging
python-statemachine~=2.1
+pycryptodome>=3.18.0,<4.0.0
pyyaml
retry
requests
diff --git a/scripts/environments/apple_m1_environment.yml b/scripts/environments/apple_m1_environment.yml
index 25824aa64e..7d949c7e4e 100644
--- a/scripts/environments/apple_m1_environment.yml
+++ b/scripts/environments/apple_m1_environment.yml
@@ -126,7 +126,6 @@ dependencies:
- argparse==1.4.0
- arrow==1.2.3
- async-timeout==4.0.2
- - backoff==2.1.0
- blinker==1.6.2
- cachetools==4.2.4
- certifi==2024.2.2
diff --git a/tests/e2e_tests/test_axon.py b/tests/e2e_tests/test_axon.py
index 853719f85d..7e7de812b9 100644
--- a/tests/e2e_tests/test_axon.py
+++ b/tests/e2e_tests/test_axon.py
@@ -65,7 +65,6 @@ async def test_axon(local_chain):
[
f"{sys.executable}",
f'"{template_path}{templates_repo}/neurons/miner.py"',
- "--no_prompt",
"--netuid",
str(netuid),
"--subtensor.network",
diff --git a/tests/e2e_tests/test_commit_weights.py b/tests/e2e_tests/test_commit_weights.py
index ca9b0a0a2c..909b2e7088 100644
--- a/tests/e2e_tests/test_commit_weights.py
+++ b/tests/e2e_tests/test_commit_weights.py
@@ -20,7 +20,7 @@
@pytest.mark.asyncio
async def test_commit_and_reveal_weights(local_chain):
"""
- Tests the commit/reveal weights mechanism
+ Tests the commit/reveal weights mechanism with subprocess disabled (CR1.0)
Steps:
1. Register a subnet through Alice
@@ -61,7 +61,7 @@ async def test_commit_and_reveal_weights(local_chain):
subtensor = bittensor.Subtensor(network="ws://localhost:9945")
assert subtensor.get_subnet_hyperparameters(
- netuid=netuid
+ netuid=netuid,
).commit_reveal_weights_enabled, "Failed to enable commit/reveal"
# Lower the commit_reveal interval
@@ -69,17 +69,16 @@ async def test_commit_and_reveal_weights(local_chain):
local_chain,
alice_wallet,
call_function="sudo_set_commit_reveal_weights_interval",
- call_params={"netuid": netuid, "interval": "370"},
+ call_params={"netuid": netuid, "interval": "1"},
return_error_message=True,
)
- subtensor = bittensor.Subtensor(network="ws://localhost:9945")
assert (
subtensor.get_subnet_hyperparameters(
netuid=netuid
).commit_reveal_weights_interval
- == 370
- ), "Failed to set commit/reveal interval"
+ == 1
+ ), "Failed to set commit/reveal periods"
assert (
subtensor.weights_rate_limit(netuid=netuid) > 0
@@ -92,7 +91,6 @@ async def test_commit_and_reveal_weights(local_chain):
call_params={"netuid": netuid, "weights_set_rate_limit": "0"},
return_error_message=True,
)
- subtensor = bittensor.Subtensor(network="ws://localhost:9945")
assert (
subtensor.get_subnet_hyperparameters(netuid=netuid).weights_rate_limit == 0
), "Failed to set weights_rate_limit"
@@ -117,6 +115,8 @@ async def test_commit_and_reveal_weights(local_chain):
wait_for_finalization=True,
)
+ assert success is True
+
weight_commits = subtensor.query_module(
module="SubtensorModule",
name="WeightCommits",
@@ -124,18 +124,20 @@ async def test_commit_and_reveal_weights(local_chain):
)
# Assert that the committed weights are set correctly
assert weight_commits.value is not None, "Weight commit not found in storage"
- commit_hash, commit_block = weight_commits.value
+ commit_hash, commit_block, reveal_block, expire_block = weight_commits.value[0]
assert commit_block > 0, f"Invalid block number: {commit_block}"
# Query the WeightCommitRevealInterval storage map
- weight_commit_reveal_interval = subtensor.query_module(
- module="SubtensorModule", name="WeightCommitRevealInterval", params=[netuid]
+ reveal_periods = subtensor.query_module(
+ module="SubtensorModule", name="RevealPeriodEpochs", params=[netuid]
)
- interval = weight_commit_reveal_interval.value
- assert interval > 0, "Invalid WeightCommitRevealInterval"
+ periods = reveal_periods.value
+ assert periods > 0, "Invalid RevealPeriodEpochs"
# Wait until the reveal block range
- await wait_interval(interval, subtensor)
+ await wait_interval(
+ subtensor.get_subnet_hyperparameters(netuid=netuid).tempo, subtensor
+ )
# Reveal weights
success, message = subtensor.reveal_weights(
@@ -147,6 +149,9 @@ async def test_commit_and_reveal_weights(local_chain):
wait_for_inclusion=True,
wait_for_finalization=True,
)
+
+ assert success is True
+
time.sleep(10)
# Query the Weights storage map
diff --git a/tests/e2e_tests/test_dendrite.py b/tests/e2e_tests/test_dendrite.py
index e075326ca5..daeca22230 100644
--- a/tests/e2e_tests/test_dendrite.py
+++ b/tests/e2e_tests/test_dendrite.py
@@ -91,7 +91,6 @@ async def test_dendrite(local_chain):
[
f"{sys.executable}",
f'"{template_path}{templates_repo}/neurons/validator.py"',
- "--no_prompt",
"--netuid",
str(netuid),
"--subtensor.network",
diff --git a/tests/e2e_tests/test_incentive.py b/tests/e2e_tests/test_incentive.py
index 3e309f4f64..a95cf37660 100644
--- a/tests/e2e_tests/test_incentive.py
+++ b/tests/e2e_tests/test_incentive.py
@@ -70,7 +70,6 @@ async def test_incentive(local_chain):
[
f"{sys.executable}",
f'"{template_path}{templates_repo}/neurons/miner.py"',
- "--no_prompt",
"--netuid",
str(netuid),
"--subtensor.network",
@@ -103,7 +102,6 @@ async def test_incentive(local_chain):
[
f"{sys.executable}",
f'"{template_path}{templates_repo}/neurons/validator.py"',
- "--no_prompt",
"--netuid",
str(netuid),
"--subtensor.network",
diff --git a/tests/e2e_tests/test_subtensor_functions.py b/tests/e2e_tests/test_subtensor_functions.py
index 32d0f6e14d..7a4e1847e6 100644
--- a/tests/e2e_tests/test_subtensor_functions.py
+++ b/tests/e2e_tests/test_subtensor_functions.py
@@ -111,7 +111,6 @@ async def test_subtensor_extrinsics(local_chain):
[
f"{sys.executable}",
f'"{template_path}{templates_repo}/neurons/validator.py"',
- "--no_prompt",
"--netuid",
str(netuid),
"--subtensor.network",
diff --git a/tests/e2e_tests/test_transfer.py b/tests/e2e_tests/test_transfer.py
index b6be1cd6ae..62cf9723cc 100644
--- a/tests/e2e_tests/test_transfer.py
+++ b/tests/e2e_tests/test_transfer.py
@@ -32,7 +32,6 @@ def test_transfer(local_chain):
amount=2,
wait_for_finalization=True,
wait_for_inclusion=True,
- prompt=False,
)
# Account details after transfer
diff --git a/tests/helpers/__init__.py b/tests/helpers/__init__.py
index f876d249bd..3c6badb91c 100644
--- a/tests/helpers/__init__.py
+++ b/tests/helpers/__init__.py
@@ -18,7 +18,6 @@
import os
from .helpers import ( # noqa: F401
CLOSE_IN_VALUE,
- MockConsole,
__mock_wallet_factory__,
)
from bittensor_wallet.mock.wallet_mock import ( # noqa: F401
diff --git a/tests/helpers/helpers.py b/tests/helpers/helpers.py
index 417bd643b3..41109ee5e6 100644
--- a/tests/helpers/helpers.py
+++ b/tests/helpers/helpers.py
@@ -22,14 +22,11 @@
from bittensor_wallet.mock.wallet_mock import get_mock_hotkey
from bittensor_wallet.mock.wallet_mock import get_mock_wallet
-from rich.console import Console
-from rich.text import Text
-
from bittensor.utils.balance import Balance
from bittensor.core.chain_data import AxonInfo, NeuronInfo, PrometheusInfo
-def __mock_wallet_factory__(*args, **kwargs) -> _MockWallet:
+def __mock_wallet_factory__(*_, **__) -> _MockWallet:
"""Returns a mock wallet object."""
mock_wallet = get_mock_wallet()
@@ -118,53 +115,3 @@ def get_mock_neuron_by_uid(uid: int, **kwargs) -> NeuronInfo:
return get_mock_neuron(
uid=uid, hotkey=get_mock_hotkey(uid), coldkey=get_mock_coldkey(uid), **kwargs
)
-
-
-class MockStatus:
- def __enter__(self):
- return self
-
- def __exit__(self, exc_type, exc_value, traceback):
- pass
-
- def start(self):
- pass
-
- def stop(self):
- pass
-
- def update(self, *args, **kwargs):
- MockConsole().print(*args, **kwargs)
-
-
-class MockConsole:
- """
- Mocks the console object for status and print.
- Captures the last print output as a string.
- """
-
- captured_print = None
-
- def status(self, *args, **kwargs):
- return MockStatus()
-
- def print(self, *args, **kwargs):
- console = Console(
- width=1000, no_color=True, markup=False
- ) # set width to 1000 to avoid truncation
- console.begin_capture()
- console.print(*args, **kwargs)
- self.captured_print = console.end_capture()
-
- def clear(self, *args, **kwargs):
- pass
-
- @staticmethod
- def remove_rich_syntax(text: str) -> str:
- """
- Removes rich syntax from the given text.
- Removes markup and ansi syntax.
- """
- output_no_syntax = Text.from_ansi(Text.from_markup(text).plain).plain
-
- return output_no_syntax
diff --git a/tests/integration_tests/test_subtensor_integration.py b/tests/integration_tests/test_subtensor_integration.py
index e252cb63f1..bacb340f2c 100644
--- a/tests/integration_tests/test_subtensor_integration.py
+++ b/tests/integration_tests/test_subtensor_integration.py
@@ -15,7 +15,9 @@
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
+import random
import unittest
+from queue import Empty as QueueEmpty
from unittest.mock import MagicMock, patch
import pytest
@@ -28,7 +30,6 @@
from bittensor.utils.mock import MockSubtensor
from tests.helpers import (
get_mock_coldkey,
- MockConsole,
get_mock_keypair,
get_mock_wallet,
)
@@ -50,12 +51,6 @@ def setUp(self):
@classmethod
def setUpClass(cls) -> None:
- # mock rich console status
- mock_console = MockConsole()
- cls._mock_console_patcher = patch(
- "bittensor.core.settings.bt_console", mock_console
- )
- cls._mock_console_patcher.start()
# Keeps the same mock network for all tests. This stops the network from being re-setup for each test.
cls._mock_subtensor = MockSubtensor()
cls._do_setup_subnet()
@@ -67,10 +62,6 @@ def _do_setup_subnet(cls):
# Setup the mock subnet 3
cls._mock_subtensor.create_subnet(netuid=3)
- @classmethod
- def tearDownClass(cls) -> None:
- cls._mock_console_patcher.stop()
-
def test_network_overrides(self):
"""Tests that the network overrides the chain_endpoint."""
# Argument importance: chain_endpoint (arg) > network (arg) > config.subtensor.chain_endpoint > config.subtensor.network
@@ -247,6 +238,170 @@ def test_defaults_to_finney(self):
assert sub.network == "finney"
assert sub.chain_endpoint == settings.FINNEY_ENTRYPOINT
+ def test_registration_multiprocessed_already_registered(self):
+ work_blocks_before_is_registered = random.randint(5, 10)
+ # return False each work block but return True after a random number of blocks
+ is_registered_return_values = (
+ [False for _ in range(work_blocks_before_is_registered)]
+ + [True]
+ + [True, False]
+ )
+ # this should pass the initial False check in the subtensor class and then return True because the neuron is already registered
+
+ mock_neuron = MagicMock()
+ mock_neuron.is_null = True
+
+ # patch solution queue to return None
+ with patch(
+ "multiprocessing.queues.Queue.get", return_value=None
+ ) as mock_queue_get:
+ # patch time queue get to raise Empty exception
+ with patch(
+ "multiprocessing.queues.Queue.get_nowait", side_effect=QueueEmpty
+ ) as mock_queue_get_nowait:
+ wallet = get_mock_wallet(
+ hotkey=get_mock_keypair(0, self.id()),
+ coldkey=get_mock_keypair(1, self.id()),
+ )
+ self.subtensor.is_hotkey_registered = MagicMock(
+ side_effect=is_registered_return_values
+ )
+
+ self.subtensor.difficulty = MagicMock(return_value=1)
+ self.subtensor.get_neuron_for_pubkey_and_subnet = MagicMock(
+ side_effect=mock_neuron
+ )
+ self.subtensor._do_pow_register = MagicMock(return_value=(True, None))
+
+ # should return True
+ assert self.subtensor.register(
+ wallet=wallet, netuid=3, num_processes=3, update_interval=5
+ )
+
+ # calls until True and once again before exiting subtensor class
+ # This assertion is currently broken when difficulty is too low
+ assert (
+ self.subtensor.is_hotkey_registered.call_count
+ == work_blocks_before_is_registered + 2
+ )
+
+ def test_registration_partly_failed(self):
+ do_pow_register_mock = MagicMock(
+ side_effect=[(False, "Failed"), (False, "Failed"), (True, None)]
+ )
+
+ def is_registered_side_effect(*args, **kwargs):
+ nonlocal do_pow_register_mock
+ return do_pow_register_mock.call_count < 3
+
+ current_block = [i for i in range(0, 100)]
+
+ wallet = get_mock_wallet(
+ hotkey=get_mock_keypair(0, self.id()),
+ coldkey=get_mock_keypair(1, self.id()),
+ )
+
+ self.subtensor.get_neuron_for_pubkey_and_subnet = MagicMock(
+ return_value=bittensor.NeuronInfo.get_null_neuron()
+ )
+ self.subtensor.is_hotkey_registered = MagicMock(
+ side_effect=is_registered_side_effect
+ )
+
+ self.subtensor.difficulty = MagicMock(return_value=1)
+ self.subtensor.get_current_block = MagicMock(side_effect=current_block)
+ self.subtensor._do_pow_register = do_pow_register_mock
+
+ # should return True
+ self.assertTrue(
+ self.subtensor.register(
+ wallet=wallet, netuid=3, num_processes=3, update_interval=5
+ ),
+ msg="Registration should succeed",
+ )
+
+ def test_registration_failed(self):
+ is_registered_return_values = [False for _ in range(100)]
+ current_block = [i for i in range(0, 100)]
+ mock_neuron = MagicMock()
+ mock_neuron.is_null = True
+
+ with patch(
+ "bittensor.core.extrinsics.registration.create_pow", return_value=None
+ ) as mock_create_pow:
+ wallet = get_mock_wallet(
+ hotkey=get_mock_keypair(0, self.id()),
+ coldkey=get_mock_keypair(1, self.id()),
+ )
+
+ self.subtensor.is_hotkey_registered = MagicMock(
+ side_effect=is_registered_return_values
+ )
+
+ self.subtensor.get_current_block = MagicMock(side_effect=current_block)
+ self.subtensor.get_neuron_for_pubkey_and_subnet = MagicMock(
+ return_value=mock_neuron
+ )
+ self.subtensor.substrate.get_block_hash = MagicMock(
+ return_value="0x" + "0" * 64
+ )
+ self.subtensor._do_pow_register = MagicMock(return_value=(False, "Failed"))
+
+            # should NOT return True (all three PoW attempts fail)
+ self.assertIsNot(
+ self.subtensor.register(wallet=wallet, netuid=3),
+ True,
+ msg="Registration should fail",
+ )
+ self.assertEqual(mock_create_pow.call_count, 3)
+
+ def test_registration_stale_then_continue(self):
+ # verify that after a stale solution, to solve will continue without exiting
+
+ class ExitEarly(Exception):
+ pass
+
+ mock_is_stale = MagicMock(side_effect=[True, False])
+
+ mock_do_pow_register = MagicMock(side_effect=ExitEarly())
+
+ mock_subtensor_self = MagicMock(
+ neuron_for_pubkey=MagicMock(
+ return_value=MagicMock(is_null=True)
+ ), # not registered
+ substrate=MagicMock(
+ get_block_hash=MagicMock(return_value="0x" + "0" * 64),
+ ),
+ )
+
+ mock_wallet = MagicMock()
+
+ mock_create_pow = MagicMock(return_value=MagicMock(is_stale=mock_is_stale))
+
+ with patch(
+ "bittensor.core.extrinsics.registration.create_pow", mock_create_pow
+ ), patch(
+ "bittensor.core.extrinsics.registration._do_pow_register",
+ mock_do_pow_register,
+ ):
+ # should create a pow and check if it is stale
+ # then should create a new pow and check if it is stale
+ # then should enter substrate and exit early because of test
+ self.subtensor.get_neuron_for_pubkey_and_subnet = MagicMock(
+ return_value=bittensor.NeuronInfo.get_null_neuron()
+ )
+ with pytest.raises(ExitEarly):
+ bittensor.subtensor.register(mock_subtensor_self, mock_wallet, netuid=3)
+ self.assertEqual(
+ mock_create_pow.call_count, 2, msg="must try another pow after stale"
+ )
+ self.assertEqual(mock_is_stale.call_count, 2)
+ self.assertEqual(
+ mock_do_pow_register.call_count,
+ 1,
+ msg="only tries to submit once, then exits",
+ )
+
if __name__ == "__main__":
unittest.main()
diff --git a/tests/unit_tests/extrinsics/test_prometheus.py b/tests/unit_tests/extrinsics/test_prometheus.py
deleted file mode 100644
index dbcfed1e47..0000000000
--- a/tests/unit_tests/extrinsics/test_prometheus.py
+++ /dev/null
@@ -1,167 +0,0 @@
-# The MIT License (MIT)
-# Copyright © 2024 Opentensor Foundation
-#
-# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
-# documentation files (the “Software”), to deal in the Software without restriction, including without limitation
-# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
-# and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
-#
-# The above copyright notice and this permission notice shall be included in all copies or substantial portions of
-# the Software.
-#
-# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
-# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
-# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-# DEALINGS IN THE SOFTWARE.
-
-from unittest.mock import MagicMock, patch
-
-import pytest
-from bittensor_wallet import Wallet
-
-from bittensor.core.extrinsics.prometheus import (
- prometheus_extrinsic,
-)
-from bittensor.core.subtensor import Subtensor
-from bittensor.core.settings import version_as_int
-
-
-# Mocking the bittensor and networking modules
-@pytest.fixture
-def mock_bittensor():
- with patch("bittensor.core.subtensor.Subtensor") as mock:
- yield mock
-
-
-@pytest.fixture
-def mock_wallet():
- with patch("bittensor_wallet.Wallet") as mock:
- yield mock
-
-
-@pytest.fixture
-def mock_net():
- with patch("bittensor.utils.networking") as mock:
- yield mock
-
-
-@pytest.mark.parametrize(
- "ip, port, netuid, wait_for_inclusion, wait_for_finalization, expected_result, test_id",
- [
- (None, 9221, 0, False, True, True, "happy-path-default-ip"),
- ("192.168.0.1", 9221, 0, False, True, True, "happy-path-custom-ip"),
- (None, 9221, 0, True, False, True, "happy-path-wait-for-inclusion"),
- (None, 9221, 0, False, False, True, "happy-path-no-waiting"),
- ],
-)
-def test_prometheus_extrinsic_happy_path(
- mock_bittensor,
- mock_wallet,
- mock_net,
- ip,
- port,
- netuid,
- wait_for_inclusion,
- wait_for_finalization,
- expected_result,
- test_id,
-):
- # Arrange
- subtensor = MagicMock(spec=Subtensor)
- subtensor.network = "test_network"
- subtensor.substrate = MagicMock()
- wallet = MagicMock(spec=Wallet)
- mock_net.get_external_ip.return_value = "192.168.0.1"
- mock_net.ip_to_int.return_value = 3232235521 # IP in integer form
- mock_net.ip_version.return_value = 4
- neuron = MagicMock()
- neuron.is_null = False
- neuron.prometheus_info.version = version_as_int
- neuron.prometheus_info.ip = 3232235521
- neuron.prometheus_info.port = port
- neuron.prometheus_info.ip_type = 4
- subtensor.get_neuron_for_pubkey_and_subnet.return_value = neuron
- subtensor._do_serve_prometheus.return_value = (True, None)
-
- # Act
- result = prometheus_extrinsic(
- subtensor=subtensor,
- wallet=wallet,
- ip=ip,
- port=port,
- netuid=netuid,
- wait_for_inclusion=wait_for_inclusion,
- wait_for_finalization=wait_for_finalization,
- )
-
- # Assert
- assert result == expected_result, f"Test ID: {test_id}"
-
-
-# Edge cases
-@pytest.mark.parametrize(
- "ip, port, netuid, test_id",
- [
- ("0.0.0.0", 0, 0, "edge-case-min-values"),
- ("255.255.255.255", 65535, 2147483647, "edge-case-max-values"),
- ],
-)
-def test_prometheus_extrinsic_edge_cases(
- mock_bittensor, mock_wallet, mock_net, ip, port, netuid, test_id
-):
- # Arrange
- subtensor = MagicMock(spec=Subtensor)
- subtensor.network = "test_network"
- subtensor.substrate = MagicMock()
- wallet = MagicMock(spec=Wallet)
- mock_net.get_external_ip.return_value = ip
- mock_net.ip_to_int.return_value = 3232235521 # IP in integer form
- mock_net.ip_version.return_value = 4
- neuron = MagicMock()
- neuron.is_null = True
- subtensor.get_neuron_for_pubkey_and_subnet.return_value = neuron
- subtensor._do_serve_prometheus.return_value = (True, None)
-
- # Act
- result = prometheus_extrinsic(
- subtensor=subtensor,
- wallet=wallet,
- ip=ip,
- port=port,
- netuid=netuid,
- wait_for_inclusion=False,
- wait_for_finalization=True,
- )
-
- # Assert
- assert result is True, f"Test ID: {test_id}"
-
-
-# Error cases
-def test_prometheus_extrinsic_error_cases(mock_bittensor, mock_wallet, mocker):
- # Arrange
- subtensor = MagicMock(spec=Subtensor)
- subtensor.network = "test_network"
- subtensor.substrate = MagicMock()
- subtensor.substrate.websocket.sock.getsockopt.return_value = 0
- wallet = MagicMock(spec=Wallet)
- neuron = MagicMock()
- neuron.is_null = True
- subtensor.get_neuron_for_pubkey_and_subnet.return_value = neuron
- subtensor._do_serve_prometheus.return_value = (True,)
-
- with mocker.patch(
- "bittensor.utils.networking.get_external_ip", side_effect=RuntimeError
- ):
- # Act & Assert
- with pytest.raises(RuntimeError):
- prometheus_extrinsic(
- subtensor=subtensor,
- wallet=wallet,
- ip=None,
- port=9221,
- netuid=1,
- wait_for_inclusion=False,
- wait_for_finalization=True,
- )
diff --git a/tests/unit_tests/extrinsics/test_registration.py b/tests/unit_tests/extrinsics/test_registration.py
new file mode 100644
index 0000000000..18d14fac10
--- /dev/null
+++ b/tests/unit_tests/extrinsics/test_registration.py
@@ -0,0 +1,224 @@
+# The MIT License (MIT)
+# Copyright © 2024 Opentensor Foundation
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
+# documentation files (the “Software”), to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
+# and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all copies or substantial portions of
+# the Software.
+#
+# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
+# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+# DEALINGS IN THE SOFTWARE.
+
+import pytest
+from bittensor_wallet import Wallet
+
+from bittensor.core.extrinsics import registration
+from bittensor.core.subtensor import Subtensor
+from bittensor.utils.registration import POWSolution
+
+
+# Mocking external dependencies
+@pytest.fixture
+def mock_subtensor(mocker):
+ mock = mocker.MagicMock(spec=Subtensor)
+ mock.network = "mock_network"
+ mock.substrate = mocker.MagicMock()
+ return mock
+
+
+@pytest.fixture
+def mock_wallet(mocker):
+ mock = mocker.MagicMock(spec=Wallet)
+ mock.coldkeypub.ss58_address = "mock_address"
+ mock.coldkey = mocker.MagicMock()
+ mock.hotkey = mocker.MagicMock()
+ mock.hotkey.ss58_address = "fake_ss58_address"
+ return mock
+
+
+@pytest.fixture
+def mock_pow_solution(mocker):
+ mock = mocker.MagicMock(spec=POWSolution)
+ mock.block_number = 123
+ mock.nonce = 456
+ mock.seal = [0, 1, 2, 3]
+ mock.is_stale.return_value = False
+ return mock
+
+
+@pytest.fixture
+def mock_new_wallet(mocker):
+ mock = mocker.MagicMock(spec=Wallet)
+ mock.coldkeypub.ss58_address = "mock_address"
+ mock.coldkey = mocker.MagicMock()
+ mock.hotkey = mocker.MagicMock()
+ return mock
+
+
+@pytest.mark.parametrize(
+ "subnet_exists, neuron_is_null, cuda_available, expected_result, test_id",
+ [
+ (False, True, True, False, "subnet-does-not-exist"),
+ (True, False, True, True, "neuron-already-registered"),
+ (True, True, False, False, "cuda-unavailable"),
+ ],
+)
+def test_register_extrinsic_without_pow(
+ mock_subtensor,
+ mock_wallet,
+ subnet_exists,
+ neuron_is_null,
+ cuda_available,
+ expected_result,
+ test_id,
+ mocker,
+):
+ # Arrange
+ with (
+ mocker.patch.object(
+ mock_subtensor, "subnet_exists", return_value=subnet_exists
+ ),
+ mocker.patch.object(
+ mock_subtensor,
+ "get_neuron_for_pubkey_and_subnet",
+ return_value=mocker.MagicMock(is_null=neuron_is_null),
+ ),
+ mocker.patch("torch.cuda.is_available", return_value=cuda_available),
+ mocker.patch(
+ "bittensor.utils.registration._get_block_with_retry",
+ return_value=(0, 0, "00ff11ee"),
+ ),
+ ):
+ # Act
+ result = registration.register_extrinsic(
+ subtensor=mock_subtensor,
+ wallet=mock_wallet,
+ netuid=123,
+ wait_for_inclusion=True,
+ wait_for_finalization=True,
+ max_allowed_attempts=3,
+ output_in_place=True,
+ cuda=True,
+ dev_id=0,
+ tpb=256,
+ num_processes=None,
+ update_interval=None,
+ log_verbose=False,
+ )
+
+ # Assert
+ assert result == expected_result, f"Test failed for test_id: {test_id}"
+
+
+@pytest.mark.parametrize(
+ "pow_success, pow_stale, registration_success, cuda, hotkey_registered, expected_result, test_id",
+ [
+ (True, False, True, False, False, True, "successful-with-valid-pow"),
+ (True, False, True, True, False, True, "successful-with-valid-cuda-pow"),
+ # Pow failed but key was registered already
+ (False, False, False, False, True, True, "hotkey-registered"),
+ # Pow was a success but registration failed with error 'key already registered'
+ (True, False, False, False, False, True, "registration-fail-key-registered"),
+ ],
+)
+def test_register_extrinsic_with_pow(
+ mock_subtensor,
+ mock_wallet,
+ mock_pow_solution,
+ pow_success,
+ pow_stale,
+ registration_success,
+ cuda,
+ hotkey_registered,
+ expected_result,
+ test_id,
+ mocker,
+):
+ # Arrange
+ with mocker.patch(
+ "bittensor.utils.registration._solve_for_difficulty_fast",
+ return_value=mock_pow_solution if pow_success else None,
+ ), mocker.patch(
+ "bittensor.utils.registration._solve_for_difficulty_fast_cuda",
+ return_value=mock_pow_solution if pow_success else None,
+ ), mocker.patch(
+ "bittensor.core.extrinsics.registration._do_pow_register",
+ return_value=(registration_success, "HotKeyAlreadyRegisteredInSubNet"),
+ ), mocker.patch("torch.cuda.is_available", return_value=cuda):
+ # Act
+ if pow_success:
+ mock_pow_solution.is_stale.return_value = pow_stale
+
+ if not pow_success and hotkey_registered:
+ mock_subtensor.is_hotkey_registered = mocker.MagicMock(
+ return_value=hotkey_registered
+ )
+
+ result = registration.register_extrinsic(
+ subtensor=mock_subtensor,
+ wallet=mock_wallet,
+ netuid=123,
+ wait_for_inclusion=True,
+ wait_for_finalization=True,
+ max_allowed_attempts=3,
+ output_in_place=True,
+ cuda=cuda,
+ dev_id=0,
+ tpb=256,
+ num_processes=None,
+ update_interval=None,
+ log_verbose=False,
+ )
+
+ # Assert
+ assert result == expected_result, f"Test failed for test_id: {test_id}."
+
+
+@pytest.mark.parametrize(
+ "subnet_exists, neuron_is_null, recycle_success, is_registered, expected_result, test_id",
+ [
+ # Happy paths
+ (True, False, None, None, True, "neuron-not-null"),
+ (True, True, True, True, True, "happy-path-wallet-registered"),
+ # Error paths
+ (False, True, False, None, False, "subnet-non-existence"),
+ (True, True, False, False, False, "error-path-recycling-failed"),
+ (True, True, True, False, False, "error-path-not-registered"),
+ ],
+)
+def test_burned_register_extrinsic(
+ mock_subtensor,
+ mock_wallet,
+ subnet_exists,
+ neuron_is_null,
+ recycle_success,
+ is_registered,
+ expected_result,
+ test_id,
+ mocker,
+):
+ # Arrange
+ with mocker.patch.object(
+ mock_subtensor, "subnet_exists", return_value=subnet_exists
+ ), mocker.patch.object(
+ mock_subtensor,
+ "get_neuron_for_pubkey_and_subnet",
+ return_value=mocker.MagicMock(is_null=neuron_is_null),
+ ), mocker.patch(
+ "bittensor.core.extrinsics.registration._do_burned_register",
+ return_value=(recycle_success, "Mock error message"),
+ ), mocker.patch.object(
+ mock_subtensor, "is_hotkey_registered", return_value=is_registered
+ ):
+ # Act
+ result = registration.burned_register_extrinsic(
+ subtensor=mock_subtensor, wallet=mock_wallet, netuid=123
+ )
+ # Assert
+ assert result == expected_result, f"Test failed for test_id: {test_id}"
diff --git a/tests/unit_tests/extrinsics/test_root.py b/tests/unit_tests/extrinsics/test_root.py
new file mode 100644
index 0000000000..96d90fe09a
--- /dev/null
+++ b/tests/unit_tests/extrinsics/test_root.py
@@ -0,0 +1,242 @@
+import pytest
+from bittensor.core.subtensor import Subtensor
+from bittensor.core.extrinsics import root
+
+
+@pytest.fixture
+def mock_subtensor(mocker):
+ mock = mocker.MagicMock(spec=Subtensor)
+ mock.network = "magic_mock"
+ return mock
+
+
+@pytest.fixture
+def mock_wallet(mocker):
+ mock = mocker.MagicMock()
+ mock.hotkey.ss58_address = "fake_hotkey_address"
+ return mock
+
+
+@pytest.mark.parametrize(
+ "wait_for_inclusion, wait_for_finalization, hotkey_registered, registration_success, expected_result",
+ [
+ (
+ False,
+ True,
+ [True, None],
+ True,
+ True,
+ ), # Already registered after attempt
+ (
+ False,
+ True,
+ [False, True],
+ True,
+ True,
+ ), # Registration succeeds with user confirmation
+ (False, True, [False, False], False, None), # Registration fails
+ (
+ False,
+ True,
+ [False, False],
+ True,
+ None,
+ ), # Registration succeeds but neuron not found
+ ],
+ ids=[
+ "success-already-registered",
+ "success-registration-succeeds",
+ "failure-registration-failed",
+ "failure-neuron-not-found",
+ ],
+)
+def test_root_register_extrinsic(
+ mock_subtensor,
+ mock_wallet,
+ wait_for_inclusion,
+ wait_for_finalization,
+ hotkey_registered,
+ registration_success,
+ expected_result,
+ mocker,
+):
+ # Arrange
+ mock_subtensor.is_hotkey_registered.side_effect = hotkey_registered
+
+ # Preps
+ mock_register = mocker.Mock(
+ return_value=(registration_success, "Error registering")
+ )
+ root._do_root_register = mock_register
+
+ # Act
+ result = root.root_register_extrinsic(
+ subtensor=mock_subtensor,
+ wallet=mock_wallet,
+ wait_for_inclusion=wait_for_inclusion,
+ wait_for_finalization=wait_for_finalization,
+ )
+ # Assert
+ assert result == expected_result
+
+ if not hotkey_registered[0]:
+ mock_register.assert_called_once()
+
+
+@pytest.mark.parametrize(
+ "wait_for_inclusion, wait_for_finalization, netuids, weights, expected_success",
+ [
+ (True, False, [1, 2], [0.5, 0.5], True), # Success - weights set
+ (
+ False,
+ False,
+ [1, 2],
+ [0.5, 0.5],
+ True,
+ ), # Success - weights set no wait
+ (
+ True,
+ False,
+ [1, 2],
+ [2000, 20],
+ True,
+ ), # Success - large value to be normalized
+ (
+ True,
+ False,
+ [1, 2],
+ [2000, 0],
+ True,
+ ), # Success - single large value
+ (
+ True,
+ False,
+ [1, 2],
+ [0.5, 0.5],
+ False,
+ ), # Failure - setting weights failed
+ (
+ True,
+ False,
+ [],
+ [],
+ False,
+        ), # Exception caught - ValueError 'min() arg is an empty sequence'
+ ],
+ ids=[
+ "success-weights-set",
+ "success-not-wait",
+ "success-large-value",
+ "success-single-value",
+ "failure-setting-weights",
+ "failure-value-error-exception",
+ ],
+)
+def test_set_root_weights_extrinsic(
+ mock_subtensor,
+ mock_wallet,
+ wait_for_inclusion,
+ wait_for_finalization,
+ netuids,
+ weights,
+ expected_success,
+ mocker,
+):
+ # Preps
+ root._do_set_root_weights = mocker.Mock(
+ return_value=(expected_success, "Mock error")
+ )
+ mock_subtensor.min_allowed_weights = mocker.Mock(return_value=0)
+ mock_subtensor.max_weight_limit = mocker.Mock(return_value=1)
+
+ # Call
+ result = root.set_root_weights_extrinsic(
+ subtensor=mock_subtensor,
+ wallet=mock_wallet,
+ netuids=netuids,
+ weights=weights,
+ version_key=0,
+ wait_for_inclusion=wait_for_inclusion,
+ wait_for_finalization=wait_for_finalization,
+ )
+
+ # Asserts
+ assert result == expected_success
+
+
+@pytest.mark.parametrize(
+ "wait_for_inclusion, wait_for_finalization, netuids, weights, user_response, expected_success",
+ [
+ (True, False, [1, 2], [0.5, 0.5], True, True), # Success - weights set
+ (
+ False,
+ False,
+ [1, 2],
+ [0.5, 0.5],
+ None,
+ True,
+ ), # Success - weights set no wait
+ (
+ True,
+ False,
+ [1, 2],
+ [2000, 20],
+ True,
+ True,
+ ), # Success - large value to be normalized
+ (
+ True,
+ False,
+ [1, 2],
+ [2000, 0],
+ True,
+ True,
+ ), # Success - single large value
+ (
+ True,
+ False,
+ [1, 2],
+ [0.5, 0.5],
+ None,
+ False,
+ ), # Failure - setting weights failed
+ (
+ True,
+ False,
+ [],
+ [],
+ False,
+ False,
+        ), # Exception caught - ValueError 'min() arg is an empty sequence'
+ ],
+ ids=[
+ "success-weights-set",
+ "success-not-wait",
+ "success-large-value",
+ "success-single-value",
+ "failure-setting-weights",
+ "failure-value-error-exception",
+ ],
+)
+def test_set_root_weights_extrinsic_torch(
+ mock_subtensor,
+ mock_wallet,
+ wait_for_inclusion,
+ wait_for_finalization,
+ netuids,
+ weights,
+ user_response,
+ expected_success,
+ force_legacy_torch_compatible_api,
+ mocker,
+):
+ test_set_root_weights_extrinsic(
+ mock_subtensor,
+ mock_wallet,
+ wait_for_inclusion,
+ wait_for_finalization,
+ netuids,
+ weights,
+ expected_success,
+ mocker,
+ )
diff --git a/tests/unit_tests/extrinsics/test_serving.py b/tests/unit_tests/extrinsics/test_serving.py
index a57e32d01c..46eef17888 100644
--- a/tests/unit_tests/extrinsics/test_serving.py
+++ b/tests/unit_tests/extrinsics/test_serving.py
@@ -50,7 +50,7 @@ def mock_axon(mock_wallet, mocker):
@pytest.mark.parametrize(
- "ip,port,protocol,netuid,placeholder1,placeholder2,wait_for_inclusion,wait_for_finalization,prompt,expected,test_id,",
+ "ip,port,protocol,netuid,placeholder1,placeholder2,wait_for_inclusion,wait_for_finalization,expected,test_id,",
[
(
"192.168.1.1",
@@ -61,7 +61,6 @@ def mock_axon(mock_wallet, mocker):
0,
False,
True,
- False,
True,
"happy-path-no-wait",
),
@@ -74,7 +73,6 @@ def mock_axon(mock_wallet, mocker):
1,
True,
False,
- False,
True,
"happy-path-wait-for-inclusion",
),
@@ -88,14 +86,13 @@ def mock_axon(mock_wallet, mocker):
False,
True,
True,
- True,
- "happy-path-wait-for-finalization-and-prompt",
+ "happy-path-wait-for-finalization",
),
],
ids=[
"happy-path-no-wait",
"happy-path-wait-for-inclusion",
- "happy-path-wait-for-finalization-and-prompt",
+ "happy-path-wait-for-finalization",
],
)
def test_serve_extrinsic_happy_path(
@@ -109,39 +106,33 @@ def test_serve_extrinsic_happy_path(
placeholder2,
wait_for_inclusion,
wait_for_finalization,
- prompt,
expected,
test_id,
mocker,
):
# Arrange
serving.do_serve_axon = mocker.MagicMock(return_value=(True, ""))
- with patch(
- "bittensor.core.extrinsics.serving.Confirm.ask",
- return_value=True,
- ):
- # Act
- result = serving.serve_extrinsic(
- mock_subtensor,
- mock_wallet,
- ip,
- port,
- protocol,
- netuid,
- placeholder1,
- placeholder2,
- wait_for_inclusion,
- wait_for_finalization,
- prompt,
- )
+ # Act
+ result = serving.serve_extrinsic(
+ mock_subtensor,
+ mock_wallet,
+ ip,
+ port,
+ protocol,
+ netuid,
+ placeholder1,
+ placeholder2,
+ wait_for_inclusion,
+ wait_for_finalization,
+ )
- # Assert
- assert result == expected, f"Test ID: {test_id}"
+ # Assert
+ assert result == expected, f"Test ID: {test_id}"
# Various edge cases
@pytest.mark.parametrize(
- "ip,port,protocol,netuid,placeholder1,placeholder2,wait_for_inclusion,wait_for_finalization,prompt,expected,test_id,",
+ "ip,port,protocol,netuid,placeholder1,placeholder2,wait_for_inclusion,wait_for_finalization,expected,test_id,",
[
(
"192.168.1.4",
@@ -152,7 +143,6 @@ def test_serve_extrinsic_happy_path(
3,
True,
True,
- False,
True,
"edge_case_max_values",
),
@@ -170,39 +160,33 @@ def test_serve_extrinsic_edge_cases(
placeholder2,
wait_for_inclusion,
wait_for_finalization,
- prompt,
expected,
test_id,
mocker,
):
# Arrange
serving.do_serve_axon = mocker.MagicMock(return_value=(True, ""))
- with patch(
- "bittensor.core.extrinsics.serving.Confirm.ask",
- return_value=True,
- ):
- # Act
- result = serving.serve_extrinsic(
- mock_subtensor,
- mock_wallet,
- ip,
- port,
- protocol,
- netuid,
- placeholder1,
- placeholder2,
- wait_for_inclusion,
- wait_for_finalization,
- prompt,
- )
+ # Act
+ result = serving.serve_extrinsic(
+ mock_subtensor,
+ mock_wallet,
+ ip,
+ port,
+ protocol,
+ netuid,
+ placeholder1,
+ placeholder2,
+ wait_for_inclusion,
+ wait_for_finalization,
+ )
- # Assert
- assert result == expected, f"Test ID: {test_id}"
+ # Assert
+ assert result == expected, f"Test ID: {test_id}"
# Various error cases
@pytest.mark.parametrize(
- "ip,port,protocol,netuid,placeholder1,placeholder2,wait_for_inclusion,wait_for_finalization,prompt,expected_error_message,test_id,",
+ "ip,port,protocol,netuid,placeholder1,placeholder2,wait_for_inclusion,wait_for_finalization,expected_error_message,test_id,",
[
(
"192.168.1.5",
@@ -214,7 +198,6 @@ def test_serve_extrinsic_edge_cases(
True,
True,
False,
- False,
"error-case-failed-serve",
),
],
@@ -231,51 +214,44 @@ def test_serve_extrinsic_error_cases(
placeholder2,
wait_for_inclusion,
wait_for_finalization,
- prompt,
expected_error_message,
test_id,
mocker,
):
# Arrange
serving.do_serve_axon = mocker.MagicMock(return_value=(False, "Error serving axon"))
- with patch(
- "bittensor.core.extrinsics.serving.Confirm.ask",
- return_value=True,
- ):
- # Act
- result = serving.serve_extrinsic(
- mock_subtensor,
- mock_wallet,
- ip,
- port,
- protocol,
- netuid,
- placeholder1,
- placeholder2,
- wait_for_inclusion,
- wait_for_finalization,
- prompt,
- )
+ # Act
+ result = serving.serve_extrinsic(
+ mock_subtensor,
+ mock_wallet,
+ ip,
+ port,
+ protocol,
+ netuid,
+ placeholder1,
+ placeholder2,
+ wait_for_inclusion,
+ wait_for_finalization,
+ )
- # Assert
- assert result == expected_error_message, f"Test ID: {test_id}"
+ # Assert
+ assert result == expected_error_message, f"Test ID: {test_id}"
@pytest.mark.parametrize(
- "netuid, wait_for_inclusion, wait_for_finalization, prompt, external_ip, external_ip_success, serve_success, expected_result, test_id",
+ "netuid, wait_for_inclusion, wait_for_finalization, external_ip, external_ip_success, serve_success, expected_result, test_id",
[
# Happy path test
- (1, False, True, False, "192.168.1.1", True, True, True, "happy-ext-ip"),
- (1, False, True, True, None, True, True, True, "happy-net-external-ip"),
+ (1, False, True, "192.168.1.1", True, True, True, "happy-ext-ip"),
+ (1, False, True, None, True, True, True, "happy-net-external-ip"),
# Edge cases
- (1, True, True, False, "192.168.1.1", True, True, True, "edge-case-wait"),
+ (1, True, True, "192.168.1.1", True, True, True, "edge-case-wait"),
# Error cases
- (1, False, True, False, None, False, True, False, "error-fetching-external-ip"),
+ (1, False, True, None, False, True, False, "error-fetching-external-ip"),
(
1,
False,
True,
- False,
"192.168.1.1",
True,
False,
@@ -297,7 +273,6 @@ def test_serve_axon_extrinsic(
netuid,
wait_for_inclusion,
wait_for_finalization,
- prompt,
external_ip,
external_ip_success,
serve_success,
diff --git a/tests/unit_tests/extrinsics/test_set_weights.py b/tests/unit_tests/extrinsics/test_set_weights.py
index 9c32fc9bdf..0cd663f0b7 100644
--- a/tests/unit_tests/extrinsics/test_set_weights.py
+++ b/tests/unit_tests/extrinsics/test_set_weights.py
@@ -28,7 +28,7 @@ def mock_wallet():
@pytest.mark.parametrize(
- "uids, weights, version_key, wait_for_inclusion, wait_for_finalization, prompt, user_accepts, expected_success, expected_message",
+ "uids, weights, version_key, wait_for_inclusion, wait_for_finalization, expected_success, expected_message",
[
(
[1, 2],
@@ -37,8 +37,6 @@ def mock_wallet():
True,
False,
True,
- True,
- True,
"Successfully set weights and Finalized.",
),
(
@@ -47,8 +45,6 @@ def mock_wallet():
0,
False,
False,
- False,
- True,
True,
"Not waiting for finalization or inclusion.",
),
@@ -58,18 +54,14 @@ def mock_wallet():
0,
True,
False,
- True,
- True,
False,
"Subtensor returned `UnknownError(UnknownType)` error. This means: `Unknown Description`.",
),
- ([1, 2], [0.5, 0.5], 0, True, True, True, False, False, "Prompt refused."),
],
ids=[
"happy-flow",
"not-waiting-finalization-inclusion",
"error-flow",
- "prompt-refused",
],
)
def test_set_weights_extrinsic(
@@ -80,8 +72,6 @@ def test_set_weights_extrinsic(
version_key,
wait_for_inclusion,
wait_for_finalization,
- prompt,
- user_accepts,
expected_success,
expected_message,
):
@@ -90,7 +80,7 @@ def test_set_weights_extrinsic(
with patch(
"bittensor.utils.weight_utils.convert_weights_and_uids_for_emit",
return_value=(uids_tensor, weights_tensor),
- ), patch("rich.prompt.Confirm.ask", return_value=user_accepts), patch(
+ ), patch(
"bittensor.core.extrinsics.set_weights.do_set_weights",
return_value=(expected_success, "Mock error message"),
) as mock_do_set_weights:
@@ -103,22 +93,10 @@ def test_set_weights_extrinsic(
version_key=version_key,
wait_for_inclusion=wait_for_inclusion,
wait_for_finalization=wait_for_finalization,
- prompt=prompt,
)
assert result == expected_success, f"Test {expected_message} failed."
assert message == expected_message, f"Test {expected_message} failed."
- if user_accepts is not False:
- mock_do_set_weights.assert_called_once_with(
- self=mock_subtensor,
- wallet=mock_wallet,
- netuid=123,
- uids=uids_tensor,
- vals=weights_tensor,
- version_key=version_key,
- wait_for_finalization=wait_for_finalization,
- wait_for_inclusion=wait_for_inclusion,
- )
def test_do_set_weights_is_success(mock_subtensor, mocker):
diff --git a/tests/unit_tests/test_chain_data.py b/tests/unit_tests/test_chain_data.py
index 353f697d46..65232e3382 100644
--- a/tests/unit_tests/test_chain_data.py
+++ b/tests/unit_tests/test_chain_data.py
@@ -364,116 +364,3 @@ def create_neuron_info_decoded(
"prometheus_info": prometheus_info,
"axon_info": axon_info,
}
-
-
-@pytest.fixture
-def mock_from_scale_encoding(mocker):
- return mocker.patch("bittensor.core.chain_data.delegate_info.from_scale_encoding")
-
-
-@pytest.fixture
-def mock_fix_decoded_values(mocker):
- return mocker.patch(
- "bittensor.core.chain_data.DelegateInfo.fix_decoded_values",
- side_effect=lambda x: x,
- )
-
-
-@pytest.mark.parametrize(
- "test_id, vec_u8, expected",
- [
- (
- "happy-path-1",
- [1, 2, 3],
- [
- DelegateInfo(
- hotkey_ss58="hotkey",
- total_stake=1000,
- nominators=[
- "nominator1",
- "nominator2",
- ],
- owner_ss58="owner",
- take=10.1,
- validator_permits=[1, 2, 3],
- registrations=[4, 5, 6],
- return_per_1000=100,
- total_daily_return=1000,
- )
- ],
- ),
- (
- "happy-path-2",
- [4, 5, 6],
- [
- DelegateInfo(
- hotkey_ss58="hotkey",
- total_stake=1000,
- nominators=[
- "nominator1",
- "nominator2",
- ],
- owner_ss58="owner",
- take=2.1,
- validator_permits=[1, 2, 3],
- registrations=[4, 5, 6],
- return_per_1000=100,
- total_daily_return=1000,
- )
- ],
- ),
- ],
-)
-def test_list_from_vec_u8_happy_path(
- mock_from_scale_encoding, mock_fix_decoded_values, test_id, vec_u8, expected
-):
- # Arrange
- mock_from_scale_encoding.return_value = expected
-
- # Act
- result = DelegateInfo.list_from_vec_u8(vec_u8)
-
- # Assert
- mock_from_scale_encoding.assert_called_once_with(
- vec_u8, ChainDataType.DelegateInfo, is_vec=True
- )
- assert result == expected, f"Failed {test_id}"
-
-
-@pytest.mark.parametrize(
- "test_id, vec_u8, expected",
- [
- ("edge_empty_list", [], []),
- ],
-)
-def test_list_from_vec_u8_edge_cases(
- mock_from_scale_encoding, mock_fix_decoded_values, test_id, vec_u8, expected
-):
- # Arrange
- mock_from_scale_encoding.return_value = None
-
- # Act
- result = DelegateInfo.list_from_vec_u8(vec_u8)
-
- # Assert
- mock_from_scale_encoding.assert_called_once_with(
- vec_u8, ChainDataType.DelegateInfo, is_vec=True
- )
- assert result == expected, f"Failed {test_id}"
-
-
-@pytest.mark.parametrize(
- "vec_u8, expected_exception",
- [
- ("not_a_list", TypeError),
- ],
-)
-def test_list_from_vec_u8_error_cases(
- vec_u8,
- expected_exception,
-):
- # No Arrange section needed as input values are provided via test parameters
-
- # Act & Assert
- with pytest.raises(expected_exception):
- _ = DelegateInfo.list_from_vec_u8(vec_u8)
diff --git a/tests/unit_tests/test_subtensor.py b/tests/unit_tests/test_subtensor.py
index d0783d20ff..765e093ddb 100644
--- a/tests/unit_tests/test_subtensor.py
+++ b/tests/unit_tests/test_subtensor.py
@@ -1132,7 +1132,6 @@ def test_set_weights(subtensor, mocker):
fake_weights = [0.4, 0.6]
fake_wait_for_inclusion = False
fake_wait_for_finalization = False
- fake_prompt = False
fake_max_retries = 5
expected_result = (True, None)
@@ -1159,7 +1158,6 @@ def test_set_weights(subtensor, mocker):
version_key=settings.version_as_int,
wait_for_inclusion=fake_wait_for_inclusion,
wait_for_finalization=fake_wait_for_finalization,
- prompt=fake_prompt,
max_retries=fake_max_retries,
)
@@ -1180,7 +1178,6 @@ def test_set_weights(subtensor, mocker):
version_key=settings.version_as_int,
wait_for_inclusion=fake_wait_for_inclusion,
wait_for_finalization=fake_wait_for_finalization,
- prompt=fake_prompt,
)
assert result == expected_result
@@ -1273,7 +1270,6 @@ def test_transfer(subtensor, mocker):
fake_amount = 1.1
fake_wait_for_inclusion = True
fake_wait_for_finalization = True
- fake_prompt = False
mocked_transfer_extrinsic = mocker.patch.object(
subtensor_module, "transfer_extrinsic"
)
@@ -1285,7 +1281,6 @@ def test_transfer(subtensor, mocker):
fake_amount,
fake_wait_for_inclusion,
fake_wait_for_finalization,
- fake_prompt,
)
# Asserts
@@ -1296,7 +1291,6 @@ def test_transfer(subtensor, mocker):
amount=fake_amount,
wait_for_inclusion=fake_wait_for_inclusion,
wait_for_finalization=fake_wait_for_finalization,
- prompt=fake_prompt,
)
assert result == mocked_transfer_extrinsic.return_value
@@ -1406,160 +1400,6 @@ def test_neuron_for_uid_success(subtensor, mocker):
assert result == mocked_neuron_from_vec_u8.return_value
-def test_do_serve_prometheus_is_success(subtensor, mocker):
- """Successful do_serve_prometheus call."""
- # Prep
- fake_wallet = mocker.MagicMock()
- fake_call_params = mocker.MagicMock()
- fake_wait_for_inclusion = True
- fake_wait_for_finalization = True
-
- subtensor.substrate.submit_extrinsic.return_value.is_success = True
-
- # Call
- result = subtensor._do_serve_prometheus(
- wallet=fake_wallet,
- call_params=fake_call_params,
- wait_for_inclusion=fake_wait_for_inclusion,
- wait_for_finalization=fake_wait_for_finalization,
- )
-
- # Asserts
- subtensor.substrate.compose_call.assert_called_once_with(
- call_module="SubtensorModule",
- call_function="serve_prometheus",
- call_params=fake_call_params,
- )
-
- subtensor.substrate.create_signed_extrinsic.assert_called_once_with(
- call=subtensor.substrate.compose_call.return_value,
- keypair=fake_wallet.hotkey,
- )
-
- subtensor.substrate.submit_extrinsic.assert_called_once_with(
- subtensor.substrate.create_signed_extrinsic.return_value,
- wait_for_inclusion=fake_wait_for_inclusion,
- wait_for_finalization=fake_wait_for_finalization,
- )
-
- subtensor.substrate.submit_extrinsic.return_value.process_events.assert_called_once()
- assert result == (True, None)
-
-
-def test_do_serve_prometheus_is_not_success(subtensor, mocker):
- """Unsuccessful do_serve_axon call."""
- # Prep
- fake_wallet = mocker.MagicMock()
- fake_call_params = mocker.MagicMock()
- fake_wait_for_inclusion = True
- fake_wait_for_finalization = True
-
- subtensor.substrate.submit_extrinsic.return_value.is_success = None
-
- # Call
- result = subtensor._do_serve_prometheus(
- wallet=fake_wallet,
- call_params=fake_call_params,
- wait_for_inclusion=fake_wait_for_inclusion,
- wait_for_finalization=fake_wait_for_finalization,
- )
-
- # Asserts
- subtensor.substrate.compose_call.assert_called_once_with(
- call_module="SubtensorModule",
- call_function="serve_prometheus",
- call_params=fake_call_params,
- )
-
- subtensor.substrate.create_signed_extrinsic.assert_called_once_with(
- call=subtensor.substrate.compose_call.return_value,
- keypair=fake_wallet.hotkey,
- )
-
- subtensor.substrate.submit_extrinsic.assert_called_once_with(
- subtensor.substrate.create_signed_extrinsic.return_value,
- wait_for_inclusion=fake_wait_for_inclusion,
- wait_for_finalization=fake_wait_for_finalization,
- )
-
- subtensor.substrate.submit_extrinsic.return_value.process_events.assert_called_once()
- assert result == (
- False,
- subtensor.substrate.submit_extrinsic.return_value.error_message,
- )
-
-
-def test_do_serve_prometheus_no_waits(subtensor, mocker):
- """Unsuccessful do_serve_axon call."""
- # Prep
- fake_wallet = mocker.MagicMock()
- fake_call_params = mocker.MagicMock()
- fake_wait_for_inclusion = False
- fake_wait_for_finalization = False
-
- # Call
- result = subtensor._do_serve_prometheus(
- wallet=fake_wallet,
- call_params=fake_call_params,
- wait_for_inclusion=fake_wait_for_inclusion,
- wait_for_finalization=fake_wait_for_finalization,
- )
-
- # Asserts
- subtensor.substrate.compose_call.assert_called_once_with(
- call_module="SubtensorModule",
- call_function="serve_prometheus",
- call_params=fake_call_params,
- )
-
- subtensor.substrate.create_signed_extrinsic.assert_called_once_with(
- call=subtensor.substrate.compose_call.return_value,
- keypair=fake_wallet.hotkey,
- )
-
- subtensor.substrate.submit_extrinsic.assert_called_once_with(
- subtensor.substrate.create_signed_extrinsic.return_value,
- wait_for_inclusion=fake_wait_for_inclusion,
- wait_for_finalization=fake_wait_for_finalization,
- )
- assert result == (True, None)
-
-
-def test_serve_prometheus(subtensor, mocker):
- """Test serve_prometheus function successful call."""
- # Preps
- fake_wallet = mocker.MagicMock()
- fake_port = 1234
- fake_netuid = 1
- wait_for_inclusion = True
- wait_for_finalization = False
-
- mocked_prometheus_extrinsic = mocker.patch.object(
- subtensor_module, "prometheus_extrinsic"
- )
-
- # Call
- result = subtensor.serve_prometheus(
- fake_wallet,
- fake_port,
- fake_netuid,
- wait_for_inclusion=wait_for_inclusion,
- wait_for_finalization=wait_for_finalization,
- )
-
- # Asserts
- mocked_prometheus_extrinsic.assert_called_once_with(
- subtensor,
- wallet=fake_wallet,
- port=fake_port,
- netuid=fake_netuid,
- wait_for_inclusion=wait_for_inclusion,
- wait_for_finalization=wait_for_finalization,
- )
-
- assert result == mocked_prometheus_extrinsic.return_value
-
-
def test_do_serve_axon_is_success(subtensor, mocker):
"""Successful do_serve_axon call."""
# Prep
@@ -1897,7 +1737,6 @@ def test_commit_weights(subtensor, mocker):
weights = [0.4, 0.6]
wait_for_inclusion = False
wait_for_finalization = False
- prompt = False
max_retries = 5
expected_result = (True, None)
@@ -1918,7 +1757,6 @@ def test_commit_weights(subtensor, mocker):
version_key=settings.version_as_int,
wait_for_inclusion=wait_for_inclusion,
wait_for_finalization=wait_for_finalization,
- prompt=prompt,
max_retries=max_retries,
)
@@ -1939,7 +1777,6 @@ def test_commit_weights(subtensor, mocker):
commit_hash=mocked_generate_weight_hash.return_value,
wait_for_inclusion=wait_for_inclusion,
wait_for_finalization=wait_for_finalization,
- prompt=prompt,
)
assert result == expected_result
@@ -1966,7 +1803,6 @@ def test_reveal_weights(subtensor, mocker):
salt=salt,
wait_for_inclusion=False,
wait_for_finalization=False,
- prompt=False,
)
# Assertions
@@ -1981,7 +1817,6 @@ def test_reveal_weights(subtensor, mocker):
salt=salt,
wait_for_inclusion=False,
wait_for_finalization=False,
- prompt=False,
)
@@ -2009,7 +1844,6 @@ def test_reveal_weights_false(subtensor, mocker):
salt=salt,
wait_for_inclusion=False,
wait_for_finalization=False,
- prompt=False,
)
# Assertion
@@ -2051,3 +1885,269 @@ def test_connect_with_substrate(mocker):
# Assertions
assert spy_get_substrate.call_count == 0
+
+
+def test_get_subnet_burn_cost_success(subtensor, mocker):
+    """Tests get_subnet_burn_cost method with a successful result."""
+ # Preps
+ mocked_query_runtime_api = mocker.patch.object(subtensor, "query_runtime_api")
+ fake_block = 123
+
+ # Call
+ result = subtensor.get_subnet_burn_cost(fake_block)
+
+ # Asserts
+ mocked_query_runtime_api.assert_called_once_with(
+ runtime_api="SubnetRegistrationRuntimeApi",
+ method="get_network_registration_cost",
+ params=[],
+ block=fake_block,
+ )
+
+ assert result == mocked_query_runtime_api.return_value
+
+
+def test_get_subnet_burn_cost_none(subtensor, mocker):
+ """Tests get_subnet_burn_cost method with None result."""
+ # Preps
+ mocked_query_runtime_api = mocker.patch.object(
+ subtensor, "query_runtime_api", return_value=None
+ )
+ fake_block = 123
+
+ # Call
+ result = subtensor.get_subnet_burn_cost(fake_block)
+
+ # Asserts
+ mocked_query_runtime_api.assert_called_once_with(
+ runtime_api="SubnetRegistrationRuntimeApi",
+ method="get_network_registration_cost",
+ params=[],
+ block=fake_block,
+ )
+
+ assert result is None
+
+
+def test_difficulty_success(subtensor, mocker):
+    """Tests difficulty method with a successful result."""
+ # Preps
+ mocked_get_hyperparameter = mocker.patch.object(subtensor, "_get_hyperparameter")
+ fake_netuid = 1
+ fake_block = 2
+
+ # Call
+ result = subtensor.difficulty(fake_netuid, fake_block)
+
+ # Asserts
+ mocked_get_hyperparameter.assert_called_once_with(
+ param_name="Difficulty",
+ netuid=fake_netuid,
+ block=fake_block,
+ )
+
+ assert result == int(mocked_get_hyperparameter.return_value)
+
+
+def test_difficulty_none(subtensor, mocker):
+ """Tests difficulty method with None result."""
+ # Preps
+ mocked_get_hyperparameter = mocker.patch.object(
+ subtensor, "_get_hyperparameter", return_value=None
+ )
+ fake_netuid = 1
+ fake_block = 2
+
+ # Call
+ result = subtensor.difficulty(fake_netuid, fake_block)
+
+ # Asserts
+ mocked_get_hyperparameter.assert_called_once_with(
+ param_name="Difficulty",
+ netuid=fake_netuid,
+ block=fake_block,
+ )
+
+ assert result is None
+
+
+def test_recycle_success(subtensor, mocker):
+    """Tests recycle method with a successful result."""
+ # Preps
+ mocked_get_hyperparameter = mocker.patch.object(
+ subtensor, "_get_hyperparameter", return_value=0.1
+ )
+ fake_netuid = 1
+ fake_block = 2
+ mocked_balance = mocker.patch("bittensor.utils.balance.Balance")
+
+ # Call
+ result = subtensor.recycle(fake_netuid, fake_block)
+
+ # Asserts
+ mocked_get_hyperparameter.assert_called_once_with(
+ param_name="Burn",
+ netuid=fake_netuid,
+ block=fake_block,
+ )
+
+ mocked_balance.assert_called_once_with(int(mocked_get_hyperparameter.return_value))
+ assert result == mocked_balance.return_value
+
+
+def test_recycle_none(subtensor, mocker):
+ """Tests recycle method with None result."""
+ # Preps
+ mocked_get_hyperparameter = mocker.patch.object(
+ subtensor, "_get_hyperparameter", return_value=None
+ )
+ fake_netuid = 1
+ fake_block = 2
+
+ # Call
+ result = subtensor.recycle(fake_netuid, fake_block)
+
+ # Asserts
+ mocked_get_hyperparameter.assert_called_once_with(
+ param_name="Burn",
+ netuid=fake_netuid,
+ block=fake_block,
+ )
+
+ assert result is None
+
+
+# `get_all_subnets_info` tests
+def test_get_all_subnets_info_success(mocker, subtensor):
+ """Test get_all_subnets_info returns correct data when subnet information is found."""
+ # Prep
+ block = 123
+ subnet_data = [1, 2, 3] # Mocked response data
+ mocker.patch.object(
+ subtensor.substrate, "get_block_hash", return_value="mock_block_hash"
+ )
+ mock_response = {"result": subnet_data}
+ mocker.patch.object(subtensor.substrate, "rpc_request", return_value=mock_response)
+ mocker.patch.object(
+ subtensor_module.SubnetInfo,
+ "list_from_vec_u8",
+ return_value="list_from_vec_u80",
+ )
+
+ # Call
+ result = subtensor.get_all_subnets_info(block)
+
+ # Asserts
+ subtensor.substrate.get_block_hash.assert_called_once_with(block)
+ subtensor.substrate.rpc_request.assert_called_once_with(
+ method="subnetInfo_getSubnetsInfo", params=["mock_block_hash"]
+ )
+ subtensor_module.SubnetInfo.list_from_vec_u8.assert_called_once_with(subnet_data)
+
+
+@pytest.mark.parametrize("result_", [[], None])
+def test_get_all_subnets_info_no_data(mocker, subtensor, result_):
+ """Test get_all_subnets_info returns empty list when no subnet information is found."""
+ # Prep
+ block = 123
+ mocker.patch.object(
+ subtensor.substrate, "get_block_hash", return_value="mock_block_hash"
+ )
+ mock_response = {"result": result_}
+ mocker.patch.object(subtensor.substrate, "rpc_request", return_value=mock_response)
+ mocker.patch.object(subtensor_module.SubnetInfo, "list_from_vec_u8")
+
+ # Call
+ result = subtensor.get_all_subnets_info(block)
+
+ # Asserts
+ assert result == []
+ subtensor.substrate.get_block_hash.assert_called_once_with(block)
+ subtensor.substrate.rpc_request.assert_called_once_with(
+ method="subnetInfo_getSubnetsInfo", params=["mock_block_hash"]
+ )
+ subtensor_module.SubnetInfo.list_from_vec_u8.assert_not_called()
+
+
+def test_get_all_subnets_info_retry(mocker, subtensor):
+ """Test get_all_subnets_info retries on failure."""
+ # Prep
+ block = 123
+ subnet_data = [1, 2, 3]
+ mocker.patch.object(
+ subtensor.substrate, "get_block_hash", return_value="mock_block_hash"
+ )
+ mock_response = {"result": subnet_data}
+ mock_rpc_request = mocker.patch.object(
+ subtensor.substrate,
+ "rpc_request",
+ side_effect=[Exception, Exception, mock_response],
+ )
+ mocker.patch.object(
+ subtensor_module.SubnetInfo, "list_from_vec_u8", return_value=["some_data"]
+ )
+
+ # Call
+ result = subtensor.get_all_subnets_info(block)
+
+ # Asserts
+ subtensor.substrate.get_block_hash.assert_called_with(block)
+ assert mock_rpc_request.call_count == 3
+ subtensor_module.SubnetInfo.list_from_vec_u8.assert_called_once_with(subnet_data)
+ assert result == ["some_data"]
+
+
+def test_get_delegate_take_success(subtensor, mocker):
+ """Verify `get_delegate_take` method successful path."""
+ # Preps
+ fake_hotkey_ss58 = "FAKE_SS58"
+ fake_block = 123
+
+ subtensor_module.u16_normalized_float = mocker.Mock()
+ subtensor.query_subtensor = mocker.Mock(return_value=mocker.Mock(value="value"))
+
+ # Call
+ result = subtensor.get_delegate_take(hotkey_ss58=fake_hotkey_ss58, block=fake_block)
+
+ # Asserts
+ subtensor.query_subtensor.assert_called_once_with(
+ "Delegates", fake_block, [fake_hotkey_ss58]
+ )
+ subtensor_module.u16_normalized_float.assert_called_once_with(
+ subtensor.query_subtensor.return_value.value
+ )
+ assert result == subtensor_module.u16_normalized_float.return_value
+
+
+def test_get_delegate_take_none(subtensor, mocker):
+ """Verify `get_delegate_take` method returns None."""
+ # Preps
+ fake_hotkey_ss58 = "FAKE_SS58"
+ fake_block = 123
+
+ subtensor.query_subtensor = mocker.Mock(return_value=mocker.Mock(value=None))
+ subtensor_module.u16_normalized_float = mocker.Mock()
+
+ # Call
+ result = subtensor.get_delegate_take(hotkey_ss58=fake_hotkey_ss58, block=fake_block)
+
+ # Asserts
+ subtensor.query_subtensor.assert_called_once_with(
+ "Delegates", fake_block, [fake_hotkey_ss58]
+ )
+
+ subtensor_module.u16_normalized_float.assert_not_called()
+ assert result is None
+
+
+def test_networks_during_connection(mocker):
+    """Test networks during connection."""
+ # Preps
+ subtensor_module.SubstrateInterface = mocker.Mock()
+ # Call
+ for network in list(settings.NETWORK_MAP.keys()) + ["undefined"]:
+ sub = Subtensor(network)
+
+        # Assertions
+        sub.network = network  # FIXME(review): assignment, not an assertion — should be `assert sub.network == network`
+        sub.chain_endpoint = settings.NETWORK_MAP.get(network)  # FIXME(review): should be `assert sub.chain_endpoint == settings.NETWORK_MAP.get(network)`
diff --git a/tests/unit_tests/utils/test_formatting.py b/tests/unit_tests/utils/test_formatting.py
new file mode 100644
index 0000000000..3c223a48b3
--- /dev/null
+++ b/tests/unit_tests/utils/test_formatting.py
@@ -0,0 +1,80 @@
+# The MIT License (MIT)
+# Copyright © 2024 Opentensor Foundation
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
+# documentation files (the “Software”), to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
+# and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all copies or substantial portions of
+# the Software.
+#
+# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
+# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+# DEALINGS IN THE SOFTWARE.
+
+import math
+
+from bittensor.utils import formatting
+
+
+def test_get_human_readable():
+ """Tests the `get_human_readable` function in the `formatting` module."""
+ num1 = 1000
+ num2 = 1_000_000
+ num3 = 1_000_000_000
+ num4 = 150
+ negative_num = -1000
+
+ # Test for default output
+ assert formatting.get_human_readable(num1) == "1.0KH"
+
+ # Test for different quantities
+ assert formatting.get_human_readable(num2) == "1.0MH"
+ assert formatting.get_human_readable(num3) == "1.0GH"
+
+ # Test for numbers less than 1000
+ assert formatting.get_human_readable(num4) == "150.0H"
+
+ # Test for negative numbers
+ assert formatting.get_human_readable(negative_num) == "-1.0KH"
+
+ # Test for different suffix
+ assert formatting.get_human_readable(num1, suffix="B") == "1.0KB"
+ assert formatting.get_human_readable(num2, suffix="B") == "1.0MB"
+ assert formatting.get_human_readable(num3, suffix="B") == "1.0GB"
+ assert formatting.get_human_readable(num4, suffix="B") == "150.0B"
+ assert formatting.get_human_readable(negative_num, suffix="B") == "-1.0KB"
+
+
+def test_millify():
+ """Test millify function with various cases."""
+ # Testing with value 0
+ assert formatting.millify(0) == "0.00"
+ # Testing with a number in the tens
+ assert formatting.millify(10) == "10.00"
+ # Testing with a number in the hundreds
+ assert formatting.millify(100) == "100.00"
+ # Testing with a number in the thousands
+ assert formatting.millify(1000) == "1.00 K"
+ # Testing with a number in the millions
+ assert formatting.millify(1000000) == "1.00 M"
+ # Testing with a number in the billions
+ assert formatting.millify(1000000000) == "1.00 B"
+ # Testing with a number in the trillions
+ assert formatting.millify(1000000000000) == "1.00 T"
+ # Testing with maximum limit
+ mill_names = ["", " K", " M", " B", " T"]
+ n = 10 ** (3 * (len(mill_names) - 1) + 1)
+ mill_idx = max(
+ 0,
+ min(
+ len(mill_names) - 1,
+ int(math.floor(0 if n == 0 else math.log10(abs(n)) / 3)),
+ ),
+ )
+ assert formatting.millify(n) == "{:.2f}{}".format(
+ n / 10 ** (3 * mill_idx), mill_names[mill_idx]
+ )