Skip to content

Commit

Permalink
Merge pull request #461 from HathorNetwork/dev
Browse files Browse the repository at this point in the history
Release v0.50.0
  • Loading branch information
jansegre authored Aug 16, 2022
2 parents 1b3e232 + a33e4f8 commit ab5360d
Show file tree
Hide file tree
Showing 23 changed files with 395 additions and 66 deletions.
2 changes: 1 addition & 1 deletion hathor/cli/openapi_files/openapi_base.json
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@
],
"info": {
"title": "Hathor API",
"version": "0.49.2"
"version": "0.50.0"
},
"consumes": [
"application/json"
Expand Down
16 changes: 15 additions & 1 deletion hathor/cli/quick_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,13 +12,22 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from argparse import Namespace
from argparse import ArgumentParser, Namespace

from hathor.cli.run_node import RunNode


class QuickTest(RunNode):

def create_parser(self) -> ArgumentParser:
    """Extend the base RunNode parser with the quick-test specific flag.

    Returns the parser built by the parent class with `--no-wait` added.
    """
    parser = super().create_parser()
    parser.add_argument(
        '--no-wait',
        action='store_true',
        help='If set will not wait for a new tx before exiting',
    )
    return parser

def prepare(self, args: Namespace) -> None:
    """Run the parent preparation, then remember whether --no-wait was given."""
    super().prepare(args)
    self._no_wait = args.no_wait

def register_resources(self, args: Namespace) -> None:
self.log.info('patching on_new_tx to quit on success')
orig_on_new_tx = self.manager.on_new_tx
Expand All @@ -43,6 +52,11 @@ def exit_with_error():
sys.exit(1)
self.reactor.callLater(timeout, exit_with_error)

def run(self) -> None:
    """Start the node normally, unless --no-wait was set (then exit at once)."""
    if not self._no_wait:
        super().run()


def main():
    """CLI entry point: build the quick-test runner and execute it."""
    app = QuickTest()
    app.run()
42 changes: 40 additions & 2 deletions hathor/cli/run_node.py
Original file line number Diff line number Diff line change
Expand Up @@ -99,6 +99,8 @@ def create_parser(self) -> ArgumentParser:
help='Disable support for running sync-v1. DO NOT ENABLE, IT WILL BREAK.')
parser.add_argument('--x-localhost-only', action='store_true', help='Only connect to peers on localhost')
parser.add_argument('--x-rocksdb-indexes', action='store_true', help='Use RocksDB indexes (currently opt-in)')
parser.add_argument('--x-enable-event-queue', action='store_true', help='Enable event queue mechanism')
parser.add_argument('--x-retain-events', action='store_true', help='Retain all events in the local database')
return parser

def prepare(self, args: Namespace) -> None:
Expand All @@ -111,6 +113,7 @@ def prepare(self, args: Namespace) -> None:
from hathor.p2p.peer_discovery import BootstrapPeerDiscovery, DNSPeerDiscovery
from hathor.p2p.peer_id import PeerId
from hathor.p2p.utils import discover_hostname
from hathor.storage import RocksDBStorage
from hathor.transaction import genesis
from hathor.transaction.storage import (
TransactionCacheStorage,
Expand Down Expand Up @@ -151,6 +154,7 @@ def prepare(self, args: Namespace) -> None:
python = f'{platform.python_version()}-{platform.python_implementation()}'

self.check_unsafe_arguments(args)
self.check_python_version()

self.log.info(
'hathor-core v{hathor}',
Expand Down Expand Up @@ -195,6 +199,7 @@ def create_wallet():
raise ValueError('Invalid type for wallet')

tx_storage: TransactionStorage
rocksdb_storage: RocksDBStorage
if args.memory_storage:
check_or_exit(not args.data, '--data should not be used with --memory-storage')
# if using MemoryStorage, no need to have cache
Expand All @@ -211,8 +216,9 @@ def create_wallet():
self.log.warn('--rocksdb-storage is now implied, no need to specify it')
cache_capacity = args.rocksdb_cache
use_memory_indexes = not args.x_rocksdb_indexes
tx_storage = TransactionRocksDBStorage(path=args.data, with_index=(not args.cache),
cache_capacity=cache_capacity,
rocksdb_storage = RocksDBStorage(path=args.data, cache_capacity=cache_capacity)
tx_storage = TransactionRocksDBStorage(rocksdb_storage,
with_index=(not args.cache),
use_memory_indexes=use_memory_indexes)
self.log.info('with storage', storage_class=type(tx_storage).__name__, path=args.data)
if args.cache:
Expand Down Expand Up @@ -296,6 +302,20 @@ def create_wallet():
if args.x_fast_init_beta:
self.log.warn('--x-fast-init-beta is now the default, no need to specify it')

if args.x_enable_event_queue:
if not settings.ENABLE_EVENT_QUEUE_FEATURE:
self.log.error('The event queue feature is not available yet')
sys.exit(-1)

self.manager.enable_event_queue = True
self.log.info('--x-enable-event-queue flag provided. '
'The events detected by the full node will be stored and retrieved to clients')

self.manager.retain_events = args.x_retain_events is True
elif args.x_retain_events:
self.log.error('You cannot use --x-retain-events without --x-enable-event-queue.')
sys.exit(-1)

for description in args.listen:
self.manager.add_listen_address(description)

Expand Down Expand Up @@ -623,6 +643,24 @@ def check_unsafe_arguments(self, args: Namespace) -> None:
if fail:
sys.exit(-1)

def check_python_version(self) -> None:
    """Warn (via self.log) when the running Python is older than the minimum supported version.

    Only emits a warning banner; it never aborts the process.
    """
    MIN_STABLE = (3, 8)
    RECOMMENDED_VER = (3, 9)
    cur = sys.version_info
    min_pretty = '.'.join(map(str, MIN_STABLE))
    # Use only (major, minor, micro): joining the whole sys.version_info would
    # also include the releaselevel/serial fields (e.g. "3.7.3.final.0").
    cur_pretty = '.'.join(map(str, cur[:3]))
    recommended_pretty = '.'.join(map(str, RECOMMENDED_VER))
    if cur < MIN_STABLE:
        self.log.warning('\n'.join([
            '',
            '********************************************************',
            f'The detected Python version {cur_pretty} is deprecated and support for it will be removed soon.',
            f'The minimum supported Python version will be {min_pretty}',
            f'The recommended Python version is {recommended_pretty}',
            '********************************************************',
            '',
        ]))

def __init__(self, *, argv=None):
if argv is None:
import sys
Expand Down
2 changes: 2 additions & 0 deletions hathor/conf/settings.py
Original file line number Diff line number Diff line change
Expand Up @@ -332,3 +332,5 @@ def MAXIMUM_NUMBER_OF_HALVINGS(self) -> int:

# Identifier used in metadata's voided_by.
SOFT_VOIDED_ID: bytes = b'tx-non-grata'

ENABLE_EVENT_QUEUE_FEATURE: bool = False
3 changes: 2 additions & 1 deletion hathor/indexes/manager.py
Original file line number Diff line number Diff line change
Expand Up @@ -160,8 +160,9 @@ def _manually_initialize(self, tx_storage: 'TransactionStorage') -> None:
tx_count = 0
latest_timestamp = BLOCK_GENESIS.timestamp
first_timestamp = BLOCK_GENESIS.timestamp
total = tx_storage.get_count_tx_blocks()

for tx in progress(tx_storage.topological_iterator(), log=self.log):
for tx in progress(tx_storage.topological_iterator(), log=self.log, total=total):
# XXX: these would probably make more sense to be their own simple "indexes" instead of how it is here
latest_timestamp = max(tx.timestamp, latest_timestamp)
first_timestamp = min(tx.timestamp, first_timestamp)
Expand Down
6 changes: 6 additions & 0 deletions hathor/indexes/memory_tokens_index.py
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,7 @@
)
from hathor.transaction import BaseTransaction, Transaction
from hathor.transaction.base_transaction import TxVersion
from hathor.util import is_token_uid_valid

logger = get_logger()

Expand Down Expand Up @@ -168,32 +169,37 @@ def iter_all_tokens(self) -> Iterator[Tuple[bytes, TokenIndexInfo]]:
yield from self._tokens.items()

def get_token_info(self, token_uid: bytes) -> TokenIndexInfo:
    """Return the stored TokenIndexInfo for `token_uid`.

    Raises KeyError when the token is not in the index.
    """
    assert is_token_uid_valid(token_uid)
    info = self._tokens.get(token_uid)
    if info is None:
        raise KeyError('unknown token')
    return info

def get_transactions_count(self, token_uid: bytes) -> int:
    """Return how many transactions are indexed for `token_uid` (0 when unknown)."""
    assert is_token_uid_valid(token_uid)
    info = self._tokens.get(token_uid)
    return len(info._transactions) if info is not None else 0

def get_newest_transactions(self, token_uid: bytes, count: int) -> Tuple[List[bytes], bool]:
    """Return up to `count` newest transaction hashes for the token and a has-more flag.

    Unknown tokens yield an empty page.
    """
    assert is_token_uid_valid(token_uid)
    info = self._tokens.get(token_uid)
    if info is None:
        return [], False
    return get_newest_sorted_key_list(info._transactions, count)

def get_older_transactions(self, token_uid: bytes, timestamp: int, hash_bytes: bytes, count: int
                           ) -> Tuple[List[bytes], bool]:
    """Return a page of up to `count` transaction hashes before (timestamp, hash_bytes).

    Returns the page plus a has-more flag; unknown tokens yield an empty page.
    """
    assert is_token_uid_valid(token_uid)
    info = self._tokens.get(token_uid)
    if info is None:
        return [], False
    return get_older_sorted_key_list(info._transactions, timestamp, hash_bytes, count)

def get_newer_transactions(self, token_uid: bytes, timestamp: int, hash_bytes: bytes, count: int
) -> Tuple[List[bytes], bool]:
assert is_token_uid_valid(token_uid)
if token_uid not in self._tokens:
return [], False
transactions = self._tokens[token_uid]._transactions
Expand Down
12 changes: 8 additions & 4 deletions hathor/indexes/rocksdb_address_index.py
Original file line number Diff line number Diff line change
Expand Up @@ -148,11 +148,15 @@ def get_sorted_from_address(self, address: str) -> List[bytes]:
def is_address_empty(self, address: str) -> bool:
self.log.debug('seek to', address=address)
it = self._db.iterkeys(self._cf)
it.seek(self._to_key(address))
res = it.get()
if not res:
seek_key = self._to_key(address)
it.seek(seek_key)
cf_key = it.get()
if not cf_key:
return True
_cf, key = cf_key
# XXX: this means we reached the end and did not find any key
if key == seek_key:
return True
_cf, key = res
addr, _, _ = self._from_key(key)
is_empty = addr != address
self.log.debug('seek empty', is_empty=is_empty)
Expand Down
8 changes: 8 additions & 0 deletions hathor/manager.py
Original file line number Diff line number Diff line change
Expand Up @@ -199,6 +199,14 @@ def __init__(self, reactor: Reactor, peer_id: Optional[PeerId] = None, network:
# Can be activated on the command line with --full-verification
self._full_verification = False

# Activated with --x-enable-event-queue flag
# It activates the event mechanism inside full node
self.enable_event_queue = False

# Activated with --x-retain-events flag. It will be ignored if --enable-event-queue is not provided
# It tells full node to retain all generated events. Otherwise, they will be deleted after retrieval
self.retain_events = False

# List of whitelisted peers
self.peers_whitelist: List[str] = []

Expand Down
42 changes: 41 additions & 1 deletion hathor/p2p/manager.py
Original file line number Diff line number Diff line change
Expand Up @@ -36,12 +36,17 @@
from hathor.util import Random, Reactor

if TYPE_CHECKING:
from twisted.internet.interfaces import IDelayedCall

from hathor.manager import HathorManager
from hathor.p2p.factory import HathorClientFactory, HathorServerFactory

logger = get_logger()
settings = HathorSettings()

# The timeout in seconds for the whitelist GET request
WHITELIST_REQUEST_TIMEOUT = 45


class _ConnectingPeer(NamedTuple):
connection_string: str
Expand Down Expand Up @@ -132,7 +137,22 @@ def __init__(self, reactor: Reactor, my_peer: PeerId, server_factory: 'HathorSer
def start(self) -> None:
self.lc_reconnect.start(5, now=False)
if settings.ENABLE_PEER_WHITELIST:
self.wl_reconnect.start(30)
self._start_whitelist_reconnect()

def _start_whitelist_reconnect(self) -> None:
    """Start the whitelist LoopingCall and attach error handling.

    LoopingCall.start returns a Deferred that only fires when the looping
    call stops running, so the errback runs when an exception halts the loop.
    https://docs.twistedmatrix.com/en/stable/api/twisted.internet.task.LoopingCall.html
    """
    deferred = self.wl_reconnect.start(30)
    deferred.addErrback(self._handle_whitelist_reconnect_err)

def _handle_whitelist_reconnect_err(self, *args: Any, **kwargs: Any) -> None:
    """Errback for the whitelist LoopingCall deferred.

    Invoked when an exception inside the whitelist update stops the looping
    call: log the failure, then schedule the loop to be started again.
    """
    self.log.error('whitelist reconnect had an exception. Start looping call again.', args=args, kwargs=kwargs)
    self.reactor.callLater(30, self._start_whitelist_reconnect)

def stop(self) -> None:
if self.lc_reconnect.running:
Expand Down Expand Up @@ -317,11 +337,31 @@ def update_whitelist(self) -> Deferred[None]:
settings.WHITELIST_URL.encode(),
Headers({'User-Agent': ['hathor-core']}),
None)
# Twisted Agent does not have a direct way to configure the HTTP client timeout
# only a TCP connection timeout.
# In this request we need a timeout that encompasses the connection and download time.
# The callLater below is a manual client timeout that includes it and
# will cancel the deferred in case it's called
timeout_call = self.reactor.callLater(WHITELIST_REQUEST_TIMEOUT, d.cancel)
d.addBoth(self._update_whitelist_timeout, timeout_call)
d.addCallback(readBody)
d.addErrback(self._update_whitelist_err)
d.addCallback(self._update_whitelist_cb)
return d

def _update_whitelist_timeout(self, param: Union[Failure, Optional[bytes]],
                              timeout_call: 'IDelayedCall') -> Union[Failure, Optional[bytes]]:
    """Pass-through callback/errback that disarms the manual request timeout.

    Added to both the callback and errback chains of the whitelist GET
    request, so `param` may be a Failure (errback) or the body bytes / None
    (callback). It cancels the pending timeout call when still active and
    forwards `param` unchanged so the chain continues.
    """
    still_pending = timeout_call.active()
    if still_pending:
        timeout_call.cancel()
    return param

def _update_whitelist_err(self, *args: Any, **kwargs: Any) -> None:
    # Errback for the whitelist update request: record the failure.
    self.log.error('update whitelist failed', args=args, kwargs=kwargs)

Expand Down
13 changes: 11 additions & 2 deletions hathor/p2p/peer_id.py
Original file line number Diff line number Diff line change
Expand Up @@ -75,6 +75,7 @@ def __init__(self, auto_generate_keys: bool = True) -> None:
self.retry_interval = 5
self.retry_attempts = 0
self.flags = set()
self._certificate_options: Optional[CertificateOptions] = None

if auto_generate_keys:
self.generate_keys()
Expand Down Expand Up @@ -283,9 +284,17 @@ def get_certificate(self) -> x509.Certificate:
return self.certificate

def get_certificate_options(self) -> CertificateOptions:
    """Return certificate options with a certificate generated and signed with the peer private key.

    The result is cached so subsequent calls are really cheap.
    """
    if self._certificate_options is None:
        self._certificate_options = self._get_certificate_options()
    return self._certificate_options

def _get_certificate_options(self) -> CertificateOptions:
"""Implementation of get_certificate_options, this should be cached to avoid opening the same static file
multiple times"""
certificate = self.get_certificate()
openssl_certificate = X509.from_cryptography(certificate)
assert self.private_key is not None
Expand Down
4 changes: 3 additions & 1 deletion hathor/simulator/simulator.py
Original file line number Diff line number Diff line change
Expand Up @@ -207,7 +207,9 @@ def _run(self, interval: float, step: float, status_interval: float) -> Generato
real_elapsed_time = t1 - t0
# Rate is the number of simulated seconds per real second.
# For example, a rate of 60 means that we can simulate 1 minute per second.
rate = (self._clock.seconds() - initial) / real_elapsed_time
rate: Optional[float] = None
if real_elapsed_time != 0:
rate = (self._clock.seconds() - initial) / real_elapsed_time
# Simulation now.
sim_now = self._clock.seconds()
# Simulation dt.
Expand Down
17 changes: 17 additions & 0 deletions hathor/storage/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,17 @@
# Copyright 2022 Hathor Labs
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from hathor.storage.rocksdb_storage import RocksDBStorage

__all__ = ['RocksDBStorage']
Loading

0 comments on commit ab5360d

Please sign in to comment.