Skip to content

Commit

Permalink
Allow re-signing of IS locks when performing retroactive signing (dashpay#3219)
Browse files Browse the repository at this point in the history

* Implement re-signing of InstantSend inputs when TXs come in via blocks

* Use GetAdjustedTime instead of GetTimeMillis in CSigSharesManager

This allows use of mocktime in tests.

* Expose verifiedProRegTxHash in getpeerinfo and implement wait_for_mnauth

* Allow to wait for IS and CL to NOT happen

* Bump timeout for wait_for_instantlock

* Implement tests for retroactive signing of IS and CLs

* Add wait_for_tx function to DashTestFramework

* Add -whitelist=127.0.0.1 to node0

* Use node3 for isolated block generation

* Don't test for non-receipt of TXs on node4/node5
  • Loading branch information
codablock committed Dec 6, 2019
1 parent a8b8891 commit bf7dee2
Show file tree
Hide file tree
Showing 12 changed files with 282 additions and 21 deletions.
1 change: 1 addition & 0 deletions qa/pull-tester/rpc-tests.py
Original file line number Diff line number Diff line change
Expand Up @@ -48,6 +48,7 @@
'llmq-chainlocks.py', # NOTE: needs dash_hash to pass
'llmq-simplepose.py', # NOTE: needs dash_hash to pass
'llmq-is-cl-conflicts.py', # NOTE: needs dash_hash to pass
'llmq-is-retroactive.py', # NOTE: needs dash_hash to pass
'llmq-dkgerrors.py', # NOTE: needs dash_hash to pass
'dip4-coinbasemerkleroots.py', # NOTE: needs dash_hash to pass
# vv Tests less than 60s vv
Expand Down
178 changes: 178 additions & 0 deletions qa/rpc-tests/llmq-is-retroactive.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,178 @@
#!/usr/bin/env python3
# Copyright (c) 2015-2018 The Dash Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.

from test_framework.mininode import *
from test_framework.test_framework import DashTestFramework
from test_framework.util import sync_blocks, set_node_times, \
isolate_node, reconnect_isolated_node

'''
llmq-is-retroactive.py
Tests retroactive signing
We have 6 nodes where node 0 is the control node, nodes 1-5 are masternodes.
Mempool inconsistencies are simulated via disconnecting/reconnecting node 3
and by having a higher relay fee on nodes 4 and 5.
'''

class LLMQ_IS_RetroactiveSigning(DashTestFramework):
    """Functional test for retroactive InstantSend (IS) signing.

    Exercises the paths where a TX reaches (some of) the masternodes only
    after it has been mined, and verifies that IS locks are still created
    (or correctly NOT created) depending on mempool state and on signing
    session timeouts. Mocktime bumps are used to drive timeouts
    deterministically; node 3 is repeatedly isolated/reconnected to create
    mempool inconsistencies.
    """

    def set_test_params(self):
        # 6 nodes total, 5 of them masternodes (nodes 1-5); node 0 is the control node.
        # -whitelist is needed to avoid the trickling logic on node0
        # -minrelaytxfee on nodes 4/5 simulates mempool inconsistencies: those
        # nodes reject the cheap test TXs and never have them in their mempool.
        self.set_dash_test_params(6, 5, [["-whitelist=127.0.0.1"], [], [], [], ["-minrelaytxfee=0.001"], ["-minrelaytxfee=0.001"]], fast_dip3_enforcement=True)

    def run_test(self):
        # DIP0008 must be active before ChainLocks/IS signing can be tested.
        while self.nodes[0].getblockchaininfo()["bip9_softforks"]["dip0008"]["status"] != "active":
            self.nodes[0].generate(10)
        sync_blocks(self.nodes, timeout=60*5)

        # Enable DKG, ChainLocks and InstantSend via sporks, then wait until
        # all nodes agree on the spork state.
        self.nodes[0].spork("SPORK_17_QUORUM_DKG_ENABLED", 0)
        self.nodes[0].spork("SPORK_19_CHAINLOCKS_ENABLED", 0)
        self.nodes[0].spork("SPORK_2_INSTANTSEND_ENABLED", 0)
        self.nodes[0].spork("SPORK_3_INSTANTSEND_BLOCK_FILTERING", 0)
        self.wait_for_sporks_same()

        # Mine two quorums so signing sessions have quorums to work with.
        self.mine_quorum()
        self.mine_quorum()

        # Make sure that all nodes are chainlocked at the same height before starting actual tests
        self.wait_for_chainlocked_block_all_nodes(self.nodes[0].getbestblockhash())

        self.log.info("trying normal IS lock")
        txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1)
        # 3 nodes should be enough to create an IS lock even if nodes 4 and 5 (which don't have the TX themselves)
        # are the only "neighbours" in intra-quorum connections for one of them.
        self.wait_for_instantlock(txid, self.nodes[0])
        self.bump_mocktime(1)
        set_node_times(self.nodes, self.mocktime)
        block = self.nodes[0].generate(1)[0]
        self.wait_for_chainlocked_block_all_nodes(block)

        self.log.info("testing normal signing with partially known TX")
        isolate_node(self.nodes[3])
        txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1)
        # Make sure nodes 1 and 2 received the TX before we continue,
        # otherwise it might announce the TX to node 3 when reconnecting
        self.wait_for_tx(txid, self.nodes[1])
        self.wait_for_tx(txid, self.nodes[2])
        reconnect_isolated_node(self.nodes[3], 0)
        self.wait_for_mnauth(self.nodes[3], 2)
        # node 3 fully reconnected but the TX wasn't relayed to it, so there should be no IS lock
        self.wait_for_instantlock(txid, self.nodes[0], False, 5)
        # push the tx directly via rpc
        self.nodes[3].sendrawtransaction(self.nodes[0].getrawtransaction(txid))
        # node 3 should vote on a tx now since it became aware of it via sendrawtransaction
        # and this should be enough to complete an IS lock
        self.wait_for_instantlock(txid, self.nodes[0])

        self.log.info("testing retroactive signing with unknown TX")
        isolate_node(self.nodes[3])
        rawtx = self.nodes[0].createrawtransaction([], {self.nodes[0].getnewaddress(): 1})
        rawtx = self.nodes[0].fundrawtransaction(rawtx)['hex']
        rawtx = self.nodes[0].signrawtransaction(rawtx)['hex']
        # Only the isolated node 3 knows the TX at this point.
        txid = self.nodes[3].sendrawtransaction(rawtx)
        # Make node 3 consider the TX as safe
        self.bump_mocktime(10 * 60 + 1)
        set_node_times(self.nodes, self.mocktime)
        # Mine the TX on the isolated node; after reconnecting, the other nodes
        # see the TX for the first time inside a block and must sign retroactively
        # for the block to become chainlocked.
        block = self.nodes[3].generatetoaddress(1, self.nodes[0].getnewaddress())[0]
        reconnect_isolated_node(self.nodes[3], 0)
        self.wait_for_chainlocked_block_all_nodes(block)
        self.nodes[0].setmocktime(self.mocktime)

        self.log.info("testing retroactive signing with partially known TX")
        isolate_node(self.nodes[3])
        txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1)
        # Make sure nodes 1 and 2 received the TX before we continue,
        # otherwise it might announce the TX to node 3 when reconnecting
        self.wait_for_tx(txid, self.nodes[1])
        self.wait_for_tx(txid, self.nodes[2])
        reconnect_isolated_node(self.nodes[3], 0)
        self.wait_for_mnauth(self.nodes[3], 2)
        # node 3 fully reconnected but the TX wasn't relayed to it, so there should be no IS lock
        self.wait_for_instantlock(txid, self.nodes[0], False, 5)
        # Make node0 consider the TX as safe
        self.bump_mocktime(10 * 60 + 1)
        set_node_times(self.nodes, self.mocktime)
        # Mining the TX should now trigger retroactive signing, resulting in an
        # IS lock and a ChainLock on all nodes.
        block = self.nodes[0].generate(1)[0]
        self.wait_for_chainlocked_block_all_nodes(block)

        self.log.info("testing retroactive signing with partially known TX and all nodes session timeout")
        self.test_all_nodes_session_timeout(False)
        self.log.info("repeating test, but with cycled LLMQs")
        self.test_all_nodes_session_timeout(True)

        self.log.info("testing retroactive signing with partially known TX and single node session timeout")
        self.test_single_node_session_timeout(False)
        self.log.info("repeating test, but with cycled LLMQs")
        self.test_single_node_session_timeout(True)

    def cycle_llmqs(self):
        # Mine two fresh quorums so the quorums that participated in the
        # previous (timed-out) signing sessions get rotated out.
        self.mine_quorum()
        self.mine_quorum()
        self.wait_for_chainlocked_block_all_nodes(self.nodes[0].getbestblockhash())

    def test_all_nodes_session_timeout(self, do_cycle_llmqs):
        # Scenario: all masternodes vote on the TX's inputs, but the signing
        # session times out on every node before an IS lock can be formed.
        # Mining the TX afterwards must trigger re-signing.
        set_node_times(self.nodes, self.mocktime)
        isolate_node(self.nodes[3])
        rawtx = self.nodes[0].createrawtransaction([], {self.nodes[0].getnewaddress(): 1})
        rawtx = self.nodes[0].fundrawtransaction(rawtx)['hex']
        rawtx = self.nodes[0].signrawtransaction(rawtx)['hex']
        txid = self.nodes[0].sendrawtransaction(rawtx)
        txid = self.nodes[3].sendrawtransaction(rawtx)
        # Make sure nodes 1 and 2 received the TX before we continue
        self.wait_for_tx(txid, self.nodes[1])
        self.wait_for_tx(txid, self.nodes[2])
        # Make sure signing is done on nodes 1 and 2 (it's async)
        time.sleep(5)
        # Make the signing session for the IS lock timeout on nodes 1-3
        self.bump_mocktime(61)
        set_node_times(self.nodes, self.mocktime)
        time.sleep(2) # make sure Cleanup() is called
        reconnect_isolated_node(self.nodes[3], 0)
        self.wait_for_mnauth(self.nodes[3], 2)
        # node 3 fully reconnected but the signing session is already timed out on all nodes, so no IS lock
        self.wait_for_instantlock(txid, self.nodes[0], False, 5)
        if do_cycle_llmqs:
            self.cycle_llmqs()
            self.wait_for_instantlock(txid, self.nodes[0], False, 5)
        # Make node 0 consider the TX as safe
        self.bump_mocktime(10 * 60 + 1)
        self.nodes[0].setmocktime(self.mocktime)
        # Mining the TX must cause the inputs to be re-signed even though all
        # members already voted before (retroactive signing).
        block = self.nodes[0].generate(1)[0]
        self.wait_for_chainlocked_block_all_nodes(block)

    def test_single_node_session_timeout(self, do_cycle_llmqs):
        # Scenario: only the isolated node 3 saw the TX and its signing session
        # times out; the remaining nodes first learn of the TX later and the
        # IS lock must still complete via retroactive signing.
        set_node_times(self.nodes, self.mocktime)
        isolate_node(self.nodes[3])
        rawtx = self.nodes[0].createrawtransaction([], {self.nodes[0].getnewaddress(): 1})
        rawtx = self.nodes[0].fundrawtransaction(rawtx)['hex']
        rawtx = self.nodes[0].signrawtransaction(rawtx)['hex']
        txid = self.nodes[3].sendrawtransaction(rawtx)
        time.sleep(2) # make sure signing is done on node 2 (it's async)
        # Make the signing session for the IS lock timeout on node 3
        self.bump_mocktime(61)
        set_node_times(self.nodes, self.mocktime)
        time.sleep(2) # make sure Cleanup() is called
        reconnect_isolated_node(self.nodes[3], 0)
        self.wait_for_mnauth(self.nodes[3], 2)
        self.nodes[0].sendrawtransaction(rawtx)
        # Make sure nodes 1 and 2 received the TX
        self.wait_for_tx(txid, self.nodes[1])
        self.wait_for_tx(txid, self.nodes[2])
        # Make sure signing is done on nodes 1 and 2 (it's async)
        time.sleep(5)
        # node 3 fully reconnected but the signing session is already timed out on it, so no IS lock
        self.wait_for_instantlock(txid, self.nodes[0], False, 1)
        if do_cycle_llmqs:
            self.cycle_llmqs()
            self.wait_for_instantlock(txid, self.nodes[0], False, 5)
        # Make node 0 consider the TX as safe
        self.bump_mocktime(10 * 60 + 1)
        self.nodes[0].setmocktime(self.mocktime)
        block = self.nodes[0].generate(1)[0]
        self.wait_for_chainlocked_block_all_nodes(block)

# Standard test-framework entry point.
if __name__ == '__main__':
    LLMQ_IS_RetroactiveSigning().main()
24 changes: 22 additions & 2 deletions qa/rpc-tests/test_framework/test_framework.py
Original file line number Diff line number Diff line change
Expand Up @@ -555,13 +555,23 @@ def send_complex_tx(self, sender, receiver):
self.sync_all()
return self.wait_for_instantlock(txid, sender)

def wait_for_instantlock(self, txid, node):
def wait_for_tx(self, txid, node, expected=True, timeout=15):
    """Wait until `node` knows `txid` (via getrawtransaction).

    When expected is False, instead require that the TX does NOT become
    visible within `timeout` seconds, and raise AssertionError if it does.
    """
    def tx_visible():
        try:
            return node.getrawtransaction(txid)
        except:
            return False
    seen = wait_until(tx_visible, timeout=timeout, sleep=0.5, do_assert=expected)
    if seen and not expected:
        raise AssertionError("waiting unexpectedly succeeded")

def wait_for_instantlock(self, txid, node, expected=True, timeout=15):
    """Wait until `node` reports `txid` as instantlocked.

    When expected is False, instead require that no IS lock appears within
    `timeout` seconds and raise AssertionError if one does.
    """
    def check_instantlock():
        try:
            # "instantlock" is reported by the verbose getrawtransaction output
            return node.getrawtransaction(txid, True)["instantlock"]
        except:
            return False
    # Mirror wait_for_tx: honor the expected/timeout parameters instead of the
    # stale unconditional `return wait_until(..., timeout=10, ...)`, which made
    # them dead code and the not-expected assertion unreachable.
    if wait_until(check_instantlock, timeout=timeout, sleep=0.5, do_assert=expected) and not expected:
        raise AssertionError("waiting unexpectedly succeeded")

def wait_for_chainlocked_block(self, node, block_hash, expected=True, timeout=15):
def check_chainlocked_block():
Expand Down Expand Up @@ -712,6 +722,16 @@ def mine_quorum(self, expected_contributions=5, expected_complaints=0, expected_

return new_quorum

def wait_for_mnauth(self, node, count, timeout=10):
    """Wait until at least `count` peers of `node` have completed MNAUTH
    (i.e. expose a non-empty "verified_proregtx_hash" in getpeerinfo)."""
    def enough_verified_peers():
        verified = sum(1 for peer in node.getpeerinfo()
                       if peer.get("verified_proregtx_hash", "") != "")
        return verified >= count
    wait_until(enough_verified_peers, timeout=timeout)

# Test framework for doing p2p comparison testing, which sets up some bitcoind
# binaries:
# 1 binary: test binary
Expand Down
16 changes: 10 additions & 6 deletions src/llmq/quorums_instantsend.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -374,7 +374,7 @@ void CInstantSendManager::InterruptWorkerThread()
workInterrupt();
}

bool CInstantSendManager::ProcessTx(const CTransaction& tx, const Consensus::Params& params)
bool CInstantSendManager::ProcessTx(const CTransaction& tx, bool allowReSigning, const Consensus::Params& params)
{
if (!IsNewInstantSendEnabled()) {
return true;
Expand Down Expand Up @@ -444,7 +444,7 @@ bool CInstantSendManager::ProcessTx(const CTransaction& tx, const Consensus::Par
return false;
}
}
if (alreadyVotedCount == ids.size()) {
if (!allowReSigning && alreadyVotedCount == ids.size()) {
LogPrint("instantsend", "CInstantSendManager::%s -- txid=%s: already voted on all inputs, bailing out\n", __func__,
tx.GetHash().ToString());
return true;
Expand All @@ -457,8 +457,8 @@ bool CInstantSendManager::ProcessTx(const CTransaction& tx, const Consensus::Par
auto& in = tx.vin[i];
auto& id = ids[i];
inputRequestIds.emplace(id);
LogPrint("instantsend", "CInstantSendManager::%s -- txid=%s: trying to vote on input %s with id %s\n", __func__,
tx.GetHash().ToString(), in.prevout.ToStringShort(), id.ToString());
LogPrint("instantsend", "CInstantSendManager::%s -- txid=%s: trying to vote on input %s with id %s. allowReSigning=%d\n", __func__,
tx.GetHash().ToString(), in.prevout.ToStringShort(), id.ToString(), allowReSigning);
if (quorumSigningManager->AsyncSignIfMember(llmqType, id, tx.GetHash())) {
LogPrint("instantsend", "CInstantSendManager::%s -- txid=%s: voted on input %s with id %s\n", __func__,
tx.GetHash().ToString(), in.prevout.ToStringShort(), id.ToString());
Expand Down Expand Up @@ -1015,6 +1015,10 @@ void CInstantSendManager::SyncTransaction(const CTransaction& tx, const CBlockIn
return;
}

// This is different on develop as allowReSigning is passed in from the caller. In 0.14.0.x, we have to figure this out
// here to mimic develop.
bool allowReSigning = !inMempool && !isDisconnect;

uint256 islockHash;
{
LOCK(cs);
Expand All @@ -1037,7 +1041,7 @@ void CInstantSendManager::SyncTransaction(const CTransaction& tx, const CBlockIn

bool chainlocked = pindex && chainLocksHandler->HasChainLock(pindex->nHeight, pindex->GetBlockHash());
if (islockHash.IsNull() && !chainlocked) {
ProcessTx(tx, Params().GetConsensus());
ProcessTx(tx, allowReSigning, Params().GetConsensus());
}

LOCK(cs);
Expand Down Expand Up @@ -1421,7 +1425,7 @@ bool CInstantSendManager::ProcessPendingRetryLockTxs()
tx->GetHash().ToString());
}

ProcessTx(*tx, Params().GetConsensus());
ProcessTx(*tx, false, Params().GetConsensus());
retryCount++;
}

Expand Down
2 changes: 1 addition & 1 deletion src/llmq/quorums_instantsend.h
Original file line number Diff line number Diff line change
Expand Up @@ -120,7 +120,7 @@ class CInstantSendManager : public CRecoveredSigsListener
void InterruptWorkerThread();

public:
bool ProcessTx(const CTransaction& tx, const Consensus::Params& params);
bool ProcessTx(const CTransaction& tx, bool allowReSigning, const Consensus::Params& params);
bool CheckCanLock(const CTransaction& tx, bool printDebug, const Consensus::Params& params);
bool CheckCanLock(const COutPoint& outpoint, bool printDebug, const uint256& txHash, CAmount* retValue, const Consensus::Params& params);
bool IsLocked(const uint256& txHash);
Expand Down
19 changes: 15 additions & 4 deletions src/llmq/quorums_signing.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -743,7 +743,7 @@ void CSigningManager::UnregisterRecoveredSigsListener(CRecoveredSigsListener* l)
recoveredSigsListeners.erase(itRem, recoveredSigsListeners.end());
}

bool CSigningManager::AsyncSignIfMember(Consensus::LLMQType llmqType, const uint256& id, const uint256& msgHash)
bool CSigningManager::AsyncSignIfMember(Consensus::LLMQType llmqType, const uint256& id, const uint256& msgHash, bool allowReSign)
{
auto& params = Params().GetConsensus().llmqs.at(llmqType);

Expand All @@ -754,24 +754,31 @@ bool CSigningManager::AsyncSignIfMember(Consensus::LLMQType llmqType, const uint
{
LOCK(cs);

if (db.HasVotedOnId(llmqType, id)) {
bool hasVoted = db.HasVotedOnId(llmqType, id);
if (hasVoted) {
uint256 prevMsgHash;
db.GetVoteForId(llmqType, id, prevMsgHash);
if (msgHash != prevMsgHash) {
LogPrintf("CSigningManager::%s -- already voted for id=%s and msgHash=%s. Not voting on conflicting msgHash=%s\n", __func__,
id.ToString(), prevMsgHash.ToString(), msgHash.ToString());
return false;
} else if (allowReSign) {
LogPrint("llmq", "CSigningManager::%s -- already voted for id=%s and msgHash=%s. Resigning!\n", __func__,
id.ToString(), prevMsgHash.ToString());
} else {
LogPrint("llmq", "CSigningManager::%s -- already voted for id=%s and msgHash=%s. Not voting again.\n", __func__,
id.ToString(), prevMsgHash.ToString());
return false;
}
return false;
}

if (db.HasRecoveredSigForId(llmqType, id)) {
// no need to sign it if we already have a recovered sig
return true;
}
db.WriteVoteForId(llmqType, id, msgHash);
if (!hasVoted) {
db.WriteVoteForId(llmqType, id, msgHash);
}
}

int tipHeight;
Expand All @@ -796,6 +803,10 @@ bool CSigningManager::AsyncSignIfMember(Consensus::LLMQType llmqType, const uint
return false;
}

if (allowReSign) {
// make us re-announce all known shares (other nodes might have run into a timeout)
quorumSigSharesManager->ForceReAnnouncement(quorum, llmqType, id, msgHash);
}
quorumSigSharesManager->AsyncSign(quorum, id, msgHash);

return true;
Expand Down
2 changes: 1 addition & 1 deletion src/llmq/quorums_signing.h
Original file line number Diff line number Diff line change
Expand Up @@ -167,7 +167,7 @@ class CSigningManager
void RegisterRecoveredSigsListener(CRecoveredSigsListener* l);
void UnregisterRecoveredSigsListener(CRecoveredSigsListener* l);

bool AsyncSignIfMember(Consensus::LLMQType llmqType, const uint256& id, const uint256& msgHash);
bool AsyncSignIfMember(Consensus::LLMQType llmqType, const uint256& id, const uint256& msgHash, bool allowReSign = false);
bool HasRecoveredSig(Consensus::LLMQType llmqType, const uint256& id, const uint256& msgHash);
bool HasRecoveredSigForId(Consensus::LLMQType llmqType, const uint256& id);
bool HasRecoveredSigForSession(const uint256& signHash);
Expand Down
Loading

0 comments on commit bf7dee2

Please sign in to comment.