diff --git a/configure.ac b/configure.ac
index 6b4cba8fe284..1dd025996519 100644
--- a/configure.ac
+++ b/configure.ac
@@ -863,6 +863,39 @@ fi
 AC_CHECK_HEADERS([endian.h sys/endian.h byteswap.h stdio.h stdlib.h unistd.h strings.h sys/types.h sys/stat.h sys/select.h sys/prctl.h])
 
+# FD_ZERO may be dependent on a declaration of memcpy, e.g. in SmartOS
+# check that it fails to build without memcpy, then that it builds with
+AC_MSG_CHECKING(FD_ZERO memcpy dependence)
+AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[
+  #include <cstddef>
+  #if HAVE_SYS_SELECT_H
+  #include <sys/select.h>
+  #endif
+]],[[
+  #if HAVE_SYS_SELECT_H
+  fd_set fds;
+  FD_ZERO(&fds);
+  #endif
+]])],
+  [ AC_MSG_RESULT(no) ],
+  [
+    AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[
+      #include <cstring>
+      #if HAVE_SYS_SELECT_H
+      #include <sys/select.h>
+      #endif
+    ]], [[
+      #if HAVE_SYS_SELECT_H
+      fd_set fds;
+      FD_ZERO(&fds);
+      #endif
+    ]])],
+    [ AC_MSG_RESULT(yes); AC_DEFINE(HAVE_CSTRING_DEPENDENT_FD_ZERO, 1, [Define this symbol if FD_ZERO is dependent of a memcpy declaration being available]) ],
+    [ AC_MSG_ERROR(failed with cstring include) ]
+    )
+  ]
+)
+
 AC_CHECK_DECLS([getifaddrs, freeifaddrs],,,
     [#include <sys/types.h>
      #include <ifaddrs.h>]
diff --git a/src/Makefile.am b/src/Makefile.am
index a26a5d980bdd..fc4beae6fe56 100644
--- a/src/Makefile.am
+++ b/src/Makefile.am
@@ -655,6 +655,7 @@ libdash_util_a_SOURCES = \
   support/lockedpool.cpp \
   chainparamsbase.cpp \
   clientversion.cpp \
+  compat/glibc_sanity_fdelt.cpp \
   compat/glibc_sanity.cpp \
   compat/glibcxx_sanity.cpp \
   compat/strnlen.cpp \
diff --git a/src/compat/glibc_sanity.cpp b/src/compat/glibc_sanity.cpp
index 569cf31eeece..9bcc3a902438 100644
--- a/src/compat/glibc_sanity.cpp
+++ b/src/compat/glibc_sanity.cpp
@@ -9,7 +9,7 @@
 #include <cstddef>
 
 #if defined(HAVE_SYS_SELECT_H)
-#include <sys/select.h>
+bool sanity_test_fdelt();
 #endif
 
 extern "C" void* memcpy(void* a, const void* b, size_t c);
@@ -41,21 +41,6 @@ bool sanity_test_memcpy()
     }
     return true;
 }
-
-#if defined(HAVE_SYS_SELECT_H)
-// trigger: Call FD_SET to trigger __fdelt_chk. FORTIFY_SOURCE must be defined
-// as >0 and optimizations must be set to at least -O2.
-// test: Add a file descriptor to an empty fd_set. Verify that it has been
-// correctly added.
-bool sanity_test_fdelt()
-{
-    fd_set fds;
-    FD_ZERO(&fds);
-    FD_SET(0, &fds);
-    return FD_ISSET(0, &fds);
-}
-#endif
-
 } // namespace
 
 bool glibc_sanity_test()
diff --git a/src/compat/glibc_sanity_fdelt.cpp b/src/compat/glibc_sanity_fdelt.cpp
new file mode 100644
index 000000000000..9d13fd962508
--- /dev/null
+++ b/src/compat/glibc_sanity_fdelt.cpp
@@ -0,0 +1,26 @@
+// Copyright (c) 2009-2019 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#if defined(HAVE_CONFIG_H)
+#include
+#endif
+
+#if defined(HAVE_SYS_SELECT_H)
+#ifdef HAVE_CSTRING_DEPENDENT_FD_ZERO
+#include <cstring>
+#endif
+#include <sys/select.h>
+
+// trigger: Call FD_SET to trigger __fdelt_chk. FORTIFY_SOURCE must be defined
+// as >0 and optimizations must be set to at least -O2.
+// test: Add a file descriptor to an empty fd_set. Verify that it has been
+// correctly added.
+bool sanity_test_fdelt()
+{
+    fd_set fds;
+    FD_ZERO(&fds);
+    FD_SET(0, &fds);
+    return FD_ISSET(0, &fds);
+}
+#endif
diff --git a/src/core_write.cpp b/src/core_write.cpp
index e8580f29dcc2..ba35a821b86d 100644
--- a/src/core_write.cpp
+++ b/src/core_write.cpp
@@ -154,7 +154,7 @@ void ScriptToUniv(const CScript& script, UniValue& out, bool include_address)
     out.pushKV("type", GetTxnOutputType(type));
 
     CTxDestination address;
-    if (include_address && ExtractDestination(script, address)) {
+    if (include_address && ExtractDestination(script, address) && type != TX_PUBKEY) {
         out.pushKV("address", EncodeDestination(address));
     }
 }
@@ -170,7 +170,7 @@ void ScriptPubKeyToUniv(const CScript& scriptPubKey,
     if (fIncludeHex)
         out.pushKV("hex", HexStr(scriptPubKey));
 
-    if (!ExtractDestinations(scriptPubKey, type, addresses, nRequired)) {
+    if (!ExtractDestinations(scriptPubKey, type, addresses, nRequired) || type == TX_PUBKEY) {
         out.pushKV("type", GetTxnOutputType(type));
         return;
     }
diff --git a/src/rpc/client.cpp b/src/rpc/client.cpp
index 708314a5a36d..6fea91f51211 100644
--- a/src/rpc/client.cpp
+++ b/src/rpc/client.cpp
@@ -128,6 +128,7 @@ static const CRPCConvertParam vRPCConvertParams[] = {
     { "createpsbt", 1, "outputs" },
    { "createpsbt", 2, "locktime" },
    { "combinepsbt", 0, "txs"},
+    { "joinpsbts", 0, "txs"},
    { "finalizepsbt", 1, "extract"},
    { "converttopsbt", 1, "permitsigdata"},
    { "gettxout", 1, "n" },
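
A quick functional-test-style sketch of the core_write.cpp change above. It is not part of this patch; it assumes decodescript routes through ScriptPubKeyToUniv (as it does upstream), `node` stands for a test node handle such as self.nodes[0], and the script hex is the P2PK script from test/util/data/txcreateoutpubkey1.json:

    # Hypothetical check: a bare P2PK script still decodes with type "pubkey",
    # but no synthesized address (or reqSigs) is reported any more.
    p2pk_hex = "2102a5613bd857b7048924264d1e70e08fb2a7e6527d32b7ab1bb993ac59964ff397ac"
    decoded = node.decodescript(p2pk_hex)
    assert decoded["type"] == "pubkey"
    assert "addresses" not in decoded and "reqSigs" not in decoded
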
diff --git a/src/rpc/rawtransaction.cpp b/src/rpc/rawtransaction.cpp
index e1630ba5956b..86da263a5074 100644
--- a/src/rpc/rawtransaction.cpp
+++ b/src/rpc/rawtransaction.cpp
@@ -8,6 +8,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -38,6 +39,7 @@
 #include
 #include
+#include
 #include
 #include
 
@@ -1392,6 +1394,141 @@ UniValue converttopsbt(const JSONRPCRequest& request)
     return EncodeBase64(ssTx.str());
 }
 
+UniValue utxoupdatepsbt(const JSONRPCRequest& request)
+{
+    if (request.fHelp || request.params.size() != 1) {
+        throw std::runtime_error(
+            RPCHelpMan{"utxoupdatepsbt",
+            "\nUpdates a PSBT with UTXOs retrieved from the UTXO set or the mempool.\n",
+            {
+                {"psbt", RPCArg::Type::STR, RPCArg::Optional::NO, "A base64 string of a PSBT"}
+            },
+            RPCResult{
+                "  \"psbt\"          (string) The base64-encoded partially signed transaction with inputs updated\n"
+            },
+            RPCExamples{
+                HelpExampleCli("utxoupdatepsbt", "\"psbt\"")
+            }}.ToString());
+    }
+
+    RPCTypeCheck(request.params, {UniValue::VSTR}, true);
+
+    // Unserialize the transactions
+    PartiallySignedTransaction psbtx;
+    std::string error;
+    if (!DecodeBase64PSBT(psbtx, request.params[0].get_str(), error)) {
+        throw JSONRPCError(RPC_DESERIALIZATION_ERROR, strprintf("TX decode failed %s", error));
+    }
+
+    // Fetch previous transactions (inputs):
+    CCoinsView viewDummy;
+    CCoinsViewCache view(&viewDummy);
+    {
+        LOCK2(cs_main, mempool.cs);
+        CCoinsViewCache &viewChain = ::ChainstateActive().CoinsTip();
+        CCoinsViewMemPool viewMempool(&viewChain, mempool);
+        view.SetBackend(viewMempool); // temporarily switch cache backend to db+mempool view
+
+        for (const CTxIn& txin : psbtx.tx->vin) {
+            view.AccessCoin(txin.prevout); // Load entries from viewChain into view; can fail.
+        }
+
+        view.SetBackend(viewDummy); // switch back to avoid locking mempool for too long
+    }
+
+    // Fill the inputs
+    for (unsigned int i = 0; i < psbtx.tx->vin.size(); ++i) {
+        PSBTInput& input = psbtx.inputs.at(i);
+
+        if (input.non_witness_utxo) {
+            continue;
+        }
+
+        const Coin& coin = view.AccessCoin(psbtx.tx->vin[i].prevout);
+
+        std::vector<std::vector<unsigned char>> solutions_data;
+        Solver(coin.out.scriptPubKey, solutions_data);
+    }
+
+    CDataStream ssTx(SER_NETWORK, PROTOCOL_VERSION);
+    ssTx << psbtx;
+    return EncodeBase64(ssTx.str());
+}
+
+UniValue joinpsbts(const JSONRPCRequest& request)
+{
+    if (request.fHelp || request.params.size() != 1) {
+        throw std::runtime_error(
+            RPCHelpMan{"joinpsbts",
+            "\nJoins multiple distinct PSBTs with different inputs and outputs into one PSBT with inputs and outputs from all of the PSBTs\n"
+            "No input in any of the PSBTs can be in more than one of the PSBTs.\n",
+            {
+                {"txs", RPCArg::Type::ARR, RPCArg::Optional::NO, "A json array of base64 strings of partially signed transactions",
+                    {
+                        {"psbt", RPCArg::Type::STR, RPCArg::Optional::NO, "A base64 string of a PSBT"}
+                    }}
+            },
+            RPCResult{
+                "  \"psbt\"          (string) The base64-encoded partially signed transaction\n"
+            },
+            RPCExamples{
+                HelpExampleCli("joinpsbts", "\"psbt\"")
+            }}.ToString());
+    }
+
+    RPCTypeCheck(request.params, {UniValue::VARR}, true);
+
+    // Unserialize the transactions
+    std::vector<PartiallySignedTransaction> psbtxs;
+    UniValue txs = request.params[0].get_array();
+
+    if (txs.size() <= 1) {
+        throw JSONRPCError(RPC_INVALID_PARAMETER, "At least two PSBTs are required to join PSBTs.");
+    }
+
+    int32_t best_version = 1;
+    uint32_t best_locktime = 0xffffffff;
+    for (unsigned int i = 0; i < txs.size(); ++i) {
+        PartiallySignedTransaction psbtx;
+        std::string error;
+        if (!DecodeBase64PSBT(psbtx, txs[i].get_str(), error)) {
+            throw JSONRPCError(RPC_DESERIALIZATION_ERROR, strprintf("TX decode failed %s", error));
+        }
+        psbtxs.push_back(psbtx);
+        // Choose the highest version number
+        if (psbtx.tx->nVersion > best_version) {
+            best_version = psbtx.tx->nVersion;
+        }
+        // Choose the lowest lock time
+        if (psbtx.tx->nLockTime < best_locktime) {
+            best_locktime = psbtx.tx->nLockTime;
+        }
+    }
+
+    // Create a blank psbt where everything will be added
+    PartiallySignedTransaction merged_psbt;
+    merged_psbt.tx = CMutableTransaction();
+    merged_psbt.tx->nVersion = best_version;
+    merged_psbt.tx->nLockTime = best_locktime;
+
+    // Merge
+    for (auto& psbt : psbtxs) {
+        for (unsigned int i = 0; i < psbt.tx->vin.size(); ++i) {
+            if (!merged_psbt.AddInput(psbt.tx->vin[i], psbt.inputs[i])) {
+                throw JSONRPCError(RPC_INVALID_PARAMETER, strprintf("Input %s:%d exists in multiple PSBTs", psbt.tx->vin[i].prevout.hash.ToString().c_str(), psbt.tx->vin[i].prevout.n));
+            }
+        }
+        for (unsigned int i = 0; i < psbt.tx->vout.size(); ++i) {
+            merged_psbt.AddOutput(psbt.tx->vout[i], psbt.outputs[i]);
+        }
+        merged_psbt.unknown.insert(psbt.unknown.begin(), psbt.unknown.end());
+    }
+
+    CDataStream ssTx(SER_NETWORK, PROTOCOL_VERSION);
+    ssTx << merged_psbt;
+    return EncodeBase64(ssTx.str());
+}
+
 // clang-format off
 static const CRPCCommand commands[] =
 { //  category              name                      actor (function)         argNames
@@ -1409,6 +1546,8 @@
     { "rawtransactions",    "finalizepsbt",           &finalizepsbt,           {"psbt", "extract"} },
     { "rawtransactions",    "createpsbt",             &createpsbt,             {"inputs","outputs","locktime"} },
     { "rawtransactions",    "converttopsbt",          &converttopsbt,          {"hexstring","permitsigdata"} },
+    { "rawtransactions",    "utxoupdatepsbt",         &utxoupdatepsbt,         {"psbt"} },
+    { "rawtransactions",    "joinpsbts",              &joinpsbts,              {"txs"} },
 
     { "blockchain",         "gettxoutproof",          &gettxoutproof,          {"txids", "blockhash"} },
     { "blockchain",         "verifytxoutproof",       &verifytxoutproof,       {"proof"} },
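
To make the merge rules in joinpsbts concrete, here is a hedged RPC-level sketch. It is not part of this patch; `node`, `utxo_a`/`utxo_b` (each a {"txid": ..., "vout": ...} dict) and `addr_a`/`addr_b` are assumed to exist in a functional-test context, and assert_raises_rpc_error comes from test_framework.util:

    # The joined transaction takes the highest nVersion and the lowest nLockTime.
    psbt_a = node.createpsbt([utxo_a], {addr_a: 1}, 500)   # nLockTime 500
    psbt_b = node.createpsbt([utxo_b], {addr_b: 1}, 100)   # nLockTime 100
    joined = node.joinpsbts([psbt_a, psbt_b])
    tx = node.decodepsbt(joined)["tx"]
    assert tx["locktime"] == 100                           # lowest lock time wins
    assert len(tx["vin"]) == 2 and len(tx["vout"]) == 2
    # Joining fewer than two PSBTs is rejected with RPC_INVALID_PARAMETER (-8).
    assert_raises_rpc_error(-8, "At least two PSBTs are required", node.joinpsbts, [psbt_a])
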
"rawtransactions", "joinpsbts", &joinpsbts, {"txs"} }, { "blockchain", "gettxoutproof", &gettxoutproof, {"txids", "blockhash"} }, { "blockchain", "verifytxoutproof", &verifytxoutproof, {"proof"} }, diff --git a/src/validation.cpp b/src/validation.cpp index 652e0ed10160..1a8bcb3c9c96 100644 --- a/src/validation.cpp +++ b/src/validation.cpp @@ -2649,15 +2649,14 @@ void static UpdateTip(const CBlockIndex *pindexNew, const CChainParams& chainPar if (nUpgraded > 0) AppendWarning(warningMessages, strprintf(_("%d of last 100 blocks have unexpected version"), nUpgraded)); } - std::string strMessage = strprintf("%s: new best=%s height=%d version=0x%08x log2_work=%.8f tx=%lu date='%s' progress=%f cache=%.1fMiB(%utxo)", __func__, + std::string strMessage = strprintf("%s: new best=%s height=%d version=0x%08x log2_work=%.8g tx=%lu date='%s' progress=%f cache=%.1fMiB(%utxo) evodb_cache=%.1fMiB%s\n", __func__, pindexNew->GetBlockHash().ToString(), pindexNew->nHeight, pindexNew->nVersion, log(pindexNew->nChainWork.getdouble())/log(2.0), (unsigned long)pindexNew->nChainTx, FormatISO8601DateTime(pindexNew->GetBlockTime()), - GuessVerificationProgress(chainParams.TxData(), pindexNew), ::ChainstateActive().CoinsTip().DynamicMemoryUsage() * (1.0 / (1<<20)), ::ChainstateActive().CoinsTip().GetCacheSize()); - strMessage += strprintf(" evodb_cache=%.1fMiB", evoDb->GetMemoryUsage() * (1.0 / (1<<20))); - if (!warningMessages.empty()) - strMessage += strprintf(" warning='%s'", warningMessages); - LogPrintf("%s\n", strMessage); + GuessVerificationProgress(chainParams.TxData(), pindexNew), ::ChainstateActive().CoinsTip().DynamicMemoryUsage() * (1.0 / (1<<20)), ::ChainstateActive().CoinsTip().GetCacheSize(), + evoDb->GetMemoryUsage() * (1.0 / (1<<20)), + !warningMessages.empty() ? strprintf(" warning='%s'", warningMessages) : ""); + } /** Disconnect m_chain's tip. 
diff --git a/test/functional/feature_block.py b/test/functional/feature_block.py
index 2cddb81fac2b..467fa8262b14 100755
--- a/test/functional/feature_block.py
+++ b/test/functional/feature_block.py
@@ -304,7 +304,7 @@ def run_test(self):
         self.send_blocks([b24], success=False, reject_reason='bad-blk-length', reconnect=True)
 
         b25 = self.next_block(25, spend=out[7])
-        self.send_blocks([b25], False, request_block=False, reconnect=True)
+        self.send_blocks([b25], False, force_send=True, reconnect=True)
 
         # Create blocks with a coinbase input script size out of range
         # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6  (3)
@@ -594,14 +594,14 @@ def run_test(self):
         while b47.sha256 <= target:
             b47.nNonce += 1
         b47.rehash()
-        self.send_blocks([b47], False, request_block=False)
+        self.send_blocks([b47], False, force_send=True, reject_reason='high-hash')
 
         self.log.info("Reject a block with a timestamp >2 hours in the future")
         self.move_tip(44)
         b48 = self.next_block(48, solve=False)
         b48.nTime = int(self.mocktime) + 60 * 60 * 3
         b48.solve()
-        self.send_blocks([b48], False, request_block=False)
+        self.send_blocks([b48], False, force_send=True, reject_reason='time-too-new')
 
         self.log.info("Reject a block with invalid merkle hash")
         self.move_tip(44)
@@ -615,7 +615,7 @@ def run_test(self):
         b50 = self.next_block(50)
         b50.nBits = b50.nBits - 1
         b50.solve()
-        self.send_blocks([b50], False, request_block=False, reconnect=True)
+        self.send_blocks([b50], False, force_send=True, reject_reason='bad-diffbits', reconnect=True)
 
         self.log.info("Reject a block with two coinbase transactions")
         self.move_tip(44)
@@ -645,7 +645,7 @@ def run_test(self):
         b54 = self.next_block(54, spend=out[15])
         b54.nTime = b35.nTime - 1
         b54.solve()
-        self.send_blocks([b54], False, request_block=False)
+        self.send_blocks([b54], False, force_send=True, reject_reason='time-too-old')
 
         # valid timestamp
         self.move_tip(53)
@@ -863,7 +863,7 @@ def run_test(self):
         tx.vin.append(CTxIn(COutPoint(b64a.vtx[1].sha256, 0)))
         b64a = self.update_block("64a", [tx])
         assert_equal(len(b64a.serialize()), MAX_BLOCK_SIZE + 8)
-        self.send_blocks([b64a], success=False, reject_reason='non-canonical ReadCompactSize():')
+        self.send_blocks([b64a], success=False, reject_reason='non-canonical ReadCompactSize()')
 
         # dashd doesn't disconnect us for sending a bloated block, but if we subsequently
         # resend the header message, it won't send us the getdata message again. Just
@@ -1117,11 +1117,11 @@ def run_test(self):
 
         self.move_tip(77)
         b80 = self.next_block(80, spend=out[25])
-        self.send_blocks([b80], False, request_block=False)
+        self.send_blocks([b80], False, force_send=True)
         self.save_spendable_output()
 
         b81 = self.next_block(81, spend=out[26])
-        self.send_blocks([b81], False, request_block=False)  # other chain is same length
+        self.send_blocks([b81], False, force_send=True)  # other chain is same length
         self.save_spendable_output()
 
         b82 = self.next_block(82, spend=out[27])
@@ -1228,7 +1228,7 @@ def run_test(self):
         blocks2 = []
         for i in range(89, LARGE_REORG_SIZE + 89):
             blocks2.append(self.next_block("alt" + str(i)))
-        self.send_blocks(blocks2, False, request_block=False)
+        self.send_blocks(blocks2, False, force_send=True)
 
         # extend alt chain to trigger re-org
         block = self.next_block("alt" + str(chain1_tip + 1))
@@ -1237,7 +1237,7 @@ def run_test(self):
         # ... and re-org back to the first chain
         self.move_tip(chain1_tip)
         block = self.next_block(chain1_tip + 1)
-        self.send_blocks([block], False, request_block=False)
+        self.send_blocks([block], False, force_send=True)
         block = self.next_block(chain1_tip + 2)
         self.send_blocks([block], True, timeout=960)
 
@@ -1350,14 +1350,15 @@ def reconnect_p2p(self):
         self.nodes[0].disconnect_p2ps()
         self.bootstrap_p2p()
 
-    def send_blocks(self, blocks, success=True, reject_reason=None, request_block=True, reconnect=False, timeout=60):
+    def send_blocks(self, blocks, success=True, reject_reason=None, force_send=False, reconnect=False, timeout=60):
         """Sends blocks to test node. Syncs and verifies that tip has advanced to most recent block.
 
         Call with success = False if the tip shouldn't advance to the most recent block."""
-        self.nodes[0].p2p.send_blocks_and_test(blocks, self.nodes[0], success=success, reject_reason=reject_reason, request_block=request_block, timeout=timeout, expect_disconnect=reconnect)
+        self.nodes[0].p2p.send_blocks_and_test(blocks, self.nodes[0], success=success, reject_reason=reject_reason, force_send=force_send, timeout=timeout, expect_disconnect=reconnect)
         if reconnect:
             self.reconnect_p2p()
 
+
 if __name__ == '__main__':
     FullBlockTest().main()
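
The pattern behind these call-site changes, sketched with placeholder blocks (`bad_block` and `good_block` are illustrative, not from the test): a block whose header is already invalid is never requested via getdata after a headers announcement, so it has to be pushed unsolicited with force_send=True before the node will process it and log the expected reject reason.

    self.send_blocks([bad_block], success=False, force_send=True, reject_reason='time-too-new')
    self.send_blocks([good_block], success=True)   # default headers-first path, tip advances
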
diff --git a/test/functional/p2p_invalid_block.py b/test/functional/p2p_invalid_block.py
index 570244b70a06..65f90ec5a3c9 100755
--- a/test/functional/p2p_invalid_block.py
+++ b/test/functional/p2p_invalid_block.py
@@ -81,7 +81,7 @@ def run_test(self):
         assert_equal(orig_hash, block2.rehash())
         assert block2_orig.vtx != block2.vtx
 
-        node.p2p.send_blocks_and_test([block2], node, success=False, request_block=False, reject_reason='bad-txns-duplicate')
+        node.p2p.send_blocks_and_test([block2], node, success=False, reject_reason='bad-txns-duplicate')
 
         # Check transactions for duplicate inputs (CVE-2018-17144)
         self.log.info("Test duplicate input block.")
@@ -104,7 +104,8 @@ def run_test(self):
         block3.rehash()
         block3.solve()
 
-        node.p2p.send_blocks_and_test([block3], node, success=False, request_block=False, reject_reason='bad-cb-amount')
+        node.p2p.send_blocks_and_test([block3], node, success=False, reject_reason='bad-cb-amount')
+
         # Complete testing of CVE-2012-2459 by sending the original block.
         # It should be accepted even though it has the same hash as the mutated one.
diff --git a/test/functional/rpc_psbt.py b/test/functional/rpc_psbt.py
index 0cfeb8509d72..fdceaa2386a4 100755
--- a/test/functional/rpc_psbt.py
+++ b/test/functional/rpc_psbt.py
@@ -7,6 +7,7 @@
 
 import os
 import json
+from decimal import Decimal
 
 from test_framework.test_framework import BitcoinTestFramework
 from test_framework.util import assert_equal, assert_raises_rpc_error, find_output
@@ -17,7 +18,8 @@ def set_test_params(self):
         self.setup_clean_chain = False
         self.num_nodes = 3
         # TODO: remove -txindex. Currently required for getrawtransaction call.
-        self.extra_args = [[],
+        self.extra_args = [
+            ["-txindex"],
             ["-txindex"],
             ["-txindex"]
         ]
@@ -195,5 +197,49 @@ def run_test(self):
         # Test decoding error: invalid base64
         assert_raises_rpc_error(-22, "TX decode failed invalid base64", self.nodes[0].decodepsbt, ";definitely not base64;")
 
+        # Send to all types of addresses
+        addr1 = self.nodes[1].getnewaddress()
+        txid1 = self.nodes[0].sendtoaddress(addr1, 11)
+        vout1 = find_output(self.nodes[0], txid1, 11)
+        addr2 = self.nodes[1].getnewaddress()
+        txid2 = self.nodes[0].sendtoaddress(addr2, 11)
+        vout2 = find_output(self.nodes[0], txid2, 11)
+        addr3 = self.nodes[1].getnewaddress()
+        txid3 = self.nodes[0].sendtoaddress(addr3, 11)
+        vout3 = find_output(self.nodes[0], txid3, 11)
+        self.sync_all()
+
+        # Update a PSBT with UTXOs from the node
+        # Inputs should not be filled because they are non-witness
+        psbt = self.nodes[1].createpsbt([{"txid":txid1, "vout":vout1},{"txid":txid2, "vout":vout2},{"txid":txid3, "vout":vout3}], {self.nodes[0].getnewaddress():32.999})
+        decoded = self.nodes[1].decodepsbt(psbt)
+        assert "witness_utxo" not in decoded['inputs'][0] and "non_witness_utxo" not in decoded['inputs'][0]
+        assert "witness_utxo" not in decoded['inputs'][1] and "non_witness_utxo" not in decoded['inputs'][1]
+        assert "witness_utxo" not in decoded['inputs'][2] and "non_witness_utxo" not in decoded['inputs'][2]
+        updated = self.nodes[1].utxoupdatepsbt(psbt)
+        decoded = self.nodes[1].decodepsbt(updated)
+        assert "witness_utxo" not in decoded['inputs'][0] and "non_witness_utxo" not in decoded['inputs'][0]
+        assert "witness_utxo" not in decoded['inputs'][1] and "non_witness_utxo" not in decoded['inputs'][1]
+        assert "witness_utxo" not in decoded['inputs'][2] and "non_witness_utxo" not in decoded['inputs'][2]
+
+        # Two PSBTs with a common input should not be joinable
+        psbt1 = self.nodes[1].createpsbt([{"txid":txid1, "vout":vout1}], {self.nodes[0].getnewaddress():Decimal('10.999')})
+        assert_raises_rpc_error(-8, "exists in multiple PSBTs", self.nodes[1].joinpsbts, [psbt1, updated])
+
+        # Join two distinct PSBTs
+        addr4 = self.nodes[1].getnewaddress()
+        txid4 = self.nodes[0].sendtoaddress(addr4, 5)
+        vout4 = find_output(self.nodes[0], txid4, 5)
+        self.nodes[0].generate(6)
+        self.sync_all()
+        psbt2 = self.nodes[1].createpsbt([{"txid":txid4, "vout":vout4}], {self.nodes[0].getnewaddress():Decimal('4.999')})
+        psbt2 = self.nodes[1].walletprocesspsbt(psbt2)['psbt']
+        psbt2_decoded = self.nodes[0].decodepsbt(psbt2)
+        assert "final_scriptwitness" not in psbt2_decoded['inputs'][0] and "final_scriptSig" in psbt2_decoded['inputs'][0]
+        joined = self.nodes[0].joinpsbts([psbt, psbt2])
+        joined_decoded = self.nodes[0].decodepsbt(joined)
+        assert len(joined_decoded['inputs']) == 4 and len(joined_decoded['outputs']) == 2 and "final_scriptwitness" not in joined_decoded['inputs'][3] and "final_scriptSig" not in joined_decoded['inputs'][3]
+
+
 if __name__ == '__main__':
     PSBTTest().main()
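
A short usage sketch to complement the test above (the variable names reuse the test's psbt/psbt2; the extra walletprocesspsbt call is illustrative and not asserted by the patch): joinpsbts concatenates PSBTs for different transactions, whereas combinepsbt merges PSBTs that describe the same unsigned transaction.

    rejoined = self.nodes[0].joinpsbts([psbt, psbt2])
    assert_equal(len(self.nodes[0].decodepsbt(rejoined)['inputs']), 4)
    # node1 holds the keys for these inputs in the test above, so it can sign the joined PSBT:
    signed = self.nodes[1].walletprocesspsbt(rejoined)['psbt']
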
diff --git a/test/functional/test_framework/mininode.py b/test/functional/test_framework/mininode.py
index 0bbef10ac010..24fe3680dc81 100755
--- a/test/functional/test_framework/mininode.py
+++ b/test/functional/test_framework/mininode.py
@@ -614,14 +614,14 @@ def on_getheaders(self, message):
         if response is not None:
             self.send_message(response)
 
-    def send_blocks_and_test(self, blocks, node, *, success=True, request_block=True, reject_reason=None, expect_disconnect=False, timeout=60):
+    def send_blocks_and_test(self, blocks, node, *, success=True, force_send=False, reject_reason=None, expect_disconnect=False, timeout=60):
         """Send blocks to test node and test whether the tip advances.
 
          - add all blocks to our block_store
          - send a headers message for the final block
          - the on_getheaders handler will ensure that any getheaders are responded to
-         - if request_block is True: wait for getdata for each of the blocks. The on_getdata handler will
-           ensure that any getdata messages are responded to
+         - if force_send is False: wait for getdata for each of the blocks. The on_getdata handler will
+           ensure that any getdata messages are responded to. Otherwise send the full block unsolicited.
          - if success is True: assert that the node's tip advances to the most recent block
          - if success is False: assert that the node's tip doesn't advance
          - if reject_reason is set: assert that the correct reject message is logged"""
@@ -633,9 +633,11 @@ def send_blocks_and_test(self, blocks, node, *, success=True, request_block=True
 
         reject_reason = [reject_reason] if reject_reason else []
         with node.assert_debug_log(expected_msgs=reject_reason):
-            self.send_message(msg_headers([CBlockHeader(block) for block in blocks]))
-
-            if request_block:
+            if force_send:
+                for b in blocks:
+                    self.send_message(msg_block(block=b))
+            else:
+                self.send_message(msg_headers([CBlockHeader(block) for block in blocks]))
                 wait_until(lambda: blocks[-1].sha256 in self.getdata_requests, timeout=timeout, lock=mininode_lock)
 
             if expect_disconnect:
diff --git a/test/util/data/txcreateoutpubkey1.json b/test/util/data/txcreateoutpubkey1.json
index c7699b2ac604..75315c24103c 100644
--- a/test/util/data/txcreateoutpubkey1.json
+++ b/test/util/data/txcreateoutpubkey1.json
@@ -14,11 +14,7 @@
             "scriptPubKey": {
                 "asm": "02a5613bd857b7048924264d1e70e08fb2a7e6527d32b7ab1bb993ac59964ff397 OP_CHECKSIG",
                 "hex": "2102a5613bd857b7048924264d1e70e08fb2a7e6527d32b7ab1bb993ac59964ff397ac",
-                "reqSigs": 1,
-                "type": "pubkey",
-                "addresses": [
-                    "XqV6rHmzCyFUKF2jSVg8ZkZ7oRhGLVxifK"
-                ]
+                "type": "pubkey"
             }
         }
     ],
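
Finally, a framework-level usage sketch of the new force_send flag. P2PDataStore and add_p2p_connection are existing framework APIs; `node`, `bad_block` and `good_block` are assumed to exist in the calling test:

    p2p = node.add_p2p_connection(P2PDataStore())
    # Push an invalid block unsolicited and check the logged reject reason ...
    p2p.send_blocks_and_test([bad_block], node, success=False, force_send=True, reject_reason='bad-cb-amount')
    # ... versus the default path: announce headers, serve getdata, and expect the tip to advance.
    p2p.send_blocks_and_test([good_block], node, success=True)
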