diff --git a/.travis.yml b/.travis.yml index 19837a348dfb..5a81f2049473 100644 --- a/.travis.yml +++ b/.travis.yml @@ -92,14 +92,16 @@ script: - depends/$HOST/native/bin/ccache --max-size=$CCACHE_SIZE - test -n "$USE_SHELL" && eval '"$USE_SHELL" -c "./autogen.sh"' || ./autogen.sh - mkdir build && cd build - - ../configure $BITCOIN_CONFIG_ALL $BITCOIN_CONFIG || ( cat config.log && false) + - ../configure --cache-file=config.cache $BITCOIN_CONFIG_ALL $BITCOIN_CONFIG || ( cat config.log && false) + - make distdir VERSION=$HOST + - cd dashcore-$HOST + - ./configure --cache-file=../config.cache $BITCOIN_CONFIG_ALL $BITCOIN_CONFIG || ( cat config.log && false) - make $MAKEJOBS $GOAL || ( echo "Build failure. Verbose build follows." && make $GOAL V=1 ; false ) - - make distdir - export LD_LIBRARY_PATH=$TRAVIS_BUILD_DIR/depends/$HOST/lib - if [ "$RUN_TESTS" = "true" ]; then travis_wait 30 make $MAKEJOBS check VERBOSE=1; fi - if [ "$RUN_TESTS" = "true" ]; then qa/pull-tester/rpc-tests.py --coverage; fi - - cd .. - - if [ "$DOCKER_BUILD" = "true" ]; then BUILD_DIR=build ./docker/build-docker.sh; fi + - cd ../.. + - if [ "$DOCKER_BUILD" = "true" ]; then BUILD_DIR=build/dashcore-$HOST ./docker/build-docker.sh; fi after_script: - echo $TRAVIS_COMMIT_RANGE - echo $TRAVIS_COMMIT_LOG diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index f5674da94adc..eac2894164cf 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -53,7 +53,28 @@ about Git. - Create pull request The title of the pull request should be prefixed by the component or area that -the pull request affects. Examples: +the pull request affects. Valid areas are: + + - *Consensus* for changes to consensus critical code + - *Docs* for changes to the documentation + - *Qt* for changes to bitcoin-qt + - *Mining* for changes to the mining code + - *Net* or *P2P* for changes to the peer-to-peer network code + - *RPC/REST/ZMQ* for changes to the RPC, REST or ZMQ APIs + - *Scripts and tools* for changes to the scripts and tools + - *Tests* for changes to the bitcoin unit tests or QA tests + - *Trivial* should **only** be used for PRs that do not change generated + executable code. Notably, refactors (change of function arguments and code + reorganization) and changes in behavior should **not** be marked as trivial. + Examples of trivial PRs are changes to: + - comments + - whitespace + - variable names + - logging and messages + - *Utils and libraries* for changes to the utils and libraries + - *Wallet* for changes to the wallet code + +Examples: Consensus: Add new opcode for BIP-XXXX OP_CHECKAWESOMESIG Net: Automatically create hidden service, listen on Tor diff --git a/COPYING b/COPYING index f9561cadf757..a98226cf0934 100644 --- a/COPYING +++ b/COPYING @@ -1,6 +1,6 @@ The MIT License (MIT) -Copyright (c) 2009-2016 The Bitcoin Core developers +Copyright (c) 2009-2017 The Bitcoin Core developers Copyright (c) 2014-2017 The Dash Core developers Permission is hereby granted, free of charge, to any person obtaining a copy diff --git a/build-aux/m4/bitcoin_qt.m4 b/build-aux/m4/bitcoin_qt.m4 index ba37522a74b9..e11c9b90ea6b 100644 --- a/build-aux/m4/bitcoin_qt.m4 +++ b/build-aux/m4/bitcoin_qt.m4 @@ -488,8 +488,8 @@ AC_DEFUN([_BITCOIN_QT_FIND_LIBS_WITHOUT_PKGCONFIG],[ ]) BITCOIN_QT_CHECK(AC_CHECK_LIB([z] ,[main],,AC_MSG_WARN([zlib not found. Assuming qt has it built-in]))) - BITCOIN_QT_CHECK(AC_CHECK_LIB([png] ,[main],,AC_MSG_WARN([libpng not found. Assuming qt has it built-in]))) - BITCOIN_QT_CHECK(AC_CHECK_LIB([jpeg] ,[main],,AC_MSG_WARN([libjpeg not found.
Assuming qt has it built-in]))) + BITCOIN_QT_CHECK(AC_SEARCH_LIBS([png_error] ,[qtpng png],,AC_MSG_WARN([libpng not found. Assuming qt has it built-in]))) + BITCOIN_QT_CHECK(AC_SEARCH_LIBS([jpeg_create_decompress] ,[qtjpeg jpeg],,AC_MSG_WARN([libjpeg not found. Assuming qt has it built-in]))) BITCOIN_QT_CHECK(AC_SEARCH_LIBS([pcre16_exec], [qtpcre pcre16],,AC_MSG_WARN([libpcre16 not found. Assuming qt has it built-in]))) BITCOIN_QT_CHECK(AC_SEARCH_LIBS([hb_ot_tags_from_script] ,[qtharfbuzzng harfbuzz],,AC_MSG_WARN([libharfbuzz not found. Assuming qt has it built-in or support is disabled]))) BITCOIN_QT_CHECK(AC_CHECK_LIB([${QT_LIB_PREFIX}Core] ,[main],,BITCOIN_QT_FAIL(lib$QT_LIB_PREFIXCore not found))) diff --git a/contrib/debian/copyright b/contrib/debian/copyright index 2cd065cb45ed..eca0c5dd3ef8 100644 --- a/contrib/debian/copyright +++ b/contrib/debian/copyright @@ -5,7 +5,7 @@ Upstream-Contact: Satoshi Nakamoto Source: https://github.com/bitcoin/bitcoin Files: * -Copyright: 2009-2016, Bitcoin Core Developers +Copyright: 2009-2017, Bitcoin Core Developers License: Expat Comment: The Bitcoin Core Developers encompasses the current developers listed on bitcoin.org, as well as the numerous contributors to the project. diff --git a/contrib/devtools/clang-format-diff.py b/contrib/devtools/clang-format-diff.py index 13d2573b9ff8..7ea49b65e177 100755 --- a/contrib/devtools/clang-format-diff.py +++ b/contrib/devtools/clang-format-diff.py @@ -128,7 +128,7 @@ def main(): line_count = int(match.group(3)) if line_count == 0: continue - end_line = start_line + line_count - 1; + end_line = start_line + line_count - 1 lines_by_file.setdefault(filename, []).extend( ['-lines', str(start_line) + ':' + str(end_line)]) @@ -147,7 +147,7 @@ def main(): stderr=None, stdin=subprocess.PIPE) stdout, stderr = p.communicate() if p.returncode != 0: - sys.exit(p.returncode); + sys.exit(p.returncode) if not args.i: with open(filename) as f: diff --git a/contrib/devtools/github-merge.py b/contrib/devtools/github-merge.py index aae966a8f6b0..0cee0921b122 100755 --- a/contrib/devtools/github-merge.py +++ b/contrib/devtools/github-merge.py @@ -15,7 +15,7 @@ # In case of a clean merge that is accepted by the user, the local branch with # name $BRANCH is overwritten with the merged result, and optionally pushed. 
from __future__ import division,print_function,unicode_literals -import os,sys +import os from sys import stdin,stdout,stderr import argparse import subprocess diff --git a/contrib/devtools/optimize-pngs.py b/contrib/devtools/optimize-pngs.py index be43ac8eea3f..6086533cdee1 100755 --- a/contrib/devtools/optimize-pngs.py +++ b/contrib/devtools/optimize-pngs.py @@ -39,7 +39,7 @@ def content_hash(filename): if extension.lower() == '.png': print("optimizing "+file+"..."), file_path = os.path.join(absFolder, file) - fileMetaMap = {'file' : file, 'osize': os.path.getsize(file_path), 'sha256Old' : file_hash(file_path)}; + fileMetaMap = {'file' : file, 'osize': os.path.getsize(file_path), 'sha256Old' : file_hash(file_path)} fileMetaMap['contentHashPre'] = content_hash(file_path) pngCrushOutput = "" diff --git a/contrib/devtools/test-security-check.py b/contrib/devtools/test-security-check.py index c0f120392e62..18f9835faa25 100755 --- a/contrib/devtools/test-security-check.py +++ b/contrib/devtools/test-security-check.py @@ -7,7 +7,6 @@ ''' from __future__ import division,print_function import subprocess -import sys import unittest def write_testcode(filename): diff --git a/contrib/linearize/example-linearize.cfg b/contrib/linearize/example-linearize.cfg index d8fe8a55f2ff..a5ef48b2636a 100644 --- a/contrib/linearize/example-linearize.cfg +++ b/contrib/linearize/example-linearize.cfg @@ -15,7 +15,7 @@ output_file=/home/example/Downloads/bootstrap.dat hashlist=hashlist.txt genesis=00000ffd590b1485b3caadc19b22e6379c733355108f107a430458cdf3407ab6 -# Maxmimum size in bytes of out-of-order blocks cache in memory +# Maximum size in bytes of out-of-order blocks cache in memory out_of_order_cache_sz = 10000000 # Do we want the reverse the hash bytes coming from getblockhash? diff --git a/contrib/linearize/linearize-data.py b/contrib/linearize/linearize-data.py index 3b7351f67953..d8d9547f7387 100755 --- a/contrib/linearize/linearize-data.py +++ b/contrib/linearize/linearize-data.py @@ -8,16 +8,10 @@ # from __future__ import print_function, division -try: # Python 3 - import http.client as httplib -except ImportError: # Python 2 - import httplib -import json import struct import re import os import os.path -import base64 import sys import hashlib import dash_hash diff --git a/contrib/linearize/linearize-hashes.py b/contrib/linearize/linearize-hashes.py index cf4e54dd706b..ebd9f1e75a0b 100755 --- a/contrib/linearize/linearize-hashes.py +++ b/contrib/linearize/linearize-hashes.py @@ -13,7 +13,6 @@ except ImportError: # Python 2 import httplib import json -import struct import re import base64 import sys diff --git a/contrib/qos/README.md b/contrib/qos/README.md index 1fcbb1a6f44e..3fc79d0dfcf0 100644 --- a/contrib/qos/README.md +++ b/contrib/qos/README.md @@ -1,5 +1,5 @@ -### Qos ### +### QoS (Quality of service) ### -This is a Linux bash script that will set up tc to limit the outgoing bandwidth for connections to the Dash network. It limits outbound TCP traffic with a source or destination port of 9999, but not if the destination IP is within a LAN (defined as 192.168.x.x). +This is a Linux bash script that will set up tc to limit the outgoing bandwidth for connections to the Dash network. It limits outbound TCP traffic with a source or destination port of 9999, but not if the destination IP is within a LAN. This means one can have an always-on bitcoind instance running, and another local bitcoind/bitcoin-qt instance which connects to this node and receives blocks from it. 
diff --git a/contrib/qos/tc.sh b/contrib/qos/tc.sh index e615ce12fe17..c3eb64d280a4 100644 --- a/contrib/qos/tc.sh +++ b/contrib/qos/tc.sh @@ -1,4 +1,4 @@ -# Copyright (c) 2013 The Bitcoin Core developers +# Copyright (c) 2017 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. @@ -8,8 +8,10 @@ IF="eth0" LINKCEIL="1gbit" #limit outbound Dash protocol traffic to this rate LIMIT="160kbit" -#defines the address space for which you wish to disable rate limiting -LOCALNET="192.168.0.0/16" +#defines the IPv4 address space for which you wish to disable rate limiting +LOCALNET_V4="192.168.0.0/16" +#defines the IPv6 address space for which you wish to disable rate limiting +LOCALNET_V6="fe80::/10" #delete existing rules tc qdisc del dev ${IF} root @@ -28,6 +30,12 @@ tc class add dev ${IF} parent 1:1 classid 1:11 htb rate ${LIMIT} ceil ${LIMIT} p tc filter add dev ${IF} parent 1: protocol ip prio 1 handle 1 fw classid 1:10 tc filter add dev ${IF} parent 1: protocol ip prio 2 handle 2 fw classid 1:11 +if [ ! -z "${LOCALNET_V6}" ] ; then + # v6 cannot have the same priority value as v4 + tc filter add dev ${IF} parent 1: protocol ipv6 prio 3 handle 1 fw classid 1:10 + tc filter add dev ${IF} parent 1: protocol ipv6 prio 4 handle 2 fw classid 1:11 +fi + #delete any existing rules #disable for now #ret=0 @@ -37,9 +45,15 @@ tc filter add dev ${IF} parent 1: protocol ip prio 2 handle 2 fw classid 1:11 #done #limit outgoing traffic to and from port 9999. but not when dealing with a host on the local network -# (defined by $LOCALNET) -# --set-mark marks packages matching these criteria with the number "2" -# these packages are filtered by the tc filter with "handle 2" +# (defined by $LOCALNET_V4 and $LOCALNET_V6) +# --set-mark marks packages matching these criteria with the number "2" (v4) +# --set-mark marks packages matching these criteria with the number "4" (v6) +# these packets are filtered by the tc filter with "handle 2" # this filter sends the packages into the 1:11 class, and this class is limited to ${LIMIT} -iptables -t mangle -A OUTPUT -p tcp -m tcp --dport 9999 ! -d ${LOCALNET} -j MARK --set-mark 0x2 -iptables -t mangle -A OUTPUT -p tcp -m tcp --sport 9999 ! -d ${LOCALNET} -j MARK --set-mark 0x2 +iptables -t mangle -A OUTPUT -p tcp -m tcp --dport 9999 ! -d ${LOCALNET_V4} -j MARK --set-mark 0x2 +iptables -t mangle -A OUTPUT -p tcp -m tcp --sport 9999 ! -d ${LOCALNET_V4} -j MARK --set-mark 0x2 + +if [ ! -z "${LOCALNET_V6}" ] ; then + ip6tables -t mangle -A OUTPUT -p tcp -m tcp --dport 9999 ! -d ${LOCALNET_V6} -j MARK --set-mark 0x4 + ip6tables -t mangle -A OUTPUT -p tcp -m tcp --sport 9999 ! -d ${LOCALNET_V6} -j MARK --set-mark 0x4 +fi diff --git a/contrib/zmq/zmq_sub.py b/contrib/zmq/zmq_sub.py index bf6c4dd00099..47087e875b59 100755 --- a/contrib/zmq/zmq_sub.py +++ b/contrib/zmq/zmq_sub.py @@ -3,7 +3,6 @@ # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
-import array import binascii import zmq import struct @@ -25,7 +24,7 @@ msg = zmqSubSocket.recv_multipart() topic = str(msg[0].decode("utf-8")) body = msg[1] - sequence = "Unknown"; + sequence = "Unknown" if len(msg[-1]) == 4: msgSequence = struct.unpack('/dev/null | grep '^CPU'` test x"${CPU}" != x && { echo "${CPU}-unknown-linux-${LIBC}"; exit; } ;; + mips64el:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; openrisc*:Linux:*:*) echo or1k-unknown-linux-${LIBC} exit ;; @@ -1032,6 +1035,9 @@ EOF ppcle:Linux:*:*) echo powerpcle-unknown-linux-${LIBC} exit ;; + riscv32:Linux:*:* | riscv64:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; s390:Linux:*:* | s390x:Linux:*:*) echo ${UNAME_MACHINE}-ibm-linux-${LIBC} exit ;; diff --git a/depends/config.site.in b/depends/config.site.in index 27e3aedd8ed4..3d7c9fd43c7a 100644 --- a/depends/config.site.in +++ b/depends/config.site.in @@ -63,7 +63,6 @@ LDFLAGS="-L$depends_prefix/lib $LDFLAGS" CC="@CC@" CXX="@CXX@" OBJC="${CC}" -OBJCXX="${CXX}" CCACHE=$depends_prefix/native/bin/ccache PYTHONPATH=$depends_prefix/native/lib/python/dist-packages:$PYTHONPATH diff --git a/depends/config.sub b/depends/config.sub index 6d86a1e2f77b..7e792b4ae17b 100755 --- a/depends/config.sub +++ b/depends/config.sub @@ -1,8 +1,8 @@ #! /bin/sh # Configuration validation subroutine script. -# Copyright 1992-2016 Free Software Foundation, Inc. +# Copyright 1992-2017 Free Software Foundation, Inc. -timestamp='2016-05-10' +timestamp='2017-01-01' # This file is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by @@ -67,7 +67,7 @@ Report bugs and patches to ." version="\ GNU config.sub ($timestamp) -Copyright 1992-2016 Free Software Foundation, Inc. +Copyright 1992-2017 Free Software Foundation, Inc. This is free software; see the source for copying conditions. There is NO warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE." 
@@ -117,7 +117,7 @@ case $maybe_os in nto-qnx* | linux-gnu* | linux-android* | linux-dietlibc | linux-newlib* | \ linux-musl* | linux-uclibc* | uclinux-uclibc* | uclinux-gnu* | kfreebsd*-gnu* | \ knetbsd*-gnu* | netbsd*-gnu* | netbsd*-eabi* | \ - kopensolaris*-gnu* | \ + kopensolaris*-gnu* | cloudabi*-eabi* | \ storm-chaos* | os2-emx* | rtmk-nova*) os=-$maybe_os basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'` @@ -301,6 +301,7 @@ case $basic_machine in | open8 | or1k | or1knd | or32 \ | pdp10 | pdp11 | pj | pjl \ | powerpc | powerpc64 | powerpc64le | powerpcle \ + | pru \ | pyramid \ | riscv32 | riscv64 \ | rl78 | rx \ @@ -428,6 +429,7 @@ case $basic_machine in | orion-* \ | pdp10-* | pdp11-* | pj-* | pjl-* | pn-* | power-* \ | powerpc-* | powerpc64-* | powerpc64le-* | powerpcle-* \ + | pru-* \ | pyramid-* \ | riscv32-* | riscv64-* \ | rl78-* | romp-* | rs6000-* | rx-* \ @@ -643,6 +645,14 @@ case $basic_machine in basic_machine=m68k-bull os=-sysv3 ;; + e500v[12]) + basic_machine=powerpc-unknown + os=$os"spe" + ;; + e500v[12]-*) + basic_machine=powerpc-`echo $basic_machine | sed 's/^[^-]*-//'` + os=$os"spe" + ;; ebmon29k) basic_machine=a29k-amd os=-ebmon @@ -1022,7 +1032,7 @@ case $basic_machine in ppc-* | ppcbe-*) basic_machine=powerpc-`echo $basic_machine | sed 's/^[^-]*-//'` ;; - ppcle | powerpclittle | ppc-le | powerpc-little) + ppcle | powerpclittle) basic_machine=powerpcle-unknown ;; ppcle-* | powerpclittle-*) @@ -1032,7 +1042,7 @@ case $basic_machine in ;; ppc64-*) basic_machine=powerpc64-`echo $basic_machine | sed 's/^[^-]*-//'` ;; - ppc64le | powerpc64little | ppc64-le | powerpc64-little) + ppc64le | powerpc64little) basic_machine=powerpc64le-unknown ;; ppc64le-* | powerpc64little-*) @@ -1387,9 +1397,9 @@ case $os in | -bosx* | -nextstep* | -cxux* | -aout* | -elf* | -oabi* \ | -ptx* | -coff* | -ecoff* | -winnt* | -domain* | -vsta* \ | -udi* | -eabi* | -lites* | -ieee* | -go32* | -aux* \ - | -chorusos* | -chorusrdb* | -cegcc* \ + | -chorusos* | -chorusrdb* | -cegcc* | -glidix* \ | -cygwin* | -msys* | -pe* | -psos* | -moss* | -proelf* | -rtems* \ - | -mingw32* | -mingw64* | -linux-gnu* | -linux-android* \ + | -midipix* | -mingw32* | -mingw64* | -linux-gnu* | -linux-android* \ | -linux-newlib* | -linux-musl* | -linux-uclibc* \ | -uxpv* | -beos* | -mpeix* | -udk* | -moxiebox* \ | -interix* | -uwin* | -mks* | -rhapsody* | -darwin* | -opened* \ @@ -1399,7 +1409,7 @@ case $os in | -morphos* | -superux* | -rtmk* | -rtmk-nova* | -windiss* \ | -powermax* | -dnix* | -nx6 | -nx7 | -sei* | -dragonfly* \ | -skyos* | -haiku* | -rdos* | -toppers* | -drops* | -es* \ - | -onefs* | -tirtos* | -phoenix*) + | -onefs* | -tirtos* | -phoenix* | -fuchsia* | -redox*) # Remember, each alternative MUST END IN *, to match a version number. 
;; -qnx*) @@ -1628,6 +1638,9 @@ case $basic_machine in sparc-* | *-sun) os=-sunos4.1.1 ;; + pru-*) + os=-elf + ;; *-be) os=-beos ;; diff --git a/depends/packages/dbus.mk b/depends/packages/dbus.mk index 8ac9ab742bfe..90ddcb923f21 100644 --- a/depends/packages/dbus.mk +++ b/depends/packages/dbus.mk @@ -1,8 +1,8 @@ package=dbus -$(package)_version=1.8.6 +$(package)_version=1.10.14 $(package)_download_path=http://dbus.freedesktop.org/releases/dbus $(package)_file_name=$(package)-$($(package)_version).tar.gz -$(package)_sha256_hash=eded83ca007b719f32761e60fd8b9ffd0f5796a4caf455b01b5a5ef740ebd23f +$(package)_sha256_hash=23238f70353e38ce5ca183ebc9525c0d97ac00ef640ad29cf794782af6e6a083 $(package)_dependencies=expat define $(package)_set_vars diff --git a/depends/packages/freetype.mk b/depends/packages/freetype.mk index 7cea28ff0bbf..76b025c46318 100644 --- a/depends/packages/freetype.mk +++ b/depends/packages/freetype.mk @@ -1,8 +1,8 @@ package=freetype -$(package)_version=2.6.3 +$(package)_version=2.7.1 $(package)_download_path=http://download.savannah.gnu.org/releases/$(package) $(package)_file_name=$(package)-$($(package)_version).tar.bz2 -$(package)_sha256_hash=371e707aa522acf5b15ce93f11183c725b8ed1ee8546d7b3af549863045863a2 +$(package)_sha256_hash=3a3bb2c4e15ffb433f2032f50a5b5a92558206822e22bfe8cbe339af4aa82f88 define $(package)_set_vars $(package)_config_opts=--without-zlib --without-png --disable-static diff --git a/depends/packages/native_ccache.mk b/depends/packages/native_ccache.mk index a3a58604e583..4ed61a49e9ca 100644 --- a/depends/packages/native_ccache.mk +++ b/depends/packages/native_ccache.mk @@ -1,8 +1,8 @@ package=native_ccache -$(package)_version=3.3.1 +$(package)_version=3.3.3 $(package)_download_path=https://samba.org/ftp/ccache $(package)_file_name=ccache-$($(package)_version).tar.bz2 -$(package)_sha256_hash=cb6e4bafbb19ba0a2ec43386b123a5f92a20e1e3384c071d5d13e0cb3c84bf73 +$(package)_sha256_hash=2985bc5e32ebe38d2958d508eb54ddcad39eed909489c0c2988035214597ca54 define $(package)_set_vars $(package)_config_opts= diff --git a/depends/packages/native_ds_store.mk b/depends/packages/native_ds_store.mk index 8e902af1b6e9..49f5829ac1c0 100644 --- a/depends/packages/native_ds_store.mk +++ b/depends/packages/native_ds_store.mk @@ -1,9 +1,9 @@ package=native_ds_store -$(package)_version=c80c23706eae +$(package)_version=1.1.0 $(package)_download_path=https://bitbucket.org/al45tair/ds_store/get -$(package)_download_file=$($(package)_version).tar.bz2 +$(package)_download_file=v$($(package)_version).tar.bz2 $(package)_file_name=$(package)-$($(package)_version).tar.bz2 -$(package)_sha256_hash=ce1aa412211610c63d567bbe3e06213006a2d5ba5d76d89399c151b5472cb0da +$(package)_sha256_hash=921596764d71d1bbd3297a90ef6d286f718794d667e4f81d91d14053525d64c1 $(package)_install_libdir=$(build_prefix)/lib/python/dist-packages $(package)_dependencies=native_biplist diff --git a/depends/packages/packages.mk b/depends/packages/packages.mk index 4cf44385b88d..088723ebd0d5 100644 --- a/depends/packages/packages.mk +++ b/depends/packages/packages.mk @@ -2,7 +2,7 @@ packages:=boost openssl libevent zeromq native_packages := native_ccache qt_native_packages = native_protobuf -qt_packages = qrencode protobuf +qt_packages = qrencode protobuf zlib qt_x86_64_linux_packages:=qt expat dbus libxcb xcb_proto libXau xproto freetype fontconfig libX11 xextproto libXext xtrans qt_i686_linux_packages:=$(qt_x86_64_linux_packages) diff --git a/depends/packages/qt.mk b/depends/packages/qt.mk index bdde15aa173d..bbfdb766ed69 100644 
--- a/depends/packages/qt.mk +++ b/depends/packages/qt.mk @@ -1,30 +1,30 @@ PACKAGE=qt -$(package)_version=5.6.1 -$(package)_download_path=http://download.qt.io/official_releases/qt/5.6/$($(package)_version)/submodules +$(package)_version=5.7.1 +$(package)_download_path=http://download.qt.io/official_releases/qt/5.7/$($(package)_version)/submodules $(package)_suffix=opensource-src-$($(package)_version).tar.gz $(package)_file_name=qtbase-$($(package)_suffix) -$(package)_sha256_hash=0ac67cf8d66d52b995f96c31c4b48117a1afb3db99eaa93e20ccd8f7f55f7fde -$(package)_dependencies=openssl +$(package)_sha256_hash=95f83e532d23b3ddbde7973f380ecae1bac13230340557276f75f2e37984e410 +$(package)_dependencies=openssl zlib $(package)_linux_dependencies=freetype fontconfig libxcb libX11 xproto libXext $(package)_build_subdir=qtbase $(package)_qt_libs=corelib network widgets gui plugins testlib -$(package)_patches=mac-qmake.conf configure-xcoderun.patch mingw-uuidof.patch pidlist_absolute.patch fix-xcb-include-order.patch fix_qt_pkgconfig.patch +$(package)_patches=mac-qmake.conf mingw-uuidof.patch pidlist_absolute.patch fix-xcb-include-order.patch fix_qt_pkgconfig.patch $(package)_qttranslations_file_name=qttranslations-$($(package)_suffix) -$(package)_qttranslations_sha256_hash=dcc1534d247babca1840cb6d0a000671801a341ea352d0535474f86adadaf028 +$(package)_qttranslations_sha256_hash=3a15aebd523c6d89fb97b2d3df866c94149653a26d27a00aac9b6d3020bc5a1d $(package)_qttools_file_name=qttools-$($(package)_suffix) -$(package)_qttools_sha256_hash=e0f845de28c31230dfa428f0190ccb3b91d1fc02481b1f064698ae4ef8376aa1 +$(package)_qttools_sha256_hash=22d67de915cb8cd93e16fdd38fa006224ad9170bd217c2be1e53045a8dd02f0f $(package)_extra_sources = $($(package)_qttranslations_file_name) $(package)_extra_sources += $($(package)_qttools_file_name) define $(package)_set_vars $(package)_config_opts_release = -release -$(package)_config_opts_debug = -debug +$(package)_config_opts_debug = -debug $(package)_config_opts += -bindir $(build_prefix)/bin -$(package)_config_opts += -c++11 +$(package)_config_opts += -c++std c++11 $(package)_config_opts += -confirm-license $(package)_config_opts += -dbus-runtime $(package)_config_opts += -hostprefix $(build_prefix) @@ -46,7 +46,6 @@ $(package)_config_opts += -no-linuxfb $(package)_config_opts += -no-libudev $(package)_config_opts += -no-mitshm $(package)_config_opts += -no-mtdev -$(package)_config_opts += -no-nis $(package)_config_opts += -no-pulseaudio $(package)_config_opts += -no-openvg $(package)_config_opts += -no-reduce-relocations @@ -74,11 +73,13 @@ $(package)_config_opts += -prefix $(host_prefix) $(package)_config_opts += -qt-libpng $(package)_config_opts += -qt-libjpeg $(package)_config_opts += -qt-pcre -$(package)_config_opts += -qt-zlib +$(package)_config_opts += -system-zlib $(package)_config_opts += -reduce-exports $(package)_config_opts += -static $(package)_config_opts += -silent $(package)_config_opts += -v +$(package)_config_opts += -no-feature-printer +$(package)_config_opts += -no-feature-printdialog ifneq ($(build_os),darwin) $(package)_config_opts_darwin = -xplatform macx-clang-linux @@ -125,6 +126,7 @@ endef define $(package)_preprocess_cmds sed -i.old "s|updateqm.commands = \$$$$\$$$$LRELEASE|updateqm.commands = $($(package)_extract_dir)/qttools/bin/lrelease|" qttranslations/translations/translations.pro && \ + sed -i.old "/updateqm.depends =/d" qttranslations/translations/translations.pro && \ sed -i.old "s/src_plugins.depends = src_sql src_xml src_network/src_plugins.depends = 
src_xml src_network/" qtbase/src/src.pro && \ sed -i.old "s|X11/extensions/XIproto.h|X11/X.h|" qtbase/src/plugins/platforms/xcb/qxcbxsettings.cpp && \ sed -i.old 's/if \[ "$$$$XPLATFORM_MAC" = "yes" \]; then xspecvals=$$$$(macSDKify/if \[ "$$$$BUILD_ON_MAC" = "yes" \]; then xspecvals=$$$$(macSDKify/' qtbase/configure && \ @@ -134,17 +136,17 @@ define $(package)_preprocess_cmds cp -f qtbase/mkspecs/macx-clang/Info.plist.app qtbase/mkspecs/macx-clang-linux/ &&\ cp -f qtbase/mkspecs/macx-clang/qplatformdefs.h qtbase/mkspecs/macx-clang-linux/ &&\ cp -f $($(package)_patch_dir)/mac-qmake.conf qtbase/mkspecs/macx-clang-linux/qmake.conf && \ - patch -p1 < $($(package)_patch_dir)/configure-xcoderun.patch && \ patch -p1 < $($(package)_patch_dir)/mingw-uuidof.patch && \ patch -p1 < $($(package)_patch_dir)/pidlist_absolute.patch && \ patch -p1 < $($(package)_patch_dir)/fix-xcb-include-order.patch && \ patch -p1 < $($(package)_patch_dir)/fix_qt_pkgconfig.patch && \ - echo "QMAKE_CFLAGS += $($(package)_cflags) $($(package)_cppflags)" >> qtbase/mkspecs/common/gcc-base.conf && \ - echo "QMAKE_CXXFLAGS += $($(package)_cxxflags) $($(package)_cppflags)" >> qtbase/mkspecs/common/gcc-base.conf && \ - echo "QMAKE_LFLAGS += $($(package)_ldflags)" >> qtbase/mkspecs/common/gcc-base.conf && \ - sed -i.old "s|QMAKE_CFLAGS = |QMAKE_CFLAGS = $($(package)_cflags) $($(package)_cppflags) |" qtbase/mkspecs/win32-g++/qmake.conf && \ - sed -i.old "s|QMAKE_LFLAGS = |QMAKE_LFLAGS = $($(package)_ldflags) |" qtbase/mkspecs/win32-g++/qmake.conf && \ - sed -i.old "s|QMAKE_CXXFLAGS = |QMAKE_CXXFLAGS = $($(package)_cxxflags) $($(package)_cppflags) |" qtbase/mkspecs/win32-g++/qmake.conf + echo "!host_build: QMAKE_CFLAGS += $($(package)_cflags) $($(package)_cppflags)" >> qtbase/mkspecs/common/gcc-base.conf && \ + echo "!host_build: QMAKE_CXXFLAGS += $($(package)_cxxflags) $($(package)_cppflags)" >> qtbase/mkspecs/common/gcc-base.conf && \ + echo "!host_build: QMAKE_LFLAGS += $($(package)_ldflags)" >> qtbase/mkspecs/common/gcc-base.conf && \ + sed -i.old "s|QMAKE_CFLAGS = |!host_build: QMAKE_CFLAGS = $($(package)_cflags) $($(package)_cppflags) |" qtbase/mkspecs/win32-g++/qmake.conf && \ + sed -i.old "s|QMAKE_LFLAGS = |!host_build: QMAKE_LFLAGS = $($(package)_ldflags) |" qtbase/mkspecs/win32-g++/qmake.conf && \ + sed -i.old "s|QMAKE_CXXFLAGS = |!host_build: QMAKE_CXXFLAGS = $($(package)_cxxflags) $($(package)_cppflags) |" qtbase/mkspecs/win32-g++/qmake.conf + endef define $(package)_config_cmds @@ -152,6 +154,8 @@ define $(package)_config_cmds export PKG_CONFIG_LIBDIR=$(host_prefix)/lib/pkgconfig && \ export PKG_CONFIG_PATH=$(host_prefix)/share/pkgconfig && \ ./configure $($(package)_config_opts) && \ + echo "host_build: QT_CONFIG ~= s/system-zlib/zlib" >> mkspecs/qconfig.pri && \ + echo "CONFIG += force_bootstrap" >> mkspecs/qconfig.pri && \ $(MAKE) sub-src-clean && \ cd ../qttranslations && ../qtbase/bin/qmake qttranslations.pro -o Makefile && \ cd translations && ../../qtbase/bin/qmake translations.pro -o Makefile && cd ../.. 
&&\ diff --git a/depends/packages/zlib.mk b/depends/packages/zlib.mk new file mode 100644 index 000000000000..7ff5d00bbdc6 --- /dev/null +++ b/depends/packages/zlib.mk @@ -0,0 +1,25 @@ +package=zlib +$(package)_version=1.2.11 +$(package)_download_path=http://www.zlib.net +$(package)_file_name=$(package)-$($(package)_version).tar.gz +$(package)_sha256_hash=c3e5e9fdd5004dcb542feda5ee4f0ff0744628baf8ed2dd5d66f8ca1197cb1a1 + +define $(package)_set_vars +$(package)_build_opts= CC="$($(package)_cc)" +$(package)_build_opts+=CFLAGS="$($(package)_cflags) $($(package)_cppflags) -fPIC" +$(package)_build_opts+=AR="$($(package)_ar)" +$(package)_build_opts+=RANLIB="$($(package)_ranlib)" +endef + +define $(package)_config_cmds + ./configure --static --prefix=$(host_prefix) +endef + +define $(package)_build_cmds + $(MAKE) $($(package)_build_opts) libz.a +endef + +define $(package)_stage_cmds + $(MAKE) DESTDIR=$($(package)_staging_dir) install $($(package)_build_opts) +endef + diff --git a/depends/patches/qt/configure-xcoderun.patch b/depends/patches/qt/configure-xcoderun.patch deleted file mode 100644 index 08286d8420bb..000000000000 --- a/depends/patches/qt/configure-xcoderun.patch +++ /dev/null @@ -1,25 +0,0 @@ ---- old/qtbase/configure -+++ new/qtbase/configure -@@ -543,7 +543,7 @@ if [ "$BUILD_ON_MAC" = "yes" ]; then - exit 2 - fi - -- if ! /usr/bin/xcrun -find xcrun >/dev/null 2>&1; then -+ if ! /usr/bin/xcrun -find xcodebuild >/dev/null 2>&1; then - echo >&2 - echo " Xcode not set up properly. You may need to confirm the license" >&2 - echo " agreement by running /usr/bin/xcodebuild without arguments." >&2 -diff --git a/mkspecs/features/mac/default_pre.prf b/mkspecs/features/mac/default_pre.prf -index 0cc8cd6..5df99d1 100644 ---- old/qtbase/mkspecs/features/mac/default_pre.prf -+++ new/qtbase/mkspecs/features/mac/default_pre.prf -@@ -12,7 +12,7 @@ isEmpty(QMAKE_XCODE_DEVELOPER_PATH) { - error("Xcode is not installed in $${QMAKE_XCODE_DEVELOPER_PATH}. Please use xcode-select to choose Xcode installation path.") - - # Make sure Xcode is set up properly -- isEmpty($$list($$system("/usr/bin/xcrun -find xcrun 2>/dev/null"))): \ -+ isEmpty($$list($$system("/usr/bin/xcrun -find xcodebuild 2>/dev/null"))): \ - error("Xcode not set up properly. 
You may need to confirm the license agreement by running /usr/bin/xcodebuild.") - } - --- diff --git a/depends/patches/qt/fix-xcb-include-order.patch b/depends/patches/qt/fix-xcb-include-order.patch index c7dbebedce83..ec2bc17d9bd9 100644 --- a/depends/patches/qt/fix-xcb-include-order.patch +++ b/depends/patches/qt/fix-xcb-include-order.patch @@ -1,6 +1,6 @@ ---- old/qtbase/src/plugins/platforms/xcb/xcb_qpa_lib.pro 2015-03-17 02:06:42.705930685 +0000 -+++ new/qtbase/src/plugins/platforms/xcb/xcb_qpa_lib.pro 2015-03-17 02:08:41.281926351 +0000 -@@ -74,8 +74,6 @@ +--- old/qtbase/src/plugins/platforms/xcb/xcb_qpa_lib.pro 2015-03-17 ++++ new/qtbase/src/plugins/platforms/xcb/xcb_qpa_lib.pro 2015-03-17 +@@ -76,8 +76,6 @@ DEFINES += $$QMAKE_DEFINES_XCB LIBS += $$QMAKE_LIBS_XCB @@ -9,18 +9,18 @@ CONFIG += qpa/genericunixfontdatabase -@@ -87,7 +85,8 @@ +@@ -89,7 +87,8 @@ contains(QT_CONFIG, xcb-qt) { DEFINES += XCB_USE_RENDER XCB_DIR = ../../../3rdparty/xcb - INCLUDEPATH += $$XCB_DIR/include $$XCB_DIR/sysinclude + QMAKE_CFLAGS += -I$$XCB_DIR/include -I$$XCB_DIR/sysinclude $$QMAKE_CFLAGS_XCB + QMAKE_CXXFLAGS += -I$$XCB_DIR/include -I$$XCB_DIR/sysinclude $$QMAKE_CFLAGS_XCB - LIBS += -lxcb -L$$OUT_PWD/xcb-static -lxcb-static + LIBS += -lxcb -L$$MODULE_BASE_OUTDIR/lib -lxcb-static$$qtPlatformTargetSuffix() } else { LIBS += -lxcb -lxcb-image -lxcb-icccm -lxcb-sync -lxcb-xfixes -lxcb-shm -lxcb-randr -lxcb-shape -lxcb-keysyms -lxcb-xinerama ---- old/qtbase/src/plugins/platforms/xcb/xcb-static/xcb-static.pro 2015-03-17 02:07:04.641929383 +0000 -+++ new/qtbase/src/plugins/platforms/xcb/xcb-static/xcb-static.pro 2015-03-17 02:10:15.485922059 +0000 +--- old/qtbase/src/plugins/platforms/xcb/xcb-static/xcb-static.pro ++++ new/qtbase/src/plugins/platforms/xcb/xcb-static/xcb-static.pro @@ -9,7 +9,8 @@ XCB_DIR = ../../../../3rdparty/xcb @@ -31,8 +31,8 @@ QMAKE_CXXFLAGS += $$QMAKE_CFLAGS_XCB QMAKE_CFLAGS += $$QMAKE_CFLAGS_XCB ---- old/qtbase/src/plugins/platforms/xcb/xcb-plugin.pro 2015-07-24 16:02:59.530038830 -0400 -+++ new/qtbase/src/plugins/platforms/xcb/xcb-plugin.pro 2015-07-24 16:01:22.106037459 -0400 +--- old/qtbase/src/plugins/platforms/xcb/xcb-plugin.pro ++++ new/qtbase/src/plugins/platforms/xcb/xcb-plugin.pro @@ -6,6 +6,13 @@ qxcbmain.cpp OTHER_FILES += xcb.json README diff --git a/depends/patches/qt/fix_qt_pkgconfig.patch b/depends/patches/qt/fix_qt_pkgconfig.patch index 3772db4f8b80..34302a9f2d2e 100644 --- a/depends/patches/qt/fix_qt_pkgconfig.patch +++ b/depends/patches/qt/fix_qt_pkgconfig.patch @@ -1,6 +1,6 @@ ---- old/qtbase/mkspecs/features/qt_module.prf 2016-03-17 02:06:42.705930685 +0000 -+++ new/qtbase/mkspecs/features/qt_module.prf 2016-03-17 02:06:42.705930685 +0000 -@@ -244,7 +244,7 @@ +--- old/qtbase/mkspecs/features/qt_module.prf ++++ new/qtbase/mkspecs/features/qt_module.prf +@@ -245,7 +245,7 @@ load(qt_targets) # this builds on top of qt_common diff --git a/depends/patches/qt/mac-qmake.conf b/depends/patches/qt/mac-qmake.conf index a6d0070cca64..ca70d30b15e9 100644 --- a/depends/patches/qt/mac-qmake.conf +++ b/depends/patches/qt/mac-qmake.conf @@ -1,6 +1,5 @@ MAKEFILE_GENERATOR = UNIX CONFIG += app_bundle incremental global_init_link_order lib_version_first plugin_no_soname absolute_library_soname -DEFINES += QT_NO_PRINTER QT_NO_PRINTDIALOG QMAKE_INCREMENTAL_STYLE = sublib include(../common/macx.conf) include(../common/gcc-base-mac.conf) @@ -11,14 +10,14 @@ QMAKE_XCODE_VERSION=4.3 QMAKE_XCODE_DEVELOPER_PATH=/Developer QMAKE_MACOSX_DEPLOYMENT_TARGET = $${MAC_MIN_VERSION} 
QMAKE_MAC_SDK=macosx -QMAKE_MAC_SDK.macosx.path = $${MAC_SDK_PATH} +QMAKE_MAC_SDK.macosx.Path = $${MAC_SDK_PATH} QMAKE_MAC_SDK.macosx.platform_name = macosx -QMAKE_MAC_SDK.macosx.version = $${MAC_SDK_VERSION} -QMAKE_MAC_SDK.macosx.platform_path = /phony -QMAKE_CFLAGS += -target $${MAC_TARGET} -QMAKE_OBJECTIVE_CFLAGS += $$QMAKE_CFLAGS -QMAKE_CXXFLAGS += $$QMAKE_CFLAGS -QMAKE_LFLAGS += -target $${MAC_TARGET} -mlinker-version=$${MAC_LD64_VERSION} +QMAKE_MAC_SDK.macosx.SDKVersion = $${MAC_SDK_VERSION} +QMAKE_MAC_SDK.macosx.PlatformPath = /phony +!host_build: QMAKE_CFLAGS += -target $${MAC_TARGET} +!host_build: QMAKE_OBJECTIVE_CFLAGS += $$QMAKE_CFLAGS +!host_build: QMAKE_CXXFLAGS += $$QMAKE_CFLAGS +!host_build: QMAKE_LFLAGS += -target $${MAC_TARGET} -mlinker-version=$${MAC_LD64_VERSION} QMAKE_AR = $${CROSS_COMPILE}ar cq QMAKE_RANLIB=$${CROSS_COMPILE}ranlib QMAKE_LIBTOOL=$${CROSS_COMPILE}libtool diff --git a/depends/patches/qt/mingw-uuidof.patch b/depends/patches/qt/mingw-uuidof.patch index 975366e61229..fb21923c8c36 100644 --- a/depends/patches/qt/mingw-uuidof.patch +++ b/depends/patches/qt/mingw-uuidof.patch @@ -1,6 +1,6 @@ ---- old/qtbase/src/plugins/platforms/windows/qwindowscontext.cpp 2015-06-20 17:40:20.956781548 -0400 -+++ new/qtbase/src/plugins/platforms/windows/qwindowscontext.cpp 2015-06-20 17:29:32.052772416 -0400 -@@ -69,7 +69,7 @@ +--- old/qtbase/src/plugins/platforms/windows/qwindowscontext.cpp ++++ new/qtbase/src/plugins/platforms/windows/qwindowscontext.cpp +@@ -77,7 +77,7 @@ #include #include #include @@ -9,8 +9,8 @@ # include #endif -@@ -762,7 +762,7 @@ - HWND_MESSAGE, NULL, (HINSTANCE)GetModuleHandle(0), NULL); +@@ -814,7 +814,7 @@ + HWND_MESSAGE, NULL, static_cast(GetModuleHandle(0)), NULL); } -#ifndef Q_OS_WINCE @@ -18,16 +18,16 @@ // Re-engineered from the inline function _com_error::ErrorMessage(). // We cannot use it directly since it uses swprintf_s(), which is not // present in the MSVCRT.DLL found on Windows XP (QTBUG-35617). -@@ -781,7 +781,7 @@ - return QStringLiteral("IDispatch error #") + QString::number(wCode); - return QStringLiteral("Unknown error 0x0") + QString::number(comError.Error(), 16); +@@ -833,7 +833,7 @@ + return QString::asprintf("IDispatch error #%u", uint(wCode)); + return QString::asprintf("Unknown error 0x0%x", uint(comError.Error())); } -#endif // !Q_OS_WINCE +#endif // !defined(Q_OS_WINCE) && (!defined(USE___UUIDOF) || (defined(USE___UUIDOF) && USE___UUIDOF == 1)) /*! \brief Common COM error strings. 
-@@ -846,12 +846,12 @@ +@@ -901,12 +901,12 @@ default: break; } diff --git a/depends/patches/qt/pidlist_absolute.patch b/depends/patches/qt/pidlist_absolute.patch index 0b49c050dcc1..c79282417905 100644 --- a/depends/patches/qt/pidlist_absolute.patch +++ b/depends/patches/qt/pidlist_absolute.patch @@ -1,7 +1,7 @@ diff -dur old/qtbase/src/plugins/platforms/windows/qwindowscontext.h new/qtbase/src/plugins/platforms/windows/qwindowscontext.h ---- old/qtbase/src/plugins/platforms/windows/qwindowscontext.h 2015-06-29 22:04:40.000000000 +0200 -+++ new/qtbase/src/plugins/platforms/windows/qwindowscontext.h 2015-11-01 12:55:59.751234846 +0100 -@@ -124,10 +124,18 @@ +--- old/qtbase/src/plugins/platforms/windows/qwindowscontext.h ++++ new/qtbase/src/plugins/platforms/windows/qwindowscontext.h +@@ -136,10 +136,18 @@ inline void init(); typedef HRESULT (WINAPI *SHCreateItemFromParsingName)(PCWSTR, IBindCtx *, const GUID&, void **); @@ -21,9 +21,9 @@ diff -dur old/qtbase/src/plugins/platforms/windows/qwindowscontext.h new/qtbase/ SHCreateItemFromParsingName sHCreateItemFromParsingName; SHGetKnownFolderIDList sHGetKnownFolderIDList; diff -dur old/qtbase/src/plugins/platforms/windows/qwindowsdialoghelpers.cpp new/qtbase/src/plugins/platforms/windows/qwindowsdialoghelpers.cpp ---- old/qtbase/src/plugins/platforms/windows/qwindowsdialoghelpers.cpp 2015-06-29 22:04:40.000000000 +0200 -+++ new/qtbase/src/plugins/platforms/windows/qwindowsdialoghelpers.cpp 2015-11-01 13:41:09.503149772 +0100 -@@ -1008,7 +1008,11 @@ +--- old/qtbase/src/plugins/platforms/windows/qwindowsdialoghelpers.cpp ++++ new/qtbase/src/plugins/platforms/windows/qwindowsdialoghelpers.cpp +@@ -1016,7 +1016,11 @@ qWarning() << __FUNCTION__ << ": Invalid CLSID: " << url.path(); return Q_NULLPTR; } diff --git a/doc/man/dash-qt.1 b/doc/man/dash-qt.1 index d6ab4c0c0dfd..c784835465bc 100644 --- a/doc/man/dash-qt.1 +++ b/doc/man/dash-qt.1 @@ -80,11 +80,13 @@ Specify pid file (default: dashd.pid) .HP \fB\-prune=\fR .IP -Reduce storage requirements by pruning (deleting) old blocks. This mode -is incompatible with \fB\-txindex\fR and \fB\-rescan\fR. Warning: Reverting -this setting requires re\-downloading the entire blockchain. -(default: 0 = disable pruning blocks, >945 = target size in MiB -to use for block files) +Reduce storage requirements by enabling pruning (deleting) of old blocks. +This allows the pruneblockchain RPC to be called to delete specific blocks, +and enables automatic pruning of old blocks if a target size in MiB is +provided. This mode is incompatible with \fB\-txindex\fR and \fB\-rescan\fR. +Warning: Reverting this setting requires re\-downloading the entire blockchain. +(default: 0 = disable pruning blocks, 1 = allow manual pruning via RPC, >945 = +automatically prune block files to stay under the specified target size in MiB) .HP \fB\-reindex\-chainstate\fR .IP diff --git a/doc/man/dashd.1 b/doc/man/dashd.1 index 84b4c7b88802..39e6dfb43c0d 100644 --- a/doc/man/dashd.1 +++ b/doc/man/dashd.1 @@ -85,11 +85,13 @@ Specify pid file (default: dashd.pid) .HP \fB\-prune=\fR .IP -Reduce storage requirements by pruning (deleting) old blocks. This mode -is incompatible with \fB\-txindex\fR and \fB\-rescan\fR. Warning: Reverting -this setting requires re\-downloading the entire blockchain. -(default: 0 = disable pruning blocks, >945 = target size in MiB -to use for block files) +Reduce storage requirements by enabling pruning (deleting) of old blocks. 
+This allows the pruneblockchain RPC to be called to delete specific blocks, +and enables automatic pruning of old blocks if a target size in MiB is +provided. This mode is incompatible with \fB\-txindex\fR and \fB\-rescan\fR. +Warning: Reverting this setting requires re\-downloading the entire blockchain. +(default: 0 = disable pruning blocks, 1 = allow manual pruning via RPC, >945 = +automatically prune block files to stay under the specified target size in MiB) .HP \fB\-reindex\-chainstate\fR .IP diff --git a/qa/pull-tester/rpc-tests.py b/qa/pull-tester/rpc-tests.py index 9745289080da..e84680572c22 100755 --- a/qa/pull-tester/rpc-tests.py +++ b/qa/pull-tester/rpc-tests.py @@ -161,6 +161,7 @@ 'signmessages.py', 'import-rescan.py', 'rpcnamedargs.py', + 'listsinceblock.py', ] if ENABLE_ZMQ: testScripts.append('zmq_test.py') diff --git a/qa/rpc-tests/assumevalid.py b/qa/rpc-tests/assumevalid.py new file mode 100755 index 000000000000..e4bc22951b39 --- /dev/null +++ b/qa/rpc-tests/assumevalid.py @@ -0,0 +1,191 @@ +#!/usr/bin/env python3 +# Copyright (c) 2014-2016 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +''' +assumevalid.py + +Test logic for skipping signature validation on blocks which we've assumed +valid (https://github.com/bitcoin/bitcoin/pull/9484) + +We build a chain that includes an invalid signature for one of the +transactions: + + 0: genesis block + 1: block 1 with coinbase transaction output. + 2-101: bury that block with 100 blocks so the coinbase transaction + output can be spent + 102: a block containing a transaction spending the coinbase + transaction output. The transaction has an invalid signature. + 103-2202: bury the bad block with just over two weeks' worth of blocks + (2100 blocks) + +Start three nodes: + + - node0 has no -assumevalid parameter. Try to sync to block 2202. It will + reject block 102 and only sync as far as block 101 + - node1 has -assumevalid set to the hash of block 102. Try to sync to + block 2202. node1 will sync all the way to block 2202. + - node2 has -assumevalid set to the hash of block 102. Try to sync to + block 200. node2 will reject block 102 since it's assumed valid, but it + isn't buried by at least two weeks' work. +''' + +from test_framework.mininode import * +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import * +from test_framework.blocktools import create_block, create_coinbase +from test_framework.key import CECKey +from test_framework.script import * + +class BaseNode(SingleNodeConnCB): + def __init__(self): + SingleNodeConnCB.__init__(self) + self.last_inv = None + self.last_headers = None + self.last_block = None + self.last_getdata = None + self.block_announced = False + self.last_getheaders = None + self.disconnected = False + self.last_blockhash_announced = None + + def on_close(self, conn): + self.disconnected = True + + def wait_for_disconnect(self, timeout=60): + test_function = lambda: self.disconnected + assert(wait_until(test_function, timeout=timeout)) + return + + def send_header_for_blocks(self, new_blocks): + headers_message = msg_headers() + headers_message.headers = [ CBlockHeader(b) for b in new_blocks ] + self.send_message(headers_message) + +class SendHeadersTest(BitcoinTestFramework): + def __init__(self): + super().__init__() + self.setup_clean_chain = True + self.num_nodes = 3 + + def setup_network(self): + # Start node0.
We don't start the other nodes yet since + # we need to pre-mine a block with an invalid transaction + # signature so we can pass in the block hash as assumevalid. + self.nodes = [] + self.nodes.append(start_node(0, self.options.tmpdir, ["-debug"])) + + def run_test(self): + + # Connect to node0 + node0 = BaseNode() + connections = [] + connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0)) + node0.add_connection(connections[0]) + + NetworkThread().start() # Start up network handling in another thread + node0.wait_for_verack() + + # Build the blockchain + self.tip = int(self.nodes[0].getbestblockhash(), 16) + self.block_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time'] + 1 + + self.blocks = [] + + # Get a pubkey for the coinbase TXO + coinbase_key = CECKey() + coinbase_key.set_secretbytes(b"horsebattery") + coinbase_pubkey = coinbase_key.get_pubkey() + + # Create the first block with a coinbase output to our key + height = 1 + block = create_block(self.tip, create_coinbase(height, coinbase_pubkey), self.block_time) + self.blocks.append(block) + self.block_time += 1 + block.solve() + # Save the coinbase for later + self.block1 = block + self.tip = block.sha256 + height += 1 + + # Bury the block 100 deep so the coinbase output is spendable + for i in range(100): + block = create_block(self.tip, create_coinbase(height), self.block_time) + block.solve() + self.blocks.append(block) + self.tip = block.sha256 + self.block_time += 1 + height += 1 + + # Create a transaction spending the coinbase output with an invalid (null) signature + tx = CTransaction() + tx.vin.append(CTxIn(COutPoint(self.block1.vtx[0].sha256, 0), scriptSig=b"")) + tx.vout.append(CTxOut(49*100000000, CScript([OP_TRUE]))) + tx.calc_sha256() + + block102 = create_block(self.tip, create_coinbase(height), self.block_time) + self.block_time += 1 + block102.vtx.extend([tx]) + block102.hashMerkleRoot = block102.calc_merkle_root() + block102.rehash() + block102.solve() + self.blocks.append(block102) + self.tip = block102.sha256 + self.block_time += 1 + height += 1 + + # Bury the assumed valid block 2100 deep + for i in range(2100): + block = create_block(self.tip, create_coinbase(height), self.block_time) + block.nVersion = 4 + block.solve() + self.blocks.append(block) + self.tip = block.sha256 + self.block_time += 1 + height += 1 + + # Start node1 and node2 with assumevalid so they accept a block with a bad signature. + self.nodes.append(start_node(1, self.options.tmpdir, + ["-debug", "-assumevalid=" + hex(block102.sha256)])) + node1 = BaseNode() # connects to node1 + connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1], node1)) + node1.add_connection(connections[1]) + node1.wait_for_verack() + + self.nodes.append(start_node(2, self.options.tmpdir, + ["-debug", "-assumevalid=" + hex(block102.sha256)])) + node2 = BaseNode() # connects to node2 + connections.append(NodeConn('127.0.0.1', p2p_port(2), self.nodes[2], node2)) + node2.add_connection(connections[2]) + node2.wait_for_verack() + + # send header lists to all three nodes + node0.send_header_for_blocks(self.blocks[0:2000]) + node0.send_header_for_blocks(self.blocks[2000:]) + node1.send_header_for_blocks(self.blocks[0:2000]) + node1.send_header_for_blocks(self.blocks[2000:]) + node2.send_header_for_blocks(self.blocks[0:200]) + + # Send 102 blocks to node0. Block 102 will be rejected. 
+ for i in range(101): + node0.send_message(msg_block(self.blocks[i])) + node0.sync_with_ping() # make sure the most recent block is synced + node0.send_message(msg_block(self.blocks[101])) + assert_equal(self.nodes[0].getblock(self.nodes[0].getbestblockhash())['height'], 101) + + # Send 2202 blocks to node1. All blocks will be accepted. + for i in range(2202): + node1.send_message(msg_block(self.blocks[i])) + node1.sync_with_ping() # make sure the most recent block is synced + assert_equal(self.nodes[1].getblock(self.nodes[1].getbestblockhash())['height'], 2202) + + # Send 102 blocks to node2. Block 102 will be rejected. + for i in range(101): + node2.send_message(msg_block(self.blocks[i])) + node2.sync_with_ping() # make sure the most recent block is synced + node2.send_message(msg_block(self.blocks[101])) + assert_equal(self.nodes[2].getblock(self.nodes[2].getbestblockhash())['height'], 101) + +if __name__ == '__main__': + SendHeadersTest().main() diff --git a/qa/rpc-tests/bip65-cltv-p2p.py b/qa/rpc-tests/bip65-cltv-p2p.py index 754b6873b71f..e903b2fbf07b 100755 --- a/qa/rpc-tests/bip65-cltv-p2p.py +++ b/qa/rpc-tests/bip65-cltv-p2p.py @@ -71,9 +71,9 @@ def get_tests(self): self.nodeaddress = self.nodes[0].getnewaddress() self.last_block_time = int(time.time()) - ''' 98 more version 3 blocks ''' + ''' 398 more version 3 blocks ''' test_blocks = [] - for i in range(98): + for i in range(398): block = create_block(self.tip, create_coinbase(height), self.last_block_time + 1) block.nVersion = 3 block.rehash() @@ -118,24 +118,6 @@ def get_tests(self): height += 1 yield TestInstance([[block, True]]) - ''' - Check that the new CLTV rules are enforced in the 751st version 4 - block. - ''' - spendtx = self.create_transaction(self.nodes[0], - self.coinbase_blocks[1], self.nodeaddress, 1.0) - cltv_invalidate(spendtx) - spendtx.rehash() - - block = create_block(self.tip, create_coinbase(height), self.last_block_time + 1) - block.nVersion = 4 - block.vtx.append(spendtx) - block.hashMerkleRoot = block.calc_merkle_root() - block.rehash() - block.solve() - self.last_block_time += 1 - yield TestInstance([[block, False]]) - ''' Mine 199 new version blocks on last valid tip ''' test_blocks = [] for i in range(199): @@ -169,6 +151,24 @@ def get_tests(self): height += 1 yield TestInstance([[block, True]]) + ''' + Check that the new CLTV rules are enforced in the 951st version 4 + block.
+ ''' + spendtx = self.create_transaction(self.nodes[0], + self.coinbase_blocks[1], self.nodeaddress, 1.0) + cltv_invalidate(spendtx) + spendtx.rehash() + + block = create_block(self.tip, create_coinbase(height), self.last_block_time + 1) + block.nVersion = 4 + block.vtx.append(spendtx) + block.hashMerkleRoot = block.calc_merkle_root() + block.rehash() + block.solve() + self.last_block_time += 1 + yield TestInstance([[block, False]]) + ''' Mine 1 old version block, should be invalid ''' block = create_block(self.tip, create_coinbase(height), self.last_block_time + 1) block.nVersion = 3 diff --git a/qa/rpc-tests/bip65-cltv.py b/qa/rpc-tests/bip65-cltv.py index abba7fc20e7d..baa77b92a05b 100755 --- a/qa/rpc-tests/bip65-cltv.py +++ b/qa/rpc-tests/bip65-cltv.py @@ -30,7 +30,8 @@ def run_test(self): cnt = self.nodes[0].getblockcount() # Mine some old-version blocks - self.nodes[1].generate(100) + self.nodes[1].generate(200) + cnt += 100 self.sync_all() if (self.nodes[0].getblockcount() != cnt + 100): raise AssertionError("Failed to mine 100 version=3 blocks") diff --git a/qa/rpc-tests/bip68-112-113-p2p.py b/qa/rpc-tests/bip68-112-113-p2p.py index c65fec32a7be..7d08b8d2ac32 100755 --- a/qa/rpc-tests/bip68-112-113-p2p.py +++ b/qa/rpc-tests/bip68-112-113-p2p.py @@ -289,6 +289,7 @@ def get_tests(self): # BIP113 test transaction will be modified before each use to put in appropriate block time bip113tx_v1 = self.create_transaction(self.nodes[0], bip113input, self.nodeaddress, Decimal("499.98")) bip113tx_v1.vin[0].nSequence = 0xFFFFFFFE + bip113tx_v1.nVersion = 1 bip113tx_v2 = self.create_transaction(self.nodes[0], bip113input, self.nodeaddress, Decimal("499.98")) bip113tx_v2.vin[0].nSequence = 0xFFFFFFFE bip113tx_v2.nVersion = 2 diff --git a/qa/rpc-tests/bipdersig-p2p.py b/qa/rpc-tests/bipdersig-p2p.py index 4e4936a4ae6d..3bad5af5e67a 100755 --- a/qa/rpc-tests/bipdersig-p2p.py +++ b/qa/rpc-tests/bipdersig-p2p.py @@ -79,9 +79,9 @@ def get_tests(self): self.nodeaddress = self.nodes[0].getnewaddress() self.last_block_time = int(time.time()) - ''' 98 more version 2 blocks ''' + ''' 298 more version 2 blocks ''' test_blocks = [] - for i in range(98): + for i in range(298): block = create_block(self.tip, create_coinbase(height), self.last_block_time + 1) block.nVersion = 2 block.rehash() @@ -124,25 +124,7 @@ def get_tests(self): self.last_block_time += 1 self.tip = block.sha256 height += 1 - yield TestInstance([[block, True]]) - - ''' - Check that the new DERSIG rules are enforced in the 751st version 3 - block. - ''' - spendtx = self.create_transaction(self.nodes[0], - self.coinbase_blocks[1], self.nodeaddress, 1.0) - unDERify(spendtx) - spendtx.rehash() - - block = create_block(self.tip, create_coinbase(height), self.last_block_time + 1) - block.nVersion = 3 - block.vtx.append(spendtx) - block.hashMerkleRoot = block.calc_merkle_root() - block.rehash() - block.solve() - self.last_block_time += 1 - yield TestInstance([[block, False]]) + yield TestInstance([[block, True]]) ''' Mine 199 new version blocks on last valid tip ''' test_blocks = [] @@ -177,6 +159,24 @@ def get_tests(self): height += 1 yield TestInstance([[block, True]]) + ''' + Check that the new DERSIG rules are enforced in the 951st version 3 + block. 
+ ''' + spendtx = self.create_transaction(self.nodes[0], + self.coinbase_blocks[1], self.nodeaddress, 1.0) + unDERify(spendtx) + spendtx.rehash() + + block = create_block(self.tip, create_coinbase(height), self.last_block_time + 1) + block.nVersion = 3 + block.vtx.append(spendtx) + block.hashMerkleRoot = block.calc_merkle_root() + block.rehash() + block.solve() + self.last_block_time += 1 + yield TestInstance([[block, False]]) + ''' Mine 1 old version block, should be invalid ''' block = create_block(self.tip, create_coinbase(height), self.last_block_time + 1) block.nVersion = 2 diff --git a/qa/rpc-tests/fundrawtransaction.py b/qa/rpc-tests/fundrawtransaction.py index e41f8b6be821..f62695c44ad3 100755 --- a/qa/rpc-tests/fundrawtransaction.py +++ b/qa/rpc-tests/fundrawtransaction.py @@ -226,7 +226,7 @@ def run_test(self): assert(False) rawtxfund = self.nodes[2].fundrawtransaction(rawtx, {'changeAddress': change, 'changePosition': 0}) dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex']) - out = dec_tx['vout'][0]; + out = dec_tx['vout'][0] assert_equal(change, out['scriptPubKey']['addresses'][0]) @@ -650,7 +650,7 @@ def run_test(self): assert_equal(len(self.nodes[3].listunspent(1)), 1) inputs = [] - outputs = {self.nodes[2].getnewaddress() : 1} + outputs = {self.nodes[3].getnewaddress() : 1} rawtx = self.nodes[3].createrawtransaction(inputs, outputs) result = self.nodes[3].fundrawtransaction(rawtx) # uses min_relay_tx_fee (set by settxfee) result2 = self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 2*min_relay_tx_fee}) @@ -659,5 +659,107 @@ def run_test(self): assert_fee_amount(result2['fee'], count_bytes(result2['hex']), 2 * result_fee_rate) assert_fee_amount(result3['fee'], count_bytes(result3['hex']), 10 * result_fee_rate) + ############################# + # Test address reuse option # + ############################# + + result3 = self.nodes[3].fundrawtransaction(rawtx, {"reserveChangeKey": False}) + res_dec = self.nodes[0].decoderawtransaction(result3["hex"]) + changeaddress = "" + for out in res_dec['vout']: + if out['value'] > 1.0: + changeaddress += out['scriptPubKey']['addresses'][0] + assert(changeaddress != "") + nextaddr = self.nodes[3].getnewaddress() + # frt should not have removed the key from the keypool + assert(changeaddress == nextaddr) + + result3 = self.nodes[3].fundrawtransaction(rawtx) + res_dec = self.nodes[0].decoderawtransaction(result3["hex"]) + changeaddress = "" + for out in res_dec['vout']: + if out['value'] > 1.0: + changeaddress += out['scriptPubKey']['addresses'][0] + assert(changeaddress != "") + nextaddr = self.nodes[3].getnewaddress() + # Now the change address key should be removed from the keypool + assert(changeaddress != nextaddr) + + ###################################### + # Test subtractFeeFromOutputs option # + ###################################### + + # Make sure there is exactly one input so coin selection can't skew the result + assert_equal(len(self.nodes[3].listunspent(1)), 1) + + # Disable BIP69 sorting of inputs and outputs + self.nodes[3].setbip69enabled(False) + + inputs = [] + outputs = {self.nodes[2].getnewaddress(): 1} + rawtx = self.nodes[3].createrawtransaction(inputs, outputs) + + result = [self.nodes[3].fundrawtransaction(rawtx), # uses min_relay_tx_fee (set by settxfee) + self.nodes[3].fundrawtransaction(rawtx, {"subtractFeeFromOutputs": []}), # empty subtraction list + self.nodes[3].fundrawtransaction(rawtx, {"subtractFeeFromOutputs": [0]}), # uses min_relay_tx_fee (set by settxfee) + 
self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 2*min_relay_tx_fee}), + self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 2*min_relay_tx_fee, "subtractFeeFromOutputs": [0]})] + + dec_tx = [self.nodes[3].decoderawtransaction(tx['hex']) for tx in result] + output = [d['vout'][1 - r['changepos']]['value'] for d, r in zip(dec_tx, result)] + change = [d['vout'][r['changepos']]['value'] for d, r in zip(dec_tx, result)] + + assert_equal(result[0]['fee'], result[1]['fee'], result[2]['fee']) + assert_equal(result[3]['fee'], result[4]['fee']) + assert_equal(change[0], change[1]) + assert_equal(output[0], output[1]) + assert_equal(output[0], output[2] + result[2]['fee']) + assert_equal(change[0] + result[0]['fee'], change[2]) + assert_equal(output[3], output[4] + result[4]['fee']) + assert_equal(change[3] + result[3]['fee'], change[4]) + + inputs = [] + outputs = {self.nodes[2].getnewaddress(): value for value in (1.0, 1.1, 1.2, 1.3)} + keys = list(outputs.keys()) + rawtx = self.nodes[3].createrawtransaction(inputs, outputs) + + result = [self.nodes[3].fundrawtransaction(rawtx), + # split the fee between outputs 0, 2, and 3, but not output 1 + self.nodes[3].fundrawtransaction(rawtx, {"subtractFeeFromOutputs": [0, 2, 3]})] + + dec_tx = [self.nodes[3].decoderawtransaction(result[0]['hex']), + self.nodes[3].decoderawtransaction(result[1]['hex'])] + + # Nested list of non-change output amounts for each transaction + output = [[out['value'] for i, out in enumerate(d['vout']) if i != r['changepos']] + for d, r in zip(dec_tx, result)] + + # List of differences in output amounts between normal and subtractFee transactions + share = [o0 - o1 for o0, o1 in zip(output[0], output[1])] + + # output 1 is the same in both transactions + assert_equal(share[1], 0) + + # the other 3 outputs are smaller as a result of subtractFeeFromOutputs + assert_greater_than(share[0], 0) + assert_greater_than(share[2], 0) + assert_greater_than(share[3], 0) + + # outputs 2 and 3 take the same share of the fee + assert_equal(share[2], share[3]) + + # output 0 takes at least as much share of the fee, and no more than 2 satoshis more, than outputs 2 and 3 + assert_greater_than_or_equal(share[0], share[2]) + assert_greater_than_or_equal(share[2] + Decimal(2e-8), share[0]) + + # the fee is the same in both transactions + assert_equal(result[0]['fee'], result[1]['fee']) + + # the total subtracted from the outputs is equal to the fee + assert_equal(share[0] + share[2] + share[3], result[0]['fee']) + + # Reenable BIP69 sorting of inputs and outputs + self.nodes[3].setbip69enabled(True) + if __name__ == '__main__': RawTransactionsTest().main() diff --git a/qa/rpc-tests/import-rescan.py b/qa/rpc-tests/import-rescan.py index 2e4f290f5baf..d7aa3993931e 100755 --- a/qa/rpc-tests/import-rescan.py +++ b/qa/rpc-tests/import-rescan.py @@ -33,6 +33,7 @@ def call_import_rpc(call, data, address, scriptPubKey, pubkey, key, label, node, "scriptPubKey": { "address": address }, + "timestamp": "now", "pubkeys": [pubkey] if data == Data.pub else [], "keys": [key] if data == Data.priv else [], "label": label, diff --git a/qa/rpc-tests/importmulti.py b/qa/rpc-tests/importmulti.py index 5c536f2f4952..b4d4b6c5b804 100755 --- a/qa/rpc-tests/importmulti.py +++ b/qa/rpc-tests/importmulti.py @@ -52,7 +52,8 @@ def run_test (self): result = self.nodes[1].importmulti([{ "scriptPubKey": { "address": address['address'] - } + }, + "timestamp": "now", }]) assert_equal(result[0]['success'], True) address_assert = self.nodes[1].validateaddress(address['address']) @@ 
-65,6 +66,7 @@ def run_test (self): address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) result = self.nodes[1].importmulti([{ "scriptPubKey": address['scriptPubKey'], + "timestamp": "now", "internal": True }]) assert_equal(result[0]['success'], True) @@ -76,7 +78,8 @@ def run_test (self): print("Should not import a scriptPubKey without internal flag") address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) result = self.nodes[1].importmulti([{ - "scriptPubKey": address['scriptPubKey'] + "scriptPubKey": address['scriptPubKey'], + "timestamp": "now", }]) assert_equal(result[0]['success'], False) assert_equal(result[0]['error']['code'], -8) @@ -93,6 +96,7 @@ def run_test (self): "scriptPubKey": { "address": address['address'] }, + "timestamp": "now", "pubkeys": [ address['pubkey'] ] }]) assert_equal(result[0]['success'], True) @@ -106,9 +110,10 @@ def run_test (self): address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) request = [{ "scriptPubKey": address['scriptPubKey'], + "timestamp": "now", "pubkeys": [ address['pubkey'] ], "internal": True - }]; + }] result = self.nodes[1].importmulti(request) assert_equal(result[0]['success'], True) address_assert = self.nodes[1].validateaddress(address['address']) @@ -120,8 +125,9 @@ def run_test (self): address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) request = [{ "scriptPubKey": address['scriptPubKey'], + "timestamp": "now", "pubkeys": [ address['pubkey'] ] - }]; + }] result = self.nodes[1].importmulti(request) assert_equal(result[0]['success'], False) assert_equal(result[0]['error']['code'], -8) @@ -137,6 +143,7 @@ def run_test (self): "scriptPubKey": { "address": address['address'] }, + "timestamp": "now", "keys": [ self.nodes[0].dumpprivkey(address['address']) ] }]) assert_equal(result[0]['success'], True) @@ -151,6 +158,7 @@ def run_test (self): "scriptPubKey": { "address": address['address'] }, + "timestamp": "now", "keys": [ self.nodes[0].dumpprivkey(address['address']) ], "watchonly": True }]) @@ -166,6 +174,7 @@ def run_test (self): address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) result = self.nodes[1].importmulti([{ "scriptPubKey": address['scriptPubKey'], + "timestamp": "now", "keys": [ self.nodes[0].dumpprivkey(address['address']) ], "internal": True }]) @@ -179,6 +188,7 @@ def run_test (self): address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) result = self.nodes[1].importmulti([{ "scriptPubKey": address['scriptPubKey'], + "timestamp": "now", "keys": [ self.nodes[0].dumpprivkey(address['address']) ] }]) assert_equal(result[0]['success'], False) @@ -197,13 +207,14 @@ def run_test (self): self.nodes[1].generate(100) transactionid = self.nodes[1].sendtoaddress(multi_sig_script['address'], 10.00) self.nodes[1].generate(1) - transaction = self.nodes[1].gettransaction(transactionid); + transaction = self.nodes[1].gettransaction(transactionid) print("Should import a p2sh") result = self.nodes[1].importmulti([{ "scriptPubKey": { "address": multi_sig_script['address'] - } + }, + "timestamp": "now", }]) assert_equal(result[0]['success'], True) address_assert = self.nodes[1].validateaddress(multi_sig_script['address']) @@ -222,13 +233,14 @@ def run_test (self): self.nodes[1].generate(100) transactionid = self.nodes[1].sendtoaddress(multi_sig_script['address'], 10.00) self.nodes[1].generate(1) - transaction = self.nodes[1].gettransaction(transactionid); + transaction = self.nodes[1].gettransaction(transactionid) print("Should import a 
p2sh with respective redeem script") result = self.nodes[1].importmulti([{ "scriptPubKey": { "address": multi_sig_script['address'] }, + "timestamp": "now", "redeemscript": multi_sig_script['redeemScript'] }]) assert_equal(result[0]['success'], True) @@ -246,13 +258,14 @@ def run_test (self): self.nodes[1].generate(100) transactionid = self.nodes[1].sendtoaddress(multi_sig_script['address'], 10.00) self.nodes[1].generate(1) - transaction = self.nodes[1].gettransaction(transactionid); + transaction = self.nodes[1].gettransaction(transactionid) print("Should import a p2sh with respective redeem script and private keys") result = self.nodes[1].importmulti([{ "scriptPubKey": { "address": multi_sig_script['address'] }, + "timestamp": "now", "redeemscript": multi_sig_script['redeemScript'], "keys": [ self.nodes[0].dumpprivkey(sig_address_1['address']), self.nodes[0].dumpprivkey(sig_address_2['address'])] }]) @@ -270,13 +283,14 @@ def run_test (self): self.nodes[1].generate(100) transactionid = self.nodes[1].sendtoaddress(multi_sig_script['address'], 10.00) self.nodes[1].generate(1) - transaction = self.nodes[1].gettransaction(transactionid); + transaction = self.nodes[1].gettransaction(transactionid) print("Should import a p2sh with respective redeem script and private keys") result = self.nodes[1].importmulti([{ "scriptPubKey": { "address": multi_sig_script['address'] }, + "timestamp": "now", "redeemscript": multi_sig_script['redeemScript'], "keys": [ self.nodes[0].dumpprivkey(sig_address_1['address']), self.nodes[0].dumpprivkey(sig_address_2['address'])], "watchonly": True @@ -294,6 +308,7 @@ def run_test (self): "scriptPubKey": { "address": address['address'] }, + "timestamp": "now", "pubkeys": [ address2['pubkey'] ] }]) assert_equal(result[0]['success'], False) @@ -310,9 +325,10 @@ def run_test (self): address2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) request = [{ "scriptPubKey": address['scriptPubKey'], + "timestamp": "now", "pubkeys": [ address2['pubkey'] ], "internal": True - }]; + }] result = self.nodes[1].importmulti(request) assert_equal(result[0]['success'], False) assert_equal(result[0]['error']['code'], -5) @@ -330,6 +346,7 @@ def run_test (self): "scriptPubKey": { "address": address['address'] }, + "timestamp": "now", "keys": [ self.nodes[0].dumpprivkey(address2['address']) ] }]) assert_equal(result[0]['success'], False) @@ -346,6 +363,7 @@ def run_test (self): address2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) result = self.nodes[1].importmulti([{ "scriptPubKey": address['scriptPubKey'], + "timestamp": "now", "keys": [ self.nodes[0].dumpprivkey(address2['address']) ], "internal": True }]) @@ -356,5 +374,18 @@ def run_test (self): assert_equal(address_assert['iswatchonly'], False) assert_equal(address_assert['ismine'], False) + # Bad or missing timestamps + print("Should throw on invalid or missing timestamp values") + assert_raises_message(JSONRPCException, 'Missing required timestamp field for key', + self.nodes[1].importmulti, [{ + "scriptPubKey": address['scriptPubKey'], + }]) + assert_raises_message(JSONRPCException, 'Expected number or "now" timestamp value for key. 
got type string', + self.nodes[1].importmulti, [{ + "scriptPubKey": address['scriptPubKey'], + "timestamp": "", + }]) + + if __name__ == '__main__': ImportMultiTest ().main () diff --git a/qa/rpc-tests/listsinceblock.py b/qa/rpc-tests/listsinceblock.py new file mode 100755 index 000000000000..9d0a4f9ed74c --- /dev/null +++ b/qa/rpc-tests/listsinceblock.py @@ -0,0 +1,80 @@ +#!/usr/bin/env python3 +# Copyright (c) 2017 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. + +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import assert_equal + +class ListSinceBlockTest (BitcoinTestFramework): + + def __init__(self): + super().__init__() + self.setup_clean_chain = True + self.num_nodes = 4 + + def run_test (self): + ''' + `listsinceblock` did not behave correctly when handed a block that was + no longer in the main chain: + + ab0 + / \ + aa1 [tx0] bb1 + | | + aa2 bb2 + | | + aa3 bb3 + | + bb4 + + Consider a client that has only seen block `aa3` above. It asks the node + to `listsinceblock aa3`. But at some point prior the main chain switched + to the bb chain. + + Previously: listsinceblock would find height=4 for block aa3 and compare + this to height=5 for the tip of the chain (bb4). It would then return + results restricted to bb3-bb4. + + Now: listsinceblock finds the fork at ab0 and returns results in the + range bb1-bb4. + + This test only checks that [tx0] is present. + ''' + + assert_equal(self.is_network_split, False) + self.nodes[2].generate(101) + self.sync_all() + + assert_equal(self.nodes[0].getbalance(), 0) + assert_equal(self.nodes[1].getbalance(), 0) + assert_equal(self.nodes[2].getbalance(), 500) + assert_equal(self.nodes[3].getbalance(), 0) + + # Split network into two + self.split_network() + assert_equal(self.is_network_split, True) + + # send to nodes[0] from nodes[2] + senttx = self.nodes[2].sendtoaddress(self.nodes[0].getnewaddress(), 1) + + # generate on both sides + lastblockhash = self.nodes[1].generate(6)[5] + self.nodes[2].generate(7) + print('lastblockhash=%s' % (lastblockhash)) + + self.sync_all() + + self.join_network() + + # listsinceblock(lastblockhash) should now include tx, as seen from nodes[0] + lsbres = self.nodes[0].listsinceblock(lastblockhash) + found = False + for tx in lsbres['transactions']: + if tx['txid'] == senttx: + found = True + break + assert_equal(found, True) + +if __name__ == '__main__': + ListSinceBlockTest().main() diff --git a/qa/rpc-tests/mempool_reorg.py b/qa/rpc-tests/mempool_reorg.py index 8ddc91018a61..36fa1d47747a 100755 --- a/qa/rpc-tests/mempool_reorg.py +++ b/qa/rpc-tests/mempool_reorg.py @@ -55,7 +55,7 @@ def run_test(self): # Create a block-height-locked transaction which will be invalid after reorg timelock_tx = self.nodes[0].createrawtransaction([{"txid": coinbase_txids[0], "vout": 0}], {node0_address: 499.9}) # Set the time lock - timelock_tx = timelock_tx.replace("ffffffff", "11111111", 1) + timelock_tx = timelock_tx.replace("ffffffff", "11111191", 1) timelock_tx = timelock_tx[:-8] + hex(self.nodes[0].getblockcount() + 2)[2:] + "000000" timelock_tx = self.nodes[0].signrawtransaction(timelock_tx)["hex"] assert_raises(JSONRPCException, self.nodes[0].sendrawtransaction, timelock_tx) diff --git a/qa/rpc-tests/nodehandling.py b/qa/rpc-tests/nodehandling.py index e9682c4908f6..7313c79b54fa 100755 --- a/qa/rpc-tests/nodehandling.py +++ b/qa/rpc-tests/nodehandling.py @@ -10,7 
+10,6 @@ from test_framework.test_framework import BitcoinTestFramework from test_framework.util import * -import http.client import urllib.parse class NodeHandlingTest (BitcoinTestFramework): diff --git a/qa/rpc-tests/p2p-feefilter.py b/qa/rpc-tests/p2p-feefilter.py index 96d99d38a73c..86ce0b42e639 100755 --- a/qa/rpc-tests/p2p-feefilter.py +++ b/qa/rpc-tests/p2p-feefilter.py @@ -21,9 +21,9 @@ def allInvsMatch(invsExpected, testnode): for x in range(60): with mininode_lock: if (sorted(invsExpected) == sorted(testnode.txinvs)): - return True; + return True time.sleep(1) - return False; + return False # TestNode: bare-bones "peer". Used to track which invs are received from a node # and to send the node feefilter messages. diff --git a/qa/rpc-tests/p2p-mempool.py b/qa/rpc-tests/p2p-mempool.py index 382d7f1e82ca..1fd125fdcdd8 100755 --- a/qa/rpc-tests/p2p-mempool.py +++ b/qa/rpc-tests/p2p-mempool.py @@ -6,7 +6,6 @@ from test_framework.mininode import * from test_framework.test_framework import BitcoinTestFramework from test_framework.util import * -import time class TestNode(NodeConnCB): def __init__(self): diff --git a/qa/rpc-tests/pruning.py b/qa/rpc-tests/pruning.py index 78b8938e4a9f..9d155478e251 100755 --- a/qa/rpc-tests/pruning.py +++ b/qa/rpc-tests/pruning.py @@ -16,6 +16,8 @@ import time import os +MIN_BLOCKS_TO_KEEP = 288 + def calc_usage(blockdir): return sum(os.path.getsize(blockdir+f) for f in os.listdir(blockdir) if os.path.isfile(blockdir+f)) / (1024. * 1024.) @@ -25,7 +27,7 @@ class PruneTest(BitcoinTestFramework): def __init__(self): super().__init__() self.setup_clean_chain = True - self.num_nodes = 3 + self.num_nodes = 6 # Cache for utxos, as the listunspent may take a long time later in the test self.utxo_cache_0 = [] @@ -43,10 +45,22 @@ def setup_network(self): self.nodes.append(start_node(2, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-prune=550"], timewait=900)) self.prunedir = self.options.tmpdir+"/node2/regtest/blocks/" + # Create nodes 3 and 4 to test manual pruning (they will be re-started with manual pruning later) + self.nodes.append(start_node(3, self.options.tmpdir, ["-debug=0","-maxreceivebuffer=20000","-blockmaxsize=999000"], timewait=900)) + self.nodes.append(start_node(4, self.options.tmpdir, ["-debug=0","-maxreceivebuffer=20000","-blockmaxsize=999000"], timewait=900)) + + # Create nodes 5 to test wallet in prune mode, but do not connect + self.nodes.append(start_node(5, self.options.tmpdir, ["-debug=0", "-prune=550"])) + + # Determine default relay fee + self.relayfee = self.nodes[0].getnetworkinfo()["relayfee"] + connect_nodes(self.nodes[0], 1) connect_nodes(self.nodes[1], 2) connect_nodes(self.nodes[2], 0) - sync_blocks(self.nodes[0:3]) + connect_nodes(self.nodes[0], 3) + connect_nodes(self.nodes[0], 4) + sync_blocks(self.nodes[0:5]) def create_big_chain(self): # Start by creating some coinbases we can spend later @@ -57,7 +71,7 @@ def create_big_chain(self): for i in range(645): mine_large_block(self.nodes[0], self.utxo_cache_0) - sync_blocks(self.nodes[0:3]) + sync_blocks(self.nodes[0:5]) def test_height_min(self): if not os.path.isfile(self.prunedir+"blk00000.dat"): @@ -89,7 +103,7 @@ def create_chain_with_staleblocks(self): # Disconnect node 0 so it can mine a longer reorg chain without knowing about node 1's soon-to-be-stale chain # Node 2 stays connected, so it hears about the stale blocks and then reorg's when node0 reconnects # Stopping node 0 also clears its mempool, so it doesn't have node1's transactions to accidentally mine - 
stop_node(self.nodes[0],0) + self.stop_node(0) self.nodes[0]=start_node(0, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-blockmaxsize=999000", "-checkblocks=5"], timewait=900) # Mine 24 blocks in node 1 for i in range(24): @@ -114,7 +128,7 @@ def reorg_test(self): # This will cause Node 2 to do a reorg requiring 288 blocks of undo data to the reorg_test chain # Reboot node 1 to clear its mempool (hopefully make the invalidate faster) # Lower the block max size so we don't keep mining all our big mempool transactions (from disconnected blocks) - stop_node(self.nodes[1],1) + self.stop_node(1) self.nodes[1]=start_node(1, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-blockmaxsize=5000", "-checkblocks=5", "-disablesafemode"], timewait=900) height = self.nodes[1].getblockcount() @@ -137,7 +151,7 @@ def reorg_test(self): print("New best height", self.nodes[1].getblockcount()) # Reboot node1 to clear those giant tx's from mempool - stop_node(self.nodes[1],1) + self.stop_node(1) self.nodes[1]=start_node(1, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-blockmaxsize=5000", "-checkblocks=5", "-disablesafemode"], timewait=900) print("Generating new longer chain of 300 more blocks") @@ -212,6 +226,114 @@ def reorg_back(self): # Verify we can now have the data for a block previously pruned assert(self.nodes[2].getblock(self.forkhash)["height"] == self.forkheight) + def manual_test(self, node_number, use_timestamp): + # at this point, node has 995 blocks and has not yet run in prune mode + node = self.nodes[node_number] = start_node(node_number, self.options.tmpdir, ["-debug=0"], timewait=900) + assert_equal(node.getblockcount(), 995) + assert_raises_message(JSONRPCException, "not in prune mode", node.pruneblockchain, 500) + self.stop_node(node_number) + + # now re-start in manual pruning mode + node = self.nodes[node_number] = start_node(node_number, self.options.tmpdir, ["-debug=0","-prune=1"], timewait=900) + assert_equal(node.getblockcount(), 995) + + def height(index): + if use_timestamp: + return node.getblockheader(node.getblockhash(index))["time"] + else: + return index + + def prune(index, expected_ret=None): + ret = node.pruneblockchain(height(index)) + # Check the return value. When use_timestamp is True, just check + # that the return value is less than or equal to the expected + # value, because when more than one block is generated per second, + # a timestamp will not be granular enough to uniquely identify an + # individual block. 
+ if expected_ret is None: + expected_ret = index + if use_timestamp: + assert_greater_than(ret, 0) + assert_greater_than(expected_ret + 1, ret) + else: + assert_equal(ret, expected_ret) + + def has_block(index): + return os.path.isfile(self.options.tmpdir + "/node{}/regtest/blocks/blk{:05}.dat".format(node_number, index)) + + # should not prune because chain tip of node 3 (995) < PruneAfterHeight (1000) + assert_raises_message(JSONRPCException, "Blockchain is too short for pruning", node.pruneblockchain, height(500)) + + # mine 6 blocks so we are at height 1001 (i.e., above PruneAfterHeight) + node.generate(6) + assert_equal(node.getblockchaininfo()["blocks"], 1001) + + # negative heights should raise an exception + assert_raises_message(JSONRPCException, "Negative", node.pruneblockchain, -10) + + # height=100 too low to prune first block file so this is a no-op + prune(100) + if not has_block(0): + raise AssertionError("blk00000.dat is missing when should still be there") + + # Does nothing + node.pruneblockchain(height(0)) + if not has_block(0): + raise AssertionError("blk00000.dat is missing when should still be there") + + # height=500 should prune first file + prune(500) + if has_block(0): + raise AssertionError("blk00000.dat is still there, should be pruned by now") + if not has_block(1): + raise AssertionError("blk00001.dat is missing when should still be there") + + # height=650 should prune second file + prune(650) + if has_block(1): + raise AssertionError("blk00001.dat is still there, should be pruned by now") + + # height=1000 should not prune anything more, because tip-288 is in blk00002.dat. + prune(1000, 1001 - MIN_BLOCKS_TO_KEEP) + if not has_block(2): + raise AssertionError("blk00002.dat is missing when should still be there") + + # advance the tip so blk00002.dat and blk00003.dat can be pruned (the last 288 blocks should now be in blk00004.dat) + node.generate(288) + prune(1000) + if has_block(2): + raise AssertionError("blk00002.dat is still there, should be pruned by now") + if has_block(3): + raise AssertionError("blk00003.dat is still there, should be pruned by now") + + # stop node, start back up with auto-prune at 550MB, make sure still runs + self.stop_node(node_number) + self.nodes[node_number] = start_node(node_number, self.options.tmpdir, ["-debug=0","-prune=550"], timewait=900) + + print("Success") + + def wallet_test(self): + # check that the pruning node's wallet is still in good shape + print("Stop and start pruning node to trigger wallet rescan") + try: + self.stop_node(2) + start_node(2, self.options.tmpdir, ["-debug=1","-prune=550"]) + print("Success") + except Exception as detail: + raise AssertionError("Wallet test: unable to re-start the pruning node") + + # check that the wallet loads successfully when restarting a pruned node after IBD. + # this was reported to fail in #7494. + print ("Syncing node 5 to test wallet") + connect_nodes(self.nodes[0], 5) + nds = [self.nodes[0], self.nodes[5]] + sync_blocks(nds, wait=5, timeout=300) + try: + self.stop_node(5) #stop and start to trigger rescan + start_node(5, self.options.tmpdir, ["-debug=1","-prune=550"]) + print ("Success") + except Exception as detail: + raise AssertionError("Wallet test: unable to re-start node5") def run_test(self): print("Warning! 
This test requires 4GB of disk space and takes over 30 mins (up to 2 hours)") @@ -226,6 +348,10 @@ def run_test(self): # Start by mining a simple chain that all nodes have # N0=N1=N2 **...*(995) + # stop manual-pruning node with 995 blocks + self.stop_node(3) + self.stop_node(4) + print("Check that we haven't started pruning yet because we're below PruneAfterHeight") self.test_height_min() # Extend this chain past the PruneAfterHeight @@ -308,6 +434,15 @@ def run_test(self): # # N1 doesn't change because 1033 on main chain (*) is invalid + print("Test manual pruning with block indices") + self.manual_test(3, use_timestamp=False) + + print("Test manual pruning with timestamps") + self.manual_test(4, use_timestamp=True) + + print("Test wallet re-scan") + self.wallet_test() + print("Done") if __name__ == '__main__': diff --git a/qa/rpc-tests/test_framework/siphash.py b/qa/rpc-tests/test_framework/siphash.py new file mode 100644 index 000000000000..9c0574bd9317 --- /dev/null +++ b/qa/rpc-tests/test_framework/siphash.py @@ -0,0 +1,64 @@ +#!/usr/bin/env python3 +# Copyright (c) 2016 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. + +# +# siphash.py - Specialized SipHash-2-4 implementations +# +# This implements SipHash-2-4 for 256-bit integers. + +def rotl64(n, b): + return n >> (64 - b) | (n & ((1 << (64 - b)) - 1)) << b + +def siphash_round(v0, v1, v2, v3): + v0 = (v0 + v1) & ((1 << 64) - 1) + v1 = rotl64(v1, 13) + v1 ^= v0 + v0 = rotl64(v0, 32) + v2 = (v2 + v3) & ((1 << 64) - 1) + v3 = rotl64(v3, 16) + v3 ^= v2 + v0 = (v0 + v3) & ((1 << 64) - 1) + v3 = rotl64(v3, 21) + v3 ^= v0 + v2 = (v2 + v1) & ((1 << 64) - 1) + v1 = rotl64(v1, 17) + v1 ^= v2 + v2 = rotl64(v2, 32) + return (v0, v1, v2, v3) + +def siphash256(k0, k1, h): + n0 = h & ((1 << 64) - 1) + n1 = (h >> 64) & ((1 << 64) - 1) + n2 = (h >> 128) & ((1 << 64) - 1) + n3 = (h >> 192) & ((1 << 64) - 1) + v0 = 0x736f6d6570736575 ^ k0 + v1 = 0x646f72616e646f6d ^ k1 + v2 = 0x6c7967656e657261 ^ k0 + v3 = 0x7465646279746573 ^ k1 ^ n0 + v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3) + v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3) + v0 ^= n0 + v3 ^= n1 + v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3) + v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3) + v0 ^= n1 + v3 ^= n2 + v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3) + v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3) + v0 ^= n2 + v3 ^= n3 + v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3) + v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3) + v0 ^= n3 + v3 ^= 0x2000000000000000 + v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3) + v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3) + v0 ^= 0x2000000000000000 + v2 ^= 0xFF + v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3) + v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3) + v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3) + v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3) + return v0 ^ v1 ^ v2 ^ v3 diff --git a/qa/rpc-tests/test_framework/util.py b/qa/rpc-tests/test_framework/util.py index 77f8c75db61b..ada82fb4c3d9 100644 --- a/qa/rpc-tests/test_framework/util.py +++ b/qa/rpc-tests/test_framework/util.py @@ -548,14 +548,18 @@ def assert_fee_amount(fee, tx_size, fee_per_kB): if fee > (tx_size + 2) * fee_per_kB / 1000: raise AssertionError("Fee of %s DASH too high! 
(Should be %s DASH)"%(str(fee), str(target_fee))) -def assert_equal(thing1, thing2): - if thing1 != thing2: - raise AssertionError("%s != %s"%(str(thing1),str(thing2))) +def assert_equal(thing1, thing2, *args): + if thing1 != thing2 or any(thing1 != arg for arg in args): + raise AssertionError("not(%s)" % " == ".join(str(arg) for arg in (thing1, thing2) + args)) def assert_greater_than(thing1, thing2): if thing1 <= thing2: raise AssertionError("%s <= %s"%(str(thing1),str(thing2))) +def assert_greater_than_or_equal(thing1, thing2): + if thing1 < thing2: + raise AssertionError("%s < %s"%(str(thing1),str(thing2))) + def assert_raises(exc, fun, *args, **kwds): assert_raises_message(exc, None, fun, *args, **kwds) diff --git a/qa/rpc-tests/zmq_test.py b/qa/rpc-tests/zmq_test.py index 3a116317fed9..e9afb2b09aff 100755 --- a/qa/rpc-tests/zmq_test.py +++ b/qa/rpc-tests/zmq_test.py @@ -12,9 +12,6 @@ import zmq import struct -import http.client -import urllib.parse - class ZMQTest (BitcoinTestFramework): def __init__(self): diff --git a/share/qt/extract_strings_qt.py b/share/qt/extract_strings_qt.py index ff96c02ff09c..43b43e76c7cd 100755 --- a/share/qt/extract_strings_qt.py +++ b/share/qt/extract_strings_qt.py @@ -8,7 +8,6 @@ ''' from __future__ import division,print_function,unicode_literals from subprocess import Popen, PIPE -import glob import operator import os import sys diff --git a/src/Makefile.bench.include b/src/Makefile.bench.include index ae27f58a4be0..e23a81c96ed7 100644 --- a/src/Makefile.bench.include +++ b/src/Makefile.bench.include @@ -16,6 +16,7 @@ bench_bench_dash_SOURCES = \ bench/bench.cpp \ bench/bench.h \ bench/checkblock.cpp \ + bench/checkqueue.cpp \ bench/Examples.cpp \ bench/rollingbloom.cpp \ bench/crypto_hash.cpp \ diff --git a/src/Makefile.test.include b/src/Makefile.test.include index 6b0e4fef4932..dcf92561aa1d 100644 --- a/src/Makefile.test.include +++ b/src/Makefile.test.include @@ -13,18 +13,42 @@ EXTRA_DIST += \ test/bctest.py \ test/bitcoin-util-test.py \ test/data/bitcoin-util-test.json \ - test/data/blanktx.hex \ + test/data/blanktxv1.hex \ + test/data/blanktxv1.json \ + test/data/blanktxv2.hex \ + test/data/blanktxv2.json \ test/data/tt-delin1-out.hex \ + test/data/tt-delin1-out.json \ test/data/tt-delout1-out.hex \ + test/data/tt-delout1-out.json \ test/data/tt-locktime317000-out.hex \ + test/data/tt-locktime317000-out.json \ test/data/tx394b54bb.hex \ test/data/txcreate1.hex \ + test/data/txcreate1.json \ test/data/txcreate2.hex \ + test/data/txcreate2.json \ test/data/txcreatedata1.hex \ + test/data/txcreatedata1.json \ test/data/txcreatedata2.hex \ - test/data/txcreatesign.hex \ + test/data/txcreatedata2.json \ test/data/txcreatedata_seq0.hex \ - test/data/txcreatedata_seq1.hex + test/data/txcreatedata_seq0.json \ + test/data/txcreatedata_seq1.hex \ + test/data/txcreatedata_seq1.json \ + test/data/txcreatemultisig1.hex \ + test/data/txcreatemultisig1.json \ + test/data/txcreatemultisig2.hex \ + test/data/txcreatemultisig2.json \ + test/data/txcreateoutpubkey1.hex \ + test/data/txcreateoutpubkey1.json \ + test/data/txcreatescript1.hex \ + test/data/txcreatescript1.json \ + test/data/txcreatescript2.hex \ + test/data/txcreatescript2.json \ + test/data/txcreatesignv1.hex \ + test/data/txcreatesignv1.json \ + test/data/txcreatesignv2.hex JSON_TEST_FILES = \ test/data/script_tests.json \ diff --git a/src/addrman.cpp b/src/addrman.cpp index bb96ee1be487..a58f1e08d72c 100644 --- a/src/addrman.cpp +++ b/src/addrman.cpp @@ -255,7 +255,7 @@ bool CAddrMan::Add_(const 
CAddress& addr, const CNetAddr& source, int64_t nTimeP int nId; CAddrInfo* pinfo = Find(addr, &nId); - // Do not set a penality for a source's self-announcement + // Do not set a penalty for a source's self-announcement if (addr == source) { nTimePenalty = 0; }
diff --git a/src/base58.h b/src/base58.h index 2054bf9ad6ce..afb986be2621 100644 --- a/src/base58.h +++ b/src/base58.h @@ -148,7 +148,7 @@ template class CBitcoinExtK K GetKey() { K ret; if (vchData.size() == Size) { - //if base58 encouded data not holds a ext key, return a !IsValid() key + // If base58 encoded data does not hold an ext key, return a !IsValid() key ret.Decode(&vchData[0]); } return ret; }
diff --git a/src/bench/checkqueue.cpp b/src/bench/checkqueue.cpp new file mode 100644 index 000000000000..6fa9fe4fe8c4 --- /dev/null +++ b/src/bench/checkqueue.cpp @@ -0,0 +1,103 @@ +// Copyright (c) 2015 The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. + +#include "bench.h" +#include "util.h" +#include "validation.h" +#include "checkqueue.h" +#include "prevector.h" +#include <vector> +#include <boost/thread/thread.hpp> +#include "random.h" + + +// This Benchmark tests the CheckQueue with the lightest +// weight Checks, so it should make any lock contention +// particularly visible +static const int MIN_CORES = 2; +static const size_t BATCHES = 101; +static const size_t BATCH_SIZE = 30; +static const int PREVECTOR_SIZE = 28; +static const int QUEUE_BATCH_SIZE = 128; +static void CCheckQueueSpeed(benchmark::State& state) +{ + struct FakeJobNoWork { + bool operator()() + { + return true; + } + void swap(FakeJobNoWork& x){}; + }; + CCheckQueue<FakeJobNoWork> queue {QUEUE_BATCH_SIZE}; + boost::thread_group tg; + for (auto x = 0; x < std::max(MIN_CORES, GetNumCores()); ++x) { + tg.create_thread([&]{queue.Thread();}); + } + while (state.KeepRunning()) { + CCheckQueueControl<FakeJobNoWork> control(&queue); + + // We call Add a number of times to simulate the behavior of adding + // a block of transactions at once. + + std::vector<std::vector<FakeJobNoWork>> vBatches(BATCHES); + for (auto& vChecks : vBatches) { + vChecks.resize(BATCH_SIZE); + } + for (auto& vChecks : vBatches) { + // We can't make vChecks in the inner loop because we want to measure + // the cost of getting the memory to each thread and we might get the same + // memory + control.Add(vChecks); + } + // control waits for completion by RAII, but + // it is done explicitly here for clarity + control.Wait(); + } + tg.interrupt_all(); + tg.join_all(); +} + +// This Benchmark tests the CheckQueue with a slightly realistic workload, +// where checks all contain a prevector that is indirect 50% of the time +// and there is a little bit of work done between calls to Add. +static void CCheckQueueSpeedPrevectorJob(benchmark::State& state) +{ + struct PrevectorJob { + prevector<PREVECTOR_SIZE, uint8_t> p; + PrevectorJob(){ + } + PrevectorJob(FastRandomContext& insecure_rand){ + p.resize(insecure_rand.rand32() % (PREVECTOR_SIZE*2)); + } + bool operator()() + { + return true; + } + void swap(PrevectorJob& x){p.swap(x.p);}; + }; + CCheckQueue<PrevectorJob> queue {QUEUE_BATCH_SIZE}; + boost::thread_group tg; + for (auto x = 0; x < std::max(MIN_CORES, GetNumCores()); ++x) { + tg.create_thread([&]{queue.Thread();}); + } + while (state.KeepRunning()) { + // Make insecure_rand here so that each iteration is identical. 
+ FastRandomContext insecure_rand(true); + CCheckQueueControl<PrevectorJob> control(&queue); + std::vector<std::vector<PrevectorJob>> vBatches(BATCHES); + for (auto& vChecks : vBatches) { + vChecks.reserve(BATCH_SIZE); + for (size_t x = 0; x < BATCH_SIZE; ++x) + vChecks.emplace_back(insecure_rand); + control.Add(vChecks); + } + // control waits for completion by RAII, but + // it is done explicitly here for clarity + control.Wait(); + } + tg.interrupt_all(); + tg.join_all(); +} +BENCHMARK(CCheckQueueSpeed); +BENCHMARK(CCheckQueueSpeedPrevectorJob);
diff --git a/src/bench/perf.cpp b/src/bench/perf.cpp index 1f43e5d3ac6f..a549ec29eaaf 100644 --- a/src/bench/perf.cpp +++ b/src/bench/perf.cpp @@ -6,7 +6,7 @@ #if defined(__i386__) || defined(__x86_64__) -/* These architectures support quering the cycle counter +/* These architectures support querying the cycle counter * from user space, no need for any syscall overhead. */ void perf_init(void) { }
diff --git a/src/chain.cpp b/src/chain.cpp index 1e611906d146..1c42917dfb7f 100644 --- a/src/chain.cpp +++ b/src/chain.cpp @@ -61,10 +61,10 @@ const CBlockIndex *CChain::FindFork(const CBlockIndex *pindex) const { return pindex; } -CBlockIndex* CChain::FindLatestBefore(int64_t nTime) const +CBlockIndex* CChain::FindEarliestAtLeast(int64_t nTime) const { std::vector<CBlockIndex*>::const_iterator lower = std::lower_bound(vChain.begin(), vChain.end(), nTime, - [](CBlockIndex* pBlock, const int64_t& time) -> bool { return pBlock->GetBlockTime() < time; }); + [](CBlockIndex* pBlock, const int64_t& time) -> bool { return pBlock->GetBlockTimeMax() < time; }); return (lower == vChain.end() ? NULL : *lower); }
diff --git a/src/chain.h b/src/chain.h index 08fd213e1f90..db92d6a8b9ef 100644 --- a/src/chain.h +++ b/src/chain.h @@ -200,6 +200,9 @@ class CBlockIndex //! (memory only) Sequential id assigned to distinguish order in which blocks are received. int32_t nSequenceId; + //! (memory only) Maximum nTime in the chain up to and including this block. + unsigned int nTimeMax; + void SetNull() { phashBlock = NULL; @@ -214,6 +217,7 @@ class CBlockIndex nChainTx = 0; nStatus = 0; nSequenceId = 0; + nTimeMax = 0; nVersion = 0; hashMerkleRoot = uint256(); @@ -279,6 +283,11 @@ class CBlockIndex return (int64_t)nTime; } + int64_t GetBlockTimeMax() const + { + return (int64_t)nTimeMax; + } + enum { nMedianTimeSpan=11 }; int64_t GetMedianTimePast() const @@ -466,8 +475,8 @@ class CChain { /** Find the last common block between this chain and a block index entry. */ const CBlockIndex *FindFork(const CBlockIndex *pindex) const; - /** Find the most recent block with timestamp lower than the given. */ - CBlockIndex* FindLatestBefore(int64_t nTime) const; + /** Find the earliest block with timestamp equal or greater than the given. 
*/ + CBlockIndex* FindEarliestAtLeast(int64_t nTime) const; }; #endif // BITCOIN_CHAIN_H diff --git a/src/chainparams.cpp b/src/chainparams.cpp index 8113a802de89..ee3da4e5234b 100644 --- a/src/chainparams.cpp +++ b/src/chainparams.cpp @@ -45,10 +45,11 @@ static CBlock CreateDevNetGenesisBlock(const uint256 &prevBlockHash, const std:: assert(!devNetName.empty()); CMutableTransaction txNew; - txNew.nVersion = 1; + txNew.nVersion = 4; txNew.vin.resize(1); txNew.vout.resize(1); - txNew.vin[0].scriptSig = CScript() << std::vector(devNetName.begin(), devNetName.end()); + // put height (BIP34) and devnet name into coinbase + txNew.vin[0].scriptSig = CScript() << 1 << std::vector(devNetName.begin(), devNetName.end()); txNew.vout[0].nValue = genesisReward; txNew.vout[0].scriptPubKey = CScript() << OP_RETURN; @@ -135,11 +136,10 @@ class CMainParams : public CChainParams { consensus.nGovernanceMinQuorum = 10; consensus.nGovernanceFilterElements = 20000; consensus.nMasternodeMinimumConfirmations = 15; - consensus.nMajorityEnforceBlockUpgrade = 750; - consensus.nMajorityRejectBlockOutdated = 950; - consensus.nMajorityWindow = 1000; - consensus.BIP34Height = 1; - consensus.BIP34Hash = uint256S("0x000007d91d1254d60e2dd1ae580383070a4ddffa4c64c2eeb4a2f9ecc0414343"); + consensus.BIP34Height = 951; + consensus.BIP34Hash = uint256S("0x000001f35e70f7c5705f64c6c5cc3dea9449e74d5b5c7cf74dad1bcca14a8012"); + consensus.BIP65Height = 619382; // 00000000000076d8fcea02ec0963de4abfd01e771fec0863f960c2c64fe6f357 + consensus.BIP66Height = 245817; // 00000000000b1fa2dfa312863570e13fae9ca7b5566cb27e55422620b469aefa consensus.powLimit = uint256S("00000fffff000000000000000000000000000000000000000000000000000000"); consensus.nPowTargetTimespan = 24 * 60 * 60; // Dash: 1 day consensus.nPowTargetSpacing = 2.5 * 60; // Dash: 2.5 minutes @@ -244,11 +244,14 @@ class CMainParams : public CChainParams { ( 407452, uint256S("0x000000000003c6a87e73623b9d70af7cd908ae22fee466063e4ffc20be1d2dbc")) ( 523412, uint256S("0x000000000000e54f036576a10597e0e42cc22a5159ce572f999c33975e121d4d")) ( 523930, uint256S("0x0000000000000bccdb11c2b1cfb0ecab452abf267d89b7f46eaf2d54ce6e652c")) - ( 750000, uint256S("0x00000000000000b4181bbbdddbae464ce11fede5d0292fb63fdede1e7c8ab21c")), - 1507424630, // * UNIX timestamp of last checkpoint block - 3701128, // * total number of transactions between genesis and last checkpoint + ( 750000, uint256S("0x00000000000000b4181bbbdddbae464ce11fede5d0292fb63fdede1e7c8ab21c")) + }; + + chainTxData = ChainTxData{ + 1507424630, // * UNIX timestamp of last known number of transactions + 3701128, // * total number of transactions between genesis and that timestamp // (the tx=... 
number in the SetBestChain debug.log lines) - 5000 // * estimated number of transactions per day after checkpoint + 0.1 // * estimated number of transactions per second after that timestamp }; } }; @@ -275,11 +278,10 @@ class CTestNetParams : public CChainParams { consensus.nGovernanceMinQuorum = 1; consensus.nGovernanceFilterElements = 500; consensus.nMasternodeMinimumConfirmations = 1; - consensus.nMajorityEnforceBlockUpgrade = 51; - consensus.nMajorityRejectBlockOutdated = 75; - consensus.nMajorityWindow = 100; - consensus.BIP34Height = 1; - consensus.BIP34Hash = uint256S("0x0000047d24635e347be3aaaeb66c26be94901a2f962feccd4f95090191f208c1"); + consensus.BIP34Height = 76; + consensus.BIP34Hash = uint256S("0x000008ebb1db2598e897d17275285767717c6acfeac4c73def49fbea1ddcbcb6"); + consensus.BIP65Height = 2431; // 0000039cf01242c7f921dcb4806a5994bc003b48c1973ae0c89b67809c2bb2ab + consensus.BIP66Height = 2075; // 0000002acdd29a14583540cb72e1c5cc83783560e38fa7081495d474fe1671f7 consensus.powLimit = uint256S("00000fffff000000000000000000000000000000000000000000000000000000"); consensus.nPowTargetTimespan = 24 * 60 * 60; // Dash: 1 day consensus.nPowTargetSpacing = 2.5 * 60; // Dash: 2.5 minutes @@ -361,12 +363,14 @@ class CTestNetParams : public CChainParams { boost::assign::map_list_of ( 261, uint256S("0x00000c26026d0815a7e2ce4fa270775f61403c040647ff2c3091f99e894a4618")) ( 1999, uint256S("0x00000052e538d27fa53693efe6fb6892a0c1d26c0235f599171c48a3cce553b1")) - ( 2999, uint256S("0x0000024bc3f4f4cb30d29827c13d921ad77d2c6072e586c7f60d83c2722cdcc5")), + ( 2999, uint256S("0x0000024bc3f4f4cb30d29827c13d921ad77d2c6072e586c7f60d83c2722cdcc5")) + }; - 1462856598, // * UNIX timestamp of last checkpoint block - 3094, // * total number of transactions between genesis and last checkpoint + chainTxData = ChainTxData{ + 1462856598, // * UNIX timestamp of last known number of transactions + 3094, // * total number of transactions between genesis and that timestamp // (the tx=... 
number in the SetBestChain debug.log lines) - 500 // * estimated number of transactions per day after checkpoint + 0.01 // * estimated number of transactions per second after that timestamp }; } @@ -394,11 +398,9 @@ class CDevNetParams : public CChainParams { consensus.nGovernanceMinQuorum = 1; consensus.nGovernanceFilterElements = 500; consensus.nMasternodeMinimumConfirmations = 1; - consensus.nMajorityEnforceBlockUpgrade = 51; - consensus.nMajorityRejectBlockOutdated = 75; - consensus.nMajorityWindow = 100; - consensus.BIP34Height = 1; - consensus.BIP34Hash = uint256S("0x0000047d24635e347be3aaaeb66c26be94901a2f962feccd4f95090191f208c1"); + consensus.BIP34Height = 1; // BIP34 activated immediately on devnet (BIP34Hash is set later for the devnet genesis block) + consensus.BIP65Height = 1; // BIP65 activated immediately on devnet + consensus.BIP66Height = 1; // BIP66 activated immediately on devnet consensus.powLimit = uint256S("7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"); consensus.nPowTargetTimespan = 24 * 60 * 60; // Dash: 1 day consensus.nPowTargetSpacing = 2.5 * 60; // Dash: 2.5 minutes @@ -446,6 +448,8 @@ class CDevNetParams : public CChainParams { devnetGenesis = FindDevNetGenesisBlock(consensus, genesis, 50 * COIN); + consensus.BIP34Hash = devnetGenesis.GetHash(); + vFixedSeeds.clear(); vSeeds.clear(); //vSeeds.push_back(CDNSSeedData("dashevo.org", "devnet-seed.dashevo.org")); @@ -478,13 +482,15 @@ class CDevNetParams : public CChainParams { checkpointData = (CCheckpointData) { boost::assign::map_list_of - ( 0, uint256S("0x000008ca1832a4baf228eb1553c03d3a2c8e02399550dd6ea8d65cec3ef23d2e")) - ( 1, devnetGenesis.GetHash()), - 0, - 0, - 0 + ( 0, uint256S("0x000008ca1832a4baf228eb1553c03d3a2c8e02399550dd6ea8d65cec3ef23d2e")) + ( 1, devnetGenesis.GetHash()) }; + chainTxData = ChainTxData{ + devnetGenesis.GetBlockTime(), // * UNIX timestamp of devnet genesis block + 2, // * we only have 2 coinbase transactions when a devnet is started up + 0.01 // * estimated number of transactions per second + }; } }; static CDevNetParams *devNetParams; @@ -511,11 +517,10 @@ class CRegTestParams : public CChainParams { consensus.nGovernanceMinQuorum = 1; consensus.nGovernanceFilterElements = 100; consensus.nMasternodeMinimumConfirmations = 1; - consensus.nMajorityEnforceBlockUpgrade = 750; - consensus.nMajorityRejectBlockOutdated = 950; - consensus.nMajorityWindow = 1000; - consensus.BIP34Height = -1; // BIP34 has not necessarily activated on regtest + consensus.BIP34Height = 100000000; // BIP34 has not activated on regtest (far in the future so block v1 are not rejected in tests) consensus.BIP34Hash = uint256(); + consensus.BIP65Height = 1351; // BIP65 activated on regtest (Used in rpc activation tests) + consensus.BIP66Height = 1251; // BIP66 activated on regtest (Used in rpc activation tests) consensus.powLimit = uint256S("7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"); consensus.nPowTargetTimespan = 24 * 60 * 60; // Dash: 1 day consensus.nPowTargetSpacing = 2.5 * 60; // Dash: 2.5 minutes @@ -567,11 +572,15 @@ class CRegTestParams : public CChainParams { checkpointData = (CCheckpointData){ boost::assign::map_list_of - ( 0, uint256S("0x000008ca1832a4baf228eb1553c03d3a2c8e02399550dd6ea8d65cec3ef23d2e")), + ( 0, uint256S("0x000008ca1832a4baf228eb1553c03d3a2c8e02399550dd6ea8d65cec3ef23d2e")) + }; + + chainTxData = ChainTxData{ 0, 0, 0 }; + // Regtest Dash addresses start with 'y' base58Prefixes[PUBKEY_ADDRESS] = std::vector(1,140); // Regtest Dash script 
addresses start with '8' or '9' diff --git a/src/chainparams.h b/src/chainparams.h index df19d2d2c235..4fd9774fcd2e 100644 --- a/src/chainparams.h +++ b/src/chainparams.h @@ -28,9 +28,12 @@ typedef std::map MapCheckpoints; struct CCheckpointData { MapCheckpoints mapCheckpoints; - int64_t nTimeLastCheckpoint; - int64_t nTransactionsLastCheckpoint; - double fTransactionsPerDay; +}; + +struct ChainTxData { + int64_t nTime; + int64_t nTxCount; + double dTxRate; }; /** @@ -79,6 +82,7 @@ class CChainParams int ExtCoinType() const { return nExtCoinType; } const std::vector& FixedSeeds() const { return vFixedSeeds; } const CCheckpointData& Checkpoints() const { return checkpointData; } + const ChainTxData& TxData() const { return chainTxData; } int PoolMaxTransactions() const { return nPoolMaxTransactions; } int FulfilledRequestExpireTime() const { return nFulfilledRequestExpireTime; } std::string SporkPubKey() const { return strSporkPubKey; } @@ -105,6 +109,7 @@ class CChainParams bool fMineBlocksOnDemand; bool fAllowMultipleAddressesFromGroup; CCheckpointData checkpointData; + ChainTxData chainTxData; int nPoolMaxTransactions; int nFulfilledRequestExpireTime; std::string strSporkPubKey; diff --git a/src/checkpoints.cpp b/src/checkpoints.cpp index b31c05a95c6a..6f4f0f2825bc 100644 --- a/src/checkpoints.cpp +++ b/src/checkpoints.cpp @@ -16,46 +16,6 @@ namespace Checkpoints { - /** - * How many times slower we expect checking transactions after the last - * checkpoint to be (from checking signatures, which is skipped up to the - * last checkpoint). This number is a compromise, as it can't be accurate - * for every system. When reindexing from a fast disk with a slow CPU, it - * can be up to 20, while when downloading from a slow network with a - * fast multicore CPU, it won't be much higher than 1. - */ - static const double SIGCHECK_VERIFICATION_FACTOR = 5.0; - - //! Guess how far we are in the verification process at the given block index - double GuessVerificationProgress(const CCheckpointData& data, CBlockIndex *pindex, bool fSigchecks) { - if (pindex==NULL) - return 0.0; - - int64_t nNow = time(NULL); - - double fSigcheckVerificationFactor = fSigchecks ? SIGCHECK_VERIFICATION_FACTOR : 1.0; - double fWorkBefore = 0.0; // Amount of work done before pindex - double fWorkAfter = 0.0; // Amount of work left after pindex (estimated) - // Work is defined as: 1.0 per transaction before the last checkpoint, and - // fSigcheckVerificationFactor per transaction after. 
- - if (pindex->nChainTx <= data.nTransactionsLastCheckpoint) { - double nCheapBefore = pindex->nChainTx; - double nCheapAfter = data.nTransactionsLastCheckpoint - pindex->nChainTx; - double nExpensiveAfter = (nNow - data.nTimeLastCheckpoint)/86400.0*data.fTransactionsPerDay; - fWorkBefore = nCheapBefore; - fWorkAfter = nCheapAfter + nExpensiveAfter*fSigcheckVerificationFactor; - } else { - double nCheapBefore = data.nTransactionsLastCheckpoint; - double nExpensiveBefore = pindex->nChainTx - data.nTransactionsLastCheckpoint; - double nExpensiveAfter = (nNow - pindex->GetBlockTime())/86400.0*data.fTransactionsPerDay; - fWorkBefore = nCheapBefore + nExpensiveBefore*fSigcheckVerificationFactor; - fWorkAfter = nExpensiveAfter*fSigcheckVerificationFactor; - } - - return fWorkBefore / (fWorkBefore + fWorkAfter); - } - CBlockIndex* GetLastCheckpoint(const CCheckpointData& data) { const MapCheckpoints& checkpoints = data.mapCheckpoints; diff --git a/src/checkpoints.h b/src/checkpoints.h index 04346f35ffa7..c8584948f9de 100644 --- a/src/checkpoints.h +++ b/src/checkpoints.h @@ -22,8 +22,6 @@ namespace Checkpoints //! Returns last CBlockIndex* in mapBlockIndex that is a checkpoint CBlockIndex* GetLastCheckpoint(const CCheckpointData& data); -double GuessVerificationProgress(const CCheckpointData& data, CBlockIndex* pindex, bool fSigchecks = true); - } //namespace Checkpoints #endif // BITCOIN_CHECKPOINTS_H diff --git a/src/consensus/params.h b/src/consensus/params.h index e85949fe087e..741b1e76e051 100644 --- a/src/consensus/params.h +++ b/src/consensus/params.h @@ -56,15 +56,15 @@ struct Params { int nGovernanceMinQuorum; // Min absolute vote count to trigger an action int nGovernanceFilterElements; int nMasternodeMinimumConfirmations; - /** Used to check majorities for block version upgrade */ - int nMajorityEnforceBlockUpgrade; - int nMajorityRejectBlockOutdated; - int nMajorityWindow; /** Block height and hash at which BIP34 becomes active */ int BIP34Height; uint256 BIP34Hash; + /** Block height at which BIP65 becomes active */ + int BIP65Height; + /** Block height at which BIP66 becomes active */ + int BIP66Height; /** - * Minimum blocks including miner confirmation of the total of nMinerConfirmationWindow blocks in a retargetting period, + * Minimum blocks including miner confirmation of the total of nMinerConfirmationWindow blocks in a retargeting period, * (nPowTargetTimespan / nPowTargetSpacing) which is also used for BIP9 deployments. * Default BIP9Deployment::nThreshold value for deployments where it's not specified and for unknown deployments. * Examples: 1916 for 95%, 1512 for testchains. 
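Note on the checkpoint changes above: the removed Checkpoints::GuessVerificationProgress() estimated sync progress from a per-day transaction rate anchored at the last checkpoint, while the new ChainTxData carries a timestamp, a transaction count and a per-second rate (nTime, nTxCount, dTxRate). The estimator that consumes these fields is not part of the hunks shown here; the Python sketch below is a hypothetical helper, not code from this patch, and only illustrates how a rate-based progress figure can be derived from a ChainTxData sample such as the mainnet values added above (1507424630, 3701128, 0.1).

    import time

    def guess_verification_progress(chain_tx_data, n_chain_tx, block_time, now=None):
        # chain_tx_data mirrors ChainTxData: (nTime, nTxCount, dTxRate).
        # n_chain_tx is the cumulative transaction count up to the block being scored,
        # block_time is that block's timestamp.
        n_time, n_tx_count, d_tx_rate = chain_tx_data
        now = int(time.time()) if now is None else now
        if n_chain_tx <= n_tx_count:
            # Still behind the hard-coded sample: project today's total from the sample point.
            tx_total = n_tx_count + (now - n_time) * d_tx_rate
        else:
            # Past the sample: project today's total from the block itself.
            tx_total = n_chain_tx + (now - block_time) * d_tx_rate
        return min(1.0, n_chain_tx / tx_total) if tx_total > 0 else 0.0

    # e.g. guess_verification_progress((1507424630, 3701128, 0.1), 3701128, 1507424630)

The same shape of estimate applies to testnet, devnet and regtest using their respective ChainTxData values added in this patch.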
diff --git a/src/crypto/common.h b/src/crypto/common.h index 85f7ea7def06..02333fa7d888 100644 --- a/src/crypto/common.h +++ b/src/crypto/common.h @@ -10,57 +10,73 @@ #endif #include +#include #include "compat/endian.h" uint16_t static inline ReadLE16(const unsigned char* ptr) { - return le16toh(*((uint16_t*)ptr)); + uint16_t x; + memcpy((char*)&x, ptr, 2); + return le16toh(x); } uint32_t static inline ReadLE32(const unsigned char* ptr) { - return le32toh(*((uint32_t*)ptr)); + uint32_t x; + memcpy((char*)&x, ptr, 4); + return le32toh(x); } uint64_t static inline ReadLE64(const unsigned char* ptr) { - return le64toh(*((uint64_t*)ptr)); + uint64_t x; + memcpy((char*)&x, ptr, 8); + return le64toh(x); } void static inline WriteLE16(unsigned char* ptr, uint16_t x) { - *((uint16_t*)ptr) = htole16(x); + uint16_t v = htole16(x); + memcpy(ptr, (char*)&v, 2); } void static inline WriteLE32(unsigned char* ptr, uint32_t x) { - *((uint32_t*)ptr) = htole32(x); + uint32_t v = htole32(x); + memcpy(ptr, (char*)&v, 4); } void static inline WriteLE64(unsigned char* ptr, uint64_t x) { - *((uint64_t*)ptr) = htole64(x); + uint64_t v = htole64(x); + memcpy(ptr, (char*)&v, 8); } uint32_t static inline ReadBE32(const unsigned char* ptr) { - return be32toh(*((uint32_t*)ptr)); + uint32_t x; + memcpy((char*)&x, ptr, 4); + return be32toh(x); } uint64_t static inline ReadBE64(const unsigned char* ptr) { - return be64toh(*((uint64_t*)ptr)); + uint64_t x; + memcpy((char*)&x, ptr, 8); + return be64toh(x); } void static inline WriteBE32(unsigned char* ptr, uint32_t x) { - *((uint32_t*)ptr) = htobe32(x); + uint32_t v = htobe32(x); + memcpy(ptr, (char*)&v, 4); } void static inline WriteBE64(unsigned char* ptr, uint64_t x) { - *((uint64_t*)ptr) = htobe64(x); + uint64_t v = htobe64(x); + memcpy(ptr, (char*)&v, 8); } #endif // BITCOIN_CRYPTO_COMMON_H diff --git a/src/cuckoocache.h b/src/cuckoocache.h index efd6a820b5ce..ff47e9776b64 100644 --- a/src/cuckoocache.h +++ b/src/cuckoocache.h @@ -257,7 +257,7 @@ class cache * * First, epoch_check decrements and checks the cheap heuristic, and then does * a more expensive scan if the cheap heuristic runs out. If the expensive - * scan suceeds, the epochs are aged and old elements are allow_erased. The + * scan succeeds, the epochs are aged and old elements are allow_erased. The * cheap heuristic is reset to retrigger after the worst case growth of the * current epoch's elements would exceed the epoch_size. */ @@ -395,7 +395,7 @@ class cache * 1) On first iteration, last_loc == invalid(), find returns last, so * last_loc defaults to locs[0]. * 2) On further iterations, where last_loc == locs[k], last_loc will - * go to locs[k+1 % 8], i.e., next of the 8 indicies wrapping around + * go to locs[k+1 % 8], i.e., next of the 8 indices wrapping around * to 0 if needed. * * This prevents moving the element we just put in. diff --git a/src/dash-tx.cpp b/src/dash-tx.cpp index 5b70788b7d15..bf260b7347d3 100644 --- a/src/dash-tx.cpp +++ b/src/dash-tx.cpp @@ -78,8 +78,13 @@ static int AppInitRawTx(int argc, char* argv[]) strUsage += HelpMessageOpt("locktime=N", _("Set TX lock time to N")); strUsage += HelpMessageOpt("nversion=N", _("Set TX version to N")); strUsage += HelpMessageOpt("outaddr=VALUE:ADDRESS", _("Add address-based output to TX")); + strUsage += HelpMessageOpt("outpubkey=VALUE:PUBKEY[:FLAGS]", _("Add pay-to-pubkey output to TX") + ". 
" + + _("Optionally add the \"S\" flag to wrap the output in a pay-to-script-hash.")); strUsage += HelpMessageOpt("outdata=[VALUE:]DATA", _("Add data-based output to TX")); - strUsage += HelpMessageOpt("outscript=VALUE:SCRIPT", _("Add raw script output to TX")); + strUsage += HelpMessageOpt("outscript=VALUE:SCRIPT[:FLAGS]", _("Add raw script output to TX") + ". " + + _("Optionally add the \"S\" flag to wrap the output in a pay-to-script-hash.")); + strUsage += HelpMessageOpt("outmultisig=VALUE:REQUIRED:PUBKEYS:PUBKEY1:PUBKEY2:....[:FLAGS]", _("Add Pay To n-of-m Multi-sig output to TX. n = REQUIRED, m = PUBKEYS") + ". " + + _("Optionally add the \"S\" flag to wrap the output in a pay-to-script-hash.")); strUsage += HelpMessageOpt("sign=SIGHASH-FLAGS", _("Add zero or more signatures to transaction") + ". " + _("This command requires JSON registers:") + _("prevtxs=JSON object") + ", " + @@ -168,6 +173,14 @@ static void RegisterLoad(const std::string& strInput) RegisterSetJson(key, valStr); } +static CAmount ExtractAndValidateValue(const std::string& strValue) +{ + CAmount value; + if (!ParseMoney(strValue, value)) + throw std::runtime_error("invalid TX output value"); + return value; +} + static void MutateTxVersion(CMutableTransaction& tx, const std::string& cmdVal) { int64_t newVersion = atoi64(cmdVal); @@ -222,25 +235,18 @@ static void MutateTxAddInput(CMutableTransaction& tx, const std::string& strInpu static void MutateTxAddOutAddr(CMutableTransaction& tx, const std::string& strInput) { - // separate VALUE:ADDRESS in string - size_t pos = strInput.find(':'); - if ((pos == std::string::npos) || - (pos == 0) || - (pos == (strInput.size() - 1))) - throw std::runtime_error("TX output missing separator"); + // Separate into VALUE:ADDRESS + std::vector vStrInputParts; + boost::split(vStrInputParts, strInput, boost::is_any_of(":")); - // extract and validate VALUE - std::string strValue = strInput.substr(0, pos); - CAmount value; - if (!ParseMoney(strValue, value)) - throw std::runtime_error("invalid TX output value"); + // Extract and validate VALUE + CAmount value = ExtractAndValidateValue(vStrInputParts[0]); // extract and validate ADDRESS - std::string strAddr = strInput.substr(pos + 1, std::string::npos); + std::string strAddr = vStrInputParts[1]; CBitcoinAddress addr(strAddr); if (!addr.IsValid()) throw std::runtime_error("invalid TX output address"); - // build standard output script via GetScriptForDestination() CScript scriptPubKey = GetScriptForDestination(addr.Get()); @@ -249,6 +255,102 @@ static void MutateTxAddOutAddr(CMutableTransaction& tx, const std::string& strIn tx.vout.push_back(txout); } +static void MutateTxAddOutPubKey(CMutableTransaction& tx, const std::string& strInput) +{ + // Separate into VALUE:PUBKEY[:FLAGS] + std::vector vStrInputParts; + boost::split(vStrInputParts, strInput, boost::is_any_of(":")); + + // Extract and validate VALUE + CAmount value = ExtractAndValidateValue(vStrInputParts[0]); + + // Extract and validate PUBKEY + CPubKey pubkey(ParseHex(vStrInputParts[1])); + if (!pubkey.IsFullyValid()) + throw std::runtime_error("invalid TX output pubkey"); + CScript scriptPubKey = GetScriptForRawPubKey(pubkey); + CBitcoinAddress addr(scriptPubKey); + + // Extract and validate FLAGS + bool bScriptHash = false; + if (vStrInputParts.size() == 3) { + std::string flags = vStrInputParts[2]; + bScriptHash = (flags.find("S") != std::string::npos); + } + + if (bScriptHash) { + // Get the address for the redeem script, then call + // GetScriptForDestination() to construct 
a P2SH scriptPubKey. + CBitcoinAddress redeemScriptAddr(scriptPubKey); + scriptPubKey = GetScriptForDestination(redeemScriptAddr.Get()); + } + + // construct TxOut, append to transaction output list + CTxOut txout(value, scriptPubKey); + tx.vout.push_back(txout); +} + +static void MutateTxAddOutMultiSig(CMutableTransaction& tx, const std::string& strInput) +{ + // Separate into VALUE:REQUIRED:NUMKEYS:PUBKEY1:PUBKEY2:....[:FLAGS] + std::vector vStrInputParts; + boost::split(vStrInputParts, strInput, boost::is_any_of(":")); + + // Check that there are enough parameters + if (vStrInputParts.size()<3) + throw std::runtime_error("Not enough multisig parameters"); + + // Extract and validate VALUE + CAmount value = ExtractAndValidateValue(vStrInputParts[0]); + + // Extract REQUIRED + uint32_t required = stoul(vStrInputParts[1]); + + // Extract NUMKEYS + uint32_t numkeys = stoul(vStrInputParts[2]); + + // Validate there are the correct number of pubkeys + if (vStrInputParts.size() < numkeys + 3) + throw std::runtime_error("incorrect number of multisig pubkeys"); + + if (required < 1 || required > 20 || numkeys < 1 || numkeys > 20 || numkeys < required) + throw std::runtime_error("multisig parameter mismatch. Required " \ + + std::to_string(required) + " of " + std::to_string(numkeys) + "signatures."); + + // extract and validate PUBKEYs + std::vector pubkeys; + for(int pos = 1; pos <= int(numkeys); pos++) { + CPubKey pubkey(ParseHex(vStrInputParts[pos + 2])); + if (!pubkey.IsFullyValid()) + throw std::runtime_error("invalid TX output pubkey"); + pubkeys.push_back(pubkey); + } + + // Extract FLAGS + bool bScriptHash = false; + if (vStrInputParts.size() == numkeys + 4) { + std::string flags = vStrInputParts.back(); + bScriptHash = (flags.find("S") != std::string::npos); + } + else if (vStrInputParts.size() > numkeys + 4) { + // Validate that there were no more parameters passed + throw std::runtime_error("Too many parameters"); + } + + CScript scriptPubKey = GetScriptForMultisig(required, pubkeys); + + if (bScriptHash) { + // Get the address for the redeem script, then call + // GetScriptForDestination() to construct a P2SH scriptPubKey. 
+ CBitcoinAddress addr(scriptPubKey); + scriptPubKey = GetScriptForDestination(addr.Get()); + } + + // construct TxOut, append to transaction output list + CTxOut txout(value, scriptPubKey); + tx.vout.push_back(txout); +} + static void MutateTxAddOutData(CMutableTransaction& tx, const std::string& strInput) { CAmount value = 0; @@ -260,10 +362,8 @@ static void MutateTxAddOutData(CMutableTransaction& tx, const std::string& strIn throw std::runtime_error("TX output value not specified"); if (pos != std::string::npos) { - // extract and validate VALUE - std::string strValue = strInput.substr(0, pos); - if (!ParseMoney(strValue, value)) - throw std::runtime_error("invalid TX output value"); + // Extract and validate VALUE + value = ExtractAndValidateValue(strInput.substr(0, pos)); } // extract and validate DATA @@ -280,21 +380,30 @@ static void MutateTxAddOutData(CMutableTransaction& tx, const std::string& strIn static void MutateTxAddOutScript(CMutableTransaction& tx, const std::string& strInput) { - // separate VALUE:SCRIPT in string - size_t pos = strInput.find(':'); - if ((pos == std::string::npos) || - (pos == 0)) + // separate VALUE:SCRIPT[:FLAGS] + std::vector vStrInputParts; + boost::split(vStrInputParts, strInput, boost::is_any_of(":")); + if (vStrInputParts.size() < 2) throw std::runtime_error("TX output missing separator"); - // extract and validate VALUE - std::string strValue = strInput.substr(0, pos); - CAmount value; - if (!ParseMoney(strValue, value)) - throw std::runtime_error("invalid TX output value"); + // Extract and validate VALUE + CAmount value = ExtractAndValidateValue(vStrInputParts[0]); // extract and validate script - std::string strScript = strInput.substr(pos + 1, std::string::npos); - CScript scriptPubKey = ParseScript(strScript); // throws on err + std::string strScript = vStrInputParts[1]; + CScript scriptPubKey = ParseScript(strScript); + + // Extract FLAGS + bool bScriptHash = false; + if (vStrInputParts.size() == 3) { + std::string flags = vStrInputParts.back(); + bScriptHash = (flags.find("S") != std::string::npos); + } + + if (bScriptHash) { + CBitcoinAddress addr(scriptPubKey); + scriptPubKey = GetScriptForDestination(addr.Get()); + } // construct TxOut, append to transaction output list CTxOut txout(value, scriptPubKey); @@ -523,10 +632,14 @@ static void MutateTx(CMutableTransaction& tx, const std::string& command, MutateTxDelOutput(tx, commandVal); else if (command == "outaddr") MutateTxAddOutAddr(tx, commandVal); - else if (command == "outdata") - MutateTxAddOutData(tx, commandVal); + else if (command == "outpubkey") + MutateTxAddOutPubKey(tx, commandVal); + else if (command == "outmultisig") + MutateTxAddOutMultiSig(tx, commandVal); else if (command == "outscript") MutateTxAddOutScript(tx, commandVal); + else if (command == "outdata") + MutateTxAddOutData(tx, commandVal); else if (command == "sign") { if (!ecc) { ecc.reset(new Secp256k1Init()); } diff --git a/src/httprpc.cpp b/src/httprpc.cpp index 970ce2db73b7..805e9e364ee6 100644 --- a/src/httprpc.cpp +++ b/src/httprpc.cpp @@ -25,7 +25,7 @@ static const char* WWW_AUTH_HEADER_DATA = "Basic realm=\"jsonrpc\""; /** Simple one-shot callback timer to be used by the RPC mechanism to e.g. - * re-lock the wellet. + * re-lock the wallet. 
*/ class HTTPRPCTimer : public RPCTimerBase { @@ -113,8 +113,8 @@ static bool multiUserAuthorized(std::string strUserPass) std::string strHash = vFields[2]; unsigned int KEY_SIZE = 32; - unsigned char *out = new unsigned char[KEY_SIZE]; - + unsigned char out[KEY_SIZE]; + CHMAC_SHA256(reinterpret_cast<const unsigned char*>(strSalt.c_str()), strSalt.size()).Write(reinterpret_cast<const unsigned char*>(strPass.c_str()), strPass.size()).Finalize(out); std::vector<unsigned char> hexvec(out, out+KEY_SIZE); std::string strHashFromPass = HexStr(hexvec); diff --git a/src/init.cpp b/src/init.cpp index 3fd1afcc184d..fb54c0e0c5b8 100644 --- a/src/init.cpp +++ b/src/init.cpp @@ -416,9 +416,9 @@ std::string HelpMessage(HelpMessageMode mode) #ifndef WIN32 strUsage += HelpMessageOpt("-pid=<file>", strprintf(_("Specify pid file (default: %s)"), BITCOIN_PID_FILENAME)); #endif - strUsage += HelpMessageOpt("-prune=<n>", strprintf(_("Reduce storage requirements by pruning (deleting) old blocks. This mode is incompatible with -txindex and -rescan. " + strUsage += HelpMessageOpt("-prune=<n>", strprintf(_("Reduce storage requirements by enabling pruning (deleting) of old blocks. This allows the pruneblockchain RPC to be called to delete specific blocks, and enables automatic pruning of old blocks if a target size in MiB is provided. This mode is incompatible with -txindex and -rescan. " "Warning: Reverting this setting requires re-downloading the entire blockchain. " - "(default: 0 = disable pruning blocks, >%u = target size in MiB to use for block files)"), MIN_DISK_SPACE_FOR_BLOCK_FILES / 1024 / 1024)); + "(default: 0 = disable pruning blocks, 1 = allow manual pruning via RPC, >%u = automatically prune block files to stay under the specified target size in MiB)"), MIN_DISK_SPACE_FOR_BLOCK_FILES / 1024 / 1024)); strUsage += HelpMessageOpt("-reindex-chainstate", _("Rebuild chain state from the currently indexed blocks")); strUsage += HelpMessageOpt("-reindex", _("Rebuild chain state and block index from the blk*.dat files on disk")); #ifndef WIN32 @@ -565,8 +565,11 @@ std::string HelpMessage(HelpMessageMode mode) strUsage += HelpMessageGroup(_("Node relay options:")); - if (showDebug) + if (showDebug) { strUsage += HelpMessageOpt("-acceptnonstdtxn", strprintf("Relay and mine \"non-standard\" transactions (%sdefault: %u)", "testnet/regtest only; ", !Params(CBaseChainParams::TESTNET).RequireStandard())); + strUsage += HelpMessageOpt("-incrementalrelayfee=<amt>", strprintf("Fee rate (in %s/kB) used to define cost of relay, used for mempool limiting and BIP 125 replacement. (default: %s)", CURRENCY_UNIT, FormatMoney(DEFAULT_INCREMENTAL_RELAY_FEE))); + strUsage += HelpMessageOpt("-dustrelayfee=<amt>", strprintf("Fee rate (in %s/kB) used to define dust, the value of an output such that it will cost about 1/3 of its value in fees at this fee rate to spend it.
(default: %s)", CURRENCY_UNIT, FormatMoney(DUST_RELAY_TX_FEE))); + } strUsage += HelpMessageOpt("-bytespersigop", strprintf(_("Minimum bytes per sigop in transactions we relay and mine (default: %u)"), DEFAULT_BYTES_PER_SIGOP)); strUsage += HelpMessageOpt("-datacarrier", strprintf(_("Relay and mine data carrier transactions (default: %u)"), DEFAULT_ACCEPT_DATACARRIER)); strUsage += HelpMessageOpt("-datacarriersize", strprintf(_("Maximum size of data in data carrier transactions we relay and mine (default: %u)"), MAX_OP_RETURN_RELAY)); @@ -575,6 +578,7 @@ std::string HelpMessage(HelpMessageMode mode) strUsage += HelpMessageGroup(_("Block creation options:")); strUsage += HelpMessageOpt("-blockmaxsize=", strprintf(_("Set maximum block size in bytes (default: %d)"), DEFAULT_BLOCK_MAX_SIZE)); strUsage += HelpMessageOpt("-blockprioritysize=", strprintf(_("Set maximum size of high-priority/low-fee transactions in bytes (default: %d)"), DEFAULT_BLOCK_PRIORITY_SIZE)); + strUsage += HelpMessageOpt("-blockmintxfee=", strprintf(_("Set lowest fee rate (in %s/kB) for transactions to be included in block creation. (default: %s)"), CURRENCY_UNIT, FormatMoney(DEFAULT_BLOCK_MIN_TX_FEE))); if (showDebug) strUsage += HelpMessageOpt("-blockversion=", "Override block version to test forking scenarios"); @@ -1086,6 +1090,15 @@ bool AppInitParameterInteraction() int64_t nMempoolSizeMin = GetArg("-limitdescendantsize", DEFAULT_DESCENDANT_SIZE_LIMIT) * 1000 * 40; if (nMempoolSizeMax < 0 || nMempoolSizeMax < nMempoolSizeMin) return InitError(strprintf(_("-maxmempool must be at least %d MB"), std::ceil(nMempoolSizeMin / 1000000.0))); + // incremental relay fee sets the minimimum feerate increase necessary for BIP 125 replacement in the mempool + // and the amount the mempool min fee increases above the feerate of txs evicted due to mempool limiting. + if (IsArgSet("-incrementalrelayfee")) + { + CAmount n = 0; + if (!ParseMoney(GetArg("-incrementalrelayfee", ""), n)) + return InitError(AmountErrMsg("incrementalrelayfee", GetArg("-incrementalrelayfee", ""))); + incrementalRelayFee = CFeeRate(n); + } // -par=0 means autodetect, but nScriptCheckThreads==0 means no concurrency nScriptCheckThreads = GetArg("-par", DEFAULT_SCRIPTCHECK_THREADS); @@ -1097,12 +1110,16 @@ bool AppInitParameterInteraction() nScriptCheckThreads = MAX_SCRIPTCHECK_THREADS; // block pruning; get the amount of disk space (in MiB) to allot for block & undo files - int64_t nSignedPruneTarget = GetArg("-prune", 0) * 1024 * 1024; - if (nSignedPruneTarget < 0) { + int64_t nPruneArg = GetArg("-prune", 0); + if (nPruneArg < 0) { return InitError(_("Prune cannot be configured with a negative value.")); } - nPruneTarget = (uint64_t) nSignedPruneTarget; - if (nPruneTarget) { + nPruneTarget = (uint64_t) nPruneArg * 1024 * 1024; + if (nPruneArg == 1) { // manual pruning: -prune=1 + LogPrintf("Block pruning enabled. Use RPC call pruneblockchain(height) to manually prune block and undo files.\n"); + nPruneTarget = std::numeric_limits::max(); + fPruneMode = true; + } else if (nPruneTarget) { if (nPruneTarget < MIN_DISK_SPACE_FOR_BLOCK_FILES) { return InitError(strprintf(_("Prune configured below the minimum of %d MiB. 
Please use a higher number."), MIN_DISK_SPACE_FOR_BLOCK_FILES / 1024 / 1024)); } @@ -1132,6 +1149,29 @@ bool AppInitParameterInteraction() return InitError(AmountErrMsg("minrelaytxfee", GetArg("-minrelaytxfee", ""))); // High fee check is done afterward in CWallet::ParameterInteraction() ::minRelayTxFee = CFeeRate(n); + } else if (incrementalRelayFee > ::minRelayTxFee) { + // Allow only setting incrementalRelayFee to control both + ::minRelayTxFee = incrementalRelayFee; + LogPrintf("Increasing minrelaytxfee to %s to match incrementalrelayfee\n",::minRelayTxFee.ToString()); + } + + // Sanity check argument for min fee for including tx in block + // TODO: Harmonize which arguments need sanity checking and where that happens + if (IsArgSet("-blockmintxfee")) + { + CAmount n = 0; + if (!ParseMoney(GetArg("-blockmintxfee", ""), n)) + return InitError(AmountErrMsg("blockmintxfee", GetArg("-blockmintxfee", ""))); + } + + // Feerate used to define dust. Shouldn't be changed lightly as old + // implementations may inadvertently create non-standard transactions + if (IsArgSet("-dustrelayfee")) + { + CAmount n = 0; + if (!ParseMoney(GetArg("-dustrelayfee", ""), n) || 0 == n) + return InitError(AmountErrMsg("dustrelayfee", GetArg("-dustrelayfee", ""))); + dustRelayFee = CFeeRate(n); } fRequireStandard = !GetBoolArg("-acceptnonstdtxn", !Params().RequireStandard()); diff --git a/src/miner.cpp b/src/miner.cpp index 47843a827dde..40220051f331 100644 --- a/src/miner.cpp +++ b/src/miner.cpp @@ -81,6 +81,15 @@ BlockAssembler::BlockAssembler(const CChainParams& _chainparams) { // Largest block you're willing to create: nBlockMaxSize = GetArg("-blockmaxsize", DEFAULT_BLOCK_MAX_SIZE); + + if (IsArgSet("-blockmintxfee")) { + CAmount n = 0; + ParseMoney(GetArg("-blockmintxfee", ""), n); + blockMinFeeRate = CFeeRate(n); + } else { + blockMinFeeRate = CFeeRate(DEFAULT_BLOCK_MIN_TX_FEE); + } + // Limit to between 1K and MAX_BLOCK_SIZE-1K for sanity: nBlockMaxSize = std::max((unsigned int)1000, std::min((unsigned int)(MaxBlockSize(fDIP0001ActiveAtTip)-1000), nBlockMaxSize)); } @@ -409,7 +418,7 @@ void BlockAssembler::addPackageTxs() packageSigOps = modit->nSigOpCountWithAncestors; } - if (packageFees < ::minRelayTxFee.GetFee(packageSize)) { + if (packageFees < blockMinFeeRate.GetFee(packageSize)) { // Everything else we might consider has a lower fee rate return; } diff --git a/src/miner.h b/src/miner.h index e2f315590ec3..4b6915d29f4e 100644 --- a/src/miner.h +++ b/src/miner.h @@ -141,6 +141,7 @@ class BlockAssembler // Configuration parameters for the block size unsigned int nBlockMaxSize; + CFeeRate blockMinFeeRate; // Information on the current status of the block uint64_t nBlockSize; diff --git a/src/net.cpp b/src/net.cpp index 0d107895e630..49f5899d361d 100644 --- a/src/net.cpp +++ b/src/net.cpp @@ -1190,12 +1190,10 @@ void CConnman::ThreadSocketHandler() // * Hand off all complete messages to the processor, to be handled without // blocking here. 
{ - TRY_LOCK(pnode->cs_vSend, lockSend); - if (lockSend) { - if (!pnode->vSendMsg.empty()) { - FD_SET(pnode->hSocket, &fdsetSend); - continue; - } + LOCK(pnode->cs_vSend); + if (!pnode->vSendMsg.empty()) { + FD_SET(pnode->hSocket, &fdsetSend); + continue; } } { @@ -1309,12 +1307,10 @@ void CConnman::ThreadSocketHandler() continue; if (FD_ISSET(pnode->hSocket, &fdsetSend)) { - TRY_LOCK(pnode->cs_vSend, lockSend); - if (lockSend) { - size_t nBytes = SocketSendData(pnode); - if (nBytes) { - RecordBytesSent(nBytes); - } + LOCK(pnode->cs_vSend); + size_t nBytes = SocketSendData(pnode); + if (nBytes) { + RecordBytesSent(nBytes); } } @@ -1914,11 +1910,11 @@ bool CConnman::OpenNetworkConnection(const CAddress& addrConnect, bool fCountFai if (fConnectToMasternode) pnode->fMasternode = true; + GetNodeSignals().InitializeNode(pnode, *this); { LOCK(cs_vNodes); vNodes.push_back(pnode); } - GetNodeSignals().InitializeNode(pnode, *this); return true; } @@ -1948,9 +1944,8 @@ void CConnman::ThreadMessageHandler() // Send messages { - TRY_LOCK(pnode->cs_vSend, lockSend); - if (lockSend) - GetNodeSignals().SendMessages(pnode, *this, flagInterruptMsgProc); + LOCK(pnode->cs_sendProcessing); + GetNodeSignals().SendMessages(pnode, *this, flagInterruptMsgProc); } if (flagInterruptMsgProc) return; diff --git a/src/net.h b/src/net.h index 4305f5dd1236..f32b1f36aa94 100644 --- a/src/net.h +++ b/src/net.h @@ -391,6 +391,8 @@ class CConnman CSipHasher GetDeterministicRandomizer(uint64_t id); unsigned int GetReceiveFloodSize() const; + + void WakeMessageHandler(); private: struct ListenSocket { SOCKET socket; @@ -410,8 +412,6 @@ class CConnman uint64_t CalculateKeyedNetGroup(const CAddress& ad); - void WakeMessageHandler(); - CNode* FindNode(const CNetAddr& ip); CNode* FindNode(const CSubNet& subNet); CNode* FindNode(const std::string& addrName); @@ -687,6 +687,8 @@ class CNode std::list<CNetMessage> vProcessMsg; size_t nProcessQueueSize; + CCriticalSection cs_sendProcessing; + std::deque<CInv> vRecvGetData; uint64_t nRecvBytes; std::atomic<int> nRecvVersion; diff --git a/src/net_processing.cpp b/src/net_processing.cpp index 6bbf82ec58f0..a49a213f8be4 100644 --- a/src/net_processing.cpp +++ b/src/net_processing.cpp @@ -718,6 +718,7 @@ void PeerLogicValidation::UpdatedBlockTip(const CBlockIndex *pindexNew, const CB } } }); + connman->WakeMessageHandler(); } nTimeBestReceived = GetTime(); @@ -1786,6 +1787,9 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv, LogPrint("mempool", "mapOrphan overflow, removed %u tx\n", nEvicted); } else { LogPrint("mempool", "not keeping orphan with rejected parents %s\n",tx.GetHash().ToString()); + // We will continue to reject this tx since it has rejected + // parents so avoid re-requesting it from other peers.
+ recentRejects->insert(tx.GetHash()); } } else { assert(recentRejects); diff --git a/src/netaddress.h b/src/netaddress.h index c856019198e5..85b7b94b3e9e 100644 --- a/src/netaddress.h +++ b/src/netaddress.h @@ -51,7 +51,7 @@ class CNetAddr bool IsIPv4() const; // IPv4 mapped address (::FFFF:0:0/96, 0.0.0.0/0) bool IsIPv6() const; // IPv6 address (not mapped IPv4, not Tor) bool IsRFC1918() const; // IPv4 private networks (10.0.0.0/8, 192.168.0.0/16, 172.16.0.0/12) - bool IsRFC2544() const; // IPv4 inter-network communcations (192.18.0.0/15) + bool IsRFC2544() const; // IPv4 inter-network communications (192.18.0.0/15) bool IsRFC6598() const; // IPv4 ISP-level NAT (100.64.0.0/10) bool IsRFC5737() const; // IPv4 documentation addresses (192.0.2.0/24, 198.51.100.0/24, 203.0.113.0/24) bool IsRFC3849() const; // IPv6 documentation address (2001:0DB8::/32) diff --git a/src/policy/policy.cpp b/src/policy/policy.cpp index 83d76a5b87f7..eb41ad17053c 100644 --- a/src/policy/policy.cpp +++ b/src/policy/policy.cpp @@ -102,7 +102,7 @@ bool IsStandardTx(const CTransaction& tx, std::string& reason) else if ((whichType == TX_MULTISIG) && (!fIsBareMultisigStd)) { reason = "bare-multisig"; return false; - } else if (txout.IsDust(::minRelayTxFee)) { + } else if (txout.IsDust(dustRelayFee)) { reason = "dust"; return false; } @@ -150,3 +150,6 @@ bool AreInputsStandard(const CTransaction& tx, const CCoinsViewCache& mapInputs) return true; } + +CFeeRate incrementalRelayFee = CFeeRate(DEFAULT_INCREMENTAL_RELAY_FEE); +CFeeRate dustRelayFee = CFeeRate(DUST_RELAY_TX_FEE); diff --git a/src/policy/policy.h b/src/policy/policy.h index 6e67ca42fc8e..0b4931e63e96 100644 --- a/src/policy/policy.h +++ b/src/policy/policy.h @@ -18,6 +18,8 @@ class CCoinsViewCache; static const unsigned int DEFAULT_BLOCK_MAX_SIZE = 750000; /** Default for -blockprioritysize, maximum space for zero/low-fee transactions **/ static const unsigned int DEFAULT_BLOCK_PRIORITY_SIZE = 10000; // was 50000 in 0.12.0 and it is 0 in Bitcoin since 0.12 +/** Default for -blockmintxfee, which sets the minimum feerate for a transaction in blocks created by mining code **/ +static const unsigned int DEFAULT_BLOCK_MIN_TX_FEE = 1000; /** The maximum size for transactions we're willing to relay/mine */ static const unsigned int MAX_STANDARD_TX_SIZE = 100000; /** Maximum number of signature check operations in an IsStandard() P2SH script */ @@ -26,6 +28,14 @@ static const unsigned int MAX_P2SH_SIGOPS = 15; static const unsigned int MAX_STANDARD_TX_SIGOPS = 4000; /** Default for -maxmempool, maximum megabytes of mempool memory usage */ static const unsigned int DEFAULT_MAX_MEMPOOL_SIZE = 300; +/** Default for -incrementalrelayfee, which sets the minimum feerate increase for mempool limiting or BIP 125 replacement **/ +static const unsigned int DEFAULT_INCREMENTAL_RELAY_FEE = 1000; +/** Min feerate for defining dust. Historically this has been the same as the + * minRelayTxFee, however changing the dust limit changes which transactions are + * standard and should be done with care and ideally rarely. It makes sense to + * only increase the dust limit after prior releases were already not creating + * outputs below the new threshold */ +static const unsigned int DUST_RELAY_TX_FEE = 1000; /** * Standard script verification flags that standard transactions will comply * with. 
However scripts violating these flags may still be present in valid @@ -63,4 +73,6 @@ bool IsStandardTx(const CTransaction& tx, std::string& reason); */ bool AreInputsStandard(const CTransaction& tx, const CCoinsViewCache& mapInputs); +extern CFeeRate incrementalRelayFee; +extern CFeeRate dustRelayFee; #endif // BITCOIN_POLICY_POLICY_H diff --git a/src/primitives/transaction.h b/src/primitives/transaction.h index a243e4d1879d..33f277f1f69f 100644 --- a/src/primitives/transaction.h +++ b/src/primitives/transaction.h @@ -209,7 +209,7 @@ class CTransaction { public: // Default transaction version. - static const int32_t CURRENT_VERSION=1; + static const int32_t CURRENT_VERSION=2; // Changing the default transaction version requires a two step process: first // adapting relay policy by bumping MAX_STANDARD_VERSION, and then later date diff --git a/src/qt/bantablemodel.cpp b/src/qt/bantablemodel.cpp index fe53d89ba934..ee46feed3179 100644 --- a/src/qt/bantablemodel.cpp +++ b/src/qt/bantablemodel.cpp @@ -64,7 +64,7 @@ class BanTablePriv } if (sortColumn >= 0) - // sort cachedBanlist (use stable sort to prevent rows jumping around unneceesarily) + // sort cachedBanlist (use stable sort to prevent rows jumping around unnecessarily) qStableSort(cachedBanlist.begin(), cachedBanlist.end(), BannedNodeLessThan(sortColumn, sortOrder)); } diff --git a/src/qt/clientmodel.cpp b/src/qt/clientmodel.cpp index 54b00600819c..63a8a157bedd 100644 --- a/src/qt/clientmodel.cpp +++ b/src/qt/clientmodel.cpp @@ -166,7 +166,7 @@ double ClientModel::getVerificationProgress(const CBlockIndex *tipIn) const LOCK(cs_main); tip = chainActive.Tip(); } - return Checkpoints::GuessVerificationProgress(Params().Checkpoints(), tip); + return GuessVerificationProgress(Params().TxData(), tip); } void ClientModel::updateTimer() diff --git a/src/qt/coincontroldialog.cpp b/src/qt/coincontroldialog.cpp index 3e271955a2f9..e9fb4019c544 100644 --- a/src/qt/coincontroldialog.cpp +++ b/src/qt/coincontroldialog.cpp @@ -16,7 +16,8 @@ #include "wallet/coincontrol.h" #include "init.h" -#include "validation.h" // For minRelayTxFee +#include "policy/policy.h" +#include "validation.h" // For mempool #include "wallet/wallet.h" #include "instantx.h" @@ -487,7 +488,7 @@ void CoinControlDialog::updateLabels(WalletModel *model, QDialog* dialog) { CTxOut txout(amount, (CScript)std::vector<unsigned char>(24, 0)); txDummy.vout.push_back(txout); - if (txout.IsDust(::minRelayTxFee)) + if (txout.IsDust(dustRelayFee)) fDust = true; } } @@ -596,10 +597,10 @@ if (nChange > 0 && nChange < MIN_CHANGE) { CTxOut txout(nChange, (CScript)std::vector<unsigned char>(24, 0)); - if (txout.IsDust(::minRelayTxFee)) + if (txout.IsDust(dustRelayFee)) { if (CoinControlDialog::fSubtractFeeFromAmount) // dust-change will be raised until no dust - nChange = txout.GetDustThreshold(::minRelayTxFee); + nChange = txout.GetDustThreshold(dustRelayFee); else { nPayFee += nChange; diff --git a/src/qt/guiutil.cpp b/src/qt/guiutil.cpp index 00b926610b6c..8ea9547df98b 100644 --- a/src/qt/guiutil.cpp +++ b/src/qt/guiutil.cpp @@ -12,7 +12,7 @@ #include "primitives/transaction.h" #include "init.h" -#include "validation.h" // For minRelayTxFee +#include "policy/policy.h" #include "protocol.h" #include "script/script.h" #include "script/standard.h" @@ -273,7 +273,7 @@ bool isDust(const QString& address, const CAmount& amount) CTxDestination dest = CBitcoinAddress(address.toStdString()).Get(); CScript script = GetScriptForDestination(dest); CTxOut
txOut(amount, script); - return txOut.IsDust(::minRelayTxFee); + return txOut.IsDust(dustRelayFee); } QString HtmlEscape(const QString& str, bool fMultiLine) diff --git a/src/qt/paymentserver.cpp b/src/qt/paymentserver.cpp index 6cfe6294fb37..729f7247afa0 100644 --- a/src/qt/paymentserver.cpp +++ b/src/qt/paymentserver.cpp @@ -11,7 +11,7 @@ #include "base58.h" #include "chainparams.h" -#include "validation.h" // For minRelayTxFee +#include "policy/policy.h" #include "ui_interface.h" #include "util.h" #include "wallet/wallet.h" @@ -583,7 +583,7 @@ bool PaymentServer::processPaymentRequest(const PaymentRequestPlus& request, Sen // Extract and check amounts CTxOut txOut(sendingTo.second, sendingTo.first); - if (txOut.IsDust(::minRelayTxFee)) { + if (txOut.IsDust(dustRelayFee)) { Q_EMIT message(tr("Payment request error"), tr("Requested payment amount of %1 is too small (considered dust).") .arg(BitcoinUnits::formatWithUnit(optionsModel->getDisplayUnit(), sendingTo.second)), CClientUIInterface::MSG_ERROR); diff --git a/src/qt/paymentserver.h b/src/qt/paymentserver.h index fb378eec8732..c9b57acea2b7 100644 --- a/src/qt/paymentserver.h +++ b/src/qt/paymentserver.h @@ -21,10 +21,10 @@ // // When startup is finished and the main window is // shown, a signal is sent to slot uiReady(), which -// emits a receivedURL() signal for any payment +// emits a receivedURI() signal for any payment // requests that happened during startup. // -// After startup, receivedURL() happens as usual. +// After startup, receivedURI() happens as usual. // // This class has one more feature: a static // method that finds URIs passed in the command line diff --git a/src/qt/sendcoinsdialog.cpp b/src/qt/sendcoinsdialog.cpp index aa10af686d36..d010d8886e51 100644 --- a/src/qt/sendcoinsdialog.cpp +++ b/src/qt/sendcoinsdialog.cpp @@ -720,8 +720,8 @@ void SendCoinsDialog::updateGlobalFeeVariables() // set nMinimumTotalFee to 0 to not accidentally pay a custom fee CoinControlDialog::coinControl->nMinimumTotalFee = 0; - // show the estimated reuquired time for confirmation - ui->confirmationTargetLabel->setText(GUIUtil::formatDurationStr(nConfirmTarget * Params().GetConsensus().nPowTargetSpacing)+" / "+tr("%n block(s)", "", nConfirmTarget)); + // show the estimated required time for confirmation + ui->confirmationTargetLabel->setText(GUIUtil::formatDurationStr(nConfirmTarget * Params().GetConsensus().nPowTargetSpacing) + " / " + tr("%n block(s)", "", nConfirmTarget)); } else { diff --git a/src/qt/test/rpcnestedtests.cpp b/src/qt/test/rpcnestedtests.cpp index 66572979c626..407f0f4544e9 100644 --- a/src/qt/test/rpcnestedtests.cpp +++ b/src/qt/test/rpcnestedtests.cpp @@ -29,7 +29,7 @@ static UniValue rpcNestedTest_rpc(const JSONRPCRequest& request) static const CRPCCommand vRPCCommands[] = { - { "test", "rpcNestedTest", &rpcNestedTest_rpc, true }, + { "test", "rpcNestedTest", &rpcNestedTest_rpc, true, {} }, }; void RPCNestedTests::rpcNestedTests() diff --git a/src/qt/transactionrecord.cpp b/src/qt/transactionrecord.cpp index de942e0a65d8..0a61d397b0d5 100644 --- a/src/qt/transactionrecord.cpp +++ b/src/qt/transactionrecord.cpp @@ -51,14 +51,15 @@ QList TransactionRecord::decomposeTransaction(const CWallet * // // Credit // - BOOST_FOREACH(const CTxOut& txout, wtx.tx->vout) + for(unsigned int i = 0; i < wtx.tx->vout.size(); i++) { + const CTxOut& txout = wtx.tx->vout[i]; isminetype mine = wallet->IsMine(txout); if(mine) { TransactionRecord sub(hash, nTime); CTxDestination address; - sub.idx = parts.size(); // sequence number + 
sub.idx = i; // vout index sub.credit = txout.nValue; sub.involvesWatchAddress = mine & ISMINE_WATCH_ONLY; if (ExtractDestination(txout.scriptPubKey, address) && IsMine(*wallet, address)) @@ -174,7 +175,7 @@ QList<TransactionRecord> TransactionRecord::decomposeTransaction(const CWallet * { const CTxOut& txout = wtx.tx->vout[nOut]; TransactionRecord sub(hash, nTime); - sub.idx = parts.size(); + sub.idx = nOut; sub.involvesWatchAddress = involvesWatchAddress; if(wallet->IsMine(txout)) diff --git a/src/qt/utilitydialog.cpp b/src/qt/utilitydialog.cpp index 5af0c22996fd..e9126968ee2d 100644 --- a/src/qt/utilitydialog.cpp +++ b/src/qt/utilitydialog.cpp @@ -40,7 +40,7 @@ HelpMessageDialog::HelpMessageDialog(QWidget *parent, HelpMode helpMode) : QString version = tr(PACKAGE_NAME) + " " + tr("version") + " " + QString::fromStdString(FormatFullVersion()); /* On x86 add a bit specifier to the version so that users can distinguish between - * 32 and 64 bit builds. On other architectures, 32/64 bit may be more ambigious. + * 32 and 64 bit builds. On other architectures, 32/64 bit may be more ambiguous. */ #if defined(__x86_64__) version += " " + tr("(%1-bit)").arg(64); diff --git a/src/rest.cpp b/src/rest.cpp index 91c7b488ce86..01c365318122 100644 --- a/src/rest.cpp +++ b/src/rest.cpp @@ -17,7 +17,6 @@ #include "version.h" #include <boost/algorithm/string.hpp> -#include <boost/dynamic_bitset.hpp> #include <univalue.h> @@ -505,7 +504,8 @@ static bool rest_getutxos(HTTPRequest* req, const std::string& strURIPart) vector<unsigned char> bitmap; vector<CCoin> outs; std::string bitmapStringRepresentation; - boost::dynamic_bitset<unsigned char> hits(vOutPoints.size()); + std::vector<bool> hits; + bitmap.resize((vOutPoints.size() + 7) / 8); { LOCK2(cs_main, mempool.cs); @@ -519,16 +519,18 @@ static bool rest_getutxos(HTTPRequest* req, const std::string& strURIPart) view.SetBackend(viewMempool); // switch cache backend to db+mempool in case user likes to query mempool for (size_t i = 0; i < vOutPoints.size(); i++) { + bool hit = false; Coin coin; if (view.GetCoin(vOutPoints[i], coin) && !mempool.isSpent(vOutPoints[i])) { - hits[i] = true; + hit = true; outs.emplace_back(std::move(coin)); } - bitmapStringRepresentation.append(hits[i] ? "1" : "0"); // form a binary string representation (human-readable for json output) + hits.push_back(hit); + bitmapStringRepresentation.append(hit ? "1" : "0"); // form a binary string representation (human-readable for json output) + bitmap[i / 8] |= ((uint8_t)hit) << (i % 8); } } - boost::to_block_range(hits, std::back_inserter(bitmap)); switch (rf) { case RF_BINARY: { diff --git a/src/rpc/blockchain.cpp b/src/rpc/blockchain.cpp index fa61be52c742..0a595f059569 100644 --- a/src/rpc/blockchain.cpp +++ b/src/rpc/blockchain.cpp @@ -336,8 +336,8 @@ std::string EntryDescriptionString() " \"modifiedfee\" : n, (numeric) transaction fee with fee deltas used for mining priority\n" " \"time\" : n, (numeric) local time transaction entered pool in seconds since 1 Jan 1970 GMT\n" " \"height\" : n, (numeric) block height when transaction entered pool\n" - " \"startingpriority\" : n, (numeric) priority when transaction entered pool\n" - " \"currentpriority\" : n, (numeric) transaction priority now\n" + " \"startingpriority\" : n, (numeric) DEPRECATED. Priority when transaction entered pool\n" + " \"currentpriority\" : n, (numeric) DEPRECATED.
Transaction priority now\n" " \"descendantcount\" : n, (numeric) number of in-mempool descendant transactions (including this one)\n" " \"descendantsize\" : n, (numeric) size of in-mempool descendants (including this one)\n" " \"descendantfees\" : n, (numeric) modified fees (see above) of in-mempool descendants (including this one)\n" @@ -948,6 +948,53 @@ static bool GetUTXOStats(CCoinsView *view, CCoinsStats &stats) return true; } +UniValue pruneblockchain(const JSONRPCRequest& request) +{ + if (request.fHelp || request.params.size() != 1) + throw runtime_error( + "pruneblockchain\n" + "\nArguments:\n" + "1. \"height\" (numeric, required) The block height to prune up to. May be set to a discrete height, or to a unix timestamp to prune based on block time.\n" + "\nResult:\n" + "n (numeric) Height of the last block pruned.\n" + "\nExamples:\n" + + HelpExampleCli("pruneblockchain", "1000") + + HelpExampleRpc("pruneblockchain", "1000")); + + if (!fPruneMode) + throw JSONRPCError(RPC_METHOD_NOT_FOUND, "Cannot prune blocks because node is not in prune mode."); + + LOCK(cs_main); + + int heightParam = request.params[0].get_int(); + if (heightParam < 0) + throw JSONRPCError(RPC_INVALID_PARAMETER, "Negative block height."); + + // Height value more than a billion is too high to be a block height, and + // too low to be a block time (corresponds to timestamp from Sep 2001). + if (heightParam > 1000000000) { + CBlockIndex* pindex = chainActive.FindEarliestAtLeast(heightParam); + if (!pindex) { + throw JSONRPCError(RPC_INTERNAL_ERROR, "Could not find block with at least the specified timestamp."); + } + heightParam = pindex->nHeight; + } + + unsigned int height = (unsigned int) heightParam; + unsigned int chainHeight = (unsigned int) chainActive.Height(); + if (chainHeight < Params().PruneAfterHeight()) + throw JSONRPCError(RPC_INTERNAL_ERROR, "Blockchain is too short for pruning."); + else if (height > chainHeight) + throw JSONRPCError(RPC_INVALID_PARAMETER, "Blockchain is shorter than the attempted prune height."); + else if (height > chainHeight - MIN_BLOCKS_TO_KEEP) { + LogPrint("rpc", "Attempt to prune blocks close to the tip. 
Retaining the minimum number of blocks."); + height = chainHeight - MIN_BLOCKS_TO_KEEP; + } + + PruneBlockFilesManual(height); + return uint64_t(height); +} + UniValue gettxoutsetinfo(const JSONRPCRequest& request) { if (request.fHelp || request.params.size() != 0) @@ -1097,22 +1144,23 @@ UniValue verifychain(const JSONRPCRequest& request) } /** Implementation of IsSuperMajority with better feedback */ -static UniValue SoftForkMajorityDesc(int minVersion, CBlockIndex* pindex, int nRequired, const Consensus::Params& consensusParams) +static UniValue SoftForkMajorityDesc(int version, CBlockIndex* pindex, const Consensus::Params& consensusParams) { - int nFound = 0; - CBlockIndex* pstart = pindex; - for (int i = 0; i < consensusParams.nMajorityWindow && pstart != NULL; i++) + UniValue rv(UniValue::VOBJ); + bool activated = false; + switch(version) { - if (pstart->nVersion >= minVersion) - ++nFound; - pstart = pstart->pprev; + case 2: + activated = pindex->nHeight >= consensusParams.BIP34Height; + break; + case 3: + activated = pindex->nHeight >= consensusParams.BIP66Height; + break; + case 4: + activated = pindex->nHeight >= consensusParams.BIP65Height; + break; } - - UniValue rv(UniValue::VOBJ); - rv.push_back(Pair("status", nFound >= nRequired)); - rv.push_back(Pair("found", nFound)); - rv.push_back(Pair("required", nRequired)); - rv.push_back(Pair("window", consensusParams.nMajorityWindow)); + rv.push_back(Pair("status", activated)); return rv; } @@ -1121,8 +1169,7 @@ static UniValue SoftForkDesc(const std::string &name, int version, CBlockIndex* UniValue rv(UniValue::VOBJ); rv.push_back(Pair("id", name)); rv.push_back(Pair("version", version)); - rv.push_back(Pair("enforce", SoftForkMajorityDesc(version, pindex, consensusParams.nMajorityEnforceBlockUpgrade, consensusParams))); - rv.push_back(Pair("reject", SoftForkMajorityDesc(version, pindex, consensusParams.nMajorityRejectBlockOutdated, consensusParams))); + rv.push_back(Pair("reject", SoftForkMajorityDesc(version, pindex, consensusParams))); return rv; } @@ -1178,13 +1225,9 @@ UniValue getblockchaininfo(const JSONRPCRequest& request) " {\n" " \"id\": \"xxxx\", (string) name of softfork\n" " \"version\": xx, (numeric) block version\n" - " \"enforce\": { (object) progress toward enforcing the softfork rules for new-version blocks\n" + " \"reject\": { (object) progress toward rejecting pre-softfork blocks\n" " \"status\": xx, (boolean) true if threshold reached\n" - " \"found\": xx, (numeric) number of blocks with the new version found\n" - " \"required\": xx, (numeric) number of blocks required to trigger\n" - " \"window\": xx, (numeric) maximum size of examined window of recent blocks\n" " },\n" - " \"reject\": { ... 
} (object) progress toward rejecting pre-softfork blocks (same fields as \"enforce\")\n" " }, ...\n" " ],\n" " \"bip9_softforks\": { (object) status of BIP9 softforks in progress\n" @@ -1211,7 +1254,7 @@ UniValue getblockchaininfo(const JSONRPCRequest& request) obj.push_back(Pair("bestblockhash", chainActive.Tip()->GetBlockHash().GetHex())); obj.push_back(Pair("difficulty", (double)GetDifficulty())); obj.push_back(Pair("mediantime", (int64_t)chainActive.Tip()->GetMedianTimePast())); - obj.push_back(Pair("verificationprogress", Checkpoints::GuessVerificationProgress(Params().Checkpoints(), chainActive.Tip()))); + obj.push_back(Pair("verificationprogress", GuessVerificationProgress(Params().TxData(), chainActive.Tip()))); obj.push_back(Pair("chainwork", chainActive.Tip()->nChainWork.GetHex())); obj.push_back(Pair("pruned", fPruneMode)); @@ -1545,6 +1588,7 @@ static const CRPCCommand commands[] = { "blockchain", "getrawmempool", &getrawmempool, true, {"verbose"} }, { "blockchain", "gettxout", &gettxout, true, {"txid","n","include_mempool"} }, { "blockchain", "gettxoutsetinfo", &gettxoutsetinfo, true, {} }, + { "blockchain", "pruneblockchain", &pruneblockchain, true, {"height"} }, { "blockchain", "verifychain", &verifychain, true, {"checklevel","nblocks"} }, { "blockchain", "preciousblock", &preciousblock, true, {"blockhash"} }, diff --git a/src/rpc/client.cpp b/src/rpc/client.cpp index 512bd057b6d0..d3a6f7b3e682 100644 --- a/src/rpc/client.cpp +++ b/src/rpc/client.cpp @@ -125,6 +125,7 @@ static const CRPCConvertParam vRPCConvertParams[] = { "importmulti", 1, "options" }, { "verifychain", 0, "checklevel" }, { "verifychain", 1, "nblocks" }, + { "pruneblockchain", 0, "height" }, { "keypoolrefill", 0, "newsize" }, { "getrawmempool", 0, "verbose" }, { "estimatefee", 0, "nblocks" }, @@ -135,6 +136,7 @@ static const CRPCConvertParam vRPCConvertParams[] = { "prioritisetransaction", 2, "fee_delta" }, { "setban", 2, "bantime" }, { "setban", 3, "absolute" }, + { "setbip69enabled", 0, "enabled" }, { "setnetworkactive", 0, "state" }, { "getmempoolancestors", 1, "verbose" }, { "getmempooldescendants", 1, "verbose" }, diff --git a/src/rpc/net.cpp b/src/rpc/net.cpp index b1e64918279c..f2406ec0d2b5 100644 --- a/src/rpc/net.cpp +++ b/src/rpc/net.cpp @@ -101,13 +101,14 @@ UniValue getpeerinfo(const JSONRPCRequest& request) " \"inflight\": [\n" " n, (numeric) The heights of blocks we're currently asking from this peer\n" " ...\n" - " ]\n" + " ],\n" + " \"whitelisted\": true|false, (boolean) Whether the peer is whitelisted\n" " \"bytessent_per_msg\": {\n" - " \"addr\": n, (numeric) The total bytes sent aggregated by message type\n" + " \"addr\": n, (numeric) The total bytes sent aggregated by message type\n" " ...\n" - " }\n" + " },\n" " \"bytesrecv_per_msg\": {\n" - " \"addr\": n, (numeric) The total bytes received aggregated by message type\n" + " \"addr\": n, (numeric) The total bytes received aggregated by message type\n" " ...\n" " }\n" " }\n" @@ -150,7 +151,7 @@ UniValue getpeerinfo(const JSONRPCRequest& request) obj.push_back(Pair("pingwait", stats.dPingWait)); obj.push_back(Pair("version", stats.nVersion)); // Use the sanitized form of subver here, to avoid tricksy remote peers from - // corrupting or modifiying the JSON output by putting special characters in + // corrupting or modifying the JSON output by putting special characters in // their ver message. 
obj.push_back(Pair("subver", stats.cleanSubVer)); obj.push_back(Pair("inbound", stats.fInbound)); @@ -412,6 +413,7 @@ UniValue getnetworkinfo(const JSONRPCRequest& request) " \"limited\": true|false, (boolean) is the network limited using -onlynet?\n" " \"reachable\": true|false, (boolean) is the network reachable?\n" " \"proxy\": \"host:port\" (string) the proxy that is used for this network, or empty if none\n" + " \"proxy_randomize_credentials\": true|false, (string) Whether randomized credentials are used\n" " }\n" " ,...\n" " ],\n" @@ -424,7 +426,7 @@ UniValue getnetworkinfo(const JSONRPCRequest& request) " }\n" " ,...\n" " ]\n" - " \"warnings\": \"...\" (string) any network warnings (such as alert messages) \n" + " \"warnings\": \"...\" (string) any network warnings\n" "}\n" "\nExamples:\n" + HelpExampleCli("getnetworkinfo", "") diff --git a/src/rpc/rawtransaction.cpp b/src/rpc/rawtransaction.cpp index f58284b2dff9..3c119df07288 100644 --- a/src/rpc/rawtransaction.cpp +++ b/src/rpc/rawtransaction.cpp @@ -146,9 +146,11 @@ UniValue getrawtransaction(const JSONRPCRequest& request) if (request.fHelp || request.params.size() < 1 || request.params.size() > 2) throw runtime_error( "getrawtransaction \"txid\" ( verbose )\n" - "\nNOTE: By default this function only works sometimes. This is when the tx is in the mempool\n" - "or there is an unspent output in the utxo for this transaction. To make it always work,\n" - "you need to maintain a transaction index, using the -txindex command line option.\n" + + "\nNOTE: By default this function only works for mempool transactions. If the -txindex option is\n" + "enabled, it also works for blockchain transactions.\n" + "DEPRECATED: for now, it also works for transactions with unspent outputs.\n" + "\nReturn the raw transaction data.\n" "\nIf verbose is 'true', returns an Object with information about 'txid'.\n" "If verbose is 'false' or omitted, returns a string that is serialized, hex-encoded data for 'txid'.\n" @@ -233,7 +235,9 @@ UniValue getrawtransaction(const JSONRPCRequest& request) CTransactionRef tx; uint256 hashBlock; if (!GetTransaction(hash, tx, Params().GetConsensus(), hashBlock, true)) - throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "No information available about transaction"); + throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, std::string(fTxIndex ? "No such mempool or blockchain transaction" + : "No such mempool transaction. Use -txindex to enable blockchain transaction queries") + + ". Use gettransaction for wallet transactions."); string strHex = EncodeHexTx(*tx); diff --git a/src/script/script.cpp b/src/script/script.cpp index c87fce436d4e..63e226e12cb0 100644 --- a/src/script/script.cpp +++ b/src/script/script.cpp @@ -128,7 +128,7 @@ const char* GetOpName(opcodetype opcode) case OP_CHECKMULTISIG : return "OP_CHECKMULTISIG"; case OP_CHECKMULTISIGVERIFY : return "OP_CHECKMULTISIGVERIFY"; - // expanson + // expansion case OP_NOP1 : return "OP_NOP1"; case OP_CHECKLOCKTIMEVERIFY : return "OP_CHECKLOCKTIMEVERIFY"; case OP_CHECKSEQUENCEVERIFY : return "OP_CHECKSEQUENCEVERIFY"; diff --git a/src/test/addrman_tests.cpp b/src/test/addrman_tests.cpp index cf5533d332d7..c80a46c98869 100644 --- a/src/test/addrman_tests.cpp +++ b/src/test/addrman_tests.cpp @@ -297,7 +297,7 @@ BOOST_AUTO_TEST_CASE(addrman_find) // Test 18; Find does not discriminate by port number. 
CAddrInfo* info2 = addrman.Find(addr2); BOOST_CHECK(info2); - if (info2) + if (info2 && info1) BOOST_CHECK(info2->ToString() == info1->ToString()); // Test 19: Find returns another IP matching what we searched on. diff --git a/src/test/bitcoin-util-test.py b/src/test/bitcoin-util-test.py index 1c090b3f3f1e..e2087187aaba 100755 --- a/src/test/bitcoin-util-test.py +++ b/src/test/bitcoin-util-test.py @@ -5,7 +5,6 @@ # file COPYING or http://www.opensource.org/licenses/mit-license.php. from __future__ import division,print_function,unicode_literals import os -import sys import bctest import buildenv import argparse diff --git a/src/test/cuckoocache_tests.cpp b/src/test/cuckoocache_tests.cpp index 326ddb02004e..228b3e571ae2 100644 --- a/src/test/cuckoocache_tests.cpp +++ b/src/test/cuckoocache_tests.cpp @@ -15,7 +15,7 @@ * with deterministic seeds) * 2) Some test methods are templated to allow for easier testing * against new versions / comparing - * 3) Results should be treated as a regression test, ie, did the behavior + * 3) Results should be treated as a regression test, i.e., did the behavior * change significantly from what was expected. This can be OK, depending on * the nature of the change, but requires updating the tests to reflect the new * expected behavior. For example improving the hit rate may cause some tests diff --git a/src/test/data/bitcoin-util-test.json b/src/test/data/bitcoin-util-test.json index b0e20b31ea9b..9f472d7e6e3a 100644 --- a/src/test/data/bitcoin-util-test.json +++ b/src/test/data/bitcoin-util-test.json @@ -1,25 +1,30 @@ [ { "exec": "./dash-tx", - "args": ["-create"], - "output_cmp": "blanktx.hex", - "description": "Creates a blank transaction" + "args": ["-create", "nversion=1"], + "output_cmp": "blanktxv1.hex", + "description": "Creates a blank v1 transaction" }, { "exec": "./dash-tx", - "args": ["-json","-create"], - "output_cmp": "blanktx.json", - "description": "Creates a blank transaction (output in json)" + "args": ["-json","-create", "nversion=1"], + "output_cmp": "blanktxv1.json", + "description": "Creates a blank v1 transaction (output in json)" }, { "exec": "./dash-tx", "args": ["-"], - "input": "blanktx.hex", - "output_cmp": "blanktx.hex", - "description": "Creates a blank transaction when nothing is piped into bitcoin-tx" + "input": "blanktxv2.hex", + "output_cmp": "blanktxv2.hex", + "description": "Creates a blank transaction when nothing is piped into dash-tx" + }, + { "exec": "./dash-tx", + "args": ["-json","-create"], + "output_cmp": "blanktxv2.json", + "description": "Creates a blank transaction (output in json)" }, { "exec": "./dash-tx", "args": ["-json","-"], - "input": "blanktx.hex", - "output_cmp": "blanktx.json", - "description": "Creates a blank transaction when nothing is piped into bitcoin-tx (output in json)" + "input": "blanktxv2.hex", + "output_cmp": "blanktxv2.json", + "description": "Creates a blank transaction when nothing is piped into dash-tx (output in json)" }, { "exec": "./dash-tx", "args": ["-", "delin=1"], @@ -102,28 +107,81 @@ "output_cmp": "txcreate2.json", "description": "Creates a new transaction with a single empty output script (output in json)" }, + { "exec": "./dash-tx", + "args": ["-create", "outscript=0:OP_DROP", "nversion=1"], + "output_cmp": "txcreatescript1.hex", + "description": "Create a new transaction with a single output script (OP_DROP)" + }, + { "exec": "./dash-tx", + "args": ["-json", "-create", "outscript=0:OP_DROP", "nversion=1"], + "output_cmp": "txcreatescript1.json", + "description": "Create a new 
transaction with a single output script (OP_DROP) (output as json)" + }, + { "exec": "./dash-tx", + "args": ["-create", "outscript=0:OP_DROP:S", "nversion=1"], + "output_cmp": "txcreatescript2.hex", + "description": "Create a new transaction with a single output script (OP_DROP) in a P2SH" + }, + { "exec": "./dash-tx", + "args": ["-json", "-create", "outscript=0:OP_DROP:S", "nversion=1"], + "output_cmp": "txcreatescript2.json", + "description": "Create a new transaction with a single output script (OP_DROP) in a P2SH (output as json)" + }, + { "exec": "./dash-tx", + "args": ["02000000000100000000000000000000000000"], + "output_cmp": "txcreate2.hex", + "description": "Parses a transation with no inputs and a single output script" + }, + { "exec": "./dash-tx", + "args": ["-json", "02000000000100000000000000000000000000"], + "output_cmp": "txcreate2.json", + "description": "Parses a transation with no inputs and a single output script (output in json)" + }, { "exec": "./dash-tx", "args": - ["-create", + ["-create", "nversion=1", "in=4d49a71ec9da436f71ec4ee231d04f292a29cd316f598bb7068feccabdc59485:0", "set=privatekeys:[\"7qYrzJZWqnyCWMYswFcqaRJypGdVceudXPSxmZKsngN7fyo7aAV\"]", "set=prevtxs:[{\"txid\":\"4d49a71ec9da436f71ec4ee231d04f292a29cd316f598bb7068feccabdc59485\",\"vout\":0,\"scriptPubKey\":\"76a91491b24bf9f5288532960ac687abb035127b1d28a588ac\"}]", "sign=ALL", "outaddr=0.001:XijDvbYpPmznwgpWD3DkdYNfGmRP2KoVSk"], - "output_cmp": "txcreatesign.hex", - "description": "Creates a new transaction with a single input and a single output, and then signs the transaction" + "output_cmp": "txcreatesignv1.hex", + "description": "Creates a new v1 transaction with a single input and a single output, and then signs the transaction" }, { "exec": "./dash-tx", "args": ["-json", - "-create", + "-create", "nversion=1", "in=4d49a71ec9da436f71ec4ee231d04f292a29cd316f598bb7068feccabdc59485:0", "set=privatekeys:[\"7qYrzJZWqnyCWMYswFcqaRJypGdVceudXPSxmZKsngN7fyo7aAV\"]", "set=prevtxs:[{\"txid\":\"4d49a71ec9da436f71ec4ee231d04f292a29cd316f598bb7068feccabdc59485\",\"vout\":0,\"scriptPubKey\":\"76a91491b24bf9f5288532960ac687abb035127b1d28a588ac\"}]", "sign=ALL", "outaddr=0.001:XijDvbYpPmznwgpWD3DkdYNfGmRP2KoVSk"], - "output_cmp": "txcreatesign.json", - "description": "Creates a new transaction with a single input and a single output, and then signs the transaction (output in json)" + "output_cmp": "txcreatesignv1.json", + "description": "Creates a new v1 transaction with a single input and a single output, and then signs the transaction (output in json)" + }, + { "exec": "./dash-tx", + "args": + ["-create", + "in=4d49a71ec9da436f71ec4ee231d04f292a29cd316f598bb7068feccabdc59485:0", + "set=privatekeys:[\"7qYrzJZWqnyCWMYswFcqaRJypGdVceudXPSxmZKsngN7fyo7aAV\"]", + "set=prevtxs:[{\"txid\":\"4d49a71ec9da436f71ec4ee231d04f292a29cd316f598bb7068feccabdc59485\",\"vout\":0,\"scriptPubKey\":\"76a91491b24bf9f5288532960ac687abb035127b1d28a588ac\"}]", + "sign=ALL", + "outaddr=0.001:XijDvbYpPmznwgpWD3DkdYNfGmRP2KoVSk"], + "output_cmp": "txcreatesignv2.hex", + "description": "Creates a new transaction with a single input and a single output, and then signs the transaction" + }, + { "exec": "./dash-tx", + "args": + ["-create", "outpubkey=0:02a5613bd857b7048924264d1e70e08fb2a7e6527d32b7ab1bb993ac59964ff397", "nversion=1"], + "output_cmp": "txcreateoutpubkey1.hex", + "description": "Creates a new transaction with a single pay-to-pubkey output" + }, + { "exec": "./dash-tx", + "args": + ["-json", "-create", 
"outpubkey=0:02a5613bd857b7048924264d1e70e08fb2a7e6527d32b7ab1bb993ac59964ff397", "nversion=1"], + "output_cmp": "txcreateoutpubkey1.json", + "description": "Creates a new transaction with a single pay-to-pubkey output (output as json)" }, { "exec": "./dash-tx", "args": @@ -153,12 +211,12 @@ { "exec": "./dash-tx", "args": ["-json", - "-create", + "-create", "nversion=1", "in=5897de6bd6027a475eadd57019d4e6872c396d0716c4875a5f1a6fcfdf385c1f:0", "outaddr=0.18:Xdak8YsJz8tm1iHFmycfTyKeUvHgfbdpyw", "outdata=4:54686973204f505f52455455524e207472616e73616374696f6e206f7574707574207761732063726561746564206279206d6f646966696564206372656174657261777472616e73616374696f6e2e"], "output_cmp": "txcreatedata1.json", - "description": "Creates a new transaction with one input, one address output and one data output (output in json)" + "description": "Creates a new v1 transaction with one input, one address output and one data output (output in json)" }, { "exec": "./dash-tx", "args": @@ -210,5 +268,25 @@ "in=5897de6bd6027a475eadd57019d4e6872c396d0716c4875a5f1a6fcfdf385c1f:0:1"], "output_cmp": "txcreatedata_seq1.json", "description": "Adds a new input with sequence number to a transaction (output in json)" + }, + { "exec": "./dash-tx", + "args": ["-create", "outmultisig=1:2:3:02a5613bd857b7048924264d1e70e08fb2a7e6527d32b7ab1bb993ac59964ff397:021ac43c7ff740014c3b33737ede99c967e4764553d1b2b83db77c83b8715fa72d:02df2089105c77f266fa11a9d33f05c735234075f2e8780824c6b709415f9fb485", "nversion=1"], + "output_cmp": "txcreatemultisig1.hex", + "description": "Creates a new transaction with a single 2-of-3 multisig output" + }, + { "exec": "./dash-tx", + "args": ["-json", "-create", "outmultisig=1:2:3:02a5613bd857b7048924264d1e70e08fb2a7e6527d32b7ab1bb993ac59964ff397:021ac43c7ff740014c3b33737ede99c967e4764553d1b2b83db77c83b8715fa72d:02df2089105c77f266fa11a9d33f05c735234075f2e8780824c6b709415f9fb485", "nversion=1"], + "output_cmp": "txcreatemultisig1.json", + "description": "Creates a new transaction with a single 2-of-3 multisig output (output in json)" + }, + { "exec": "./dash-tx", + "args": ["-create", "outmultisig=1:2:3:02a5613bd857b7048924264d1e70e08fb2a7e6527d32b7ab1bb993ac59964ff397:021ac43c7ff740014c3b33737ede99c967e4764553d1b2b83db77c83b8715fa72d:02df2089105c77f266fa11a9d33f05c735234075f2e8780824c6b709415f9fb485:S", "nversion=1"], + "output_cmp": "txcreatemultisig2.hex", + "description": "Creates a new transaction with a single 2-of-3 multisig in a P2SH output" + }, + { "exec": "./dash-tx", + "args": ["-json", "-create", "outmultisig=1:2:3:02a5613bd857b7048924264d1e70e08fb2a7e6527d32b7ab1bb993ac59964ff397:021ac43c7ff740014c3b33737ede99c967e4764553d1b2b83db77c83b8715fa72d:02df2089105c77f266fa11a9d33f05c735234075f2e8780824c6b709415f9fb485:S", "nversion=1"], + "output_cmp": "txcreatemultisig2.json", + "description": "Creates a new transaction with a single 2-of-3 multisig in a P2SH output (output in json)" } ] diff --git a/src/test/data/blanktx.hex b/src/test/data/blanktxv1.hex similarity index 100% rename from src/test/data/blanktx.hex rename to src/test/data/blanktxv1.hex diff --git a/src/test/data/blanktx.json b/src/test/data/blanktxv1.json similarity index 100% rename from src/test/data/blanktx.json rename to src/test/data/blanktxv1.json diff --git a/src/test/data/blanktxv2.hex b/src/test/data/blanktxv2.hex new file mode 100644 index 000000000000..22d830eda151 --- /dev/null +++ b/src/test/data/blanktxv2.hex @@ -0,0 +1 @@ +02000000000000000000 diff --git a/src/test/data/blanktxv2.json b/src/test/data/blanktxv2.json 
new file mode 100644 index 000000000000..59165bb512f5 --- /dev/null +++ b/src/test/data/blanktxv2.json @@ -0,0 +1,10 @@ +{ + "txid": "4ebd325a4b394cff8c57e8317ccf5a8d0e2bdf1b8526f8aad6c8e43d8240621a", + "version": 2, + "locktime": 0, + "vin": [ + ], + "vout": [ + ], + "hex": "02000000000000000000" +} diff --git a/src/test/data/txcreate1.hex b/src/test/data/txcreate1.hex index e2981a51c987..9ec6ee3531b7 100644 --- a/src/test/data/txcreate1.hex +++ b/src/test/data/txcreate1.hex @@ -1 +1 @@ -01000000031f5c38dfcf6f1a5f5a87c416076d392c87e6d41970d5ad5e477a02d66bde97580000000000ffffffff7cca453133921c50d5025878f7f738d1df891fd359763331935784cf6b9c82bf1200000000fffffffffccd319e04a996c96cfc0bf4c07539aa90bd0b1a700ef72fae535d6504f9a6220100000000ffffffff0280a81201000000001976a9141fc11f39be1729bf973a7ab6a615ca4729d6457488ac0084d717000000001976a914f2d4db28cad6502226ee484ae24505c2885cb12d88ac00000000 +02000000031f5c38dfcf6f1a5f5a87c416076d392c87e6d41970d5ad5e477a02d66bde97580000000000ffffffff7cca453133921c50d5025878f7f738d1df891fd359763331935784cf6b9c82bf1200000000fffffffffccd319e04a996c96cfc0bf4c07539aa90bd0b1a700ef72fae535d6504f9a6220100000000ffffffff0280a81201000000001976a9141fc11f39be1729bf973a7ab6a615ca4729d6457488ac0084d717000000001976a914f2d4db28cad6502226ee484ae24505c2885cb12d88ac00000000 diff --git a/src/test/data/txcreate1.json b/src/test/data/txcreate1.json index 5a1f8476438a..e646ff994202 100644 --- a/src/test/data/txcreate1.json +++ b/src/test/data/txcreate1.json @@ -1,6 +1,6 @@ { - "txid": "f70f0d6c71416ed538e37549f430ab3665fee2437a42f10238c1bd490e782231", - "version": 1, + "txid": "fe7d174f42dce0cffa7a527e9bc8368956057619ec817648f6138b98f2533e8f", + "version": 2, "locktime": 0, "vin": [ { @@ -59,5 +59,5 @@ } } ], - "hex": "01000000031f5c38dfcf6f1a5f5a87c416076d392c87e6d41970d5ad5e477a02d66bde97580000000000ffffffff7cca453133921c50d5025878f7f738d1df891fd359763331935784cf6b9c82bf1200000000fffffffffccd319e04a996c96cfc0bf4c07539aa90bd0b1a700ef72fae535d6504f9a6220100000000ffffffff0280a81201000000001976a9141fc11f39be1729bf973a7ab6a615ca4729d6457488ac0084d717000000001976a914f2d4db28cad6502226ee484ae24505c2885cb12d88ac00000000" + "hex": "02000000031f5c38dfcf6f1a5f5a87c416076d392c87e6d41970d5ad5e477a02d66bde97580000000000ffffffff7cca453133921c50d5025878f7f738d1df891fd359763331935784cf6b9c82bf1200000000fffffffffccd319e04a996c96cfc0bf4c07539aa90bd0b1a700ef72fae535d6504f9a6220100000000ffffffff0280a81201000000001976a9141fc11f39be1729bf973a7ab6a615ca4729d6457488ac0084d717000000001976a914f2d4db28cad6502226ee484ae24505c2885cb12d88ac00000000" } diff --git a/src/test/data/txcreate2.hex b/src/test/data/txcreate2.hex index 5243c2d02e8b..38bb7b104665 100644 --- a/src/test/data/txcreate2.hex +++ b/src/test/data/txcreate2.hex @@ -1 +1 @@ -01000000000100000000000000000000000000 +02000000000100000000000000000000000000 diff --git a/src/test/data/txcreate2.json b/src/test/data/txcreate2.json index c56293eaf2c6..bce5c9b090f0 100644 --- a/src/test/data/txcreate2.json +++ b/src/test/data/txcreate2.json @@ -1,6 +1,6 @@ { - "txid": "cf90229625e9eb10f6be8156bf6aa5ec2eca19a42b1e05c11f3029b560a32e13", - "version": 1, + "txid": "0481afb29931341d0d7861d8a2f6f26456fa042abf54a23e96440ed7946e0715", + "version": 2, "locktime": 0, "vin": [ ], @@ -15,5 +15,5 @@ } } ], - "hex": "01000000000100000000000000000000000000" + "hex": "02000000000100000000000000000000000000" } diff --git a/src/test/data/txcreatedata1.hex b/src/test/data/txcreatedata1.hex index c3b394c72d8e..8327a4aa5905 100644 --- a/src/test/data/txcreatedata1.hex +++ 
b/src/test/data/txcreatedata1.hex @@ -1 +1 @@ -01000000011f5c38dfcf6f1a5f5a87c416076d392c87e6d41970d5ad5e477a02d66bde97580000000000ffffffff0280a81201000000001976a9145834479edbbe0539b31ffd3a8f8ebadc2165ed0188ac0084d71700000000526a4c4f54686973204f505f52455455524e207472616e73616374696f6e206f7574707574207761732063726561746564206279206d6f646966696564206372656174657261777472616e73616374696f6e2e00000000 +02000000011f5c38dfcf6f1a5f5a87c416076d392c87e6d41970d5ad5e477a02d66bde97580000000000ffffffff0280a81201000000001976a9145834479edbbe0539b31ffd3a8f8ebadc2165ed0188ac0084d71700000000526a4c4f54686973204f505f52455455524e207472616e73616374696f6e206f7574707574207761732063726561746564206279206d6f646966696564206372656174657261777472616e73616374696f6e2e00000000 diff --git a/src/test/data/txcreatedata2.hex b/src/test/data/txcreatedata2.hex index 2a253ef5f7ba..88f7a10d3865 100644 --- a/src/test/data/txcreatedata2.hex +++ b/src/test/data/txcreatedata2.hex @@ -1 +1 @@ -01000000011f5c38dfcf6f1a5f5a87c416076d392c87e6d41970d5ad5e477a02d66bde97580000000000ffffffff0280a81201000000001976a9145834479edbbe0539b31ffd3a8f8ebadc2165ed0188ac0000000000000000526a4c4f54686973204f505f52455455524e207472616e73616374696f6e206f7574707574207761732063726561746564206279206d6f646966696564206372656174657261777472616e73616374696f6e2e00000000 +02000000011f5c38dfcf6f1a5f5a87c416076d392c87e6d41970d5ad5e477a02d66bde97580000000000ffffffff0280a81201000000001976a9145834479edbbe0539b31ffd3a8f8ebadc2165ed0188ac0000000000000000526a4c4f54686973204f505f52455455524e207472616e73616374696f6e206f7574707574207761732063726561746564206279206d6f646966696564206372656174657261777472616e73616374696f6e2e00000000 diff --git a/src/test/data/txcreatedata2.json b/src/test/data/txcreatedata2.json index 1c65a9655aea..62ca31aa3497 100644 --- a/src/test/data/txcreatedata2.json +++ b/src/test/data/txcreatedata2.json @@ -1,6 +1,6 @@ { - "txid": "4ed17118f5e932ba8c75c461787d171bc02a016d8557cb5bcf34cd416c27bb8b", - "version": 1, + "txid": "c14b007fa3a6c1e7765919c1d14c1cfc2b8642c3a5d3be4b1fa8c4ccfec98bb0", + "version": 2, "locktime": 0, "vin": [ { @@ -37,5 +37,5 @@ } } ], - "hex": "01000000011f5c38dfcf6f1a5f5a87c416076d392c87e6d41970d5ad5e477a02d66bde97580000000000ffffffff0280a81201000000001976a9141fc11f39be1729bf973a7ab6a615ca4729d6457488ac0000000000000000526a4c4f54686973204f505f52455455524e207472616e73616374696f6e206f7574707574207761732063726561746564206279206d6f646966696564206372656174657261777472616e73616374696f6e2e00000000" + "hex": "02000000011f5c38dfcf6f1a5f5a87c416076d392c87e6d41970d5ad5e477a02d66bde97580000000000ffffffff0280a81201000000001976a9141fc11f39be1729bf973a7ab6a615ca4729d6457488ac0000000000000000526a4c4f54686973204f505f52455455524e207472616e73616374696f6e206f7574707574207761732063726561746564206279206d6f646966696564206372656174657261777472616e73616374696f6e2e00000000" } diff --git a/src/test/data/txcreatedata_seq0.hex b/src/test/data/txcreatedata_seq0.hex index c08dd6970759..7dce523c7960 100644 --- a/src/test/data/txcreatedata_seq0.hex +++ b/src/test/data/txcreatedata_seq0.hex @@ -1 +1 @@ -01000000011f5c38dfcf6f1a5f5a87c416076d392c87e6d41970d5ad5e477a02d66bde97580000000000fdffffff0180a81201000000001976a9145834479edbbe0539b31ffd3a8f8ebadc2165ed0188ac00000000 +02000000011f5c38dfcf6f1a5f5a87c416076d392c87e6d41970d5ad5e477a02d66bde97580000000000fdffffff0180a81201000000001976a9145834479edbbe0539b31ffd3a8f8ebadc2165ed0188ac00000000 diff --git a/src/test/data/txcreatedata_seq0.json b/src/test/data/txcreatedata_seq0.json index 175ba34d7477..463169873cd0 100644 --- 
a/src/test/data/txcreatedata_seq0.json +++ b/src/test/data/txcreatedata_seq0.json @@ -1,6 +1,6 @@ { - "txid": "71603ccb1cd76d73d76eb6cfd5f0b9df6d65d90d76860ee52cb461c4be7032e8", - "version": 1, + "txid": "8df6ed527472542dd5e137c242a7c5a9f337ac34f7b257ae4af886aeaebb51b0", + "version": 2, "locktime": 0, "vin": [ { @@ -28,5 +28,5 @@ } } ], - "hex": "01000000011f5c38dfcf6f1a5f5a87c416076d392c87e6d41970d5ad5e477a02d66bde97580000000000fdffffff0180a81201000000001976a9141fc11f39be1729bf973a7ab6a615ca4729d6457488ac00000000" + "hex": "02000000011f5c38dfcf6f1a5f5a87c416076d392c87e6d41970d5ad5e477a02d66bde97580000000000fdffffff0180a81201000000001976a9141fc11f39be1729bf973a7ab6a615ca4729d6457488ac00000000" } diff --git a/src/test/data/txcreatemultisig1.hex b/src/test/data/txcreatemultisig1.hex new file mode 100644 index 000000000000..9c00004d383d --- /dev/null +++ b/src/test/data/txcreatemultisig1.hex @@ -0,0 +1 @@ +01000000000100e1f5050000000069522102a5613bd857b7048924264d1e70e08fb2a7e6527d32b7ab1bb993ac59964ff39721021ac43c7ff740014c3b33737ede99c967e4764553d1b2b83db77c83b8715fa72d2102df2089105c77f266fa11a9d33f05c735234075f2e8780824c6b709415f9fb48553ae00000000 diff --git a/src/test/data/txcreatemultisig1.json b/src/test/data/txcreatemultisig1.json new file mode 100644 index 000000000000..d7f9101dd041 --- /dev/null +++ b/src/test/data/txcreatemultisig1.json @@ -0,0 +1,25 @@ +{ + "txid": "0d1d4edfc217d9db3ab6a9298f26a52eae3c52f55a6cb8ccbc14f7c727572894", + "version": 1, + "locktime": 0, + "vin": [ + ], + "vout": [ + { + "value": 1.00, + "n": 0, + "scriptPubKey": { + "asm": "2 02a5613bd857b7048924264d1e70e08fb2a7e6527d32b7ab1bb993ac59964ff397 021ac43c7ff740014c3b33737ede99c967e4764553d1b2b83db77c83b8715fa72d 02df2089105c77f266fa11a9d33f05c735234075f2e8780824c6b709415f9fb485 3 OP_CHECKMULTISIG", + "hex": "522102a5613bd857b7048924264d1e70e08fb2a7e6527d32b7ab1bb993ac59964ff39721021ac43c7ff740014c3b33737ede99c967e4764553d1b2b83db77c83b8715fa72d2102df2089105c77f266fa11a9d33f05c735234075f2e8780824c6b709415f9fb48553ae", + "reqSigs": 2, + "type": "multisig", + "addresses": [ + "XqV6rHmzCyFUKF2jSVg8ZkZ7oRhGLVxifK", + "XqDjpPyN61bMZAZsVbYyvouVzgV59oSmWp", + "Xe2kRBG5ZEn8T34TqvvPKwQwHEzVL9WvxH" + ] + } + } + ], + "hex": "01000000000100e1f5050000000069522102a5613bd857b7048924264d1e70e08fb2a7e6527d32b7ab1bb993ac59964ff39721021ac43c7ff740014c3b33737ede99c967e4764553d1b2b83db77c83b8715fa72d2102df2089105c77f266fa11a9d33f05c735234075f2e8780824c6b709415f9fb48553ae00000000" +} diff --git a/src/test/data/txcreatemultisig2.hex b/src/test/data/txcreatemultisig2.hex new file mode 100644 index 000000000000..07835c54d3e1 --- /dev/null +++ b/src/test/data/txcreatemultisig2.hex @@ -0,0 +1 @@ +01000000000100e1f5050000000017a9141c6fbaf46d64221e80cbae182c33ddf81b9294ac8700000000 diff --git a/src/test/data/txcreatemultisig2.json b/src/test/data/txcreatemultisig2.json new file mode 100644 index 000000000000..e7e95312d94e --- /dev/null +++ b/src/test/data/txcreatemultisig2.json @@ -0,0 +1,23 @@ +{ + "txid": "0d861f278a3b7bce7cb5a88d71e6e6a903336f95ad5a2c29b295b63835b6eee3", + "version": 1, + "locktime": 0, + "vin": [ + ], + "vout": [ + { + "value": 1.00, + "n": 0, + "scriptPubKey": { + "asm": "OP_HASH160 1c6fbaf46d64221e80cbae182c33ddf81b9294ac OP_EQUAL", + "hex": "a9141c6fbaf46d64221e80cbae182c33ddf81b9294ac87", + "reqSigs": 1, + "type": "scripthash", + "addresses": [ + "7V11XGPxzBWxkiuw15a1Vgk7XT74tyYtCY" + ] + } + } + ], + "hex": "01000000000100e1f5050000000017a9141c6fbaf46d64221e80cbae182c33ddf81b9294ac8700000000" +} diff --git 
a/src/test/data/txcreateoutpubkey1.hex b/src/test/data/txcreateoutpubkey1.hex new file mode 100644 index 000000000000..4a08244b2fc0 --- /dev/null +++ b/src/test/data/txcreateoutpubkey1.hex @@ -0,0 +1 @@ +0100000000010000000000000000232102a5613bd857b7048924264d1e70e08fb2a7e6527d32b7ab1bb993ac59964ff397ac00000000 diff --git a/src/test/data/txcreateoutpubkey1.json b/src/test/data/txcreateoutpubkey1.json new file mode 100644 index 000000000000..74a3ac4c8bb3 --- /dev/null +++ b/src/test/data/txcreateoutpubkey1.json @@ -0,0 +1,23 @@ +{ + "txid": "f42b38ac12e3fafc96ba1a9ba70cbfe326744aef75df5fb9db5d6e2855ca415f", + "version": 1, + "locktime": 0, + "vin": [ + ], + "vout": [ + { + "value": 0.00, + "n": 0, + "scriptPubKey": { + "asm": "02a5613bd857b7048924264d1e70e08fb2a7e6527d32b7ab1bb993ac59964ff397 OP_CHECKSIG", + "hex": "2102a5613bd857b7048924264d1e70e08fb2a7e6527d32b7ab1bb993ac59964ff397ac", + "reqSigs": 1, + "type": "pubkey", + "addresses": [ + "XqV6rHmzCyFUKF2jSVg8ZkZ7oRhGLVxifK" + ] + } + } + ], + "hex": "0100000000010000000000000000232102a5613bd857b7048924264d1e70e08fb2a7e6527d32b7ab1bb993ac59964ff397ac00000000" +} diff --git a/src/test/data/txcreatescript1.hex b/src/test/data/txcreatescript1.hex new file mode 100644 index 000000000000..0adce270fbec --- /dev/null +++ b/src/test/data/txcreatescript1.hex @@ -0,0 +1 @@ +0100000000010000000000000000017500000000 diff --git a/src/test/data/txcreatescript1.json b/src/test/data/txcreatescript1.json new file mode 100644 index 000000000000..e54564fbada7 --- /dev/null +++ b/src/test/data/txcreatescript1.json @@ -0,0 +1,19 @@ +{ + "txid": "f0851b68202f736b792649cfc960259c2374badcb644ab20cac726b5f72f61c9", + "version": 1, + "locktime": 0, + "vin": [ + ], + "vout": [ + { + "value": 0.00, + "n": 0, + "scriptPubKey": { + "asm": "OP_DROP", + "hex": "75", + "type": "nonstandard" + } + } + ], + "hex": "0100000000010000000000000000017500000000" +} diff --git a/src/test/data/txcreatescript2.hex b/src/test/data/txcreatescript2.hex new file mode 100644 index 000000000000..5afe8786e39c --- /dev/null +++ b/src/test/data/txcreatescript2.hex @@ -0,0 +1 @@ +010000000001000000000000000017a91471ed53322d470bb96657deb786b94f97dd46fb158700000000 diff --git a/src/test/data/txcreatescript2.json b/src/test/data/txcreatescript2.json new file mode 100644 index 000000000000..95b1f368d8e2 --- /dev/null +++ b/src/test/data/txcreatescript2.json @@ -0,0 +1,23 @@ +{ + "txid": "6e07a7cc075e0703f32ee8c4e5373fe654bfbc315148fda364e1be286ff290d0", + "version": 1, + "locktime": 0, + "vin": [ + ], + "vout": [ + { + "value": 0.00, + "n": 0, + "scriptPubKey": { + "asm": "OP_HASH160 71ed53322d470bb96657deb786b94f97dd46fb15 OP_EQUAL", + "hex": "a91471ed53322d470bb96657deb786b94f97dd46fb1587", + "reqSigs": 1, + "type": "scripthash", + "addresses": [ + "7co3R3WSW8mHKMkFK2FrzQY2fLCBWsg56D" + ] + } + } + ], + "hex": "010000000001000000000000000017a91471ed53322d470bb96657deb786b94f97dd46fb158700000000" +} diff --git a/src/test/data/txcreatesign.hex b/src/test/data/txcreatesignv1.hex similarity index 100% rename from src/test/data/txcreatesign.hex rename to src/test/data/txcreatesignv1.hex diff --git a/src/test/data/txcreatesign.json b/src/test/data/txcreatesignv1.json similarity index 100% rename from src/test/data/txcreatesign.json rename to src/test/data/txcreatesignv1.json diff --git a/src/test/data/txcreatesignv2.hex b/src/test/data/txcreatesignv2.hex new file mode 100644 index 000000000000..ee425cd98c5d --- /dev/null +++ b/src/test/data/txcreatesignv2.hex @@ -0,0 +1 @@ 
+02000000018594c5bdcaec8f06b78b596f31cd292a294fd031e24eec716f43dac91ea7494d000000008a473044022079c7aa014177a2e973caf6df7c7b8f15399083b91eba370ea1e19c4caed9181e02205f8f8763505ce8e6cbdd2cd28fab3fd407a75003e7d0dc04e6bebb0a3c89e7cb01410479be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798483ada7726a3c4655da4fbfc0e1108a8fd17b448a68554199c47d08ffb10d4b8ffffffff01a0860100000000001976a9145834479edbbe0539b31ffd3a8f8ebadc2165ed0188ac00000000 diff --git a/src/test/dbwrapper_tests.cpp b/src/test/dbwrapper_tests.cpp index 3f0bff9e2f03..e63e335b69f1 100644 --- a/src/test/dbwrapper_tests.cpp +++ b/src/test/dbwrapper_tests.cpp @@ -76,7 +76,7 @@ BOOST_AUTO_TEST_CASE(dbwrapper_batch) BOOST_CHECK(dbw.Read(key2, res)); BOOST_CHECK_EQUAL(res.ToString(), in2.ToString()); - // key3 never should've been written + // key3 should've never been written BOOST_CHECK(dbw.Read(key3, res) == false); } } diff --git a/src/test/hash_tests.cpp b/src/test/hash_tests.cpp index 82d7b3177d37..46dc10ef203f 100644 --- a/src/test/hash_tests.cpp +++ b/src/test/hash_tests.cpp @@ -121,6 +121,13 @@ BOOST_AUTO_TEST_CASE(siphash) (uint64_t(x+4)<<32)|(uint64_t(x+5)<<40)|(uint64_t(x+6)<<48)|(uint64_t(x+7)<<56)); } + CHashWriter ss(SER_DISK, CLIENT_VERSION); + CMutableTransaction tx; + // Note these tests were originally written with tx.nVersion=1 + // and the test would be affected by default tx version bumps if not fixed. + tx.nVersion = 1; + ss << tx; + // Check consistency between CSipHasher and SipHashUint256[Extra]. // TODO reenable when backporting Bitcoin #10321 /*FastRandomContext ctx; diff --git a/src/test/limitedmap_tests.cpp b/src/test/limitedmap_tests.cpp index 99eaea6ef597..74d974a3a6f2 100644 --- a/src/test/limitedmap_tests.cpp +++ b/src/test/limitedmap_tests.cpp @@ -27,7 +27,7 @@ BOOST_AUTO_TEST_CASE(limitedmap_test) // make sure that the size is updated BOOST_CHECK(map.size() == 1); - // make sure that the new items is in the map + // make sure that the new item is in the map BOOST_CHECK(map.count(-1) == 1); // insert 10 new items diff --git a/src/test/mempool_tests.cpp b/src/test/mempool_tests.cpp index 397442d03fcc..707176e1c413 100644 --- a/src/test/mempool_tests.cpp +++ b/src/test/mempool_tests.cpp @@ -387,7 +387,12 @@ BOOST_AUTO_TEST_CASE(MempoolAncestorIndexingTest) pool.addUnchecked(tx6.GetHash(), entry.Fee(0LL).FromTx(tx6)); BOOST_CHECK_EQUAL(pool.size(), 6); - sortedOrder.push_back(tx6.GetHash().ToString()); + // Ties are broken by hash + if (tx3.GetHash() < tx6.GetHash()) + sortedOrder.push_back(tx6.GetHash().ToString()); + else + sortedOrder.insert(sortedOrder.end()-1,tx6.GetHash().ToString()); + CheckSort(pool, sortedOrder); CMutableTransaction tx7 = CMutableTransaction(); @@ -414,7 +419,11 @@ BOOST_AUTO_TEST_CASE(MempoolAncestorIndexingTest) pool.removeForBlock(vtx, 1); sortedOrder.erase(sortedOrder.begin()+1); - sortedOrder.pop_back(); + // Ties are broken by hash + if (tx3.GetHash() < tx6.GetHash()) + sortedOrder.pop_back(); + else + sortedOrder.erase(sortedOrder.end()-2); sortedOrder.insert(sortedOrder.begin(), tx7.GetHash().ToString()); CheckSort(pool, sortedOrder); } diff --git a/src/test/miner_tests.cpp b/src/test/miner_tests.cpp index d862f5ba2831..ec8e7f96df0c 100644 --- a/src/test/miner_tests.cpp +++ b/src/test/miner_tests.cpp @@ -30,34 +30,36 @@ struct { unsigned char extranonce; unsigned int nonce; } blockinfo[] = { - {0, 0x009c5477}, {0, 0x00a94582}, {0, 0x00af3d7f}, {0, 0x00d0b721}, - {0, 0x00d53e10}, {0, 0x00f52f0f}, {0, 0x00fb5876}, {0, 0x0117fb12}, - {0, 0x011f930b}, {0, 0x013365d2}, 
{0, 0x0151737e}, {0, 0x0152cdd0}, - {0, 0x01758d20}, {0, 0x0178d509}, {0, 0x0192103c}, {0, 0x01a3f1b8}, - {0, 0x01abc9c7}, {0, 0x01d2f50c}, {0, 0x01eebad1}, {0, 0x01ef3419}, - {0, 0x01f3f154}, {0, 0x01fa6245}, {0, 0x0224e780}, {0, 0x02281625}, - {0, 0x023a4d10}, {0, 0x0251d3cf}, {0, 0x02555277}, {0, 0x02648a41}, - {0, 0x0280795e}, {0, 0x02a3a585}, {0, 0x02ade34a}, {0, 0x02b02b02}, - {0, 0x02c9dc32}, {0, 0x02da9867}, {0, 0x02e4126e}, {0, 0x02e738c7}, - {0, 0x02f5c6a9}, {0, 0x0307bb0f}, {0, 0x0328ea58}, {0, 0x034fe819}, - {0, 0x036c6fcb}, {0, 0x039b8e11}, {0, 0x039fec90}, {0, 0x03a268ff}, - {0, 0x03d37583}, {0, 0x03d6a9a7}, {0, 0x03e7a013}, {0, 0x03f01ebe}, - {0, 0x0437104d}, {0, 0x043d0af7}, {0, 0x043d824d}, {0, 0x043f50fc}, - {0, 0x044def8c}, {0, 0x0452309a}, {0, 0x04538bd3}, {0, 0x0459286b}, - {0, 0x045bc734}, {0, 0x045c878a}, {0, 0x0485d3ba}, {0, 0x048a64e5}, - {0, 0x048d6ae1}, {0, 0x048dcfec}, {0, 0x049d2c79}, {0, 0x04ade791}, - {0, 0x04b75856}, {0, 0x04c1f89e}, {0, 0x04c2f731}, {0, 0x04ca0376}, - {0, 0x04ca102a}, {0, 0x04cbdfe5}, {0, 0x04cbe35a}, {0, 0x04ccfa95}, - {0, 0x04dcd6e4}, {0, 0x05066d8b}, {0, 0x05150274}, {0, 0x051dcfa0}, - {0, 0x052a4c40}, {0, 0x05310c4e}, {0, 0x05452f69}, {0, 0x05517592}, - {0, 0x05543eb8}, {0, 0x05549dc7}, {0, 0x05732695}, {0, 0x057b00d3}, - {0, 0x0584760d}, {0, 0x059ca419}, {0, 0x05b23b58}, {0, 0x05c69745}, - {0, 0x05e31a12}, {0, 0x05e932d5}, {0, 0x05ef8400}, {0, 0x05f0bdf6}, - {0, 0x05f93997}, {0, 0x05ff2978}, {0, 0x06030233}, {0, 0x0627d615}, - {0, 0x0644a441}, {0, 0x06518661}, {0, 0x06805ef2}, {0, 0x068c43dd}, - {0, 0x069cca16}, {0, 0x06acbf10}, {0, 0x06c2d607}, {0, 0x06d9ea08}, - {0, 0x0700d639}, {0, 0x07083d86}, {0, 0x071cc39d}, {0, 0x072c3cb8}, - {0, 0x07665a0f}, {0, 0x07741214}, + {0, 0x0017f257}, {0, 0x000d4581}, {0, 0x0048042c}, {0, 0x0025bff0}, + {0, 0x2002d3f8}, {0, 0x6001161f}, {0, 0xe000c5e5}, {0, 0x2000cce2}, + {0, 0x40004753}, {0, 0x80025297}, {0, 0x600009de}, {0, 0x6005780c}, + {0, 0x40025ae9}, {0, 0xc000341c}, {0, 0xc0053062}, {0, 0x40002f90}, + {0, 0xc00047ae}, {0, 0xa0015716}, {0, 0x2000d499}, {0, 0x80009b45}, + {0, 0xc000a7c9}, {0, 0x8001f8ba}, {0, 0xc000d147}, {0, 0x60018ac3}, + {0, 0xc000a9ac}, {0, 0xa003f6e6}, {0, 0x2007436e}, {0, 0xc0013f28}, + {0, 0x00010892}, {0, 0xa0000027}, {0, 0x40008de9}, {0, 0x400019f3}, + {0, 0x00025b86}, {0, 0x80002799}, {0, 0xc001eb0e}, {0, 0xe003e950}, + {0, 0xe001ff87}, {0, 0x000158b0}, {0, 0x600189da}, {0, 0x0000028c}, + {0, 0x600014ca}, {0, 0x60000e4d}, {0, 0xc0000820}, {0, 0xa005184e}, + {0, 0x40012b22}, {0, 0xe0028f6b}, {0, 0xe0027bce}, {0, 0xa0007b51}, + {0, 0x8002496d}, {0, 0xc001f211}, {0, 0x00032bf0}, {0, 0x4002d767}, + {0, 0x6008410a}, {0, 0x800361c3}, {0, 0xe000f80d}, {0, 0xe009ac97}, + {0, 0x80002103}, {0, 0x6001fab4}, {0, 0x4002843b}, {0, 0x6002b67c}, + {0, 0xa000faf3}, {0, 0x6000949e}, {0, 0x80000f1f}, {0, 0x6000c946}, + {0, 0xe00314b3}, {0, 0x20012bbf}, {0, 0x00009c7e}, {0, 0x2003e63a}, + {0, 0x20025157}, {0, 0x80041ff5}, {0, 0x60012a6c}, {0, 0x4000119b}, + {0, 0xc000a454}, {0, 0x20042c4b}, {0, 0x0003003c}, {0, 0x000558b2}, + {0, 0x2000198c}, {0, 0x200b0b3e}, {0, 0x4001c1e4}, {0, 0x80000034}, + {0, 0xe00039d1}, {0, 0xc001ded3}, {0, 0x80006740}, {0, 0xc0014546}, + {0, 0x00036a1a}, {0, 0xa001ae9c}, {0, 0x6000a148}, {0, 0xe001fd73}, + {0, 0xa001cebb}, {0, 0xa000d4b8}, {0, 0xe00154b3}, {0, 0x40004bec}, + {0, 0xc003f230}, {0, 0xe0069a26}, {0, 0xa00072b4}, {0, 0xc002e1b2}, + {0, 0x20009a02}, {0, 0xc0004a10}, {0, 0xe0045a11}, {0, 0x60034d09}, + {0, 0x000073ff}, {0, 0x00003f1c}, {0, 0x4002c4fd}, 
{0, 0x2000bb60}, + {0, 0x4000b6b6}, {0, 0x6000ea25}, {0, 0x400989d9}, {0, 0xc000877f}, + {0, 0x6000d17c}, {0, 0xc0009228}, {0, 0x4002827f}, {0, 0x80056a85}, + {0, 0x40045af7}, {0, 0x6000df7a}, {0, 0xe00131a1}, {0, 0x40021386}, + {0, 0xa00891b5}, {0, 0x60007854} }; CBlockIndex CreateBlockIndex(int nHeight) @@ -213,11 +215,11 @@ BOOST_AUTO_TEST_CASE(CreateNewBlock_validity) for (unsigned int i = 0; i < sizeof(blockinfo)/sizeof(*blockinfo); ++i) { CBlock *pblock = &pblocktemplate->block; // pointer for convenience - pblock->nVersion = 1; + pblock->nVersion = 2; pblock->nTime = chainActive.Tip()->GetMedianTimePast()+1; CMutableTransaction txCoinbase(*pblock->vtx[0]); txCoinbase.nVersion = 1; - txCoinbase.vin[0].scriptSig = CScript(); + txCoinbase.vin[0].scriptSig = CScript() << (chainActive.Height() + 1); txCoinbase.vin[0].scriptSig.push_back(blockinfo[i].extranonce); txCoinbase.vin[0].scriptSig.push_back(chainActive.Height()); txCoinbase.vout[0].scriptPubKey = CScript(); diff --git a/src/test/net_tests.cpp b/src/test/net_tests.cpp index b401d809e00c..33f2102c608a 100644 --- a/src/test/net_tests.cpp +++ b/src/test/net_tests.cpp @@ -162,12 +162,12 @@ BOOST_AUTO_TEST_CASE(cnode_simple_test) bool fInboundIn = false; // Test that fFeeler is false by default. - CNode* pnode1 = new CNode(id++, NODE_NETWORK, height, hSocket, addr, 0, 0, pszDest, fInboundIn); + std::unique_ptr pnode1(new CNode(id++, NODE_NETWORK, height, hSocket, addr, 0, 0, pszDest, fInboundIn)); BOOST_CHECK(pnode1->fInbound == false); BOOST_CHECK(pnode1->fFeeler == false); fInboundIn = true; - CNode* pnode2 = new CNode(id++, NODE_NETWORK, height, hSocket, addr, 1, 1, pszDest, fInboundIn); + std::unique_ptr pnode2(new CNode(id++, NODE_NETWORK, height, hSocket, addr, 1, 1, pszDest, fInboundIn)); BOOST_CHECK(pnode2->fInbound == true); BOOST_CHECK(pnode2->fFeeler == false); } diff --git a/src/test/scheduler_tests.cpp b/src/test/scheduler_tests.cpp index 8ef9db655035..8a9378179d5c 100644 --- a/src/test/scheduler_tests.cpp +++ b/src/test/scheduler_tests.cpp @@ -47,8 +47,8 @@ BOOST_AUTO_TEST_CASE(manythreads) // // So... ten shared counters, which if all the tasks execute // properly will sum to the number of tasks done. - // Each task adds or subtracts from one of the counters a - // random amount, and then schedules another task 0-1000 + // Each task adds or subtracts a random amount from one of the + // counters, and then schedules another task 0-1000 // microseconds in the future to subtract or add from // the counter -random_amount+1, so in the end the shared // counters should sum to the number of initial tasks performed. 
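The reworded scheduler_tests comment above captures the invariant the manythreads test relies on: each task applies a random delta r to one of ten shared counters and schedules a compensating task that applies -r + 1, so every task pair nets to exactly +1 and the counters must sum to the number of initial tasks. A minimal standalone C++ sketch of that arithmetic follows; it uses no CScheduler or threads, and its constants and names are illustrative only, not taken from the test:

#include <cstdio>
#include <random>
#include <vector>

int main()
{
    std::mt19937 rng(42);                                 // fixed seed, illustrative only
    std::uniform_int_distribution<int> dist(-1000, 1000);

    const int nTasks = 1000;
    std::vector<long long> counters(10, 0);               // "ten shared counters"

    for (int i = 0; i < nTasks; ++i) {
        int r = dist(rng);
        long long& c = counters[i % counters.size()];
        c += r;        // initial task: add or subtract a random amount
        c += -r + 1;   // compensating task scheduled later: -random_amount + 1
    }

    long long sum = 0;
    for (long long c : counters) sum += c;
    std::printf("sum=%lld expected=%d\n", sum, nTasks);   // invariant: sum == nTasks
    return 0;
}

Each pair of deltas cancels except for the +1, which is why the real test can run the tasks on many threads in any order and still check a single deterministic total at the end.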
diff --git a/src/test/script_tests.cpp b/src/test/script_tests.cpp index f7267ade0d93..d3e10f14fcd7 100644 --- a/src/test/script_tests.cpp +++ b/src/test/script_tests.cpp @@ -388,7 +388,7 @@ class TestBuilder std::string JSONPrettyPrint(const UniValue& univalue) { std::string ret = univalue.write(4); - // Workaround for libunivalue pretty printer, which puts a space between comma's and newlines + // Workaround for libunivalue pretty printer, which puts a space between commas and newlines size_t pos = 0; while ((pos = ret.find(" \n", pos)) != std::string::npos) { ret.replace(pos, 2, "\n"); diff --git a/src/test/scriptnum_tests.cpp b/src/test/scriptnum_tests.cpp index bd841aedc3c9..843bece27bca 100644 --- a/src/test/scriptnum_tests.cpp +++ b/src/test/scriptnum_tests.cpp @@ -12,8 +12,10 @@ BOOST_FIXTURE_TEST_SUITE(scriptnum_tests, BasicTestingSetup) -static const int64_t values[] = \ -{ 0, 1, CHAR_MIN, CHAR_MAX, UCHAR_MAX, SHRT_MIN, USHRT_MAX, INT_MIN, INT_MAX, UINT_MAX, LONG_MIN, LONG_MAX }; +/** A selection of numbers that do not trigger int64_t overflow + * when added/subtracted. */ +static const int64_t values[] = { 0, 1, -2, 127, 128, -255, 256, (1LL << 15) - 1, -(1LL << 16), (1LL << 24) - 1, (1LL << 31), 1 - (1LL << 32), 1LL << 40 }; + static const int64_t offsets[] = { 1, 0x79, 0x80, 0x81, 0xFF, 0x7FFF, 0x8000, 0xFFFF, 0x10000}; static bool verify(const CScriptNum10& bignum, const CScriptNum& scriptnum) diff --git a/src/test/serialize_tests.cpp b/src/test/serialize_tests.cpp index d0e43159fb7c..189a09f53f54 100644 --- a/src/test/serialize_tests.cpp +++ b/src/test/serialize_tests.cpp @@ -90,7 +90,7 @@ BOOST_AUTO_TEST_CASE(sizes) BOOST_AUTO_TEST_CASE(floats_conversion) { - // Choose values that map unambigiously to binary floating point to avoid + // Choose values that map unambiguously to binary floating point to avoid // rounding issues at the compiler side. BOOST_CHECK_EQUAL(ser_uint32_to_float(0x00000000), 0.0F); BOOST_CHECK_EQUAL(ser_uint32_to_float(0x3f000000), 0.5F); @@ -109,7 +109,7 @@ BOOST_AUTO_TEST_CASE(floats_conversion) BOOST_AUTO_TEST_CASE(doubles_conversion) { - // Choose values that map unambigiously to binary floating point to avoid + // Choose values that map unambiguously to binary floating point to avoid // rounding issues at the compiler side. 
BOOST_CHECK_EQUAL(ser_uint64_to_double(0x0000000000000000ULL), 0.0); BOOST_CHECK_EQUAL(ser_uint64_to_double(0x3fe0000000000000ULL), 0.5); diff --git a/src/test/skiplist_tests.cpp b/src/test/skiplist_tests.cpp index 4692e5d6a14e..aafaaddccf5c 100644 --- a/src/test/skiplist_tests.cpp +++ b/src/test/skiplist_tests.cpp @@ -100,4 +100,47 @@ BOOST_AUTO_TEST_CASE(getlocator_test) } } +BOOST_AUTO_TEST_CASE(findearliestatleast_test) +{ + std::vector vHashMain(100000); + std::vector vBlocksMain(100000); + for (unsigned int i=0; inTimeMax >= test_time); + BOOST_CHECK((ret->pprev==NULL) || ret->pprev->nTimeMax < test_time); + BOOST_CHECK(vBlocksMain[r].GetAncestor(ret->nHeight) == ret); + } +} BOOST_AUTO_TEST_SUITE_END() diff --git a/src/test/test_dash.cpp b/src/test/test_dash.cpp index c4281c2b394e..574b240def04 100644 --- a/src/test/test_dash.cpp +++ b/src/test/test_dash.cpp @@ -142,7 +142,7 @@ TestChain100Setup::~TestChain100Setup() CTxMemPoolEntry TestMemPoolEntryHelper::FromTx(const CMutableTransaction &tx, CTxMemPool *pool) { CTransaction txn(tx); - // Hack to assume either its completely dependent on other mempool txs or not at all + // Hack to assume either it's completely dependent on other mempool txs or not at all CAmount inChainValue = pool && pool->HasNoInputsOf(txn) ? txn.GetValueOut() : 0; return CTxMemPoolEntry(MakeTransactionRef(txn), nFee, nTime, dPriority, nHeight, diff --git a/src/test/transaction_tests.cpp b/src/test/transaction_tests.cpp index da227442d25e..dad3702cd41e 100644 --- a/src/test/transaction_tests.cpp +++ b/src/test/transaction_tests.cpp @@ -335,7 +335,7 @@ BOOST_AUTO_TEST_CASE(test_IsStandard) BOOST_CHECK(IsStandardTx(t, reason)); // Check dust with default relay fee: - CAmount nDustThreshold = 182 * minRelayTxFee.GetFeePerK()/1000 * 3; + CAmount nDustThreshold = 182 * dustRelayFee.GetFeePerK()/1000 * 3; BOOST_CHECK_EQUAL(nDustThreshold, 546); // dust: t.vout[0].nValue = nDustThreshold - 1; @@ -348,12 +348,12 @@ BOOST_AUTO_TEST_CASE(test_IsStandard) // nDustThreshold = 182 * 1234 / 1000 * 3 minRelayTxFee = CFeeRate(1234); // dust: - t.vout[0].nValue = 672 - 1; + t.vout[0].nValue = 546 - 1; BOOST_CHECK(!IsStandardTx(t, reason)); // not dust: - t.vout[0].nValue = 672; + t.vout[0].nValue = 546; BOOST_CHECK(IsStandardTx(t, reason)); - minRelayTxFee = CFeeRate(DEFAULT_MIN_RELAY_TX_FEE); + minRelayTxFee = CFeeRate(DUST_RELAY_TX_FEE); t.vout[0].scriptPubKey = CScript() << OP_1; BOOST_CHECK(!IsStandardTx(t, reason)); diff --git a/src/test/txvalidationcache_tests.cpp b/src/test/txvalidationcache_tests.cpp index 4209d191d8f8..8e6637e4f1be 100644 --- a/src/test/txvalidationcache_tests.cpp +++ b/src/test/txvalidationcache_tests.cpp @@ -39,6 +39,7 @@ BOOST_FIXTURE_TEST_CASE(tx_mempool_block_doublespend, TestChain100Setup) spends.resize(2); for (int i = 0; i < 2; i++) { + spends[i].nVersion = 1; spends[i].vin.resize(1); spends[i].vin[0].prevout.hash = coinbaseTxns[0].GetHash(); spends[i].vin[0].prevout.n = 0; diff --git a/src/torcontrol.cpp b/src/torcontrol.cpp index 9b6cc0e0de49..952149c06698 100644 --- a/src/torcontrol.cpp +++ b/src/torcontrol.cpp @@ -372,7 +372,7 @@ class TorController struct event *reconnect_ev; float reconnect_timeout; CService service; - /** Cooie for SAFECOOKIE auth */ + /** Cookie for SAFECOOKIE auth */ std::vector cookie; /** ClientNonce for SAFECOOKIE auth */ std::vector clientNonce; diff --git a/src/txmempool.cpp b/src/txmempool.cpp index 3c64a8db8116..a55a657eaf87 100644 --- a/src/txmempool.cpp +++ b/src/txmempool.cpp @@ -9,6 +9,7 @@ #include 
"consensus/consensus.h" #include "consensus/validation.h" #include "validation.h" +#include "policy/policy.h" #include "policy/fees.h" #include "random.h" #include "streams.h" @@ -355,7 +356,6 @@ CTxMemPool::CTxMemPool(const CFeeRate& _minReasonableRelayFee) : nCheckFrequency = 0; minerPolicyEstimator = new CBlockPolicyEstimator(_minReasonableRelayFee); - minReasonableRelayFee = _minReasonableRelayFee; } CTxMemPool::~CTxMemPool() @@ -383,6 +383,7 @@ void CTxMemPool::AddTransactionsUpdated(unsigned int n) bool CTxMemPool::addUnchecked(const uint256& hash, const CTxMemPoolEntry &entry, setEntries &setAncestors, bool validFeeEstimate) { + NotifyEntryAdded(entry.GetSharedTx()); // Add to memory pool without checking anything. // Used by main.cpp AcceptToMemoryPool(), which DOES do // all the appropriate checks. @@ -592,8 +593,9 @@ bool CTxMemPool::removeSpentIndex(const uint256 txhash) return true; } -void CTxMemPool::removeUnchecked(txiter it) +void CTxMemPool::removeUnchecked(txiter it, MemPoolRemovalReason reason) { + NotifyEntryRemoved(it->GetSharedTx(), reason); const uint256 hash = it->GetTx().GetHash(); BOOST_FOREACH(const CTxIn& txin, it->GetTx().vin) mapNextTx.erase(txin.prevout); @@ -638,7 +640,7 @@ void CTxMemPool::CalculateDescendants(txiter entryit, setEntries &setDescendants } } -void CTxMemPool::removeRecursive(const CTransaction &origTx) +void CTxMemPool::removeRecursive(const CTransaction &origTx, MemPoolRemovalReason reason) { // Remove transaction from memory pool { @@ -665,7 +667,8 @@ void CTxMemPool::removeRecursive(const CTransaction &origTx) BOOST_FOREACH(txiter it, txToRemove) { CalculateDescendants(it, setAllRemoves); } - RemoveStaged(setAllRemoves, false); + + RemoveStaged(setAllRemoves, false, reason); } } @@ -703,7 +706,7 @@ void CTxMemPool::removeForReorg(const CCoinsViewCache *pcoins, unsigned int nMem for (txiter it : txToRemove) { CalculateDescendants(it, setAllRemoves); } - RemoveStaged(setAllRemoves, false); + RemoveStaged(setAllRemoves, false, MemPoolRemovalReason::REORG); } void CTxMemPool::removeConflicts(const CTransaction &tx) @@ -716,8 +719,8 @@ void CTxMemPool::removeConflicts(const CTransaction &tx) const CTransaction &txConflict = *it->second; if (txConflict != tx) { - removeRecursive(txConflict); ClearPrioritisation(txConflict.GetHash()); + removeRecursive(txConflict, MemPoolRemovalReason::CONFLICT); } } } @@ -746,7 +749,7 @@ void CTxMemPool::removeForBlock(const std::vector& vtx, unsigne if (it != mapTx.end()) { setEntries stage; stage.insert(it); - RemoveStaged(stage, true); + RemoveStaged(stage, true, MemPoolRemovalReason::BLOCK); } removeConflicts(*tx); ClearPrioritisation(tx->GetHash()); @@ -1124,11 +1127,11 @@ size_t CTxMemPool::DynamicMemoryUsage() const { return memusage::MallocUsage(sizeof(CTxMemPoolEntry) + 15 * sizeof(void*)) * mapTx.size() + memusage::DynamicUsage(mapNextTx) + memusage::DynamicUsage(mapDeltas) + memusage::DynamicUsage(mapLinks) + cachedInnerUsage; } -void CTxMemPool::RemoveStaged(setEntries &stage, bool updateDescendants) { +void CTxMemPool::RemoveStaged(setEntries &stage, bool updateDescendants, MemPoolRemovalReason reason) { AssertLockHeld(cs); UpdateForRemoveFromMempool(stage, updateDescendants); BOOST_FOREACH(const txiter& it, stage) { - removeUnchecked(it); + removeUnchecked(it, reason); } } @@ -1144,7 +1147,7 @@ int CTxMemPool::Expire(int64_t time) { BOOST_FOREACH(txiter removeit, toremove) { CalculateDescendants(removeit, stage); } - RemoveStaged(stage, false); + RemoveStaged(stage, false, 
MemPoolRemovalReason::EXPIRY); return stage.size(); } @@ -1210,12 +1213,12 @@ CFeeRate CTxMemPool::GetMinFee(size_t sizelimit) const { rollingMinimumFeeRate = rollingMinimumFeeRate / pow(2.0, (time - lastRollingFeeUpdate) / halflife); lastRollingFeeUpdate = time; - if (rollingMinimumFeeRate < (double)minReasonableRelayFee.GetFeePerK() / 2) { + if (rollingMinimumFeeRate < (double)incrementalRelayFee.GetFeePerK() / 2) { rollingMinimumFeeRate = 0; return CFeeRate(0); } } - return std::max(CFeeRate(rollingMinimumFeeRate), minReasonableRelayFee); + return std::max(CFeeRate(rollingMinimumFeeRate), incrementalRelayFee); } void CTxMemPool::UpdateMinFee(const CFeeRate& _minReasonableRelayFee) @@ -1223,7 +1226,6 @@ void CTxMemPool::UpdateMinFee(const CFeeRate& _minReasonableRelayFee) LOCK(cs); delete minerPolicyEstimator; minerPolicyEstimator = new CBlockPolicyEstimator(_minReasonableRelayFee); - minReasonableRelayFee = _minReasonableRelayFee; } void CTxMemPool::trackPackageRemoved(const CFeeRate& rate) { @@ -1247,7 +1249,7 @@ void CTxMemPool::TrimToSize(size_t sizelimit, std::vector* pvNoSpends // to have 0 fee). This way, we don't allow txn to enter mempool with feerate // equal to txn which were removed with no block in between. CFeeRate removed(it->GetModFeesWithDescendants(), it->GetSizeWithDescendants()); - removed += minReasonableRelayFee; + removed += incrementalRelayFee; trackPackageRemoved(removed); maxFeeRateRemoved = std::max(maxFeeRateRemoved, removed); @@ -1261,7 +1263,7 @@ void CTxMemPool::TrimToSize(size_t sizelimit, std::vector* pvNoSpends BOOST_FOREACH(txiter iter, stage) txn.push_back(iter->GetTx()); } - RemoveStaged(stage, false); + RemoveStaged(stage, false, MemPoolRemovalReason::SIZELIMIT); if (pvNoSpendsRemaining) { BOOST_FOREACH(const CTransaction& tx, txn) { BOOST_FOREACH(const CTxIn& txin, tx.vin) { diff --git a/src/txmempool.h b/src/txmempool.h index 63d16eedcbd1..28d024c298cf 100644 --- a/src/txmempool.h +++ b/src/txmempool.h @@ -27,6 +27,8 @@ #include "boost/multi_index/ordered_index.hpp" #include "boost/multi_index/hashed_index.hpp" +#include + class CAutoFile; class CBlockIndex; @@ -333,6 +335,19 @@ struct TxMempoolInfo int64_t nFeeDelta; }; +/** Reason why a transaction was removed from the mempool, + * this is passed to the notification signal. + */ +enum class MemPoolRemovalReason { + UNKNOWN = 0, //! Manually removed or unknown reason + EXPIRY, //! Expired from mempool + SIZELIMIT, //! Removed in size limiting + REORG, //! Removed for reorganization + BLOCK, //! Removed for block + CONFLICT, //! Removed for conflict with in-block transaction + REPLACED //! Removed for replacement +}; + class SaltedTxidHasher { private: @@ -354,7 +369,7 @@ class SaltedTxidHasher * Transactions are added when they are seen on the network (or created by the * local node), but not all transactions seen are added to the pool. For * example, the following new transactions will not be added to the mempool: - * - a transaction which doesn't make the mimimum fee requirements. + * - a transaction which doesn't meet the minimum fee requirements. * - a new transaction that double-spends an input of a transaction already in * the pool where the new transaction does not meet the Replace-By-Fee * requirements as defined in BIP 125. 
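The GetMinFee(size_t) hunk above swaps minReasonableRelayFee for the incrementalRelayFee policy variable but keeps the same mechanism: the rolling minimum feerate recorded at the last mempool trim decays exponentially with a halflife, and once it falls below half the incremental relay fee it is clamped to zero. A small standalone sketch of that decay follows; the halflife, starting feerate, and fee constants are assumed for illustration and are not the values used by the codebase:

#include <cmath>
#include <cstdint>
#include <cstdio>

int main()
{
    const double halflife = 60 * 60 * 12;        // assumed 12-hour halflife, in seconds
    const double incrementalFeePerK = 1000.0;    // stand-in for incrementalRelayFee.GetFeePerK()

    double rollingMinimumFeeRate = 8000.0;       // feerate recorded when the pool was last trimmed
    const int64_t lastRollingFeeUpdate = 0;

    for (int64_t t = 0; t <= 72 * 60 * 60; t += 12 * 60 * 60) {
        // Same decay expression as in the GetMinFee() hunk above.
        double decayed = rollingMinimumFeeRate / std::pow(2.0, (t - lastRollingFeeUpdate) / halflife);
        // Below half the incremental relay fee, GetMinFee() returns 0 instead of decaying forever.
        if (decayed < incrementalFeePerK / 2) decayed = 0.0;
        std::printf("t=%2lldh  min feerate=%.1f\n", (long long)(t / 3600), decayed);
    }
    return 0;
}

The clamp is what lets the mempool minimum fee return all the way to zero in finite time after a burst of evictions, rather than approaching it asymptotically.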
@@ -437,8 +452,6 @@ class CTxMemPool uint64_t totalTxSize; //!< sum of all mempool tx' byte sizes uint64_t cachedInnerUsage; //!< sum of dynamic memory usage of all the map elements (NOT the maps themselves) - CFeeRate minReasonableRelayFee; - mutable int64_t lastRollingFeeUpdate; mutable bool blockSinceLastRollingFeeBump; mutable double rollingMinimumFeeRate; //!< minimum fee to get into the pool, decreases exponentially @@ -526,9 +539,6 @@ class CTxMemPool std::map > mapDeltas; /** Create a new CTxMemPool. - * minReasonableRelayFee should be a feerate which is, roughly, somewhere - * around what it "costs" to relay a transaction around the network and - * below which we would reasonably say a transaction has 0-effective-fee. */ CTxMemPool(const CFeeRate& _minReasonableRelayFee); ~CTxMemPool(); @@ -558,10 +568,11 @@ class CTxMemPool bool getSpentIndex(CSpentIndexKey &key, CSpentIndexValue &value); bool removeSpentIndex(const uint256 txhash); - void removeRecursive(const CTransaction &tx); + void removeRecursive(const CTransaction &tx, MemPoolRemovalReason reason = MemPoolRemovalReason::UNKNOWN); void removeForReorg(const CCoinsViewCache *pcoins, unsigned int nMemPoolHeight, int flags); void removeConflicts(const CTransaction &tx); void removeForBlock(const std::vector& vtx, unsigned int nBlockHeight); + void clear(); void _clear(); //lock free bool CompareDepthAndScore(const uint256& hasha, const uint256& hashb); @@ -588,7 +599,7 @@ class CTxMemPool * Set updateDescendants to true when removing a tx that was in a block, so * that any in-mempool descendants have their ancestor state updated. */ - void RemoveStaged(setEntries &stage, bool updateDescendants); + void RemoveStaged(setEntries &stage, bool updateDescendants, MemPoolRemovalReason reason = MemPoolRemovalReason::UNKNOWN); /** When adding transactions from a disconnected block back to the mempool, * new mempool entries may have children in the mempool (which is generally @@ -620,7 +631,7 @@ class CTxMemPool /** The minimum fee to get into the mempool, which may itself not be enough * for larger-sized transactions. - * The minReasonableRelayFee constructor arg is used to bound the time it + * The incrementalRelayFee policy variable is used to bound the time it * takes the fee rate to go back down all the way to 0. When the feerate * would otherwise be half of this, it is set to 0 instead. */ @@ -692,6 +703,9 @@ class CTxMemPool size_t DynamicMemoryUsage() const; + boost::signals2::signal NotifyEntryAdded; + boost::signals2::signal NotifyEntryRemoved; + private: /** UpdateForDescendants is used by UpdateTransactionsFromBlock to update * the descendants for a single transaction that has been added to the @@ -728,7 +742,7 @@ class CTxMemPool * transactions in a chain before we've updated all the state for the * removal. */ - void removeUnchecked(txiter entry); + void removeUnchecked(txiter entry, MemPoolRemovalReason reason = MemPoolRemovalReason::UNKNOWN); }; /** diff --git a/src/utiltime.cpp b/src/utiltime.cpp index 3ebd1809f0f8..58842385ec6a 100644 --- a/src/utiltime.cpp +++ b/src/utiltime.cpp @@ -64,7 +64,7 @@ void MilliSleep(int64_t n) { /** - * Boost's sleep_for was uninterruptable when backed by nanosleep from 1.50 + * Boost's sleep_for was uninterruptible when backed by nanosleep from 1.50 * until fixed in 1.52. Use the deprecated sleep method for the broken case. 
* See: https://svn.boost.org/trac/boost/ticket/7238 */ diff --git a/src/validation.cpp b/src/validation.cpp index 87d9863f2c45..c9c258aa7481 100644 --- a/src/validation.cpp +++ b/src/validation.cpp @@ -107,11 +107,6 @@ map mapRejectedBlocks GUARDED_BY(cs_main); void LoopMapOrphanTransactionsByPrev(const CTransaction &tx, std::vector &vOrphanErase); int EraseOrphanTx(uint256 hash); -/** - * Returns true if there are nRequired or more blocks of minVersion or above - * in the last Consensus::Params::nMajorityWindow blocks, starting at pstart and going backwards. - */ -static bool IsSuperMajority(int minVersion, const CBlockIndex* pstart, unsigned nRequired, const Consensus::Params& consensusParams); static void CheckBlockIndex(const Consensus::Params& consensusParams); /** Constant stuff for coinbase transactions we create: */ @@ -184,6 +179,39 @@ namespace { set setDirtyFileInfo; } // anon namespace +/* Use this class to start tracking transactions that are removed from the + * mempool and pass all those transactions through SyncTransaction when the + * object goes out of scope. This is currently only used to call SyncTransaction + * on conflicts removed from the mempool during block connection. Applied in + * ActivateBestChain around ActivateBestStep which in turn calls: + * ConnectTip->removeForBlock->removeConflicts + */ +class MemPoolConflictRemovalTracker +{ +private: + std::vector conflictedTxs; + CTxMemPool &pool; + +public: + MemPoolConflictRemovalTracker(CTxMemPool &_pool) : pool(_pool) { + pool.NotifyEntryRemoved.connect(boost::bind(&MemPoolConflictRemovalTracker::NotifyEntryRemoved, this, _1, _2)); + } + + void NotifyEntryRemoved(CTransactionRef txRemoved, MemPoolRemovalReason reason) { + if (reason == MemPoolRemovalReason::CONFLICT) { + conflictedTxs.push_back(txRemoved); + } + } + + ~MemPoolConflictRemovalTracker() { + pool.NotifyEntryRemoved.disconnect(boost::bind(&MemPoolConflictRemovalTracker::NotifyEntryRemoved, this, _1, _2)); + for (const auto& tx : conflictedTxs) { + GetMainSignals().SyncTransaction(*tx, NULL, CMainSignals::SYNC_TRANSACTION_NOT_IN_BLOCK); + } + conflictedTxs.clear(); + } +}; + CBlockIndex* FindForkInGlobalIndex(const CChain& chain, const CBlockLocator& locator) { // Find the first block the caller has in the main chain @@ -211,7 +239,8 @@ enum FlushStateMode { }; // See definition for documentation -bool static FlushStateToDisk(CValidationState &state, FlushStateMode mode); +bool static FlushStateToDisk(CValidationState &state, FlushStateMode mode, int nManualPruneHeight=0); +void FindFilesToPruneManual(std::set& setFilesToPrune, int nManualPruneHeight); bool IsFinalTx(const CTransaction &tx, int nBlockHeight, int64_t nBlockTime) { @@ -855,7 +884,8 @@ bool AcceptToMemoryPoolWorker(CTxMemPool& pool, CValidationState& state, const C // subsequent RemoveStaged() and addUnchecked() calls don't guarantee // mempool consistency for us. 
LOCK(pool.cs); - if (setConflicts.size()) + const bool fReplacementTransaction = setConflicts.size(); + if (fReplacementTransaction) { CFeeRate newFeeRate(nModifiedFees, nSize); set setConflictsParents; @@ -1002,12 +1032,13 @@ bool AcceptToMemoryPoolWorker(CTxMemPool& pool, CValidationState& state, const C FormatMoney(nModifiedFees - nConflictingFees), (int)nSize - (int)nConflictingSize); } - pool.RemoveStaged(allConflicting, false); + pool.RemoveStaged(allConflicting, false, MemPoolRemovalReason::REPLACED); - // This transaction should only count for fee estimation if - // the node is not behind and it is not dependent on any other - // transactions in the mempool - bool validForFeeEstimation = IsCurrentForFeeEstimation() && pool.HasNoInputsOf(tx); + // This transaction should only count for fee estimation if it isn't a + // BIP 125 replacement transaction (may not be widely supported), the + // node is not behind, and the transaction is not dependent on any other + // transactions in the mempool. + bool validForFeeEstimation = !fReplacementTransaction && IsCurrentForFeeEstimation() && pool.HasNoInputsOf(tx); // Store transaction in memory pool.addUnchecked(hash, entry, setAncestors, validForFeeEstimation); @@ -2085,15 +2116,13 @@ static bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockInd unsigned int flags = fStrictPayToScriptHash ? SCRIPT_VERIFY_P2SH : SCRIPT_VERIFY_NONE; - // Start enforcing the DERSIG (BIP66) rules, for block.nVersion=3 blocks, - // when 75% of the network has upgraded: - if (block.nVersion >= 3 && IsSuperMajority(3, pindex->pprev, chainparams.GetConsensus().nMajorityEnforceBlockUpgrade, chainparams.GetConsensus())) { + // Start enforcing the DERSIG (BIP66) rule + if (pindex->nHeight >= chainparams.GetConsensus().BIP66Height) { flags |= SCRIPT_VERIFY_DERSIG; } - // Start enforcing CHECKLOCKTIMEVERIFY, (BIP65) for block.nVersion=4 - // blocks, when 75% of the network has upgraded: - if (block.nVersion >= 4 && IsSuperMajority(4, pindex->pprev, chainparams.GetConsensus().nMajorityEnforceBlockUpgrade, chainparams.GetConsensus())) { + // Start enforcing CHECKLOCKTIMEVERIFY (BIP65) rule + if (pindex->nHeight >= chainparams.GetConsensus().BIP65Height) { flags |= SCRIPT_VERIFY_CHECKLOCKTIMEVERIFY; } @@ -2355,7 +2384,7 @@ static bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockInd * if they're too large, if it's been a while since the last write, * or always and in all cases if we're in prune mode and are deleting files. 
*/ -bool static FlushStateToDisk(CValidationState &state, FlushStateMode mode) { +bool static FlushStateToDisk(CValidationState &state, FlushStateMode mode, int nManualPruneHeight) { int64_t nMempoolUsage = mempool.DynamicMemoryUsage(); const CChainParams& chainparams = Params(); LOCK2(cs_main, cs_LastBlockFile); @@ -2365,9 +2394,13 @@ bool static FlushStateToDisk(CValidationState &state, FlushStateMode mode) { std::set setFilesToPrune; bool fFlushForPrune = false; try { - if (fPruneMode && fCheckForPruning && !fReindex) { - FindFilesToPrune(setFilesToPrune, chainparams.PruneAfterHeight()); - fCheckForPruning = false; + if (fPruneMode && (fCheckForPruning || nManualPruneHeight > 0) && !fReindex) { + if (nManualPruneHeight > 0) { + FindFilesToPruneManual(setFilesToPrune, nManualPruneHeight); + } else { + FindFilesToPrune(setFilesToPrune, chainparams.PruneAfterHeight()); + fCheckForPruning = false; + } if (!setFilesToPrune.empty()) { fFlushForPrune = true; if (!fHavePruned) { @@ -2422,7 +2455,7 @@ bool static FlushStateToDisk(CValidationState &state, FlushStateMode mode) { setDirtyBlockIndex.erase(it++); } if (!pblocktree->WriteBatchSync(vFiles, nLastBlockFile, vBlocks)) { - return AbortNode(state, "Files to write to block index database"); + return AbortNode(state, "Failed to write to block index database"); } } // Finally remove any pruned files @@ -2522,7 +2555,7 @@ void static UpdateTip(CBlockIndex *pindexNew, const CChainParams& chainParams) { chainActive.Tip()->GetBlockHash().ToString(), chainActive.Height(), chainActive.Tip()->nVersion, log(chainActive.Tip()->nChainWork.getdouble())/log(2.0), (unsigned long)chainActive.Tip()->nChainTx, DateTimeStrFormat("%Y-%m-%d %H:%M:%S", chainActive.Tip()->GetBlockTime()), - Checkpoints::GuessVerificationProgress(chainParams.Checkpoints(), chainActive.Tip()), pcoinsTip->DynamicMemoryUsage() * (1.0 / (1<<20)), pcoinsTip->GetCacheSize()); + GuessVerificationProgress(chainParams.TxData(), chainActive.Tip()), pcoinsTip->DynamicMemoryUsage() * (1.0 / (1<<20)), pcoinsTip->GetCacheSize()); if (!warningMessages.empty()) LogPrintf(" warning='%s'", boost::algorithm::join(warningMessages, ", ")); LogPrintf("\n"); @@ -2557,7 +2590,7 @@ bool static DisconnectTip(CValidationState& state, const CChainParams& chainpara // ignore validation errors in resurrected transactions CValidationState stateDummy; if (tx.IsCoinBase() || !AcceptToMemoryPool(mempool, stateDummy, it, false, NULL, true)) { - mempool.removeRecursive(tx); + mempool.removeRecursive(tx, MemPoolRemovalReason::REORG); } else if (mempool.exists(tx.GetHash())) { vHashUpdate.push_back(tx.GetHash()); } @@ -2881,6 +2914,14 @@ bool ActivateBestChain(CValidationState &state, const CChainParams& chainparams, bool fInitialDownload; { LOCK(cs_main); + { // TODO: Temporarily ensure that mempool removals are notified before + // connected transactions. This shouldn't matter, but the abandoned + // state of transactions in our wallet is currently cleared when we + // receive another notification and there is a race condition where + // notification of a connected conflict might cause an outside process + // to abandon a transaction and then have it inadvertently cleared by + // the notification that the conflicted transaction was evicted. 
+ MemPoolConflictRemovalTracker mrt(mempool); CBlockIndex *pindexOldTip = chainActive.Tip(); if (pindexMostWork == NULL) { pindexMostWork = FindMostWorkChain(); @@ -2902,20 +2943,23 @@ bool ActivateBestChain(CValidationState &state, const CChainParams& chainparams, pindexNewTip = chainActive.Tip(); pindexFork = chainActive.FindFork(pindexOldTip); fInitialDownload = IsInitialBlockDownload(); + + // throw all transactions through the signal-interface + + } // MemPoolConflictRemovalTracker destroyed and conflict evictions are notified + + // Transactions in the connected block are notified + for (const auto& pair : connectTrace.blocksConnected) { + assert(pair.second); + const CBlock& block = *(pair.second); + for (unsigned int i = 0; i < block.vtx.size(); i++) + GetMainSignals().SyncTransaction(*block.vtx[i], pair.first, i); + } } // When we reach this point, we switched to a new tip (stored in pindexNewTip). // Notifications/callbacks that can run without cs_main - // throw all transactions though the signal-interface - // while _not_ holding the cs_main lock - for (const auto& pair : connectTrace.blocksConnected) { - assert(pair.second); - const CBlock& block = *(pair.second); - for (unsigned int i = 0; i < block.vtx.size(); i++) - GetMainSignals().SyncTransaction(*block.vtx[i], pair.first, i); - } - // Notify external listeners about the new tip. GetMainSignals().UpdatedBlockTip(pindexNewTip, pindexFork, fInitialDownload); @@ -3061,6 +3105,7 @@ CBlockIndex* AddToBlockIndex(const CBlockHeader& block) pindexNew->nHeight = pindexNew->pprev->nHeight + 1; pindexNew->BuildSkip(); } + pindexNew->nTimeMax = (pindexNew->pprev ? std::max(pindexNew->pprev->nTimeMax, pindexNew->nTime) : pindexNew->nTime); pindexNew->nChainWork = (pindexNew->pprev ? pindexNew->pprev->nChainWork : 0) + GetBlockProof(*pindexNew); pindexNew->RaiseValidity(BLOCK_VALID_TREE); if (pindexBestHeader == NULL || pindexBestHeader->nChainWork < pindexNew->nChainWork) @@ -3333,7 +3378,7 @@ static bool CheckIndexAgainstCheckpoint(const CBlockIndex* pindexPrev, CValidati bool ContextualCheckBlockHeader(const CBlockHeader& block, CValidationState& state, const Consensus::Params& consensusParams, const CBlockIndex* pindexPrev) { - int nHeight = pindexPrev->nHeight + 1; + const int nHeight = pindexPrev == NULL ? 
0 : pindexPrev->nHeight + 1; // Check proof of work if(Params().NetworkIDString() == CBaseChainParams::MAIN && nHeight <= 68589){ // architecture issues with DGW v1 and v2) @@ -3353,11 +3398,12 @@ bool ContextualCheckBlockHeader(const CBlockHeader& block, CValidationState& sta if (block.GetBlockTime() <= pindexPrev->GetMedianTimePast()) return state.Invalid(false, REJECT_INVALID, "time-too-old", "block's timestamp is too early"); - // Reject outdated version blocks when 95% (75% on testnet) of the network has upgraded: - for (int32_t version = 2; version < 5; ++version) // check for version 2, 3 and 4 upgrades - if (block.nVersion < version && IsSuperMajority(version, pindexPrev, consensusParams.nMajorityRejectBlockOutdated, consensusParams)) - return state.Invalid(false, REJECT_OBSOLETE, strprintf("bad-version(0x%08x)", version - 1), - strprintf("rejected nVersion=0x%08x block", version - 1)); + // check for version 2, 3 and 4 upgrades + if((block.nVersion < 2 && nHeight >= consensusParams.BIP34Height) || + (block.nVersion < 3 && nHeight >= consensusParams.BIP66Height) || + (block.nVersion < 4 && nHeight >= consensusParams.BIP65Height)) + return state.Invalid(false, REJECT_OBSOLETE, strprintf("bad-version(0x%08x)", block.nVersion), + strprintf("rejected nVersion=0x%08x block", block.nVersion)); return true; } @@ -3400,9 +3446,8 @@ bool ContextualCheckBlock(const CBlock& block, CValidationState& state, const Co if (nSigOps > MaxBlockSigOps(fDIP0001Active_context)) return state.DoS(10, false, REJECT_INVALID, "bad-blk-sigops", false, "out-of-bounds SigOpCount"); - // Enforce block.nVersion=2 rule that the coinbase starts with serialized block height - // if 750 of the last 1,000 blocks are version 2 or greater (51/100 if testnet): - if (block.nVersion >= 2 && IsSuperMajority(2, pindexPrev, consensusParams.nMajorityEnforceBlockUpgrade, consensusParams)) + // Enforce rule that the coinbase starts with serialized block height + if (nHeight >= consensusParams.BIP34Height) { CScript expect = CScript() << nHeight; if (block.vtx[0]->vin[0].scriptSig.size() < expect.size() || @@ -3556,19 +3601,6 @@ static bool AcceptBlock(const CBlock& block, CValidationState& state, const CCha return true; } -static bool IsSuperMajority(int minVersion, const CBlockIndex* pstart, unsigned nRequired, const Consensus::Params& consensusParams) -{ - unsigned int nFound = 0; - for (int i = 0; i < consensusParams.nMajorityWindow && nFound < nRequired && pstart != NULL; i++) - { - if (pstart->nVersion >= minVersion) - ++nFound; - pstart = pstart->pprev; - } - return (nFound >= nRequired); -} - - bool ProcessNewBlock(const CChainParams& chainparams, const std::shared_ptr pblock, bool fForceProcessing, bool *fNewBlock) { { @@ -3679,6 +3711,35 @@ void UnlinkPrunedFiles(std::set& setFilesToPrune) } } +/* Calculate the block/rev files to delete based on height specified by user with RPC command pruneblockchain */ +void FindFilesToPruneManual(std::set& setFilesToPrune, int nManualPruneHeight) +{ + assert(fPruneMode && nManualPruneHeight > 0); + + LOCK2(cs_main, cs_LastBlockFile); + if (chainActive.Tip() == NULL) + return; + + // last block to prune is the lesser of (user-specified height, MIN_BLOCKS_TO_KEEP from the tip) + unsigned int nLastBlockWeCanPrune = min((unsigned)nManualPruneHeight, chainActive.Tip()->nHeight - MIN_BLOCKS_TO_KEEP); + int count=0; + for (int fileNumber = 0; fileNumber < nLastBlockFile; fileNumber++) { + if (vinfoBlockFile[fileNumber].nSize == 0 || vinfoBlockFile[fileNumber].nHeightLast > 
nLastBlockWeCanPrune) + continue; + PruneOneBlockFile(fileNumber); + setFilesToPrune.insert(fileNumber); + count++; + } + LogPrintf("Prune (Manual): prune_height=%d removed %d blk/rev pairs\n", nLastBlockWeCanPrune, count); +} + +/* This function is called from the RPC code for pruneblockchain */ +void PruneBlockFilesManual(int nManualPruneHeight) +{ + CValidationState state; + FlushStateToDisk(state, FLUSH_STATE_NONE, nManualPruneHeight); +} + /* Calculate the block/rev files that should be deleted to remain under target*/ void FindFilesToPrune(std::set& setFilesToPrune, uint64_t nPruneAfterHeight) { @@ -3814,6 +3875,7 @@ bool static LoadBlockIndexDB(const CChainParams& chainparams) { CBlockIndex* pindex = item.second; pindex->nChainWork = (pindex->pprev ? pindex->pprev->nChainWork : 0) + GetBlockProof(*pindex); + pindex->nTimeMax = (pindex->pprev ? std::max(pindex->pprev->nTimeMax, pindex->nTime) : pindex->nTime); // We can link the chain of blocks for which we've received transactions at some point. // Pruned nodes may have deleted the block. if (pindex->nTx > 0) { @@ -3910,7 +3972,7 @@ bool static LoadBlockIndexDB(const CChainParams& chainparams) LogPrintf("%s: hashBestChain=%s height=%d date=%s progress=%f\n", __func__, chainActive.Tip()->GetBlockHash().ToString(), chainActive.Height(), DateTimeStrFormat("%Y-%m-%d %H:%M:%S", chainActive.Tip()->GetBlockTime()), - Checkpoints::GuessVerificationProgress(chainparams.Checkpoints(), chainActive.Tip())); + GuessVerificationProgress(chainparams.TxData(), chainActive.Tip())); return true; } @@ -3968,7 +4030,7 @@ bool CVerifyDB::VerifyDB(const CChainParams& chainparams, CCoinsView *coinsview, return error("VerifyDB(): *** ReadBlockFromDisk failed at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString()); // check level 1: verify block validity if (nCheckLevel >= 1 && !CheckBlock(block, state, chainparams.GetConsensus(), GetAdjustedTime())) - return error("%s: *** found bad block at %d, hash=%s (%s)\n", __func__, + return error("%s: *** found bad block at %d, hash=%s (%s)\n", __func__, pindex->nHeight, pindex->GetBlockHash().ToString(), FormatStateMessage(state)); // check level 2: verify undo validity if (nCheckLevel >= 2 && pindex) { @@ -4072,7 +4134,7 @@ static bool AddGenesisBlock(const CChainParams& chainparams, const CBlock& block return true; } -bool InitBlockIndex(const CChainParams& chainparams) +bool InitBlockIndex(const CChainParams& chainparams) { LOCK(cs_main); @@ -4514,7 +4576,7 @@ void DumpMempool(void) { LOCK(mempool.cs); for (const auto &i : mempool.mapDeltas) { - mapDeltas[i.first] = i.second.first; + mapDeltas[i.first] = i.second.second; } vinfo = mempool.infoAll(); } @@ -4551,6 +4613,24 @@ void DumpMempool(void) } } +//! 
Guess how far we are in the verification process at the given block index +double GuessVerificationProgress(const ChainTxData& data, CBlockIndex *pindex) { + if (pindex == NULL) + return 0.0; + + int64_t nNow = time(NULL); + + double fTxTotal; + + if (pindex->nChainTx <= data.nTxCount) { + fTxTotal = data.nTxCount + (nNow - data.nTime) * data.dTxRate; + } else { + fTxTotal = pindex->nChainTx + (nNow - pindex->GetBlockTime()) * data.dTxRate; + } + + return pindex->nChainTx / fTxTotal; +} + class CMainCleanup { public: diff --git a/src/validation.h b/src/validation.h index 8ab228e5c5fa..811aecdfe5cd 100644 --- a/src/validation.h +++ b/src/validation.h @@ -45,6 +45,7 @@ class CScriptCheck; class CTxMemPool; class CValidationInterface; class CValidationState; +struct ChainTxData; struct LockPoints; @@ -293,6 +294,9 @@ double ConvertBitsToDouble(unsigned int nBits); CAmount GetBlockSubsidy(int nBits, int nHeight, const Consensus::Params& consensusParams, bool fSuperblockPartOnly = false); CAmount GetMasternodePayment(int nHeight, CAmount blockValue); +/** Guess verification progress (as a fraction between 0.0=genesis and 1.0=current tip). */ +double GuessVerificationProgress(const ChainTxData& data, CBlockIndex* pindex); + /** * Prune block and undo files (blk???.dat and undo???.dat) so that the disk space used is less than a user-defined target. * The user sets the target (in MB) on the command line or in config file. This will be run on startup and whenever new @@ -321,6 +325,8 @@ CBlockIndex * InsertBlockIndex(uint256 hash); void FlushStateToDisk(); /** Prune block files and flush state to disk. */ void PruneAndFlush(); +/** Prune block files up to a given height */ +void PruneBlockFilesManual(int nPruneUpToHeight); /** (try to) add transaction to memory pool **/ bool AcceptToMemoryPool(CTxMemPool& pool, CValidationState &state, const CTransactionRef &tx, bool fLimitFree, diff --git a/src/validationinterface.h b/src/validationinterface.h index 391a594a04b2..b0db718dbebb 100644 --- a/src/validationinterface.h +++ b/src/validationinterface.h @@ -55,9 +55,16 @@ struct CMainSignals { boost::signals2::signal NotifyHeaderTip; /** Notifies listeners of updated block chain tip */ boost::signals2::signal UpdatedBlockTip; - /** A posInBlock value for SyncTransaction which indicates the transaction was conflicted, disconnected, or not in a block */ + /** A posInBlock value for SyncTransaction calls for transactions not + * included in connected blocks such as transactions removed from mempool, + * accepted to mempool or appearing in disconnected blocks.*/ static const int SYNC_TRANSACTION_NOT_IN_BLOCK = -1; - /** Notifies listeners of updated transaction data (transaction, and optionally the block it is found in. */ + /** Notifies listeners of updated transaction data (transaction, and + * optionally the block it is found in). Called with block data when + * transaction is included in a connected block, and without block data when + * transaction was accepted to mempool, removed from mempool (only when + * removal was due to conflict from connected block), or appeared in a + * disconnected block.*/ boost::signals2::signal SyncTransaction; /** Notifies listeners of an updated transaction lock without new data. 
*/ boost::signals2::signal NotifyTransactionLock; diff --git a/src/wallet/rpcdump.cpp b/src/wallet/rpcdump.cpp index 65697854c311..4479d9bcf853 100644 --- a/src/wallet/rpcdump.cpp +++ b/src/wallet/rpcdump.cpp @@ -844,7 +844,8 @@ UniValue dumpwallet(const JSONRPCRequest& request) } -UniValue processImport(const UniValue& data) { +UniValue ProcessImport(const UniValue& data, const int64_t timestamp) +{ try { bool success = false; @@ -863,7 +864,6 @@ UniValue processImport(const UniValue& data) { const bool& internal = data.exists("internal") ? data["internal"].get_bool() : false; const bool& watchOnly = data.exists("watchonly") ? data["watchonly"].get_bool() : false; const string& label = data.exists("label") && !internal ? data["label"].get_str() : ""; - const int64_t& timestamp = data.exists("timestamp") && data["timestamp"].get_int64() > 1 ? data["timestamp"].get_int64() : 1; bool isScript = scriptPubKey.getType() == UniValue::VSTR; bool isP2SH = strRedeemScript.length() > 0; @@ -1162,6 +1162,20 @@ UniValue processImport(const UniValue& data) { } } +int64_t GetImportTimestamp(const UniValue& data, int64_t now) +{ + if (data.exists("timestamp")) { + const UniValue& timestamp = data["timestamp"]; + if (timestamp.isNum()) { + return timestamp.get_int64(); + } else if (timestamp.isStr() && timestamp.get_str() == "now") { + return now; + } + throw JSONRPCError(RPC_TYPE_ERROR, strprintf("Expected number or \"now\" timestamp value for key. got type %s", uvTypeName(timestamp.type()))); + } + throw JSONRPCError(RPC_TYPE_ERROR, "Missing required timestamp field for key"); +} + UniValue importmulti(const JSONRPCRequest& mainRequest) { // clang-format off @@ -1174,13 +1188,17 @@ UniValue importmulti(const JSONRPCRequest& mainRequest) " [ (array of json objects)\n" " {\n" " \"scriptPubKey\": \"