diff --git a/.cirrus.yml b/.cirrus.yml new file mode 100644 index 0000000000000..c4aae3a50aa5e --- /dev/null +++ b/.cirrus.yml @@ -0,0 +1,336 @@ +env: # Global defaults + PACKAGE_MANAGER_INSTALL: "apt-get update && apt-get install -y" + MAKEJOBS: "-j10" + TEST_RUNNER_PORT_MIN: "14000" # Must be larger than 12321, which is used for the http cache. See https://cirrus-ci.org/guide/writing-tasks/#http-cache + CI_FAILFAST_TEST_LEAVE_DANGLING: "1" # Cirrus CI does not care about dangling process and setting this variable avoids killing the CI script itself on error + CCACHE_SIZE: "200M" + CCACHE_DIR: "/tmp/ccache_dir" + CCACHE_NOHASHDIR: "1" # Debug info might contain a stale path if the build dir changes, but this is fine + +cirrus_ephemeral_worker_template_env: &CIRRUS_EPHEMERAL_WORKER_TEMPLATE_ENV + DANGER_RUN_CI_ON_HOST: "1" # Containers will be discarded after the run, so there is no risk that the ci scripts modify the system + +persistent_worker_template_env: &PERSISTENT_WORKER_TEMPLATE_ENV + RESTART_CI_DOCKER_BEFORE_RUN: "1" + +persistent_worker_template: &PERSISTENT_WORKER_TEMPLATE + persistent_worker: {} # https://cirrus-ci.org/guide/persistent-workers/ + +# https://cirrus-ci.org/guide/tips-and-tricks/#sharing-configuration-between-tasks +filter_template: &FILTER_TEMPLATE + skip: $CIRRUS_REPO_FULL_NAME == "bitcoin-core/gui" && $CIRRUS_PR == "" # No need to run on the read-only mirror, unless it is a PR. https://cirrus-ci.org/guide/writing-tasks/#conditional-task-execution + stateful: false # https://cirrus-ci.org/guide/writing-tasks/#stateful-tasks + +base_template: &BASE_TEMPLATE + << : *FILTER_TEMPLATE + merge_base_script: + # Unconditionally install git (used in fingerprint_script) and set the + # default git author name (used in verify-commits.py) + - bash -c "$PACKAGE_MANAGER_INSTALL git" + - git config --global user.email "ci@ci.ci" + - git config --global user.name "ci" + - if [ "$CIRRUS_PR" = "" ]; then exit 0; fi + - git fetch $CIRRUS_REPO_CLONE_URL $CIRRUS_BASE_BRANCH + - git merge FETCH_HEAD # Merge base to detect silent merge conflicts + +main_template: &MAIN_TEMPLATE + timeout_in: 120m # https://cirrus-ci.org/faq/#instance-timed-out + container: + # https://cirrus-ci.org/faq/#are-there-any-limits + # Each project has 16 CPU in total, assign 2 to each container, so that 8 tasks run in parallel + cpu: 2 + greedy: true + memory: 8G # Set to 8GB to avoid OOM. 
https://cirrus-ci.org/guide/linux/#linux-containers + ccache_cache: + folder: "/tmp/ccache_dir" + depends_built_cache: + folder: "depends/built" + fingerprint_script: echo $CIRRUS_TASK_NAME $(git rev-list -1 HEAD ./depends) + ci_script: + - ./ci/test_run_all.sh + +global_task_template: &GLOBAL_TASK_TEMPLATE + << : *BASE_TEMPLATE + << : *MAIN_TEMPLATE + +compute_credits_template: &CREDITS_TEMPLATE + # https://cirrus-ci.org/pricing/#compute-credits + # Only use credits for pull requests to the main repo + use_compute_credits: $CIRRUS_REPO_FULL_NAME == 'bitcoin/bitcoin' && $CIRRUS_PR != "" + +task: + name: 'lint [bionic]' + << : *BASE_TEMPLATE + container: + image: ubuntu:bionic # For python 3.6, oldest supported version according to doc/dependencies.md + cpu: 1 + memory: 1G + # For faster CI feedback, immediately schedule the linters + << : *CREDITS_TEMPLATE + lint_script: + - ./ci/lint_run_all.sh + env: + << : *CIRRUS_EPHEMERAL_WORKER_TEMPLATE_ENV + +task: + name: 'tidy [jammy]' + << : *GLOBAL_TASK_TEMPLATE + container: + image: ubuntu:jammy + cpu: 2 + memory: 5G + # For faster CI feedback, immediately schedule the linters + << : *CREDITS_TEMPLATE + env: + << : *CIRRUS_EPHEMERAL_WORKER_TEMPLATE_ENV + FILE_ENV: "./ci/test/00_setup_env_native_tidy.sh" + +task: + name: "Win64 native [msvc]" + << : *FILTER_TEMPLATE + windows_container: + cpu: 4 + memory: 8G + image: cirrusci/windowsservercore:visualstudio2019 + timeout_in: 120m + env: + PATH: 'C:\jom;C:\Python39;C:\Python39\Scripts;C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\MSBuild\Current\Bin;%PATH%' + PYTHONUTF8: 1 + CI_VCPKG_TAG: '2022.04.12' + VCPKG_DOWNLOADS: 'C:\Users\ContainerAdministrator\AppData\Local\vcpkg\downloads' + VCPKG_DEFAULT_BINARY_CACHE: 'C:\Users\ContainerAdministrator\AppData\Local\vcpkg\archives' + CCACHE_DIR: 'C:\Users\ContainerAdministrator\AppData\Local\ccache' + WRAPPED_CL: 'C:\Users\ContainerAdministrator\AppData\Local\Temp\cirrus-ci-build\ci\test\wrapped-cl.bat' + QT_DOWNLOAD_URL: 'https://download.qt.io/official_releases/qt/5.15/5.15.3/single/qt-everywhere-opensource-src-5.15.3.zip' + QT_LOCAL_PATH: 'C:\qt-everywhere-opensource-src-5.15.3.zip' + QT_SOURCE_DIR: 'C:\qt-everywhere-src-5.15.3' + QTBASEDIR: 'C:\Qt_static' + x64_NATIVE_TOOLS: '"C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Auxiliary\Build\vcvars64.bat"' + QT_CONFIGURE_COMMAND: '..\configure -release -silent -opensource -confirm-license -opengl desktop -static -static-runtime -mp -qt-zlib -qt-pcre -qt-libpng -nomake examples -nomake tests -nomake tools -no-angle -no-dbus -no-gif -no-gtk -no-ico -no-icu -no-libjpeg -no-libudev -no-sql-sqlite -no-sql-odbc -no-sqlite -no-vulkan -skip qt3d -skip qtactiveqt -skip qtandroidextras -skip qtcharts -skip qtconnectivity -skip qtdatavis3d -skip qtdeclarative -skip doc -skip qtdoc -skip qtgamepad -skip qtgraphicaleffects -skip qtimageformats -skip qtlocation -skip qtlottie -skip qtmacextras -skip qtmultimedia -skip qtnetworkauth -skip qtpurchasing -skip qtquick3d -skip qtquickcontrols -skip qtquickcontrols2 -skip qtquicktimeline -skip qtremoteobjects -skip qtscript -skip qtscxml -skip qtsensors -skip qtserialbus -skip qtserialport -skip qtspeech -skip qtsvg -skip qtvirtualkeyboard -skip qtwayland -skip qtwebchannel -skip qtwebengine -skip qtwebglplugin -skip qtwebsockets -skip qtwebview -skip qtx11extras -skip qtxmlpatterns -no-openssl -no-feature-bearermanagement -no-feature-printdialog -no-feature-printer -no-feature-printpreviewdialog -no-feature-printpreviewwidget 
-no-feature-sql -no-feature-sqlmodel -no-feature-textbrowser -no-feature-textmarkdownwriter -no-feature-textodfwriter -no-feature-xml' + IgnoreWarnIntDirInTempDetected: 'true' + merge_script: + - git config --global user.email "ci@ci.ci" + - git config --global user.name "ci" + # Windows filesystem loses the executable bit, and all of the executable + # files are considered "modified" now. It will break the following `git merge` + # command. The next two commands make git ignore this issue. + - git config core.filemode false + - git reset --hard + - PowerShell -NoLogo -Command if ($env:CIRRUS_PR -ne $null) { git fetch $env:CIRRUS_REPO_CLONE_URL $env:CIRRUS_BASE_BRANCH; git merge FETCH_HEAD; } + msvc_qt_built_cache: + folder: "%QTBASEDIR%" + reupload_on_changes: false + fingerprint_script: + - echo %QT_DOWNLOAD_URL% %QT_CONFIGURE_COMMAND% + - msbuild -version + populate_script: + - curl -L -o C:\jom.zip http://download.qt.io/official_releases/jom/jom.zip + - mkdir C:\jom + - tar -xf C:\jom.zip -C C:\jom + - curl -L -o %QT_LOCAL_PATH% %QT_DOWNLOAD_URL% + - tar -xf %QT_LOCAL_PATH% -C C:\ + - '%x64_NATIVE_TOOLS%' + - cd %QT_SOURCE_DIR% + - mkdir build + - cd build + - '%QT_CONFIGURE_COMMAND% -prefix %QTBASEDIR%' + - jom + - jom install + vcpkg_tools_cache: + folder: '%VCPKG_DOWNLOADS%\tools' + reupload_on_changes: false + fingerprint_script: + - echo %CI_VCPKG_TAG% + - msbuild -version + vcpkg_binary_cache: + folder: '%VCPKG_DEFAULT_BINARY_CACHE%' + reupload_on_changes: true + fingerprint_script: + - echo %CI_VCPKG_TAG% + - type build_msvc\vcpkg.json + - msbuild -version + populate_script: + - mkdir %VCPKG_DEFAULT_BINARY_CACHE% + ccache_cache: + folder: '%CCACHE_DIR%' + install_tools_script: + - choco install --yes --no-progress ccache + - choco install --yes --no-progress python3 --version=3.9.6 + - pip install zmq + - ccache --version + - python -VV + install_vcpkg_script: + - cd .. + - git clone --quiet https://github.com/microsoft/vcpkg.git + - cd vcpkg + - git -c advice.detachedHead=false checkout %CI_VCPKG_TAG% + - .\bootstrap-vcpkg -disableMetrics + - echo set(VCPKG_BUILD_TYPE release) >> triplets\x64-windows-static.cmake + - .\vcpkg integrate install + - .\vcpkg version + build_script: + - '%x64_NATIVE_TOOLS%' + - cd %CIRRUS_WORKING_DIR% + - ccache --zero-stats + - python build_msvc\msvc-autogen.py + - msbuild build_msvc\bitcoin.sln -property:CLToolExe=%WRAPPED_CL% -property:Configuration=Release -maxCpuCount -verbosity:minimal -noLogo + - ccache --show-stats + unit_tests_script: + - src\test_bitcoin.exe -l test_suite + - src\bench_bitcoin.exe > NUL + - python test\util\test_runner.py + - python test\util\rpcauth-test.py + functional_tests_script: + # Increase the dynamic port range to the maximum allowed value to mitigate "OSError: [WinError 10048] Only one usage of each socket address (protocol/network address/port) is normally permitted". 
+ # See: https://docs.microsoft.com/en-us/biztalk/technical-guides/settings-that-can-be-modified-to-improve-network-performance + - netsh int ipv4 set dynamicport tcp start=1025 num=64511 + - netsh int ipv6 set dynamicport tcp start=1025 num=64511 + # Exclude feature_dbcrash for now due to timeout + - python test\functional\test_runner.py --nocleanup --ci --quiet --combinedlogslen=4000 --jobs=4 --timeout-factor=8 --extended --exclude feature_dbcrash + +task: + name: 'ARM [unit tests, no functional tests] [bullseye]' + << : *GLOBAL_TASK_TEMPLATE + arm_container: + image: debian:bullseye + cpu: 2 + memory: 8G + env: + << : *CIRRUS_EPHEMERAL_WORKER_TEMPLATE_ENV + FILE_ENV: "./ci/test/00_setup_env_arm.sh" + QEMU_USER_CMD: "" # Disable qemu and run the test natively + +task: + name: 'Win64 [unit tests, no gui tests, no boost::process, no functional tests] [jammy]' + << : *GLOBAL_TASK_TEMPLATE + container: + image: ubuntu:jammy + env: + << : *CIRRUS_EPHEMERAL_WORKER_TEMPLATE_ENV + FILE_ENV: "./ci/test/00_setup_env_win64.sh" + +task: + name: '32-bit + dash [gui] [CentOS 8]' + << : *GLOBAL_TASK_TEMPLATE + container: + image: quay.io/centos/centos:stream8 + env: + << : *CIRRUS_EPHEMERAL_WORKER_TEMPLATE_ENV + PACKAGE_MANAGER_INSTALL: "yum install -y" + FILE_ENV: "./ci/test/00_setup_env_i686_centos.sh" + +task: + name: '[previous releases, uses qt5 dev package and some depends packages, DEBUG] [unsigned char] [buster]' + previous_releases_cache: + folder: "releases" + << : *GLOBAL_TASK_TEMPLATE + << : *PERSISTENT_WORKER_TEMPLATE + env: + << : *PERSISTENT_WORKER_TEMPLATE_ENV + FILE_ENV: "./ci/test/00_setup_env_native_qt5.sh" + +task: + name: '[TSan, depends, gui] [jammy]' + << : *GLOBAL_TASK_TEMPLATE + container: + image: ubuntu:jammy + cpu: 6 # Increase CPU and Memory to avoid timeout + memory: 24G + env: + << : *CIRRUS_EPHEMERAL_WORKER_TEMPLATE_ENV + FILE_ENV: "./ci/test/00_setup_env_native_tsan.sh" + +task: + name: '[MSan, depends] [focal]' + << : *GLOBAL_TASK_TEMPLATE + container: + image: ubuntu:focal + env: + << : *CIRRUS_EPHEMERAL_WORKER_TEMPLATE_ENV + FILE_ENV: "./ci/test/00_setup_env_native_msan.sh" + MAKEJOBS: "-j4" # Avoid excessive memory use due to MSan + +task: + name: '[ASan + LSan + UBSan + integer, no depends] [jammy]' + << : *GLOBAL_TASK_TEMPLATE + container: + image: ubuntu:jammy + env: + << : *CIRRUS_EPHEMERAL_WORKER_TEMPLATE_ENV + FILE_ENV: "./ci/test/00_setup_env_native_asan.sh" + MAKEJOBS: "-j4" # Avoid excessive memory use + +task: + name: '[fuzzer,address,undefined,integer, no depends] [jammy]' + << : *GLOBAL_TASK_TEMPLATE + container: + image: ubuntu:jammy + cpu: 4 # Increase CPU and memory to avoid timeout + memory: 16G + env: + << : *CIRRUS_EPHEMERAL_WORKER_TEMPLATE_ENV + FILE_ENV: "./ci/test/00_setup_env_native_fuzz.sh" + +task: + name: '[multiprocess, i686, DEBUG] [focal]' + << : *GLOBAL_TASK_TEMPLATE + container: + image: ubuntu:focal + cpu: 4 + memory: 16G # The default memory is sometimes just a bit too small, so double everything + env: + << : *CIRRUS_EPHEMERAL_WORKER_TEMPLATE_ENV + FILE_ENV: "./ci/test/00_setup_env_i686_multiprocess.sh" + +task: + name: '[no wallet, libbitcoinkernel] [bionic]' + << : *GLOBAL_TASK_TEMPLATE + container: + image: ubuntu:bionic + env: + << : *CIRRUS_EPHEMERAL_WORKER_TEMPLATE_ENV + FILE_ENV: "./ci/test/00_setup_env_native_nowallet_libbitcoinkernel.sh" + +task: + name: 'macOS 10.15 [gui, no tests] [focal]' + << : *BASE_TEMPLATE + macos_sdk_cache: + folder: "depends/SDKs/$MACOS_SDK" + fingerprint_key: "$MACOS_SDK" + << : *MAIN_TEMPLATE + 
container: + image: ubuntu:focal + env: + MACOS_SDK: "Xcode-12.2-12B45b-extracted-SDK-with-libcxx-headers" + << : *CIRRUS_EPHEMERAL_WORKER_TEMPLATE_ENV + FILE_ENV: "./ci/test/00_setup_env_mac.sh" + +task: + name: 'macOS 12 native [gui, system sqlite only] [no depends]' + brew_install_script: + - brew install boost libevent qt@5 miniupnpc libnatpmp ccache zeromq qrencode libtool automake gnu-getopt + << : *GLOBAL_TASK_TEMPLATE + macos_instance: + # Use latest image, but hardcode version to avoid silent upgrades (and breaks) + image: monterey-xcode-13.3 # https://cirrus-ci.org/guide/macOS + env: + << : *CIRRUS_EPHEMERAL_WORKER_TEMPLATE_ENV + CI_USE_APT_INSTALL: "no" + PACKAGE_MANAGER_INSTALL: "echo" # Nothing to do + FILE_ENV: "./ci/test/00_setup_env_mac_host.sh" + +task: + name: 'ARM64 Android APK [focal]' + << : *BASE_TEMPLATE + android_sdk_cache: + folder: "depends/SDKs/android" + fingerprint_key: "ANDROID_API_LEVEL=28 ANDROID_BUILD_TOOLS_VERSION=28.0.3 ANDROID_NDK_VERSION=23.1.7779620" + depends_sources_cache: + folder: "depends/sources" + fingerprint_script: git rev-list -1 HEAD ./depends + << : *MAIN_TEMPLATE + container: + image: ubuntu:focal + env: + << : *CIRRUS_EPHEMERAL_WORKER_TEMPLATE_ENV + FILE_ENV: "./ci/test/00_setup_env_android.sh" diff --git a/.editorconfig b/.editorconfig new file mode 100644 index 0000000000000..ae7e92d1c8a82 --- /dev/null +++ b/.editorconfig @@ -0,0 +1,26 @@ +# This is the top-most EditorConfig file. +root = true + +# For all files. +[*] +charset = utf-8 +end_of_line = lf +indent_style = space +insert_final_newline = true +trim_trailing_whitespace = true + +# Source code files +[*.{h,cpp,py,sh}] +indent_size = 4 + +# .cirrus.yml, .fuzzbuzz.yml, etc. +[*.yml] +indent_size = 2 + +# Makefiles +[{*.am,Makefile.*.include}] +indent_style = tab + +# Autoconf scripts +[configure.ac] +indent_size = 2 diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md new file mode 100644 index 0000000000000..eedeeb4e54c58 --- /dev/null +++ b/.github/ISSUE_TEMPLATE.md @@ -0,0 +1,27 @@ + + + + + + + + + + + + + + + + + diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md new file mode 100644 index 0000000000000..fb91208954ea9 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -0,0 +1,41 @@ +--- +name: Bug report +about: Create a report to help us improve (use this for suspected bugs only, if not sure, open a regular issue below) +title: '' +labels: Bug +assignees: '' + +--- + + + + + +**Expected behavior** + + + +**Actual behavior** + + + +**To reproduce** + + + +**System information** + + + + + + + + + diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md new file mode 100644 index 0000000000000..2d5685185ea36 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature_request.md @@ -0,0 +1,20 @@ +--- +name: Feature request +about: Suggest an idea for this project +title: '' +labels: Feature +assignees: '' + +--- + +**Is your feature request related to a problem? 
Please describe.** + + +**Describe the solution you'd like** + + +**Describe alternatives you've considered** + + +**Additional context** + diff --git a/.github/ISSUE_TEMPLATE/good_first_issue.md b/.github/ISSUE_TEMPLATE/good_first_issue.md new file mode 100644 index 0000000000000..d32e22d36079a --- /dev/null +++ b/.github/ISSUE_TEMPLATE/good_first_issue.md @@ -0,0 +1,22 @@ +--- +name: Good first issue +about: '(Regular devs only): Suggest a new good first issue' +title: '' +labels: '' +assignees: '' + +--- + + + + + + + +#### Useful skills: + + + +#### Want to work on this issue? + +For guidance on contributing, please read [CONTRIBUTING.md](https://github.com/bitcoin/bitcoin/blob/master/CONTRIBUTING.md) before opening your pull request. diff --git a/.github/ISSUE_TEMPLATE/gui_issue.md b/.github/ISSUE_TEMPLATE/gui_issue.md new file mode 100644 index 0000000000000..37acc81e21e6c --- /dev/null +++ b/.github/ISSUE_TEMPLATE/gui_issue.md @@ -0,0 +1,11 @@ +--- +name: An issue or feature request related to the GUI +about: Any report, issue or feature request related to the GUI should be reported at https://github.com/bitcoin-core/gui/issues/ +title: Any report, issue or feature request related to the GUI should be reported at https://github.com/bitcoin-core/gui/issues/ +labels: GUI +assignees: '' + +--- + +Any report, issue or feature request related to the GUI should be reported at +https://github.com/bitcoin-core/gui/issues/ diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 0000000000000..ae92fc78f2d1c --- /dev/null +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,43 @@ + + + + + diff --git a/.gitignore b/.gitignore index 7343e722d5c4f..6c888bfdc46e4 100644 --- a/.gitignore +++ b/.gitignore @@ -1,10 +1,17 @@ *.tar.gz *.exe +*.pdb src/bitcoin src/bitcoind src/bitcoin-cli +src/bitcoin-gui +src/bitcoin-node src/bitcoin-tx +src/bitcoin-util +src/bitcoin-chainstate +src/bitcoin-wallet +src/test/fuzz/fuzz src/test/test_bitcoin src/qt/test/test_bitcoin-qt @@ -25,6 +32,7 @@ build-aux/m4/ltversion.m4 build-aux/missing build-aux/compile build-aux/test-driver +config.cache config.log config.status configure @@ -32,6 +40,7 @@ libtool src/config/bitcoin-config.h src/config/bitcoin-config.h.in src/config/stamp-h1 +src/obj share/setup.nsi share/qt/Info.plist @@ -43,22 +52,27 @@ src/qt/forms/ui_*.h src/qt/test/moc*.cpp +src/qt/bitcoin-qt.config +src/qt/bitcoin-qt.creator +src/qt/bitcoin-qt.creator.user +src/qt/bitcoin-qt.files +src/qt/bitcoin-qt.includes + .deps .dirstamp .libs .*.swp -*.*~* +*~ *.bak *.rej *.orig *.pyc *.o *.o-* -*.patch -.bitcoin *.a *.pb.cc *.pb.h +*.dat *.log *.trs @@ -67,6 +81,11 @@ src/qt/test/moc*.cpp *.json.h *.raw.h +# Only ignore unexpected patches +*.patch +!contrib/guix/patches/*.patch +!depends/patches/**/*.patch + #libtool object files *.lo *.la @@ -74,40 +93,62 @@ src/qt/test/moc*.cpp # Compilation and Qt preprocessor part *.qm Makefile -bitcoin-qt +!depends/Makefile +src/qt/bitcoin-qt Bitcoin-Qt.app +# Qt Creator +Makefile.am.user + # Unit-tests Makefile.test bitcoin-qt_test -src/test/buildenv.py # Resources cpp qrc_*.cpp -# Qt creator -*.pro.user - # Mac specific .DS_Store build +# Previous releases +releases + #lcov *.gcno +*.gcda /*.info test_bitcoin.coverage/ total.coverage/ +fuzz.coverage/ coverage_percent.txt +/cov_tool_wrapper.sh +qa-assets/ #build tests linux-coverage-build linux-build win32-build -qa/pull-tester/run-bitcoind-for-test.sh -qa/pull-tester/tests-config.sh -qa/pull-tester/cache/* 
-qa/pull-tester/test.*/* +test/config.ini +test/cache/* +test/.mypy_cache/ !src/leveldb*/Makefile /doc/doxygen/ + +libbitcoinconsensus.pc +contrib/devtools/split-debug.sh + +# Output from running db4 installation +db4/ + +# clang-check +*.plist + +osx_volname +dist/ + +/guix-build-* + +/ci/scratch/ diff --git a/.python-version b/.python-version new file mode 100644 index 0000000000000..8b7b0b52e55c3 --- /dev/null +++ b/.python-version @@ -0,0 +1 @@ +3.6.12 diff --git a/.style.yapf b/.style.yapf new file mode 100644 index 0000000000000..69d8c6aee417d --- /dev/null +++ b/.style.yapf @@ -0,0 +1,261 @@ +[style] +# Align closing bracket with visual indentation. +align_closing_bracket_with_visual_indent=True + +# Allow dictionary keys to exist on multiple lines. For example: +# +# x = { +# ('this is the first element of a tuple', +# 'this is the second element of a tuple'): +# value, +# } +allow_multiline_dictionary_keys=False + +# Allow lambdas to be formatted on more than one line. +allow_multiline_lambdas=False + +# Allow splits before the dictionary value. +allow_split_before_dict_value=True + +# Number of blank lines surrounding top-level function and class +# definitions. +blank_lines_around_top_level_definition=2 + +# Insert a blank line before a class-level docstring. +blank_line_before_class_docstring=False + +# Insert a blank line before a module docstring. +blank_line_before_module_docstring=False + +# Insert a blank line before a 'def' or 'class' immediately nested +# within another 'def' or 'class'. For example: +# +# class Foo: +# # <------ this blank line +# def method(): +# ... +blank_line_before_nested_class_or_def=False + +# Do not split consecutive brackets. Only relevant when +# dedent_closing_brackets is set. For example: +# +# call_func_that_takes_a_dict( +# { +# 'key1': 'value1', +# 'key2': 'value2', +# } +# ) +# +# would reformat to: +# +# call_func_that_takes_a_dict({ +# 'key1': 'value1', +# 'key2': 'value2', +# }) +coalesce_brackets=False + +# The column limit. +column_limit=160 + +# The style for continuation alignment. Possible values are: +# +# - SPACE: Use spaces for continuation alignment. This is default behavior. +# - FIXED: Use fixed number (CONTINUATION_INDENT_WIDTH) of columns +# (ie: CONTINUATION_INDENT_WIDTH/INDENT_WIDTH tabs) for continuation +# alignment. +# - LESS: Slightly left if cannot vertically align continuation lines with +# indent characters. +# - VALIGN-RIGHT: Vertically align continuation lines with indent +# characters. Slightly right (one more indent character) if cannot +# vertically align continuation lines with indent characters. +# +# For options FIXED, and VALIGN-RIGHT are only available when USE_TABS is +# enabled. +continuation_align_style=SPACE + +# Indent width used for line continuations. +continuation_indent_width=4 + +# Put closing brackets on a separate line, dedented, if the bracketed +# expression can't fit in a single line. Applies to all kinds of brackets, +# including function definitions and calls. 
For example: +# +# config = { +# 'key1': 'value1', +# 'key2': 'value2', +# } # <--- this bracket is dedented and on a separate line +# +# time_series = self.remote_client.query_entity_counters( +# entity='dev3246.region1', +# key='dns.query_latency_tcp', +# transform=Transformation.AVERAGE(window=timedelta(seconds=60)), +# start_ts=now()-timedelta(days=3), +# end_ts=now(), +# ) # <--- this bracket is dedented and on a separate line +dedent_closing_brackets=False + +# Disable the heuristic which places each list element on a separate line +# if the list is comma-terminated. +disable_ending_comma_heuristic=False + +# Place each dictionary entry onto its own line. +each_dict_entry_on_separate_line=True + +# The regex for an i18n comment. The presence of this comment stops +# reformatting of that line, because the comments are required to be +# next to the string they translate. +i18n_comment= + +# The i18n function call names. The presence of this function stops +# reformattting on that line, because the string it has cannot be moved +# away from the i18n comment. +i18n_function_call= + +# Indent the dictionary value if it cannot fit on the same line as the +# dictionary key. For example: +# +# config = { +# 'key1': +# 'value1', +# 'key2': value1 + +# value2, +# } +indent_dictionary_value=False + +# The number of columns to use for indentation. +indent_width=4 + +# Join short lines into one line. E.g., single line 'if' statements. +join_multiple_lines=True + +# Do not include spaces around selected binary operators. For example: +# +# 1 + 2 * 3 - 4 / 5 +# +# will be formatted as follows when configured with "*,/": +# +# 1 + 2*3 - 4/5 +# +no_spaces_around_selected_binary_operators= + +# Use spaces around default or named assigns. +spaces_around_default_or_named_assign=False + +# Use spaces around the power operator. +spaces_around_power_operator=False + +# The number of spaces required before a trailing comment. +spaces_before_comment=2 + +# Insert a space between the ending comma and closing bracket of a list, +# etc. +space_between_ending_comma_and_closing_bracket=True + +# Split before arguments +split_all_comma_separated_values=False + +# Split before arguments if the argument list is terminated by a +# comma. +split_arguments_when_comma_terminated=False + +# Set to True to prefer splitting before '&', '|' or '^' rather than +# after. +split_before_bitwise_operator=True + +# Split before the closing bracket if a list or dict literal doesn't fit on +# a single line. +split_before_closing_bracket=True + +# Split before a dictionary or set generator (comp_for). For example, note +# the split before the 'for': +# +# foo = { +# variable: 'Hello world, have a nice day!' +# for variable in bar if variable != 42 +# } +split_before_dict_set_generator=True + +# Split before the '.' if we need to split a longer expression: +# +# foo = ('This is a really long string: {}, {}, {}, {}'.format(a, b, c, d)) +# +# would reformat to something like: +# +# foo = ('This is a really long string: {}, {}, {}, {}' +# .format(a, b, c, d)) +split_before_dot=False + +# Split after the opening paren which surrounds an expression if it doesn't +# fit on a single line. +split_before_expression_after_opening_paren=False + +# If an argument / parameter list is going to be split, then split before +# the first argument. +split_before_first_argument=False + +# Set to True to prefer splitting before 'and' or 'or' rather than +# after. +split_before_logical_operator=True + +# Split named assignments onto individual lines. 
+split_before_named_assigns=True + +# Set to True to split list comprehensions and generators that have +# non-trivial expressions and multiple clauses before each of these +# clauses. For example: +# +# result = [ +# a_long_var + 100 for a_long_var in xrange(1000) +# if a_long_var % 10] +# +# would reformat to something like: +# +# result = [ +# a_long_var + 100 +# for a_long_var in xrange(1000) +# if a_long_var % 10] +split_complex_comprehension=False + +# The penalty for splitting right after the opening bracket. +split_penalty_after_opening_bracket=30 + +# The penalty for splitting the line after a unary operator. +split_penalty_after_unary_operator=10000 + +# The penalty for splitting right before an if expression. +split_penalty_before_if_expr=0 + +# The penalty of splitting the line around the '&', '|', and '^' +# operators. +split_penalty_bitwise_operator=300 + +# The penalty for splitting a list comprehension or generator +# expression. +split_penalty_comprehension=80 + +# The penalty for characters over the column limit. +split_penalty_excess_character=7000 + +# The penalty incurred by adding a line split to the unwrapped line. The +# more line splits added the higher the penalty. +split_penalty_for_added_line_split=30 + +# The penalty of splitting a list of "import as" names. For example: +# +# from a_very_long_or_indented_module_name_yada_yad import (long_argument_1, +# long_argument_2, +# long_argument_3) +# +# would reformat to something like: +# +# from a_very_long_or_indented_module_name_yada_yad import ( +# long_argument_1, long_argument_2, long_argument_3) +split_penalty_import_names=0 + +# The penalty of splitting the line around the 'and' and 'or' +# operators. +split_penalty_logical_operator=300 + +# Use the Tab character for indentation. +use_tabs=False + diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index 1630c1d02a4da..0000000000000 --- a/.travis.yml +++ /dev/null @@ -1,66 +0,0 @@ -# errata: -# - A travis bug causes caches to trample eachother when using the same -# compiler key (which we don't use anyway). This is worked around for now by -# replacing the "compilers" with a build name prefixed by the no-op ":" -# command. 
See: https://github.com/travis-ci/casher/issues/6 - -os: linux -language: cpp -env: - global: - - MAKEJOBS=-j3 - - RUN_TESTS=false - - CCACHE_SIZE=100M - - CCACHE_TEMPDIR=/tmp/.ccache-temp - - CCACHE_COMPRESS=1 - - BASE_OUTDIR=$TRAVIS_BUILD_DIR/out - - SDK_URL=https://bitcoincore.org/depends-sources/sdks -cache: - apt: true - directories: - - depends/built - - depends/sdk-sources - - $HOME/.ccache -matrix: - fast_finish: true - include: - - compiler: ": ARM" - env: HOST=arm-linux-gnueabihf PACKAGES="g++-arm-linux-gnueabihf" DEP_OPTS="NO_QT=1" GOAL="install" BITCOIN_CONFIG="--enable-glibc-back-compat" - - compiler: ": bitcoind" - env: HOST=x86_64-unknown-linux-gnu PACKAGES="bc" DEP_OPTS="NO_QT=1 NO_UPNP=1 DEBUG=1" RUN_TESTS=true GOAL="install" BITCOIN_CONFIG="--enable-glibc-back-compat CPPFLAGS=-DDEBUG_LOCKORDER" - - compiler: ": No wallet" - env: HOST=x86_64-unknown-linux-gnu DEP_OPTS="NO_WALLET=1" RUN_TESTS=true GOAL="install" BITCOIN_CONFIG="--enable-glibc-back-compat" - - compiler: ": 32-bit + dash" - env: HOST=i686-pc-linux-gnu PACKAGES="g++-multilib bc" RUN_TESTS=true GOAL="install" BITCOIN_CONFIG="--enable-glibc-back-compat" USE_SHELL="/bin/dash" - - compiler: ": Cross-Mac" - env: HOST=x86_64-apple-darwin11 PACKAGES="gcc-multilib g++-multilib cmake libcap-dev libz-dev libbz2-dev" OSX_SDK=10.7 GOAL="deploy" - - compiler: ": Win64" - env: HOST=x86_64-w64-mingw32 PACKAGES="nsis gcc-mingw-w64-x86-64 g++-mingw-w64-x86-64 binutils-mingw-w64-x86-64 mingw-w64-dev wine bc" RUN_TESTS=true GOAL="deploy" BITCOIN_CONFIG="--enable-gui" MAKEJOBS="-j2" - - compiler: ": Win32" - env: HOST=i686-w64-mingw32 PACKAGES="nsis gcc-mingw-w64-i686 g++-mingw-w64-i686 binutils-mingw-w64-i686 mingw-w64-dev wine bc" RUN_TESTS=true GOAL="deploy" BITCOIN_CONFIG="--enable-gui" MAKEJOBS="-j2" -install: - - if [ -n "$PACKAGES" ]; then travis_retry sudo apt-get update; fi - - if [ -n "$PACKAGES" ]; then travis_retry sudo apt-get install --no-install-recommends --no-upgrade -qq $PACKAGES; fi -before_script: - - unset CC; unset CXX - - mkdir -p depends/SDKs depends/sdk-sources - - if [ -n "$OSX_SDK" -a ! -f depends/sdk-sources/MacOSX${OSX_SDK}.sdk.tar.gz ]; then wget $SDK_URL/MacOSX${OSX_SDK}.sdk.tar.gz -O depends/sdk-sources/MacOSX${OSX_SDK}.sdk.tar.gz; fi - - if [ -n "$OSX_SDK" -a -f depends/sdk-sources/MacOSX${OSX_SDK}.sdk.tar.gz ]; then tar -C depends/SDKs -xf depends/sdk-sources/MacOSX${OSX_SDK}.sdk.tar.gz; fi - - make $MAKEJOBS -C depends HOST=$HOST $DEP_OPTS -script: - - if [ -n "$USE_SHELL" ]; then export CONFIG_SHELL="$USE_SHELL"; fi - - OUTDIR=$BASE_OUTDIR/$TRAVIS_PULL_REQUEST/$TRAVIS_JOB_NUMBER-$HOST - - BITCOIN_CONFIG_ALL="--disable-dependency-tracking --prefix=$TRAVIS_BUILD_DIR/depends/$HOST --bindir=$OUTDIR/bin --libdir=$OUTDIR/lib" - - depends/$HOST/native/bin/ccache --max-size=$CCACHE_SIZE - - if [ "$TRAVIS_PULL_REQUEST" != "false" ]; then export CCACHE_READONLY=1; fi - - test -n "$USE_SHELL" && eval '"$USE_SHELL" -c "./autogen.sh"' || ./autogen.sh - - ./configure --cache-file=config.cache $BITCOIN_CONFIG_ALL $BITCOIN_CONFIG || ( cat config.log && false) - - make distdir PACKAGE=bitcoin VERSION=$HOST - - cd bitcoin-$HOST - - ./configure --cache-file=../config.cache $BITCOIN_CONFIG_ALL $BITCOIN_CONFIG || ( cat config.log && false) - - make $MAKEJOBS $GOAL || ( echo "Build failure. Verbose build follows." 
&& make $GOAL V=1 ; false ) - - export LD_LIBRARY_PATH=$TRAVIS_BUILD_DIR/depends/$HOST/lib - - if [ "$RUN_TESTS" = "true" ]; then make check; fi - - if [ "$RUN_TESTS" = "true" ]; then qa/pull-tester/rpc-tests.sh; fi -after_script: - - if [ "$TRAVIS_PULL_REQUEST" != "false" ]; then (echo "Upload goes here. Something like: scp -r $BASE_OUTDIR server" || echo "upload failed"); fi diff --git a/.tx/config b/.tx/config index 472d27b46fe13..c4fe7cc324d5d 100644 --- a/.tx/config +++ b/.tx/config @@ -1,7 +1,7 @@ [main] host = https://www.transifex.com -[bitcoin.qt-translation-010x] -file_filter = src/qt/locale/bitcoin_.ts -source_file = src/qt/locale/bitcoin_en.ts +[bitcoin.qt-translation-023x] +file_filter = src/qt/locale/bitcoin_.xlf +source_file = src/qt/locale/bitcoin_en.xlf source_lang = en diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 0000000000000..aec6995d3b63f --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,444 @@ +Contributing to Bitcoin Core +============================ + +The Bitcoin Core project operates an open contributor model where anyone is +welcome to contribute towards development in the form of peer review, testing +and patches. This document explains the practical process and guidelines for +contributing. + +First, in terms of structure, there is no particular concept of "Bitcoin Core +developers" in the sense of privileged people. Open source often naturally +revolves around a meritocracy where contributors earn trust from the developer +community over time. Nevertheless, some hierarchy is necessary for practical +purposes. As such, there are repository maintainers who are responsible for +merging pull requests, the [release cycle](/doc/release-process.md), and +moderation. + +Getting Started +--------------- + +New contributors are very welcome and needed. + +Reviewing and testing is highly valued and the most effective way you can contribute +as a new contributor. It also will teach you much more about the code and +process than opening pull requests. Please refer to the [peer review](#peer-review) +section below. + +Before you start contributing, familiarize yourself with the Bitcoin Core build +system and tests. Refer to the documentation in the repository on how to build +Bitcoin Core and how to run the unit tests, functional tests, and fuzz tests. + +There are many open issues of varying difficulty waiting to be fixed. +If you're looking for somewhere to start contributing, check out the +[good first issue](https://github.com/bitcoin/bitcoin/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22) +list or changes that are +[up for grabs](https://github.com/bitcoin/bitcoin/issues?utf8=%E2%9C%93&q=label%3A%22Up+for+grabs%22). +Some of them might no longer be applicable. So if you are interested, but +unsure, you might want to leave a comment on the issue first. + +You may also participate in the weekly +[Bitcoin Core PR Review Club](https://bitcoincore.reviews/) meeting. + +### Good First Issue Label + +The purpose of the `good first issue` label is to highlight which issues are +suitable for a new contributor without a deep understanding of the codebase. + +However, good first issues can be solved by anyone. If they remain unsolved +for a longer time, a frequent contributor might address them. + +You do not need to request permission to start working on an issue. However, +you are encouraged to leave a comment if you are planning to work on it. 
This +will help other contributors monitor which issues are actively being addressed +and is also an effective way to request assistance if and when you need it. + +Communication Channels +---------------------- + +Most communication about Bitcoin Core development happens on IRC, in the +`#bitcoin-core-dev` channel on Libera Chat. The easiest way to participate on IRC is +with the web client, [web.libera.chat](https://web.libera.chat/#bitcoin-core-dev). Chat +history logs can be found +on [https://www.erisian.com.au/bitcoin-core-dev/](https://www.erisian.com.au/bitcoin-core-dev/) +and [https://gnusha.org/bitcoin-core-dev/](https://gnusha.org/bitcoin-core-dev/). + +Discussion about codebase improvements happens in GitHub issues and pull +requests. + +The developer +[mailing list](https://lists.linuxfoundation.org/mailman/listinfo/bitcoin-dev) +should be used to discuss complicated or controversial consensus or P2P protocol changes before working on +a patch set. + + +Contributor Workflow +-------------------- + +The codebase is maintained using the "contributor workflow" where everyone +without exception contributes patch proposals using "pull requests" (PRs). This +facilitates social contribution, easy testing and peer review. + +To contribute a patch, the workflow is as follows: + + 1. Fork repository ([only for the first time](https://docs.github.com/en/get-started/quickstart/fork-a-repo)) + 1. Create topic branch + 1. Commit patches + +For GUI-related issues or pull requests, the https://github.com/bitcoin-core/gui repository should be used. +For all other issues and pull requests, the https://github.com/bitcoin/bitcoin node repository should be used. + +The master branch for all monotree repositories is identical. + +As a rule of thumb, everything that only modifies `src/qt` is a GUI-only pull +request. However: + +* For global refactoring or other transversal changes the node repository + should be used. +* For GUI-related build system changes, the node repository should be used + because the change needs review by the build systems reviewers. +* Changes in `src/interfaces` need to go to the node repository because they + might affect other components like the wallet. + +For large GUI changes that include build system and interface changes, it is +recommended to first open a pull request against the GUI repository. When there +is agreement to proceed with the changes, a pull request with the build system +and interfaces changes can be submitted to the node repository. + +The project coding conventions in the [developer notes](doc/developer-notes.md) +must be followed. + +### Committing Patches + +In general, [commits should be atomic](https://en.wikipedia.org/wiki/Atomic_commit#Atomic_commit_convention) +and diffs should be easy to read. For this reason, do not mix any formatting +fixes or code moves with actual code changes. + +Make sure each individual commit is hygienic: that it builds successfully on its +own without warnings, errors, regressions, or test failures. + +Commit messages should be verbose by default consisting of a short subject line +(50 chars max), a blank line and detailed explanatory text as separate +paragraph(s), unless the title alone is self-explanatory (like "Correct typo +in init.cpp") in which case a single title line is sufficient. Commit messages should be +helpful to people reading your code in the future, so explain the reasoning for +your decisions. Further explanation [here](https://chris.beams.io/posts/git-commit/). 
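As a purely illustrative sketch of that shape (the subject line, area prefix, and body below are made up for this example and do not refer to a real commit in this repository):

```bash
# Commit with a short subject line, a blank line, then explanatory body text.
# `git commit -F -` reads the full message from stdin.
git commit -F - <<'EOF'
util: Tidy up hypothetical helper in example.cpp

The helper duplicated logic that already exists elsewhere, which made
the two copies drift apart over time. Consolidating them keeps future
fixes in one place. (Subject and body here are illustrative only.)
EOF
```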
+ +If a particular commit references another issue, please add the reference. For +example: `refs #1234` or `fixes #4321`. Using the `fixes` or `closes` keywords +will cause the corresponding issue to be closed when the pull request is merged. + +Commit messages should never contain any `@` mentions (usernames prefixed with "@"). + +Please refer to the [Git manual](https://git-scm.com/doc) for more information +about Git. + + - Push changes to your fork + - Create pull request + +### Creating the Pull Request + +The title of the pull request should be prefixed by the component or area that +the pull request affects. Valid areas as: + + - `consensus` for changes to consensus critical code + - `doc` for changes to the documentation + - `qt` or `gui` for changes to bitcoin-qt + - `log` for changes to log messages + - `mining` for changes to the mining code + - `net` or `p2p` for changes to the peer-to-peer network code + - `refactor` for structural changes that do not change behavior + - `rpc`, `rest` or `zmq` for changes to the RPC, REST or ZMQ APIs + - `script` for changes to the scripts and tools + - `test`, `qa` or `ci` for changes to the unit tests, QA tests or CI code + - `util` or `lib` for changes to the utils or libraries + - `wallet` for changes to the wallet code + - `build` for changes to the GNU Autotools or MSVC builds + - `guix` for changes to the GUIX reproducible builds + +Examples: + + consensus: Add new opcode for BIP-XXXX OP_CHECKAWESOMESIG + net: Automatically create onion service, listen on Tor + qt: Add feed bump button + log: Fix typo in log message + +The body of the pull request should contain sufficient description of *what* the +patch does, and even more importantly, *why*, with justification and reasoning. +You should include references to any discussions (for example, other issues or +mailing list discussions). + +The description for a new pull request should not contain any `@` mentions. The +PR description will be included in the commit message when the PR is merged and +any users mentioned in the description will be annoyingly notified each time a +fork of Bitcoin Core copies the merge. Instead, make any username mentions in a +subsequent comment to the PR. + +### Translation changes + +Note that translations should not be submitted as pull requests. Please see +[Translation Process](https://github.com/bitcoin/bitcoin/blob/master/doc/translation_process.md) +for more information on helping with translations. + +### Work in Progress Changes and Requests for Comments + +If a pull request is not to be considered for merging (yet), please +prefix the title with [WIP] or use [Tasks Lists](https://docs.github.com/en/github/writing-on-github/getting-started-with-writing-and-formatting-on-github/basic-writing-and-formatting-syntax#task-lists) +in the body of the pull request to indicate tasks are pending. + +### Address Feedback + +At this stage, one should expect comments and review from other contributors. You +can add more commits to your pull request by committing them locally and pushing +to your fork. + +You are expected to reply to any review comments before your pull request is +merged. You may update the code or reject the feedback if you do not agree with +it, but you should express so in a reply. If there is outstanding feedback and +you are not actively working on it, your pull request may be closed. + +Please refer to the [peer review](#peer-review) section below for more details. 
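A minimal sketch of that feedback loop, assuming `origin` points at your own fork and `your_branch_name` is the topic branch (both are placeholder names, as in the squashing example below):

```bash
# Address a review comment with a new commit on the same topic branch,
# then push it to your fork so the open pull request picks it up.
git checkout your_branch_name
git commit -a -m "test: address review comments on the new unit test"
git push origin your_branch_name
```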
+ +### Squashing Commits + +If your pull request contains fixup commits (commits that change the same line of code repeatedly) or too fine-grained +commits, you may be asked to [squash](https://git-scm.com/docs/git-rebase#_interactive_mode) your commits +before it will be reviewed. The basic squashing workflow is shown below. + + git checkout your_branch_name + git rebase -i HEAD~n + # n is normally the number of commits in the pull request. + # Set commits (except the one in the first line) from 'pick' to 'squash', save and quit. + # On the next screen, edit/refine commit messages. + # Save and quit. + git push -f # (force push to GitHub) + +Please update the resulting commit message, if needed. It should read as a +coherent message. In most cases, this means not just listing the interim +commits. + +If you have problems with squashing or other git workflows, you can enable +"Allow edits from maintainers" in the right-hand sidebar of the GitHub web +interface and ask for help in the pull request. + +Please refrain from creating several pull requests for the same change. +Use the pull request that is already open (or was created earlier) to amend +changes. This preserves the discussion and review that happened earlier for +the respective change set. + +The length of time required for peer review is unpredictable and will vary from +pull request to pull request. + +### Rebasing Changes + +When a pull request conflicts with the target branch, you may be asked to rebase it on top of the current target branch. +The `git rebase` command will take care of rebuilding your commits on top of the new base. + +This project aims to have a clean git history, where code changes are only made in non-merge commits. This simplifies +auditability because merge commits can be assumed to not contain arbitrary code changes. Merge commits should be signed, +and the resulting git tree hash must be deterministic and reproducible. The script in +[/contrib/verify-commits](/contrib/verify-commits) checks that. + +After a rebase, reviewers are encouraged to sign off on the force push. This should be relatively straightforward with +the `git range-diff` tool explained in the [productivity +notes](/doc/productivity.md#diff-the-diffs-with-git-range-diff). To avoid needless review churn, maintainers will +generally merge pull requests that received the most review attention first. + +Pull Request Philosophy +----------------------- + +Patchsets should always be focused. For example, a pull request could add a +feature, fix a bug, or refactor code; but not a mixture. Please also avoid super +pull requests which attempt to do too much, are overly large, or overly complex +as this makes review difficult. + + +### Features + +When adding a new feature, thought must be given to the long term technical debt +and maintenance that feature may require after inclusion. Before proposing a new +feature that will require maintenance, please consider if you are willing to +maintain it (including bug fixing). If features get orphaned with no maintainer +in the future, they may be removed by the Repository Maintainer. + + +### Refactoring + +Refactoring is a necessary part of any software project's evolution. The +following guidelines cover refactoring pull requests for the project. + +There are three categories of refactoring: code-only moves, code style fixes, and +code refactoring. 
In general, refactoring pull requests should not mix these +three kinds of activities in order to make refactoring pull requests easy to +review and uncontroversial. In all cases, refactoring PRs must not change the +behaviour of code within the pull request (bugs must be preserved as is). + +Project maintainers aim for a quick turnaround on refactoring pull requests, so +where possible keep them short, uncomplex and easy to verify. + +Pull requests that refactor the code should not be made by new contributors. It +requires a certain level of experience to know where the code belongs to and to +understand the full ramification (including rebase effort of open pull requests). + +Trivial pull requests or pull requests that refactor the code with no clear +benefits may be immediately closed by the maintainers to reduce unnecessary +workload on reviewing. + + +"Decision Making" Process +------------------------- + +The following applies to code changes to the Bitcoin Core project (and related +projects such as libsecp256k1), and is not to be confused with overall Bitcoin +Network Protocol consensus changes. + +Whether a pull request is merged into Bitcoin Core rests with the project merge +maintainers. + +Maintainers will take into consideration if a patch is in line with the general +principles of the project; meets the minimum standards for inclusion; and will +judge the general consensus of contributors. + +In general, all pull requests must: + + - Have a clear use case, fix a demonstrable bug or serve the greater good of + the project (for example refactoring for modularisation); + - Be well peer-reviewed; + - Have unit tests, functional tests, and fuzz tests, where appropriate; + - Follow code style guidelines ([C++](doc/developer-notes.md), [functional tests](test/functional/README.md)); + - Not break the existing test suite; + - Where bugs are fixed, where possible, there should be unit tests + demonstrating the bug and also proving the fix. This helps prevent regression. + - Change relevant comments and documentation when behaviour of code changes. + +Patches that change Bitcoin consensus rules are considerably more involved than +normal because they affect the entire ecosystem and so must be preceded by +extensive mailing list discussions and have a numbered BIP. While each case will +be different, one should be prepared to expend more time and effort than for +other kinds of patches because of increased peer review and consensus building +requirements. + + +### Peer Review + +Anyone may participate in peer review which is expressed by comments in the pull +request. Typically reviewers will review the code for obvious errors, as well as +test out the patch set and opine on the technical merits of the patch. Project +maintainers take into account the peer review when determining if there is +consensus to merge a pull request (remember that discussions may have been +spread out over GitHub, mailing list and IRC discussions). + +Code review is a burdensome but important part of the development process, and +as such, certain types of pull requests are rejected. In general, if the +**improvements** do not warrant the **review effort** required, the PR has a +high chance of being rejected. It is up to the PR author to convince the +reviewers that the changes warrant the review effort, and if reviewers are +"Concept NACK'ing" the PR, the author may need to present arguments and/or do +research backing their suggested changes. 
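The inclusion criteria above mention unit and functional tests and not breaking the existing test suite. As a rough, non-authoritative sketch of checking that locally with the autotools build referenced elsewhere in this repository (configure options and parallelism are up to the contributor):

```bash
# Build, then run the unit and functional test suites before requesting review.
./autogen.sh
./configure
make -j"$(nproc)"
make check                               # unit tests
python3 test/functional/test_runner.py   # functional tests
```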
+ +#### Conceptual Review + +A review can be a conceptual review, where the reviewer leaves a comment + * `Concept (N)ACK`, meaning "I do (not) agree with the general goal of this pull + request", + * `Approach (N)ACK`, meaning `Concept ACK`, but "I do (not) agree with the + approach of this change". + +A `NACK` needs to include a rationale why the change is not worthwhile. +NACKs without accompanying reasoning may be disregarded. + +#### Code Review + +After conceptual agreement on the change, code review can be provided. A review +begins with `ACK BRANCH_COMMIT`, where `BRANCH_COMMIT` is the top of the PR +branch, followed by a description of how the reviewer did the review. The +following language is used within pull request comments: + + - "I have tested the code", involving change-specific manual testing in + addition to running the unit, functional, or fuzz tests, and in case it is + not obvious how the manual testing was done, it should be described; + - "I have not tested the code, but I have reviewed it and it looks + OK, I agree it can be merged"; + - A "nit" refers to a trivial, often non-blocking issue. + +Project maintainers reserve the right to weigh the opinions of peer reviewers +using common sense judgement and may also weigh based on merit. Reviewers that +have demonstrated a deeper commitment and understanding of the project over time +or who have clear domain expertise may naturally have more weight, as one would +expect in all walks of life. + +Where a patch set affects consensus-critical code, the bar will be much +higher in terms of discussion and peer review requirements, keeping in mind that +mistakes could be very costly to the wider community. This includes refactoring +of consensus-critical code. + +Where a patch set proposes to change the Bitcoin consensus, it must have been +discussed extensively on the mailing list and IRC, be accompanied by a widely +discussed BIP and have a generally widely perceived technical consensus of being +a worthwhile change based on the judgement of the maintainers. + +### Finding Reviewers + +As most reviewers are themselves developers with their own projects, the review +process can be quite lengthy, and some amount of patience is required. If you find +that you've been waiting for a pull request to be given attention for several +months, there may be a number of reasons for this, some of which you can do something +about: + + - It may be because of a feature freeze due to an upcoming release. During this time, + only bug fixes are taken into consideration. If your pull request is a new feature, + it will not be prioritized until after the release. Wait for the release. + - It may be because the changes you are suggesting do not appeal to people. Rather than + nits and critique, which require effort and means they care enough to spend time on your + contribution, thundering silence is a good sign of widespread (mild) dislike of a given change + (because people don't assume *others* won't actually like the proposal). Don't take + that personally, though! Instead, take another critical look at what you are suggesting + and see if it: changes too much, is too broad, doesn't adhere to the + [developer notes](doc/developer-notes.md), is dangerous or insecure, is messily written, etc. + Identify and address any of the issues you find. Then ask e.g. on IRC if someone could give + their opinion on the concept itself. 
+ - It may be because your code is too complex for all but a few people, and those people + may not have realized your pull request even exists. A great way to find people who + are qualified and care about the code you are touching is the + [Git Blame feature](https://docs.github.com/en/github/managing-files-in-a-repository/managing-files-on-github/tracking-changes-in-a-file). Simply + look up who last modified the code you are changing and see if you can find + them and give them a nudge. Don't be incessant about the nudging, though. + - Finally, if all else fails, ask on IRC or elsewhere for someone to give your pull request + a look. If you think you've been waiting for an unreasonably long time (say, + more than a month) for no particular reason (a few lines changed, etc.), + this is totally fine. Try to return the favor when someone else is asking + for feedback on their code, and the universe balances out. + - Remember that the best thing you can do while waiting is give review to others! + + +Backporting +----------- + +Security and bug fixes can be backported from `master` to release +branches. +If the backport is non-trivial, it may be appropriate to open an +additional PR to backport the change, but only after the original PR +has been merged. +Otherwise, backports will be done in batches and +the maintainers will use the proper `Needs backport (...)` labels +when needed (the original author does not need to worry about it). + +A backport should contain the following metadata in the commit body: + +``` +Github-Pull: # +Rebased-From: +``` + +Have a look at [an example backport PR]( +https://github.com/bitcoin/bitcoin/pull/16189). + +Also see the [backport.py script]( +https://github.com/bitcoin-core/bitcoin-maintainer-tools#backport). + +Copyright +--------- + +By contributing to this repository, you agree to license your work under the +MIT license unless specified otherwise in `contrib/debian/copyright` or at +the top of the file itself. Any work contributed where you are not the original +author must contain its license header with the original author(s) and source. diff --git a/COPYING b/COPYING index 6219bd75a6497..b157c5fe49081 100644 --- a/COPYING +++ b/COPYING @@ -1,4 +1,7 @@ -Copyright (c) 2009-2014 Bitcoin Developers +The MIT License (MIT) + +Copyright (c) 2009-2022 The Bitcoin Core developers +Copyright (c) 2009-2022 Bitcoin Developers Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/INSTALL b/INSTALL deleted file mode 100644 index 07ee48427cd05..0000000000000 --- a/INSTALL +++ /dev/null @@ -1,5 +0,0 @@ -Building Bitcoin - -See doc/build-*.md for instructions on building bitcoind, -the intended-for-services, no-graphical-interface, reference -implementation of Bitcoin. \ No newline at end of file diff --git a/INSTALL.md b/INSTALL.md new file mode 100644 index 0000000000000..4cead0303612f --- /dev/null +++ b/INSTALL.md @@ -0,0 +1 @@ +See [doc/build-\*.md](/doc) \ No newline at end of file diff --git a/Makefile.am b/Makefile.am index b51f477b7856d..05e89f12b7a4f 100644 --- a/Makefile.am +++ b/Makefile.am @@ -1,64 +1,95 @@ +# Copyright (c) 2013-2020 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. + +# Pattern rule to print variables, e.g. 
make print-top_srcdir +print-%: FORCE + @echo '$*'='$($*)' + ACLOCAL_AMFLAGS = -I build-aux/m4 SUBDIRS = src +if ENABLE_MAN +SUBDIRS += doc/man +endif .PHONY: deploy FORCE +.INTERMEDIATE: $(COVERAGE_INFO) -GZIP_ENV="-9n" +export PYTHONPATH -BITCOIND_BIN=$(top_builddir)/src/bitcoind$(EXEEXT) -BITCOIN_QT_BIN=$(top_builddir)/src/qt/bitcoin-qt$(EXEEXT) -BITCOIN_CLI_BIN=$(top_builddir)/src/bitcoin-cli$(EXEEXT) -BITCOIN_WIN_INSTALLER=$(PACKAGE)-$(PACKAGE_VERSION)-win$(WINDOWS_BITS)-setup$(EXEEXT) +if BUILD_BITCOIN_LIBS +pkgconfigdir = $(libdir)/pkgconfig +pkgconfig_DATA = libbitcoinconsensus.pc +endif + +BITCOIND_BIN=$(top_builddir)/src/$(BITCOIN_DAEMON_NAME)$(EXEEXT) +BITCOIN_QT_BIN=$(top_builddir)/src/qt/$(BITCOIN_GUI_NAME)$(EXEEXT) +BITCOIN_CLI_BIN=$(top_builddir)/src/$(BITCOIN_CLI_NAME)$(EXEEXT) +BITCOIN_TX_BIN=$(top_builddir)/src/$(BITCOIN_TX_NAME)$(EXEEXT) +BITCOIN_UTIL_BIN=$(top_builddir)/src/$(BITCOIN_UTIL_NAME)$(EXEEXT) +BITCOIN_WALLET_BIN=$(top_builddir)/src/$(BITCOIN_WALLET_TOOL_NAME)$(EXEEXT) +BITCOIN_NODE_BIN=$(top_builddir)/src/$(BITCOIN_MP_NODE_NAME)$(EXEEXT) +BITCOIN_GUI_BIN=$(top_builddir)/src/$(BITCOIN_MP_GUI_NAME)$(EXEEXT) +BITCOIN_WIN_INSTALLER=$(PACKAGE)-$(PACKAGE_VERSION)-win64-setup$(EXEEXT) + +empty := +space := $(empty) $(empty) OSX_APP=Bitcoin-Qt.app -OSX_DMG=Bitcoin-Qt.dmg +OSX_VOLNAME = $(subst $(space),-,$(PACKAGE_NAME)) +OSX_DMG = $(OSX_VOLNAME).dmg OSX_DEPLOY_SCRIPT=$(top_srcdir)/contrib/macdeploy/macdeployqtplus -OSX_FANCY_PLIST=$(top_srcdir)/contrib/macdeploy/fancy.plist OSX_INSTALLER_ICONS=$(top_srcdir)/src/qt/res/icons/bitcoin.icns -OSX_PLIST=$(top_srcdir)/share/qt/Info.plist #not installed -OSX_QT_TRANSLATIONS = da,de,es,hu,ru,uk,zh_CN,zh_TW +OSX_PLIST=$(top_builddir)/share/qt/Info.plist #not installed + +DIST_CONTRIB = \ + $(top_srcdir)/test/sanitizer_suppressions/lsan \ + $(top_srcdir)/test/sanitizer_suppressions/tsan \ + $(top_srcdir)/test/sanitizer_suppressions/ubsan \ + $(top_srcdir)/contrib/linearize/linearize-data.py \ + $(top_srcdir)/contrib/linearize/linearize-hashes.py \ + $(top_srcdir)/contrib/signet/miner -DIST_DOCS = $(wildcard doc/*.md) $(wildcard doc/release-notes/*.md) +DIST_SHARE = \ + $(top_srcdir)/share/genbuild.sh \ + $(top_srcdir)/share/rpcauth + +BIN_CHECKS=$(top_srcdir)/contrib/devtools/symbol-check.py \ + $(top_srcdir)/contrib/devtools/security-check.py \ + $(top_srcdir)/contrib/devtools/utils.py WINDOWS_PACKAGING = $(top_srcdir)/share/pixmaps/bitcoin.ico \ $(top_srcdir)/share/pixmaps/nsis-header.bmp \ $(top_srcdir)/share/pixmaps/nsis-wizard.bmp \ $(top_srcdir)/doc/README_windows.txt -OSX_PACKAGING = $(OSX_DEPLOY_SCRIPT) $(OSX_FANCY_PLIST) $(OSX_INSTALLER_ICONS) \ - $(top_srcdir)/contrib/macdeploy/background.png \ - $(top_srcdir)/contrib/macdeploy/DS_Store \ - $(top_srcdir)/contrib/macdeploy/detached-sig-apply.sh \ +OSX_PACKAGING = $(OSX_DEPLOY_SCRIPT) $(OSX_INSTALLER_ICONS) \ $(top_srcdir)/contrib/macdeploy/detached-sig-create.sh -COVERAGE_INFO = baseline_filtered_combined.info baseline.info block_test.info \ - leveldb_baseline.info test_bitcoin_filtered.info total_coverage.info \ - baseline_filtered.info block_test_filtered.info \ - leveldb_baseline_filtered.info test_bitcoin_coverage.info test_bitcoin.info +COVERAGE_INFO = $(COV_TOOL_WRAPPER) baseline.info \ + test_bitcoin_filtered.info total_coverage.info \ + baseline_filtered.info functional_test.info functional_test_filtered.info \ + test_bitcoin_coverage.info test_bitcoin.info fuzz.info fuzz_filtered.info fuzz_coverage.info dist-hook: - -$(MAKE) -C $(top_distdir)/src/leveldb 
clean - -$(MAKE) -C $(top_distdir)/src/secp256k1 distclean -$(GIT) archive --format=tar HEAD -- src/clientversion.cpp | $(AMTAR) -C $(top_distdir) -xf - -distcheck-hook: - $(MKDIR_P) $(top_distdir)/_build/src/leveldb - cp -rf $(top_srcdir)/src/leveldb/* $(top_distdir)/_build/src/leveldb/ - -$(MAKE) -C $(top_distdir)/_build/src/leveldb clean - -distcleancheck: - @: - +if TARGET_WINDOWS $(BITCOIN_WIN_INSTALLER): all-recursive $(MKDIR_P) $(top_builddir)/release STRIPPROG="$(STRIP)" $(INSTALL_STRIP_PROGRAM) $(BITCOIND_BIN) $(top_builddir)/release STRIPPROG="$(STRIP)" $(INSTALL_STRIP_PROGRAM) $(BITCOIN_QT_BIN) $(top_builddir)/release STRIPPROG="$(STRIP)" $(INSTALL_STRIP_PROGRAM) $(BITCOIN_CLI_BIN) $(top_builddir)/release - @test -f $(MAKENSIS) && $(MAKENSIS) $(top_builddir)/share/setup.nsi || \ + STRIPPROG="$(STRIP)" $(INSTALL_STRIP_PROGRAM) $(BITCOIN_TX_BIN) $(top_builddir)/release + STRIPPROG="$(STRIP)" $(INSTALL_STRIP_PROGRAM) $(BITCOIN_WALLET_BIN) $(top_builddir)/release + STRIPPROG="$(STRIP)" $(INSTALL_STRIP_PROGRAM) $(BITCOIN_UTIL_BIN) $(top_builddir)/release + @test -f $(MAKENSIS) && echo 'OutFile "$@"' | cat $(top_builddir)/share/setup.nsi - | $(MAKENSIS) -V2 - || \ echo error: could not build $@ + @echo built $@ -$(if $(findstring src/,$(MAKECMDGOALS)),$(MAKECMDGOALS), none): FORCE - $(MAKE) -C src $(patsubst src/%,%,$@) +deploy: $(BITCOIN_WIN_INSTALLER) +endif +if TARGET_DARWIN $(OSX_APP)/Contents/PkgInfo: $(MKDIR_P) $(@D) @echo "APPL????" > $@ @@ -75,51 +106,40 @@ $(OSX_APP)/Contents/Resources/bitcoin.icns: $(OSX_INSTALLER_ICONS) $(MKDIR_P) $(@D) $(INSTALL_DATA) $< $@ -$(OSX_APP)/Contents/MacOS/Bitcoin-Qt: $(BITCOIN_QT_BIN) +$(OSX_APP)/Contents/MacOS/Bitcoin-Qt: all-recursive $(MKDIR_P) $(@D) - STRIPPROG="$(STRIP)" $(INSTALL_STRIP_PROGRAM) $< $@ + STRIPPROG="$(STRIP)" $(INSTALL_STRIP_PROGRAM) $(BITCOIN_QT_BIN) $@ + +$(OSX_APP)/Contents/Resources/Base.lproj/InfoPlist.strings: + $(MKDIR_P) $(@D) + echo '{ CFBundleDisplayName = "$(PACKAGE_NAME)"; CFBundleName = "$(PACKAGE_NAME)"; }' > $@ OSX_APP_BUILT=$(OSX_APP)/Contents/PkgInfo $(OSX_APP)/Contents/Resources/empty.lproj \ $(OSX_APP)/Contents/Resources/bitcoin.icns $(OSX_APP)/Contents/Info.plist \ - $(OSX_APP)/Contents/MacOS/Bitcoin-Qt + $(OSX_APP)/Contents/MacOS/Bitcoin-Qt $(OSX_APP)/Contents/Resources/Base.lproj/InfoPlist.strings + +osx_volname: + echo $(OSX_VOLNAME) >$@ if BUILD_DARWIN $(OSX_DMG): $(OSX_APP_BUILT) $(OSX_PACKAGING) - $(OSX_DEPLOY_SCRIPT) $(OSX_APP) -add-qt-tr $(OSX_QT_TRANSLATIONS) -translations-dir=$(QT_TRANSLATION_DIR) -dmg -fancy $(OSX_FANCY_PLIST) -verbose 2 + $(PYTHON) $(OSX_DEPLOY_SCRIPT) $(OSX_APP) $(OSX_VOLNAME) -translations-dir=$(QT_TRANSLATION_DIR) -dmg deploydir: $(OSX_DMG) -else +else !BUILD_DARWIN APP_DIST_DIR=$(top_builddir)/dist -APP_DIST_EXTRAS=$(APP_DIST_DIR)/.background/background.png $(APP_DIST_DIR)/.DS_Store $(APP_DIST_DIR)/Applications -$(APP_DIST_DIR)/Applications: - @rm -f $@ - @cd $(@D); $(LN_S) /Applications $(@F) - -$(APP_DIST_EXTRAS): $(APP_DIST_DIR)/$(OSX_APP)/Contents/MacOS/Bitcoin-Qt - -$(OSX_DMG): $(APP_DIST_EXTRAS) - $(GENISOIMAGE) -no-cache-inodes -D -l -probe -V "Bitcoin-Qt" -no-pad -r -apple -o $@ dist - -$(APP_DIST_DIR)/.background/background.png: - $(MKDIR_P) $(@D) - $(INSTALL) $(top_srcdir)/contrib/macdeploy/background.png $@ -$(APP_DIST_DIR)/.DS_Store: - $(INSTALL) $(top_srcdir)/contrib/macdeploy/DS_Store $@ +$(OSX_DMG): deploydir + $(XORRISOFS) -D -l -V "$(OSX_VOLNAME)" -no-pad -r -dir-mode 0755 -o $@ $(APP_DIST_DIR) -- $(if $(SOURCE_DATE_EPOCH),-volume_date all_file_dates 
=$(SOURCE_DATE_EPOCH)) $(APP_DIST_DIR)/$(OSX_APP)/Contents/MacOS/Bitcoin-Qt: $(OSX_APP_BUILT) $(OSX_PACKAGING) - INSTALLNAMETOOL=$(INSTALLNAMETOOL) OTOOL=$(OTOOL) STRIP=$(STRIP) $(OSX_DEPLOY_SCRIPT) $(OSX_APP) -translations-dir=$(QT_TRANSLATION_DIR) -add-qt-tr $(OSX_QT_TRANSLATIONS) -verbose 2 + INSTALL_NAME_TOOL=$(INSTALL_NAME_TOOL) OTOOL=$(OTOOL) STRIP=$(STRIP) $(PYTHON) $(OSX_DEPLOY_SCRIPT) $(OSX_APP) $(OSX_VOLNAME) -translations-dir=$(QT_TRANSLATION_DIR) -deploydir: $(APP_DIST_EXTRAS) -endif +deploydir: $(APP_DIST_DIR)/$(OSX_APP)/Contents/MacOS/Bitcoin-Qt +endif !BUILD_DARWIN -if TARGET_DARWIN -appbundle: $(OSX_APP_BUILT) deploy: $(OSX_DMG) endif -if TARGET_WINDOWS -deploy: $(BITCOIN_WIN_INSTALLER) -endif $(BITCOIN_QT_BIN): FORCE $(MAKE) -C src qt/$(@F) @@ -130,71 +150,202 @@ $(BITCOIND_BIN): FORCE $(BITCOIN_CLI_BIN): FORCE $(MAKE) -C src $(@F) -if USE_LCOV +$(BITCOIN_TX_BIN): FORCE + $(MAKE) -C src $(@F) -baseline.info: +$(BITCOIN_UTIL_BIN): FORCE + $(MAKE) -C src $(@F) + +$(BITCOIN_WALLET_BIN): FORCE + $(MAKE) -C src $(@F) + +$(BITCOIN_NODE_BIN): FORCE + $(MAKE) -C src $(@F) + +$(BITCOIN_GUI_BIN): FORCE + $(MAKE) -C src $(@F) + +if USE_LCOV +LCOV_FILTER_PATTERN = \ + -p "/usr/local/" \ + -p "/usr/include/" \ + -p "/usr/lib/" \ + -p "/usr/lib64/" \ + -p "src/leveldb/" \ + -p "src/crc32c/" \ + -p "src/bench/" \ + -p "src/univalue" \ + -p "src/crypto/ctaes" \ + -p "src/minisketch" \ + -p "src/secp256k1" \ + -p "depends" + +DIR_FUZZ_SEED_CORPUS ?= qa-assets/fuzz_seed_corpus + +$(COV_TOOL_WRAPPER): + @echo 'exec $(COV_TOOL) "$$@"' > $(COV_TOOL_WRAPPER) + @chmod +x $(COV_TOOL_WRAPPER) + +baseline.info: $(COV_TOOL_WRAPPER) $(LCOV) -c -i -d $(abs_builddir)/src -o $@ baseline_filtered.info: baseline.info - $(LCOV) -r $< "/usr/include/*" -o $@ - -leveldb_baseline.info: baseline_filtered.info - $(LCOV) -c -i -d $(abs_builddir)/src/leveldb -b $(abs_builddir)/src/leveldb -o $@ + $(abs_builddir)/contrib/filter-lcov.py $(LCOV_FILTER_PATTERN) $< $@ + $(LCOV) -a $@ $(LCOV_OPTS) -o $@ -leveldb_baseline_filtered.info: leveldb_baseline.info - $(LCOV) -r $< "/usr/include/*" -o $@ +fuzz.info: baseline_filtered.info + @TIMEOUT=15 test/fuzz/test_runner.py $(DIR_FUZZ_SEED_CORPUS) -l DEBUG + $(LCOV) -c $(LCOV_OPTS) -d $(abs_builddir)/src --t fuzz-tests -o $@ + $(LCOV) -z $(LCOV_OPTS) -d $(abs_builddir)/src -baseline_filtered_combined.info: leveldb_baseline_filtered.info baseline_filtered.info - $(LCOV) -a leveldb_baseline_filtered.info -a baseline_filtered.info -o $@ +fuzz_filtered.info: fuzz.info + $(abs_builddir)/contrib/filter-lcov.py $(LCOV_FILTER_PATTERN) $< $@ + $(LCOV) -a $@ $(LCOV_OPTS) -o $@ -test_bitcoin.info: baseline_filtered_combined.info +test_bitcoin.info: baseline_filtered.info $(MAKE) -C src/ check - $(LCOV) -c -d $(abs_builddir)/src -t test_bitcoin -o $@ - $(LCOV) -z -d $(abs_builddir)/src - $(LCOV) -z -d $(abs_builddir)/src/leveldb + $(LCOV) -c $(LCOV_OPTS) -d $(abs_builddir)/src -t test_bitcoin -o $@ + $(LCOV) -z $(LCOV_OPTS) -d $(abs_builddir)/src test_bitcoin_filtered.info: test_bitcoin.info - $(LCOV) -r $< "/usr/include/*" -o $@ + $(abs_builddir)/contrib/filter-lcov.py $(LCOV_FILTER_PATTERN) $< $@ + $(LCOV) -a $@ $(LCOV_OPTS) -o $@ -block_test.info: test_bitcoin_filtered.info - $(MKDIR_P) qa/tmp - -@TIMEOUT=15 qa/pull-tester/run-bitcoind-for-test.sh $(JAVA) -jar $(JAVA_COMPARISON_TOOL) qa/tmp/compTool 0 - $(LCOV) -c -d $(abs_builddir)/src --t BitcoinJBlockTest -o $@ - $(LCOV) -z -d $(abs_builddir)/src - $(LCOV) -z -d $(abs_builddir)/src/leveldb +functional_test.info: 
test_bitcoin_filtered.info + @TIMEOUT=15 test/functional/test_runner.py $(EXTENDED_FUNCTIONAL_TESTS) + $(LCOV) -c $(LCOV_OPTS) -d $(abs_builddir)/src --t functional-tests -o $@ + $(LCOV) -z $(LCOV_OPTS) -d $(abs_builddir)/src -block_test_filtered.info: block_test.info - $(LCOV) -r $< "/usr/include/*" -o $@ +functional_test_filtered.info: functional_test.info + $(abs_builddir)/contrib/filter-lcov.py $(LCOV_FILTER_PATTERN) $< $@ + $(LCOV) -a $@ $(LCOV_OPTS) -o $@ -test_bitcoin_coverage.info: baseline_filtered_combined.info test_bitcoin_filtered.info - $(LCOV) -a baseline_filtered.info -a leveldb_baseline_filtered.info -a test_bitcoin_filtered.info -o $@ +fuzz_coverage.info: fuzz_filtered.info + $(LCOV) -a $(LCOV_OPTS) baseline_filtered.info -a fuzz_filtered.info -o $@ | $(GREP) "\%" | $(AWK) '{ print substr($$3,2,50) "/" $$5 }' > coverage_percent.txt -total_coverage.info: baseline_filtered_combined.info test_bitcoin_filtered.info block_test_filtered.info - $(LCOV) -a baseline_filtered.info -a leveldb_baseline_filtered.info -a test_bitcoin_filtered.info -a block_test_filtered.info -o $@ | $(GREP) "\%" | $(AWK) '{ print substr($$3,2,50) "/" $$5 }' > coverage_percent.txt +test_bitcoin_coverage.info: baseline_filtered.info test_bitcoin_filtered.info + $(LCOV) -a $(LCOV_OPTS) baseline_filtered.info -a test_bitcoin_filtered.info -o $@ + +total_coverage.info: test_bitcoin_filtered.info functional_test_filtered.info + $(LCOV) -a $(LCOV_OPTS) baseline_filtered.info -a test_bitcoin_filtered.info -a functional_test_filtered.info -o $@ | $(GREP) "\%" | $(AWK) '{ print substr($$3,2,50) "/" $$5 }' > coverage_percent.txt + +fuzz.coverage/.dirstamp: fuzz_coverage.info + $(GENHTML) -s $(LCOV_OPTS) $< -o $(@D) + @touch $@ test_bitcoin.coverage/.dirstamp: test_bitcoin_coverage.info - $(GENHTML) -s $< -o $(@D) + $(GENHTML) -s $(LCOV_OPTS) $< -o $(@D) @touch $@ total.coverage/.dirstamp: total_coverage.info - $(GENHTML) -s $< -o $(@D) + $(GENHTML) -s $(LCOV_OPTS) $< -o $(@D) @touch $@ -cov: test_bitcoin.coverage/.dirstamp total.coverage/.dirstamp +cov_fuzz: fuzz.coverage/.dirstamp -endif +cov: test_bitcoin.coverage/.dirstamp total.coverage/.dirstamp -if USE_COMPARISON_TOOL -check-local: - $(MKDIR_P) qa/tmp - @qa/pull-tester/run-bitcoind-for-test.sh $(JAVA) -jar $(JAVA_COMPARISON_TOOL) qa/tmp/compTool $(COMPARISON_TOOL_REORG_TESTS) 2>&1 endif -EXTRA_DIST = $(top_srcdir)/share/genbuild.sh qa/pull-tester/rpc-tests.sh qa/pull-tester/run-bitcoin-cli qa/rpc-tests $(DIST_DOCS) $(WINDOWS_PACKAGING) $(OSX_PACKAGING) +dist_noinst_SCRIPTS = autogen.sh + +EXTRA_DIST = $(DIST_SHARE) $(DIST_CONTRIB) $(WINDOWS_PACKAGING) $(OSX_PACKAGING) $(BIN_CHECKS) + +EXTRA_DIST += \ + test/functional \ + test/fuzz + +EXTRA_DIST += \ + test/util/test_runner.py \ + test/util/data/bitcoin-util-test.json \ + test/util/data/blanktxv1.hex \ + test/util/data/blanktxv1.json \ + test/util/data/blanktxv2.hex \ + test/util/data/blanktxv2.json \ + test/util/data/tt-delin1-out.hex \ + test/util/data/tt-delin1-out.json \ + test/util/data/tt-delout1-out.hex \ + test/util/data/tt-delout1-out.json \ + test/util/data/tt-locktime317000-out.hex \ + test/util/data/tt-locktime317000-out.json \ + test/util/data/tx394b54bb.hex \ + test/util/data/txcreate1.hex \ + test/util/data/txcreate1.json \ + test/util/data/txcreate2.hex \ + test/util/data/txcreate2.json \ + test/util/data/txcreatedata1.hex \ + test/util/data/txcreatedata1.json \ + test/util/data/txcreatedata2.hex \ + test/util/data/txcreatedata2.json \ + test/util/data/txcreatedata_seq0.hex \ + 
test/util/data/txcreatedata_seq0.json \ + test/util/data/txcreatedata_seq1.hex \ + test/util/data/txcreatedata_seq1.json \ + test/util/data/txcreatemultisig1.hex \ + test/util/data/txcreatemultisig1.json \ + test/util/data/txcreatemultisig2.hex \ + test/util/data/txcreatemultisig2.json \ + test/util/data/txcreatemultisig3.hex \ + test/util/data/txcreatemultisig3.json \ + test/util/data/txcreatemultisig4.hex \ + test/util/data/txcreatemultisig4.json \ + test/util/data/txcreatemultisig5.json \ + test/util/data/txcreateoutpubkey1.hex \ + test/util/data/txcreateoutpubkey1.json \ + test/util/data/txcreateoutpubkey2.hex \ + test/util/data/txcreateoutpubkey2.json \ + test/util/data/txcreateoutpubkey3.hex \ + test/util/data/txcreateoutpubkey3.json \ + test/util/data/txcreatescript1.hex \ + test/util/data/txcreatescript1.json \ + test/util/data/txcreatescript2.hex \ + test/util/data/txcreatescript2.json \ + test/util/data/txcreatescript3.hex \ + test/util/data/txcreatescript3.json \ + test/util/data/txcreatescript4.hex \ + test/util/data/txcreatescript4.json \ + test/util/data/txcreatescript5.hex \ + test/util/data/txcreatescript6.hex \ + test/util/data/txcreatesignsegwit1.hex \ + test/util/data/txcreatesignv1.hex \ + test/util/data/txcreatesignv1.json \ + test/util/data/txcreatesignv2.hex \ + test/util/rpcauth-test.py CLEANFILES = $(OSX_DMG) $(BITCOIN_WIN_INSTALLER) -.INTERMEDIATE: $(COVERAGE_INFO) +DISTCHECK_CONFIGURE_FLAGS = --enable-man + +doc/doxygen/.stamp: doc/Doxyfile FORCE + $(MKDIR_P) $(@D) + $(DOXYGEN) $^ + $(AM_V_at) touch $@ + +if HAVE_DOXYGEN +docs: doc/doxygen/.stamp +else +docs: + @echo "error: doxygen not found" +endif -clean-local: - rm -rf test_bitcoin.coverage/ total.coverage/ $(OSX_APP) +clean-docs: + rm -rf doc/doxygen + +clean-local: clean-docs + rm -rf coverage_percent.txt test_bitcoin.coverage/ total.coverage/ fuzz.coverage/ test/tmp/ cache/ $(OSX_APP) + rm -rf test/functional/__pycache__ test/functional/test_framework/__pycache__ test/cache share/rpcauth/__pycache__ + rm -rf osx_volname dist/ + +test-security-check: +if TARGET_DARWIN + $(AM_V_at) CC='$(CC)' CFLAGS='$(CFLAGS)' CPPFLAGS='$(CPPFLAGS)' LDFLAGS='$(LDFLAGS)' $(PYTHON) $(top_srcdir)/contrib/devtools/test-security-check.py TestSecurityChecks.test_MACHO + $(AM_V_at) CC='$(CC)' CFLAGS='$(CFLAGS)' CPPFLAGS='$(CPPFLAGS)' LDFLAGS='$(LDFLAGS)' $(PYTHON) $(top_srcdir)/contrib/devtools/test-symbol-check.py TestSymbolChecks.test_MACHO +endif +if TARGET_WINDOWS + $(AM_V_at) CC='$(CC)' CFLAGS='$(CFLAGS)' CPPFLAGS='$(CPPFLAGS)' LDFLAGS='$(LDFLAGS)' $(PYTHON) $(top_srcdir)/contrib/devtools/test-security-check.py TestSecurityChecks.test_PE + $(AM_V_at) CC='$(CC)' CFLAGS='$(CFLAGS)' CPPFLAGS='$(CPPFLAGS)' LDFLAGS='$(LDFLAGS)' $(PYTHON) $(top_srcdir)/contrib/devtools/test-symbol-check.py TestSymbolChecks.test_PE +endif +if TARGET_LINUX + $(AM_V_at) CC='$(CC)' CFLAGS='$(CFLAGS)' CPPFLAGS='$(CPPFLAGS)' LDFLAGS='$(LDFLAGS)' $(PYTHON) $(top_srcdir)/contrib/devtools/test-security-check.py TestSecurityChecks.test_ELF + $(AM_V_at) CC='$(CC)' CFLAGS='$(CFLAGS)' CPPFLAGS='$(CPPFLAGS)' LDFLAGS='$(LDFLAGS)' $(PYTHON) $(top_srcdir)/contrib/devtools/test-symbol-check.py TestSymbolChecks.test_ELF +endif diff --git a/README.md b/README.md index cf650fe543334..2eab2315eb6ea 100644 --- a/README.md +++ b/README.md @@ -1,51 +1,40 @@ Bitcoin Core integration/staging tree ===================================== -[![Build Status](https://travis-ci.org/bitcoin/bitcoin.svg?branch=master)](https://travis-ci.org/bitcoin/bitcoin) +https://bitcoincore.org 
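The Makefile.am changes above add several convenience targets on top of the usual build: `cov`/`cov_fuzz` for lcov coverage reports, `docs`/`clean-docs` for Doxygen output, `osx_volname`/`deploy` for packaging, and `test-security-check` for the binary hardening and symbol checks. A minimal usage sketch, assuming a tree that has already been configured and built (the coverage targets are only defined when the tree was configured with lcov support, and the fuzz target expects a seed corpus at the path given by `DIR_FUZZ_SEED_CORPUS`):

```
# Doxygen HTML documentation (prints an error if doxygen was not found at configure time)
make docs
make clean-docs

# Unit-test and overall coverage reports (only available with lcov-enabled configurations)
make cov

# Fuzz coverage, driven by the qa-assets seed corpus referenced via DIR_FUZZ_SEED_CORPUS
make cov_fuzz

# Per-platform security and symbol checks on the freshly built binaries
make test-security-check
```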
-https://www.bitcoin.org +For an immediately usable, binary version of the Bitcoin Core software, see +https://bitcoincore.org/en/download/. -Copyright (c) 2009-2014 Bitcoin Core Developers +What is Bitcoin Core? +--------------------- -What is Bitcoin? ----------------- +Bitcoin Core connects to the Bitcoin peer-to-peer network to download and fully +validate blocks and transactions. It also includes a wallet and graphical user +interface, which can be optionally built. -Bitcoin is an experimental new digital currency that enables instant payments to -anyone, anywhere in the world. Bitcoin uses peer-to-peer technology to operate -with no central authority: managing transactions and issuing money are carried -out collectively by the network. Bitcoin Core is the name of open source -software which enables the use of this currency. - -For more information, as well as an immediately useable, binary version of -the Bitcoin Core software, see https://www.bitcoin.org/en/download. +Further information about Bitcoin Core is available in the [doc folder](/doc). License ------- Bitcoin Core is released under the terms of the MIT license. See [COPYING](COPYING) for more -information or see http://opensource.org/licenses/MIT. +information or see https://opensource.org/licenses/MIT. -Development process +Development Process ------------------- -Developers work in their own trees, then submit pull requests when they think -their feature or bug fix is ready. - -If it is a simple/trivial/non-controversial change, then one of the Bitcoin -development team members simply pulls it. - -If it is a *more complicated or potentially controversial* change, then the patch -submitter will be asked to start a discussion (if they haven't already) on the -[mailing list](http://sourceforge.net/mailarchive/forum.php?forum_name=bitcoin-development). +The `master` branch is regularly built (see `doc/build-*.md` for instructions) and tested, but it is not guaranteed to be +completely stable. [Tags](https://github.com/bitcoin/bitcoin/tags) are created +regularly from release branches to indicate new official, stable release versions of Bitcoin Core. -The patch will be accepted if there is broad consensus that it is a good thing. -Developers should expect to rework and resubmit patches if the code doesn't -match the project's coding conventions (see [doc/coding.md](doc/coding.md)) or are -controversial. +The https://github.com/bitcoin-core/gui repository is used exclusively for the +development of the GUI. Its master branch is identical in all monotree +repositories. Release branches and tags do not exist, so please do not fork +that repository unless it is for development reasons. -The `master` branch is regularly built and tested, but is not guaranteed to be -completely stable. [Tags](https://github.com/bitcoin/bitcoin/tags) are created -regularly to indicate new official, stable release versions of Bitcoin. +The contribution workflow is described in [CONTRIBUTING.md](CONTRIBUTING.md) +and useful hints for developers can be found in [doc/developer-notes.md](doc/developer-notes.md). Testing ------- @@ -57,66 +46,33 @@ lots of money. ### Automated Testing -Developers are strongly encouraged to write unit tests for new code, and to -submit new unit tests for old code. Unit tests can be compiled and run (assuming they weren't disabled in configure) with: `make check` +Developers are strongly encouraged to write [unit tests](src/test/README.md) for new code, and to +submit new unit tests for old code. 
Unit tests can be compiled and run +(assuming they weren't disabled in configure) with: `make check`. Further details on running +and extending unit tests can be found in [/src/test/README.md](/src/test/README.md). -Every pull request is built for both Windows and Linux on a dedicated server, -and unit and sanity tests are automatically run. The binaries produced may be -used for manual QA testing — a link to them will appear in a comment on the -pull request posted by [BitcoinPullTester](https://github.com/BitcoinPullTester). See https://github.com/TheBlueMatt/test-scripts -for the build/test scripts. +There are also [regression and integration tests](/test), written +in Python. +These tests can be run (if the [test dependencies](/test) are installed) with: `test/functional/test_runner.py` + +The CI (Continuous Integration) systems make sure that every pull request is built for Windows, Linux, and macOS, +and that unit/sanity tests are run automatically. ### Manual Quality Assurance (QA) Testing -Large changes should have a test plan, and should be tested by somebody other -than the developer who wrote the code. -See https://github.com/bitcoin/QA/ for how to create a test plan. +Changes should be tested by somebody other than the developer who wrote the +code. This is especially important for large or high-risk changes. It is useful +to add a test plan to the pull request description if testing the changes is +not straightforward. Translations ------------ Changes to translations as well as new translations can be submitted to -[Bitcoin Core's Transifex page](https://www.transifex.com/projects/p/bitcoin/). +[Bitcoin Core's Transifex page](https://www.transifex.com/bitcoin/bitcoin/). Translations are periodically pulled from Transifex and merged into the git repository. See the [translation process](doc/translation_process.md) for details on how this works. **Important**: We do not accept translation changes as GitHub pull requests because the next pull from Transifex would automatically overwrite them again. - -Translators should also subscribe to the [mailing list](https://groups.google.com/forum/#!forum/bitcoin-translators). - -Development tips and tricks ---------------------------- - -**compiling for debugging** - -Run configure with the --enable-debug option, then make. Or run configure with -CXXFLAGS="-g -ggdb -O0" or whatever debug flags you need. - -**debug.log** - -If the code is behaving strangely, take a look in the debug.log file in the data directory; -error and debugging messages are written there. - -The -debug=... command-line option controls debugging; running with just -debug will turn -on all categories (and give you a very large debug.log file). - -The Qt code routes qDebug() output to debug.log under category "qt": run with -debug=qt -to see it. - -**testnet and regtest modes** - -Run with the -testnet option to run with "play bitcoins" on the test network, if you -are testing multi-machine code that needs to operate across the internet. - -If you are testing something that can run on one machine, run with the -regtest option. -In regression test mode, blocks can be created on-demand; see qa/rpc-tests/ for tests -that run in -regtest mode. - -**DEBUG_LOCKORDER** - -Bitcoin Core is a multithreaded application, and deadlocks or other multithreading bugs -can be very difficult to track down. 
Compiling with -DDEBUG_LOCKORDER (configure -CXXFLAGS="-DDEBUG_LOCKORDER -g") inserts run-time checks to keep track of which locks -are held, and adds warnings to the debug.log file if inconsistencies are detected. diff --git a/REVIEWERS b/REVIEWERS new file mode 100644 index 0000000000000..dd962947ffd1a --- /dev/null +++ b/REVIEWERS @@ -0,0 +1,140 @@ +# ============================================================================== +# Bitcoin Core REVIEWERS +# ============================================================================== + +# Configuration of automated review requests for the bitcoin/bitcoin repo +# via DrahtBot. + +# Order is not important; if a modified file or directory matches a fnmatch, +# the reviewer will be mentioned in a PR comment requesting a review. + +# Regular contributors are free to add their names to specific directories or +# files provided that they are willing to provide a review. + +# Absence from this list should not be interpreted as a discouragement to +# review a pull request. Peer review is always welcome and is a critical +# component of the progress of the codebase. Information on peer review +# guidelines can be found in the CONTRIBUTING.md doc. + + +# Maintainers +# @achow101 +# @fanquake +# @hebasto +# @laanwj +# @marcofalke +# @sipa + +# Docs +/doc/*[a-zA-Z-].md @harding +/doc/Doxyfile.in @fanquake +/doc/REST-interface.md @jonasschnelli +/doc/benchmarking.md @ariard +/doc/bitcoin-conf.md @hebasto +/doc/build-freebsd.md @fanquake +/doc/build-netbsd.md @fanquake +/doc/build-openbsd.md @laanwj +/doc/build-osx.md @fanquake +/doc/build-unix.md @laanwj +/doc/build-windows.md @sipsorcery +/doc/dependencies.md @fanquake +/doc/developer-notes.md @laanwj +/doc/files.md @hebasto +/doc/reduce-memory.md @fanquake +/doc/reduce-traffic.md @jonasschnelli +/doc/release-process.md @laanwj +/doc/translation_strings_policy.md @laanwj + +# Build aux +/build-aux/m4/bitcoin_qt.m4 @hebasto + +# MSVC build system +/build_msvc/ @sipsorcery + +# Settings +/src/util/settings.* @ryanofsky + +# Fuzzing + +# Tests +/src/test/net_peer_eviction_tests.cpp @jonatack +/test/functional/mempool_updatefromblock.py @hebasto +/test/functional/feature_asmap.py @jonatack +/test/functional/interface_bitcoin_cli.py @jonatack + +# Backwards compatibility tests +*_compatibility.py @sjors +/test/functional/wallet_upgradewallet.py @sjors @achow101 +/test/get_previous_releases.py @sjors + +# Translations +/src/util/translation.h @hebasto + +# Dev Tools +/contrib/devtools/security-check.py @fanquake +/contrib/devtools/test-security-check.py @fanquake +/contrib/devtools/symbol-check.py @fanquake + +# Guix +/contrib/guix/ @dongcarl + +# Compatibility +/src/compat/glibc_* @fanquake + +# GUI +/src/qt/forms/ @hebasto + +# Wallet +/src/wallet/ @achow101 + +# CLI +/src/bitcoin-cli.cpp @jonatack + +# Coinstats +/src/node/coinstats.* @fjahr + +# Index +/src/index/ @fjahr + +# Descriptors +*descriptor* @achow101 @sipa + +# External signer +*external_signer* @sjors +/doc/external-signer.md @sjors +*signer.py @sjors + +# Interfaces +/src/interfaces/ @ryanofsky + +# DB +/src/txdb.* @jamesob +/src/dbwrapper.* @jamesob + +# Linter +/test/lint/lint-shell.py @hebasto + +# Bech32 +/src/bech32.* @sipa +/src/bench/bech32.* @sipa + +# PSBT +/src/psbt* @achow101 +/src/node/psbt* @achow101 +/doc/psbt.md @achow101 + +# P2P +/src/net_processing.* @sipa +/src/protocol.* @sipa + +# Consensus +/src/coins.* @sipa @jamesob +/src/script/script.* @sipa +/src/script/interpreter.* @sipa +/src/validation.* @sipa 
+/src/consensus/ @sipa + +# Tracing +/doc/tracing.md @jb55 @0xB10C +/src/util/trace.h @jb55 @0xB10C +/contrib/tracing/ @jb55 @0xB10C diff --git a/SECURITY.md b/SECURITY.md new file mode 100644 index 0000000000000..25b6175c95c88 --- /dev/null +++ b/SECURITY.md @@ -0,0 +1,20 @@ +# Security Policy + +## Supported Versions + +See our website for versions of Bitcoin Core that are currently supported with +security updates: https://bitcoincore.org/en/lifecycle/#schedule + +## Reporting a Vulnerability + +To report security issues send an email to security@bitcoincore.org (not for support). + +The following keys may be used to communicate sensitive information to developers: + +| Name | Fingerprint | +|------|-------------| +| Wladimir van der Laan | 71A3 B167 3540 5025 D447 E8F2 7481 0B01 2346 C9A6 | +| Pieter Wuille | 133E AC17 9436 F14A 5CF1 B794 860F EB80 4E66 9320 | +| Michael Ford | E777 299F C265 DD04 7930 70EB 944D 35F9 AC3D B76A | + +You can import a key by running the following command with that individual’s fingerprint: `gpg --keyserver hkps://keys.openpgp.org --recv-keys ""` Ensure that you put quotes around fingerprints containing spaces. diff --git a/autogen.sh b/autogen.sh index 3e26a183059c0..de16260b56327 100755 --- a/autogen.sh +++ b/autogen.sh @@ -1,9 +1,16 @@ #!/bin/sh +# Copyright (c) 2013-2019 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. + +export LC_ALL=C set -e -srcdir="$(dirname $0)" +srcdir="$(dirname "$0")" cd "$srcdir" -if [ -z ${LIBTOOLIZE} ] && GLIBTOOLIZE="`which glibtoolize 2>/dev/null`"; then +if [ -z "${LIBTOOLIZE}" ] && GLIBTOOLIZE="$(command -v glibtoolize)"; then LIBTOOLIZE="${GLIBTOOLIZE}" export LIBTOOLIZE fi +command -v autoreconf >/dev/null || \ + (echo "configuration failed, please install autoconf first" && exit 1) autoreconf --install --force --warnings=all diff --git a/build-aux/m4/ax_boost_base.m4 b/build-aux/m4/ax_boost_base.m4 index 3f24d5ddc617b..7aac53c8155f1 100644 --- a/build-aux/m4/ax_boost_base.m4 +++ b/build-aux/m4/ax_boost_base.m4 @@ -1,5 +1,5 @@ # =========================================================================== -# http://www.gnu.org/software/autoconf-archive/ax_boost_base.html +# https://www.gnu.org/software/autoconf-archive/ax_boost_base.html # =========================================================================== # # SYNOPSIS @@ -11,7 +11,7 @@ # Test for the Boost C++ libraries of a particular version (or newer) # # If no path to the installed boost library is given the macro searchs -# under /usr, /usr/local, /opt and /opt/local and evaluates the +# under /usr, /usr/local, /opt, /opt/local and /opt/homebrew and evaluates the # $BOOST_ROOT environment variable. Further documentation is available at # . # @@ -33,7 +33,15 @@ # and this notice are preserved. This file is offered as-is, without any # warranty. 
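The autogen.sh and ax_boost_base.m4 changes above tighten how the build is bootstrapped and how Boost is located: autogen.sh now fails early when autoreconf is missing, and the macro additionally searches /opt/homebrew and, as the option handling just below shows, honors --with-boost and --with-boost-libdir overrides. A hedged usage sketch, assuming the project's configure.ac calls AX_BOOST_BASE; the /opt/boost prefix is purely illustrative:

```
# Regenerate the build system (autogen.sh now aborts with a clear message if autoreconf is absent)
./autogen.sh

# Point configure at a Boost tree outside the default search paths
./configure --with-boost=/opt/boost --with-boost-libdir=/opt/boost/lib
```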
-#serial 23 +#serial 48 + +# example boost program (need to pass version) +m4_define([_AX_BOOST_BASE_PROGRAM], + [AC_LANG_PROGRAM([[ +#include +]],[[ +(void) ((void)sizeof(char[1 - 2*!!((BOOST_VERSION) < ($1))])); +]])]) AC_DEFUN([AX_BOOST_BASE], [ @@ -44,110 +52,123 @@ AC_ARG_WITH([boost], or disable it (ARG=no) @<:@ARG=yes@:>@ ])], [ - if test "$withval" = "no"; then - want_boost="no" - elif test "$withval" = "yes"; then - want_boost="yes" - ac_boost_path="" - else - want_boost="yes" - ac_boost_path="$withval" - fi + AS_CASE([$withval], + [no],[want_boost="no";_AX_BOOST_BASE_boost_path=""], + [yes],[want_boost="yes";_AX_BOOST_BASE_boost_path=""], + [want_boost="yes";_AX_BOOST_BASE_boost_path="$withval"]) ], [want_boost="yes"]) AC_ARG_WITH([boost-libdir], - AS_HELP_STRING([--with-boost-libdir=LIB_DIR], - [Force given directory for boost libraries. Note that this will override library path detection, so use this parameter only if default library detection fails and you know exactly where your boost libraries are located.]), - [ - if test -d "$withval" - then - ac_boost_lib_path="$withval" - else - AC_MSG_ERROR(--with-boost-libdir expected directory name) - fi - ], - [ac_boost_lib_path=""] -) + [AS_HELP_STRING([--with-boost-libdir=LIB_DIR], + [Force given directory for boost libraries. + Note that this will override library path detection, + so use this parameter only if default library detection fails + and you know exactly where your boost libraries are located.])], + [ + AS_IF([test -d "$withval"], + [_AX_BOOST_BASE_boost_lib_path="$withval"], + [AC_MSG_ERROR([--with-boost-libdir expected directory name])]) + ], + [_AX_BOOST_BASE_boost_lib_path=""]) -if test "x$want_boost" = "xyes"; then - boost_lib_version_req=ifelse([$1], ,1.20.0,$1) - boost_lib_version_req_shorten=`expr $boost_lib_version_req : '\([[0-9]]*\.[[0-9]]*\)'` - boost_lib_version_req_major=`expr $boost_lib_version_req : '\([[0-9]]*\)'` - boost_lib_version_req_minor=`expr $boost_lib_version_req : '[[0-9]]*\.\([[0-9]]*\)'` - boost_lib_version_req_sub_minor=`expr $boost_lib_version_req : '[[0-9]]*\.[[0-9]]*\.\([[0-9]]*\)'` - if test "x$boost_lib_version_req_sub_minor" = "x" ; then - boost_lib_version_req_sub_minor="0" - fi - WANT_BOOST_VERSION=`expr $boost_lib_version_req_major \* 100000 \+ $boost_lib_version_req_minor \* 100 \+ $boost_lib_version_req_sub_minor` - AC_MSG_CHECKING(for boostlib >= $boost_lib_version_req) +BOOST_LDFLAGS="" +BOOST_CPPFLAGS="" +AS_IF([test "x$want_boost" = "xyes"], + [_AX_BOOST_BASE_RUNDETECT([$1],[$2],[$3])]) +AC_SUBST(BOOST_CPPFLAGS) +AC_SUBST(BOOST_LDFLAGS) +]) + + +# convert a version string in $2 to numeric and affect to polymorphic var $1 +AC_DEFUN([_AX_BOOST_BASE_TONUMERICVERSION],[ + AS_IF([test "x$2" = "x"],[_AX_BOOST_BASE_TONUMERICVERSION_req="1.20.0"],[_AX_BOOST_BASE_TONUMERICVERSION_req="$2"]) + _AX_BOOST_BASE_TONUMERICVERSION_req_shorten=`expr $_AX_BOOST_BASE_TONUMERICVERSION_req : '\([[0-9]]*\.[[0-9]]*\)'` + _AX_BOOST_BASE_TONUMERICVERSION_req_major=`expr $_AX_BOOST_BASE_TONUMERICVERSION_req : '\([[0-9]]*\)'` + AS_IF([test "x$_AX_BOOST_BASE_TONUMERICVERSION_req_major" = "x"], + [AC_MSG_ERROR([You should at least specify libboost major version])]) + _AX_BOOST_BASE_TONUMERICVERSION_req_minor=`expr $_AX_BOOST_BASE_TONUMERICVERSION_req : '[[0-9]]*\.\([[0-9]]*\)'` + AS_IF([test "x$_AX_BOOST_BASE_TONUMERICVERSION_req_minor" = "x"], + [_AX_BOOST_BASE_TONUMERICVERSION_req_minor="0"]) + _AX_BOOST_BASE_TONUMERICVERSION_req_sub_minor=`expr $_AX_BOOST_BASE_TONUMERICVERSION_req : 
'[[0-9]]*\.[[0-9]]*\.\([[0-9]]*\)'` + AS_IF([test "X$_AX_BOOST_BASE_TONUMERICVERSION_req_sub_minor" = "X"], + [_AX_BOOST_BASE_TONUMERICVERSION_req_sub_minor="0"]) + _AX_BOOST_BASE_TONUMERICVERSION_RET=`expr $_AX_BOOST_BASE_TONUMERICVERSION_req_major \* 100000 \+ $_AX_BOOST_BASE_TONUMERICVERSION_req_minor \* 100 \+ $_AX_BOOST_BASE_TONUMERICVERSION_req_sub_minor` + AS_VAR_SET($1,$_AX_BOOST_BASE_TONUMERICVERSION_RET) +]) + +dnl Run the detection of boost should be run only if $want_boost +AC_DEFUN([_AX_BOOST_BASE_RUNDETECT],[ + _AX_BOOST_BASE_TONUMERICVERSION(WANT_BOOST_VERSION,[$1]) succeeded=no + + AC_REQUIRE([AC_CANONICAL_HOST]) dnl On 64-bit systems check for system libraries in both lib64 and lib. dnl The former is specified by FHS, but e.g. Debian does not adhere to dnl this (as it rises problems for generic multi-arch support). dnl The last entry in the list is chosen by default when no libraries dnl are found, e.g. when only header-only libraries are installed! - libsubdirs="lib" - ax_arch=`uname -m` - case $ax_arch in - x86_64) - libsubdirs="lib64 libx32 lib lib64" - ;; - ppc64|s390x|sparc64|aarch64) - libsubdirs="lib64 lib lib64" - ;; - esac + AS_CASE([${host_cpu}], + [x86_64],[libsubdirs="lib64 libx32 lib lib64"], + [mips*64*],[libsubdirs="lib64 lib32 lib lib64"], + [ppc64|powerpc64|s390x|sparc64|aarch64|ppc64le|powerpc64le|riscv64],[libsubdirs="lib64 lib lib64"], + [libsubdirs="lib"] + ) dnl allow for real multi-arch paths e.g. /usr/lib/x86_64-linux-gnu. Give dnl them priority over the other paths since, if libs are found there, they dnl are almost assuredly the ones desired. - AC_REQUIRE([AC_CANONICAL_HOST]) - libsubdirs="lib/${host_cpu}-${host_os} $libsubdirs" - - case ${host_cpu} in - i?86) - libsubdirs="lib/i386-${host_os} $libsubdirs" - ;; - esac - - dnl some arches may advertise a cpu type that doesn't line up with their - dnl prefix's cpu type. For example, uname may report armv7l while libs are - dnl installed to /usr/lib/arm-linux-gnueabihf. Try getting the compiler's - dnl value for an extra chance of finding the correct path. 
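The `_AX_BOOST_BASE_TONUMERICVERSION` helper introduced above reduces a dotted Boost version to the single integer that `BOOST_VERSION` reports (major * 100000 + minor * 100 + sub-minor), which is what the compile test later compares against. A small shell sketch of the same arithmetic, mirroring the `expr` calls in the macro (the 1.64.0 input is just an example):

```
req=1.64.0
major=`expr $req : '\([0-9]*\)'`
minor=`expr $req : '[0-9]*\.\([0-9]*\)'`
subminor=`expr $req : '[0-9]*\.[0-9]*\.\([0-9]*\)'`
# 1 * 100000 + 64 * 100 + 0 = 106400, the numeric form of Boost 1.64.0
expr $major \* 100000 \+ $minor \* 100 \+ $subminor
```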
- libsubdirs="lib/`$CXX -dumpmachine 2>/dev/null` $libsubdirs" + AS_CASE([${host_cpu}], + [i?86],[multiarch_libsubdir="lib/i386-${host_os}"], + [armv7l],[multiarch_libsubdir="lib/arm-${host_os}"], + [multiarch_libsubdir="lib/${host_cpu}-${host_os}"] + ) dnl first we check the system location for boost libraries dnl this location ist chosen if boost libraries are installed with the --layout=system option dnl or if you install boost with RPM - if test "$ac_boost_path" != ""; then - BOOST_CPPFLAGS="-I$ac_boost_path/include" - for ac_boost_path_tmp in $libsubdirs; do - if test -d "$ac_boost_path"/"$ac_boost_path_tmp" ; then - BOOST_LDFLAGS="-L$ac_boost_path/$ac_boost_path_tmp" - break - fi - done - elif test "$cross_compiling" != yes; then - for ac_boost_path_tmp in /usr /usr/local /opt /opt/local ; do - if test -d "$ac_boost_path_tmp/include/boost" && test -r "$ac_boost_path_tmp/include/boost"; then - for libsubdir in $libsubdirs ; do - if ls "$ac_boost_path_tmp/$libsubdir/libboost_"* >/dev/null 2>&1 ; then break; fi + AS_IF([test "x$_AX_BOOST_BASE_boost_path" != "x"],[ + AC_MSG_CHECKING([for boostlib >= $1 ($WANT_BOOST_VERSION) includes in "$_AX_BOOST_BASE_boost_path/include"]) + AS_IF([test -d "$_AX_BOOST_BASE_boost_path/include" && test -r "$_AX_BOOST_BASE_boost_path/include"],[ + AC_MSG_RESULT([yes]) + BOOST_CPPFLAGS="-I$_AX_BOOST_BASE_boost_path/include" + for _AX_BOOST_BASE_boost_path_tmp in $multiarch_libsubdir $libsubdirs; do + AC_MSG_CHECKING([for boostlib >= $1 ($WANT_BOOST_VERSION) lib path in "$_AX_BOOST_BASE_boost_path/$_AX_BOOST_BASE_boost_path_tmp"]) + AS_IF([test -d "$_AX_BOOST_BASE_boost_path/$_AX_BOOST_BASE_boost_path_tmp" && test -r "$_AX_BOOST_BASE_boost_path/$_AX_BOOST_BASE_boost_path_tmp" ],[ + AC_MSG_RESULT([yes]) + BOOST_LDFLAGS="-L$_AX_BOOST_BASE_boost_path/$_AX_BOOST_BASE_boost_path_tmp"; + break; + ], + [AC_MSG_RESULT([no])]) + done],[ + AC_MSG_RESULT([no])]) + ],[ + if test X"$cross_compiling" = Xyes; then + search_libsubdirs=$multiarch_libsubdir + else + search_libsubdirs="$multiarch_libsubdir $libsubdirs" + fi + for _AX_BOOST_BASE_boost_path_tmp in /usr /usr/local /opt /opt/local /opt/homebrew/; do + if test -d "$_AX_BOOST_BASE_boost_path_tmp/include/boost" && test -r "$_AX_BOOST_BASE_boost_path_tmp/include/boost" ; then + for libsubdir in $search_libsubdirs ; do + if ls "$_AX_BOOST_BASE_boost_path_tmp/$libsubdir/libboost_"* >/dev/null 2>&1 ; then break; fi done - BOOST_LDFLAGS="-L$ac_boost_path_tmp/$libsubdir" - BOOST_CPPFLAGS="-I$ac_boost_path_tmp/include" + BOOST_LDFLAGS="-L$_AX_BOOST_BASE_boost_path_tmp/$libsubdir" + BOOST_CPPFLAGS="-I$_AX_BOOST_BASE_boost_path_tmp/include" break; fi done - fi + ]) dnl overwrite ld flags if we have required special directory with dnl --with-boost-libdir parameter - if test "$ac_boost_lib_path" != ""; then - BOOST_LDFLAGS="-L$ac_boost_lib_path" - fi + AS_IF([test "x$_AX_BOOST_BASE_boost_lib_path" != "x"], + [BOOST_LDFLAGS="-L$_AX_BOOST_BASE_boost_lib_path"]) + AC_MSG_CHECKING([for boostlib >= $1 ($WANT_BOOST_VERSION)]) CPPFLAGS_SAVED="$CPPFLAGS" CPPFLAGS="$CPPFLAGS $BOOST_CPPFLAGS" export CPPFLAGS @@ -158,19 +179,11 @@ if test "x$want_boost" = "xyes"; then AC_REQUIRE([AC_PROG_CXX]) AC_LANG_PUSH(C++) - AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[ - @%:@include - ]], [[ - #if BOOST_VERSION >= $WANT_BOOST_VERSION - // Everything is okay - #else - # error Boost version is too old - #endif - ]])],[ + AC_COMPILE_IFELSE([_AX_BOOST_BASE_PROGRAM($WANT_BOOST_VERSION)],[ AC_MSG_RESULT(yes) succeeded=yes found_system=yes - ],[: + ],[ ]) 
AC_LANG_POP([C++]) @@ -178,30 +191,50 @@ if test "x$want_boost" = "xyes"; then dnl if we found no boost with system layout we search for boost libraries dnl built and installed without the --layout=system option or for a staged(not installed) version - if test "x$succeeded" != "xyes"; then + if test "x$succeeded" != "xyes" ; then + CPPFLAGS="$CPPFLAGS_SAVED" + LDFLAGS="$LDFLAGS_SAVED" + BOOST_CPPFLAGS= + if test -z "$_AX_BOOST_BASE_boost_lib_path" ; then + BOOST_LDFLAGS= + fi _version=0 - if test "$ac_boost_path" != ""; then - if test -d "$ac_boost_path" && test -r "$ac_boost_path"; then - for i in `ls -d $ac_boost_path/include/boost-* 2>/dev/null`; do - _version_tmp=`echo $i | sed "s#$ac_boost_path##" | sed 's/\/include\/boost-//' | sed 's/_/./'` + if test -n "$_AX_BOOST_BASE_boost_path" ; then + if test -d "$_AX_BOOST_BASE_boost_path" && test -r "$_AX_BOOST_BASE_boost_path"; then + for i in `ls -d $_AX_BOOST_BASE_boost_path/include/boost-* 2>/dev/null`; do + _version_tmp=`echo $i | sed "s#$_AX_BOOST_BASE_boost_path##" | sed 's/\/include\/boost-//' | sed 's/_/./'` V_CHECK=`expr $_version_tmp \> $_version` - if test "$V_CHECK" = "1" ; then + if test "x$V_CHECK" = "x1" ; then _version=$_version_tmp fi VERSION_UNDERSCORE=`echo $_version | sed 's/\./_/'` - BOOST_CPPFLAGS="-I$ac_boost_path/include/boost-$VERSION_UNDERSCORE" + BOOST_CPPFLAGS="-I$_AX_BOOST_BASE_boost_path/include/boost-$VERSION_UNDERSCORE" done + dnl if nothing found search for layout used in Windows distributions + if test -z "$BOOST_CPPFLAGS"; then + if test -d "$_AX_BOOST_BASE_boost_path/boost" && test -r "$_AX_BOOST_BASE_boost_path/boost"; then + BOOST_CPPFLAGS="-I$_AX_BOOST_BASE_boost_path" + fi + fi + dnl if we found something and BOOST_LDFLAGS was unset before + dnl (because "$_AX_BOOST_BASE_boost_lib_path" = ""), set it here. 
+ if test -n "$BOOST_CPPFLAGS" && test -z "$BOOST_LDFLAGS"; then + for libsubdir in $libsubdirs ; do + if ls "$_AX_BOOST_BASE_boost_path/$libsubdir/libboost_"* >/dev/null 2>&1 ; then break; fi + done + BOOST_LDFLAGS="-L$_AX_BOOST_BASE_boost_path/$libsubdir" + fi fi else - if test "$cross_compiling" != yes; then - for ac_boost_path in /usr /usr/local /opt /opt/local ; do - if test -d "$ac_boost_path" && test -r "$ac_boost_path"; then - for i in `ls -d $ac_boost_path/include/boost-* 2>/dev/null`; do - _version_tmp=`echo $i | sed "s#$ac_boost_path##" | sed 's/\/include\/boost-//' | sed 's/_/./'` + if test "x$cross_compiling" != "xyes" ; then + for _AX_BOOST_BASE_boost_path in /usr /usr/local /opt /opt/local /opt/homebrew ; do + if test -d "$_AX_BOOST_BASE_boost_path" && test -r "$_AX_BOOST_BASE_boost_path" ; then + for i in `ls -d $_AX_BOOST_BASE_boost_path/include/boost-* 2>/dev/null`; do + _version_tmp=`echo $i | sed "s#$_AX_BOOST_BASE_boost_path##" | sed 's/\/include\/boost-//' | sed 's/_/./'` V_CHECK=`expr $_version_tmp \> $_version` - if test "$V_CHECK" = "1" ; then + if test "x$V_CHECK" = "x1" ; then _version=$_version_tmp - best_path=$ac_boost_path + best_path=$_AX_BOOST_BASE_boost_path fi done fi @@ -209,7 +242,7 @@ if test "x$want_boost" = "xyes"; then VERSION_UNDERSCORE=`echo $_version | sed 's/\./_/'` BOOST_CPPFLAGS="-I$best_path/include/boost-$VERSION_UNDERSCORE" - if test "$ac_boost_lib_path" = ""; then + if test -z "$_AX_BOOST_BASE_boost_lib_path" ; then for libsubdir in $libsubdirs ; do if ls "$best_path/$libsubdir/libboost_"* >/dev/null 2>&1 ; then break; fi done @@ -217,7 +250,7 @@ if test "x$want_boost" = "xyes"; then fi fi - if test "x$BOOST_ROOT" != "x"; then + if test -n "$BOOST_ROOT" ; then for libsubdir in $libsubdirs ; do if ls "$BOOST_ROOT/stage/$libsubdir/libboost_"* >/dev/null 2>&1 ; then break; fi done @@ -226,7 +259,7 @@ if test "x$want_boost" = "xyes"; then stage_version=`echo $version_dir | sed 's/boost_//' | sed 's/_/./g'` stage_version_shorten=`expr $stage_version : '\([[0-9]]*\.[[0-9]]*\)'` V_CHECK=`expr $stage_version_shorten \>\= $_version` - if test "$V_CHECK" = "1" -a "$ac_boost_lib_path" = "" ; then + if test "x$V_CHECK" = "x1" && test -z "$_AX_BOOST_BASE_boost_lib_path" ; then AC_MSG_NOTICE(We will use a staged boost library from $BOOST_ROOT) BOOST_CPPFLAGS="-I$BOOST_ROOT" BOOST_LDFLAGS="-L$BOOST_ROOT/stage/$libsubdir" @@ -241,34 +274,24 @@ if test "x$want_boost" = "xyes"; then export LDFLAGS AC_LANG_PUSH(C++) - AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[ - @%:@include - ]], [[ - #if BOOST_VERSION >= $WANT_BOOST_VERSION - // Everything is okay - #else - # error Boost version is too old - #endif - ]])],[ + AC_COMPILE_IFELSE([_AX_BOOST_BASE_PROGRAM($WANT_BOOST_VERSION)],[ AC_MSG_RESULT(yes) succeeded=yes found_system=yes - ],[: + ],[ ]) AC_LANG_POP([C++]) fi - if test "$succeeded" != "yes" ; then - if test "$_version" = "0" ; then - AC_MSG_NOTICE([[We could not detect the boost libraries (version $boost_lib_version_req_shorten or higher). If you have a staged boost library (still not installed) please specify \$BOOST_ROOT in your environment and do not give a PATH to --with-boost option. If you are sure you have boost installed, then check your version number looking in . See http://randspringer.de/boost for more documentation.]]) + if test "x$succeeded" != "xyes" ; then + if test "x$_version" = "x0" ; then + AC_MSG_NOTICE([[We could not detect the boost libraries (version $1 or higher). 
If you have a staged boost library (still not installed) please specify \$BOOST_ROOT in your environment and do not give a PATH to --with-boost option. If you are sure you have boost installed, then check your version number looking in . See http://randspringer.de/boost for more documentation.]]) else AC_MSG_NOTICE([Your boost libraries seems to old (version $_version).]) fi # execute ACTION-IF-NOT-FOUND (if present): ifelse([$3], , :, [$3]) else - AC_SUBST(BOOST_CPPFLAGS) - AC_SUBST(BOOST_LDFLAGS) AC_DEFINE(HAVE_BOOST,,[define if the Boost library is available]) # execute ACTION-IF-FOUND (if present): ifelse([$2], , :, [$2]) @@ -276,6 +299,5 @@ if test "x$want_boost" = "xyes"; then CPPFLAGS="$CPPFLAGS_SAVED" LDFLAGS="$LDFLAGS_SAVED" -fi ]) diff --git a/build-aux/m4/ax_boost_chrono.m4 b/build-aux/m4/ax_boost_chrono.m4 deleted file mode 100644 index 318ecea17fab8..0000000000000 --- a/build-aux/m4/ax_boost_chrono.m4 +++ /dev/null @@ -1,119 +0,0 @@ -# =========================================================================== -# http://www.gnu.org/software/autoconf-archive/ax_boost_chrono.html -# =========================================================================== -# -# SYNOPSIS -# -# AX_BOOST_CHRONO -# -# DESCRIPTION -# -# Test for System library from the Boost C++ libraries. The macro requires -# a preceding call to AX_BOOST_BASE. Further documentation is available at -# . -# -# This macro calls: -# -# AC_SUBST(BOOST_CHRONO_LIB) -# -# And sets: -# -# HAVE_BOOST_CHRONO -# -# LICENSE -# -# Copyright (c) 2012 Xiyue Deng -# -# Copying and distribution of this file, with or without modification, are -# permitted in any medium without royalty provided the copyright notice -# and this notice are preserved. This file is offered as-is, without any -# warranty. - -#serial 1 - -AC_DEFUN([AX_BOOST_CHRONO], -[ - AC_ARG_WITH([boost-chrono], - AS_HELP_STRING([--with-boost-chrono@<:@=special-lib@:>@], - [use the Chrono library from boost - it is possible to specify a certain library for the linker - e.g. 
--with-boost-chrono=boost_chrono-gcc-mt ]), - [ - if test "$withval" = "no"; then - want_boost="no" - elif test "$withval" = "yes"; then - want_boost="yes" - ax_boost_user_chrono_lib="" - else - want_boost="yes" - ax_boost_user_chrono_lib="$withval" - fi - ], - [want_boost="yes"] - ) - - if test "x$want_boost" = "xyes"; then - AC_REQUIRE([AC_PROG_CC]) - AC_REQUIRE([AC_CANONICAL_BUILD]) - CPPFLAGS_SAVED="$CPPFLAGS" - CPPFLAGS="$CPPFLAGS $BOOST_CPPFLAGS" - export CPPFLAGS - - LDFLAGS_SAVED="$LDFLAGS" - LDFLAGS="$LDFLAGS $BOOST_LDFLAGS" - export LDFLAGS - - AC_CACHE_CHECK(whether the Boost::Chrono library is available, - ax_cv_boost_chrono, - [AC_LANG_PUSH([C++]) - CXXFLAGS_SAVE=$CXXFLAGS - - AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[@%:@include ]], - [[boost::chrono::system_clock::time_point time;]])], - ax_cv_boost_chrono=yes, ax_cv_boost_chrono=no) - CXXFLAGS=$CXXFLAGS_SAVE - AC_LANG_POP([C++]) - ]) - if test "x$ax_cv_boost_chrono" = "xyes"; then - AC_SUBST(BOOST_CPPFLAGS) - - AC_DEFINE(HAVE_BOOST_CHRONO,,[define if the Boost::Chrono library is available]) - BOOSTLIBDIR=`echo $BOOST_LDFLAGS | sed -e 's/@<:@^\/@:>@*//'` - - LDFLAGS_SAVE=$LDFLAGS - if test "x$ax_boost_user_chrono_lib" = "x"; then - ax_lib= - for libextension in `ls $BOOSTLIBDIR/libboost_chrono*.so* $BOOSTLIBDIR/libboost_chrono*.dylib* $BOOSTLIBDIR/libboost_chrono*.a* 2>/dev/null | sed 's,.*/,,' | sed -e 's;^lib\(boost_chrono.*\)\.so.*$;\1;' -e 's;^lib\(boost_chrono.*\)\.dylib.*$;\1;' -e 's;^lib\(boost_chrono.*\)\.a.*$;\1;'` ; do - ax_lib=${libextension} - AC_CHECK_LIB($ax_lib, exit, - [BOOST_CHRONO_LIB="-l$ax_lib"; AC_SUBST(BOOST_CHRONO_LIB) link_chrono="yes"; break], - [link_chrono="no"]) - done - if test "x$link_chrono" != "xyes"; then - for libextension in `ls $BOOSTLIBDIR/boost_chrono*.dll* $BOOSTLIBDIR/boost_chrono*.a* 2>/dev/null | sed 's,.*/,,' | sed -e 's;^\(boost_chrono.*\)\.dll.*$;\1;' -e 's;^\(boost_chrono.*\)\.a.*$;\1;'` ; do - ax_lib=${libextension} - AC_CHECK_LIB($ax_lib, exit, - [BOOST_CHRONO_LIB="-l$ax_lib"; AC_SUBST(BOOST_CHRONO_LIB) link_chrono="yes"; break], - [link_chrono="no"]) - done - fi - - else - for ax_lib in $ax_boost_user_chrono_lib boost_chrono-$ax_boost_user_chrono_lib; do - AC_CHECK_LIB($ax_lib, exit, - [BOOST_CHRONO_LIB="-l$ax_lib"; AC_SUBST(BOOST_CHRONO_LIB) link_chrono="yes"; break], - [link_chrono="no"]) - done - - fi - if test "x$ax_lib" = "x"; then - AC_MSG_ERROR(Could not find a version of the boost_chrono library!) - fi - if test "x$link_chrono" = "xno"; then - AC_MSG_ERROR(Could not link against $ax_lib !) - fi - fi - - CPPFLAGS="$CPPFLAGS_SAVED" - LDFLAGS="$LDFLAGS_SAVED" - fi -]) diff --git a/build-aux/m4/ax_boost_filesystem.m4 b/build-aux/m4/ax_boost_filesystem.m4 deleted file mode 100644 index f5c9d56470bf9..0000000000000 --- a/build-aux/m4/ax_boost_filesystem.m4 +++ /dev/null @@ -1,119 +0,0 @@ -# =========================================================================== -# http://www.gnu.org/software/autoconf-archive/ax_boost_filesystem.html -# =========================================================================== -# -# SYNOPSIS -# -# AX_BOOST_FILESYSTEM -# -# DESCRIPTION -# -# Test for Filesystem library from the Boost C++ libraries. The macro -# requires a preceding call to AX_BOOST_BASE. Further documentation is -# available at . 
-# -# This macro calls: -# -# AC_SUBST(BOOST_FILESYSTEM_LIB) -# -# And sets: -# -# HAVE_BOOST_FILESYSTEM -# -# LICENSE -# -# Copyright (c) 2009 Thomas Porschberg -# Copyright (c) 2009 Michael Tindal -# Copyright (c) 2009 Roman Rybalko -# -# Copying and distribution of this file, with or without modification, are -# permitted in any medium without royalty provided the copyright notice -# and this notice are preserved. This file is offered as-is, without any -# warranty. - -#serial 26 - -AC_DEFUN([AX_BOOST_FILESYSTEM], -[ - AC_ARG_WITH([boost-filesystem], - AS_HELP_STRING([--with-boost-filesystem@<:@=special-lib@:>@], - [use the Filesystem library from boost - it is possible to specify a certain library for the linker - e.g. --with-boost-filesystem=boost_filesystem-gcc-mt ]), - [ - if test "$withval" = "no"; then - want_boost="no" - elif test "$withval" = "yes"; then - want_boost="yes" - ax_boost_user_filesystem_lib="" - else - want_boost="yes" - ax_boost_user_filesystem_lib="$withval" - fi - ], - [want_boost="yes"] - ) - - if test "x$want_boost" = "xyes"; then - AC_REQUIRE([AC_PROG_CC]) - CPPFLAGS_SAVED="$CPPFLAGS" - CPPFLAGS="$CPPFLAGS $BOOST_CPPFLAGS" - export CPPFLAGS - - LDFLAGS_SAVED="$LDFLAGS" - LDFLAGS="$LDFLAGS $BOOST_LDFLAGS" - export LDFLAGS - - LIBS_SAVED=$LIBS - LIBS="$LIBS $BOOST_SYSTEM_LIB" - export LIBS - - AC_CACHE_CHECK(whether the Boost::Filesystem library is available, - ax_cv_boost_filesystem, - [AC_LANG_PUSH([C++]) - AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[@%:@include ]], - [[using namespace boost::filesystem; - path my_path( "foo/bar/data.txt" ); - return 0;]])], - ax_cv_boost_filesystem=yes, ax_cv_boost_filesystem=no) - AC_LANG_POP([C++]) - ]) - if test "x$ax_cv_boost_filesystem" = "xyes"; then - AC_DEFINE(HAVE_BOOST_FILESYSTEM,,[define if the Boost::Filesystem library is available]) - BOOSTLIBDIR=`echo $BOOST_LDFLAGS | sed -e 's/@<:@^\/@:>@*//'` - ax_lib= - if test "x$ax_boost_user_filesystem_lib" = "x"; then - for libextension in `ls -r $BOOSTLIBDIR/libboost_filesystem* 2>/dev/null | sed 's,.*/lib,,' | sed 's,\..*,,'` ; do - ax_lib=${libextension} - AC_CHECK_LIB($ax_lib, exit, - [BOOST_FILESYSTEM_LIB="-l$ax_lib"; AC_SUBST(BOOST_FILESYSTEM_LIB) link_filesystem="yes"; break], - [link_filesystem="no"]) - done - if test "x$link_filesystem" != "xyes"; then - for libextension in `ls -r $BOOSTLIBDIR/boost_filesystem* 2>/dev/null | sed 's,.*/,,' | sed -e 's,\..*,,'` ; do - ax_lib=${libextension} - AC_CHECK_LIB($ax_lib, exit, - [BOOST_FILESYSTEM_LIB="-l$ax_lib"; AC_SUBST(BOOST_FILESYSTEM_LIB) link_filesystem="yes"; break], - [link_filesystem="no"]) - done - fi - else - for ax_lib in $ax_boost_user_filesystem_lib boost_filesystem-$ax_boost_user_filesystem_lib; do - AC_CHECK_LIB($ax_lib, exit, - [BOOST_FILESYSTEM_LIB="-l$ax_lib"; AC_SUBST(BOOST_FILESYSTEM_LIB) link_filesystem="yes"; break], - [link_filesystem="no"]) - done - - fi - if test "x$ax_lib" = "x"; then - AC_MSG_ERROR(Could not find a version of the boost_filesystem library!) - fi - if test "x$link_filesystem" != "xyes"; then - AC_MSG_ERROR(Could not link against $ax_lib !) 
- fi - fi - - CPPFLAGS="$CPPFLAGS_SAVED" - LDFLAGS="$LDFLAGS_SAVED" - LIBS="$LIBS_SAVED" - fi -]) diff --git a/build-aux/m4/ax_boost_program_options.m4 b/build-aux/m4/ax_boost_program_options.m4 deleted file mode 100644 index f591441854f59..0000000000000 --- a/build-aux/m4/ax_boost_program_options.m4 +++ /dev/null @@ -1,109 +0,0 @@ -# ============================================================================ -# http://www.gnu.org/software/autoconf-archive/ax_boost_program_options.html -# ============================================================================ -# -# SYNOPSIS -# -# AX_BOOST_PROGRAM_OPTIONS -# -# DESCRIPTION -# -# Test for program options library from the Boost C++ libraries. The macro -# requires a preceding call to AX_BOOST_BASE. Further documentation is -# available at . -# -# This macro calls: -# -# AC_SUBST(BOOST_PROGRAM_OPTIONS_LIB) -# -# And sets: -# -# HAVE_BOOST_PROGRAM_OPTIONS -# -# LICENSE -# -# Copyright (c) 2009 Thomas Porschberg -# -# Copying and distribution of this file, with or without modification, are -# permitted in any medium without royalty provided the copyright notice -# and this notice are preserved. This file is offered as-is, without any -# warranty. - -#serial 22 - -AC_DEFUN([AX_BOOST_PROGRAM_OPTIONS], -[ - AC_ARG_WITH([boost-program-options], - AS_HELP_STRING([--with-boost-program-options@<:@=special-lib@:>@], - [use the program options library from boost - it is possible to specify a certain library for the linker - e.g. --with-boost-program-options=boost_program_options-gcc-mt-1_33_1 ]), - [ - if test "$withval" = "no"; then - want_boost="no" - elif test "$withval" = "yes"; then - want_boost="yes" - ax_boost_user_program_options_lib="" - else - want_boost="yes" - ax_boost_user_program_options_lib="$withval" - fi - ], - [want_boost="yes"] - ) - - if test "x$want_boost" = "xyes"; then - AC_REQUIRE([AC_PROG_CC]) - export want_boost - CPPFLAGS_SAVED="$CPPFLAGS" - CPPFLAGS="$CPPFLAGS $BOOST_CPPFLAGS" - export CPPFLAGS - LDFLAGS_SAVED="$LDFLAGS" - LDFLAGS="$LDFLAGS $BOOST_LDFLAGS" - export LDFLAGS - AC_CACHE_CHECK([whether the Boost::Program_Options library is available], - ax_cv_boost_program_options, - [AC_LANG_PUSH(C++) - AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[@%:@include - ]], - [[boost::program_options::options_description generic("Generic options"); - return 0;]])], - ax_cv_boost_program_options=yes, ax_cv_boost_program_options=no) - AC_LANG_POP([C++]) - ]) - if test "$ax_cv_boost_program_options" = yes; then - AC_DEFINE(HAVE_BOOST_PROGRAM_OPTIONS,,[define if the Boost::PROGRAM_OPTIONS library is available]) - BOOSTLIBDIR=`echo $BOOST_LDFLAGS | sed -e 's/@<:@^\/@:>@*//'` - if test "x$ax_boost_user_program_options_lib" = "x"; then - ax_lib= - for libextension in `ls $BOOSTLIBDIR/libboost_program_options*.so* 2>/dev/null | sed 's,.*/,,' | sed -e 's;^lib\(boost_program_options.*\)\.so.*$;\1;'` `ls $BOOSTLIBDIR/libboost_program_options*.dylib* 2>/dev/null | sed 's,.*/,,' | sed -e 's;^lib\(boost_program_options.*\)\.dylib.*$;\1;'` `ls $BOOSTLIBDIR/libboost_program_options*.a* 2>/dev/null | sed 's,.*/,,' | sed -e 's;^lib\(boost_program_options.*\)\.a.*$;\1;'` ; do - ax_lib=${libextension} - AC_CHECK_LIB($ax_lib, exit, - [BOOST_PROGRAM_OPTIONS_LIB="-l$ax_lib"; AC_SUBST(BOOST_PROGRAM_OPTIONS_LIB) link_program_options="yes"; break], - [link_program_options="no"]) - done - if test "x$link_program_options" != "xyes"; then - for libextension in `ls $BOOSTLIBDIR/boost_program_options*.dll* 2>/dev/null | sed 's,.*/,,' | sed -e 
's;^\(boost_program_options.*\)\.dll.*$;\1;'` `ls $BOOSTLIBDIR/boost_program_options*.a* 2>/dev/null | sed 's,.*/,,' | sed -e 's;^\(boost_program_options.*\)\.a.*$;\1;'` ; do - ax_lib=${libextension} - AC_CHECK_LIB($ax_lib, exit, - [BOOST_PROGRAM_OPTIONS_LIB="-l$ax_lib"; AC_SUBST(BOOST_PROGRAM_OPTIONS_LIB) link_program_options="yes"; break], - [link_program_options="no"]) - done - fi - else - for ax_lib in $ax_boost_user_program_options_lib boost_program_options-$ax_boost_user_program_options_lib; do - AC_CHECK_LIB($ax_lib, main, - [BOOST_PROGRAM_OPTIONS_LIB="-l$ax_lib"; AC_SUBST(BOOST_PROGRAM_OPTIONS_LIB) link_program_options="yes"; break], - [link_program_options="no"]) - done - fi - if test "x$ax_lib" = "x"; then - AC_MSG_ERROR(Could not find a version of the boost_program_options library!) - fi - if test "x$link_program_options" != "xyes"; then - AC_MSG_ERROR([Could not link against [$ax_lib] !]) - fi - fi - CPPFLAGS="$CPPFLAGS_SAVED" - LDFLAGS="$LDFLAGS_SAVED" - fi -]) diff --git a/build-aux/m4/ax_boost_system.m4 b/build-aux/m4/ax_boost_system.m4 deleted file mode 100644 index 9c78280fcae47..0000000000000 --- a/build-aux/m4/ax_boost_system.m4 +++ /dev/null @@ -1,121 +0,0 @@ -# =========================================================================== -# http://www.gnu.org/software/autoconf-archive/ax_boost_system.html -# =========================================================================== -# -# SYNOPSIS -# -# AX_BOOST_SYSTEM -# -# DESCRIPTION -# -# Test for System library from the Boost C++ libraries. The macro requires -# a preceding call to AX_BOOST_BASE. Further documentation is available at -# . -# -# This macro calls: -# -# AC_SUBST(BOOST_SYSTEM_LIB) -# -# And sets: -# -# HAVE_BOOST_SYSTEM -# -# LICENSE -# -# Copyright (c) 2008 Thomas Porschberg -# Copyright (c) 2008 Michael Tindal -# Copyright (c) 2008 Daniel Casimiro -# -# Copying and distribution of this file, with or without modification, are -# permitted in any medium without royalty provided the copyright notice -# and this notice are preserved. This file is offered as-is, without any -# warranty. - -#serial 17 - -AC_DEFUN([AX_BOOST_SYSTEM], -[ - AC_ARG_WITH([boost-system], - AS_HELP_STRING([--with-boost-system@<:@=special-lib@:>@], - [use the System library from boost - it is possible to specify a certain library for the linker - e.g. 
--with-boost-system=boost_system-gcc-mt ]), - [ - if test "$withval" = "no"; then - want_boost="no" - elif test "$withval" = "yes"; then - want_boost="yes" - ax_boost_user_system_lib="" - else - want_boost="yes" - ax_boost_user_system_lib="$withval" - fi - ], - [want_boost="yes"] - ) - - if test "x$want_boost" = "xyes"; then - AC_REQUIRE([AC_PROG_CC]) - AC_REQUIRE([AC_CANONICAL_BUILD]) - CPPFLAGS_SAVED="$CPPFLAGS" - CPPFLAGS="$CPPFLAGS $BOOST_CPPFLAGS" - export CPPFLAGS - - LDFLAGS_SAVED="$LDFLAGS" - LDFLAGS="$LDFLAGS $BOOST_LDFLAGS" - export LDFLAGS - - AC_CACHE_CHECK(whether the Boost::System library is available, - ax_cv_boost_system, - [AC_LANG_PUSH([C++]) - CXXFLAGS_SAVE=$CXXFLAGS - - AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[@%:@include ]], - [[boost::system::system_category]])], - ax_cv_boost_system=yes, ax_cv_boost_system=no) - CXXFLAGS=$CXXFLAGS_SAVE - AC_LANG_POP([C++]) - ]) - if test "x$ax_cv_boost_system" = "xyes"; then - AC_SUBST(BOOST_CPPFLAGS) - - AC_DEFINE(HAVE_BOOST_SYSTEM,,[define if the Boost::System library is available]) - BOOSTLIBDIR=`echo $BOOST_LDFLAGS | sed -e 's/@<:@^\/@:>@*//'` - - LDFLAGS_SAVE=$LDFLAGS - if test "x$ax_boost_user_system_lib" = "x"; then - ax_lib= - for libextension in `ls -r $BOOSTLIBDIR/libboost_system* 2>/dev/null | sed 's,.*/lib,,' | sed 's,\..*,,'` ; do - ax_lib=${libextension} - AC_CHECK_LIB($ax_lib, exit, - [BOOST_SYSTEM_LIB="-l$ax_lib"; AC_SUBST(BOOST_SYSTEM_LIB) link_system="yes"; break], - [link_system="no"]) - done - if test "x$link_system" != "xyes"; then - for libextension in `ls -r $BOOSTLIBDIR/boost_system* 2>/dev/null | sed 's,.*/,,' | sed -e 's,\..*,,'` ; do - ax_lib=${libextension} - AC_CHECK_LIB($ax_lib, exit, - [BOOST_SYSTEM_LIB="-l$ax_lib"; AC_SUBST(BOOST_SYSTEM_LIB) link_system="yes"; break], - [link_system="no"]) - done - fi - - else - for ax_lib in $ax_boost_user_system_lib boost_system-$ax_boost_user_system_lib; do - AC_CHECK_LIB($ax_lib, exit, - [BOOST_SYSTEM_LIB="-l$ax_lib"; AC_SUBST(BOOST_SYSTEM_LIB) link_system="yes"; break], - [link_system="no"]) - done - - fi - if test "x$ax_lib" = "x"; then - AC_MSG_ERROR(Could not find a version of the boost_system library!) - fi - if test "x$link_system" = "xno"; then - AC_MSG_ERROR(Could not link against $ax_lib !) - fi - fi - - CPPFLAGS="$CPPFLAGS_SAVED" - LDFLAGS="$LDFLAGS_SAVED" - fi -]) diff --git a/build-aux/m4/ax_boost_thread.m4 b/build-aux/m4/ax_boost_thread.m4 deleted file mode 100644 index 9f0bd0b23c9b0..0000000000000 --- a/build-aux/m4/ax_boost_thread.m4 +++ /dev/null @@ -1,150 +0,0 @@ -# =========================================================================== -# http://www.gnu.org/software/autoconf-archive/ax_boost_thread.html -# =========================================================================== -# -# SYNOPSIS -# -# AX_BOOST_THREAD -# -# DESCRIPTION -# -# Test for Thread library from the Boost C++ libraries. The macro requires -# a preceding call to AX_BOOST_BASE. Further documentation is available at -# . -# -# This macro calls: -# -# AC_SUBST(BOOST_THREAD_LIB) -# -# And sets: -# -# HAVE_BOOST_THREAD -# -# LICENSE -# -# Copyright (c) 2009 Thomas Porschberg -# Copyright (c) 2009 Michael Tindal -# -# Copying and distribution of this file, with or without modification, are -# permitted in any medium without royalty provided the copyright notice -# and this notice are preserved. This file is offered as-is, without any -# warranty. 
- -#serial 27 - -AC_DEFUN([AX_BOOST_THREAD], -[ - AC_ARG_WITH([boost-thread], - AS_HELP_STRING([--with-boost-thread@<:@=special-lib@:>@], - [use the Thread library from boost - it is possible to specify a certain library for the linker - e.g. --with-boost-thread=boost_thread-gcc-mt ]), - [ - if test "$withval" = "no"; then - want_boost="no" - elif test "$withval" = "yes"; then - want_boost="yes" - ax_boost_user_thread_lib="" - else - want_boost="yes" - ax_boost_user_thread_lib="$withval" - fi - ], - [want_boost="yes"] - ) - - if test "x$want_boost" = "xyes"; then - AC_REQUIRE([AC_PROG_CC]) - AC_REQUIRE([AC_CANONICAL_BUILD]) - CPPFLAGS_SAVED="$CPPFLAGS" - CPPFLAGS="$CPPFLAGS $BOOST_CPPFLAGS" - export CPPFLAGS - - LDFLAGS_SAVED="$LDFLAGS" - LDFLAGS="$LDFLAGS $BOOST_LDFLAGS" - export LDFLAGS - - AC_CACHE_CHECK(whether the Boost::Thread library is available, - ax_cv_boost_thread, - [AC_LANG_PUSH([C++]) - CXXFLAGS_SAVE=$CXXFLAGS - - if test "x$host_os" = "xsolaris" ; then - CXXFLAGS="-pthreads $CXXFLAGS" - elif test "x$host_os" = "xmingw32" ; then - CXXFLAGS="-mthreads $CXXFLAGS" - else - CXXFLAGS="-pthread $CXXFLAGS" - fi - AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[@%:@include ]], - [[boost::thread_group thrds; - return 0;]])], - ax_cv_boost_thread=yes, ax_cv_boost_thread=no) - CXXFLAGS=$CXXFLAGS_SAVE - AC_LANG_POP([C++]) - ]) - if test "x$ax_cv_boost_thread" = "xyes"; then - if test "x$host_os" = "xsolaris" ; then - BOOST_CPPFLAGS="-pthreads $BOOST_CPPFLAGS" - elif test "x$host_os" = "xmingw32" ; then - BOOST_CPPFLAGS="-mthreads $BOOST_CPPFLAGS" - else - BOOST_CPPFLAGS="-pthread $BOOST_CPPFLAGS" - fi - - AC_SUBST(BOOST_CPPFLAGS) - - AC_DEFINE(HAVE_BOOST_THREAD,,[define if the Boost::Thread library is available]) - BOOSTLIBDIR=`echo $BOOST_LDFLAGS | sed -e 's/@<:@^\/@:>@*//'` - - LDFLAGS_SAVE=$LDFLAGS - case "x$host_os" in - *bsd* ) - LDFLAGS="-pthread $LDFLAGS" - break; - ;; - esac - if test "x$ax_boost_user_thread_lib" = "x"; then - ax_lib= - for libextension in `ls -r $BOOSTLIBDIR/libboost_thread* 2>/dev/null | sed 's,.*/lib,,' | sed 's,\..*,,'`; do - ax_lib=${libextension} - AC_CHECK_LIB($ax_lib, exit, - [BOOST_THREAD_LIB="-l$ax_lib"; AC_SUBST(BOOST_THREAD_LIB) link_thread="yes"; break], - [link_thread="no"]) - done - if test "x$link_thread" != "xyes"; then - for libextension in `ls -r $BOOSTLIBDIR/boost_thread* 2>/dev/null | sed 's,.*/,,' | sed 's,\..*,,'`; do - ax_lib=${libextension} - AC_CHECK_LIB($ax_lib, exit, - [BOOST_THREAD_LIB="-l$ax_lib"; AC_SUBST(BOOST_THREAD_LIB) link_thread="yes"; break], - [link_thread="no"]) - done - fi - - else - for ax_lib in $ax_boost_user_thread_lib boost_thread-$ax_boost_user_thread_lib; do - AC_CHECK_LIB($ax_lib, exit, - [BOOST_THREAD_LIB="-l$ax_lib"; AC_SUBST(BOOST_THREAD_LIB) link_thread="yes"; break], - [link_thread="no"]) - done - - fi - if test "x$ax_lib" = "x"; then - AC_MSG_ERROR(Could not find a version of the boost_thread library!) - fi - if test "x$link_thread" = "xno"; then - AC_MSG_ERROR(Could not link against $ax_lib !) 
- else - case "x$host_os" in - *bsd* ) - BOOST_LDFLAGS="-pthread $BOOST_LDFLAGS" - break; - ;; - esac - - fi - fi - - CPPFLAGS="$CPPFLAGS_SAVED" - LDFLAGS="$LDFLAGS_SAVED" - fi -]) diff --git a/build-aux/m4/ax_boost_unit_test_framework.m4 b/build-aux/m4/ax_boost_unit_test_framework.m4 deleted file mode 100644 index 4efd1e2f18be1..0000000000000 --- a/build-aux/m4/ax_boost_unit_test_framework.m4 +++ /dev/null @@ -1,138 +0,0 @@ -# ================================================================================ -# http://www.gnu.org/software/autoconf-archive/ax_boost_unit_test_framework.html -# ================================================================================ -# -# SYNOPSIS -# -# AX_BOOST_UNIT_TEST_FRAMEWORK -# -# DESCRIPTION -# -# Test for Unit_Test_Framework library from the Boost C++ libraries. The -# macro requires a preceding call to AX_BOOST_BASE. Further documentation -# is available at . -# -# This macro calls: -# -# AC_SUBST(BOOST_UNIT_TEST_FRAMEWORK_LIB) -# -# And sets: -# -# HAVE_BOOST_UNIT_TEST_FRAMEWORK -# -# LICENSE -# -# Copyright (c) 2008 Thomas Porschberg -# -# Copying and distribution of this file, with or without modification, are -# permitted in any medium without royalty provided the copyright notice -# and this notice are preserved. This file is offered as-is, without any -# warranty. - -#serial 19 - -AC_DEFUN([AX_BOOST_UNIT_TEST_FRAMEWORK], -[ - AC_ARG_WITH([boost-unit-test-framework], - AS_HELP_STRING([--with-boost-unit-test-framework@<:@=special-lib@:>@], - [use the Unit_Test_Framework library from boost - it is possible to specify a certain library for the linker - e.g. --with-boost-unit-test-framework=boost_unit_test_framework-gcc ]), - [ - if test "$withval" = "no"; then - want_boost="no" - elif test "$withval" = "yes"; then - want_boost="yes" - ax_boost_user_unit_test_framework_lib="" - else - want_boost="yes" - ax_boost_user_unit_test_framework_lib="$withval" - fi - ], - [want_boost="yes"] - ) - - if test "x$want_boost" = "xyes"; then - AC_REQUIRE([AC_PROG_CC]) - CPPFLAGS_SAVED="$CPPFLAGS" - CPPFLAGS="$CPPFLAGS $BOOST_CPPFLAGS" - export CPPFLAGS - - LDFLAGS_SAVED="$LDFLAGS" - LDFLAGS="$LDFLAGS $BOOST_LDFLAGS" - export LDFLAGS - - AC_CACHE_CHECK(whether the Boost::Unit_Test_Framework library is available, - ax_cv_boost_unit_test_framework, - [AC_LANG_PUSH([C++]) - AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[@%:@include ]], - [[using boost::unit_test::test_suite; - test_suite* test= BOOST_TEST_SUITE( "Unit test example 1" ); return 0;]])], - ax_cv_boost_unit_test_framework=yes, ax_cv_boost_unit_test_framework=no) - AC_LANG_POP([C++]) - ]) - if test "x$ax_cv_boost_unit_test_framework" = "xyes"; then - AC_DEFINE(HAVE_BOOST_UNIT_TEST_FRAMEWORK,,[define if the Boost::Unit_Test_Framework library is available]) - BOOSTLIBDIR=`echo $BOOST_LDFLAGS | sed -e 's/@<:@^\/@:>@*//'` - - if test "x$ax_boost_user_unit_test_framework_lib" = "x"; then - saved_ldflags="${LDFLAGS}" - ax_lib= - for monitor_library in `ls $BOOSTLIBDIR/libboost_unit_test_framework*.so* $BOOSTLIBDIR/libboost_unit_test_framework*.dylib* $BOOSTLIBDIR/libboost_unit_test_framework*.a* 2>/dev/null` ; do - if test -r $monitor_library ; then - libextension=`echo $monitor_library | sed 's,.*/,,' | sed -e 's;^lib\(boost_unit_test_framework.*\)\.so.*$;\1;' -e 's;^lib\(boost_unit_test_framework.*\)\.dylib.*$;\1;' -e 's;^lib\(boost_unit_test_framework.*\)\.a.*$;\1;'` - ax_lib=${libextension} - link_unit_test_framework="yes" - else - link_unit_test_framework="no" - fi - - if test "x$link_unit_test_framework" = 
"xyes"; then - BOOST_UNIT_TEST_FRAMEWORK_LIB="-l$ax_lib" - AC_SUBST(BOOST_UNIT_TEST_FRAMEWORK_LIB) - break - fi - done - if test "x$link_unit_test_framework" != "xyes"; then - for libextension in `ls $BOOSTLIBDIR/boost_unit_test_framework*.dll* $BOOSTLIBDIR/boost_unit_test_framework*.a* 2>/dev/null | sed 's,.*/,,' | sed -e 's;^\(boost_unit_test_framework.*\)\.dll.*$;\1;' -e 's;^\(boost_unit_test_framework.*\)\.a.*$;\1;'` ; do - ax_lib=${libextension} - AC_CHECK_LIB($ax_lib, exit, - [BOOST_UNIT_TEST_FRAMEWORK_LIB="-l$ax_lib"; AC_SUBST(BOOST_UNIT_TEST_FRAMEWORK_LIB) link_unit_test_framework="yes"; break], - [link_unit_test_framework="no"]) - done - fi - else - link_unit_test_framework="no" - saved_ldflags="${LDFLAGS}" - for ax_lib in boost_unit_test_framework-$ax_boost_user_unit_test_framework_lib $ax_boost_user_unit_test_framework_lib ; do - if test "x$link_unit_test_framework" = "xyes"; then - break; - fi - for unittest_library in `ls $BOOSTLIBDIR/lib${ax_lib}.so* $BOOSTLIBDIR/lib${ax_lib}.a* 2>/dev/null` ; do - if test -r $unittest_library ; then - libextension=`echo $unittest_library | sed 's,.*/,,' | sed -e 's;^lib\(boost_unit_test_framework.*\)\.so.*$;\1;' -e 's;^lib\(boost_unit_test_framework.*\)\.a*$;\1;'` - ax_lib=${libextension} - link_unit_test_framework="yes" - else - link_unit_test_framework="no" - fi - - if test "x$link_unit_test_framework" = "xyes"; then - BOOST_UNIT_TEST_FRAMEWORK_LIB="-l$ax_lib" - AC_SUBST(BOOST_UNIT_TEST_FRAMEWORK_LIB) - break - fi - done - done - fi - if test "x$ax_lib" = "x"; then - AC_MSG_ERROR(Could not find a version of the boost_unit_test_framework library!) - fi - if test "x$link_unit_test_framework" != "xyes"; then - AC_MSG_ERROR(Could not link against $ax_lib !) - fi - fi - - CPPFLAGS="$CPPFLAGS_SAVED" - LDFLAGS="$LDFLAGS_SAVED" - fi -]) diff --git a/build-aux/m4/ax_check_compile_flag.m4 b/build-aux/m4/ax_check_compile_flag.m4 index c3a8d695a1bcd..bd753b34d7dc5 100644 --- a/build-aux/m4/ax_check_compile_flag.m4 +++ b/build-aux/m4/ax_check_compile_flag.m4 @@ -1,10 +1,10 @@ # =========================================================================== -# http://www.gnu.org/software/autoconf-archive/ax_check_compile_flag.html +# https://www.gnu.org/software/autoconf-archive/ax_check_compile_flag.html # =========================================================================== # # SYNOPSIS # -# AX_CHECK_COMPILE_FLAG(FLAG, [ACTION-SUCCESS], [ACTION-FAILURE], [EXTRA-FLAGS]) +# AX_CHECK_COMPILE_FLAG(FLAG, [ACTION-SUCCESS], [ACTION-FAILURE], [EXTRA-FLAGS], [INPUT]) # # DESCRIPTION # @@ -19,6 +19,8 @@ # the flags: "CFLAGS EXTRA-FLAGS FLAG". This can for example be used to # force the compiler to issue an error when a bad flag is given. # +# INPUT gives an alternative input source to AC_COMPILE_IFELSE. +# # NOTE: Implementation based on AX_CFLAGS_GCC_OPTION. Please keep this # macro in sync with AX_CHECK_{PREPROC,LINK}_FLAG. # @@ -27,45 +29,24 @@ # Copyright (c) 2008 Guido U. Draheim # Copyright (c) 2011 Maarten Bosmans # -# This program is free software: you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by the -# Free Software Foundation, either version 3 of the License, or (at your -# option) any later version. -# -# This program is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General -# Public License for more details. 
-# -# You should have received a copy of the GNU General Public License along -# with this program. If not, see . -# -# As a special exception, the respective Autoconf Macro's copyright owner -# gives unlimited permission to copy, distribute and modify the configure -# scripts that are the output of Autoconf when processing the Macro. You -# need not follow the terms of the GNU General Public License when using -# or distributing such scripts, even though portions of the text of the -# Macro appear in them. The GNU General Public License (GPL) does govern -# all other use of the material that constitutes the Autoconf Macro. -# -# This special exception to the GPL applies to versions of the Autoconf -# Macro released by the Autoconf Archive. When you make and distribute a -# modified version of the Autoconf Macro, you may extend this special -# exception to the GPL to apply to your modified version as well. +# Copying and distribution of this file, with or without modification, are +# permitted in any medium without royalty provided the copyright notice +# and this notice are preserved. This file is offered as-is, without any +# warranty. -#serial 2 +#serial 6 AC_DEFUN([AX_CHECK_COMPILE_FLAG], -[AC_PREREQ(2.59)dnl for _AC_LANG_PREFIX +[AC_PREREQ(2.64)dnl for _AC_LANG_PREFIX and AS_VAR_IF AS_VAR_PUSHDEF([CACHEVAR],[ax_cv_check_[]_AC_LANG_ABBREV[]flags_$4_$1])dnl AC_CACHE_CHECK([whether _AC_LANG compiler accepts $1], CACHEVAR, [ ax_check_save_flags=$[]_AC_LANG_PREFIX[]FLAGS _AC_LANG_PREFIX[]FLAGS="$[]_AC_LANG_PREFIX[]FLAGS $4 $1" - AC_COMPILE_IFELSE([AC_LANG_PROGRAM()], + AC_COMPILE_IFELSE([m4_default([$5],[AC_LANG_PROGRAM()])], [AS_VAR_SET(CACHEVAR,[yes])], [AS_VAR_SET(CACHEVAR,[no])]) _AC_LANG_PREFIX[]FLAGS=$ax_check_save_flags]) -AS_IF([test x"AS_VAR_GET(CACHEVAR)" = xyes], +AS_VAR_IF(CACHEVAR,yes, [m4_default([$2], :)], [m4_default([$3], :)]) AS_VAR_POPDEF([CACHEVAR])dnl diff --git a/build-aux/m4/ax_check_link_flag.m4 b/build-aux/m4/ax_check_link_flag.m4 index e2d0d363e4c4a..03a30ce4c739f 100644 --- a/build-aux/m4/ax_check_link_flag.m4 +++ b/build-aux/m4/ax_check_link_flag.m4 @@ -1,10 +1,10 @@ # =========================================================================== -# http://www.gnu.org/software/autoconf-archive/ax_check_link_flag.html +# https://www.gnu.org/software/autoconf-archive/ax_check_link_flag.html # =========================================================================== # # SYNOPSIS # -# AX_CHECK_LINK_FLAG(FLAG, [ACTION-SUCCESS], [ACTION-FAILURE], [EXTRA-FLAGS]) +# AX_CHECK_LINK_FLAG(FLAG, [ACTION-SUCCESS], [ACTION-FAILURE], [EXTRA-FLAGS], [INPUT]) # # DESCRIPTION # @@ -19,6 +19,8 @@ # EXTRA-FLAGS FLAG". This can for example be used to force the linker to # issue an error when a bad flag is given. # +# INPUT gives an alternative input source to AC_LINK_IFELSE. +# # NOTE: Implementation based on AX_CFLAGS_GCC_OPTION. Please keep this # macro in sync with AX_CHECK_{PREPROC,COMPILE}_FLAG. # @@ -27,44 +29,24 @@ # Copyright (c) 2008 Guido U. Draheim # Copyright (c) 2011 Maarten Bosmans # -# This program is free software: you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by the -# Free Software Foundation, either version 3 of the License, or (at your -# option) any later version. -# -# This program is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General -# Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program. If not, see . -# -# As a special exception, the respective Autoconf Macro's copyright owner -# gives unlimited permission to copy, distribute and modify the configure -# scripts that are the output of Autoconf when processing the Macro. You -# need not follow the terms of the GNU General Public License when using -# or distributing such scripts, even though portions of the text of the -# Macro appear in them. The GNU General Public License (GPL) does govern -# all other use of the material that constitutes the Autoconf Macro. -# -# This special exception to the GPL applies to versions of the Autoconf -# Macro released by the Autoconf Archive. When you make and distribute a -# modified version of the Autoconf Macro, you may extend this special -# exception to the GPL to apply to your modified version as well. +# Copying and distribution of this file, with or without modification, are +# permitted in any medium without royalty provided the copyright notice +# and this notice are preserved. This file is offered as-is, without any +# warranty. -#serial 2 +#serial 6 AC_DEFUN([AX_CHECK_LINK_FLAG], -[AS_VAR_PUSHDEF([CACHEVAR],[ax_cv_check_ldflags_$4_$1])dnl +[AC_PREREQ(2.64)dnl for _AC_LANG_PREFIX and AS_VAR_IF +AS_VAR_PUSHDEF([CACHEVAR],[ax_cv_check_ldflags_$4_$1])dnl AC_CACHE_CHECK([whether the linker accepts $1], CACHEVAR, [ ax_check_save_flags=$LDFLAGS LDFLAGS="$LDFLAGS $4 $1" - AC_LINK_IFELSE([AC_LANG_PROGRAM()], + AC_LINK_IFELSE([m4_default([$5],[AC_LANG_PROGRAM()])], [AS_VAR_SET(CACHEVAR,[yes])], [AS_VAR_SET(CACHEVAR,[no])]) LDFLAGS=$ax_check_save_flags]) -AS_IF([test x"AS_VAR_GET(CACHEVAR)" = xyes], +AS_VAR_IF(CACHEVAR,yes, [m4_default([$2], :)], [m4_default([$3], :)]) AS_VAR_POPDEF([CACHEVAR])dnl diff --git a/build-aux/m4/ax_check_preproc_flag.m4 b/build-aux/m4/ax_check_preproc_flag.m4 index b1cfef6b86dd6..e43560fbd3b66 100644 --- a/build-aux/m4/ax_check_preproc_flag.m4 +++ b/build-aux/m4/ax_check_preproc_flag.m4 @@ -1,10 +1,10 @@ # =========================================================================== -# http://www.gnu.org/software/autoconf-archive/ax_check_preproc_flag.html +# https://www.gnu.org/software/autoconf-archive/ax_check_preproc_flag.html # =========================================================================== # # SYNOPSIS # -# AX_CHECK_PREPROC_FLAG(FLAG, [ACTION-SUCCESS], [ACTION-FAILURE], [EXTRA-FLAGS]) +# AX_CHECK_PREPROC_FLAG(FLAG, [ACTION-SUCCESS], [ACTION-FAILURE], [EXTRA-FLAGS], [INPUT]) # # DESCRIPTION # @@ -19,6 +19,8 @@ # "CPPFLAGS EXTRA-FLAGS FLAG". This can for example be used to force the # preprocessor to issue an error when a bad flag is given. # +# INPUT gives an alternative input source to AC_PREPROC_IFELSE. +# # NOTE: Implementation based on AX_CFLAGS_GCC_OPTION. Please keep this # macro in sync with AX_CHECK_{COMPILE,LINK}_FLAG. # @@ -27,45 +29,24 @@ # Copyright (c) 2008 Guido U. Draheim # Copyright (c) 2011 Maarten Bosmans # -# This program is free software: you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by the -# Free Software Foundation, either version 3 of the License, or (at your -# option) any later version. -# -# This program is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General -# Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program. If not, see . -# -# As a special exception, the respective Autoconf Macro's copyright owner -# gives unlimited permission to copy, distribute and modify the configure -# scripts that are the output of Autoconf when processing the Macro. You -# need not follow the terms of the GNU General Public License when using -# or distributing such scripts, even though portions of the text of the -# Macro appear in them. The GNU General Public License (GPL) does govern -# all other use of the material that constitutes the Autoconf Macro. -# -# This special exception to the GPL applies to versions of the Autoconf -# Macro released by the Autoconf Archive. When you make and distribute a -# modified version of the Autoconf Macro, you may extend this special -# exception to the GPL to apply to your modified version as well. +# Copying and distribution of this file, with or without modification, are +# permitted in any medium without royalty provided the copyright notice +# and this notice are preserved. This file is offered as-is, without any +# warranty. -#serial 2 +#serial 6 AC_DEFUN([AX_CHECK_PREPROC_FLAG], -[AC_PREREQ(2.59)dnl for _AC_LANG_PREFIX +[AC_PREREQ(2.64)dnl for _AC_LANG_PREFIX and AS_VAR_IF AS_VAR_PUSHDEF([CACHEVAR],[ax_cv_check_[]_AC_LANG_ABBREV[]cppflags_$4_$1])dnl AC_CACHE_CHECK([whether _AC_LANG preprocessor accepts $1], CACHEVAR, [ ax_check_save_flags=$CPPFLAGS CPPFLAGS="$CPPFLAGS $4 $1" - AC_PREPROC_IFELSE([AC_LANG_PROGRAM()], + AC_PREPROC_IFELSE([m4_default([$5],[AC_LANG_PROGRAM()])], [AS_VAR_SET(CACHEVAR,[yes])], [AS_VAR_SET(CACHEVAR,[no])]) CPPFLAGS=$ax_check_save_flags]) -AS_IF([test x"AS_VAR_GET(CACHEVAR)" = xyes], +AS_VAR_IF(CACHEVAR,yes, [m4_default([$2], :)], [m4_default([$3], :)]) AS_VAR_POPDEF([CACHEVAR])dnl diff --git a/build-aux/m4/ax_cxx_compile_stdcxx.m4 b/build-aux/m4/ax_cxx_compile_stdcxx.m4 new file mode 100644 index 0000000000000..51a35054d08ce --- /dev/null +++ b/build-aux/m4/ax_cxx_compile_stdcxx.m4 @@ -0,0 +1,1005 @@ +# =========================================================================== +# https://www.gnu.org/software/autoconf-archive/ax_cxx_compile_stdcxx.html +# =========================================================================== +# +# SYNOPSIS +# +# AX_CXX_COMPILE_STDCXX(VERSION, [ext|noext], [mandatory|optional]) +# +# DESCRIPTION +# +# Check for baseline language coverage in the compiler for the specified +# version of the C++ standard. If necessary, add switches to CXX and +# CXXCPP to enable support. VERSION may be '11', '14', '17', or '20' for +# the respective C++ standard version. +# +# The second argument, if specified, indicates whether you insist on an +# extended mode (e.g. -std=gnu++11) or a strict conformance mode (e.g. +# -std=c++11). If neither is specified, you get whatever works, with +# preference for no added switch, and then for an extended mode. +# +# The third argument, if specified 'mandatory' or if left unspecified, +# indicates that baseline support for the specified C++ standard is +# required and that the macro should error out if no mode with that +# support is found. If specified 'optional', then configuration proceeds +# regardless, after defining HAVE_CXX${VERSION} if and only if a +# supporting mode is found. 
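A usage sketch for the macro described above; the standard versions and mode chosen here are illustrative, not necessarily what this project configures.

    dnl Require strict (no GNU extensions) C++17 support; configure fails otherwise
    AX_CXX_COMPILE_STDCXX([17], [noext], [mandatory])
    dnl Additionally probe for C++20; only records HAVE_CXX20 as 0/1, never errors
    AX_CXX_COMPILE_STDCXX([20], [noext], [optional])

In the mandatory case the macro appends the first working -std switch (if any is needed) to CXX and, when set, CXXCPP; in the optional case it only defines and substitutes HAVE_CXX${VERSION}.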
+# +# LICENSE +# +# Copyright (c) 2008 Benjamin Kosnik +# Copyright (c) 2012 Zack Weinberg +# Copyright (c) 2013 Roy Stogner +# Copyright (c) 2014, 2015 Google Inc.; contributed by Alexey Sokolov +# Copyright (c) 2015 Paul Norman +# Copyright (c) 2015 Moritz Klammler +# Copyright (c) 2016, 2018 Krzesimir Nowak +# Copyright (c) 2019 Enji Cooper +# Copyright (c) 2020 Jason Merrill +# Copyright (c) 2021 Jörn Heusipp +# +# Copying and distribution of this file, with or without modification, are +# permitted in any medium without royalty provided the copyright notice +# and this notice are preserved. This file is offered as-is, without any +# warranty. + +#serial 14 + +dnl This macro is based on the code from the AX_CXX_COMPILE_STDCXX_11 macro +dnl (serial version number 13). + +AC_DEFUN([AX_CXX_COMPILE_STDCXX], [dnl + m4_if([$1], [11], [ax_cxx_compile_alternatives="11 0x"], + [$1], [14], [ax_cxx_compile_alternatives="14 1y"], + [$1], [17], [ax_cxx_compile_alternatives="17 1z"], + [$1], [20], [ax_cxx_compile_alternatives="20"], + [m4_fatal([invalid first argument `$1' to AX_CXX_COMPILE_STDCXX])])dnl + m4_if([$2], [], [], + [$2], [ext], [], + [$2], [noext], [], + [m4_fatal([invalid second argument `$2' to AX_CXX_COMPILE_STDCXX])])dnl + m4_if([$3], [], [ax_cxx_compile_cxx$1_required=true], + [$3], [mandatory], [ax_cxx_compile_cxx$1_required=true], + [$3], [optional], [ax_cxx_compile_cxx$1_required=false], + [m4_fatal([invalid third argument `$3' to AX_CXX_COMPILE_STDCXX])]) + AC_LANG_PUSH([C++])dnl + ac_success=no + + m4_if([$2], [], [dnl + AC_CACHE_CHECK(whether $CXX supports C++$1 features by default, + ax_cv_cxx_compile_cxx$1, + [AC_COMPILE_IFELSE([AC_LANG_SOURCE([_AX_CXX_COMPILE_STDCXX_testbody_$1])], + [ax_cv_cxx_compile_cxx$1=yes], + [ax_cv_cxx_compile_cxx$1=no])]) + if test x$ax_cv_cxx_compile_cxx$1 = xyes; then + ac_success=yes + fi]) + + m4_if([$2], [noext], [], [dnl + if test x$ac_success = xno; then + for alternative in ${ax_cxx_compile_alternatives}; do + switch="-std=gnu++${alternative}" + cachevar=AS_TR_SH([ax_cv_cxx_compile_cxx$1_$switch]) + AC_CACHE_CHECK(whether $CXX supports C++$1 features with $switch, + $cachevar, + [ac_save_CXX="$CXX" + CXX="$CXX $switch" + AC_COMPILE_IFELSE([AC_LANG_SOURCE([_AX_CXX_COMPILE_STDCXX_testbody_$1])], + [eval $cachevar=yes], + [eval $cachevar=no]) + CXX="$ac_save_CXX"]) + if eval test x\$$cachevar = xyes; then + CXX="$CXX $switch" + if test -n "$CXXCPP" ; then + CXXCPP="$CXXCPP $switch" + fi + ac_success=yes + break + fi + done + fi]) + + m4_if([$2], [ext], [], [dnl + if test x$ac_success = xno; then + dnl HP's aCC needs +std=c++11 according to: + dnl http://h21007.www2.hp.com/portal/download/files/unprot/aCxx/PDF_Release_Notes/769149-001.pdf + dnl Cray's crayCC needs "-h std=c++11" + for alternative in ${ax_cxx_compile_alternatives}; do + for switch in -std=c++${alternative} +std=c++${alternative} "-h std=c++${alternative}"; do + cachevar=AS_TR_SH([ax_cv_cxx_compile_cxx$1_$switch]) + AC_CACHE_CHECK(whether $CXX supports C++$1 features with $switch, + $cachevar, + [ac_save_CXX="$CXX" + CXX="$CXX $switch" + AC_COMPILE_IFELSE([AC_LANG_SOURCE([_AX_CXX_COMPILE_STDCXX_testbody_$1])], + [eval $cachevar=yes], + [eval $cachevar=no]) + CXX="$ac_save_CXX"]) + if eval test x\$$cachevar = xyes; then + CXX="$CXX $switch" + if test -n "$CXXCPP" ; then + CXXCPP="$CXXCPP $switch" + fi + ac_success=yes + break + fi + done + if test x$ac_success = xyes; then + break + fi + done + fi]) + AC_LANG_POP([C++]) + if test x$ax_cxx_compile_cxx$1_required = xtrue; then + if 
test x$ac_success = xno; then + AC_MSG_ERROR([*** A compiler with support for C++$1 language features is required.]) + fi + fi + if test x$ac_success = xno; then + HAVE_CXX$1=0 + AC_MSG_NOTICE([No compiler with C++$1 support was found]) + else + HAVE_CXX$1=1 + AC_DEFINE(HAVE_CXX$1,1, + [define if the compiler supports basic C++$1 syntax]) + fi + AC_SUBST(HAVE_CXX$1) +]) + + +dnl Test body for checking C++11 support + +m4_define([_AX_CXX_COMPILE_STDCXX_testbody_11], + _AX_CXX_COMPILE_STDCXX_testbody_new_in_11 +) + +dnl Test body for checking C++14 support + +m4_define([_AX_CXX_COMPILE_STDCXX_testbody_14], + _AX_CXX_COMPILE_STDCXX_testbody_new_in_11 + _AX_CXX_COMPILE_STDCXX_testbody_new_in_14 +) + +dnl Test body for checking C++17 support + +m4_define([_AX_CXX_COMPILE_STDCXX_testbody_17], + _AX_CXX_COMPILE_STDCXX_testbody_new_in_11 + _AX_CXX_COMPILE_STDCXX_testbody_new_in_14 + _AX_CXX_COMPILE_STDCXX_testbody_new_in_17 +) + +dnl Test body for checking C++20 support + +m4_define([_AX_CXX_COMPILE_STDCXX_testbody_20], + _AX_CXX_COMPILE_STDCXX_testbody_new_in_11 + _AX_CXX_COMPILE_STDCXX_testbody_new_in_14 + _AX_CXX_COMPILE_STDCXX_testbody_new_in_17 + _AX_CXX_COMPILE_STDCXX_testbody_new_in_20 +) + + +dnl Tests for new features in C++11 + +m4_define([_AX_CXX_COMPILE_STDCXX_testbody_new_in_11], [[ + +// If the compiler admits that it is not ready for C++11, why torture it? +// Hopefully, this will speed up the test. + +#ifndef __cplusplus + +#error "This is not a C++ compiler" + +#elif __cplusplus < 201103L + +#error "This is not a C++11 compiler" + +#else + +namespace cxx11 +{ + + namespace test_static_assert + { + + template + struct check + { + static_assert(sizeof(int) <= sizeof(T), "not big enough"); + }; + + } + + namespace test_final_override + { + + struct Base + { + virtual ~Base() {} + virtual void f() {} + }; + + struct Derived : public Base + { + virtual ~Derived() override {} + virtual void f() override {} + }; + + } + + namespace test_double_right_angle_brackets + { + + template < typename T > + struct check {}; + + typedef check single_type; + typedef check> double_type; + typedef check>> triple_type; + typedef check>>> quadruple_type; + + } + + namespace test_decltype + { + + int + f() + { + int a = 1; + decltype(a) b = 2; + return a + b; + } + + } + + namespace test_type_deduction + { + + template < typename T1, typename T2 > + struct is_same + { + static const bool value = false; + }; + + template < typename T > + struct is_same + { + static const bool value = true; + }; + + template < typename T1, typename T2 > + auto + add(T1 a1, T2 a2) -> decltype(a1 + a2) + { + return a1 + a2; + } + + int + test(const int c, volatile int v) + { + static_assert(is_same::value == true, ""); + static_assert(is_same::value == false, ""); + static_assert(is_same::value == false, ""); + auto ac = c; + auto av = v; + auto sumi = ac + av + 'x'; + auto sumf = ac + av + 1.0; + static_assert(is_same::value == true, ""); + static_assert(is_same::value == true, ""); + static_assert(is_same::value == true, ""); + static_assert(is_same::value == false, ""); + static_assert(is_same::value == true, ""); + return (sumf > 0.0) ? 
sumi : add(c, v); + } + + } + + namespace test_noexcept + { + + int f() { return 0; } + int g() noexcept { return 0; } + + static_assert(noexcept(f()) == false, ""); + static_assert(noexcept(g()) == true, ""); + + } + + namespace test_constexpr + { + + template < typename CharT > + unsigned long constexpr + strlen_c_r(const CharT *const s, const unsigned long acc) noexcept + { + return *s ? strlen_c_r(s + 1, acc + 1) : acc; + } + + template < typename CharT > + unsigned long constexpr + strlen_c(const CharT *const s) noexcept + { + return strlen_c_r(s, 0UL); + } + + static_assert(strlen_c("") == 0UL, ""); + static_assert(strlen_c("1") == 1UL, ""); + static_assert(strlen_c("example") == 7UL, ""); + static_assert(strlen_c("another\0example") == 7UL, ""); + + } + + namespace test_rvalue_references + { + + template < int N > + struct answer + { + static constexpr int value = N; + }; + + answer<1> f(int&) { return answer<1>(); } + answer<2> f(const int&) { return answer<2>(); } + answer<3> f(int&&) { return answer<3>(); } + + void + test() + { + int i = 0; + const int c = 0; + static_assert(decltype(f(i))::value == 1, ""); + static_assert(decltype(f(c))::value == 2, ""); + static_assert(decltype(f(0))::value == 3, ""); + } + + } + + namespace test_uniform_initialization + { + + struct test + { + static const int zero {}; + static const int one {1}; + }; + + static_assert(test::zero == 0, ""); + static_assert(test::one == 1, ""); + + } + + namespace test_lambdas + { + + void + test1() + { + auto lambda1 = [](){}; + auto lambda2 = lambda1; + lambda1(); + lambda2(); + } + + int + test2() + { + auto a = [](int i, int j){ return i + j; }(1, 2); + auto b = []() -> int { return '0'; }(); + auto c = [=](){ return a + b; }(); + auto d = [&](){ return c; }(); + auto e = [a, &b](int x) mutable { + const auto identity = [](int y){ return y; }; + for (auto i = 0; i < a; ++i) + a += b--; + return x + identity(a + b); + }(0); + return a + b + c + d + e; + } + + int + test3() + { + const auto nullary = [](){ return 0; }; + const auto unary = [](int x){ return x; }; + using nullary_t = decltype(nullary); + using unary_t = decltype(unary); + const auto higher1st = [](nullary_t f){ return f(); }; + const auto higher2nd = [unary](nullary_t f1){ + return [unary, f1](unary_t f2){ return f2(unary(f1())); }; + }; + return higher1st(nullary) + higher2nd(nullary)(unary); + } + + } + + namespace test_variadic_templates + { + + template + struct sum; + + template + struct sum + { + static constexpr auto value = N0 + sum::value; + }; + + template <> + struct sum<> + { + static constexpr auto value = 0; + }; + + static_assert(sum<>::value == 0, ""); + static_assert(sum<1>::value == 1, ""); + static_assert(sum<23>::value == 23, ""); + static_assert(sum<1, 2>::value == 3, ""); + static_assert(sum<5, 5, 11>::value == 21, ""); + static_assert(sum<2, 3, 5, 7, 11, 13>::value == 41, ""); + + } + + // http://stackoverflow.com/questions/13728184/template-aliases-and-sfinae + // Clang 3.1 fails with headers of libstd++ 4.8.3 when using std::function + // because of this. + namespace test_template_alias_sfinae + { + + struct foo {}; + + template + using member = typename T::member_type; + + template + void func(...) 
{} + + template + void func(member*) {} + + void test(); + + void test() { func(0); } + + } + +} // namespace cxx11 + +#endif // __cplusplus >= 201103L + +]]) + + +dnl Tests for new features in C++14 + +m4_define([_AX_CXX_COMPILE_STDCXX_testbody_new_in_14], [[ + +// If the compiler admits that it is not ready for C++14, why torture it? +// Hopefully, this will speed up the test. + +#ifndef __cplusplus + +#error "This is not a C++ compiler" + +#elif __cplusplus < 201402L + +#error "This is not a C++14 compiler" + +#else + +namespace cxx14 +{ + + namespace test_polymorphic_lambdas + { + + int + test() + { + const auto lambda = [](auto&&... args){ + const auto istiny = [](auto x){ + return (sizeof(x) == 1UL) ? 1 : 0; + }; + const int aretiny[] = { istiny(args)... }; + return aretiny[0]; + }; + return lambda(1, 1L, 1.0f, '1'); + } + + } + + namespace test_binary_literals + { + + constexpr auto ivii = 0b0000000000101010; + static_assert(ivii == 42, "wrong value"); + + } + + namespace test_generalized_constexpr + { + + template < typename CharT > + constexpr unsigned long + strlen_c(const CharT *const s) noexcept + { + auto length = 0UL; + for (auto p = s; *p; ++p) + ++length; + return length; + } + + static_assert(strlen_c("") == 0UL, ""); + static_assert(strlen_c("x") == 1UL, ""); + static_assert(strlen_c("test") == 4UL, ""); + static_assert(strlen_c("another\0test") == 7UL, ""); + + } + + namespace test_lambda_init_capture + { + + int + test() + { + auto x = 0; + const auto lambda1 = [a = x](int b){ return a + b; }; + const auto lambda2 = [a = lambda1(x)](){ return a; }; + return lambda2(); + } + + } + + namespace test_digit_separators + { + + constexpr auto ten_million = 100'000'000; + static_assert(ten_million == 100000000, ""); + + } + + namespace test_return_type_deduction + { + + auto f(int& x) { return x; } + decltype(auto) g(int& x) { return x; } + + template < typename T1, typename T2 > + struct is_same + { + static constexpr auto value = false; + }; + + template < typename T > + struct is_same + { + static constexpr auto value = true; + }; + + int + test() + { + auto x = 0; + static_assert(is_same::value, ""); + static_assert(is_same::value, ""); + return x; + } + + } + +} // namespace cxx14 + +#endif // __cplusplus >= 201402L + +]]) + + +dnl Tests for new features in C++17 + +m4_define([_AX_CXX_COMPILE_STDCXX_testbody_new_in_17], [[ + +// If the compiler admits that it is not ready for C++17, why torture it? +// Hopefully, this will speed up the test. + +#ifndef __cplusplus + +#error "This is not a C++ compiler" + +#elif __cplusplus < 201703L + +#error "This is not a C++17 compiler" + +#else + +#include +#include +#include + +namespace cxx17 +{ + + namespace test_constexpr_lambdas + { + + constexpr int foo = [](){return 42;}(); + + } + + namespace test::nested_namespace::definitions + { + + } + + namespace test_fold_expression + { + + template + int multiply(Args... args) + { + return (args * ... * 1); + } + + template + bool all(Args... 
args) + { + return (args && ...); + } + + } + + namespace test_extended_static_assert + { + + static_assert (true); + + } + + namespace test_auto_brace_init_list + { + + auto foo = {5}; + auto bar {5}; + + static_assert(std::is_same, decltype(foo)>::value); + static_assert(std::is_same::value); + } + + namespace test_typename_in_template_template_parameter + { + + template typename X> struct D; + + } + + namespace test_fallthrough_nodiscard_maybe_unused_attributes + { + + int f1() + { + return 42; + } + + [[nodiscard]] int f2() + { + [[maybe_unused]] auto unused = f1(); + + switch (f1()) + { + case 17: + f1(); + [[fallthrough]]; + case 42: + f1(); + } + return f1(); + } + + } + + namespace test_extended_aggregate_initialization + { + + struct base1 + { + int b1, b2 = 42; + }; + + struct base2 + { + base2() { + b3 = 42; + } + int b3; + }; + + struct derived : base1, base2 + { + int d; + }; + + derived d1 {{1, 2}, {}, 4}; // full initialization + derived d2 {{}, {}, 4}; // value-initialized bases + + } + + namespace test_general_range_based_for_loop + { + + struct iter + { + int i; + + int& operator* () + { + return i; + } + + const int& operator* () const + { + return i; + } + + iter& operator++() + { + ++i; + return *this; + } + }; + + struct sentinel + { + int i; + }; + + bool operator== (const iter& i, const sentinel& s) + { + return i.i == s.i; + } + + bool operator!= (const iter& i, const sentinel& s) + { + return !(i == s); + } + + struct range + { + iter begin() const + { + return {0}; + } + + sentinel end() const + { + return {5}; + } + }; + + void f() + { + range r {}; + + for (auto i : r) + { + [[maybe_unused]] auto v = i; + } + } + + } + + namespace test_lambda_capture_asterisk_this_by_value + { + + struct t + { + int i; + int foo() + { + return [*this]() + { + return i; + }(); + } + }; + + } + + namespace test_enum_class_construction + { + + enum class byte : unsigned char + {}; + + byte foo {42}; + + } + + namespace test_constexpr_if + { + + template + int f () + { + if constexpr(cond) + { + return 13; + } + else + { + return 42; + } + } + + } + + namespace test_selection_statement_with_initializer + { + + int f() + { + return 13; + } + + int f2() + { + if (auto i = f(); i > 0) + { + return 3; + } + + switch (auto i = f(); i + 4) + { + case 17: + return 2; + + default: + return 1; + } + } + + } + + namespace test_template_argument_deduction_for_class_templates + { + + template + struct pair + { + pair (T1 p1, T2 p2) + : m1 {p1}, + m2 {p2} + {} + + T1 m1; + T2 m2; + }; + + void f() + { + [[maybe_unused]] auto p = pair{13, 42u}; + } + + } + + namespace test_non_type_auto_template_parameters + { + + template + struct B + {}; + + B<5> b1; + B<'a'> b2; + + } + + namespace test_structured_bindings + { + + int arr[2] = { 1, 2 }; + std::pair pr = { 1, 2 }; + + auto f1() -> int(&)[2] + { + return arr; + } + + auto f2() -> std::pair& + { + return pr; + } + + struct S + { + int x1 : 2; + volatile double y1; + }; + + S f3() + { + return {}; + } + + auto [ x1, y1 ] = f1(); + auto& [ xr1, yr1 ] = f1(); + auto [ x2, y2 ] = f2(); + auto& [ xr2, yr2 ] = f2(); + const auto [ x3, y3 ] = f3(); + + } + + namespace test_exception_spec_type_system + { + + struct Good {}; + struct Bad {}; + + void g1() noexcept; + void g2(); + + template + Bad + f(T*, T*); + + template + Good + f(T1*, T2*); + + static_assert (std::is_same_v); + + } + + namespace test_inline_variables + { + + template void f(T) + {} + + template inline T g(T) + { + return T{}; + } + + template<> inline void f<>(int) + {} + + template<> 
int g<>(int) + { + return 5; + } + + } + +} // namespace cxx17 + +#endif // __cplusplus < 201703L + +]]) + + +dnl Tests for new features in C++20 + +m4_define([_AX_CXX_COMPILE_STDCXX_testbody_new_in_20], [[ + +#ifndef __cplusplus + +#error "This is not a C++ compiler" + +#elif __cplusplus < 202002L + +#error "This is not a C++20 compiler" + +#else + +#include + +namespace cxx20 +{ + +// As C++20 supports feature test macros in the standard, there is no +// immediate need to actually test for feature availability on the +// Autoconf side. + +} // namespace cxx20 + +#endif // __cplusplus < 202002L + +]]) diff --git a/build-aux/m4/ax_gcc_func_attribute.m4 b/build-aux/m4/ax_gcc_func_attribute.m4 deleted file mode 100644 index 275ca63a2c217..0000000000000 --- a/build-aux/m4/ax_gcc_func_attribute.m4 +++ /dev/null @@ -1,217 +0,0 @@ -# =========================================================================== -# http://www.gnu.org/software/autoconf-archive/ax_gcc_func_attribute.html -# =========================================================================== -# -# SYNOPSIS -# -# AX_GCC_FUNC_ATTRIBUTE(ATTRIBUTE) -# -# DESCRIPTION -# -# This macro checks if the compiler supports one of GCC's function -# attributes; many other compilers also provide function attributes with -# the same syntax. Compiler warnings are used to detect supported -# attributes as unsupported ones are ignored by default so quieting -# warnings when using this macro will yield false positives. -# -# The ATTRIBUTE parameter holds the name of the attribute to be checked. -# -# If ATTRIBUTE is supported define HAVE_FUNC_ATTRIBUTE_. -# -# The macro caches its result in the ax_cv_have_func_attribute_ -# variable. -# -# The macro currently supports the following function attributes: -# -# alias -# aligned -# alloc_size -# always_inline -# artificial -# cold -# const -# constructor -# deprecated -# destructor -# dllexport -# dllimport -# error -# externally_visible -# flatten -# format -# format_arg -# gnu_inline -# hot -# ifunc -# leaf -# malloc -# noclone -# noinline -# nonnull -# noreturn -# nothrow -# optimize -# pure -# unused -# used -# visibility -# warning -# warn_unused_result -# weak -# weakref -# -# Unsuppored function attributes will be tested with a prototype returning -# an int and not accepting any arguments and the result of the check might -# be wrong or meaningless so use with care. -# -# LICENSE -# -# Copyright (c) 2013 Gabriele Svelto -# -# Copying and distribution of this file, with or without modification, are -# permitted in any medium without royalty provided the copyright notice -# and this notice are preserved. This file is offered as-is, without any -# warranty. 
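A sketch of how the macro being removed here was meant to be invoked; the attribute names below are arbitrary examples.

    dnl Each successful probe defines HAVE_FUNC_ATTRIBUTE_<ATTRIBUTE> to 1
    AX_GCC_FUNC_ATTRIBUTE([visibility])
    AX_GCC_FUNC_ATTRIBUTE([warn_unused_result])

Results are cached in ax_cv_have_func_attribute_<attribute>, so only the first configure run pays for the compile tests.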
- -#serial 2 - -AC_DEFUN([AX_GCC_FUNC_ATTRIBUTE], [ - AS_VAR_PUSHDEF([ac_var], [ax_cv_have_func_attribute_$1]) - - AC_CACHE_CHECK([for __attribute__(($1))], [ac_var], [ - AC_LINK_IFELSE([AC_LANG_PROGRAM([ - m4_case([$1], - [alias], [ - int foo( void ) { return 0; } - int bar( void ) __attribute__(($1("foo"))); - ], - [aligned], [ - int foo( void ) __attribute__(($1(32))); - ], - [alloc_size], [ - void *foo(int a) __attribute__(($1(1))); - ], - [always_inline], [ - inline __attribute__(($1)) int foo( void ) { return 0; } - ], - [artificial], [ - inline __attribute__(($1)) int foo( void ) { return 0; } - ], - [cold], [ - int foo( void ) __attribute__(($1)); - ], - [const], [ - int foo( void ) __attribute__(($1)); - ], - [constructor], [ - int foo( void ) __attribute__(($1)); - ], - [deprecated], [ - int foo( void ) __attribute__(($1(""))); - ], - [destructor], [ - int foo( void ) __attribute__(($1)); - ], - [dllexport], [ - __attribute__(($1)) int foo( void ) { return 0; } - ], - [dllimport], [ - int foo( void ) __attribute__(($1)); - ], - [error], [ - int foo( void ) __attribute__(($1(""))); - ], - [externally_visible], [ - int foo( void ) __attribute__(($1)); - ], - [flatten], [ - int foo( void ) __attribute__(($1)); - ], - [format], [ - int foo(const char *p, ...) __attribute__(($1(printf, 1, 2))); - ], - [format_arg], [ - char *foo(const char *p) __attribute__(($1(1))); - ], - [gnu_inline], [ - inline __attribute__(($1)) int foo( void ) { return 0; } - ], - [hot], [ - int foo( void ) __attribute__(($1)); - ], - [ifunc], [ - int my_foo( void ) { return 0; } - static int (*resolve_foo(void))(void) { return my_foo; } - int foo( void ) __attribute__(($1("resolve_foo"))); - ], - [leaf], [ - __attribute__(($1)) int foo( void ) { return 0; } - ], - [malloc], [ - void *foo( void ) __attribute__(($1)); - ], - [noclone], [ - int foo( void ) __attribute__(($1)); - ], - [noinline], [ - __attribute__(($1)) int foo( void ) { return 0; } - ], - [nonnull], [ - int foo(char *p) __attribute__(($1(1))); - ], - [noreturn], [ - void foo( void ) __attribute__(($1)); - ], - [nothrow], [ - int foo( void ) __attribute__(($1)); - ], - [optimize], [ - __attribute__(($1(3))) int foo( void ) { return 0; } - ], - [pure], [ - int foo( void ) __attribute__(($1)); - ], - [unused], [ - int foo( void ) __attribute__(($1)); - ], - [used], [ - int foo( void ) __attribute__(($1)); - ], - [visibility], [ - int foo_def( void ) __attribute__(($1("default"))); - int foo_hid( void ) __attribute__(($1("hidden"))); - ], - [warning], [ - int foo( void ) __attribute__(($1(""))); - ], - [warn_unused_result], [ - int foo( void ) __attribute__(($1)); - ], - [weak], [ - int foo( void ) __attribute__(($1)); - ], - [weakref], [ - static int foo( void ) { return 0; } - static int bar( void ) __attribute__(($1("foo"))); - ], - [ - m4_warn([syntax], [Unsupported attribute $1, the test may fail]) - int foo( void ) __attribute__(($1)); - ] - )], []) - ], - dnl GCC doesn't exit with an error if an unknown attribute is - dnl provided but only outputs a warning, so accept the attribute - dnl only if no warning were issued. 
- [AS_IF([test -s conftest.err], - [AS_VAR_SET([ac_var], [no])], - [AS_VAR_SET([ac_var], [yes])])], - [AS_VAR_SET([ac_var], [no])]) - ]) - - AS_IF([test yes = AS_VAR_GET([ac_var])], - [AC_DEFINE_UNQUOTED(AS_TR_CPP(HAVE_FUNC_ATTRIBUTE_$1), 1, - [Define to 1 if the system has the `$1' function attribute])], []) - - AS_VAR_POPDEF([ac_var]) -]) diff --git a/build-aux/m4/ax_pthread.m4 b/build-aux/m4/ax_pthread.m4 index d383ad5c6d6a5..9f35d139149f8 100644 --- a/build-aux/m4/ax_pthread.m4 +++ b/build-aux/m4/ax_pthread.m4 @@ -1,5 +1,5 @@ # =========================================================================== -# http://www.gnu.org/software/autoconf-archive/ax_pthread.html +# https://www.gnu.org/software/autoconf-archive/ax_pthread.html # =========================================================================== # # SYNOPSIS @@ -14,24 +14,28 @@ # flags that are needed. (The user can also force certain compiler # flags/libs to be tested by setting these environment variables.) # -# Also sets PTHREAD_CC to any special C compiler that is needed for -# multi-threaded programs (defaults to the value of CC otherwise). (This -# is necessary on AIX to use the special cc_r compiler alias.) +# Also sets PTHREAD_CC and PTHREAD_CXX to any special C compiler that is +# needed for multi-threaded programs (defaults to the value of CC +# respectively CXX otherwise). (This is necessary on e.g. AIX to use the +# special cc_r/CC_r compiler alias.) # # NOTE: You are assumed to not only compile your program with these flags, -# but also link it with them as well. e.g. you should link with +# but also to link with them as well. For example, you might link with # $PTHREAD_CC $CFLAGS $PTHREAD_CFLAGS $LDFLAGS ... $PTHREAD_LIBS $LIBS +# $PTHREAD_CXX $CXXFLAGS $PTHREAD_CFLAGS $LDFLAGS ... $PTHREAD_LIBS $LIBS # -# If you are only building threads programs, you may wish to use these +# If you are only building threaded programs, you may wish to use these # variables in your default LIBS, CFLAGS, and CC: # # LIBS="$PTHREAD_LIBS $LIBS" # CFLAGS="$CFLAGS $PTHREAD_CFLAGS" +# CXXFLAGS="$CXXFLAGS $PTHREAD_CFLAGS" # CC="$PTHREAD_CC" +# CXX="$PTHREAD_CXX" # # In addition, if the PTHREAD_CREATE_JOINABLE thread-attribute constant -# has a nonstandard name, defines PTHREAD_CREATE_JOINABLE to that name -# (e.g. PTHREAD_CREATE_UNDETACHED on AIX). +# has a nonstandard name, this macro defines PTHREAD_CREATE_JOINABLE to +# that name (e.g. PTHREAD_CREATE_UNDETACHED on AIX). # # Also HAVE_PTHREAD_PRIO_INHERIT is defined if pthread is found and the # PTHREAD_PRIO_INHERIT symbol is defined when compiling with @@ -55,6 +59,7 @@ # # Copyright (c) 2008 Steven G. Johnson # Copyright (c) 2011 Daniel Richard G. +# Copyright (c) 2019 Marc Stevens # # This program is free software: you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by the @@ -67,7 +72,7 @@ # Public License for more details. # # You should have received a copy of the GNU General Public License along -# with this program. If not, see . +# with this program. If not, see . # # As a special exception, the respective Autoconf Macro's copyright owner # gives unlimited permission to copy, distribute and modify the configure @@ -82,35 +87,41 @@ # modified version of the Autoconf Macro, you may extend this special # exception to the GPL to apply to your modified version as well. 
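To make the variables documented in the header above concrete, a configure.ac consuming this macro typically looks like the sketch below; the ACTION arguments are placeholders, not taken from this diff.

    AX_PTHREAD([have_pthread=yes], [AC_MSG_ERROR([POSIX threads are required])])
    dnl Use the discovered flags for both compiling and linking
    LIBS="$PTHREAD_LIBS $LIBS"
    CFLAGS="$CFLAGS $PTHREAD_CFLAGS"
    CXXFLAGS="$CXXFLAGS $PTHREAD_CFLAGS"
    CC="$PTHREAD_CC"
    CXX="$PTHREAD_CXX"

This mirrors the note in the header: the same PTHREAD_CFLAGS must be passed at both compile and link time, and PTHREAD_CC/PTHREAD_CXX stand in for CC/CXX on platforms (e.g. AIX) that need a dedicated compiler driver.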
-#serial 21 +#serial 31 AU_ALIAS([ACX_PTHREAD], [AX_PTHREAD]) AC_DEFUN([AX_PTHREAD], [ AC_REQUIRE([AC_CANONICAL_HOST]) +AC_REQUIRE([AC_PROG_CC]) +AC_REQUIRE([AC_PROG_SED]) AC_LANG_PUSH([C]) ax_pthread_ok=no # We used to check for pthread.h first, but this fails if pthread.h -# requires special compiler flags (e.g. on True64 or Sequent). +# requires special compiler flags (e.g. on Tru64 or Sequent). # It gets checked for in the link test anyway. # First of all, check if the user has set any of the PTHREAD_LIBS, # etcetera environment variables, and if threads linking works using # them: -if test x"$PTHREAD_LIBS$PTHREAD_CFLAGS" != x; then - save_CFLAGS="$CFLAGS" +if test "x$PTHREAD_CFLAGS$PTHREAD_LIBS" != "x"; then + ax_pthread_save_CC="$CC" + ax_pthread_save_CFLAGS="$CFLAGS" + ax_pthread_save_LIBS="$LIBS" + AS_IF([test "x$PTHREAD_CC" != "x"], [CC="$PTHREAD_CC"]) + AS_IF([test "x$PTHREAD_CXX" != "x"], [CXX="$PTHREAD_CXX"]) CFLAGS="$CFLAGS $PTHREAD_CFLAGS" - save_LIBS="$LIBS" LIBS="$PTHREAD_LIBS $LIBS" - AC_MSG_CHECKING([for pthread_join in LIBS=$PTHREAD_LIBS with CFLAGS=$PTHREAD_CFLAGS]) - AC_TRY_LINK_FUNC([pthread_join], [ax_pthread_ok=yes]) + AC_MSG_CHECKING([for pthread_join using $CC $PTHREAD_CFLAGS $PTHREAD_LIBS]) + AC_LINK_IFELSE([AC_LANG_CALL([], [pthread_join])], [ax_pthread_ok=yes]) AC_MSG_RESULT([$ax_pthread_ok]) - if test x"$ax_pthread_ok" = xno; then + if test "x$ax_pthread_ok" = "xno"; then PTHREAD_LIBS="" PTHREAD_CFLAGS="" fi - LIBS="$save_LIBS" - CFLAGS="$save_CFLAGS" + CC="$ax_pthread_save_CC" + CFLAGS="$ax_pthread_save_CFLAGS" + LIBS="$ax_pthread_save_LIBS" fi # We must check for the threads library under a number of different @@ -118,12 +129,14 @@ fi # (e.g. DEC) have both -lpthread and -lpthreads, where one of the # libraries is broken (non-POSIX). -# Create a list of thread flags to try. Items starting with a "-" are -# C compiler flags, and other items are library names, except for "none" -# which indicates that we try without any flags at all, and "pthread-config" -# which is a program returning the flags for the Pth emulation library. +# Create a list of thread flags to try. Items with a "," contain both +# C compiler flags (before ",") and linker flags (after ","). Other items +# starting with a "-" are C compiler flags, and remaining items are +# library names, except for "none" which indicates that we try without +# any flags at all, and "pthread-config" which is a program returning +# the flags for the Pth emulation library. -ax_pthread_flags="pthreads none -Kthread -kthread lthread -pthread -pthreads -mthreads pthread --thread-safe -mt pthread-config" +ax_pthread_flags="pthreads none -Kthread -pthread -pthreads -mthreads pthread --thread-safe -mt pthread-config" # The ordering *is* (sometimes) important. 
Some notes on the # individual items follow: @@ -132,82 +145,163 @@ ax_pthread_flags="pthreads none -Kthread -kthread lthread -pthread -pthreads -mt # none: in case threads are in libc; should be tried before -Kthread and # other compiler flags to prevent continual compiler warnings # -Kthread: Sequent (threads in libc, but -Kthread needed for pthread.h) -# -kthread: FreeBSD kernel threads (preferred to -pthread since SMP-able) -# lthread: LinuxThreads port on FreeBSD (also preferred to -pthread) -# -pthread: Linux/gcc (kernel threads), BSD/gcc (userland threads) -# -pthreads: Solaris/gcc -# -mthreads: Mingw32/gcc, Lynx/gcc +# -pthread: Linux/gcc (kernel threads), BSD/gcc (userland threads), Tru64 +# (Note: HP C rejects this with "bad form for `-t' option") +# -pthreads: Solaris/gcc (Note: HP C also rejects) # -mt: Sun Workshop C (may only link SunOS threads [-lthread], but it -# doesn't hurt to check since this sometimes defines pthreads too; -# also defines -D_REENTRANT) -# ... -mt is also the pthreads flag for HP/aCC +# doesn't hurt to check since this sometimes defines pthreads and +# -D_REENTRANT too), HP C (must be checked before -lpthread, which +# is present but should not be used directly; and before -mthreads, +# because the compiler interprets this as "-mt" + "-hreads") +# -mthreads: Mingw32/gcc, Lynx/gcc # pthread: Linux, etcetera # --thread-safe: KAI C++ # pthread-config: use pthread-config program (for GNU Pth library) -case ${host_os} in +case $host_os in + + freebsd*) + + # -kthread: FreeBSD kernel threads (preferred to -pthread since SMP-able) + # lthread: LinuxThreads port on FreeBSD (also preferred to -pthread) + + ax_pthread_flags="-kthread lthread $ax_pthread_flags" + ;; + + hpux*) + + # From the cc(1) man page: "[-mt] Sets various -D flags to enable + # multi-threading and also sets -lpthread." + + ax_pthread_flags="-mt -pthread pthread $ax_pthread_flags" + ;; + + openedition*) + + # IBM z/OS requires a feature-test macro to be defined in order to + # enable POSIX threads at all, so give the user a hint if this is + # not set. (We don't define these ourselves, as they can affect + # other portions of the system API in unpredictable ways.) + + AC_EGREP_CPP([AX_PTHREAD_ZOS_MISSING], + [ +# if !defined(_OPEN_THREADS) && !defined(_UNIX03_THREADS) + AX_PTHREAD_ZOS_MISSING +# endif + ], + [AC_MSG_WARN([IBM z/OS requires -D_OPEN_THREADS or -D_UNIX03_THREADS to enable pthreads support.])]) + ;; + solaris*) # On Solaris (at least, for some versions), libc contains stubbed # (non-functional) versions of the pthreads routines, so link-based - # tests will erroneously succeed. (We need to link with -pthreads/-mt/ - # -lpthread.) (The stubs are missing pthread_cleanup_push, or rather - # a function called by this macro, so we could check for that, but - # who knows whether they'll stub that too in a future libc.) So, - # we'll just look for -pthreads and -lpthread first: + # tests will erroneously succeed. (N.B.: The stubs are missing + # pthread_cleanup_push, or rather a function called by this macro, + # so we could check for that, but who knows whether they'll stub + # that too in a future libc.) So we'll check first for the + # standard Solaris way of linking pthreads (-mt -lpthread). + + ax_pthread_flags="-mt,-lpthread pthread $ax_pthread_flags" + ;; +esac + +# Are we compiling with Clang? 
+ +AC_CACHE_CHECK([whether $CC is Clang], + [ax_cv_PTHREAD_CLANG], + [ax_cv_PTHREAD_CLANG=no + # Note that Autoconf sets GCC=yes for Clang as well as GCC + if test "x$GCC" = "xyes"; then + AC_EGREP_CPP([AX_PTHREAD_CC_IS_CLANG], + [/* Note: Clang 2.7 lacks __clang_[a-z]+__ */ +# if defined(__clang__) && defined(__llvm__) + AX_PTHREAD_CC_IS_CLANG +# endif + ], + [ax_cv_PTHREAD_CLANG=yes]) + fi + ]) +ax_pthread_clang="$ax_cv_PTHREAD_CLANG" + + +# GCC generally uses -pthread, or -pthreads on some platforms (e.g. SPARC) + +# Note that for GCC and Clang -pthread generally implies -lpthread, +# except when -nostdlib is passed. +# This is problematic using libtool to build C++ shared libraries with pthread: +# [1] https://gcc.gnu.org/bugzilla/show_bug.cgi?id=25460 +# [2] https://bugzilla.redhat.com/show_bug.cgi?id=661333 +# [3] https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=468555 +# To solve this, first try -pthread together with -lpthread for GCC + +AS_IF([test "x$GCC" = "xyes"], + [ax_pthread_flags="-pthread,-lpthread -pthread -pthreads $ax_pthread_flags"]) + +# Clang takes -pthread (never supported any other flag), but we'll try with -lpthread first + +AS_IF([test "x$ax_pthread_clang" = "xyes"], + [ax_pthread_flags="-pthread,-lpthread -pthread"]) - ax_pthread_flags="-pthreads pthread -mt -pthread $ax_pthread_flags" + +# The presence of a feature test macro requesting re-entrant function +# definitions is, on some systems, a strong hint that pthreads support is +# correctly enabled + +case $host_os in + darwin* | hpux* | linux* | osf* | solaris*) + ax_pthread_check_macro="_REENTRANT" ;; - darwin*) - ax_pthread_flags="-pthread $ax_pthread_flags" + aix*) + ax_pthread_check_macro="_THREAD_SAFE" ;; -esac -# Clang doesn't consider unrecognized options an error unless we specify -# -Werror. We throw in some extra Clang-specific options to ensure that -# this doesn't happen for GCC, which also accepts -Werror. 
+ *) + ax_pthread_check_macro="--" + ;; +esac +AS_IF([test "x$ax_pthread_check_macro" = "x--"], + [ax_pthread_check_cond=0], + [ax_pthread_check_cond="!defined($ax_pthread_check_macro)"]) -AC_MSG_CHECKING([if compiler needs -Werror to reject unknown flags]) -save_CFLAGS="$CFLAGS" -ax_pthread_extra_flags="-Werror" -CFLAGS="$CFLAGS $ax_pthread_extra_flags -Wunknown-warning-option -Wsizeof-array-argument" -AC_COMPILE_IFELSE([AC_LANG_PROGRAM([int foo(void);],[foo()])], - [AC_MSG_RESULT([yes])], - [ax_pthread_extra_flags= - AC_MSG_RESULT([no])]) -CFLAGS="$save_CFLAGS" -if test x"$ax_pthread_ok" = xno; then -for flag in $ax_pthread_flags; do +if test "x$ax_pthread_ok" = "xno"; then +for ax_pthread_try_flag in $ax_pthread_flags; do - case $flag in + case $ax_pthread_try_flag in none) AC_MSG_CHECKING([whether pthreads work without any flags]) ;; + *,*) + PTHREAD_CFLAGS=`echo $ax_pthread_try_flag | sed "s/^\(.*\),\(.*\)$/\1/"` + PTHREAD_LIBS=`echo $ax_pthread_try_flag | sed "s/^\(.*\),\(.*\)$/\2/"` + AC_MSG_CHECKING([whether pthreads work with "$PTHREAD_CFLAGS" and "$PTHREAD_LIBS"]) + ;; + -*) - AC_MSG_CHECKING([whether pthreads work with $flag]) - PTHREAD_CFLAGS="$flag" + AC_MSG_CHECKING([whether pthreads work with $ax_pthread_try_flag]) + PTHREAD_CFLAGS="$ax_pthread_try_flag" ;; pthread-config) AC_CHECK_PROG([ax_pthread_config], [pthread-config], [yes], [no]) - if test x"$ax_pthread_config" = xno; then continue; fi + AS_IF([test "x$ax_pthread_config" = "xno"], [continue]) PTHREAD_CFLAGS="`pthread-config --cflags`" PTHREAD_LIBS="`pthread-config --ldflags` `pthread-config --libs`" ;; *) - AC_MSG_CHECKING([for the pthreads library -l$flag]) - PTHREAD_LIBS="-l$flag" + AC_MSG_CHECKING([for the pthreads library -l$ax_pthread_try_flag]) + PTHREAD_LIBS="-l$ax_pthread_try_flag" ;; esac - save_LIBS="$LIBS" - save_CFLAGS="$CFLAGS" + ax_pthread_save_CFLAGS="$CFLAGS" + ax_pthread_save_LIBS="$LIBS" + CFLAGS="$CFLAGS $PTHREAD_CFLAGS" LIBS="$PTHREAD_LIBS $LIBS" - CFLAGS="$CFLAGS $PTHREAD_CFLAGS $ax_pthread_extra_flags" # Check for various functions. We must include pthread.h, # since some functions may be macros. (On the Sequent, we @@ -218,8 +312,18 @@ for flag in $ax_pthread_flags; do # pthread_cleanup_push because it is one of the few pthread # functions on Solaris that doesn't have a non-functional libc stub. # We try pthread_create on general principles. + AC_LINK_IFELSE([AC_LANG_PROGRAM([#include - static void routine(void *a) { a = 0; } +# if $ax_pthread_check_cond +# error "$ax_pthread_check_macro must be defined" +# endif + static void *some_global = NULL; + static void routine(void *a) + { + /* To avoid any unused-parameter or + unused-but-set-parameter warning. */ + some_global = a; + } static void *start_routine(void *a) { return a; }], [pthread_t th; pthread_attr_t attr; pthread_create(&th, 0, start_routine, 0); @@ -227,101 +331,187 @@ for flag in $ax_pthread_flags; do pthread_attr_init(&attr); pthread_cleanup_push(routine, 0); pthread_cleanup_pop(0) /* ; */])], - [ax_pthread_ok=yes], - []) + [ax_pthread_ok=yes], + []) - LIBS="$save_LIBS" - CFLAGS="$save_CFLAGS" + CFLAGS="$ax_pthread_save_CFLAGS" + LIBS="$ax_pthread_save_LIBS" AC_MSG_RESULT([$ax_pthread_ok]) - if test "x$ax_pthread_ok" = xyes; then - break; - fi + AS_IF([test "x$ax_pthread_ok" = "xyes"], [break]) PTHREAD_LIBS="" PTHREAD_CFLAGS="" done fi + +# Clang needs special handling, because older versions handle the -pthread +# option in a rather... 
idiosyncratic way + +if test "x$ax_pthread_clang" = "xyes"; then + + # Clang takes -pthread; it has never supported any other flag + + # (Note 1: This will need to be revisited if a system that Clang + # supports has POSIX threads in a separate library. This tends not + # to be the way of modern systems, but it's conceivable.) + + # (Note 2: On some systems, notably Darwin, -pthread is not needed + # to get POSIX threads support; the API is always present and + # active. We could reasonably leave PTHREAD_CFLAGS empty. But + # -pthread does define _REENTRANT, and while the Darwin headers + # ignore this macro, third-party headers might not.) + + # However, older versions of Clang make a point of warning the user + # that, in an invocation where only linking and no compilation is + # taking place, the -pthread option has no effect ("argument unused + # during compilation"). They expect -pthread to be passed in only + # when source code is being compiled. + # + # Problem is, this is at odds with the way Automake and most other + # C build frameworks function, which is that the same flags used in + # compilation (CFLAGS) are also used in linking. Many systems + # supported by AX_PTHREAD require exactly this for POSIX threads + # support, and in fact it is often not straightforward to specify a + # flag that is used only in the compilation phase and not in + # linking. Such a scenario is extremely rare in practice. + # + # Even though use of the -pthread flag in linking would only print + # a warning, this can be a nuisance for well-run software projects + # that build with -Werror. So if the active version of Clang has + # this misfeature, we search for an option to squash it. + + AC_CACHE_CHECK([whether Clang needs flag to prevent "argument unused" warning when linking with -pthread], + [ax_cv_PTHREAD_CLANG_NO_WARN_FLAG], + [ax_cv_PTHREAD_CLANG_NO_WARN_FLAG=unknown + # Create an alternate version of $ac_link that compiles and + # links in two steps (.c -> .o, .o -> exe) instead of one + # (.c -> exe), because the warning occurs only in the second + # step + ax_pthread_save_ac_link="$ac_link" + ax_pthread_sed='s/conftest\.\$ac_ext/conftest.$ac_objext/g' + ax_pthread_link_step=`AS_ECHO(["$ac_link"]) | sed "$ax_pthread_sed"` + ax_pthread_2step_ac_link="($ac_compile) && (echo ==== >&5) && ($ax_pthread_link_step)" + ax_pthread_save_CFLAGS="$CFLAGS" + for ax_pthread_try in '' -Qunused-arguments -Wno-unused-command-line-argument unknown; do + AS_IF([test "x$ax_pthread_try" = "xunknown"], [break]) + CFLAGS="-Werror -Wunknown-warning-option $ax_pthread_try -pthread $ax_pthread_save_CFLAGS" + ac_link="$ax_pthread_save_ac_link" + AC_LINK_IFELSE([AC_LANG_SOURCE([[int main(void){return 0;}]])], + [ac_link="$ax_pthread_2step_ac_link" + AC_LINK_IFELSE([AC_LANG_SOURCE([[int main(void){return 0;}]])], + [break]) + ]) + done + ac_link="$ax_pthread_save_ac_link" + CFLAGS="$ax_pthread_save_CFLAGS" + AS_IF([test "x$ax_pthread_try" = "x"], [ax_pthread_try=no]) + ax_cv_PTHREAD_CLANG_NO_WARN_FLAG="$ax_pthread_try" + ]) + + case "$ax_cv_PTHREAD_CLANG_NO_WARN_FLAG" in + no | unknown) ;; + *) PTHREAD_CFLAGS="$ax_cv_PTHREAD_CLANG_NO_WARN_FLAG $PTHREAD_CFLAGS" ;; + esac + +fi # $ax_pthread_clang = yes + + + # Various other checks: -if test "x$ax_pthread_ok" = xyes; then - save_LIBS="$LIBS" - LIBS="$PTHREAD_LIBS $LIBS" - save_CFLAGS="$CFLAGS" +if test "x$ax_pthread_ok" = "xyes"; then + ax_pthread_save_CFLAGS="$CFLAGS" + ax_pthread_save_LIBS="$LIBS" CFLAGS="$CFLAGS $PTHREAD_CFLAGS" + LIBS="$PTHREAD_LIBS $LIBS" # Detect AIX 
lossage: JOINABLE attribute is called UNDETACHED. - AC_MSG_CHECKING([for joinable pthread attribute]) - attr_name=unknown - for attr in PTHREAD_CREATE_JOINABLE PTHREAD_CREATE_UNDETACHED; do - AC_LINK_IFELSE([AC_LANG_PROGRAM([#include ], - [int attr = $attr; return attr /* ; */])], - [attr_name=$attr; break], - []) - done - AC_MSG_RESULT([$attr_name]) - if test "$attr_name" != PTHREAD_CREATE_JOINABLE; then - AC_DEFINE_UNQUOTED([PTHREAD_CREATE_JOINABLE], [$attr_name], - [Define to necessary symbol if this constant - uses a non-standard name on your system.]) - fi - - AC_MSG_CHECKING([if more special flags are required for pthreads]) - flag=no - case ${host_os} in - aix* | freebsd* | darwin*) flag="-D_THREAD_SAFE";; - osf* | hpux*) flag="-D_REENTRANT";; - solaris*) - if test "$GCC" = "yes"; then - flag="-D_REENTRANT" - else - # TODO: What about Clang on Solaris? - flag="-mt -D_REENTRANT" - fi - ;; - esac - AC_MSG_RESULT([$flag]) - if test "x$flag" != xno; then - PTHREAD_CFLAGS="$flag $PTHREAD_CFLAGS" - fi + AC_CACHE_CHECK([for joinable pthread attribute], + [ax_cv_PTHREAD_JOINABLE_ATTR], + [ax_cv_PTHREAD_JOINABLE_ATTR=unknown + for ax_pthread_attr in PTHREAD_CREATE_JOINABLE PTHREAD_CREATE_UNDETACHED; do + AC_LINK_IFELSE([AC_LANG_PROGRAM([#include ], + [int attr = $ax_pthread_attr; return attr /* ; */])], + [ax_cv_PTHREAD_JOINABLE_ATTR=$ax_pthread_attr; break], + []) + done + ]) + AS_IF([test "x$ax_cv_PTHREAD_JOINABLE_ATTR" != "xunknown" && \ + test "x$ax_cv_PTHREAD_JOINABLE_ATTR" != "xPTHREAD_CREATE_JOINABLE" && \ + test "x$ax_pthread_joinable_attr_defined" != "xyes"], + [AC_DEFINE_UNQUOTED([PTHREAD_CREATE_JOINABLE], + [$ax_cv_PTHREAD_JOINABLE_ATTR], + [Define to necessary symbol if this constant + uses a non-standard name on your system.]) + ax_pthread_joinable_attr_defined=yes + ]) + + AC_CACHE_CHECK([whether more special flags are required for pthreads], + [ax_cv_PTHREAD_SPECIAL_FLAGS], + [ax_cv_PTHREAD_SPECIAL_FLAGS=no + case $host_os in + solaris*) + ax_cv_PTHREAD_SPECIAL_FLAGS="-D_POSIX_PTHREAD_SEMANTICS" + ;; + esac + ]) + AS_IF([test "x$ax_cv_PTHREAD_SPECIAL_FLAGS" != "xno" && \ + test "x$ax_pthread_special_flags_added" != "xyes"], + [PTHREAD_CFLAGS="$ax_cv_PTHREAD_SPECIAL_FLAGS $PTHREAD_CFLAGS" + ax_pthread_special_flags_added=yes]) AC_CACHE_CHECK([for PTHREAD_PRIO_INHERIT], - [ax_cv_PTHREAD_PRIO_INHERIT], [ - AC_LINK_IFELSE([AC_LANG_PROGRAM([[#include ]], - [[int i = PTHREAD_PRIO_INHERIT;]])], - [ax_cv_PTHREAD_PRIO_INHERIT=yes], - [ax_cv_PTHREAD_PRIO_INHERIT=no]) + [ax_cv_PTHREAD_PRIO_INHERIT], + [AC_LINK_IFELSE([AC_LANG_PROGRAM([[#include ]], + [[int i = PTHREAD_PRIO_INHERIT; + return i;]])], + [ax_cv_PTHREAD_PRIO_INHERIT=yes], + [ax_cv_PTHREAD_PRIO_INHERIT=no]) ]) - AS_IF([test "x$ax_cv_PTHREAD_PRIO_INHERIT" = "xyes"], - [AC_DEFINE([HAVE_PTHREAD_PRIO_INHERIT], [1], [Have PTHREAD_PRIO_INHERIT.])]) + AS_IF([test "x$ax_cv_PTHREAD_PRIO_INHERIT" = "xyes" && \ + test "x$ax_pthread_prio_inherit_defined" != "xyes"], + [AC_DEFINE([HAVE_PTHREAD_PRIO_INHERIT], [1], [Have PTHREAD_PRIO_INHERIT.]) + ax_pthread_prio_inherit_defined=yes + ]) - LIBS="$save_LIBS" - CFLAGS="$save_CFLAGS" + CFLAGS="$ax_pthread_save_CFLAGS" + LIBS="$ax_pthread_save_LIBS" # More AIX lossage: compile with *_r variant - if test "x$GCC" != xyes; then + if test "x$GCC" != "xyes"; then case $host_os in aix*) AS_CASE(["x/$CC"], - [x*/c89|x*/c89_128|x*/c99|x*/c99_128|x*/cc|x*/cc128|x*/xlc|x*/xlc_v6|x*/xlc128|x*/xlc128_v6], - [#handle absolute path differently from PATH based program lookup - AS_CASE(["x$CC"], - [x/*], - 
[AS_IF([AS_EXECUTABLE_P([${CC}_r])],[PTHREAD_CC="${CC}_r"])], - [AC_CHECK_PROGS([PTHREAD_CC],[${CC}_r],[$CC])])]) + [x*/c89|x*/c89_128|x*/c99|x*/c99_128|x*/cc|x*/cc128|x*/xlc|x*/xlc_v6|x*/xlc128|x*/xlc128_v6], + [#handle absolute path differently from PATH based program lookup + AS_CASE(["x$CC"], + [x/*], + [ + AS_IF([AS_EXECUTABLE_P([${CC}_r])],[PTHREAD_CC="${CC}_r"]) + AS_IF([test "x${CXX}" != "x"], [AS_IF([AS_EXECUTABLE_P([${CXX}_r])],[PTHREAD_CXX="${CXX}_r"])]) + ], + [ + AC_CHECK_PROGS([PTHREAD_CC],[${CC}_r],[$CC]) + AS_IF([test "x${CXX}" != "x"], [AC_CHECK_PROGS([PTHREAD_CXX],[${CXX}_r],[$CXX])]) + ] + ) + ]) ;; esac fi fi test -n "$PTHREAD_CC" || PTHREAD_CC="$CC" +test -n "$PTHREAD_CXX" || PTHREAD_CXX="$CXX" AC_SUBST([PTHREAD_LIBS]) AC_SUBST([PTHREAD_CFLAGS]) AC_SUBST([PTHREAD_CC]) +AC_SUBST([PTHREAD_CXX]) # Finally, execute ACTION-IF-FOUND/ACTION-IF-NOT-FOUND: -if test x"$ax_pthread_ok" = xyes; then +if test "x$ax_pthread_ok" = "xyes"; then ifelse([$1],,[AC_DEFINE([HAVE_PTHREAD],[1],[Define if you have POSIX threads libraries and header files.])],[$1]) : else diff --git a/build-aux/m4/bitcoin_find_bdb48.m4 b/build-aux/m4/bitcoin_find_bdb48.m4 index f3b14461eb9e8..3ef7fab5b5ae8 100644 --- a/build-aux/m4/bitcoin_find_bdb48.m4 +++ b/build-aux/m4/bitcoin_find_bdb48.m4 @@ -1,66 +1,97 @@ +dnl Copyright (c) 2013-2015 The Bitcoin Core developers +dnl Distributed under the MIT software license, see the accompanying +dnl file COPYING or http://www.opensource.org/licenses/mit-license.php. + AC_DEFUN([BITCOIN_FIND_BDB48],[ - AC_MSG_CHECKING([for Berkeley DB C++ headers]) - BDB_CPPFLAGS= - BDB_LIBS= - bdbpath=X - bdb48path=X - bdbdirlist= - for _vn in 4.8 48 4 5 ''; do - for _pfx in b lib ''; do - bdbdirlist="$bdbdirlist ${_pfx}db${_vn}" + AC_ARG_VAR([BDB_CFLAGS], [C compiler flags for BerkeleyDB, bypasses autodetection]) + AC_ARG_VAR([BDB_LIBS], [Linker flags for BerkeleyDB, bypasses autodetection]) + + if test "$use_bdb" = "no"; then + use_bdb=no + elif test "$BDB_CFLAGS" = ""; then + AC_MSG_CHECKING([for Berkeley DB C++ headers]) + BDB_CPPFLAGS= + bdbpath=X + bdb48path=X + bdbdirlist= + for _vn in 4.8 48 4 5 5.3 ''; do + for _pfx in b lib ''; do + bdbdirlist="$bdbdirlist ${_pfx}db${_vn}" + done done - done - for searchpath in $bdbdirlist ''; do - test -n "${searchpath}" && searchpath="${searchpath}/" - AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[ - #include <${searchpath}db_cxx.h> - ]],[[ - #if !((DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR >= 8) || DB_VERSION_MAJOR > 4) - #error "failed to find bdb 4.8+" - #endif - ]])],[ - if test "x$bdbpath" = "xX"; then - bdbpath="${searchpath}" - fi - ],[ - continue - ]) - AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[ - #include <${searchpath}db_cxx.h> - ]],[[ - #if !(DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 8) - #error "failed to find bdb 4.8" - #endif - ]])],[ - bdb48path="${searchpath}" - break - ],[]) - done - if test "x$bdbpath" = "xX"; then - AC_MSG_RESULT([no]) - AC_MSG_ERROR(libdb_cxx headers missing) - elif test "x$bdb48path" = "xX"; then - BITCOIN_SUBDIR_TO_INCLUDE(BDB_CPPFLAGS,[${bdbpath}],db_cxx) - AC_ARG_WITH([incompatible-bdb],[AS_HELP_STRING([--with-incompatible-bdb], [allow using a bdb version other than 4.8])],[ - AC_MSG_WARN([Found Berkeley DB other than 4.8; wallets opened by this build will not be portable!]) - ],[ - AC_MSG_ERROR([Found Berkeley DB other than 4.8, required for portable wallets (--with-incompatible-bdb to ignore or --disable-wallet to disable wallet functionality)]) - ]) + for searchpath in $bdbdirlist ''; do + test -n 
"${searchpath}" && searchpath="${searchpath}/" + AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[ + #include <${searchpath}db_cxx.h> + ]],[[ + #if !((DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR >= 8) || DB_VERSION_MAJOR > 4) + #error "failed to find bdb 4.8+" + #endif + ]])],[ + if test "$bdbpath" = "X"; then + bdbpath="${searchpath}" + fi + ],[ + continue + ]) + AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[ + #include <${searchpath}db_cxx.h> + ]],[[ + #if !(DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 8) + #error "failed to find bdb 4.8" + #endif + ]])],[ + bdb48path="${searchpath}" + break + ],[]) + done + if test "$bdbpath" = "X"; then + use_bdb=no + AC_MSG_RESULT([no]) + AC_MSG_WARN([libdb_cxx headers missing]) + AC_MSG_WARN(AC_PACKAGE_NAME[ requires this library for BDB (legacy) wallet support]) + AC_MSG_WARN([Passing --without-bdb will suppress this warning]) + elif test "$bdb48path" = "X"; then + BITCOIN_SUBDIR_TO_INCLUDE(BDB_CPPFLAGS,[${bdbpath}],db_cxx) + AC_ARG_WITH([incompatible-bdb],[AS_HELP_STRING([--with-incompatible-bdb], [allow using a bdb version other than 4.8])],[ + AC_MSG_WARN([Found Berkeley DB other than 4.8]) + AC_MSG_WARN([BDB (legacy) wallets opened by this build will not be portable!]) + use_bdb=yes + ],[ + AC_MSG_WARN([Found Berkeley DB other than 4.8]) + AC_MSG_WARN([BDB (legacy) wallets opened by this build would not be portable!]) + AC_MSG_WARN([If this is intended, pass --with-incompatible-bdb]) + AC_MSG_WARN([Passing --without-bdb will suppress this warning]) + use_bdb=no + ]) + else + BITCOIN_SUBDIR_TO_INCLUDE(BDB_CPPFLAGS,[${bdb48path}],db_cxx) + bdbpath="${bdb48path}" + use_bdb=yes + fi else - BITCOIN_SUBDIR_TO_INCLUDE(BDB_CPPFLAGS,[${bdb48path}],db_cxx) - bdbpath="${bdb48path}" + BDB_CPPFLAGS=${BDB_CFLAGS} fi AC_SUBST(BDB_CPPFLAGS) - - # TODO: Ideally this could find the library version and make sure it matches the headers being used - for searchlib in db_cxx-4.8 db_cxx; do - AC_CHECK_LIB([$searchlib],[main],[ - BDB_LIBS="-l${searchlib}" - break - ]) - done - if test "x$BDB_LIBS" = "x"; then - AC_MSG_ERROR([libdb_cxx missing, Bitcoin Core requires this library for wallet functionality (--disable-wallet to disable wallet functionality)]) + + if test "$use_bdb" = "no"; then + use_bdb=no + elif test "$BDB_LIBS" = ""; then + # TODO: Ideally this could find the library version and make sure it matches the headers being used + for searchlib in db_cxx-4.8 db_cxx db4_cxx; do + AC_CHECK_LIB([$searchlib],[main],[ + BDB_LIBS="-l${searchlib}" + break + ]) + done + if test "$BDB_LIBS" = ""; then + AC_MSG_WARN([libdb_cxx headers missing]) + AC_MSG_WARN(AC_PACKAGE_NAME[ requires this library for BDB (legacy) wallet support]) + AC_MSG_WARN([Passing --without-bdb will suppress this warning]) + fi + fi + if test "$use_bdb" != "no"; then + AC_DEFINE([USE_BDB], [1], [Define if BDB support should be compiled in]) + use_bdb=yes fi - AC_SUBST(BDB_LIBS) ]) diff --git a/build-aux/m4/bitcoin_qt.m4 b/build-aux/m4/bitcoin_qt.m4 index 2a72262653f1f..a716cd9a27795 100644 --- a/build-aux/m4/bitcoin_qt.m4 +++ b/build-aux/m4/bitcoin_qt.m4 @@ -1,18 +1,23 @@ +dnl Copyright (c) 2013-2016 The Bitcoin Core developers +dnl Distributed under the MIT software license, see the accompanying +dnl file COPYING or http://www.opensource.org/licenses/mit-license.php. + dnl Helper for cases where a qt dependency is not met. dnl Output: If qt version is auto, set bitcoin_enable_qt to false. Else, exit. 
AC_DEFUN([BITCOIN_QT_FAIL],[ - if test "x$bitcoin_qt_want_version" = "xauto" && test x$bitcoin_qt_force != xyes; then - if test x$bitcoin_enable_qt != xno; then + if test "$bitcoin_qt_want_version" = "auto" && test "$bitcoin_qt_force" != "yes"; then + if test "$bitcoin_enable_qt" != "no"; then AC_MSG_WARN([$1; bitcoin-qt frontend will not be built]) fi bitcoin_enable_qt=no + bitcoin_enable_qt_test=no else AC_MSG_ERROR([$1]) fi ]) AC_DEFUN([BITCOIN_QT_CHECK],[ - if test "x$bitcoin_enable_qt" != "xno" && test x$bitcoin_qt_want_version != xno; then + if test "$bitcoin_enable_qt" != "no" && test "$bitcoin_qt_want_version" != "no"; then true $1 else @@ -30,12 +35,12 @@ dnl Inputs: $4: If "yes", don't fail if $2 is not found. dnl Output: $1 is set to the path of $2 if found. $2 are searched in order. AC_DEFUN([BITCOIN_QT_PATH_PROGS],[ BITCOIN_QT_CHECK([ - if test "x$3" != "x"; then - AC_PATH_PROGS($1,$2,,$3) + if test "$3" != ""; then + AC_PATH_PROGS([$1], [$2], [], [$3]) else - AC_PATH_PROGS($1,$2) + AC_PATH_PROGS([$1], [$2]) fi - if test "x$$1" = "x" && test "x$4" != "xyes"; then + if test "$$1" = "" && test "$4" != "yes"; then BITCOIN_QT_FAIL([$1 not found]) fi ]) @@ -48,17 +53,25 @@ dnl CAUTION: Do not use this inside of a conditional. AC_DEFUN([BITCOIN_QT_INIT],[ dnl enable qt support AC_ARG_WITH([gui], - [AS_HELP_STRING([--with-gui@<:@=no|qt4|qt5|auto@:>@], - [build bitcoin-qt GUI (default=auto, qt4 tried first)])], + [AS_HELP_STRING([--with-gui@<:@=no|qt5|auto@:>@], + [build bitcoin-qt GUI (default=auto)])], [ bitcoin_qt_want_version=$withval - if test x$bitcoin_qt_want_version = xyes; then + if test "$bitcoin_qt_want_version" = "yes"; then bitcoin_qt_force=yes bitcoin_qt_want_version=auto fi ], [bitcoin_qt_want_version=auto]) + AS_IF([test "$with_gui" = "qt5_debug"], + [AS_CASE([$host], + [*darwin*], [qt_lib_suffix=_debug], + [qt_lib_suffix= ]); bitcoin_qt_want_version=qt5], + [qt_lib_suffix= ]) + + AS_CASE([$host], [*android*], [qt_lib_suffix=_$ANDROID_ARCH]) + AC_ARG_WITH([qt-incdir],[AS_HELP_STRING([--with-qt-incdir=INC_DIR],[specify qt include path (overridden by pkgconfig)])], [qt_include_path=$withval], []) AC_ARG_WITH([qt-libdir],[AS_HELP_STRING([--with-qt-libdir=LIB_DIR],[specify qt lib path (overridden by pkgconfig)])], [qt_lib_path=$withval], []) AC_ARG_WITH([qt-plugindir],[AS_HELP_STRING([--with-qt-plugindir=PLUGIN_DIR],[specify qt plugin path (overridden by pkgconfig)])], [qt_plugin_path=$withval], []) @@ -67,349 +80,318 @@ AC_DEFUN([BITCOIN_QT_INIT],[ AC_ARG_WITH([qtdbus], [AS_HELP_STRING([--with-qtdbus], - [enable DBus support (default is yes if qt is enabled and QtDBus is found)])], + [enable DBus support (default is yes if qt is enabled and QtDBus is found, except on Android)])], [use_dbus=$withval], [use_dbus=auto]) + dnl Android doesn't support D-Bus and certainly doesn't use it for notifications + case $host in + *android*) + if test "$use_dbus" != "yes"; then + use_dbus=no + fi + ;; + esac + AC_SUBST(QT_TRANSLATION_DIR,$qt_translation_path) ]) -dnl Find the appropriate version of Qt libraries and includes. -dnl Inputs: $1: Whether or not pkg-config should be used. yes|no. Default: yes. -dnl Inputs: $2: If $1 is "yes" and --with-gui=auto, which qt version should be -dnl tried first. -dnl Outputs: See _BITCOIN_QT_FIND_LIBS_* +dnl Find Qt libraries and includes. +dnl +dnl BITCOIN_QT_CONFIGURE([MINIMUM-VERSION]) +dnl +dnl Outputs: See _BITCOIN_QT_FIND_LIBS dnl Outputs: Sets variables for all qt-related tools. 
dnl Outputs: bitcoin_enable_qt, bitcoin_enable_qt_dbus, bitcoin_enable_qt_test AC_DEFUN([BITCOIN_QT_CONFIGURE],[ - use_pkgconfig=$1 - - if test x$use_pkgconfig = x; then - use_pkgconfig=yes - fi - - if test x$use_pkgconfig = xyes; then - BITCOIN_QT_CHECK([_BITCOIN_QT_FIND_LIBS_WITH_PKGCONFIG([$2])]) - else - BITCOIN_QT_CHECK([_BITCOIN_QT_FIND_LIBS_WITHOUT_PKGCONFIG]) - fi + qt_version=">= $1" + qt_lib_prefix="Qt5" + BITCOIN_QT_CHECK([_BITCOIN_QT_FIND_LIBS]) dnl This is ugly and complicated. Yuck. Works as follows: - dnl We can't discern whether Qt4 builds are static or not. For Qt5, we can - dnl check a header to find out. When Qt is built statically, some plugins must - dnl be linked into the final binary as well. These plugins have changed between - dnl Qt4 and Qt5. With Qt5, languages moved into core and the WindowsIntegration - dnl plugin was added. Since we can't tell if Qt4 is static or not, it is - dnl assumed for windows builds. - dnl _BITCOIN_QT_CHECK_STATIC_PLUGINS does a quick link-check and appends the - dnl results to QT_LIBS. + dnl We check a header to find out whether Qt is built statically. + dnl When Qt is built statically, some plugins must be linked into + dnl the final binary as well. _BITCOIN_QT_CHECK_STATIC_PLUGIN does + dnl a quick link-check and appends the results to QT_LIBS. BITCOIN_QT_CHECK([ TEMP_CPPFLAGS=$CPPFLAGS - CPPFLAGS=$QT_INCLUDES - if test x$bitcoin_qt_got_major_vers = x5; then - _BITCOIN_QT_IS_STATIC - if test x$bitcoin_cv_static_qt = xyes; then - AC_DEFINE(QT_STATICPLUGIN, 1, [Define this symbol if qt plugins are static]) - if test x$qt_plugin_path != x; then - QT_LIBS="$QT_LIBS -L$qt_plugin_path/accessible" + TEMP_CXXFLAGS=$CXXFLAGS + CPPFLAGS="$QT_INCLUDES $CORE_CPPFLAGS $CPPFLAGS" + CXXFLAGS="$PIC_FLAGS $CORE_CXXFLAGS $CXXFLAGS" + _BITCOIN_QT_IS_STATIC + if test "$bitcoin_cv_static_qt" = "yes"; then + _BITCOIN_QT_CHECK_STATIC_LIBS + + if test "$qt_plugin_path" != ""; then + if test -d "$qt_plugin_path/platforms"; then QT_LIBS="$QT_LIBS -L$qt_plugin_path/platforms" fi - if test x$use_pkgconfig = xyes; then - PKG_CHECK_MODULES([QTPLATFORM], [Qt5PlatformSupport], [QT_LIBS="$QTPLATFORM_LIBS $QT_LIBS"]) + if test -d "$qt_plugin_path/styles"; then + QT_LIBS="$QT_LIBS -L$qt_plugin_path/styles" fi - _BITCOIN_QT_CHECK_STATIC_PLUGINS([Q_IMPORT_PLUGIN(AccessibleFactory)], [-lqtaccessiblewidgets]) - if test x$TARGET_OS = xwindows; then - _BITCOIN_QT_CHECK_STATIC_PLUGINS([Q_IMPORT_PLUGIN(QWindowsIntegrationPlugin)],[-lqwindows]) - AC_DEFINE(QT_QPA_PLATFORM_WINDOWS, 1, [Define this symbol if the qt platform is windows]) - elif test x$TARGET_OS = xlinux; then - PKG_CHECK_MODULES([X11XCB], [x11-xcb], [QT_LIBS="$X11XCB_LIBS $QT_LIBS"]) - _BITCOIN_QT_CHECK_STATIC_PLUGINS([Q_IMPORT_PLUGIN(QXcbIntegrationPlugin)],[-lqxcb -lxcb-static]) - AC_DEFINE(QT_QPA_PLATFORM_XCB, 1, [Define this symbol if the qt platform is xcb]) - elif test x$TARGET_OS = xdarwin; then - if test x$use_pkgconfig = xyes; then - PKG_CHECK_MODULES([QTPRINT], [Qt5PrintSupport], [QT_LIBS="$QTPRINT_LIBS $QT_LIBS"]) - fi - AX_CHECK_LINK_FLAG([[-framework IOKit]],[QT_LIBS="$QT_LIBS -framework IOKit"],[AC_MSG_ERROR(could not iokit framework)]) - _BITCOIN_QT_CHECK_STATIC_PLUGINS([Q_IMPORT_PLUGIN(QCocoaIntegrationPlugin)],[-lqcocoa]) - AC_DEFINE(QT_QPA_PLATFORM_COCOA, 1, [Define this symbol if the qt platform is cocoa]) - fi - fi - else - if test x$TARGET_OS = xwindows; then - AC_DEFINE(QT_STATICPLUGIN, 1, [Define this symbol if qt plugins are static]) - if test x$qt_plugin_path != x; then + if test -d 
"$qt_plugin_path/accessible"; then QT_LIBS="$QT_LIBS -L$qt_plugin_path/accessible" - QT_LIBS="$QT_LIBS -L$qt_plugin_path/codecs" fi - _BITCOIN_QT_CHECK_STATIC_PLUGINS([ - Q_IMPORT_PLUGIN(qcncodecs) - Q_IMPORT_PLUGIN(qjpcodecs) - Q_IMPORT_PLUGIN(qtwcodecs) - Q_IMPORT_PLUGIN(qkrcodecs) - Q_IMPORT_PLUGIN(AccessibleFactory)], - [-lqcncodecs -lqjpcodecs -lqtwcodecs -lqkrcodecs -lqtaccessiblewidgets]) + if test -d "$qt_plugin_path/platforms/android"; then + QT_LIBS="$QT_LIBS -L$qt_plugin_path/platforms/android -lqtfreetype -lEGL" + fi + fi + + AC_DEFINE([QT_STATICPLUGIN], [1], [Define this symbol if qt plugins are static]) + if test "$TARGET_OS" != "android"; then + _BITCOIN_QT_CHECK_STATIC_PLUGIN([QMinimalIntegrationPlugin], [-lqminimal]) + AC_DEFINE([QT_QPA_PLATFORM_MINIMAL], [1], [Define this symbol if the minimal qt platform exists]) + fi + if test "$TARGET_OS" = "windows"; then + dnl Linking against wtsapi32 is required. See #17749 and + dnl https://bugreports.qt.io/browse/QTBUG-27097. + AX_CHECK_LINK_FLAG([-lwtsapi32], [QT_LIBS="$QT_LIBS -lwtsapi32"], [AC_MSG_ERROR([could not link against -lwtsapi32])]) + _BITCOIN_QT_CHECK_STATIC_PLUGIN([QWindowsIntegrationPlugin], [-lqwindows]) + _BITCOIN_QT_CHECK_STATIC_PLUGIN([QWindowsVistaStylePlugin], [-lqwindowsvistastyle]) + AC_DEFINE([QT_QPA_PLATFORM_WINDOWS], [1], [Define this symbol if the qt platform is windows]) + elif test "$TARGET_OS" = "linux"; then + _BITCOIN_QT_CHECK_STATIC_PLUGIN([QXcbIntegrationPlugin], [-lqxcb]) + AC_DEFINE([QT_QPA_PLATFORM_XCB], [1], [Define this symbol if the qt platform is xcb]) + elif test "$TARGET_OS" = "darwin"; then + AX_CHECK_LINK_FLAG([-framework Carbon], [QT_LIBS="$QT_LIBS -framework Carbon"], [AC_MSG_ERROR(could not link against Carbon framework)]) + AX_CHECK_LINK_FLAG([-framework IOSurface], [QT_LIBS="$QT_LIBS -framework IOSurface"], [AC_MSG_ERROR(could not link against IOSurface framework)]) + AX_CHECK_LINK_FLAG([-framework Metal], [QT_LIBS="$QT_LIBS -framework Metal"], [AC_MSG_ERROR(could not link against Metal framework)]) + AX_CHECK_LINK_FLAG([-framework QuartzCore], [QT_LIBS="$QT_LIBS -framework QuartzCore"], [AC_MSG_ERROR(could not link against QuartzCore framework)]) + _BITCOIN_QT_CHECK_STATIC_PLUGIN([QCocoaIntegrationPlugin], [-lqcocoa]) + _BITCOIN_QT_CHECK_STATIC_PLUGIN([QMacStylePlugin], [-lqmacstyle]) + AC_DEFINE([QT_QPA_PLATFORM_COCOA], [1], [Define this symbol if the qt platform is cocoa]) + elif test "$TARGET_OS" = "android"; then + QT_LIBS="-Wl,--export-dynamic,--undefined=JNI_OnLoad -lplugins_platforms_qtforandroid${qt_lib_suffix} -ljnigraphics -landroid -lqtfreetype${qt_lib_suffix} $QT_LIBS" + AC_DEFINE([QT_QPA_PLATFORM_ANDROID], [1], [Define this symbol if the qt platform is android]) fi fi CPPFLAGS=$TEMP_CPPFLAGS + CXXFLAGS=$TEMP_CXXFLAGS ]) - if test x$use_pkgconfig$qt_bin_path = xyes; then - if test x$bitcoin_qt_got_major_vers = x5; then - qt_bin_path="`$PKG_CONFIG --variable=host_bins Qt5Core 2>/dev/null`" - fi + if test "$qt_bin_path" = ""; then + qt_bin_path="`$PKG_CONFIG --variable=host_bins ${qt_lib_prefix}Core 2>/dev/null`" + fi + + if test "$use_hardening" != "no"; then + BITCOIN_QT_CHECK([ + AC_MSG_CHECKING([whether -fPIE can be used with this Qt config]) + TEMP_CPPFLAGS=$CPPFLAGS + TEMP_CXXFLAGS=$CXXFLAGS + CPPFLAGS="$QT_INCLUDES $CORE_CPPFLAGS $CPPFLAGS" + CXXFLAGS="$PIE_FLAGS $CORE_CXXFLAGS $CXXFLAGS" + AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[ + #include + #ifndef QT_VERSION + # include + #endif + ]], + [[ + #if defined(QT_REDUCE_RELOCATIONS) + choke + #endif + ]])], + [ 
AC_MSG_RESULT([yes]); QT_PIE_FLAGS=$PIE_FLAGS ], + [ AC_MSG_RESULT([no]); QT_PIE_FLAGS=$PIC_FLAGS] + ) + CPPFLAGS=$TEMP_CPPFLAGS + CXXFLAGS=$TEMP_CXXFLAGS + ]) + else + BITCOIN_QT_CHECK([ + AC_MSG_CHECKING([whether -fPIC is needed with this Qt config]) + TEMP_CPPFLAGS=$CPPFLAGS + CPPFLAGS="$QT_INCLUDES $CORE_CPPFLAGS $CPPFLAGS" + AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[ + #include + #ifndef QT_VERSION + # include + #endif + ]], + [[ + #if defined(QT_REDUCE_RELOCATIONS) + choke + #endif + ]])], + [ AC_MSG_RESULT([no])], + [ AC_MSG_RESULT([yes]); QT_PIE_FLAGS=$PIC_FLAGS] + ) + CPPFLAGS=$TEMP_CPPFLAGS + ]) fi - BITCOIN_QT_PATH_PROGS([MOC], [moc-qt${bitcoin_qt_got_major_vers} moc${bitcoin_qt_got_major_vers} moc], $qt_bin_path) - BITCOIN_QT_PATH_PROGS([UIC], [uic-qt${bitcoin_qt_got_major_vers} uic${bitcoin_qt_got_major_vers} uic], $qt_bin_path) - BITCOIN_QT_PATH_PROGS([RCC], [rcc-qt${bitcoin_qt_got_major_vers} rcc${bitcoin_qt_got_major_vers} rcc], $qt_bin_path) - BITCOIN_QT_PATH_PROGS([LRELEASE], [lrelease-qt${bitcoin_qt_got_major_vers} lrelease${bitcoin_qt_got_major_vers} lrelease], $qt_bin_path) - BITCOIN_QT_PATH_PROGS([LUPDATE], [lupdate-qt${bitcoin_qt_got_major_vers} lupdate${bitcoin_qt_got_major_vers} lupdate],$qt_bin_path, yes) + BITCOIN_QT_PATH_PROGS([MOC], [moc-qt5 moc5 moc], $qt_bin_path) + BITCOIN_QT_PATH_PROGS([UIC], [uic-qt5 uic5 uic], $qt_bin_path) + BITCOIN_QT_PATH_PROGS([RCC], [rcc-qt5 rcc5 rcc], $qt_bin_path) + BITCOIN_QT_PATH_PROGS([LRELEASE], [lrelease-qt5 lrelease5 lrelease], $qt_bin_path) + BITCOIN_QT_PATH_PROGS([LUPDATE], [lupdate-qt5 lupdate5 lupdate],$qt_bin_path, yes) + BITCOIN_QT_PATH_PROGS([LCONVERT], [lconvert-qt5 lconvert5 lconvert], $qt_bin_path, yes) MOC_DEFS='-DHAVE_CONFIG_H -I$(srcdir)' case $host in *darwin*) BITCOIN_QT_CHECK([ MOC_DEFS="${MOC_DEFS} -DQ_OS_MAC" - base_frameworks="-framework Foundation -framework ApplicationServices -framework AppKit" - AX_CHECK_LINK_FLAG([[$base_frameworks]],[QT_LIBS="$QT_LIBS $base_frameworks"],[AC_MSG_ERROR(could not find base frameworks)]) + base_frameworks="-framework Foundation -framework AppKit" + AX_CHECK_LINK_FLAG([$base_frameworks], [QT_LIBS="$QT_LIBS $base_frameworks"], [AC_MSG_ERROR(could not find base frameworks)]) ]) ;; *mingw*) BITCOIN_QT_CHECK([ - AX_CHECK_LINK_FLAG([[-mwindows]],[QT_LDFLAGS="$QT_LDFLAGS -mwindows"],[AC_MSG_WARN(-mwindows linker support not detected)]) + AX_CHECK_LINK_FLAG([-mwindows], [QT_LDFLAGS="$QT_LDFLAGS -mwindows"], [AC_MSG_WARN([-mwindows linker support not detected])]) ]) esac dnl enable qt support - AC_MSG_CHECKING(whether to build Bitcoin Core GUI) + AC_MSG_CHECKING([whether to build ]AC_PACKAGE_NAME[ GUI]) BITCOIN_QT_CHECK([ bitcoin_enable_qt=yes bitcoin_enable_qt_test=yes - if test x$have_qt_test = xno; then + if test "$have_qt_test" = "no"; then bitcoin_enable_qt_test=no fi bitcoin_enable_qt_dbus=no - if test x$use_dbus != xno && test x$have_qt_dbus = xyes; then + if test "$use_dbus" != "no" && test "$have_qt_dbus" = "yes"; then bitcoin_enable_qt_dbus=yes fi - if test x$use_dbus = xyes && test x$have_qt_dbus = xno; then - AC_MSG_ERROR("libQtDBus not found. Install libQtDBus or remove --with-qtdbus.") + if test "$use_dbus" = "yes" && test "$have_qt_dbus" = "no"; then + AC_MSG_ERROR([libQtDBus not found. 
Install libQtDBus or remove --with-qtdbus.]) + fi + if test "$LUPDATE" = ""; then + AC_MSG_WARN([lupdate tool is required to update Qt translations.]) fi - if test x$LUPDATE = x; then - AC_MSG_WARN("lupdate is required to update qt translations") + if test "$LCONVERT" = ""; then + AC_MSG_WARN([lconvert tool is required to update Qt translations.]) fi ],[ bitcoin_enable_qt=no ]) - AC_MSG_RESULT([$bitcoin_enable_qt (Qt${bitcoin_qt_got_major_vers})]) + if test $bitcoin_enable_qt = "yes"; then + AC_MSG_RESULT([$bitcoin_enable_qt ($qt_lib_prefix)]) + else + AC_MSG_RESULT([$bitcoin_enable_qt]) + fi + AC_SUBST(QT_PIE_FLAGS) AC_SUBST(QT_INCLUDES) AC_SUBST(QT_LIBS) AC_SUBST(QT_LDFLAGS) AC_SUBST(QT_DBUS_INCLUDES) - AC_SUBST(QT_DBUS_LIBS) AC_SUBST(QT_TEST_INCLUDES) - AC_SUBST(QT_TEST_LIBS) - AC_SUBST(QT_SELECT, qt${bitcoin_qt_got_major_vers}) + AC_SUBST(QT_SELECT, qt5) AC_SUBST(MOC_DEFS) ]) -dnl All macros below are internal and should _not_ be used from the main -dnl configure.ac. -dnl ---- +dnl All macros below are internal and should _not_ be used from configure.ac. -dnl Internal. Check if the included version of Qt is Qt5. -dnl Requires: INCLUDES must be populated as necessary. -dnl Output: bitcoin_cv_qt5=yes|no -AC_DEFUN([_BITCOIN_QT_CHECK_QT5],[ - AC_CACHE_CHECK(for Qt 5, bitcoin_cv_qt5,[ - AC_COMPILE_IFELSE([AC_LANG_PROGRAM( - [[#include ]], - [[ - #if QT_VERSION < 0x050000 - choke me - #else - return 0; - #endif - ]])], - [bitcoin_cv_qt5=yes], - [bitcoin_cv_qt5=no]) -])]) - -dnl Internal. Check if the linked version of Qt was built as static libs. -dnl Requires: Qt5. This check cannot determine if Qt4 is static. +dnl Internal. Check if the linked version of Qt was built statically. +dnl +dnl _BITCOIN_QT_IS_STATIC +dnl --------------------- +dnl dnl Requires: INCLUDES and LIBS must be populated as necessary. dnl Output: bitcoin_cv_static_qt=yes|no -dnl Output: Defines QT_STATICPLUGIN if plugins are static. AC_DEFUN([_BITCOIN_QT_IS_STATIC],[ AC_CACHE_CHECK(for static Qt, bitcoin_cv_static_qt,[ - AC_COMPILE_IFELSE([AC_LANG_PROGRAM( - [[#include ]], - [[ - #if defined(QT_STATIC) - return 0; - #else - choke me - #endif - ]])], - [bitcoin_cv_static_qt=yes], - [bitcoin_cv_static_qt=no]) - ]) - if test xbitcoin_cv_static_qt = xyes; then - AC_DEFINE(QT_STATICPLUGIN, 1, [Define this symbol for static Qt plugins]) - fi + AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[ + #include + #ifndef QT_VERSION + # include + #endif + ]], + [[ + #if !defined(QT_STATIC) + choke + #endif + ]])], + [bitcoin_cv_static_qt=yes], + [bitcoin_cv_static_qt=no]) + ]) ]) -dnl Internal. Check if the link-requirements for static plugins are met. +dnl Internal. Check if the link-requirements for a static plugin are met. +dnl +dnl _BITCOIN_QT_CHECK_STATIC_PLUGIN(PLUGIN, LIBRARIES) +dnl -------------------------------------------------- +dnl dnl Requires: INCLUDES and LIBS must be populated as necessary. -dnl Inputs: $1: A series of Q_IMPORT_PLUGIN(). +dnl Inputs: $1: A static plugin name. dnl Inputs: $2: The libraries that resolve $1. dnl Output: QT_LIBS is prepended or configure exits. 
-AC_DEFUN([_BITCOIN_QT_CHECK_STATIC_PLUGINS],[ - AC_MSG_CHECKING(for static Qt plugins: $2) +AC_DEFUN([_BITCOIN_QT_CHECK_STATIC_PLUGIN], [ + AC_MSG_CHECKING([for $1 ($2)]) CHECK_STATIC_PLUGINS_TEMP_LIBS="$LIBS" - LIBS="$2 $QT_LIBS $LIBS" + LIBS="$2${qt_lib_suffix} $QT_LIBS $LIBS" AC_LINK_IFELSE([AC_LANG_PROGRAM([[ - #define QT_STATICPLUGIN - #include - $1]], - [[return 0;]])], - [AC_MSG_RESULT(yes); QT_LIBS="$2 $QT_LIBS"], - [AC_MSG_RESULT(no); BITCOIN_QT_FAIL(Could not resolve: $2)]) + #include + Q_IMPORT_PLUGIN($1) + ]])], + [AC_MSG_RESULT([yes]); QT_LIBS="$2${qt_lib_suffix} $QT_LIBS"], + [AC_MSG_RESULT([no]); BITCOIN_QT_FAIL([$1 not found.])]) LIBS="$CHECK_STATIC_PLUGINS_TEMP_LIBS" ]) -dnl Internal. Find Qt libraries using pkg-config. -dnl Inputs: bitcoin_qt_want_version (from --with-gui=). The version to check -dnl first. -dnl Inputs: $1: If bitcoin_qt_want_version is "auto", check for this version -dnl first. -dnl Outputs: All necessary QT_* variables are set. -dnl Outputs: bitcoin_qt_got_major_vers is set to "4" or "5". -dnl Outputs: have_qt_test and have_qt_dbus are set (if applicable) to yes|no. -AC_DEFUN([_BITCOIN_QT_FIND_LIBS_WITH_PKGCONFIG],[ - m4_ifdef([PKG_CHECK_MODULES],[ - auto_priority_version=$1 - if test x$auto_priority_version = x; then - auto_priority_version=qt5 +dnl Internal. Check Qt static libs with PKG_CHECK_MODULES. +dnl +dnl _BITCOIN_QT_CHECK_STATIC_LIBS +dnl ----------------------------- +dnl +dnl Outputs: QT_LIBS is prepended. +AC_DEFUN([_BITCOIN_QT_CHECK_STATIC_LIBS], [ + PKG_CHECK_MODULES([QT_ACCESSIBILITY], [${qt_lib_prefix}AccessibilitySupport${qt_lib_suffix}], [QT_LIBS="$QT_ACCESSIBILITY_LIBS $QT_LIBS"]) + PKG_CHECK_MODULES([QT_DEVICEDISCOVERY], [${qt_lib_prefix}DeviceDiscoverySupport${qt_lib_suffix}], [QT_LIBS="$QT_DEVICEDISCOVERY_LIBS $QT_LIBS"]) + PKG_CHECK_MODULES([QT_EDID], [${qt_lib_prefix}EdidSupport${qt_lib_suffix}], [QT_LIBS="$QT_EDID_LIBS $QT_LIBS"]) + PKG_CHECK_MODULES([QT_EVENTDISPATCHER], [${qt_lib_prefix}EventDispatcherSupport${qt_lib_suffix}], [QT_LIBS="$QT_EVENTDISPATCHER_LIBS $QT_LIBS"]) + PKG_CHECK_MODULES([QT_FB], [${qt_lib_prefix}FbSupport${qt_lib_suffix}], [QT_LIBS="$QT_FB_LIBS $QT_LIBS"]) + PKG_CHECK_MODULES([QT_FONTDATABASE], [${qt_lib_prefix}FontDatabaseSupport${qt_lib_suffix}], [QT_LIBS="$QT_FONTDATABASE_LIBS $QT_LIBS"]) + PKG_CHECK_MODULES([QT_THEME], [${qt_lib_prefix}ThemeSupport${qt_lib_suffix}], [QT_LIBS="$QT_THEME_LIBS $QT_LIBS"]) + if test "$TARGET_OS" = "linux"; then + PKG_CHECK_MODULES([QT_INPUT], [${qt_lib_prefix}InputSupport], [QT_LIBS="$QT_INPUT_LIBS $QT_LIBS"]) + PKG_CHECK_MODULES([QT_SERVICE], [${qt_lib_prefix}ServiceSupport], [QT_LIBS="$QT_SERVICE_LIBS $QT_LIBS"]) + PKG_CHECK_MODULES([QT_XCBQPA], [${qt_lib_prefix}XcbQpa], [QT_LIBS="$QT_XCBQPA_LIBS $QT_LIBS"]) + PKG_CHECK_MODULES([QT_XKBCOMMON], [${qt_lib_prefix}XkbCommonSupport], [QT_LIBS="$QT_XKBCOMMON_LIBS $QT_LIBS"]) + elif test "$TARGET_OS" = "darwin"; then + PKG_CHECK_MODULES([QT_CLIPBOARD], [${qt_lib_prefix}ClipboardSupport${qt_lib_suffix}], [QT_LIBS="$QT_CLIPBOARD_LIBS $QT_LIBS"]) + PKG_CHECK_MODULES([QT_GRAPHICS], [${qt_lib_prefix}GraphicsSupport${qt_lib_suffix}], [QT_LIBS="$QT_GRAPHICS_LIBS $QT_LIBS"]) + PKG_CHECK_MODULES([QT_SERVICE], [${qt_lib_prefix}ServiceSupport${qt_lib_suffix}], [QT_LIBS="$QT_SERVICE_LIBS $QT_LIBS"]) + elif test "$TARGET_OS" = "windows"; then + PKG_CHECK_MODULES([QT_WINDOWSUIAUTOMATION], [${qt_lib_prefix}WindowsUIAutomationSupport${qt_lib_suffix}], [QT_LIBS="$QT_WINDOWSUIAUTOMATION_LIBS $QT_LIBS"]) + elif test "$TARGET_OS" = "android"; 
then + PKG_CHECK_MODULES([QT_EGL], [${qt_lib_prefix}EglSupport${qt_lib_suffix}], [QT_LIBS="$QT_EGL_LIBS $QT_LIBS"]) + PKG_CHECK_MODULES([QT_SERVICE], [${qt_lib_prefix}ServiceSupport${qt_lib_suffix}], [QT_LIBS="$QT_SERVICE_LIBS $QT_LIBS"]) fi - if test x$bitcoin_qt_want_version = xqt5 || ( test x$bitcoin_qt_want_version = xauto && test x$auto_priority_version = xqt5 ); then - QT_LIB_PREFIX=Qt5 - bitcoin_qt_got_major_vers=5 - else - QT_LIB_PREFIX=Qt - bitcoin_qt_got_major_vers=4 - fi - qt5_modules="Qt5Core Qt5Gui Qt5Network Qt5Widgets" - qt4_modules="QtCore QtGui QtNetwork" - BITCOIN_QT_CHECK([ - if test x$bitcoin_qt_want_version = xqt5 || ( test x$bitcoin_qt_want_version = xauto && test x$auto_priority_version = xqt5 ); then - PKG_CHECK_MODULES([QT], [$qt5_modules], [QT_INCLUDES="$QT_CFLAGS"; have_qt=yes],[have_qt=no]) - elif test x$bitcoin_qt_want_version = xqt4 || ( test x$bitcoin_qt_want_version = xauto && test x$auto_priority_version = xqt4 ); then - PKG_CHECK_MODULES([QT], [$qt4_modules], [QT_INCLUDES="$QT_CFLAGS"; have_qt=yes], [have_qt=no]) - fi - - dnl qt version is set to 'auto' and the preferred version wasn't found. Now try the other. - if test x$have_qt = xno && test x$bitcoin_qt_want_version = xauto; then - if test x$auto_priority_version = x$qt5; then - PKG_CHECK_MODULES([QT], [$qt4_modules], [QT_INCLUDES="$QT_CFLAGS"; have_qt=yes; QT_LIB_PREFIX=Qt; bitcoin_qt_got_major_vers=4], [have_qt=no]) - else - PKG_CHECK_MODULES([QT], [$qt5_modules], [QT_INCLUDES="$QT_CFLAGS"; have_qt=yes; QT_LIB_PREFIX=Qt5; bitcoin_qt_got_major_vers=5], [have_qt=no]) - fi - fi - if test x$have_qt != xyes; then - have_qt=no - BITCOIN_QT_FAIL([Qt dependencies not found]) - fi - ]) - BITCOIN_QT_CHECK([ - PKG_CHECK_MODULES([QT_TEST], [${QT_LIB_PREFIX}Test], [QT_TEST_INCLUDES="$QT_TEST_CFLAGS"; have_qt_test=yes], [have_qt_test=no]) - if test x$use_dbus != xno; then - PKG_CHECK_MODULES([QT_DBUS], [${QT_LIB_PREFIX}DBus], [QT_DBUS_INCLUDES="$QT_DBUS_CFLAGS"; have_qt_dbus=yes], [have_qt_dbus=no]) - fi - ]) - ]) - true; dnl ]) -dnl Internal. Find Qt libraries without using pkg-config. Version is deduced -dnl from the discovered headers. -dnl Inputs: bitcoin_qt_want_version (from --with-gui=). The version to use. -dnl If "auto", the version will be discovered by _BITCOIN_QT_CHECK_QT5. +dnl Internal. Find Qt libraries using pkg-config. +dnl +dnl _BITCOIN_QT_FIND_LIBS +dnl --------------------- +dnl dnl Outputs: All necessary QT_* variables are set. -dnl Outputs: bitcoin_qt_got_major_vers is set to "4" or "5". dnl Outputs: have_qt_test and have_qt_dbus are set (if applicable) to yes|no. 
-AC_DEFUN([_BITCOIN_QT_FIND_LIBS_WITHOUT_PKGCONFIG],[ - TEMP_CPPFLAGS="$CPPFLAGS" - TEMP_LIBS="$LIBS" +AC_DEFUN([_BITCOIN_QT_FIND_LIBS],[ BITCOIN_QT_CHECK([ - if test x$qt_include_path != x; then - QT_INCLUDES="-I$qt_include_path -I$qt_include_path/QtCore -I$qt_include_path/QtGui -I$qt_include_path/QtWidgets -I$qt_include_path/QtNetwork -I$qt_include_path/QtTest -I$qt_include_path/QtDBus" - CPPFLAGS="$QT_INCLUDES $CPPFLAGS" - fi + PKG_CHECK_MODULES([QT_CORE], [${qt_lib_prefix}Core${qt_lib_suffix} $qt_version], [QT_INCLUDES="$QT_CORE_CFLAGS $QT_INCLUDES" QT_LIBS="$QT_CORE_LIBS $QT_LIBS"], + [BITCOIN_QT_FAIL([${qt_lib_prefix}Core${qt_lib_suffix} $qt_version not found])]) ]) - - BITCOIN_QT_CHECK([AC_CHECK_HEADER([QtPlugin],,BITCOIN_QT_FAIL(QtCore headers missing))]) - BITCOIN_QT_CHECK([AC_CHECK_HEADER([QApplication],, BITCOIN_QT_FAIL(QtGui headers missing))]) - BITCOIN_QT_CHECK([AC_CHECK_HEADER([QLocalSocket],, BITCOIN_QT_FAIL(QtNetwork headers missing))]) - BITCOIN_QT_CHECK([ - if test x$bitcoin_qt_want_version = xauto; then - _BITCOIN_QT_CHECK_QT5 - fi - if test x$bitcoin_cv_qt5 = xyes || test x$bitcoin_qt_want_version = xqt5; then - QT_LIB_PREFIX=Qt5 - bitcoin_qt_got_major_vers=5 - else - QT_LIB_PREFIX=Qt - bitcoin_qt_got_major_vers=4 - fi + PKG_CHECK_MODULES([QT_GUI], [${qt_lib_prefix}Gui${qt_lib_suffix} $qt_version], [QT_INCLUDES="$QT_GUI_CFLAGS $QT_INCLUDES" QT_LIBS="$QT_GUI_LIBS $QT_LIBS"], + [BITCOIN_QT_FAIL([${qt_lib_prefix}Gui${qt_lib_suffix} $qt_version not found])]) ]) - BITCOIN_QT_CHECK([ - LIBS= - if test x$qt_lib_path != x; then - LIBS="$LIBS -L$qt_lib_path" - fi - - if test x$TARGET_OS = xwindows; then - AC_CHECK_LIB([imm32], [main],, BITCOIN_QT_FAIL(libimm32 not found)) - fi + PKG_CHECK_MODULES([QT_WIDGETS], [${qt_lib_prefix}Widgets${qt_lib_suffix} $qt_version], [QT_INCLUDES="$QT_WIDGETS_CFLAGS $QT_INCLUDES" QT_LIBS="$QT_WIDGETS_LIBS $QT_LIBS"], + [BITCOIN_QT_FAIL([${qt_lib_prefix}Widgets${qt_lib_suffix} $qt_version not found])]) + ]) + BITCOIN_QT_CHECK([ + PKG_CHECK_MODULES([QT_NETWORK], [${qt_lib_prefix}Network${qt_lib_suffix} $qt_version], [QT_INCLUDES="$QT_NETWORK_CFLAGS $QT_INCLUDES" QT_LIBS="$QT_NETWORK_LIBS $QT_LIBS"], + [BITCOIN_QT_FAIL([${qt_lib_prefix}Network${qt_lib_suffix} $qt_version not found])]) ]) - - BITCOIN_QT_CHECK(AC_CHECK_LIB([z] ,[main],,AC_MSG_WARN([zlib not found. Assuming qt has it built-in]))) - BITCOIN_QT_CHECK(AC_CHECK_LIB([png] ,[main],,AC_MSG_WARN([libpng not found. Assuming qt has it built-in]))) - BITCOIN_QT_CHECK(AC_CHECK_LIB([jpeg] ,[main],,AC_MSG_WARN([libjpeg not found. Assuming qt has it built-in]))) - BITCOIN_QT_CHECK(AC_CHECK_LIB([pcre16] ,[main],,AC_MSG_WARN([libpcre16 not found. 
Assuming qt has it built-in]))) - BITCOIN_QT_CHECK(AC_CHECK_LIB([${QT_LIB_PREFIX}Core] ,[main],,BITCOIN_QT_FAIL(lib$QT_LIB_PREFIXCore not found))) - BITCOIN_QT_CHECK(AC_CHECK_LIB([${QT_LIB_PREFIX}Gui] ,[main],,BITCOIN_QT_FAIL(lib$QT_LIB_PREFIXGui not found))) - BITCOIN_QT_CHECK(AC_CHECK_LIB([${QT_LIB_PREFIX}Network],[main],,BITCOIN_QT_FAIL(lib$QT_LIB_PREFIXNetwork not found))) - if test x$bitcoin_qt_got_major_vers = x5; then - BITCOIN_QT_CHECK(AC_CHECK_LIB([${QT_LIB_PREFIX}Widgets],[main],,BITCOIN_QT_FAIL(lib$QT_LIB_PREFIXWidgets not found))) - fi - QT_LIBS="$LIBS" - LIBS="$TEMP_LIBS" BITCOIN_QT_CHECK([ - LIBS= - if test x$qt_lib_path != x; then - LIBS="-L$qt_lib_path" - fi - AC_CHECK_LIB([${QT_LIB_PREFIX}Test], [main],, have_qt_test=no) - AC_CHECK_HEADER([QTest],, have_qt_test=no) - QT_TEST_LIBS="$LIBS" - if test x$use_dbus != xno; then - LIBS= - if test x$qt_lib_path != x; then - LIBS="-L$qt_lib_path" - fi - AC_CHECK_LIB([${QT_LIB_PREFIX}DBus], [main],, have_qt_dbus=no) - AC_CHECK_HEADER([QtDBus],, have_qt_dbus=no) - QT_DBUS_LIBS="$LIBS" + PKG_CHECK_MODULES([QT_TEST], [${qt_lib_prefix}Test${qt_lib_suffix} $qt_version], [QT_TEST_INCLUDES="$QT_TEST_CFLAGS"; have_qt_test=yes], [have_qt_test=no]) + if test "$use_dbus" != "no"; then + PKG_CHECK_MODULES([QT_DBUS], [${qt_lib_prefix}DBus $qt_version], [QT_DBUS_INCLUDES="$QT_DBUS_CFLAGS"; have_qt_dbus=yes], [have_qt_dbus=no]) fi ]) - CPPFLAGS="$TEMP_CPPFLAGS" - LIBS="$TEMP_LIBS" ]) - diff --git a/build-aux/m4/bitcoin_runtime_lib.m4 b/build-aux/m4/bitcoin_runtime_lib.m4 new file mode 100644 index 0000000000000..1a6922deca485 --- /dev/null +++ b/build-aux/m4/bitcoin_runtime_lib.m4 @@ -0,0 +1,42 @@ +# On some platforms clang builtin implementations +# require compiler-rt as a runtime library to use. +# +# See: +# - https://bugs.llvm.org/show_bug.cgi?id=28629 + +m4_define([_CHECK_RUNTIME_testbody], [[ + bool f(long long x, long long y, long long* p) + { + return __builtin_mul_overflow(x, y, p); + } + int main() { return 0; } +]]) + +AC_DEFUN([CHECK_RUNTIME_LIB], [ + + AC_LANG_PUSH([C++]) + + AC_MSG_CHECKING([for __builtin_mul_overflow]) + AC_LINK_IFELSE( + [AC_LANG_SOURCE([_CHECK_RUNTIME_testbody])], + [ + AC_MSG_RESULT([yes]) + AC_DEFINE([HAVE_BUILTIN_MUL_OVERFLOW], [1], [Define if you have a working __builtin_mul_overflow]) + ], + [ + ax_check_save_flags="$LDFLAGS" + LDFLAGS="$LDFLAGS --rtlib=compiler-rt -lgcc_s" + AC_LINK_IFELSE( + [AC_LANG_SOURCE([_CHECK_RUNTIME_testbody])], + [ + AC_MSG_RESULT([yes, with additional linker flags]) + RUNTIME_LDFLAGS="--rtlib=compiler-rt -lgcc_s" + AC_DEFINE([HAVE_BUILTIN_MUL_OVERFLOW], [1], [Define if you have a working __builtin_mul_overflow]) + ], + [AC_MSG_RESULT([no])]) + LDFLAGS="$ax_check_save_flags" + ]) + + AC_LANG_POP + AC_SUBST([RUNTIME_LDFLAGS]) +]) diff --git a/build-aux/m4/bitcoin_subdir_to_include.m4 b/build-aux/m4/bitcoin_subdir_to_include.m4 index 66f106c7d47b9..736270afea5ab 100644 --- a/build-aux/m4/bitcoin_subdir_to_include.m4 +++ b/build-aux/m4/bitcoin_subdir_to_include.m4 @@ -1,13 +1,17 @@ +dnl Copyright (c) 2013-2014 The Bitcoin Core developers +dnl Distributed under the MIT software license, see the accompanying +dnl file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+
 dnl BITCOIN_SUBDIR_TO_INCLUDE([CPPFLAGS-VARIABLE-NAME],[SUBDIRECTORY-NAME],[HEADER-FILE])
 dnl SUBDIRECTORY-NAME must end with a path separator
 AC_DEFUN([BITCOIN_SUBDIR_TO_INCLUDE],[
-  if test "x$2" = "x"; then
+  if test "$2" = ""; then
     AC_MSG_RESULT([default])
   else
     echo "#include <$2$3.h>" >conftest.cpp
     newinclpath=`${CXXCPP} ${CPPFLAGS} -M conftest.cpp 2>/dev/null | [ tr -d '\\n\\r\\\\' | sed -e 's/^.*[[:space:]:]\(\/[^[:space:]]*\)]$3[\.h[[:space:]].*$/\1/' -e t -e d`]
     AC_MSG_RESULT([${newinclpath}])
-    if test "x${newinclpath}" != "x"; then
+    if test "${newinclpath}" != ""; then
       eval "$1=\"\$$1\"' -I${newinclpath}'"
     fi
   fi
diff --git a/build-aux/m4/l_atomic.m4 b/build-aux/m4/l_atomic.m4
new file mode 100644
index 0000000000000..40639dfe618e5
--- /dev/null
+++ b/build-aux/m4/l_atomic.m4
@@ -0,0 +1,55 @@
+dnl Copyright (c) 2015 Tim Kosse
+dnl Copying and distribution of this file, with or without modification, are
+dnl permitted in any medium without royalty provided the copyright notice
+dnl and this notice are preserved. This file is offered as-is, without any
+dnl warranty.
+
+# Some versions of gcc/libstdc++ require linking with -latomic if
+# using the C++ atomic library.
+#
+# Sourced from http://bugs.debian.org/797228
+
+m4_define([_CHECK_ATOMIC_testbody], [[
+  #include <atomic>
+  #include <chrono>
+  #include <cstdint>
+
+  using namespace std::chrono_literals;
+
+  int main() {
+    std::atomic<bool> lock{true};
+    std::atomic_exchange(&lock, false);
+
+    std::atomic<std::chrono::seconds> t{0s};
+    t.store(2s);
+
+    std::atomic<int64_t> a{};
+
+    int64_t v = 5;
+    int64_t r = a.fetch_add(v);
+    return static_cast<int>(r);
+  }
+]])
+
+AC_DEFUN([CHECK_ATOMIC], [
+
+  AC_LANG_PUSH(C++)
+
+  AC_MSG_CHECKING([whether std::atomic can be used without link library])
+
+  AC_LINK_IFELSE([AC_LANG_SOURCE([_CHECK_ATOMIC_testbody])],[
+      AC_MSG_RESULT([yes])
+    ],[
+      AC_MSG_RESULT([no])
+      LIBS="$LIBS -latomic"
+      AC_MSG_CHECKING([whether std::atomic needs -latomic])
+      AC_LINK_IFELSE([AC_LANG_SOURCE([_CHECK_ATOMIC_testbody])],[
+        AC_MSG_RESULT([yes])
+      ],[
+        AC_MSG_RESULT([no])
+        AC_MSG_FAILURE([cannot figure out how to use std::atomic])
+      ])
+    ])
+
+  AC_LANG_POP
+])
diff --git a/build-aux/m4/l_filesystem.m4 b/build-aux/m4/l_filesystem.m4
new file mode 100644
index 0000000000000..ca3a0cd41c061
--- /dev/null
+++ b/build-aux/m4/l_filesystem.m4
@@ -0,0 +1,47 @@
+dnl Copyright (c) 2022 The Bitcoin Core developers
+dnl Distributed under the MIT software license, see the accompanying
+dnl file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+# GCC 8.1 and earlier requires -lstdc++fs
+# Clang 8.0.0 (libc++) and earlier requires -lc++fs
+
+m4_define([_CHECK_FILESYSTEM_testbody], [[
+  #include <filesystem>
+
+  namespace fs = std::filesystem;
+
+  int main() {
+    (void)fs::current_path().root_name();
+    return 0;
+  }
+]])
+
+AC_DEFUN([CHECK_FILESYSTEM], [
+
+  AC_LANG_PUSH(C++)
+
+  AC_MSG_CHECKING([whether std::filesystem can be used without link library])
+
+  AC_LINK_IFELSE([AC_LANG_SOURCE([_CHECK_FILESYSTEM_testbody])],[
+      AC_MSG_RESULT([yes])
+    ],[
+      AC_MSG_RESULT([no])
+      SAVED_LIBS="$LIBS"
+      LIBS="$SAVED_LIBS -lstdc++fs"
+      AC_MSG_CHECKING([whether std::filesystem needs -lstdc++fs])
+      AC_LINK_IFELSE([AC_LANG_SOURCE([_CHECK_FILESYSTEM_testbody])],[
+        AC_MSG_RESULT([yes])
+      ],[
+        AC_MSG_RESULT([no])
+        AC_MSG_CHECKING([whether std::filesystem needs -lc++fs])
+        LIBS="$SAVED_LIBS -lc++fs"
+        AC_LINK_IFELSE([AC_LANG_SOURCE([_CHECK_FILESYSTEM_testbody])],[
+          AC_MSG_RESULT([yes])
+        ],[
+          AC_MSG_FAILURE([cannot figure out how to use std::filesystem])
+        ])
+      ])
+    ])
+
+  AC_LANG_POP
+])
diff --git a/build-aux/m4/l_socket.m4 b/build-aux/m4/l_socket.m4
new file mode 100644
index 0000000000000..38923a98fc966
--- /dev/null
+++ b/build-aux/m4/l_socket.m4
@@ -0,0 +1,36 @@
+# Illumos/SmartOS requires linking with -lsocket if
+# using getifaddrs & freeifaddrs
+
+m4_define([_CHECK_SOCKET_testbody], [[
+  #include <sys/types.h>
+  #include <ifaddrs.h>
+
+  int main() {
+    struct ifaddrs *ifaddr;
+    getifaddrs(&ifaddr);
+    freeifaddrs(ifaddr);
+  }
+]])
+
+AC_DEFUN([CHECK_SOCKET], [
+
+  AC_LANG_PUSH(C++)
+
+  AC_MSG_CHECKING([whether ifaddrs funcs can be used without link library])
+
+  AC_LINK_IFELSE([AC_LANG_SOURCE([_CHECK_SOCKET_testbody])],[
+      AC_MSG_RESULT([yes])
+    ],[
+      AC_MSG_RESULT([no])
+      LIBS="$LIBS -lsocket"
+      AC_MSG_CHECKING([whether getifaddrs needs -lsocket])
+      AC_LINK_IFELSE([AC_LANG_SOURCE([_CHECK_SOCKET_testbody])],[
+        AC_MSG_RESULT([yes])
+      ],[
+        AC_MSG_RESULT([no])
+        AC_MSG_FAILURE([cannot figure out how to use getifaddrs])
+      ])
+    ])
+
+  AC_LANG_POP
+])
diff --git a/build_msvc/.gitignore b/build_msvc/.gitignore
new file mode 100644
index 0000000000000..b2eb9313a0632
--- /dev/null
+++ b/build_msvc/.gitignore
@@ -0,0 +1,30 @@
+# Build directories
+Debug/*
+Release/*
+.vs
+packages/*
+*/Obj
+*/Debug
+*/Release
+*/x64
+*.vcxproj.user
+
+# .vcxproj files that are auto-generated by the msvc-autogen.py script.
+libbitcoin_cli/libbitcoin_cli.vcxproj
+libbitcoin_common/libbitcoin_common.vcxproj
+libbitcoin_crypto/libbitcoin_crypto.vcxproj
+libbitcoin_node/libbitcoin_node.vcxproj
+libbitcoin_util/libbitcoin_util.vcxproj
+libbitcoin_wallet_tool/libbitcoin_wallet_tool.vcxproj
+libbitcoin_wallet/libbitcoin_wallet.vcxproj
+libbitcoin_zmq/libbitcoin_zmq.vcxproj
+bench_bitcoin/bench_bitcoin.vcxproj
+libtest_util/libtest_util.vcxproj
+
+/bitcoin_config.h
+/common.init.vcxproj
+
+*/Win32
+libbitcoin_qt/QtGeneratedFiles/*
+test_bitcoin-qt/QtGeneratedFiles/*
+vcpkg_installed
\ No newline at end of file
diff --git a/build_msvc/README.md b/build_msvc/README.md
new file mode 100644
index 0000000000000..7feee6b766184
--- /dev/null
+++ b/build_msvc/README.md
@@ -0,0 +1,86 @@
+Building Bitcoin Core with Visual Studio
+========================================
+
+Introduction
+---------------------
+Solution and project files to build Bitcoin Core with `msbuild` or Visual Studio can be found in the `build_msvc` directory. The build has been tested with Visual Studio 2019 (building with earlier versions of Visual Studio should not be expected to work).
+ +To build Bitcoin Core from the command-line, it is sufficient to only install the Visual Studio Build Tools component. + +Building with Visual Studio is an alternative to the Linux based [cross-compiler build](../doc/build-windows.md). + + +Prerequisites +--------------------- +To build [dependencies](../doc/dependencies.md) (except for [Qt](#qt)), +the default approach is to use the [vcpkg](https://docs.microsoft.com/en-us/cpp/vcpkg) package manager from Microsoft: + +1. [Install](https://vcpkg.io/en/getting-started.html) vcpkg. + +2. By default, vcpkg makes both `release` and `debug` builds for each package. +To save build time and disk space, one could skip `debug` builds (example uses PowerShell): +```powershell + +Add-Content -Path "vcpkg\triplets\x64-windows-static.cmake" -Value "set(VCPKG_BUILD_TYPE release)" +``` + +Qt +--------------------- +To build Bitcoin Core with the GUI, a static build of Qt is required. + +1. Download a single ZIP archive of Qt source code from https://download.qt.io/official_releases/qt/ (e.g., [`qt-everywhere-opensource-src-5.15.3.zip`](https://download.qt.io/official_releases/qt/5.15/5.15.3/single/qt-everywhere-opensource-src-5.15.3.zip)), and expand it into a dedicated folder. The following instructions assume that this folder is `C:\dev\qt-source`. + +2. Open "x64 Native Tools Command Prompt for VS 2019", and input the following commands: +```cmd +cd C:\dev\qt-source +mkdir build +cd build +..\configure -release -silent -opensource -confirm-license -opengl desktop -static -static-runtime -mp -qt-zlib -qt-pcre -qt-libpng -nomake examples -nomake tests -nomake tools -no-angle -no-dbus -no-gif -no-gtk -no-ico -no-icu -no-libjpeg -no-libudev -no-sql-sqlite -no-sql-odbc -no-sqlite -no-vulkan -skip qt3d -skip qtactiveqt -skip qtandroidextras -skip qtcharts -skip qtconnectivity -skip qtdatavis3d -skip qtdeclarative -skip doc -skip qtdoc -skip qtgamepad -skip qtgraphicaleffects -skip qtimageformats -skip qtlocation -skip qtlottie -skip qtmacextras -skip qtmultimedia -skip qtnetworkauth -skip qtpurchasing -skip qtquick3d -skip qtquickcontrols -skip qtquickcontrols2 -skip qtquicktimeline -skip qtremoteobjects -skip qtscript -skip qtscxml -skip qtsensors -skip qtserialbus -skip qtserialport -skip qtspeech -skip qtsvg -skip qtvirtualkeyboard -skip qtwayland -skip qtwebchannel -skip qtwebengine -skip qtwebglplugin -skip qtwebsockets -skip qtwebview -skip qtx11extras -skip qtxmlpatterns -no-openssl -no-feature-bearermanagement -no-feature-printdialog -no-feature-printer -no-feature-printpreviewdialog -no-feature-printpreviewwidget -no-feature-sql -no-feature-sqlmodel -no-feature-textbrowser -no-feature-textmarkdownwriter -no-feature-textodfwriter -no-feature-xml -prefix C:\Qt_static +nmake +nmake install +``` + +One could speed up building with [`jom`](https://wiki.qt.io/Jom), a replacement for `nmake` which makes use of all CPU cores. + +To build Bitcoin Core without Qt, unload or disable the `bitcoin-qt`, `libbitcoin_qt` and `test_bitcoin-qt` projects. + + +Building +--------------------- +1. Use Python to generate `*.vcxproj` from Makefile: + +``` +PS >py -3 msvc-autogen.py +``` + +2. An optional step is to adjust the settings in the `build_msvc` directory and the `common.init.vcxproj` file. This project file contains settings that are common to all projects such as the runtime library version and target Windows SDK version. The Qt directories can also be set. 
To specify a non-default path to a static Qt package directory, use the `QTBASEDIR` environment variable. + +3. To build from the command-line with the Visual Studio 2019 toolchain use: + +```cmd +msbuild -property:Configuration=Release -maxCpuCount -verbosity:minimal bitcoin.sln +``` + +Alternatively, open the `build_msvc/bitcoin.sln` file in Visual Studio 2019. + +Security +--------------------- +[Base address randomization](https://docs.microsoft.com/en-us/cpp/build/reference/dynamicbase-use-address-space-layout-randomization?view=msvc-160) is used to make Bitcoin Core more secure. When building Bitcoin using the `build_msvc` process base address randomization can be disabled by editing `common.init.vcproj` to change `RandomizedBaseAddress` from `true` to `false` and then rebuilding the project. + +To check if `bitcoind` has `RandomizedBaseAddress` enabled or disabled run + +``` +.\dumpbin.exe /headers src/bitcoind.exe +``` + +If is it enabled then in the output `Dynamic base` will be listed in the `DLL characteristics` under `OPTIONAL HEADER VALUES` as shown below + +``` + 8160 DLL characteristics + High Entropy Virtual Addresses + Dynamic base + NX compatible + Terminal Server Aware +``` + +This may not disable all stack randomization as versions of windows employ additional stack randomization protections. These protections must be turned off in the OS configuration. diff --git a/build_msvc/bench_bitcoin/bench_bitcoin.vcxproj.in b/build_msvc/bench_bitcoin/bench_bitcoin.vcxproj.in new file mode 100644 index 0000000000000..fc9d7cbed6450 --- /dev/null +++ b/build_msvc/bench_bitcoin/bench_bitcoin.vcxproj.in @@ -0,0 +1,62 @@ + + + + + {1125654E-E1B2-4431-8B5C-62EA9A2FEECB} + + + Application + $(SolutionDir)$(Platform)\$(Configuration)\ + + +@SOURCE_FILES@ + + + + {2b384fa8-9ee1-4544-93cb-0d733c25e8ce} + + + {7c87e378-df58-482e-aa2f-1bc129bc19ce} + + + {6190199c-6cf4-4dad-bfbd-93fa72a760c1} + + + {460fee33-1fe1-483f-b3bf-931ff8e969a5} + + + {b53a5535-ee9d-4c6f-9a26-f79ee3bc3754} + + + {93b86837-b543-48a5-a89b-7c87abb77df2} + + + {792d487f-f14c-49fc-a9de-3fc150f31c3f} + + + {5724ba7d-a09a-4ba8-800b-c4c1561b3d69} + + + {bb493552-3b8c-4a8c-bf69-a6e7a51d2ea6} + + + {18430fef-6b61-4c53-b396-718e02850f1b} + + + {1e065f03-3566-47d0-8fa9-daa72b084e7d} + + + + + There was an error executing the raw bench header generation task. 
+ + + + + + + + + + + \ No newline at end of file diff --git a/build_msvc/bitcoin-cli/bitcoin-cli.vcxproj b/build_msvc/bitcoin-cli/bitcoin-cli.vcxproj new file mode 100644 index 0000000000000..e5e0e978f8981 --- /dev/null +++ b/build_msvc/bitcoin-cli/bitcoin-cli.vcxproj @@ -0,0 +1,31 @@ + + + + + {0B2D7431-F876-4A58-87BF-F748338CD3BF} + + + Application + $(SolutionDir)$(Platform)\$(Configuration)\ + + + + + + + {0667528c-d734-4009-adf9-c0d6c4a5a5a6} + + + {6190199c-6cf4-4dad-bfbd-93fa72a760c1} + + + {b53a5535-ee9d-4c6f-9a26-f79ee3bc3754} + + + {5724ba7d-a09a-4ba8-800b-c4c1561b3d69} + + + + + + diff --git a/build_msvc/bitcoin-qt/bitcoin-qt.vcxproj b/build_msvc/bitcoin-qt/bitcoin-qt.vcxproj new file mode 100644 index 0000000000000..0d6358e0d077a --- /dev/null +++ b/build_msvc/bitcoin-qt/bitcoin-qt.vcxproj @@ -0,0 +1,85 @@ + + + + + + {7E99172D-7FF2-4CB6-B736-AC9B76ED412A} + Application + $(SolutionDir)$(Platform)\$(Configuration)\ + + + + + + + + + {2b384fa8-9ee1-4544-93cb-0d733c25e8ce} + + + {0667528c-d734-4009-adf9-c0d6c4a5a5a6} + + + {7c87e378-df58-482e-aa2f-1bc129bc19ce} + + + {6190199c-6cf4-4dad-bfbd-93fa72a760c1} + + + {2b4abff8-d1fd-4845-88c9-1f3c0a6512bf} + + + {460fee33-1fe1-483f-b3bf-931ff8e969a5} + + + {b53a5535-ee9d-4c6f-9a26-f79ee3bc3754} + + + {93b86837-b543-48a5-a89b-7c87abb77df2} + + + {792d487f-f14c-49fc-a9de-3fc150f31c3f} + + + {18430fef-6b61-4c53-b396-718e02850f1b} + + + {bb493552-3b8c-4a8c-bf69-a6e7a51d2ea6} + + + {5724ba7d-a09a-4ba8-800b-c4c1561b3d69} + + + + + + $(QtIncludes);%(AdditionalIncludeDirectories) + + + Windows + $(QtReleaseLibraries);%(AdditionalDependencies) + /ignore:4206 /LTCG:OFF + + + ..\..\src; + HAVE_CONFIG_H;_UNICODE;UNICODE;%(PreprocessorDefinitions) + + + + + + $(QtIncludes);%(AdditionalIncludeDirectories) + + + $(QtDebugLibraries);%(AdditionalDependencies) + /ignore:4206 + + + ..\..\src; + HAVE_CONFIG_H;_UNICODE;UNICODE;%(PreprocessorDefinitions) + + + + + + diff --git a/build_msvc/bitcoin-tx/bitcoin-tx.vcxproj b/build_msvc/bitcoin-tx/bitcoin-tx.vcxproj new file mode 100644 index 0000000000000..4e9b4916a0fc9 --- /dev/null +++ b/build_msvc/bitcoin-tx/bitcoin-tx.vcxproj @@ -0,0 +1,37 @@ + + + + + {D3022AF6-AD33-4CE3-B358-87CB6A1B29CF} + + + Application + $(SolutionDir)$(Platform)\$(Configuration)\ + + + + + + + {2b384fa8-9ee1-4544-93cb-0d733c25e8ce} + + + {7c87e378-df58-482e-aa2f-1bc129bc19ce} + + + {6190199c-6cf4-4dad-bfbd-93fa72a760c1} + + + {b53a5535-ee9d-4c6f-9a26-f79ee3bc3754} + + + {5724ba7d-a09a-4ba8-800b-c4c1561b3d69} + + + {bb493552-3b8c-4a8c-bf69-a6e7a51d2ea6} + + + + + + diff --git a/build_msvc/bitcoin-util/bitcoin-util.vcxproj b/build_msvc/bitcoin-util/bitcoin-util.vcxproj new file mode 100644 index 0000000000000..8a0964824bcd8 --- /dev/null +++ b/build_msvc/bitcoin-util/bitcoin-util.vcxproj @@ -0,0 +1,37 @@ + + + + + {57A04EC9-542A-4E40-83D0-AC3BE1F36805} + + + Application + $(SolutionDir)$(Platform)\$(Configuration)\ + + + + + + + {2b384fa8-9ee1-4544-93cb-0d733c25e8ce} + + + {7c87e378-df58-482e-aa2f-1bc129bc19ce} + + + {6190199c-6cf4-4dad-bfbd-93fa72a760c1} + + + {b53a5535-ee9d-4c6f-9a26-f79ee3bc3754} + + + {5724ba7d-a09a-4ba8-800b-c4c1561b3d69} + + + {bb493552-3b8c-4a8c-bf69-a6e7a51d2ea6} + + + + + + diff --git a/build_msvc/bitcoin-wallet/bitcoin-wallet.vcxproj b/build_msvc/bitcoin-wallet/bitcoin-wallet.vcxproj new file mode 100644 index 0000000000000..2ac0be9814bdb --- /dev/null +++ b/build_msvc/bitcoin-wallet/bitcoin-wallet.vcxproj @@ -0,0 +1,46 @@ + + + + + {84DE8790-EDE3-4483-81AC-C32F15E861F4} + + + Application + 
$(SolutionDir)$(Platform)\$(Configuration)\ + + + + + $(IntDir)init_bitcoin-wallet.obj + + + + + {2b384fa8-9ee1-4544-93cb-0d733c25e8ce} + + + {7c87e378-df58-482e-aa2f-1bc129bc19ce} + + + {6190199c-6cf4-4dad-bfbd-93fa72a760c1} + + + {b53a5535-ee9d-4c6f-9a26-f79ee3bc3754} + + + {93b86837-b543-48a5-a89b-7c87abb77df2} + + + {5724ba7d-a09a-4ba8-800b-c4c1561b3d69} + + + {f91ac55e-6f5e-4c58-9ac5-b40db7deef93} + + + {bb493552-3b8c-4a8c-bf69-a6e7a51d2ea6} + + + + + + diff --git a/build_msvc/bitcoin.sln b/build_msvc/bitcoin.sln new file mode 100644 index 0000000000000..2a1ccf58fec9c --- /dev/null +++ b/build_msvc/bitcoin.sln @@ -0,0 +1,162 @@ +Microsoft Visual Studio Solution File, Format Version 12.00 +# Visual Studio Version 16 +VisualStudioVersion = 16.0.28803.452 +MinimumVisualStudioVersion = 10.0.40219.1 +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "libbitcoinconsensus", "libbitcoinconsensus\libbitcoinconsensus.vcxproj", "{2B384FA8-9EE1-4544-93CB-0D733C25E8CE}" +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "bitcoind", "bitcoind\bitcoind.vcxproj", "{D4513DDF-6013-44DC-ADCC-12EAF6D1F038}" +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "libbitcoin_util", "libbitcoin_util\libbitcoin_util.vcxproj", "{B53A5535-EE9D-4C6F-9A26-F79EE3BC3754}" +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "libbitcoin_common", "libbitcoin_common\libbitcoin_common.vcxproj", "{7C87E378-DF58-482E-AA2F-1BC129BC19CE}" +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "libbitcoin_crypto", "libbitcoin_crypto\libbitcoin_crypto.vcxproj", "{6190199C-6CF4-4DAD-BFBD-93FA72A760C1}" +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "libbitcoin_node", "libbitcoin_node\libbitcoin_node.vcxproj", "{460FEE33-1FE1-483F-B3BF-931FF8E969A5}" +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "libunivalue", "libunivalue\libunivalue.vcxproj", "{5724BA7D-A09A-4BA8-800B-C4C1561B3D69}" +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "libbitcoin_wallet", "libbitcoin_wallet\libbitcoin_wallet.vcxproj", "{93B86837-B543-48A5-A89B-7C87ABB77DF2}" +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "libbitcoin_zmq", "libbitcoin_zmq\libbitcoin_zmq.vcxproj", "{792D487F-F14C-49FC-A9DE-3FC150F31C3F}" +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "test_bitcoin", "test_bitcoin\test_bitcoin.vcxproj", "{A56B73DB-D46D-4882-8374-1FE3FFA08F07}" +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "libbitcoin_cli", "libbitcoin_cli\libbitcoin_cli.vcxproj", "{0667528C-D734-4009-ADF9-C0D6C4A5A5A6}" +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "bitcoin-cli", "bitcoin-cli\bitcoin-cli.vcxproj", "{0B2D7431-F876-4A58-87BF-F748338CD3BF}" +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "bench_bitcoin", "bench_bitcoin\bench_bitcoin.vcxproj", "{1125654E-E1B2-4431-8B5C-62EA9A2FEECB}" +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "bitcoin-tx", "bitcoin-tx\bitcoin-tx.vcxproj", "{D3022AF6-AD33-4CE3-B358-87CB6A1B29CF}" +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "bitcoin-util", "bitcoin-util\bitcoin-util.vcxproj", "{57A04EC9-542A-4E40-83D0-AC3BE1F36805}" +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "bitcoin-wallet", "bitcoin-wallet\bitcoin-wallet.vcxproj", "{84DE8790-EDE3-4483-81AC-C32F15E861F4}" +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "libbitcoin_wallet_tool", 
"libbitcoin_wallet_tool\libbitcoin_wallet_tool.vcxproj", "{F91AC55E-6F5E-4C58-9AC5-B40DB7DEEF93}" +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "libsecp256k1", "libsecp256k1\libsecp256k1.vcxproj", "{BB493552-3B8C-4A8C-BF69-A6E7A51D2EA6}" +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "libleveldb", "libleveldb\libleveldb.vcxproj", "{18430FEF-6B61-4C53-B396-718E02850F1B}" +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "libbitcoin_qt", "libbitcoin_qt\libbitcoin_qt.vcxproj", "{2B4ABFF8-D1FD-4845-88C9-1F3C0A6512BF}" +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "bitcoin-qt", "bitcoin-qt\bitcoin-qt.vcxproj", "{7E99172D-7FF2-4CB6-B736-AC9B76ED412A}" +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "libtest_util", "libtest_util\libtest_util.vcxproj", "{868474FD-35F6-4400-8EED-30A33E7521D4}" +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "test_bitcoin-qt", "test_bitcoin-qt\test_bitcoin-qt.vcxproj", "{51201D5E-D939-4854-AE9D-008F03FF518E}" +EndProject +Project("{542007E3-BE0D-4B0D-A6B0-AA8813E2558D}") = "libminisketch", "libminisketch\libminisketch.vcxproj", "{542007E3-BE0D-4B0D-A6B0-AA8813E2558D}" +EndProject +Global + GlobalSection(SolutionConfigurationPlatforms) = preSolution + Debug|x64 = Debug|x64 + Release|x64 = Release|x64 + EndGlobalSection + GlobalSection(ProjectConfigurationPlatforms) = postSolution + {2B384FA8-9EE1-4544-93CB-0D733C25E8CE}.Debug|x64.ActiveCfg = Debug|x64 + {2B384FA8-9EE1-4544-93CB-0D733C25E8CE}.Debug|x64.Build.0 = Debug|x64 + {2B384FA8-9EE1-4544-93CB-0D733C25E8CE}.Release|x64.ActiveCfg = Release|x64 + {2B384FA8-9EE1-4544-93CB-0D733C25E8CE}.Release|x64.Build.0 = Release|x64 + {D4513DDF-6013-44DC-ADCC-12EAF6D1F038}.Debug|x64.ActiveCfg = Debug|x64 + {D4513DDF-6013-44DC-ADCC-12EAF6D1F038}.Debug|x64.Build.0 = Debug|x64 + {D4513DDF-6013-44DC-ADCC-12EAF6D1F038}.Release|x64.ActiveCfg = Release|x64 + {D4513DDF-6013-44DC-ADCC-12EAF6D1F038}.Release|x64.Build.0 = Release|x64 + {B53A5535-EE9D-4C6F-9A26-F79EE3BC3754}.Debug|x64.ActiveCfg = Debug|x64 + {B53A5535-EE9D-4C6F-9A26-F79EE3BC3754}.Debug|x64.Build.0 = Debug|x64 + {B53A5535-EE9D-4C6F-9A26-F79EE3BC3754}.Release|x64.ActiveCfg = Release|x64 + {B53A5535-EE9D-4C6F-9A26-F79EE3BC3754}.Release|x64.Build.0 = Release|x64 + {7C87E378-DF58-482E-AA2F-1BC129BC19CE}.Debug|x64.ActiveCfg = Debug|x64 + {7C87E378-DF58-482E-AA2F-1BC129BC19CE}.Debug|x64.Build.0 = Debug|x64 + {7C87E378-DF58-482E-AA2F-1BC129BC19CE}.Release|x64.ActiveCfg = Release|x64 + {7C87E378-DF58-482E-AA2F-1BC129BC19CE}.Release|x64.Build.0 = Release|x64 + {6190199C-6CF4-4DAD-BFBD-93FA72A760C1}.Debug|x64.ActiveCfg = Debug|x64 + {6190199C-6CF4-4DAD-BFBD-93FA72A760C1}.Debug|x64.Build.0 = Debug|x64 + {6190199C-6CF4-4DAD-BFBD-93FA72A760C1}.Release|x64.ActiveCfg = Release|x64 + {6190199C-6CF4-4DAD-BFBD-93FA72A760C1}.Release|x64.Build.0 = Release|x64 + {460FEE33-1FE1-483F-B3BF-931FF8E969A5}.Debug|x64.ActiveCfg = Debug|x64 + {460FEE33-1FE1-483F-B3BF-931FF8E969A5}.Debug|x64.Build.0 = Debug|x64 + {460FEE33-1FE1-483F-B3BF-931FF8E969A5}.Release|x64.ActiveCfg = Release|x64 + {460FEE33-1FE1-483F-B3BF-931FF8E969A5}.Release|x64.Build.0 = Release|x64 + {5724BA7D-A09A-4BA8-800B-C4C1561B3D69}.Debug|x64.ActiveCfg = Debug|x64 + {5724BA7D-A09A-4BA8-800B-C4C1561B3D69}.Debug|x64.Build.0 = Debug|x64 + {5724BA7D-A09A-4BA8-800B-C4C1561B3D69}.Release|x64.ActiveCfg = Release|x64 + {5724BA7D-A09A-4BA8-800B-C4C1561B3D69}.Release|x64.Build.0 = Release|x64 + 
{93B86837-B543-48A5-A89B-7C87ABB77DF2}.Debug|x64.ActiveCfg = Debug|x64 + {93B86837-B543-48A5-A89B-7C87ABB77DF2}.Debug|x64.Build.0 = Debug|x64 + {93B86837-B543-48A5-A89B-7C87ABB77DF2}.Release|x64.ActiveCfg = Release|x64 + {93B86837-B543-48A5-A89B-7C87ABB77DF2}.Release|x64.Build.0 = Release|x64 + {792D487F-F14C-49FC-A9DE-3FC150F31C3F}.Debug|x64.ActiveCfg = Debug|x64 + {792D487F-F14C-49FC-A9DE-3FC150F31C3F}.Debug|x64.Build.0 = Debug|x64 + {792D487F-F14C-49FC-A9DE-3FC150F31C3F}.Release|x64.ActiveCfg = Release|x64 + {792D487F-F14C-49FC-A9DE-3FC150F31C3F}.Release|x64.Build.0 = Release|x64 + {A56B73DB-D46D-4882-8374-1FE3FFA08F07}.Debug|x64.ActiveCfg = Debug|x64 + {A56B73DB-D46D-4882-8374-1FE3FFA08F07}.Debug|x64.Build.0 = Debug|x64 + {A56B73DB-D46D-4882-8374-1FE3FFA08F07}.Release|x64.ActiveCfg = Release|x64 + {A56B73DB-D46D-4882-8374-1FE3FFA08F07}.Release|x64.Build.0 = Release|x64 + {0667528C-D734-4009-ADF9-C0D6C4A5A5A6}.Debug|x64.ActiveCfg = Debug|x64 + {0667528C-D734-4009-ADF9-C0D6C4A5A5A6}.Debug|x64.Build.0 = Debug|x64 + {0667528C-D734-4009-ADF9-C0D6C4A5A5A6}.Release|x64.ActiveCfg = Release|x64 + {0667528C-D734-4009-ADF9-C0D6C4A5A5A6}.Release|x64.Build.0 = Release|x64 + {0B2D7431-F876-4A58-87BF-F748338CD3BF}.Debug|x64.ActiveCfg = Debug|x64 + {0B2D7431-F876-4A58-87BF-F748338CD3BF}.Debug|x64.Build.0 = Debug|x64 + {0B2D7431-F876-4A58-87BF-F748338CD3BF}.Release|x64.ActiveCfg = Release|x64 + {0B2D7431-F876-4A58-87BF-F748338CD3BF}.Release|x64.Build.0 = Release|x64 + {1125654E-E1B2-4431-8B5C-62EA9A2FEECB}.Debug|x64.ActiveCfg = Debug|x64 + {1125654E-E1B2-4431-8B5C-62EA9A2FEECB}.Debug|x64.Build.0 = Debug|x64 + {1125654E-E1B2-4431-8B5C-62EA9A2FEECB}.Release|x64.ActiveCfg = Release|x64 + {1125654E-E1B2-4431-8B5C-62EA9A2FEECB}.Release|x64.Build.0 = Release|x64 + {D3022AF6-AD33-4CE3-B358-87CB6A1B29CF}.Debug|x64.ActiveCfg = Debug|x64 + {D3022AF6-AD33-4CE3-B358-87CB6A1B29CF}.Debug|x64.Build.0 = Debug|x64 + {D3022AF6-AD33-4CE3-B358-87CB6A1B29CF}.Release|x64.ActiveCfg = Release|x64 + {D3022AF6-AD33-4CE3-B358-87CB6A1B29CF}.Release|x64.Build.0 = Release|x64 + {57A04EC9-542A-4E40-83D0-AC3BE1F36805}.Debug|x64.ActiveCfg = Debug|x64 + {57A04EC9-542A-4E40-83D0-AC3BE1F36805}.Debug|x64.Build.0 = Debug|x64 + {57A04EC9-542A-4E40-83D0-AC3BE1F36805}.Release|x64.ActiveCfg = Release|x64 + {57A04EC9-542A-4E40-83D0-AC3BE1F36805}.Release|x64.Build.0 = Release|x64 + {84DE8790-EDE3-4483-81AC-C32F15E861F4}.Debug|x64.ActiveCfg = Debug|x64 + {84DE8790-EDE3-4483-81AC-C32F15E861F4}.Debug|x64.Build.0 = Debug|x64 + {84DE8790-EDE3-4483-81AC-C32F15E861F4}.Release|x64.ActiveCfg = Release|x64 + {84DE8790-EDE3-4483-81AC-C32F15E861F4}.Release|x64.Build.0 = Release|x64 + {F91AC55E-6F5E-4C58-9AC5-B40DB7DEEF93}.Debug|x64.ActiveCfg = Debug|x64 + {F91AC55E-6F5E-4C58-9AC5-B40DB7DEEF93}.Debug|x64.Build.0 = Debug|x64 + {F91AC55E-6F5E-4C58-9AC5-B40DB7DEEF93}.Release|x64.ActiveCfg = Release|x64 + {F91AC55E-6F5E-4C58-9AC5-B40DB7DEEF93}.Release|x64.Build.0 = Release|x64 + {BB493552-3B8C-4A8C-BF69-A6E7A51D2EA6}.Debug|x64.ActiveCfg = Debug|x64 + {BB493552-3B8C-4A8C-BF69-A6E7A51D2EA6}.Debug|x64.Build.0 = Debug|x64 + {BB493552-3B8C-4A8C-BF69-A6E7A51D2EA6}.Release|x64.ActiveCfg = Release|x64 + {BB493552-3B8C-4A8C-BF69-A6E7A51D2EA6}.Release|x64.Build.0 = Release|x64 + {18430FEF-6B61-4C53-B396-718E02850F1B}.Debug|x64.ActiveCfg = Debug|x64 + {18430FEF-6B61-4C53-B396-718E02850F1B}.Debug|x64.Build.0 = Debug|x64 + {18430FEF-6B61-4C53-B396-718E02850F1B}.Release|x64.ActiveCfg = Release|x64 + {18430FEF-6B61-4C53-B396-718E02850F1B}.Release|x64.Build.0 = Release|x64 + 
{2B4ABFF8-D1FD-4845-88C9-1F3C0A6512BF}.Debug|x64.ActiveCfg = Debug|x64 + {2B4ABFF8-D1FD-4845-88C9-1F3C0A6512BF}.Debug|x64.Build.0 = Debug|x64 + {2B4ABFF8-D1FD-4845-88C9-1F3C0A6512BF}.Release|x64.ActiveCfg = Release|x64 + {2B4ABFF8-D1FD-4845-88C9-1F3C0A6512BF}.Release|x64.Build.0 = Release|x64 + {7E99172D-7FF2-4CB6-B736-AC9B76ED412A}.Debug|x64.ActiveCfg = Debug|x64 + {7E99172D-7FF2-4CB6-B736-AC9B76ED412A}.Debug|x64.Build.0 = Debug|x64 + {7E99172D-7FF2-4CB6-B736-AC9B76ED412A}.Release|x64.ActiveCfg = Release|x64 + {7E99172D-7FF2-4CB6-B736-AC9B76ED412A}.Release|x64.Build.0 = Release|x64 + {868474FD-35F6-4400-8EED-30A33E7521D4}.Debug|x64.ActiveCfg = Debug|x64 + {868474FD-35F6-4400-8EED-30A33E7521D4}.Debug|x64.Build.0 = Debug|x64 + {868474FD-35F6-4400-8EED-30A33E7521D4}.Release|x64.ActiveCfg = Release|x64 + {868474FD-35F6-4400-8EED-30A33E7521D4}.Release|x64.Build.0 = Release|x64 + {51201D5E-D939-4854-AE9D-008F03FF518E}.Debug|x64.ActiveCfg = Debug|x64 + {51201D5E-D939-4854-AE9D-008F03FF518E}.Debug|x64.Build.0 = Debug|x64 + {51201D5E-D939-4854-AE9D-008F03FF518E}.Release|x64.ActiveCfg = Release|x64 + {51201D5E-D939-4854-AE9D-008F03FF518E}.Release|x64.Build.0 = Release|x64 + {542007E3-BE0D-4B0D-A6B0-AA8813E2558D}.Debug|x64.ActiveCfg = Debug|x64 + {542007E3-BE0D-4B0D-A6B0-AA8813E2558D}.Debug|x64.Build.0 = Debug|x64 + {542007E3-BE0D-4B0D-A6B0-AA8813E2558D}.Release|x64.ActiveCfg = Release|x64 + {542007E3-BE0D-4B0D-A6B0-AA8813E2558D}.Release|x64.Build.0 = Release|x64 + EndGlobalSection + GlobalSection(SolutionProperties) = preSolution + HideSolutionNode = FALSE + EndGlobalSection + GlobalSection(ExtensibilityGlobals) = postSolution + SolutionGuid = {58AAB032-7274-49BD-845E-5EF4DBB69B70} + EndGlobalSection +EndGlobal diff --git a/build_msvc/bitcoin_config.h.in b/build_msvc/bitcoin_config.h.in new file mode 100644 index 0000000000000..b37d536947655 --- /dev/null +++ b/build_msvc/bitcoin_config.h.in @@ -0,0 +1,201 @@ +// Copyright (c) 2018-2020 The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. + +#ifndef BITCOIN_BITCOIN_CONFIG_H +#define BITCOIN_BITCOIN_CONFIG_H + +/* Version Build */ +#define CLIENT_VERSION_BUILD $ + +/* Version is release */ +#define CLIENT_VERSION_IS_RELEASE $ + +/* Major version */ +#define CLIENT_VERSION_MAJOR $ + +/* Minor version */ +#define CLIENT_VERSION_MINOR $ + +/* Copyright holder(s) before %s replacement */ +#define COPYRIGHT_HOLDERS "The %s developers" + +/* Copyright holder(s) */ +#define COPYRIGHT_HOLDERS_FINAL "The Bitcoin Core developers" + +/* Replacement for %s in copyright holders string */ +#define COPYRIGHT_HOLDERS_SUBSTITUTION "Bitcoin Core" + +/* Copyright year */ +#define COPYRIGHT_YEAR $ + +/* Define to 1 to enable wallet functions */ +#define ENABLE_WALLET 1 + +/* Define to 1 to enable BDB wallet */ +#define USE_BDB 1 + +/* Define to 1 to enable SQLite wallet */ +#define USE_SQLITE 1 + +/* Define to 1 to enable ZMQ functions */ +#define ENABLE_ZMQ 1 + +/* define if the Boost library is available */ +#define HAVE_BOOST /**/ + +/* define if external signer support is enabled (requires Boost::Process) */ +#define ENABLE_EXTERNAL_SIGNER /**/ + +/* Define this symbol if the consensus lib has been built */ +#define HAVE_CONSENSUS_LIB 1 + +/* define if the compiler supports basic C++17 syntax */ +#define HAVE_CXX17 1 + +/* Define to 1 if you have the declaration of `be16toh', and to 0 if you + don't. 
*/ +#define HAVE_DECL_BE16TOH 0 + +/* Define to 1 if you have the declaration of `be32toh', and to 0 if you + don't. */ +#define HAVE_DECL_BE32TOH 0 + +/* Define to 1 if you have the declaration of `be64toh', and to 0 if you + don't. */ +#define HAVE_DECL_BE64TOH 0 + +/* Define to 1 if you have the declaration of `bswap_16', and to 0 if you + don't. */ +#define HAVE_DECL_BSWAP_16 0 + +/* Define to 1 if you have the declaration of `bswap_32', and to 0 if you + don't. */ +#define HAVE_DECL_BSWAP_32 0 + +/* Define to 1 if you have the declaration of `bswap_64', and to 0 if you + don't. */ +#define HAVE_DECL_BSWAP_64 0 + +/* Define to 1 if you have the declaration of `fork', and to 0 if you don't. + */ +#define HAVE_DECL_FORK 0 + +/* Define to 1 if you have the declaration of `htobe16', and to 0 if you + don't. */ +#define HAVE_DECL_HTOBE16 0 + +/* Define to 1 if you have the declaration of `htobe32', and to 0 if you + don't. */ +#define HAVE_DECL_HTOBE32 0 + +/* Define to 1 if you have the declaration of `htobe64', and to 0 if you + don't. */ +#define HAVE_DECL_HTOBE64 0 + +/* Define to 1 if you have the declaration of `htole16', and to 0 if you + don't. */ +#define HAVE_DECL_HTOLE16 0 + +/* Define to 1 if you have the declaration of `htole32', and to 0 if you + don't. */ +#define HAVE_DECL_HTOLE32 0 + +/* Define to 1 if you have the declaration of `htole64', and to 0 if you + don't. */ +#define HAVE_DECL_HTOLE64 0 + +/* Define to 1 if you have the declaration of `le16toh', and to 0 if you + don't. */ +#define HAVE_DECL_LE16TOH 0 + +/* Define to 1 if you have the declaration of `le32toh', and to 0 if you + don't. */ +#define HAVE_DECL_LE32TOH 0 + +/* Define to 1 if you have the declaration of `le64toh', and to 0 if you + don't. */ +#define HAVE_DECL_LE64TOH 0 + +/* Define to 1 if you have the declaration of `setsid', and to 0 if you don't. + */ +#define HAVE_DECL_SETSID 0 + +/* Define to 1 if you have the declaration of `strerror_r', and to 0 if you + don't. */ +#define HAVE_DECL_STRERROR_R 0 + +/* Define if the dllexport attribute is supported. */ +#define HAVE_DLLEXPORT_ATTRIBUTE 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_INTTYPES_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_MEMORY_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_MINIUPNPC_MINIUPNPC_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_MINIUPNPC_UPNPCOMMANDS_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_MINIUPNPC_UPNPERRORS_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STDINT_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STDIO_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STDLIB_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STRINGS_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STRING_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_STAT_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_TYPES_H 1 + +/* Define to the address where bug reports for this package should be sent. */ +#define PACKAGE_BUGREPORT "https://github.com/bitcoin/bitcoin/issues" + +/* Define to the full name of this package. */ +#define PACKAGE_NAME "Bitcoin Core" + +/* Define to the full name and version of this package. */ +#define PACKAGE_STRING $ + +/* Define to the home page for this package. */ +#define PACKAGE_URL "https://bitcoincore.org/" + +/* Define to the version of this package. 
*/ +#define PACKAGE_VERSION $ + +/* Define this symbol if the minimal qt platform exists */ +#define QT_QPA_PLATFORM_MINIMAL 1 + +/* Define this symbol if the qt platform is windows */ +#define QT_QPA_PLATFORM_WINDOWS 1 + +/* Define this symbol if qt plugins are static */ +#define QT_STATICPLUGIN 1 + +/* Windows Universal Platform constraints */ +#if !defined(WINAPI_FAMILY) || (WINAPI_FAMILY == WINAPI_FAMILY_DESKTOP_APP) +/* Either a desktop application without API restrictions, or and older system + before these macros were defined. */ + +/* ::wsystem is available */ +#define HAVE_SYSTEM 1 + +#endif // !WINAPI_FAMILY || WINAPI_FAMILY_DESKTOP_APP + +#endif //BITCOIN_BITCOIN_CONFIG_H diff --git a/build_msvc/bitcoind/bitcoind.vcxproj b/build_msvc/bitcoind/bitcoind.vcxproj new file mode 100644 index 0000000000000..b1204d0d5d152 --- /dev/null +++ b/build_msvc/bitcoind/bitcoind.vcxproj @@ -0,0 +1,88 @@ + + + + + {D4513DDF-6013-44DC-ADCC-12EAF6D1F038} + + + Application + $(SolutionDir)$(Platform)\$(Configuration)\ + + + + + $(IntDir)init_bitcoind.obj + + + + + {2b384fa8-9ee1-4544-93cb-0d733c25e8ce} + + + {7c87e378-df58-482e-aa2f-1bc129bc19ce} + + + {6190199c-6cf4-4dad-bfbd-93fa72a760c1} + + + {460fee33-1fe1-483f-b3bf-931ff8e969a5} + + + {b53a5535-ee9d-4c6f-9a26-f79ee3bc3754} + + + {93b86837-b543-48a5-a89b-7c87abb77df2} + + + {792d487f-f14c-49fc-a9de-3fc150f31c3f} + + + {5724ba7d-a09a-4ba8-800b-c4c1561b3d69} + + + {bb493552-3b8c-4a8c-bf69-a6e7a51d2ea6} + + + {18430fef-6b61-4c53-b396-718e02850f1b} + + + + + + + ..\..\test\config.ini.in + ..\..\test\config.ini + + + + + + + + + + + + + + + + + + + + diff --git a/build_msvc/common.init.vcxproj.in b/build_msvc/common.init.vcxproj.in new file mode 100644 index 0000000000000..182efff233289 --- /dev/null +++ b/build_msvc/common.init.vcxproj.in @@ -0,0 +1,106 @@ + + + + + + 16.0 + true + + + + true + true + true + true + true + $(Configuration) + x64-windows-static + + + + $(Registry:HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Microsoft SDKs\Windows\v10.0@ProductVersion) + $(Registry:HKEY_LOCAL_MACHINE\SOFTWARE\Wow6432Node\Microsoft\Microsoft SDKs\Windows\v10.0@ProductVersion) + + $(WindowsTargetPlatformVersion_10).0 + $(WindowsTargetPlatformVersion_10) + + + + + Release + x64 + + + Debug + x64 + + + + + false + false + @TOOLSET@ + Unicode + No + $(SolutionDir)$(Platform)\$(Configuration)\$(ProjectName)\ + $(Platform)\$(Configuration)\$(ProjectName)\ + + + + true + true + @TOOLSET@ + Unicode + $(SolutionDir)$(Platform)\$(Configuration)\$(ProjectName)\ + $(Platform)\$(Configuration)\$(ProjectName)\ + + + + + Disabled + false + true + true + true + MultiThreaded + None + + + false + false + /LTCG:OFF + + + + + + Disabled + false + _DEBUG;%(PreprocessorDefinitions) + true + MultiThreadedDebug + /bigobj %(AdditionalOptions) + + + + + + Level3 + NotUsing + /utf-8 /Zc:__cplusplus /std:c++17 %(AdditionalOptions) + 4018;4244;4267;4334;4715;4805;4834 + true + _SILENCE_CXX17_CODECVT_HEADER_DEPRECATION_WARNING;_SILENCE_CXX17_OLD_ALLOCATOR_MEMBERS_DEPRECATION_WARNING;ZMQ_STATIC;NOMINMAX;WIN32;HAVE_CONFIG_H;_CRT_SECURE_NO_WARNINGS;_SCL_SECURE_NO_WARNINGS;_CONSOLE;_WIN32_WINNT=0x0601;_WIN32_IE=0x0501;WIN32_LEAN_AND_MEAN;%(PreprocessorDefinitions) + ..\..\src;..\..\src\minisketch\include;..\..\src\univalue\include;..\..\src\secp256k1\include;..\..\src\leveldb\include;..\..\src\leveldb\helpers\memenv;%(AdditionalIncludeDirectories) + + + Console + 
Iphlpapi.lib;ws2_32.lib;Shlwapi.lib;kernel32.lib;user32.lib;gdi32.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies) + true + + + /ignore:4221 + + + + diff --git a/build_msvc/common.qt.init.vcxproj b/build_msvc/common.qt.init.vcxproj new file mode 100644 index 0000000000000..cc8063e545b87 --- /dev/null +++ b/build_msvc/common.qt.init.vcxproj @@ -0,0 +1,16 @@ + + + + + C:\Qt_static + $(QtBaseDir)\plugins + $(QtBaseDir)\lib + $(QtBaseDir)\include + $(QtIncludeDir);$(QtIncludeDir)\QtNetwork;$(QtIncludeDir)\QtCore;$(QtIncludeDir)\QtWidgets;$(QtIncludeDir)\QtGui; + .\QtGeneratedFiles\qt + $(QtBaseDir)\bin + $(QtPluginsLibraryDir)\platforms\qminimal.lib;$(QtPluginsLibraryDir)\platforms\qwindows.lib;$(QtPluginsLibraryDir)\styles\qwindowsvistastyle.lib;$(QtLibraryDir)\Qt5WindowsUIAutomationSupport.lib;$(QtLibraryDir)\qtfreetype.lib;$(QtLibraryDir)\qtharfbuzz.lib;$(QtLibraryDir)\qtlibpng.lib;$(QtLibraryDir)\qtpcre2.lib;$(QtLibraryDir)\Qt5AccessibilitySupport.lib;$(QtLibraryDir)\Qt5Core.lib;$(QtLibraryDir)\Qt5Concurrent.lib;$(QtLibraryDir)\Qt5EventDispatcherSupport.lib;$(QtLibraryDir)\Qt5FontDatabaseSupport.lib;$(QtLibraryDir)\Qt5Gui.lib;$(QtLibraryDir)\Qt5Network.lib;$(QtLibraryDir)\Qt5PlatformCompositorSupport.lib;$(QtLibraryDir)\Qt5ThemeSupport.lib;$(QtLibraryDir)\Qt5Widgets.lib;$(QtLibraryDir)\Qt5WinExtras.lib;$(QtLibraryDir)\qtmain.lib;Wtsapi32.lib;userenv.lib;netapi32.lib;imm32.lib;Dwmapi.lib;version.lib;winmm.lib;UxTheme.lib + $(QtPluginsLibraryDir)\platforms\qwindowsd.lib;$(QtPluginsLibraryDir)\platforms\qminimald.lib;$(QtPluginsLibraryDir)\styles\qwindowsvistastyled.lib;$(QtLibraryDir)\*d.lib;Wtsapi32.lib;crypt32.lib;userenv.lib;netapi32.lib;imm32.lib;Dwmapi.lib;version.lib;winmm.lib;UxTheme.lib + + + diff --git a/build_msvc/common.vcxproj b/build_msvc/common.vcxproj new file mode 100644 index 0000000000000..270c75e8a7b1f --- /dev/null +++ b/build_msvc/common.vcxproj @@ -0,0 +1,12 @@ + + +$(BuildDependsOn);CopyBuildArtifacts + + + + + + + + + diff --git a/build_msvc/libbitcoin_cli/libbitcoin_cli.vcxproj.in b/build_msvc/libbitcoin_cli/libbitcoin_cli.vcxproj.in new file mode 100644 index 0000000000000..620df72a2f764 --- /dev/null +++ b/build_msvc/libbitcoin_cli/libbitcoin_cli.vcxproj.in @@ -0,0 +1,16 @@ + + + + + {0667528C-D734-4009-ADF9-C0D6C4A5A5A6} + + + StaticLibrary + + +@SOURCE_FILES@ + + + + + diff --git a/build_msvc/libbitcoin_common/libbitcoin_common.vcxproj.in b/build_msvc/libbitcoin_common/libbitcoin_common.vcxproj.in new file mode 100644 index 0000000000000..b47d62b29587e --- /dev/null +++ b/build_msvc/libbitcoin_common/libbitcoin_common.vcxproj.in @@ -0,0 +1,16 @@ + + + + + {7C87E378-DF58-482E-AA2F-1BC129BC19CE} + + + StaticLibrary + + +@SOURCE_FILES@ + + + + + diff --git a/build_msvc/libbitcoin_crypto/libbitcoin_crypto.vcxproj.in b/build_msvc/libbitcoin_crypto/libbitcoin_crypto.vcxproj.in new file mode 100644 index 0000000000000..32cb75bf871c1 --- /dev/null +++ b/build_msvc/libbitcoin_crypto/libbitcoin_crypto.vcxproj.in @@ -0,0 +1,16 @@ + + + + + {6190199C-6CF4-4DAD-BFBD-93FA72A760C1} + + + StaticLibrary + + +@SOURCE_FILES@ + + + + + diff --git a/build_msvc/libbitcoin_node/libbitcoin_node.vcxproj.in b/build_msvc/libbitcoin_node/libbitcoin_node.vcxproj.in new file mode 100644 index 0000000000000..58e90dbaeb438 --- /dev/null +++ b/build_msvc/libbitcoin_node/libbitcoin_node.vcxproj.in @@ -0,0 +1,19 @@ + + + + + {460FEE33-1FE1-483F-B3BF-931FF8E969A5} + + + StaticLibrary + + +@SOURCE_FILES@ + + 
$(IntDir)wallet_init.obj + + + + + + \ No newline at end of file diff --git a/build_msvc/libbitcoin_qt/libbitcoin_qt.vcxproj b/build_msvc/libbitcoin_qt/libbitcoin_qt.vcxproj new file mode 100644 index 0000000000000..a64ae881f2566 --- /dev/null +++ b/build_msvc/libbitcoin_qt/libbitcoin_qt.vcxproj @@ -0,0 +1,223 @@ + + + + + + {2B4ABFF8-D1FD-4845-88C9-1F3C0A6512BF} + StaticLibrary + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + _AMD64_;%(PreprocessorDefinitions) + $(QtIncludes);$(GeneratedFilesOutDir)\..;%(AdditionalIncludeDirectories) + + + + + + _AMD64_;%(PreprocessorDefinitions) + $(QtIncludes);$(GeneratedFilesOutDir)\..;%(AdditionalIncludeDirectories) + + + + + + + + + + + + + + + + + There was an error executing the libbitcoin_qt moc code include generation task. + + + + + + + + + + There was an error executing the libbitcoin_qt moc header generation task. + + + + + + + + + + There was an error executing the libbitcoin_qt forms header generation task. + + + + + + + + + There was an error executing the libbitcoin_qt translation file generation task. + + + + + + + + There was an error executing the libbitcoin_qt resource code generation task. + + + + + + + + + + + + + + + moccode; + mocheader; + forms; + translation; + resource; + $(BuildDependsOn); + + + + + qtclean; + $(CleanDependsOn); + + + + diff --git a/build_msvc/libbitcoin_util/libbitcoin_util.vcxproj.in b/build_msvc/libbitcoin_util/libbitcoin_util.vcxproj.in new file mode 100644 index 0000000000000..6ec40461c2adc --- /dev/null +++ b/build_msvc/libbitcoin_util/libbitcoin_util.vcxproj.in @@ -0,0 +1,17 @@ + + + + + {B53A5535-EE9D-4C6F-9A26-F79EE3BC3754} + + + StaticLibrary + + + +@SOURCE_FILES@ + + + + + diff --git a/build_msvc/libbitcoin_wallet/libbitcoin_wallet.vcxproj.in b/build_msvc/libbitcoin_wallet/libbitcoin_wallet.vcxproj.in new file mode 100644 index 0000000000000..613d5c7199159 --- /dev/null +++ b/build_msvc/libbitcoin_wallet/libbitcoin_wallet.vcxproj.in @@ -0,0 +1,19 @@ + + + + + {93B86837-B543-48A5-A89B-7C87ABB77DF2} + + + StaticLibrary + + + + + +@SOURCE_FILES@ + + + + + diff --git a/build_msvc/libbitcoin_wallet_tool/libbitcoin_wallet_tool.vcxproj.in b/build_msvc/libbitcoin_wallet_tool/libbitcoin_wallet_tool.vcxproj.in new file mode 100644 index 0000000000000..1a6b7b6b92670 --- /dev/null +++ b/build_msvc/libbitcoin_wallet_tool/libbitcoin_wallet_tool.vcxproj.in @@ -0,0 +1,16 @@ + + + + + {F91AC55E-6F5E-4C58-9AC5-B40DB7DEEF93} + + + StaticLibrary + + +@SOURCE_FILES@ + + + + + diff --git a/build_msvc/libbitcoin_zmq/libbitcoin_zmq.vcxproj.in b/build_msvc/libbitcoin_zmq/libbitcoin_zmq.vcxproj.in new file mode 100644 index 0000000000000..e86eea81e6567 --- /dev/null +++ b/build_msvc/libbitcoin_zmq/libbitcoin_zmq.vcxproj.in @@ -0,0 +1,16 @@ + + + + + {792D487F-F14C-49FC-A9DE-3FC150F31C3F} + + + StaticLibrary + + +@SOURCE_FILES@ + + + + + diff --git a/build_msvc/libbitcoinconsensus/libbitcoinconsensus.vcxproj b/build_msvc/libbitcoinconsensus/libbitcoinconsensus.vcxproj new file mode 100644 index 0000000000000..4cb0bdc90218a --- /dev/null +++ b/build_msvc/libbitcoinconsensus/libbitcoinconsensus.vcxproj @@ -0,0 +1,37 @@ + + + + + {2B384FA8-9EE1-4544-93CB-0D733C25E8CE} + + + StaticLibrary + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/build_msvc/libleveldb/libleveldb.vcxproj 
b/build_msvc/libleveldb/libleveldb.vcxproj new file mode 100644 index 0000000000000..009be30decbf1 --- /dev/null +++ b/build_msvc/libleveldb/libleveldb.vcxproj @@ -0,0 +1,61 @@ + + + + + {18430FEF-6B61-4C53-B396-718E02850F1B} + + + StaticLibrary + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + HAVE_CRC32C=0;HAVE_SNAPPY=0;__STDC_LIMIT_MACROS;LEVELDB_IS_BIG_ENDIAN=0;_UNICODE;UNICODE;_CRT_NONSTDC_NO_DEPRECATE;LEVELDB_PLATFORM_WINDOWS;LEVELDB_ATOMIC_PRESENT;%(PreprocessorDefinitions) + 4244;4267 + ..\..\src\leveldb;..\..\src\leveldb\include;%(AdditionalIncludeDirectories) + + + + + + diff --git a/build_msvc/libminisketch/libminisketch.vcxproj b/build_msvc/libminisketch/libminisketch.vcxproj new file mode 100644 index 0000000000000..b34593fe5c717 --- /dev/null +++ b/build_msvc/libminisketch/libminisketch.vcxproj @@ -0,0 +1,38 @@ + + + + + {542007E3-BE0D-4B0D-A6B0-AA8813E2558D} + + + StaticLibrary + + + + + + + + + + + + + + + + + + + + + + + 4060;4065;4146;4244;4267;4554 + HAVE_CLMUL;DISABLE_DEFAULT_FIELDS;ENABLE_FIELD_32;%(PreprocessorDefinitions) + + + + + + diff --git a/build_msvc/libsecp256k1/libsecp256k1.vcxproj b/build_msvc/libsecp256k1/libsecp256k1.vcxproj new file mode 100644 index 0000000000000..16ee32d87e10c --- /dev/null +++ b/build_msvc/libsecp256k1/libsecp256k1.vcxproj @@ -0,0 +1,25 @@ + + + + + {BB493552-3B8C-4A8C-BF69-A6E7A51D2EA6} + + + StaticLibrary + + + + + + + + + ENABLE_MODULE_ECDH;ENABLE_MODULE_RECOVERY;ENABLE_MODULE_EXTRAKEYS;ENABLE_MODULE_SCHNORRSIG;%(PreprocessorDefinitions) + ..\..\src\secp256k1;%(AdditionalIncludeDirectories) + 4146;4244;4267;4334 + + + + + + diff --git a/build_msvc/libsecp256k1_config.h b/build_msvc/libsecp256k1_config.h new file mode 100644 index 0000000000000..57f2f144ff6ee --- /dev/null +++ b/build_msvc/libsecp256k1_config.h @@ -0,0 +1,32 @@ +/********************************************************************** + * Copyright (c) 2013, 2014 Pieter Wuille * + * Distributed under the MIT software license, see the accompanying * + * file COPYING or http://www.opensource.org/licenses/mit-license.php.* + **********************************************************************/ + +#ifndef BITCOIN_LIBSECP256K1_CONFIG_H +#define BITCOIN_LIBSECP256K1_CONFIG_H + +#undef USE_ASM_X86_64 +#undef USE_ENDOMORPHISM +#undef USE_FIELD_10X26 +#undef USE_FIELD_5X52 +#undef USE_FIELD_INV_BUILTIN +#undef USE_FIELD_INV_NUM +#undef USE_NUM_GMP +#undef USE_NUM_NONE +#undef USE_SCALAR_4X64 +#undef USE_SCALAR_8X32 +#undef USE_SCALAR_INV_BUILTIN +#undef USE_SCALAR_INV_NUM + +#define USE_NUM_NONE 1 +#define USE_FIELD_INV_BUILTIN 1 +#define USE_SCALAR_INV_BUILTIN 1 +#define USE_FIELD_10X26 1 +#define USE_SCALAR_8X32 1 + +#define ECMULT_GEN_PREC_BITS 4 +#define ECMULT_WINDOW_SIZE 15 + +#endif // BITCOIN_LIBSECP256K1_CONFIG_H diff --git a/build_msvc/libtest_util/libtest_util.vcxproj.in b/build_msvc/libtest_util/libtest_util.vcxproj.in new file mode 100644 index 0000000000000..b5e844010e919 --- /dev/null +++ b/build_msvc/libtest_util/libtest_util.vcxproj.in @@ -0,0 +1,16 @@ + + + + + {868474FD-35F6-4400-8EED-30A33E7521D4} + + + StaticLibrary + + +@SOURCE_FILES@ + + + + + diff --git a/build_msvc/libunivalue/libunivalue.vcxproj b/build_msvc/libunivalue/libunivalue.vcxproj new file mode 100644 index 0000000000000..0f13a57241350 --- /dev/null +++ b/build_msvc/libunivalue/libunivalue.vcxproj @@ -0,0 +1,19 @@ + + + + + {5724BA7D-A09A-4BA8-800B-C4C1561B3D69} + + + StaticLibrary + + + + + + + + + + + diff --git 
a/build_msvc/msbuild/tasks/hexdump.targets b/build_msvc/msbuild/tasks/hexdump.targets new file mode 100644 index 0000000000000..12868a9874181 --- /dev/null +++ b/build_msvc/msbuild/tasks/hexdump.targets @@ -0,0 +1,53 @@ + + + + + + + + + + + + + outFileInfo.LastWriteTime) + { + using (Stream inStm = File.OpenRead(RawFilePath)) + { + using (StreamWriter sw = new StreamWriter(HeaderFilePath)) + { + sw.WriteLine(SourceHeader); + int count = 0; + int rawChar = inStm.ReadByte(); + while(rawChar != -1) + { + sw.Write("0x{0:x2}, ", rawChar); + count++; + if(count % 8 == 0) + { + sw.WriteLine(); + } + rawChar = inStm.ReadByte(); + } + sw.WriteLine(SourceFooter); + } + } + } +} +]]> + + + + \ No newline at end of file diff --git a/build_msvc/msbuild/tasks/replaceinfile.targets b/build_msvc/msbuild/tasks/replaceinfile.targets new file mode 100644 index 0000000000000..2ccb8b30e053f --- /dev/null +++ b/build_msvc/msbuild/tasks/replaceinfile.targets @@ -0,0 +1,35 @@ + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/build_msvc/msvc-autogen.py b/build_msvc/msvc-autogen.py new file mode 100755 index 0000000000000..819fe1b7aef4b --- /dev/null +++ b/build_msvc/msvc-autogen.py @@ -0,0 +1,117 @@ +#!/usr/bin/env python3 +# Copyright (c) 2016-2021 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. + +import os +import re +import argparse +from shutil import copyfile + +SOURCE_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'src')) +DEFAULT_PLATFORM_TOOLSET = R'v142' + +libs = [ + 'libbitcoin_cli', + 'libbitcoin_common', + 'libbitcoin_crypto', + 'libbitcoin_node', + 'libbitcoin_util', + 'libbitcoin_wallet_tool', + 'libbitcoin_wallet', + 'libbitcoin_zmq', + 'bench_bitcoin', + 'libtest_util', +] + +ignore_list = [ +] + +lib_sources = {} + + +def parse_makefile(makefile): + with open(makefile, 'r', encoding='utf-8') as file: + current_lib = '' + for line in file.read().splitlines(): + if current_lib: + source = line.split()[0] + if source.endswith('.cpp') and not source.startswith('$') and source not in ignore_list: + source_filename = source.replace('/', '\\') + object_filename = source.replace('/', '_')[:-4] + ".obj" + lib_sources[current_lib].append((source_filename, object_filename)) + if not line.endswith('\\'): + current_lib = '' + continue + for lib in libs: + _lib = lib.replace('-', '_') + if re.search(_lib + '.*_SOURCES \\= \\\\', line): + current_lib = lib + lib_sources[current_lib] = [] + break + +def parse_config_into_btc_config(): + def find_between( s, first, last ): + try: + start = s.index( first ) + len( first ) + end = s.index( last, start ) + return s[start:end] + except ValueError: + return "" + + config_info = [] + with open(os.path.join(SOURCE_DIR,'../configure.ac'), encoding="utf8") as f: + for line in f: + if line.startswith("define"): + config_info.append(find_between(line, "(_", ")")) + + config_info = [c for c in config_info if not c.startswith("COPYRIGHT_HOLDERS")] + + config_dict = dict(item.split(", ") for item in config_info) + config_dict["PACKAGE_VERSION"] = f"\"{config_dict['CLIENT_VERSION_MAJOR']}.{config_dict['CLIENT_VERSION_MINOR']}.{config_dict['CLIENT_VERSION_BUILD']}\"" + version = config_dict["PACKAGE_VERSION"].strip('"') + config_dict["PACKAGE_STRING"] = f"\"Bitcoin Core {version}\"" + + with open(os.path.join(SOURCE_DIR,'../build_msvc/bitcoin_config.h.in'), "r", encoding="utf8") as template_file: + template = 
template_file.readlines() + + for index, line in enumerate(template): + header = "" + if line.startswith("#define"): + header = line.split(" ")[1] + if header in config_dict: + template[index] = line.replace("$", f"{config_dict[header]}") + + with open(os.path.join(SOURCE_DIR,'../build_msvc/bitcoin_config.h'), "w", encoding="utf8") as btc_config: + btc_config.writelines(template) + +def set_properties(vcxproj_filename, placeholder, content): + with open(vcxproj_filename + '.in', 'r', encoding='utf-8') as vcxproj_in_file: + with open(vcxproj_filename, 'w', encoding='utf-8') as vcxproj_file: + vcxproj_file.write(vcxproj_in_file.read().replace(placeholder, content)) + +def main(): + parser = argparse.ArgumentParser(description='Bitcoin-core msbuild configuration initialiser.') + parser.add_argument('-toolset', nargs='?', default=DEFAULT_PLATFORM_TOOLSET, + help='Optionally sets the msbuild platform toolset, e.g. v142 for Visual Studio 2019.' + ' default is %s.'%DEFAULT_PLATFORM_TOOLSET) + args = parser.parse_args() + set_properties(os.path.join(SOURCE_DIR, '../build_msvc/common.init.vcxproj'), '@TOOLSET@', args.toolset) + + for makefile_name in os.listdir(SOURCE_DIR): + if 'Makefile' in makefile_name: + parse_makefile(os.path.join(SOURCE_DIR, makefile_name)) + for key, value in lib_sources.items(): + vcxproj_filename = os.path.abspath(os.path.join(os.path.dirname(__file__), key, key + '.vcxproj')) + content = '' + for source_filename, object_filename in value: + content += ' \n' + content += ' $(IntDir)' + object_filename + '\n' + content += ' \n' + set_properties(vcxproj_filename, '@SOURCE_FILES@\n', content) + parse_config_into_btc_config() + copyfile(os.path.join(SOURCE_DIR,'../build_msvc/bitcoin_config.h'), os.path.join(SOURCE_DIR, 'config/bitcoin-config.h')) + copyfile(os.path.join(SOURCE_DIR,'../build_msvc/libsecp256k1_config.h'), os.path.join(SOURCE_DIR, 'secp256k1/src/libsecp256k1-config.h')) + +if __name__ == '__main__': + main() diff --git a/build_msvc/test_bitcoin-qt/test_bitcoin-qt.vcxproj b/build_msvc/test_bitcoin-qt/test_bitcoin-qt.vcxproj new file mode 100644 index 0000000000000..ec572b4f2ed2a --- /dev/null +++ b/build_msvc/test_bitcoin-qt/test_bitcoin-qt.vcxproj @@ -0,0 +1,123 @@ + + + + + + {51201D5E-D939-4854-AE9D-008F03FF518E} + Application + $(SolutionDir)$(Platform)\$(Configuration)\ + + + + + + + + + + + + + + + + + + + + + + + {2b384fa8-9ee1-4544-93cb-0d733c25e8ce} + + + {0667528c-d734-4009-adf9-c0d6c4a5a5a6} + + + {7c87e378-df58-482e-aa2f-1bc129bc19ce} + + + {6190199c-6cf4-4dad-bfbd-93fa72a760c1} + + + {2b4abff8-d1fd-4845-88c9-1f3c0a6512bf} + + + {460fee33-1fe1-483f-b3bf-931ff8e969a5} + + + {b53a5535-ee9d-4c6f-9a26-f79ee3bc3754} + + + {93b86837-b543-48a5-a89b-7c87abb77df2} + + + {792d487f-f14c-49fc-a9de-3fc150f31c3f} + + + {18430fef-6b61-4c53-b396-718e02850f1b} + + + {bb493552-3b8c-4a8c-bf69-a6e7a51d2ea6} + + + {5724ba7d-a09a-4ba8-800b-c4c1561b3d69} + + + + + + + + + ..\libbitcoin_qt\$(GeneratedFilesOutDir)\..\;$(QtIncludeDir)\QtTest;$(QtIncludes);%(AdditionalIncludeDirectories) + + + $(QtLibraryDir)\Qt5Test.lib;$(QtReleaseLibraries);%(AdditionalDependencies) + /ignore:4206 /LTCG:OFF + + + + + + ..\libbitcoin_qt\$(GeneratedFilesOutDir)\..\;$(QtIncludeDir)\QtTest;$(QtIncludes);%(AdditionalIncludeDirectories) + + + $(QtDebugLibraries);%(AdditionalDependencies) + /ignore:4206 + + + + + + + + + + + + + There was an error executing the test_bitcoin-qt moc code generation task. 
+ + + + + + + + + + + + + moccode; + $(BuildDependsOn); + + + + + QtTestCleanGeneratedFiles; + $(CleanDependsOn); + + + diff --git a/build_msvc/test_bitcoin/test_bitcoin.vcxproj b/build_msvc/test_bitcoin/test_bitcoin.vcxproj new file mode 100644 index 0000000000000..4182448ec3568 --- /dev/null +++ b/build_msvc/test_bitcoin/test_bitcoin.vcxproj @@ -0,0 +1,77 @@ + + + + + {A56B73DB-D46D-4882-8374-1FE3FFA08F07} + + + Application + $(SolutionDir)$(Platform)\$(Configuration)\ + + + + + + + + + + + + + + {542007e3-be0d-4b0d-a6b0-aa8813e2558d} + + + {2b384fa8-9ee1-4544-93cb-0d733c25e8ce} + + + {0667528c-d734-4009-adf9-c0d6c4a5a5a6} + + + {7c87e378-df58-482e-aa2f-1bc129bc19ce} + + + {6190199c-6cf4-4dad-bfbd-93fa72a760c1} + + + {460fee33-1fe1-483f-b3bf-931ff8e969a5} + + + {b53a5535-ee9d-4c6f-9a26-f79ee3bc3754} + + + {93b86837-b543-48a5-a89b-7c87abb77df2} + + + {792d487f-f14c-49fc-a9de-3fc150f31c3f} + + + {1e065f03-3566-47d0-8fa9-daa72b084e7d} + + + {5724ba7d-a09a-4ba8-800b-c4c1561b3d69} + + + {bb493552-3b8c-4a8c-bf69-a6e7a51d2ea6} + + + {18430fef-6b61-4c53-b396-718e02850f1b} + + + + + There was an error executing the JSON test header generation task. + + + + + + + + + + + + + diff --git a/build_msvc/vcpkg.json b/build_msvc/vcpkg.json new file mode 100644 index 0000000000000..86773d1fd3cea --- /dev/null +++ b/build_msvc/vcpkg.json @@ -0,0 +1,17 @@ +{ + "name": "bitcoin-core", + "version-string": "1", + "dependencies": [ + "berkeleydb", + "boost-multi-index", + "boost-process", + "boost-signals2", + "boost-test", + "sqlite3", + { + "name": "libevent", + "features": ["thread"] + }, + "zeromq" + ] +} diff --git a/ci/README.md b/ci/README.md new file mode 100644 index 0000000000000..3c5f04c39e348 --- /dev/null +++ b/ci/README.md @@ -0,0 +1,65 @@ +## CI Scripts + +This directory contains scripts for each build step in each build stage. + +### Running a Stage Locally + +Be aware that the tests will be built and run in-place, so please run at your own risk. +If the repository is not a fresh git clone, you might have to clean files from previous builds or test runs first. + +The ci needs to perform various sysadmin tasks such as installing packages or writing to the user's home directory. +While most of the actions are done inside a docker container, this is not possible for all. Thus, cache directories, +such as the depends cache, previous release binaries, or ccache, are mounted as read-write into the docker container. While it should be fine to run +the ci system locally on your development box, the ci scripts can generally be assumed to have received less review and +testing compared to other parts of the codebase. If you want to keep the work tree clean, you might want to run the ci +system in a virtual machine with a Linux operating system of your choice. + +To allow for a wide range of tested environments, but also ensure reproducibility to some extent, the test stage +requires `docker` to be installed. To install all requirements on Ubuntu, run + +``` +sudo apt install docker.io bash +``` + +To run the default test stage, + +``` +./ci/test_run_all.sh +``` + +To run the test stage with a specific configuration, + +``` +FILE_ENV="./ci/test/00_setup_env_arm.sh" ./ci/test_run_all.sh +``` + +### Configurations + +The test files (`FILE_ENV`) are constructed to test a wide range of +configurations, rather than a single pass/fail. This helps to catch build +failures and logic errors that present on platforms other than the ones the +author has tested. 
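+
+For example, one way to see which configurations exist in this tree is to list the environment files and pass one of them
+via `FILE_ENV`; the macOS file named below is one of the files added here, so adjust the name to the configuration you want:
+
+```
+# List the available per-configuration environment files (names as added in this patch set)
+ls ./ci/test/00_setup_env_*.sh
+
+# Run the test stage with one of them, e.g. the macOS cross-compile configuration
+FILE_ENV="./ci/test/00_setup_env_mac.sh" ./ci/test_run_all.sh
+```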
+ +Some builders use the dependency-generator in `./depends`, rather than using +the system package manager to install build dependencies. This guarantees that +the tester is using the same versions as the release builds, which also use +`./depends`. + +If no `FILE_ENV` has been specified or values are left out, `00_setup_env.sh` +is used as the default configuration with fallback values. + +It is also possible to force a specific configuration without modifying the +file. For example, + +``` +MAKEJOBS="-j1" FILE_ENV="./ci/test/00_setup_env_arm.sh" ./ci/test_run_all.sh +``` + +The files starting with `0n` (`n` greater than 0) are the scripts that are run +in order. + +### Cache + +In order to avoid rebuilding all dependencies for each build, the binaries are +cached and re-used when possible. Changes in the dependency-generator will +trigger cache-invalidation and rebuilds as necessary. diff --git a/ci/lint/04_install.sh b/ci/lint/04_install.sh new file mode 100755 index 0000000000000..8330df87ebfa4 --- /dev/null +++ b/ci/lint/04_install.sh @@ -0,0 +1,22 @@ +#!/usr/bin/env bash +# +# Copyright (c) 2018-2021 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. + +export LC_ALL=C + +${CI_RETRY_EXE} apt-get update +${CI_RETRY_EXE} apt-get install -y clang-format-9 python3-pip curl git gawk jq +update-alternatives --install /usr/bin/clang-format clang-format "$(which clang-format-9 )" 100 +update-alternatives --install /usr/bin/clang-format-diff clang-format-diff "$(which clang-format-diff-9)" 100 + +${CI_RETRY_EXE} pip3 install codespell==2.1.0 +${CI_RETRY_EXE} pip3 install flake8==4.0.1 +${CI_RETRY_EXE} pip3 install mypy==0.942 +${CI_RETRY_EXE} pip3 install pyzmq==22.3.0 +${CI_RETRY_EXE} pip3 install vulture==2.3 + +SHELLCHECK_VERSION=v0.8.0 +curl -sL "https://github.com/koalaman/shellcheck/releases/download/${SHELLCHECK_VERSION}/shellcheck-${SHELLCHECK_VERSION}.linux.x86_64.tar.xz" | tar --xz -xf - --directory /tmp/ +export PATH="/tmp/shellcheck-${SHELLCHECK_VERSION}:${PATH}" diff --git a/ci/lint/06_script.sh b/ci/lint/06_script.sh new file mode 100755 index 0000000000000..f174b4d074979 --- /dev/null +++ b/ci/lint/06_script.sh @@ -0,0 +1,42 @@ +#!/usr/bin/env bash +# +# Copyright (c) 2018-2021 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. + +export LC_ALL=C + +GIT_HEAD=$(git rev-parse HEAD) +if [ -n "$CIRRUS_PR" ]; then + COMMIT_RANGE="${CIRRUS_BASE_SHA}..$GIT_HEAD" + test/lint/commit-script-check.sh "$COMMIT_RANGE" +fi +export COMMIT_RANGE + +# This only checks that the trees are pure subtrees, it is not doing a full +# check with -r to not have to fetch all the remotes. +test/lint/git-subtree-check.sh src/crypto/ctaes +test/lint/git-subtree-check.sh src/secp256k1 +test/lint/git-subtree-check.sh src/minisketch +test/lint/git-subtree-check.sh src/univalue +test/lint/git-subtree-check.sh src/leveldb +test/lint/git-subtree-check.sh src/crc32c +test/lint/check-doc.py +test/lint/lint-all.py + +if [ "$CIRRUS_REPO_FULL_NAME" = "bitcoin/bitcoin" ] && [ "$CIRRUS_PR" = "" ] ; then + # Sanity check only the last few commits to get notified of missing sigs, + # missing keys, or expired keys. 
Usually there is only one new merge commit + # per push on the master branch and a few commits on release branches, so + # sanity checking only a few (10) commits seems sufficient and cheap. + git log HEAD~10 -1 --format='%H' > ./contrib/verify-commits/trusted-sha512-root-commit + git log HEAD~10 -1 --format='%H' > ./contrib/verify-commits/trusted-git-root + mapfile -t KEYS < contrib/verify-commits/trusted-keys + ${CI_RETRY_EXE} gpg --keyserver hkps://keys.openpgp.org --recv-keys "${KEYS[@]}" && + ./contrib/verify-commits/verify-commits.py; +fi + +if [ -n "$COMMIT_RANGE" ]; then + echo + git log --no-merges --oneline "$COMMIT_RANGE" +fi diff --git a/ci/lint_run_all.sh b/ci/lint_run_all.sh new file mode 100755 index 0000000000000..7adfe71674149 --- /dev/null +++ b/ci/lint_run_all.sh @@ -0,0 +1,11 @@ +#!/usr/bin/env bash +# +# Copyright (c) 2019-2020 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. + +export LC_ALL=C.UTF-8 + +set -o errexit; source ./ci/test/00_setup_env.sh +set -o errexit; source ./ci/lint/04_install.sh +set -o errexit; source ./ci/lint/06_script.sh diff --git a/ci/retry/README.md b/ci/retry/README.md new file mode 100644 index 0000000000000..1b03c652dbbf5 --- /dev/null +++ b/ci/retry/README.md @@ -0,0 +1,123 @@ +retry - The command line retry tool +------------------------------------------ + +Retry any shell command with exponential backoff or constant delay. + +### Instructions + +Install: + +retry is a shell script, so drop it somewhere and make sure it's added to your $PATH. Or you can use the following one-liner: + +```sh +sudo sh -c "curl https://raw.githubusercontent.com/kadwanev/retry/master/retry -o /usr/local/bin/retry && chmod +x /usr/local/bin/retry" +``` + +If you're on OS X, retry is also on Homebrew: + +``` +brew pull 27283 +brew install retry +``` +Not popular enough for homebrew-core. Please star this project to help. + +### Usage + +Help: + +`retry -?` + + Usage: retry [options] -- execute command + -h, -?, --help + -v, --verbose Verbose output + -t, --tries=# Set max retries: Default 10 + -s, --sleep=secs Constant sleep amount (seconds) + -m, --min=secs Exponential Backoff: minimum sleep amount (seconds): Default 0.3 + -x, --max=secs Exponential Backoff: maximum sleep amount (seconds): Default 60 + -f, --fail="script +cmds" Fail Script: run in case of final failure + +### Examples + +No problem: + +`retry echo u work good` + + u work good + +Test functionality: + +`retry 'echo "y u no work"; false'` + + y u no work + Before retry #1: sleeping 0.3 seconds + y u no work + Before retry #2: sleeping 0.6 seconds + y u no work + Before retry #3: sleeping 1.2 seconds + y u no work + Before retry #4: sleeping 2.4 seconds + y u no work + Before retry #5: sleeping 4.8 seconds + y u no work + Before retry #6: sleeping 9.6 seconds + y u no work + Before retry #7: sleeping 19.2 seconds + y u no work + Before retry #8: sleeping 38.4 seconds + y u no work + Before retry #9: sleeping 60.0 seconds + y u no work + Before retry #10: sleeping 60.0 seconds + y u no work + etc.. 
+ +Limit retries: + +`retry -t 4 'echo "y u no work"; false'` + + y u no work + Before retry #1: sleeping 0.3 seconds + y u no work + Before retry #2: sleeping 0.6 seconds + y u no work + Before retry #3: sleeping 1.2 seconds + y u no work + Before retry #4: sleeping 2.4 seconds + y u no work + Retries exhausted + +Bad command: + +`retry poop` + + bash: poop: command not found + +Fail command: + +`retry -t 3 -f 'echo "oh poopsickles"' 'echo "y u no work"; false'` + + y u no work + Before retry #1: sleeping 0.3 seconds + y u no work + Before retry #2: sleeping 0.6 seconds + y u no work + Before retry #3: sleeping 1.2 seconds + y u no work + Retries exhausted, running fail script + oh poopsickles + +Last attempt passed: + +`retry -t 3 -- 'if [ $RETRY_ATTEMPT -eq 3 ]; then echo Passed at attempt $RETRY_ATTEMPT; true; else echo Failed at attempt $RETRY_ATTEMPT; false; fi;'` + + Failed at attempt 0 + Before retry #1: sleeping 0.3 seconds + Failed at attempt 1 + Before retry #2: sleeping 0.6 seconds + Failed at attempt 2 + Before retry #3: sleeping 1.2 seconds + Passed at attempt 3 + +### License + +Apache 2.0 - go nuts diff --git a/ci/retry/retry b/ci/retry/retry new file mode 100755 index 0000000000000..3c06519dbdfeb --- /dev/null +++ b/ci/retry/retry @@ -0,0 +1,163 @@ +#!/usr/bin/env bash + +GETOPT_BIN=$IN_GETOPT_BIN +GETOPT_BIN=${GETOPT_BIN:-getopt} + +__sleep_amount() { + if [ -n "$constant_sleep" ]; then + sleep_time=$constant_sleep + else + #TODO: check for awk + #TODO: check if user would rather use one of the other possible dependencies: python, ruby, bc, dc + sleep_time=`awk "BEGIN {t = $min_sleep * $(( (1<<($attempts -1)) )); print (t > $max_sleep ? $max_sleep : t)}"` + fi +} + +__log_out() { + echo "$1" 1>&2 +} + +# Parameters: max_tries min_sleep max_sleep constant_sleep fail_script EXECUTION_COMMAND +retry() +{ + local max_tries="$1"; shift + local min_sleep="$1"; shift + local max_sleep="$1"; shift + local constant_sleep="$1"; shift + local fail_script="$1"; shift + if [ -n "$VERBOSE" ]; then + __log_out "Retry Parameters: max_tries=$max_tries min_sleep=$min_sleep max_sleep=$max_sleep constant_sleep=$constant_sleep" + if [ -n "$fail_script" ]; then __log_out "Fail script: $fail_script"; fi + __log_out "" + __log_out "Execution Command: $*" + __log_out "" + fi + + local attempts=0 + local return_code=1 + + + while [[ $return_code -ne 0 && $attempts -le $max_tries ]]; do + if [ $attempts -gt 0 ]; then + __sleep_amount + __log_out "Before retry #$attempts: sleeping $sleep_time seconds" + sleep $sleep_time + fi + + P="$1" + for param in "${@:2}"; do P="$P '$param'"; done + #TODO: replace single quotes in each arg with '"'"' ? + export RETRY_ATTEMPT=$attempts + bash -c "$P" + return_code=$? + #__log_out "Process returned $return_code on attempt $attempts" + if [ $return_code -eq 127 ]; then + # command not found + exit $return_code + elif [ $return_code -ne 0 ]; then + attempts=$[$attempts +1] + fi + done + + if [ $attempts -gt $max_tries ]; then + if [ -n "$fail_script" ]; then + __log_out "Retries exhausted, running fail script" + eval $fail_script + else + __log_out "Retries exhausted" + fi + fi + + exit $return_code +} + +# If we're being sourced, don't worry about such things +if [ "$BASH_SOURCE" == "$0" ]; then + # Prints the help text + help() + { + local retry=$(basename $0) + cat < /dev/null + if [[ $? -ne 4 ]]; then + echo "I’m sorry, 'getopt --test' failed in this environment. Please load GNU getopt." 
+ exit 1 + fi + + OPTIONS=vt:s:m:x:f: + LONGOPTIONS=verbose,tries:,sleep:,min:,max:,fail: + + PARSED=$($GETOPT_BIN --options="$OPTIONS" --longoptions="$LONGOPTIONS" --name "$0" -- "$@") + if [[ $? -ne 0 ]]; then + # e.g. $? == 1 + # then getopt has complained about wrong arguments to stdout + exit 2 + fi + # read getopt’s output this way to handle the quoting right: + eval set -- "$PARSED" + + max_tries=10 + min_sleep=0.3 + max_sleep=60.0 + constant_sleep= + fail_script= + + # now enjoy the options in order and nicely split until we see -- + while true; do + case "$1" in + -v|--verbose) + VERBOSE=true + shift + ;; + -t|--tries) + max_tries="$2" + shift 2 + ;; + -s|--sleep) + constant_sleep="$2" + shift 2 + ;; + -m|--min) + min_sleep="$2" + shift 2 + ;; + -x|--max) + max_sleep="$2" + shift 2 + ;; + -f|--fail) + fail_script="$2" + shift 2 + ;; + --) + shift + break + ;; + *) + echo "Programming error" + exit 3 + ;; + esac + done + + retry "$max_tries" "$min_sleep" "$max_sleep" "$constant_sleep" "$fail_script" "$@" + +fi diff --git a/ci/test/00_setup_env.sh b/ci/test/00_setup_env.sh new file mode 100755 index 0000000000000..5a150d5f8031f --- /dev/null +++ b/ci/test/00_setup_env.sh @@ -0,0 +1,73 @@ +#!/usr/bin/env bash +# +# Copyright (c) 2019-2021 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. + +export LC_ALL=C.UTF-8 + +# The root dir. +# The ci system copies this folder. +# This is where the depends build is done. +BASE_ROOT_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )"/../../ >/dev/null 2>&1 && pwd ) +export BASE_ROOT_DIR +# The depends dir. +# This folder exists on the ci host and ci guest. Changes are propagated back and forth. +export DEPENDS_DIR=${DEPENDS_DIR:-$BASE_ROOT_DIR/depends} +# A folder for the ci system to put temporary files (ccache, datadirs for tests, ...) +# This folder only exists on the ci host. +export BASE_SCRATCH_DIR=${BASE_SCRATCH_DIR:-$BASE_ROOT_DIR/ci/scratch} + +echo "Setting specific values in env" +if [ -n "${FILE_ENV}" ]; then + set -o errexit; + # shellcheck disable=SC1090 + source "${FILE_ENV}" +fi + +echo "Fallback to default values in env (if not yet set)" +# The number of parallel jobs to pass down to make and test_runner.py +export MAKEJOBS=${MAKEJOBS:--j4} +# What host to compile for. See also ./depends/README.md +# Tests that need cross-compilation export the appropriate HOST. +# Tests that run natively guess the host +export HOST=${HOST:-$("$BASE_ROOT_DIR/depends/config.guess")} +# Whether to prefer BusyBox over GNU utilities +export USE_BUSY_BOX=${USE_BUSY_BOX:-false} + +export RUN_UNIT_TESTS=${RUN_UNIT_TESTS:-true} +export RUN_FUNCTIONAL_TESTS=${RUN_FUNCTIONAL_TESTS:-true} +export RUN_TIDY=${RUN_TIDY:-false} +export RUN_SECURITY_TESTS=${RUN_SECURITY_TESTS:-false} +# By how much to scale the test_runner timeouts (option --timeout-factor). +# This is needed because some ci machines have slow CPU or disk, so sanitizers +# might be slow or a reindex might be waiting on disk IO. +export TEST_RUNNER_TIMEOUT_FACTOR=${TEST_RUNNER_TIMEOUT_FACTOR:-40} +export TEST_RUNNER_ENV=${TEST_RUNNER_ENV:-} +export RUN_FUZZ_TESTS=${RUN_FUZZ_TESTS:-false} +export EXPECTED_TESTS_DURATION_IN_SECONDS=${EXPECTED_TESTS_DURATION_IN_SECONDS:-1000} + +export CONTAINER_NAME=${CONTAINER_NAME:-ci_unnamed} +export DOCKER_NAME_TAG=${DOCKER_NAME_TAG:-ubuntu:20.04} +# Randomize test order. 
+# See https://www.boost.org/doc/libs/1_71_0/libs/test/doc/html/boost_test/utf_reference/rt_param_reference/random.html +export BOOST_TEST_RANDOM=${BOOST_TEST_RANDOM:-1} +# See man 7 debconf +export DEBIAN_FRONTEND=noninteractive +export CCACHE_SIZE=${CCACHE_SIZE:-100M} +export CCACHE_TEMPDIR=${CCACHE_TEMPDIR:-/tmp/.ccache-temp} +export CCACHE_COMPRESS=${CCACHE_COMPRESS:-1} +# The cache dir. +# This folder exists on the ci host and ci guest. Changes are propagated back and forth. +export CCACHE_DIR=${CCACHE_DIR:-$BASE_SCRATCH_DIR/.ccache} +# Folder where the build result is put (bin and lib). +export BASE_OUTDIR=${BASE_OUTDIR:-$BASE_SCRATCH_DIR/out/$HOST} +# Folder where the build is done (dist and out-of-tree build). +export BASE_BUILD_DIR=${BASE_BUILD_DIR:-$BASE_SCRATCH_DIR/build} +export PREVIOUS_RELEASES_DIR=${PREVIOUS_RELEASES_DIR:-$BASE_ROOT_DIR/releases/$HOST} +export SDK_URL=${SDK_URL:-https://bitcoincore.org/depends-sources/sdks} +export DOCKER_PACKAGES=${DOCKER_PACKAGES:-build-essential libtool autotools-dev automake pkg-config bsdmainutils curl ca-certificates ccache python3 rsync git procps bison} +export GOAL=${GOAL:-install} +export DIR_QA_ASSETS=${DIR_QA_ASSETS:-${BASE_SCRATCH_DIR}/qa-assets} +export PATH=${BASE_ROOT_DIR}/ci/retry:$PATH +export CI_RETRY_EXE=${CI_RETRY_EXE:-"retry --"} diff --git a/ci/test/00_setup_env_android.sh b/ci/test/00_setup_env_android.sh new file mode 100755 index 0000000000000..522a5497fac4e --- /dev/null +++ b/ci/test/00_setup_env_android.sh @@ -0,0 +1,25 @@ +#!/usr/bin/env bash +# +# Copyright (c) 2019-2021 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. + +export LC_ALL=C.UTF-8 + +export HOST=aarch64-linux-android +export PACKAGES="clang llvm unzip openjdk-8-jdk gradle" +export CONTAINER_NAME=ci_android +export DOCKER_NAME_TAG="ubuntu:focal" + +export RUN_UNIT_TESTS=false +export RUN_FUNCTIONAL_TESTS=false + +export ANDROID_API_LEVEL=28 +export ANDROID_BUILD_TOOLS_VERSION=28.0.3 +export ANDROID_NDK_VERSION=23.1.7779620 +export ANDROID_TOOLS_URL=https://dl.google.com/android/repository/commandlinetools-linux-6609375_latest.zip +export ANDROID_HOME="${DEPENDS_DIR}/SDKs/android" +export ANDROID_NDK_HOME="${ANDROID_HOME}/ndk/${ANDROID_NDK_VERSION}" +export DEP_OPTS="ANDROID_SDK=${ANDROID_HOME} ANDROID_NDK=${ANDROID_NDK_HOME} ANDROID_API_LEVEL=${ANDROID_API_LEVEL} ANDROID_TOOLCHAIN_BIN=${ANDROID_NDK_HOME}/toolchains/llvm/prebuilt/linux-x86_64/bin/" + +export BITCOIN_CONFIG="--disable-tests --enable-gui-tests --disable-bench --disable-fuzz-binary --without-utils --without-libs --without-daemon" diff --git a/ci/test/00_setup_env_arm.sh b/ci/test/00_setup_env_arm.sh new file mode 100755 index 0000000000000..932be4b43dc71 --- /dev/null +++ b/ci/test/00_setup_env_arm.sh @@ -0,0 +1,28 @@ +#!/usr/bin/env bash +# +# Copyright (c) 2019-2021 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. + +export LC_ALL=C.UTF-8 + +export HOST=arm-linux-gnueabihf +# The host arch is unknown, so we run the tests through qemu. +# If the host is arm and wants to run the tests natively, it can set QEMU_USER_CMD to the empty string. 
+if [ -z ${QEMU_USER_CMD+x} ]; then export QEMU_USER_CMD="${QEMU_USER_CMD:-"qemu-arm -L /usr/arm-linux-gnueabihf/"}"; fi +export DPKG_ADD_ARCH="armhf" +export PACKAGES="python3-zmq g++-arm-linux-gnueabihf busybox libc6:armhf libstdc++6:armhf libfontconfig1:armhf libxcb1:armhf" +if [ -n "$QEMU_USER_CMD" ]; then + # Likely cross-compiling, so install the needed gcc and qemu-user + export PACKAGES="$PACKAGES qemu-user" +fi +export CONTAINER_NAME=ci_arm_linux +# Use debian to avoid 404 apt errors when cross compiling +export DOCKER_NAME_TAG="debian:bullseye" +export USE_BUSY_BOX=true +export RUN_UNIT_TESTS=true +export RUN_FUNCTIONAL_TESTS=false +export GOAL="install" +# -Wno-psabi is to disable ABI warnings: "note: parameter passing for argument of type ... changed in GCC 7.1" +# This could be removed once the ABI change warning does not show up by default +export BITCOIN_CONFIG="--enable-reduce-exports CXXFLAGS=-Wno-psabi" diff --git a/ci/test/00_setup_env_i686_centos.sh b/ci/test/00_setup_env_i686_centos.sh new file mode 100755 index 0000000000000..8f1cc8af29ee5 --- /dev/null +++ b/ci/test/00_setup_env_i686_centos.sh @@ -0,0 +1,16 @@ +#!/usr/bin/env bash +# +# Copyright (c) 2020-2021 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. + +export LC_ALL=C.UTF-8 + +export HOST=i686-pc-linux-gnu +export CONTAINER_NAME=ci_i686_centos +export DOCKER_NAME_TAG=quay.io/centos/centos:stream8 +export DOCKER_PACKAGES="gcc-c++ glibc-devel.x86_64 libstdc++-devel.x86_64 glibc-devel.i686 libstdc++-devel.i686 ccache libtool make git python3 python3-zmq which patch lbzip2 xz procps-ng dash rsync coreutils bison" +export GOAL="install" +export BITCOIN_CONFIG="--enable-zmq --with-gui=qt5 --enable-reduce-exports" +export CONFIG_SHELL="/bin/dash" +export TEST_RUNNER_ENV="LC_ALL=en_US.UTF-8" diff --git a/ci/test/00_setup_env_i686_multiprocess.sh b/ci/test/00_setup_env_i686_multiprocess.sh new file mode 100755 index 0000000000000..766424769dcc6 --- /dev/null +++ b/ci/test/00_setup_env_i686_multiprocess.sh @@ -0,0 +1,17 @@ +#!/usr/bin/env bash +# +# Copyright (c) 2020-2021 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. + +export LC_ALL=C.UTF-8 + +export HOST=i686-pc-linux-gnu +export CONTAINER_NAME=ci_i686_multiprocess +export DOCKER_NAME_TAG=ubuntu:20.04 +export PACKAGES="cmake python3 python3-pip llvm clang g++-multilib" +export DEP_OPTS="DEBUG=1 MULTIPROCESS=1" +export GOAL="install" +export BITCOIN_CONFIG="--enable-debug CC='clang -m32' CXX='clang++ -m32' LDFLAGS='--rtlib=compiler-rt -lgcc_s'" +export TEST_RUNNER_ENV="BITCOIND=bitcoin-node" +export TEST_RUNNER_EXTRA="--nosandbox" diff --git a/ci/test/00_setup_env_mac.sh b/ci/test/00_setup_env_mac.sh new file mode 100755 index 0000000000000..c4f22c8f9ea0f --- /dev/null +++ b/ci/test/00_setup_env_mac.sh @@ -0,0 +1,18 @@ +#!/usr/bin/env bash +# +# Copyright (c) 2019-2021 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. 
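The arm env file above relies on the `${QEMU_USER_CMD+x}` expansion to tell an unset variable apart from one that was deliberately exported as the empty string (an arm host sets it empty to skip emulation). A minimal sketch of that idiom, not part of the diff itself:

    # ${VAR+x} expands to "x" when VAR is set (even to ""), and to nothing when unset.
    if [ -z ${QEMU_USER_CMD+x} ]; then
        QEMU_USER_CMD="qemu-arm -L /usr/arm-linux-gnueabihf/"   # unset: default to the emulator
    fi
    # An arm host can opt out of emulation by exporting the empty string first,
    # e.g. (assumed invocation): QEMU_USER_CMD="" FILE_ENV="./ci/test/00_setup_env_arm.sh" ./ci/test_run_all.sh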
+ +export LC_ALL=C.UTF-8 + +export CONTAINER_NAME=ci_macos_cross +export DOCKER_NAME_TAG=ubuntu:20.04 # Check that Focal can cross-compile to macos +export HOST=x86_64-apple-darwin +export PACKAGES="cmake libz-dev libtinfo5 python3-setuptools xorriso" +export XCODE_VERSION=12.2 +export XCODE_BUILD_ID=12B45b +export RUN_UNIT_TESTS=false +export RUN_FUNCTIONAL_TESTS=false +export GOAL="deploy" +export BITCOIN_CONFIG="--with-gui --enable-reduce-exports" diff --git a/ci/test/00_setup_env_mac_host.sh b/ci/test/00_setup_env_mac_host.sh new file mode 100755 index 0000000000000..d176296e769dc --- /dev/null +++ b/ci/test/00_setup_env_mac_host.sh @@ -0,0 +1,17 @@ +#!/usr/bin/env bash +# +# Copyright (c) 2019-2021 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. + +export LC_ALL=C.UTF-8 + +export HOST=x86_64-apple-darwin +export PIP_PACKAGES="zmq lief" +export GOAL="install" +export BITCOIN_CONFIG="--with-gui --enable-reduce-exports" +export CI_OS_NAME="macos" +export NO_DEPENDS=1 +export OSX_SDK="" +export CCACHE_SIZE=300M +export RUN_SECURITY_TESTS="true" diff --git a/ci/test/00_setup_env_native_asan.sh b/ci/test/00_setup_env_native_asan.sh new file mode 100755 index 0000000000000..69883e3609f7b --- /dev/null +++ b/ci/test/00_setup_env_native_asan.sh @@ -0,0 +1,14 @@ +#!/usr/bin/env bash +# +# Copyright (c) 2019-2021 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. + +export LC_ALL=C.UTF-8 + +export CONTAINER_NAME=ci_native_asan +export PACKAGES="clang llvm python3-zmq qtbase5-dev qttools5-dev-tools libevent-dev bsdmainutils libboost-dev libdb5.3++-dev libminiupnpc-dev libnatpmp-dev libzmq3-dev libqrencode-dev libsqlite3-dev" +export DOCKER_NAME_TAG=ubuntu:22.04 +export NO_DEPENDS=1 +export GOAL="install" +export BITCOIN_CONFIG="--enable-c++20 --enable-zmq --with-incompatible-bdb --with-gui=qt5 CPPFLAGS='-DARENA_DEBUG -DDEBUG_LOCKORDER' --with-sanitizers=address,integer,undefined CC=clang CXX=clang++" diff --git a/ci/test/00_setup_env_native_fuzz.sh b/ci/test/00_setup_env_native_fuzz.sh new file mode 100755 index 0000000000000..d7caec8359060 --- /dev/null +++ b/ci/test/00_setup_env_native_fuzz.sh @@ -0,0 +1,18 @@ +#!/usr/bin/env bash +# +# Copyright (c) 2019-2021 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. 
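Each of these 00_setup_env_*.sh files is selected through FILE_ENV, which 00_setup_env.sh sources before filling in its fallback defaults. A hedged example of what a local run of one configuration might look like; the exact entry point and variable values are assumptions, not mandated by the diff:

    # Assumed local invocation: pick the ASan config and run the full pipeline.
    MAKEJOBS="-j4" FILE_ENV="./ci/test/00_setup_env_native_asan.sh" ./ci/test_run_all.sh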
+ +export LC_ALL=C.UTF-8 + +export DOCKER_NAME_TAG="ubuntu:22.04" +export CONTAINER_NAME=ci_native_fuzz +export PACKAGES="clang llvm python3 libevent-dev bsdmainutils libboost-dev libsqlite3-dev" +export NO_DEPENDS=1 +export RUN_UNIT_TESTS=false +export RUN_FUNCTIONAL_TESTS=false +export RUN_FUZZ_TESTS=true +export GOAL="install" +export BITCOIN_CONFIG="--enable-fuzz --with-sanitizers=fuzzer,address,undefined,integer CC='clang -ftrivial-auto-var-init=pattern' CXX='clang++ -ftrivial-auto-var-init=pattern'" +export CCACHE_SIZE=200M diff --git a/ci/test/00_setup_env_native_fuzz_with_msan.sh b/ci/test/00_setup_env_native_fuzz_with_msan.sh new file mode 100755 index 0000000000000..071bac8fb3343 --- /dev/null +++ b/ci/test/00_setup_env_native_fuzz_with_msan.sh @@ -0,0 +1,25 @@ +#!/usr/bin/env bash +# +# Copyright (c) 2020-2021 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. + +export LC_ALL=C.UTF-8 + +export DOCKER_NAME_TAG="ubuntu:20.04" +LIBCXX_DIR="${BASE_SCRATCH_DIR}/msan/build/" +export MSAN_FLAGS="-fsanitize=memory -fsanitize-memory-track-origins=2 -fno-omit-frame-pointer -g -O1 -fno-optimize-sibling-calls" +LIBCXX_FLAGS="-nostdinc++ -stdlib=libc++ -L${LIBCXX_DIR}lib -lc++abi -I${LIBCXX_DIR}include -I${LIBCXX_DIR}include/c++/v1 -lpthread -Wl,-rpath,${LIBCXX_DIR}lib -Wno-unused-command-line-argument" +export MSAN_AND_LIBCXX_FLAGS="${MSAN_FLAGS} ${LIBCXX_FLAGS}" + +export CONTAINER_NAME="ci_native_msan" +export PACKAGES="clang-12 llvm-12 cmake" +# BDB generates false-positives and will be removed in future +export DEP_OPTS="NO_BDB=1 NO_QT=1 CC='clang' CXX='clang++' CFLAGS='${MSAN_FLAGS}' CXXFLAGS='${MSAN_AND_LIBCXX_FLAGS}' libevent_cflags='${MSAN_FLAGS}' sqlite_cflags='${MSAN_FLAGS}' zeromq_cxxflags='-std=c++17 ${MSAN_AND_LIBCXX_FLAGS}'" +export GOAL="install" +export BITCOIN_CONFIG="--enable-fuzz --with-sanitizers=fuzzer,memory --disable-hardening --with-asm=no --prefix=${DEPENDS_DIR}/x86_64-pc-linux-gnu/ CC=clang CXX=clang++ CFLAGS='${MSAN_FLAGS}' CXXFLAGS='${MSAN_AND_LIBCXX_FLAGS}'" +export USE_MEMORY_SANITIZER="true" +export RUN_UNIT_TESTS="false" +export RUN_FUNCTIONAL_TESTS="false" +export RUN_FUZZ_TESTS=true +export CCACHE_SIZE=250M diff --git a/ci/test/00_setup_env_native_fuzz_with_valgrind.sh b/ci/test/00_setup_env_native_fuzz_with_valgrind.sh new file mode 100755 index 0000000000000..9477fb2d9fe85 --- /dev/null +++ b/ci/test/00_setup_env_native_fuzz_with_valgrind.sh @@ -0,0 +1,20 @@ +#!/usr/bin/env bash +# +# Copyright (c) 2019-2021 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. 
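The MSAN_FLAGS above enable MemorySanitizer with origin tracking; together with the instrumented libc++ built later in 04_install.sh they make reads of uninitialized memory fatal. A hedged, stand-alone illustration of the kind of defect these flags catch (not part of the CI, file and variable names are made up for the demo):

    # Classic MSan reproduction: branch on an uninitialized heap element.
    printf 'int main(int argc, char**) { int* a = new int[10]; a[5] = 0; if (a[argc]) return 1; return 0; }\n' > msan_demo.cpp
    clang++ -fsanitize=memory -fsanitize-memory-track-origins=2 -fno-omit-frame-pointer -g -O1 msan_demo.cpp -o msan_demo
    ./msan_demo || true   # MSan aborts with "use-of-uninitialized-value" and reports the heap origin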
+ +export LC_ALL=C.UTF-8 + +export DOCKER_NAME_TAG="ubuntu:22.04" +export CONTAINER_NAME=ci_native_fuzz_valgrind +export PACKAGES="clang llvm python3 libevent-dev bsdmainutils libboost-dev libsqlite3-dev valgrind" +export NO_DEPENDS=1 +export RUN_UNIT_TESTS=false +export RUN_FUNCTIONAL_TESTS=false +export RUN_FUZZ_TESTS=true +export FUZZ_TESTS_CONFIG="--valgrind" +export GOAL="install" +# Temporarily pin dwarf 4, until valgrind can understand clang's dwarf 5 +export BITCOIN_CONFIG="--enable-fuzz --with-sanitizers=fuzzer CC=clang CXX=clang++ CXXFLAGS='-fdebug-default-version=4'" +export CCACHE_SIZE=200M diff --git a/ci/test/00_setup_env_native_msan.sh b/ci/test/00_setup_env_native_msan.sh new file mode 100755 index 0000000000000..34a792ec8ff26 --- /dev/null +++ b/ci/test/00_setup_env_native_msan.sh @@ -0,0 +1,23 @@ +#!/usr/bin/env bash +# +# Copyright (c) 2020-2021 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. + +export LC_ALL=C.UTF-8 + +export DOCKER_NAME_TAG="ubuntu:20.04" +LIBCXX_DIR="${BASE_SCRATCH_DIR}/msan/build/" +export MSAN_FLAGS="-fsanitize=memory -fsanitize-memory-track-origins=2 -fno-omit-frame-pointer -g -O1 -fno-optimize-sibling-calls" +LIBCXX_FLAGS="-nostdinc++ -stdlib=libc++ -L${LIBCXX_DIR}lib -lc++abi -I${LIBCXX_DIR}include -I${LIBCXX_DIR}include/c++/v1 -lpthread -Wl,-rpath,${LIBCXX_DIR}lib -Wno-unused-command-line-argument" +export MSAN_AND_LIBCXX_FLAGS="${MSAN_FLAGS} ${LIBCXX_FLAGS}" + +export CONTAINER_NAME="ci_native_msan" +export PACKAGES="clang-12 llvm-12 cmake" +# BDB generates false-positives and will be removed in future +export DEP_OPTS="NO_BDB=1 NO_QT=1 CC='clang' CXX='clang++' CFLAGS='${MSAN_FLAGS}' CXXFLAGS='${MSAN_AND_LIBCXX_FLAGS}' libevent_cflags='${MSAN_FLAGS}' sqlite_cflags='${MSAN_FLAGS}' zeromq_cxxflags='-std=c++17 ${MSAN_AND_LIBCXX_FLAGS}'" +export GOAL="install" +export BITCOIN_CONFIG="--with-sanitizers=memory --disable-hardening --with-asm=no --prefix=${DEPENDS_DIR}/x86_64-pc-linux-gnu/ CC=clang CXX=clang++ CFLAGS='${MSAN_FLAGS}' CXXFLAGS='${MSAN_AND_LIBCXX_FLAGS}'" +export USE_MEMORY_SANITIZER="true" +export RUN_FUNCTIONAL_TESTS="false" +export CCACHE_SIZE=250M diff --git a/ci/test/00_setup_env_native_nowallet_libbitcoinkernel.sh b/ci/test/00_setup_env_native_nowallet_libbitcoinkernel.sh new file mode 100755 index 0000000000000..63560a5f5ccbc --- /dev/null +++ b/ci/test/00_setup_env_native_nowallet_libbitcoinkernel.sh @@ -0,0 +1,14 @@ +#!/usr/bin/env bash +# +# Copyright (c) 2019-2021 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. 
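Both valgrind configurations pin `-fdebug-default-version=4` because newer clang emits DWARF 5 by default, which the packaged valgrind cannot yet parse. A hedged illustration of the flag in isolation (throwaway file names, not part of the CI):

    # Compile a trivial unit with DWARF v4 debug info, then run it under valgrind
    # the same way the wrapper scripts do.
    printf 'int main() { return 0; }\n' > t.cpp
    clang++ -g -fdebug-default-version=4 t.cpp -o t
    valgrind --error-exitcode=1 ./t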
+ +export LC_ALL=C.UTF-8 + +export CONTAINER_NAME=ci_native_nowallet_libbitcoinkernel +export DOCKER_NAME_TAG=ubuntu:18.04 # Use bionic to have one config run the tests in python3.6, see doc/dependencies.md +export PACKAGES="python3-zmq clang-8 llvm-8 libc++abi-8-dev libc++-8-dev" # Use clang-8 to test C++17 compatibility, see doc/dependencies.md +export DEP_OPTS="NO_WALLET=1 CC=clang-8 CXX='clang++-8 -stdlib=libc++'" +export GOAL="install" +export BITCOIN_CONFIG="--enable-reduce-exports CC=clang-8 CXX='clang++-8 -stdlib=libc++' --enable-experimental-util-chainstate --with-experimental-kernel-lib --enable-shared" diff --git a/ci/test/00_setup_env_native_qt5.sh b/ci/test/00_setup_env_native_qt5.sh new file mode 100755 index 0000000000000..f399e43612b04 --- /dev/null +++ b/ci/test/00_setup_env_native_qt5.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash +# +# Copyright (c) 2019-2021 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. + +export LC_ALL=C.UTF-8 + +export CONTAINER_NAME=ci_native_qt5 +export DOCKER_NAME_TAG=debian:buster # Check that buster gcc-8 can compile our C++17 and run our functional tests in python3, see doc/dependencies.md +export PACKAGES="gcc-8 g++-8 python3-zmq qtbase5-dev qttools5-dev-tools libdbus-1-dev libharfbuzz-dev" +export DEP_OPTS="NO_QT=1 NO_UPNP=1 NO_NATPMP=1 DEBUG=1 ALLOW_HOST_PACKAGES=1 CC=gcc-8 CXX=g++-8" +export TEST_RUNNER_EXTRA="--previous-releases --coverage --extended --exclude feature_dbcrash" # Run extended tests so that coverage does not fail, but exclude the very slow dbcrash +export RUN_UNIT_TESTS_SEQUENTIAL="true" +export RUN_UNIT_TESTS="false" +export GOAL="install" +export PREVIOUS_RELEASES_TO_DOWNLOAD="v0.14.3 v0.15.2 v0.16.3 v0.17.2 v0.18.1 v0.19.1 v0.20.1 v0.21.0 v22.0 v23.0" +export BITCOIN_CONFIG="--enable-zmq --with-libs=no --with-gui=qt5 --enable-reduce-exports \ +--enable-debug CFLAGS=\"-g0 -O2 -funsigned-char\" CXXFLAGS=\"-g0 -O2 -funsigned-char\" CC=gcc-8 CXX=g++-8" diff --git a/ci/test/00_setup_env_native_tidy.sh b/ci/test/00_setup_env_native_tidy.sh new file mode 100755 index 0000000000000..e4d34684734f2 --- /dev/null +++ b/ci/test/00_setup_env_native_tidy.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash +# +# Copyright (c) 2022 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. 
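The qt5/buster job exercises compatibility against the old versions listed in PREVIOUS_RELEASES_TO_DOWNLOAD; 05_before_script.sh later hands that list to the helper script in the repository. A hedged stand-alone equivalent of that step, with a shortened release list for illustration:

    # Mirrors the call made later by the CI, with -t pointing at the releases directory.
    test/get_previous_releases.py -b -t "$PREVIOUS_RELEASES_DIR" v22.0 v23.0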
+ +export LC_ALL=C.UTF-8 + +export DOCKER_NAME_TAG="ubuntu:22.04" +export CONTAINER_NAME=ci_native_tidy +export PACKAGES="clang libclang-dev llvm-dev clang-tidy bear cmake libevent-dev libboost-dev libminiupnpc-dev libnatpmp-dev libzmq3-dev systemtap-sdt-dev libqt5gui5 libqt5core5a libqt5dbus5 qttools5-dev qttools5-dev-tools libqrencode-dev libsqlite3-dev libdb++-dev" +export NO_DEPENDS=1 +export RUN_UNIT_TESTS=false +export RUN_FUNCTIONAL_TESTS=false +export RUN_FUZZ_TESTS=false +export RUN_TIDY=true +export GOAL="install" +export BITCOIN_CONFIG="CC=clang CXX=clang++ --with-incompatible-bdb --disable-hardening CFLAGS='-O0 -g0' CXXFLAGS='-O0 -g0'" +export CCACHE_SIZE=200M diff --git a/ci/test/00_setup_env_native_tsan.sh b/ci/test/00_setup_env_native_tsan.sh new file mode 100755 index 0000000000000..ae942d892b382 --- /dev/null +++ b/ci/test/00_setup_env_native_tsan.sh @@ -0,0 +1,14 @@ +#!/usr/bin/env bash +# +# Copyright (c) 2019-2021 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. + +export LC_ALL=C.UTF-8 + +export CONTAINER_NAME=ci_native_tsan +export DOCKER_NAME_TAG=ubuntu:22.04 +export PACKAGES="clang-13 llvm-13 libc++abi-13-dev libc++-13-dev python3-zmq" +export DEP_OPTS="CC=clang-13 CXX='clang++-13 -stdlib=libc++'" +export GOAL="install" +export BITCOIN_CONFIG="--enable-zmq CPPFLAGS='-DARENA_DEBUG -DDEBUG_LOCKORDER' CXXFLAGS='-g' --with-sanitizers=thread CC=clang-13 CXX='clang++-13 -stdlib=libc++'" diff --git a/ci/test/00_setup_env_native_valgrind.sh b/ci/test/00_setup_env_native_valgrind.sh new file mode 100755 index 0000000000000..7b714dff5c4dd --- /dev/null +++ b/ci/test/00_setup_env_native_valgrind.sh @@ -0,0 +1,17 @@ +#!/usr/bin/env bash +# +# Copyright (c) 2019-2021 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. + +export LC_ALL=C.UTF-8 + +export DOCKER_NAME_TAG="ubuntu:22.04" +export CONTAINER_NAME=ci_native_valgrind +export PACKAGES="valgrind clang llvm python3-zmq libevent-dev bsdmainutils libboost-dev libdb5.3++-dev libminiupnpc-dev libnatpmp-dev libzmq3-dev libsqlite3-dev" +export USE_VALGRIND=1 +export NO_DEPENDS=1 +export TEST_RUNNER_EXTRA="--nosandbox --exclude feature_init,rpc_bind,feature_bind_extra" # Excluded for now, see https://github.com/bitcoin/bitcoin/issues/17765#issuecomment-602068547 +export GOAL="install" +# Temporarily pin dwarf 4, until valgrind can understand clang's dwarf 5 +export BITCOIN_CONFIG="--enable-zmq --with-incompatible-bdb --with-gui=no CC=clang CXX=clang++ CXXFLAGS='-fdebug-default-version=4'" # TODO enable GUI diff --git a/ci/test/00_setup_env_s390x.sh b/ci/test/00_setup_env_s390x.sh new file mode 100755 index 0000000000000..136edb6662b7d --- /dev/null +++ b/ci/test/00_setup_env_s390x.sh @@ -0,0 +1,26 @@ +#!/usr/bin/env bash +# +# Copyright (c) 2019-2021 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. + +export LC_ALL=C.UTF-8 + +export HOST=s390x-linux-gnu +# The host arch is unknown, so we run the tests through qemu. +# If the host is s390x and wants to run the tests natively, it can set QEMU_USER_CMD to the empty string. 
+if [ -z ${QEMU_USER_CMD+x} ]; then export QEMU_USER_CMD="${QEMU_USER_CMD:-"qemu-s390x"}"; fi +export PACKAGES="python3-zmq" +if [ -n "$QEMU_USER_CMD" ]; then + # Likely cross-compiling, so install the needed gcc and qemu-user + export DPKG_ADD_ARCH="s390x" + export PACKAGES="$PACKAGES g++-s390x-linux-gnu qemu-user libc6:s390x libstdc++6:s390x" +fi +# Use debian to avoid 404 apt errors +export CONTAINER_NAME=ci_s390x +export DOCKER_NAME_TAG="debian:bookworm" +export TEST_RUNNER_ENV="LC_ALL=C" +export TEST_RUNNER_EXTRA="--exclude feature_init,rpc_bind,feature_bind_extra" # Excluded for now, see https://github.com/bitcoin/bitcoin/issues/17765#issuecomment-602068547 +export RUN_FUNCTIONAL_TESTS=true +export GOAL="install" +export BITCOIN_CONFIG="--enable-reduce-exports --disable-gui-tests" # GUI tests disabled for now, see https://github.com/bitcoin/bitcoin/issues/23730 diff --git a/ci/test/00_setup_env_win64.sh b/ci/test/00_setup_env_win64.sh new file mode 100755 index 0000000000000..3f43bf227be49 --- /dev/null +++ b/ci/test/00_setup_env_win64.sh @@ -0,0 +1,16 @@ +#!/usr/bin/env bash +# +# Copyright (c) 2019-2021 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. + +export LC_ALL=C.UTF-8 + +export CONTAINER_NAME=ci_win64 +export DOCKER_NAME_TAG=ubuntu:22.04 # Check that Jammy can cross-compile to win64 +export HOST=x86_64-w64-mingw32 +export DPKG_ADD_ARCH="i386" +export PACKAGES="python3 nsis g++-mingw-w64-x86-64 wine-binfmt wine64 wine32 file" +export RUN_FUNCTIONAL_TESTS=false +export GOAL="deploy" +export BITCOIN_CONFIG="--enable-reduce-exports --disable-external-signer --disable-gui-tests" diff --git a/ci/test/04_install.sh b/ci/test/04_install.sh new file mode 100755 index 0000000000000..453a34ca78ef2 --- /dev/null +++ b/ci/test/04_install.sh @@ -0,0 +1,139 @@ +#!/usr/bin/env bash +# +# Copyright (c) 2018-2021 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. 
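The s390x and armhf jobs install foreign-architecture runtime libraries (libc6:s390x, libstdc++6:armhf, ...), which only works after the extra architecture is registered; 04_install.sh below does that with the DPKG_ADD_ARCH value. A hedged sketch of the same multi-arch plumbing outside the CI_EXEC wrapper, assuming a root shell in a Debian/Ubuntu container:

    dpkg --add-architecture s390x
    apt-get update
    apt-get install -y g++-s390x-linux-gnu qemu-user libc6:s390x libstdc++6:s390x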
+ +export LC_ALL=C.UTF-8 + +if [[ $QEMU_USER_CMD == qemu-s390* ]]; then + export LC_ALL=C +fi + +if [ "$CI_OS_NAME" == "macos" ]; then + sudo -H pip3 install --upgrade pip + # shellcheck disable=SC2086 + IN_GETOPT_BIN="/usr/local/opt/gnu-getopt/bin/getopt" ${CI_RETRY_EXE} pip3 install --user $PIP_PACKAGES +fi + +# Create folders that are mounted into the docker +mkdir -p "${CCACHE_DIR}" +mkdir -p "${PREVIOUS_RELEASES_DIR}" + +export ASAN_OPTIONS="detect_stack_use_after_return=1:check_initialization_order=1:strict_init_order=1" +export LSAN_OPTIONS="suppressions=${BASE_ROOT_DIR}/test/sanitizer_suppressions/lsan" +export TSAN_OPTIONS="suppressions=${BASE_ROOT_DIR}/test/sanitizer_suppressions/tsan:halt_on_error=1:log_path=${BASE_SCRATCH_DIR}/sanitizer-output/tsan" +export UBSAN_OPTIONS="suppressions=${BASE_ROOT_DIR}/test/sanitizer_suppressions/ubsan:print_stacktrace=1:halt_on_error=1:report_error_type=1" +env | grep -E '^(BITCOIN_CONFIG|BASE_|QEMU_|CCACHE_|LC_ALL|BOOST_TEST_RANDOM|DEBIAN_FRONTEND|CONFIG_SHELL|(ASAN|LSAN|TSAN|UBSAN)_OPTIONS|PREVIOUS_RELEASES_DIR)' | tee /tmp/env +if [[ $BITCOIN_CONFIG = *--with-sanitizers=*address* ]]; then # If ran with (ASan + LSan), Docker needs access to ptrace (https://github.com/google/sanitizers/issues/764) + DOCKER_ADMIN="--cap-add SYS_PTRACE" +fi + +export P_CI_DIR="$PWD" + +if [ -z "$DANGER_RUN_CI_ON_HOST" ]; then + echo "Creating $DOCKER_NAME_TAG container to run in" + ${CI_RETRY_EXE} docker pull "$DOCKER_NAME_TAG" + + if [ -n "${RESTART_CI_DOCKER_BEFORE_RUN}" ] ; then + echo "Restart docker before run to stop and clear all containers started with --rm" + systemctl restart docker + fi + + # shellcheck disable=SC2086 + DOCKER_ID=$(docker run $DOCKER_ADMIN --rm --interactive --detach --tty \ + --mount type=bind,src=$BASE_ROOT_DIR,dst=/ro_base,readonly \ + --mount type=bind,src=$CCACHE_DIR,dst=$CCACHE_DIR \ + --mount type=bind,src=$DEPENDS_DIR,dst=$DEPENDS_DIR \ + --mount type=bind,src=$PREVIOUS_RELEASES_DIR,dst=$PREVIOUS_RELEASES_DIR \ + -w $BASE_ROOT_DIR \ + --env-file /tmp/env \ + --name $CONTAINER_NAME \ + $DOCKER_NAME_TAG) + export DOCKER_CI_CMD_PREFIX="docker exec $DOCKER_ID" +else + echo "Running on host system without docker wrapper" +fi + +CI_EXEC () { + $DOCKER_CI_CMD_PREFIX bash -c "export PATH=$BASE_SCRATCH_DIR/bins/:\$PATH && cd \"$P_CI_DIR\" && $*" +} +export -f CI_EXEC + +if [ -n "$DPKG_ADD_ARCH" ]; then + CI_EXEC dpkg --add-architecture "$DPKG_ADD_ARCH" +fi + +if [[ $DOCKER_NAME_TAG == *centos* ]]; then + ${CI_RETRY_EXE} CI_EXEC dnf -y install epel-release + ${CI_RETRY_EXE} CI_EXEC dnf -y --allowerasing install "$DOCKER_PACKAGES" "$PACKAGES" +elif [ "$CI_USE_APT_INSTALL" != "no" ]; then + ${CI_RETRY_EXE} CI_EXEC apt-get update + ${CI_RETRY_EXE} CI_EXEC apt-get install --no-install-recommends --no-upgrade -y "$PACKAGES" "$DOCKER_PACKAGES" + if [ -n "$PIP_PACKAGES" ]; then + # shellcheck disable=SC2086 + ${CI_RETRY_EXE} pip3 install --user $PIP_PACKAGES + fi +fi + +if [ "$CI_OS_NAME" == "macos" ]; then + top -l 1 -s 0 | awk ' /PhysMem/ {print}' + echo "Number of CPUs: $(sysctl -n hw.logicalcpu)" +else + CI_EXEC free -m -h + CI_EXEC echo "Number of CPUs \(nproc\):" \$\(nproc\) + CI_EXEC echo "$(lscpu | grep Endian)" +fi +CI_EXEC echo "Free disk space:" +CI_EXEC df -h + +if [ "$RUN_FUZZ_TESTS" = "true" ]; then + export DIR_FUZZ_IN=${DIR_QA_ASSETS}/fuzz_seed_corpus/ + if [ ! 
-d "$DIR_FUZZ_IN" ]; then + CI_EXEC git clone --depth=1 https://github.com/bitcoin-core/qa-assets "${DIR_QA_ASSETS}" + fi +elif [ "$RUN_UNIT_TESTS" = "true" ] || [ "$RUN_UNIT_TESTS_SEQUENTIAL" = "true" ]; then + export DIR_UNIT_TEST_DATA=${DIR_QA_ASSETS}/unit_test_data/ + if [ ! -d "$DIR_UNIT_TEST_DATA" ]; then + CI_EXEC mkdir -p "$DIR_UNIT_TEST_DATA" + CI_EXEC curl --location --fail https://github.com/bitcoin-core/qa-assets/raw/main/unit_test_data/script_assets_test.json -o "${DIR_UNIT_TEST_DATA}/script_assets_test.json" + fi +fi + +CI_EXEC mkdir -p "${BASE_SCRATCH_DIR}/sanitizer-output/" + +if [[ ${USE_MEMORY_SANITIZER} == "true" ]]; then + CI_EXEC "update-alternatives --install /usr/bin/clang++ clang++ \$(which clang++-12) 100" + CI_EXEC "update-alternatives --install /usr/bin/clang clang \$(which clang-12) 100" + CI_EXEC "mkdir -p ${BASE_SCRATCH_DIR}/msan/build/" + CI_EXEC "git clone --depth=1 https://github.com/llvm/llvm-project -b llvmorg-12.0.0 ${BASE_SCRATCH_DIR}/msan/llvm-project" + CI_EXEC "cd ${BASE_SCRATCH_DIR}/msan/build/ && cmake -DLLVM_ENABLE_PROJECTS='libcxx;libcxxabi' -DCMAKE_BUILD_TYPE=Release -DLLVM_USE_SANITIZER=MemoryWithOrigins -DCMAKE_C_COMPILER=clang -DCMAKE_CXX_COMPILER=clang++ -DLLVM_TARGETS_TO_BUILD=X86 ../llvm-project/llvm/" + CI_EXEC "cd ${BASE_SCRATCH_DIR}/msan/build/ && make $MAKEJOBS cxx" +fi + +if [[ "${RUN_TIDY}" == "true" ]]; then + export DIR_IWYU="${BASE_SCRATCH_DIR}/iwyu" + if [ ! -d "${DIR_IWYU}" ]; then + CI_EXEC "mkdir -p ${DIR_IWYU}/build/" + CI_EXEC "git clone --depth=1 https://github.com/include-what-you-use/include-what-you-use -b clang_14 ${DIR_IWYU}/include-what-you-use" + CI_EXEC "cd ${DIR_IWYU}/build && cmake -G 'Unix Makefiles' -DCMAKE_PREFIX_PATH=/usr/lib/llvm-14 ../include-what-you-use" + CI_EXEC "cd ${DIR_IWYU}/build && make install $MAKEJOBS" + fi +fi + +if [ -z "$DANGER_RUN_CI_ON_HOST" ]; then + echo "Create $BASE_ROOT_DIR" + CI_EXEC rsync -a /ro_base/ "$BASE_ROOT_DIR" +fi + +if [ "$USE_BUSY_BOX" = "true" ]; then + echo "Setup to use BusyBox utils" + CI_EXEC mkdir -p "${BASE_SCRATCH_DIR}/bins/" + # tar excluded for now because it requires passing in the exact archive type in ./depends (fixed in later BusyBox version) + # find excluded for now because it does not recognize the -delete option in ./depends (fixed in later BusyBox version) + # ar excluded for now because it does not recognize the -q option in ./depends (unknown if fixed) + # shellcheck disable=SC1010 + CI_EXEC for util in \$\(busybox --list \| grep -v "^ar$" \| grep -v "^tar$" \| grep -v "^find$"\)\; do ln -s \$\(command -v busybox\) "${BASE_SCRATCH_DIR}/bins/\$util"\; done + # Print BusyBox version + CI_EXEC patch --help +fi diff --git a/ci/test/05_before_script.sh b/ci/test/05_before_script.sh new file mode 100755 index 0000000000000..fc2f76797c8f1 --- /dev/null +++ b/ci/test/05_before_script.sh @@ -0,0 +1,52 @@ +#!/usr/bin/env bash +# +# Copyright (c) 2018-2021 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. 
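Every later step funnels through the CI_EXEC function defined earlier in 04_install.sh: when a container was started, commands are prefixed with `docker exec <id>`; on the host (DANGER_RUN_CI_ON_HOST) the prefix is empty and the same `bash -c` invocation runs directly. A slightly simplified restatement of that dispatch, omitting the PATH prepend the real definition also performs:

    CI_EXEC () {
        $DOCKER_CI_CMD_PREFIX bash -c "cd \"$P_CI_DIR\" && $*"
    }
    CI_EXEC free -m -h   # runs inside the container when DOCKER_CI_CMD_PREFIX is set, otherwise on the host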
+ +export LC_ALL=C.UTF-8 + +# Make sure default datadir does not exist and is never read by creating a dummy file +if [ "$CI_OS_NAME" == "macos" ]; then + echo > "${HOME}/Library/Application Support/Bitcoin" +else + CI_EXEC echo \> \$HOME/.bitcoin +fi + +CI_EXEC mkdir -p "${DEPENDS_DIR}/SDKs" "${DEPENDS_DIR}/sdk-sources" + +OSX_SDK_BASENAME="Xcode-${XCODE_VERSION}-${XCODE_BUILD_ID}-extracted-SDK-with-libcxx-headers" + +if [ -n "$XCODE_VERSION" ] && [ ! -d "${DEPENDS_DIR}/SDKs/${OSX_SDK_BASENAME}" ]; then + OSX_SDK_FILENAME="${OSX_SDK_BASENAME}.tar.gz" + OSX_SDK_PATH="${DEPENDS_DIR}/sdk-sources/${OSX_SDK_FILENAME}" + if [ ! -f "$OSX_SDK_PATH" ]; then + CI_EXEC curl --location --fail "${SDK_URL}/${OSX_SDK_FILENAME}" -o "$OSX_SDK_PATH" + fi + CI_EXEC tar -C "${DEPENDS_DIR}/SDKs" -xf "$OSX_SDK_PATH" +fi + +if [ -n "$ANDROID_HOME" ] && [ ! -d "$ANDROID_HOME" ]; then + ANDROID_TOOLS_PATH=${DEPENDS_DIR}/sdk-sources/android-tools.zip + if [ ! -f "$ANDROID_TOOLS_PATH" ]; then + CI_EXEC curl --location --fail "${ANDROID_TOOLS_URL}" -o "$ANDROID_TOOLS_PATH" + fi + CI_EXEC mkdir -p "${ANDROID_HOME}/cmdline-tools" + CI_EXEC unzip -o "$ANDROID_TOOLS_PATH" -d "${ANDROID_HOME}/cmdline-tools" + CI_EXEC "yes | ${ANDROID_HOME}/cmdline-tools/tools/bin/sdkmanager --install \"build-tools;${ANDROID_BUILD_TOOLS_VERSION}\" \"platform-tools\" \"platforms;android-${ANDROID_API_LEVEL}\" \"ndk;${ANDROID_NDK_VERSION}\"" +fi + +if [ -z "$NO_DEPENDS" ]; then + if [[ $DOCKER_NAME_TAG == *centos* ]]; then + # CentOS has problems building the depends if the config shell is not explicitly set + # (i.e. for libevent a Makefile with an empty SHELL variable is generated, leading to + # an error as the first command is executed) + SHELL_OPTS="LC_ALL=en_US.UTF-8 CONFIG_SHELL=/bin/dash" + else + SHELL_OPTS="CONFIG_SHELL=" + fi + CI_EXEC "$SHELL_OPTS" make "$MAKEJOBS" -C depends HOST="$HOST" "$DEP_OPTS" LOG=1 +fi +if [ -n "$PREVIOUS_RELEASES_TO_DOWNLOAD" ]; then + CI_EXEC test/get_previous_releases.py -b -t "$PREVIOUS_RELEASES_DIR" "${PREVIOUS_RELEASES_TO_DOWNLOAD}" +fi diff --git a/ci/test/06_script_a.sh b/ci/test/06_script_a.sh new file mode 100755 index 0000000000000..6a6bde05a1006 --- /dev/null +++ b/ci/test/06_script_a.sh @@ -0,0 +1,60 @@ +#!/usr/bin/env bash +# +# Copyright (c) 2018-2021 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. 
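Unless NO_DEPENDS is set, 05_before_script.sh builds the depends tree for the selected HOST; the CentOS CONFIG_SHELL workaround aside, the step boils down to a plain make in the depends directory. A hedged example with the win64 env file's values substituted in:

    # Equivalent of the CI_EXEC depends step for the mingw cross build (DEP_OPTS empty there).
    make -j4 -C depends HOST=x86_64-w64-mingw32 LOG=1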
+ +export LC_ALL=C.UTF-8 + +if [ -n "$ANDROID_TOOLS_URL" ]; then + CI_EXEC make distclean || true + CI_EXEC ./autogen.sh + CI_EXEC ./configure "$BITCOIN_CONFIG" --prefix="${DEPENDS_DIR}/aarch64-linux-android" || ( (CI_EXEC cat config.log) && false) + CI_EXEC "make $MAKEJOBS && cd src/qt && ANDROID_HOME=${ANDROID_HOME} ANDROID_NDK_HOME=${ANDROID_NDK_HOME} make apk" + exit 0 +fi + +BITCOIN_CONFIG_ALL="--enable-external-signer --enable-suppress-external-warnings --disable-dependency-tracking --prefix=$DEPENDS_DIR/$HOST --bindir=$BASE_OUTDIR/bin --libdir=$BASE_OUTDIR/lib" +if [ -z "$NO_WERROR" ]; then + BITCOIN_CONFIG_ALL="${BITCOIN_CONFIG_ALL} --enable-werror" +fi +CI_EXEC "ccache --zero-stats --max-size=$CCACHE_SIZE" + +if [ -n "$CONFIG_SHELL" ]; then + CI_EXEC "$CONFIG_SHELL" -c "./autogen.sh" +else + CI_EXEC ./autogen.sh +fi + +CI_EXEC mkdir -p "${BASE_BUILD_DIR}" +export P_CI_DIR="${BASE_BUILD_DIR}" + +CI_EXEC "${BASE_ROOT_DIR}/configure" --cache-file=config.cache "$BITCOIN_CONFIG_ALL" "$BITCOIN_CONFIG" || ( (CI_EXEC cat config.log) && false) + +CI_EXEC make distdir VERSION="$HOST" + +export P_CI_DIR="${BASE_BUILD_DIR}/bitcoin-$HOST" + +CI_EXEC ./configure --cache-file=../config.cache "$BITCOIN_CONFIG_ALL" "$BITCOIN_CONFIG" || ( (CI_EXEC cat config.log) && false) + +set -o errtrace +trap 'CI_EXEC "cat ${BASE_SCRATCH_DIR}/sanitizer-output/* 2> /dev/null"' ERR + +if [[ ${USE_MEMORY_SANITIZER} == "true" ]]; then + # MemorySanitizer (MSAN) does not support tracking memory initialization done by + # using the Linux getrandom syscall. Avoid using getrandom by undefining + # HAVE_SYS_GETRANDOM. See https://github.com/google/sanitizers/issues/852 for + # details. + CI_EXEC 'grep -v HAVE_SYS_GETRANDOM src/config/bitcoin-config.h > src/config/bitcoin-config.h.tmp && mv src/config/bitcoin-config.h.tmp src/config/bitcoin-config.h' +fi + +if [[ "${RUN_TIDY}" == "true" ]]; then + MAYBE_BEAR="bear --config src/.bear-tidy-config" + MAYBE_TOKEN="--" +fi + +CI_EXEC "${MAYBE_BEAR}" "${MAYBE_TOKEN}" make "$MAKEJOBS" "$GOAL" || ( echo "Build failure. Verbose build follows." && CI_EXEC make "$GOAL" V=1 ; false ) + +CI_EXEC "ccache --version | head -n 1 && ccache --show-stats" +CI_EXEC du -sh "${DEPENDS_DIR}"/*/ +CI_EXEC du -sh "${PREVIOUS_RELEASES_DIR}" diff --git a/ci/test/06_script_b.sh b/ci/test/06_script_b.sh new file mode 100755 index 0000000000000..e64af2ad5ddbb --- /dev/null +++ b/ci/test/06_script_b.sh @@ -0,0 +1,55 @@ +#!/usr/bin/env bash +# +# Copyright (c) 2018-2021 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. 
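06_script_a.sh configures twice: once out-of-tree in BASE_BUILD_DIR to warm a config.cache and produce a distdir, and once inside the freshly generated bitcoin-$HOST tree that is actually built. A condensed, hedged outline of that flow with the CI_EXEC wrappers and error handling dropped:

    ./autogen.sh
    cd "${BASE_BUILD_DIR}"
    "${BASE_ROOT_DIR}/configure" --cache-file=config.cache $BITCOIN_CONFIG_ALL $BITCOIN_CONFIG
    make distdir VERSION="$HOST"              # emits a pristine bitcoin-$HOST/ source tree
    cd "bitcoin-$HOST"
    ./configure --cache-file=../config.cache $BITCOIN_CONFIG_ALL $BITCOIN_CONFIG
    make $MAKEJOBS $GOAL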
+ +export LC_ALL=C.UTF-8 + +if [[ $HOST = *-mingw32 ]]; then + # Generate all binaries, so that they can be wrapped + CI_EXEC make "$MAKEJOBS" -C src/secp256k1 VERBOSE=1 + CI_EXEC "${BASE_ROOT_DIR}/ci/test/wrap-wine.sh" +fi + +if [ -n "$QEMU_USER_CMD" ]; then + # Generate all binaries, so that they can be wrapped + CI_EXEC make "$MAKEJOBS" -C src/secp256k1 VERBOSE=1 + CI_EXEC "${BASE_ROOT_DIR}/ci/test/wrap-qemu.sh" +fi + +if [ -n "$USE_VALGRIND" ]; then + CI_EXEC "${BASE_ROOT_DIR}/ci/test/wrap-valgrind.sh" +fi + +if [ "$RUN_UNIT_TESTS" = "true" ]; then + CI_EXEC "${TEST_RUNNER_ENV}" DIR_UNIT_TEST_DATA="${DIR_UNIT_TEST_DATA}" LD_LIBRARY_PATH="${DEPENDS_DIR}/${HOST}/lib" make "$MAKEJOBS" check VERBOSE=1 +fi + +if [ "$RUN_UNIT_TESTS_SEQUENTIAL" = "true" ]; then + CI_EXEC "${TEST_RUNNER_ENV}" DIR_UNIT_TEST_DATA="${DIR_UNIT_TEST_DATA}" LD_LIBRARY_PATH="${DEPENDS_DIR}/${HOST}/lib" "${BASE_OUTDIR}/bin/test_bitcoin" --catch_system_errors=no -l test_suite +fi + +if [ "$RUN_FUNCTIONAL_TESTS" = "true" ]; then + CI_EXEC LD_LIBRARY_PATH="${DEPENDS_DIR}/${HOST}/lib" "${TEST_RUNNER_ENV}" test/functional/test_runner.py --ci "$MAKEJOBS" --tmpdirprefix "${BASE_SCRATCH_DIR}/test_runner/" --ansi --combinedlogslen=4000 --timeout-factor="${TEST_RUNNER_TIMEOUT_FACTOR}" "${TEST_RUNNER_EXTRA}" --quiet --failfast +fi + +if [ "${RUN_TIDY}" = "true" ]; then + export P_CI_DIR="${BASE_BUILD_DIR}/bitcoin-$HOST/src/" + CI_EXEC run-clang-tidy "${MAKEJOBS}" + export P_CI_DIR="${BASE_BUILD_DIR}/bitcoin-$HOST/" + CI_EXEC "python3 ${DIR_IWYU}/include-what-you-use/iwyu_tool.py"\ + " src/compat"\ + " src/init"\ + " src/rpc/fees.cpp"\ + " src/rpc/signmessage.cpp"\ + " -p . ${MAKEJOBS} -- -Xiwyu --cxx17ns -Xiwyu --mapping_file=${BASE_BUILD_DIR}/bitcoin-$HOST/contrib/devtools/iwyu/bitcoin.core.imp" +fi + +if [ "$RUN_SECURITY_TESTS" = "true" ]; then + CI_EXEC make test-security-check +fi + +if [ "$RUN_FUZZ_TESTS" = "true" ]; then + CI_EXEC LD_LIBRARY_PATH="${DEPENDS_DIR}/${HOST}/lib" test/fuzz/test_runner.py "${FUZZ_TESTS_CONFIG}" "$MAKEJOBS" -l DEBUG "${DIR_FUZZ_IN}" +fi diff --git a/ci/test/wrap-qemu.sh b/ci/test/wrap-qemu.sh new file mode 100755 index 0000000000000..fcd56f533e22f --- /dev/null +++ b/ci/test/wrap-qemu.sh @@ -0,0 +1,18 @@ +#!/usr/bin/env bash +# +# Copyright (c) 2018-2021 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. + +export LC_ALL=C.UTF-8 + +for b_name in {"${BASE_OUTDIR}/bin"/*,src/secp256k1/*tests,src/minisketch/test{,-verify},src/univalue/{no_nul,test_json,unitester,object}}; do + # shellcheck disable=SC2044 + for b in $(find "${BASE_ROOT_DIR}" -executable -type f -name "$(basename "$b_name")"); do + echo "Wrap $b ..." + mv "$b" "${b}_orig" + echo '#!/usr/bin/env bash' > "$b" + echo "$QEMU_USER_CMD \"${b}_orig\" \"\$@\"" >> "$b" + chmod +x "$b" + done +done diff --git a/ci/test/wrap-valgrind.sh b/ci/test/wrap-valgrind.sh new file mode 100755 index 0000000000000..27754831848bc --- /dev/null +++ b/ci/test/wrap-valgrind.sh @@ -0,0 +1,18 @@ +#!/usr/bin/env bash +# +# Copyright (c) 2018-2021 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. + +export LC_ALL=C.UTF-8 + +for b_name in "${BASE_OUTDIR}/bin"/*; do + # shellcheck disable=SC2044 + for b in $(find "${BASE_ROOT_DIR}" -executable -type f -name "$(basename "$b_name")"); do + echo "Wrap $b ..." 
+ mv "$b" "${b}_orig" + echo '#!/usr/bin/env bash' > "$b" + echo "valgrind --gen-suppressions=all --quiet --error-exitcode=1 --suppressions=${BASE_ROOT_DIR}/contrib/valgrind.supp \"${b}_orig\" \"\$@\"" >> "$b" + chmod +x "$b" + done +done diff --git a/ci/test/wrap-wine.sh b/ci/test/wrap-wine.sh new file mode 100755 index 0000000000000..525db9eded561 --- /dev/null +++ b/ci/test/wrap-wine.sh @@ -0,0 +1,20 @@ +#!/usr/bin/env bash +# +# Copyright (c) 2020-2021 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. + +export LC_ALL=C.UTF-8 + +for b_name in {"${BASE_OUTDIR}/bin"/*,src/secp256k1/*tests,src/minisketch/test{,-verify},src/univalue/{no_nul,test_json,unitester,object}}.exe; do + # shellcheck disable=SC2044 + for b in $(find "${BASE_ROOT_DIR}" -executable -type f -name "$(basename "$b_name")"); do + if (file "$b" | grep "Windows"); then + echo "Wrap $b ..." + mv "$b" "${b}_orig" + echo '#!/usr/bin/env bash' > "$b" + echo "( wine \"${b}_orig\" \"\$@\" ) || ( sleep 1 && wine \"${b}_orig\" \"\$@\" )" >> "$b" + chmod +x "$b" + fi + done +done diff --git a/ci/test/wrapped-cl.bat b/ci/test/wrapped-cl.bat new file mode 100644 index 0000000000000..fc2a604c580be --- /dev/null +++ b/ci/test/wrapped-cl.bat @@ -0,0 +1 @@ +ccache cl %* diff --git a/ci/test_run_all.sh b/ci/test_run_all.sh new file mode 100755 index 0000000000000..93b07aab1ec48 --- /dev/null +++ b/ci/test_run_all.sh @@ -0,0 +1,13 @@ +#!/usr/bin/env bash +# +# Copyright (c) 2019-2020 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. + +export LC_ALL=C.UTF-8 + +set -o errexit; source ./ci/test/00_setup_env.sh +set -o errexit; source ./ci/test/04_install.sh +set -o errexit; source ./ci/test/05_before_script.sh +set -o errexit; source ./ci/test/06_script_a.sh +set -o errexit; source ./ci/test/06_script_b.sh diff --git a/configure.ac b/configure.ac index 173c93c538e06..96fe38fe0ddb3 100644 --- a/configure.ac +++ b/configure.ac @@ -1,44 +1,62 @@ -dnl require autoconf 2.60 (AS_ECHO/AS_ECHO_N) -AC_PREREQ([2.60]) -define(_CLIENT_VERSION_MAJOR, 0) -define(_CLIENT_VERSION_MINOR, 9) -define(_CLIENT_VERSION_REVISION, 99) +AC_PREREQ([2.69]) +define(_CLIENT_VERSION_MAJOR, 23) +define(_CLIENT_VERSION_MINOR, 99) define(_CLIENT_VERSION_BUILD, 0) +define(_CLIENT_VERSION_RC, 0) define(_CLIENT_VERSION_IS_RELEASE, false) -define(_COPYRIGHT_YEAR, 2014) -AC_INIT([Bitcoin Core],[_CLIENT_VERSION_MAJOR._CLIENT_VERSION_MINOR._CLIENT_VERSION_REVISION],[info@bitcoin.org],[bitcoin]) -AC_CONFIG_SRCDIR([src/main.cpp]) +define(_COPYRIGHT_YEAR, 2022) +define(_COPYRIGHT_HOLDERS,[The %s developers]) +define(_COPYRIGHT_HOLDERS_SUBSTITUTION,[[Bitcoin Core]]) +AC_INIT([Bitcoin Core],m4_join([.], _CLIENT_VERSION_MAJOR, _CLIENT_VERSION_MINOR, _CLIENT_VERSION_BUILD)m4_if(_CLIENT_VERSION_RC, [0], [], [rc]_CLIENT_VERSION_RC),[https://github.com/bitcoin/bitcoin/issues],[bitcoin],[https://bitcoincore.org/]) +AC_CONFIG_SRCDIR([src/validation.cpp]) AC_CONFIG_HEADERS([src/config/bitcoin-config.h]) AC_CONFIG_AUX_DIR([build-aux]) AC_CONFIG_MACRO_DIR([build-aux/m4]) +m4_ifndef([PKG_PROG_PKG_CONFIG], [m4_fatal([PKG_PROG_PKG_CONFIG macro not found. 
Please install pkg-config and re-run autogen.sh])]) +PKG_PROG_PKG_CONFIG +if test "$PKG_CONFIG" = ""; then + AC_MSG_ERROR([pkg-config not found]) +fi + +BITCOIN_DAEMON_NAME=bitcoind +BITCOIN_GUI_NAME=bitcoin-qt +BITCOIN_CLI_NAME=bitcoin-cli +BITCOIN_TX_NAME=bitcoin-tx +BITCOIN_UTIL_NAME=bitcoin-util +BITCOIN_CHAINSTATE_NAME=bitcoin-chainstate +BITCOIN_WALLET_TOOL_NAME=bitcoin-wallet +dnl Multi Process +BITCOIN_MP_NODE_NAME=bitcoin-node +BITCOIN_MP_GUI_NAME=bitcoin-gui + +dnl Unless the user specified ARFLAGS, force it to be cr +AC_ARG_VAR([ARFLAGS], [Flags for the archiver, defaults to if not set]) +if test "${ARFLAGS+set}" != "set"; then + ARFLAGS="cr" +fi + AC_CANONICAL_HOST AH_TOP([#ifndef BITCOIN_CONFIG_H]) AH_TOP([#define BITCOIN_CONFIG_H]) AH_BOTTOM([#endif //BITCOIN_CONFIG_H]) -dnl faketime breaks configure and is only needed for make. Disable it here. -unset FAKETIME - dnl Automake init set-up and checks -AM_INIT_AUTOMAKE([no-define subdir-objects foreign]) +AM_INIT_AUTOMAKE([1.13 no-define subdir-objects foreign]) -dnl faketime messes with timestamps and causes configure to be re-run. -dnl --disable-maintainer-mode can be used to bypass this. AM_MAINTAINER_MODE([enable]) dnl make the compilation flags quiet unless V=1 is used -m4_ifdef([AM_SILENT_RULES], [AM_SILENT_RULES([yes])]) +AM_SILENT_RULES([yes]) dnl Compiler checks (here before libtool). -if test "x${CXXFLAGS+set}" = "xset"; then +if test "${CXXFLAGS+set}" = "set"; then CXXFLAGS_overridden=yes else CXXFLAGS_overridden=no fi AC_PROG_CXX -m4_ifdef([AC_PROG_OBJCXX],[AC_PROG_OBJCXX]) dnl By default, libtool for mingw refuses to link static libs into a dll for dnl fear of mixing pic/non-pic objects, and import/export complications. Since @@ -48,38 +66,87 @@ case $host in lt_cv_deplibs_check_method="pass_all" ;; esac + +AC_ARG_WITH([seccomp], + [AS_HELP_STRING([--with-seccomp], + [enable experimental syscall sandbox feature (-sandbox), default is yes if seccomp-bpf is detected under Linux x86_64])], + [seccomp_found=$withval], + [seccomp_found=auto]) + +AC_ARG_ENABLE([c++20], + [AS_HELP_STRING([--enable-c++20], + [enable compilation in c++20 mode (disabled by default)])], + [use_cxx20=$enableval], + [use_cxx20=no]) + +dnl Require C++17 compiler (no GNU extensions) +if test "$use_cxx20" = "no"; then +AX_CXX_COMPILE_STDCXX([17], [noext], [mandatory]) +else +AX_CXX_COMPILE_STDCXX([20], [noext], [mandatory]) +fi + +dnl Check if -latomic is required for +CHECK_ATOMIC + +dnl check if additional link flags are required for std::filesystem +CHECK_FILESYSTEM + +dnl Unless the user specified OBJCXX, force it to be the same as CXX. This ensures +dnl that we get the same -std flags for both. +m4_ifdef([AC_PROG_OBJCXX],[ +if test "${OBJCXX+set}" = ""; then + OBJCXX="${CXX}" +fi +AC_PROG_OBJCXX +]) + +dnl OpenBSD ships with 2.4.2 +LT_PREREQ([2.4.2]) dnl Libtool init checks. -LT_INIT([pic-only]) +LT_INIT([pic-only win32-dll]) dnl Check/return PATH for base programs. 
-AC_PATH_TOOL(AR, ar) -AC_PATH_TOOL(RANLIB, ranlib) -AC_PATH_TOOL(STRIP, strip) -AC_PATH_TOOL(GCOV, gcov) -AC_PATH_PROG(LCOV, lcov) -AC_PATH_PROG(JAVA, java) -AC_PATH_PROG(GENHTML, genhtml) +AC_PATH_TOOL([AR], [ar]) +AC_PATH_TOOL([GCOV], [gcov]) +AC_PATH_TOOL([LLVM_COV], [llvm-cov]) +AC_PATH_PROG([LCOV], [lcov]) +dnl Python 3.6 is specified in .python-version and should be used if available, see doc/dependencies.md +AC_PATH_PROGS([PYTHON], [python3.6 python3.7 python3.8 python3.9 python3.10 python3.11 python3 python]) +AC_PATH_PROG([GENHTML], [genhtml]) AC_PATH_PROG([GIT], [git]) -AC_PATH_PROG(CCACHE,ccache) -AC_PATH_PROG(XGETTEXT,xgettext) -AC_PATH_PROG(HEXDUMP,hexdump) - -# This m4 will only be used if a system copy cannot be found. This is helpful -# on systems where autotools are installed but the pkg-config macros are not in -# a default location. It is currently used for building on OSX where autotools -# are preinstalled but pkg-config comes from macports or homebrew. It should -# probably be removed when building on <= 10.6 is no longer supported. -m4_include([pkg.m4]) - -dnl pkg-config check. -PKG_PROG_PKG_CONFIG +AC_PATH_PROG([CCACHE], [ccache]) +AC_PATH_PROG([XGETTEXT], [xgettext]) +AC_PATH_PROG([HEXDUMP], [hexdump]) +AC_PATH_TOOL([OBJCOPY], [objcopy]) +AC_PATH_PROG([DOXYGEN], [doxygen]) +AM_CONDITIONAL([HAVE_DOXYGEN], [test -n "$DOXYGEN"]) + +AC_ARG_VAR([PYTHONPATH], [Augments the default search path for python module files]) -# Enable wallet AC_ARG_ENABLE([wallet], - [AS_HELP_STRING([--enable-wallet], - [enable wallet (default is yes)])], + [AS_HELP_STRING([--disable-wallet], + [disable wallet (enabled by default)])], [enable_wallet=$enableval], - [enable_wallet=yes]) + [enable_wallet=auto]) + +AC_ARG_WITH([sqlite], + [AS_HELP_STRING([--with-sqlite=yes|no|auto], + [enable sqlite wallet support (default: auto, i.e., enabled if wallet is enabled and sqlite is found)])], + [use_sqlite=$withval], + [use_sqlite=auto]) + +AC_ARG_WITH([bdb], + [AS_HELP_STRING([--without-bdb], + [disable bdb wallet support (default is enabled if wallet is enabled)])], + [use_bdb=$withval], + [use_bdb=auto]) + +AC_ARG_ENABLE([usdt], + [AS_HELP_STRING([--enable-usdt], + [enable tracepoints for Userspace, Statically Defined Tracing (default is yes if sys/sdt.h is found)])], + [use_usdt=$enableval], + [use_usdt=yes]) AC_ARG_WITH([miniupnpc], [AS_HELP_STRING([--with-miniupnpc], @@ -93,20 +160,49 @@ AC_ARG_ENABLE([upnp-default], [use_upnp_default=$enableval], [use_upnp_default=no]) +AC_ARG_WITH([natpmp], + [AS_HELP_STRING([--with-natpmp], + [enable NAT-PMP (default is yes if libnatpmp is found)])], + [use_natpmp=$withval], + [use_natpmp=auto]) + +AC_ARG_ENABLE([natpmp-default], + [AS_HELP_STRING([--enable-natpmp-default], + [if NAT-PMP is enabled, turn it on at startup (default is no)])], + [use_natpmp_default=$enableval], + [use_natpmp_default=no]) + AC_ARG_ENABLE(tests, - AS_HELP_STRING([--enable-tests],[compile tests (default is yes)]), + AS_HELP_STRING([--disable-tests],[do not compile tests (default is to compile)]), [use_tests=$enableval], [use_tests=yes]) -AC_ARG_WITH([comparison-tool], - AS_HELP_STRING([--with-comparison-tool],[path to java comparison tool (requires --enable-tests)]), - [use_comparison_tool=$withval], - [use_comparison_tool=no]) - -AC_ARG_ENABLE([comparison-tool-reorg-tests], - AS_HELP_STRING([--enable-comparison-tool-reorg-tests],[enable expensive reorg tests in the comparison tool (default no)]), - [use_comparison_tool_reorg_tests=$enableval], - 
[use_comparison_tool_reorg_tests=no]) +AC_ARG_ENABLE(gui-tests, + AS_HELP_STRING([--disable-gui-tests],[do not compile GUI tests (default is to compile if GUI and tests enabled)]), + [use_gui_tests=$enableval], + [use_gui_tests=$use_tests]) + +AC_ARG_ENABLE(bench, + AS_HELP_STRING([--disable-bench],[do not compile benchmarks (default is to compile)]), + [use_bench=$enableval], + [use_bench=yes]) + +AC_ARG_ENABLE([extended-functional-tests], + AS_HELP_STRING([--enable-extended-functional-tests],[enable expensive functional tests when using lcov (default no)]), + [use_extended_functional_tests=$enableval], + [use_extended_functional_tests=no]) + +AC_ARG_ENABLE([fuzz], + AS_HELP_STRING([--enable-fuzz], + [build for fuzzing (default no). enabling this will disable all other targets and override --{enable,disable}-fuzz-binary]), + [enable_fuzz=$enableval], + [enable_fuzz=no]) + +AC_ARG_ENABLE([fuzz-binary], + AS_HELP_STRING([--enable-fuzz-binary], + [enable building of fuzz binary (default yes).]), + [enable_fuzz_binary=$enableval], + [enable_fuzz_binary=yes]) AC_ARG_WITH([qrencode], [AS_HELP_STRING([--with-qrencode], @@ -115,135 +211,504 @@ AC_ARG_WITH([qrencode], [use_qr=auto]) AC_ARG_ENABLE([hardening], - [AS_HELP_STRING([--enable-hardening], - [attempt to harden the resulting executables (default is yes)])], + [AS_HELP_STRING([--disable-hardening], + [do not attempt to harden the resulting executables (default is to harden when possible)])], [use_hardening=$enableval], - [use_hardening=yes]) + [use_hardening=auto]) AC_ARG_ENABLE([reduce-exports], [AS_HELP_STRING([--enable-reduce-exports], - [attempt to reduce exported symbols in the resulting executables (default is yes)])], + [attempt to reduce exported symbols in the resulting executables (default is no)])], [use_reduce_exports=$enableval], - [use_reduce_exports=auto]) + [use_reduce_exports=no]) AC_ARG_ENABLE([ccache], - [AS_HELP_STRING([--enable-ccache], - [use ccache for building (default is yes if ccache is found)])], + [AS_HELP_STRING([--disable-ccache], + [do not use ccache for building (default is to use if found)])], [use_ccache=$enableval], [use_ccache=auto]) +dnl Suppress warnings from external headers (e.g. Boost, Qt). +dnl May be useful if warnings from external headers clutter the build output +dnl too much, so that it becomes difficult to spot Bitcoin Core warnings +dnl or if they cause a build failure with --enable-werror. +AC_ARG_ENABLE([suppress-external-warnings], + [AS_HELP_STRING([--enable-suppress-external-warnings], + [Suppress warnings from external headers (default is no)])], + [suppress_external_warnings=$enableval], + [suppress_external_warnings=no]) + AC_ARG_ENABLE([lcov], [AS_HELP_STRING([--enable-lcov], [enable lcov testing (default is no)])], - [use_lcov=yes], + [use_lcov=$enableval], [use_lcov=no]) -AC_ARG_ENABLE([glibc-back-compat], - [AS_HELP_STRING([--enable-glibc-back-compat], - [enable backwards compatibility with glibc and libstdc++])], - [use_glibc_compat=$enableval], - [use_glibc_compat=no]) - -AC_ARG_WITH([protoc-bindir],[AS_HELP_STRING([--with-protoc-bindir=BIN_DIR],[specify protoc bin path])], [protoc_bin_path=$withval], []) +AC_ARG_ENABLE([lcov-branch-coverage], + [AS_HELP_STRING([--enable-lcov-branch-coverage], + [enable lcov testing branch coverage (default is no)])], + [use_lcov_branch=yes], + [use_lcov_branch=no]) + +AC_ARG_ENABLE([threadlocal], + [AS_HELP_STRING([--enable-threadlocal], + [enable features that depend on the c++ thread_local keyword (currently just thread names in debug logs). 
(default is to enable if there is platform support)])], + [use_thread_local=$enableval], + [use_thread_local=auto]) + +AC_ARG_ENABLE([asm], + [AS_HELP_STRING([--disable-asm], + [disable assembly routines (enabled by default)])], + [use_asm=$enableval], + [use_asm=yes]) + +if test "$use_asm" = "yes"; then + AC_DEFINE([USE_ASM], [1], [Define this symbol to build in assembly routines]) +fi -# Enable debug +AC_ARG_ENABLE([zmq], + [AS_HELP_STRING([--disable-zmq], + [disable ZMQ notifications])], + [use_zmq=$enableval], + [use_zmq=yes]) + +AC_ARG_WITH([libmultiprocess], + [AS_HELP_STRING([--with-libmultiprocess=yes|no|auto], + [Build with libmultiprocess library. (default: auto, i.e. detect with pkg-config)])], + [with_libmultiprocess=$withval], + [with_libmultiprocess=auto]) + +AC_ARG_WITH([mpgen], + [AS_HELP_STRING([--with-mpgen=yes|no|auto|PREFIX], + [Build with libmultiprocess codegen tool. Useful to specify different libmultiprocess host system library and build system codegen tool prefixes when cross-compiling (default is host system libmultiprocess prefix)])], + [with_mpgen=$withval], + [with_mpgen=auto]) + +AC_ARG_ENABLE([multiprocess], + [AS_HELP_STRING([--enable-multiprocess], + [build multiprocess bitcoin-node, bitcoin-wallet, and bitcoin-gui executables in addition to monolithic bitcoind and bitcoin-qt executables. Requires libmultiprocess library. Experimental (default is no)])], + [enable_multiprocess=$enableval], + [enable_multiprocess=no]) + +AC_ARG_ENABLE(man, + [AS_HELP_STRING([--disable-man], + [do not install man pages (default is to install)])],, + enable_man=yes) +AM_CONDITIONAL([ENABLE_MAN], [test "$enable_man" != "no"]) + +dnl Enable debug AC_ARG_ENABLE([debug], [AS_HELP_STRING([--enable-debug], - [use debug compiler flags and macros (default is no)])], + [use compiler flags and macros suited for debugging (default is no)])], [enable_debug=$enableval], [enable_debug=no]) -if test "x$enable_debug" = xyes; then - if test "x$GCC" = xyes; then - CFLAGS="-g3 -O0 -DDEBUG" - fi - - if test "x$GXX" = xyes; then - CXXFLAGS="-g3 -O0 -DDEBUG" - fi -fi +dnl Enable different -fsanitize options +AC_ARG_WITH([sanitizers], + [AS_HELP_STRING([--with-sanitizers], + [comma separated list of extra sanitizers to build with (default is none enabled)])], + [use_sanitizers=$withval]) + +dnl Enable gprof profiling +AC_ARG_ENABLE([gprof], + [AS_HELP_STRING([--enable-gprof], + [use gprof profiling compiler flags (default is no)])], + [enable_gprof=$enableval], + [enable_gprof=no]) + +dnl Turn warnings into errors +AC_ARG_ENABLE([werror], + [AS_HELP_STRING([--enable-werror], + [Treat compiler warnings as errors (default is no)])], + [enable_werror=$enableval], + [enable_werror=no]) + +AC_ARG_ENABLE([external-signer], + [AS_HELP_STRING([--enable-external-signer],[compile external signer support (default is yes, requires Boost::Process)])], + [use_external_signer=$enableval], + [use_external_signer=auto]) + +AC_ARG_ENABLE([lto], + [AS_HELP_STRING([--enable-lto],[build using LTO (default is no)])], + [enable_lto=$enableval], + [enable_lto=no]) + +AC_LANG_PUSH([C++]) + +dnl Check for a flag to turn compiler warnings into errors. This is helpful for checks which may +dnl appear to succeed because by default they merely emit warnings when they fail. +dnl +dnl Note that this is not necessarily a check to see if -Werror is supported, but rather to see if +dnl a compile with -Werror can succeed. 
This is important because the compiler may already be +dnl warning about something unrelated, for example about some path issue. If that is the case, +dnl -Werror cannot be used because all of those warnings would be turned into errors. +AX_CHECK_COMPILE_FLAG([-Werror], [CXXFLAG_WERROR="-Werror"], [CXXFLAG_WERROR=""]) + +dnl Check for a flag to turn linker warnings into errors. When flags are passed to linkers via the +dnl compiler driver using a -Wl,-foo flag, linker warnings may be swallowed rather than bubbling up. +dnl See note above, the same applies here as well. +dnl +dnl LDFLAG_WERROR Should only be used when testing -Wl,* +case $host in + *darwin*) + AX_CHECK_LINK_FLAG([-Wl,-fatal_warnings], [LDFLAG_WERROR="-Wl,-fatal_warnings"], [LDFLAG_WERROR=""]) + ;; + *) + AX_CHECK_LINK_FLAG([-Wl,--fatal-warnings], [LDFLAG_WERROR="-Wl,--fatal-warnings"], [LDFLAG_WERROR=""]) + ;; +esac + +if test "$enable_debug" = "yes"; then + dnl If debugging is enabled, and the user hasn't overridden CXXFLAGS, clear + dnl them, to prevent autoconfs "-g -O2" being added. Otherwise we'd end up + dnl with "-O0 -g3 -g -O2". + if test "$CXXFLAGS_overridden" = "no"; then + CXXFLAGS="" + fi + + dnl Disable all optimizations + AX_CHECK_COMPILE_FLAG([-O0], [DEBUG_CXXFLAGS="$DEBUG_CXXFLAGS -O0"], [], [$CXXFLAG_WERROR]) + + dnl Prefer -g3, fall back to -g if that is unavailable. + AX_CHECK_COMPILE_FLAG( + [-g3], + [DEBUG_CXXFLAGS="$DEBUG_CXXFLAGS -g3"], + [AX_CHECK_COMPILE_FLAG([-g], [DEBUG_CXXFLAGS="$DEBUG_CXXFLAGS -g"], [], [$CXXFLAG_WERROR])], + [$CXXFLAG_WERROR]) + + AX_CHECK_PREPROC_FLAG([-DDEBUG], [DEBUG_CPPFLAGS="$DEBUG_CPPFLAGS -DDEBUG"], [], [$CXXFLAG_WERROR]) + AX_CHECK_PREPROC_FLAG([-DDEBUG_LOCKORDER], [DEBUG_CPPFLAGS="$DEBUG_CPPFLAGS -DDEBUG_LOCKORDER"], [], [$CXXFLAG_WERROR]) + AX_CHECK_PREPROC_FLAG([-DRPC_DOC_CHECK], [DEBUG_CPPFLAGS="$DEBUG_CPPFLAGS -DRPC_DOC_CHECK"], [], [$CXXFLAG_WERROR]) + AX_CHECK_PREPROC_FLAG([-DABORT_ON_FAILED_ASSUME], [DEBUG_CPPFLAGS="$DEBUG_CPPFLAGS -DABORT_ON_FAILED_ASSUME"], [], [$CXXFLAG_WERROR]) + AX_CHECK_COMPILE_FLAG([-ftrapv], [DEBUG_CXXFLAGS="$DEBUG_CXXFLAGS -ftrapv"], [], [$CXXFLAG_WERROR]) +fi + +if test "$enable_lto" = "yes"; then + AX_CHECK_COMPILE_FLAG([-flto], [LTO_CXXFLAGS="$LTO_CXXFLAGS -flto"], [AC_MSG_ERROR([compile failed with -flto])], [$CXXFLAG_WERROR]) + AX_CHECK_LINK_FLAG([-flto], [LTO_LDFLAGS="$LTO_LDFLAGS -flto"], [AC_MSG_ERROR([link failed with -flto])], [$CXXFLAG_WERROR]) +fi + +if test "$use_sanitizers" != ""; then + dnl First check if the compiler accepts flags. If an incompatible pair like + dnl -fsanitize=address,thread is used here, this check will fail. This will also + dnl fail if a bad argument is passed, e.g. -fsanitize=undfeined + AX_CHECK_COMPILE_FLAG( + [-fsanitize=$use_sanitizers], + [SANITIZER_CXXFLAGS="-fsanitize=$use_sanitizers"], + [AC_MSG_ERROR([compiler did not accept requested flags])]) + + dnl Some compilers (e.g. GCC) require additional libraries like libasan, + dnl libtsan, libubsan, etc. Make sure linking still works with the sanitize + dnl flag. This is a separate check so we can give a better error message when + dnl the sanitize flags are supported by the compiler but the actual sanitizer + dnl libs are missing. 
+ AX_CHECK_LINK_FLAG( + [-fsanitize=$use_sanitizers], + [SANITIZER_LDFLAGS="-fsanitize=$use_sanitizers"], + [AC_MSG_ERROR([linker did not accept requested flags, you are missing required libraries])], + [], + [AC_LANG_PROGRAM([[ + #include + #include + extern "C" int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) { return 0; } + __attribute__((weak)) // allow for libFuzzer linking + ]],[[]])]) +fi + +ERROR_CXXFLAGS= +if test "$enable_werror" = "yes"; then + if test "$CXXFLAG_WERROR" = ""; then + AC_MSG_ERROR([enable-werror set but -Werror is not usable]) + fi + ERROR_CXXFLAGS=$CXXFLAG_WERROR + + dnl -Wreturn-type is broken in GCC for MinGW-w64. + dnl https://sourceforge.net/p/mingw-w64/bugs/306/ + AX_CHECK_COMPILE_FLAG([-Werror=return-type], [], [ERROR_CXXFLAGS="$ERROR_CXXFLAGS -Wno-error=return-type"], [$CXXFLAG_WERROR], + [AC_LANG_SOURCE([[#include + int f(){ assert(false); }]])]) +fi + +if test "$CXXFLAGS_overridden" = "no"; then + AX_CHECK_COMPILE_FLAG([-Wall], [WARN_CXXFLAGS="$WARN_CXXFLAGS -Wall"], [], [$CXXFLAG_WERROR]) + AX_CHECK_COMPILE_FLAG([-Wextra], [WARN_CXXFLAGS="$WARN_CXXFLAGS -Wextra"], [], [$CXXFLAG_WERROR]) + AX_CHECK_COMPILE_FLAG([-Wgnu], [WARN_CXXFLAGS="$WARN_CXXFLAGS -Wgnu"], [], [$CXXFLAG_WERROR]) + dnl some compilers will ignore -Wformat-security without -Wformat, so just combine the two here. + AX_CHECK_COMPILE_FLAG([-Wformat -Wformat-security], [WARN_CXXFLAGS="$WARN_CXXFLAGS -Wformat -Wformat-security"], [], [$CXXFLAG_WERROR]) + AX_CHECK_COMPILE_FLAG([-Wvla], [WARN_CXXFLAGS="$WARN_CXXFLAGS -Wvla"], [], [$CXXFLAG_WERROR]) + AX_CHECK_COMPILE_FLAG([-Wshadow-field], [WARN_CXXFLAGS="$WARN_CXXFLAGS -Wshadow-field"], [], [$CXXFLAG_WERROR]) + AX_CHECK_COMPILE_FLAG([-Wthread-safety], [WARN_CXXFLAGS="$WARN_CXXFLAGS -Wthread-safety"], [], [$CXXFLAG_WERROR]) + AX_CHECK_COMPILE_FLAG([-Wloop-analysis], [WARN_CXXFLAGS="$WARN_CXXFLAGS -Wrange-loop-analysis"], [], [$CXXFLAG_WERROR]) + AX_CHECK_COMPILE_FLAG([-Wredundant-decls], [WARN_CXXFLAGS="$WARN_CXXFLAGS -Wredundant-decls"], [], [$CXXFLAG_WERROR]) + AX_CHECK_COMPILE_FLAG([-Wunused-member-function], [WARN_CXXFLAGS="$WARN_CXXFLAGS -Wunused-member-function"], [], [$CXXFLAG_WERROR]) + AX_CHECK_COMPILE_FLAG([-Wdate-time], [WARN_CXXFLAGS="$WARN_CXXFLAGS -Wdate-time"], [], [$CXXFLAG_WERROR]) + AX_CHECK_COMPILE_FLAG([-Wconditional-uninitialized], [WARN_CXXFLAGS="$WARN_CXXFLAGS -Wconditional-uninitialized"], [], [$CXXFLAG_WERROR]) + AX_CHECK_COMPILE_FLAG([-Wduplicated-branches], [WARN_CXXFLAGS="$WARN_CXXFLAGS -Wduplicated-branches"], [], [$CXXFLAG_WERROR]) + AX_CHECK_COMPILE_FLAG([-Wduplicated-cond], [WARN_CXXFLAGS="$WARN_CXXFLAGS -Wduplicated-cond"], [], [$CXXFLAG_WERROR]) + AX_CHECK_COMPILE_FLAG([-Wlogical-op], [WARN_CXXFLAGS="$WARN_CXXFLAGS -Wlogical-op"], [], [$CXXFLAG_WERROR]) + AX_CHECK_COMPILE_FLAG([-Woverloaded-virtual], [WARN_CXXFLAGS="$WARN_CXXFLAGS -Woverloaded-virtual"], [], [$CXXFLAG_WERROR]) + dnl -Wsuggest-override is broken with GCC before 9.2 + dnl https://gcc.gnu.org/bugzilla/show_bug.cgi?id=78010 + AX_CHECK_COMPILE_FLAG([-Wsuggest-override], [WARN_CXXFLAGS="$WARN_CXXFLAGS -Wsuggest-override"], [], [$CXXFLAG_WERROR], + [AC_LANG_SOURCE([[struct A { virtual void f(); }; struct B : A { void f() final; };]])]) + AX_CHECK_COMPILE_FLAG([-Wunreachable-code-loop-increment], [WARN_CXXFLAGS="$WARN_CXXFLAGS -Wunreachable-code-loop-increment"], [], [$CXXFLAG_WERROR]) + AX_CHECK_COMPILE_FLAG([-Wimplicit-fallthrough], [WARN_CXXFLAGS="$WARN_CXXFLAGS -Wimplicit-fallthrough"], [], [$CXXFLAG_WERROR]) + + if test 
"$suppress_external_warnings" != "no" ; then + AX_CHECK_COMPILE_FLAG([-Wdocumentation], [WARN_CXXFLAGS="$WARN_CXXFLAGS -Wdocumentation"], [], [$CXXFLAG_WERROR]) + fi + + dnl Some compilers (gcc) ignore unknown -Wno-* options, but warn about all + dnl unknown options if any other warning is produced. Test the -Wfoo case, and + dnl set the -Wno-foo case if it works. + AX_CHECK_COMPILE_FLAG([-Wunused-parameter], [NOWARN_CXXFLAGS="$NOWARN_CXXFLAGS -Wno-unused-parameter"], [], [$CXXFLAG_WERROR]) + AX_CHECK_COMPILE_FLAG([-Wself-assign], [NOWARN_CXXFLAGS="$NOWARN_CXXFLAGS -Wno-self-assign"], [], [$CXXFLAG_WERROR]) + if test "$suppress_external_warnings" != "yes" ; then + AX_CHECK_COMPILE_FLAG([-Wdeprecated-copy], [NOWARN_CXXFLAGS="$NOWARN_CXXFLAGS -Wno-deprecated-copy"], [], [$CXXFLAG_WERROR]) + fi +fi + +dnl Don't allow extended (non-ASCII) symbols in identifiers. This is easier for code review. +AX_CHECK_COMPILE_FLAG([-fno-extended-identifiers], [CORE_CXXFLAGS="$CORE_CXXFLAGS -fno-extended-identifiers"], [], [$CXXFLAG_WERROR]) + +enable_arm_crc=no +enable_arm_shani=no +enable_sse42=no +enable_sse41=no +enable_avx2=no +enable_x86_shani=no + +if test "$use_asm" = "yes"; then + +dnl Check for optional instruction set support. Enabling these does _not_ imply that all code will +dnl be compiled with them, rather that specific objects/libs may use them after checking for runtime +dnl compatibility. + +dnl x86 +AX_CHECK_COMPILE_FLAG([-msse4.2], [SSE42_CXXFLAGS="-msse4.2"], [], [$CXXFLAG_WERROR]) +AX_CHECK_COMPILE_FLAG([-msse4.1], [SSE41_CXXFLAGS="-msse4.1"], [], [$CXXFLAG_WERROR]) +AX_CHECK_COMPILE_FLAG([-mavx -mavx2], [AVX2_CXXFLAGS="-mavx -mavx2"], [], [$CXXFLAG_WERROR]) +AX_CHECK_COMPILE_FLAG([-msse4 -msha], [X86_SHANI_CXXFLAGS="-msse4 -msha"], [], [$CXXFLAG_WERROR]) + +enable_clmul= +AX_CHECK_COMPILE_FLAG([-mpclmul], [enable_clmul=yes], [], [$CXXFLAG_WERROR], [AC_LANG_PROGRAM([ + #include + #include +], [ + __m128i a = _mm_cvtsi64_si128((uint64_t)7); + __m128i b = _mm_clmulepi64_si128(a, a, 37); + __m128i c = _mm_srli_epi64(b, 41); + __m128i d = _mm_xor_si128(b, c); + uint64_t e = _mm_cvtsi128_si64(d); + return e == 0; +])]) + +if test "$enable_clmul" = "yes"; then + CLMUL_CXXFLAGS="-mpclmul" + AC_DEFINE([HAVE_CLMUL], [1], [Define this symbol if clmul instructions can be used]) +fi + +TEMP_CXXFLAGS="$CXXFLAGS" +CXXFLAGS="$CXXFLAGS $SSE42_CXXFLAGS" +AC_MSG_CHECKING([for SSE4.2 intrinsics]) +AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[ + #include + #if defined(_MSC_VER) + #include + #elif defined(__GNUC__) && defined(__SSE4_2__) + #include + #endif + ]],[[ + uint64_t l = 0; + l = _mm_crc32_u8(l, 0); + l = _mm_crc32_u32(l, 0); + l = _mm_crc32_u64(l, 0); + return l; + ]])], + [ AC_MSG_RESULT([yes]); enable_sse42=yes], + [ AC_MSG_RESULT([no])] +) +CXXFLAGS="$TEMP_CXXFLAGS" + +TEMP_CXXFLAGS="$CXXFLAGS" +CXXFLAGS="$CXXFLAGS $SSE41_CXXFLAGS" +AC_MSG_CHECKING([for SSE4.1 intrinsics]) +AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[ + #include + #include + ]],[[ + __m128i l = _mm_set1_epi32(0); + return _mm_extract_epi32(l, 3); + ]])], + [ AC_MSG_RESULT([yes]); enable_sse41=yes; AC_DEFINE([ENABLE_SSE41], [1], [Define this symbol to build code that uses SSE4.1 intrinsics]) ], + [ AC_MSG_RESULT([no])] +) +CXXFLAGS="$TEMP_CXXFLAGS" + +TEMP_CXXFLAGS="$CXXFLAGS" +CXXFLAGS="$CXXFLAGS $AVX2_CXXFLAGS" +AC_MSG_CHECKING([for AVX2 intrinsics]) +AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[ + #include + #include + ]],[[ + __m256i l = _mm256_set1_epi32(0); + return _mm256_extract_epi32(l, 7); + ]])], + [ AC_MSG_RESULT([yes]); enable_avx2=yes; 
AC_DEFINE([ENABLE_AVX2], [1], [Define this symbol to build code that uses AVX2 intrinsics]) ], + [ AC_MSG_RESULT([no])] +) +CXXFLAGS="$TEMP_CXXFLAGS" + +TEMP_CXXFLAGS="$CXXFLAGS" +CXXFLAGS="$CXXFLAGS $X86_SHANI_CXXFLAGS" +AC_MSG_CHECKING([for x86 SHA-NI intrinsics]) +AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[ + #include + #include + ]],[[ + __m128i i = _mm_set1_epi32(0); + __m128i j = _mm_set1_epi32(1); + __m128i k = _mm_set1_epi32(2); + return _mm_extract_epi32(_mm_sha256rnds2_epu32(i, i, k), 0); + ]])], + [ AC_MSG_RESULT([yes]); enable_x86_shani=yes; AC_DEFINE([ENABLE_X86_SHANI], [1], [Define this symbol to build code that uses x86 SHA-NI intrinsics]) ], + [ AC_MSG_RESULT([no])] +) +CXXFLAGS="$TEMP_CXXFLAGS" + +# ARM +AX_CHECK_COMPILE_FLAG([-march=armv8-a+crc+crypto], [ARM_CRC_CXXFLAGS="-march=armv8-a+crc+crypto"], [], [$CXXFLAG_WERROR]) +AX_CHECK_COMPILE_FLAG([-march=armv8-a+crc+crypto], [ARM_SHANI_CXXFLAGS="-march=armv8-a+crc+crypto"], [], [$CXXFLAG_WERROR]) + +TEMP_CXXFLAGS="$CXXFLAGS" +CXXFLAGS="$CXXFLAGS $ARM_CRC_CXXFLAGS" +AC_MSG_CHECKING([for ARMv8 CRC32 intrinsics]) +AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[ + #include + #include + ]],[[ +#ifdef __aarch64__ + __crc32cb(0, 0); __crc32ch(0, 0); __crc32cw(0, 0); __crc32cd(0, 0); + vmull_p64(0, 0); +#else +#error "crc32c library does not support hardware acceleration on 32-bit ARM" +#endif + ]])], + [ AC_MSG_RESULT([yes]); enable_arm_crc=yes; ], + [ AC_MSG_RESULT([no])] +) +CXXFLAGS="$TEMP_CXXFLAGS" + +TEMP_CXXFLAGS="$CXXFLAGS" +CXXFLAGS="$CXXFLAGS $ARM_SHANI_CXXFLAGS" +AC_MSG_CHECKING([for ARMv8 SHA-NI intrinsics]) +AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[ + #include + #include + ]],[[ + uint32x4_t a, b, c; + vsha256h2q_u32(a, b, c); + vsha256hq_u32(a, b, c); + vsha256su0q_u32(a, b); + vsha256su1q_u32(a, b, c); + ]])], + [ AC_MSG_RESULT([yes]); enable_arm_shani=yes; AC_DEFINE([ENABLE_ARM_SHANI], [1], [Define this symbol to build code that uses ARMv8 SHA-NI intrinsics]) ], + [ AC_MSG_RESULT([no])] +) +CXXFLAGS="$TEMP_CXXFLAGS" -## TODO: Remove these hard-coded paths and flags. They are here for the sake of -## compatibility with the legacy buildsystem. 
-## -if test "x$CXXFLAGS_overridden" = "xno"; then - CXXFLAGS="$CXXFLAGS -Wall -Wextra -Wformat -Wformat-security -Wno-unused-parameter" fi -CPPFLAGS="$CPPFLAGS -DBOOST_SPIRIT_THREADSAFE -DHAVE_BUILD_INFO -D__STDC_FORMAT_MACROS" + +CORE_CPPFLAGS="$CORE_CPPFLAGS -DHAVE_BUILD_INFO" AC_ARG_WITH([utils], [AS_HELP_STRING([--with-utils], - [build bitcoin-cli bitcoin-tx (default=yes)])], + [build bitcoin-cli bitcoin-tx bitcoin-util bitcoin-wallet (default=yes)])], [build_bitcoin_utils=$withval], [build_bitcoin_utils=yes]) +AC_ARG_ENABLE([util-cli], + [AS_HELP_STRING([--enable-util-cli], + [build bitcoin-cli])], + [build_bitcoin_cli=$enableval], + [build_bitcoin_cli=$build_bitcoin_utils]) + +AC_ARG_ENABLE([util-tx], + [AS_HELP_STRING([--enable-util-tx], + [build bitcoin-tx])], + [build_bitcoin_tx=$enableval], + [build_bitcoin_tx=$build_bitcoin_utils]) + +AC_ARG_ENABLE([util-wallet], + [AS_HELP_STRING([--enable-util-wallet], + [build bitcoin-wallet])], + [build_bitcoin_wallet=$enableval], + [build_bitcoin_wallet=$build_bitcoin_utils]) + +AC_ARG_ENABLE([util-util], + [AS_HELP_STRING([--enable-util-util], + [build bitcoin-util])], + [build_bitcoin_util=$enableval], + [build_bitcoin_util=$build_bitcoin_utils]) + +AC_ARG_ENABLE([experimental-util-chainstate], + [AS_HELP_STRING([--enable-experimental-util-chainstate], + [build experimental bitcoin-chainstate executable (default=no)])], + [build_bitcoin_chainstate=$enableval], + [build_bitcoin_chainstate=no]) + AC_ARG_WITH([libs], [AS_HELP_STRING([--with-libs], [build libraries (default=yes)])], [build_bitcoin_libs=$withval], [build_bitcoin_libs=yes]) +AC_ARG_WITH([experimental-kernel-lib], + [AS_HELP_STRING([--with-experimental-kernel-lib], + [build experimental bitcoinkernel library (default is to build if we're building libraries and the experimental build-chainstate executable)])], + [build_experimental_kernel_lib=$withval], + [build_experimental_kernel_lib=auto]) + AC_ARG_WITH([daemon], [AS_HELP_STRING([--with-daemon], [build bitcoind daemon (default=yes)])], [build_bitcoind=$withval], [build_bitcoind=yes]) -AC_LANG_PUSH([C++]) - -use_pkgconfig=yes case $host in *mingw*) - - #pkgconfig does more harm than good with MinGW - use_pkgconfig=no - TARGET_OS=windows - AC_CHECK_LIB([mingwthrd], [main],, AC_MSG_ERROR(lib missing)) - AC_CHECK_LIB([kernel32], [main],, AC_MSG_ERROR(lib missing)) - AC_CHECK_LIB([user32], [main],, AC_MSG_ERROR(lib missing)) - AC_CHECK_LIB([gdi32], [main],, AC_MSG_ERROR(lib missing)) - AC_CHECK_LIB([comdlg32], [main],, AC_MSG_ERROR(lib missing)) - AC_CHECK_LIB([winspool], [main],, AC_MSG_ERROR(lib missing)) - AC_CHECK_LIB([winmm], [main],, AC_MSG_ERROR(lib missing)) - AC_CHECK_LIB([shell32], [main],, AC_MSG_ERROR(lib missing)) - AC_CHECK_LIB([comctl32], [main],, AC_MSG_ERROR(lib missing)) - AC_CHECK_LIB([ole32], [main],, AC_MSG_ERROR(lib missing)) - AC_CHECK_LIB([oleaut32], [main],, AC_MSG_ERROR(lib missing)) - AC_CHECK_LIB([uuid], [main],, AC_MSG_ERROR(lib missing)) - AC_CHECK_LIB([rpcrt4], [main],, AC_MSG_ERROR(lib missing)) - AC_CHECK_LIB([advapi32], [main],, AC_MSG_ERROR(lib missing)) - AC_CHECK_LIB([ws2_32], [main],, AC_MSG_ERROR(lib missing)) - AC_CHECK_LIB([mswsock], [main],, AC_MSG_ERROR(lib missing)) - AC_CHECK_LIB([shlwapi], [main],, AC_MSG_ERROR(lib missing)) - AC_CHECK_LIB([iphlpapi], [main],, AC_MSG_ERROR(lib missing)) - AC_CHECK_LIB([crypt32], [main],, AC_MSG_ERROR(lib missing)) - - # -static is interpreted by libtool, where it has a different meaning. - # In libtool-speak, it's -all-static. 
- AX_CHECK_LINK_FLAG([[-static]],[LIBTOOL_APP_LDFLAGS="$LIBTOOL_APP_LDFLAGS -all-static"]) - - AC_PATH_PROG([MAKENSIS], [makensis], none) - if test x$MAKENSIS = xnone; then - AC_MSG_WARN("makensis not found. Cannot create installer.") + AC_CHECK_LIB([kernel32], [GetModuleFileNameA], [], [AC_MSG_ERROR([libkernel32 missing])]) + AC_CHECK_LIB([user32], [main], [], [AC_MSG_ERROR([libuser32 missing])]) + AC_CHECK_LIB([gdi32], [main], [], [AC_MSG_ERROR([libgdi32 missing])]) + AC_CHECK_LIB([comdlg32], [main], [], [AC_MSG_ERROR([libcomdlg32 missing])]) + AC_CHECK_LIB([winmm], [main], [], [AC_MSG_ERROR([libwinmm missing])]) + AC_CHECK_LIB([shell32], [SHGetSpecialFolderPathW], [], [AC_MSG_ERROR([libshell32 missing])]) + AC_CHECK_LIB([comctl32], [main], [], [AC_MSG_ERROR([libcomctl32 missing])]) + AC_CHECK_LIB([ole32], [CoCreateInstance], [], [AC_MSG_ERROR([libole32 missing])]) + AC_CHECK_LIB([oleaut32], [main], [], [AC_MSG_ERROR([liboleaut32 missing])]) + AC_CHECK_LIB([uuid], [main], [], [AC_MSG_ERROR([libuuid missing])]) + AC_CHECK_LIB([advapi32], [CryptAcquireContextW], [], [AC_MSG_ERROR([libadvapi32 missing])]) + AC_CHECK_LIB([ws2_32], [WSAStartup], [], [AC_MSG_ERROR([libws2_32 missing])]) + AC_CHECK_LIB([shlwapi], [PathRemoveFileSpecW], [], [AC_MSG_ERROR([libshlwapi missing])]) + AC_CHECK_LIB([iphlpapi], [GetAdaptersAddresses], [], [AC_MSG_ERROR([libiphlpapi missing])]) + + dnl -static is interpreted by libtool, where it has a different meaning. + dnl In libtool-speak, it's -all-static. + AX_CHECK_LINK_FLAG([-static], [LIBTOOL_APP_LDFLAGS="$LIBTOOL_APP_LDFLAGS -all-static"]) + + AC_PATH_PROG([MAKENSIS], [makensis], [none]) + if test "$MAKENSIS" = "none"; then + AC_MSG_WARN([makensis not found. Cannot create installer.]) fi - AC_PATH_TOOL(WINDRES, windres, none) - if test x$WINDRES = xnone; then - AC_MSG_ERROR("windres not found") + AC_PATH_TOOL([WINDRES], [windres], [none]) + if test "$WINDRES" = "none"; then + AC_MSG_ERROR([windres not found]) fi - CPPFLAGS="$CPPFLAGS -D_MT -DWIN32 -D_WINDOWS -DBOOST_THREAD_USE_LIB" - LEVELDB_TARGET_FLAGS="TARGET_OS=OS_WINDOWS_CROSSCOMPILE" - if test "x$CXXFLAGS_overridden" = "xno"; then - CXXFLAGS="$CXXFLAGS -w" - fi - case $host in - i?86-*) WINDOWS_BITS=32 ;; - x86_64-*) WINDOWS_BITS=64 ;; - *) AC_MSG_ERROR("Could not determine win32/win64 for installer") ;; - esac - AC_SUBST(WINDOWS_BITS) + CORE_CPPFLAGS="$CORE_CPPFLAGS -D_MT -DWIN32 -D_WINDOWS -D_WIN32_WINNT=0x0601 -D_WIN32_IE=0x0501 -DWIN32_LEAN_AND_MEAN" dnl libtool insists upon adding -nostdlib and a list of objects/libs to link against. dnl That breaks our ability to build dll's with static libgcc/libstdc++/libssp. 
Override @@ -253,46 +718,62 @@ case $host in archive_cmds_CXX="\$CC -shared \$libobjs \$deplibs \$compiler_flags -static -o \$output_objdir/\$soname \${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker \$lib" postdeps_CXX= + dnl We require Windows 7 (NT 6.1) or later + AX_CHECK_LINK_FLAG([-Wl,--major-subsystem-version -Wl,6 -Wl,--minor-subsystem-version -Wl,1], [CORE_LDFLAGS="$CORE_LDFLAGS -Wl,--major-subsystem-version -Wl,6 -Wl,--minor-subsystem-version -Wl,1"], [], [$LDFLAG_WERROR]) ;; *darwin*) TARGET_OS=darwin - LEVELDB_TARGET_FLAGS="TARGET_OS=Darwin" - if test x$cross_compiling != xyes; then + if test $cross_compiling != "yes"; then BUILD_OS=darwin - AC_CHECK_PROG([PORT],port, port) - if test x$PORT = xport; then - dnl add default macports paths - CPPFLAGS="$CPPFLAGS -isystem /opt/local/include" - LIBS="$LIBS -L/opt/local/lib" - if test -d /opt/local/include/db48; then - CPPFLAGS="$CPPFLAGS -I/opt/local/include/db48" - LIBS="$LIBS -L/opt/local/lib/db48" - fi - fi - - AC_CHECK_PROG([BREW],brew, brew) - if test x$BREW = xbrew; then + AC_CHECK_PROG([BREW], [brew], [brew]) + if test "$BREW" = "brew"; then dnl These Homebrew packages may be keg-only, meaning that they won't be found dnl in expected paths because they may conflict with system files. Ask dnl Homebrew where each one is located, then adjust paths accordingly. dnl It's safe to add these paths even if the functionality is disabled by dnl the user (--without-wallet or --without-gui for example). - openssl_prefix=`$BREW --prefix openssl 2>/dev/null` - bdb_prefix=`$BREW --prefix berkeley-db4 2>/dev/null` - qt5_prefix=`$BREW --prefix qt5 2>/dev/null` - if test x$openssl_prefix != x; then - PKG_CONFIG_PATH="$openssl_prefix/lib/pkgconfig:$PKG_CONFIG_PATH" - export PKG_CONFIG_PATH + if test "$use_bdb" != "no" && $BREW list --versions berkeley-db@4 >/dev/null && test "$BDB_CFLAGS" = "" && test "$BDB_LIBS" = ""; then + bdb_prefix=$($BREW --prefix berkeley-db@4 2>/dev/null) + dnl This must precede the call to BITCOIN_FIND_BDB48 below. + BDB_CFLAGS="-I$bdb_prefix/include" + BDB_LIBS="-L$bdb_prefix/lib -ldb_cxx-4.8" fi - if test x$bdb_prefix != x; then - CPPFLAGS="$CPPFLAGS -I$bdb_prefix/include" - LIBS="$LIBS -L$bdb_prefix/lib" + + if test "$use_sqlite" != "no" && $BREW list --versions sqlite3 >/dev/null; then + export PKG_CONFIG_PATH="$($BREW --prefix sqlite3 2>/dev/null)/lib/pkgconfig:$PKG_CONFIG_PATH" fi - if test x$qt5_prefix != x; then - PKG_CONFIG_PATH="$qt5_prefix/lib/pkgconfig:$PKG_CONFIG_PATH" - export PKG_CONFIG_PATH + + if $BREW list --versions qt@5 >/dev/null; then + export PKG_CONFIG_PATH="$($BREW --prefix qt@5 2>/dev/null)/lib/pkgconfig:$PKG_CONFIG_PATH" fi + + case $host in + *aarch64*) + dnl The preferred Homebrew prefix for Apple Silicon is /opt/homebrew. + dnl Therefore, as we do not use pkg-config to detect miniupnpc and libnatpmp + dnl packages, we should set the CPPFLAGS and LDFLAGS variables for them + dnl explicitly. 
+ if test "$use_upnp" != "no" && $BREW list --versions miniupnpc >/dev/null; then + miniupnpc_prefix=$($BREW --prefix miniupnpc 2>/dev/null) + if test "$suppress_external_warnings" != "no"; then + MINIUPNPC_CPPFLAGS="-isystem $miniupnpc_prefix/include" + else + MINIUPNPC_CPPFLAGS="-I$miniupnpc_prefix/include" + fi + MINIUPNPC_LIBS="-L$miniupnpc_prefix/lib" + fi + if test "$use_natpmp" != "no" && $BREW list --versions libnatpmp >/dev/null; then + libnatpmp_prefix=$($BREW --prefix libnatpmp 2>/dev/null) + if test "$suppress_external_warnings" != "no"; then + NATPMP_CPPFLAGS="-isystem $libnatpmp_prefix/include" + else + NATPMP_CPPFLAGS="-I$libnatpmp_prefix/include" + fi + NATPMP_LIBS="-L$libnatpmp_prefix/lib" + fi + ;; + esac fi else case $build_os in @@ -300,9 +781,10 @@ case $host in BUILD_OS=darwin ;; *) - AC_PATH_TOOL([INSTALLNAMETOOL], [install_name_tool], install_name_tool) - AC_PATH_TOOL([OTOOL], [otool], otool) - AC_PATH_PROGS([GENISOIMAGE], [genisoimage mkisofs],genisoimage) + AC_PATH_TOOL([DSYMUTIL], [dsymutil], [dsymutil]) + AC_PATH_TOOL([INSTALL_NAME_TOOL], [install_name_tool], [install_name_tool]) + AC_PATH_TOOL([OTOOL], [otool], [otool]) + AC_PATH_PROGS([XORRISOFS], [xorrisofs], [xorrisofs]) dnl libtool will try to strip the static lib, which is a problem for dnl cross-builds because strip attempts to call a hard-coded ld, @@ -313,425 +795,905 @@ case $host in esac fi - AX_CHECK_LINK_FLAG([[-Wl,-headerpad_max_install_names]], [LDFLAGS="$LDFLAGS -Wl,-headerpad_max_install_names"]) - CPPFLAGS="$CPPFLAGS -DMAC_OSX" + AX_CHECK_LINK_FLAG([-Wl,-headerpad_max_install_names], [CORE_LDFLAGS="$CORE_LDFLAGS -Wl,-headerpad_max_install_names"], [], [$LDFLAG_WERROR]) + CORE_CPPFLAGS="$CORE_CPPFLAGS -DMAC_OSX -DOBJC_OLD_DISPATCH_PROTOTYPES=0" + OBJCXXFLAGS="$CXXFLAGS" + ;; + *android*) + dnl make sure android stays above linux for hosts like *linux-android* + TARGET_OS=android + case $host in + *x86_64*) + ANDROID_ARCH=x86_64 + ;; + *aarch64*) + ANDROID_ARCH=arm64-v8a + ;; + *armv7a*) + ANDROID_ARCH=armeabi-v7a + ;; + *) AC_MSG_ERROR([Could not determine Android arch, or it is unsupported]) ;; + esac ;; *linux*) TARGET_OS=linux ;; - *) - ;; esac -if test x$use_comparison_tool != xno; then - AC_SUBST(JAVA_COMPARISON_TOOL, $use_comparison_tool) -fi - -if test x$use_comparison_tool_reorg_tests != xno; then - if test x$use_comparison_tool = x; then - AC_MSG_ERROR("comparison tool reorg tests but comparison tool was not specified") - fi - AC_SUBST(COMPARISON_TOOL_REORG_TESTS, 1) -else - AC_SUBST(COMPARISON_TOOL_REORG_TESTS, 0) +if test "$use_extended_functional_tests" != "no"; then + AC_SUBST(EXTENDED_FUNCTIONAL_TESTS, --extended) fi -if test x$use_lcov = xyes; then - if test x$LCOV = x; then - AC_MSG_ERROR("lcov testing requested but lcov not found") +if test "$use_lcov" = "yes"; then + if test "$LCOV" = ""; then + AC_MSG_ERROR([lcov testing requested but lcov not found]) fi - if test x$GCOV = x; then - AC_MSG_ERROR("lcov testing requested but gcov not found") + if test "$PYTHON" = ""; then + AC_MSG_ERROR([lcov testing requested but python not found]) fi - if test x$JAVA = x; then - AC_MSG_ERROR("lcov testing requested but java not found") + if test "$GENHTML" = ""; then + AC_MSG_ERROR([lcov testing requested but genhtml not found]) fi - if test x$GENHTML = x; then - AC_MSG_ERROR("lcov testing requested but genhtml not found") - fi - if test x$use_comparison_tool = x; then - AC_MSG_ERROR("lcov testing requested but comparison tool was not specified") + + AC_MSG_CHECKING([whether compiler is 
Clang]) + AC_PREPROC_IFELSE([AC_LANG_SOURCE([[ + #if defined(__clang__) && defined(__llvm__) + // Compiler is Clang + #else + # error Compiler is not Clang + #endif + ]])],[ + AC_MSG_RESULT([yes]) + if test "$LLVM_COV" = ""; then + AC_MSG_ERROR([lcov testing requested but llvm-cov not found]) + fi + COV_TOOL="$LLVM_COV gcov" + ],[ + AC_MSG_RESULT([no]) + if test "$GCOV" = "x"; then + AC_MSG_ERROR([lcov testing requested but gcov not found]) + fi + COV_TOOL="$GCOV" + ]) + AC_SUBST(COV_TOOL) + AC_SUBST(COV_TOOL_WRAPPER, "cov_tool_wrapper.sh") + LCOV="$LCOV --gcov-tool $(pwd)/$COV_TOOL_WRAPPER" + + AX_CHECK_LINK_FLAG([--coverage], [CORE_LDFLAGS="$CORE_LDFLAGS --coverage"], + [AC_MSG_ERROR([lcov testing requested but --coverage linker flag does not work])]) + AX_CHECK_COMPILE_FLAG([--coverage],[CORE_CXXFLAGS="$CORE_CXXFLAGS --coverage"], + [AC_MSG_ERROR([lcov testing requested but --coverage flag does not work])]) + dnl If coverage is enabled, and the user hasn't overridden CXXFLAGS, clear + dnl them, to prevent autoconfs "-g -O2" being added. Otherwise we'd end up + dnl with "--coverage -Og -O0 -g -O2". + if test "$CXXFLAGS_overridden" = "no"; then + CXXFLAGS="" fi - LCOV="$LCOV --gcov-tool=$GCOV" - AX_CHECK_COMPILE_FLAG([--coverage],[CXXFLAGS="$CXXFLAGS --coverage"], - [AC_MSG_ERROR("lcov testing requested but --coverage flag does not work")]) + CORE_CXXFLAGS="$CORE_CXXFLAGS -Og -O0" +fi + +if test "$use_lcov_branch" != "no"; then + AC_SUBST(LCOV_OPTS, "$LCOV_OPTS --rc lcov_branch_coverage=1") fi -dnl Require little endian -AC_C_BIGENDIAN([AC_MSG_ERROR("Big Endian not supported")]) +dnl Check for endianness +AC_C_BIGENDIAN dnl Check for pthread compile/link requirements AX_PTHREAD -# The following macro will add the necessary defines to bitcoin-config.h, but -# they also need to be passed down to any subprojects. Pull the results out of -# the cache and add them to CPPFLAGS. +dnl The following macro will add the necessary defines to bitcoin-config.h, but +dnl they also need to be passed down to any subprojects. Pull the results out of +dnl the cache and add them to CPPFLAGS. AC_SYS_LARGEFILE -# detect POSIX or GNU variant of strerror_r +dnl detect POSIX or GNU variant of strerror_r AC_FUNC_STRERROR_R -if test x$ac_cv_sys_file_offset_bits != x && - test x$ac_cv_sys_file_offset_bits != xno && - test x$ac_cv_sys_file_offset_bits != xunknown; then - CPPFLAGS="$CPPFLAGS -D_FILE_OFFSET_BITS=$ac_cv_sys_file_offset_bits" +if test "$ac_cv_sys_file_offset_bits" != "" && + test "$ac_cv_sys_file_offset_bits" != "no" && + test "$ac_cv_sys_file_offset_bits" != "unknown"; then + CORE_CPPFLAGS="$CORE_CPPFLAGS -D_FILE_OFFSET_BITS=$ac_cv_sys_file_offset_bits" fi -if test x$ac_cv_sys_large_files != x && - test x$ac_cv_sys_large_files != xno && - test x$ac_cv_sys_large_files != xunknown; then - CPPFLAGS="$CPPFLAGS -D_LARGE_FILES=$ac_cv_sys_large_files" +if test "$ac_cv_sys_large_files" != "" && + test "$ac_cv_sys_large_files" != "no" && + test "$ac_cv_sys_large_files" != "unknown"; then + CORE_CPPFLAGS="$CORE_CPPFLAGS -D_LARGE_FILES=$ac_cv_sys_large_files" fi -AX_CHECK_LINK_FLAG([[-Wl,--large-address-aware]], [LDFLAGS="$LDFLAGS -Wl,--large-address-aware"]) +AC_SEARCH_LIBS([clock_gettime],[rt]) -AX_GCC_FUNC_ATTRIBUTE([visibility]) -AX_GCC_FUNC_ATTRIBUTE([dllexport]) -AX_GCC_FUNC_ATTRIBUTE([dllimport]) +if test "$enable_gprof" = "yes"; then + dnl -pg is incompatible with -pie. Since hardening and profiling together doesn't make sense, + dnl we simply make them mutually exclusive here. 
Additionally, hardened toolchains may force + dnl -pie by default, in which case it needs to be turned off with -no-pie. -if test x$use_glibc_compat != xno; then + if test "$use_hardening" = "yes"; then + AC_MSG_ERROR([gprof profiling is not compatible with hardening. Reconfigure with --disable-hardening or --disable-gprof]) + fi + use_hardening=no + AX_CHECK_COMPILE_FLAG([-pg],[GPROF_CXXFLAGS="-pg"], + [AC_MSG_ERROR([gprof profiling requested but not available])], [$CXXFLAG_WERROR]) - #__fdelt_chk's params and return type have changed from long unsigned int to long int. - # See which one is present here. - AC_MSG_CHECKING(__fdelt_chk type) - AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#ifdef _FORTIFY_SOURCE - #undef _FORTIFY_SOURCE - #endif - #define _FORTIFY_SOURCE 2 - #include - extern "C" long unsigned int __fdelt_warn(long unsigned int);]],[[]])], - [ fdelt_type="long unsigned int"], - [ fdelt_type="long int"]) - AC_MSG_RESULT($fdelt_type) - AC_DEFINE_UNQUOTED(FDELT_TYPE, $fdelt_type,[parameter and return value type for __fdelt_chk]) + AX_CHECK_LINK_FLAG([-no-pie], [GPROF_LDFLAGS="-no-pie"]) + AX_CHECK_LINK_FLAG([-pg], [GPROF_LDFLAGS="$GPROF_LDFLAGS -pg"], + [AC_MSG_ERROR([gprof profiling requested but not available])], [$GPROF_LDFLAGS]) +fi +if test "$TARGET_OS" != "windows"; then + dnl All windows code is PIC, forcing it on just adds useless compile warnings + AX_CHECK_COMPILE_FLAG([-fPIC], [PIC_FLAGS="-fPIC"]) fi -if test x$use_hardening != xno; then - AX_CHECK_COMPILE_FLAG([-Wstack-protector],[HARDENED_CXXFLAGS="$HARDENED_CXXFLAGS -Wstack-protector"]) - AX_CHECK_COMPILE_FLAG([-fstack-protector-all],[HARDENED_CXXFLAGS="$HARDENED_CXXFLAGS -fstack-protector-all"]) +dnl All versions of gcc that we commonly use for building are subject to bug +dnl https://gcc.gnu.org/bugzilla/show_bug.cgi?id=90348. To work around that, set +dnl -fstack-reuse=none for all gcc builds. (Only gcc understands this flag) +AX_CHECK_COMPILE_FLAG([-fstack-reuse=none], [HARDENED_CXXFLAGS="$HARDENED_CXXFLAGS -fstack-reuse=none"]) +if test "$use_hardening" != "no"; then + use_hardening=yes + AX_CHECK_COMPILE_FLAG([-Wstack-protector], [HARDENED_CXXFLAGS="$HARDENED_CXXFLAGS -Wstack-protector"]) + AX_CHECK_COMPILE_FLAG([-fstack-protector-all], [HARDENED_CXXFLAGS="$HARDENED_CXXFLAGS -fstack-protector-all"]) - AX_CHECK_PREPROC_FLAG([-D_FORTIFY_SOURCE=2],[ - AX_CHECK_PREPROC_FLAG([-U_FORTIFY_SOURCE],[ - HARDENED_CPPFLAGS="$HARDENED_CPPFLAGS -U_FORTIFY_SOURCE" - ]) - HARDENED_CPPFLAGS="$HARDENED_CPPFLAGS -D_FORTIFY_SOURCE=2" - ]) + dnl -fcf-protection used with Clang 7 causes ld to emit warnings: + dnl ld: error: ... + dnl Use CHECK_LINK_FLAG & --fatal-warnings to ensure we won't use the flag in this case. + AX_CHECK_LINK_FLAG([-fcf-protection=full], [HARDENED_CXXFLAGS="$HARDENED_CXXFLAGS -fcf-protection=full"], [], [$LDFLAG_WERROR]) + + case $host in + *mingw*) + dnl stack-clash-protection doesn't currently work, and likely should just be skipped for Windows. + dnl See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=90458 for more details. 
+ ;; + *) + AX_CHECK_COMPILE_FLAG([-fstack-clash-protection], [HARDENED_CXXFLAGS="$HARDENED_CXXFLAGS -fstack-clash-protection"], [], [$CXXFLAG_WERROR]) + ;; + esac - AX_CHECK_LINK_FLAG([[-Wl,--dynamicbase]], [HARDENED_LDFLAGS="$HARDENED_LDFLAGS -Wl,--dynamicbase"]) - AX_CHECK_LINK_FLAG([[-Wl,--nxcompat]], [HARDENED_LDFLAGS="$HARDENED_LDFLAGS -Wl,--nxcompat"]) - AX_CHECK_LINK_FLAG([[-Wl,-z,relro]], [HARDENED_LDFLAGS="$HARDENED_LDFLAGS -Wl,-z,relro"]) - AX_CHECK_LINK_FLAG([[-Wl,-z,now]], [HARDENED_LDFLAGS="$HARDENED_LDFLAGS -Wl,-z,now"]) - if test x$TARGET_OS != xwindows; then - # All windows code is PIC, forcing it on just adds useless compile warnings - AX_CHECK_COMPILE_FLAG([-fPIE],[HARDENED_CXXFLAGS="$HARDENED_CXXFLAGS -fPIE"]) - AX_CHECK_LINK_FLAG([[-pie]], [HARDENED_LDFLAGS="$HARDENED_LDFLAGS -pie"]) + dnl When enable_debug is yes, all optimizations are disabled. + dnl However, FORTIFY_SOURCE requires that there is some level of optimization, otherwise it does nothing and just creates a compiler warning. + dnl Since FORTIFY_SOURCE is a no-op without optimizations, do not enable it when enable_debug is yes. + if test "$enable_debug" != "yes"; then + AX_CHECK_PREPROC_FLAG([-D_FORTIFY_SOURCE=2],[ + AX_CHECK_PREPROC_FLAG([-U_FORTIFY_SOURCE],[ + HARDENED_CPPFLAGS="$HARDENED_CPPFLAGS -U_FORTIFY_SOURCE" + ]) + HARDENED_CPPFLAGS="$HARDENED_CPPFLAGS -D_FORTIFY_SOURCE=2" + ]) fi + AX_CHECK_LINK_FLAG([-Wl,--enable-reloc-section], [HARDENED_LDFLAGS="$HARDENED_LDFLAGS -Wl,--enable-reloc-section"], [], [$LDFLAG_WERROR]) + AX_CHECK_LINK_FLAG([-Wl,--dynamicbase], [HARDENED_LDFLAGS="$HARDENED_LDFLAGS -Wl,--dynamicbase"], [], [$LDFLAG_WERROR]) + AX_CHECK_LINK_FLAG([-Wl,--nxcompat], [HARDENED_LDFLAGS="$HARDENED_LDFLAGS -Wl,--nxcompat"], [], [$LDFLAG_WERROR]) + AX_CHECK_LINK_FLAG([-Wl,--high-entropy-va], [HARDENED_LDFLAGS="$HARDENED_LDFLAGS -Wl,--high-entropy-va"], [], [$LDFLAG_WERROR]) + AX_CHECK_LINK_FLAG([-Wl,-z,relro], [HARDENED_LDFLAGS="$HARDENED_LDFLAGS -Wl,-z,relro"], [], [$LDFLAG_WERROR]) + AX_CHECK_LINK_FLAG([-Wl,-z,now], [HARDENED_LDFLAGS="$HARDENED_LDFLAGS -Wl,-z,now"], [], [$LDFLAG_WERROR]) + AX_CHECK_LINK_FLAG([-Wl,-z,separate-code], [HARDENED_LDFLAGS="$HARDENED_LDFLAGS -Wl,-z,separate-code"], [], [$LDFLAG_WERROR]) + AX_CHECK_LINK_FLAG([-fPIE -pie], [PIE_FLAGS="-fPIE"; HARDENED_LDFLAGS="$HARDENED_LDFLAGS -pie"], [], [$CXXFLAG_WERROR]) + case $host in *mingw*) - AC_CHECK_LIB([ssp], [main],, AC_MSG_ERROR(lib missing)) + AC_CHECK_LIB([ssp], [main], [], [AC_MSG_ERROR([libssp missing])]) ;; esac - - CXXFLAGS="$CXXFLAGS $HARDENED_CXXFLAGS" - CPPFLAGS="$CPPFLAGS $HARDENED_CPPFLAGS" - LDFLAGS="$LDFLAGS $HARDENED_LDFLAGS" - OBJCXXFLAGS="$CXXFLAGS" fi -dnl this flag screws up non-darwin gcc even when the check fails. special-case it. -if test x$TARGET_OS = xdarwin; then - AX_CHECK_LINK_FLAG([[-Wl,-dead_strip]], [LDFLAGS="$LDFLAGS -Wl,-dead_strip"]) +dnl These flags are specific to ld64, and may cause issues with other linkers. +dnl For example: GNU ld will interpret -dead_strip as -de and then try and use +dnl "ad_strip" as the symbol for the entry point. 
+if test "$TARGET_OS" = "darwin"; then + AX_CHECK_LINK_FLAG([-Wl,-dead_strip], [CORE_LDFLAGS="$CORE_LDFLAGS -Wl,-dead_strip"], [], [$LDFLAG_WERROR]) + AX_CHECK_LINK_FLAG([-Wl,-dead_strip_dylibs], [CORE_LDFLAGS="$CORE_LDFLAGS -Wl,-dead_strip_dylibs"], [], [$LDFLAG_WERROR]) + AX_CHECK_LINK_FLAG([-Wl,-bind_at_load], [HARDENED_LDFLAGS="$HARDENED_LDFLAGS -Wl,-bind_at_load"], [], [$LDFLAG_WERROR]) fi -AC_CHECK_HEADERS([endian.h stdio.h stdlib.h unistd.h strings.h sys/types.h sys/stat.h sys/select.h sys/prctl.h]) -AC_SEARCH_LIBS([getaddrinfo_a], [anl], [AC_DEFINE(HAVE_GETADDRINFO_A, 1, [Define this symbol if you have getaddrinfo_a])]) -AC_SEARCH_LIBS([inet_pton], [nsl resolv], [AC_DEFINE(HAVE_INET_PTON, 1, [Define this symbol if you have inet_pton])]) +AC_CHECK_HEADERS([endian.h sys/endian.h byteswap.h stdio.h stdlib.h unistd.h strings.h sys/types.h sys/stat.h sys/select.h sys/prctl.h sys/sysctl.h vm/vm_param.h sys/vmmeter.h sys/resources.h]) + +AC_CHECK_DECLS([getifaddrs, freeifaddrs],[CHECK_SOCKET],, + [#include + #include ] +) + +dnl These are used for daemonization in bitcoind +AC_CHECK_DECLS([fork]) +AC_CHECK_DECLS([setsid]) -AC_CHECK_DECLS([strnlen]) +AC_CHECK_DECLS([pipe2]) -AC_CHECK_DECLS([le32toh, le64toh, htole32, htole64, be32toh, be64toh, htobe32, htobe64],,, - [#if HAVE_ENDIAN_H +AC_CHECK_DECLS([le16toh, le32toh, le64toh, htole16, htole32, htole64, be16toh, be32toh, be64toh, htobe16, htobe32, htobe64],,, + [#if HAVE_ENDIAN_H #include + #elif HAVE_SYS_ENDIAN_H + #include #endif]) -dnl Check for MSG_NOSIGNAL -AC_MSG_CHECKING(for MSG_NOSIGNAL) -AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include ]], - [[ int f = MSG_NOSIGNAL; ]])], - [ AC_MSG_RESULT(yes); AC_DEFINE(HAVE_MSG_NOSIGNAL, 1,[Define this symbol if you have MSG_NOSIGNAL]) ], - [ AC_MSG_RESULT(no)] +AC_CHECK_DECLS([bswap_16, bswap_32, bswap_64],,, + [#if HAVE_BYTESWAP_H + #include + #endif]) + +AC_MSG_CHECKING([for __builtin_clzl]) +AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[ ]], [[ + (void) __builtin_clzl(0); + ]])], + [ AC_MSG_RESULT([yes]); have_clzl=yes; AC_DEFINE([HAVE_BUILTIN_CLZL], [1], [Define this symbol if you have __builtin_clzl])], + [ AC_MSG_RESULT([no]); have_clzl=no;] +) + +AC_MSG_CHECKING([for __builtin_clzll]) +AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[ ]], [[ + (void) __builtin_clzll(0); + ]])], + [ AC_MSG_RESULT([yes]); have_clzll=yes; AC_DEFINE([HAVE_BUILTIN_CLZLL], [1], [Define this symbol if you have __builtin_clzll])], + [ AC_MSG_RESULT([no]); have_clzll=no;] ) -AC_SEARCH_LIBS([clock_gettime],[rt]) +dnl Check for malloc_info (for memory statistics information in getmemoryinfo) +AC_MSG_CHECKING([for getmemoryinfo]) +AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include ]], + [[ int f = malloc_info(0, NULL); ]])], + [ AC_MSG_RESULT([yes]); AC_DEFINE([HAVE_MALLOC_INFO], [1], [Define this symbol if you have malloc_info]) ], + [ AC_MSG_RESULT([no])] +) -AC_MSG_CHECKING([for visibility attribute]) -AC_LINK_IFELSE([AC_LANG_SOURCE([ - int foo_def( void ) __attribute__((visibility("default"))); +dnl Check for mallopt(M_ARENA_MAX) (to set glibc arenas) +AC_MSG_CHECKING([for mallopt M_ARENA_MAX]) +AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include ]], + [[ mallopt(M_ARENA_MAX, 1); ]])], + [ AC_MSG_RESULT([yes]); AC_DEFINE([HAVE_MALLOPT_ARENA_MAX], [1], [Define this symbol if you have mallopt with M_ARENA_MAX]) ], + [ AC_MSG_RESULT([no])] +) + +dnl Check for posix_fallocate +AC_MSG_CHECKING([for posix_fallocate]) +AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[ + // same as in src/util/system.cpp + #ifdef __linux__ + #ifdef _POSIX_C_SOURCE + #undef 
_POSIX_C_SOURCE + #endif + #define _POSIX_C_SOURCE 200112L + #endif // __linux__ + #include ]], + [[ int f = posix_fallocate(0, 0, 0); ]])], + [ AC_MSG_RESULT([yes]); AC_DEFINE([HAVE_POSIX_FALLOCATE], [1], [Define this symbol if you have posix_fallocate]) ], + [ AC_MSG_RESULT([no])] +) + +AC_MSG_CHECKING([for default visibility attribute]) +AC_COMPILE_IFELSE([AC_LANG_SOURCE([ + int foo(void) __attribute__((visibility("default"))); int main(){} ])], [ - AC_DEFINE(HAVE_VISIBILITY_ATTRIBUTE,1,[Define if the visibility attribute is supported.]) - AC_MSG_RESULT(yes) + AC_DEFINE([HAVE_DEFAULT_VISIBILITY_ATTRIBUTE], [1], [Define if the visibility attribute is supported.]) + AC_MSG_RESULT([yes]) ], [ - AC_MSG_RESULT(no) - if test x$use_reduce_exports = xyes; then - AC_MSG_ERROR([Cannot find a working visibility attribute. Use --disable-reduced-exports.]) + AC_MSG_RESULT([no]) + if test "$use_reduce_exports" = "yes"; then + AC_MSG_ERROR([Cannot find a working visibility attribute. Use --disable-reduce-exports.]) fi - AC_MSG_WARN([Cannot find a working visibility attribute. Disabling reduced exports.]) - use_reduce_exports=no ] ) -if test x$use_reduce_exports != xno; then - AX_CHECK_COMPILE_FLAG([-fvisibility=hidden],[RE_CXXFLAGS="-fvisibility=hidden"], +AC_MSG_CHECKING([for dllexport attribute]) +AC_COMPILE_IFELSE([AC_LANG_SOURCE([ + __declspec(dllexport) int foo(void); + int main(){} + ])], [ - if test x$use_reduce_exports = xyes; then - AC_MSG_ERROR([Cannot set default symbol visibility. Use --disable-reduced-exports.]) - fi - AC_MSG_WARN([Cannot set default symbol visibility. Disabling reduced exports.]) - use_reduce_exports=no - ]) + AC_DEFINE([HAVE_DLLEXPORT_ATTRIBUTE], [1], [Define if the dllexport attribute is supported.]) + AC_MSG_RESULT([yes]) + ], + [AC_MSG_RESULT([no])] +) + +if test "$use_thread_local" = "yes" || test "$use_thread_local" = "auto"; then + TEMP_LDFLAGS="$LDFLAGS" + LDFLAGS="$TEMP_LDFLAGS $PTHREAD_CFLAGS" + AC_MSG_CHECKING([for thread_local support]) + AC_LINK_IFELSE([AC_LANG_SOURCE([ + #include + static thread_local int foo = 0; + static void run_thread() { foo++;} + int main(){ + for(int i = 0; i < 10; i++) { std::thread(run_thread).detach();} + return foo; + } + ])], + [ + case $host in + *mingw*) + dnl mingw32's implementation of thread_local has also been shown to behave + dnl erroneously under concurrent usage; see: + dnl https://gist.github.com/jamesob/fe9a872051a88b2025b1aa37bfa98605 + AC_MSG_RESULT([no]) + ;; + *freebsd*) + dnl FreeBSD's implementation of thread_local is also buggy (per + dnl https://groups.google.com/d/msg/bsdmailinglist/22ncTZAbDp4/Dii_pII5AwAJ) + AC_MSG_RESULT([no]) + ;; + *) + AC_DEFINE([HAVE_THREAD_LOCAL], [1], [Define if thread_local is supported.]) + AC_MSG_RESULT([yes]) + ;; + esac + ], + [ + AC_MSG_RESULT([no]) + ] + ) + LDFLAGS="$TEMP_LDFLAGS" fi -LEVELDB_CPPFLAGS= -LIBLEVELDB= -LIBMEMENV= -AM_CONDITIONAL([EMBEDDED_LEVELDB],[true]) -AC_SUBST(LEVELDB_CPPFLAGS) -AC_SUBST(LIBLEVELDB) -AC_SUBST(LIBMEMENV) +dnl check for gmtime_r(), fallback to gmtime_s() if that is unavailable +dnl fail if neither are available. 
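The two probes below only need to compile; they record which reentrant UTC conversion the platform provides. A minimal sketch of how the resulting HAVE_GMTIME_R define is typically consumed (illustrative only; the wrapper below is not part of this change):

    #include <ctime>

    // Convert a time_t to UTC without the shared static buffer that plain gmtime() uses.
    static std::tm ToUTC(std::time_t t)
    {
        std::tm out{};
    #ifdef HAVE_GMTIME_R
        gmtime_r(&t, &out);   // POSIX variant: (time, result)
    #else
        gmtime_s(&out, &t);   // MSVC/C11-style variant: note the swapped argument order
    #endif
        return out;
    }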
+AC_MSG_CHECKING([for gmtime_r]) +AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include <ctime>]], + [[ gmtime_r((const time_t *) nullptr, (struct tm *) nullptr); ]])], + [ AC_MSG_RESULT([yes]); AC_DEFINE([HAVE_GMTIME_R], [1], [Define this symbol if gmtime_r is available]) ], + [ AC_MSG_RESULT([no]); + AC_MSG_CHECKING([for gmtime_s]); + AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include <ctime>]], + [[ gmtime_s((struct tm *) nullptr, (const time_t *) nullptr); ]])], + [ AC_MSG_RESULT([yes])], + [ AC_MSG_RESULT([no]); AC_MSG_ERROR([Both gmtime_r and gmtime_s are unavailable]) ] + ) + ] +) + +dnl Check for different ways of gathering OS randomness +AC_MSG_CHECKING([for Linux getrandom syscall]) +AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include <unistd.h> + #include <sys/syscall.h> + #include <linux/random.h>]], + [[ syscall(SYS_getrandom, nullptr, 32, 0); ]])], + [ AC_MSG_RESULT([yes]); AC_DEFINE([HAVE_SYS_GETRANDOM], [1], [Define this symbol if the Linux getrandom system call is available]) ], + [ AC_MSG_RESULT([no])] +) + +AC_MSG_CHECKING([for getentropy via random.h]) +AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include <unistd.h> + #include <sys/random.h>]], + [[ getentropy(nullptr, 32) ]])], + [ AC_MSG_RESULT([yes]); AC_DEFINE([HAVE_GETENTROPY_RAND], [1], [Define this symbol if the BSD getentropy system call is available with sys/random.h]) ], + [ AC_MSG_RESULT([no])] +) + +AC_MSG_CHECKING([for sysctl]) +AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include <sys/types.h> + #include <sys/sysctl.h>]], + [[ #ifdef __linux__ + #error "Don't use sysctl on Linux, it's deprecated even when it works" + #endif + sysctl(nullptr, 2, nullptr, nullptr, nullptr, 0); ]])], + [ AC_MSG_RESULT([yes]); AC_DEFINE([HAVE_SYSCTL], [1], [Define this symbol if the BSD sysctl() is available]) ], + [ AC_MSG_RESULT([no])] +) + +AC_MSG_CHECKING([for sysctl KERN_ARND]) +AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include <sys/types.h> + #include <sys/sysctl.h>]], + [[ #ifdef __linux__ + #error "Don't use sysctl on Linux, it's deprecated even when it works" + #endif + static int name[2] = {CTL_KERN, KERN_ARND}; + sysctl(name, 2, nullptr, nullptr, nullptr, 0); ]])], + [ AC_MSG_RESULT([yes]); AC_DEFINE([HAVE_SYSCTL_ARND], [1], [Define this symbol if the BSD sysctl(KERN_ARND) is available]) ], + [ AC_MSG_RESULT([no])] +) + +AC_MSG_CHECKING([for if type char equals int8_t]) +AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include <stdint.h> + #include <type_traits>]], + [[ static_assert(std::is_same<int8_t, char>::value, ""); ]])], + [ AC_MSG_RESULT([yes]); AC_DEFINE([CHAR_EQUALS_INT8], [1], [Define this symbol if type char equals int8_t]) ], + [ AC_MSG_RESULT([no])] +) + +AC_MSG_CHECKING([for fdatasync]) +AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include <unistd.h>]], + [[ fdatasync(0); ]])], + [ AC_MSG_RESULT([yes]); HAVE_FDATASYNC=1 ], + [ AC_MSG_RESULT([no]); HAVE_FDATASYNC=0 ] +) +AC_DEFINE_UNQUOTED([HAVE_FDATASYNC], [$HAVE_FDATASYNC], [Define to 1 if fdatasync is available.]) + +AC_MSG_CHECKING([for F_FULLFSYNC]) +AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include <fcntl.h>]], + [[ fcntl(0, F_FULLFSYNC, 0); ]])], + [ AC_MSG_RESULT([yes]); HAVE_FULLFSYNC=1 ], + [ AC_MSG_RESULT([no]); HAVE_FULLFSYNC=0 ] +) + +AC_MSG_CHECKING([for O_CLOEXEC]) +AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include <fcntl.h>]], + [[ open("", O_CLOEXEC); ]])], + [ AC_MSG_RESULT([yes]); HAVE_O_CLOEXEC=1 ], + [ AC_MSG_RESULT([no]); HAVE_O_CLOEXEC=0 ] +) +AC_DEFINE_UNQUOTED([HAVE_O_CLOEXEC], [$HAVE_O_CLOEXEC], [Define to 1 if O_CLOEXEC flag is available.]) + +dnl crc32c platform checks +AC_MSG_CHECKING([for __builtin_prefetch]) +AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[ ]], [[ + char data = 0; + const char* address = &data; + __builtin_prefetch(address, 0, 0); + ]])],
[ AC_MSG_RESULT([yes]); HAVE_BUILTIN_PREFETCH=1 ], + [ AC_MSG_RESULT([no]); HAVE_BUILTIN_PREFETCH=0 ] +) + +AC_MSG_CHECKING([for _mm_prefetch]) +AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include <xmmintrin.h>]], [[ + char data = 0; + const char* address = &data; + _mm_prefetch(address, _MM_HINT_NTA); + ]])], + [ AC_MSG_RESULT([yes]); HAVE_MM_PREFETCH=1 ], + [ AC_MSG_RESULT([no]); HAVE_MM_PREFETCH=0 ] +) + +AC_MSG_CHECKING([for strong getauxval support in the system headers]) +AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[ + #include <sys/auxv.h> + ]], [[ + getauxval(AT_HWCAP); + ]])], + [ AC_MSG_RESULT([yes]); HAVE_STRONG_GETAUXVAL=1; AC_DEFINE([HAVE_STRONG_GETAUXVAL], [1], [Define this symbol to build code that uses getauxval)]) ], + [ AC_MSG_RESULT([no]); HAVE_STRONG_GETAUXVAL=0 ] +) + +have_any_system=no +AC_MSG_CHECKING([for std::system]) +AC_LINK_IFELSE( + [ AC_LANG_PROGRAM( + [[ #include <cstdlib> ]], + [[ int nErr = std::system(""); ]] + )], + [ AC_MSG_RESULT([yes]); have_any_system=yes], + [ AC_MSG_RESULT([no]) ] +) + +AC_MSG_CHECKING([for ::_wsystem]) +AC_LINK_IFELSE( + [ AC_LANG_PROGRAM( + [[ ]], + [[ int nErr = ::_wsystem(""); ]] + )], + [ AC_MSG_RESULT([yes]); have_any_system=yes], + [ AC_MSG_RESULT([no]) ] +) + +if test "$have_any_system" != "no"; then + AC_DEFINE([HAVE_SYSTEM], [1], [Define to 1 if std::system or ::wsystem is available.]) +fi + +dnl SUPPRESSED_CPPFLAGS=SUPPRESS_WARNINGS([$SOME_CPPFLAGS]) +dnl Replace -I with -isystem in $SOME_CPPFLAGS to suppress warnings from +dnl headers from its include directories and return the result. +dnl See -isystem documentation: +dnl https://gcc.gnu.org/onlinedocs/gcc/Directory-Options.html +dnl https://clang.llvm.org/docs/ClangCommandLineReference.html#cmdoption-clang-isystem-directory +dnl Do not change "-I/usr/include" to "-isystem /usr/include" because that +dnl is not necessary (/usr/include is already a system directory) and because +dnl it would break GCC's #include_next.
+AC_DEFUN([SUPPRESS_WARNINGS], + [[$(echo $1 |${SED} -E -e 's/(^| )-I/\1-isystem /g' -e 's;-isystem /usr/include/*( |$);-I/usr/include\1;g')]]) + +dnl enable-fuzz should disable all other targets +if test "$enable_fuzz" = "yes"; then + AC_MSG_WARN([enable-fuzz will disable all other targets and force --enable-fuzz-binary=yes]) + build_bitcoin_utils=no + build_bitcoin_cli=no + build_bitcoin_tx=no + build_bitcoin_util=no + build_bitcoin_chainstate=no + build_bitcoin_wallet=no + build_bitcoind=no + build_bitcoin_libs=no + bitcoin_enable_qt=no + bitcoin_enable_qt_test=no + bitcoin_enable_qt_dbus=no + use_bench=no + use_external_signer=no + use_upnp=no + use_natpmp=no + use_zmq=no + enable_fuzz_binary=yes + + AX_CHECK_PREPROC_FLAG([-DABORT_ON_FAILED_ASSUME], [DEBUG_CPPFLAGS="$DEBUG_CPPFLAGS -DABORT_ON_FAILED_ASSUME"], [], [$CXXFLAG_WERROR]) +else + BITCOIN_QT_INIT + + dnl sets $bitcoin_enable_qt, $bitcoin_enable_qt_test, $bitcoin_enable_qt_dbus + BITCOIN_QT_CONFIGURE([5.11.3]) + + dnl Keep a copy of the original $QT_INCLUDES and use it when invoking qt's moc + QT_INCLUDES_UNSUPPRESSED=$QT_INCLUDES + if test "$suppress_external_warnings" != "no" ; then + QT_INCLUDES=SUPPRESS_WARNINGS($QT_INCLUDES) + QT_DBUS_INCLUDES=SUPPRESS_WARNINGS($QT_DBUS_INCLUDES) + QT_TEST_INCLUDES=SUPPRESS_WARNINGS($QT_TEST_INCLUDES) + fi +fi + +if test "$enable_fuzz_binary" = "yes"; then + AC_MSG_CHECKING([whether main function is needed for fuzz binary]) + AX_CHECK_LINK_FLAG( + [], + [AC_MSG_RESULT([no])], + [AC_MSG_RESULT([yes]); CORE_CPPFLAGS="$CORE_CPPFLAGS -DPROVIDE_FUZZ_MAIN_FUNCTION"], + [$SANITIZER_LDFLAGS], + [AC_LANG_PROGRAM([[ + #include + #include + extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) { return 0; } + /* comment to remove the main function ... + ]],[[ + */ int not_main() { + ]])]) + + CHECK_RUNTIME_LIB +fi + +if test "$enable_wallet" != "no"; then dnl Check for libdb_cxx only if wallet enabled - BITCOIN_FIND_BDB48 + if test "$use_bdb" != "no"; then + BITCOIN_FIND_BDB48 + if test "$suppress_external_warnings" != "no" ; then + BDB_CPPFLAGS=SUPPRESS_WARNINGS($BDB_CPPFLAGS) + fi + fi + + dnl Check for sqlite3 + if test "$use_sqlite" != "no"; then + PKG_CHECK_MODULES([SQLITE], [sqlite3 >= 3.7.17], [have_sqlite=yes], [have_sqlite=no]) + fi + AC_MSG_CHECKING([whether to build wallet with support for sqlite]) + if test "$use_sqlite" = "no"; then + use_sqlite=no + elif test "$have_sqlite" = "no"; then + if test "$use_sqlite" = "yes"; then + AC_MSG_ERROR([sqlite support requested but cannot be built. 
Use --without-sqlite]) + fi + use_sqlite=no + else + if test "$use_sqlite" != "no"; then + AC_DEFINE([USE_SQLITE],[1],[Define if sqlite support should be compiled in]) + use_sqlite=yes + fi + fi + AC_MSG_RESULT([$use_sqlite]) + + dnl Disable wallet if both --without-bdb and --without-sqlite + if test "$use_bdb$use_sqlite" = "nono"; then + if test "$enable_wallet" = "yes"; then + AC_MSG_ERROR([wallet functionality requested but no BDB or SQLite support available.]) + fi + enable_wallet=no + fi +fi + +if test "$use_usdt" != "no"; then + AC_MSG_CHECKING([whether Userspace, Statically Defined Tracing tracepoints are supported]) + AC_COMPILE_IFELSE([ + AC_LANG_PROGRAM( + [#include <sys/sdt.h>], + [DTRACE_PROBE("context", "event");] + )], + [AC_MSG_RESULT([yes]); AC_DEFINE([ENABLE_TRACING], [1], [Define to 1 to enable tracepoints for Userspace, Statically Defined Tracing])], + [AC_MSG_RESULT([no]); use_usdt=no;] + ) +fi +AM_CONDITIONAL([ENABLE_USDT_TRACEPOINTS], [test "$use_usdt" = "yes"]) + +if test "$build_bitcoin_cli$build_bitcoin_tx$build_bitcoin_util$build_bitcoind$bitcoin_enable_qt$use_bench$use_tests" = "nonononononono"; then + use_upnp=no + use_natpmp=no + use_zmq=no fi dnl Check for libminiupnpc (optional) -if test x$use_upnp != xno; then +if test "$use_upnp" != "no"; then + TEMP_CPPFLAGS="$CPPFLAGS" + CPPFLAGS="$CPPFLAGS $MINIUPNPC_CPPFLAGS" AC_CHECK_HEADERS( - [miniupnpc/miniwget.h miniupnpc/miniupnpc.h miniupnpc/upnpcommands.h miniupnpc/upnperrors.h], - [AC_CHECK_LIB([miniupnpc], [main],[MINIUPNPC_LIBS=-lminiupnpc], [have_miniupnpc=no])], + [miniupnpc/miniupnpc.h miniupnpc/upnpcommands.h miniupnpc/upnperrors.h], + [AC_CHECK_LIB([miniupnpc], [upnpDiscover], [MINIUPNPC_LIBS="$MINIUPNPC_LIBS -lminiupnpc"], [have_miniupnpc=no], [$MINIUPNPC_LIBS])], [have_miniupnpc=no] ) + dnl The minimum supported miniUPnPc API version is set to 10. This keeps compatibility + dnl with Ubuntu 16.04 LTS and Debian 8 libminiupnpc-dev packages. + if test "$have_miniupnpc" != "no"; then + AC_MSG_CHECKING([whether miniUPnPc API version is supported]) + AC_PREPROC_IFELSE([AC_LANG_PROGRAM([[ + @%:@include <miniupnpc/miniupnpc.h> + ]], [[ + #if MINIUPNPC_API_VERSION >= 10 + // Everything is okay + #else + # error miniUPnPc API version is too old + #endif + ]])],[ + AC_MSG_RESULT([yes]) + ],[ + AC_MSG_RESULT([no]) + AC_MSG_WARN([miniUPnPc API version < 10 is unsupported, disabling UPnP support.]) + have_miniupnpc=no + ]) + fi + CPPFLAGS="$TEMP_CPPFLAGS" fi -BITCOIN_QT_INIT - -dnl sets $bitcoin_enable_qt, $bitcoin_enable_qt_test, $bitcoin_enable_qt_dbus -BITCOIN_QT_CONFIGURE([$use_pkgconfig], [qt4]) +dnl Check for libnatpmp (optional).
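A sketch of roughly what the probe below verifies: natpmp.h and the initnatpmp symbol come from the check itself, while the surrounding program and the exact initnatpmp signature are assumptions based on the libnatpmp API:

    #include <natpmp.h>

    int main()
    {
        natpmp_t natpmp;
        // forcegw=0 asks the library to autodetect the default gateway (assumed semantics);
        // the configure probe only needs this symbol to resolve from -lnatpmp.
        int r = initnatpmp(&natpmp, /*forcegw=*/0, /*forcedgw=*/0);
        closenatpmp(&natpmp);
        return r;
    }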
+if test "$use_natpmp" != "no"; then + TEMP_CPPFLAGS="$CPPFLAGS" + CPPFLAGS="$CPPFLAGS $NATPMP_CPPFLAGS" + AC_CHECK_HEADERS([natpmp.h], + [AC_CHECK_LIB([natpmp], [initnatpmp], [NATPMP_LIBS="$NATPMP_LIBS -lnatpmp"], [have_natpmp=no], [$NATPMP_LIBS])], + [have_natpmp=no]) + CPPFLAGS="$TEMP_CPPFLAGS" +fi -if test x$build_bitcoin_utils$build_bitcoind$bitcoin_enable_qt$use_tests = xnononono; then - use_boost=no +if test "$build_bitcoin_wallet$build_bitcoin_cli$build_bitcoin_tx$build_bitcoind$bitcoin_enable_qt$use_tests$use_bench" = "nonononononono"; then + use_boost=no else - use_boost=yes + use_boost=yes fi -if test x$use_boost = xyes; then +if test "$use_boost" = "yes"; then -dnl Check for boost libs -AX_BOOST_BASE -AX_BOOST_SYSTEM -AX_BOOST_FILESYSTEM -AX_BOOST_PROGRAM_OPTIONS -AX_BOOST_THREAD -AX_BOOST_CHRONO + dnl Check for Boost headers + AX_BOOST_BASE([1.64.0],[],[AC_MSG_ERROR([Boost is not available!])]) + if test "$want_boost" = "no"; then + AC_MSG_ERROR([only libbitcoinconsensus can be built without Boost]) + fi + dnl we don't use multi_index serialization + BOOST_CPPFLAGS="$BOOST_CPPFLAGS -DBOOST_MULTI_INDEX_DISABLE_SERIALIZATION" -if test x$use_reduce_exports != xno; then - AC_MSG_CHECKING([for working boost reduced exports]) - TEMP_CPPFLAGS="$CPPFLAGS" - CPPFLAGS="$BOOST_CPPFLAGS $CPPFLAGS" + if test "$suppress_external_warnings" != "no"; then + BOOST_CPPFLAGS=SUPPRESS_WARNINGS($BOOST_CPPFLAGS) + fi +fi + +if test "$use_external_signer" != "no"; then + case $host in + *mingw*) + dnl Boost Process uses Boost Filesystem when targeting Windows. Also, + dnl since Boost 1.71.0, Process does not work with mingw-w64 without + dnl workarounds. See 67669ab425b52a2b6be3d2f3b3b7e3939b676a2c. + if test "$use_external_signer" = "yes"; then + AC_MSG_ERROR([External signing is not supported on Windows]) + fi + use_external_signer="no"; + ;; + *) + AC_MSG_CHECKING([whether Boost.Process can be used]) + TEMP_CXXFLAGS="$CXXFLAGS" + dnl Boost 1.78 requires the following workaround. + dnl See: https://github.com/boostorg/process/issues/235 + CXXFLAGS="$CXXFLAGS -Wno-error=narrowing" + TEMP_CPPFLAGS="$CPPFLAGS" + CPPFLAGS="$CPPFLAGS $BOOST_CPPFLAGS" + TEMP_LDFLAGS="$LDFLAGS" + dnl Boost 1.73 and older require the following workaround. + LDFLAGS="$LDFLAGS $PTHREAD_CFLAGS" + AC_LINK_IFELSE([AC_LANG_PROGRAM([[#include ]])], + [have_boost_process="yes"], + [have_boost_process="no"]) + LDFLAGS="$TEMP_LDFLAGS" + CPPFLAGS="$TEMP_CPPFLAGS" + CXXFLAGS="$TEMP_CXXFLAGS" + AC_MSG_RESULT([$have_boost_process]) + if test "$have_boost_process" = "yes"; then + use_external_signer="yes" + AC_DEFINE([ENABLE_EXTERNAL_SIGNER], [1], [Define if external signer support is enabled]) + else + if test "$use_external_signer" = "yes"; then + AC_MSG_ERROR([External signing is not supported for this Boost version]) + fi + use_external_signer="no"; + fi + ;; + esac +fi +AM_CONDITIONAL([ENABLE_EXTERNAL_SIGNER], [test "$use_external_signer" = "yes"]) + +dnl Do not compile with syscall sandbox support when compiling under the sanitizers. +dnl The sanitizers introduce use of syscalls that are not typically used in bitcoind +dnl (such as execve when the sanitizers execute llvm-symbolizer). 
+if test "$use_sanitizers" != ""; then + AC_MSG_WARN([Specifying --with-sanitizers forces --without-seccomp since the sanitizers introduce use of syscalls not allowed by the bitcoind syscall sandbox (-sandbox=).]) + seccomp_found=no +fi +if test "$seccomp_found" != "no"; then + AC_MSG_CHECKING([for seccomp-bpf (Linux x86-64)]) AC_PREPROC_IFELSE([AC_LANG_PROGRAM([[ - @%:@include + @%:@include ]], [[ - #if BOOST_VERSION >= 104900 - // Everything is okay - #else - # error Boost version is too old + #if !defined(__x86_64__) + # error Syscall sandbox is an experimental feature currently available only under Linux x86-64. #endif ]])],[ - AC_MSG_RESULT(yes) - ],[: - if test x$use_reduce_exports = xauto; then - use_reduce_exports=no - else - if test x$use_reduce_exports = xyes; then - AC_MSG_ERROR([boost versions < 1.49 are known to be broken with reduced exports. Use --disable-reduced-exports.]) - fi - fi - AC_MSG_RESULT(no) - AC_MSG_WARN([boost versions < 1.49 are known to have symbol visibility issues. Disabling reduced exports.]) + AC_MSG_RESULT([yes]) + seccomp_found="yes" + AC_DEFINE([USE_SYSCALL_SANDBOX], [1], [Define this symbol to build with syscall sandbox support.]) + ],[ + AC_MSG_RESULT([no]) + seccomp_found="no" ]) - CPPFLAGS="$TEMP_CPPFLAGS" fi - -elif test x$use_reduce_exports = xauto; then - use_reduce_exports=yes +dnl Currently only enable -sandbox= feature if seccomp is found. +dnl In the future, sandboxing could be also be supported with other +dnl sandboxing mechanisms besides seccomp. +use_syscall_sandbox=$seccomp_found +AM_CONDITIONAL([ENABLE_SYSCALL_SANDBOX], [test "$use_syscall_sandbox" != "no"]) + +dnl Check for reduced exports +if test "$use_reduce_exports" = "yes"; then + AX_CHECK_COMPILE_FLAG([-fvisibility=hidden], [CORE_CXXFLAGS="$CORE_CXXFLAGS -fvisibility=hidden"], + [AC_MSG_ERROR([Cannot set hidden symbol visibility. 
Use --disable-reduce-exports.])], [$CXXFLAG_WERROR]) + AX_CHECK_LINK_FLAG([-Wl,--exclude-libs,ALL], [RELDFLAGS="-Wl,--exclude-libs,ALL"], [], [$LDFLAG_WERROR]) fi -if test x$use_reduce_exports != xno; then - CXXFLAGS="$CXXFLAGS $RE_CXXFLAGS" - AX_CHECK_LINK_FLAG([[-Wl,--exclude-libs,ALL]], [RELDFLAGS="-Wl,--exclude-libs,ALL"]) +if test "$use_tests" = "yes"; then + + if test "$HEXDUMP" = ""; then + AC_MSG_ERROR([hexdump is required for tests]) + fi fi -if test x$use_tests = xyes; then +dnl libevent check - if test x$HEXDUMP = x; then - AC_MSG_ERROR(hexdump is required for tests) +if test "$build_bitcoin_cli$build_bitcoind$bitcoin_enable_qt$use_tests$use_bench" != "nonononono"; then + PKG_CHECK_MODULES([EVENT], [libevent >= 2.1.8], [use_libevent=yes], [AC_MSG_ERROR([libevent version 2.1.8 or greater not found.])]) + if test "$TARGET_OS" != "windows"; then + PKG_CHECK_MODULES([EVENT_PTHREADS], [libevent_pthreads >= 2.1.8], [], [AC_MSG_ERROR([libevent_pthreads version 2.1.8 or greater not found.])]) fi + if test "$suppress_external_warnings" != "no"; then + EVENT_CFLAGS=SUPPRESS_WARNINGS($EVENT_CFLAGS) + fi +fi - if test x$use_boost = xyes; then - - AX_BOOST_UNIT_TEST_FRAMEWORK +if test x$use_libevent = xyes; then + TEMP_CXXFLAGS="$CXXFLAGS" + CXXFLAGS="$CXXFLAGS $EVENT_CFLAGS" + AC_MSG_CHECKING([if evhttp_connection_get_peer expects const char**]) + AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[ + #include + #include + ]], [[ + evhttp_connection *conn = (evhttp_connection *)1; + const char *host; + uint16_t port; + + evhttp_connection_get_peer(conn, &host, &port); + ]])], + [ AC_MSG_RESULT([yes]); AC_DEFINE([HAVE_EVHTTP_CONNECTION_GET_PEER_CONST_CHAR], [1], [Define this symbol if evhttp_connection_get_peer expects const char**]) ], + [ AC_MSG_RESULT([no]) ] + ) + CXXFLAGS="$TEMP_CXXFLAGS" +fi - dnl Determine if -DBOOST_TEST_DYN_LINK is needed - AC_MSG_CHECKING([for dynamic linked boost test]) - TEMP_LIBS="$LIBS" - LIBS="$LIBS $BOOST_LDFLAGS $BOOST_UNIT_TEST_FRAMEWORK_LIB" - TEMP_CPPFLAGS="$CPPFLAGS" - CPPFLAGS="$CPPFLAGS $BOOST_CPPFLAGS" - AC_LINK_IFELSE([AC_LANG_SOURCE([ - #define BOOST_TEST_DYN_LINK - #define BOOST_TEST_MAIN - #include - - ])], - [AC_MSG_RESULT(yes)] - [TESTDEFS="$TESTDEFS -DBOOST_TEST_DYN_LINK"], - [AC_MSG_RESULT(no)]) - LIBS="$TEMP_LIBS" - CPPFLAGS="$TEMP_CPPFLAGS" +dnl QR Code encoding library check - fi +if test "$use_qr" != "no"; then + BITCOIN_QT_CHECK([PKG_CHECK_MODULES([QR], [libqrencode], [have_qrencode=yes], [have_qrencode=no])]) fi -if test x$use_boost = xyes; then - -BOOST_LIBS="$BOOST_LDFLAGS $BOOST_SYSTEM_LIB $BOOST_FILESYSTEM_LIB $BOOST_PROGRAM_OPTIONS_LIB $BOOST_THREAD_LIB" - -dnl Boost >= 1.50 uses sleep_for rather than the now-deprecated sleep, however -dnl it was broken from 1.50 to 1.52 when backed by nanosleep. Use sleep_for if -dnl a working version is available, else fall back to sleep. sleep was removed -dnl after 1.56. -dnl If neither is available, abort. -dnl If sleep_for is used, boost_chrono becomes a requirement. 
-if test x$ax_cv_boost_chrono = xyes; then -TEMP_LIBS="$LIBS" -LIBS="$BOOST_LIBS $BOOST_CHRONO_LIB $LIBS" -TEMP_CPPFLAGS="$CPPFLAGS" -CPPFLAGS="$CPPFLAGS $BOOST_CPPFLAGS" -AC_LINK_IFELSE([AC_LANG_PROGRAM([[ - #include - #include - ]],[[ - #if BOOST_VERSION >= 105000 && (!defined(BOOST_HAS_NANOSLEEP) || BOOST_VERSION >= 105200) - boost::this_thread::sleep_for(boost::chrono::milliseconds(0)); - #else - choke me - #endif - ]])], - [boost_sleep=yes; BOOST_LIBS="$BOOST_LIBS $BOOST_CHRONO_LIB"; - AC_DEFINE(HAVE_WORKING_BOOST_SLEEP_FOR, 1, [Define this symbol if boost sleep_for works])], - [boost_sleep=no]) -LIBS="$TEMP_LIBS" -CPPFLAGS="$TEMP_CPPFLAGS" -fi - -if test x$boost_sleep != xyes; then -TEMP_LIBS="$LIBS" -LIBS="$BOOST_LIBS $LIBS" -TEMP_CPPFLAGS="$CPPFLAGS" -CPPFLAGS="$CPPFLAGS $BOOST_CPPFLAGS" -AC_LINK_IFELSE([AC_LANG_PROGRAM([[ - #include - #include - #include - ]],[[ - #if BOOST_VERSION <= 105600 - boost::this_thread::sleep(boost::posix_time::milliseconds(0)); - #else - choke me - #endif - ]])], - [boost_sleep=yes; AC_DEFINE(HAVE_WORKING_BOOST_SLEEP, 1, [Define this symbol if boost sleep works])], - [boost_sleep=no]) -LIBS="$TEMP_LIBS" -CPPFLAGS="$TEMP_CPPFLAGS" +dnl ZMQ check + +if test "$use_zmq" = "yes"; then + PKG_CHECK_MODULES([ZMQ], [libzmq >= 4], + AC_DEFINE([ENABLE_ZMQ], [1], [Define to 1 to enable ZMQ functions]), + [AC_DEFINE([ENABLE_ZMQ], [0], [Define to 1 to enable ZMQ functions]) + AC_MSG_WARN([libzmq version 4.x or greater not found, disabling]) + use_zmq=no]) +else + AC_DEFINE_UNQUOTED([ENABLE_ZMQ], [0], [Define to 1 to enable ZMQ functions]) fi -if test x$boost_sleep != xyes; then - AC_MSG_ERROR(No working boost sleep implementation found.) +if test "$use_zmq" = "yes"; then + dnl Assume libzmq was built for static linking + case $host in + *mingw*) + ZMQ_CFLAGS="$ZMQ_CFLAGS -DZMQ_STATIC" + ;; + esac fi +dnl libmultiprocess library check + +libmultiprocess_found=no +if test "$with_libmultiprocess" = "yes" || test "$with_libmultiprocess" = "auto"; then + PKG_CHECK_MODULES([LIBMULTIPROCESS], [libmultiprocess], [ + libmultiprocess_found=yes; + libmultiprocess_prefix=`$PKG_CONFIG --variable=prefix libmultiprocess`; + ], [true]) +elif test "$with_libmultiprocess" != "no"; then + AC_MSG_ERROR([--with-libmultiprocess=$with_libmultiprocess value is not yes, auto, or no]) fi -if test x$use_pkgconfig = xyes; then +dnl Enable multiprocess check - if test x"$PKG_CONFIG" = "x"; then - AC_MSG_ERROR(pkg-config not found.) +if test "$enable_multiprocess" = "yes"; then + if test "$libmultiprocess_found" != "yes"; then + AC_MSG_ERROR([--enable-multiprocess=yes option specified but libmultiprocess library was not found. May need to install libmultiprocess library, or specify install path with PKG_CONFIG_PATH environment variable. 
Running 'pkg-config --debug libmultiprocess' may be helpful for debugging.]) fi - - : #NOP - m4_ifdef( - [PKG_CHECK_MODULES], - [ - PKG_CHECK_MODULES([SSL], [libssl],, [AC_MSG_ERROR(openssl not found.)]) - PKG_CHECK_MODULES([CRYPTO], [libcrypto],,[AC_MSG_ERROR(libcrypto not found.)]) - BITCOIN_QT_CHECK([PKG_CHECK_MODULES([PROTOBUF], [protobuf], [have_protobuf=yes], [BITCOIN_QT_FAIL(libprotobuf not found)])]) - if test x$use_qr != xno; then - BITCOIN_QT_CHECK([PKG_CHECK_MODULES([QR], [libqrencode], [have_qrencode=yes], [have_qrencode=no])]) - fi - ] - ) + build_multiprocess=yes +elif test "$enable_multiprocess" = "auto"; then + build_multiprocess=$libmultiprocess_found else - AC_CHECK_HEADER([openssl/crypto.h],,AC_MSG_ERROR(libcrypto headers missing)) - AC_CHECK_LIB([crypto], [main],CRYPTO_LIBS=-lcrypto, AC_MSG_ERROR(libcrypto missing)) + build_multiprocess=no +fi - AC_CHECK_HEADER([openssl/ssl.h],, AC_MSG_ERROR(libssl headers missing),) - AC_CHECK_LIB([ssl], [main],SSL_LIBS=-lssl, AC_MSG_ERROR(libssl missing)) +AM_CONDITIONAL([BUILD_MULTIPROCESS], [test "$build_multiprocess" = "yes"]) +AM_CONDITIONAL([BUILD_BITCOIN_NODE], [test "$build_multiprocess" = "yes"]) +AM_CONDITIONAL([BUILD_BITCOIN_GUI], [test "$build_multiprocess" = "yes"]) - BITCOIN_QT_CHECK(AC_CHECK_LIB([protobuf] ,[main],[PROTOBUF_LIBS=-lprotobuf], BITCOIN_QT_FAIL(libprotobuf not found))) - if test x$use_qr != xno; then - BITCOIN_QT_CHECK([AC_CHECK_LIB([qrencode], [main],[QR_LIBS=-lqrencode], [have_qrencode=no])]) - BITCOIN_QT_CHECK([AC_CHECK_HEADER([qrencode.h],, have_qrencode=no)]) +dnl codegen tools check + +if test "$build_multiprocess" != "no"; then + if test "$with_mpgen" = "yes" || test "$with_mpgen" = "auto"; then + MPGEN_PREFIX="$libmultiprocess_prefix" + elif test "$with_mpgen" != "no"; then + MPGEN_PREFIX="$with_mpgen"; fi + AC_SUBST(MPGEN_PREFIX) fi -CFLAGS_TEMP="$CFLAGS" -LIBS_TEMP="$LIBS" -CFLAGS="$CFLAGS $SSL_CFLAGS $CRYPTO_CFLAGS" -LIBS="$LIBS $SSL_LIBS $CRYPTO_LIBS" -AC_CHECK_HEADER([openssl/ec.h],, AC_MSG_ERROR(OpenSSL ec header missing),) -CFLAGS="$CFLAGS_TEMP" -LIBS="$LIBS_TEMP" - -BITCOIN_QT_PATH_PROGS([PROTOC], [protoc],$protoc_bin_path) - AC_MSG_CHECKING([whether to build bitcoind]) -AM_CONDITIONAL([BUILD_BITCOIND], [test x$build_bitcoind = xyes]) +AM_CONDITIONAL([BUILD_BITCOIND], [test $build_bitcoind = "yes"]) AC_MSG_RESULT($build_bitcoind) -AC_MSG_CHECKING([whether to build utils (bitcoin-cli bitcoin-tx)]) -AM_CONDITIONAL([BUILD_BITCOIN_UTILS], [test x$build_bitcoin_utils = xyes]) -AC_MSG_RESULT($build_bitcoin_utils) +AC_MSG_CHECKING([whether to build bitcoin-cli]) +AM_CONDITIONAL([BUILD_BITCOIN_CLI], [test $build_bitcoin_cli = "yes"]) +AC_MSG_RESULT($build_bitcoin_cli) + +AC_MSG_CHECKING([whether to build bitcoin-tx]) +AM_CONDITIONAL([BUILD_BITCOIN_TX], [test $build_bitcoin_tx = "yes"]) +AC_MSG_RESULT($build_bitcoin_tx) + +AC_MSG_CHECKING([whether to build bitcoin-wallet]) +AM_CONDITIONAL([BUILD_BITCOIN_WALLET], [test $build_bitcoin_wallet = "yes"]) +AC_MSG_RESULT($build_bitcoin_wallet) + +AC_MSG_CHECKING([whether to build bitcoin-util]) +AM_CONDITIONAL([BUILD_BITCOIN_UTIL], [test $build_bitcoin_util = "yes"]) +AC_MSG_RESULT($build_bitcoin_util) + +AC_MSG_CHECKING([whether to build experimental bitcoin-chainstate]) +if test "$build_experimental_kernel_lib" = "no"; then +AC_MSG_ERROR([experimental bitcoin-chainstate cannot be built without the experimental bitcoinkernel library. 
Use --with-experimental-kernel-lib]); +else + AM_CONDITIONAL([BUILD_BITCOIN_CHAINSTATE], [test $build_bitcoin_chainstate = "yes"]) +fi +AC_MSG_RESULT($build_bitcoin_chainstate) AC_MSG_CHECKING([whether to build libraries]) -AM_CONDITIONAL([BUILD_BITCOIN_LIBS], [test x$build_bitcoin_libs = xyes]) -if test x$build_bitcoin_libs = xyes; then - AC_DEFINE(HAVE_CONSENSUS_LIB, 1, [Define this symbol if the consensus lib has been built]) +AM_CONDITIONAL([BUILD_BITCOIN_LIBS], [test $build_bitcoin_libs = "yes"]) + +if test "$build_bitcoin_libs" = "yes"; then + AC_DEFINE([HAVE_CONSENSUS_LIB], [1], [Define this symbol if the consensus lib has been built]) + AC_CONFIG_FILES([libbitcoinconsensus.pc:libbitcoinconsensus.pc.in]) fi + +AM_CONDITIONAL([BUILD_BITCOIN_KERNEL_LIB], [test "$build_experimental_kernel_lib" != "no" && ( test "$build_experimental_kernel_lib" = "yes" || test "$build_bitcoin_chainstate" = "yes" )]) + AC_MSG_RESULT($build_bitcoin_libs) AC_LANG_POP -if test "x$use_ccache" != "xno"; then - AC_MSG_CHECKING(if ccache should be used) - if test x$CCACHE = x; then - if test "x$use_ccache" = "xyes"; then +if test "$use_ccache" != "no"; then + AC_MSG_CHECKING([if ccache should be used]) + if test "$CCACHE" = ""; then + if test "$use_ccache" = "yes"; then AC_MSG_ERROR([ccache not found.]); else use_ccache=no @@ -742,149 +1704,265 @@ if test "x$use_ccache" != "xno"; then CXX="$ac_cv_path_CCACHE $CXX" fi AC_MSG_RESULT($use_ccache) -fi -if test "x$use_ccache" = "xyes"; then - AX_CHECK_PREPROC_FLAG([-Qunused-arguments],[CPPFLAGS="-Qunused-arguments $CPPFLAGS"]) + if test "$use_ccache" = "yes"; then + AX_CHECK_COMPILE_FLAG([-fdebug-prefix-map=A=B], [DEBUG_CXXFLAGS="$DEBUG_CXXFLAGS -fdebug-prefix-map=\$(abs_top_srcdir)=."], [], [$CXXFLAG_WERROR]) + AX_CHECK_PREPROC_FLAG([-fmacro-prefix-map=A=B], [DEBUG_CPPFLAGS="$DEBUG_CPPFLAGS -fmacro-prefix-map=\$(abs_top_srcdir)=."], [], [$CXXFLAG_WERROR]) + fi fi dnl enable wallet AC_MSG_CHECKING([if wallet should be enabled]) -if test x$enable_wallet != xno; then - AC_MSG_RESULT(yes) +if test "$enable_wallet" != "no"; then + AC_MSG_RESULT([yes]) AC_DEFINE_UNQUOTED([ENABLE_WALLET],[1],[Define to 1 to enable wallet functions]) + enable_wallet=yes else - AC_MSG_RESULT(no) + AC_MSG_RESULT([no]) fi dnl enable upnp support AC_MSG_CHECKING([whether to build with support for UPnP]) -if test x$have_miniupnpc = xno; then - if test x$use_upnp = xyes; then - AC_MSG_ERROR("UPnP requested but cannot be built. use --without-miniupnpc") +if test "$have_miniupnpc" = "no"; then + if test "$use_upnp" = "yes"; then + AC_MSG_ERROR([UPnP requested but cannot be built. Use --without-miniupnpc]) fi - AC_MSG_RESULT(no) + AC_MSG_RESULT([no]) + use_upnp=no else - if test x$use_upnp != xno; then - AC_MSG_RESULT(yes) + if test "$use_upnp" != "no"; then + AC_MSG_RESULT([yes]) AC_MSG_CHECKING([whether to build with UPnP enabled by default]) use_upnp=yes upnp_setting=0 - if test x$use_upnp_default != xno; then + if test "$use_upnp_default" != "no"; then use_upnp_default=yes upnp_setting=1 fi - AC_MSG_RESULT($use_upnp_default) + AC_MSG_RESULT([$use_upnp_default]) AC_DEFINE_UNQUOTED([USE_UPNP],[$upnp_setting],[UPnP support not compiled if undefined, otherwise value (0 or 1) determines default state]) - if test x$TARGET_OS = xwindows; then - MINIUPNPC_CPPFLAGS="-DSTATICLIB -DMINIUPNP_STATICLIB" + if test "$TARGET_OS" = "windows"; then + MINIUPNPC_CPPFLAGS="$MINIUPNPC_CPPFLAGS -DSTATICLIB -DMINIUPNP_STATICLIB" fi else - AC_MSG_RESULT(no) + AC_MSG_RESULT([no]) + fi +fi + +dnl Enable NAT-PMP support. 
+AC_MSG_CHECKING([whether to build with support for NAT-PMP]) +if test "$have_natpmp" = "no"; then + if test "$use_natpmp" = "yes"; then + AC_MSG_ERROR([NAT-PMP requested but cannot be built. Use --without-natpmp]) + fi + AC_MSG_RESULT([no]) + use_natpmp=no +else + if test "$use_natpmp" != "no"; then + AC_MSG_RESULT([yes]) + AC_MSG_CHECKING([whether to build with NAT-PMP enabled by default]) + use_natpmp=yes + natpmp_setting=0 + if test "$use_natpmp_default" != "no"; then + use_natpmp_default=yes + natpmp_setting=1 + fi + AC_MSG_RESULT($use_natpmp_default) + AC_DEFINE_UNQUOTED([USE_NATPMP], [$natpmp_setting], [NAT-PMP support not compiled if undefined, otherwise value (0 or 1) determines default state]) + if test "$TARGET_OS" = "windows"; then + NATPMP_CPPFLAGS="$NATPMP_CPPFLAGS -DSTATICLIB -DNATPMP_STATICLIB" + fi + else + AC_MSG_RESULT([no]) fi fi dnl these are only used when qt is enabled -if test x$bitcoin_enable_qt != xno; then - BUILD_QT=qt +BUILD_TEST_QT="" +if test "$bitcoin_enable_qt" != "no"; then dnl enable dbus support AC_MSG_CHECKING([whether to build GUI with support for D-Bus]) - if test x$bitcoin_enable_qt_dbus != xno; then - AC_DEFINE([USE_DBUS],[1],[Define if dbus support should be compiled in]) + if test "$bitcoin_enable_qt_dbus" != "no"; then + AC_DEFINE([USE_DBUS], [1], [Define if dbus support should be compiled in]) fi - AC_MSG_RESULT($bitcoin_enable_qt_dbus) + AC_MSG_RESULT([$bitcoin_enable_qt_dbus]) dnl enable qr support AC_MSG_CHECKING([whether to build GUI with support for QR codes]) - if test x$have_qrencode = xno; then - if test x$use_qr = xyes; then - AC_MSG_ERROR("QR support requested but cannot be built. use --without-qrencode") + if test "$have_qrencode" = "no"; then + if test "$use_qr" = "yes"; then + AC_MSG_ERROR([QR support requested but cannot be built. Use --without-qrencode]) fi - AC_MSG_RESULT(no) + use_qr=no else - if test x$use_qr != xno; then - AC_MSG_RESULT(yes) - AC_DEFINE([USE_QRCODE],[1],[Define if QR support should be compiled in]) + if test "$use_qr" != "no"; then + AC_DEFINE([USE_QRCODE], [1], [Define if QR support should be compiled in]) use_qr=yes - else - AC_MSG_RESULT(no) fi fi + AC_MSG_RESULT([$use_qr]) - if test x$XGETTEXT = x; then - AC_MSG_WARN("xgettext is required to update qt translations") + if test "$XGETTEXT" = ""; then + AC_MSG_WARN([xgettext is required to update qt translations]) fi AC_MSG_CHECKING([whether to build test_bitcoin-qt]) - if test x$use_tests$bitcoin_enable_qt_test = xyesyes; then + if test "$use_gui_tests$bitcoin_enable_qt_test" = "yesyes"; then AC_MSG_RESULT([yes]) - BUILD_TEST_QT="test" + BUILD_TEST_QT="yes" else AC_MSG_RESULT([no]) fi fi +AM_CONDITIONAL([ENABLE_ZMQ], [test "$use_zmq" = "yes"]) + AC_MSG_CHECKING([whether to build test_bitcoin]) -if test x$use_tests = xyes; then - AC_MSG_RESULT([yes]) - BUILD_TEST="test" +if test "$use_tests" = "yes"; then + if test "$enable_fuzz" = "yes"; then + AC_MSG_RESULT([no, because fuzzing is enabled]) + else + AC_MSG_RESULT([yes]) + fi + BUILD_TEST="yes" else AC_MSG_RESULT([no]) + BUILD_TEST="" fi AC_MSG_CHECKING([whether to reduce exports]) -if test x$use_reduce_exports != xno; then +if test "$use_reduce_exports" = "yes"; then AC_MSG_RESULT([yes]) else AC_MSG_RESULT([no]) fi -if test x$build_bitcoin_utils$build_bitcoin_libs$build_bitcoind$bitcoin_enable_qt$use_tests = xnonononono; then - AC_MSG_ERROR([No targets! 
Please specify at least one of: --with-utils --with-libs --with-daemon --with-gui or --enable-tests]) -fi - -AM_CONDITIONAL([TARGET_DARWIN], [test x$TARGET_OS = xdarwin]) -AM_CONDITIONAL([BUILD_DARWIN], [test x$BUILD_OS = xdarwin]) -AM_CONDITIONAL([TARGET_WINDOWS], [test x$TARGET_OS = xwindows]) -AM_CONDITIONAL([ENABLE_WALLET],[test x$enable_wallet = xyes]) -AM_CONDITIONAL([ENABLE_TESTS],[test x$use_tests = xyes]) -AM_CONDITIONAL([ENABLE_QT],[test x$bitcoin_enable_qt = xyes]) -AM_CONDITIONAL([ENABLE_QT_TESTS],[test x$use_tests$bitcoin_enable_qt_test = xyesyes]) -AM_CONDITIONAL([USE_QRCODE], [test x$use_qr = xyes]) -AM_CONDITIONAL([USE_LCOV],[test x$use_lcov = xyes]) -AM_CONDITIONAL([USE_COMPARISON_TOOL],[test x$use_comparison_tool != xno]) -AM_CONDITIONAL([USE_COMPARISON_TOOL_REORG_TESTS],[test x$use_comparison_tool_reorg_test != xno]) -AM_CONDITIONAL([GLIBC_BACK_COMPAT],[test x$use_glibc_compat = xyes]) -AM_CONDITIONAL([USE_LIBSECP256K1],[test x$use_libsecp256k1 = xyes]) - -AC_DEFINE(CLIENT_VERSION_MAJOR, _CLIENT_VERSION_MAJOR, [Major version]) -AC_DEFINE(CLIENT_VERSION_MINOR, _CLIENT_VERSION_MINOR, [Minor version]) -AC_DEFINE(CLIENT_VERSION_REVISION, _CLIENT_VERSION_REVISION, [Build revision]) -AC_DEFINE(CLIENT_VERSION_BUILD, _CLIENT_VERSION_BUILD, [Version Build]) -AC_DEFINE(CLIENT_VERSION_IS_RELEASE, _CLIENT_VERSION_IS_RELEASE, [Version is release]) -AC_DEFINE(COPYRIGHT_YEAR, _COPYRIGHT_YEAR, [Version is release]) +if test "$build_bitcoin_wallet$build_bitcoin_cli$build_bitcoin_tx$build_bitcoin_libs$build_bitcoind$bitcoin_enable_qt$use_bench$use_tests" = "nononononononono"; then + AC_MSG_ERROR([No targets! Please specify at least one of: --with-utils --with-libs --with-daemon --with-gui --enable-bench or --enable-tests]) +fi + +AM_CONDITIONAL([TARGET_DARWIN], [test "$TARGET_OS" = "darwin"]) +AM_CONDITIONAL([BUILD_DARWIN], [test "$BUILD_OS" = "darwin"]) +AM_CONDITIONAL([TARGET_LINUX], [test "$TARGET_OS" = "linux"]) +AM_CONDITIONAL([TARGET_WINDOWS], [test "$TARGET_OS" = "windows"]) +AM_CONDITIONAL([ENABLE_WALLET], [test "$enable_wallet" = "yes"]) +AM_CONDITIONAL([USE_SQLITE], [test "$use_sqlite" = "yes"]) +AM_CONDITIONAL([USE_BDB], [test "$use_bdb" = "yes"]) +AM_CONDITIONAL([ENABLE_TESTS], [test "$BUILD_TEST" = "yes"]) +AM_CONDITIONAL([ENABLE_FUZZ], [test "$enable_fuzz" = "yes"]) +AM_CONDITIONAL([ENABLE_FUZZ_BINARY], [test "$enable_fuzz_binary" = "yes"]) +AM_CONDITIONAL([ENABLE_QT], [test "$bitcoin_enable_qt" = "yes"]) +AM_CONDITIONAL([ENABLE_QT_TESTS], [test "$BUILD_TEST_QT" = "yes"]) +AM_CONDITIONAL([ENABLE_BENCH], [test "$use_bench" = "yes"]) +AM_CONDITIONAL([USE_QRCODE], [test "$use_qr" = "yes"]) +AM_CONDITIONAL([USE_LCOV], [test "$use_lcov" = "yes"]) +AM_CONDITIONAL([USE_LIBEVENT], [test "$use_libevent" = "yes"]) +AM_CONDITIONAL([HARDEN], [test "$use_hardening" = "yes"]) +AM_CONDITIONAL([ENABLE_SSE42], [test "$enable_sse42" = "yes"]) +AM_CONDITIONAL([ENABLE_SSE41], [test "$enable_sse41" = "yes"]) +AM_CONDITIONAL([ENABLE_AVX2], [test "$enable_avx2" = "yes"]) +AM_CONDITIONAL([ENABLE_X86_SHANI], [test "$enable_x86_shani" = "yes"]) +AM_CONDITIONAL([ENABLE_ARM_CRC], [test "$enable_arm_crc" = "yes"]) +AM_CONDITIONAL([ENABLE_ARM_SHANI], [test "$enable_arm_shani" = "yes"]) +AM_CONDITIONAL([USE_ASM], [test "$use_asm" = "yes"]) +AM_CONDITIONAL([WORDS_BIGENDIAN], [test "$ac_cv_c_bigendian" = "yes"]) +AM_CONDITIONAL([USE_NATPMP], [test "$use_natpmp" = "yes"]) +AM_CONDITIONAL([USE_UPNP], [test "$use_upnp" = "yes"]) + +dnl for minisketch +AM_CONDITIONAL([ENABLE_CLMUL], [test "$enable_clmul" = 
"yes"]) +AM_CONDITIONAL([HAVE_CLZ], [test "$have_clzl$have_clzll" = "yesyes"]) + +AC_DEFINE([CLIENT_VERSION_MAJOR], [_CLIENT_VERSION_MAJOR], [Major version]) +AC_DEFINE([CLIENT_VERSION_MINOR], [_CLIENT_VERSION_MINOR], [Minor version]) +AC_DEFINE([CLIENT_VERSION_BUILD], [_CLIENT_VERSION_BUILD], [Version Build]) +AC_DEFINE([CLIENT_VERSION_IS_RELEASE], [_CLIENT_VERSION_IS_RELEASE], [Version is release]) +AC_DEFINE([COPYRIGHT_YEAR], [_COPYRIGHT_YEAR], [Copyright year]) +AC_DEFINE([COPYRIGHT_HOLDERS], ["_COPYRIGHT_HOLDERS"], [Copyright holder(s) before %s replacement]) +AC_DEFINE([COPYRIGHT_HOLDERS_SUBSTITUTION], ["_COPYRIGHT_HOLDERS_SUBSTITUTION"], [Replacement for %s in copyright holders string]) +define(_COPYRIGHT_HOLDERS_FINAL, [patsubst(_COPYRIGHT_HOLDERS, [%s], [_COPYRIGHT_HOLDERS_SUBSTITUTION])]) +AC_DEFINE([COPYRIGHT_HOLDERS_FINAL], ["_COPYRIGHT_HOLDERS_FINAL"], [Copyright holder(s)]) AC_SUBST(CLIENT_VERSION_MAJOR, _CLIENT_VERSION_MAJOR) AC_SUBST(CLIENT_VERSION_MINOR, _CLIENT_VERSION_MINOR) -AC_SUBST(CLIENT_VERSION_REVISION, _CLIENT_VERSION_REVISION) AC_SUBST(CLIENT_VERSION_BUILD, _CLIENT_VERSION_BUILD) AC_SUBST(CLIENT_VERSION_IS_RELEASE, _CLIENT_VERSION_IS_RELEASE) AC_SUBST(COPYRIGHT_YEAR, _COPYRIGHT_YEAR) +AC_SUBST(COPYRIGHT_HOLDERS, "_COPYRIGHT_HOLDERS") +AC_SUBST(COPYRIGHT_HOLDERS_SUBSTITUTION, "_COPYRIGHT_HOLDERS_SUBSTITUTION") +AC_SUBST(COPYRIGHT_HOLDERS_FINAL, "_COPYRIGHT_HOLDERS_FINAL") +AC_SUBST(BITCOIN_DAEMON_NAME) +AC_SUBST(BITCOIN_GUI_NAME) +AC_SUBST(BITCOIN_CLI_NAME) +AC_SUBST(BITCOIN_TX_NAME) +AC_SUBST(BITCOIN_UTIL_NAME) +AC_SUBST(BITCOIN_CHAINSTATE_NAME) +AC_SUBST(BITCOIN_WALLET_TOOL_NAME) +AC_SUBST(BITCOIN_MP_NODE_NAME) +AC_SUBST(BITCOIN_MP_GUI_NAME) AC_SUBST(RELDFLAGS) +AC_SUBST(CORE_LDFLAGS) +AC_SUBST(CORE_CPPFLAGS) +AC_SUBST(CORE_CXXFLAGS) +AC_SUBST(DEBUG_CPPFLAGS) +AC_SUBST(WARN_CXXFLAGS) +AC_SUBST(NOWARN_CXXFLAGS) +AC_SUBST(DEBUG_CXXFLAGS) +AC_SUBST(ERROR_CXXFLAGS) +AC_SUBST(GPROF_CXXFLAGS) +AC_SUBST(GPROF_LDFLAGS) +AC_SUBST(HARDENED_CXXFLAGS) +AC_SUBST(HARDENED_CPPFLAGS) +AC_SUBST(HARDENED_LDFLAGS) +AC_SUBST(LTO_CXXFLAGS) +AC_SUBST(LTO_LDFLAGS) +AC_SUBST(PIC_FLAGS) +AC_SUBST(PIE_FLAGS) +AC_SUBST(SANITIZER_CXXFLAGS) +AC_SUBST(SANITIZER_LDFLAGS) +AC_SUBST(SSE42_CXXFLAGS) +AC_SUBST(SSE41_CXXFLAGS) +AC_SUBST(CLMUL_CXXFLAGS) +AC_SUBST(AVX2_CXXFLAGS) +AC_SUBST(X86_SHANI_CXXFLAGS) +AC_SUBST(ARM_CRC_CXXFLAGS) +AC_SUBST(ARM_SHANI_CXXFLAGS) AC_SUBST(LIBTOOL_APP_LDFLAGS) +AC_SUBST(USE_SQLITE) +AC_SUBST(USE_BDB) +AC_SUBST(ENABLE_EXTERNAL_SIGNER) AC_SUBST(USE_UPNP) AC_SUBST(USE_QRCODE) -AC_SUBST(BOOST_LIBS) AC_SUBST(TESTDEFS) -AC_SUBST(LEVELDB_TARGET_FLAGS) -AC_SUBST(BUILD_TEST) -AC_SUBST(BUILD_QT) -AC_SUBST(BUILD_TEST_QT) AC_SUBST(MINIUPNPC_CPPFLAGS) AC_SUBST(MINIUPNPC_LIBS) -AC_CONFIG_FILES([Makefile src/Makefile share/setup.nsi share/qt/Info.plist src/test/buildenv.py]) -AC_CONFIG_FILES([qa/pull-tester/run-bitcoind-for-test.sh],[chmod +x qa/pull-tester/run-bitcoind-for-test.sh]) -AC_CONFIG_FILES([qa/pull-tester/tests-config.sh],[chmod +x qa/pull-tester/tests-config.sh]) +AC_SUBST(NATPMP_CPPFLAGS) +AC_SUBST(NATPMP_LIBS) +AC_SUBST(HAVE_GMTIME_R) +AC_SUBST(HAVE_FDATASYNC) +AC_SUBST(HAVE_FULLFSYNC) +AC_SUBST(HAVE_O_CLOEXEC) +AC_SUBST(HAVE_BUILTIN_PREFETCH) +AC_SUBST(HAVE_MM_PREFETCH) +AC_SUBST(HAVE_STRONG_GETAUXVAL) +AC_SUBST(ANDROID_ARCH) +AC_SUBST(HAVE_EVHTTP_CONNECTION_GET_PEER_CONST_CHAR) +AC_CONFIG_FILES([Makefile src/Makefile doc/man/Makefile share/setup.nsi share/qt/Info.plist test/config.ini]) +AC_CONFIG_FILES([contrib/devtools/split-debug.sh],[chmod +x 
contrib/devtools/split-debug.sh]) +AM_COND_IF([HAVE_DOXYGEN], [AC_CONFIG_FILES([doc/Doxyfile])]) +AC_CONFIG_LINKS([contrib/devtools/security-check.py:contrib/devtools/security-check.py]) +AC_CONFIG_LINKS([contrib/devtools/symbol-check.py:contrib/devtools/symbol-check.py]) +AC_CONFIG_LINKS([contrib/devtools/test-security-check.py:contrib/devtools/test-security-check.py]) +AC_CONFIG_LINKS([contrib/devtools/test-symbol-check.py:contrib/devtools/test-symbol-check.py]) +AC_CONFIG_LINKS([contrib/devtools/iwyu/bitcoin.core.imp:contrib/devtools/iwyu/bitcoin.core.imp]) +AC_CONFIG_LINKS([contrib/filter-lcov.py:contrib/filter-lcov.py]) +AC_CONFIG_LINKS([contrib/macdeploy/background.tiff:contrib/macdeploy/background.tiff]) +AC_CONFIG_LINKS([src/.bear-tidy-config:src/.bear-tidy-config]) +AC_CONFIG_LINKS([src/.clang-tidy:src/.clang-tidy]) +AC_CONFIG_LINKS([test/functional/test_runner.py:test/functional/test_runner.py]) +AC_CONFIG_LINKS([test/fuzz/test_runner.py:test/fuzz/test_runner.py]) +AC_CONFIG_LINKS([test/util/test_runner.py:test/util/test_runner.py]) +AC_CONFIG_LINKS([test/util/rpcauth-test.py:test/util/rpcauth-test.py]) dnl boost's m4 checks do something really nasty: they export these vars. As a dnl result, they leak into secp256k1's configure and crazy things happen. @@ -901,26 +1979,60 @@ LIBS_TEMP="$LIBS" unset LIBS LIBS="$LIBS_TEMP" -PKGCONFIG_PATH_TEMP="$PKG_CONFIG_PATH" -unset PKG_CONFIG_PATH -PKG_CONFIG_PATH="$PKGCONFIG_PATH_TEMP" - -PKGCONFIG_LIBDIR_TEMP="$PKG_CONFIG_LIBDIR" -unset PKG_CONFIG_LIBDIR -PKG_CONFIG_LIBDIR="$PKGCONFIG_LIBDIR_TEMP" - -ac_configure_args="${ac_configure_args} --disable-shared --with-pic" +ac_configure_args="${ac_configure_args} --disable-shared --with-pic --enable-benchmark=no --enable-module-recovery --enable-module-schnorrsig" AC_CONFIG_SUBDIRS([src/secp256k1]) AC_OUTPUT -dnl Taken from https://wiki.debian.org/RpathIssue -case $host in - *-*-linux-gnu) - AC_MSG_RESULT([Fixing libtool for -rpath problems.]) - sed < libtool > libtool-2 \ - 's/^hardcode_libdir_flag_spec.*$'/'hardcode_libdir_flag_spec=" -D__LIBTOOL_IS_A_FOOL__ "/' - mv libtool-2 libtool - chmod 755 libtool +dnl Replace the BUILDDIR path with the correct Windows path if compiling on Native Windows +case ${OS} in + *Windows*) + sed 's/BUILDDIR="\/\([[a-z]]\)/BUILDDIR="\1:/' test/config.ini > test/config-2.ini + mv test/config-2.ini test/config.ini ;; esac + +echo +echo "Options used to compile and link:" +echo " external signer = $use_external_signer" +echo " multiprocess = $build_multiprocess" +echo " with experimental syscall sandbox support = $use_syscall_sandbox" +echo " with libs = $build_bitcoin_libs" +echo " with wallet = $enable_wallet" +if test "$enable_wallet" != "no"; then + echo " with sqlite = $use_sqlite" + echo " with bdb = $use_bdb" +fi +echo " with gui / qt = $bitcoin_enable_qt" +if test $bitcoin_enable_qt != "no"; then + echo " with qr = $use_qr" +fi +echo " with zmq = $use_zmq" +if test $enable_fuzz = "no"; then + echo " with test = $use_tests" +else + echo " with test = not building test_bitcoin because fuzzing is enabled" +fi +echo " with fuzz binary = $enable_fuzz_binary" +echo " with bench = $use_bench" +echo " with upnp = $use_upnp" +echo " with natpmp = $use_natpmp" +echo " use asm = $use_asm" +echo " USDT tracing = $use_usdt" +echo " sanitizers = $use_sanitizers" +echo " debug enabled = $enable_debug" +echo " gprof enabled = $enable_gprof" +echo " werror = $enable_werror" +echo " LTO = $enable_lto" +echo +echo " target os = $host_os" +echo " build os = $build_os" +echo 
+echo " CC = $CC" +echo " CFLAGS = $PTHREAD_CFLAGS $CFLAGS" +echo " CPPFLAGS = $DEBUG_CPPFLAGS $HARDENED_CPPFLAGS $CORE_CPPFLAGS $CPPFLAGS" +echo " CXX = $CXX" +echo " CXXFLAGS = $LTO_CXXFLAGS $DEBUG_CXXFLAGS $HARDENED_CXXFLAGS $WARN_CXXFLAGS $NOWARN_CXXFLAGS $ERROR_CXXFLAGS $GPROF_CXXFLAGS $CORE_CXXFLAGS $CXXFLAGS" +echo " LDFLAGS = $LTO_LDFLAGS $PTHREAD_LIBS $HARDENED_LDFLAGS $GPROF_LDFLAGS $CORE_LDFLAGS $LDFLAGS" +echo " ARFLAGS = $ARFLAGS" +echo diff --git a/contrib/README.md b/contrib/README.md index dae975e9efc2c..ae1372e95def3 100644 --- a/contrib/README.md +++ b/contrib/README.md @@ -1,20 +1,12 @@ -Wallet Tools ---------------------- - -### [BitRPC](/contrib/bitrpc) ### -Allows for sending of all standard Bitcoin commands via RPC rather than as command line args. - -### [SpendFrom](/contrib/spendfrom) ### - -Use the raw transactions API to send coins received on a particular -address (or addresses). - Repository Tools --------------------- ### [Developer tools](/contrib/devtools) ### Specific tools for developers working on this repository. -Contains the script `github-merge.sh` for merging github pull requests securely and signing them using GPG. +Additional tools, including the `github-merge.py` script, are available in the [maintainer-tools](https://github.com/bitcoin-core/bitcoin-maintainer-tools) repository. + +### [Verify-Commits](/contrib/verify-commits) ### +Tool to verify that every merge commit was signed by a developer using the `github-merge.py` script. ### [Linearize](/contrib/linearize) ### Construct a linear, no-fork, best version of the blockchain. @@ -29,28 +21,22 @@ Utility to generate the pnSeed[] array that is compiled into the client. Build Tools and Keys --------------------- -### [Debian](/contrib/debian) ### -Contains files used to package bitcoind/bitcoin-qt -for Debian-based Linux systems. If you compile bitcoind/bitcoin-qt yourself, there are some useful files here. +### Packaging ### +The [Debian](/contrib/debian) subfolder contains the copyright file. -### [Gitian-descriptors](/contrib/gitian-descriptors) ### -Gavin's notes on getting gitian builds up and running using KVM. +All other packaging related files can be found in the [bitcoin-core/packaging](https://github.com/bitcoin-core/packaging) repository. -### [Gitian-downloader](/contrib/gitian-downloader) -Various PGP files of core developers. +### [Builder keys](/contrib/builder-keys) +PGP keys used for signing Bitcoin Core [release](/doc/release-process.md) results. ### [MacDeploy](/contrib/macdeploy) ### -Scripts and notes for Mac builds. +Scripts and notes for Mac builds. -Test and Verify Tools +Test and Verify Tools --------------------- ### [TestGen](/contrib/testgen) ### Utilities to generate test vectors for the data-driven Bitcoin tests. -### [Test Patches](/contrib/test-patches) ### -These patches are applied when the automated pull-tester -tests each pull and when master is tested using jenkins. - -### [Verify SF Binaries](/contrib/verifysfbinaries) ### -This script attempts to download and verify the signature file SHA256SUMS.asc from SourceForge. +### [Verify Binaries](/contrib/verifybinaries) ### +This script attempts to download and verify the signature file SHA256SUMS.asc from bitcoin.org. 
diff --git a/contrib/bitcoin-cli.bash-completion b/contrib/bitcoin-cli.bash-completion new file mode 100644 index 0000000000000..ddea58a05cc1a --- /dev/null +++ b/contrib/bitcoin-cli.bash-completion @@ -0,0 +1,141 @@ +# bash programmable completion for bitcoin-cli(1) +# Copyright (c) 2012-2019 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. + +# call $bitcoin-cli for RPC +_bitcoin_rpc() { + # determine already specified args necessary for RPC + local rpcargs=() + for i in ${COMP_LINE}; do + case "$i" in + -conf=*|-datadir=*|-regtest|-rpc*|-testnet) + rpcargs=( "${rpcargs[@]}" "$i" ) + ;; + esac + done + $bitcoin_cli "${rpcargs[@]}" "$@" +} + +_bitcoin_cli() { + local cur prev words=() cword + local bitcoin_cli + + # save and use original argument to invoke bitcoin-cli for -help, help and RPC + # as bitcoin-cli might not be in $PATH + bitcoin_cli="$1" + + COMPREPLY=() + _get_comp_words_by_ref -n = cur prev words cword + + if ((cword > 5)); then + case ${words[cword-5]} in + sendtoaddress) + COMPREPLY=( $( compgen -W "true false" -- "$cur" ) ) + return 0 + ;; + esac + fi + + if ((cword > 4)); then + case ${words[cword-4]} in + importaddress|listtransactions|setban) + COMPREPLY=( $( compgen -W "true false" -- "$cur" ) ) + return 0 + ;; + signrawtransactionwithkey|signrawtransactionwithwallet) + COMPREPLY=( $( compgen -W "ALL NONE SINGLE ALL|ANYONECANPAY NONE|ANYONECANPAY SINGLE|ANYONECANPAY" -- "$cur" ) ) + return 0 + ;; + esac + fi + + if ((cword > 3)); then + case ${words[cword-3]} in + addmultisigaddress) + return 0 + ;; + getbalance|gettxout|importaddress|importpubkey|importprivkey|listreceivedbyaddress|listsinceblock) + COMPREPLY=( $( compgen -W "true false" -- "$cur" ) ) + return 0 + ;; + esac + fi + + if ((cword > 2)); then + case ${words[cword-2]} in + addnode) + COMPREPLY=( $( compgen -W "add remove onetry" -- "$cur" ) ) + return 0 + ;; + setban) + COMPREPLY=( $( compgen -W "add remove" -- "$cur" ) ) + return 0 + ;; + fundrawtransaction|getblock|getblockheader|getmempoolancestors|getmempooldescendants|getrawtransaction|gettransaction|listreceivedbyaddress|sendrawtransaction) + COMPREPLY=( $( compgen -W "true false" -- "$cur" ) ) + return 0 + ;; + esac + fi + + case "$prev" in + backupwallet|dumpwallet|importwallet) + _filedir + return 0 + ;; + getaddednodeinfo|getrawmempool|lockunspent) + COMPREPLY=( $( compgen -W "true false" -- "$cur" ) ) + return 0 + ;; + getbalance|getnewaddress|listtransactions|sendmany) + return 0 + ;; + esac + + case "$cur" in + -conf=*) + cur="${cur#*=}" + _filedir + return 0 + ;; + -datadir=*) + cur="${cur#*=}" + _filedir -d + return 0 + ;; + -*=*) # prevent nonsense completions + return 0 + ;; + *) + local helpopts commands + + # only parse -help if senseful + if [[ -z "$cur" || "$cur" =~ ^- ]]; then + helpopts=$($bitcoin_cli -help 2>&1 | awk '$1 ~ /^-/ { sub(/=.*/, "="); print $1 }' ) + fi + + # only parse help if senseful + if [[ -z "$cur" || "$cur" =~ ^[a-z] ]]; then + commands=$(_bitcoin_rpc help 2>/dev/null | awk '$1 ~ /^[a-z]/ { print $1; }') + fi + + COMPREPLY=( $( compgen -W "$helpopts $commands" -- "$cur" ) ) + + # Prevent space if an argument is desired + if [[ $COMPREPLY == *= ]]; then + compopt -o nospace + fi + return 0 + ;; + esac +} && +complete -F _bitcoin_cli bitcoin-cli + +# Local variables: +# mode: shell-script +# sh-basic-offset: 4 +# sh-indent-comment: t +# indent-tabs-mode: nil +# End: +# ex: ts=4 sw=4 et filetype=sh 
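
The `_bitcoin_cli` completion function above only takes effect once bash has sourced the file. A minimal sketch of enabling it, either for the current shell or system-wide, is shown below; the `/etc/bash_completion.d` path is the conventional drop-in directory on many distributions and is an assumption, not something the file itself requires.

```sh
# Enable the completions in the current shell only
source contrib/bitcoin-cli.bash-completion

# Or install for all users (conventional drop-in directory; adjust for your distro)
sudo install -m 0644 contrib/bitcoin-cli.bash-completion /etc/bash_completion.d/bitcoin-cli
```

After that, `bitcoin-cli <Tab>` completes both `-`-prefixed options and RPC command names, since the function parses `bitcoin-cli -help` and the `help` RPC output at completion time.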
diff --git a/contrib/bitcoin-qt.pro b/contrib/bitcoin-qt.pro deleted file mode 100644 index 3a72d10f4732b..0000000000000 --- a/contrib/bitcoin-qt.pro +++ /dev/null @@ -1,21 +0,0 @@ -FORMS += \ - ../src/qt/forms/aboutdialog.ui \ - ../src/qt/forms/addressbookpage.ui \ - ../src/qt/forms/askpassphrasedialog.ui \ - ../src/qt/forms/coincontroldialog.ui \ - ../src/qt/forms/editaddressdialog.ui \ - ../src/qt/forms/helpmessagedialog.ui \ - ../src/qt/forms/intro.ui \ - ../src/qt/forms/openuridialog.ui \ - ../src/qt/forms/optionsdialog.ui \ - ../src/qt/forms/overviewpage.ui \ - ../src/qt/forms/receivecoinsdialog.ui \ - ../src/qt/forms/receiverequestdialog.ui \ - ../src/qt/forms/rpcconsole.ui \ - ../src/qt/forms/sendcoinsdialog.ui \ - ../src/qt/forms/sendcoinsentry.ui \ - ../src/qt/forms/signverifymessagedialog.ui \ - ../src/qt/forms/transactiondescdialog.ui \ - -RESOURCES += \ - ../src/qt/bitcoin.qrc diff --git a/contrib/bitcoin-tx.bash-completion b/contrib/bitcoin-tx.bash-completion new file mode 100644 index 0000000000000..a83d2979ed3a8 --- /dev/null +++ b/contrib/bitcoin-tx.bash-completion @@ -0,0 +1,57 @@ +# bash programmable completion for bitcoin-tx(1) +# Copyright (c) 2016 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. + +_bitcoin_tx() { + local cur prev words=() cword + local bitcoin_tx + + # save and use original argument to invoke bitcoin-tx for -help + # it might not be in $PATH + bitcoin_tx="$1" + + COMPREPLY=() + _get_comp_words_by_ref -n =: cur prev words cword + + case "$cur" in + load=*:*) + cur="${cur#load=*:}" + _filedir + return 0 + ;; + *=*) # prevent attempts to complete other arguments + return 0 + ;; + esac + + if [[ "$cword" == 1 || ( "$prev" != "-create" && "$prev" == -* ) ]]; then + # only options (or an uncompletable hex-string) allowed + # parse bitcoin-tx -help for options + local helpopts + helpopts=$($bitcoin_tx -help | sed -e '/^ -/ p' -e d ) + COMPREPLY=( $( compgen -W "$helpopts" -- "$cur" ) ) + else + # only commands are allowed + # parse -help for commands + local helpcmds + helpcmds=$($bitcoin_tx -help | sed -e '1,/Commands:/d' -e 's/=.*/=/' -e '/^ [a-z]/ p' -e d ) + COMPREPLY=( $( compgen -W "$helpcmds" -- "$cur" ) ) + fi + + # Prevent space if an argument is desired + if [[ $COMPREPLY == *= ]]; then + compopt -o nospace + fi + + return 0 +} && +complete -F _bitcoin_tx bitcoin-tx + +# Local variables: +# mode: shell-script +# sh-basic-offset: 4 +# sh-indent-comment: t +# indent-tabs-mode: nil +# End: +# ex: ts=4 sw=4 et filetype=sh diff --git a/contrib/bitcoind.bash-completion b/contrib/bitcoind.bash-completion index 37ece258996af..ec1d9512d4759 100644 --- a/contrib/bitcoind.bash-completion +++ b/contrib/bitcoind.bash-completion @@ -1,102 +1,21 @@ -# bash programmable completion for bitcoind(1) and bitcoin-cli(1) -# Copyright (c) 2012,2014 Christian von Roques -# Distributed under the MIT/X11 software license, see the accompanying +# bash programmable completion for bitcoind(1) and bitcoin-qt(1) +# Copyright (c) 2012-2019 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
-have bitcoind && { - -# call $bitcoind for RPC -_bitcoin_rpc() { - # determine already specified args necessary for RPC - local rpcargs=() - for i in ${COMP_LINE}; do - case "$i" in - -conf=*|-proxy*|-rpc*) - rpcargs=( "${rpcargs[@]}" "$i" ) - ;; - esac - done - $bitcoind "${rpcargs[@]}" "$@" -} - -# Add bitcoin accounts to COMPREPLY -_bitcoin_accounts() { - local accounts - accounts=$(_bitcoin_rpc listaccounts | awk '/".*"/ { a=$1; gsub(/"/, "", a); print a}') - COMPREPLY=( "${COMPREPLY[@]}" $( compgen -W "$accounts" -- "$cur" ) ) -} - _bitcoind() { local cur prev words=() cword local bitcoind - # save and use original argument to invoke bitcoind - # bitcoind might not be in $PATH + # save and use original argument to invoke bitcoind for -help + # it might not be in $PATH bitcoind="$1" COMPREPLY=() _get_comp_words_by_ref -n = cur prev words cword - if ((cword > 4)); then - case ${words[cword-4]} in - listtransactions) - COMPREPLY=( $( compgen -W "true false" -- "$cur" ) ) - return 0 - ;; - signrawtransaction) - COMPREPLY=( $( compgen -W "ALL NONE SINGLE ALL|ANYONECANPAY NONE|ANYONECANPAY SINGLE|ANYONECANPAY" -- "$cur" ) ) - return 0 - ;; - esac - fi - - if ((cword > 3)); then - case ${words[cword-3]} in - addmultisigaddress) - _bitcoin_accounts - return 0 - ;; - getbalance|gettxout|importaddress|importprivkey|listreceivedbyaccount|listreceivedbyaddress|listsinceblock) - COMPREPLY=( $( compgen -W "true false" -- "$cur" ) ) - return 0 - ;; - esac - fi - - if ((cword > 2)); then - case ${words[cword-2]} in - addnode) - COMPREPLY=( $( compgen -W "add remove onetry" -- "$cur" ) ) - return 0 - ;; - getblock|getrawtransaction|gettransaction|listaccounts|listreceivedbyaccount|listreceivedbyaddress|sendrawtransaction) - COMPREPLY=( $( compgen -W "true false" -- "$cur" ) ) - return 0 - ;; - move|setaccount) - _bitcoin_accounts - return 0 - ;; - esac - fi - - case "$prev" in - backupwallet|dumpwallet|importwallet) - _filedir - return 0 - ;; - getmempool|lockunspent|setgenerate) - COMPREPLY=( $( compgen -W "true false" -- "$cur" ) ) - return 0 - ;; - getaccountaddress|getaddressesbyaccount|getbalance|getnewaddress|getreceivedbyaccount|listtransactions|move|sendfrom|sendmany) - _bitcoin_accounts - return 0 - ;; - esac - case "$cur" in - -conf=*|-pid=*|-loadblock=*|-wallet=*|-rpcsslcertificatechainfile=*|-rpcsslprivatekeyfile=*) + -conf=*|-pid=*|-loadblock=*|-rpccookiefile=*|-wallet=*) cur="${cur#*=}" _filedir return 0 @@ -110,20 +29,14 @@ _bitcoind() { return 0 ;; *) - local helpopts commands - # only parse --help if senseful + # only parse -help if sensible if [[ -z "$cur" || "$cur" =~ ^- ]]; then - helpopts=$($bitcoind --help 2>&1 | awk '$1 ~ /^-/ { sub(/=.*/, "="); print $1 }' ) + local helpopts + helpopts=$($bitcoind -help 2>&1 | awk '$1 ~ /^-/ { sub(/=.*/, "="); print $1 }' ) + COMPREPLY=( $( compgen -W "$helpopts" -- "$cur" ) ) fi - # only parse help if senseful - if [[ -z "$cur" || "$cur" =~ ^[a-z] ]]; then - commands=$(_bitcoin_rpc help 2>/dev/null | awk '$1 ~ /^[a-z]/ { print $1; }') - fi - - COMPREPLY=( $( compgen -W "$helpopts $commands" -- "$cur" ) ) - # Prevent space if an argument is desired if [[ $COMPREPLY == *= ]]; then compopt -o nospace @@ -131,10 +44,8 @@ _bitcoind() { return 0 ;; esac -} - -complete -F _bitcoind bitcoind bitcoin-cli -} +} && +complete -F _bitcoind bitcoind bitcoin-qt # Local variables: # mode: shell-script diff --git a/contrib/bitrpc/README.md b/contrib/bitrpc/README.md deleted file mode 100644 index f5ef2f0405a94..0000000000000 --- a/contrib/bitrpc/README.md +++ 
/dev/null @@ -1,8 +0,0 @@ -### BitRPC -Allows for sending of all standard Bitcoin commands via RPC rather than as command line args. - -### Looking for Wallet Tools? -BitRPC.py is able to do the exact same thing as `walletchangepass.py` and `walletunlock.py`. Their respective commands in BitRPC.py are: - - bitrpc.py walletpassphrasechange - bitrpc.py walletpassphrase \ No newline at end of file diff --git a/contrib/bitrpc/bitrpc.py b/contrib/bitrpc/bitrpc.py deleted file mode 100644 index 02577b1b6aab7..0000000000000 --- a/contrib/bitrpc/bitrpc.py +++ /dev/null @@ -1,337 +0,0 @@ -from jsonrpc import ServiceProxy -import sys -import string -import getpass - -# ===== BEGIN USER SETTINGS ===== -# if you do not set these you will be prompted for a password for every command -rpcuser = "" -rpcpass = "" -# ====== END USER SETTINGS ====== - - -if rpcpass == "": - access = ServiceProxy("http://127.0.0.1:8332") -else: - access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:8332") -cmd = sys.argv[1].lower() - -if cmd == "backupwallet": - try: - path = raw_input("Enter destination path/filename: ") - print access.backupwallet(path) - except: - print "\n---An error occurred---\n" - -elif cmd == "encryptwallet": - try: - pwd = getpass.getpass(prompt="Enter passphrase: ") - pwd2 = getpass.getpass(prompt="Repeat passphrase: ") - if pwd == pwd2: - access.encryptwallet(pwd) - print "\n---Wallet encrypted. Server stopping, restart to run with encrypted wallet---\n" - else: - print "\n---Passphrases do not match---\n" - except: - print "\n---An error occurred---\n" - -elif cmd == "getaccount": - try: - addr = raw_input("Enter a Bitcoin address: ") - print access.getaccount(addr) - except: - print "\n---An error occurred---\n" - -elif cmd == "getaccountaddress": - try: - acct = raw_input("Enter an account name: ") - print access.getaccountaddress(acct) - except: - print "\n---An error occurred---\n" - -elif cmd == "getaddressesbyaccount": - try: - acct = raw_input("Enter an account name: ") - print access.getaddressesbyaccount(acct) - except: - print "\n---An error occurred---\n" - -elif cmd == "getbalance": - try: - acct = raw_input("Enter an account (optional): ") - mc = raw_input("Minimum confirmations (optional): ") - try: - print access.getbalance(acct, mc) - except: - print access.getbalance() - except: - print "\n---An error occurred---\n" - -elif cmd == "getblockbycount": - try: - height = raw_input("Height: ") - print access.getblockbycount(height) - except: - print "\n---An error occurred---\n" - -elif cmd == "getblockcount": - try: - print access.getblockcount() - except: - print "\n---An error occurred---\n" - -elif cmd == "getblocknumber": - try: - print access.getblocknumber() - except: - print "\n---An error occurred---\n" - -elif cmd == "getconnectioncount": - try: - print access.getconnectioncount() - except: - print "\n---An error occurred---\n" - -elif cmd == "getdifficulty": - try: - print access.getdifficulty() - except: - print "\n---An error occurred---\n" - -elif cmd == "getgenerate": - try: - print access.getgenerate() - except: - print "\n---An error occurred---\n" - -elif cmd == "gethashespersec": - try: - print access.gethashespersec() - except: - print "\n---An error occurred---\n" - -elif cmd == "getinfo": - try: - print access.getinfo() - except: - print "\n---An error occurred---\n" - -elif cmd == "getnewaddress": - try: - acct = raw_input("Enter an account name: ") - try: - print access.getnewaddress(acct) - except: - print access.getnewaddress() - except: - print 
"\n---An error occurred---\n" - -elif cmd == "getreceivedbyaccount": - try: - acct = raw_input("Enter an account (optional): ") - mc = raw_input("Minimum confirmations (optional): ") - try: - print access.getreceivedbyaccount(acct, mc) - except: - print access.getreceivedbyaccount() - except: - print "\n---An error occurred---\n" - -elif cmd == "getreceivedbyaddress": - try: - addr = raw_input("Enter a Bitcoin address (optional): ") - mc = raw_input("Minimum confirmations (optional): ") - try: - print access.getreceivedbyaddress(addr, mc) - except: - print access.getreceivedbyaddress() - except: - print "\n---An error occurred---\n" - -elif cmd == "gettransaction": - try: - txid = raw_input("Enter a transaction ID: ") - print access.gettransaction(txid) - except: - print "\n---An error occurred---\n" - -elif cmd == "getwork": - try: - data = raw_input("Data (optional): ") - try: - print access.gettransaction(data) - except: - print access.gettransaction() - except: - print "\n---An error occurred---\n" - -elif cmd == "help": - try: - cmd = raw_input("Command (optional): ") - try: - print access.help(cmd) - except: - print access.help() - except: - print "\n---An error occurred---\n" - -elif cmd == "listaccounts": - try: - mc = raw_input("Minimum confirmations (optional): ") - try: - print access.listaccounts(mc) - except: - print access.listaccounts() - except: - print "\n---An error occurred---\n" - -elif cmd == "listreceivedbyaccount": - try: - mc = raw_input("Minimum confirmations (optional): ") - incemp = raw_input("Include empty? (true/false, optional): ") - try: - print access.listreceivedbyaccount(mc, incemp) - except: - print access.listreceivedbyaccount() - except: - print "\n---An error occurred---\n" - -elif cmd == "listreceivedbyaddress": - try: - mc = raw_input("Minimum confirmations (optional): ") - incemp = raw_input("Include empty? 
(true/false, optional): ") - try: - print access.listreceivedbyaddress(mc, incemp) - except: - print access.listreceivedbyaddress() - except: - print "\n---An error occurred---\n" - -elif cmd == "listtransactions": - try: - acct = raw_input("Account (optional): ") - count = raw_input("Number of transactions (optional): ") - frm = raw_input("Skip (optional):") - try: - print access.listtransactions(acct, count, frm) - except: - print access.listtransactions() - except: - print "\n---An error occurred---\n" - -elif cmd == "move": - try: - frm = raw_input("From: ") - to = raw_input("To: ") - amt = raw_input("Amount:") - mc = raw_input("Minimum confirmations (optional): ") - comment = raw_input("Comment (optional): ") - try: - print access.move(frm, to, amt, mc, comment) - except: - print access.move(frm, to, amt) - except: - print "\n---An error occurred---\n" - -elif cmd == "sendfrom": - try: - frm = raw_input("From: ") - to = raw_input("To: ") - amt = raw_input("Amount:") - mc = raw_input("Minimum confirmations (optional): ") - comment = raw_input("Comment (optional): ") - commentto = raw_input("Comment-to (optional): ") - try: - print access.sendfrom(frm, to, amt, mc, comment, commentto) - except: - print access.sendfrom(frm, to, amt) - except: - print "\n---An error occurred---\n" - -elif cmd == "sendmany": - try: - frm = raw_input("From: ") - to = raw_input("To (in format address1:amount1,address2:amount2,...): ") - mc = raw_input("Minimum confirmations (optional): ") - comment = raw_input("Comment (optional): ") - try: - print access.sendmany(frm,to,mc,comment) - except: - print access.sendmany(frm,to) - except: - print "\n---An error occurred---\n" - -elif cmd == "sendtoaddress": - try: - to = raw_input("To (in format address1:amount1,address2:amount2,...): ") - amt = raw_input("Amount:") - comment = raw_input("Comment (optional): ") - commentto = raw_input("Comment-to (optional): ") - try: - print access.sendtoaddress(to,amt,comment,commentto) - except: - print access.sendtoaddress(to,amt) - except: - print "\n---An error occurred---\n" - -elif cmd == "setaccount": - try: - addr = raw_input("Address: ") - acct = raw_input("Account:") - print access.setaccount(addr,acct) - except: - print "\n---An error occurred---\n" - -elif cmd == "setgenerate": - try: - gen= raw_input("Generate? 
(true/false): ") - cpus = raw_input("Max processors/cores (-1 for unlimited, optional):") - try: - print access.setgenerate(gen, cpus) - except: - print access.setgenerate(gen) - except: - print "\n---An error occurred---\n" - -elif cmd == "settxfee": - try: - amt = raw_input("Amount:") - print access.settxfee(amt) - except: - print "\n---An error occurred---\n" - -elif cmd == "stop": - try: - print access.stop() - except: - print "\n---An error occurred---\n" - -elif cmd == "validateaddress": - try: - addr = raw_input("Address: ") - print access.validateaddress(addr) - except: - print "\n---An error occurred---\n" - -elif cmd == "walletpassphrase": - try: - pwd = getpass.getpass(prompt="Enter wallet passphrase: ") - access.walletpassphrase(pwd, 60) - print "\n---Wallet unlocked---\n" - except: - print "\n---An error occurred---\n" - -elif cmd == "walletpassphrasechange": - try: - pwd = getpass.getpass(prompt="Enter old wallet passphrase: ") - pwd2 = getpass.getpass(prompt="Enter new wallet passphrase: ") - access.walletpassphrasechange(pwd, pwd2) - print - print "\n---Passphrase changed---\n" - except: - print - print "\n---An error occurred---\n" - print - -else: - print "Command not found or not supported" diff --git a/contrib/builder-keys/README.md b/contrib/builder-keys/README.md new file mode 100644 index 0000000000000..a6179d6012cd3 --- /dev/null +++ b/contrib/builder-keys/README.md @@ -0,0 +1,33 @@ +## PGP keys of builders and Developers + +The file `keys.txt` contains fingerprints of the public keys of builders and +active developers. + +The associated keys are mainly used to sign git commits or the build results +of Guix builds. + +The most recent version of each pgp key can be found on most pgp key servers. + +Fetch the latest version from the key server to see if any key was revoked in +the meantime. +To fetch the latest version of all pgp keys in your gpg homedir, + +```sh +gpg --refresh-keys +``` + +To fetch keys of builders and active developers, feed the list of fingerprints +of the primary keys into gpg: + +On \*NIX: +```sh +while read fingerprint keyholder_name; do gpg --keyserver hkps://keys.openpgp.org --recv-keys ${fingerprint}; done < ./keys.txt +``` + +On Windows (requires Gpg4win >= 4.0.0): +``` +FOR /F "tokens=1" %i IN (keys.txt) DO gpg --keyserver hkps://keys.openpgp.org --recv-keys %i +``` + +Add your key to the list if you provided Guix attestations for two major or +minor releases of Bitcoin Core. 
diff --git a/contrib/builder-keys/keys.txt b/contrib/builder-keys/keys.txt new file mode 100644 index 0000000000000..c70069b440b03 --- /dev/null +++ b/contrib/builder-keys/keys.txt @@ -0,0 +1,56 @@ +9D3CC86A72F8494342EA5FD10A41BDC3F4FAFF1C Aaron Clauson (sipsorcery) +617C90010B3BD370B0AC7D424BB42E31C79111B8 Akira Takizawa (akx20000) +E944AE667CF960B1004BC32FCA662BE18B877A60 Andreas Schildbach (aschildbach) +152812300785C96444D3334D17565732E08E5E41 Andrew Chow (achow101) +590B7292695AFFA5B672CBB2E13FC145CD3F4304 Antoine Poinsot (darosior) +0AD83877C1F0CD1EE9BD660AD7CC770B81FD22A8 Ben Carman (benthecarman) +912FD3228387123DC97E0E57D5566241A0295FA9 BtcDrak (btcdrak) +04017A2A6D9A0CCDC81D8EC296AB007F1A7ED999 Carl Dong (dongcarl) +C519EBCF3B926298946783EFF6430754120EC2F4 Christian Decker (cdecker) +18AE2F798E0D239755DA4FD24B79F986CBDF8736 Chun Kuan Le (ken2812221) +101598DC823C1B5F9A6624ABA5E0907A0380E6C3 CoinForensics (CoinForensics) +F20F56EF6A067F70E8A5C99FFF95FAA971697405 centaur (centaur) +C060A6635913D98A3587D7DB1C2491FFEB0EF770 Cory Fields (cfields) +BF6273FAEF7CC0BA1F562E50989F6B3048A116B5 Dev Random (devrandom) +6D3170C1DC2C6FD0AEEBCA6743811D1A26623924 Douglas Roark (droark) +948444FCE03B05BA5AB0591EC37B1C1D44C786EE Duncan Dean (dunxen) +1C6621605EC50319C463D56C7F81D87985D61612 Emanuele Cisbani (cisba) +9A1689B60D1B3CCE9262307A2F40A9BF167FBA47 Erik Mossberg (erkmos) +D35176BE9264832E4ACA8986BF0792FBE95DC863 fivepiece (fivepiece) +6F993B250557E7B016ADE5713BDCDA2D87A881D9 Fuzzbawls (Fuzzbawls) +01CDF4627A3B88AAE4A571C87588242FBE38D3A8 Gavin Andresen (gavinandresen) +D1DBF2C4B96F2DEBF4C16654410108112E7EA81F Hennadii Stepanov (hebasto) +A2FD494D0021AA9B4FA58F759102B7AE654A4A5A Ilyas Ridhuan (IlyasRidhuan) +2688F5A9A4BE0F295E921E8A25F27A38A47AD566 James O'Beirne (jamesob) +D3F22A3A4C366C2DCB66D3722DA9C5A7FA81EA35 Jarol Rodriguez (jarolrod) +7480909378D544EA6B6DCEB7535B12980BB8A4D3 Jeffri H Frontz (jhfrontz) +D3CC177286005BB8FF673294C5242A1AB3936517 jl2012 (jl2012) +82921A4B88FD454B7EB8CE3C796C4109063D4EAF Jon Atack (jonatack) +32EE5C4C3FA15CCADB46ABE529D4BCB6416F53EC Jonas Schnelli (jonasschnelli) +4B4E840451149DD7FB0D633477DFAB5C3108B9A8 Jorge Timon (jtimon) +C42AFF7C61B3E44A1454CD3557AF762DB3353322 Karl-Johan Alm (kallewoof) +70A1D47DD44F59DF8B22244333E472FE870C7E5D Kristaps Kaupe (kristapsk) +30DE693AE0DE9E37B3E7EB6BBFF0F67810C1EED1 Lisa Neigut (niftynei) +E463A93F5F3117EEDE6C7316BD02942421F4889F Luke Dashjr (luke-jr) +B8B3F1C0E58C15DB6A81D30C3648A882F4316B9B Marco Falke (marco) +07DF3E57A548CCFB7530709189BBB8663E2E65CE Matt Corallo (BlueMatt) +CA03882CB1FC067B5D3ACFE4D300116E1C875A3D MeshCollider (meshcollider) +E777299FC265DD04793070EB944D35F9AC3DB76A Michael Ford (fanquake) +AD5764F4ADCE1B99BDFD179E12335A271D4D62EC Michael Tidwell (miketwenty1) +9692B91BBF0E8D34DFD33B1882C5C009628ECF0C Michagogo (michagogo) +C57E4B42223FDE851D4F69DD28DF2724F241D8EE midnightmagic (midnightmagic) +F4FC70F07310028424EFC20A8E4256593F177720 Oliver Gugger (guggero, Oliver Gugger) +D62A803E27E7F43486035ADBBCD04D8E9CCCAC2A Paul Rabahy (prab) +37EC7D7B0A217CDB4B4E007E7FAB114267E4FA04 Peter Todd (petertodd) +D762373D24904A3E42F33B08B9A408E71DAAC974 Pieter Wuille [Location: Leuven, Belgium] (sipa) +133EAC179436F14A5CF1B794860FEB804E669320 Pieter Wuille (sipa) +6A8F9C266528E25AEB1D7731C2371D91CB716EA7 Sebastian Falbesoner (theStack) +A8FC55F3B04BA3146F3492E79303B33A305224CB Sebastian Kung (TheCharlatan) +ED9BDF7AD6A55E232E84524257FF9BDBCC301009 Sjors Provoost (sjors) +867345026B6763E8B07EE73AB6737117397F5C4F 
Stephan Oeste (Emzy) +9EDAFF80E080659604F4A76B2EBB056FD847F8A7 Stephan Oeste (Emzy) +6DEEF79B050C4072509B743F8C275BC595448867 Tomas Kanocz (KanoczTomas) +AEC1884398647C47413C1C3FB1179EB7347DC10D Warren Togami (wtogami) +74E2DEF5D77260B98BC19438099BAD163C70FBFA Will Clark (will8clark) +79D00BAC68B56D422F945A8F8E3A8F3247DBCBBF Willy Ko (willyko) +71A3B16735405025D447E8F274810B012346C9A6 Wladimir J. van der Laan (laanwj) diff --git a/contrib/debian/README.md b/contrib/debian/README.md deleted file mode 100644 index fab9cc2381567..0000000000000 --- a/contrib/debian/README.md +++ /dev/null @@ -1,21 +0,0 @@ - -Debian -==================== -This directory contains files used to package bitcoind/bitcoin-qt -for Debian-based Linux systems. If you compile bitcoind/bitcoin-qt yourself, there are some useful files here. - -## bitcoin: URI support ## - - -bitcoin-qt.desktop (Gnome / Open Desktop) -To install: - - sudo desktop-file-install bitcoin-qt.desktop - sudo update-desktop-database - -If you build yourself, you will either need to modify the paths in -the .desktop file or copy or symlink your bitcoin-qt binary to `/usr/bin` -and the `../../share/pixmaps/bitcoin128.png` to `/usr/share/pixmaps` - -bitcoin-qt.protocol (KDE) - diff --git a/contrib/debian/bitcoin-qt.desktop b/contrib/debian/bitcoin-qt.desktop deleted file mode 100644 index 61e1aca6ad0ce..0000000000000 --- a/contrib/debian/bitcoin-qt.desktop +++ /dev/null @@ -1,12 +0,0 @@ -[Desktop Entry] -Encoding=UTF-8 -Name=Bitcoin -Comment=Bitcoin P2P Cryptocurrency -Comment[fr]=Bitcoin, monnaie virtuelle cryptographique pair à pair -Comment[tr]=Bitcoin, eşten eşe kriptografik sanal para birimi -Exec=bitcoin-qt %u -Terminal=false -Type=Application -Icon=bitcoin128 -MimeType=x-scheme-handler/bitcoin; -Categories=Office;Finance; diff --git a/contrib/debian/bitcoin-qt.install b/contrib/debian/bitcoin-qt.install deleted file mode 100644 index e0b32373be68d..0000000000000 --- a/contrib/debian/bitcoin-qt.install +++ /dev/null @@ -1,6 +0,0 @@ -usr/local/bin/bitcoin-qt usr/bin -share/pixmaps/bitcoin32.xpm usr/share/pixmaps -share/pixmaps/bitcoin16.xpm usr/share/pixmaps -share/pixmaps/bitcoin128.png usr/share/pixmaps -debian/bitcoin-qt.desktop usr/share/applications -debian/bitcoin-qt.protocol usr/share/kde4/services/ diff --git a/contrib/debian/bitcoin-qt.lintian-overrides b/contrib/debian/bitcoin-qt.lintian-overrides deleted file mode 100644 index 7fb230eca8921..0000000000000 --- a/contrib/debian/bitcoin-qt.lintian-overrides +++ /dev/null @@ -1,2 +0,0 @@ -# Linked code is Expat - only Debian packaging is GPL-2+ -bitcoin-qt: possible-gpl-code-linked-with-openssl diff --git a/contrib/debian/bitcoin-qt.protocol b/contrib/debian/bitcoin-qt.protocol deleted file mode 100644 index 014588d53679b..0000000000000 --- a/contrib/debian/bitcoin-qt.protocol +++ /dev/null @@ -1,11 +0,0 @@ -[Protocol] -exec=bitcoin-qt '%u' -protocol=bitcoin -input=none -output=none -helper=true -listing= -reading=false -writing=false -makedir=false -deleting=false diff --git a/contrib/debian/bitcoind.bash-completion b/contrib/debian/bitcoind.bash-completion deleted file mode 100644 index 0f84707b66cb7..0000000000000 --- a/contrib/debian/bitcoind.bash-completion +++ /dev/null @@ -1 +0,0 @@ -contrib/bitcoind.bash-completion bitcoind diff --git a/contrib/debian/bitcoind.examples b/contrib/debian/bitcoind.examples deleted file mode 100644 index 4ded67d98e784..0000000000000 --- a/contrib/debian/bitcoind.examples +++ /dev/null @@ -1 +0,0 @@ -debian/examples/bitcoin.conf diff --git 
a/contrib/debian/bitcoind.install b/contrib/debian/bitcoind.install deleted file mode 100644 index 798ea851f6ef8..0000000000000 --- a/contrib/debian/bitcoind.install +++ /dev/null @@ -1,2 +0,0 @@ -usr/local/bin/bitcoind usr/bin -usr/local/bin/bitcoin-cli usr/bin diff --git a/contrib/debian/bitcoind.lintian-overrides b/contrib/debian/bitcoind.lintian-overrides deleted file mode 100644 index 3f9f140bd827a..0000000000000 --- a/contrib/debian/bitcoind.lintian-overrides +++ /dev/null @@ -1,2 +0,0 @@ -# Linked code is Expat - only Debian packaging is GPL-2+ -bitcoind: possible-gpl-code-linked-with-openssl diff --git a/contrib/debian/bitcoind.manpages b/contrib/debian/bitcoind.manpages deleted file mode 100644 index 3e4ca63d4ea3f..0000000000000 --- a/contrib/debian/bitcoind.manpages +++ /dev/null @@ -1,2 +0,0 @@ -debian/manpages/bitcoind.1 -debian/manpages/bitcoin.conf.5 diff --git a/contrib/debian/changelog b/contrib/debian/changelog deleted file mode 100644 index fe910b65a5578..0000000000000 --- a/contrib/debian/changelog +++ /dev/null @@ -1,417 +0,0 @@ -bitcoin (0.9.3-precise1) precise; urgency=medium - - * New upstream releases. - - -- Matt Corallo (BlueMatt) Fri, 26 Sep 2014 12:01:00 -0700 - -bitcoin (0.9.1-precise1) precise; urgency=medium - - * New upstream release. - * Backport pull #4019 - - -- Matt Corallo Sat, 19 Apr 2014 17:29:00 -0400 - -bitcoin (0.9.0-precise1) precise; urgency=medium - - * New upstream release. - - -- Matt Corallo Thu, 20 Mar 2014 13:10:00 -0400 - -bitcoin (0.8.6-precise1) precise; urgency=medium - - * New upstream release. - * Make .desktop paths non-fixed (suggested by prusnak@github) - - -- Matt Corallo Fri, 13 Dec 2013 13:31:00 -0400 - -bitcoin (0.8.5-precise1) precise; urgency=medium - - * New upstream release. - - -- Matt Corallo Sun, 15 Sep 2013 14:02:00 -0400 - -bitcoin (0.8.4-precise1) precise; urgency=medium - - * New upstream release. - - -- Matt Corallo Wed, 4 Sep 2013 10:25:00 -0400 - -bitcoin (0.8.3-natty1) natty; urgency=low - - * New upstream release. - - -- Matt Corallo Wed, 26 Jun 2013 00:18:00 +0100 - -bitcoin (0.8.2-natty1) natty; urgency=low - - * New upstream release. - - -- Matt Corallo Wed, 29 Mar 2013 23:23:00 +0100 - -bitcoin (0.8.1-natty3) natty; urgency=low - - * New pixmaps - - -- Jonas Schnelli Mon, 13 May 2013 16:14:00 +0100 - -bitcoin (0.8.1-natty2) natty; urgency=low - - * Remove dumb broken launcher script - - -- Matt Corallo Sun, 24 Mar 2013 20:01:00 -0400 - -bitcoin (0.8.1-natty1) natty; urgency=low - - * New upstream release. - - -- Matt Corallo Tue, 19 Mar 2013 13:03:00 -0400 - -bitcoin (0.8.0-natty1) natty; urgency=low - - * New upstream release. - - -- Matt Corallo Sat, 23 Feb 2013 16:01:00 -0500 - -bitcoin (0.7.2-natty1) natty; urgency=low - - * New upstream release. - - -- Matt Corallo Sat, 15 Dec 2012 10:59:00 -0400 - -bitcoin (0.7.1-natty1) natty; urgency=low - - * New upstream release. - - -- Matt Corallo Wed, 24 Oct 2012 15:06:00 -0400 - -bitcoin (0.7.0-natty1) natty; urgency=low - - * New upstream release. - - -- Matt Corallo Mon, 17 Sep 2012 13:45:00 +0200 - -bitcoin (0.6.3-natty1) natty; urgency=low - - * New upstream release. - - -- Matt Corallo Mon, 25 Jun 2012 23:47:00 +0200 - -bitcoin (0.6.2-natty1) natty; urgency=low - - * Update package description and launch scripts. - - -- Matt Corallo Sat, 2 Jun 2012 16:41:00 +0200 - -bitcoin (0.6.2-natty0) natty; urgency=low - - * New upstream release. - - -- Matt Corallo Tue, 8 May 2012 16:27:00 -0500 - -bitcoin (0.6.1-natty0) natty; urgency=low - - * New upstream release. 
- - -- Matt Corallo Sun, 6 May 2012 20:09:00 -0500 - -bitcoin (0.6.0-natty0) natty; urgency=low - - * New upstream release. - * Add GNOME/KDE support for bitcoin-qt's bitcoin: URI support. - Thanks to luke-jr for the KDE .protocol file. - - -- Matt Corallo Sat, 31 Mar 2012 15:35:00 -0500 - -bitcoin (0.5.3-natty1) natty; urgency=low - - * Mark for upload to PPA. - - -- Matt Corallo Wed, 14 Mar 2012 23:06:00 -0400 - -bitcoin (0.5.3-natty0) natty; urgency=low - - * New upstream release. - - -- Luke Dashjr Tue, 10 Jan 2012 15:57:00 -0500 - -bitcoin (0.5.2-natty1) natty; urgency=low - - * Remove mentions on anonymity in package descriptions and manpage. - These should never have been there, bitcoin isnt anonymous without - a ton of work that virtually no users will ever be willing and - capable of doing - - -- Matt Corallo Sat, 7 Jan 2012 13:37:00 -0500 - -bitcoin (0.5.2-natty0) natty; urgency=low - - * New upstream release. - - -- Luke Dashjr Fri, 16 Dec 2011 17:57:00 -0500 - -bitcoin (0.5.1-natty0) natty; urgency=low - - * New upstream release. - - -- Matt Corallo Fri, 16 Dec 2011 13:27:00 -0500 - -bitcoin (0.5.0-natty0) natty; urgency=low - - * New upstream release. - - -- Matt Corallo Mon, 21 Nov 2011 11:32:00 -0500 - -bitcoin (0.5.0~rc7-natty0) natty; urgency=low - - * New upstream release candidate. - - -- Matt Corallo Sun, 20 Nov 2011 17:08:00 -0500 - -bitcoin (0.5.0~rc3-natty0) natty; urgency=low - - * New upstream release candidate. - * Don't set rpcpassword for bitcoin-qt. - - -- Matt Corallo Tue, 8 Nov 2011 11:56:00 -0400 - -bitcoin (0.5.0~rc1-natty1) natty; urgency=low - - * Add test_bitcoin to build test - * Fix clean - * Remove uneccessary build-dependancies - - -- Matt Corallo Wed, 26 Oct 2011 14:37:18 -0400 - -bitcoin (0.5.0~rc1-natty0) natty; urgency=low - - * Mark for natty - * Fix broken build - * Fix copyright listing - * Remove bitcoin: URL handler until bitcoin actually has support for it (Oops) - - -- Matt Corallo Wed, 26 Oct 2011 14:37:18 -0400 - -bitcoin (0.5.0~rc1-2) experimental; urgency=low - - * Add bitcoin-qt - - -- Matt Corallo Tue, 25 Oct 2011 15:24:18 -0400 - -bitcoin (0.5.0~rc1-1) experimental; urgency=low - - * New upstream prerelease. - * Add Github as alternate upstream source in watch file. - * Stop build-depending on libcrypto++-dev, and drop patch 1000: - Upstream no longer use crypto++. - * Drop patch 1003: Upstream builds dynamic by default now. - * Update copyright file: Drop notes on longer included sources. - - -- Jonas Smedegaard Fri, 14 Oct 2011 00:16:18 +0200 - -bitcoin (0.4.0-1) unstable; urgency=low - - * New upstream release. - * Stop repackaging source tarballs: No DFSG-violating stripping left. - * Update copyright file: - + Add Github URL to Source. - * Drop dpkg-source local-options hint: Declared options are default - since dpkg-source 1.16.1. - + Add irc URL to Upstream-Contact. - + Add comment on Bitcoin Developers to catch-all Files section. - + Add Files sections for newly readded src/cryptopp/* (new custom - BSD-like license), and newly added doc/build-osx.txt and - src/makefile.osx (Expat). - * Bump debhelper compatibility level to 7. - * Suppress binary icns and gpg files. - * Enable regression tests: - + Build-depend on libboost-test-dev. - + Extend patch 1003 to also dynamically link test binary. - + Build and invoke test binary unless tests are disabled. - * Tighten build-dependency on cdbs: Recent version needed to support - debhelper 7. 
- * Relax build-depend unversioned on debhelper: needed version - satisfied even in oldstable. - * Stop suppress optional build-dependencies: Satisfied in stable. - Build-depend on devscripts (enabling copyright-check). - - -- Jonas Smedegaard Wed, 05 Oct 2011 01:48:53 +0200 - -bitcoin (0.3.24~dfsg-1) unstable; urgency=low - - * New upstream release. - - [ Jonas Smedegaard ] - * Improve various usage hints: - + Explicitly mention in long description that bitcoind contains - daemon and command-line interface. - + Extend README.Debian with section on lack of GUI, and add primary - headline. - + Avoid installing upstream README: contains no parts relevant for - Debian usage. - Thanks to richard for suggestions (see bug#629443). - * Favor final releases over prereleases in rules and watch file. - Thanks to Jan Dittberner. - * Track -src (not -linux) tarballs in rules and watch file. - Thanks to Jan Dittberner. - * Drop patches 1004 and 1005 (integrated upstream) and simplify - CXXFLAGS in rules file. - * Stop stripping no longer included source-less binaries from upstream - tarballs. - - [ Jan Dittberner ] - * refresh debian/patches/1000_use_system_crypto++.patch - - -- Jonas Smedegaard Tue, 19 Jul 2011 15:08:54 +0200 - -bitcoin (0.3.21~dfsg-2) unstable; urgency=low - - * Enable UPNP support: - + Drop patch 1006. - + Build-depend on libminiupnpc-dev. - Thanks to Matt Corallo. - - -- Jonas Smedegaard Sat, 28 May 2011 15:52:44 +0200 - -bitcoin (0.3.21~dfsg-1) unstable; urgency=low - - * New upstream release. - * Refresh patches. - * Drop patch 1002: no longer needed, as upstream use pkgconfig now. - * Add patch 1006 to really unset USE_UPNP as aparently intended. - * Adjust cleanup rule to preserve .gitignore files. - * Update copyright file: - + Bump format to draft 174 of DEP-5. - + Shorten comments. - * Bump policy compliance to standards-version 3.9.2. - * Shorten Vcs-Browser paragraph in control file. - * Fix mention daemon (not CLI tools) in short description. - * Stop conflicting with or replace bitcoin-cli: Only transitional, no - longer needed. - * Link against unversioned berkeleydb. Update NEWS and README.Debian - accordingly (and improve wording while at it). - Closes: Bug#621425. Thanks to Ondřej Surý. - * This release also implicitly updates linkage against libcrypto++, - which closes: bug#626953, #627024. - * Disable linkage against not yet Debian packaged MiniUPnP. - * Silence seemingly harmless noise about unused variables. - - -- Jonas Smedegaard Tue, 17 May 2011 15:31:24 +0200 - -bitcoin (0.3.20.2~dfsg-2) unstable; urgency=medium - - * Fix have wrapper script execute real binary (not loop executing - itself). - Closes: bug#617290. Thanks to Philippe Gauthier and Etienne Laurin. - * Set urgency=medium as the only (user-exposed) binary is useless - without this fix and has been for some time. - - -- Jonas Smedegaard Wed, 16 Mar 2011 09:11:06 +0100 - -bitcoin (0.3.20.2~dfsg-1) unstable; urgency=low - - * New upstream release. - * Fix provide and replace former package name bitcoin-cli. - Closes: bug#618439. Thanks to Shane Wegner. - - -- Jonas Smedegaard Tue, 15 Mar 2011 11:41:43 +0100 - -bitcoin (0.3.20.01~dfsg-1) unstable; urgency=low - - * New upstream release. - - [ Micah Anderson ] - * Add myself as uploader. - - [ Jonas Smedegaard ] - * Add wrapper for bitcoind to ease initial startup. - * Update patches: - + Drop patch 2002: Applied upstream. - + Add patch 1005 to add phtread linker option. - Closes: bug#615619. Thanks to Shane Wegner. - + Refresh patches. 
- * Extend copyright years in rules file header. - * Rewrite copyright file using draft svn166 of DEP5 format. - * Rename binary package to bitcoind (from bincoin-cli). - Closes: bug#614025. Thanks to Luke-Jr. - - -- Jonas Smedegaard Tue, 01 Mar 2011 15:55:04 +0100 - -bitcoin (0.3.19~dfsg-6) unstable; urgency=low - - * Fix override agressive optimizations. - * Fix tighten build-dependencies to really fit backporting to Lenny: - + Add fallback build-dependency on libdb4.6++-dev. - + Tighten unversioned Boost build-dependencies to recent versions, - To force use of versioned Boost when backporting to Lenny. - ...needs more love, though: actual build fails. - - -- Jonas Smedegaard Mon, 17 Jan 2011 19:48:35 +0100 - -bitcoin (0.3.19~dfsg-5) unstable; urgency=low - - * Fix lower Boost fallback-build-dependencies to 1.35, really - available in Lenny. - * Correct comment in rules file regarding reason for versioned Boost - fallback-build-dependency. - * Add patch 2002 adding -mt decoration to Boost flags, to ease - backporting to Lenny. - * Respect DEB_BUILD_OPTIONS, and suppress arch-specific optimizations: - + Add patch 1004 to allow overriding optimization flags. - + Set optimization flags conditionally at build time. - + Drop patch 2002 unconditionally suppressing arch-optimizations. - - -- Jonas Smedegaard Mon, 17 Jan 2011 16:04:48 +0100 - -bitcoin (0.3.19~dfsg-4) unstable; urgency=low - - [ Micah Anderson ] - * Provide example bitcoin.conf. - * Add bitcoind(1) and bitcoin.conf(5) man pages. - - [ Jonas Smedegaard ] - * Ease backporting: - + Suppress optional build-dependencies. - + Add fallback build-dependencies on the most recent Boost libs - available in Lenny (where unversioned Boost libs are missing). - * Add Micah as copyright holder for manpages, licensed as GPL-3+. - * Bump copyright format to Subversion candidate draft 162 of DEP5. - - -- Jonas Smedegaard Mon, 17 Jan 2011 14:00:48 +0100 - -bitcoin (0.3.19~dfsg-3) unstable; urgency=low - - * Document in copyright file files excluded from repackaged source. - * Update copyright file: - + Bump DEP5 format hint to Subversion draft rev. 153. - + Consistently wrap at 72 chars. - + Refer to GPL-2 file (not GPL symlink). - * Link against Berkeley DB 4.8 (not 4.7): - + Build-depend on libdb4.8++-dev (and on on libdb4.7++-dev). - + Suggest libdb4.8-util and db4.7-util. - + Add README.Debian note on (untested) upgrade routine. - + Add NEWS entry on changed db version, referring to README.Debian. - - -- Jonas Smedegaard Fri, 07 Jan 2011 22:50:57 +0100 - -bitcoin (0.3.19~dfsg-2) unstable; urgency=low - - * Adjust build options to use optimized miner only for amd64. Fixes - FTBFS on i386 (and other archs, if compiling anywhere else at all). - * Avoid static linking. - * Adjust patch 2001 to avoid only arch-specific optimizations (keep - -O3). - * Extend long description to mention disk consumption and initial use - of IRC. - All of above changes thanks to Helmuth Grohne. - * Add lintian override regarding OpenSSL and GPL: Linked code is Expat - - only Debian packaging is GPL-2+. - - -- Jonas Smedegaard Wed, 29 Dec 2010 00:27:54 +0100 - -bitcoin (0.3.19~dfsg-1) unstable; urgency=low - - [ Jonas Smedegaard ] - * Initial release. - Closes: bug#578157. 
- - -- Jonas Smedegaard Tue, 28 Dec 2010 15:49:22 +0100 diff --git a/contrib/debian/control b/contrib/debian/control deleted file mode 100644 index a653260ad30ee..0000000000000 --- a/contrib/debian/control +++ /dev/null @@ -1,58 +0,0 @@ -Source: bitcoin -Section: utils -Priority: optional -Maintainer: Jonas Smedegaard -Uploaders: Micah Anderson -Build-Depends: debhelper, - devscripts, - automake, - libtool, - bash-completion, - libboost-system-dev (>> 1.35) | libboost-system1.35-dev, - libdb4.8++-dev, - libssl-dev, - pkg-config, - libminiupnpc8-dev, - libboost-filesystem-dev (>> 1.35) | libboost-filesystem1.35-dev, - libboost-program-options-dev (>> 1.35) | libboost-program-options1.35-dev, - libboost-thread-dev (>> 1.35) | libboost-thread1.35-dev, - libboost-test-dev (>> 1.35) | libboost-test1.35-dev, - qt4-qmake, - libqt4-dev, - libqrencode-dev, - libprotobuf-dev, protobuf-compiler -Standards-Version: 3.9.2 -Homepage: http://www.bitcoin.org/ -Vcs-Git: git://github.com/bitcoin/bitcoin.git -Vcs-Browser: http://github.com/bitcoin/bitcoin - -Package: bitcoind -Architecture: any -Depends: ${shlibs:Depends}, ${misc:Depends} -Description: peer-to-peer network based digital currency - daemon - Bitcoin is a free open source peer-to-peer electronic cash system that - is completely decentralized, without the need for a central server or - trusted parties. Users hold the crypto keys to their own money and - transact directly with each other, with the help of a P2P network to - check for double-spending. - . - Full transaction history is stored locally at each client. This - requires 20+ GB of space, slowly growing. - . - This package provides the daemon, bitcoind, and the CLI tool - bitcoin-cli to interact with the daemon. - -Package: bitcoin-qt -Architecture: any -Depends: ${shlibs:Depends}, ${misc:Depends} -Description: peer-to-peer network based digital currency - Qt GUI - Bitcoin is a free open source peer-to-peer electronic cash system that - is completely decentralized, without the need for a central server or - trusted parties. Users hold the crypto keys to their own money and - transact directly with each other, with the help of a P2P network to - check for double-spending. - . - Full transaction history is stored locally at each client. This - requires 20+ GB of space, slowly growing. - . - This package provides Bitcoin-Qt, a GUI for Bitcoin based on Qt. diff --git a/contrib/debian/copyright b/contrib/debian/copyright index a6ee201991cc4..95a281ce054e7 100644 --- a/contrib/debian/copyright +++ b/contrib/debian/copyright @@ -1,86 +1,95 @@ -Format: http://svn.debian.org/wsvn/dep/web/deps/dep5.mdwn?rev=174 +Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ Upstream-Name: Bitcoin Upstream-Contact: Satoshi Nakamoto - irc://#bitcoin@freenode.net -Source: http://sourceforge.net/projects/bitcoin/files/ - https://github.com/bitcoin/bitcoin + irc://#bitcoin-core-dev@libera.chat +Source: https://github.com/bitcoin/bitcoin Files: * -Copyright: 2009-2012, Bitcoin Core Developers +Copyright: 2009-2022, Bitcoin Core Developers License: Expat Comment: The Bitcoin Core Developers encompasses the current developers listed on bitcoin.org, as well as the numerous contributors to the project. -Files: src/json/* -Copyright: 2007-2009, John W. Wilkinson -License: Expat - -Files: src/strlcpy.h -Copyright: 1998, Todd C. 
Miller -License: ISC - Files: debian/* Copyright: 2010-2011, Jonas Smedegaard 2011, Matt Corallo License: GPL-2+ -Files: debian/manpages/* -Copyright: Micah Anderson -License: GPL-3+ - -Files: src/qt/res/icons/clock*.png, src/qt/res/icons/tx*.png, - src/qt/res/src/*.svg -Copyright: Wladimir van der Laan +Files: src/secp256k1/build-aux/m4/ax_jni_include_dir.m4 +Copyright: 2008 Don Anderson +License: GNU-All-permissive-License + +Files: src/secp256k1/build-aux/m4/ax_prog_cc_for_build.m4 +Copyright: 2008 Paolo Bonzini +License: GNU-All-permissive-License + +Files: src/qt/res/icons/add.png + src/qt/res/icons/address-book.png + src/qt/res/icons/chevron.png + src/qt/res/icons/edit.png + src/qt/res/icons/editcopy.png + src/qt/res/icons/editpaste.png + src/qt/res/icons/export.png + src/qt/res/icons/eye.png + src/qt/res/icons/history.png + src/qt/res/icons/lock_*.png + src/qt/res/icons/overview.png + src/qt/res/icons/receive.png + src/qt/res/icons/remove.png + src/qt/res/icons/send.png + src/qt/res/icons/synced.png + src/qt/res/icons/transaction*.png + src/qt/res/icons/tx_output.png + src/qt/res/icons/warning.png +Copyright: Stephen Hutchings (and more) + http://typicons.com License: Expat - -Files: src/qt/res/icons/address-book.png, src/qt/res/icons/export.png, - src/qt/res/icons/history.png, src/qt/res/icons/key.png, - src/qt/res/icons/lock_*.png, src/qt/res/icons/overview.png, - src/qt/res/icons/receive.png, src/qt/res/icons/send.png, - src/qt/res/icons/synced.png, src/qt/res/icons/filesave.png -Copyright: David Vignoni (david@icon-king.com) - ICON KING - www.icon-king.com -License: LGPL -Comment: NUVOLA ICON THEME for KDE 3.x - Original icons: kaddressbook, klipper_dock, view-list-text, - key-password, encrypted/decrypted, go-home, go-down, - go-next, dialog-ok - Site: http://www.icon-king.com/projects/nuvola/ +Comment: Site: https://github.com/stephenhutchings/typicons.font Files: src/qt/res/icons/connect*.png -Copyright: schollidesign -License: GPL-3+ -Comment: Icon Pack: Human-O2 - Site: http://findicons.com/icon/93743/blocks_gnome_netstatus_0 + src/qt/res/src/connect-*.svg + src/qt/res/icons/network_disabled.png + src/qt/res/src/network_disabled.svg +Copyright: Marco Falke + Luke Dashjr +License: Expat +Comment: Inspired by Stephen Hutchings' Typicons + +Files: src/qt/res/icons/tx_mined.png + src/qt/res/src/mine.svg + src/qt/res/icons/fontbigger.png + src/qt/res/icons/fontsmaller.png + src/qt/res/icons/hd_disabled.png + src/qt/res/src/hd_disabled.svg + src/qt/res/icons/hd_enabled.png + src/qt/res/src/hd_enabled.svg +Copyright: Jonas Schnelli +License: Expat -Files: src/qt/res/icons/transaction*.png -Copyright: md2k7 +Files: src/qt/res/icons/clock*.png + src/qt/res/icons/eye_*.png + src/qt/res/icons/tx_in*.png + src/qt/res/src/clock_*.svg + src/qt/res/src/tx_*.svg +Copyright: Stephen Hutchings, Jonas Schnelli License: Expat -Comment: Site: https://bitcointalk.org/index.php?topic=15276.0 - -Files: src/qt/res/icons/configure.png, src/qt/res/icons/quit.png, - src/qt/res/icons/editcopy.png, src/qt/res/icons/editpaste.png, - src/qt/res/icons/add.png, src/qt/res/icons/edit.png, - src/qt/res/icons/remove.png -Copyright: http://www.everaldo.com -License: LGPL -Comment: Icon Pack: Crystal SVG - -Files: src/qt/res/icons/bitcoin.png, src/qt/res/icons/toolbar.png -Copyright: Bitboy (optimized for 16x16 by Wladimir van der Laan) -License: PUB-DOM +Comment: Modifications of Stephen Hutchings' Typicons + +Files: src/qt/res/icons/bitcoin.* + share/pixmaps/bitcoin* + src/qt/res/src/bitcoin.svg +Copyright: 
Bitboy, Jonas Schnelli +License: public-domain Comment: Site: https://bitcointalk.org/?topic=1756.0 -Files: scripts/img/reload.xcf, src/qt/res/movies/*.png -Copyright: Everaldo (Everaldo Coelho) -License: GPL-3+ -Comment: Icon Pack: Kids - Site: http://findicons.com/icon/17102/reload?id=17102 +Files: src/qt/res/icons/proxy.png + src/qt/res/src/proxy.svg +Copyright: Cristian Mircea Messel +License: public-domain -Files: src/qt/res/images/splash2.jpg -License: PUB-DOM -Copyright: Crobbo (forum) -Comment: Site: https://bitcointalk.org/index.php?topic=32273.0 +Files: src/qt/res/fonts/RobotoMono-Bold.ttf +License: Apache-2.0 +Comment: Site: https://fonts.google.com/specimen/Roboto+Mono License: Expat @@ -103,19 +112,11 @@ License: Expat TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -License: ISC - Permission to use, copy, modify, and distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - . - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL - WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED - WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR - BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES - OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, - WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, - ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS - SOFTWARE. +License: GNU-All-permissive-License + Copying and distribution of this file, with or without modification, are + permitted in any medium without royalty provided the copyright notice + and this notice are preserved. This file is offered as-is, without any + warranty. License: GPL-2+ This program is free software; you can redistribute it and/or modify it @@ -145,22 +146,16 @@ Comment: You should have received a copy of the GNU General Public License along with this program. If not, see . -License: LGPL - This program is free software: you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation, either version 3 of the License, or - (at your option) any later version. - . - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. -Comment: - On Debian systems the GNU Lesser General Public License (LGPL) is - located in '/usr/share/common-licenses/LGPL'. - . - You should have received a copy of the GNU General Public License - along with this program. If not, see . - -License: PUB-DOM +License: public-domain This work is in the public domain. + +License: Apache-2.0 + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/contrib/debian/examples/bitcoin.conf b/contrib/debian/examples/bitcoin.conf deleted file mode 100644 index 31cca981e0a2a..0000000000000 --- a/contrib/debian/examples/bitcoin.conf +++ /dev/null @@ -1,115 +0,0 @@ -## -## bitcoin.conf configuration file. Lines beginning with # are comments. -## - -# Network-related settings: - -# Run on the test network instead of the real bitcoin network. -#testnet=0 - -# Run a regression test network -#regtest=0 - -# Connect via a SOCKS5 proxy -#proxy=127.0.0.1:9050 - -############################################################## -## Quick Primer on addnode vs connect ## -## Let's say for instance you use addnode=4.2.2.4 ## -## addnode will connect you to and tell you about the ## -## nodes connected to 4.2.2.4. In addition it will tell ## -## the other nodes connected to it that you exist so ## -## they can connect to you. ## -## connect will not do the above when you 'connect' to it. ## -## It will *only* connect you to 4.2.2.4 and no one else.## -## ## -## So if you're behind a firewall, or have other problems ## -## finding nodes, add some using 'addnode'. ## -## ## -## If you want to stay private, use 'connect' to only ## -## connect to "trusted" nodes. ## -## ## -## If you run multiple nodes on a LAN, there's no need for ## -## all of them to open lots of connections. Instead ## -## 'connect' them all to one node that is port forwarded ## -## and has lots of connections. ## -## Thanks goes to [Noodle] on Freenode. ## -############################################################## - -# Use as many addnode= settings as you like to connect to specific peers -#addnode=69.164.218.197 -#addnode=10.0.0.2:8333 - -# Alternatively use as many connect= settings as you like to connect ONLY to specific peers -#connect=69.164.218.197 -#connect=10.0.0.1:8333 - -# Listening mode, enabled by default except when 'connect' is being used -#listen=1 - -# Maximum number of inbound+outbound connections. -#maxconnections= - -# -# JSON-RPC options (for controlling a running Bitcoin/bitcoind process) -# - -# server=1 tells Bitcoin-QT and bitcoind to accept JSON-RPC commands -#server=0 - -# You must set rpcuser and rpcpassword to secure the JSON-RPC api -#rpcuser=Ulysseys -#rpcpassword=YourSuperGreatPasswordNumber_DO_NOT_USE_THIS_OR_YOU_WILL_GET_ROBBED_385593 - -# How many seconds bitcoin will wait for a complete RPC HTTP request. -# after the HTTP connection is established. -#rpctimeout=30 - -# By default, only RPC connections from localhost are allowed. -# Specify as many rpcallowip= settings as you like to allow connections from other hosts, -# either as a single IPv4/IPv6 or with a subnet specification. - -# NOTE: opening up the RPC port to hosts outside your local trusted network is NOT RECOMMENDED, -# because the rpcpassword is transmitted over the network unencrypted. - -# server=1 tells Bitcoin-QT to accept JSON-RPC commands. 
-# it is also read by bitcoind to determine if RPC should be enabled -#rpcallowip=10.1.1.34/255.255.255.0 -#rpcallowip=1.2.3.4/24 -#rpcallowip=2001:db8:85a3:0:0:8a2e:370:7334/96 - -# Listen for RPC connections on this TCP port: -#rpcport=8332 - -# You can use Bitcoin or bitcoind to send commands to Bitcoin/bitcoind -# running on another host using this option: -#rpcconnect=127.0.0.1 - -# Use Secure Sockets Layer (also known as TLS or HTTPS) to communicate -# with Bitcoin -server or bitcoind -#rpcssl=1 - -# OpenSSL settings used when rpcssl=1 -#rpcsslciphers=TLSv1+HIGH:!SSLv2:!aNULL:!eNULL:!AH:!3DES:@STRENGTH -#rpcsslcertificatechainfile=server.cert -#rpcsslprivatekeyfile=server.pem - - -# Miscellaneous options - -# Pre-generate this many public/private key pairs, so wallet backups will be valid for -# both prior transactions and several dozen future transactions. -#keypool=100 - -# Pay an optional transaction fee every time you send bitcoins. Transactions with fees -# are more likely than free transactions to be included in generated blocks, so may -# be validated sooner. -#paytxfee=0.00 - -# User interface options - -# Start Bitcoin minimized -#min=1 - -# Minimize to the system tray -#minimizetotray=1 diff --git a/contrib/debian/gbp.conf b/contrib/debian/gbp.conf deleted file mode 100644 index a7281f94b2b53..0000000000000 --- a/contrib/debian/gbp.conf +++ /dev/null @@ -1,5 +0,0 @@ -# Configuration file for git-buildpackage and friends - -[DEFAULT] -pristine-tar = True -sign-tags = True diff --git a/contrib/debian/manpages/bitcoin-qt.1 b/contrib/debian/manpages/bitcoin-qt.1 deleted file mode 100644 index a023582bc0259..0000000000000 --- a/contrib/debian/manpages/bitcoin-qt.1 +++ /dev/null @@ -1,203 +0,0 @@ -.TH BITCOIN-QT "1" "April 2013" "bitcoin-qt 1" -.SH NAME -bitcoin-qt \- peer-to-peer network based digital currency -.SH DESCRIPTION -.SS "Usage:" -.IP -bitcoin\-qt [command\-line options] -.SH OPTIONS -.TP -\-? -This help message -.TP -\fB\-conf=\fR -Specify configuration file (default: bitcoin.conf) -.TP -\fB\-pid=\fR -Specify pid file (default: bitcoind.pid) -.TP -\fB\-gen\fR -Generate coins -.TP -\fB\-gen\fR=\fI0\fR -Don't generate coins -.TP -\fB\-datadir=\fR -Specify data directory -.TP -\fB\-dbcache=\fR -Set database cache size in megabytes (default: 25) -.TP -\fB\-timeout=\fR -Specify connection timeout in milliseconds (default: 5000) -.TP -\fB\-proxy=\fR -Connect through SOCKS5 proxy -.TP -\fB\-tor=\fR -Use proxy to reach tor hidden services (default: same as \fB\-proxy\fR) -.TP -\fB\-dns\fR -Allow DNS lookups for \fB\-addnode\fR, \fB\-seednode\fR and \fB\-connect\fR -.TP -\fB\-port=\fR -Listen for connections on (default: 8333 or testnet: 18333) -.TP -\fB\-maxconnections=\fR -Maintain at most connections to peers (default: 125) -.TP -\fB\-addnode=\fR -Add a node to connect to and attempt to keep the connection open -.TP -\fB\-connect=\fR -Connect only to the specified node(s) -.TP -\fB\-seednode=\fR -Connect to a node to retrieve peer addresses, and disconnect -.TP -\fB\-externalip=\fR -Specify your own public address -.TP -\fB\-onlynet=\fR -Only connect to nodes in network (IPv4, IPv6 or Tor) -.TP -\fB\-discover\fR -Discover own IP address (default: 1 when listening and no \fB\-externalip\fR) -.TP -\fB\-checkpoints\fR -Only accept block chain matching built\-in checkpoints (default: 1) -.TP -\fB\-listen\fR -Accept connections from outside (default: 1 if no \fB\-proxy\fR or \fB\-connect\fR) -.TP -\fB\-bind=\fR -Bind to given address and always listen on it. 
Use [host]:port notation for IPv6 -.TP -\fB\-dnsseed\fR -Find peers using DNS lookup (default: 1 unless \fB\-connect\fR) -.TP -\fB\-banscore=\fR -Threshold for disconnecting misbehaving peers (default: 100) -.TP -\fB\-bantime=\fR -Number of seconds to keep misbehaving peers from reconnecting (default: 86400) -.TP -\fB\-maxreceivebuffer=\fR -Maximum per\-connection receive buffer, *1000 bytes (default: 5000) -.TP -\fB\-maxsendbuffer=\fR -Maximum per\-connection send buffer, *1000 bytes (default: 1000) -.TP -\fB\-upnp\fR -Use UPnP to map the listening port (default: 1 when listening) -.TP -\fB\-paytxfee=\fR -Fee per KB to add to transactions you send -.TP -\fB\-server\fR -Accept command line and JSON\-RPC commands -.TP -\fB\-testnet\fR -Use the test network -.TP -\fB\-debug\fR -Output extra debugging information. Implies all other \fB\-debug\fR* options -.TP -\fB\-debugnet\fR -Output extra network debugging information -.TP -\fB\-logtimestamps\fR -Prepend debug output with timestamp -.TP -\fB\-shrinkdebugfile\fR -Shrink debug.log file on client startup (default: 1 when no \fB\-debug\fR) -.TP -\fB\-printtoconsole\fR -Send trace/debug info to console instead of debug.log file -.TP -\fB\-rpcuser=\fR -Username for JSON\-RPC connections -.TP -\fB\-rpcpassword=\fR -Password for JSON\-RPC connections -.TP -\fB\-rpcport=\fR -Listen for JSON\-RPC connections on (default: 8332 or testnet: 18332) -.TP -\fB\-rpcallowip=\fR -Allow JSON\-RPC connections from specified IP address -.TP -\fB\-rpcthreads=\fR -Set the number of threads to service RPC calls (default: 4) -.TP -\fB\-blocknotify=\fR -Execute command when the best block changes (%s in cmd is replaced by block hash) -.TP -\fB\-walletnotify=\fR -Execute command when a wallet transaction changes (%s in cmd is replaced by TxID) -.TP -\fB\-alertnotify=\fR -Execute command when a relevant alert is received (%s in cmd is replaced by message) -.TP -\fB\-upgradewallet\fR -Upgrade wallet to latest format -.TP -\fB\-keypool=\fR -Set key pool size to (default: 100) -.TP -\fB\-rescan\fR -Rescan the block chain for missing wallet transactions -.TP -\fB\-salvagewallet\fR -Attempt to recover private keys from a corrupt wallet.dat -.TP -\fB\-checkblocks=\fR -How many blocks to check at startup (default: 288, 0 = all) -.TP -\fB\-checklevel=\fR -How thorough the block verification is (0\-4, default: 3) -.TP -\fB\-txindex\fR -Maintain a full transaction index (default: 0) -.TP -\fB\-loadblock=\fR -Imports blocks from external blk000??.dat file -.TP -\fB\-reindex\fR -Rebuild block chain index from current blk000??.dat files -.TP -\fB\-par=\fR -Set the number of script verification threads (1\-16, 0=auto, default: 0) -.SS "Block creation options:" -.TP -\fB\-blockminsize=\fR -Set minimum block size in bytes (default: 0) -.TP -\fB\-blockmaxsize=\fR -Set maximum block size in bytes (default: 250000) -.HP -\fB\-blockprioritysize=\fR Set maximum size of high\-priority/low\-fee transactions in bytes (default: 27000) -.PP -SSL options: (see the Bitcoin Wiki for SSL setup instructions) -.TP -\fB\-rpcssl\fR -Use OpenSSL (https) for JSON\-RPC connections -.TP -\fB\-rpcsslcertificatechainfile=\fR -Server certificate file (default: server.cert) -.TP -\fB\-rpcsslprivatekeyfile=\fR -Server private key (default: server.pem) -.TP -\fB\-rpcsslciphers=\fR -Acceptable ciphers (default: TLSv1+HIGH:!SSLv2:!aNULL:!eNULL:!AH:!3DES:@STRENGTH) -.SS "UI options:" -.TP -\fB\-lang=\fR -Set language, for example "de_DE" (default: system locale) -.TP -\fB\-min\fR -Start minimized -.TP 
-\fB\-splash\fR -Show splash screen on startup (default: 1) diff --git a/contrib/debian/manpages/bitcoin.conf.5 b/contrib/debian/manpages/bitcoin.conf.5 deleted file mode 100644 index 8a0078d5d58a2..0000000000000 --- a/contrib/debian/manpages/bitcoin.conf.5 +++ /dev/null @@ -1,89 +0,0 @@ -.TH BITCOIN.CONF "5" "January 2011" "bitcoin.conf 3.19" -.SH NAME -bitcoin.conf \- bitcoin configuration file -.SH SYNOPSIS -All command-line options (except for '\-conf') may be specified in a configuration file, and all configuration file options may also be specified on the command line. Command-line options override values set in the configuration file. -.TP -The configuration file is a list of 'setting=value' pairs, one per line, with optional comments starting with the '#' character. -.TP -The configuration file is not automatically created; you can create it using your favorite plain-text editor. By default, bitcoind(1) will look for a file named bitcoin.conf(5) in the bitcoin data directory, but both the data directory and the configuration file path may be changed using the '\-datadir' and '\-conf' command-line arguments. -.SH LOCATION -bitcoin.conf should be located in $HOME/.bitcoin -.SH NETWORK-RELATED SETTINGS -.TP -.TP -\fBtestnet=\fR[\fI'1'\fR|\fI'0'\fR] -Enable or disable run on the test network instead of the real *bitcoin* network. -.TP -\fBproxy=\fR\fI'127.0.0.1:9050'\fR -Connect via a socks4 proxy. -.TP -\fBaddnode=\fR\fI'10.0.0.2:8333'\fR -Use as many *addnode=* settings as you like to connect to specific peers. -.TP -\fBconnect=\fR\fI'10.0.0.1:8333'\fR -Use as many *connect=* settings as you like to connect ONLY to specific peers. -.TP -\fRmaxconnections=\fR\fI'value'\fR -Maximum number of inbound+outbound connections. -.SH JSON-RPC OPTIONS -.TP -\fBserver=\fR[\fI'1'\fR|\fI'0'\fR] -Tells *bitcoin* to accept or not accept JSON-RPC commands. -.TP -\fBrpcuser=\fR\fI'username'\fR -You must set *rpcuser* to secure the JSON-RPC api. -.TP -\fBrpcpassword=\fR\fI'password'\fR -You must set *rpcpassword* to secure the JSON-RPC api. -.TP -\fBrpcallowip=\fR\fI'192.168.1.*'\fR -By default, only RPC connections from localhost are allowed. Specify as many *rpcallowip=* settings as you like to allow connections from other hosts (and you may use * as a wildcard character). -.TP -\fBrpcport=\fR\fI'8332'\fR -Listen for RPC connections on this TCP port. -.TP -\fBrpcconnect=\fR\fI'127.0.0.1'\fR -You can use *bitcoin* or *bitcoind(1)* to send commands to *bitcoin*/*bitcoind(1)* running on another host using this option. -.TP -\fBrpcssl=\fR\fI'1'\fR -Use Secure Sockets Layer (also known as TLS or HTTPS) to communicate with *bitcoin* '\-server' or *bitcoind(1)*. Example of OpenSSL settings used when *rpcssl*='1': -.TP -\fB\-rpcsslciphers=\fR -Acceptable ciphers (default: TLSv1+HIGH:\:!SSLv2:\:!aNULL:\:!eNULL:\:!AH:\:!3DES:\:@STRENGTH) -.TP -\fBrpcsslcertificatechainfile=\fR\fI'server.cert'\fR -.TP -\fBrpcsslprivatekeyfile=\fR\fI'server.pem'\fR -.TP -.SH MISCELLANEOUS OPTIONS -.TP -\fBgen=\fR[\fI'0'\fR|\fI'1'\fR] -Enable or disable attempt to generate bitcoins. -.TP -\fB4way=\fR[\fI'0'\fR|\fI'1'\fR] -Enable or disable use SSE instructions to try to generate bitcoins faster. -.TP -\fBkeypool=\fR\fI'100'\fR -Pre-generate this many public/private key pairs, so wallet backups will be valid for both prior transactions and several dozen future transactions. -.TP -\fBpaytxfee=\fR\fI'0.00'\fR -Pay an optional transaction fee every time you send bitcoins. 
Transactions with fees are more likely than free transactions to be included in generated blocks, so may be validated sooner. -.TP -\fBallowreceivebyip=\fR\fI'1'\fR -Allow direct connections for the 'pay via IP address' feature. -.TP -.SH USER INTERFACE OPTIONS -.TP -\fBmin=\fR[\fI'0'\fR|\fI'1'\fR] -Enable or disable start bitcoind minimized. -.TP -\fBminimizetotray=\fR[\fI'0'\fR|\fI'1'\fR] -Enable or disable minimize to the system tray. -.SH "SEE ALSO" -bitcoind(1) -.SH AUTHOR -This manual page was written by Micah Anderson for the Debian system (but may be used by others). Permission is granted to copy, distribute and/or modify this document under the terms of the GNU General Public License, Version 3 or any later version published by the Free Software Foundation. - -On Debian systems, the complete text of the GNU General Public License can be found in /usr/share/common-licenses/GPL. - diff --git a/contrib/debian/manpages/bitcoind.1 b/contrib/debian/manpages/bitcoind.1 deleted file mode 100644 index a1b17d6077f4a..0000000000000 --- a/contrib/debian/manpages/bitcoind.1 +++ /dev/null @@ -1,209 +0,0 @@ -.TH BITCOIND "1" "January 2011" "bitcoind 3.19" -.SH NAME -bitcoind \- peer-to-peer network based digital currency -.SH SYNOPSIS -bitcoin [options] [params] -.TP -bitcoin [options] help \- Get help for a command -.SH DESCRIPTION -This manual page documents the bitcoind program. Bitcoin is a peer-to-peer digital currency. Peer-to-peer (P2P) means that there is no central authority to issue new money or keep track of transactions. Instead, these tasks are managed collectively by the nodes of the network. Advantages: - -Bitcoins can be sent easily through the Internet, without having to trust middlemen. Transactions are designed to be irreversible. Be safe from instability caused by fractional reserve banking and central banks. The limited inflation of the Bitcoin system’s money supply is distributed evenly (by CPU power) throughout the network, not monopolized by banks. - -.SH OPTIONS -.TP -\fB\-conf=\fR -Specify configuration file (default: bitcoin.conf) -.TP -\fB\-gen\fR -Generate coins -.TP -\fB\-gen\fR=\fI0\fR -Don't generate coins -.TP -\fB\-min\fR -Start minimized -.TP -\fB\-datadir=\fR -Specify data directory -.TP -\fB\-proxy=\fR -Connect through SOCKS5 proxy -.TP -\fB\-addnode=\fR -Add a node to connect to -.TP -\fB\-connect=\fR -Connect only to the specified node -.TP -\fB\-paytxfee=\fR -Fee per KB to add to transactions you send -.TP -\fB\-server\fR -Accept command line and JSON\-RPC commands -.TP -\fB\-daemon\fR -Run in the background as a daemon and accept commands -.TP -\fB\-testnet\fR -Use the test network -.TP -\fB\-rpcuser=\fR -Username for JSON\-RPC connections -.TP -\fB\-rpcpassword=\fR -Password for JSON\-RPC connections -.TP -\fB\-rpcport=\fR -Listen for JSON\-RPC connections on -.TP -\fB\-rpcallowip=\fR -Allow JSON\-RPC connections from specified IP address -.TP -\fB\-rpcconnect=\fR -Send commands to node running on -.PP -SSL options: (see the Bitcoin Wiki for SSL setup instructions) -.TP -\fB\-rpcssl\fR=\fI1\fR -Use OpenSSL (https) for JSON\-RPC connections -.TP -\fB\-rpcsslcertificatchainfile=\fR -Server certificate file (default: server.cert) -.TP -\fB\-rpcsslprivatekeyfile=\fR -Server private key (default: server.pem) -.TP -\fB\-rpcsslciphers=\fR -Acceptable ciphers (default: TLSv1+HIGH:\:!SSLv2:\:!aNULL:\:!eNULL:\:!AH:\:!3DES:\:@STRENGTH) -.TP -\-? 
-This help message -.SH COMMANDS -.TP -\fBbackupwallet 'destination'\fR -Safely copies *wallet.dat* to 'destination', which can be a directory or a path with filename. -.TP -\fBgetaccount 'bitcoinaddress'\fR -Returns the account associated with the given address. -.TP -\fBsetaccount 'bitcoinaddress' ['account']\fR -Sets the ['account'] associated with the given address. ['account'] may be omitted to remove an address from ['account']. -.TP -\fBgetaccountaddress 'account'\fR -Returns a new bitcoin address for 'account'. -.TP -\fBgetaddressesbyaccount 'account'\fR -Returns the list of addresses associated with the given 'account'. -.TP -\fBgetbalance 'account'\fR -Returns the server's available balance, or the balance for 'account'. -.TP -\fBgetblockcount\fR -Returns the number of blocks in the longest block chain. -.TP -\fBgetblocknumber\fR -Returns the block number of the latest block in the longest block chain. -.TP -\fBgetconnectioncount\fR -Returns the number of connections to other nodes. -.TP -\fBgetdifficulty\fR -Returns the proof-of-work difficulty as a multiple of the minimum difficulty. -.TP -\fBgetgenerate\fR -Returns boolean true if server is trying to generate bitcoins, false otherwise. -.TP -\fBsetgenerate 'generate' ['genproclimit']\fR -Generation is limited to ['genproclimit'] processors, \-1 is unlimited. -.TP -\fBgethashespersec\fR -Returns a recent hashes per second performance measurement while generating. -.TP -\fBgetinfo\fR -Returns an object containing server information. -.TP -\fBgetnewaddress 'account'\fR -Returns a new bitcoin address for receiving payments. If 'account' is specified (recommended), it is added to the address book so payments received with the address will be credited to 'account'. -.TP -\fBgetreceivedbyaccount 'account' ['minconf=1']\fR -Returns the total amount received by addresses associated with 'account' in transactions with at least ['minconf'] confirmations. -.TP -\fBgetreceivedbyaddress 'bitcoinaddress' ['minconf=1']\fR -Returns the total amount received by 'bitcoinaddress' in transactions with at least ['minconf'] confirmations. -.TP -\fBgettransaction 'txid'\fR -Returns information about a specific transaction, given hexadecimal transaction ID. -.TP -\fBgetwork 'data'\fR -If 'data' is specified, tries to solve the block and returns true if it was successful. If 'data' is not specified, returns formatted hash 'data' to work on: - - "midstate" : precomputed hash state after hashing the first half of the data. - "data" : block data. - "hash1" : formatted hash buffer for second hash. - "target" : little endian hash target. -.TP -\fBhelp 'command'\fR -List commands, or get help for a command. -.TP -\fBlistaccounts ['minconf=1']\fR -List accounts and their current balances. - *note: requires bitcoin 0.3.20 or later. -.TP -\fBlistreceivedbyaccount ['minconf=1'] ['includeempty=false']\fR -['minconf'] is the minimum number of confirmations before payments are included. ['includeempty'] whether to include addresses that haven't received any payments. Returns an array of objects containing: - - "account" : the account of the receiving address. - "amount" : total amount received by the address. - "confirmations" : number of confirmations of the most recent transaction included. -.TP -\fBlistreceivedbyaddress ['minconf=1'] ['includeempty=false']\fR -['minconf'] is the minimum number of confirmations before payments are included. ['includeempty'] whether to include addresses that haven't received any payments. 
Returns an array of objects containing: - - "address" : receiving address. - "account" : the account of the receiving address. - "amount" : total amount received by the address. - "confirmations" : number of confirmations of the most recent transaction included. -.TP -\fBlisttransactions 'account' ['count=10']\fR -Returns a list of the last ['count'] transactions for 'account' \- for all accounts if 'account' is not specified or is "*". Each entry in the list may contain: - - "category" : will be generate, send, receive, or move. - "amount" : amount of transaction. - "fee" : Fee (if any) paid (only for send transactions). - "confirmations" : number of confirmations (only for generate/send/receive). - "txid" : transaction ID (only for generate/send/receive). - "otheraccount" : account funds were moved to or from (only for move). - "message" : message associated with transaction (only for send). - "to" : message-to associated with transaction (only for send). - - *note: requires bitcoin 0.3.20 or later. -.TP -\fBmove <'fromaccount'> <'toaccount'> <'amount'> ['minconf=1'] ['comment']\fR -Moves funds between accounts. -.TP -\fBsendfrom* <'account'> <'bitcoinaddress'> <'amount'> ['minconf=1'] ['comment'] ['comment-to']\fR -Sends amount from account's balance to 'bitcoinaddress'. This method will fail if there is less than amount bitcoins with ['minconf'] confirmations in the account's balance (unless account is the empty-string-named default account; it behaves like the *sendtoaddress* method). Returns transaction ID on success. -.TP -\fBsendtoaddress 'bitcoinaddress' 'amount' ['comment'] ['comment-to']\fR -Sends amount from the server's available balance to 'bitcoinaddress'. amount is a real and is rounded to the nearest 0.01. Returns transaction id on success. -.TP -\fBstop\fR -Stops the bitcoin server. -.TP -\fBvalidateaddress 'bitcoinaddress'\fR -Checks that 'bitcoinaddress' looks like a proper bitcoin address. Returns an object containing: - - "isvalid" : true or false. - "ismine" : true if the address is in the server's wallet. - "address" : bitcoinaddress. - - *note: ismine and address are only returned if the address is valid. - -.SH "SEE ALSO" -bitcoin.conf(5) -.SH AUTHOR -This manual page was written by Micah Anderson for the Debian system (but may be used by others). Permission is granted to copy, distribute and/or modify this document under the terms of the GNU General Public License, Version 3 or any later version published by the Free Software Foundation. - -On Debian systems, the complete text of the GNU General Public License can be found in /usr/share/common-licenses/GPL. - diff --git a/contrib/debian/patches/README b/contrib/debian/patches/README deleted file mode 100644 index 80c1584376a97..0000000000000 --- a/contrib/debian/patches/README +++ /dev/null @@ -1,3 +0,0 @@ -0xxx: Grabbed from upstream development. -1xxx: Possibly relevant for upstream adoption. -2xxx: Only relevant for official Debian release. 
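The bitcoind(1) manpage and the example bitcoin.conf removed above both document the same JSON-RPC interface: `server=1` enables it, `rpcuser`/`rpcpassword` secure it, and `rpcport` (default 8332) is where it listens. As a minimal editorial sketch of how a client consumes those settings, and not part of this patch, the following standalone Python snippet issues a `getblockcount` call against a locally running bitcoind; the URL and credentials are placeholders that must match your own configuration.

```python
#!/usr/bin/env python3
# Minimal JSON-RPC client sketch for a local bitcoind.
# Assumes bitcoin.conf contains server=1, rpcuser=alice, rpcpassword=secret
# (placeholder credentials for illustration only).
import base64
import json
import urllib.request

RPC_URL = "http://127.0.0.1:8332"            # default mainnet rpcport
RPC_USER, RPC_PASSWORD = "alice", "secret"   # placeholders

def rpc_call(method, params=None):
    """Send one JSON-RPC request over HTTP basic auth and return its result."""
    payload = json.dumps({
        "jsonrpc": "1.0",
        "id": "example",
        "method": method,
        "params": params or [],
    }).encode()
    auth = base64.b64encode(f"{RPC_USER}:{RPC_PASSWORD}".encode()).decode()
    request = urllib.request.Request(
        RPC_URL,
        data=payload,
        headers={"Content-Type": "application/json",
                 "Authorization": "Basic " + auth})
    with urllib.request.urlopen(request) as response:
        return json.loads(response.read())["result"]

if __name__ == "__main__":
    print("block count:", rpc_call("getblockcount"))
```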
diff --git a/contrib/debian/patches/series b/contrib/debian/patches/series deleted file mode 100644 index 8b137891791fe..0000000000000 --- a/contrib/debian/patches/series +++ /dev/null @@ -1 +0,0 @@ - diff --git a/contrib/debian/rules b/contrib/debian/rules deleted file mode 100755 index 52b357cf01a50..0000000000000 --- a/contrib/debian/rules +++ /dev/null @@ -1,24 +0,0 @@ -#!/usr/bin/make -f -# -*- mode: makefile; coding: utf-8 -*- - -#DEB_MAKE_CHECK_TARGET = test_bitcoin -#build/bitcoind:: -# $(if $(filter nocheck,$(DEB_BUILD_OPTIONS)),,src/test_bitcoin) - -DEB_INSTALL_EXAMPLES_bitcoind += debian/examples/* -DEB_INSTALL_MANPAGES_bitcoind += debian/manpages/* - -%: - dh --with bash-completion $@ - -override_dh_auto_clean: - if [ -f Makefile ]; then $(MAKE) distclean; fi - rm -rf Makefile.in aclocal.m4 configure src/Makefile.in src/bitcoin-config.h.in src/build-aux src/qt/Makefile.in src/qt/test/Makefile.in src/test/Makefile.in - -# Yea, autogen should be run on the source archive, but I like doing git archive -override_dh_auto_configure: - ./autogen.sh - ./configure - -override_dh_auto_test: - make check diff --git a/contrib/debian/source/format b/contrib/debian/source/format deleted file mode 100644 index 163aaf8d82b6c..0000000000000 --- a/contrib/debian/source/format +++ /dev/null @@ -1 +0,0 @@ -3.0 (quilt) diff --git a/contrib/debian/watch b/contrib/debian/watch deleted file mode 100644 index c96d2f8e75ca4..0000000000000 --- a/contrib/debian/watch +++ /dev/null @@ -1,7 +0,0 @@ -# Run the "uscan" command to check for upstream updates and more. -version=3 -# use qa.debian.org redirector; see man uscan -opts=uversionmangle=s/(\d)(alpha|beta|rc)/$1~$2/;s/\-src//,dversionmangle=s/~dfsg\d*// \ - http://sf.net/bitcoin/bitcoin-(\d.*)-linux\.tar\.gz debian -opts=uversionmangle=s/(\d)(alpha|beta|rc)/$1~$2/,dversionmangle=s/~dfsg\d*// \ - http://githubredir.debian.net/github/bitcoin/bitcoin v(.*).tar.gz diff --git a/contrib/devtools/README.md b/contrib/devtools/README.md index a57b4e561e618..54b1a8558818a 100644 --- a/contrib/devtools/README.md +++ b/contrib/devtools/README.md @@ -1,83 +1,146 @@ Contents -=========== +======== This directory contains tools for developers working on this repository. -github-merge.sh -================== +clang-format-diff.py +=================== + +A script to format unified git diffs according to [.clang-format](../../src/.clang-format). + +Requires `clang-format`, installed e.g. via `brew install clang-format` on macOS, +or `sudo apt install clang-format` on Debian/Ubuntu. + +For instance, to format the last commit with 0 lines of context, +the script should be called from the git root folder as follows. + +``` +git diff -U0 HEAD~1.. | ./contrib/devtools/clang-format-diff.py -p1 -i -v +``` + +copyright\_header.py +==================== + +Provides utilities for managing copyright headers of `The Bitcoin Core +developers` in repository source files. It has three subcommands: + +``` +$ ./copyright_header.py report [verbose] +$ ./copyright_header.py update +$ ./copyright_header.py insert +``` +Running these subcommands without arguments displays a usage string. + +copyright\_header.py report \ [verbose] +--------------------------------------------------------- + +Produces a report of all copyright header notices found inside the source files +of a repository. Useful to quickly visualize the state of the headers. +Specifying `verbose` will list the full filenames of files of each category. 
+ +copyright\_header.py update \ [verbose] +--------------------------------------------------------- +Updates all the copyright headers of `The Bitcoin Core developers` which were +changed in a year more recent than is listed. For example: +``` +// Copyright (c) - The Bitcoin Core developers +``` +will be updated to: +``` +// Copyright (c) - The Bitcoin Core developers +``` +where `` is obtained from the `git log` history. + +This subcommand also handles copyright headers that have only a single year. In +those cases: +``` +// Copyright (c) The Bitcoin Core developers +``` +will be updated to: +``` +// Copyright (c) - The Bitcoin Core developers +``` +where the update is appropriate. + +copyright\_header.py insert \ +------------------------------------ +Inserts a copyright header for `The Bitcoin Core developers` at the top of the +file in either Python or C++ style as determined by the file extension. If the +file is a Python file and it has `#!` starting the first line, the header is +inserted in the line below it. + +The copyright dates will be set to be `-` where +`` is according to the `git log` history. If +`` is equal to ``, it will be set as a single +year rather than two hyphenated years. + +If the file already has a copyright for `The Bitcoin Core developers`, the +script will exit. + +gen-manpages.py +=============== + +A small script to automatically create manpages in ../../doc/man by running the release binaries with the -help option. +This requires help2man which can be found at: https://www.gnu.org/software/help2man/ + +With in-tree builds this tool can be run from any directory within the +repostitory. To use this tool with out-of-tree builds set `BUILDDIR`. For +example: + +```bash +BUILDDIR=$PWD/build contrib/devtools/gen-manpages.py +``` + +gen-bitcoin-conf.sh +=================== + +Generates a bitcoin.conf file in `share/examples/` by parsing the output from `bitcoind --help`. This script is run during the +release process to include a bitcoin.conf with the release binaries and can also be run by users to generate a file locally. +When generating a file as part of the release process, make sure to commit the changes after running the script. + +With in-tree builds this tool can be run from any directory within the +repository. To use this tool with out-of-tree builds set `BUILDDIR`. For +example: + +```bash +BUILDDIR=$PWD/build contrib/devtools/gen-bitcoin-conf.sh +``` + +security-check.py and test-security-check.py +============================================ + +Perform basic security checks on a series of executables. -A small script to automate merging pull-requests securely and sign them with GPG. - -For example: - - ./github-merge.sh bitcoin/bitcoin 3077 - -(in any git repository) will help you merge pull request #3077 for the -bitcoin/bitcoin repository. - -What it does: -* Fetch master and the pull request. -* Locally construct a merge commit. -* Show the diff that merge results in. -* Ask you to verify the resulting source tree (so you can do a make -check or whatever). -* Ask you whether to GPG sign the merge commit. -* Ask you whether to push the result upstream. - -This means that there are no potential race conditions (where a -pullreq gets updated while you're reviewing it, but before you click -merge), and when using GPG signatures, that even a compromised github -couldn't mess with the sources. 
- -Setup ---------- -Configuring the github-merge tool for the bitcoin repository is done in the following way: - - git config githubmerge.repository bitcoin/bitcoin - git config githubmerge.testcmd "make -j4 check" (adapt to whatever you want to use for testing) - git config --global user.signingkey mykeyid (if you want to GPG sign) - -fix-copyright-headers.py -=========================== - -Every year newly updated files need to have its copyright headers updated to reflect the current year. -If you run this script from src/ it will automatically update the year on the copyright header for all -.cpp and .h files if these have a git commit from the current year. - -For example a file changed in 2014 (with 2014 being the current year): -```// Copyright (c) 2009-2013 The Bitcoin developers``` +symbol-check.py +=============== -would be changed to: -```// Copyright (c) 2009-2014 The Bitcoin developers``` +A script to check that release executables only contain +certain symbols and are only linked against allowed libraries. -symbol-check.py -================== +For Linux this means checking for allowed gcc, glibc and libstdc++ version symbols. +This makes sure they are still compatible with the minimum supported distribution versions. -A script to check that the (Linux) executables produced by gitian only contain -allowed gcc, glibc and libstdc++ version symbols. This makes sure they are -still compatible with the minimum supported Linux distribution versions. +For macOS and Windows we check that the executables are only linked against libraries we allow. -Example usage after a gitian build: +Example usage: - find ../gitian-builder/build -type f -executable | xargs python contrib/devtools/symbol-check.py + find ../path/to/executables -type f -executable | xargs python3 contrib/devtools/symbol-check.py -If only supported symbols are used the return value will be 0 and the output will be empty. +If no errors occur the return value will be 0 and the output will be empty. -If there are 'unsupported' symbols, the return value will be 1 a list like this will be printed: +If there are any errors the return value will be 1 and output like this will be printed: .../64/test_bitcoin: symbol memcpy from unsupported version GLIBC_2.14 .../64/test_bitcoin: symbol __fdelt_chk from unsupported version GLIBC_2.15 .../64/test_bitcoin: symbol std::out_of_range::~out_of_range() from unsupported version GLIBCXX_3.4.15 .../64/test_bitcoin: symbol _ZNSt8__detail15_List_nod from unsupported version GLIBCXX_3.4.15 -update-translations.py -======================= - -Run this script from the root of the repository to update all translations from transifex. -It will do the following automatically: +circular-dependencies.py +======================== -- fetch all translations -- post-process them into valid and committable format -- add missing translations to the build system (TODO) +Run this script from the root of the source tree (`src/`) to find circular dependencies in the source code. +This looks only at which files include other files, treating the `.cpp` and `.h` file as one unit. -See doc/translation-process.md for more information. 
+Example usage: + cd .../src + ../contrib/devtools/circular-dependencies.py {*,*/*,*/*/*}.{h,cpp} diff --git a/contrib/devtools/circular-dependencies.py b/contrib/devtools/circular-dependencies.py new file mode 100755 index 0000000000000..b1d9f2b7db23b --- /dev/null +++ b/contrib/devtools/circular-dependencies.py @@ -0,0 +1,92 @@ +#!/usr/bin/env python3 +# Copyright (c) 2018-2020 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. + +import sys +import re +from typing import Dict, List, Set + +MAPPING = { + 'core_read.cpp': 'core_io.cpp', + 'core_write.cpp': 'core_io.cpp', +} + +# Directories with header-based modules, where the assumption that .cpp files +# define functions and variables declared in corresponding .h files is +# incorrect. +HEADER_MODULE_PATHS = [ + 'interfaces/' +] + +def module_name(path): + if path in MAPPING: + path = MAPPING[path] + if any(path.startswith(dirpath) for dirpath in HEADER_MODULE_PATHS): + return path + if path.endswith(".h"): + return path[:-2] + if path.endswith(".c"): + return path[:-2] + if path.endswith(".cpp"): + return path[:-4] + return None + +files = dict() +deps: Dict[str, Set[str]] = dict() + +RE = re.compile("^#include <(.*)>") + +# Iterate over files, and create list of modules +for arg in sys.argv[1:]: + module = module_name(arg) + if module is None: + print("Ignoring file %s (does not constitute module)\n" % arg) + else: + files[arg] = module + deps[module] = set() + +# Iterate again, and build list of direct dependencies for each module +# TODO: implement support for multiple include directories +for arg in sorted(files.keys()): + module = files[arg] + with open(arg, 'r', encoding="utf8") as f: + for line in f: + match = RE.match(line) + if match: + include = match.group(1) + included_module = module_name(include) + if included_module is not None and included_module in deps and included_module != module: + deps[module].add(included_module) + +# Loop to find the shortest (remaining) circular dependency +have_cycle: bool = False +while True: + shortest_cycle = None + for module in sorted(deps.keys()): + # Build the transitive closure of dependencies of module + closure: Dict[str, List[str]] = dict() + for dep in deps[module]: + closure[dep] = [] + while True: + old_size = len(closure) + old_closure_keys = sorted(closure.keys()) + for src in old_closure_keys: + for dep in deps[src]: + if dep not in closure: + closure[dep] = closure[src] + [src] + if len(closure) == old_size: + break + # If module is in its own transitive closure, it's a circular dependency; check if it is the shortest + if module in closure and (shortest_cycle is None or len(closure[module]) + 1 < len(shortest_cycle)): + shortest_cycle = [module] + closure[module] + if shortest_cycle is None: + break + # We have the shortest circular dependency; report it + module = shortest_cycle[0] + print("Circular dependency: %s" % (" -> ".join(shortest_cycle + [module]))) + # And then break the dependency to avoid repeating in other cycles + deps[shortest_cycle[-1]] = deps[shortest_cycle[-1]] - set([module]) + have_cycle = True + +sys.exit(1 if have_cycle else 0) diff --git a/contrib/devtools/clang-format-diff.py b/contrib/devtools/clang-format-diff.py new file mode 100755 index 0000000000000..98eee67f43008 --- /dev/null +++ b/contrib/devtools/clang-format-diff.py @@ -0,0 +1,166 @@ +#!/usr/bin/env python3 +# +#===- clang-format-diff.py - ClangFormat Diff Reformatter ----*- 
python -*--===# +# +# The LLVM Compiler Infrastructure +# +# This file is distributed under the University of Illinois Open Source +# License. +# +# ============================================================ +# +# University of Illinois/NCSA +# Open Source License +# +# Copyright (c) 2007-2015 University of Illinois at Urbana-Champaign. +# All rights reserved. +# +# Developed by: +# +# LLVM Team +# +# University of Illinois at Urbana-Champaign +# +# http://llvm.org +# +# Permission is hereby granted, free of charge, to any person obtaining a copy of +# this software and associated documentation files (the "Software"), to deal with +# the Software without restriction, including without limitation the rights to +# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +# of the Software, and to permit persons to whom the Software is furnished to do +# so, subject to the following conditions: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimers. +# +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimers in the +# documentation and/or other materials provided with the distribution. +# +# * Neither the names of the LLVM Team, University of Illinois at +# Urbana-Champaign, nor the names of its contributors may be used to +# endorse or promote products derived from this Software without specific +# prior written permission. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE +# SOFTWARE. +# +# ============================================================ +# +#===------------------------------------------------------------------------===# + +r""" +ClangFormat Diff Reformatter +============================ + +This script reads input from a unified diff and reformats all the changed +lines. This is useful to reformat all the lines touched by a specific patch. +Example usage for git/svn users: + + git diff -U0 HEAD^ | clang-format-diff.py -p1 -i + svn diff --diff-cmd=diff -x-U0 | clang-format-diff.py -i + +""" + +import argparse +import difflib +import io +import re +import subprocess +import sys + + +# Change this to the full path if clang-format is not on the path. +binary = 'clang-format' + + +def main(): + parser = argparse.ArgumentParser(description= + 'Reformat changed lines in diff. 
Without -i ' + 'option just output the diff that would be ' + 'introduced.') + parser.add_argument('-i', action='store_true', default=False, + help='apply edits to files instead of displaying a diff') + parser.add_argument('-p', metavar='NUM', default=0, + help='strip the smallest prefix containing P slashes') + parser.add_argument('-regex', metavar='PATTERN', default=None, + help='custom pattern selecting file paths to reformat ' + '(case sensitive, overrides -iregex)') + parser.add_argument('-iregex', metavar='PATTERN', default= + r'.*\.(cpp|cc|c\+\+|cxx|c|cl|h|hpp|m|mm|inc|js|ts|proto' + r'|protodevel|java)', + help='custom pattern selecting file paths to reformat ' + '(case insensitive, overridden by -regex)') + parser.add_argument('-sort-includes', action='store_true', default=False, + help='let clang-format sort include blocks') + parser.add_argument('-v', '--verbose', action='store_true', + help='be more verbose, ineffective without -i') + args = parser.parse_args() + + # Extract changed lines for each file. + filename = None + lines_by_file = {} + for line in sys.stdin: + match = re.search(r'^\+\+\+\ (.*?/){%s}(\S*)' % args.p, line) + if match: + filename = match.group(2) + if filename is None: + continue + + if args.regex is not None: + if not re.match('^%s$' % args.regex, filename): + continue + else: + if not re.match('^%s$' % args.iregex, filename, re.IGNORECASE): + continue + + match = re.search(r'^@@.*\+(\d+)(,(\d+))?', line) + if match: + start_line = int(match.group(1)) + line_count = 1 + if match.group(3): + line_count = int(match.group(3)) + if line_count == 0: + continue + end_line = start_line + line_count - 1 + lines_by_file.setdefault(filename, []).extend( + ['-lines', str(start_line) + ':' + str(end_line)]) + + # Reformat files containing changes in place. + for filename, lines in lines_by_file.items(): + if args.i and args.verbose: + print('Formatting {}'.format(filename)) + command = [binary, filename] + if args.i: + command.append('-i') + if args.sort_includes: + command.append('-sort-includes') + command.extend(lines) + command.extend(['-style=file', '-fallback-style=none']) + p = subprocess.Popen(command, + stdout=subprocess.PIPE, + stderr=None, + stdin=subprocess.PIPE, + universal_newlines=True) + stdout, stderr = p.communicate() + if p.returncode != 0: + sys.exit(p.returncode) + + if not args.i: + with open(filename, encoding="utf8") as f: + code = f.readlines() + formatted_code = io.StringIO(stdout).readlines() + diff = difflib.unified_diff(code, formatted_code, + filename, filename, + '(before formatting)', '(after formatting)') + diff_string = ''.join(diff) + if len(diff_string) > 0: + sys.stdout.write(diff_string) + +if __name__ == '__main__': + main() diff --git a/contrib/devtools/copyright_header.py b/contrib/devtools/copyright_header.py new file mode 100755 index 0000000000000..e20eb4b0d2b45 --- /dev/null +++ b/contrib/devtools/copyright_header.py @@ -0,0 +1,605 @@ +#!/usr/bin/env python3 +# Copyright (c) 2016-2021 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+ +import re +import fnmatch +import sys +import subprocess +import datetime +import os + +################################################################################ +# file filtering +################################################################################ + +EXCLUDE = [ + # auto generated: + 'src/qt/bitcoinstrings.cpp', + 'src/chainparamsseeds.h', + # other external copyrights: + 'src/reverse_iterator.h', + 'src/test/fuzz/FuzzedDataProvider.h', + 'src/tinyformat.h', + 'src/bench/nanobench.h', + 'test/functional/test_framework/bignum.py', + # python init: + '*__init__.py', +] +EXCLUDE_COMPILED = re.compile('|'.join([fnmatch.translate(m) for m in EXCLUDE])) + +EXCLUDE_DIRS = [ + # git subtrees + "src/crypto/ctaes/", + "src/leveldb/", + "src/minisketch", + "src/secp256k1/", + "src/univalue/", + "src/crc32c/", +] + +INCLUDE = ['*.h', '*.cpp', '*.cc', '*.c', '*.mm', '*.py', '*.sh', '*.bash-completion'] +INCLUDE_COMPILED = re.compile('|'.join([fnmatch.translate(m) for m in INCLUDE])) + +def applies_to_file(filename): + for excluded_dir in EXCLUDE_DIRS: + if filename.startswith(excluded_dir): + return False + return ((EXCLUDE_COMPILED.match(filename) is None) and + (INCLUDE_COMPILED.match(filename) is not None)) + +################################################################################ +# obtain list of files in repo according to INCLUDE and EXCLUDE +################################################################################ + +GIT_LS_CMD = 'git ls-files --full-name'.split(' ') +GIT_TOPLEVEL_CMD = 'git rev-parse --show-toplevel'.split(' ') + +def call_git_ls(base_directory): + out = subprocess.check_output([*GIT_LS_CMD, base_directory]) + return [f for f in out.decode("utf-8").split('\n') if f != ''] + +def call_git_toplevel(): + "Returns the absolute path to the project root" + return subprocess.check_output(GIT_TOPLEVEL_CMD).strip().decode("utf-8") + +def get_filenames_to_examine(base_directory): + "Returns an array of absolute paths to any project files in the base_directory that pass the include/exclude filters" + root = call_git_toplevel() + filenames = call_git_ls(base_directory) + return sorted([os.path.join(root, filename) for filename in filenames if + applies_to_file(filename)]) + +################################################################################ +# define and compile regexes for the patterns we are looking for +################################################################################ + + +COPYRIGHT_WITH_C = r'Copyright \(c\)' +COPYRIGHT_WITHOUT_C = 'Copyright' +ANY_COPYRIGHT_STYLE = '(%s|%s)' % (COPYRIGHT_WITH_C, COPYRIGHT_WITHOUT_C) + +YEAR = "20[0-9][0-9]" +YEAR_RANGE = '(%s)(-%s)?' % (YEAR, YEAR) +YEAR_LIST = '(%s)(, %s)+' % (YEAR, YEAR) +ANY_YEAR_STYLE = '(%s|%s)' % (YEAR_RANGE, YEAR_LIST) +ANY_COPYRIGHT_STYLE_OR_YEAR_STYLE = ("%s %s" % (ANY_COPYRIGHT_STYLE, + ANY_YEAR_STYLE)) + +ANY_COPYRIGHT_COMPILED = re.compile(ANY_COPYRIGHT_STYLE_OR_YEAR_STYLE) + +def compile_copyright_regex(copyright_style, year_style, name): + return re.compile(r'%s %s,? %s( +\*)?\n' % (copyright_style, year_style, name)) + +EXPECTED_HOLDER_NAMES = [ + r"Satoshi Nakamoto", + r"The Bitcoin Core developers", + r"BitPay Inc\.", + r"University of Illinois at Urbana-Champaign\.", + r"Pieter Wuille", + r"Wladimir J\. 
van der Laan", + r"Jeff Garzik", + r"Jan-Klaas Kollhof", + r"ArtForz -- public domain half-a-node", + r"Intel Corporation ?", + r"The Zcash developers", + r"Jeremy Rubin", +] + +DOMINANT_STYLE_COMPILED = {} +YEAR_LIST_STYLE_COMPILED = {} +WITHOUT_C_STYLE_COMPILED = {} + +for holder_name in EXPECTED_HOLDER_NAMES: + DOMINANT_STYLE_COMPILED[holder_name] = ( + compile_copyright_regex(COPYRIGHT_WITH_C, YEAR_RANGE, holder_name)) + YEAR_LIST_STYLE_COMPILED[holder_name] = ( + compile_copyright_regex(COPYRIGHT_WITH_C, YEAR_LIST, holder_name)) + WITHOUT_C_STYLE_COMPILED[holder_name] = ( + compile_copyright_regex(COPYRIGHT_WITHOUT_C, ANY_YEAR_STYLE, + holder_name)) + +################################################################################ +# search file contents for copyright message of particular category +################################################################################ + +def get_count_of_copyrights_of_any_style_any_holder(contents): + return len(ANY_COPYRIGHT_COMPILED.findall(contents)) + +def file_has_dominant_style_copyright_for_holder(contents, holder_name): + match = DOMINANT_STYLE_COMPILED[holder_name].search(contents) + return match is not None + +def file_has_year_list_style_copyright_for_holder(contents, holder_name): + match = YEAR_LIST_STYLE_COMPILED[holder_name].search(contents) + return match is not None + +def file_has_without_c_style_copyright_for_holder(contents, holder_name): + match = WITHOUT_C_STYLE_COMPILED[holder_name].search(contents) + return match is not None + +################################################################################ +# get file info +################################################################################ + +def read_file(filename): + return open(filename, 'r', encoding="utf8").read() + +def gather_file_info(filename): + info = {} + info['filename'] = filename + c = read_file(filename) + info['contents'] = c + + info['all_copyrights'] = get_count_of_copyrights_of_any_style_any_holder(c) + + info['classified_copyrights'] = 0 + info['dominant_style'] = {} + info['year_list_style'] = {} + info['without_c_style'] = {} + for holder_name in EXPECTED_HOLDER_NAMES: + has_dominant_style = ( + file_has_dominant_style_copyright_for_holder(c, holder_name)) + has_year_list_style = ( + file_has_year_list_style_copyright_for_holder(c, holder_name)) + has_without_c_style = ( + file_has_without_c_style_copyright_for_holder(c, holder_name)) + info['dominant_style'][holder_name] = has_dominant_style + info['year_list_style'][holder_name] = has_year_list_style + info['without_c_style'][holder_name] = has_without_c_style + if has_dominant_style or has_year_list_style or has_without_c_style: + info['classified_copyrights'] = info['classified_copyrights'] + 1 + return info + +################################################################################ +# report execution +################################################################################ + +SEPARATOR = '-'.join(['' for _ in range(80)]) + +def print_filenames(filenames, verbose): + if not verbose: + return + for filename in filenames: + print("\t%s" % filename) + +def print_report(file_infos, verbose): + print(SEPARATOR) + examined = [i['filename'] for i in file_infos] + print("%d files examined according to INCLUDE and EXCLUDE fnmatch rules" % + len(examined)) + print_filenames(examined, verbose) + + print(SEPARATOR) + print('') + zero_copyrights = [i['filename'] for i in file_infos if + i['all_copyrights'] == 0] + print("%4d with zero copyrights" % len(zero_copyrights)) 
+ print_filenames(zero_copyrights, verbose) + one_copyright = [i['filename'] for i in file_infos if + i['all_copyrights'] == 1] + print("%4d with one copyright" % len(one_copyright)) + print_filenames(one_copyright, verbose) + two_copyrights = [i['filename'] for i in file_infos if + i['all_copyrights'] == 2] + print("%4d with two copyrights" % len(two_copyrights)) + print_filenames(two_copyrights, verbose) + three_copyrights = [i['filename'] for i in file_infos if + i['all_copyrights'] == 3] + print("%4d with three copyrights" % len(three_copyrights)) + print_filenames(three_copyrights, verbose) + four_or_more_copyrights = [i['filename'] for i in file_infos if + i['all_copyrights'] >= 4] + print("%4d with four or more copyrights" % len(four_or_more_copyrights)) + print_filenames(four_or_more_copyrights, verbose) + print('') + print(SEPARATOR) + print('Copyrights with dominant style:\ne.g. "Copyright (c)" and ' + '"" or "-":\n') + for holder_name in EXPECTED_HOLDER_NAMES: + dominant_style = [i['filename'] for i in file_infos if + i['dominant_style'][holder_name]] + if len(dominant_style) > 0: + print("%4d with '%s'" % (len(dominant_style), + holder_name.replace('\n', '\\n'))) + print_filenames(dominant_style, verbose) + print('') + print(SEPARATOR) + print('Copyrights with year list style:\ne.g. "Copyright (c)" and ' + '", , ...":\n') + for holder_name in EXPECTED_HOLDER_NAMES: + year_list_style = [i['filename'] for i in file_infos if + i['year_list_style'][holder_name]] + if len(year_list_style) > 0: + print("%4d with '%s'" % (len(year_list_style), + holder_name.replace('\n', '\\n'))) + print_filenames(year_list_style, verbose) + print('') + print(SEPARATOR) + print('Copyrights with no "(c)" style:\ne.g. "Copyright" and "" or ' + '"-":\n') + for holder_name in EXPECTED_HOLDER_NAMES: + without_c_style = [i['filename'] for i in file_infos if + i['without_c_style'][holder_name]] + if len(without_c_style) > 0: + print("%4d with '%s'" % (len(without_c_style), + holder_name.replace('\n', '\\n'))) + print_filenames(without_c_style, verbose) + + print('') + print(SEPARATOR) + + unclassified_copyrights = [i['filename'] for i in file_infos if + i['classified_copyrights'] < i['all_copyrights']] + print("%d with unexpected copyright holder names" % + len(unclassified_copyrights)) + print_filenames(unclassified_copyrights, verbose) + print(SEPARATOR) + +def exec_report(base_directory, verbose): + filenames = get_filenames_to_examine(base_directory) + file_infos = [gather_file_info(f) for f in filenames] + print_report(file_infos, verbose) + +################################################################################ +# report cmd +################################################################################ + +REPORT_USAGE = """ +Produces a report of all copyright header notices found inside the source files +of a repository. + +Usage: + $ ./copyright_header.py report [verbose] + +Arguments: + - The base directory of a bitcoin source code repository. + [verbose] - Includes a list of every file of each subcategory in the report. 
+""" + +def report_cmd(argv): + if len(argv) == 2: + sys.exit(REPORT_USAGE) + + base_directory = argv[2] + if not os.path.exists(base_directory): + sys.exit("*** bad : %s" % base_directory) + + if len(argv) == 3: + verbose = False + elif argv[3] == 'verbose': + verbose = True + else: + sys.exit("*** unknown argument: %s" % argv[2]) + + exec_report(base_directory, verbose) + +################################################################################ +# query git for year of last change +################################################################################ + +GIT_LOG_CMD = "git log --pretty=format:%%ai %s" + +def call_git_log(filename): + out = subprocess.check_output((GIT_LOG_CMD % filename).split(' ')) + return out.decode("utf-8").split('\n') + +def get_git_change_years(filename): + git_log_lines = call_git_log(filename) + if len(git_log_lines) == 0: + return [datetime.date.today().year] + # timestamp is in ISO 8601 format. e.g. "2016-09-05 14:25:32 -0600" + return [line.split(' ')[0].split('-')[0] for line in git_log_lines] + +def get_most_recent_git_change_year(filename): + return max(get_git_change_years(filename)) + +################################################################################ +# read and write to file +################################################################################ + +def read_file_lines(filename): + with open(filename, 'r', encoding="utf8") as f: + file_lines = f.readlines() + return file_lines + +def write_file_lines(filename, file_lines): + with open(filename, 'w', encoding="utf8") as f: + f.write(''.join(file_lines)) + +################################################################################ +# update header years execution +################################################################################ + +COPYRIGHT = r'Copyright \(c\)' +YEAR = "20[0-9][0-9]" +YEAR_RANGE = '(%s)(-%s)?' % (YEAR, YEAR) +HOLDER = 'The Bitcoin Core developers' +UPDATEABLE_LINE_COMPILED = re.compile(' '.join([COPYRIGHT, YEAR_RANGE, HOLDER])) + +def get_updatable_copyright_line(file_lines): + index = 0 + for line in file_lines: + if UPDATEABLE_LINE_COMPILED.search(line) is not None: + return index, line + index = index + 1 + return None, None + +def parse_year_range(year_range): + year_split = year_range.split('-') + start_year = year_split[0] + if len(year_split) == 1: + return start_year, start_year + return start_year, year_split[1] + +def year_range_to_str(start_year, end_year): + if start_year == end_year: + return start_year + return "%s-%s" % (start_year, end_year) + +def create_updated_copyright_line(line, last_git_change_year): + copyright_splitter = 'Copyright (c) ' + copyright_split = line.split(copyright_splitter) + # Preserve characters on line that are ahead of the start of the copyright + # notice - they are part of the comment block and vary from file-to-file. 
+ before_copyright = copyright_split[0] + after_copyright = copyright_split[1] + + space_split = after_copyright.split(' ') + year_range = space_split[0] + start_year, end_year = parse_year_range(year_range) + if end_year >= last_git_change_year: + return line + return (before_copyright + copyright_splitter + + year_range_to_str(start_year, last_git_change_year) + ' ' + + ' '.join(space_split[1:])) + +def update_updatable_copyright(filename): + file_lines = read_file_lines(filename) + index, line = get_updatable_copyright_line(file_lines) + if not line: + print_file_action_message(filename, "No updatable copyright.") + return + last_git_change_year = get_most_recent_git_change_year(filename) + new_line = create_updated_copyright_line(line, last_git_change_year) + if line == new_line: + print_file_action_message(filename, "Copyright up-to-date.") + return + file_lines[index] = new_line + write_file_lines(filename, file_lines) + print_file_action_message(filename, + "Copyright updated! -> %s" % last_git_change_year) + +def exec_update_header_year(base_directory): + for filename in get_filenames_to_examine(base_directory): + update_updatable_copyright(filename) + +################################################################################ +# update cmd +################################################################################ + +UPDATE_USAGE = """ +Updates all the copyright headers of "The Bitcoin Core developers" which were +changed in a year more recent than is listed. For example: + +// Copyright (c) - The Bitcoin Core developers + +will be updated to: + +// Copyright (c) - The Bitcoin Core developers + +where is obtained from the 'git log' history. + +This subcommand also handles copyright headers that have only a single year. In those cases: + +// Copyright (c) The Bitcoin Core developers + +will be updated to: + +// Copyright (c) - The Bitcoin Core developers + +where the update is appropriate. + +Usage: + $ ./copyright_header.py update + +Arguments: + - The base directory of a bitcoin source code repository. +""" + +def print_file_action_message(filename, action): + print("%-52s %s" % (filename, action)) + +def update_cmd(argv): + if len(argv) != 3: + sys.exit(UPDATE_USAGE) + + base_directory = argv[2] + if not os.path.exists(base_directory): + sys.exit("*** bad base_directory: %s" % base_directory) + exec_update_header_year(base_directory) + +################################################################################ +# inserted copyright header format +################################################################################ + +def get_header_lines(header, start_year, end_year): + lines = header.split('\n')[1:-1] + lines[0] = lines[0] % year_range_to_str(start_year, end_year) + return [line + '\n' for line in lines] + +CPP_HEADER = ''' +// Copyright (c) %s The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. +''' + +def get_cpp_header_lines_to_insert(start_year, end_year): + return reversed(get_header_lines(CPP_HEADER, start_year, end_year)) + +SCRIPT_HEADER = ''' +# Copyright (c) %s The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+''' + +def get_script_header_lines_to_insert(start_year, end_year): + return reversed(get_header_lines(SCRIPT_HEADER, start_year, end_year)) + +################################################################################ +# query git for year of last change +################################################################################ + +def get_git_change_year_range(filename): + years = get_git_change_years(filename) + return min(years), max(years) + +################################################################################ +# check for existing core copyright +################################################################################ + +def file_already_has_core_copyright(file_lines): + index, _ = get_updatable_copyright_line(file_lines) + return index is not None + +################################################################################ +# insert header execution +################################################################################ + +def file_has_hashbang(file_lines): + if len(file_lines) < 1: + return False + if len(file_lines[0]) <= 2: + return False + return file_lines[0][:2] == '#!' + +def insert_script_header(filename, file_lines, start_year, end_year): + if file_has_hashbang(file_lines): + insert_idx = 1 + else: + insert_idx = 0 + header_lines = get_script_header_lines_to_insert(start_year, end_year) + for line in header_lines: + file_lines.insert(insert_idx, line) + write_file_lines(filename, file_lines) + +def insert_cpp_header(filename, file_lines, start_year, end_year): + file_lines.insert(0, '\n') + header_lines = get_cpp_header_lines_to_insert(start_year, end_year) + for line in header_lines: + file_lines.insert(0, line) + write_file_lines(filename, file_lines) + +def exec_insert_header(filename, style): + file_lines = read_file_lines(filename) + if file_already_has_core_copyright(file_lines): + sys.exit('*** %s already has a copyright by The Bitcoin Core developers' + % (filename)) + start_year, end_year = get_git_change_year_range(filename) + if style in ['python', 'shell']: + insert_script_header(filename, file_lines, start_year, end_year) + else: + insert_cpp_header(filename, file_lines, start_year, end_year) + +################################################################################ +# insert cmd +################################################################################ + +INSERT_USAGE = """ +Inserts a copyright header for "The Bitcoin Core developers" at the top of the +file in either Python or C++ style as determined by the file extension. If the +file is a Python file and it has a '#!' starting the first line, the header is +inserted in the line below it. + +The copyright dates will be set to be: + +"-" + +where is according to the 'git log' history. If + is equal to , the date will be set to be: + +"" + +If the file already has a copyright for "The Bitcoin Core developers", the +script will exit. + +Usage: + $ ./copyright_header.py insert + +Arguments: + - A source file in the bitcoin repository. 
+""" + +def insert_cmd(argv): + if len(argv) != 3: + sys.exit(INSERT_USAGE) + + filename = argv[2] + if not os.path.isfile(filename): + sys.exit("*** bad filename: %s" % filename) + _, extension = os.path.splitext(filename) + if extension not in ['.h', '.cpp', '.cc', '.c', '.py', '.sh']: + sys.exit("*** cannot insert for file extension %s" % extension) + + if extension == '.py': + style = 'python' + elif extension == '.sh': + style = 'shell' + else: + style = 'cpp' + exec_insert_header(filename, style) + +################################################################################ +# UI +################################################################################ + +USAGE = """ +copyright_header.py - utilities for managing copyright headers of 'The Bitcoin +Core developers' in repository source files. + +Usage: + $ ./copyright_header + +Subcommands: + report + update + insert + +To see subcommand usage, run them without arguments. +""" + +SUBCOMMANDS = ['report', 'update', 'insert'] + +if __name__ == "__main__": + if len(sys.argv) == 1: + sys.exit(USAGE) + subcommand = sys.argv[1] + if subcommand not in SUBCOMMANDS: + sys.exit(USAGE) + if subcommand == 'report': + report_cmd(sys.argv) + elif subcommand == 'update': + update_cmd(sys.argv) + elif subcommand == 'insert': + insert_cmd(sys.argv) diff --git a/contrib/devtools/fix-copyright-headers.py b/contrib/devtools/fix-copyright-headers.py deleted file mode 100755 index 52fdc99144b47..0000000000000 --- a/contrib/devtools/fix-copyright-headers.py +++ /dev/null @@ -1,53 +0,0 @@ -#!/usr/bin/env python -''' -Run this script inside of src/ and it will look for all the files -that were changed this year that still have the last year in the -copyright headers, and it will fix the headers on that file using -a perl regex one liner. - -For example: if it finds something like this and we're in 2014 - -// Copyright (c) 2009-2013 The Bitcoin developers - -it will change it to - -// Copyright (c) 2009-2014 The Bitcoin developers - -It will do this for all the files in the folder and its children. - -Author: @gubatron -''' -import os -import time - -year = time.gmtime()[0] -last_year = year - 1 -command = "perl -pi -e 's/%s The Bitcoin/%s The Bitcoin/' %s" -listFilesCommand = "find . | grep %s" - -extensions = [".cpp",".h"] - -def getLastGitModifiedDate(filePath): - gitGetLastCommitDateCommand = "git log " + filePath +" | grep Date | head -n 1" - p = os.popen(gitGetLastCommitDateCommand) - result = "" - for l in p: - result = l - break - result = result.replace("\n","") - return result - -n=1 -for extension in extensions: - foundFiles = os.popen(listFilesCommand % extension) - for filePath in foundFiles: - filePath = filePath[1:-1] - if filePath.endswith(extension): - filePath = os.getcwd() + filePath - modifiedTime = getLastGitModifiedDate(filePath) - if len(modifiedTime) > 0 and str(year) in modifiedTime: - print n,"Last Git Modified: ", modifiedTime, " - ", filePath - os.popen(command % (last_year,year,filePath)) - n = n + 1 - - diff --git a/contrib/devtools/gen-bitcoin-conf.sh b/contrib/devtools/gen-bitcoin-conf.sh new file mode 100755 index 0000000000000..2ebbd42022300 --- /dev/null +++ b/contrib/devtools/gen-bitcoin-conf.sh @@ -0,0 +1,83 @@ +#!/usr/bin/env bash +# Copyright (c) 2021 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. 
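The report, update and insert usage blocks above describe the three copyright_header.py subcommands. A usage sketch with illustrative paths (the file name passed to insert is an assumption, not taken from the patch):

  $ ./contrib/devtools/copyright_header.py report . verbose
  $ ./contrib/devtools/copyright_header.py update .
  $ ./contrib/devtools/copyright_header.py insert src/foo.cpp

report prints per-holder statistics, update bumps the end year of "The Bitcoin Core developers" ranges to the year of the last git change, and insert prepends a header in C++ or script style depending on the file extension.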
+ +export LC_ALL=C +TOPDIR=${TOPDIR:-$(git rev-parse --show-toplevel)} +BUILDDIR=${BUILDDIR:-$TOPDIR} +BINDIR=${BINDIR:-$BUILDDIR/src} +BITCOIND=${BITCOIND:-$BINDIR/bitcoind} +SHARE_EXAMPLES_DIR=${SHARE_EXAMPLES_DIR:-$TOPDIR/share/examples} +EXAMPLE_CONF_FILE=${EXAMPLE_CONF_FILE:-$SHARE_EXAMPLES_DIR/bitcoin.conf} + +[ ! -x "$BITCOIND" ] && echo "$BITCOIND not found or not executable." && exit 1 + +DIRTY="" +VERSION_OUTPUT=$($BITCOIND --version) +if [[ $VERSION_OUTPUT == *"dirty"* ]]; then + DIRTY="${DIRTY}${BITCOIND}\n" +fi + +if [ -n "$DIRTY" ] +then + echo -e "WARNING: $BITCOIND was built from a dirty tree.\n" + echo -e "To safely generate a bitcoin.conf file, please commit your changes to $BITCOIND, rebuild, then run this script again.\n" +fi + +echo 'Generating example bitcoin.conf file in share/examples/' + +# create the directory, if it doesn't exist +mkdir -p "${SHARE_EXAMPLES_DIR}" + +# create the header text +cat > "${EXAMPLE_CONF_FILE}" << 'EOF' +## +## bitcoin.conf configuration file. +## Generated by contrib/devtools/gen-bitcoin-conf.sh. +## +## Lines beginning with # are comments. +## All possible configuration options are provided. To use, copy this file +## to your data directory (default or specified by -datadir), uncomment +## options you would like to change, and save the file. +## + + +### Options +EOF + +# parse the output from bitcoind --help +# adding newlines is a bit funky to ensure portability for BSD +# see here for more details: https://stackoverflow.com/a/24575385 +${BITCOIND} --help \ + | sed '1,/Print this help message and exit/d' \ + | sed -E 's/^[[:space:]]{2}\-/#/' \ + | sed -E 's/^[[:space:]]{7}/# /' \ + | sed -E '/[=[:space:]]/!s/#.*$/&=1/' \ + | awk '/^#[a-z]/{x=$0;next}{if (NF==0) print x"\n",x="";else print}' \ + | sed 's,\(^[[:upper:]].*\)\:$,\ +### \1,' \ + | sed 's/[[:space:]]*$//' >> "${EXAMPLE_CONF_FILE}" + +# create the footer text +cat >> "${EXAMPLE_CONF_FILE}" << 'EOF' + +# [Sections] +# Most options will apply to all networks. To confine an option to a specific +# network, add it under the relevant section below. +# +# Note: If not specified under a network section, the options addnode, connect, +# port, bind, rpcport, rpcbind, and wallet will only apply to mainnet. + +# Options for mainnet +[main] + +# Options for testnet +[test] + +# Options for signet +[signet] + +# Options for regtest +[regtest] +EOF diff --git a/contrib/devtools/gen-manpages.py b/contrib/devtools/gen-manpages.py new file mode 100755 index 0000000000000..26612cc444dd2 --- /dev/null +++ b/contrib/devtools/gen-manpages.py @@ -0,0 +1,71 @@ +#!/usr/bin/env python3 +# Copyright (c) 2022 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +import os +import subprocess +import sys +import tempfile + +BINARIES = [ +'src/bitcoind', +'src/bitcoin-cli', +'src/bitcoin-tx', +'src/bitcoin-wallet', +'src/bitcoin-util', +'src/qt/bitcoin-qt', +] + +# Paths to external utilities. +git = os.getenv('GIT', 'git') +help2man = os.getenv('HELP2MAN', 'help2man') + +# If not otherwise specified, get top directory from git. +topdir = os.getenv('TOPDIR') +if not topdir: + r = subprocess.run([git, 'rev-parse', '--show-toplevel'], stdout=subprocess.PIPE, check=True, universal_newlines=True) + topdir = r.stdout.rstrip() + +# Get input and output directories. 
+builddir = os.getenv('BUILDDIR', topdir) +mandir = os.getenv('MANDIR', os.path.join(topdir, 'doc/man')) + +# Verify that all the required binaries are usable, and extract copyright +# message in a first pass. +versions = [] +for relpath in BINARIES: + abspath = os.path.join(builddir, relpath) + try: + r = subprocess.run([abspath, '--version'], stdout=subprocess.PIPE, universal_newlines=True) + except IOError: + print(f'{abspath} not found or not an executable', file=sys.stderr) + sys.exit(1) + # take first line (which must contain version) + verstr = r.stdout.splitlines()[0] + # last word of line is the actual version e.g. v22.99.0-5c6b3d5b3508 + verstr = verstr.split()[-1] + assert verstr.startswith('v') + # remaining lines are copyright + copyright = r.stdout.split('\n')[1:] + assert copyright[0].startswith('Copyright (C)') + + versions.append((abspath, verstr, copyright)) + +if any(verstr.endswith('-dirty') for (_, verstr, _) in versions): + print("WARNING: Binaries were built from a dirty tree.") + print('man pages generated from dirty binaries should NOT be committed.') + print('To properly generate man pages, please commit your changes (or discard them), rebuild, then run this script again.') + print() + +with tempfile.NamedTemporaryFile('w', suffix='.h2m') as footer: + # Create copyright footer, and write it to a temporary include file. + # Copyright is the same for all binaries, so just use the first. + footer.write('[COPYRIGHT]\n') + footer.write('\n'.join(versions[0][2]).strip()) + footer.flush() + + # Call the binaries through help2man to produce a manual page for each of them. + for (abspath, verstr, _) in versions: + outname = os.path.join(mandir, os.path.basename(abspath) + '.1') + print(f'Generating {outname}…') + subprocess.run([help2man, '-N', '--version-string=' + verstr, '--include=' + footer.name, '-o', outname, abspath], check=True) diff --git a/contrib/devtools/github-merge.sh b/contrib/devtools/github-merge.sh deleted file mode 100755 index 6f68496ed86b8..0000000000000 --- a/contrib/devtools/github-merge.sh +++ /dev/null @@ -1,176 +0,0 @@ -#!/bin/bash - -# This script will locally construct a merge commit for a pull request on a -# github repository, inspect it, sign it and optionally push it. - -# The following temporary branches are created/overwritten and deleted: -# * pull/$PULL/base (the current master we're merging onto) -# * pull/$PULL/head (the current state of the remote pull request) -# * pull/$PULL/merge (github's merge) -# * pull/$PULL/local-merge (our merge) - -# In case of a clean merge that is accepted by the user, the local branch with -# name $BRANCH is overwritten with the merged result, and optionally pushed. - -REPO="$(git config --get githubmerge.repository)" -if [[ "d$REPO" == "d" ]]; then - echo "ERROR: No repository configured. 
Use this command to set:" >&2 - echo "git config githubmerge.repository /" >&2 - echo "In addition, you can set the following variables:" >&2 - echo "- githubmerge.host (default git@github.com)" >&2 - echo "- githubmerge.branch (default master)" >&2 - echo "- githubmerge.testcmd (default none)" >&2 - exit 1 -fi - -HOST="$(git config --get githubmerge.host)" -if [[ "d$HOST" == "d" ]]; then - HOST="git@github.com" -fi - -BRANCH="$(git config --get githubmerge.branch)" -if [[ "d$BRANCH" == "d" ]]; then - BRANCH="master" -fi - -TESTCMD="$(git config --get githubmerge.testcmd)" - -PULL="$1" - -if [[ "d$PULL" == "d" ]]; then - echo "Usage: $0 pullnumber [branch]" >&2 - exit 2 -fi - -if [[ "d$2" != "d" ]]; then - BRANCH="$2" -fi - -# Initialize source branches. -git checkout -q "$BRANCH" -if git fetch -q "$HOST":"$REPO" "+refs/pull/$PULL/*:refs/heads/pull/$PULL/*"; then - if ! git log -q -1 "refs/heads/pull/$PULL/head" >/dev/null 2>&1; then - echo "ERROR: Cannot find head of pull request #$PULL on $HOST:$REPO." >&2 - exit 3 - fi - if ! git log -q -1 "refs/heads/pull/$PULL/merge" >/dev/null 2>&1; then - echo "ERROR: Cannot find merge of pull request #$PULL on $HOST:$REPO." >&2 - exit 3 - fi -else - echo "ERROR: Cannot find pull request #$PULL on $HOST:$REPO." >&2 - exit 3 -fi -if git fetch -q "$HOST":"$REPO" +refs/heads/"$BRANCH":refs/heads/pull/"$PULL"/base; then - true -else - echo "ERROR: Cannot find branch $BRANCH on $HOST:$REPO." >&2 - exit 3 -fi -git checkout -q pull/"$PULL"/base -git branch -q -D pull/"$PULL"/local-merge 2>/dev/null -git checkout -q -b pull/"$PULL"/local-merge -TMPDIR="$(mktemp -d -t ghmXXXXX)" - -function cleanup() { - git checkout -q "$BRANCH" - git branch -q -D pull/"$PULL"/head 2>/dev/null - git branch -q -D pull/"$PULL"/base 2>/dev/null - git branch -q -D pull/"$PULL"/merge 2>/dev/null - git branch -q -D pull/"$PULL"/local-merge 2>/dev/null - rm -rf "$TMPDIR" -} - -# Create unsigned merge commit. -( - echo "Merge pull request #$PULL" - echo "" - git log --no-merges --topo-order --pretty='format:%h %s (%an)' pull/"$PULL"/base..pull/"$PULL"/head -)>"$TMPDIR/message" -if git merge -q --commit --no-edit --no-ff -m "$(<"$TMPDIR/message")" pull/"$PULL"/head; then - if [ "d$(git log --pretty='format:%s' -n 1)" != "dMerge pull request #$PULL" ]; then - echo "ERROR: Creating merge failed (already merged?)." >&2 - cleanup - exit 4 - fi -else - echo "ERROR: Cannot be merged cleanly." >&2 - git merge --abort - cleanup - exit 4 -fi - -# Run test command if configured. -if [[ "d$TESTCMD" != "d" ]]; then - # Go up to the repository's root. - while [ ! -d .git ]; do cd ..; done - if ! $TESTCMD; then - echo "ERROR: Running $TESTCMD failed." >&2 - cleanup - exit 5 - fi - # Show the created merge. - git diff pull/"$PULL"/merge..pull/"$PULL"/local-merge >"$TMPDIR"/diff - git diff pull/"$PULL"/base..pull/"$PULL"/local-merge - if [[ "$(<"$TMPDIR"/diff)" != "" ]]; then - echo "WARNING: merge differs from github!" >&2 - read -p "Type 'ignore' to continue. " -r >&2 - if [[ "d$REPLY" =~ ^d[iI][gG][nN][oO][rR][eE]$ ]]; then - echo "Difference with github ignored." >&2 - else - cleanup - exit 6 - fi - fi - read -p "Press 'd' to accept the diff. " -n 1 -r >&2 - echo - if [[ "d$REPLY" =~ ^d[dD]$ ]]; then - echo "Diff accepted." >&2 - else - echo "ERROR: Diff rejected." >&2 - cleanup - exit 6 - fi -else - # Verify the result. - echo "Dropping you on a shell so you can try building/testing the merged source." >&2 - echo "Run 'git diff HEAD~' to show the changes being merged." 
>&2 - echo "Type 'exit' when done." >&2 - if [[ -f /etc/debian_version ]]; then # Show pull number in prompt on Debian default prompt - export debian_chroot="$PULL" - fi - bash -i - read -p "Press 'm' to accept the merge. " -n 1 -r >&2 - echo - if [[ "d$REPLY" =~ ^d[Mm]$ ]]; then - echo "Merge accepted." >&2 - else - echo "ERROR: Merge rejected." >&2 - cleanup - exit 7 - fi -fi - -# Sign the merge commit. -read -p "Press 's' to sign off on the merge. " -n 1 -r >&2 -echo -if [[ "d$REPLY" =~ ^d[Ss]$ ]]; then - if [[ "$(git config --get user.signingkey)" == "" ]]; then - echo "WARNING: No GPG signing key set, not signing. Set one using:" >&2 - echo "git config --global user.signingkey " >&2 - git commit -q --signoff --amend --no-edit - else - git commit -q --gpg-sign --amend --no-edit - fi -fi - -# Clean up temporary branches, and put the result in $BRANCH. -git checkout -q "$BRANCH" -git reset -q --hard pull/"$PULL"/local-merge -cleanup - -# Push the result. -read -p "Type 'push' to push the result to $HOST:$REPO, branch $BRANCH. " -r >&2 -if [[ "d$REPLY" =~ ^d[Pp][Uu][Ss][Hh]$ ]]; then - git push "$HOST":"$REPO" refs/heads/"$BRANCH" -fi diff --git a/contrib/devtools/iwyu/bitcoin.core.imp b/contrib/devtools/iwyu/bitcoin.core.imp new file mode 100644 index 0000000000000..ce7786f58c24b --- /dev/null +++ b/contrib/devtools/iwyu/bitcoin.core.imp @@ -0,0 +1,6 @@ +# Fixups / upstreamed changes +[ + { include: [ "", private, "", public ] }, + { include: [ "", private, "", public ] }, + { include: [ "", private, "", public ] }, +] diff --git a/contrib/devtools/security-check.py b/contrib/devtools/security-check.py new file mode 100755 index 0000000000000..05c0af029ec86 --- /dev/null +++ b/contrib/devtools/security-check.py @@ -0,0 +1,263 @@ +#!/usr/bin/env python3 +# Copyright (c) 2015-2021 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +''' +Perform basic security checks on a series of executables. +Exit status will be 0 if successful, and the program will be silent. +Otherwise the exit status will be 1 and it will log which executables failed which checks. +''' +import sys +from typing import List + +import lief #type:ignore + +def check_ELF_RELRO(binary) -> bool: + ''' + Check for read-only relocations. + GNU_RELRO program header must exist + Dynamic section must have BIND_NOW flag + ''' + have_gnu_relro = False + for segment in binary.segments: + # Note: not checking p_flags == PF_R: here as linkers set the permission differently + # This does not affect security: the permission flags of the GNU_RELRO program + # header are ignored, the PT_LOAD header determines the effective permissions. + # However, the dynamic linker need to write to this area so these are RW. + # Glibc itself takes care of mprotecting this area R after relocations are finished. 
+ # See also https://marc.info/?l=binutils&m=1498883354122353 + if segment.type == lief.ELF.SEGMENT_TYPES.GNU_RELRO: + have_gnu_relro = True + + have_bindnow = False + try: + flags = binary.get(lief.ELF.DYNAMIC_TAGS.FLAGS) + if flags.value & lief.ELF.DYNAMIC_FLAGS.BIND_NOW: + have_bindnow = True + except: + have_bindnow = False + + return have_gnu_relro and have_bindnow + +def check_ELF_Canary(binary) -> bool: + ''' + Check for use of stack canary + ''' + return binary.has_symbol('__stack_chk_fail') + +def check_ELF_separate_code(binary): + ''' + Check that sections are appropriately separated in virtual memory, + based on their permissions. This checks for missing -Wl,-z,separate-code + and potentially other problems. + ''' + R = lief.ELF.SEGMENT_FLAGS.R + W = lief.ELF.SEGMENT_FLAGS.W + E = lief.ELF.SEGMENT_FLAGS.X + EXPECTED_FLAGS = { + # Read + execute + '.init': R | E, + '.plt': R | E, + '.plt.got': R | E, + '.plt.sec': R | E, + '.text': R | E, + '.fini': R | E, + # Read-only data + '.interp': R, + '.note.gnu.property': R, + '.note.gnu.build-id': R, + '.note.ABI-tag': R, + '.gnu.hash': R, + '.dynsym': R, + '.dynstr': R, + '.gnu.version': R, + '.gnu.version_r': R, + '.rela.dyn': R, + '.rela.plt': R, + '.rodata': R, + '.eh_frame_hdr': R, + '.eh_frame': R, + '.qtmetadata': R, + '.gcc_except_table': R, + '.stapsdt.base': R, + # Writable data + '.init_array': R | W, + '.fini_array': R | W, + '.dynamic': R | W, + '.got': R | W, + '.data': R | W, + '.bss': R | W, + } + if binary.header.machine_type == lief.ELF.ARCH.PPC64: + # .plt is RW on ppc64 even with separate-code + EXPECTED_FLAGS['.plt'] = R | W + # For all LOAD program headers get mapping to the list of sections, + # and for each section, remember the flags of the associated program header. + flags_per_section = {} + for segment in binary.segments: + if segment.type == lief.ELF.SEGMENT_TYPES.LOAD: + for section in segment.sections: + flags_per_section[section.name] = segment.flags + # Spot-check ELF LOAD program header flags per section + # If these sections exist, check them against the expected R/W/E flags + for (section, flags) in flags_per_section.items(): + if section in EXPECTED_FLAGS: + if int(EXPECTED_FLAGS[section]) != int(flags): + return False + return True + +def check_ELF_control_flow(binary) -> bool: + ''' + Check for control flow instrumentation + ''' + main = binary.get_function_address('main') + content = binary.get_content_from_virtual_address(main, 4, lief.Binary.VA_TYPES.AUTO) + + if content == [243, 15, 30, 250]: # endbr64 + return True + return False + +def check_PE_DYNAMIC_BASE(binary) -> bool: + '''PIE: DllCharacteristics bit 0x40 signifies dynamicbase (ASLR)''' + return lief.PE.DLL_CHARACTERISTICS.DYNAMIC_BASE in binary.optional_header.dll_characteristics_lists + +# Must support high-entropy 64-bit address space layout randomization +# in addition to DYNAMIC_BASE to have secure ASLR. +def check_PE_HIGH_ENTROPY_VA(binary) -> bool: + '''PIE: DllCharacteristics bit 0x20 signifies high-entropy ASLR''' + return lief.PE.DLL_CHARACTERISTICS.HIGH_ENTROPY_VA in binary.optional_header.dll_characteristics_lists + +def check_PE_RELOC_SECTION(binary) -> bool: + '''Check for a reloc section. 
This is required for functional ASLR.''' + return binary.has_relocations + +def check_PE_control_flow(binary) -> bool: + ''' + Check for control flow instrumentation + ''' + main = binary.get_symbol('main').value + + section_addr = binary.section_from_rva(main).virtual_address + virtual_address = binary.optional_header.imagebase + section_addr + main + + content = binary.get_content_from_virtual_address(virtual_address, 4, lief.Binary.VA_TYPES.VA) + + if content == [243, 15, 30, 250]: # endbr64 + return True + return False + +def check_MACHO_NOUNDEFS(binary) -> bool: + ''' + Check for no undefined references. + ''' + return binary.header.has(lief.MachO.HEADER_FLAGS.NOUNDEFS) + +def check_MACHO_LAZY_BINDINGS(binary) -> bool: + ''' + Check for no lazy bindings. + We don't use or check for MH_BINDATLOAD. See #18295. + ''' + return binary.dyld_info.lazy_bind == (0,0) + +def check_MACHO_Canary(binary) -> bool: + ''' + Check for use of stack canary + ''' + return binary.has_symbol('___stack_chk_fail') + +def check_PIE(binary) -> bool: + ''' + Check for position independent executable (PIE), + allowing for address space randomization. + ''' + return binary.is_pie + +def check_NX(binary) -> bool: + ''' + Check for no stack execution + ''' + return binary.has_nx + +def check_MACHO_control_flow(binary) -> bool: + ''' + Check for control flow instrumentation + ''' + content = binary.get_content_from_virtual_address(binary.entrypoint, 4, lief.Binary.VA_TYPES.AUTO) + + if content == [243, 15, 30, 250]: # endbr64 + return True + return False + +BASE_ELF = [ + ('PIE', check_PIE), + ('NX', check_NX), + ('RELRO', check_ELF_RELRO), + ('Canary', check_ELF_Canary), + ('separate_code', check_ELF_separate_code), +] + +BASE_PE = [ + ('PIE', check_PIE), + ('DYNAMIC_BASE', check_PE_DYNAMIC_BASE), + ('HIGH_ENTROPY_VA', check_PE_HIGH_ENTROPY_VA), + ('NX', check_NX), + ('RELOC_SECTION', check_PE_RELOC_SECTION), + ('CONTROL_FLOW', check_PE_control_flow), +] + +BASE_MACHO = [ + ('NOUNDEFS', check_MACHO_NOUNDEFS), + ('LAZY_BINDINGS', check_MACHO_LAZY_BINDINGS), + ('Canary', check_MACHO_Canary), +] + +CHECKS = { + lief.EXE_FORMATS.ELF: { + lief.ARCHITECTURES.X86: BASE_ELF + [('CONTROL_FLOW', check_ELF_control_flow)], + lief.ARCHITECTURES.ARM: BASE_ELF, + lief.ARCHITECTURES.ARM64: BASE_ELF, + lief.ARCHITECTURES.PPC: BASE_ELF, + lief.ARCHITECTURES.RISCV: BASE_ELF, + }, + lief.EXE_FORMATS.PE: { + lief.ARCHITECTURES.X86: BASE_PE, + }, + lief.EXE_FORMATS.MACHO: { + lief.ARCHITECTURES.X86: BASE_MACHO + [('PIE', check_PIE), + ('NX', check_NX), + ('CONTROL_FLOW', check_MACHO_control_flow)], + lief.ARCHITECTURES.ARM64: BASE_MACHO, + } +} + +if __name__ == '__main__': + retval: int = 0 + for filename in sys.argv[1:]: + try: + binary = lief.parse(filename) + etype = binary.format + arch = binary.abstract.header.architecture + binary.concrete + + if etype == lief.EXE_FORMATS.UNKNOWN: + print(f'{filename}: unknown executable format') + retval = 1 + continue + + if arch == lief.ARCHITECTURES.NONE: + print(f'{filename}: unknown architecture') + retval = 1 + continue + + failed: List[str] = [] + for (name, func) in CHECKS[etype][arch]: + if not func(binary): + failed.append(name) + if failed: + print(f'{filename}: failed {" ".join(failed)}') + retval = 1 + except IOError: + print(f'{filename}: cannot open') + retval = 1 + sys.exit(retval) + diff --git a/contrib/devtools/split-debug.sh.in b/contrib/devtools/split-debug.sh.in new file mode 100644 index 0000000000000..92b72b1446cf6 --- /dev/null +++ b/contrib/devtools/split-debug.sh.in @@ 
-0,0 +1,10 @@ +#!/bin/sh +set -e +if [ $# -ne 3 ]; + then echo "usage: $0 " +fi + +@OBJCOPY@ --enable-deterministic-archives -p --only-keep-debug $1 $3 +@OBJCOPY@ --enable-deterministic-archives -p --strip-debug $1 $2 +@STRIP@ --enable-deterministic-archives -p -s $2 +@OBJCOPY@ --enable-deterministic-archives -p --add-gnu-debuglink=$3 $2 diff --git a/contrib/devtools/symbol-check.py b/contrib/devtools/symbol-check.py index f3999f1c0b54a..a419e392eed86 100755 --- a/contrib/devtools/symbol-check.py +++ b/contrib/devtools/symbol-check.py @@ -1,119 +1,291 @@ -#!/usr/bin/python +#!/usr/bin/env python3 # Copyright (c) 2014 Wladimir J. van der Laan -# Distributed under the MIT/X11 software license, see the accompanying +# Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. ''' -A script to check that the (Linux) executables produced by gitian only contain -allowed gcc, glibc and libstdc++ version symbols. This makes sure they are -still compatible with the minimum supported Linux distribution versions. +A script to check that release executables only contain certain symbols +and are only linked against allowed libraries. Example usage: - find ../gitian-builder/build -type f -executable | xargs python contrib/devtools/symbol-check.py + find ../path/to/binaries -type f -executable | xargs python3 contrib/devtools/symbol-check.py ''' -from __future__ import division, print_function -import subprocess -import re import sys +from typing import List, Dict -# Debian 6.0.9 (Squeeze) has: +import lief #type:ignore + +# Debian 9 (Stretch) EOL: 2022. https://wiki.debian.org/DebianReleases#Production_Releases # -# - g++ version 4.4.5 (https://packages.debian.org/search?suite=default§ion=all&arch=any&searchon=names&keywords=g%2B%2B) -# - libc version 2.11.3 (https://packages.debian.org/search?suite=default§ion=all&arch=any&searchon=names&keywords=libc6) -# - libstdc++ version 4.4.5 (https://packages.debian.org/search?suite=default§ion=all&arch=any&searchon=names&keywords=libstdc%2B%2B6) +# - g++ version 6.3.0 (https://packages.debian.org/search?suite=stretch&arch=any&searchon=names&keywords=g%2B%2B) +# - libc version 2.24 (https://packages.debian.org/search?suite=stretch&arch=any&searchon=names&keywords=libc6) # -# Ubuntu 10.04.4 (Lucid Lynx) has: +# Ubuntu 16.04 (Xenial) EOL: 2026. https://wiki.ubuntu.com/Releases # -# - g++ version 4.4.3 (http://packages.ubuntu.com/search?keywords=g%2B%2B&searchon=names&suite=lucid§ion=all) -# - libc version 2.11.1 (http://packages.ubuntu.com/search?keywords=libc6&searchon=names&suite=lucid§ion=all) -# - libstdc++ version 4.4.3 (http://packages.ubuntu.com/search?suite=lucid§ion=all&arch=any&keywords=libstdc%2B%2B&searchon=names) +# - g++ version 5.3.1 +# - libc version 2.23 # -# Taking the minimum of these as our target. +# CentOS Stream 8 EOL: 2024. https://wiki.centos.org/About/Product # -# According to GNU ABI document (http://gcc.gnu.org/onlinedocs/libstdc++/manual/abi.html) this corresponds to: -# GCC 4.4.0: GCC_4.4.0 -# GCC 4.4.2: GLIBCXX_3.4.13, CXXABI_1.3.3 -# (glibc) GLIBC_2_11 +# - g++ version 8.5.0 (http://mirror.centos.org/centos/8-stream/AppStream/x86_64/os/Packages/) +# - libc version 2.28 (http://mirror.centos.org/centos/8-stream/AppStream/x86_64/os/Packages/) # +# See https://gcc.gnu.org/onlinedocs/libstdc++/manual/abi.html for more info. 
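The distribution notes above motivate the per-library symbol version ceilings encoded in MAX_VERSIONS below: release binaries may only import versioned symbols at or below these baselines so they keep running on the oldest supported systems. As a quick manual cross-check, a sketch assuming an in-tree build of bitcoind, the glibc version tags a binary actually references can be listed with objdump:

  $ objdump -T src/bitcoind | grep -o 'GLIBC_[0-9.]*' | sort -Vu

Any tag newer than the table below is what the IMPORTED_SYMBOLS check would flag.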
+ MAX_VERSIONS = { -'GCC': (4,4,0), -'CXXABI': (1,3,3), -'GLIBCXX': (3,4,13), -'GLIBC': (2,11) +'GCC': (4,8,0), +'GLIBC': { + lief.ELF.ARCH.i386: (2,18), + lief.ELF.ARCH.x86_64: (2,18), + lief.ELF.ARCH.ARM: (2,18), + lief.ELF.ARCH.AARCH64:(2,18), + lief.ELF.ARCH.PPC64: (2,18), + lief.ELF.ARCH.RISCV: (2,27), +}, +'LIBATOMIC': (1,0), +'V': (0,5,0), # xkb (bitcoin-qt only) } +# See here for a description of _IO_stdin_used: +# https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=634261#109 + # Ignore symbols that are exported as part of every executable IGNORE_EXPORTS = { -'_edata', '_end', '_init', '__bss_start', '_fini' +'_edata', '_end', '__end__', '_init', '__bss_start', '__bss_start__', '_bss_end__', +'__bss_end__', '_fini', '_IO_stdin_used', 'stdin', 'stdout', 'stderr', +'environ', '_environ', '__environ', } -READELF_CMD = '/usr/bin/readelf' -CPPFILT_CMD = '/usr/bin/c++filt' - -class CPPFilt(object): - ''' - Demangle C++ symbol names. - - Use a pipe to the 'c++filt' command. - ''' - def __init__(self): - self.proc = subprocess.Popen(CPPFILT_CMD, stdin=subprocess.PIPE, stdout=subprocess.PIPE) - - def __call__(self, mangled): - self.proc.stdin.write(mangled + '\n') - return self.proc.stdout.readline().rstrip() - - def close(self): - self.proc.stdin.close() - self.proc.stdout.close() - self.proc.wait() - -def read_symbols(executable, imports=True): - ''' - Parse an ELF executable and return a list of (symbol,version) tuples - for dynamic, imported symbols. - ''' - p = subprocess.Popen([READELF_CMD, '--dyn-syms', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE) - (stdout, stderr) = p.communicate() - if p.returncode: - raise IOError('Could not read symbols for %s: %s' % (executable, stderr.strip())) - syms = [] - for line in stdout.split('\n'): - line = line.split() - if len(line)>7 and re.match('[0-9]+:$', line[0]): - (sym, _, version) = line[7].partition('@') - is_import = line[6] == 'UND' - if version.startswith('@'): - version = version[1:] - if is_import == imports: - syms.append((sym, version)) - return syms - -def check_version(max_versions, version): - if '_' in version: - (lib, _, ver) = version.rpartition('_') - else: - lib = version - ver = '0' + +# Expected linker-loader names can be found here: +# https://sourceware.org/glibc/wiki/ABIList?action=recall&rev=16 +ELF_INTERPRETER_NAMES: Dict[lief.ELF.ARCH, Dict[lief.ENDIANNESS, str]] = { + lief.ELF.ARCH.i386: { + lief.ENDIANNESS.LITTLE: "/lib/ld-linux.so.2", + }, + lief.ELF.ARCH.x86_64: { + lief.ENDIANNESS.LITTLE: "/lib64/ld-linux-x86-64.so.2", + }, + lief.ELF.ARCH.ARM: { + lief.ENDIANNESS.LITTLE: "/lib/ld-linux-armhf.so.3", + }, + lief.ELF.ARCH.AARCH64: { + lief.ENDIANNESS.LITTLE: "/lib/ld-linux-aarch64.so.1", + }, + lief.ELF.ARCH.PPC64: { + lief.ENDIANNESS.BIG: "/lib64/ld64.so.1", + lief.ENDIANNESS.LITTLE: "/lib64/ld64.so.2", + }, + lief.ELF.ARCH.RISCV: { + lief.ENDIANNESS.LITTLE: "/lib/ld-linux-riscv64-lp64d.so.1", + }, +} + +# Allowed NEEDED libraries +ELF_ALLOWED_LIBRARIES = { +# bitcoind and bitcoin-qt +'libgcc_s.so.1', # GCC base support +'libc.so.6', # C library +'libpthread.so.0', # threading +'libm.so.6', # math library +'librt.so.1', # real-time (clock) +'libatomic.so.1', +'ld-linux-x86-64.so.2', # 64-bit dynamic linker +'ld-linux.so.2', # 32-bit dynamic linker +'ld-linux-aarch64.so.1', # 64-bit ARM dynamic linker +'ld-linux-armhf.so.3', # 32-bit ARM dynamic linker +'ld64.so.1', # POWER64 ABIv1 dynamic linker +'ld64.so.2', # POWER64 ABIv2 dynamic linker +'ld-linux-riscv64-lp64d.so.1', 
# 64-bit RISC-V dynamic linker +# bitcoin-qt only +'libxcb.so.1', # part of X11 +'libxkbcommon.so.0', # keyboard keymapping +'libxkbcommon-x11.so.0', # keyboard keymapping +'libfontconfig.so.1', # font support +'libfreetype.so.6', # font parsing +'libdl.so.2', # programming interface to dynamic linker +'libxcb-icccm.so.4', +'libxcb-image.so.0', +'libxcb-shm.so.0', +'libxcb-keysyms.so.1', +'libxcb-randr.so.0', +'libxcb-render-util.so.0', +'libxcb-render.so.0', +'libxcb-shape.so.0', +'libxcb-sync.so.1', +'libxcb-xfixes.so.0', +'libxcb-xinerama.so.0', +'libxcb-xkb.so.1', +} + +MACHO_ALLOWED_LIBRARIES = { +# bitcoind and bitcoin-qt +'libc++.1.dylib', # C++ Standard Library +'libSystem.B.dylib', # libc, libm, libpthread, libinfo +# bitcoin-qt only +'AppKit', # user interface +'ApplicationServices', # common application tasks. +'Carbon', # deprecated c back-compat API +'ColorSync', +'CoreFoundation', # low level func, data types +'CoreGraphics', # 2D rendering +'CoreServices', # operating system services +'CoreText', # interface for laying out text and handling fonts. +'CoreVideo', # video processing +'Foundation', # base layer functionality for apps/frameworks +'ImageIO', # read and write image file formats. +'IOKit', # user-space access to hardware devices and drivers. +'IOSurface', # cross process image/drawing buffers +'libobjc.A.dylib', # Objective-C runtime library +'Metal', # 3D graphics +'Security', # access control and authentication +'QuartzCore', # animation +} + +PE_ALLOWED_LIBRARIES = { +'ADVAPI32.dll', # security & registry +'IPHLPAPI.DLL', # IP helper API +'KERNEL32.dll', # win32 base APIs +'msvcrt.dll', # C standard library for MSVC +'SHELL32.dll', # shell API +'USER32.dll', # user interface +'WS2_32.dll', # sockets +# bitcoin-qt only +'dwmapi.dll', # desktop window manager +'GDI32.dll', # graphics device interface +'IMM32.dll', # input method editor +'NETAPI32.dll', +'ole32.dll', # component object model +'OLEAUT32.dll', # OLE Automation API +'SHLWAPI.dll', # light weight shell API +'USERENV.dll', +'UxTheme.dll', +'VERSION.dll', # version checking +'WINMM.dll', # WinMM audio API +'WTSAPI32.dll', +} + +def check_version(max_versions, version, arch) -> bool: + (lib, _, ver) = version.rpartition('_') ver = tuple([int(x) for x in ver.split('.')]) if not lib in max_versions: return False - return ver <= max_versions[lib] + if isinstance(max_versions[lib], tuple): + return ver <= max_versions[lib] + else: + return ver <= max_versions[lib][arch] + +def check_imported_symbols(binary) -> bool: + ok: bool = True + + for symbol in binary.imported_symbols: + if not symbol.imported: + continue + + version = symbol.symbol_version if symbol.has_version else None + + if version: + aux_version = version.symbol_version_auxiliary.name if version.has_auxiliary_version else None + if aux_version and not check_version(MAX_VERSIONS, aux_version, binary.header.machine_type): + print(f'{filename}: symbol {symbol.name} from unsupported version {version}') + ok = False + return ok + +def check_exported_symbols(binary) -> bool: + ok: bool = True + + for symbol in binary.dynamic_symbols: + if not symbol.exported: + continue + name = symbol.name + if binary.header.machine_type == lief.ELF.ARCH.RISCV or name in IGNORE_EXPORTS: + continue + print(f'{binary.name}: export of symbol {name} not allowed!') + ok = False + return ok + +def check_ELF_libraries(binary) -> bool: + ok: bool = True + for library in binary.libraries: + if library not in ELF_ALLOWED_LIBRARIES: + print(f'{filename}: {library} is not in 
ALLOWED_LIBRARIES!') + ok = False + return ok + +def check_MACHO_libraries(binary) -> bool: + ok: bool = True + for dylib in binary.libraries: + split = dylib.name.split('/') + if split[-1] not in MACHO_ALLOWED_LIBRARIES: + print(f'{split[-1]} is not in ALLOWED_LIBRARIES!') + ok = False + return ok + +def check_MACHO_min_os(binary) -> bool: + if binary.build_version.minos == [10,15,0]: + return True + return False + +def check_MACHO_sdk(binary) -> bool: + if binary.build_version.sdk == [11, 0, 0]: + return True + return False + +def check_PE_libraries(binary) -> bool: + ok: bool = True + for dylib in binary.libraries: + if dylib not in PE_ALLOWED_LIBRARIES: + print(f'{dylib} is not in ALLOWED_LIBRARIES!') + ok = False + return ok + +def check_PE_subsystem_version(binary) -> bool: + major: int = binary.optional_header.major_subsystem_version + minor: int = binary.optional_header.minor_subsystem_version + if major == 6 and minor == 1: + return True + return False + +def check_ELF_interpreter(binary) -> bool: + expected_interpreter = ELF_INTERPRETER_NAMES[binary.header.machine_type][binary.abstract.header.endianness] + + return binary.concrete.interpreter == expected_interpreter + +CHECKS = { +lief.EXE_FORMATS.ELF: [ + ('IMPORTED_SYMBOLS', check_imported_symbols), + ('EXPORTED_SYMBOLS', check_exported_symbols), + ('LIBRARY_DEPENDENCIES', check_ELF_libraries), + ('INTERPRETER_NAME', check_ELF_interpreter), +], +lief.EXE_FORMATS.MACHO: [ + ('DYNAMIC_LIBRARIES', check_MACHO_libraries), + ('MIN_OS', check_MACHO_min_os), + ('SDK', check_MACHO_sdk), +], +lief.EXE_FORMATS.PE: [ + ('DYNAMIC_LIBRARIES', check_PE_libraries), + ('SUBSYSTEM_VERSION', check_PE_subsystem_version), +] +} if __name__ == '__main__': - cppfilt = CPPFilt() - retval = 0 + retval: int = 0 for filename in sys.argv[1:]: - # Check imported symbols - for sym,version in read_symbols(filename, True): - if version and not check_version(MAX_VERSIONS, version): - print('%s: symbol %s from unsupported version %s' % (filename, cppfilt(sym), version)) + try: + binary = lief.parse(filename) + etype = binary.format + if etype == lief.EXE_FORMATS.UNKNOWN: + print(f'{filename}: unknown executable format') retval = 1 - # Check exported symbols - for sym,version in read_symbols(filename, False): - if sym in IGNORE_EXPORTS: continue - print('%s: export of symbol %s not allowed' % (filename, cppfilt(sym))) - retval = 1 - - exit(retval) - + failed: List[str] = [] + for (name, func) in CHECKS[etype]: + if not func(binary): + failed.append(name) + if failed: + print(f'{filename}: failed {" ".join(failed)}') + retval = 1 + except IOError: + print(f'{filename}: cannot open') + retval = 1 + sys.exit(retval) diff --git a/contrib/devtools/test-security-check.py b/contrib/devtools/test-security-check.py new file mode 100755 index 0000000000000..d3d225f3abdcf --- /dev/null +++ b/contrib/devtools/test-security-check.py @@ -0,0 +1,151 @@ +#!/usr/bin/env python3 +# Copyright (c) 2015-2021 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. 
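Both checkers above follow the same driver pattern: parse each argument with lief, run the per-format check list, and print the names of any failed checks. A usage sketch with illustrative binary paths (release builds normally run these checks through the build system rather than by hand):

  $ contrib/devtools/security-check.py src/bitcoind src/qt/bitcoin-qt
  $ contrib/devtools/symbol-check.py src/bitcoind src/qt/bitcoin-qt

A silent run with exit status 0 means every listed binary passed.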
+''' +Test script for security-check.py +''' +import lief #type:ignore +import os +import subprocess +from typing import List +import unittest + +from utils import determine_wellknown_cmd + +def write_testcode(filename): + with open(filename, 'w', encoding="utf8") as f: + f.write(''' + #include + int main() + { + printf("the quick brown fox jumps over the lazy god\\n"); + return 0; + } + ''') + +def clean_files(source, executable): + os.remove(source) + os.remove(executable) + +def call_security_check(cc, source, executable, options): + # This should behave the same as AC_TRY_LINK, so arrange well-known flags + # in the same order as autoconf would. + # + # See the definitions for ac_link in autoconf's lib/autoconf/c.m4 file for + # reference. + env_flags: List[str] = [] + for var in ['CFLAGS', 'CPPFLAGS', 'LDFLAGS']: + env_flags += filter(None, os.environ.get(var, '').split(' ')) + + subprocess.run([*cc,source,'-o',executable] + env_flags + options, check=True) + p = subprocess.run(['./contrib/devtools/security-check.py',executable], stdout=subprocess.PIPE, universal_newlines=True) + return (p.returncode, p.stdout.rstrip()) + +def get_arch(cc, source, executable): + subprocess.run([*cc, source, '-o', executable], check=True) + binary = lief.parse(executable) + arch = binary.abstract.header.architecture + os.remove(executable) + return arch + +class TestSecurityChecks(unittest.TestCase): + def test_ELF(self): + source = 'test1.c' + executable = 'test1' + cc = determine_wellknown_cmd('CC', 'gcc') + write_testcode(source) + arch = get_arch(cc, source, executable) + + if arch == lief.ARCHITECTURES.X86: + self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-zexecstack','-fno-stack-protector','-Wl,-znorelro','-no-pie','-fno-PIE', '-Wl,-z,separate-code']), + (1, executable+': failed PIE NX RELRO Canary CONTROL_FLOW')) + self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fno-stack-protector','-Wl,-znorelro','-no-pie','-fno-PIE', '-Wl,-z,separate-code']), + (1, executable+': failed PIE RELRO Canary CONTROL_FLOW')) + self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-znorelro','-no-pie','-fno-PIE', '-Wl,-z,separate-code']), + (1, executable+': failed PIE RELRO CONTROL_FLOW')) + self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-znorelro','-pie','-fPIE', '-Wl,-z,separate-code']), + (1, executable+': failed RELRO CONTROL_FLOW')) + self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-zrelro','-Wl,-z,now','-pie','-fPIE', '-Wl,-z,noseparate-code']), + (1, executable+': failed separate_code CONTROL_FLOW')) + self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-zrelro','-Wl,-z,now','-pie','-fPIE', '-Wl,-z,separate-code']), + (1, executable+': failed CONTROL_FLOW')) + self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-zrelro','-Wl,-z,now','-pie','-fPIE', '-Wl,-z,separate-code', '-fcf-protection=full']), + (0, '')) + else: + self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-zexecstack','-fno-stack-protector','-Wl,-znorelro','-no-pie','-fno-PIE', '-Wl,-z,separate-code']), + (1, executable+': failed PIE NX RELRO Canary')) + self.assertEqual(call_security_check(cc, source, executable, 
['-Wl,-znoexecstack','-fno-stack-protector','-Wl,-znorelro','-no-pie','-fno-PIE', '-Wl,-z,separate-code']), + (1, executable+': failed PIE RELRO Canary')) + self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-znorelro','-no-pie','-fno-PIE', '-Wl,-z,separate-code']), + (1, executable+': failed PIE RELRO')) + self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-znorelro','-pie','-fPIE', '-Wl,-z,separate-code']), + (1, executable+': failed RELRO')) + self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-zrelro','-Wl,-z,now','-pie','-fPIE', '-Wl,-z,noseparate-code']), + (1, executable+': failed separate_code')) + self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-zrelro','-Wl,-z,now','-pie','-fPIE', '-Wl,-z,separate-code']), + (0, '')) + + clean_files(source, executable) + + def test_PE(self): + source = 'test1.c' + executable = 'test1.exe' + cc = determine_wellknown_cmd('CC', 'x86_64-w64-mingw32-gcc') + write_testcode(source) + + self.assertEqual(call_security_check(cc, source, executable, ['-Wl,--disable-nxcompat','-Wl,--disable-reloc-section','-Wl,--disable-dynamicbase','-Wl,--disable-high-entropy-va','-no-pie','-fno-PIE']), + (1, executable+': failed PIE DYNAMIC_BASE HIGH_ENTROPY_VA NX RELOC_SECTION CONTROL_FLOW')) + self.assertEqual(call_security_check(cc, source, executable, ['-Wl,--nxcompat','-Wl,--disable-reloc-section','-Wl,--disable-dynamicbase','-Wl,--disable-high-entropy-va','-no-pie','-fno-PIE']), + (1, executable+': failed PIE DYNAMIC_BASE HIGH_ENTROPY_VA RELOC_SECTION CONTROL_FLOW')) + self.assertEqual(call_security_check(cc, source, executable, ['-Wl,--nxcompat','-Wl,--enable-reloc-section','-Wl,--disable-dynamicbase','-Wl,--disable-high-entropy-va','-no-pie','-fno-PIE']), + (1, executable+': failed PIE DYNAMIC_BASE HIGH_ENTROPY_VA CONTROL_FLOW')) + self.assertEqual(call_security_check(cc, source, executable, ['-Wl,--nxcompat','-Wl,--enable-reloc-section','-Wl,--disable-dynamicbase','-Wl,--disable-high-entropy-va','-pie','-fPIE']), + (1, executable+': failed PIE DYNAMIC_BASE HIGH_ENTROPY_VA CONTROL_FLOW')) # -pie -fPIE does nothing unless --dynamicbase is also supplied + self.assertEqual(call_security_check(cc, source, executable, ['-Wl,--nxcompat','-Wl,--enable-reloc-section','-Wl,--dynamicbase','-Wl,--disable-high-entropy-va','-pie','-fPIE']), + (1, executable+': failed HIGH_ENTROPY_VA CONTROL_FLOW')) + self.assertEqual(call_security_check(cc, source, executable, ['-Wl,--nxcompat','-Wl,--enable-reloc-section','-Wl,--dynamicbase','-Wl,--high-entropy-va','-pie','-fPIE']), + (1, executable+': failed CONTROL_FLOW')) + self.assertEqual(call_security_check(cc, source, executable, ['-Wl,--nxcompat','-Wl,--enable-reloc-section','-Wl,--dynamicbase','-Wl,--high-entropy-va','-pie','-fPIE', '-fcf-protection=full']), + (0, '')) + + clean_files(source, executable) + + def test_MACHO(self): + source = 'test1.c' + executable = 'test1' + cc = determine_wellknown_cmd('CC', 'clang') + write_testcode(source) + arch = get_arch(cc, source, executable) + + if arch == lief.ARCHITECTURES.X86: + self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-no_pie','-Wl,-flat_namespace','-Wl,-allow_stack_execute','-fno-stack-protector']), + (1, executable+': failed NOUNDEFS LAZY_BINDINGS Canary PIE NX CONTROL_FLOW')) + self.assertEqual(call_security_check(cc, 
source, executable, ['-Wl,-no_pie','-Wl,-flat_namespace','-Wl,-allow_stack_execute','-fstack-protector-all']), + (1, executable+': failed NOUNDEFS LAZY_BINDINGS PIE NX CONTROL_FLOW')) + self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-no_pie','-Wl,-flat_namespace','-fstack-protector-all']), + (1, executable+': failed NOUNDEFS LAZY_BINDINGS PIE CONTROL_FLOW')) + self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-no_pie','-fstack-protector-all']), + (1, executable+': failed LAZY_BINDINGS PIE CONTROL_FLOW')) + self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-no_pie','-Wl,-bind_at_load','-fstack-protector-all']), + (1, executable+': failed PIE CONTROL_FLOW')) + self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-no_pie','-Wl,-bind_at_load','-fstack-protector-all', '-fcf-protection=full']), + (1, executable+': failed PIE')) + self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-pie','-Wl,-bind_at_load','-fstack-protector-all', '-fcf-protection=full']), + (0, '')) + else: + # arm64 darwin doesn't support non-PIE binaries, control flow or executable stacks + self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-flat_namespace','-fno-stack-protector']), + (1, executable+': failed NOUNDEFS LAZY_BINDINGS Canary')) + self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-flat_namespace','-fstack-protector-all']), + (1, executable+': failed NOUNDEFS LAZY_BINDINGS')) + self.assertEqual(call_security_check(cc, source, executable, ['-fstack-protector-all']), + (1, executable+': failed LAZY_BINDINGS')) + self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-bind_at_load','-fstack-protector-all']), + (0, '')) + + + clean_files(source, executable) + +if __name__ == '__main__': + unittest.main() diff --git a/contrib/devtools/test-symbol-check.py b/contrib/devtools/test-symbol-check.py new file mode 100755 index 0000000000000..b4c112b2666b6 --- /dev/null +++ b/contrib/devtools/test-symbol-check.py @@ -0,0 +1,204 @@ +#!/usr/bin/env python3 +# Copyright (c) 2020-2021 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +''' +Test script for symbol-check.py +''' +import os +import subprocess +from typing import List +import unittest + +from utils import determine_wellknown_cmd + +def call_symbol_check(cc: List[str], source, executable, options): + # This should behave the same as AC_TRY_LINK, so arrange well-known flags + # in the same order as autoconf would. + # + # See the definitions for ac_link in autoconf's lib/autoconf/c.m4 file for + # reference. 
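+ # The command assembled below is therefore, in order:
+ #   <cc> <source> -o <executable> $CFLAGS $CPPFLAGS $LDFLAGS <options>
+ # e.g. (illustrative values) with CFLAGS="-O2" and options=['-lm'] this runs: gcc test1.c -o test1 -O2 -lm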
+ env_flags: List[str] = [] + for var in ['CFLAGS', 'CPPFLAGS', 'LDFLAGS']: + env_flags += filter(None, os.environ.get(var, '').split(' ')) + + subprocess.run([*cc,source,'-o',executable] + env_flags + options, check=True) + p = subprocess.run(['./contrib/devtools/symbol-check.py',executable], stdout=subprocess.PIPE, universal_newlines=True) + os.remove(source) + os.remove(executable) + return (p.returncode, p.stdout.rstrip()) + +def get_machine(cc: List[str]): + p = subprocess.run([*cc,'-dumpmachine'], stdout=subprocess.PIPE, universal_newlines=True) + return p.stdout.rstrip() + +class TestSymbolChecks(unittest.TestCase): + def test_ELF(self): + source = 'test1.c' + executable = 'test1' + cc = determine_wellknown_cmd('CC', 'gcc') + + # there's no way to do this test for RISC-V at the moment; we build for + # RISC-V in a glibc 2.27 envinonment and we allow all symbols from 2.27. + if 'riscv' in get_machine(cc): + self.skipTest("test not available for RISC-V") + + # nextup was introduced in GLIBC 2.24, so is newer than our supported + # glibc (2.18), and available in our release build environment (2.24). + with open(source, 'w', encoding="utf8") as f: + f.write(''' + #define _GNU_SOURCE + #include + + double nextup(double x); + + int main() + { + nextup(3.14); + return 0; + } + ''') + + self.assertEqual(call_symbol_check(cc, source, executable, ['-lm']), + (1, executable + ': symbol nextup from unsupported version GLIBC_2.24(3)\n' + + executable + ': failed IMPORTED_SYMBOLS')) + + # -lutil is part of the libc6 package so a safe bet that it's installed + # it's also out of context enough that it's unlikely to ever become a real dependency + source = 'test2.c' + executable = 'test2' + with open(source, 'w', encoding="utf8") as f: + f.write(''' + #include + + int main() + { + login(0); + return 0; + } + ''') + + self.assertEqual(call_symbol_check(cc, source, executable, ['-lutil']), + (1, executable + ': libutil.so.1 is not in ALLOWED_LIBRARIES!\n' + + executable + ': failed LIBRARY_DEPENDENCIES')) + + # finally, check a simple conforming binary + source = 'test3.c' + executable = 'test3' + with open(source, 'w', encoding="utf8") as f: + f.write(''' + #include + + int main() + { + printf("42"); + return 0; + } + ''') + + self.assertEqual(call_symbol_check(cc, source, executable, []), + (0, '')) + + def test_MACHO(self): + source = 'test1.c' + executable = 'test1' + cc = determine_wellknown_cmd('CC', 'clang') + + with open(source, 'w', encoding="utf8") as f: + f.write(''' + #include + + int main() + { + XML_ExpatVersion(); + return 0; + } + + ''') + + self.assertEqual(call_symbol_check(cc, source, executable, ['-lexpat', '-Wl,-platform_version','-Wl,macos', '-Wl,11.4', '-Wl,11.4']), + (1, 'libexpat.1.dylib is not in ALLOWED_LIBRARIES!\n' + + f'{executable}: failed DYNAMIC_LIBRARIES MIN_OS SDK')) + + source = 'test2.c' + executable = 'test2' + with open(source, 'w', encoding="utf8") as f: + f.write(''' + #include + + int main() + { + CGMainDisplayID(); + return 0; + } + ''') + + self.assertEqual(call_symbol_check(cc, source, executable, ['-framework', 'CoreGraphics', '-Wl,-platform_version','-Wl,macos', '-Wl,11.4', '-Wl,11.4']), + (1, f'{executable}: failed MIN_OS SDK')) + + source = 'test3.c' + executable = 'test3' + with open(source, 'w', encoding="utf8") as f: + f.write(''' + int main() + { + return 0; + } + ''') + + self.assertEqual(call_symbol_check(cc, source, executable, ['-Wl,-platform_version','-Wl,macos', '-Wl,10.15', '-Wl,11.4']), + (1, f'{executable}: failed SDK')) + + def 
test_PE(self): + source = 'test1.c' + executable = 'test1.exe' + cc = determine_wellknown_cmd('CC', 'x86_64-w64-mingw32-gcc') + + with open(source, 'w', encoding="utf8") as f: + f.write(''' + #include + + int main() + { + PdhConnectMachineA(NULL); + return 0; + } + ''') + + self.assertEqual(call_symbol_check(cc, source, executable, ['-lpdh', '-Wl,--major-subsystem-version', '-Wl,6', '-Wl,--minor-subsystem-version', '-Wl,1']), + (1, 'pdh.dll is not in ALLOWED_LIBRARIES!\n' + + executable + ': failed DYNAMIC_LIBRARIES')) + + source = 'test2.c' + executable = 'test2.exe' + + with open(source, 'w', encoding="utf8") as f: + f.write(''' + int main() + { + return 0; + } + ''') + + self.assertEqual(call_symbol_check(cc, source, executable, ['-Wl,--major-subsystem-version', '-Wl,9', '-Wl,--minor-subsystem-version', '-Wl,9']), + (1, executable + ': failed SUBSYSTEM_VERSION')) + + source = 'test3.c' + executable = 'test3.exe' + with open(source, 'w', encoding="utf8") as f: + f.write(''' + #include + + int main() + { + CoFreeUnusedLibrariesEx(0,0); + return 0; + } + ''') + + self.assertEqual(call_symbol_check(cc, source, executable, ['-lole32', '-Wl,--major-subsystem-version', '-Wl,6', '-Wl,--minor-subsystem-version', '-Wl,1']), + (0, '')) + + +if __name__ == '__main__': + unittest.main() diff --git a/contrib/devtools/test_deterministic_coverage.sh b/contrib/devtools/test_deterministic_coverage.sh new file mode 100755 index 0000000000000..8501c72f04a55 --- /dev/null +++ b/contrib/devtools/test_deterministic_coverage.sh @@ -0,0 +1,151 @@ +#!/usr/bin/env bash +# +# Copyright (c) 2019-2020 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +# +# Test for deterministic coverage across unit test runs. + +export LC_ALL=C + +# Use GCOV_EXECUTABLE="gcov" if compiling with gcc. +# Use GCOV_EXECUTABLE="llvm-cov gcov" if compiling with clang. +GCOV_EXECUTABLE="gcov" + +# Disable tests known to cause non-deterministic behaviour and document the source or point of non-determinism. +NON_DETERMINISTIC_TESTS=( + "blockfilter_index_tests/blockfilter_index_initial_sync" # src/checkqueue.h: In CCheckQueue::Loop(): while (queue.empty()) { ... } + "coinselector_tests/knapsack_solver_test" # coinselector_tests.cpp: if (equal_sets(setCoinsRet, setCoinsRet2)) + "fs_tests/fsbridge_fstream" # deterministic test failure? 
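+ # Each entry above and below is a Boost.Test "suite/case" path; they are negated and joined into
+ # BOOST_TEST_RUN_FILTERS further down, so that by default only the remaining (deterministic) tests are run.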
+ "miner_tests/CreateNewBlock_validity" # validation.cpp: if (GetMainSignals().CallbacksPending() > 10) + "scheduler_tests/manythreads" # scheduler.cpp: CScheduler::serviceQueue() + "scheduler_tests/singlethreadedscheduler_ordered" # scheduler.cpp: CScheduler::serviceQueue() + "txvalidationcache_tests/checkinputs_test" # validation.cpp: if (GetMainSignals().CallbacksPending() > 10) + "txvalidationcache_tests/tx_mempool_block_doublespend" # validation.cpp: if (GetMainSignals().CallbacksPending() > 10) + "txindex_tests/txindex_initial_sync" # validation.cpp: if (GetMainSignals().CallbacksPending() > 10) + "txvalidation_tests/tx_mempool_reject_coinbase" # validation.cpp: if (GetMainSignals().CallbacksPending() > 10) + "validation_block_tests/processnewblock_signals_ordering" # validation.cpp: if (GetMainSignals().CallbacksPending() > 10) + "wallet_tests/coin_mark_dirty_immature_credit" # validation.cpp: if (GetMainSignals().CallbacksPending() > 10) + "wallet_tests/dummy_input_size_test" # validation.cpp: if (GetMainSignals().CallbacksPending() > 10) + "wallet_tests/importmulti_rescan" # validation.cpp: if (GetMainSignals().CallbacksPending() > 10) + "wallet_tests/importwallet_rescan" # validation.cpp: if (GetMainSignals().CallbacksPending() > 10) + "wallet_tests/ListCoins" # validation.cpp: if (GetMainSignals().CallbacksPending() > 10) + "wallet_tests/scan_for_wallet_transactions" # validation.cpp: if (GetMainSignals().CallbacksPending() > 10) + "wallet_tests/wallet_disableprivkeys" # validation.cpp: if (GetMainSignals().CallbacksPending() > 10) +) + +TEST_BITCOIN_BINARY="src/test/test_bitcoin" + +print_usage() { + echo "Usage: $0 [custom test filter (default: all but known non-deterministic tests)] [number of test runs (default: 2)]" +} + +N_TEST_RUNS=2 +BOOST_TEST_RUN_FILTERS="" +if [[ $# != 0 ]]; then + if [[ $1 == "--help" ]]; then + print_usage + exit + fi + PARSED_ARGUMENTS=0 + if [[ $1 =~ [a-z] ]]; then + BOOST_TEST_RUN_FILTERS=$1 + PARSED_ARGUMENTS=$((PARSED_ARGUMENTS + 1)) + shift + fi + if [[ $1 =~ ^[0-9]+$ ]]; then + N_TEST_RUNS=$1 + PARSED_ARGUMENTS=$((PARSED_ARGUMENTS + 1)) + shift + fi + if [[ ${PARSED_ARGUMENTS} == 0 || $# -gt 2 || ${N_TEST_RUNS} -lt 2 ]]; then + print_usage + exit + fi +fi +if [[ ${BOOST_TEST_RUN_FILTERS} == "" ]]; then + BOOST_TEST_RUN_FILTERS="$(IFS=":"; echo "!${NON_DETERMINISTIC_TESTS[*]}" | sed 's/:/:!/g')" +else + echo "Using Boost test filter: ${BOOST_TEST_RUN_FILTERS}" + echo +fi + +if ! command -v gcov > /dev/null; then + echo "Error: gcov not installed. Exiting." + exit 1 +fi + +if ! command -v gcovr > /dev/null; then + echo "Error: gcovr not installed. Exiting." + exit 1 +fi + +if [[ ! -e ${TEST_BITCOIN_BINARY} ]]; then + echo "Error: Executable ${TEST_BITCOIN_BINARY} not found. Run \"./configure --enable-lcov\" and compile." + exit 1 +fi + +get_file_suffix_count() { + find src/ -type f -name "*.$1" | wc -l +} + +if [[ $(get_file_suffix_count gcno) == 0 ]]; then + echo "Error: Could not find any *.gcno files. The *.gcno files are generated by the compiler. Run \"./configure --enable-lcov\" and re-compile." + exit 1 +fi + +get_covr_filename() { + echo "gcovr.run-$1.txt" +} + +TEST_RUN_ID=0 +while [[ ${TEST_RUN_ID} -lt ${N_TEST_RUNS} ]]; do + TEST_RUN_ID=$((TEST_RUN_ID + 1)) + echo "[$(date +"%Y-%m-%d %H:%M:%S")] Measuring coverage, run #${TEST_RUN_ID} of ${N_TEST_RUNS}" + find src/ -type f -name "*.gcda" -exec rm {} \; + if [[ $(get_file_suffix_count gcda) != 0 ]]; then + echo "Error: Stale *.gcda files found. Exiting." 
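+ # Bail out: leftover .gcda files would mix execution counts from a previous run into this one
+ # and make the run-to-run coverage comparison below meaningless.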
+ exit 1 + fi + TEST_OUTPUT_TEMPFILE=$(mktemp) + if ! BOOST_TEST_RUN_FILTERS="${BOOST_TEST_RUN_FILTERS}" ${TEST_BITCOIN_BINARY} > "${TEST_OUTPUT_TEMPFILE}" 2>&1; then + cat "${TEST_OUTPUT_TEMPFILE}" + rm "${TEST_OUTPUT_TEMPFILE}" + exit 1 + fi + rm "${TEST_OUTPUT_TEMPFILE}" + if [[ $(get_file_suffix_count gcda) == 0 ]]; then + echo "Error: Running the test suite did not create any *.gcda files. The gcda files are generated when the instrumented test programs are executed. Run \"./configure --enable-lcov\" and re-compile." + exit 1 + fi + GCOVR_TEMPFILE=$(mktemp) + if ! gcovr --gcov-executable "${GCOV_EXECUTABLE}" -r src/ > "${GCOVR_TEMPFILE}"; then + echo "Error: gcovr failed. Output written to ${GCOVR_TEMPFILE}. Exiting." + exit 1 + fi + GCOVR_FILENAME=$(get_covr_filename ${TEST_RUN_ID}) + mv "${GCOVR_TEMPFILE}" "${GCOVR_FILENAME}" + if grep -E "^TOTAL *0 *0 " "${GCOVR_FILENAME}"; then + echo "Error: Spurious gcovr output. Make sure the correct GCOV_EXECUTABLE variable is set in $0 (\"gcov\" for gcc, \"llvm-cov gcov\" for clang)." + exit 1 + fi + if [[ ${TEST_RUN_ID} != 1 ]]; then + COVERAGE_DIFF=$(diff -u "$(get_covr_filename 1)" "${GCOVR_FILENAME}") + if [[ ${COVERAGE_DIFF} != "" ]]; then + echo + echo "The line coverage is non-deterministic between runs. Exiting." + echo + echo "The test suite must be deterministic in the sense that the set of lines executed at least" + echo "once must be identical between runs. This is a necessary condition for meaningful" + echo "coverage measuring." + echo + echo "${COVERAGE_DIFF}" + exit 1 + fi + rm "${GCOVR_FILENAME}" + fi +done + +echo +echo "Coverage test passed: Deterministic coverage across ${N_TEST_RUNS} runs." +exit diff --git a/contrib/devtools/update-translations.py b/contrib/devtools/update-translations.py deleted file mode 100755 index 0be632069a942..0000000000000 --- a/contrib/devtools/update-translations.py +++ /dev/null @@ -1,186 +0,0 @@ -#!/usr/bin/python -# Copyright (c) 2014 Wladimir J. van der Laan -# Distributed under the MIT/X11 software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. -''' -Run this script from the root of the repository to update all translations from -transifex. 
-It will do the following automatically: - -- fetch all translations using the tx tool -- post-process them into valid and committable format - - remove invalid control characters - - remove location tags (makes diffs less noisy) - -TODO: -- auto-add new translations to the build system according to the translation process -''' -from __future__ import division, print_function -import subprocess -import re -import sys -import os -import io -import xml.etree.ElementTree as ET - -# Name of transifex tool -TX = 'tx' -# Name of source language file -SOURCE_LANG = 'bitcoin_en.ts' -# Directory with locale files -LOCALE_DIR = 'src/qt/locale' - -def check_at_repository_root(): - if not os.path.exists('.git'): - print('No .git directory found') - print('Execute this script at the root of the repository', file=sys.stderr) - exit(1) - -def fetch_all_translations(): - if subprocess.call([TX, 'pull', '-f']): - print('Error while fetching translations', file=sys.stderr) - exit(1) - -def find_format_specifiers(s): - '''Find all format specifiers in a string.''' - pos = 0 - specifiers = [] - while True: - percent = s.find('%', pos) - if percent < 0: - break - specifiers.append(s[percent+1]) - pos = percent+2 - return specifiers - -def split_format_specifiers(specifiers): - '''Split format specifiers between numeric (Qt) and others (strprintf)''' - numeric = [] - other = [] - for s in specifiers: - if s in {'1','2','3','4','5','6','7','8','9'}: - numeric.append(s) - else: - other.append(s) - - # numeric (Qt) can be present in any order, others (strprintf) must be in specified order - return set(numeric),other - -def sanitize_string(s): - '''Sanitize string for printing''' - return s.replace('\n',' ') - -def check_format_specifiers(source, translation, errors): - source_f = split_format_specifiers(find_format_specifiers(source)) - # assert that no source messages contain both Qt and strprintf format specifiers - # if this fails, go change the source as this is hacky and confusing! 
- assert(not(source_f[0] and source_f[1])) - try: - translation_f = split_format_specifiers(find_format_specifiers(translation)) - except IndexError: - errors.append("Parse error in translation '%s'" % sanitize_string(translation)) - return False - else: - if source_f != translation_f: - errors.append("Mismatch between '%s' and '%s'" % (sanitize_string(source), sanitize_string(translation))) - return False - return True - -def all_ts_files(suffix=''): - for filename in os.listdir(LOCALE_DIR): - # process only language files, and do not process source language - if not filename.endswith('.ts'+suffix) or filename == SOURCE_LANG+suffix: - continue - if suffix: # remove provided suffix - filename = filename[0:-len(suffix)] - filepath = os.path.join(LOCALE_DIR, filename) - yield(filename, filepath) - -FIX_RE = re.compile(b'[\x00-\x09\x0b\x0c\x0e-\x1f]') -def remove_invalid_characters(s): - '''Remove invalid characters from translation string''' - return FIX_RE.sub(b'', s) - -# Override cdata escape function to make our output match Qt's (optional, just for cleaner diffs for -# comparison, disable by default) -_orig_escape_cdata = None -def escape_cdata(text): - text = _orig_escape_cdata(text) - text = text.replace("'", ''') - text = text.replace('"', '"') - return text - -def postprocess_translations(reduce_diff_hacks=False): - print('Checking and postprocessing...') - - if reduce_diff_hacks: - global _orig_escape_cdata - _orig_escape_cdata = ET._escape_cdata - ET._escape_cdata = escape_cdata - - for (filename,filepath) in all_ts_files(): - os.rename(filepath, filepath+'.orig') - - have_errors = False - for (filename,filepath) in all_ts_files('.orig'): - # pre-fixups to cope with transifex output - parser = ET.XMLParser(encoding='utf-8') # need to override encoding because 'utf8' is not understood only 'utf-8' - with open(filepath + '.orig', 'rb') as f: - data = f.read() - # remove control characters; this must be done over the entire file otherwise the XML parser will fail - data = remove_invalid_characters(data) - tree = ET.parse(io.BytesIO(data), parser=parser) - - # iterate over all messages in file - root = tree.getroot() - for context in root.findall('context'): - for message in context.findall('message'): - numerus = message.get('numerus') == 'yes' - source = message.find('source').text - translation_node = message.find('translation') - # pick all numerusforms - if numerus: - translations = [i.text for i in translation_node.findall('numerusform')] - else: - translations = [translation_node.text] - - for translation in translations: - if translation is None: - continue - errors = [] - valid = check_format_specifiers(source, translation, errors) - - for error in errors: - print('%s: %s' % (filename, error)) - - if not valid: # set type to unfinished and clear string if invalid - translation_node.clear() - translation_node.set('type', 'unfinished') - have_errors = True - - # Remove location tags - for location in message.findall('location'): - message.remove(location) - - # Remove entire message if it is an unfinished translation - if translation_node.get('type') == 'unfinished': - context.remove(message) - - # write fixed-up tree - # if diff reduction requested, replace some XML to 'sanitize' to qt formatting - if reduce_diff_hacks: - out = io.BytesIO() - tree.write(out, encoding='utf-8') - out = out.getvalue() - out = out.replace(b' />', b'/>') - with open(filepath, 'wb') as f: - f.write(out) - else: - tree.write(filepath, encoding='utf-8') - return have_errors - -if __name__ == 
'__main__': - check_at_repository_root() - fetch_all_translations() - postprocess_translations() - diff --git a/contrib/devtools/utils.py b/contrib/devtools/utils.py new file mode 100755 index 0000000000000..68ad1c3aba191 --- /dev/null +++ b/contrib/devtools/utils.py @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 +# Copyright (c) 2021 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +''' +Common utility functions +''' +import shutil +import sys +import os +from typing import List + + +def determine_wellknown_cmd(envvar, progname) -> List[str]: + maybe_env = os.getenv(envvar) + maybe_which = shutil.which(progname) + if maybe_env: + return maybe_env.split(' ') # Well-known vars are often meant to be word-split + elif maybe_which: + return [ maybe_which ] + else: + sys.exit(f"{progname} not found") diff --git a/contrib/devtools/utxo_snapshot.sh b/contrib/devtools/utxo_snapshot.sh new file mode 100755 index 0000000000000..dee25ff67b61e --- /dev/null +++ b/contrib/devtools/utxo_snapshot.sh @@ -0,0 +1,44 @@ +#!/usr/bin/env bash +# +# Copyright (c) 2019 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +# +export LC_ALL=C + +set -ueo pipefail + +if (( $# < 3 )); then + echo 'Usage: utxo_snapshot.sh ' + echo + echo " if is '-', don't produce a snapshot file but instead print the " + echo " expected assumeutxo hash" + echo + echo 'Examples:' + echo + echo " ./contrib/devtools/utxo_snapshot.sh 570000 utxo.dat ./src/bitcoin-cli -datadir=\$(pwd)/testdata" + echo ' ./contrib/devtools/utxo_snapshot.sh 570000 - ./src/bitcoin-cli' + exit 1 +fi + +GENERATE_AT_HEIGHT="${1}"; shift; +OUTPUT_PATH="${1}"; shift; +# Most of the calls we make take a while to run, so pad with a lengthy timeout. +BITCOIN_CLI_CALL="${*} -rpcclienttimeout=9999999" + +# Block we'll invalidate/reconsider to rewind/fast-forward the chain. +PIVOT_BLOCKHASH=$($BITCOIN_CLI_CALL getblockhash $(( GENERATE_AT_HEIGHT + 1 )) ) + +(>&2 echo "Rewinding chain back to height ${GENERATE_AT_HEIGHT} (by invalidating ${PIVOT_BLOCKHASH}); this may take a while") +${BITCOIN_CLI_CALL} invalidateblock "${PIVOT_BLOCKHASH}" + +if [[ "${OUTPUT_PATH}" = "-" ]]; then + (>&2 echo "Generating txoutset info...") + ${BITCOIN_CLI_CALL} gettxoutsetinfo | grep hash_serialized_2 | sed 's/^.*: "\(.\+\)\+",/\1/g' +else + (>&2 echo "Generating UTXO snapshot...") + ${BITCOIN_CLI_CALL} dumptxoutset "${OUTPUT_PATH}" +fi + +(>&2 echo "Restoring chain to original height; this may take a while") +${BITCOIN_CLI_CALL} reconsiderblock "${PIVOT_BLOCKHASH}" diff --git a/contrib/filter-lcov.py b/contrib/filter-lcov.py new file mode 100755 index 0000000000000..db780ad53bd39 --- /dev/null +++ b/contrib/filter-lcov.py @@ -0,0 +1,28 @@ +#!/usr/bin/env python3 +# Copyright (c) 2017-2020 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. 
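+# An lcov tracefile consists of records that each start with an "SF:<source file>" line and end with
+# "end_of_record"; this script copies the tracefile, dropping every record whose SF: line contains one
+# of the supplied patterns. Example invocation (illustrative file names):
+#   ./contrib/filter-lcov.py -p src/test -p src/bench total_coverage.info filtered_coverage.info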
+ +import argparse + +parser = argparse.ArgumentParser(description='Remove the coverage data from a tracefile for all files matching the pattern.') +parser.add_argument('--pattern', '-p', action='append', help='the pattern of files to remove', required=True) +parser.add_argument('tracefile', help='the tracefile to remove the coverage data from') +parser.add_argument('outfile', help='filename for the output to be written to') + +args = parser.parse_args() +tracefile = args.tracefile +pattern = args.pattern +outfile = args.outfile + +in_remove = False +with open(tracefile, 'r', encoding="utf8") as f: + with open(outfile, 'w', encoding="utf8") as wf: + for line in f: + for p in pattern: + if line.startswith("SF:") and p in line: + in_remove = True + if not in_remove: + wf.write(line) + if line == 'end_of_record\n': + in_remove = False diff --git a/contrib/gitian-descriptors/README.md b/contrib/gitian-descriptors/README.md deleted file mode 100644 index 061b897d2a2c5..0000000000000 --- a/contrib/gitian-descriptors/README.md +++ /dev/null @@ -1,66 +0,0 @@ -### Gavin's notes on getting gitian builds up and running using KVM:### - -These instructions distilled from: -[ https://help.ubuntu.com/community/KVM/Installation]( https://help.ubuntu.com/community/KVM/Installation) -... see there for complete details. - -You need the right hardware: you need a 64-bit-capable CPU with hardware virtualization support (Intel VT-x or AMD-V). Not all modern CPUs support hardware virtualization. - -You probably need to enable hardware virtualization in your machine's BIOS. - -You need to be running a recent version of 64-bit-Ubuntu, and you need to install several prerequisites: - - sudo apt-get install ruby apache2 git apt-cacher-ng python-vm-builder qemu-kvm - -Sanity checks: - - sudo service apt-cacher-ng status # Should return apt-cacher-ng is running - ls -l /dev/kvm # Should show a /dev/kvm device - - -Once you've got the right hardware and software: - - git clone git://github.com/bitcoin/bitcoin.git - git clone git://github.com/devrandom/gitian-builder.git - mkdir gitian-builder/inputs - cd gitian-builder/inputs - - # Create base images - cd gitian-builder - bin/make-base-vm --suite precise --arch amd64 - cd .. - - # Get inputs (see doc/release-process.md for exact inputs needed and where to get them) - ... - - # For further build instructions see doc/release-notes.md - ... - ---------------------- - -`gitian-builder` now also supports building using LXC. See -[ https://help.ubuntu.com/12.04/serverguide/lxc.html]( https://help.ubuntu.com/12.04/serverguide/lxc.html) -... for how to get LXC up and running under Ubuntu. - -If your main machine is a 64-bit Mac or PC with a few gigabytes of memory -and at least 10 gigabytes of free disk space, you can `gitian-build` using -LXC running inside a virtual machine. - -Here's a description of Gavin's setup on OSX 10.6: - -1. Download and install VirtualBox from [https://www.virtualbox.org/](https://www.virtualbox.org/) - -2. Download the 64-bit Ubuntu Desktop 12.04 LTS .iso CD image from - [http://www.ubuntu.com/](http://www.ubuntu.com/) - -3. Run VirtualBox and create a new virtual machine, using the Ubuntu .iso (see the [VirtualBox documentation](https://www.virtualbox.org/wiki/Documentation) for details). Create it with at least 2 gigabytes of memory and a disk that is at least 20 gigabytes big. - -4. Inside the running Ubuntu desktop, install: - - sudo apt-get install debootstrap lxc ruby apache2 git apt-cacher-ng python-vm-builder - -5. 
Still inside Ubuntu, tell gitian-builder to use LXC, then follow the "Once you've got the right hardware and software" instructions above: - - export USE_LXC=1 - git clone git://github.com/bitcoin/bitcoin.git - ... etc diff --git a/contrib/gitian-descriptors/gitian-linux.yml b/contrib/gitian-descriptors/gitian-linux.yml deleted file mode 100644 index bba2104edb7ae..0000000000000 --- a/contrib/gitian-descriptors/gitian-linux.yml +++ /dev/null @@ -1,109 +0,0 @@ ---- -name: "bitcoin-linux-0.10" -enable_cache: true -suites: -- "precise" -architectures: -- "amd64" -packages: -- "g++-multilib" -- "git-core" -- "pkg-config" -- "autoconf2.13" -- "libtool" -- "automake" -- "faketime" -- "bsdmainutils" -- "binutils-gold" -reference_datetime: "2013-06-01 00:00:00" -remotes: -- "url": "https://github.com/bitcoin/bitcoin.git" - "dir": "bitcoin" -files: [] -script: | - WRAP_DIR=$HOME/wrapped - HOSTS="i686-pc-linux-gnu x86_64-unknown-linux-gnu" - CONFIGFLAGS="--enable-upnp-default --enable-glibc-back-compat" - FAKETIME_HOST_PROGS="" - FAKETIME_PROGS="date ar ranlib nm strip" - - export QT_RCC_TEST=1 - export GZIP="-9n" - export TAR_OPTIONS="--mtime="$REFERENCE_DATE\\\ $REFERENCE_TIME"" - export TZ="UTC" - export BUILD_DIR=`pwd` - mkdir -p ${WRAP_DIR} - if test -n "$GBUILD_CACHE_ENABLED"; then - export SOURCES_PATH=${GBUILD_COMMON_CACHE} - export BASE_CACHE=${GBUILD_PACKAGE_CACHE} - mkdir -p ${BASE_CACHE} ${SOURCES_PATH} - fi - - # Create global faketime wrappers - for prog in ${FAKETIME_PROGS}; do - echo '#!/bin/bash' > ${WRAP_DIR}/${prog} - echo "REAL=\`which -a ${prog} | grep -v ${WRAP_DIR}/${prog} | head -1\`" >> ${WRAP_DIR}/${prog} - echo 'export LD_PRELOAD=/usr/lib/faketime/libfaketime.so.1' >> ${WRAP_DIR}/${prog} - echo "export FAKETIME=\"${REFERENCE_DATETIME}\"" >> ${WRAP_DIR}/${prog} - echo "\$REAL \$@" >> $WRAP_DIR/${prog} - chmod +x ${WRAP_DIR}/${prog} - done - - # Create per-host faketime wrappers - for i in $HOSTS; do - for prog in ${FAKETIME_HOST_PROGS}; do - echo '#!/bin/bash' > ${WRAP_DIR}/${i}-${prog} - echo "REAL=\`which -a ${i}-${prog} | grep -v ${WRAP_DIR}/${i}-${prog} | head -1\`" >> ${WRAP_DIR}/${i}-${prog} - echo 'export LD_PRELOAD=/usr/lib/faketime/libfaketime.so.1' >> ${WRAP_DIR}/${i}-${prog} - echo "export FAKETIME=\"${REFERENCE_DATETIME}\"" >> ${WRAP_DIR}/${i}-${prog} - echo "\$REAL \$@" >> $WRAP_DIR/${i}-${prog} - chmod +x ${WRAP_DIR}/${i}-${prog} - done - done - export PATH=${WRAP_DIR}:${PATH} - - cd bitcoin - BASEPREFIX=`pwd`/depends - # Build dependencies for each host - for i in $HOSTS; do - make ${MAKEOPTS} -C ${BASEPREFIX} HOST="${i}" - done - - # Create the release tarball using (arbitrarily) the first host - ./autogen.sh - ./configure --prefix=${BASEPREFIX}/`echo "${HOSTS}" | awk '{print $1;}'` - make dist - SOURCEDIST=`echo bitcoin-*.tar.gz` - DISTNAME=`echo ${SOURCEDIST} | sed 's/.tar.*//'` - # Correct tar file order - mkdir -p temp - pushd temp - tar xf ../$SOURCEDIST - find bitcoin-* | sort | tar --no-recursion -c -T - | gzip -9n > ../$SOURCEDIST - popd - - ORIGPATH="$PATH" - # Extract the release tarball into a dir for each host and build - for i in ${HOSTS}; do - export PATH=${BASEPREFIX}/${i}/native/bin:${ORIGPATH} - mkdir -p distsrc-${i} - cd distsrc-${i} - INSTALLPATH=`pwd`/installed/${DISTNAME} - mkdir -p ${INSTALLPATH} - tar --strip-components=1 -xf ../$SOURCEDIST - - ./configure --prefix=${BASEPREFIX}/${i} --bindir=${INSTALLPATH}/bin --includedir=${INSTALLPATH}/include --libdir=${INSTALLPATH}/lib --disable-ccache --disable-maintainer-mode 
--disable-dependency-tracking ${CONFIGFLAGS} - make ${MAKEOPTS} - make install-strip - cd installed - find . -name "lib*.la" -delete - find . -name "lib*.a" -delete - rm -rf ${DISTNAME}/lib/pkgconfig - find . | sort | tar --no-recursion -c -T - | gzip -9n > ${OUTDIR}/${DISTNAME}-${i}.tar.gz - cd ../../ - done - mkdir -p $OUTDIR/src - mv $SOURCEDIST $OUTDIR/src - mv ${OUTDIR}/${DISTNAME}-x86_64-*.tar.gz ${OUTDIR}/${DISTNAME}-linux64.tar.gz - mv ${OUTDIR}/${DISTNAME}-i686-*.tar.gz ${OUTDIR}/${DISTNAME}-linux32.tar.gz - diff --git a/contrib/gitian-descriptors/gitian-osx-signer.yml b/contrib/gitian-descriptors/gitian-osx-signer.yml deleted file mode 100644 index db9b4af93d0ef..0000000000000 --- a/contrib/gitian-descriptors/gitian-osx-signer.yml +++ /dev/null @@ -1,37 +0,0 @@ ---- -name: "bitcoin-dmg-signer" -suites: -- "precise" -architectures: -- "amd64" -packages: -- "libc6:i386" -- "faketime" -reference_datetime: "2013-06-01 00:00:00" -remotes: [] -files: -- "bitcoin-0.9.99-osx-unsigned.tar.gz" -- "signature.tar.gz" -script: | - WRAP_DIR=$HOME/wrapped - mkdir -p ${WRAP_DIR} - export PATH=`pwd`:$PATH - FAKETIME_PROGS="dmg genisoimage" - - # Create global faketime wrappers - for prog in ${FAKETIME_PROGS}; do - echo '#!/bin/bash' > ${WRAP_DIR}/${prog} - echo "REAL=\`which -a ${prog} | grep -v ${WRAP_DIR}/${prog} | head -1\`" >> ${WRAP_DIR}/${prog} - echo 'export LD_PRELOAD=/usr/lib/faketime/libfaketime.so.1' >> ${WRAP_DIR}/${prog} - echo "export FAKETIME=\"${REFERENCE_DATETIME}\"" >> ${WRAP_DIR}/${prog} - echo "\$REAL \$@" >> $WRAP_DIR/${prog} - chmod +x ${WRAP_DIR}/${prog} - done - - UNSIGNED=`echo bitcoin-*.tar.gz` - SIGNED=`echo ${UNSIGNED} | sed 's/.tar.*//' | sed 's/-unsigned//'`.dmg - - tar -xf ${UNSIGNED} - ./detached-sig-apply.sh ${UNSIGNED} signature.tar.gz - ${WRAP_DIR}/genisoimage -no-cache-inodes -D -l -probe -V "Bitcoin-Qt" -no-pad -r -apple -o uncompressed.dmg signed-app - ${WRAP_DIR}/dmg dmg uncompressed.dmg ${OUTDIR}/${SIGNED} diff --git a/contrib/gitian-descriptors/gitian-osx.yml b/contrib/gitian-descriptors/gitian-osx.yml deleted file mode 100644 index eb6df2096eb1f..0000000000000 --- a/contrib/gitian-descriptors/gitian-osx.yml +++ /dev/null @@ -1,134 +0,0 @@ ---- -name: "bitcoin-osx-0.10" -enable_cache: true -suites: -- "precise" -architectures: -- "amd64" -packages: -- "g++-multilib" -- "git-core" -- "pkg-config" -- "autoconf2.13" -- "libtool" -- "automake" -- "faketime" -- "bsdmainutils" -- "cmake" -- "libcap-dev" -- "libz-dev" -- "libbz2-dev" -reference_datetime: "2013-06-01 00:00:00" -remotes: -- "url": "https://github.com/bitcoin/bitcoin.git" - "dir": "bitcoin" -files: -- "MacOSX10.7.sdk.tar.gz" -script: | - WRAP_DIR=$HOME/wrapped - HOSTS="x86_64-apple-darwin11" - CONFIGFLAGS="--enable-upnp-default GENISOIMAGE=$WRAP_DIR/genisoimage" - FAKETIME_HOST_PROGS="" - FAKETIME_PROGS="ar ranlib date dmg genisoimage" - - export QT_RCC_TEST=1 - export GZIP="-9n" - export TAR_OPTIONS="--mtime="$REFERENCE_DATE\\\ $REFERENCE_TIME"" - export TZ="UTC" - export BUILD_DIR=`pwd` - mkdir -p ${WRAP_DIR} - if test -n "$GBUILD_CACHE_ENABLED"; then - export SOURCES_PATH=${GBUILD_COMMON_CACHE} - export BASE_CACHE=${GBUILD_PACKAGE_CACHE} - mkdir -p ${BASE_CACHE} ${SOURCES_PATH} - fi - - export ZERO_AR_DATE=1 - - # Create global faketime wrappers - for prog in ${FAKETIME_PROGS}; do - echo '#!/bin/bash' > ${WRAP_DIR}/${prog} - echo "REAL=\`which -a ${prog} | grep -v ${WRAP_DIR}/${prog} | head -1\`" >> ${WRAP_DIR}/${prog} - echo 'export LD_PRELOAD=/usr/lib/faketime/libfaketime.so.1' >> 
${WRAP_DIR}/${prog} - echo "export FAKETIME=\"${REFERENCE_DATETIME}\"" >> ${WRAP_DIR}/${prog} - echo "\$REAL \$@" >> $WRAP_DIR/${prog} - chmod +x ${WRAP_DIR}/${prog} - done - - # Create per-host faketime wrappers - for i in $HOSTS; do - for prog in ${FAKETIME_HOST_PROGS}; do - echo '#!/bin/bash' > ${WRAP_DIR}/${i}-${prog} - echo "REAL=\`which -a ${i}-${prog} | grep -v ${WRAP_DIR}/${i}-${prog} | head -1\`" >> ${WRAP_DIR}/${i}-${prog} - echo 'export LD_PRELOAD=/usr/lib/faketime/libfaketime.so.1' >> ${WRAP_DIR}/${i}-${prog} - echo "export FAKETIME=\"${REFERENCE_DATETIME}\"" >> ${WRAP_DIR}/${i}-${prog} - echo "\$REAL \$@" >> $WRAP_DIR/${i}-${prog} - chmod +x ${WRAP_DIR}/${i}-${prog} - done - done - export PATH=${WRAP_DIR}:${PATH} - - cd bitcoin - BASEPREFIX=`pwd`/depends - - mkdir -p ${BASEPREFIX}/SDKs - tar -C ${BASEPREFIX}/SDKs -xf ${BUILD_DIR}/MacOSX10.7.sdk.tar.gz - - # Build dependencies for each host - for i in $HOSTS; do - make ${MAKEOPTS} -C ${BASEPREFIX} HOST="${i}" - done - - # Create the release tarball using (arbitrarily) the first host - ./autogen.sh - ./configure --prefix=${BASEPREFIX}/`echo "${HOSTS}" | awk '{print $1;}'` - make dist - SOURCEDIST=`echo bitcoin-*.tar.gz` - DISTNAME=`echo ${SOURCEDIST} | sed 's/.tar.*//'` - - # Correct tar file order - mkdir -p temp - pushd temp - tar xf ../$SOURCEDIST - find bitcoin-* | sort | tar --no-recursion -c -T - | gzip -9n > ../$SOURCEDIST - popd - - ORIGPATH="$PATH" - # Extract the release tarball into a dir for each host and build - for i in ${HOSTS}; do - export PATH=${BASEPREFIX}/${i}/native/bin:${ORIGPATH} - mkdir -p distsrc-${i} - cd distsrc-${i} - INSTALLPATH=`pwd`/installed/${DISTNAME} - mkdir -p ${INSTALLPATH} - tar --strip-components=1 -xf ../$SOURCEDIST - - ./configure --prefix=${BASEPREFIX}/${i} --bindir=${INSTALLPATH}/bin --includedir=${INSTALLPATH}/include --libdir=${INSTALLPATH}/lib --disable-ccache --disable-maintainer-mode --disable-dependency-tracking ${CONFIGFLAGS} - make ${MAKEOPTS} - make install-strip - - make deploydir - mkdir -p unsigned-app-${i} - cp contrib/macdeploy/detached-sig-apply.sh unsigned-app-${i} - cp contrib/macdeploy/detached-sig-create.sh unsigned-app-${i} - cp ${BASEPREFIX}/${i}/native/bin/dmg ${BASEPREFIX}/${i}/native/bin/genisoimage unsigned-app-${i} - cp ${BASEPREFIX}/${i}/native/bin/${i}-codesign_allocate unsigned-app-${i}/codesign_allocate - cp ${BASEPREFIX}/${i}/native/bin/${i}-pagestuff unsigned-app-${i}/pagestuff - mv dist unsigned-app-${i} - pushd unsigned-app-${i} - find . | sort | tar --no-recursion -czf ${OUTDIR}/${DISTNAME}-osx-unsigned.tar.gz -T - - popd - - make deploy - ${WRAP_DIR}/dmg dmg Bitcoin-Qt.dmg ${OUTDIR}/${DISTNAME}-osx-unsigned.dmg - - cd installed - find . -name "lib*.la" -delete - find . -name "lib*.a" -delete - rm -rf ${DISTNAME}/lib/pkgconfig - find . 
| sort | tar --no-recursion -c -T - | gzip -9n > ${OUTDIR}/${DISTNAME}-${i}.tar.gz - cd ../../ - done - mkdir -p $OUTDIR/src - mv $SOURCEDIST $OUTDIR/src - mv ${OUTDIR}/${DISTNAME}-x86_64-*.tar.gz ${OUTDIR}/${DISTNAME}-osx64.tar.gz diff --git a/contrib/gitian-descriptors/gitian-win.yml b/contrib/gitian-descriptors/gitian-win.yml deleted file mode 100644 index 97c823cde6943..0000000000000 --- a/contrib/gitian-descriptors/gitian-win.yml +++ /dev/null @@ -1,115 +0,0 @@ ---- -name: "bitcoin-win-0.10" -enable_cache: true -suites: -- "precise" -architectures: -- "amd64" -packages: -- "g++" -- "git-core" -- "pkg-config" -- "autoconf2.13" -- "libtool" -- "automake" -- "faketime" -- "bsdmainutils" -- "mingw-w64" -- "g++-mingw-w64" -- "nsis" -- "zip" -reference_datetime: "2013-06-01 00:00:00" -remotes: -- "url": "https://github.com/bitcoin/bitcoin.git" - "dir": "bitcoin" -files: [] -script: | - WRAP_DIR=$HOME/wrapped - HOSTS="x86_64-w64-mingw32 i686-w64-mingw32" - CONFIGFLAGS="--enable-upnp-default" - FAKETIME_HOST_PROGS="g++ ar ranlib nm windres strip" - FAKETIME_PROGS="date makensis zip" - - export QT_RCC_TEST=1 - export GZIP="-9n" - export TAR_OPTIONS="--mtime="$REFERENCE_DATE\\\ $REFERENCE_TIME"" - export TZ="UTC" - export BUILD_DIR=`pwd` - mkdir -p ${WRAP_DIR} - if test -n "$GBUILD_CACHE_ENABLED"; then - export SOURCES_PATH=${GBUILD_COMMON_CACHE} - export BASE_CACHE=${GBUILD_PACKAGE_CACHE} - mkdir -p ${BASE_CACHE} ${SOURCES_PATH} - fi - - # Create global faketime wrappers - for prog in ${FAKETIME_PROGS}; do - echo '#!/bin/bash' > ${WRAP_DIR}/${prog} - echo "REAL=\`which -a ${prog} | grep -v ${WRAP_DIR}/${prog} | head -1\`" >> ${WRAP_DIR}/${prog} - echo 'export LD_PRELOAD=/usr/lib/faketime/libfaketime.so.1' >> ${WRAP_DIR}/${prog} - echo "export FAKETIME=\"${REFERENCE_DATETIME}\"" >> ${WRAP_DIR}/${prog} - echo "\$REAL \$@" >> $WRAP_DIR/${prog} - chmod +x ${WRAP_DIR}/${prog} - done - - # Create per-host faketime wrappers - for i in $HOSTS; do - for prog in ${FAKETIME_HOST_PROGS}; do - echo '#!/bin/bash' > ${WRAP_DIR}/${i}-${prog} - echo "REAL=\`which -a ${i}-${prog} | grep -v ${WRAP_DIR}/${i}-${prog} | head -1\`" >> ${WRAP_DIR}/${i}-${prog} - echo 'export LD_PRELOAD=/usr/lib/faketime/libfaketime.so.1' >> ${WRAP_DIR}/${i}-${prog} - echo "export FAKETIME=\"${REFERENCE_DATETIME}\"" >> ${WRAP_DIR}/${i}-${prog} - echo "\$REAL \$@" >> $WRAP_DIR/${i}-${prog} - chmod +x ${WRAP_DIR}/${i}-${prog} - done - done - export PATH=${WRAP_DIR}:${PATH} - - cd bitcoin - BASEPREFIX=`pwd`/depends - # Build dependencies for each host - for i in $HOSTS; do - make ${MAKEOPTS} -C ${BASEPREFIX} HOST="${i}" - done - - # Create the release tarball using (arbitrarily) the first host - ./autogen.sh - ./configure --prefix=${BASEPREFIX}/`echo "${HOSTS}" | awk '{print $1;}'` - make dist - SOURCEDIST=`echo bitcoin-*.tar.gz` - DISTNAME=`echo ${SOURCEDIST} | sed 's/.tar.*//'` - - # Correct tar file order - mkdir -p temp - pushd temp - tar xf ../$SOURCEDIST - find bitcoin-* | sort | tar --no-recursion -c -T - | gzip -9n > ../$SOURCEDIST - popd - - ORIGPATH="$PATH" - # Extract the release tarball into a dir for each host and build - for i in ${HOSTS}; do - export PATH=${BASEPREFIX}/${i}/native/bin:${ORIGPATH} - mkdir -p distsrc-${i} - cd distsrc-${i} - INSTALLPATH=`pwd`/installed/${DISTNAME} - mkdir -p ${INSTALLPATH} - tar --strip-components=1 -xf ../$SOURCEDIST - - ./configure --prefix=${BASEPREFIX}/${i} --bindir=${INSTALLPATH}/bin --includedir=${INSTALLPATH}/include --libdir=${INSTALLPATH}/lib --disable-ccache 
--disable-maintainer-mode --disable-dependency-tracking ${CONFIGFLAGS} - make ${MAKEOPTS} - make deploy - make install-strip - cp -f bitcoin-*setup*.exe $OUTDIR/ - cd installed - mv ${DISTNAME}/bin/*.dll ${DISTNAME}/lib/ - find . -name "lib*.la" -delete - find . -name "lib*.a" -delete - rm -rf ${DISTNAME}/lib/pkgconfig - find . -type f | sort | zip -X@ ${OUTDIR}/${DISTNAME}-${i}.zip - cd ../.. - done - mkdir -p $OUTDIR/src - mv $SOURCEDIST $OUTDIR/src - mv ${OUTDIR}/${DISTNAME}-x86_64-*.zip ${OUTDIR}/${DISTNAME}-win64.zip - mv ${OUTDIR}/${DISTNAME}-i686-*.zip ${OUTDIR}/${DISTNAME}-win32.zip diff --git a/contrib/gitian-downloader/aschildbach-key.pgp b/contrib/gitian-downloader/aschildbach-key.pgp deleted file mode 100644 index df06e19fa4b10..0000000000000 Binary files a/contrib/gitian-downloader/aschildbach-key.pgp and /dev/null differ diff --git a/contrib/gitian-downloader/bluematt-key.pgp b/contrib/gitian-downloader/bluematt-key.pgp deleted file mode 100644 index fb6d9eb28423d..0000000000000 Binary files a/contrib/gitian-downloader/bluematt-key.pgp and /dev/null differ diff --git a/contrib/gitian-downloader/cfields-key.pgp b/contrib/gitian-downloader/cfields-key.pgp deleted file mode 100644 index 6b0bd240ba4cf..0000000000000 --- a/contrib/gitian-downloader/cfields-key.pgp +++ /dev/null @@ -1,52 +0,0 @@ ------BEGIN PGP PUBLIC KEY BLOCK----- -Version: GnuPG v1.4.12 (GNU/Linux) - -mQINBFOHTh4BEADdKsRvmNhX+B+bcPsgMkp8ztwJA5g/rmrOlHQpKOOf4P2tAr6w -FmXCChWF9Iq3pDFQ0t0iq5rgisFPyrGVT/VToMmH+/PSLTyIdAlgkRYDMAPsMAFV -MaADH4yiAgJ3cdXtysjaNQV5O25ypqq6/obUjZJD5Enn6b/UgHe2+7LTmTNsskOx -5s/WPPht79EY1kM4JQfmDx68CsmqeSAlT6yeO3RQcLn/l46cfXiwzMO4h1hsZS1r -pgciRp0EHK9uAjF2rjqt8v4SDxwyTnwfpBBulzvH9mBf+HRXWzoTMR4sC/oOZext -hKAH/ex47BxN3HU3ftNhCK2c1xcU1UOGSjbf0RdbwuSCxxa7mktEDumvOxAk9EBB -+PDPv7jO1FBK3rsJdscYQIL0AiRyO49VfNLARa34OqUi8pOAxKBQ9plO02W1gp7a -DVBPI05TZ46Y8dTR2Bc1raAgOyxnXM7jfiQG2gSULiKAJAI4HwOiodaiiHAxDaIo -a3mtsmfN25TZUQuA0I0BvHbJvLRlVnyZm3XVOcwReKJpZJV4qRhd3XNrERZdz6ZK -cAZnyC/X+Uzo4HfnVSsJk1GpIa4seYyrVCFfHMiAA6SkgAUFbV26KCOv4rNR2GlV -l2fVhu1RKOEUJ8nRcEqf93SehRVYdI67LepIPgmIwi0KG4HhoTbIHDAKWQARAQAB -tCtDb3J5IEZpZWxkcyA8Y2ZpZWxkc0BiaXRjb2luZm91bmRhdGlvbi5vcmc+iQI4 -BBMBAgAiBQJTh04eAhsDBgsJCAcDAgYVCAIJCgsEFgIDAQIeAQIXgAAKCRAcJJH/ -6w73cBTiEADIGZSueBFmaOTJCgasKGguHns/n8P94EQBZr07rrgN99Rzp85WvDUN -Qa72wj3GNcAffN7aZlIWv4g+fjyr9AzHekjI/7iwwSYIfjfTR/xRUW7czRfKAOrK -iwpEzgv440i7PBvkS/AhNdUNkm+cJvaQUej/F2/O52qDLEpHuzvjAUUWlSeF9/oO -AjM9dfC24L5k5cVwQvH9noxk3EyuE7BuiGE5a+kKiORrtxiHeUG6GYQxuqrPucLU -fI67ETyXa0YSpYm5/O65BKMTMpmkMvv1JC2kqqsYTrO5p158CrKzq2xvpuG4ABsb -9KwICUGW31Ndr6TXwQJFa1b7VK4G1g6M1DFkVTOLJnEyOwgYxsXrV5QFpzpAOAji -6KcxNGeow1avAFYbqjjLgu9UNuq6b8du13hjkQxVs2NAP1Kd/u2ADwxQHMhZGVEC -9LIcLVSP9ShY6fR8m6fwSlJfpiV81uLNVD8KIyvp+pYTQ/FnxoPhPIwalYquBZKi -0u38igW75IzZ0fYvJgTumE/8ofSVkutVtrQb21eJclVrJGMNweTlJcJhAWdKkjDC -e6mSj8GItKV1ef+eusXSzs/wPyTaqgkELvvAOZdwUq3kobQErE5HOuPEOvcwuY96 -DcxLexirCGW5wCUq7Db0c0dUjQwzzb5OTW2jdnPVR0qxi29TnOJ2aLkCDQRTh04e -ARAAuJKpI6NTCQrjEqe9AYywN8676+fPS5bqXkyb/iub6MXeQdwpH0K42lXAaYMq -ow/0aLlvGWCHuJJGozoOWpTzQ+VPbhpdARoLCop5fYTpy8Q17ubLeeODDtr6jtDN -lmg+9PBIErIVUnUS2wNZuJRVsfwlLaU3T2v8kQnQ6AEbl/QwyWW9nB8rAWBu6Hvs -VdtcBmtHSr9xAGBGfW6rSVhTitikR4lWJPdNJxI3pLaswpLIUIQ1rssKO4glljcp -C6nhMvRkDLvDFvDP9QnmwY/A4ch5S6ANPrhOjQuu9njjQ+/ImrJTjAXqHwg5KdTc -NKxufgvi9elOQ422o0No3yKdRoRA4kdcUmqA9gNZDyX0ZTd17aNqc42Zt3aYLJ11 -bLZZp0qnfhkmhbsBZZtaLNkuF+RGPWysxY7KPMm+nHn6f3Wpr18E+T02wi02r4nS -HOQI+gppDqy3Vq3ZZNoUZynctiLZVHkqi+WYXqfD2tEn8UJKpht7jrZlNgkHFgT7 
-T0/U4+JmaQ/HltE+IexAIH0GP0Jt6hmRoZimdoy8Q8NY5t/fn9CQNJm5InrHvooN -aFmZMvzGTGiTqBqnA/7k9FCUEG98LK11MsIssY8YE/F6HD69R3ISyRvhUbpFvhD8 -c6zOkEKngTWvyRevrDrDz2yoZ1+T1X350+92rbEc/8WyutcAEQEAAYkCHwQYAQIA -CQUCU4dOHgIbDAAKCRAcJJH/6w73cAakEACv4EUEjtFjqnGB0Lru5FKs1obWcf37 -c4a5yYvOw58dkEZ9hsq34qWGLT128n6R24KEG+3O4CbplAD5Kt2eAPracbPHMAn8 -TGmC+KjiGlBR5xCY9dD0fn5EbRWOa+Fdcj1DpneaqMl9vLnBbqGp7pa/MwSOc+FB -0Ms2rcGJJMNHgITfP22eCf6pvf/xq7kKbUJ3Kjqdc2hWlRMjC/OOeITdrgycfDk/ -AOzLNqk5q7bYOxna6rWDLGSkCATyQKaBTVK7wRd1VrIhI4vfFqy+BWYXyXJ0pxjS -eaCDwbWHX/KW+0qLsmHxFMAyHJPjs8LEwK/DRbmWhe1HzPcBKmpyjqlkuxPjAdSl -hP4+IBvVNLf2Kh3uFHehk9A6oCYZGe3lLfQnOxIantXF7IROTmiZZsb+08w6cIXE -+r6kWG6vP2aCVtzYNfY+2p5xfg3yMxcxENJki1WSCOq6WVf9IWFzSJu+0+eazD3L -3QpZoSX5VvT6x05C0Ay1ert0Q5MyF84Eh8mDqL4PhpWtQhZMp8SG4jqFVgrhM4sl -vWGYXGns4tbnNPiiksjBD8TTvG3+mt48sNJIpHThjdWJSZjllYG7jV8oi7HrX8M2 -LOwWWLYxHkqi9wpmrWHSmniex6ABozcqrb+EgSMnHuSd7glmOJxHToJIudJbKG5D -MrD0ofsytfy1LQ== -=DE4h ------END PGP PUBLIC KEY BLOCK----- diff --git a/contrib/gitian-downloader/devrandom-key.pgp b/contrib/gitian-downloader/devrandom-key.pgp deleted file mode 100644 index 71898127ba0d6..0000000000000 Binary files a/contrib/gitian-downloader/devrandom-key.pgp and /dev/null differ diff --git a/contrib/gitian-downloader/gavinandresen-key.pgp b/contrib/gitian-downloader/gavinandresen-key.pgp deleted file mode 100644 index f81f44e874707..0000000000000 Binary files a/contrib/gitian-downloader/gavinandresen-key.pgp and /dev/null differ diff --git a/contrib/gitian-downloader/laanwj-key.pgp b/contrib/gitian-downloader/laanwj-key.pgp deleted file mode 100644 index 559295109d964..0000000000000 --- a/contrib/gitian-downloader/laanwj-key.pgp +++ /dev/null @@ -1,28 +0,0 @@ ------BEGIN PGP PUBLIC KEY BLOCK----- -Version: SKS 1.1.0 - -mQENBE5UtMEBCADOUz2i9l/D8xYINCmfUDnxi+DXvX5LmZ39ZdvsoE+ugO0SRRGdIHEFO2is -0xezX50wXu9aneb+tEqM0BuiLo6VxaXpxrkxHpr6c4jf37SkE/H0qsi/txEUp7337y3+4HMG -lUjiuh802I72p1qusjsKBnmnnR0rwNouTcoDmGUDh7jpKCtzFv+2TR2dRthJn7vmmjq3+bG6 -PYfqoFY1yHrAGT1lrDBULZsQ/NBLI2+J4oo2LYv3GCq8GNnzrovqvTvui50VSROhLrOe58o2 -shE+sjQShAy5wYkPt1R1fQnpfx+5vf+TPnkxVwRb3h5GhCp0YL8XC/BXsd5vM4KlVH2rABEB -AAG0K1dsYWRpbWlyIEouIHZhbiBkZXIgTGFhbiA8bGFhbndqQGdtYWlsLmNvbT6JATgEEwEC -ACIFAk5UtMECGwMGCwkIBwMCBhUIAgkKCwQWAgMBAh4BAheAAAoJEHSBCwEjRsmmy6YIAK09 -buNXyYQrJBsX16sXxEhx5QPKyF3uHJDFJv66SdnpvIkNoznsaPiRJkbTANop93FZmaGa6wVn -zGDiz7jPA8Dpxx5aAYPhIT+zPJAdXWM3wJ/Gio9besRNzniai8Lwi5MZ9R/5yFGBobm6/AcN -4sUoqA3NSV2U3I29R0Vwlzo8GVtmyi9ENSi6Oo7AcXNTRt69cxW4nAHkB+amwwDJlcAb31ex -bogYXPhScwqQZixRr+JBkKxBjkTXXnQypT4KI5SegYwQVYfyiZmDP7UHKe/u6pSKKbVphLg8 -xLB5spcXse8/a2+onrbNlw6y8TXiJ++Z54PE7zztWTXf2huakeG5AQ0ETlS0wQEIAMNO3OkP -xoPRKWzBLcI7JRITAW+HNaLTq3uN2+4WxA57DEjbL9EDoAv+7wTkDAL40f0T+xiu6GJcLFjw -GJZu/tYu7+mErHjrdo+K4suCQt7w5EXCBvOLjhW4tyYMzNx8hP+oqzOW9iEC+6VV91+DYeqt -EkJuyVXOI4vzBlTw8uGow8aMMsCq8XVvKUZFTPsjGl197Q5B3A+ZOFCR8xqiqdPjuz6MglVV -oFdDNu3EZn8zkGsQlovXoE9ndVeVzx/XMNmsxFaMYsReUs253RIf1FEfgExID0fg2OnyLCjS -2iFW1RgajS+/saIkKl+N1iuMzJA7wMAM0plhRueOG0MtZSsAEQEAAYkBHwQYAQIACQUCTlS0 -wQIbDAAKCRB0gQsBI0bJpmsDB/4waenn2CvSHXyomykfpwf5lMte1V5LvH3z5R2LY+1NopRv -LSz3iC39x69XWiTbhywDfgafnGPW4pWBOff2/bu5/A6z1Hnan1vyrRRD/hx1uMJ7S6q+bIvZ -iVIg1p0jH6tdIIhwX3cydhdRZHo7e9oSMgOUWsr6Ar59NRo9CENwGPE4U61HXfOnxWdrFWoA -XdwZczBeLxmUy6Vo6sKqv+gE4bqrtAM0sY/MsQ9cU95x+52ox/sq44lQMwd3ZBYUP7B1qbHI -hZSZuch6MLi5scLPeau0ZvCaljiaMeivP5+x0gWPRs0kI+9sZxInbqvrsJ6oOBJM3xYGhtn1 -zZ7qmZR7 -=si/k ------END PGP PUBLIC KEY BLOCK----- diff --git a/contrib/gitian-downloader/linux-download-config 
b/contrib/gitian-downloader/linux-download-config deleted file mode 100644 index f5e6382b846d1..0000000000000 --- a/contrib/gitian-downloader/linux-download-config +++ /dev/null @@ -1,42 +0,0 @@ ---- -name: bitcoin -urls: -- http://bitcoin.org/bitcoin-latest-linux-gitian.zip -rss: -- url: http://sourceforge.net/api/file/index/project-id/244765/mtime/desc/limit/100/rss - xpath: //item/link/text() - pattern: bitcoin-\d+.\d+.\d+-linux-gitian.zip -signers: - 0A82509767C7D4A5D14DA2301AE1D35043E08E54: - name: BlueMatt - key: bluematt - BF6273FAEF7CC0BA1F562E50989F6B3048A116B5: - name: Devrandom - key: devrandom - E463A93F5F3117EEDE6C7316BD02942421F4889F: - name: Luke-Jr - key: luke-jr - D762373D24904A3E42F33B08B9A408E71DAAC974: - name: "Pieter Wuille" - key: sipa - 77E72E69DA7EE0A148C06B21B34821D4944DE5F7: - name: tcatm - key: tcatm - 01CDF4627A3B88AAE4A571C87588242FBE38D3A8: - name: "Gavin Andresen" - key: gavinandresen - 71A3B16735405025D447E8F274810B012346C9A6: - name: "Wladimir J. van der Laan" - key: laanwj - AEC1884398647C47413C1C3FB1179EB7347DC10D: - name: "Warren Togami" - key: wtogami - 9692B91BBF0E8D34DFD33B1882C5C009628ECF0C: - name: michagogo - key: michagogo - E944AE667CF960B1004BC32FCA662BE18B877A60: - name: "Andreas Schildbach" - key: aschildbach - C060A6635913D98A3587D7DB1C2491FFEB0EF770: - name: "Cory Fields" - key: "cfields" diff --git a/contrib/gitian-downloader/luke-jr-key.pgp b/contrib/gitian-downloader/luke-jr-key.pgp deleted file mode 100644 index 275b041d2059e..0000000000000 Binary files a/contrib/gitian-downloader/luke-jr-key.pgp and /dev/null differ diff --git a/contrib/gitian-downloader/michagogo-key.pgp b/contrib/gitian-downloader/michagogo-key.pgp deleted file mode 100644 index 47bc404554b41..0000000000000 --- a/contrib/gitian-downloader/michagogo-key.pgp +++ /dev/null @@ -1,59 +0,0 @@ ------BEGIN PGP PUBLIC KEY BLOCK----- -Version: GnuPG v1.4.12 (GNU/Linux) - -mQENBFGeqJ4BCADb7SI3/+q93gIvN0AGRg9Mtz73OLIOzCHeeoyn+tp7JcYNzxkQ -9lfeXiEfn72Sh8gHkLtLIqr7HlIMo8DxSS8JPRVjlJGkNyAW4SeEwN2wNa5OV8k0 -N4jBa9a1csFyCyrEkPKvkUpBkQDvNXjNxyEhHwyZqPanKxy6NXIHOJji8ObOMQXI -T9HwJrpjRth3u4uKG968JBTEyAXAmkt0Zidl1Ykgzcedk4mJSE9uZCW8DjSv2wML -XcQz8+dYsoskT3KRdkowLHxAfj1BNyNc1+rKLghliM5vSQWi+Lbhi1Bxh4sY1UwA -lKnAGqrnAGyIvCtkwTq5QI6ufF2ZY44bvVgpABEBAAG0IU1pY2hhZ29nbyA8bWlj -aGFnb2dvQHNlcnZlci5mYWtlPokBOAQTAQIAIgUCUZ6ongIbAwYLCQgHAwIGFQgC -CQoLBBYCAwECHgECF4AACgkQgsXACWKOzwzMUAgAuqUmK10xE5C3lUym2f72z0t6 -a2NM5Wfjr9//Y1/okC36C5XAMEtN2UwckPzzJ5p5D5y5yzwfZq5Jd8Py29VQIMsV -7FbC1a0H3D+bCyX+JJ6FAmUbnWOQ/+mydYc74RvD8iwjePNT6kziZNv6dMGctJTl -0alwjtQYgyGkeYKnIxbcyjHX/IawLUrunb/6mSKun87T8+NM/omfFCTc3l8TakpM -0wyNYRiUkIfUBvB8sDUU3A80qKN/hqRKvlFu3+/kMiAc9ZYQrbmsB+sYWdmM+4zw -8NBw3yuYzWyPuoa4PR5ZmS9F11WLMR5vTRCdLudAqYsWu3LtV6vAIvlOUa2LMLRg -TWljaGFnb2dvIChSZWdpc3RlcmVkIG5pY2sgbWljaGFnb2dvIG9uIGZyZWVub2Rl -IGFzIG9mIE9jdG9iZXIgMTIsIDIwMTMpIDxtaWNoYWdvZ29Ac2VydmVyLmZha2U+ -iQE4BBMBAgAiBQJSWarzAhsDBgsJCAcDAgYVCAIJCgsEFgIDAQIeAQIXgAAKCRCC -xcAJYo7PDA7nB/91wAiaMlU5nHLUu0anhNQbGvUdFgKK1zO90S5KzUdJcY438jcS -UJW1az8l9U9JBRIfPRYVhz/Z1TAJ+dCzD7D8BXHFeGEr0zNOh87ly9aB5du7dpN2 -oSBD6wLcJpqxt4h+XjSS2CX98/2ZIJxXENE2KySaTXP39Xl3eNwvJTUBA4XlcMey -J8KMp/IERli4H0O7vRyLgu3yYpUArTqAonzG1g2lfB35PQJfeInrRSniQ336otnZ -A8qwJ63kfUtWVDRz0g1fnvtiLGPivDJaI5hyIaUeJPaXU1+sg7YNroDu60o2NGZh -F+0IjHlvRfzzA+F9Vw38rpSqR3BmCdjf6Sv3iQEcBBABAgAGBQJSWa9/AAoJEH+r -EUJn5PoE/hgH/1T2dAthVucA/hzY0nl4SMjbg+dzNlYBq00Qwx8DRKVjk5et8+kY -oPI3DGILcr+ELnxNekeMv9WQBBtJanUh1K5ohZ6ohoR7lG18LXf5HCdspflB5Me6 
-LMA6iMryEP6gIs9GFuoGe2YQavm58YrkqhcPu34dGN7kdurfEXLvDfVlh5ZbKCsP -Gyd7Pbz04SpqykgK1udiTsLVjc70Xhv+jAMqeaCugDX6TLEwjVmZH/xsyKk2Uh3V -Oib5FXADAtKH+vSqqhFpXrw7R/NaBzvCbas8l61DFHiUg1/bo8vsV8MtGcyZmzXJ -C5Gm0njtGOil/g7JF9siUrpxs9Yyt/h+T2W0W01pY2hhZ29nbyAoVXNlciBhY2Nv -dW50IG1pY2hhZ29nbyBvbiBHaXRodWIgYXMgb2YgT2N0b2JlciAxMiwgMjAxMykg -PG1pY2hhZ29nb0BzZXJ2ZXIuZmFrZT6JATcEEwECACIFAlJZqxkCGwMGCwkIBwMC -BhUIAgkKCwQWAgMBAh4BAheAAAoJEILFwAlijs8M+1AH+IU78ARblqTnJeSl0iWH -mEsg4IBK30Q6/exDAcqOEm1Yc171uw2WnGmIvPYOQqxrRTvj3LoQ816dU6jrj6vY -s+XX0R2hxy7ILh17D/3UKnHcddu7rmc7pNEqZeBXaMughqQaPOWkAIe52+qK5tsl -sWllzTYE4jo29uZ3dAtDcKEJjBo/pIXnu1GOslE1+V4X1H9WDlwrS/JXHzyDQAjt -maPR+3gNesDanhrRmrnT3ZXW2ZVd3vGBibhia8PWUhU1uwOH23ySWXncgsHH0Zad -UMjd4w3YliZP/mLn2ghAxHB70IO7lgAgN3HYZeFoufP3pcK440A+CezfQiRcjHl/ -oIkBHAQQAQIABgUCUlmvfwAKCRB/qxFCZ+T6BOq9CACItsrUZPKGeWSTkMHknMrV -K5vxIXJVCBb+Tppc0Q/J5p4EkW/RFhTwIP2zw8NLDKMh5oO9md4LXhvfIZkqQJFo -6ZtLa3Vf+Kj7uyxezBo4QHA+G7tDsRGaMKVrEMiyLCwS1+hg9VaNzsf7zmQW7mYE -vTLMHp3cVaSU7Mh2Dl8rnAaM/DpTUZQwZ+32Qrb/Z4HSa4f278iqoFpjEbBE2KCr -vT5yEVvpCZ4lwSgA2a+uTlRTvVV6NA/kpsxU64tmhuEOjy+ToDqJ8wv4mqvWZxMv -C6OhfVaXBy3U9gG8aQV0ffXGs+TbCtv8ApHd6E1/AVk0oyZGJaBVrEl688bBIWd/ -uQENBFGeqJ4BCADFmgR7oEGkFFB5qXnuNYFq1nUGDAh0dLNtAD3J6EMxUZEXdmp+ -DQHJw6/eDRQaG9EbjNZheycbVUoI8K2Y/Z268HQueGuIEIJv6cZYXoXdWCbDD4fn -HMNUX2wNlpDqWxb7PNUEtfU9hI3gmHGlr5OiEh3iV06uiZg4n2rbWPbj45m5LJzv -wpCrUA+pLcl9Xjw2cajaSTjdXHk9gvXTCo6s2ZS3/3Q4l+xuzZp1MGNzPQHASMKs -wecSJKkYg6W8I5WsVlPd9a8oQCc/Nfz7BPw31MRVR/SF5FAMqaXx5uLwghVdHB2i -cLURsOtJlCfP8W06gB7yS+MH45Jq/oxBRiJBABEBAAGJAR8EGAECAAkFAlGeqJ4C -GwwACgkQgsXACWKOzwwT4wgAy6ICcnBZ9l2jSu+ldy57F6jf5kpKZgB9NV8V2mMA -NeY1wMQ4VTVpU4t3s4E2LYtGNJNkPQVHbt1Pf4dGPasvMPaHMamgwgyqgYixqs0x -D5PdKzVrfnjwTTr/ZAFdccSPmvy5/hbY0geQ/+mzdbL07+xaT58JIoG5nySDKhmC -VeOvhDZtXMVAhEWBDPEgh/H9sEuBgMgZrzfE1j3q802qiXeQs6WtadWlQ1RN9Iq1 -ZzIi6u9/BifEIRI0pO/WwKOZdXLTemFUoakoe7uT3A74N96t0G9LZVihYbEoO+Pc -5IaHPBV5VLeR3TB1LnnjHVf/Fwi8cnGy50kNWjcbMyEDag== -=jyQ4 ------END PGP PUBLIC KEY BLOCK----- diff --git a/contrib/gitian-downloader/sipa-key.pgp b/contrib/gitian-downloader/sipa-key.pgp deleted file mode 100644 index ffa09bb4ad276..0000000000000 Binary files a/contrib/gitian-downloader/sipa-key.pgp and /dev/null differ diff --git a/contrib/gitian-downloader/tcatm-key.pgp b/contrib/gitian-downloader/tcatm-key.pgp deleted file mode 100644 index baaec76b8c656..0000000000000 Binary files a/contrib/gitian-downloader/tcatm-key.pgp and /dev/null differ diff --git a/contrib/gitian-downloader/win32-download-config b/contrib/gitian-downloader/win32-download-config deleted file mode 100644 index 06c164180d507..0000000000000 --- a/contrib/gitian-downloader/win32-download-config +++ /dev/null @@ -1,42 +0,0 @@ ---- -name: bitcoin -urls: -- http://bitcoin.org/bitcoin-latest-win32-gitian.zip -rss: -- url: http://sourceforge.net/api/file/index/project-id/244765/mtime/desc/limit/100/rss - xpath: //item/link/text() - pattern: bitcoin-\d+.\d+.\d+-win32-gitian.zip -signers: - 0A82509767C7D4A5D14DA2301AE1D35043E08E54: - name: BlueMatt - key: bluematt - BF6273FAEF7CC0BA1F562E50989F6B3048A116B5: - name: Devrandom - key: devrandom - E463A93F5F3117EEDE6C7316BD02942421F4889F: - name: Luke-Jr - key: luke-jr - D762373D24904A3E42F33B08B9A408E71DAAC974: - name: "Pieter Wuille" - key: sipa - 77E72E69DA7EE0A148C06B21B34821D4944DE5F7: - name: tcatm - key: tcatm - 01CDF4627A3B88AAE4A571C87588242FBE38D3A8: - name: "Gavin Andresen" - key: gavinandresen - 71A3B16735405025D447E8F274810B012346C9A6: - name: "Wladimir J. 
van der Laan" - key: laanwj - AEC1884398647C47413C1C3FB1179EB7347DC10D: - name: "Warren Togami" - key: wtogami - 9692B91BBF0E8D34DFD33B1882C5C009628ECF0C: - name: michagogo - key: michagogo - E944AE667CF960B1004BC32FCA662BE18B877A60: - name: "Andreas Schildbach" - key: aschildbach - C060A6635913D98A3587D7DB1C2491FFEB0EF770: - name: "Cory Fields" - key: "cfields" diff --git a/contrib/gitian-downloader/wtogami-key.pgp b/contrib/gitian-downloader/wtogami-key.pgp deleted file mode 100644 index e0f6c4c5fdf56..0000000000000 --- a/contrib/gitian-downloader/wtogami-key.pgp +++ /dev/null @@ -1,131 +0,0 @@ ------BEGIN PGP PUBLIC KEY BLOCK----- -Version: GnuPG v1.4.13 (GNU/Linux) - -mQQNBFHOzpUBIADYwJ1vC5npnYCthOtiSna/siS6tdol0OXc82QRgK4Q2YeFCkpN -Fw/T5YK34BLVGWDHPoafG2+r1nXIuMZnJIiGw6QVOL2sP9f7PrMmzck5KJPHD14Y -GRd9BPkhmt3dXzOCjhig7jI6hKEYayfJNUNs9nlZEvl4QWIBMmk+IyqQz3f1HMfl -/GkFDShBYF8Ny7Ktlx7AaXymajm4DCrTkbj5V2ZDqJgyQM549EoPSwXBQYrEjye3 -g2viC8rUFRFWFjdnx7jFEb1uhx71YGuqiLxKihUW9pbSNK2cLweFazHSVmh+B/pz -fxHfUn+ijLSIAnprTmc/rq89un/iiPt0O/mspcCZ6hE5pFIyX+SC+9PrGz+bFSmw -PkMOZzG489G8k4t/uZsit6helkl0emg6JiXLTmS/oTuT7B9Z9/MeEhOXFcxUb0fr -2aZkEmH5d1oxSBis3D5nylmNJXOUSCpJAZ8E5Sr/5FbF9IPR+NSzosVacqCx5Dxj -vJ7HpZKn6pJfmwrghVXQv04NRTcxbHNmwd98cofBtWX8yBO8M2M+jZrU+BVDUbb/ -A1oAyIbUUswBP768Oh11bELhCly774VwBqTojm2yodLGSyysx4zoa6qL7myfor0m -a+K29y8WH9XGmKGMdUOg+q9z+ODky9aToGvEo2eVhKIlJsk0aFAGy/8awy6qRIIj -UqLMq6XoFcYlE7SmnFUDDDPlBK/NkFFqySpFhKNRyt69Ea9kYXOxDnf/EnBwHn8m -PiFQpeZqgnmhyj8Nk1SSQBgUi07NyXdQ/WIYpWmqqqfHRVQgSE9C1920T1zg/E97 -n5yYjI/gQQwq9wikkJmog6Ny7MSiwIU4LYV0pTUdI4//EJMId2FH8YEUfvG5ds+F -H/o/D4CAJ86KjspizfH8jEjhn0Rm/OtrxLz1rwA1gtF//P3TYNWw5qruL4stP3Rx -9Gve8Bm7oCBU73UT2ZJomEsWE3oqXinLRl3YCsjGDg/d3ySD6i0/BBROLIeXkh3M -M1CNCqREDGLA0vxQi1o7Zi7ZA4gWPSzvi/8KtSzY1iAQODxWUmOICRP7KQODWJmt -roTqhKgZ39wlR6eqkO8ZfAvRYsjvkL+EZFbbKbHxVJLhKchd2qHS+/Q3ov4SFzWY -/cE0ChOPDM587Jkps2bynKQAzQ6810FXmJc0ztrPeD3PEbuyY4KNJV8HGViRDJXi -wvs8eqfvTDGDPl4aLYVCKO9VqZ2OJvqhRhh71LQ2xRrX1LGnYLnUGCMuEQYKvMcI -TSssM/VAfeWAPJDklD0lVNJ7d9Z5ugvJHFc01SaaB47Aod2SPWp5DeiY4A8dcy2w -7f4Wx6FcdP1RXqaRZKCapBooN04vsvGllCshABEBAAG0KFdhcnJlbiBUb2dhbWkg -KDIwMTMpIDx3dG9nYW1pQGdtYWlsLmNvbT6JBDgEEwECACIFAlHOzpUCGwMGCwkI -BwMCBhUIAgkKCwQWAgMBAh4BAheAAAoJELEXnrc0fcENY4Ef/23L9iC/39ekJ8Is -1IZdCoDD7/DgVaZqydDcy/ha9uaDFY4MQ0h9RZYo1axVBth/Yxzh1XnvitW8HFKn -DXn5wJI++KWpdLMUsTrc2iWsjAGgicmN5bkQvfTnRwn2pF17EUUEhZ8YyE3qMSVD -rDBECLAswT4Oiq9r9yw3VCFsRaxz5bhk9AAzWjam4H7mAfaEAOUvuX221v+KGSDM -UsGAAe+GjMPL8KnGgEbISlSUF1Ubcw3EChcqjf3BID2gMLkAnGAoxlCZSYievytg -71mcHyIf9yF861QrGcrCh6/objtRdt4IDUVwo9wapunRmYCdZux4ApD0Hit8nAsm -QtxftSK6FWBTOCIRoOQTjwE8qj9GYTIbUFppX66Dzh00td5NKkWz0PVze7YSk2hC -KCVBYyUYHgkQYVlYLZw7dBrXSXv7ph95vc93RDS031cU7tPOrthqnMmhtg1WAwzH -xc2v3az9Gsw1RyxBAOVpkB0AFODiEiVg46xqmxaBPXfQOg/buZA2l4gK4U/pVUZH -72lle2CbBw6FoSx40Y3GYZWB2uEdXBTNLlhX7q2Jvo8WdeTxEv5ACZsjI7K/wrzt -nmvCHefOmVf4tefkXy1MyEvBt2+Ek9bHmHDL1BSk/JdJzJtam2uaP5pGum/PwIUW -KBatmHKZUKwgOIml9btB413C4zSK3GQmC5Y/+TxYybACIdxTDqPSczVZ5Q+jSywX -shdOoLXDRyrYhT2sHjZ1W29B8ebokqwousF77EA94sqfQvDDnmFpvfq9+m0WYtOh -PFF/yxOtlbPJYX7mnC8+dUgobSA4AR5Yrclt+levgivIyNuBwzevHRDMreMZKl2J -uiOT8tkuu66fAwEltIowjjV7TBRfij4QLXl/zfFo8jKU8efL3xluXoRn7g+E5FZ3 -19KTF/DWMcttfeTUYVnv0QTnstb1RGnVj7w8JMy90mKdMQFpl7IzHd2n6LrhEw1V -1AaPF7EcQBOlvsvlZdIFQrFyhKozKoGi3wRrl/bNdebxjIjPzfN9GgbiufFjz2d7 -DMR9GFXfUMVxLncaqBBy1X7MV17ZF7K4uw6DET4fRoecb4N5mJVUxvYq4iZApnNP -npgGdmlcyPD6o3ynx/vkw78m13Gfgw8i2OaUY7xBdOyNVEvkJZBLaC2hw+TKLaZa -v0RExtAO0i0QO4Y1eo78Pl9jOpz0wkJ4KG0270l1Jza4IyaIhYRDWagWOfOp/cXU 
-cvKKiuJhLOsX1Bapz+O2Aor9+EwWRdPd3BzE2ABdmKHPwrKobNp75wrCpQ5mZifn -DSTJRMPQQJV3wGfB2sP0NE47U8w5CCmVK8gEuqYr6wBl/CCq5tjiRc63VM+to5V4 -tVNTCJWIRgQQEQIABgUCUc7PqwAKCRBr3f6OVKKs8cYAAKCFCLJ5wc+iAVCFRevh -xTcJct0fiQCePHpY37CIeP8s9BH8GqCDftUqh8SIRgQQEQIABgUCUc7YwAAKCRDd -f+mrhdawLOVxAJ9Tjud26LtbM2mWcPj2eT7dhqgZrQCdGyMwMMVzp40lsCK44PrV -+mpFO7KJAhwEEAECAAYFAlHO0BkACgkQw35HI5aSdvXfLw//c2zZxXg4bI2W7gkB -ZQJIOWnmPZfhrXQNeFuetyGoWTm4ZWxW362AdDGiQSGNNkXqeBPOitKOkRyZP/Z3 -h1vwkLkwdFZyWXK00BzYBKfjThWV1BAnArQLewSiLlE7qSnsPEY6FW0PNv711cbL -lXSUP1/lW25Nx7L76GAF6sHreoIdglE8YH5y310JuFnqPa0uaJG+qDo8Mb+WkyLy -Q2A3Atws1tIB9vHsq2FCt9ACyAEA3AqtHR4uMFmIWpUYy77fJAZdzLZTWf0X5XYw -XILNPOl/I0iZrq3LYQAvJfIwjWAC/lm6uTLlvkIJHKyhcIT+RocjMV7bY9ezrC5i -Cag3gaOZ7USMt0h59KdmBaHHNa32n3PSHg9XWljqoWMRjuaRdcA7ofK0BHDJbHWE -cldKXC09laWOXbyNmJsfug/23vNE7fS/cAKSIgEWszEwHJCahB2i/HqOQF0DUGpq -3s5oIXs2xIuN0yT6yIIiQnTU/FkWDDu4D1OZNrDW6QG3cde0PRak/0fr4Kv4iB3E -CAzlsRBlWKNu/eE4QBx6cbvLqjriijhGAF+8Y1zvRKNKPr96hSsETfVytuKDTp6F -u7PAarrSATGXI92Hy3ThAZla0VOYUyeWPktqUMDNq90tIBZbwKpOMMqvJmZfgdOU -4ldDq1f5+2WhAt1aTL1GJVCuYcCJAhwEEAECAAYFAlHO3MQACgkQnSOpPExjO3Gi -jxAAsD+luooqqoz3A28ZxwfCDV+ovazQ4Bw6hVU0zKKZIz/2H4jwmLtLSHtucCRM -xRksZmnqf1p2nn+BKBXDInx9vI9HziMu7fWkzhuovAIf9+X/l6EYV1kQx0bIM1qU -BxXWPgGdrgSZZHl9Qff/BOBnrI8NJmVBDzOh3BSs0BrSR7aFbkSNbjk/JcP0JEyk -j6wDKQsop/Ca5AboLL0uQPgTvhxCu4VROKjhu7o3s7G3xlxTpimwYklDQuYFaGKj -ZNIGFq2orfIMBnj7ZEQVXzhWltlHcgPVP5TDfgd4pVUbyUB6ras7odJWWIHnUFmj -1l5bGidIwRXGFusE4iR8pR528LG2KxNDNQYipsKRY9m+wH+N7gbSgK8DxmocvieV -vcILFS5VrPLbEO2oC13NMljmvua3ovDB0CEh9rybaH+/oA+VDS2L3pkgATTju+Vx -6+mVdlvnrA4mJ5BoLHzrleKybS4ZkbtVBh1KOYmo95NgVifRvpVPB6hKzwqcjYFV -fVYBxTryTBRyd9MLsqpPKnGLBENTFvKDxRCK3iioNyVhXdS0z/UyF1C2hwNTpnjY -pGCu+Es3SILJg2TvQcwLM0OoYBA1bcONm2XbkTrdCpTOtQcSewQSkijREunx14iu -pvNSWeNmbjQU7gNYhvwcBgh90tWgNCfqTtSa5xSe46tmv0SJAhwEEAECAAYFAlHQ -1hgACgkQZwn/QC8Dr2hT/g/+OFUYPXfWo0+ILdxyTGP/v2mSw/X3dBCEYUqefWxD -umcwnksey+thEGFBlxbwpyOfAoTzZLUupaG6BacVgRUvv8bTne4v2H1d22aBXyjC -HMtQPhupn/giamu8q8hCPFrDp6inIAeFuz1GmQaH6xWO5eYBuYXQtxlvZLWBsuMT -74en4e3vjczxGmJu/nvM9ugcYsexA/zcN6SRGr7t2pV4ZElPzPBRyAzhYqhP1YlB -Rydz60OjgcWYEoJKWhJOfmFJ3ZoNGAz4TGoBkDIq4olCF0/cxqrtHN+ZnEOLwiZ7 -4ZX90avcjEFtM+Wb5dBHNpni4ISoHcVI1X0ye6tuAOOt7RywbET/0oIW5iSNMgJ0 -X4XYgOIQ2+a8yjGBjo9I57k0vp1mL6Ji/eaa0dlppcCGnzvSHss+O0qO212pg5Yk -GGfjX1y1ZeSP3ca9C2XyOGIVw2d2Iu7OyqAv/N81xt6ZgG3qixQC0nmgOmn7Kh2B -20W12KpLxKS8RQdHawGau3MBGKeqbfK6/eAzm22yD4/yJAoW4hKgm84z3FbKUN8w -ulYMK9hS2c4egpoDAOJ/QZLLXFWiyi7/sHZz69G2AweWCjOJh28Otg0cUHoLo7jw -oO/L0rCsOQMbUuIumYXBPHNnDwv1xfv2lT8tVzf6GksFJBAw0DybxOMTaOg45Lhz -jGS5BA0EUc7OlQEgAN6t+BV705uoCsdHtQBq/HKGGD5tBiOzy7Wd4nF/c6EWzET4 -QUnmw6bDnqjxrk9MWniPDf1O9MvuB4qIY6g9kEjZ+VSQpWUZpZ5bMXCNHrfh9J2Q -6oLWqDmpeZv2OI0O9wxT62QaFei2qBtimSnBudLSCnvmU3S0h1PflmJsbj+tVcko -w2yOh2bjH1jkVAODHvEbxqyD6fiZhbfUVbPC49SBmXv8Gv0UywNSkP+iqJdwZAb0 -XtjRx4WjZCkTwJAnbM4CJ63+5Hd83BtWZAZbGAh76XY/cSkDirXtXC+2LNUmP5W2 -QY+ur5Bvz8LHaqJMXLAtePdkv5kpd+jXBrZieXUtqovxZaQTinl7C3L2TZd/ivxD -F3Rko9BFDuXXcdZrxBY5b3146IvSPp1y0WmHRxhAPb+RuiHQMt8K92nOhPyvtWXB -mWz0GnW9L6+CW4LKSPRSnE057hyxYNP/DcDd+fWFH+MmhU9noqHfJXSaLVzdI5PI -L8N44AndPIojnlxrxRs7Ik/nW6cTV9H3agg+24yyTdFkACbfIS6wWXOHeHuBzmO6 -VI7pXOZJ9vZT7zI7M/hVci0R3putsGqgRfByRWWQ2DNeyrwUHexZNR/NYz1uhvA6 -dBfKcuAwqxbdSrW/BxJ+iJWdkgYGCV67VLlO6S9sO33HgOanpPr5R9V1KsFVh4dN -j6BjZ4ALE5FPNW+iONnuXvtZbN2cBlBzMDeFC9oZoYCs1Pkmk8xUY2sAXPUt1R0G -D/miIb7ig1N52j9P6vv6fPs1ghmc/hGkhaXyjS54B5T33V6M9g+yba9mIgi8ZxZa -G+4rlFFKA4HS7wYYRJoqMvnc/qBYvoWLaPu3Xq6AXrJyuAaN+e3L8++cWbYHBXF9 
-qt+Q2RFL0FNiYUQuwkiaerysnm1a0H7ZtJ4zjl4ZgA1Ej7QcylTIbgFW3L7FnyMH -/5weLLN2wdjAtzjhRPYJLbV6V/gFbbpCpr+caDUaxSNizQuhhzVI5UrJegaHCCrx -DCiwWRFYzN5pqhtgzcaImK76DmPIk+Yrsum5KJZQeGfzKxvF0YnwxU0bxFzcDZJD -X2oCJn828Aw2j0nIlVlrrao0JMkvTBeZehO/11U68M2vKGEqrsQOb/BTXyLCeZwn -UGow1WvYfRxEZTrhhiYw94EH06gbqmKG1xsuV4LDI5z63/6ACcQW3orMbMymJCky -4HiNVZ7SNeGoYe380CJCwv6GN1opKTAWp84cr2KzhAzONGqNWNpUhznAXlI+GzCc -D2H330L1atMqZHjgpEfrkowvJ7WBM5KFKDfylaTKhYvfZcTOZs5OmRZSW3U54wRD -RMP0d2+k3vRililNhHIErHbjhYFc6zubVbBhvUMAEQEAAYkEHwQYAQIACQUCUc7O -lQIbDAAKCRCxF563NH3BDSX2IACugAdZqX+o/+pTkSrj+NEAcP0ZMci8w5nm/yOP -VlGyY6PXGuQKcBtvz3LWtIDdddMc/bD/zmZPwSzTx1MMOWc+gjR0azXe2RrdMHYk -8pb4X4Op2Nkasoc/8hNsRKaU24WUAQMqrRREIVBEOuHGl1A52Lj+aFB04rRHrkMl -AqjB5bwArPorIBdM417EEl4hjEZ9BpQxbUgBhTgGTZuc1u9PsKz1YvQ79YJIRmSH -n72Zaf35zY55eOQeoVBzGmFPq+/UFqtRNWA7jmRhHvMz/yR33B/RSxyTJuPb79zi -2mIZOrViG3X/UNL4qtOc1cKXQBi+FjHAMlGrCc+D5lnyOhEvqoEuvQic7V6C8Pvk -9q+jngn2Gs4pdJO8FOnwaC5xp/ZNE0v7x/KtAHyBA6iKcaepgoRQPSt1ONiHyfh1 -iGgJn+Y6IHx4YDYKEY0UIzHhCfWUl8XZWcf4wLGEbGztkRbkCFqrsja5IeaO7umB -i6C4f95uSGjV7SiIMJOE8xo/m2g4VCnnmk7U996JwtBMKREMMqa3ABK4trfBL3Kq -P6I6ZTlA/C5svkVUVwWOMZau9kLDsxv8keGrFteZtfYa1KPAROFwNuBU82UW0KtX -QQbZoBKt1o3LhqEu+hXU3iKocYWSbBThH8u6vPNgSnW2Qcv3gcUU3jGmYeHrGiUO -SuEWxwlKUxCxBNfmz1FGswlwve1LsS3RTz/XB/L6Ubhq5L7FevrXz8152kuMqnpy -m93sXkL1eJVo07hH+otcRnMzy4vUar9z/N12t3hfTffx29PBKUCc2PKPVpLfJX2i -hieHk23fhLnptjc3lm9S+bHO3rqEWHqgNgNp9bpuwiLRsIy6qTtmC8jxXkGXvQrS -+2Hv6+jRfDcqEAK3vqi1XL7Td81KRjnheBtsKpjS2PFatK3uTo6v1oRWJCdRCxg1 -HT6a9KvZ+DNKcxlQISKAOLX72qpziaDl4CpBdQy4Zg2pr9oYkLdlfkaDK/OH4J3M -wJiVf/uNPPd+yy6xZXK0SPZHf+mf5Yt+Sim93hIbdS9AMdvHKB5n3DR27H+/okPj -w3J9z85hxgP5KspizQR6t77AWddPRy/l3BBZeb+HiaeKGBJeSNWXpkPXHkdjLW8U -QStzFR8r15FWJTmamIknjJ3XNbytMCpu8cj2ZVZdyjPcHEBL3WbNYYtauSuYmyUO -yXBaecM/KoTdvHiERU/mMuf7f1ftftCHehZoNaP+BeIbIud9IHIdrSQBCW+RC1Y1 -8opDLMtnIOX3OnyCN38ELYcuNLMJxBqnQgi7MVDVcT1+BN/+lFQtG44+rPUkK+T1 -Jk1/tIJqcyc1BfY6uFHFXWWnqQnjl0XpZo+/bMDxTVy8yND2 -=icdI ------END PGP PUBLIC KEY BLOCK----- diff --git a/contrib/guix/INSTALL.md b/contrib/guix/INSTALL.md new file mode 100644 index 0000000000000..68aae18731d04 --- /dev/null +++ b/contrib/guix/INSTALL.md @@ -0,0 +1,801 @@ +# Guix Installation and Setup + +This only needs to be done once per machine. If you have already completed the +installation and setup, please proceed to [perform a build](./README.md). + +Otherwise, you may choose from one of the following options to install Guix: + +1. Using the official **shell installer script** [⤓ skip to section][install-script] + - Maintained by Guix developers + - Easiest (automatically performs *most* setup) + - Works on nearly all Linux distributions + - Only installs latest release + - Binary installation only, requires high level of trust + - Note: The script needs to be run as root, so it should be inspected before it's run +2. Using the official **binary tarball** [⤓ skip to section][install-bin-tarball] + - Maintained by Guix developers + - Normal difficulty (full manual setup required) + - Works on nearly all Linux distributions + - Installs any release + - Binary installation only, requires high level of trust +3. Using fanquake's **Docker image** [↗︎ external instructions][install-fanquake-docker] + - Maintained by fanquake + - Easy (automatically performs *some* setup) + - Works wherever Docker images work + - Installs any release + - Binary installation only, requires high level of trust +4. 
Using a **distribution-maintained package** [⤓ skip to section][install-distro-pkg] + - Maintained by distribution's Guix package maintainer + - Normal difficulty (manual setup required) + - Works only on distributions with Guix packaged, see: https://repology.org/project/guix/versions + - Installs a release decided on by package maintainer + - Source or binary installation depending on the distribution +5. Building **from source** [⤓ skip to section][install-source] + - Maintained by you + - Hard, but rewarding + - Can be made to work on most Linux distributions + - Installs any commit (more granular) + - Source installation, requires lower level of trust + +## Options 1 and 2: Using the official shell installer script or binary tarball + +The installation instructions for both the official shell installer script and +the binary tarballs can be found in the GNU Guix Manual's [Binary Installation +section](https://guix.gnu.org/manual/en/html_node/Binary-Installation.html). + +Note that running through the binary tarball installation steps is largely +equivalent to manually performing what the shell installer script does. + +Note that at the time of writing (July 5th, 2021), the shell installer script +automatically creates an `/etc/profile.d` entry which the binary tarball +installation instructions do not ask you to create. However, you will likely +need this entry for better desktop integration. Please see [this +section](#add-an-etcprofiled-entry) for instructions on how to add a +`/etc/profile.d/guix.sh` entry. + +Regardless of which installation option you chose, the changes to +`/etc/profile.d` will not take effect until the next shell or desktop session, +so you should log out and log back in. + +## Option 3: Using fanquake's Docker image + +Please refer to fanquake's instructions +[here](https://github.com/fanquake/core-review/tree/master/guix). + +Note that the `Dockerfile` is largely equivalent to running through the binary +tarball installation steps. + +## Option 4: Using a distribution-maintained package + +Note that this section is based on the distro packaging situation at the time of +writing (July 2021). Guix is expected to be more widely packaged over time. For +an up-to-date view on Guix's package status/version across distros, please see: +https://repology.org/project/guix/versions + +### Debian 11 (Bullseye)/Ubuntu 21.04 (Hirsute Hippo) + +Guix v1.2.0 is available as a distribution package starting in [Debian +11](https://packages.debian.org/bullseye/guix) and [Ubuntu +21.04](https://packages.ubuntu.com/hirsute/guix). + +Note that if you intend on using Guix without using any substitutes (more +details [here][security-model]), v1.2.0 has a known problem when building GnuTLS +from source. Solutions and workarounds are documented +[here](#gnutls-test-suite-fail-status-request-revoked). + + +To install: +```sh +sudo apt install guix +``` + +For up-to-date information on Debian and Ubuntu's release history: +- [Debian release history](https://www.debian.org/releases/) +- [Ubuntu release history](https://ubuntu.com/about/release-cycle) + +### Arch Linux + +Guix is available in the AUR as +[`guix`](https://aur.archlinux.org/packages/guix/), please follow the +installation instructions in the Arch Linux Wiki ([live +link](https://wiki.archlinux.org/index.php/Guix#AUR_Package_Installation), +[2021/03/30 +permalink](https://wiki.archlinux.org/index.php?title=Guix&oldid=637559#AUR_Package_Installation)) +to install Guix. 
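+
+For reference, a minimal sketch of the usual manual AUR workflow (the AUR
+package is the `guix` package linked above; this assumes the `base-devel`
+group is installed, and an AUR helper such as `yay` or `paru` would perform
+these steps for you):
+
+```sh
+# Clone the AUR package and build/install it with makepkg
+git clone https://aur.archlinux.org/guix.git
+cd guix
+makepkg -si
+```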
+ +At the time of writing (2021/03/30), the `check` phase will fail if the path to +guix's build directory is longer than 36 characters due to an anachronistic +character limit on the shebang line. Since the `check` phase happens after the +`build` phase, which may take quite a long time, it is recommended that users +either: + +1. Skip the `check` phase + - For `makepkg`: `makepkg --nocheck ...` + - For `yay`: `yay --mflags="--nocheck" ...` + - For `paru`: `paru --nocheck ...` +2. Or, check their build directory's length beforehand + - For those building with `makepkg`: `pwd | wc -c` + +## Option 5: Building from source + +Building Guix from source is a rather involved process but a rewarding one for +those looking to minimize trust and maximize customizability (e.g. building a +particular commit of Guix). Previous experience with using autotools-style build +systems to build packages from source will be helpful. *hic sunt dracones.* + +I strongly urge you to at least skim through the entire section once before you +start issuing commands, as it will save you a lot of unnecessary pain and +anguish. + +### Installing common build tools + +There are a few basic build tools that are required for most things we'll build, +so let's install them now: + +Text transformation/i18n: +- `autopoint` (sometimes packaged in `gettext`) +- `help2man` +- `po4a` +- `texinfo` + +Build system tools: +- `g++` w/ C++11 support +- `libtool` +- `autoconf` +- `automake` +- `pkg-config` (sometimes packaged as `pkgconf`) +- `make` +- `cmake` + +Miscellaneous: +- `git` +- `gnupg` +- `python3` + +### Building and Installing Guix's dependencies + +In order to build Guix itself from source, we need to first make sure that the +necessary dependencies are installed and discoverable. The most up-to-date list +of Guix's dependencies is kept in the ["Requirements" +section](https://guix.gnu.org/manual/en/html_node/Requirements.html) of the Guix +Reference Manual. + +Depending on your distribution, most or all of these dependencies may already be +packaged and installable without manually building and installing. + +For reference, the graphic below outlines Guix v1.3.0's dependency graph: + +![bootstrap map](https://user-images.githubusercontent.com/6399679/125064185-a9a59880-e0b0-11eb-82c1-9b8e5dc9950d.png) + +#### Guile + +##### Choosing a Guile version and sticking to it + +One of the first things you need to decide is which Guile version you want to +use: Guile v2.2 or Guile v3.0. Unlike the python2 to python3 transition, Guile +v2.2 and Guile v3.0 are largely compatible, as evidenced by the fact that most +Guile packages and even [Guix +itself](https://guix.gnu.org/en/blog/2020/guile-3-and-guix/) support running on +both. + +What is important here is that you **choose one**, and you **remain consistent** +with your choice throughout **all Guile-related packages**, no matter if they +are installed via the distribution's package manager or installed from source. +This is because the files for Guile packages are installed to directories which +are separated based on the Guile version. + +###### Example: Checking that Ubuntu's `guile-git` is compatible with your chosen Guile version + +On Ubuntu Focal: + +```sh +$ apt show guile-git +Package: guile-git +... +Depends: guile-2.2, guile-bytestructures, libgit2-dev +... +``` + +As you can see, the package `guile-git` depends on `guile-2.2`, meaning that it +was likely built for Guile v2.2. 
This means that if you decided to use Guile +v3.0 on Ubuntu Focal, you would need to build guile-git from source instead of +using the distribution package. + +On Ubuntu Hirsute: + +```sh +$ apt show guile-git +Package: guile-git +... +Depends: guile-3.0 | guile-2.2, guile-bytestructures (>= 1.0.7-3~), libgit2-dev (>= 1.0) +... +``` + +In this case, `guile-git` depends on either `guile-3.0` or `guile-2.2`, meaning +that it would work no matter what Guile version you decided to use. + +###### Corner case: Multiple versions of Guile on one system + +It is recommended to only install one version of Guile, so that build systems do +not get confused about which Guile to use. + +However, if you insist on having both Guile v2.2 and Guile v3.0 installed on +your system, then you need to **consistently** specify one of +`GUILE_EFFECTIVE_VERSION=3.0` or `GUILE_EFFECTIVE_VERSION=2.2` to all +`./configure` invocations for Guix and its dependencies. + +##### Installing Guile + +Guile is most likely already packaged for your distribution, so after you have +[chosen a Guile version](#choosing-a-guile-version-and-sticking-to-it), install +it via your distribution's package manager. + +If your distribution splits packages into `-dev`-suffixed and +non-`-dev`-suffixed sub-packages (as is the case for Debian-derived +distributions), please make sure to install both. For example, to install Guile +v2.2 on Debian/Ubuntu: + +```sh +apt install guile-2.2 guile-2.2-dev +``` + +#### Mixing distribution packages and source-built packages + +At the time of writing, most distributions have _some_ of Guix's dependencies +packaged, but not all. This means that you may want to install the distribution +package for some dependencies, and manually build-from-source for others. + +Distribution packages usually install to `/usr`, which is different from the +default `./configure` prefix of source-built packages: `/usr/local`. + +This means that if you mix-and-match distribution packages and source-built +packages and do not specify exactly `--prefix=/usr` to `./configure` for +source-built packages, you will need to augment the `GUILE_LOAD_PATH` and +`GUILE_LOAD_COMPILED_PATH` environment variables so that Guile will look +under the right prefix and find your source-built packages. + +For example, if you are using Guile v2.2, and have Guile packages in the +`/usr/local` prefix, either add the following lines to your `.profile` or +`.bash_profile` so that the environment variable is properly set for all future +shell logins, or paste the lines into a POSIX-style shell to temporarily modify +the environment variables of your current shell session. + +```sh +# Help Guile v2.2.x find packages in /usr/local +export GUILE_LOAD_PATH="/usr/local/share/guile/site/2.2${GUILE_LOAD_PATH:+:}$GUILE_LOAD_PATH" +export GUILE_LOAD_COMPILED_PATH="/usr/local/lib/guile/2.2/site-ccache${GUILE_LOAD_COMPILED_PATH:+:}$GUILE_COMPILED_LOAD_PATH" +``` + +Note that these environment variables are used to check for packages during +`./configure`, so they should be set as soon as possible should you want to use +a prefix other than `/usr`. + +#### Building and installing source-built packages + +***IMPORTANT**: A few dependencies have non-obvious quirks/errata which are +documented in the sub-sections immediately below. 
Please read these sections +before proceeding to build and install these packages.* + +Although you should always refer to the README or INSTALL files for the most +accurate information, most of these dependencies use autoconf-style build +systems (check if there's a `configure.ac` file), and will likely do the right +thing with the following: + +Clone the repository and check out the latest release: +```sh +git clone /.git +cd +git tag -l # check for the latest release +git checkout +``` + +For autoconf-based build systems (if `./autogen.sh` or `configure.ac` exists at +the root of the repository): + +```sh +./autogen.sh || autoreconf -vfi +./configure --prefix= +make +sudo make install +``` + +For CMake-based build systems (if `CMakeLists.txt` exists at the root of the +repository): + +```sh +mkdir build && cd build +cmake .. -DCMAKE_INSTALL_PREFIX= +sudo cmake --build . --target install +``` + +If you choose not to specify exactly `--prefix=/usr` to `./configure`, please +make sure you've carefully read the [previous section] on mixing distribution +packages and source-built packages. + +##### Binding packages require `-dev`-suffixed packages + +Relevant for: +- Everyone + +When building bindings, the `-dev`-suffixed version of the original package +needs to be installed. For example, building `Guile-zlib` on Debian-derived +distributions requires that `zlib1g-dev` is installed. + +When using bindings, the `-dev`-suffixed version of the original package still +needs to be installed. This is particularly problematic when distribution +packages are mispackaged like `guile-sqlite3` is in Ubuntu Focal such that +installing `guile-sqlite3` does not automatically install `libsqlite3-dev` as a +dependency. + +Below is a list of relevant Guile bindings and their corresponding `-dev` +packages in Debian at the time of writing. + +| Guile binding package | -dev Debian package | +|-----------------------|---------------------| +| guile-gcrypt | libgcrypt-dev | +| guile-git | libgit2-dev | +| guile-lzlib | liblz-dev | +| guile-ssh | libssh-dev | +| guile-sqlite3 | libsqlite3-dev | +| guile-zlib | zlib1g-dev | + +##### `guile-git` actually depends on `libgit2 >= 1.1` + +Relevant for: +- Those building `guile-git` from source against `libgit2 < 1.1` +- Those installing `guile-git` from their distribution where `guile-git` is + built against `libgit2 < 1.1` + +As of v0.4.0, `guile-git` claims to only require `libgit2 >= 0.28.0`, however, +it actually requires `libgit2 >= 1.1`, otherwise, it will be confused by a +reference of `origin/keyring`: instead of interpreting the reference as "the +'keyring' branch of the 'origin' remote", the reference is interpreted as "the +branch literally named 'origin/keyring'" + +This is especially notable because Ubuntu Focal packages `libgit2 v0.28.4`, and +`guile-git` is built against it. + +Should you be in this situation, you need to build both `libgit2 v1.1.x` and +`guile-git` from source. 
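+
+If you are unsure whether your system is affected, a quick check (assuming
+`pkg-config` and your distribution's libgit2 development package are
+installed) is to ask `pkg-config` which libgit2 version a build would see:
+
+```sh
+# Print the libgit2 version visible to build systems; anything below 1.1
+# means a guile-git built against it will misinterpret 'origin/keyring'
+pkg-config --modversion libgit2
+```
+
+The original report of this issue is in the IRC log linked below.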
+ +Source: https://logs.guix.gnu.org/guix/2020-11-12.log#232527 + +##### `{scheme,guile}-bytestructures` v1.0.8 and v1.0.9 are broken for Guile v2.2 + +Relevant for: +- Those building `{scheme,guile}-bytestructures` from source against Guile v2.2 + +Commit +[707eea3](https://github.com/TaylanUB/scheme-bytestructures/commit/707eea3a85e1e375e86702229ebf73d496377669) +introduced a regression for Guile v2.2 and was first included in v1.0.8, this +was later corrected in commit +[ec9a721](https://github.com/TaylanUB/scheme-bytestructures/commit/ec9a721957c17bcda13148f8faa5f06934431ff7) +and included in v1.1.0. + +TL;DR If you decided to use Guile v2.2, do not use `{scheme,guile}-bytestructures` v1.0.8 or v1.0.9. + +### Building and Installing Guix itself + +Start by cloning Guix: + +``` +git clone https://git.savannah.gnu.org/git/guix.git +cd guix +``` + +You will likely want to build the latest release, however, if the latest release +when you're reading this is still 1.2.0 then you may want to use 95aca29 instead +to avoid a problem in the GnuTLS test suite. + +``` +git branch -a -l 'origin/version-*' # check for the latest release +git checkout +``` + +Bootstrap the build system: +``` +./bootstrap +``` + +Configure with the recommended `--localstatedir` flag: +``` +./configure --localstatedir=/var +``` + +Note: If you intend to hack on Guix in the future, you will need to supply the +same `--localstatedir=` flag for all future Guix `./configure` invocations. See +the last paragraph of this +[section](https://guix.gnu.org/manual/en/html_node/Requirements.html) for more +details. + +Build Guix (this will take a while): +``` +make -j$(nproc) +``` + +Install Guix: + +``` +sudo make install +``` + +### Post-"build from source" Setup + +#### Creating and starting a `guix-daemon-original` service with a fixed `argv[0]` + +At this point, guix will be installed to `${bindir}`, which is likely +`/usr/local/bin` if you did not override directory variables at +`./configure`-time. More information on standard Automake directory variables +can be found +[here](https://www.gnu.org/software/automake/manual/html_node/Standard-Directory-Variables.html). + +However, the Guix init scripts and service configurations for Upstart, systemd, +SysV, and OpenRC are installed (in `${libdir}`) to launch +`${localstatedir}/guix/profiles/per-user/root/current-guix/bin/guix-daemon`, +which does not yet exist, and will only exist after [`root` performs their first +`guix pull`](#guix-pull-as-root). + +We need to create a `-original` version of these init scripts that's pointed to +the binaries we just built and `make install`'ed in `${bindir}` (normally, +`/usr/local/bin`). 
+ +Example for `systemd`, run as `root`: + +```sh +# Create guix-daemon-original.service by modifying guix-daemon.service +libdir=# set according to your PREFIX (default is /usr/local/lib) +bindir="$(dirname $(command -v guix-daemon))" +sed -E -e "s|/\S*/guix/profiles/per-user/root/current-guix/bin/guix-daemon|${bindir}/guix-daemon|" "${libdir}"/systemd/system/guix-daemon.service > /etc/systemd/system/guix-daemon-original.service +chmod 664 /etc/systemd/system/guix-daemon-original.service + +# Make systemd recognize the new service +systemctl daemon-reload + +# Make sure that the non-working guix-daemon.service is stopped and disabled +systemctl stop guix-daemon +systemctl disable guix-daemon + +# Make sure that the working guix-daemon-original.service is started and enabled +systemctl enable guix-daemon-original +systemctl start guix-daemon-original +``` + +#### Creating `guix-daemon` users / groups + +Please see the [relevant +section](https://guix.gnu.org/manual/en/html_node/Build-Environment-Setup.html) +in the Guix Reference Manual for more details. + +## Optional setup + +At this point, you are set up to [use Guix to build Bitcoin +Core](./README.md#usage). However, if you want to polish your setup a bit and +make it "what Guix intended", then read the next few subsections. + +### Add an `/etc/profile.d` entry + +This section definitely does not apply to you if you installed Guix using: +1. The shell installer script +2. fanquake's Docker image +3. Debian's `guix` package + +#### Background + +Although Guix knows how to update itself and its packages, it does so in a +non-invasive way (it does not modify `/usr/local/bin/guix`). + +Instead, it does the following: + +- After a `guix pull`, it updates + `/var/guix/profiles/per-user/$USER/current-guix`, and creates a symlink + targeting this directory at `$HOME/.config/guix/current` + +- After a `guix install`, it updates + `/var/guix/profiles/per-user/$USER/guix-profile`, and creates a symlink + targeting this directory at `$HOME/.guix-profile` + +Therefore, in order for these operations to affect your shell/desktop sessions +(and for the principle of least astonishment to hold), their corresponding +directories have to be added to well-known environment variables like `$PATH`, +`$INFOPATH`, `$XDG_DATA_DIRS`, etc. + +In other words, if `$HOME/.config/guix/current/bin` does not exist in your +`$PATH`, a `guix pull` will have no effect on what `guix` you are using. Same +goes for `$HOME/.guix-profile/bin`, `guix install`, and installed packages. + +Helpfully, after a `guix pull` or `guix install`, a message will be printed like +so: + +``` +hint: Consider setting the necessary environment variables by running: + + GUIX_PROFILE="$HOME/.guix-profile" + . "$GUIX_PROFILE/etc/profile" + +Alternately, see `guix package --search-paths -p "$HOME/.guix-profile"'. +``` + +However, this is somewhat tedious to do for both `guix pull` and `guix install` +for each user on the system that wants to properly use `guix`. I recommend that +you instead add an entry to `/etc/profile.d` instead. This is done by default +when installing the Debian package later than 1.2.0-4 and when using the shell +script installer. 
+ +#### Instructions + +Create `/etc/profile.d/guix.sh` with the following content: +```sh +# _GUIX_PROFILE: `guix pull` profile +_GUIX_PROFILE="$HOME/.config/guix/current" +if [ -L $_GUIX_PROFILE ]; then + export PATH="$_GUIX_PROFILE/bin${PATH:+:}$PATH" + # Export INFOPATH so that the updated info pages can be found + # and read by both /usr/bin/info and/or $GUIX_PROFILE/bin/info + # When INFOPATH is unset, add a trailing colon so that Emacs + # searches 'Info-default-directory-list'. + export INFOPATH="$_GUIX_PROFILE/share/info:$INFOPATH" +fi + +# GUIX_PROFILE: User's default profile +GUIX_PROFILE="$HOME/.guix-profile" +[ -L $GUIX_PROFILE ] || return +GUIX_LOCPATH="$GUIX_PROFILE/lib/locale" +export GUIX_PROFILE GUIX_LOCPATH + +[ -f "$GUIX_PROFILE/etc/profile" ] && . "$GUIX_PROFILE/etc/profile" + +# set XDG_DATA_DIRS to include Guix installations +export XDG_DATA_DIRS="$GUIX_PROFILE/share:${XDG_DATA_DIRS:-/usr/local/share/:/usr/share/}" +``` + +Please note that this will not take effect until the next shell or desktop +session (log out and log back in). + +### `guix pull` as root + +Before you do this, you need to read the section on [choosing your security +model][security-model] and adjust `guix` and `guix-daemon` flags according to +your choice, as invoking `guix pull` may pull substitutes from substitute +servers (which you may not want). + +As mentioned in a previous section, Guix expects +`${localstatedir}/guix/profiles/per-user/root/current-guix` to be populated with +`root`'s Guix profile, `guix pull`-ed and built by some former version of Guix. +However, this is not the case when we build from source. Therefore, we need to +perform a `guix pull` as `root`: + +```sh +sudo --login guix pull --branch=version- +# or +sudo --login guix pull --commit= +``` + +`guix pull` is quite a long process (especially if you're using +`--no-substitute`). If you encounter build problems, please refer to the +[troubleshooting section](#troubleshooting). + +Note that running a bare `guix pull` with no commit or branch specified will +pull the latest commit on Guix's master branch, which is likely fine, but not +recommended. + +If you installed Guix from source, you may get an error like the following: +```sh +error: while creating symlink '/root/.config/guix/current' No such file or directory +``` +To resolve this, simply: +``` +sudo mkdir -p /root/.config/guix +``` +Then try the `guix pull` command again. + +After the `guix pull` finishes successfully, +`${localstatedir}/guix/profiles/per-user/root/current-guix` should be populated. + +#### Using the newly-pulled `guix` by restarting the daemon + +Depending on how you installed Guix, you should now make sure that your init +scripts and service configurations point to the newly-pulled `guix-daemon`. + +##### If you built Guix from source + +If you followed the instructions for [fixing argv\[0\]][fix-argv0], you can now +do the following: + +```sh +systemctl stop guix-daemon-original +systemctl disable guix-daemon-original + +systemctl enable guix-daemon +systemctl start guix-daemon +``` + +##### If you installed Guix via the Debian/Ubuntu distribution packages + +You will need to create a `guix-daemon-latest` service which points to the new +`guix` rather than a pinned one. 
+ +```sh +# Create guix-daemon-latest.service by modifying guix-daemon.service +sed -E -e "s|/usr/bin/guix-daemon|/var/guix/profiles/per-user/root/current-guix/bin/guix-daemon|" /etc/systemd/system/guix-daemon.service > /lib/systemd/system/guix-daemon-latest.service +chmod 664 /lib/systemd/system/guix-daemon-latest.service + +# Make systemd recognize the new service +systemctl daemon-reload + +# Make sure that the old guix-daemon.service is stopped and disabled +systemctl stop guix-daemon +systemctl disable guix-daemon + +# Make sure that the new guix-daemon-latest.service is started and enabled +systemctl enable guix-daemon-latest +systemctl start guix-daemon-latest +``` + +##### If you installed Guix via lantw44's Arch Linux AUR package + +At the time of writing (July 5th, 2021) the systemd unit for "updated Guix" is +`guix-daemon-latest.service`, therefore, you should do the following: + +```sh +systemctl stop guix-daemon +systemctl disable guix-daemon + +systemctl enable guix-daemon-latest +systemctl start guix-daemon-latest +``` + +##### Otherwise... + +Simply do: + +```sh +systemctl restart guix-daemon +``` + +### Checking everything + +If you followed all the steps above to make your Guix setup "prim and proper," +you can check that you did everything properly by running through this +checklist. + +1. `/etc/profile.d/guix.sh` should exist and be sourced at each shell login + +2. `guix describe` should not print `guix describe: error: failed to determine + origin`, but rather something like: + + ``` + Generation 38 Feb 22 2021 16:39:31 (current) + guix f350df4 + repository URL: https://git.savannah.gnu.org/git/guix.git + branch: version-1.2.0 + commit: f350df405fbcd5b9e27e6b6aa500da7f101f41e7 + ``` + +3. `guix-daemon` should be running from `${localstatedir}/guix/profiles/per-user/root/current-guix` + +# Troubleshooting + +## Derivation failed to build + +When you see a build failure like below: + +``` +building /gnu/store/...-foo-3.6.12.drv... +/ 'check' phasenote: keeping build directory `/tmp/guix-build-foo-3.6.12.drv-0' +builder for `/gnu/store/...-foo-3.6.12.drv' failed with exit code 1 +build of /gnu/store/...-foo-3.6.12.drv failed +View build log at '/var/log/guix/drvs/../...-foo-3.6.12.drv.bz2'. +cannot build derivation `/gnu/store/...-qux-7.69.1.drv': 1 dependencies couldn't be built +cannot build derivation `/gnu/store/...-bar-3.16.5.drv': 1 dependencies couldn't be built +cannot build derivation `/gnu/store/...-baz-2.0.5.drv': 1 dependencies couldn't be built +guix time-machine: error: build of `/gnu/store/...-baz-2.0.5.drv' failed +``` + +It means that `guix` failed to build a package named `foo`, which was a +dependency of `qux`, `bar`, and `baz`. Importantly, note that the last "failed" +line is not necessarily the root cause, the first "failed" line is. + +Most of the time, the build failure is due to a spurious test failure or the +package's build system/test suite breaking when running multi-threaded. To +rebuild _just_ this derivation in a single-threaded fashion (please don't forget +to add other `guix` flags like `--no-substitutes` as appropriate): + +```sh +$ guix build --cores=1 /gnu/store/...-foo-3.6.12.drv +``` + +If the single-threaded rebuild did not succeed, you may need to dig deeper. 
+You may view `foo`'s build logs in `less` like so (please replace paths with the +path you see in the build failure output): + +```sh +$ bzcat /var/log/guix/drvs/../...-foo-3.6.12.drv.bz2 | less +``` + +`foo`'s build directory is also preserved and available at +`/tmp/guix-build-foo-3.6.12.drv-0`. However, if you fail to build `foo` multiple +times, it may be `/tmp/...drv-1` or `/tmp/...drv-2`. Always consult the build +failure output for the most accurate, up-to-date information. + +### python(-minimal): [Errno 84] Invalid or incomplete multibyte or wide character + +This error occurs when your `$TMPDIR` (default: /tmp) exists on a filesystem +which rejects characters not present in the UTF-8 character code set. An example +is ZFS with the utf8only=on option set. + +More information: https://bugs.python.org/issue37584 + +### GnuTLS: test-suite FAIL: status-request-revoked + +*The derivation is likely identified by: `/gnu/store/vhphki5sg9xkdhh2pbc8gi6vhpfzryf0-gnutls-3.6.12.drv`* + +This unfortunate error is most common for non-substitute builders who installed +Guix v1.2.0. The problem stems from the fact that one of GnuTLS's tests uses a +hardcoded certificate which expired on 2020-10-24. + +What's more unfortunate is that this GnuTLS derivation is somewhat special in +Guix's dependency graph and is not affected by the package transformation flags +like `--without-tests=`. + +The easiest solution for those encountering this problem is to install a newer +version of Guix. However, there are ways to work around this issue: + +#### Workaround 1: Using substitutes for this single derivation + +If you've authorized the official Guix build farm's key (more info +[here](./README.md#step-1-authorize-the-signing-keys)), then you can use +substitutes just for this single derivation by invoking the following: + +```sh +guix build --substitute-urls="https://ci.guix.gnu.org" /gnu/store/vhphki5sg9xkdhh2pbc8gi6vhpfzryf0-gnutls-3.6.12.drv +``` + +See [this section](./README.md#removing-authorized-keys) for instructions on how +to remove authorized keys if you don't want to keep the build farm's key +authorized. + +#### Workaround 2: Temporarily setting the system clock back + +This workaround was described [here](https://issues.guix.gnu.org/44559#5). + +Basically: +1. Turn off networking +2. Turn off NTP +3. Set system time to 2020-10-01 +4. guix build --no-substitutes /gnu/store/vhphki5sg9xkdhh2pbc8gi6vhpfzryf0-gnutls-3.6.12.drv +5. Set system time back to accurate current time +6. Turn NTP back on +7. Turn networking back on + +### coreutils: FAIL: tests/tail-2/inotify-dir-recreate + +The inotify-dir-create test fails on "remote" filesystems such as overlayfs +(Docker's default filesystem) due to the filesystem being mistakenly recognized +as non-remote. + +A relatively easy workaround to this is to make sure that a somewhat traditional +filesystem is mounted at `/tmp` (where `guix-daemon` performs its builds). For +Docker users, this might mean [using a volume][docker/volumes], [binding +mounting][docker/bind-mnt] from host, or (for those with enough RAM and swap) +[mounting a tmpfs][docker/tmpfs] using the `--tmpfs` flag. 
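+
+For the Docker case specifically, a minimal sketch of the `--tmpfs` approach
+(the image name and tmpfs size here are placeholders, not recommendations):
+
+```sh
+# Mount a tmpfs at /tmp, where guix-daemon performs its builds, so the
+# filesystem is not mistaken for a remote one; adjust size and image to taste
+docker run -it --tmpfs /tmp:rw,exec,size=8g ubuntu:focal
+```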
+ +Please see the following links for more details: + +- An upstream coreutils bug has been filed: [debbugs#47940](https://debbugs.gnu.org/cgi/bugreport.cgi?bug=47940) +- A Guix bug detailing the underlying problem has been filed: [guix-issues#47935](https://issues.guix.gnu.org/47935) +- A commit to skip this test in Guix has been merged into the core-updates branch: +[savannah/guix@6ba1058](https://git.savannah.gnu.org/cgit/guix.git/commit/?id=6ba1058df0c4ce5611c2367531ae5c3cdc729ab4) + + +[install-script]: #options-1-and-2-using-the-official-shell-installer-script-or-binary-tarball +[install-bin-tarball]: #options-1-and-2-using-the-official-shell-installer-script-or-binary-tarball +[install-fanquake-docker]: #option-3-using-fanquakes-docker-image +[install-distro-pkg]: #option-4-using-a-distribution-maintained-package +[install-source]: #option-5-building-from-source + +[fix-argv0]: #creating-and-starting-a-guix-daemon-original-service-with-a-fixed-argv0 +[security-model]: ./README.md#choosing-your-security-model + +[docker/volumes]: https://docs.docker.com/storage/volumes/ +[docker/bind-mnt]: https://docs.docker.com/storage/bind-mounts/ +[docker/tmpfs]: https://docs.docker.com/storage/tmpfs/ diff --git a/contrib/guix/README.md b/contrib/guix/README.md new file mode 100644 index 0000000000000..af5607c710d34 --- /dev/null +++ b/contrib/guix/README.md @@ -0,0 +1,484 @@ +# Bootstrappable Bitcoin Core Builds + +This directory contains the files necessary to perform bootstrappable Bitcoin +Core builds. + +[Bootstrappability][b17e] furthers our binary security guarantees by allowing us +to _audit and reproduce_ our toolchain instead of blindly _trusting_ binary +downloads. + +We achieve bootstrappability by using Guix as a functional package manager. + +# Requirements + +Conservatively, you will need an x86_64 machine with: + +- 16GB of free disk space on the partition that /gnu/store will reside in +- 8GB of free disk space **per platform triple** you're planning on building + (see the `HOSTS` [environment variable description][env-vars-list]) + +# Installation and Setup + +If you don't have Guix installed and set up, please follow the instructions in +[INSTALL.md](./INSTALL.md) + +# Usage + +If you haven't considered your security model yet, please read [the relevant +section](#choosing-your-security-model) before proceeding to perform a build. + +## Making the Xcode SDK available for macOS cross-compilation + +In order to perform a build for macOS (which is included in the default set of +platform triples to build), you'll need to extract the macOS SDK tarball using +tools found in the [`macdeploy` directory](../macdeploy/README.md). + +You can then either point to the SDK using the `SDK_PATH` environment variable: + +```sh +# Extract the SDK tarball to /path/to/parent/dir/of/extracted/SDK/Xcode---extracted-SDK-with-libcxx-headers +tar -C /path/to/parent/dir/of/extracted/SDK -xaf /path/to/Xcode---extracted-SDK-with-libcxx-headers.tar.gz + +# Indicate where to locate the SDK tarball +export SDK_PATH=/path/to/parent/dir/of/extracted/SDK +``` + +or extract it into `depends/SDKs`: + +```sh +mkdir -p depends/SDKs +tar -C depends/SDKs -xaf /path/to/SDK/tarball +``` + +## Building + +*The author highly recommends at least reading over the [common usage patterns +and examples](#common-guix-build-invocation-patterns-and-examples) section below +before starting a build. 
For a full list of customization options, see the +[recognized environment variables][env-vars-list] section.* + +To build Bitcoin Core reproducibly with all default options, invoke the +following from the top of a clean repository: + +```sh +./contrib/guix/guix-build +``` + +## Codesigning build outputs + +The `guix-codesign` command attaches codesignatures (produced by codesigners) to +existing non-codesigned outputs. Please see the [release process +documentation](/doc/release-process.md) for more context. + +It respects many of the same environment variable flags as `guix-build`, with 2 +crucial differences: + +1. Since only Windows and macOS build outputs require codesigning, the `HOSTS` + environment variable will have a sane default value of `x86_64-w64-mingw32 + x86_64-apple-darwin arm64-apple-darwin` instead of all the platforms. +2. The `guix-codesign` command ***requires*** a `DETACHED_SIGS_REPO` flag. + * _**DETACHED_SIGS_REPO**_ + + Set the directory where detached codesignatures can be found for the current + Bitcoin Core version being built. + + _REQUIRED environment variable_ + +An invocation with all default options would look like: + +``` +env DETACHED_SIGS_REPO= ./contrib/guix/guix-codesign +``` + +## Cleaning intermediate work directories + +By default, `guix-build` leaves all intermediate files or "work directories" +(e.g. `depends/work`, `guix-build-*/distsrc-*`) intact at the end of a build so +that they are available to the user (to aid in debugging, etc.). However, these +directories usually take up a large amount of disk space. Therefore, a +`guix-clean` convenience script is provided which cleans the current `git` +worktree to save disk space: + +``` +./contrib/guix/guix-clean +``` + + +## Attesting to build outputs + +Much like how Gitian build outputs are attested to in a `gitian.sigs` +repository, Guix build outputs are attested to in the [`guix.sigs` +repository](https://github.com/bitcoin-core/guix.sigs). + +After you've cloned the `guix.sigs` repository, to attest to the current +worktree's commit/tag: + +``` +env GUIX_SIGS_REPO= SIGNER= ./contrib/guix/guix-attest +``` + +See `./contrib/guix/guix-attest --help` for more information on the various ways +`guix-attest` can be invoked. + +## Verifying build output attestations + +After at least one other signer has uploaded their signatures to the `guix.sigs` +repository: + +``` +git -C pull +env GUIX_SIGS_REPO= ./contrib/guix/guix-verify +``` + + +## Common `guix-build` invocation patterns and examples + +### Keeping caches and SDKs outside of the worktree + +If you perform a lot of builds and have a bunch of worktrees, you may find it +more efficient to keep the depends tree's download cache, build cache, and SDKs +outside of the worktrees to avoid duplicate downloads and unnecessary builds. To +help with this situation, the `guix-build` script honours the `SOURCES_PATH`, +`BASE_CACHE`, and `SDK_PATH` environment variables and will pass them on to the +depends tree so that you can do something like: + +```sh +env SOURCES_PATH="$HOME/depends-SOURCES_PATH" BASE_CACHE="$HOME/depends-BASE_CACHE" SDK_PATH="$HOME/macOS-SDKs" ./contrib/guix/guix-build +``` + +Note that the paths that these environment variables point to **must be +directories**, and **NOT symlinks to directories**. + +See the [recognized environment variables][env-vars-list] section for more +details. 
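+
+Since these paths must be real directories, it may help to create them
+explicitly before the first build; a small sketch using the example paths from
+the invocation above:
+
+```sh
+# Create the shared caches as real directories (not symlinks to directories)
+mkdir -p "$HOME/depends-SOURCES_PATH" "$HOME/depends-BASE_CACHE" "$HOME/macOS-SDKs"
+```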
+ +### Building a subset of platform triples + +Sometimes you only want to build a subset of the supported platform triples, in +which case you can override the default list by setting the space-separated +`HOSTS` environment variable: + +```sh +env HOSTS='x86_64-w64-mingw32 x86_64-apple-darwin' ./contrib/guix/guix-build +``` + +See the [recognized environment variables][env-vars-list] section for more +details. + +### Controlling the number of threads used by `guix` build commands + +Depending on your system's RAM capacity, you may want to decrease the number of +threads used to decrease RAM usage or vice versa. + +By default, the scripts under `./contrib/guix` will invoke all `guix` build +commands with `--cores="$JOBS"`. Note that `$JOBS` defaults to `$(nproc)` if not +specified. However, astute manual readers will also notice that `guix` build +commands also accept a `--max-jobs=` flag (which defaults to 1 if unspecified). + +Here is the difference between `--cores=` and `--max-jobs=`: + +> Note: When I say "derivation," think "package" + +`--cores=` + + - controls the number of CPU cores to build each derivation. This is the value + passed to `make`'s `--jobs=` flag. + +`--max-jobs=` + + - controls how many derivations can be built in parallel + - defaults to 1 + +Therefore, the default is for `guix` build commands to build one derivation at a +time, utilizing `$JOBS` threads. + +Specifying the `$JOBS` environment variable will only modify `--cores=`, but you +can also modify the value for `--max-jobs=` by specifying +`$ADDITIONAL_GUIX_COMMON_FLAGS`. For example, if you have a LOT of memory, you +may want to set: + +```sh +export ADDITIONAL_GUIX_COMMON_FLAGS='--max-jobs=8' +``` + +Which allows for a maximum of 8 derivations to be built at the same time, each +utilizing `$JOBS` threads. + +Or, if you'd like to avoid spurious build failures caused by issues with +parallelism within a single package, but would still like to build multiple +packages when the dependency graph allows for it, you may want to try: + +```sh +export JOBS=1 ADDITIONAL_GUIX_COMMON_FLAGS='--max-jobs=8' +``` + +See the [recognized environment variables][env-vars-list] section for more +details. + +## Recognized environment variables + +* _**HOSTS**_ + + Override the space-separated list of platform triples for which to perform a + bootstrappable build. + + _(defaults to "x86\_64-linux-gnu arm-linux-gnueabihf aarch64-linux-gnu + riscv64-linux-gnu powerpc64-linux-gnu powerpc64le-linux-gnu + x86\_64-w64-mingw32 x86\_64-apple-darwin arm64-apple-darwin")_ + +* _**SOURCES_PATH**_ + + Set the depends tree download cache for sources. This is passed through to the + depends tree. Setting this to the same directory across multiple builds of the + depends tree can eliminate unnecessary redownloading of package sources. + + The path that this environment variable points to **must be a directory**, and + **NOT a symlink to a directory**. + +* _**BASE_CACHE**_ + + Set the depends tree cache for built packages. This is passed through to the + depends tree. Setting this to the same directory across multiple builds of the + depends tree can eliminate unnecessary building of packages. + + The path that this environment variable points to **must be a directory**, and + **NOT a symlink to a directory**. + +* _**SDK_PATH**_ + + Set the path where _extracted_ SDKs can be found. This is passed through to + the depends tree. Note that this is should be set to the _parent_ directory of + the actual SDK (e.g. 
`SDK_PATH=$HOME/Downloads/macOS-SDKs` instead of + `$HOME/Downloads/macOS-SDKs/Xcode-12.2-12B45b-extracted-SDK-with-libcxx-headers`). + + The path that this environment variable points to **must be a directory**, and + **NOT a symlink to a directory**. + +* _**JOBS**_ + + Override the number of jobs to run simultaneously, you might want to do so on + a memory-limited machine. This may be passed to: + + - `guix` build commands as in `guix environment --cores="$JOBS"` + - `make` as in `make --jobs="$JOBS"` + - `xargs` as in `xargs -P"$JOBS"` + + See [here](#controlling-the-number-of-threads-used-by-guix-build-commands) for + more details. + + _(defaults to the value of `nproc` outside the container)_ + +* _**SOURCE_DATE_EPOCH**_ + + Override the reference UNIX timestamp used for bit-for-bit reproducibility, + the variable name conforms to [standard][r12e/source-date-epoch]. + + _(defaults to the output of `$(git log --format=%at -1)`)_ + +* _**V**_ + + If non-empty, will pass `V=1` to all `make` invocations, making `make` output + verbose. + + Note that any given value is ignored. The variable is only checked for + emptiness. More concretely, this means that `V=` (setting `V` to the empty + string) is interpreted the same way as not setting `V` at all, and that `V=0` + has the same effect as `V=1`. + +* _**SUBSTITUTE_URLS**_ + + A whitespace-delimited list of URLs from which to download pre-built packages. + A URL is only used if its signing key is authorized (refer to the [substitute + servers section](#option-1-building-with-substitutes) for more details). + +* _**ADDITIONAL_GUIX_COMMON_FLAGS**_ + + Additional flags to be passed to all `guix` commands. + +* _**ADDITIONAL_GUIX_TIMEMACHINE_FLAGS**_ + + Additional flags to be passed to `guix time-machine`. + +* _**ADDITIONAL_GUIX_ENVIRONMENT_FLAGS**_ + + Additional flags to be passed to the invocation of `guix environment` inside + `guix time-machine`. + +# Choosing your security model + +No matter how you installed Guix, you need to decide on your security model for +building packages with Guix. + +Guix allows us to achieve better binary security by using our CPU time to build +everything from scratch. However, it doesn't sacrifice user choice in pursuit of +this: users can decide whether or not to use **substitutes** (pre-built +packages). + +## Option 1: Building with substitutes + +### Step 1: Authorize the signing keys + +Depending on the installation procedure you followed, you may have already +authorized the Guix build farm key. In particular, the official shell installer +script asks you if you want the key installed, and the debian distribution +package authorized the key during installation. + +You can check the current list of authorized keys at `/etc/guix/acl`. 
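+
+For a quick check, you can grep the ACL for the build farm key's `q` value
+(the same hex string that appears in the example ACL below); this sketch
+assumes `/etc/guix/acl` is readable by your user, otherwise run it as root:
+
+```sh
+# Look for the official Guix build farm key in the ACL
+if grep -q '8D156F295D24B0D9A86FA5741A840FF2D24F60F7B6C4134814AD55625971B394' /etc/guix/acl; then
+    echo "build farm key appears to be authorized"
+else
+    echo "build farm key not found in /etc/guix/acl"
+fi
+```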
+ +At the time of writing, a `/etc/guix/acl` with just the Guix build farm key +authorized looks something like: + +```lisp +(acl + (entry + (public-key + (ecc + (curve Ed25519) + (q #8D156F295D24B0D9A86FA5741A840FF2D24F60F7B6C4134814AD55625971B394#) + ) + ) + (tag + (guix import) + ) + ) + ) +``` + +If you've determined that the official Guix build farm key hasn't been +authorized, and you would like to authorize it, run the following as root: + +``` +guix archive --authorize < /var/guix/profiles/per-user/root/current-guix/share/guix/ci.guix.gnu.org.pub +``` + +If +`/var/guix/profiles/per-user/root/current-guix/share/guix/ci.guix.gnu.org.pub` +doesn't exist, try: + +```sh +guix archive --authorize < /share/guix/ci.guix.gnu.org.pub +``` + +Where `` is likely: +- `/usr` if you installed from a distribution package +- `/usr/local` if you installed Guix from source and didn't supply any + prefix-modifying flags to Guix's `./configure` + +For dongcarl's substitute server at https://guix.carldong.io, run as root: + +```sh +wget -qO- 'https://guix.carldong.io/signing-key.pub' | guix archive --authorize +``` + +#### Removing authorized keys + +To remove previously authorized keys, simply edit `/etc/guix/acl` and remove the +`(entry (public-key ...))` entry. + +### Step 2: Specify the substitute servers + +Once its key is authorized, the official Guix build farm at +https://ci.guix.gnu.org is automatically used unless the `--no-substitutes` flag +is supplied. This default list of substitute servers is overridable both on a +`guix-daemon` level and when you invoke `guix` commands. See examples below for +the various ways of adding dongcarl's substitute server after having [authorized +his signing key](#authorize-the-signing-keys). + +Change the **default list** of substitute servers by starting `guix-daemon` with +the `--substitute-urls` option (you will likely need to edit your init script): + +```sh +guix-daemon --substitute-urls='https://guix.carldong.io https://ci.guix.gnu.org' +``` + +Override the default list of substitute servers by passing the +`--substitute-urls` option for invocations of `guix` commands: + +```sh +guix --substitute-urls='https://guix.carldong.io https://ci.guix.gnu.org' +``` + +For scripts under `./contrib/guix`, set the `SUBSTITUTE_URLS` environment +variable: + +```sh +export SUBSTITUTE_URLS='https://guix.carldong.io https://ci.guix.gnu.org' +``` + +## Option 2: Disabling substitutes on an ad-hoc basis + +If you prefer not to use any substitutes, make sure to supply `--no-substitutes` +like in the following snippet. The first build will take a while, but the +resulting packages will be cached for future builds. + +For direct invocations of `guix`: +```sh +guix --no-substitutes +``` + +For the scripts under `./contrib/guix/`: +```sh +export ADDITIONAL_GUIX_COMMON_FLAGS='--no-substitutes' +``` + +## Option 3: Disabling substitutes by default + +`guix-daemon` accepts a `--no-substitutes` flag, which will make sure that, +unless otherwise overridden by a command line invocation, no substitutes will be +used. + +If you start `guix-daemon` using an init script, you can edit said script to +supply this flag. + + +# Purging/Uninstalling Guix + +In the extraordinarily rare case where you messed up your Guix installation in +an irreversible way, you may want to completely purge Guix from your system and +start over. + +1. Uninstall Guix itself according to the way you installed it (e.g. `sudo apt + purge guix` for Ubuntu packaging, `sudo make uninstall` for a build from source). 
+2. Remove all build users and groups + + You may check for relevant users and groups using: + + ``` + getent passwd | grep guix + getent group | grep guix + ``` + + Then, you may remove users and groups using: + + ``` + sudo userdel + sudo groupdel + ``` + +3. Remove all possible Guix-related directories + - `/var/guix/` + - `/var/log/guix/` + - `/gnu/` + - `/etc/guix/` + - `/home/*/.config/guix/` + - `/home/*/.cache/guix/` + - `/home/*/.guix-profile/` + - `/root/.config/guix/` + - `/root/.cache/guix/` + - `/root/.guix-profile/` + +[b17e]: https://bootstrappable.org/ +[r12e/source-date-epoch]: https://reproducible-builds.org/docs/source-date-epoch/ + +[guix/install.sh]: https://git.savannah.gnu.org/cgit/guix.git/plain/etc/guix-install.sh +[guix/bin-install]: https://www.gnu.org/software/guix/manual/en/html_node/Binary-Installation.html +[guix/env-setup]: https://www.gnu.org/software/guix/manual/en/html_node/Build-Environment-Setup.html +[guix/substitutes]: https://www.gnu.org/software/guix/manual/en/html_node/Substitutes.html +[guix/substitute-server-auth]: https://www.gnu.org/software/guix/manual/en/html_node/Substitute-Server-Authorization.html +[guix/time-machine]: https://guix.gnu.org/manual/en/html_node/Invoking-guix-time_002dmachine.html + +[debian/guix-bullseye]: https://packages.debian.org/bullseye/guix +[ubuntu/guix-hirsute]: https://packages.ubuntu.com/hirsute/guix +[fanquake/guix-docker]: https://github.com/fanquake/core-review/tree/master/guix + +[env-vars-list]: #recognized-environment-variables diff --git a/contrib/guix/guix-attest b/contrib/guix/guix-attest new file mode 100755 index 0000000000000..b0ef28dc3f929 --- /dev/null +++ b/contrib/guix/guix-attest @@ -0,0 +1,263 @@ +#!/usr/bin/env bash +export LC_ALL=C +set -e -o pipefail + +# Source the common prelude, which: +# 1. Checks if we're at the top directory of the Bitcoin Core repository +# 2. Defines a few common functions and variables +# +# shellcheck source=libexec/prelude.bash +source "$(dirname "${BASH_SOURCE[0]}")/libexec/prelude.bash" + + +################### +## Sanity Checks ## +################### + +################ +# Required non-builtin commands should be invokable +################ + +check_tools cat env basename mkdir diff sort + +if [ -z "$NO_SIGN" ]; then + # make it possible to override the gpg binary + GPG=${GPG:-gpg} + + # $GPG can contain extra arguments passed to the binary + # so let's check only the existence of arg[0] + # shellcheck disable=SC2206 + GPG_ARRAY=($GPG) + check_tools "${GPG_ARRAY[0]}" +fi + +################ +# Required env vars should be non-empty +################ + +cmd_usage() { +cat < \\ + SIGNER=GPG_KEY_NAME[=SIGNER_NAME] \\ + [ NO_SIGN=1 ] + ./contrib/guix/guix-attest + +Example w/o overriding signing name: + + env GUIX_SIGS_REPO=/home/achow101/guix.sigs \\ + SIGNER=achow101 \\ + ./contrib/guix/guix-attest + +Example overriding signing name: + + env GUIX_SIGS_REPO=/home/dongcarl/guix.sigs \\ + SIGNER=0x96AB007F1A7ED999=dongcarl \\ + ./contrib/guix/guix-attest + +Example w/o signing, just creating SHA256SUMS: + + env GUIX_SIGS_REPO=/home/achow101/guix.sigs \\ + SIGNER=achow101 \\ + NO_SIGN=1 \\ + ./contrib/guix/guix-attest + +EOF +} + +if [ -z "$GUIX_SIGS_REPO" ] || [ -z "$SIGNER" ]; then + cmd_usage + exit 1 +fi + +################ +# GUIX_SIGS_REPO should exist as a directory +################ + +if [ ! 
-d "$GUIX_SIGS_REPO" ]; then +cat << EOF +ERR: The specified GUIX_SIGS_REPO is not an existent directory: + + '$GUIX_SIGS_REPO' + +Hint: Please clone the guix.sigs repository and point to it with the + GUIX_SIGS_REPO environment variable. + +EOF +cmd_usage +exit 1 +fi + +################ +# The key specified in SIGNER should be usable +################ + +IFS='=' read -r gpg_key_name signer_name <<< "$SIGNER" +if [ -z "${signer_name}" ]; then + signer_name="$gpg_key_name" +fi + +if [ -z "$NO_SIGN" ] && ! ${GPG} --dry-run --list-secret-keys "${gpg_key_name}" >/dev/null 2>&1; then + echo "ERR: GPG can't seem to find any key named '${gpg_key_name}'" + exit 1 +fi + +################ +# We should be able to find at least one output +################ + +echo "Looking for build output SHA256SUMS fragments in ${OUTDIR_BASE}" + +shopt -s nullglob +sha256sum_fragments=( "$OUTDIR_BASE"/*/SHA256SUMS.part ) # This expands to an array of directories... +shopt -u nullglob + +noncodesigned_fragments=() +codesigned_fragments=() + +if (( ${#sha256sum_fragments[@]} )); then + echo "Found build output SHA256SUMS fragments:" + for outdir in "${sha256sum_fragments[@]}"; do + echo " '$outdir'" + case "$outdir" in + "$OUTDIR_BASE"/*-codesigned/SHA256SUMS.part) + codesigned_fragments+=("$outdir") + ;; + *) + noncodesigned_fragments+=("$outdir") + ;; + esac + done + echo +else + echo "ERR: Could not find any build output SHA256SUMS fragments in ${OUTDIR_BASE}" + exit 1 +fi + +############## +## Attest ## +############## + +# Usage: out_name $outdir +# +# HOST: The output directory being attested +# +out_name() { + basename "$(dirname "$1")" +} + +shasum_already_exists() { +cat < "$temp_noncodesigned" + if [ -e noncodesigned.SHA256SUMS ]; then + # The SHA256SUMS already exists, make sure it's exactly what we + # expect, error out if not + if diff -u noncodesigned.SHA256SUMS "$temp_noncodesigned"; then + echo "A noncodesigned.SHA256SUMS file already exists for '${VERSION}' and is up-to-date." + else + shasum_already_exists noncodesigned.SHA256SUMS + exit 1 + fi + else + mv "$temp_noncodesigned" noncodesigned.SHA256SUMS + fi + else + echo "ERR: No noncodesigned outputs found for '${VERSION}', exiting..." + exit 1 + fi + + temp_all="$(mktemp)" + trap 'rm -rf -- "$temp_all"' EXIT + + if (( ${#codesigned_fragments[@]} )); then + # Note: all.SHA256SUMS attests to all of $sha256sum_fragments, but is + # not needed if there are no $codesigned_fragments + cat "${sha256sum_fragments[@]}" \ + | sort -u \ + | sort -k2 \ + | basenameify_SHA256SUMS \ + > "$temp_all" + if [ -e all.SHA256SUMS ]; then + # The SHA256SUMS already exists, make sure it's exactly what we + # expect, error out if not + if diff -u all.SHA256SUMS "$temp_all"; then + echo "An all.SHA256SUMS file already exists for '${VERSION}' and is up-to-date." + else + shasum_already_exists all.SHA256SUMS + exit 1 + fi + else + mv "$temp_all" all.SHA256SUMS + fi + else + # It is fine to have the codesigned outputs be missing (perhaps the + # detached codesigs have not been published yet), just print a log + # message instead of erroring out + echo "INFO: No codesigned outputs found for '${VERSION}', skipping..." + fi + + if [ -z "$NO_SIGN" ]; then + echo "Signing SHA256SUMS to produce SHA256SUMS.asc" + for i in *.SHA256SUMS; do + if [ ! 
-e "$i".asc ]; then + ${GPG} --detach-sign \ + --digest-algo sha256 \ + --local-user "$gpg_key_name" \ + --armor \ + --output "$i".asc "$i" + else + echo "Signature already there" + fi + done + else + echo "Not signing SHA256SUMS as \$NO_SIGN is not empty" + fi + echo "" +) diff --git a/contrib/guix/guix-build b/contrib/guix/guix-build new file mode 100755 index 0000000000000..74b24b9612072 --- /dev/null +++ b/contrib/guix/guix-build @@ -0,0 +1,463 @@ +#!/usr/bin/env bash +export LC_ALL=C +set -e -o pipefail + +# Source the common prelude, which: +# 1. Checks if we're at the top directory of the Bitcoin Core repository +# 2. Defines a few common functions and variables +# +# shellcheck source=libexec/prelude.bash +source "$(dirname "${BASH_SOURCE[0]}")/libexec/prelude.bash" + + +################### +## SANITY CHECKS ## +################### + +################ +# Required non-builtin commands should be invocable +################ + +check_tools cat mkdir make getent curl git guix + +################ +# GUIX_BUILD_OPTIONS should be empty +################ +# +# GUIX_BUILD_OPTIONS is an environment variable recognized by guix commands that +# can perform builds. This seems like what we want instead of +# ADDITIONAL_GUIX_COMMON_FLAGS, but the value of GUIX_BUILD_OPTIONS is actually +# _appended_ to normal command-line options. Meaning that they will take +# precedence over the command-specific ADDITIONAL_GUIX__FLAGS. +# +# This seems like a poor user experience. Thus we check for GUIX_BUILD_OPTIONS's +# existence here and direct users of this script to use our (more flexible) +# custom environment variables. +if [ -n "$GUIX_BUILD_OPTIONS" ]; then +cat << EOF +Error: Environment variable GUIX_BUILD_OPTIONS is not empty: + '$GUIX_BUILD_OPTIONS' + +Unfortunately this script is incompatible with GUIX_BUILD_OPTIONS, please unset +GUIX_BUILD_OPTIONS and use ADDITIONAL_GUIX_COMMON_FLAGS to set build options +across guix commands or ADDITIONAL_GUIX__FLAGS to set build options for a +specific guix command. + +See contrib/guix/README.md for more details. +EOF +exit 1 +fi + +################ +# The git worktree should not be dirty +################ + +if ! git diff-index --quiet HEAD -- && [ -z "$FORCE_DIRTY_WORKTREE" ]; then +cat << EOF +ERR: The current git worktree is dirty, which may lead to broken builds. + + Aborting... + +Hint: To make your git worktree clean, You may want to: + 1. Commit your changes, + 2. Stash your changes, or + 3. Set the 'FORCE_DIRTY_WORKTREE' environment variable if you insist on + using a dirty worktree +EOF +exit 1 +fi + +mkdir -p "$VERSION_BASE" + +################ +# Build directories should not exist +################ + +# Default to building for all supported HOSTs (overridable by environment) +export HOSTS="${HOSTS:-x86_64-linux-gnu arm-linux-gnueabihf aarch64-linux-gnu riscv64-linux-gnu powerpc64-linux-gnu powerpc64le-linux-gnu + x86_64-w64-mingw32 + x86_64-apple-darwin arm64-apple-darwin}" + +# Usage: distsrc_for_host HOST +# +# HOST: The current platform triple we're building for +# +distsrc_for_host() { + echo "${DISTSRC_BASE}/distsrc-${VERSION}-${1}" +} + +# Accumulate a list of build directories that already exist... 
+hosts_distsrc_exists="" +for host in $HOSTS; do + if [ -e "$(distsrc_for_host "$host")" ]; then + hosts_distsrc_exists+=" ${host}" + fi +done + +if [ -n "$hosts_distsrc_exists" ]; then +# ...so that we can print them out nicely in an error message +cat << EOF +ERR: Build directories for this commit already exist for the following platform + triples you're attempting to build, probably because of previous builds. + Please remove, or otherwise deal with them prior to starting another build. + + Aborting... + +Hint: To blow everything away, you may want to use: + + $ ./contrib/guix/guix-clean + +Specifically, this will remove all files without an entry in the index, +excluding the SDK directory, the depends download cache, the depends built +packages cache, the garbage collector roots for Guix environments, and the +output directory. +EOF +for host in $hosts_distsrc_exists; do + echo " ${host} '$(distsrc_for_host "$host")'" +done +exit 1 +else + mkdir -p "$DISTSRC_BASE" +fi + +################ +# When building for darwin, the macOS SDK should exist +################ + +for host in $HOSTS; do + case "$host" in + *darwin*) + OSX_SDK="$(make -C "${PWD}/depends" --no-print-directory HOST="$host" print-OSX_SDK | sed 's@^[^=]\+=@@g')" + if [ -e "$OSX_SDK" ]; then + echo "Found macOS SDK at '${OSX_SDK}', using..." + break + else + echo "macOS SDK does not exist at '${OSX_SDK}', please place the extracted, untarred SDK there to perform darwin builds, or define SDK_PATH environment variable. Exiting..." + exit 1 + fi + ;; + esac +done + +################ +# VERSION_BASE should have enough space +################ + +avail_KiB="$(df -Pk "$VERSION_BASE" | sed 1d | tr -s ' ' | cut -d' ' -f4)" +total_required_KiB=0 +for host in $HOSTS; do + case "$host" in + *darwin*) required_KiB=440000 ;; + *mingw*) required_KiB=7600000 ;; + *) required_KiB=6400000 ;; + esac + total_required_KiB=$((total_required_KiB+required_KiB)) +done + +if (( total_required_KiB > avail_KiB )); then + total_required_GiB=$((total_required_KiB / 1048576)) + avail_GiB=$((avail_KiB / 1048576)) + echo "Performing a Bitcoin Core Guix build for the selected HOSTS requires ${total_required_GiB} GiB, however, only ${avail_GiB} GiB is available. Please free up some disk space before performing the build." + exit 1 +fi + +################ +# Check that we can connect to the guix-daemon +################ + +cat << EOF +Checking that we can connect to the guix-daemon... + +Hint: If this hangs, you may want to try turning your guix-daemon off and on + again. + +EOF +if ! guix gc --list-failures > /dev/null; then +cat << EOF + +ERR: Failed to connect to the guix-daemon, please ensure that one is running and + reachable. +EOF +exit 1 +fi + +# Developer note: we could use `guix repl` for this check and run: +# +# (import (guix store)) (close-connection (open-connection)) +# +# However, the internal API is likely to change more than the CLI invocation + +################ +# Services database must have basic entries +################ + +if ! getent services http https ftp > /dev/null 2>&1; then +cat << EOF +ERR: Your system's C library cannot find service database entries for at least + one of the following services: http, https, ftp. + +Hint: Most likely, /etc/services does not exist yet (common for docker images + and minimal distros), or you don't have permissions to access it. + + If /etc/services does not exist yet, you may want to install the + appropriate package for your distro which provides it. 
+ + On Debian/Ubuntu: netbase + On Arch Linux: iana-etc + + For more information, see: getent(1), services(5) + +EOF + +fi + +######### +# SETUP # +######### + +# Determine the maximum number of jobs to run simultaneously (overridable by +# environment) +JOBS="${JOBS:-$(nproc)}" + +# Usage: host_to_commonname HOST +# +# HOST: The current platform triple we're building for +# +host_to_commonname() { + case "$1" in + *darwin*) echo osx ;; + *mingw*) echo win ;; + *linux*) echo linux ;; + *) exit 1 ;; + esac +} + +# Determine the reference time used for determinism (overridable by environment) +SOURCE_DATE_EPOCH="${SOURCE_DATE_EPOCH:-$(git -c log.showSignature=false log --format=%at -1)}" + +# Precious directories are those which should not be cleaned between successive +# guix builds +depends_precious_dir_names='SOURCES_PATH BASE_CACHE SDK_PATH' +precious_dir_names="${depends_precious_dir_names} OUTDIR_BASE PROFILES_BASE" + +# Usage: contains IFS-SEPARATED-LIST ITEM +contains() { + for i in ${1}; do + if [ "$i" = "${2}" ]; then + return 0 # Found! + fi + done + return 1 +} + +# If the user explicitly specified a precious directory, create it so we +# can map it into the container +for precious_dir_name in $precious_dir_names; do + precious_dir_path="${!precious_dir_name}" + if [ -n "$precious_dir_path" ]; then + if [ ! -e "$precious_dir_path" ]; then + mkdir -p "$precious_dir_path" + elif [ -L "$precious_dir_path" ]; then + echo "ERR: ${precious_dir_name} cannot be a symbolic link" + exit 1 + elif [ ! -d "$precious_dir_path" ]; then + echo "ERR: ${precious_dir_name} must be a directory" + exit 1 + fi + fi +done + +mkdir -p "$VAR_BASE" + +# Record the _effective_ values of precious directories such that guix-clean can +# avoid clobbering them if appropriate. +# +# shellcheck disable=SC2046,SC2086 +{ + # Get depends precious dir definitions from depends + make -C "${PWD}/depends" \ + --no-print-directory \ + -- $(printf "print-%s\n" $depends_precious_dir_names) + + # Get remaining precious dir definitions from the environment + for precious_dir_name in $precious_dir_names; do + precious_dir_path="${!precious_dir_name}" + if ! contains "$depends_precious_dir_names" "$precious_dir_name"; then + echo "${precious_dir_name}=${precious_dir_path}" + fi + done +} > "${VAR_BASE}/precious_dirs" + +# Make sure an output directory exists for our builds +OUTDIR_BASE="${OUTDIR_BASE:-${VERSION_BASE}/output}" +mkdir -p "$OUTDIR_BASE" + +# Download the depends sources now as we won't have internet access in the build +# container +for host in $HOSTS; do + make -C "${PWD}/depends" -j"$JOBS" download-"$(host_to_commonname "$host")" ${V:+V=1} ${SOURCES_PATH:+SOURCES_PATH="$SOURCES_PATH"} +done + +# Usage: outdir_for_host HOST SUFFIX +# +# HOST: The current platform triple we're building for +# +outdir_for_host() { + echo "${OUTDIR_BASE}/${1}${2:+-${2}}" +} + +# Usage: profiledir_for_host HOST SUFFIX +# +# HOST: The current platform triple we're building for +# +profiledir_for_host() { + echo "${PROFILES_BASE}/${1}${2:+-${2}}" +} + + +######### +# BUILD # +######### + +# Function to be called when building for host ${1} and the user interrupts the +# build +int_trap() { +cat << EOF +** INT received while building ${1}, you may want to clean up the relevant + work directories (e.g. 
distsrc-*) before rebuilding + +Hint: To blow everything away, you may want to use: + + $ ./contrib/guix/guix-clean + +Specifically, this will remove all files without an entry in the index, +excluding the SDK directory, the depends download cache, the depends built +packages cache, the garbage collector roots for Guix environments, and the +output directory. +EOF +} + +# Deterministically build Bitcoin Core +# shellcheck disable=SC2153 +for host in $HOSTS; do + + # Display proper warning when the user interrupts the build + trap 'int_trap ${host}' INT + + ( + # Required for 'contrib/guix/manifest.scm' to output the right manifest + # for the particular $HOST we're building for + export HOST="$host" + + # shellcheck disable=SC2030 +cat << EOF +INFO: Building ${VERSION:?not set} for platform triple ${HOST:?not set}: + ...using reference timestamp: ${SOURCE_DATE_EPOCH:?not set} + ...running at most ${JOBS:?not set} jobs + ...from worktree directory: '${PWD}' + ...bind-mounted in container to: '/bitcoin' + ...in build directory: '$(distsrc_for_host "$HOST")' + ...bind-mounted in container to: '$(DISTSRC_BASE=/distsrc-base && distsrc_for_host "$HOST")' + ...outputting in: '$(outdir_for_host "$HOST")' + ...bind-mounted in container to: '$(OUTDIR_BASE=/outdir-base && outdir_for_host "$HOST")' +EOF + + # Run the build script 'contrib/guix/libexec/build.sh' in the build + # container specified by 'contrib/guix/manifest.scm'. + # + # Explanation of `guix environment` flags: + # + # --container run command within an isolated container + # + # Running in an isolated container minimizes build-time differences + # between machines and improves reproducibility + # + # --pure unset existing environment variables + # + # Same rationale as --container + # + # --no-cwd do not share current working directory with an + # isolated container + # + # When --container is specified, the default behavior is to share + # the current working directory with the isolated container at the + # same exact path (e.g. mapping '/home/satoshi/bitcoin/' to + # '/home/satoshi/bitcoin/'). This means that the $PWD inside the + # container becomes a source of irreproducibility. --no-cwd disables + # this behaviour. + # + # --share=SPEC for containers, share writable host file system + # according to SPEC + # + # --share="$PWD"=/bitcoin + # + # maps our current working directory to /bitcoin + # inside the isolated container, which we later cd + # into. + # + # While we don't want to map our current working directory to the + # same exact path (as this introduces irreproducibility), we do want + # it to be at a _fixed_ path _somewhere_ inside the isolated + # container so that we have something to build. '/bitcoin' was + # chosen arbitrarily. + # + # ${SOURCES_PATH:+--share="$SOURCES_PATH"} + # + # make the downloaded depends sources path available + # inside the isolated container + # + # The isolated container has no network access as it's in a + # different network namespace from the main machine, so we have to + # make the downloaded depends sources available to it. The sources + # should have been downloaded prior to this invocation. + # + # --keep-failed keep build tree of failed builds + # + # When builds of the Guix environment itself (not Bitcoin Core) + # fail, it is useful for the build tree to be kept for debugging + # purposes. 
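    #     (Such failed build trees are typically kept under the guix-daemon's
    #     TMPDIR, e.g. /tmp/guix-build-*.drv-*, on the machine running the
    #     daemon.)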
+ # + # ${SUBSTITUTE_URLS:+--substitute-urls="$SUBSTITUTE_URLS"} + # + # fetch substitute from SUBSTITUTE_URLS if they are + # authorized + # + # Depending on the user's security model, it may be desirable to use + # substitutes (pre-built packages) from servers that the user trusts. + # Please read the README.md in the same directory as this file for + # more information. + # + # shellcheck disable=SC2086,SC2031 + time-machine environment --manifest="${PWD}/contrib/guix/manifest.scm" \ + --container \ + --pure \ + --no-cwd \ + --share="$PWD"=/bitcoin \ + --share="$DISTSRC_BASE"=/distsrc-base \ + --share="$OUTDIR_BASE"=/outdir-base \ + --expose="$(git rev-parse --git-common-dir)" \ + ${SOURCES_PATH:+--share="$SOURCES_PATH"} \ + ${BASE_CACHE:+--share="$BASE_CACHE"} \ + ${SDK_PATH:+--share="$SDK_PATH"} \ + --cores="$JOBS" \ + --keep-failed \ + --fallback \ + --link-profile \ + --root="$(profiledir_for_host "${HOST}")" \ + ${SUBSTITUTE_URLS:+--substitute-urls="$SUBSTITUTE_URLS"} \ + ${ADDITIONAL_GUIX_COMMON_FLAGS} ${ADDITIONAL_GUIX_ENVIRONMENT_FLAGS} \ + -- env HOST="$host" \ + DISTNAME="$DISTNAME" \ + JOBS="$JOBS" \ + SOURCE_DATE_EPOCH="${SOURCE_DATE_EPOCH:?unable to determine value}" \ + ${V:+V=1} \ + ${SOURCES_PATH:+SOURCES_PATH="$SOURCES_PATH"} \ + ${BASE_CACHE:+BASE_CACHE="$BASE_CACHE"} \ + ${SDK_PATH:+SDK_PATH="$SDK_PATH"} \ + DISTSRC="$(DISTSRC_BASE=/distsrc-base && distsrc_for_host "$HOST")" \ + OUTDIR="$(OUTDIR_BASE=/outdir-base && outdir_for_host "$HOST")" \ + DIST_ARCHIVE_BASE=/outdir-base/dist-archive \ + bash -c "cd /bitcoin && bash contrib/guix/libexec/build.sh" + ) + +done diff --git a/contrib/guix/guix-clean b/contrib/guix/guix-clean new file mode 100755 index 0000000000000..9af0a793cff7d --- /dev/null +++ b/contrib/guix/guix-clean @@ -0,0 +1,83 @@ +#!/usr/bin/env bash +export LC_ALL=C +set -e -o pipefail + +# Source the common prelude, which: +# 1. Checks if we're at the top directory of the Bitcoin Core repository +# 2. Defines a few common functions and variables +# +# shellcheck source=libexec/prelude.bash +source "$(dirname "${BASH_SOURCE[0]}")/libexec/prelude.bash" + + +################### +## Sanity Checks ## +################### + +################ +# Required non-builtin commands should be invokable +################ + +check_tools cat mkdir make git guix + + +############# +## Clean ## +############# + +# Usage: under_dir MAYBE_PARENT MAYBE_CHILD +# +# If MAYBE_CHILD is a subdirectory of MAYBE_PARENT, print the relative path +# from MAYBE_PARENT to MAYBE_CHILD. Otherwise, return 1 as the error code. +# +# NOTE: This does not perform any symlink-resolving or path canonicalization. +# +under_dir() { + local path_residue + path_residue="${2##"${1}"}" + if [ -z "$path_residue" ] || [ "$path_residue" = "$2" ]; then + return 1 + else + echo "$path_residue" + fi +} + +# Usage: dir_under_git_root MAYBE_CHILD +# +# If MAYBE_CHILD is under the current git repository and exists, print the +# relative path from the git repository's top-level directory to MAYBE_CHILD, +# otherwise, exit with an error code. +# +dir_under_git_root() { + local rv + rv="$(under_dir "$(git_root)" "$1")" + [ -n "$rv" ] && echo "$rv" +} + +shopt -s nullglob +found_precious_dirs_files=( "${version_base_prefix}"*/"${var_base_basename}/precious_dirs" ) # This expands to an array of directories... +shopt -u nullglob + +exclude_flags=() + +for precious_dirs_file in "${found_precious_dirs_files[@]}"; do + # Make sure the precious directories (e.g. 
SOURCES_PATH, BASE_CACHE, SDK_PATH) + # are excluded from git-clean + echo "Found precious_dirs file: '${precious_dirs_file}'" + + # Exclude the precious_dirs file itself + if dirs_file_exclude_fragment=$(dir_under_git_root "$(dirname "$precious_dirs_file")"); then + exclude_flags+=( --exclude="${dirs_file_exclude_fragment}/precious_dirs" ) + fi + + # Read each 'name=dir' pair from the precious_dirs file + while IFS='=' read -r name dir; do + # Add an exclusion flag if the precious directory is under the git root. + if under=$(dir_under_git_root "$dir"); then + echo "Avoiding ${name}: ${under}" + exclude_flags+=( --exclude="$under" ) + fi + done < "$precious_dirs_file" +done + +git clean -xdff "${exclude_flags[@]}" diff --git a/contrib/guix/guix-codesign b/contrib/guix/guix-codesign new file mode 100755 index 0000000000000..3279d431aaf34 --- /dev/null +++ b/contrib/guix/guix-codesign @@ -0,0 +1,378 @@ +#!/usr/bin/env bash +export LC_ALL=C +set -e -o pipefail + +# Source the common prelude, which: +# 1. Checks if we're at the top directory of the Bitcoin Core repository +# 2. Defines a few common functions and variables +# +# shellcheck source=libexec/prelude.bash +source "$(dirname "${BASH_SOURCE[0]}")/libexec/prelude.bash" + + +################### +## SANITY CHECKS ## +################### + +################ +# Required non-builtin commands should be invocable +################ + +check_tools cat mkdir git guix + +################ +# Required env vars should be non-empty +################ + +cmd_usage() { + cat <<EOF +Usage: env DETACHED_SIGS_REPO=<path/to/detached-sigs-repo> \\ + ./contrib/guix/guix-codesign + +EOF +} + +if [ -z "$DETACHED_SIGS_REPO" ]; then + cmd_usage + exit 1 +fi + +################ +# GUIX_BUILD_OPTIONS should be empty +################ +# +# GUIX_BUILD_OPTIONS is an environment variable recognized by guix commands that +# can perform builds. This seems like what we want instead of +# ADDITIONAL_GUIX_COMMON_FLAGS, but the value of GUIX_BUILD_OPTIONS is actually +# _appended_ to normal command-line options. Meaning that they will take +# precedence over the command-specific ADDITIONAL_GUIX_<CMD>_FLAGS. +# +# This seems like a poor user experience. Thus we check for GUIX_BUILD_OPTIONS's +# existence here and direct users of this script to use our (more flexible) +# custom environment variables. +if [ -n "$GUIX_BUILD_OPTIONS" ]; then +cat << EOF +Error: Environment variable GUIX_BUILD_OPTIONS is not empty: + '$GUIX_BUILD_OPTIONS' + +Unfortunately this script is incompatible with GUIX_BUILD_OPTIONS, please unset +GUIX_BUILD_OPTIONS and use ADDITIONAL_GUIX_COMMON_FLAGS to set build options +across guix commands or ADDITIONAL_GUIX_<CMD>_FLAGS to set build options for a +specific guix command. + +See contrib/guix/README.md for more details. +EOF +exit 1 +fi + +################ +# The codesignature git worktree should not be dirty +################ + +if ! git -C "$DETACHED_SIGS_REPO" diff-index --quiet HEAD -- && [ -z "$FORCE_DIRTY_WORKTREE" ]; then + cat << EOF +ERR: The DETACHED CODESIGNATURE git worktree is dirty, which may lead to broken builds. + + Aborting... + +Hint: To make your git worktree clean, You may want to: + 1. Commit your changes, + 2. Stash your changes, or + 3. 
Set the 'FORCE_DIRTY_WORKTREE' environment variable if you insist on + using a dirty worktree +EOF + exit 1 +fi + +################ +# Build directories should not exist +################ + +# Default to building for all supported HOSTs (overridable by environment) +export HOSTS="${HOSTS:-x86_64-w64-mingw32 x86_64-apple-darwin arm64-apple-darwin}" + +# Usage: distsrc_for_host HOST +# +# HOST: The current platform triple we're building for +# +distsrc_for_host() { + echo "${DISTSRC_BASE}/distsrc-${VERSION}-${1}-codesigned" +} + +# Accumulate a list of build directories that already exist... +hosts_distsrc_exists="" +for host in $HOSTS; do + if [ -e "$(distsrc_for_host "$host")" ]; then + hosts_distsrc_exists+=" ${host}" + fi +done + +if [ -n "$hosts_distsrc_exists" ]; then +# ...so that we can print them out nicely in an error message +cat << EOF +ERR: Build directories for this commit already exist for the following platform + triples you're attempting to build, probably because of previous builds. + Please remove, or otherwise deal with them prior to starting another build. + + Aborting... + +Hint: To blow everything away, you may want to use: + + $ ./contrib/guix/guix-clean + +Specifically, this will remove all files without an entry in the index, +excluding the SDK directory, the depends download cache, the depends built +packages cache, the garbage collector roots for Guix environments, and the +output directory. +EOF +for host in $hosts_distsrc_exists; do + echo " ${host} '$(distsrc_for_host "$host")'" +done +exit 1 +else + mkdir -p "$DISTSRC_BASE" +fi + + +################ +# Unsigned tarballs SHOULD exist +################ + +# Usage: outdir_for_host HOST SUFFIX +# +# HOST: The current platform triple we're building for +# +outdir_for_host() { + echo "${OUTDIR_BASE}/${1}${2:+-${2}}" +} + + +unsigned_tarball_for_host() { + case "$1" in + *mingw*) + echo "$(outdir_for_host "$1")/${DISTNAME}-win64-unsigned.tar.gz" + ;; + *darwin*) + echo "$(outdir_for_host "$1")/${DISTNAME}-${1}-unsigned.tar.gz" + ;; + *) + exit 1 + ;; + esac +} + +# Accumulate a list of build directories that already exist... +hosts_unsigned_tarball_missing="" +for host in $HOSTS; do + if [ ! -e "$(unsigned_tarball_for_host "$host")" ]; then + hosts_unsigned_tarball_missing+=" ${host}" + fi +done + +if [ -n "$hosts_unsigned_tarball_missing" ]; then + # ...so that we can print them out nicely in an error message + cat << EOF +ERR: Unsigned tarballs do not exist +... + +EOF +for host in $hosts_unsigned_tarball_missing; do + echo " ${host} '$(unsigned_tarball_for_host "$host")'" +done +exit 1 +fi + +################ +# Check that we can connect to the guix-daemon +################ + +cat << EOF +Checking that we can connect to the guix-daemon... + +Hint: If this hangs, you may want to try turning your guix-daemon off and on + again. + +EOF +if ! guix gc --list-failures > /dev/null; then + cat << EOF + +ERR: Failed to connect to the guix-daemon, please ensure that one is running and + reachable. 
+EOF + exit 1 +fi + +# Developer note: we could use `guix repl` for this check and run: +# +# (import (guix store)) (close-connection (open-connection)) +# +# However, the internal API is likely to change more than the CLI invocation + + +######### +# SETUP # +######### + +# Determine the maximum number of jobs to run simultaneously (overridable by +# environment) +JOBS="${JOBS:-$(nproc)}" + +# Determine the reference time used for determinism (overridable by environment) +SOURCE_DATE_EPOCH="${SOURCE_DATE_EPOCH:-$(git -c log.showSignature=false log --format=%at -1)}" + +# Make sure an output directory exists for our builds +OUTDIR_BASE="${OUTDIR_BASE:-${VERSION_BASE}/output}" +mkdir -p "$OUTDIR_BASE" + +# Usage: profiledir_for_host HOST SUFFIX +# +# HOST: The current platform triple we're building for +# +profiledir_for_host() { + echo "${PROFILES_BASE}/${1}${2:+-${2}}" +} + +######### +# BUILD # +######### + +# Function to be called when codesigning for host ${1} and the user interrupts +# the codesign +int_trap() { +cat << EOF +** INT received while codesigning ${1}, you may want to clean up the relevant + work directories (e.g. distsrc-*) before recodesigning + +Hint: To blow everything away, you may want to use: + + $ ./contrib/guix/guix-clean + +Specifically, this will remove all files without an entry in the index, +excluding the SDK directory, the depends download cache, the depends built +packages cache, the garbage collector roots for Guix environments, and the +output directory. +EOF +} + +# Deterministically build Bitcoin Core +# shellcheck disable=SC2153 +for host in $HOSTS; do + + # Display proper warning when the user interrupts the build + trap 'int_trap ${host}' INT + + ( + # Required for 'contrib/guix/manifest.scm' to output the right manifest + # for the particular $HOST we're building for + export HOST="$host" + + # shellcheck disable=SC2030 +cat << EOF +INFO: Codesigning ${VERSION:?not set} for platform triple ${HOST:?not set}: + ...using reference timestamp: ${SOURCE_DATE_EPOCH:?not set} + ...from worktree directory: '${PWD}' + ...bind-mounted in container to: '/bitcoin' + ...in build directory: '$(distsrc_for_host "$HOST")' + ...bind-mounted in container to: '$(DISTSRC_BASE=/distsrc-base && distsrc_for_host "$HOST")' + ...outputting in: '$(outdir_for_host "$HOST" codesigned)' + ...bind-mounted in container to: '$(OUTDIR_BASE=/outdir-base && outdir_for_host "$HOST" codesigned)' + ...using detached signatures in: '${DETACHED_SIGS_REPO:?not set}' + ...bind-mounted in container to: '/detached-sigs' +EOF + + + # Run the build script 'contrib/guix/libexec/build.sh' in the build + # container specified by 'contrib/guix/manifest.scm'. + # + # Explanation of `guix environment` flags: + # + # --container run command within an isolated container + # + # Running in an isolated container minimizes build-time differences + # between machines and improves reproducibility + # + # --pure unset existing environment variables + # + # Same rationale as --container + # + # --no-cwd do not share current working directory with an + # isolated container + # + # When --container is specified, the default behavior is to share + # the current working directory with the isolated container at the + # same exact path (e.g. mapping '/home/satoshi/bitcoin/' to + # '/home/satoshi/bitcoin/'). This means that the $PWD inside the + # container becomes a source of irreproducibility. --no-cwd disables + # this behaviour. 
+ # + # --share=SPEC for containers, share writable host file system + # according to SPEC + # + # --share="$PWD"=/bitcoin + # + # maps our current working directory to /bitcoin + # inside the isolated container, which we later cd + # into. + # + # While we don't want to map our current working directory to the + # same exact path (as this introduces irreproducibility), we do want + # it to be at a _fixed_ path _somewhere_ inside the isolated + # container so that we have something to build. '/bitcoin' was + # chosen arbitrarily. + # + # ${SOURCES_PATH:+--share="$SOURCES_PATH"} + # + # make the downloaded depends sources path available + # inside the isolated container + # + # The isolated container has no network access as it's in a + # different network namespace from the main machine, so we have to + # make the downloaded depends sources available to it. The sources + # should have been downloaded prior to this invocation. + # + # ${SUBSTITUTE_URLS:+--substitute-urls="$SUBSTITUTE_URLS"} + # + # fetch substitute from SUBSTITUTE_URLS if they are + # authorized + # + # Depending on the user's security model, it may be desirable to use + # substitutes (pre-built packages) from servers that the user trusts. + # Please read the README.md in the same directory as this file for + # more information. + # + # shellcheck disable=SC2086,SC2031 + time-machine environment --manifest="${PWD}/contrib/guix/manifest.scm" \ + --container \ + --pure \ + --no-cwd \ + --share="$PWD"=/bitcoin \ + --share="$DISTSRC_BASE"=/distsrc-base \ + --share="$OUTDIR_BASE"=/outdir-base \ + --share="$DETACHED_SIGS_REPO"=/detached-sigs \ + --expose="$(git rev-parse --git-common-dir)" \ + --expose="$(git -C "$DETACHED_SIGS_REPO" rev-parse --git-common-dir)" \ + ${SOURCES_PATH:+--share="$SOURCES_PATH"} \ + --cores="$JOBS" \ + --keep-failed \ + --fallback \ + --link-profile \ + --root="$(profiledir_for_host "${HOST}" codesigned)" \ + ${SUBSTITUTE_URLS:+--substitute-urls="$SUBSTITUTE_URLS"} \ + ${ADDITIONAL_GUIX_COMMON_FLAGS} ${ADDITIONAL_GUIX_ENVIRONMENT_FLAGS} \ + -- env HOST="$host" \ + DISTNAME="$DISTNAME" \ + JOBS="$JOBS" \ + SOURCE_DATE_EPOCH="${SOURCE_DATE_EPOCH:?unable to determine value}" \ + ${V:+V=1} \ + ${SOURCES_PATH:+SOURCES_PATH="$SOURCES_PATH"} \ + DISTSRC="$(DISTSRC_BASE=/distsrc-base && distsrc_for_host "$HOST")" \ + OUTDIR="$(OUTDIR_BASE=/outdir-base && outdir_for_host "$HOST" codesigned)" \ + DIST_ARCHIVE_BASE=/outdir-base/dist-archive \ + DETACHED_SIGS_REPO=/detached-sigs \ + UNSIGNED_TARBALL="$(OUTDIR_BASE=/outdir-base && unsigned_tarball_for_host "$HOST")" \ + bash -c "cd /bitcoin && bash contrib/guix/libexec/codesign.sh" + ) + +done diff --git a/contrib/guix/guix-verify b/contrib/guix/guix-verify new file mode 100755 index 0000000000000..02ae022741bab --- /dev/null +++ b/contrib/guix/guix-verify @@ -0,0 +1,174 @@ +#!/usr/bin/env bash +export LC_ALL=C +set -e -o pipefail + +# Source the common prelude, which: +# 1. Checks if we're at the top directory of the Bitcoin Core repository +# 2. 
Defines a few common functions and variables + +# shellcheck source=libexec/prelude.bash +source "$(dirname "${BASH_SOURCE[0]}")/libexec/prelude.bash" + + +################### +## Sanity Checks ## +################### + +################ +# Required non-builtin commands should be invokable +################ + +check_tools cat diff gpg + +################ +# Required env vars should be non-empty +################ + +cmd_usage() { +cat <<EOF +Usage: env GUIX_SIGS_REPO=<path/to/guix.sigs> [ SIGNER=<signer> ] ./contrib/guix/guix-verify + +Example overriding signer's manifest to use as base + + env GUIX_SIGS_REPO=/home/dongcarl/guix.sigs SIGNER=achow101 ./contrib/guix/guix-verify + +EOF +} + +if [ -z "$GUIX_SIGS_REPO" ]; then + cmd_usage + exit 1 +fi + +################ +# GUIX_SIGS_REPO should exist as a directory +################ + +if [ ! -d "$GUIX_SIGS_REPO" ]; then +cat << EOF +ERR: The specified GUIX_SIGS_REPO is not an existent directory: + + '$GUIX_SIGS_REPO' + +Hint: Please clone the guix.sigs repository and point to it with the + GUIX_SIGS_REPO environment variable. + +EOF +cmd_usage +exit 1 +fi + +############## +## Verify ## +############## + +OUTSIGDIR_BASE="${GUIX_SIGS_REPO}/${VERSION}" +echo "Looking for signature directories in '${OUTSIGDIR_BASE}'" +echo "" + +# Usage: verify compare_manifest current_manifest +verify() { + local compare_manifest="$1" + local current_manifest="$2" + if ! gpg --quiet --batch --verify "$current_manifest".asc "$current_manifest" 1>&2; then + echo "ERR: Failed to verify GPG signature in '${current_manifest}'" + echo "" + echo "Hint: Either the signature is invalid or the public key is missing" + echo "" + failure=1 + elif ! diff --report-identical "$compare_manifest" "$current_manifest" 1>&2; then + echo "ERR: The SHA256SUMS attestations in these two directories differ:" + echo " '${compare_manifest}'" + echo " '${current_manifest}'" + echo "" + failure=1 + else + echo "Verified: '${current_manifest}'" + echo "" + fi +} + +shopt -s nullglob +all_noncodesigned=( "$OUTSIGDIR_BASE"/*/noncodesigned.SHA256SUMS ) +shopt -u nullglob + +echo "--------------------" +echo "" +if (( ${#all_noncodesigned[@]} )); then + compare_noncodesigned="${all_noncodesigned[0]}" + if [[ -n "$SIGNER" ]]; then + signer_noncodesigned="$OUTSIGDIR_BASE/$SIGNER/noncodesigned.SHA256SUMS" + if [[ -f "$signer_noncodesigned" ]]; then + echo "Using $SIGNER's manifest as the base to compare against" + compare_noncodesigned="$signer_noncodesigned" + else + echo "Unable to find $SIGNER's manifest, using the first one found" + fi + else + echo "No SIGNER provided, using the first manifest found" + fi + + for current_manifest in "${all_noncodesigned[@]}"; do + verify "$compare_noncodesigned" "$current_manifest" + done + + echo "DONE: Checking output signatures for noncodesigned.SHA256SUMS" + echo "" +else + echo "WARN: No signature directories with noncodesigned.SHA256SUMS found" + echo "" +fi + +shopt -s nullglob +all_all=( "$OUTSIGDIR_BASE"/*/all.SHA256SUMS ) +shopt -u nullglob + +echo "--------------------" +echo "" +if (( ${#all_all[@]} )); then + compare_all="${all_all[0]}" + if [[ -n "$SIGNER" ]]; then + signer_all="$OUTSIGDIR_BASE/$SIGNER/all.SHA256SUMS" + if [[ -f "$signer_all" ]]; then + echo "Using $SIGNER's manifest as the base to compare against" + compare_all="$signer_all" + else + echo "Unable to find $SIGNER's manifest, using the first one found" + fi + else + echo "No SIGNER provided, using the first manifest found" + fi + + for current_manifest in "${all_all[@]}"; do + verify "$compare_all" "$current_manifest" + done + + # Sanity 
check: there should be no entries that exist in + # noncodesigned.SHA256SUMS that doesn't exist in all.SHA256SUMS + if [[ "$(comm -23 <(sort "$compare_noncodesigned") <(sort "$compare_all") | wc -c)" -ne 0 ]]; then + echo "ERR: There are unique lines in noncodesigned.SHA256SUMS which" + echo " do not exist in all.SHA256SUMS, something went very wrong." + exit 1 + fi + + echo "DONE: Checking output signatures for all.SHA256SUMS" + echo "" +else + echo "WARN: No signature directories with all.SHA256SUMS found" + echo "" +fi + +echo "====================" +echo "" +if (( ${#all_noncodesigned[@]} + ${#all_all[@]} == 0 )); then + echo "ERR: Unable to perform any verifications as no signature directories" + echo " were found" + echo "" + exit 1 +fi + +if [ -n "$failure" ]; then + exit 1 +fi diff --git a/contrib/guix/libexec/build.sh b/contrib/guix/libexec/build.sh new file mode 100755 index 0000000000000..cdf0020d4d5b9 --- /dev/null +++ b/contrib/guix/libexec/build.sh @@ -0,0 +1,444 @@ +#!/usr/bin/env bash +# Copyright (c) 2019-2021 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +export LC_ALL=C +set -e -o pipefail +export TZ=UTC + +# Although Guix _does_ set umask when building its own packages (in our case, +# this is all packages in manifest.scm), it does not set it for `guix +# environment`. It does make sense for at least `guix environment --container` +# to set umask, so if that change gets merged upstream and we bump the +# time-machine to a commit which includes the aforementioned change, we can +# remove this line. +# +# This line should be placed before any commands which creates files. +umask 0022 + +if [ -n "$V" ]; then + # Print both unexpanded (-v) and expanded (-x) forms of commands as they are + # read from this file. + set -vx + # Set VERBOSE for CMake-based builds + export VERBOSE="$V" +fi + +# Check that required environment variables are set +cat << EOF +Required environment variables as seen inside the container: + DIST_ARCHIVE_BASE: ${DIST_ARCHIVE_BASE:?not set} + DISTNAME: ${DISTNAME:?not set} + HOST: ${HOST:?not set} + SOURCE_DATE_EPOCH: ${SOURCE_DATE_EPOCH:?not set} + JOBS: ${JOBS:?not set} + DISTSRC: ${DISTSRC:?not set} + OUTDIR: ${OUTDIR:?not set} +EOF + +ACTUAL_OUTDIR="${OUTDIR}" +OUTDIR="${DISTSRC}/output" + +##################### +# Environment Setup # +##################### + +# The depends folder also serves as a base-prefix for depends packages for +# $HOSTs after successfully building. 
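+# (For example, once the depends build below completes for
+# HOST=x86_64-linux-gnu, "${BASEPREFIX}/x86_64-linux-gnu" holds that host's
+# prefix, which is later handed to ./configure via CONFIG_SITE.)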
+BASEPREFIX="${PWD}/depends" + +# Given a package name and an output name, return the path of that output in our +# current guix environment +store_path() { + grep --extended-regexp "/[^-]{32}-${1}-[^-]+${2:+-${2}}" "${GUIX_ENVIRONMENT}/manifest" \ + | head --lines=1 \ + | sed --expression='s|^[[:space:]]*"||' \ + --expression='s|"[[:space:]]*$||' +} + + +# Set environment variables to point the NATIVE toolchain to the right +# includes/libs +NATIVE_GCC="$(store_path gcc-toolchain)" +NATIVE_GCC_STATIC="$(store_path gcc-toolchain static)" + +unset LIBRARY_PATH +unset CPATH +unset C_INCLUDE_PATH +unset CPLUS_INCLUDE_PATH +unset OBJC_INCLUDE_PATH +unset OBJCPLUS_INCLUDE_PATH + +export LIBRARY_PATH="${NATIVE_GCC}/lib:${NATIVE_GCC}/lib64:${NATIVE_GCC_STATIC}/lib:${NATIVE_GCC_STATIC}/lib64" +export C_INCLUDE_PATH="${NATIVE_GCC}/include" +export CPLUS_INCLUDE_PATH="${NATIVE_GCC}/include/c++:${NATIVE_GCC}/include" +export OBJC_INCLUDE_PATH="${NATIVE_GCC}/include" +export OBJCPLUS_INCLUDE_PATH="${NATIVE_GCC}/include/c++:${NATIVE_GCC}/include" + +prepend_to_search_env_var() { + export "${1}=${2}${!1:+:}${!1}" +} + +# Set environment variables to point the CROSS toolchain to the right +# includes/libs for $HOST +case "$HOST" in + *mingw*) + # Determine output paths to use in CROSS_* environment variables + CROSS_GLIBC="$(store_path "mingw-w64-x86_64-winpthreads")" + CROSS_GCC="$(store_path "gcc-cross-${HOST}")" + CROSS_GCC_LIB_STORE="$(store_path "gcc-cross-${HOST}" lib)" + CROSS_GCC_LIBS=( "${CROSS_GCC_LIB_STORE}/lib/gcc/${HOST}"/* ) # This expands to an array of directories... + CROSS_GCC_LIB="${CROSS_GCC_LIBS[0]}" # ...we just want the first one (there should only be one) + + # The search path ordering is generally: + # 1. gcc-related search paths + # 2. libc-related search paths + # 2. kernel-header-related search paths (not applicable to mingw-w64 hosts) + export CROSS_C_INCLUDE_PATH="${CROSS_GCC_LIB}/include:${CROSS_GCC_LIB}/include-fixed:${CROSS_GLIBC}/include" + export CROSS_CPLUS_INCLUDE_PATH="${CROSS_GCC}/include/c++:${CROSS_GCC}/include/c++/${HOST}:${CROSS_GCC}/include/c++/backward:${CROSS_C_INCLUDE_PATH}" + export CROSS_LIBRARY_PATH="${CROSS_GCC_LIB_STORE}/lib:${CROSS_GCC_LIB}:${CROSS_GLIBC}/lib" + ;; + *darwin*) + # The CROSS toolchain for darwin uses the SDK and ignores environment variables. + # See depends/hosts/darwin.mk for more details. + ;; + *linux*) + CROSS_GLIBC="$(store_path "glibc-cross-${HOST}")" + CROSS_GLIBC_STATIC="$(store_path "glibc-cross-${HOST}" static)" + CROSS_KERNEL="$(store_path "linux-libre-headers-cross-${HOST}")" + CROSS_GCC="$(store_path "gcc-cross-${HOST}")" + CROSS_GCC_LIB_STORE="$(store_path "gcc-cross-${HOST}" lib)" + CROSS_GCC_LIBS=( "${CROSS_GCC_LIB_STORE}/lib/gcc/${HOST}"/* ) # This expands to an array of directories... + CROSS_GCC_LIB="${CROSS_GCC_LIBS[0]}" # ...we just want the first one (there should only be one) + + export CROSS_C_INCLUDE_PATH="${CROSS_GCC_LIB}/include:${CROSS_GCC_LIB}/include-fixed:${CROSS_GLIBC}/include:${CROSS_KERNEL}/include" + export CROSS_CPLUS_INCLUDE_PATH="${CROSS_GCC}/include/c++:${CROSS_GCC}/include/c++/${HOST}:${CROSS_GCC}/include/c++/backward:${CROSS_C_INCLUDE_PATH}" + export CROSS_LIBRARY_PATH="${CROSS_GCC_LIB_STORE}/lib:${CROSS_GCC_LIB}:${CROSS_GLIBC}/lib:${CROSS_GLIBC_STATIC}/lib" + ;; + *) + exit 1 ;; +esac + +# Sanity check CROSS_*_PATH directories +IFS=':' read -ra PATHS <<< "${CROSS_C_INCLUDE_PATH}:${CROSS_CPLUS_INCLUDE_PATH}:${CROSS_LIBRARY_PATH}" +for p in "${PATHS[@]}"; do + if [ -n "$p" ] && [ ! 
-d "$p" ]; then + echo "'$p' doesn't exist or isn't a directory... Aborting..." + exit 1 + fi +done + +# Disable Guix ld auto-rpath behavior +case "$HOST" in + *darwin*) + # The auto-rpath behavior is necessary for darwin builds as some native + # tools built by depends refer to and depend on Guix-built native + # libraries + # + # After the native packages in depends are built, the ld wrapper should + # no longer affect our build, as clang would instead reach for + # x86_64-apple-darwin-ld from cctools + ;; + *) export GUIX_LD_WRAPPER_DISABLE_RPATH=yes ;; +esac + +# Make /usr/bin if it doesn't exist +[ -e /usr/bin ] || mkdir -p /usr/bin + +# Symlink file and env to a conventional path +[ -e /usr/bin/file ] || ln -s --no-dereference "$(command -v file)" /usr/bin/file +[ -e /usr/bin/env ] || ln -s --no-dereference "$(command -v env)" /usr/bin/env + +# Determine the correct value for -Wl,--dynamic-linker for the current $HOST +case "$HOST" in + *linux*) + glibc_dynamic_linker=$( + case "$HOST" in + x86_64-linux-gnu) echo /lib64/ld-linux-x86-64.so.2 ;; + arm-linux-gnueabihf) echo /lib/ld-linux-armhf.so.3 ;; + aarch64-linux-gnu) echo /lib/ld-linux-aarch64.so.1 ;; + riscv64-linux-gnu) echo /lib/ld-linux-riscv64-lp64d.so.1 ;; + powerpc64-linux-gnu) echo /lib64/ld64.so.1;; + powerpc64le-linux-gnu) echo /lib64/ld64.so.2;; + *) exit 1 ;; + esac + ) + ;; +esac + +# Environment variables for determinism +export TAR_OPTIONS="--owner=0 --group=0 --numeric-owner --mtime='@${SOURCE_DATE_EPOCH}' --sort=name" +export TZ="UTC" +case "$HOST" in + *darwin*) + # cctools AR, unlike GNU binutils AR, does not have a deterministic mode + # or a configure flag to enable determinism by default, it only + # understands if this env-var is set or not. See: + # + # https://github.com/tpoechtrager/cctools-port/blob/55562e4073dea0fbfd0b20e0bf69ffe6390c7f97/cctools/ar/archive.c#L334 + export ZERO_AR_DATE=yes + ;; +esac + +#################### +# Depends Building # +#################### + +# Build the depends tree, overriding variables that assume multilib gcc +make -C depends --jobs="$JOBS" HOST="$HOST" \ + ${V:+V=1} \ + ${SOURCES_PATH+SOURCES_PATH="$SOURCES_PATH"} \ + ${BASE_CACHE+BASE_CACHE="$BASE_CACHE"} \ + ${SDK_PATH+SDK_PATH="$SDK_PATH"} \ + x86_64_linux_CC=x86_64-linux-gnu-gcc \ + x86_64_linux_CXX=x86_64-linux-gnu-g++ \ + x86_64_linux_AR=x86_64-linux-gnu-ar \ + x86_64_linux_RANLIB=x86_64-linux-gnu-ranlib \ + x86_64_linux_NM=x86_64-linux-gnu-nm \ + x86_64_linux_STRIP=x86_64-linux-gnu-strip \ + qt_config_opts_x86_64_linux='-platform linux-g++ -xplatform bitcoin-linux-g++' \ + FORCE_USE_SYSTEM_CLANG=1 + + +########################### +# Source Tarball Building # +########################### + +GIT_ARCHIVE="${DIST_ARCHIVE_BASE}/${DISTNAME}.tar.gz" + +# Create the source tarball if not already there +if [ ! -e "$GIT_ARCHIVE" ]; then + mkdir -p "$(dirname "$GIT_ARCHIVE")" + git archive --prefix="${DISTNAME}/" --output="$GIT_ARCHIVE" HEAD +fi + +mkdir -p "$OUTDIR" + +########################### +# Binary Tarball Building # +########################### + +# CONFIGFLAGS +CONFIGFLAGS="--enable-reduce-exports --disable-bench --disable-gui-tests --disable-fuzz-binary" + +# CFLAGS +HOST_CFLAGS="-O2 -g" +case "$HOST" in + *linux*) HOST_CFLAGS+=" -ffile-prefix-map=${PWD}=." 
;; + *mingw*) HOST_CFLAGS+=" -fno-ident" ;; + *darwin*) unset HOST_CFLAGS ;; +esac + +# CXXFLAGS +HOST_CXXFLAGS="$HOST_CFLAGS" + +case "$HOST" in + arm-linux-gnueabihf) HOST_CXXFLAGS="${HOST_CXXFLAGS} -Wno-psabi" ;; +esac + +# LDFLAGS +case "$HOST" in + *linux*) HOST_LDFLAGS="-Wl,--as-needed -Wl,--dynamic-linker=$glibc_dynamic_linker -static-libstdc++ -Wl,-O2" ;; + *mingw*) HOST_LDFLAGS="-Wl,--no-insert-timestamp" ;; +esac + +# Using --no-tls-get-addr-optimize retains compatibility with glibc 2.18, by +# avoiding a PowerPC64 optimisation available in glibc 2.22 and later. +# https://sourceware.org/binutils/docs-2.35/ld/PowerPC64-ELF64.html +case "$HOST" in + *powerpc64*) HOST_LDFLAGS="${HOST_LDFLAGS} -Wl,--no-tls-get-addr-optimize" ;; +esac + +case "$HOST" in + powerpc64-linux-*|riscv64-linux-*) HOST_LDFLAGS="${HOST_LDFLAGS} -Wl,-z,noexecstack" ;; +esac + +# Make $HOST-specific native binaries from depends available in $PATH +export PATH="${BASEPREFIX}/${HOST}/native/bin:${PATH}" +mkdir -p "$DISTSRC" +( + cd "$DISTSRC" + + # Extract the source tarball + tar --strip-components=1 -xf "${GIT_ARCHIVE}" + + ./autogen.sh + + # Configure this DISTSRC for $HOST + # shellcheck disable=SC2086 + env CONFIG_SITE="${BASEPREFIX}/${HOST}/share/config.site" \ + ./configure --prefix=/ \ + --disable-ccache \ + --disable-maintainer-mode \ + --disable-dependency-tracking \ + ${CONFIGFLAGS} \ + ${HOST_CFLAGS:+CFLAGS="${HOST_CFLAGS}"} \ + ${HOST_CXXFLAGS:+CXXFLAGS="${HOST_CXXFLAGS}"} \ + ${HOST_LDFLAGS:+LDFLAGS="${HOST_LDFLAGS}"} + + sed -i.old 's/-lstdc++ //g' config.status libtool + + # Build Bitcoin Core + make --jobs="$JOBS" ${V:+V=1} + + # Check that symbol/security checks tools are sane. + make test-security-check ${V:+V=1} + # Perform basic security checks on a series of executables. + make -C src --jobs=1 check-security ${V:+V=1} + # Check that executables only contain allowed version symbols. + make -C src --jobs=1 check-symbols ${V:+V=1} + + mkdir -p "$OUTDIR" + + # Make the os-specific installers + case "$HOST" in + *mingw*) + make deploy ${V:+V=1} BITCOIN_WIN_INSTALLER="${OUTDIR}/${DISTNAME}-win64-setup-unsigned.exe" + ;; + esac + + # Setup the directory where our Bitcoin Core build for HOST will be + # installed. This directory will also later serve as the input for our + # binary tarballs. + INSTALLPATH="${PWD}/installed/${DISTNAME}" + mkdir -p "${INSTALLPATH}" + # Install built Bitcoin Core to $INSTALLPATH + case "$HOST" in + *darwin*) + make install-strip DESTDIR="${INSTALLPATH}" ${V:+V=1} + ;; + *) + make install DESTDIR="${INSTALLPATH}" ${V:+V=1} + ;; + esac + + case "$HOST" in + *darwin*) + make osx_volname ${V:+V=1} + make deploydir ${V:+V=1} + mkdir -p "unsigned-app-${HOST}" + cp --target-directory="unsigned-app-${HOST}" \ + osx_volname \ + contrib/macdeploy/detached-sig-create.sh + mv --target-directory="unsigned-app-${HOST}" dist + ( + cd "unsigned-app-${HOST}" + find . -print0 \ + | sort --zero-terminated \ + | tar --create --no-recursion --mode='u+rw,go+r-w,a+X' --null --files-from=- \ + | gzip -9n > "${OUTDIR}/${DISTNAME}-${HOST}-unsigned.tar.gz" \ + || ( rm -f "${OUTDIR}/${DISTNAME}-${HOST}-unsigned.tar.gz" && exit 1 ) + ) + make deploy ${V:+V=1} OSX_DMG="${OUTDIR}/${DISTNAME}-${HOST}-unsigned.dmg" + ;; + esac + ( + cd installed + + case "$HOST" in + *mingw*) + mv --target-directory="$DISTNAME"/lib/ "$DISTNAME"/bin/*.dll + ;; + esac + + # Prune libtool and object archives + find . -name "lib*.la" -delete + find . 
-name "lib*.a" -delete + + # Prune pkg-config files + rm -rf "${DISTNAME}/lib/pkgconfig" + + case "$HOST" in + *darwin*) ;; + *) + # Split binaries and libraries from their debug symbols + { + find "${DISTNAME}/bin" -type f -executable -print0 + find "${DISTNAME}/lib" -type f -print0 + } | xargs -0 -P"$JOBS" -I{} "${DISTSRC}/contrib/devtools/split-debug.sh" {} {} {}.dbg + ;; + esac + + case "$HOST" in + *mingw*) + cp "${DISTSRC}/doc/README_windows.txt" "${DISTNAME}/readme.txt" + ;; + *linux*) + cp "${DISTSRC}/README.md" "${DISTNAME}/" + ;; + esac + + # copy over the example bitcoin.conf file. if contrib/devtools/gen-bitcoin-conf.sh + # has not been run before buildling, this file will be a stub + cp "${DISTSRC}/share/examples/bitcoin.conf" "${DISTNAME}/" + + # Finally, deterministically produce {non-,}debug binary tarballs ready + # for release + case "$HOST" in + *mingw*) + find "${DISTNAME}" -not -name "*.dbg" -print0 \ + | xargs -0r touch --no-dereference --date="@${SOURCE_DATE_EPOCH}" + find "${DISTNAME}" -not -name "*.dbg" \ + | sort \ + | zip -X@ "${OUTDIR}/${DISTNAME}-${HOST//x86_64-w64-mingw32/win64}.zip" \ + || ( rm -f "${OUTDIR}/${DISTNAME}-${HOST//x86_64-w64-mingw32/win64}.zip" && exit 1 ) + find "${DISTNAME}" -name "*.dbg" -print0 \ + | xargs -0r touch --no-dereference --date="@${SOURCE_DATE_EPOCH}" + find "${DISTNAME}" -name "*.dbg" \ + | sort \ + | zip -X@ "${OUTDIR}/${DISTNAME}-${HOST//x86_64-w64-mingw32/win64}-debug.zip" \ + || ( rm -f "${OUTDIR}/${DISTNAME}-${HOST//x86_64-w64-mingw32/win64}-debug.zip" && exit 1 ) + ;; + *linux*) + find "${DISTNAME}" -not -name "*.dbg" -print0 \ + | sort --zero-terminated \ + | tar --create --no-recursion --mode='u+rw,go+r-w,a+X' --null --files-from=- \ + | gzip -9n > "${OUTDIR}/${DISTNAME}-${HOST}.tar.gz" \ + || ( rm -f "${OUTDIR}/${DISTNAME}-${HOST}.tar.gz" && exit 1 ) + find "${DISTNAME}" -name "*.dbg" -print0 \ + | sort --zero-terminated \ + | tar --create --no-recursion --mode='u+rw,go+r-w,a+X' --null --files-from=- \ + | gzip -9n > "${OUTDIR}/${DISTNAME}-${HOST}-debug.tar.gz" \ + || ( rm -f "${OUTDIR}/${DISTNAME}-${HOST}-debug.tar.gz" && exit 1 ) + ;; + *darwin*) + find "${DISTNAME}" -print0 \ + | sort --zero-terminated \ + | tar --create --no-recursion --mode='u+rw,go+r-w,a+X' --null --files-from=- \ + | gzip -9n > "${OUTDIR}/${DISTNAME}-${HOST}.tar.gz" \ + || ( rm -f "${OUTDIR}/${DISTNAME}-${HOST}.tar.gz" && exit 1 ) + ;; + esac + ) # $DISTSRC/installed + + case "$HOST" in + *mingw*) + cp -rf --target-directory=. contrib/windeploy + ( + cd ./windeploy + mkdir -p unsigned + cp --target-directory=unsigned/ "${OUTDIR}/${DISTNAME}-win64-setup-unsigned.exe" + find . 
-print0 \ + | sort --zero-terminated \ + | tar --create --no-recursion --mode='u+rw,go+r-w,a+X' --null --files-from=- \ + | gzip -9n > "${OUTDIR}/${DISTNAME}-win64-unsigned.tar.gz" \ + || ( rm -f "${OUTDIR}/${DISTNAME}-win64-unsigned.tar.gz" && exit 1 ) + ) + ;; + esac +) # $DISTSRC + +rm -rf "$ACTUAL_OUTDIR" +mv --no-target-directory "$OUTDIR" "$ACTUAL_OUTDIR" \ + || ( rm -rf "$ACTUAL_OUTDIR" && exit 1 ) + +( + cd /outdir-base + { + echo "$GIT_ARCHIVE" + find "$ACTUAL_OUTDIR" -type f + } | xargs realpath --relative-base="$PWD" \ + | xargs sha256sum \ + | sort -k2 \ + | sponge "$ACTUAL_OUTDIR"/SHA256SUMS.part +) diff --git a/contrib/guix/libexec/codesign.sh b/contrib/guix/libexec/codesign.sh new file mode 100755 index 0000000000000..9a5d3a1ce5428 --- /dev/null +++ b/contrib/guix/libexec/codesign.sh @@ -0,0 +1,113 @@ +#!/usr/bin/env bash +# Copyright (c) 2021 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +export LC_ALL=C +set -e -o pipefail +export TZ=UTC + +# Although Guix _does_ set umask when building its own packages (in our case, +# this is all packages in manifest.scm), it does not set it for `guix +# environment`. It does make sense for at least `guix environment --container` +# to set umask, so if that change gets merged upstream and we bump the +# time-machine to a commit which includes the aforementioned change, we can +# remove this line. +# +# This line should be placed before any commands which creates files. +umask 0022 + +if [ -n "$V" ]; then + # Print both unexpanded (-v) and expanded (-x) forms of commands as they are + # read from this file. + set -vx + # Set VERBOSE for CMake-based builds + export VERBOSE="$V" +fi + +# Check that required environment variables are set +cat << EOF +Required environment variables as seen inside the container: + UNSIGNED_TARBALL: ${UNSIGNED_TARBALL:?not set} + DETACHED_SIGS_REPO: ${DETACHED_SIGS_REPO:?not set} + DIST_ARCHIVE_BASE: ${DIST_ARCHIVE_BASE:?not set} + DISTNAME: ${DISTNAME:?not set} + HOST: ${HOST:?not set} + SOURCE_DATE_EPOCH: ${SOURCE_DATE_EPOCH:?not set} + DISTSRC: ${DISTSRC:?not set} + OUTDIR: ${OUTDIR:?not set} +EOF + +ACTUAL_OUTDIR="${OUTDIR}" +OUTDIR="${DISTSRC}/output" + +git_head_version() { + local recent_tag + if recent_tag="$(git -C "$1" describe --exact-match HEAD 2> /dev/null)"; then + echo "${recent_tag#v}" + else + git -C "$1" rev-parse --short=12 HEAD + fi +} + +CODESIGNATURE_GIT_ARCHIVE="${DIST_ARCHIVE_BASE}/${DISTNAME}-codesignatures-$(git_head_version "$DETACHED_SIGS_REPO").tar.gz" + +# Create the codesignature tarball if not already there +if [ ! 
-e "$CODESIGNATURE_GIT_ARCHIVE" ]; then + mkdir -p "$(dirname "$CODESIGNATURE_GIT_ARCHIVE")" + git -C "$DETACHED_SIGS_REPO" archive --output="$CODESIGNATURE_GIT_ARCHIVE" HEAD +fi + +mkdir -p "$OUTDIR" + +mkdir -p "$DISTSRC" +( + cd "$DISTSRC" + + tar -xf "$UNSIGNED_TARBALL" + + mkdir -p codesignatures + tar -C codesignatures -xf "$CODESIGNATURE_GIT_ARCHIVE" + + case "$HOST" in + *mingw*) + find "$PWD" -name "*-unsigned.exe" | while read -r infile; do + infile_base="$(basename "$infile")" + + # Codesigned *-unsigned.exe and output to OUTDIR + osslsigncode attach-signature \ + -in "$infile" \ + -out "${OUTDIR}/${infile_base/-unsigned}" \ + -sigin codesignatures/win/"$infile_base".pem + done + ;; + *darwin*) + # Apply detached codesignatures to dist/ (in-place) + signapple apply dist/Bitcoin-Qt.app codesignatures/osx/dist + + # Make a DMG from dist/ + xorrisofs -D -l -V "$(< osx_volname)" -no-pad -r -dir-mode 0755 \ + -o "${OUTDIR}/${DISTNAME}-${HOST}.dmg" \ + dist \ + -- -volume_date all_file_dates ="$SOURCE_DATE_EPOCH" + ;; + *) + exit 1 + ;; + esac +) # $DISTSRC + +rm -rf "$ACTUAL_OUTDIR" +mv --no-target-directory "$OUTDIR" "$ACTUAL_OUTDIR" \ + || ( rm -rf "$ACTUAL_OUTDIR" && exit 1 ) + +( + cd /outdir-base + { + echo "$UNSIGNED_TARBALL" + echo "$CODESIGNATURE_GIT_ARCHIVE" + find "$ACTUAL_OUTDIR" -type f + } | xargs realpath --relative-base="$PWD" \ + | xargs sha256sum \ + | sort -k2 \ + | sponge "$ACTUAL_OUTDIR"/SHA256SUMS.part +) diff --git a/contrib/guix/libexec/prelude.bash b/contrib/guix/libexec/prelude.bash new file mode 100644 index 0000000000000..3eb8fc02dae65 --- /dev/null +++ b/contrib/guix/libexec/prelude.bash @@ -0,0 +1,82 @@ +#!/usr/bin/env bash +export LC_ALL=C +set -e -o pipefail + +# shellcheck source=contrib/shell/realpath.bash +source contrib/shell/realpath.bash + +# shellcheck source=contrib/shell/git-utils.bash +source contrib/shell/git-utils.bash + +################ +# Required non-builtin commands should be invocable +################ + +check_tools() { + for cmd in "$@"; do + if ! command -v "$cmd" > /dev/null 2>&1; then + echo "ERR: This script requires that '$cmd' is installed and available in your \$PATH" + exit 1 + fi + done +} + +check_tools cat env readlink dirname basename git + +################ +# We should be at the top directory of the repository +################ + +same_dir() { + local resolved1 resolved2 + resolved1="$(bash_realpath "${1}")" + resolved2="$(bash_realpath "${2}")" + [ "$resolved1" = "$resolved2" ] +} + +if ! same_dir "${PWD}" "$(git_root)"; then +cat << EOF +ERR: This script must be invoked from the top level of the git repository + +Hint: This may look something like: + env FOO=BAR ./contrib/guix/guix- + +EOF +exit 1 +fi + +################ +# Execute "$@" in a pinned, possibly older version of Guix, for reproducibility +# across time. 
+time-machine() { + # shellcheck disable=SC2086 + guix time-machine --url=https://git.savannah.gnu.org/git/guix.git \ + --commit=998eda3067c7d21e0d9bb3310d2f5a14b8f1c681 \ + --cores="$JOBS" \ + --keep-failed \ + --fallback \ + ${SUBSTITUTE_URLS:+--substitute-urls="$SUBSTITUTE_URLS"} \ + ${ADDITIONAL_GUIX_COMMON_FLAGS} ${ADDITIONAL_GUIX_TIMEMACHINE_FLAGS} \ + -- "$@" +} + + +################ +# Set common variables +################ + +VERSION="${FORCE_VERSION:-$(git_head_version)}" +DISTNAME="${DISTNAME:-bitcoin-${VERSION}}" + +version_base_prefix="${PWD}/guix-build-" +VERSION_BASE="${version_base_prefix}${VERSION}" # TOP + +DISTSRC_BASE="${DISTSRC_BASE:-${VERSION_BASE}}" + +OUTDIR_BASE="${OUTDIR_BASE:-${VERSION_BASE}/output}" + +var_base_basename="var" +VAR_BASE="${VAR_BASE:-${VERSION_BASE}/${var_base_basename}}" + +profiles_base_basename="profiles" +PROFILES_BASE="${PROFILES_BASE:-${VAR_BASE}/${profiles_base_basename}}" diff --git a/contrib/guix/manifest.scm b/contrib/guix/manifest.scm new file mode 100644 index 0000000000000..34a9c608db2bd --- /dev/null +++ b/contrib/guix/manifest.scm @@ -0,0 +1,615 @@ +(use-modules (gnu) + (gnu packages) + (gnu packages autotools) + (gnu packages base) + (gnu packages bash) + (gnu packages bison) + (gnu packages certs) + (gnu packages cdrom) + (gnu packages check) + (gnu packages cmake) + (gnu packages commencement) + (gnu packages compression) + (gnu packages cross-base) + (gnu packages curl) + (gnu packages file) + (gnu packages gawk) + (gnu packages gcc) + (gnu packages gnome) + (gnu packages installers) + (gnu packages linux) + (gnu packages llvm) + (gnu packages mingw) + (gnu packages moreutils) + (gnu packages perl) + (gnu packages pkg-config) + (gnu packages python) + (gnu packages python-crypto) + (gnu packages python-web) + (gnu packages shells) + (gnu packages tls) + (gnu packages version-control) + (guix build-system gnu) + (guix build-system python) + (guix build-system trivial) + (guix download) + (guix gexp) + (guix git-download) + ((guix licenses) #:prefix license:) + (guix packages) + (guix profiles) + (guix utils)) + +(define-syntax-rule (search-our-patches file-name ...) + "Return the list of absolute file names corresponding to each +FILE-NAME found in ./patches relative to the current file." + (parameterize + ((%patch-path (list (string-append (dirname (current-filename)) "/patches")))) + (list (search-patch file-name) ...))) + +(define (make-ssp-fixed-gcc xgcc) + "Given a XGCC package, return a modified package that uses the SSP function +from glibc instead of from libssp.so. Our `symbol-check' script will complain if +we link against libssp.so, and thus will ensure that this works properly. 
+ +Taken from: +http://www.linuxfromscratch.org/hlfs/view/development/chapter05/gcc-pass1.html" + (package + (inherit xgcc) + (arguments + (substitute-keyword-arguments (package-arguments xgcc) + ((#:make-flags flags) + `(cons "gcc_cv_libc_provides_ssp=yes" ,flags)))))) + +(define (make-gcc-rpath-link xgcc) + "Given a XGCC package, return a modified package that replace each instance of +-rpath in the default system spec that's inserted by Guix with -rpath-link" + (package + (inherit xgcc) + (arguments + (substitute-keyword-arguments (package-arguments xgcc) + ((#:phases phases) + `(modify-phases ,phases + (add-after 'pre-configure 'replace-rpath-with-rpath-link + (lambda _ + (substitute* (cons "gcc/config/rs6000/sysv4.h" + (find-files "gcc/config" + "^gnu-user.*\\.h$")) + (("-rpath=") "-rpath-link=")) + #t)))))))) + +(define (make-cross-toolchain target + base-gcc-for-libc + base-kernel-headers + base-libc + base-gcc) + "Create a cross-compilation toolchain package for TARGET" + (let* ((xbinutils (cross-binutils target)) + ;; 1. Build a cross-compiling gcc without targeting any libc, derived + ;; from BASE-GCC-FOR-LIBC + (xgcc-sans-libc (cross-gcc target + #:xgcc base-gcc-for-libc + #:xbinutils xbinutils)) + ;; 2. Build cross-compiled kernel headers with XGCC-SANS-LIBC, derived + ;; from BASE-KERNEL-HEADERS + (xkernel (cross-kernel-headers target + base-kernel-headers + xgcc-sans-libc + xbinutils)) + ;; 3. Build a cross-compiled libc with XGCC-SANS-LIBC and XKERNEL, + ;; derived from BASE-LIBC + (xlibc (cross-libc target + base-libc + xgcc-sans-libc + xbinutils + xkernel)) + ;; 4. Build a cross-compiling gcc targeting XLIBC, derived from + ;; BASE-GCC + (xgcc (cross-gcc target + #:xgcc base-gcc + #:xbinutils xbinutils + #:libc xlibc))) + ;; Define a meta-package that propagates the resulting XBINUTILS, XLIBC, and + ;; XGCC + (package + (name (string-append target "-toolchain")) + (version (package-version xgcc)) + (source #f) + (build-system trivial-build-system) + (arguments '(#:builder (begin (mkdir %output) #t))) + (propagated-inputs + `(("binutils" ,xbinutils) + ("libc" ,xlibc) + ("libc:static" ,xlibc "static") + ("gcc" ,xgcc) + ("gcc-lib" ,xgcc "lib"))) + (synopsis (string-append "Complete GCC tool chain for " target)) + (description (string-append "This package provides a complete GCC tool +chain for " target " development.")) + (home-page (package-home-page xgcc)) + (license (package-license xgcc))))) + +(define base-gcc gcc-10) +(define base-linux-kernel-headers linux-libre-headers-5.15) + +;; Building glibc with stack smashing protector first landed in glibc 2.25, use +;; this function to disable for older glibcs +;; +;; From glibc 2.25 changelog: +;; +;; * Most of glibc can now be built with the stack smashing protector enabled. +;; It is recommended to build glibc with --enable-stack-protector=strong. +;; Implemented by Nick Alcock (Oracle). +(define (make-glibc-without-ssp xglibc) + (package-with-extra-configure-variable + (package-with-extra-configure-variable + xglibc "libc_cv_ssp" "no") + "libc_cv_ssp_strong" "no")) + +(define* (make-bitcoin-cross-toolchain target + #:key + (base-gcc-for-libc base-gcc) + (base-kernel-headers base-linux-kernel-headers) + (base-libc (make-glibc-without-ssp (make-glibc-without-werror glibc-2.24))) + (base-gcc (make-gcc-rpath-link base-gcc))) + "Convenience wrapper around MAKE-CROSS-TOOLCHAIN with default values +desirable for building Bitcoin Core release binaries." 
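A hedged sketch of how this convenience wrapper gets exercised: the manifest at the end of this file selects a toolchain based on the HOST environment variable, so entering the pinned environment for one cross target looks roughly like the following (the exact `guix environment` invocation used by the build scripts may differ):

    # Instantiate the manifest for a riscv64 target and confirm the cross
    # compiler produced by make-bitcoin-cross-toolchain is on PATH.
    env HOST=riscv64-linux-gnu \
        guix time-machine --commit=998eda3067c7d21e0d9bb3310d2f5a14b8f1c681 -- \
        environment --manifest=contrib/guix/manifest.scm --pure \
        -- riscv64-linux-gnu-gcc --version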
+ (make-cross-toolchain target + base-gcc-for-libc + base-kernel-headers + base-libc + base-gcc)) + +(define (make-gcc-with-pthreads gcc) + (package-with-extra-configure-variable gcc "--enable-threads" "posix")) + +(define (make-mingw-w64-cross-gcc cross-gcc) + (package-with-extra-patches cross-gcc + (search-our-patches "vmov-alignment.patch" + "gcc-broken-longjmp.patch"))) + +(define (make-mingw-pthreads-cross-toolchain target) + "Create a cross-compilation toolchain package for TARGET" + (let* ((xbinutils (cross-binutils target)) + (pthreads-xlibc mingw-w64-x86_64-winpthreads) + (pthreads-xgcc (make-gcc-with-pthreads + (cross-gcc target + #:xgcc (make-ssp-fixed-gcc (make-mingw-w64-cross-gcc base-gcc)) + #:xbinutils xbinutils + #:libc pthreads-xlibc)))) + ;; Define a meta-package that propagates the resulting XBINUTILS, XLIBC, and + ;; XGCC + (package + (name (string-append target "-posix-toolchain")) + (version (package-version pthreads-xgcc)) + (source #f) + (build-system trivial-build-system) + (arguments '(#:builder (begin (mkdir %output) #t))) + (propagated-inputs + `(("binutils" ,xbinutils) + ("libc" ,pthreads-xlibc) + ("gcc" ,pthreads-xgcc) + ("gcc-lib" ,pthreads-xgcc "lib"))) + (synopsis (string-append "Complete GCC tool chain for " target)) + (description (string-append "This package provides a complete GCC tool +chain for " target " development.")) + (home-page (package-home-page pthreads-xgcc)) + (license (package-license pthreads-xgcc))))) + +(define (make-nsis-for-gcc-10 base-nsis) + (package-with-extra-patches base-nsis + (search-our-patches "nsis-gcc-10-memmove.patch"))) + +(define-public lief + (package + (name "python-lief") + (version "0.12.0") + (source + (origin + (method git-fetch) + (uri (git-reference + (url "https://github.com/lief-project/LIEF.git") + (commit version))) + (file-name (git-file-name name version)) + (sha256 + (base32 + "026jchj56q25v6gc0754dj9cj5hz5zaza8ij93y5ga94w20kzm9q")))) + (build-system python-build-system) + (native-inputs + `(("cmake" ,cmake))) + (home-page "https://github.com/lief-project/LIEF") + (synopsis "Library to Instrument Executable Formats") + (description "Python library to to provide a cross platform library which can +parse, modify and abstract ELF, PE and MachO formats.") + (license license:asl2.0))) + +(define osslsigncode + (package + (name "osslsigncode") + (version "2.0") + (source (origin + (method url-fetch) + (uri (string-append "https://github.com/mtrojnar/" + name "/archive/" version ".tar.gz")) + (sha256 + (base32 + "0byri6xny770wwb2nciq44j5071122l14bvv65axdd70nfjf0q2s")))) + (build-system gnu-build-system) + (native-inputs + `(("pkg-config" ,pkg-config) + ("autoconf" ,autoconf) + ("automake" ,automake) + ("libtool" ,libtool))) + (inputs + `(("openssl" ,openssl))) + (arguments + `(#:configure-flags + `("--without-gsf" + "--without-curl" + "--disable-dependency-tracking"))) + (home-page "https://github.com/mtrojnar/osslsigncode") + (synopsis "Authenticode signing and timestamping tool") + (description "osslsigncode is a small tool that implements part of the +functionality of the Microsoft tool signtool.exe - more exactly the Authenticode +signing and timestamping. 
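For context, a hedged usage sketch of this tool as it is driven by the codesigning script earlier in this patchset; the file names are placeholders:

    # Attach a detached Authenticode signature, then verify the result.
    osslsigncode attach-signature \
        -in bitcoin-setup-unsigned.exe \
        -out bitcoin-setup.exe \
        -sigin codesignatures/win/bitcoin-setup-unsigned.exe.pem
    osslsigncode verify -in bitcoin-setup.exe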
But osslsigncode is based on OpenSSL and cURL, and +thus should be able to compile on most platforms where these exist.") + (license license:gpl3+))) ; license is with openssl exception + +(define-public python-elfesteem + (let ((commit "87bbd79ab7e361004c98cc8601d4e5f029fd8bd5")) + (package + (name "python-elfesteem") + (version (git-version "0.1" "1" commit)) + (source + (origin + (method git-fetch) + (uri (git-reference + (url "https://github.com/LRGH/elfesteem") + (commit commit))) + (file-name (git-file-name name commit)) + (sha256 + (base32 + "1nyvjisvyxyxnd0023xjf5846xd03lwawp5pfzr8vrky7wwm5maz")) + (patches (search-our-patches "elfsteem-value-error-python-39.patch")))) + (build-system python-build-system) + ;; There are no tests, but attempting to run python setup.py test leads to + ;; PYTHONPATH problems, just disable the test + (arguments '(#:tests? #f)) + (home-page "https://github.com/LRGH/elfesteem") + (synopsis "ELF/PE/Mach-O parsing library") + (description "elfesteem parses ELF, PE and Mach-O files.") + (license license:lgpl2.1)))) + +(define-public python-oscrypto + (package + (name "python-oscrypto") + (version "1.2.1") + (source + (origin + (method git-fetch) + (uri (git-reference + (url "https://github.com/wbond/oscrypto") + (commit version))) + (file-name (git-file-name name version)) + (sha256 + (base32 + "1d4d8s4z340qhvb3g5m5v3436y3a71yc26wk4749q64m09kxqc3l")) + (patches (search-our-patches "oscrypto-hard-code-openssl.patch")))) + (build-system python-build-system) + (native-search-paths + (list (search-path-specification + (variable "SSL_CERT_FILE") + (file-type 'regular) + (separator #f) ;single entry + (files '("etc/ssl/certs/ca-certificates.crt"))))) + + (propagated-inputs + `(("python-asn1crypto" ,python-asn1crypto) + ("openssl" ,openssl))) + (arguments + `(#:phases + (modify-phases %standard-phases + (add-after 'unpack 'hard-code-path-to-libscrypt + (lambda* (#:key inputs #:allow-other-keys) + (let ((openssl (assoc-ref inputs "openssl"))) + (substitute* "oscrypto/__init__.py" + (("@GUIX_OSCRYPTO_USE_OPENSSL@") + (string-append openssl "/lib/libcrypto.so" "," openssl "/lib/libssl.so"))) + #t))) + (add-after 'unpack 'disable-broken-tests + (lambda _ + ;; This test is broken as there is no keyboard interrupt. + (substitute* "tests/test_trust_list.py" + (("^(.*)class TrustListTests" line indent) + (string-append indent + "@unittest.skip(\"Disabled by Guix\")\n" + line))) + (substitute* "tests/test_tls.py" + (("^(.*)class TLSTests" line indent) + (string-append indent + "@unittest.skip(\"Disabled by Guix\")\n" + line))) + #t)) + (replace 'check + (lambda _ + (invoke "python" "run.py" "tests") + #t))))) + (home-page "https://github.com/wbond/oscrypto") + (synopsis "Compiler-free Python crypto library backed by the OS") + (description "oscrypto is a compilation-free, always up-to-date encryption library for Python.") + (license license:expat))) + +(define-public python-oscryptotests + (package (inherit python-oscrypto) + (name "python-oscryptotests") + (propagated-inputs + `(("python-oscrypto" ,python-oscrypto))) + (arguments + `(#:tests? 
#f + #:phases + (modify-phases %standard-phases + (add-after 'unpack 'hard-code-path-to-libscrypt + (lambda* (#:key inputs #:allow-other-keys) + (chdir "tests") + #t))))))) + +(define-public python-certvalidator + (let ((commit "a145bf25eb75a9f014b3e7678826132efbba6213")) + (package + (name "python-certvalidator") + (version (git-version "0.1" "1" commit)) + (source + (origin + (method git-fetch) + (uri (git-reference + (url "https://github.com/achow101/certvalidator") + (commit commit))) + (file-name (git-file-name name commit)) + (sha256 + (base32 + "1qw2k7xis53179lpqdqyylbcmp76lj7sagp883wmxg5i7chhc96k")))) + (build-system python-build-system) + (propagated-inputs + `(("python-asn1crypto" ,python-asn1crypto) + ("python-oscrypto" ,python-oscrypto) + ("python-oscryptotests", python-oscryptotests))) ;; certvalidator tests import oscryptotests + (arguments + `(#:phases + (modify-phases %standard-phases + (add-after 'unpack 'disable-broken-tests + (lambda _ + (substitute* "tests/test_certificate_validator.py" + (("^(.*)class CertificateValidatorTests" line indent) + (string-append indent + "@unittest.skip(\"Disabled by Guix\")\n" + line))) + (substitute* "tests/test_crl_client.py" + (("^(.*)def test_fetch_crl" line indent) + (string-append indent + "@unittest.skip(\"Disabled by Guix\")\n" + line))) + (substitute* "tests/test_ocsp_client.py" + (("^(.*)def test_fetch_ocsp" line indent) + (string-append indent + "@unittest.skip(\"Disabled by Guix\")\n" + line))) + (substitute* "tests/test_registry.py" + (("^(.*)def test_build_paths" line indent) + (string-append indent + "@unittest.skip(\"Disabled by Guix\")\n" + line))) + (substitute* "tests/test_validate.py" + (("^(.*)def test_revocation_mode_hard" line indent) + (string-append indent + "@unittest.skip(\"Disabled by Guix\")\n" + line))) + #t)) + (replace 'check + (lambda _ + (invoke "python" "run.py" "tests") + #t))))) + (home-page "https://github.com/wbond/certvalidator") + (synopsis "Python library for validating X.509 certificates and paths") + (description "certvalidator is a Python library for validating X.509 +certificates or paths. Supports various options, including: validation at a +specific moment in time, whitelisting and revocation checks.") + (license license:expat)))) + +(define-public python-altgraph + (package + (name "python-altgraph") + (version "0.17") + (source + (origin + (method git-fetch) + (uri (git-reference + (url "https://github.com/ronaldoussoren/altgraph") + (commit (string-append "v" version)))) + (file-name (git-file-name name version)) + (sha256 + (base32 + "09sm4srvvkw458pn48ga9q7ykr4xlz7q8gh1h9w7nxpf001qgpwb")))) + (build-system python-build-system) + (home-page "https://github.com/ronaldoussoren/altgraph") + (synopsis "Python graph (network) package") + (description "altgraph is a fork of graphlib: a graph (network) package for +constructing graphs, BFS and DFS traversals, topological sort, shortest paths, +etc. 
with graphviz output.") + (license license:expat))) + + +(define-public python-macholib + (package + (name "python-macholib") + (version "1.14") + (source + (origin + (method git-fetch) + (uri (git-reference + (url "https://github.com/ronaldoussoren/macholib") + (commit (string-append "v" version)))) + (file-name (git-file-name name version)) + (sha256 + (base32 + "0aislnnfsza9wl4f0vp45ivzlc0pzhp9d4r08700slrypn5flg42")))) + (build-system python-build-system) + (propagated-inputs + `(("python-altgraph" ,python-altgraph))) + (arguments + '(#:phases + (modify-phases %standard-phases + (add-after 'unpack 'disable-broken-tests + (lambda _ + ;; This test is broken as there is no keyboard interrupt. + (substitute* "macholib_tests/test_command_line.py" + (("^(.*)class TestCmdLine" line indent) + (string-append indent + "@unittest.skip(\"Disabled by Guix\")\n" + line))) + (substitute* "macholib_tests/test_dyld.py" + (("^(.*)def test_\\S+_find" line indent) + (string-append indent + "@unittest.skip(\"Disabled by Guix\")\n" + line)) + (("^(.*)def testBasic" line indent) + (string-append indent + "@unittest.skip(\"Disabled by Guix\")\n" + line)) + ) + #t))))) + (home-page "https://github.com/ronaldoussoren/macholib") + (synopsis "Python library for analyzing and editing Mach-O headers") + (description "macholib is a Macho-O header analyzer and editor. It's +typically used as a dependency analysis tool, and also to rewrite dylib +references in Mach-O headers to be @executable_path relative. Though this tool +targets a platform specific file format, it is pure python code that is platform +and endian independent.") + (license license:expat))) + +(define-public python-signapple + (let ((commit "8a945a2e7583be2665cf3a6a89d665b70ecd1ab6")) + (package + (name "python-signapple") + (version (git-version "0.1" "1" commit)) + (source + (origin + (method git-fetch) + (uri (git-reference + (url "https://github.com/achow101/signapple") + (commit commit))) + (file-name (git-file-name name commit)) + (sha256 + (base32 + "0fr1hangvfyiwflca6jg5g8zvg3jc9qr7vd2c12ff89pznf38dlg")))) + (build-system python-build-system) + (propagated-inputs + `(("python-asn1crypto" ,python-asn1crypto) + ("python-oscrypto" ,python-oscrypto) + ("python-certvalidator" ,python-certvalidator) + ("python-elfesteem" ,python-elfesteem) + ("python-requests" ,python-requests) + ("python-macholib" ,python-macholib))) + ;; There are no tests, but attempting to run python setup.py test leads to + ;; problems, just disable the test + (arguments '(#:tests? 
#f)) + (home-page "https://github.com/achow101/signapple") + (synopsis "Mach-O binary signature tool") + (description "signapple is a Python tool for creating, verifying, and +inspecting signatures in Mach-O binaries.") + (license license:expat)))) + +(define (make-glibc-without-werror glibc) + (package-with-extra-configure-variable glibc "enable_werror" "no")) + +(define-public glibc-2.24 + (package + (inherit glibc-2.31) + (version "2.24") + (source (origin + (method git-fetch) + (uri (git-reference + (url "https://sourceware.org/git/glibc.git") + (commit "0d7f1ed30969886c8dde62fbf7d2c79967d4bace"))) + (file-name (git-file-name "glibc" "0d7f1ed30969886c8dde62fbf7d2c79967d4bace")) + (sha256 + (base32 + "0g5hryia5v1k0qx97qffgwzrz4lr4jw3s5kj04yllhswsxyjbic3")) + (patches (search-our-patches "glibc-ldd-x86_64.patch" + "glibc-versioned-locpath.patch" + "glibc-2.24-elfm-loadaddr-dynamic-rewrite.patch" + "glibc-2.24-no-build-time-cxx-header-run.patch" + "glibc-2.24-fcommon.patch")))))) + +(define-public glibc-2.27/bitcoin-patched + (package + (inherit glibc-2.31) + (version "2.27") + (source (origin + (method git-fetch) + (uri (git-reference + (url "https://sourceware.org/git/glibc.git") + (commit "23158b08a0908f381459f273a984c6fd328363cb"))) + (file-name (git-file-name "glibc" "23158b08a0908f381459f273a984c6fd328363cb")) + (sha256 + (base32 + "1b2n1gxv9f4fd5yy68qjbnarhf8mf4vmlxk10i3328c1w5pmp0ca")) + (patches (search-our-patches "glibc-ldd-x86_64.patch" + "glibc-2.27-riscv64-Use-__has_include-to-include-asm-syscalls.h.patch" + "glibc-2.27-dont-redefine-nss-database.patch")))))) + +(packages->manifest + (append + (list ;; The Basics + bash + which + coreutils + util-linux + ;; File(system) inspection + file + grep + diffutils + findutils + ;; File transformation + patch + gawk + sed + moreutils + ;; Compression and archiving + tar + bzip2 + gzip + xz + ;; Build tools + gnu-make + libtool + autoconf-2.71 + automake + pkg-config + bison + ;; Native GCC 10 toolchain + gcc-toolchain-10 + (list gcc-toolchain-10 "static") + ;; Scripting + perl + python-3 + ;; Git + git + ;; Tests + lief) + (let ((target (getenv "HOST"))) + (cond ((string-suffix? 
"-mingw32" target) + ;; Windows + (list zip + (make-mingw-pthreads-cross-toolchain "x86_64-w64-mingw32") + (make-nsis-for-gcc-10 nsis-x86_64) + osslsigncode)) + ((string-contains target "-linux-") + (list (cond ((string-contains target "riscv64-") + (make-bitcoin-cross-toolchain target + #:base-libc (make-glibc-without-werror glibc-2.27/bitcoin-patched) + #:base-kernel-headers base-linux-kernel-headers)) + (else + (make-bitcoin-cross-toolchain target))))) + ((string-contains target "darwin") + (list clang-toolchain-10 binutils cmake xorriso python-signapple)) + (else '()))))) diff --git a/contrib/guix/patches/elfsteem-value-error-python-39.patch b/contrib/guix/patches/elfsteem-value-error-python-39.patch new file mode 100644 index 0000000000000..21e1228afd83c --- /dev/null +++ b/contrib/guix/patches/elfsteem-value-error-python-39.patch @@ -0,0 +1,13 @@ +diff --git a/examples/otool.py b/examples/otool.py +index 2b8efc0..d797b2e 100755 +--- a/examples/otool.py ++++ b/examples/otool.py +@@ -342,7 +342,7 @@ if __name__ == '__main__': + try: + e = macho_init.MACHO(raw, + parseSymbols = False) +- except ValueError, err: ++ except ValueError as err: + print("%s:" %file) + print(" %s" % err) + continue diff --git a/contrib/guix/patches/gcc-broken-longjmp.patch b/contrib/guix/patches/gcc-broken-longjmp.patch new file mode 100644 index 0000000000000..1cfc0918b090a --- /dev/null +++ b/contrib/guix/patches/gcc-broken-longjmp.patch @@ -0,0 +1,68 @@ +commit eb5698897c52702498938592d7f76e67d126451f +Author: Eric Botcazou +Date: Wed May 5 22:48:51 2021 +0200 + + Fix PR target/100402 + + This is a regression for 64-bit Windows present from mainline down to the 9 + branch and introduced by the fix for PR target/99234. Again SEH, but with + a twist related to the way MinGW implements setjmp/longjmp, which turns out + to be piggybacked on SEH with recent versions of MinGW, i.e. the longjmp + performs a bona-fide unwinding of the stack, because it calls RtlUnwindEx + with the second argument initially passed to setjmp, which is the result of + __builtin_frame_address (0) in the MinGW header file: + + define setjmp(BUF) _setjmp((BUF), __builtin_frame_address (0)) + + This means that we directly expose the frame pointer to the SEH machinery + here (unlike with regular exception handling where we use an intermediate + CFA) and thus that we cannot do whatever we want with it. The old code + would leave it unaligned, i.e. not multiple of 16, whereas the new code + aligns it, but this breaks for some reason; at least it appears that a + .seh_setframe directive with 0 as second argument always works, so the + fix aligns it this way. + + gcc/ + PR target/100402 + * config/i386/i386.c (ix86_compute_frame_layout): For a SEH target, + always return the establisher frame for __builtin_frame_address (0). + gcc/testsuite/ + * gcc.c-torture/execute/20210505-1.c: New test. + +diff --git a/gcc/config/i386/i386.c b/gcc/config/i386/i386.c +index 2f838840e96..06ad1b2274e 100644 +--- a/gcc/config/i386/i386.c ++++ b/gcc/config/i386/i386.c +@@ -6356,12 +6356,29 @@ ix86_compute_frame_layout (void) + area, see the SEH code in config/i386/winnt.c for the rationale. */ + frame->hard_frame_pointer_offset = frame->sse_reg_save_offset; + +- /* If we can leave the frame pointer where it is, do so. Also, return ++ /* If we can leave the frame pointer where it is, do so; however return + the establisher frame for __builtin_frame_address (0) or else if the +- frame overflows the SEH maximum frame size. 
*/ ++ frame overflows the SEH maximum frame size. ++ ++ Note that the value returned by __builtin_frame_address (0) is quite ++ constrained, because setjmp is piggybacked on the SEH machinery with ++ recent versions of MinGW: ++ ++ # elif defined(__SEH__) ++ # if defined(__aarch64__) || defined(_ARM64_) ++ # define setjmp(BUF) _setjmp((BUF), __builtin_sponentry()) ++ # elif (__MINGW_GCC_VERSION < 40702) ++ # define setjmp(BUF) _setjmp((BUF), mingw_getsp()) ++ # else ++ # define setjmp(BUF) _setjmp((BUF), __builtin_frame_address (0)) ++ # endif ++ ++ and the second argument passed to _setjmp, if not null, is forwarded ++ to the TargetFrame parameter of RtlUnwindEx by longjmp (after it has ++ built an ExceptionRecord on the fly describing the setjmp buffer). */ + const HOST_WIDE_INT diff + = frame->stack_pointer_offset - frame->hard_frame_pointer_offset; +- if (diff <= 255) ++ if (diff <= 255 && !crtl->accesses_prior_frames) + { + /* The resulting diff will be a multiple of 16 lower than 255, + i.e. at most 240 as required by the unwind data structure. */ diff --git a/contrib/guix/patches/glibc-2.24-elfm-loadaddr-dynamic-rewrite.patch b/contrib/guix/patches/glibc-2.24-elfm-loadaddr-dynamic-rewrite.patch new file mode 100644 index 0000000000000..5c4d0c6ebe196 --- /dev/null +++ b/contrib/guix/patches/glibc-2.24-elfm-loadaddr-dynamic-rewrite.patch @@ -0,0 +1,62 @@ +https://sourceware.org/git/?p=glibc.git;a=commit;h=a68ba2f3cd3cbe32c1f31e13c20ed13487727b32 + +commit 6b02af31e9a721bb15a11380cd22d53b621711f8 +Author: Szabolcs Nagy +Date: Wed Oct 18 17:26:23 2017 +0100 + + [AARCH64] Rewrite elf_machine_load_address using _DYNAMIC symbol + + This patch rewrites aarch64 elf_machine_load_address to use special _DYNAMIC + symbol instead of _dl_start. + + The static address of _DYNAMIC symbol is stored in the first GOT entry. + Here is the change which makes this solution work (part of binutils 2.24): + https://sourceware.org/ml/binutils/2013-06/msg00248.html + + i386, x86_64 targets use the same method to do this as well. + + The original implementation relies on a trick that R_AARCH64_ABS32 relocation + being resolved at link time and the static address fits in the 32bits. + However, in LP64, normally, the address is defined to be 64 bit. + + Here is the C version one which should be portable in all cases. + + * sysdeps/aarch64/dl-machine.h (elf_machine_load_address): Use + _DYNAMIC symbol to calculate load address. + +diff --git a/sysdeps/aarch64/dl-machine.h b/sysdeps/aarch64/dl-machine.h +index e86d8b5b63..5a5b8a5de5 100644 +--- a/sysdeps/aarch64/dl-machine.h ++++ b/sysdeps/aarch64/dl-machine.h +@@ -49,26 +49,11 @@ elf_machine_load_address (void) + /* To figure out the load address we use the definition that for any symbol: + dynamic_addr(symbol) = static_addr(symbol) + load_addr + +- The choice of symbol is arbitrary. The static address we obtain +- by constructing a non GOT reference to the symbol, the dynamic +- address of the symbol we compute using adrp/add to compute the +- symbol's address relative to the PC. +- This depends on 32bit relocations being resolved at link time +- and that the static address fits in the 32bits. 
*/ +- +- ElfW(Addr) static_addr; +- ElfW(Addr) dynamic_addr; +- +- asm (" \n" +-" adrp %1, _dl_start; \n" +-" add %1, %1, #:lo12:_dl_start \n" +-" ldr %w0, 1f \n" +-" b 2f \n" +-"1: \n" +-" .word _dl_start \n" +-"2: \n" +- : "=r" (static_addr), "=r" (dynamic_addr)); +- return dynamic_addr - static_addr; ++ _DYNAMIC sysmbol is used here as its link-time address stored in ++ the special unrelocated first GOT entry. */ ++ ++ extern ElfW(Dyn) _DYNAMIC[] attribute_hidden; ++ return (ElfW(Addr)) &_DYNAMIC - elf_machine_dynamic (); + } + + /* Set up the loaded object described by L so its unrelocated PLT diff --git a/contrib/guix/patches/glibc-2.24-fcommon.patch b/contrib/guix/patches/glibc-2.24-fcommon.patch new file mode 100644 index 0000000000000..2bc32ede90560 --- /dev/null +++ b/contrib/guix/patches/glibc-2.24-fcommon.patch @@ -0,0 +1,32 @@ +commit 264a4a0dbe1f4369db315080034b500bed66016c +Author: fanquake +Date: Fri May 6 11:03:04 2022 +0100 + + build: use -fcommon to retain legacy behaviour with GCC 10 + + GCC 10 started using -fno-common by default, which causes issues with + the powerpc builds using gibc 2.24. A patch was commited to glibc to fix + the issue, 18363b4f010da9ba459b13310b113ac0647c2fcc but is non-trvial + to backport, and was broken in at least one way, see the followup in + commit 7650321ce037302bfc2f026aa19e0213b8d02fe6. + + For now, retain the legacy GCC behaviour by passing -fcommon when + building glibc. + + https://gcc.gnu.org/onlinedocs/gcc/Code-Gen-Options.html. + https://sourceware.org/git/?p=glibc.git;a=commit;h=18363b4f010da9ba459b13310b113ac0647c2fcc + https://sourceware.org/git/?p=glibc.git;a=commit;h=7650321ce037302bfc2f026aa19e0213b8d02fe6 + +diff --git a/Makeconfig b/Makeconfig +index ee379f5852..63c4a2f234 100644 +--- a/Makeconfig ++++ b/Makeconfig +@@ -824,7 +824,7 @@ ifeq "$(strip $(+cflags))" "" + +cflags := $(default_cflags) + endif # $(+cflags) == "" + +-+cflags += $(cflags-cpu) $(+gccwarn) $(+merge-constants) $(+math-flags) +++cflags += $(cflags-cpu) $(+gccwarn) $(+merge-constants) $(+math-flags) -fcommon + +gcc-nowarn := -w + + # Don't duplicate options if we inherited variables from the parent. diff --git a/contrib/guix/patches/glibc-2.24-no-build-time-cxx-header-run.patch b/contrib/guix/patches/glibc-2.24-no-build-time-cxx-header-run.patch new file mode 100644 index 0000000000000..11fe7fdc99c05 --- /dev/null +++ b/contrib/guix/patches/glibc-2.24-no-build-time-cxx-header-run.patch @@ -0,0 +1,100 @@ +https://sourceware.org/git/?p=glibc.git;a=commit;h=fc3e1337be1c6935ab58bd13520f97a535cf70cc + +commit dc23a45db566095e83ff0b7a57afc87fb5ca89a1 +Author: Florian Weimer +Date: Wed Sep 21 10:45:32 2016 +0200 + + Avoid running $(CXX) during build to obtain header file paths + + This reduces the build time somewhat and is particularly noticeable + during rebuilds with few code changes. + +diff --git a/Makerules b/Makerules +index 7e4077ee50..c338850de5 100644 +--- a/Makerules ++++ b/Makerules +@@ -121,14 +121,10 @@ ifneq (,$(CXX)) + # will be used instead of /usr/include/stdlib.h and /usr/include/math.h. 
+ before-compile := $(common-objpfx)cstdlib $(common-objpfx)cmath \ + $(before-compile) +-cstdlib=$(shell echo "\#include " | $(CXX) -M -MP -x c++ - \ +- | sed -n "/cstdlib:/{s/:$$//;p}") +-$(common-objpfx)cstdlib: $(cstdlib) ++$(common-objpfx)cstdlib: $(c++-cstdlib-header) + $(INSTALL_DATA) $< $@T + $(move-if-change) $@T $@ +-cmath=$(shell echo "\#include " | $(CXX) -M -MP -x c++ - \ +- | sed -n "/cmath:/{s/:$$//;p}") +-$(common-objpfx)cmath: $(cmath) ++$(common-objpfx)cmath: $(c++-cmath-header) + $(INSTALL_DATA) $< $@T + $(move-if-change) $@T $@ + endif +diff --git a/config.make.in b/config.make.in +index 95c6f36876..04a8b3ed7f 100644 +--- a/config.make.in ++++ b/config.make.in +@@ -45,6 +45,8 @@ defines = @DEFINES@ + sysheaders = @sysheaders@ + sysincludes = @SYSINCLUDES@ + c++-sysincludes = @CXX_SYSINCLUDES@ ++c++-cstdlib-header = @CXX_CSTDLIB_HEADER@ ++c++-cmath-header = @CXX_CMATH_HEADER@ + all-warnings = @all_warnings@ + enable-werror = @enable_werror@ + +diff --git a/configure b/configure +index 17625e1041..6ff252744b 100755 +--- a/configure ++++ b/configure +@@ -635,6 +635,8 @@ BISON + INSTALL_INFO + PERL + BASH_SHELL ++CXX_CMATH_HEADER ++CXX_CSTDLIB_HEADER + CXX_SYSINCLUDES + SYSINCLUDES + AUTOCONF +@@ -5054,6 +5056,18 @@ fi + + + ++# Obtain some C++ header file paths. This is used to make a local ++# copy of those headers in Makerules. ++if test -n "$CXX"; then ++ find_cxx_header () { ++ echo "#include <$1>" | $CXX -M -MP -x c++ - | sed -n "/$1:/{s/:\$//;p}" ++ } ++ CXX_CSTDLIB_HEADER="$(find_cxx_header cstdlib)" ++ CXX_CMATH_HEADER="$(find_cxx_header cmath)" ++fi ++ ++ ++ + # Test if LD_LIBRARY_PATH contains the notation for the current directory + # since this would lead to problems installing/building glibc. + # LD_LIBRARY_PATH contains the current directory if one of the following +diff --git a/configure.ac b/configure.ac +index 33bcd62180..9938ab0dc2 100644 +--- a/configure.ac ++++ b/configure.ac +@@ -1039,6 +1039,18 @@ fi + AC_SUBST(SYSINCLUDES) + AC_SUBST(CXX_SYSINCLUDES) + ++# Obtain some C++ header file paths. This is used to make a local ++# copy of those headers in Makerules. ++if test -n "$CXX"; then ++ find_cxx_header () { ++ echo "#include <$1>" | $CXX -M -MP -x c++ - | sed -n "/$1:/{s/:\$//;p}" ++ } ++ CXX_CSTDLIB_HEADER="$(find_cxx_header cstdlib)" ++ CXX_CMATH_HEADER="$(find_cxx_header cmath)" ++fi ++AC_SUBST(CXX_CSTDLIB_HEADER) ++AC_SUBST(CXX_CMATH_HEADER) ++ + # Test if LD_LIBRARY_PATH contains the notation for the current directory + # since this would lead to problems installing/building glibc. + # LD_LIBRARY_PATH contains the current directory if one of the following diff --git a/contrib/guix/patches/glibc-2.27-dont-redefine-nss-database.patch b/contrib/guix/patches/glibc-2.27-dont-redefine-nss-database.patch new file mode 100644 index 0000000000000..16a595d613c11 --- /dev/null +++ b/contrib/guix/patches/glibc-2.27-dont-redefine-nss-database.patch @@ -0,0 +1,87 @@ +commit 78a90c2f74a2012dd3eff302189e47ff6779a757 +Author: Andreas Schwab +Date: Fri Mar 2 23:07:14 2018 +0100 + + Fix multiple definitions of __nss_*_database (bug 22918) + + (cherry picked from commit eaf6753f8aac33a36deb98c1031d1bad7b593d2d) + +diff --git a/nscd/gai.c b/nscd/gai.c +index d081747797..576fd0045b 100644 +--- a/nscd/gai.c ++++ b/nscd/gai.c +@@ -45,3 +45,6 @@ + #ifdef HAVE_LIBIDN + # include + #endif ++ ++/* Some variables normally defined in libc. 
*/ ++service_user *__nss_hosts_database attribute_hidden; +diff --git a/nss/nsswitch.c b/nss/nsswitch.c +index d5e655974f..b0f0c11a3e 100644 +--- a/nss/nsswitch.c ++++ b/nss/nsswitch.c +@@ -62,7 +62,7 @@ static service_library *nss_new_service (name_database *database, + + /* Declare external database variables. */ + #define DEFINE_DATABASE(name) \ +- extern service_user *__nss_##name##_database attribute_hidden; \ ++ service_user *__nss_##name##_database attribute_hidden; \ + weak_extern (__nss_##name##_database) + #include "databases.def" + #undef DEFINE_DATABASE +diff --git a/nss/nsswitch.h b/nss/nsswitch.h +index eccb535ef5..63573b9ebc 100644 +--- a/nss/nsswitch.h ++++ b/nss/nsswitch.h +@@ -226,10 +226,10 @@ libc_hidden_proto (__nss_hostname_digits_dots) + #define MAX_NR_ADDRS 48 + + /* Prototypes for __nss_*_lookup2 functions. */ +-#define DEFINE_DATABASE(arg) \ +- service_user *__nss_##arg##_database attribute_hidden; \ +- int __nss_##arg##_lookup2 (service_user **, const char *, \ +- const char *, void **); \ ++#define DEFINE_DATABASE(arg) \ ++ extern service_user *__nss_##arg##_database attribute_hidden; \ ++ int __nss_##arg##_lookup2 (service_user **, const char *, \ ++ const char *, void **); \ + libc_hidden_proto (__nss_##arg##_lookup2) + #include "databases.def" + #undef DEFINE_DATABASE +diff --git a/posix/tst-rfc3484-2.c b/posix/tst-rfc3484-2.c +index f509534ca9..8c64ac59ff 100644 +--- a/posix/tst-rfc3484-2.c ++++ b/posix/tst-rfc3484-2.c +@@ -58,6 +58,7 @@ _res_hconf_init (void) + #undef USE_NSCD + #include "../sysdeps/posix/getaddrinfo.c" + ++service_user *__nss_hosts_database attribute_hidden; + + /* This is the beginning of the real test code. The above defines + (among other things) the function rfc3484_sort. */ +diff --git a/posix/tst-rfc3484-3.c b/posix/tst-rfc3484-3.c +index ae44087a10..1c61aaf844 100644 +--- a/posix/tst-rfc3484-3.c ++++ b/posix/tst-rfc3484-3.c +@@ -58,6 +58,7 @@ _res_hconf_init (void) + #undef USE_NSCD + #include "../sysdeps/posix/getaddrinfo.c" + ++service_user *__nss_hosts_database attribute_hidden; + + /* This is the beginning of the real test code. The above defines + (among other things) the function rfc3484_sort. */ +diff --git a/posix/tst-rfc3484.c b/posix/tst-rfc3484.c +index 7f191abbbc..8f45848e44 100644 +--- a/posix/tst-rfc3484.c ++++ b/posix/tst-rfc3484.c +@@ -58,6 +58,7 @@ _res_hconf_init (void) + #undef USE_NSCD + #include "../sysdeps/posix/getaddrinfo.c" + ++service_user *__nss_hosts_database attribute_hidden; + + /* This is the beginning of the real test code. The above defines + (among other things) the function rfc3484_sort. */ diff --git a/contrib/guix/patches/glibc-2.27-riscv64-Use-__has_include-to-include-asm-syscalls.h.patch b/contrib/guix/patches/glibc-2.27-riscv64-Use-__has_include-to-include-asm-syscalls.h.patch new file mode 100644 index 0000000000000..c0f8495c41de1 --- /dev/null +++ b/contrib/guix/patches/glibc-2.27-riscv64-Use-__has_include-to-include-asm-syscalls.h.patch @@ -0,0 +1,76 @@ +Note that this has been modified from the original commit, to use __has_include +instead of __has_include__, as the later was causing build failures with GCC 10. +See also: http://lists.busybox.net/pipermail/buildroot/2020-July/590376.html. + +https://sourceware.org/git/?p=glibc.git;a=commit;h=0b9c84906f653978fb8768c7ebd0ee14a47e662e + +From 562c52cc81a4e456a62e6455feb32732049e9070 Mon Sep 17 00:00:00 2001 +From: "H.J. 
Lu" +Date: Mon, 31 Dec 2018 09:26:42 -0800 +Subject: [PATCH] riscv: Use __has_include__ to include [BZ + #24022] + + has been removed by + +commit 27f8899d6002e11a6e2d995e29b8deab5aa9cc25 +Author: David Abdurachmanov +Date: Thu Nov 8 20:02:39 2018 +0100 + + riscv: add asm/unistd.h UAPI header + + Marcin Juszkiewicz reported issues while generating syscall table for riscv + using 4.20-rc1. The patch refactors our unistd.h files to match some other + architectures. + + - Add asm/unistd.h UAPI header, which has __ARCH_WANT_NEW_STAT only for 64-bit + - Remove asm/syscalls.h UAPI header and merge to asm/unistd.h + - Adjust kernel asm/unistd.h + + So now asm/unistd.h UAPI header should show all syscalls for riscv. + + may be restored by + +Subject: [PATCH] riscv: restore asm/syscalls.h UAPI header +Date: Tue, 11 Dec 2018 09:09:35 +0100 + +UAPI header asm/syscalls.h was merged into UAPI asm/unistd.h header, +which did resolve issue with missing syscalls macros resulting in +glibc (2.28) build failure. It also broke glibc in a different way: +asm/syscalls.h is being used by glibc. I noticed this while doing +Fedora 30/Rawhide mass rebuild. + +The patch returns asm/syscalls.h header and incl. it into asm/unistd.h. +I plan to send a patch to glibc to use asm/unistd.h instead of +asm/syscalls.h + +In the meantime, we use __has_include__, which was added to GCC 5, to +check if exists before including it. Tested with +build-many-glibcs.py for riscv against kernel 4.19.12 and 4.20-rc7. + + [BZ #24022] + * sysdeps/unix/sysv/linux/riscv/flush-icache.c: Check if + exists with __has_include__ before including it. +--- + sysdeps/unix/sysv/linux/riscv/flush-icache.c | 6 +++++- + 1 file changed, 5 insertions(+), 1 deletion(-) + +diff --git a/sysdeps/unix/sysv/linux/riscv/flush-icache.c b/sysdeps/unix/sysv/linux/riscv/flush-icache.c +index d612ef4c6c..0b2042620b 100644 +--- a/sysdeps/unix/sysv/linux/riscv/flush-icache.c ++++ b/sysdeps/unix/sysv/linux/riscv/flush-icache.c +@@ -21,7 +21,11 @@ + #include + #include + #include +-#include ++#if __has_include () ++# include ++#else ++# include ++#endif + + typedef int (*func_type) (void *, void *, unsigned long int); + +-- +2.31.1 + diff --git a/contrib/guix/patches/glibc-ldd-x86_64.patch b/contrib/guix/patches/glibc-ldd-x86_64.patch new file mode 100644 index 0000000000000..b1b6d5a54863c --- /dev/null +++ b/contrib/guix/patches/glibc-ldd-x86_64.patch @@ -0,0 +1,10 @@ +By default, 'RTDLLIST' in 'ldd' refers to 'lib64/ld-linux-x86-64.so', whereas +it's in 'lib/' for us. This patch fixes that. 
+ +--- glibc-2.17/sysdeps/unix/sysv/linux/x86_64/ldd-rewrite.sed 2012-12-25 04:02:13.000000000 +0100 ++++ glibc-2.17/sysdeps/unix/sysv/linux/x86_64/ldd-rewrite.sed 2013-09-15 23:08:03.000000000 +0200 +@@ -1,3 +1,3 @@ + /LD_TRACE_LOADED_OBJECTS=1/a\ + add_env="$add_env LD_LIBRARY_VERSION=\\$verify_out" +-s_^\(RTLDLIST=\)\(.*lib\)\(\|64\|x32\)\(/[^/]*\)\(-x86-64\|-x32\)\(\.so\.[0-9.]*\)[ ]*$_\1"\2\4\6 \264\4-x86-64\6 \2x32\4-x32\6"_ ++s_^\(RTLDLIST=\)\(.*lib\)\(\|64\|x32\)\(/[^/]*\)\(-x86-64\|-x32\)\(\.so\.[0-9.]*\)[ ]*$_\1"\2\4\6 \2\4-x86-64\6 \2x32\4-x32\6"_ diff --git a/contrib/guix/patches/glibc-versioned-locpath.patch b/contrib/guix/patches/glibc-versioned-locpath.patch new file mode 100644 index 0000000000000..bc7652127fa5a --- /dev/null +++ b/contrib/guix/patches/glibc-versioned-locpath.patch @@ -0,0 +1,240 @@ +The format of locale data can be incompatible between libc versions, and +loading incompatible data can lead to 'setlocale' returning EINVAL at best +or triggering an assertion failure at worst. See +https://lists.gnu.org/archive/html/guix-devel/2015-09/msg00717.html +for background information. + +To address that, this patch changes libc to honor a new 'GUIX_LOCPATH' +variable, and to look for locale data in version-specific sub-directories of +that variable. So, if GUIX_LOCPATH=/foo:/bar, locale data is searched for in +/foo/X.Y and /bar/X.Y, where X.Y is the libc version number. + +That way, a single 'GUIX_LOCPATH' setting can work even if different libc +versions coexist on the system. + +--- a/locale/newlocale.c ++++ b/locale/newlocale.c +@@ -30,6 +30,7 @@ + /* Lock for protecting global data. */ + __libc_rwlock_define (extern , __libc_setlocale_lock attribute_hidden) + ++extern error_t compute_locale_search_path (char **, size_t *); + + /* Use this when we come along an error. */ + #define ERROR_RETURN \ +@@ -48,7 +49,6 @@ __newlocale (int category_mask, const char *locale, __locale_t base) + __locale_t result_ptr; + char *locale_path; + size_t locale_path_len; +- const char *locpath_var; + int cnt; + size_t names_len; + +@@ -102,17 +102,8 @@ __newlocale (int category_mask, const char *locale, __locale_t base) + locale_path = NULL; + locale_path_len = 0; + +- locpath_var = getenv ("LOCPATH"); +- if (locpath_var != NULL && locpath_var[0] != '\0') +- { +- if (__argz_create_sep (locpath_var, ':', +- &locale_path, &locale_path_len) != 0) +- return NULL; +- +- if (__argz_add_sep (&locale_path, &locale_path_len, +- _nl_default_locale_path, ':') != 0) +- return NULL; +- } ++ if (compute_locale_search_path (&locale_path, &locale_path_len) != 0) ++ return NULL; + + /* Get the names for the locales we are interested in. We either + allow a composite name or a single name. */ +diff --git a/locale/setlocale.c b/locale/setlocale.c +index ead030d..0c0e314 100644 +--- a/locale/setlocale.c ++++ b/locale/setlocale.c +@@ -215,12 +215,65 @@ setdata (int category, struct __locale_data *data) + } + } + ++/* Return in *LOCALE_PATH and *LOCALE_PATH_LEN the locale data search path as ++ a colon-separated list. Return ENOMEN on error, zero otherwise. */ ++error_t ++compute_locale_search_path (char **locale_path, size_t *locale_path_len) ++{ ++ char* guix_locpath_var = getenv ("GUIX_LOCPATH"); ++ char *locpath_var = getenv ("LOCPATH"); ++ ++ if (guix_locpath_var != NULL && guix_locpath_var[0] != '\0') ++ { ++ /* Entries in 'GUIX_LOCPATH' take precedence over 'LOCPATH'. These ++ entries are systematically prefixed with "/X.Y" where "X.Y" is the ++ libc version. 
*/ ++ if (__argz_create_sep (guix_locpath_var, ':', ++ locale_path, locale_path_len) != 0 ++ || __argz_suffix_entries (locale_path, locale_path_len, ++ "/" VERSION) != 0) ++ goto bail_out; ++ } ++ ++ if (locpath_var != NULL && locpath_var[0] != '\0') ++ { ++ char *reg_locale_path = NULL; ++ size_t reg_locale_path_len = 0; ++ ++ if (__argz_create_sep (locpath_var, ':', ++ ®_locale_path, ®_locale_path_len) != 0) ++ goto bail_out; ++ ++ if (__argz_append (locale_path, locale_path_len, ++ reg_locale_path, reg_locale_path_len) != 0) ++ goto bail_out; ++ ++ free (reg_locale_path); ++ } ++ ++ if (*locale_path != NULL) ++ { ++ /* Append the system default locale directory. */ ++ if (__argz_add_sep (locale_path, locale_path_len, ++ _nl_default_locale_path, ':') != 0) ++ goto bail_out; ++ } ++ ++ return 0; ++ ++ bail_out: ++ free (*locale_path); ++ *locale_path = NULL; ++ *locale_path_len = 0; ++ ++ return ENOMEM; ++} ++ + char * + setlocale (int category, const char *locale) + { + char *locale_path; + size_t locale_path_len; +- const char *locpath_var; + char *composite; + + /* Sanity check for CATEGORY argument. */ +@@ -251,17 +304,10 @@ setlocale (int category, const char *locale) + locale_path = NULL; + locale_path_len = 0; + +- locpath_var = getenv ("LOCPATH"); +- if (locpath_var != NULL && locpath_var[0] != '\0') ++ if (compute_locale_search_path (&locale_path, &locale_path_len) != 0) + { +- if (__argz_create_sep (locpath_var, ':', +- &locale_path, &locale_path_len) != 0 +- || __argz_add_sep (&locale_path, &locale_path_len, +- _nl_default_locale_path, ':') != 0) +- { +- __libc_rwlock_unlock (__libc_setlocale_lock); +- return NULL; +- } ++ __libc_rwlock_unlock (__libc_setlocale_lock); ++ return NULL; + } + + if (category == LC_ALL) +diff --git a/string/Makefile b/string/Makefile +index 8424a61..f925503 100644 +--- a/string/Makefile ++++ b/string/Makefile +@@ -38,7 +38,7 @@ routines := strcat strchr strcmp strcoll strcpy strcspn \ + swab strfry memfrob memmem rawmemchr strchrnul \ + $(addprefix argz-,append count create ctsep next \ + delete extract insert stringify \ +- addsep replace) \ ++ addsep replace suffix) \ + envz basename \ + strcoll_l strxfrm_l string-inlines memrchr \ + xpg-strerror strerror_l +diff --git a/string/argz-suffix.c b/string/argz-suffix.c +new file mode 100644 +index 0000000..505b0f2 +--- /dev/null ++++ b/string/argz-suffix.c +@@ -0,0 +1,56 @@ ++/* Copyright (C) 2015 Free Software Foundation, Inc. ++ This file is part of the GNU C Library. ++ Contributed by Ludovic Courtès . ++ ++ The GNU C Library is free software; you can redistribute it and/or ++ modify it under the terms of the GNU Lesser General Public ++ License as published by the Free Software Foundation; either ++ version 2.1 of the License, or (at your option) any later version. ++ ++ The GNU C Library is distributed in the hope that it will be useful, ++ but WITHOUT ANY WARRANTY; without even the implied warranty of ++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ Lesser General Public License for more details. ++ ++ You should have received a copy of the GNU Lesser General Public ++ License along with the GNU C Library; if not, see ++ . 
*/ ++ ++#include ++#include ++#include ++#include ++ ++ ++error_t ++__argz_suffix_entries (char **argz, size_t *argz_len, const char *suffix) ++ ++{ ++ size_t suffix_len = strlen (suffix); ++ size_t count = __argz_count (*argz, *argz_len); ++ size_t new_argz_len = *argz_len + count * suffix_len; ++ char *new_argz = malloc (new_argz_len); ++ ++ if (new_argz) ++ { ++ char *p = new_argz, *entry; ++ ++ for (entry = *argz; ++ entry != NULL; ++ entry = argz_next (*argz, *argz_len, entry)) ++ { ++ p = stpcpy (p, entry); ++ p = stpcpy (p, suffix); ++ p++; ++ } ++ ++ free (*argz); ++ *argz = new_argz; ++ *argz_len = new_argz_len; ++ ++ return 0; ++ } ++ else ++ return ENOMEM; ++} ++weak_alias (__argz_suffix_entries, argz_suffix_entries) +diff --git a/string/argz.h b/string/argz.h +index bb62a31..d276a35 100644 +--- a/string/argz.h ++++ b/string/argz.h +@@ -134,6 +134,16 @@ extern error_t argz_replace (char **__restrict __argz, + const char *__restrict __str, + const char *__restrict __with, + unsigned int *__restrict __replace_count); ++ ++/* Suffix each entry of ARGZ & ARGZ_LEN with SUFFIX. Return 0 on success, ++ and ENOMEN if memory cannot be allocated. */ ++extern error_t __argz_suffix_entries (char **__restrict __argz, ++ size_t *__restrict __argz_len, ++ const char *__restrict __suffix); ++extern error_t argz_suffix_entries (char **__restrict __argz, ++ size_t *__restrict __argz_len, ++ const char *__restrict __suffix); ++ + + /* Returns the next entry in ARGZ & ARGZ_LEN after ENTRY, or NULL if there + are no more. If entry is NULL, then the first entry is returned. This diff --git a/contrib/guix/patches/nsis-gcc-10-memmove.patch b/contrib/guix/patches/nsis-gcc-10-memmove.patch new file mode 100644 index 0000000000000..a1aadfd4f3618 --- /dev/null +++ b/contrib/guix/patches/nsis-gcc-10-memmove.patch @@ -0,0 +1,23 @@ +commit f6df41524e703dc471e283e566a48e05a735b7f2 +Author: Anders +Date: Sat Jun 27 23:18:45 2020 +0000 + + Don't let GCC 10 generate memmove calls (bug #1248) + + git-svn-id: https://svn.code.sf.net/p/nsis/code/NSIS/trunk@7189 212acab6-be3b-0410-9dea-997c60f758d6 + +diff --git a/SCons/Config/gnu b/SCons/Config/gnu +index bfcb362d..21fa446b 100644 +--- a/SCons/Config/gnu ++++ b/SCons/Config/gnu +@@ -103,6 +103,10 @@ stub_env.Append(LINKFLAGS = ['$NODEFLIBS_FLAG']) # no standard libraries + stub_env.Append(LINKFLAGS = ['$ALIGN_FLAG']) # 512 bytes align + stub_env.Append(LINKFLAGS = ['$MAP_FLAG']) # generate map file + ++conf = FlagsConfigure(stub_env) ++conf.CheckCompileFlag('-fno-tree-loop-distribute-patterns') # GCC 10: Don't generate msvcrt!memmove calls (bug #1248) ++conf.Finish() ++ + stub_uenv = stub_env.Clone() + stub_uenv.Append(CPPDEFINES = ['_UNICODE', 'UNICODE']) + diff --git a/contrib/guix/patches/oscrypto-hard-code-openssl.patch b/contrib/guix/patches/oscrypto-hard-code-openssl.patch new file mode 100644 index 0000000000000..32027f2d09af1 --- /dev/null +++ b/contrib/guix/patches/oscrypto-hard-code-openssl.patch @@ -0,0 +1,13 @@ +diff --git a/oscrypto/__init__.py b/oscrypto/__init__.py +index eb27313..371ab24 100644 +--- a/oscrypto/__init__.py ++++ b/oscrypto/__init__.py +@@ -302,3 +302,8 @@ def load_order(): + 'oscrypto._win.tls', + 'oscrypto.tls', + ] ++ ++ ++paths = '@GUIX_OSCRYPTO_USE_OPENSSL@'.split(',') ++assert len(paths) == 2, 'Value for OSCRYPTO_USE_OPENSSL env var must be two paths separated by a comma' ++use_openssl(*paths) diff --git a/contrib/guix/patches/vmov-alignment.patch b/contrib/guix/patches/vmov-alignment.patch new file mode 100644 index 
0000000000000..072f76eafd38f --- /dev/null +++ b/contrib/guix/patches/vmov-alignment.patch @@ -0,0 +1,267 @@ +Description: Use unaligned VMOV instructions +Author: Stephen Kitt +Bug-Debian: https://bugs.debian.org/939559 + +Based on a patch originally by Claude Heiland-Allen + +--- a/gcc/config/i386/sse.md ++++ b/gcc/config/i386/sse.md +@@ -1058,17 +1058,11 @@ + { + if (FLOAT_MODE_P (GET_MODE_INNER (mode))) + { +- if (misaligned_operand (operands[1], mode)) +- return "vmovu\t{%1, %0%{%3%}%N2|%0%{%3%}%N2, %1}"; +- else +- return "vmova\t{%1, %0%{%3%}%N2|%0%{%3%}%N2, %1}"; ++ return "vmovu\t{%1, %0%{%3%}%N2|%0%{%3%}%N2, %1}"; + } + else + { +- if (misaligned_operand (operands[1], mode)) +- return "vmovdqu\t{%1, %0%{%3%}%N2|%0%{%3%}%N2, %1}"; +- else +- return "vmovdqa\t{%1, %0%{%3%}%N2|%0%{%3%}%N2, %1}"; ++ return "vmovdqu\t{%1, %0%{%3%}%N2|%0%{%3%}%N2, %1}"; + } + } + [(set_attr "type" "ssemov") +@@ -1184,17 +1178,11 @@ + { + if (FLOAT_MODE_P (GET_MODE_INNER (mode))) + { +- if (misaligned_operand (operands[0], mode)) +- return "vmovu\t{%1, %0%{%2%}|%0%{%2%}, %1}"; +- else +- return "vmova\t{%1, %0%{%2%}|%0%{%2%}, %1}"; ++ return "vmovu\t{%1, %0%{%2%}|%0%{%2%}, %1}"; + } + else + { +- if (misaligned_operand (operands[0], mode)) +- return "vmovdqu\t{%1, %0%{%2%}|%0%{%2%}, %1}"; +- else +- return "vmovdqa\t{%1, %0%{%2%}|%0%{%2%}, %1}"; ++ return "vmovdqu\t{%1, %0%{%2%}|%0%{%2%}, %1}"; + } + } + [(set_attr "type" "ssemov") +@@ -7806,7 +7794,7 @@ + "TARGET_SSE && !(MEM_P (operands[0]) && MEM_P (operands[1]))" + "@ + %vmovlps\t{%1, %0|%q0, %1} +- %vmovaps\t{%1, %0|%0, %1} ++ %vmovups\t{%1, %0|%0, %1} + %vmovlps\t{%1, %d0|%d0, %q1}" + [(set_attr "type" "ssemov") + (set_attr "prefix" "maybe_vex") +@@ -13997,29 +13985,15 @@ + switch (mode) + { + case E_V8DFmode: +- if (misaligned_operand (operands[2], mode)) +- return "vmovupd\t{%2, %x0|%x0, %2}"; +- else +- return "vmovapd\t{%2, %x0|%x0, %2}"; ++ return "vmovupd\t{%2, %x0|%x0, %2}"; + case E_V16SFmode: +- if (misaligned_operand (operands[2], mode)) +- return "vmovups\t{%2, %x0|%x0, %2}"; +- else +- return "vmovaps\t{%2, %x0|%x0, %2}"; ++ return "vmovups\t{%2, %x0|%x0, %2}"; + case E_V8DImode: +- if (misaligned_operand (operands[2], mode)) +- return which_alternative == 2 ? "vmovdqu64\t{%2, %x0|%x0, %2}" ++ return which_alternative == 2 ? "vmovdqu64\t{%2, %x0|%x0, %2}" + : "vmovdqu\t{%2, %x0|%x0, %2}"; +- else +- return which_alternative == 2 ? "vmovdqa64\t{%2, %x0|%x0, %2}" +- : "vmovdqa\t{%2, %x0|%x0, %2}"; + case E_V16SImode: +- if (misaligned_operand (operands[2], mode)) +- return which_alternative == 2 ? "vmovdqu32\t{%2, %x0|%x0, %2}" ++ return which_alternative == 2 ? "vmovdqu32\t{%2, %x0|%x0, %2}" + : "vmovdqu\t{%2, %x0|%x0, %2}"; +- else +- return which_alternative == 2 ? 
"vmovdqa32\t{%2, %x0|%x0, %2}" +- : "vmovdqa\t{%2, %x0|%x0, %2}"; + default: + gcc_unreachable (); + } +@@ -21225,63 +21199,27 @@ + switch (get_attr_mode (insn)) + { + case MODE_V16SF: +- if (misaligned_operand (operands[1], mode)) +- return "vmovups\t{%1, %t0|%t0, %1}"; +- else +- return "vmovaps\t{%1, %t0|%t0, %1}"; ++ return "vmovups\t{%1, %t0|%t0, %1}"; + case MODE_V8DF: +- if (misaligned_operand (operands[1], mode)) +- return "vmovupd\t{%1, %t0|%t0, %1}"; +- else +- return "vmovapd\t{%1, %t0|%t0, %1}"; ++ return "vmovupd\t{%1, %t0|%t0, %1}"; + case MODE_V8SF: +- if (misaligned_operand (operands[1], mode)) +- return "vmovups\t{%1, %x0|%x0, %1}"; +- else +- return "vmovaps\t{%1, %x0|%x0, %1}"; ++ return "vmovups\t{%1, %x0|%x0, %1}"; + case MODE_V4DF: +- if (misaligned_operand (operands[1], mode)) +- return "vmovupd\t{%1, %x0|%x0, %1}"; +- else +- return "vmovapd\t{%1, %x0|%x0, %1}"; ++ return "vmovupd\t{%1, %x0|%x0, %1}"; + case MODE_XI: +- if (misaligned_operand (operands[1], mode)) +- { +- if (which_alternative == 2) +- return "vmovdqu\t{%1, %t0|%t0, %1}"; +- else if (GET_MODE_SIZE (mode) == 8) +- return "vmovdqu64\t{%1, %t0|%t0, %1}"; +- else +- return "vmovdqu32\t{%1, %t0|%t0, %1}"; +- } ++ if (which_alternative == 2) ++ return "vmovdqu\t{%1, %t0|%t0, %1}"; ++ else if (GET_MODE_SIZE (mode) == 8) ++ return "vmovdqu64\t{%1, %t0|%t0, %1}"; + else +- { +- if (which_alternative == 2) +- return "vmovdqa\t{%1, %t0|%t0, %1}"; +- else if (GET_MODE_SIZE (mode) == 8) +- return "vmovdqa64\t{%1, %t0|%t0, %1}"; +- else +- return "vmovdqa32\t{%1, %t0|%t0, %1}"; +- } ++ return "vmovdqu32\t{%1, %t0|%t0, %1}"; + case MODE_OI: +- if (misaligned_operand (operands[1], mode)) +- { +- if (which_alternative == 2) +- return "vmovdqu\t{%1, %x0|%x0, %1}"; +- else if (GET_MODE_SIZE (mode) == 8) +- return "vmovdqu64\t{%1, %x0|%x0, %1}"; +- else +- return "vmovdqu32\t{%1, %x0|%x0, %1}"; +- } ++ if (which_alternative == 2) ++ return "vmovdqu\t{%1, %x0|%x0, %1}"; ++ else if (GET_MODE_SIZE (mode) == 8) ++ return "vmovdqu64\t{%1, %x0|%x0, %1}"; + else +- { +- if (which_alternative == 2) +- return "vmovdqa\t{%1, %x0|%x0, %1}"; +- else if (GET_MODE_SIZE (mode) == 8) +- return "vmovdqa64\t{%1, %x0|%x0, %1}"; +- else +- return "vmovdqa32\t{%1, %x0|%x0, %1}"; +- } ++ return "vmovdqu32\t{%1, %x0|%x0, %1}"; + default: + gcc_unreachable (); + } +--- a/gcc/config/i386/i386.c ++++ b/gcc/config/i386/i386.c +@@ -4981,13 +4981,13 @@ + switch (type) + { + case opcode_int: +- opcode = misaligned_p ? "vmovdqu32" : "vmovdqa32"; ++ opcode = "vmovdqu32"; + break; + case opcode_float: +- opcode = misaligned_p ? "vmovups" : "vmovaps"; ++ opcode = "vmovups"; + break; + case opcode_double: +- opcode = misaligned_p ? "vmovupd" : "vmovapd"; ++ opcode = "vmovupd"; + break; + } + } +@@ -4996,16 +4996,16 @@ + switch (scalar_mode) + { + case E_SFmode: +- opcode = misaligned_p ? "%vmovups" : "%vmovaps"; ++ opcode = "%vmovups"; + break; + case E_DFmode: +- opcode = misaligned_p ? "%vmovupd" : "%vmovapd"; ++ opcode = "%vmovupd"; + break; + case E_TFmode: + if (evex_reg_p) +- opcode = misaligned_p ? "vmovdqu64" : "vmovdqa64"; ++ opcode = "vmovdqu64"; + else +- opcode = misaligned_p ? "%vmovdqu" : "%vmovdqa"; ++ opcode = "%vmovdqu"; + break; + default: + gcc_unreachable (); +@@ -5017,48 +5017,32 @@ + { + case E_QImode: + if (evex_reg_p) +- opcode = (misaligned_p +- ? (TARGET_AVX512BW +- ? "vmovdqu8" +- : "vmovdqu64") +- : "vmovdqa64"); ++ opcode = TARGET_AVX512BW ? "vmovdqu8" : "vmovdqu64"; + else +- opcode = (misaligned_p +- ? (TARGET_AVX512BW +- ? 
"vmovdqu8" +- : "%vmovdqu") +- : "%vmovdqa"); ++ opcode = TARGET_AVX512BW ? "vmovdqu8" : "%vmovdqu"; + break; + case E_HImode: + if (evex_reg_p) +- opcode = (misaligned_p +- ? (TARGET_AVX512BW +- ? "vmovdqu16" +- : "vmovdqu64") +- : "vmovdqa64"); ++ opcode = TARGET_AVX512BW ? "vmovdqu16" : "vmovdqu64"; + else +- opcode = (misaligned_p +- ? (TARGET_AVX512BW +- ? "vmovdqu16" +- : "%vmovdqu") +- : "%vmovdqa"); ++ opcode = TARGET_AVX512BW ? "vmovdqu16" : "%vmovdqu"; + break; + case E_SImode: + if (evex_reg_p) +- opcode = misaligned_p ? "vmovdqu32" : "vmovdqa32"; ++ opcode = "vmovdqu32"; + else +- opcode = misaligned_p ? "%vmovdqu" : "%vmovdqa"; ++ opcode = "%vmovdqu"; + break; + case E_DImode: + case E_TImode: + case E_OImode: + if (evex_reg_p) +- opcode = misaligned_p ? "vmovdqu64" : "vmovdqa64"; ++ opcode = "vmovdqu64"; + else +- opcode = misaligned_p ? "%vmovdqu" : "%vmovdqa"; ++ opcode = "%vmovdqu"; + break; + case E_XImode: +- opcode = misaligned_p ? "vmovdqu64" : "vmovdqa64"; ++ opcode = "vmovdqu64"; + break; + default: + gcc_unreachable (); diff --git a/contrib/init/README.md b/contrib/init/README.md index d3fa9665839a2..affc7c2e75063 100644 --- a/contrib/init/README.md +++ b/contrib/init/README.md @@ -1,10 +1,12 @@ Sample configuration files for: - -SystemD: bitcoind.service +``` +systemd: bitcoind.service Upstart: bitcoind.conf OpenRC: bitcoind.openrc bitcoind.openrcconf - +CentOS: bitcoind.init +macOS: org.bitcoin.bitcoind.plist +``` have been made available to assist packagers in creating node packages here. -See doc/init.md for more information. +See [doc/init.md](../../doc/init.md) for more information. diff --git a/contrib/init/bitcoind.conf b/contrib/init/bitcoind.conf index f9554eecde7f2..dde1bd0c4d16b 100644 --- a/contrib/init/bitcoind.conf +++ b/contrib/init/bitcoind.conf @@ -16,7 +16,7 @@ expect fork respawn respawn limit 5 120 -kill timeout 60 +kill timeout 600 pre-start script # this will catch non-existent config files @@ -30,12 +30,12 @@ pre-start script echo echo "This password is security critical to securing wallets " echo "and must not be the same as the rpcuser setting." - echo "You can generate a suitable random password using the following" + echo "You can generate a suitable random password using the following " echo "command from the shell:" echo echo "bash -c 'tr -dc a-zA-Z0-9 < /dev/urandom | head -c32 && echo'" echo - echo "It is also recommended that you also set alertnotify so you are " + echo "It is recommended that you also set alertnotify so you are " echo "notified of problems:" echo echo "ie: alertnotify=echo %%s | mail -s \"Bitcoin Alert\"" \ diff --git a/contrib/init/bitcoind.init b/contrib/init/bitcoind.init new file mode 100644 index 0000000000000..19e1f76d094ce --- /dev/null +++ b/contrib/init/bitcoind.init @@ -0,0 +1,67 @@ +#!/usr/bin/env bash +# +# bitcoind The bitcoin core server. +# +# +# chkconfig: 345 80 20 +# description: bitcoind +# processname: bitcoind +# + +# Source function library. +. /etc/init.d/functions + +# you can override defaults in /etc/sysconfig/bitcoind, see below +if [ -f /etc/sysconfig/bitcoind ]; then + . 
/etc/sysconfig/bitcoind +fi + +RETVAL=0 + +prog=bitcoind +# you can override the lockfile via BITCOIND_LOCKFILE in /etc/sysconfig/bitcoind +lockfile=${BITCOIND_LOCKFILE-/var/lock/subsys/bitcoind} + +# bitcoind defaults to /usr/bin/bitcoind, override with BITCOIND_BIN +bitcoind=${BITCOIND_BIN-/usr/bin/bitcoind} + +# bitcoind opts default to -disablewallet, override with BITCOIND_OPTS +bitcoind_opts=${BITCOIND_OPTS--disablewallet} + +start() { + echo -n $"Starting $prog: " + daemon $DAEMONOPTS $bitcoind $bitcoind_opts + RETVAL=$? + echo + [ $RETVAL -eq 0 ] && touch $lockfile + return $RETVAL +} + +stop() { + echo -n $"Stopping $prog: " + killproc $prog -t600 + RETVAL=$? + echo + [ $RETVAL -eq 0 ] && rm -f $lockfile + return $RETVAL +} + +case "$1" in + start) + start + ;; + stop) + stop + ;; + status) + status $prog + ;; + restart) + stop + start + ;; + *) + echo "Usage: service $prog {start|stop|status|restart}" + exit 1 + ;; +esac diff --git a/contrib/init/bitcoind.openrc b/contrib/init/bitcoind.openrc index 1f7758c9202a5..013a1a607027c 100644 --- a/contrib/init/bitcoind.openrc +++ b/contrib/init/bitcoind.openrc @@ -1,4 +1,4 @@ -#!/sbin/runscript +#!/sbin/openrc-run # backward compatibility for existing gentoo layout # @@ -12,12 +12,14 @@ BITCOIND_CONFIGFILE=${BITCOIND_CONFIGFILE:-/etc/bitcoin/bitcoin.conf} BITCOIND_PIDDIR=${BITCOIND_PIDDIR:-/var/run/bitcoind} BITCOIND_PIDFILE=${BITCOIND_PIDFILE:-${BITCOIND_PIDDIR}/bitcoind.pid} BITCOIND_DATADIR=${BITCOIND_DATADIR:-${BITCOIND_DEFAULT_DATADIR}} -BITCOIND_USER=${BITCOIND_USER:-bitcoin} +BITCOIND_USER=${BITCOIND_USER:-${BITCOIN_USER:-bitcoin}} BITCOIND_GROUP=${BITCOIND_GROUP:-bitcoin} BITCOIND_BIN=${BITCOIND_BIN:-/usr/bin/bitcoind} +BITCOIND_NICE=${BITCOIND_NICE:-${NICELEVEL:-0}} +BITCOIND_OPTS="${BITCOIND_OPTS:-${BITCOIN_OPTS}}" name="Bitcoin Core Daemon" -description="Bitcoin crypto-currency p2p network daemon" +description="Bitcoin cryptocurrency P2P network daemon" command="/usr/bin/bitcoind" command_args="-pid=\"${BITCOIND_PIDFILE}\" \ @@ -28,9 +30,13 @@ command_args="-pid=\"${BITCOIND_PIDFILE}\" \ required_files="${BITCOIND_CONFIGFILE}" start_stop_daemon_args="-u ${BITCOIND_USER} \ - -N ${BITCOIND_NICE:-0} -w 2000" + -N ${BITCOIND_NICE} -w 2000" pidfile="${BITCOIND_PIDFILE}" -retry=60 + +# The retry schedule to use when stopping the daemon. Could be either +# a timeout in seconds or multiple signal/timeout pairs (like +# "SIGKILL/180 SIGTERM/300") +retry="${BITCOIND_SIGTERM_TIMEOUT}" depend() { need localmount net @@ -54,28 +60,29 @@ start_pre() { "${BITCOIND_PIDDIR}" checkpath -f \ - -o ${BITCOIND_USER}:${BITCOIND_GROUP} \ + -o "${BITCOIND_USER}:${BITCOIND_GROUP}" \ -m 0660 \ - ${BITCOIND_CONFIGFILE} + "${BITCOIND_CONFIGFILE}" checkconfig || return 1 } checkconfig() { - if ! grep -qs '^rpcpassword=' "${BITCOIND_CONFIGFILE}" ; then + if grep -qs '^rpcuser=' "${BITCOIND_CONFIGFILE}" && \ + ! grep -qs '^rpcpassword=' "${BITCOIND_CONFIGFILE}" ; then eerror "" eerror "ERROR: You must set a secure rpcpassword to run bitcoind." eerror "The setting must appear in ${BITCOIND_CONFIGFILE}" eerror "" eerror "This password is security critical to securing wallets " eerror "and must not be the same as the rpcuser setting." 
- eerror "You can generate a suitable random password using the following" + eerror "You can generate a suitable random password using the following " eerror "command from the shell:" eerror "" eerror "bash -c 'tr -dc a-zA-Z0-9 < /dev/urandom | head -c32 && echo'" eerror "" - eerror "It is also recommended that you also set alertnotify so you are " + eerror "It is recommended that you also set alertnotify so you are " eerror "notified of problems:" eerror "" eerror "ie: alertnotify=echo %%s | mail -s \"Bitcoin Alert\"" \ diff --git a/contrib/init/bitcoind.openrcconf b/contrib/init/bitcoind.openrcconf index d8d7f58337460..c8a22a08d9241 100644 --- a/contrib/init/bitcoind.openrcconf +++ b/contrib/init/bitcoind.openrcconf @@ -23,5 +23,11 @@ #BITCOIND_NICE=0 # Additional options (avoid -conf and -datadir, use flags above) -BITCOIND_OPTS="-disablewallet" - +#BITCOIND_OPTS="" + +# The timeout in seconds OpenRC will wait for bitcoind to terminate +# after a SIGTERM has been raised. +# Note that this will be mapped as argument to start-stop-daemon's +# '--retry' option, which means you can specify a retry schedule +# here. For more information see man 8 start-stop-daemon. +BITCOIND_SIGTERM_TIMEOUT=600 diff --git a/contrib/init/bitcoind.service b/contrib/init/bitcoind.service index 9132957c38b8d..93de353bb4eb0 100644 --- a/contrib/init/bitcoind.service +++ b/contrib/init/bitcoind.service @@ -1,22 +1,82 @@ +# It is not recommended to modify this file in-place, because it will +# be overwritten during package upgrades. If you want to add further +# options or overwrite existing ones then use +# $ systemctl edit bitcoind.service +# See "man systemd.service" for details. + +# Note that almost all daemon options could be specified in +# /etc/bitcoin/bitcoin.conf, but keep in mind those explicitly +# specified as arguments in ExecStart= will override those in the +# config file. + [Unit] -Description=Bitcoin's distributed currency daemon -After=network.target +Description=Bitcoin daemon +Documentation=https://github.com/bitcoin/bitcoin/blob/master/doc/init.md + +# https://www.freedesktop.org/wiki/Software/systemd/NetworkTarget/ +After=network-online.target +Wants=network-online.target [Service] +ExecStart=/usr/bin/bitcoind -daemonwait \ + -pid=/run/bitcoind/bitcoind.pid \ + -conf=/etc/bitcoin/bitcoin.conf \ + -datadir=/var/lib/bitcoind + +# Make sure the config directory is readable by the service user +PermissionsStartOnly=true +ExecStartPre=/bin/chgrp bitcoin /etc/bitcoin + +# Process management +#################### + +Type=forking +PIDFile=/run/bitcoind/bitcoind.pid +Restart=on-failure +TimeoutStartSec=infinity +TimeoutStopSec=600 + +# Directory creation and permissions +#################################### + +# Run as bitcoin:bitcoin User=bitcoin Group=bitcoin -Type=forking -PIDFile=/var/lib/bitcoind/bitcoind.pid -ExecStart=/usr/bin/bitcoind -daemon -pid=/var/lib/bitcoind/bitcoind.pid \ --conf=/etc/bitcoin/bitcoin.conf -datadir=/var/lib/bitcoind -disablewallet +# /run/bitcoind +RuntimeDirectory=bitcoind +RuntimeDirectoryMode=0710 + +# /etc/bitcoin +ConfigurationDirectory=bitcoin +ConfigurationDirectoryMode=0710 + +# /var/lib/bitcoind +StateDirectory=bitcoind +StateDirectoryMode=0710 -Restart=always +# Hardening measures +#################### + +# Provide a private /tmp and /var/tmp. PrivateTmp=true -TimeoutStopSec=60s -TimeoutStartSec=2s -StartLimitInterval=120s -StartLimitBurst=5 + +# Mount /usr, /boot/ and /etc read-only for the process. 
+ProtectSystem=full + +# Deny access to /home, /root and /run/user +ProtectHome=true + +# Disallow the process and all of its children to gain +# new privileges through execve(). +NoNewPrivileges=true + +# Use a new /dev namespace only populated with API pseudo devices +# such as /dev/null, /dev/zero and /dev/random. +PrivateDevices=true + +# Deny the creation of writable and executable memory mappings. +MemoryDenyWriteExecute=true [Install] WantedBy=multi-user.target
diff --git a/contrib/init/org.bitcoin.bitcoind.plist b/contrib/init/org.bitcoin.bitcoind.plist new file mode 100644 index 0000000000000..95b5342f1ef9d --- /dev/null +++ b/contrib/init/org.bitcoin.bitcoind.plist @@ -0,0 +1,14 @@ +<?xml version="1.0" encoding="UTF-8"?> +<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd"> +<plist version="1.0"> +<dict> + <key>Label</key> + <string>org.bitcoin.bitcoind</string> + <key>ProgramArguments</key> + <array> + <string>/usr/local/bin/bitcoind</string> + </array> + <key>RunAtLoad</key> + <true/> +</dict> +</plist>
diff --git a/contrib/install_db4.sh b/contrib/install_db4.sh new file mode 100755 index 0000000000000..2850c4b993ef5 --- /dev/null +++ b/contrib/install_db4.sh @@ -0,0 +1,260 @@ +#!/bin/sh +# Copyright (c) 2017-2021 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. + +# Install libdb4.8 (Berkeley DB). + +export LC_ALL=C +set -e + +if [ -z "${1}" ]; then + echo "Usage: $0 <base-dir> [<extra-bdb-configure-flag> ...]" + echo + echo "Must specify a single argument: the directory in which db4 will be built." + echo "This is probably \`pwd\` if you're at the root of the bitcoin repository." + exit 1 +fi + +expand_path() { + cd "${1}" && pwd -P +} + +BDB_PREFIX="$(expand_path "${1}")/db4"; shift; +BDB_VERSION='db-4.8.30.NC' +BDB_HASH='12edc0df75bf9abd7f82f821795bcee50f42cb2e5f76a6a281b85732798364ef' +BDB_URL="https://download.oracle.com/berkeley-db/${BDB_VERSION}.tar.gz" + +check_exists() { + command -v "$1" >/dev/null +} + +sha256_check() { + # Args: <sha256_hash> <filename> + # + if check_exists sha256sum; then + echo "${1} ${2}" | sha256sum -c + elif check_exists sha256; then + if [ "$(uname)" = "FreeBSD" ]; then + sha256 -c "${1}" "${2}" + else + echo "${1} ${2}" | sha256 -c + fi + else + echo "${1} ${2}" | shasum -a 256 -c + fi +} + +http_get() { + # Args: <url> <filename> <sha256_hash> + # + # It's acceptable that we don't require SSL here because we manually verify + # content hashes below. + # + if [ -f "${2}" ]; then + echo "File ${2} already exists; not downloading again" + elif check_exists curl; then + curl --insecure --retry 5 "${1}" -o "${2}" + elif check_exists wget; then + wget --no-check-certificate "${1}" -O "${2}" + else + echo "Simple transfer utilities 'curl' and 'wget' not found. Please install one of them and try again." + exit 1 + fi + + sha256_check "${3}" "${2}" +} + +# Ensure the commands we use exist on the system +if ! check_exists patch; then + echo "Command-line tool 'patch' not found. Install patch and try again." + exit 1 +fi + +mkdir -p "${BDB_PREFIX}" +http_get "${BDB_URL}" "${BDB_VERSION}.tar.gz" "${BDB_HASH}" +tar -xzvf ${BDB_VERSION}.tar.gz -C "$BDB_PREFIX" +cd "${BDB_PREFIX}/${BDB_VERSION}/" + +# Apply a patch necessary when building with clang and c++11 (see https://community.oracle.com/thread/3952592) +patch --ignore-whitespace -p1 << 'EOF' +commit 3311d68f11d1697565401eee6efc85c34f022ea7 +Author: fanquake <fanquake@gmail.com> +Date: Mon Aug 17 20:03:56 2020 +0800 + + Fix C++11 compatibility + +diff --git a/dbinc/atomic.h b/dbinc/atomic.h +index 0034dcc..7c11d4a 100644 +--- a/dbinc/atomic.h ++++ b/dbinc/atomic.h +@@ -70,7 +70,7 @@ typedef struct { + * These have no memory barriers; the caller must include them when necessary.
+ */ + #define atomic_read(p) ((p)->value) +-#define atomic_init(p, val) ((p)->value = (val)) ++#define atomic_init_db(p, val) ((p)->value = (val)) + + #ifdef HAVE_ATOMIC_SUPPORT + +@@ -144,7 +144,7 @@ typedef LONG volatile *interlocked_val; + #define atomic_inc(env, p) __atomic_inc(p) + #define atomic_dec(env, p) __atomic_dec(p) + #define atomic_compare_exchange(env, p, o, n) \ +- __atomic_compare_exchange((p), (o), (n)) ++ __atomic_compare_exchange_db((p), (o), (n)) + static inline int __atomic_inc(db_atomic_t *p) + { + int temp; +@@ -176,7 +176,7 @@ static inline int __atomic_dec(db_atomic_t *p) + * http://gcc.gnu.org/onlinedocs/gcc-4.1.0/gcc/Atomic-Builtins.html + * which configure could be changed to use. + */ +-static inline int __atomic_compare_exchange( ++static inline int __atomic_compare_exchange_db( + db_atomic_t *p, atomic_value_t oldval, atomic_value_t newval) + { + atomic_value_t was; +@@ -206,7 +206,7 @@ static inline int __atomic_compare_exchange( + #define atomic_dec(env, p) (--(p)->value) + #define atomic_compare_exchange(env, p, oldval, newval) \ + (DB_ASSERT(env, atomic_read(p) == (oldval)), \ +- atomic_init(p, (newval)), 1) ++ atomic_init_db(p, (newval)), 1) + #else + #define atomic_inc(env, p) __atomic_inc(env, p) + #define atomic_dec(env, p) __atomic_dec(env, p) +diff --git a/mp/mp_fget.c b/mp/mp_fget.c +index 5fdee5a..0b75f57 100644 +--- a/mp/mp_fget.c ++++ b/mp/mp_fget.c +@@ -617,7 +617,7 @@ alloc: /* Allocate a new buffer header and data space. */ + + /* Initialize enough so we can call __memp_bhfree. */ + alloc_bhp->flags = 0; +- atomic_init(&alloc_bhp->ref, 1); ++ atomic_init_db(&alloc_bhp->ref, 1); + #ifdef DIAGNOSTIC + if ((uintptr_t)alloc_bhp->buf & (sizeof(size_t) - 1)) { + __db_errx(env, +@@ -911,7 +911,7 @@ alloc: /* Allocate a new buffer header and data space. */ + MVCC_MPROTECT(bhp->buf, mfp->stat.st_pagesize, + PROT_READ); + +- atomic_init(&alloc_bhp->ref, 1); ++ atomic_init_db(&alloc_bhp->ref, 1); + MUTEX_LOCK(env, alloc_bhp->mtx_buf); + alloc_bhp->priority = bhp->priority; + alloc_bhp->pgno = bhp->pgno; +diff --git a/mp/mp_mvcc.c b/mp/mp_mvcc.c +index 34467d2..f05aa0c 100644 +--- a/mp/mp_mvcc.c ++++ b/mp/mp_mvcc.c +@@ -276,7 +276,7 @@ __memp_bh_freeze(dbmp, infop, hp, bhp, need_frozenp) + #else + memcpy(frozen_bhp, bhp, SSZA(BH, buf)); + #endif +- atomic_init(&frozen_bhp->ref, 0); ++ atomic_init_db(&frozen_bhp->ref, 0); + if (mutex != MUTEX_INVALID) + frozen_bhp->mtx_buf = mutex; + else if ((ret = __mutex_alloc(env, MTX_MPOOL_BH, +@@ -428,7 +428,7 @@ __memp_bh_thaw(dbmp, infop, hp, frozen_bhp, alloc_bhp) + #endif + alloc_bhp->mtx_buf = mutex; + MUTEX_LOCK(env, alloc_bhp->mtx_buf); +- atomic_init(&alloc_bhp->ref, 1); ++ atomic_init_db(&alloc_bhp->ref, 1); + F_CLR(alloc_bhp, BH_FROZEN); + } + +diff --git a/mp/mp_region.c b/mp/mp_region.c +index e6cece9..ddbe906 100644 +--- a/mp/mp_region.c ++++ b/mp/mp_region.c +@@ -224,7 +224,7 @@ __memp_init(env, dbmp, reginfo_off, htab_buckets, max_nreg) + MTX_MPOOL_FILE_BUCKET, 0, &htab[i].mtx_hash)) != 0) + return (ret); + SH_TAILQ_INIT(&htab[i].hash_bucket); +- atomic_init(&htab[i].hash_page_dirty, 0); ++ atomic_init_db(&htab[i].hash_page_dirty, 0); + } + + /* +@@ -269,7 +269,7 @@ __memp_init(env, dbmp, reginfo_off, htab_buckets, max_nreg) + hp->mtx_hash = (mtx_base == MUTEX_INVALID) ? 
MUTEX_INVALID : + mtx_base + i; + SH_TAILQ_INIT(&hp->hash_bucket); +- atomic_init(&hp->hash_page_dirty, 0); ++ atomic_init_db(&hp->hash_page_dirty, 0); + #ifdef HAVE_STATISTICS + hp->hash_io_wait = 0; + hp->hash_frozen = hp->hash_thawed = hp->hash_frozen_freed = 0; +diff --git a/mutex/mut_method.c b/mutex/mut_method.c +index 2588763..5c6d516 100644 +--- a/mutex/mut_method.c ++++ b/mutex/mut_method.c +@@ -426,7 +426,7 @@ atomic_compare_exchange(env, v, oldval, newval) + MUTEX_LOCK(env, mtx); + ret = atomic_read(v) == oldval; + if (ret) +- atomic_init(v, newval); ++ atomic_init_db(v, newval); + MUTEX_UNLOCK(env, mtx); + + return (ret); +diff --git a/mutex/mut_tas.c b/mutex/mut_tas.c +index f3922e0..e40fcdf 100644 +--- a/mutex/mut_tas.c ++++ b/mutex/mut_tas.c +@@ -46,7 +46,7 @@ __db_tas_mutex_init(env, mutex, flags) + + #ifdef HAVE_SHARED_LATCHES + if (F_ISSET(mutexp, DB_MUTEX_SHARED)) +- atomic_init(&mutexp->sharecount, 0); ++ atomic_init_db(&mutexp->sharecount, 0); + else + #endif + if (MUTEX_INIT(&mutexp->tas)) { +@@ -486,7 +486,7 @@ __db_tas_mutex_unlock(env, mutex) + F_CLR(mutexp, DB_MUTEX_LOCKED); + /* Flush flag update before zeroing count */ + MEMBAR_EXIT(); +- atomic_init(&mutexp->sharecount, 0); ++ atomic_init_db(&mutexp->sharecount, 0); + } else { + DB_ASSERT(env, sharecount > 0); + MEMBAR_EXIT(); +EOF + +# The packaged config.guess and config.sub are ancient (2009) and can cause build issues. +# Replace them with modern versions. +# See https://github.com/bitcoin/bitcoin/issues/16064 +CONFIG_GUESS_URL='https://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess;hb=4550d2f15b3a7ce2451c1f29500b9339430c877f' +CONFIG_GUESS_HASH='c8f530e01840719871748a8071113435bdfdf75b74c57e78e47898edea8754ae' +CONFIG_SUB_URL='https://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.sub;hb=4550d2f15b3a7ce2451c1f29500b9339430c877f' +CONFIG_SUB_HASH='3969f7d5f6967ccc6f792401b8ef3916a1d1b1d0f0de5a4e354c95addb8b800e' + +rm -f "dist/config.guess" +rm -f "dist/config.sub" + +http_get "${CONFIG_GUESS_URL}" dist/config.guess "${CONFIG_GUESS_HASH}" +http_get "${CONFIG_SUB_URL}" dist/config.sub "${CONFIG_SUB_HASH}" + +cd build_unix/ + +"${BDB_PREFIX}/${BDB_VERSION}/dist/configure" \ + --enable-cxx --disable-shared --disable-replication --with-pic --prefix="${BDB_PREFIX}" \ + "${@}" + +make install + +echo +echo "db4 build complete." +echo +# shellcheck disable=SC2016 +echo 'When compiling bitcoind, run `./configure` in the following way:' +echo +echo " export BDB_PREFIX='${BDB_PREFIX}'" +# shellcheck disable=SC2016 +echo ' ./configure BDB_LIBS="-L${BDB_PREFIX}/lib -ldb_cxx-4.8" BDB_CFLAGS="-I${BDB_PREFIX}/include" ...' diff --git a/contrib/linearize/README.md b/contrib/linearize/README.md index 157586e4d4e0a..25a1c7351a297 100644 --- a/contrib/linearize/README.md +++ b/contrib/linearize/README.md @@ -1,33 +1,54 @@ # Linearize -Construct a linear, no-fork, best version of the blockchain. +Construct a linear, no-fork, best version of the Bitcoin blockchain. 
## Step 1: Download hash list - $ ./linearize-hashes.py linearize.cfg > hashlist.txt + $ ./linearize-hashes.py linearize.cfg > hashlist.txt Required configuration file settings for linearize-hashes: -* RPC: rpcuser, rpcpassword +* RPC: `datadir` (Required if `rpcuser` and `rpcpassword` are not specified) +* RPC: `rpcuser`, `rpcpassword` (Required if `datadir` is not specified) Optional config file setting for linearize-hashes: -* RPC: host, port -* Block chain: min_height, max_height +* RPC: `host` (Default: `127.0.0.1`) +* RPC: `port` (Default: `8332`) +* Blockchain: `min_height`, `max_height` +* `rev_hash_bytes`: If true, the written block hash list will be +byte-reversed. (In other words, the hash returned by getblockhash will have its +bytes reversed.) False by default. Intended for generation of +standalone hash lists but safe to use with linearize-data.py, which will output +the same data no matter which byte format is chosen. + +The `linearize-hashes` script requires a connection, local or remote, to a +JSON-RPC server. Running `bitcoind` or `bitcoin-qt -server` will be sufficient. ## Step 2: Copy local block data - $ ./linearize-data.py linearize.cfg + $ ./linearize-data.py linearize.cfg Required configuration file settings: -* "input": bitcoind blocks/ directory containing blkNNNNN.dat -* "hashlist": text file containing list of block hashes, linearized-hashes.py -output. -* "output_file": bootstrap.dat +* `output_file`: The file that will contain the final blockchain. or -* "output": output directory for linearized blocks/blkNNNNN.dat output +* `output`: Output directory for linearized `blocks/blkNNNNN.dat` output. Optional config file setting for linearize-data: -* "netmagic": network magic number -* "max_out_sz": maximum output file size (default 1000*1000*1000) -* "split_timestamp": Split files when a new month is first seen, in addition to -reaching a maximum file size. -* "file_timestamp": Set each file's last-modified time to that of the -most recent block in that file. +* `debug_output`: Some printouts may not always be desired. If true, such output +will be printed. +* `file_timestamp`: Set each file's last-accessed and last-modified times, +respectively, to the current time and to the timestamp of the most recent block +written to the script's blockchain. +* `genesis`: The hash of the genesis block in the blockchain. +* `input`: bitcoind blocks/ directory containing blkNNNNN.dat +* `hashlist`: text file containing list of block hashes created by +linearize-hashes.py. +* `max_out_sz`: Maximum size for files created by the `output_file` option. +(Default: `1000*1000*1000 bytes`) +* `netmagic`: Network magic number. +* `out_of_order_cache_sz`: If out-of-order blocks are being read, the block can +be written to a cache so that the blockchain doesn't have to be sought again. +This option specifies the cache size. (Default: `100*1000*1000 bytes`) +* `rev_hash_bytes`: If true, the block hash list written by linearize-hashes.py +will be byte-reversed when read by linearize-data.py. See the linearize-hashes +entry for more information. +* `split_timestamp`: Split blockchain files when a new month is first seen, in +addition to reaching a maximum file size (`max_out_sz`). 
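Both linearize scripts read the same simple key=value configuration format shown in `example-linearize.cfg` below. A minimal sketch of parsing that format, assuming a hypothetical helper name `read_linearize_cfg` (the real scripts use their own loaders):

```python
#!/usr/bin/env python3
# Minimal sketch of parsing the key=value format used by linearize.cfg.
# read_linearize_cfg is a hypothetical name; it is not part of the linearize scripts.
def read_linearize_cfg(path):
    settings = {}
    with open(path, encoding="utf8") as f:
        for line in f:
            line = line.split('#', 1)[0].strip()  # drop comments and surrounding whitespace
            if '=' not in line:
                continue  # skip blank or malformed lines
            key, value = line.split('=', 1)
            settings[key.strip()] = value.strip()
    return settings

if __name__ == '__main__':
    cfg = read_linearize_cfg("linearize.cfg")
    # Fall back to the documented defaults when a setting is absent.
    host = cfg.get("host", "127.0.0.1")
    port = int(cfg.get("port", "8332"))
    print(host, port)
```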
diff --git a/contrib/linearize/example-linearize.cfg b/contrib/linearize/example-linearize.cfg index e0fef13886d92..5f566261ca674 100644 --- a/contrib/linearize/example-linearize.cfg +++ b/contrib/linearize/example-linearize.cfg @@ -1,19 +1,63 @@ - # bitcoind RPC settings (linearize-hashes) rpcuser=someuser rpcpassword=somepassword +#datadir=~/.bitcoin host=127.0.0.1 + +#mainnet default port=8332 +#testnet default +#port=18332 + +#regtest default +#port=18443 + +#signet default +#port=38332 + # bootstrap.dat hashlist settings (linearize-hashes) max_height=313000 # bootstrap.dat input/output settings (linearize-data) + +# mainnet netmagic=f9beb4d9 +genesis=000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f input=/home/example/.bitcoin/blocks + +# testnet +#netmagic=0b110907 +#genesis=000000000933ea01ad0ee984209779baaec3ced90fa3f408719526f8d77f4943 +#input=/home/example/.bitcoin/testnet3/blocks + +# regtest +#netmagic=fabfb5da +#genesis=0f9188f13cb7b2c71f2a335e3a4fc328bf5beb436012afca590b1a11466e2206 +#input=/home/example/.bitcoin/regtest/blocks + +# signet +#netmagic=0a03cf40 +#genesis=00000008819873e925422c1ff0f99f7cc9bbb232af63a077a480a3633bee1ef6 +#input=/home/example/.bitcoin/signet/blocks + +# "output" option causes blockchain files to be written to the given location, +# with "output_file" ignored. If not used, "output_file" is used instead. +# output=/home/example/blockchain_directory output_file=/home/example/Downloads/bootstrap.dat hashlist=hashlist.txt -split_year=1 -# Maxmimum size in bytes of out-of-order blocks cache in memory +# Maximum size in bytes of out-of-order blocks cache in memory out_of_order_cache_sz = 100000000 + +# Do we want the reverse the hash bytes coming from getblockhash? +rev_hash_bytes = False + +# On a new month, do we want to set the access and modify times of the new +# blockchain file? +file_timestamp = 0 +# Do we want to split the blockchain files given a new month or specific height? +split_timestamp = 0 + +# Do we want debug printouts? +debug_output = False diff --git a/contrib/linearize/linearize-data.py b/contrib/linearize/linearize-data.py index 2dac3a614b031..b72c7b0d0885c 100755 --- a/contrib/linearize/linearize-data.py +++ b/contrib/linearize/linearize-data.py @@ -1,299 +1,308 @@ -#!/usr/bin/python +#!/usr/bin/env python3 # # linearize-data.py: Construct a linear, no-fork version of the chain. # -# Copyright (c) 2013-2014 The Bitcoin developers -# Distributed under the MIT/X11 software license, see the accompanying +# Copyright (c) 2013-2021 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
# -from __future__ import print_function, division -import json import struct import re import os -import base64 -import httplib +import os.path import sys import hashlib import datetime import time +import glob from collections import namedtuple settings = {} -def uint32(x): - return x & 0xffffffffL - -def bytereverse(x): - return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) | - (((x) >> 8) & 0x0000ff00) | ((x) >> 24) )) - -def bufreverse(in_buf): - out_words = [] - for i in range(0, len(in_buf), 4): - word = struct.unpack('@I', in_buf[i:i+4])[0] - out_words.append(struct.pack('@I', bytereverse(word))) - return ''.join(out_words) - -def wordreverse(in_buf): - out_words = [] - for i in range(0, len(in_buf), 4): - out_words.append(in_buf[i:i+4]) - out_words.reverse() - return ''.join(out_words) - -def calc_hdr_hash(blk_hdr): - hash1 = hashlib.sha256() - hash1.update(blk_hdr) - hash1_o = hash1.digest() - - hash2 = hashlib.sha256() - hash2.update(hash1_o) - hash2_o = hash2.digest() - - return hash2_o - def calc_hash_str(blk_hdr): - hash = calc_hdr_hash(blk_hdr) - hash = bufreverse(hash) - hash = wordreverse(hash) - hash_str = hash.encode('hex') - return hash_str + blk_hdr_hash = hashlib.sha256(hashlib.sha256(blk_hdr).digest()).digest() + return blk_hdr_hash[::-1].hex() def get_blk_dt(blk_hdr): - members = struct.unpack(" self.maxOutSz): - self.outF.close() - if self.setFileTime: - os.utime(outFname, (int(time.time()), highTS)) - self.outF = None - self.outFname = None - self.outFn = outFn + 1 - self.outsz = 0 - - (blkDate, blkTS) = get_blk_dt(blk_hdr) - if self.timestampSplit and (blkDate > self.lastDate): - print("New month " + blkDate.strftime("%Y-%m") + " @ " + hash_str) - lastDate = blkDate - if outF: - outF.close() - if setFileTime: - os.utime(outFname, (int(time.time()), highTS)) - self.outF = None - self.outFname = None - self.outFn = self.outFn + 1 - self.outsz = 0 - - if not self.outF: - if self.fileOutput: - outFname = self.settings['output_file'] - else: - outFname = "%s/blk%05d.dat" % (self.settings['output'], outFn) - print("Output file" + outFname) - self.outF = open(outFname, "wb") - - self.outF.write(inhdr) - self.outF.write(blk_hdr) - self.outF.write(rawblock) - self.outsz = self.outsz + len(inhdr) + len(blk_hdr) + len(rawblock) - - self.blkCountOut = self.blkCountOut + 1 - if blkTS > self.highTS: - self.highTS = blkTS - - if (self.blkCountOut % 1000) == 0: - print('%i blocks scanned, %i blocks written (of %i, %.1f%% complete)' % - (self.blkCountIn, self.blkCountOut, len(self.blkindex), 100.0 * self.blkCountOut / len(self.blkindex))) - - def inFileName(self, fn): - return "%s/blk%05d.dat" % (self.settings['input'], fn) - - def fetchBlock(self, extent): - '''Fetch block contents from disk given extents''' - with open(self.inFileName(extent.fn), "rb") as f: - f.seek(extent.offset) - return f.read(extent.size) - - def copyOneBlock(self): - '''Find the next block to be written in the input, and copy it to the output.''' - extent = self.blockExtents.pop(self.blkCountOut) - if self.blkCountOut in self.outOfOrderData: - # If the data is cached, use it from memory and remove from the cache - rawblock = self.outOfOrderData.pop(self.blkCountOut) - self.outOfOrderSize -= len(rawblock) - else: # Otherwise look up data on disk - rawblock = self.fetchBlock(extent) - - self.writeBlock(extent.inhdr, extent.blkhdr, rawblock) - - def run(self): - while self.blkCountOut < len(self.blkindex): - if not self.inF: - fname = self.inFileName(self.inFn) - print("Input file" + fname) - try: - 
self.inF = open(fname, "rb") - except IOError: - print("Premature end of block data") - return - - inhdr = self.inF.read(8) - if (not inhdr or (inhdr[0] == "\0")): - self.inF.close() - self.inF = None - self.inFn = self.inFn + 1 - continue - - inMagic = inhdr[:4] - if (inMagic != self.settings['netmagic']): - print("Invalid magic:" + inMagic) - return - inLenLE = inhdr[4:] - su = struct.unpack(" self.maxOutSz): + self.outF.close() + if self.setFileTime: + os.utime(self.outFname, (int(time.time()), self.highTS)) + self.outF = None + self.outFname = None + self.outFn = self.outFn + 1 + self.outsz = 0 + + (blkDate, blkTS) = get_blk_dt(blk_hdr) + if self.timestampSplit and (blkDate > self.lastDate): + print("New month " + blkDate.strftime("%Y-%m") + " @ " + self.hash_str) + self.lastDate = blkDate + if self.outF: + self.outF.close() + if self.setFileTime: + os.utime(self.outFname, (int(time.time()), self.highTS)) + self.outF = None + self.outFname = None + self.outFn = self.outFn + 1 + self.outsz = 0 + + if not self.outF: + if self.fileOutput: + self.outFname = self.settings['output_file'] + else: + self.outFname = os.path.join(self.settings['output'], "blk%05d.dat" % self.outFn) + print("Output file " + self.outFname) + self.outF = open(self.outFname, "wb") + + self.outF.write(inhdr) + self.outF.write(blk_hdr) + self.outF.write(rawblock) + self.outsz = self.outsz + len(inhdr) + len(blk_hdr) + len(rawblock) + + self.blkCountOut = self.blkCountOut + 1 + if blkTS > self.highTS: + self.highTS = blkTS + + if (self.blkCountOut % 1000) == 0: + print('%i blocks scanned, %i blocks written (of %i, %.1f%% complete)' % + (self.blkCountIn, self.blkCountOut, len(self.blkindex), 100.0 * self.blkCountOut / len(self.blkindex))) + + def inFileName(self, fn): + return os.path.join(self.settings['input'], "blk%05d.dat" % fn) + + def fetchBlock(self, extent): + '''Fetch block contents from disk given extents''' + with open(self.inFileName(extent.fn), "rb") as f: + f.seek(extent.offset) + return f.read(extent.size) + + def copyOneBlock(self): + '''Find the next block to be written in the input, and copy it to the output.''' + extent = self.blockExtents.pop(self.blkCountOut) + if self.blkCountOut in self.outOfOrderData: + # If the data is cached, use it from memory and remove from the cache + rawblock = self.outOfOrderData.pop(self.blkCountOut) + self.outOfOrderSize -= len(rawblock) + else: # Otherwise look up data on disk + rawblock = self.fetchBlock(extent) + + self.writeBlock(extent.inhdr, extent.blkhdr, rawblock) + + def run(self): + while self.blkCountOut < len(self.blkindex): + if not self.inF: + fname = self.inFileName(self.inFn) + print("Input file " + fname) + try: + self.inF = open(fname, "rb") + except IOError: + print("Premature end of block data") + return + + inhdr = self.inF.read(8) + if (not inhdr or (inhdr[0] == "\0")): + self.inF.close() + self.inF = None + self.inFn = self.inFn + 1 + continue + + inMagic = inhdr[:4] + if (inMagic != self.settings['netmagic']): + # Seek backwards 7 bytes (skipping the first byte in the previous search) + # and continue searching from the new position if the magic bytes are not + # found. 
+ self.inF.seek(-7, os.SEEK_CUR) + continue + inLenLE = inhdr[4:] + su = struct.unpack(" " - exit 1 -fi - -if [ -z "$SIGNATURE" ]; then - echo "usage: $0 " - exit 1 -fi - -rm -rf ${TEMPDIR} && mkdir -p ${TEMPDIR} -tar -C ${TEMPDIR} -xf ${UNSIGNED} -tar -C ${TEMPDIR} -xf ${SIGNATURE} - -if [ -z "${PAGESTUFF}" ]; then - PAGESTUFF=${TEMPDIR}/pagestuff -fi - -if [ -z "${CODESIGN_ALLOCATE}" ]; then - CODESIGN_ALLOCATE=${TEMPDIR}/codesign_allocate -fi - -for i in `find ${TEMPDIR} -name "*.sign"`; do - SIZE=`stat -c %s ${i}` - TARGET_FILE=`echo ${i} | sed 's/\.sign$//'` - - echo "Allocating space for the signature of size ${SIZE} in ${TARGET_FILE}" - ${CODESIGN_ALLOCATE} -i ${TARGET_FILE} -a ${ARCH} ${SIZE} -o ${i}.tmp - - OFFSET=`${PAGESTUFF} ${i}.tmp -p | tail -2 | grep offset | sed 's/[^0-9]*//g'` - if [ -z ${QUIET} ]; then - echo "Attaching signature at offset ${OFFSET}" - fi - - dd if=$i of=${i}.tmp bs=1 seek=${OFFSET} count=${SIZE} 2>/dev/null - mv ${i}.tmp ${TARGET_FILE} - rm ${i} - echo "Success." -done -mv ${TEMPDIR}/${ROOTDIR} ${OUTDIR} -rm -rf ${TEMPDIR} -echo "Signed: ${OUTDIR}" diff --git a/contrib/macdeploy/detached-sig-create.sh b/contrib/macdeploy/detached-sig-create.sh index aff4f08da8bec..f393331084e37 100755 --- a/contrib/macdeploy/detached-sig-create.sh +++ b/contrib/macdeploy/detached-sig-create.sh @@ -1,46 +1,31 @@ #!/bin/sh +# Copyright (c) 2014-2021 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. + +export LC_ALL=C set -e ROOTDIR=dist -BUNDLE=${ROOTDIR}/Bitcoin-Qt.app -CODESIGN=codesign +BUNDLE="${ROOTDIR}/Bitcoin-Qt.app" +BINARY="${BUNDLE}/Contents/MacOS/Bitcoin-Qt" +SIGNAPPLE=signapple TEMPDIR=sign.temp -TEMPLIST=${TEMPDIR}/signatures.txt -OUT=signature.tar.gz +ARCH=$(${SIGNAPPLE} info ${BINARY} | head -n 1 | cut -d " " -f 1) +OUT="signature-osx-${ARCH}.tar.gz" +OUTROOT=osx/dist -if [ ! -n "$1" ]; then - echo "usage: $0 " - echo "example: $0 -s MyIdentity" +if [ -z "$1" ]; then + echo "usage: $0 " + echo "example: $0 " exit 1 fi -rm -rf ${TEMPDIR} ${TEMPLIST} +rm -rf ${TEMPDIR} mkdir -p ${TEMPDIR} -${CODESIGN} -f --file-list ${TEMPLIST} "$@" "${BUNDLE}" - -for i in `grep -v CodeResources ${TEMPLIST}`; do - TARGETFILE="${BUNDLE}/`echo ${i} | sed "s|.*${BUNDLE}/||"`" - SIZE=`pagestuff $i -p | tail -2 | grep size | sed 's/[^0-9]*//g'` - OFFSET=`pagestuff $i -p | tail -2 | grep offset | sed 's/[^0-9]*//g'` - SIGNFILE="${TEMPDIR}/${TARGETFILE}.sign" - DIRNAME="`dirname ${SIGNFILE}`" - mkdir -p "${DIRNAME}" - echo "Adding detached signature for: ${TARGETFILE}. Size: ${SIZE}. Offset: ${OFFSET}" - dd if=$i of=${SIGNFILE} bs=1 skip=${OFFSET} count=${SIZE} 2>/dev/null -done +${SIGNAPPLE} sign -f --detach "${TEMPDIR}/${OUTROOT}" "$@" "${BUNDLE}" -for i in `grep CodeResources ${TEMPLIST}`; do - TARGETFILE="${BUNDLE}/`echo ${i} | sed "s|.*${BUNDLE}/||"`" - RESOURCE="${TEMPDIR}/${TARGETFILE}" - DIRNAME="`dirname "${RESOURCE}"`" - mkdir -p "${DIRNAME}" - echo "Adding resource for: "${TARGETFILE}"" - cp "${i}" "${RESOURCE}" -done - -rm ${TEMPLIST} - -tar -C ${TEMPDIR} -czf ${OUT} . -rm -rf ${TEMPDIR} +tar -C "${TEMPDIR}" -czf "${OUT}" . 
+rm -rf "${TEMPDIR}" echo "Created ${OUT}" diff --git a/contrib/macdeploy/fancy.plist b/contrib/macdeploy/fancy.plist deleted file mode 100644 index e73b9b697ef9f..0000000000000 --- a/contrib/macdeploy/fancy.plist +++ /dev/null @@ -1,32 +0,0 @@ - - - - - window_bounds - - 300 - 300 - 800 - 620 - - background_picture - background.png - icon_size - 96 - applications_symlink - - items_position - - Applications - - 370 - 156 - - Bitcoin-Qt.app - - 128 - 156 - - - - diff --git a/contrib/macdeploy/gen-sdk b/contrib/macdeploy/gen-sdk new file mode 100755 index 0000000000000..6efaaccb8e16b --- /dev/null +++ b/contrib/macdeploy/gen-sdk @@ -0,0 +1,116 @@ +#!/usr/bin/env python3 +import argparse +import plistlib +import pathlib +import sys +import tarfile +import gzip +import os +import contextlib + +# monkey-patch Python 3.8 and older to fix wrong TAR header handling +# see https://github.com/bitcoin/bitcoin/pull/24534 +# and https://github.com/python/cpython/pull/18080 for more info +if sys.version_info < (3, 9): + _old_create_header = tarfile.TarInfo._create_header + def _create_header(info, format, encoding, errors): + buf = _old_create_header(info, format, encoding, errors) + # replace devmajor/devminor with binary zeroes + buf = buf[:329] + bytes(16) + buf[345:] + # recompute checksum + chksum = tarfile.calc_chksums(buf)[0] + buf = buf[:-364] + bytes("%06o\0" % chksum, "ascii") + buf[-357:] + return buf + tarfile.TarInfo._create_header = staticmethod(_create_header) + +@contextlib.contextmanager +def cd(path): + """Context manager that restores PWD even if an exception was raised.""" + old_pwd = os.getcwd() + os.chdir(str(path)) + try: + yield + finally: + os.chdir(old_pwd) + +def run(): + parser = argparse.ArgumentParser( + description=__doc__, formatter_class=argparse.RawTextHelpFormatter) + + parser.add_argument('xcode_app', metavar='XCODEAPP', nargs=1) + parser.add_argument("-o", metavar='OUTSDKTGZ', nargs=1, dest='out_sdktgz', required=False) + + args = parser.parse_args() + + xcode_app = pathlib.Path(args.xcode_app[0]).resolve() + assert xcode_app.is_dir(), "The supplied Xcode.app path '{}' either does not exist or is not a directory".format(xcode_app) + + xcode_app_plist = xcode_app.joinpath("Contents/version.plist") + with xcode_app_plist.open('rb') as fp: + pl = plistlib.load(fp) + xcode_version = pl['CFBundleShortVersionString'] + xcode_build_id = pl['ProductBuildVersion'] + print("Found Xcode (version: {xcode_version}, build id: {xcode_build_id})".format(xcode_version=xcode_version, xcode_build_id=xcode_build_id)) + + sdk_dir = xcode_app.joinpath("Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk") + sdk_plist = sdk_dir.joinpath("System/Library/CoreServices/SystemVersion.plist") + with sdk_plist.open('rb') as fp: + pl = plistlib.load(fp) + sdk_version = pl['ProductVersion'] + sdk_build_id = pl['ProductBuildVersion'] + print("Found MacOSX SDK (version: {sdk_version}, build id: {sdk_build_id})".format(sdk_version=sdk_version, sdk_build_id=sdk_build_id)) + + out_name = "Xcode-{xcode_version}-{xcode_build_id}-extracted-SDK-with-libcxx-headers".format(xcode_version=xcode_version, xcode_build_id=xcode_build_id) + + xcode_libcxx_dir = xcode_app.joinpath("Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/include/c++/v1") + assert xcode_libcxx_dir.is_dir() + + if args.out_sdktgz: + out_sdktgz_path = pathlib.Path(args.out_sdktgz_path) + else: + # Construct our own out_sdktgz if not specified on the command line + out_sdktgz_path = 
pathlib.Path("./{}.tar.gz".format(out_name)) + + def tarfp_add_with_base_change(tarfp, dir_to_add, alt_base_dir): + """Add all files in dir_to_add to tarfp, but prepent MEMBERPREFIX to the files' + names + + e.g. if the only file under /root/bazdir is /root/bazdir/qux, invoking: + + tarfp_add_with_base_change(tarfp, "foo/bar", "/root/bazdir") + + would result in the following members being added to tarfp: + + foo/bar/ -> corresponding to /root/bazdir + foo/bar/qux -> corresponding to /root/bazdir/qux + + """ + def change_tarinfo_base(tarinfo): + if tarinfo.name and tarinfo.name.startswith("./"): + tarinfo.name = str(pathlib.Path(alt_base_dir, tarinfo.name)) + if tarinfo.linkname and tarinfo.linkname.startswith("./"): + tarinfo.linkname = str(pathlib.Path(alt_base_dir, tarinfo.linkname)) + # make metadata deterministic + tarinfo.mtime = 0 + tarinfo.uid, tarinfo.uname = 0, '' + tarinfo.gid, tarinfo.gname = 0, '' + # don't use isdir() as there are also executable files present + tarinfo.mode = 0o0755 if tarinfo.mode & 0o0100 else 0o0644 + return tarinfo + with cd(dir_to_add): + # recursion already adds entries in sorted order + tarfp.add(".", recursive=True, filter=change_tarinfo_base) + + print("Creating output .tar.gz file...") + with out_sdktgz_path.open("wb") as fp: + with gzip.GzipFile(fileobj=fp, mode='wb', compresslevel=9, mtime=0) as gzf: + with tarfile.open(mode="w", fileobj=gzf, format=tarfile.GNU_FORMAT) as tarfp: + print("Adding MacOSX SDK {} files...".format(sdk_version)) + tarfp_add_with_base_change(tarfp, sdk_dir, out_name) + print("Adding libc++ headers...") + tarfp_add_with_base_change(tarfp, xcode_libcxx_dir, "{}/usr/include/c++/v1".format(out_name)) + print("Done! Find the resulting gzipped tarball at:") + print(out_sdktgz_path.resolve()) + +if __name__ == '__main__': + run() diff --git a/contrib/macdeploy/macdeployqtplus b/contrib/macdeploy/macdeployqtplus index 541136001fcee..2420539b7cad5 100755 --- a/contrib/macdeploy/macdeployqtplus +++ b/contrib/macdeploy/macdeployqtplus @@ -1,5 +1,4 @@ -#!/usr/bin/env python - +#!/usr/bin/env python3 # # Copyright (C) 2011 Patrick "p2k" Schneider # @@ -17,9 +16,13 @@ # along with this program. If not, see . 
# -import subprocess, sys, re, os, shutil, stat, os.path, time -from string import Template +import sys, re, os, platform, shutil, stat, subprocess, os.path from argparse import ArgumentParser +from ds_store import DSStore +from mac_alias import Alias +from pathlib import Path +from subprocess import PIPE, run +from typing import List, Optional # This is ported from the original macdeployqt with modifications @@ -49,28 +52,18 @@ class FrameworkInfo(object): return False def __str__(self): - return """ Framework name: %s - Framework directory: %s - Framework path: %s - Binary name: %s - Binary directory: %s - Binary path: %s - Version: %s - Install name: %s - Deployed install name: %s - Source file Path: %s - Deployed Directory (relative to bundle): %s -""" % (self.frameworkName, - self.frameworkDirectory, - self.frameworkPath, - self.binaryName, - self.binaryDirectory, - self.binaryPath, - self.version, - self.installName, - self.deployedInstallName, - self.sourceFilePath, - self.destinationDirectory) + return f""" Framework name: {self.frameworkName} + Framework directory: {self.frameworkDirectory} + Framework path: {self.frameworkPath} + Binary name: {self.binaryName} + Binary directory: {self.binaryDirectory} + Binary path: {self.binaryPath} + Version: {self.version} + Install name: {self.installName} + Deployed install name: {self.deployedInstallName} + Source file Path: {self.sourceFilePath} + Deployed Directory (relative to bundle): {self.destinationDirectory} +""" def isDylib(self): return self.frameworkName.endswith(".dylib") @@ -86,18 +79,18 @@ class FrameworkInfo(object): bundleBinaryDirectory = "Contents/MacOS" @classmethod - def fromOtoolLibraryLine(cls, line): + def fromOtoolLibraryLine(cls, line: str) -> Optional['FrameworkInfo']: # Note: line must be trimmed if line == "": return None - # Don't deploy system libraries (exception for libQtuitools and libQtlucene). 
- if line.startswith("/System/Library/") or line.startswith("@executable_path") or (line.startswith("/usr/lib/") and "libQt" not in line): + # Don't deploy system libraries + if line.startswith("/System/Library/") or line.startswith("@executable_path") or line.startswith("/usr/lib/"): return None m = cls.reOLine.match(line) if m is None: - raise RuntimeError("otool line could not be parsed: " + line) + raise RuntimeError(f"otool line could not be parsed: {line}") path = m.group(1) @@ -117,7 +110,7 @@ class FrameworkInfo(object): info.version = "-" info.installName = path - info.deployedInstallName = "@executable_path/../Frameworks/" + info.binaryName + info.deployedInstallName = f"@executable_path/../Frameworks/{info.binaryName}" info.sourceFilePath = path info.destinationDirectory = cls.bundleFrameworkDirectory else: @@ -129,7 +122,7 @@ class FrameworkInfo(object): break i += 1 if i == len(parts): - raise RuntimeError("Could not find .framework or .dylib in otool line: " + line) + raise RuntimeError(f"Could not find .framework or .dylib in otool line: {line}") info.frameworkName = parts[i] info.frameworkDirectory = "/".join(parts[:i]) @@ -140,25 +133,24 @@ class FrameworkInfo(object): info.binaryPath = os.path.join(info.binaryDirectory, info.binaryName) info.version = parts[i+2] - info.deployedInstallName = "@executable_path/../Frameworks/" + os.path.join(info.frameworkName, info.binaryPath) + info.deployedInstallName = f"@executable_path/../Frameworks/{os.path.join(info.frameworkName, info.binaryPath)}" info.destinationDirectory = os.path.join(cls.bundleFrameworkDirectory, info.frameworkName, info.binaryDirectory) info.sourceResourcesDirectory = os.path.join(info.frameworkPath, "Resources") info.sourceContentsDirectory = os.path.join(info.frameworkPath, "Contents") info.sourceVersionContentsDirectory = os.path.join(info.frameworkPath, "Versions", info.version, "Contents") info.destinationResourcesDirectory = os.path.join(cls.bundleFrameworkDirectory, info.frameworkName, "Resources") - info.destinationContentsDirectory = os.path.join(cls.bundleFrameworkDirectory, info.frameworkName, "Contents") info.destinationVersionContentsDirectory = os.path.join(cls.bundleFrameworkDirectory, info.frameworkName, "Versions", info.version, "Contents") return info class ApplicationBundleInfo(object): - def __init__(self, path): + def __init__(self, path: str): self.path = path - appName = os.path.splitext(os.path.basename(path))[0] - self.binaryPath = os.path.join(path, "Contents", "MacOS", appName) + # for backwards compatibility reasons, this must remain as Bitcoin-Qt + self.binaryPath = os.path.join(path, "Contents", "MacOS", "Bitcoin-Qt") if not os.path.exists(self.binaryPath): - raise RuntimeError("Could not find bundle binary for " + path) + raise RuntimeError(f"Could not find bundle binary for {path}") self.resourcesPath = os.path.join(path, "Contents", "Resources") self.pluginPath = os.path.join(path, "Contents", "PlugIns") @@ -168,17 +160,11 @@ class DeploymentInfo(object): self.pluginPath = None self.deployedFrameworks = [] - def detectQtPath(self, frameworkDirectory): + def detectQtPath(self, frameworkDirectory: str): parentDir = os.path.dirname(frameworkDirectory) if os.path.exists(os.path.join(parentDir, "translations")): # Classic layout, e.g. "/usr/local/Trolltech/Qt-4.x.x" self.qtPath = parentDir - elif os.path.exists(os.path.join(parentDir, "share", "qt4", "translations")): - # MacPorts layout, e.g. 
"/opt/local/share/qt4" - self.qtPath = os.path.join(parentDir, "share", "qt4") - elif os.path.exists(os.path.join(os.path.dirname(parentDir), "share", "qt4", "translations")): - # Newer Macports layout - self.qtPath = os.path.join(os.path.dirname(parentDir), "share", "qt4") else: self.qtPath = os.getenv("QTDIR", None) @@ -187,31 +173,27 @@ class DeploymentInfo(object): if os.path.exists(pluginPath): self.pluginPath = pluginPath - def usesFramework(self, name): - nameDot = "%s." % name - libNameDot = "lib%s." % name + def usesFramework(self, name: str) -> bool: for framework in self.deployedFrameworks: if framework.endswith(".framework"): - if framework.startswith(nameDot): + if framework.startswith(f"{name}."): return True elif framework.endswith(".dylib"): - if framework.startswith(libNameDot): + if framework.startswith(f"lib{name}."): return True return False -def getFrameworks(binaryPath, verbose): - if verbose >= 3: - print "Inspecting with otool: " + binaryPath +def getFrameworks(binaryPath: str, verbose: int) -> List[FrameworkInfo]: + if verbose: + print(f"Inspecting with otool: {binaryPath}") otoolbin=os.getenv("OTOOL", "otool") - otool = subprocess.Popen([otoolbin, "-L", binaryPath], stdout=subprocess.PIPE, stderr=subprocess.PIPE) - o_stdout, o_stderr = otool.communicate() + otool = run([otoolbin, "-L", binaryPath], stdout=PIPE, stderr=PIPE, universal_newlines=True) if otool.returncode != 0: - if verbose >= 1: - sys.stderr.write(o_stderr) - sys.stderr.flush() - raise RuntimeError("otool failed with return code %d" % otool.returncode) - - otoolLines = o_stdout.split("\n") + sys.stderr.write(otool.stderr) + sys.stderr.flush() + raise RuntimeError(f"otool failed with return code {otool.returncode}") + + otoolLines = otool.stdout.split("\n") otoolLines.pop(0) # First line is the inspected binary if ".framework" in binaryPath or binaryPath.endswith(".dylib"): otoolLines.pop(0) # Frameworks and dylibs list themselves as a dependency. 
@@ -221,103 +203,91 @@ def getFrameworks(binaryPath, verbose): line = line.replace("@loader_path", os.path.dirname(binaryPath)) info = FrameworkInfo.fromOtoolLibraryLine(line.strip()) if info is not None: - if verbose >= 3: - print "Found framework:" - print info + if verbose: + print("Found framework:") + print(info) libraries.append(info) return libraries -def runInstallNameTool(action, *args): - installnametoolbin=os.getenv("INSTALLNAMETOOL", "install_name_tool") - subprocess.check_call([installnametoolbin, "-"+action] + list(args)) +def runInstallNameTool(action: str, *args): + installnametoolbin=os.getenv("INSTALL_NAME_TOOL", "install_name_tool") + run([installnametoolbin, "-"+action] + list(args), check=True) -def changeInstallName(oldName, newName, binaryPath, verbose): - if verbose >= 3: - print "Using install_name_tool:" - print " in", binaryPath - print " change reference", oldName - print " to", newName +def changeInstallName(oldName: str, newName: str, binaryPath: str, verbose: int): + if verbose: + print("Using install_name_tool:") + print(" in", binaryPath) + print(" change reference", oldName) + print(" to", newName) runInstallNameTool("change", oldName, newName, binaryPath) -def changeIdentification(id, binaryPath, verbose): - if verbose >= 3: - print "Using install_name_tool:" - print " change identification in", binaryPath - print " to", id +def changeIdentification(id: str, binaryPath: str, verbose: int): + if verbose: + print("Using install_name_tool:") + print(" change identification in", binaryPath) + print(" to", id) runInstallNameTool("id", id, binaryPath) -def runStrip(binaryPath, verbose): +def runStrip(binaryPath: str, verbose: int): stripbin=os.getenv("STRIP", "strip") - if verbose >= 3: - print "Using strip:" - print " stripped", binaryPath - subprocess.check_call([stripbin, "-x", binaryPath]) + if verbose: + print("Using strip:") + print(" stripped", binaryPath) + run([stripbin, "-x", binaryPath], check=True) -def copyFramework(framework, path, verbose): +def copyFramework(framework: FrameworkInfo, path: str, verbose: int) -> Optional[str]: if framework.sourceFilePath.startswith("Qt"): #standard place for Nokia Qt installer's frameworks - fromPath = "/Library/Frameworks/" + framework.sourceFilePath + fromPath = f"/Library/Frameworks/{framework.sourceFilePath}" else: fromPath = framework.sourceFilePath toDir = os.path.join(path, framework.destinationDirectory) toPath = os.path.join(toDir, framework.binaryName) - - if not os.path.exists(fromPath): - raise RuntimeError("No file at " + fromPath) - - if os.path.exists(toPath): - return None # Already there - - if not os.path.exists(toDir): - os.makedirs(toDir) - - shutil.copy2(fromPath, toPath) - if verbose >= 3: - print "Copied:", fromPath - print " to:", toPath + + if framework.isDylib(): + if not os.path.exists(fromPath): + raise RuntimeError(f"No file at {fromPath}") + + if os.path.exists(toPath): + return None # Already there + + if not os.path.exists(toDir): + os.makedirs(toDir) + + shutil.copy2(fromPath, toPath) + if verbose: + print("Copied:", fromPath) + print(" to:", toPath) + else: + to_dir = os.path.join(path, "Contents", "Frameworks", framework.frameworkName) + if os.path.exists(to_dir): + return None # Already there + + from_dir = framework.frameworkPath + if not os.path.exists(from_dir): + raise RuntimeError(f"No directory at {from_dir}") + + shutil.copytree(from_dir, to_dir, symlinks=True) + if verbose: + print("Copied:", from_dir) + print(" to:", to_dir) + + headers_link = os.path.join(to_dir, 
"Headers") + if os.path.exists(headers_link): + os.unlink(headers_link) + + headers_dir = os.path.join(to_dir, framework.binaryDirectory, "Headers") + if os.path.exists(headers_dir): + shutil.rmtree(headers_dir) permissions = os.stat(toPath) if not permissions.st_mode & stat.S_IWRITE: os.chmod(toPath, permissions.st_mode | stat.S_IWRITE) - if not framework.isDylib(): # Copy resources for real frameworks - - linkfrom = os.path.join(path, "Contents","Frameworks", framework.frameworkName, "Versions", "Current") - linkto = framework.version - if not os.path.exists(linkfrom): - os.symlink(linkto, linkfrom) - if verbose >= 2: - print "Linked:", linkfrom, "->", linkto - fromResourcesDir = framework.sourceResourcesDirectory - if os.path.exists(fromResourcesDir): - toResourcesDir = os.path.join(path, framework.destinationResourcesDirectory) - shutil.copytree(fromResourcesDir, toResourcesDir, symlinks=True) - if verbose >= 3: - print "Copied resources:", fromResourcesDir - print " to:", toResourcesDir - fromContentsDir = framework.sourceVersionContentsDirectory - if not os.path.exists(fromContentsDir): - fromContentsDir = framework.sourceContentsDirectory - if os.path.exists(fromContentsDir): - toContentsDir = os.path.join(path, framework.destinationVersionContentsDirectory) - shutil.copytree(fromContentsDir, toContentsDir, symlinks=True) - contentslinkfrom = os.path.join(path, framework.destinationContentsDirectory) - if verbose >= 3: - print "Copied Contents:", fromContentsDir - print " to:", toContentsDir - elif framework.frameworkName.startswith("libQtGui"): # Copy qt_menu.nib (applies to non-framework layout) - qtMenuNibSourcePath = os.path.join(framework.frameworkDirectory, "Resources", "qt_menu.nib") - qtMenuNibDestinationPath = os.path.join(path, "Contents", "Resources", "qt_menu.nib") - if os.path.exists(qtMenuNibSourcePath) and not os.path.exists(qtMenuNibDestinationPath): - shutil.copytree(qtMenuNibSourcePath, qtMenuNibDestinationPath, symlinks=True) - if verbose >= 3: - print "Copied for libQtGui:", qtMenuNibSourcePath - print " to:", qtMenuNibDestinationPath - return toPath -def deployFrameworks(frameworks, bundlePath, binaryPath, strip, verbose, deploymentInfo=None): +def deployFrameworks(frameworks: List[FrameworkInfo], bundlePath: str, binaryPath: str, strip: bool, verbose: int, deploymentInfo: Optional[DeploymentInfo] = None) -> DeploymentInfo: if deploymentInfo is None: deploymentInfo = DeploymentInfo() @@ -325,22 +295,20 @@ def deployFrameworks(frameworks, bundlePath, binaryPath, strip, verbose, deploym framework = frameworks.pop(0) deploymentInfo.deployedFrameworks.append(framework.frameworkName) - if verbose >= 2: - print "Processing", framework.frameworkName, "..." + print("Processing", framework.frameworkName, "...") # Get the Qt path from one of the Qt frameworks if deploymentInfo.qtPath is None and framework.isQtFramework(): deploymentInfo.detectQtPath(framework.frameworkDirectory) if framework.installName.startswith("@executable_path") or framework.installName.startswith(bundlePath): - if verbose >= 2: - print framework.frameworkName, "already deployed, skipping." + print(framework.frameworkName, "already deployed, skipping.") continue # install_name_tool the new id into the binary changeInstallName(framework.installName, framework.deployedInstallName, binaryPath, verbose) - # Copy farmework to app bundle. + # Copy framework to app bundle. deployedBinaryPath = copyFramework(framework, bundlePath, verbose) # Skip the rest if already was deployed. 
if deployedBinaryPath is None: @@ -363,88 +331,34 @@ def deployFrameworks(frameworks, bundlePath, binaryPath, strip, verbose, deploym return deploymentInfo -def deployFrameworksForAppBundle(applicationBundle, strip, verbose): +def deployFrameworksForAppBundle(applicationBundle: ApplicationBundleInfo, strip: bool, verbose: int) -> DeploymentInfo: frameworks = getFrameworks(applicationBundle.binaryPath, verbose) - if len(frameworks) == 0 and verbose >= 1: - print "Warning: Could not find any external frameworks to deploy in %s." % (applicationBundle.path) + if len(frameworks) == 0: + print(f"Warning: Could not find any external frameworks to deploy in {applicationBundle.path}.") return DeploymentInfo() else: return deployFrameworks(frameworks, applicationBundle.path, applicationBundle.binaryPath, strip, verbose) -def deployPlugins(appBundleInfo, deploymentInfo, strip, verbose): - # Lookup available plugins, exclude unneeded +def deployPlugins(appBundleInfo: ApplicationBundleInfo, deploymentInfo: DeploymentInfo, strip: bool, verbose: int): plugins = [] if deploymentInfo.pluginPath is None: return for dirpath, dirnames, filenames in os.walk(deploymentInfo.pluginPath): pluginDirectory = os.path.relpath(dirpath, deploymentInfo.pluginPath) - if pluginDirectory == "designer": - # Skip designer plugins + + if pluginDirectory not in ['styles', 'platforms']: continue - elif pluginDirectory == "phonon" or pluginDirectory == "phonon_backend": - # Deploy the phonon plugins only if phonon is in use - if not deploymentInfo.usesFramework("phonon"): - continue - elif pluginDirectory == "sqldrivers": - # Deploy the sql plugins only if QtSql is in use - if not deploymentInfo.usesFramework("QtSql"): - continue - elif pluginDirectory == "script": - # Deploy the script plugins only if QtScript is in use - if not deploymentInfo.usesFramework("QtScript"): - continue - elif pluginDirectory == "qmltooling" or pluginDirectory == "qml1tooling": - # Deploy the qml plugins only if QtDeclarative is in use - if not deploymentInfo.usesFramework("QtDeclarative"): - continue - elif pluginDirectory == "bearer": - # Deploy the bearer plugins only if QtNetwork is in use - if not deploymentInfo.usesFramework("QtNetwork"): - continue - elif pluginDirectory == "position": - # Deploy the position plugins only if QtPositioning is in use - if not deploymentInfo.usesFramework("QtPositioning"): - continue - elif pluginDirectory == "sensors" or pluginDirectory == "sensorgestures": - # Deploy the sensor plugins only if QtSensors is in use - if not deploymentInfo.usesFramework("QtSensors"): - continue - elif pluginDirectory == "audio" or pluginDirectory == "playlistformats": - # Deploy the audio plugins only if QtMultimedia is in use - if not deploymentInfo.usesFramework("QtMultimedia"): - continue - elif pluginDirectory == "mediaservice": - # Deploy the mediaservice plugins only if QtMultimediaWidgets is in use - if not deploymentInfo.usesFramework("QtMultimediaWidgets"): - continue for pluginName in filenames: pluginPath = os.path.join(pluginDirectory, pluginName) - if pluginName.endswith("_debug.dylib"): - # Skip debug plugins + + if pluginName.split('.')[0] not in ['libqminimal', 'libqcocoa', 'libqmacstyle']: continue - elif pluginPath == "imageformats/libqsvg.dylib" or pluginPath == "iconengines/libqsvgicon.dylib": - # Deploy the svg plugins only if QtSvg is in use - if not deploymentInfo.usesFramework("QtSvg"): - continue - elif pluginPath == "accessible/libqtaccessiblecompatwidgets.dylib": - # Deploy accessibility for Qt3Support 
only if the Qt3Support is in use - if not deploymentInfo.usesFramework("Qt3Support"): - continue - elif pluginPath == "graphicssystems/libqglgraphicssystem.dylib": - # Deploy the opengl graphicssystem plugin only if QtOpenGL is in use - if not deploymentInfo.usesFramework("QtOpenGL"): - continue - elif pluginPath == "accessible/libqtaccessiblequick.dylib": - # Deploy the accessible qtquick plugin only if QtQuick is in use - if not deploymentInfo.usesFramework("QtQuick"): - continue plugins.append((pluginDirectory, pluginName)) for pluginDirectory, pluginName in plugins: - if verbose >= 2: - print "Processing plugin", os.path.join(pluginDirectory, pluginName), "..." + print("Processing plugin", os.path.join(pluginDirectory, pluginName), "...") sourcePath = os.path.join(deploymentInfo.pluginPath, pluginDirectory, pluginName) destinationDirectory = os.path.join(appBundleInfo.pluginPath, pluginDirectory) @@ -453,9 +367,9 @@ def deployPlugins(appBundleInfo, deploymentInfo, strip, verbose): destinationPath = os.path.join(destinationDirectory, pluginName) shutil.copy2(sourcePath, destinationPath) - if verbose >= 3: - print "Copied:", sourcePath - print " to:", destinationPath + if verbose: + print("Copied:", sourcePath) + print(" to:", destinationPath) if strip: runStrip(destinationPath, verbose) @@ -469,139 +383,54 @@ def deployPlugins(appBundleInfo, deploymentInfo, strip, verbose): if dependency.frameworkName not in deploymentInfo.deployedFrameworks: deployFrameworks([dependency], appBundleInfo.path, destinationPath, strip, verbose, deploymentInfo) -qt_conf="""[Paths] -Translations=Resources -Plugins=PlugIns -""" - ap = ArgumentParser(description="""Improved version of macdeployqt. Outputs a ready-to-deploy app in a folder "dist" and optionally wraps it in a .dmg file. Note, that the "dist" folder will be deleted before deploying on each run. -Optionally, Qt translation files (.qm) and additional resources can be added to the bundle. - -Also optionally signs the .app bundle; set the CODESIGNARGS environment variable to pass arguments -to the codesign tool. -E.g. CODESIGNARGS='--sign "Developer ID Application: ..." 
--keychain /encrypted/foo.keychain'""") +Optionally, Qt translation files (.qm) can be added to the bundle.""") ap.add_argument("app_bundle", nargs=1, metavar="app-bundle", help="application bundle to be deployed") -ap.add_argument("-verbose", type=int, nargs=1, default=[1], metavar="<0-3>", help="0 = no output, 1 = error/warning (default), 2 = normal, 3 = debug") +ap.add_argument("appname", nargs=1, metavar="appname", help="name of the app being deployed") +ap.add_argument("-verbose", nargs="?", const=True, help="Output additional debugging information") ap.add_argument("-no-plugins", dest="plugins", action="store_false", default=True, help="skip plugin deployment") ap.add_argument("-no-strip", dest="strip", action="store_false", default=True, help="don't run 'strip' on the binaries") -ap.add_argument("-sign", dest="sign", action="store_true", default=False, help="sign .app bundle with codesign tool") -ap.add_argument("-dmg", nargs="?", const="", metavar="basename", help="create a .dmg disk image; if basename is not specified, a camel-cased version of the app name is used") -ap.add_argument("-fancy", nargs=1, metavar="plist", default=[], help="make a fancy looking disk image using the given plist file with instructions; requires -dmg to work") -ap.add_argument("-add-qt-tr", nargs=1, metavar="languages", default=[], help="add Qt translation files to the bundle's ressources; the language list must be separated with commas, not with whitespace") -ap.add_argument("-translations-dir", nargs=1, metavar="path", default=None, help="Path to Qt's translation files") -ap.add_argument("-add-resources", nargs="+", metavar="path", default=[], help="list of additional files or folders to be copied into the bundle's resources; must be the last argument") +ap.add_argument("-dmg", nargs="?", const="", metavar="basename", help="create a .dmg disk image") +ap.add_argument("-translations-dir", nargs=1, metavar="path", default=None, help="Path to Qt's translations. Base translations will automatically be added to the bundle's resources.") config = ap.parse_args() -verbose = config.verbose[0] +verbose = config.verbose # ------------------------------------------------ app_bundle = config.app_bundle[0] +appname = config.appname[0] if not os.path.exists(app_bundle): - if verbose >= 1: - sys.stderr.write("Error: Could not find app bundle \"%s\"\n" % (app_bundle)) + sys.stderr.write(f"Error: Could not find app bundle \"{app_bundle}\"\n") sys.exit(1) -app_bundle_name = os.path.splitext(os.path.basename(app_bundle))[0] - -# ------------------------------------------------ -translations_dir = None -if config.translations_dir and config.translations_dir[0]: - if os.path.exists(config.translations_dir[0]): - translations_dir = config.translations_dir[0] - else: - if verbose >= 1: - sys.stderr.write("Error: Could not find translation dir \"%s\"\n" % (translations_dir)) - sys.exit(1) -# ------------------------------------------------ - -for p in config.add_resources: - if verbose >= 3: - print "Checking for \"%s\"..." % p - if not os.path.exists(p): - if verbose >= 1: - sys.stderr.write("Error: Could not find additional resource file \"%s\"\n" % (p)) - sys.exit(1) - -# ------------------------------------------------ - -if len(config.fancy) == 1: - if verbose >= 3: - print "Fancy: Importing plistlib..." 
- try: - import plistlib - except ImportError: - if verbose >= 1: - sys.stderr.write("Error: Could not import plistlib which is required for fancy disk images.\n") - sys.exit(1) - - p = config.fancy[0] - if verbose >= 3: - print "Fancy: Loading \"%s\"..." % p - if not os.path.exists(p): - if verbose >= 1: - sys.stderr.write("Error: Could not find fancy disk image plist at \"%s\"\n" % (p)) - sys.exit(1) - - try: - fancy = plistlib.readPlist(p) - except: - if verbose >= 1: - sys.stderr.write("Error: Could not parse fancy disk image plist at \"%s\"\n" % (p)) - sys.exit(1) - - try: - assert not fancy.has_key("window_bounds") or (isinstance(fancy["window_bounds"], list) and len(fancy["window_bounds"]) == 4) - assert not fancy.has_key("background_picture") or isinstance(fancy["background_picture"], str) - assert not fancy.has_key("icon_size") or isinstance(fancy["icon_size"], int) - assert not fancy.has_key("applications_symlink") or isinstance(fancy["applications_symlink"], bool) - if fancy.has_key("items_position"): - assert isinstance(fancy["items_position"], dict) - for key, value in fancy["items_position"].iteritems(): - assert isinstance(value, list) and len(value) == 2 and isinstance(value[0], int) and isinstance(value[1], int) - except: - if verbose >= 1: - sys.stderr.write("Error: Bad format of fancy disk image plist at \"%s\"\n" % (p)) - sys.exit(1) - - if fancy.has_key("background_picture"): - bp = fancy["background_picture"] - if verbose >= 3: - print "Fancy: Resolving background picture \"%s\"..." % bp - if not os.path.exists(bp): - bp = os.path.join(os.path.dirname(p), bp) - if not os.path.exists(bp): - if verbose >= 1: - sys.stderr.write("Error: Could not find background picture at \"%s\" or \"%s\"\n" % (fancy["background_picture"], bp)) - sys.exit(1) - else: - fancy["background_picture"] = bp -else: - fancy = None - # ------------------------------------------------ if os.path.exists("dist"): - if verbose >= 2: - print "+ Removing old dist folder +" - + print("+ Removing existing dist folder +") shutil.rmtree("dist") +if os.path.exists(appname + ".dmg"): + print("+ Removing existing DMG +") + os.unlink(appname + ".dmg") + +if os.path.exists(appname + ".temp.dmg"): + os.unlink(appname + ".temp.dmg") + # ------------------------------------------------ -target = os.path.join("dist", app_bundle) +target = os.path.join("dist", "Bitcoin-Qt.app") -if verbose >= 2: - print "+ Copying source bundle +" -if verbose >= 3: - print app_bundle, "->", target +print("+ Copying source bundle +") +if verbose: + print(app_bundle, "->", target) os.mkdir("dist") shutil.copytree(app_bundle, target, symlinks=True) @@ -610,273 +439,160 @@ applicationBundle = ApplicationBundleInfo(target) # ------------------------------------------------ -if verbose >= 2: - print "+ Deploying frameworks +" +print("+ Deploying frameworks +") try: deploymentInfo = deployFrameworksForAppBundle(applicationBundle, config.strip, verbose) if deploymentInfo.qtPath is None: deploymentInfo.qtPath = os.getenv("QTDIR", None) if deploymentInfo.qtPath is None: - if verbose >= 1: - sys.stderr.write("Warning: Could not detect Qt's path, skipping plugin deployment!\n") + sys.stderr.write("Warning: Could not detect Qt's path, skipping plugin deployment!\n") config.plugins = False except RuntimeError as e: - if verbose >= 1: - sys.stderr.write("Error: %s\n" % str(e)) + sys.stderr.write(f"Error: {str(e)}\n") sys.exit(1) # ------------------------------------------------ if config.plugins: - if verbose >= 2: - print "+ Deploying plugins +" 
+ print("+ Deploying plugins +") try: deployPlugins(applicationBundle, deploymentInfo, config.strip, verbose) except RuntimeError as e: - if verbose >= 1: - sys.stderr.write("Error: %s\n" % str(e)) + sys.stderr.write(f"Error: {str(e)}\n") sys.exit(1) # ------------------------------------------------ -if len(config.add_qt_tr) == 0: - add_qt_tr = [] -else: - if translations_dir is not None: - qt_tr_dir = translations_dir - else: - if deploymentInfo.qtPath is not None: - qt_tr_dir = os.path.join(deploymentInfo.qtPath, "translations") - else: - sys.stderr.write("Error: Could not find Qt translation path\n") - sys.exit(1) - add_qt_tr = ["qt_%s.qm" % lng for lng in config.add_qt_tr[0].split(",")] - for lng_file in add_qt_tr: - p = os.path.join(qt_tr_dir, lng_file) - if verbose >= 3: - print "Checking for \"%s\"..." % p - if not os.path.exists(p): - if verbose >= 1: - sys.stderr.write("Error: Could not find Qt translation file \"%s\"\n" % (lng_file)) - sys.exit(1) +if config.translations_dir: + if not Path(config.translations_dir[0]).exists(): + sys.stderr.write(f"Error: Could not find translation dir \"{config.translations_dir[0]}\"\n") + sys.exit(1) -# ------------------------------------------------ +print("+ Adding Qt translations +") -if verbose >= 2: - print "+ Installing qt.conf +" +translations = Path(config.translations_dir[0]) -f = open(os.path.join(applicationBundle.resourcesPath, "qt.conf"), "wb") -f.write(qt_conf) -f.close() +regex = re.compile('qt_[a-z]*(.qm|_[A-Z]*.qm)') + +lang_files = [x for x in translations.iterdir() if regex.match(x.name)] + +for file in lang_files: + if verbose: + print(file.as_posix(), "->", os.path.join(applicationBundle.resourcesPath, file.name)) + shutil.copy2(file.as_posix(), os.path.join(applicationBundle.resourcesPath, file.name)) # ------------------------------------------------ -if len(add_qt_tr) > 0 and verbose >= 2: - print "+ Adding Qt translations +" +print("+ Installing qt.conf +") -for lng_file in add_qt_tr: - if verbose >= 3: - print os.path.join(qt_tr_dir, lng_file), "->", os.path.join(applicationBundle.resourcesPath, lng_file) - shutil.copy2(os.path.join(qt_tr_dir, lng_file), os.path.join(applicationBundle.resourcesPath, lng_file)) +qt_conf="""[Paths] +Translations=Resources +Plugins=PlugIns +""" + +with open(os.path.join(applicationBundle.resourcesPath, "qt.conf"), "wb") as f: + f.write(qt_conf.encode()) # ------------------------------------------------ -if len(config.add_resources) > 0 and verbose >= 2: - print "+ Adding additional resources +" +print("+ Generating .DS_Store +") + +output_file = os.path.join("dist", ".DS_Store") + +ds = DSStore.open(output_file, 'w+') + +ds['.']['bwsp'] = { + 'WindowBounds': '{{300, 280}, {500, 343}}', + 'PreviewPaneVisibility': False, +} + +icvp = { + 'gridOffsetX': 0.0, + 'textSize': 12.0, + 'viewOptionsVersion': 1, + 'backgroundImageAlias': 
b'\x00\x00\x00\x00\x02\x1e\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xd1\x94\\\xb0H+\x00\x05\x00\x00\x00\x98\x0fbackground.tiff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x99\xd19\xb0\xf8\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x00\x00\r\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b.background\x00\x00\x10\x00\x08\x00\x00\xd1\x94\\\xb0\x00\x00\x00\x11\x00\x08\x00\x00\xd19\xb0\xf8\x00\x00\x00\x01\x00\x04\x00\x00\x00\x98\x00\x0e\x00 \x00\x0f\x00b\x00a\x00c\x00k\x00g\x00r\x00o\x00u\x00n\x00d\x00.\x00t\x00i\x00f\x00f\x00\x0f\x00\x02\x00\x00\x00\x12\x00\x1c/.background/background.tiff\x00\x14\x01\x06\x00\x00\x00\x00\x01\x06\x00\x02\x00\x00\x0cMacintosh HD\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xce\x97\xab\xc3H+\x00\x00\x01\x88[\x88\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02u\xab\x8d\xd1\x94\\\xb0devrddsk\xff\xff\xff\xff\x00\x00\t \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07bitcoin\x00\x00\x10\x00\x08\x00\x00\xce\x97\xab\xc3\x00\x00\x00\x11\x00\x08\x00\x00\xd1\x94\\\xb0\x00\x00\x00\x01\x00\x14\x01\x88[\x88\x00\x16\xa9\t\x00\x08\xfaR\x00\x08\xfaQ\x00\x02d\x8e\x00\x0e\x00\x02\x00\x00\x00\x0f\x00\x1a\x00\x0c\x00M\x00a\x00c\x00i\x00n\x00t\x00o\x00s\x00h\x00 \x00H\x00D\x00\x13\x00\x01/\x00\x00\x15\x00\x02\x00\x14\xff\xff\x00\x00\xff\xff\x00\x00', + 'backgroundColorBlue': 1.0, + 'iconSize': 96.0, + 'backgroundColorGreen': 1.0, + 'arrangeBy': 'none', + 'showIconPreview': True, + 'gridSpacing': 100.0, + 'gridOffsetY': 0.0, + 'showItemInfo': False, + 'labelOnBottom': True, + 'backgroundType': 2, + 'backgroundColorRed': 1.0 +} +alias = Alias().from_bytes(icvp['backgroundImageAlias']) +alias.volume.name = appname +alias.volume.posix_path = '/Volumes/' + appname +icvp['backgroundImageAlias'] = alias.to_bytes() +ds['.']['icvp'] = icvp + +ds['.']['vSrn'] = ('long', 1) + +ds['Applications']['Iloc'] = (370, 156) +ds['Bitcoin-Qt.app']['Iloc'] = (128, 156) + +ds.flush() +ds.close() -for p in config.add_resources: - t = os.path.join(applicationBundle.resourcesPath, os.path.basename(p)) - if verbose >= 3: - print p, "->", t - if os.path.isdir(p): - shutil.copytree(p, t, symlinks=True) - else: - shutil.copy2(p, t) +# ------------------------------------------------ + +if platform.system() == "Darwin": + subprocess.check_call(f"codesign --deep --force --sign - {target}", shell=True) + +print("+ Installing background.tiff +") + +bg_path = os.path.join('dist', '.background', 'background.tiff') +os.mkdir(os.path.dirname(bg_path)) + +tiff_path = os.path.join('contrib', 'macdeploy', 'background.tiff') +shutil.copy2(tiff_path, bg_path) # ------------------------------------------------ -if config.sign and 'CODESIGNARGS' not in os.environ: - print "You must set the CODESIGNARGS environment variable. Skipping signing." 
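
The `.DS_Store` written above is what makes the mounted disk image open with a fixed window size, pinned icon positions, and the background picture. Below is a minimal sketch of the same `ds_store` calls, with illustrative paths and coordinates; the background-alias handling is summarized in comments rather than repeated.

```python
# Sketch only: write a minimal .DS_Store using the same ds_store calls as above.
# Paths and coordinates are illustrative.
import os
from ds_store import DSStore

os.makedirs("dist", exist_ok=True)
ds = DSStore.open(os.path.join("dist", ".DS_Store"), 'w+')

# Finder window settings for the mounted volume.
ds['.']['bwsp'] = {
    'WindowBounds': '{{300, 280}, {500, 343}}',
    'PreviewPaneVisibility': False,
}

# Icon positions inside that window.
ds['Applications']['Iloc'] = (370, 156)
ds['Bitcoin-Qt.app']['Iloc'] = (128, 156)

ds.flush()
ds.close()

# The background picture is referenced via an 'icvp' entry whose
# 'backgroundImageAlias' is a mac_alias blob re-pointed at the new volume
# (alias.volume.name / alias.volume.posix_path), exactly as done above.
```
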
-elif config.sign: - if verbose >= 1: - print "Code-signing app bundle %s"%(target,) - subprocess.check_call("codesign --force %s %s"%(os.environ['CODESIGNARGS'], target), shell=True) +print("+ Generating symlink for /Applications +") + +os.symlink("/Applications", os.path.join('dist', "Applications")) # ------------------------------------------------ if config.dmg is not None: - #Patch in check_output for Python 2.6 - if "check_output" not in dir( subprocess ): - def f(*popenargs, **kwargs): - if 'stdout' in kwargs: - raise ValueError('stdout argument not allowed, it will be overridden.') - process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs) - output, unused_err = process.communicate() - retcode = process.poll() - if retcode: - cmd = kwargs.get("args") - if cmd is None: - cmd = popenargs[0] - raise CalledProcessError(retcode, cmd) - return output - subprocess.check_output = f - - def runHDIUtil(verb, image_basename, **kwargs): - hdiutil_args = ["hdiutil", verb, image_basename + ".dmg"] - if kwargs.has_key("capture_stdout"): - del kwargs["capture_stdout"] - run = subprocess.check_output - else: - if verbose < 2: - hdiutil_args.append("-quiet") - elif verbose >= 3: - hdiutil_args.append("-verbose") - run = subprocess.check_call - - for key, value in kwargs.iteritems(): - hdiutil_args.append("-" + key) - if not value is True: - hdiutil_args.append(str(value)) - - return run(hdiutil_args) - - if verbose >= 2: - if fancy is None: - print "+ Creating .dmg disk image +" - else: - print "+ Preparing .dmg disk image +" - - if config.dmg != "": - dmg_name = config.dmg - else: - spl = app_bundle_name.split(" ") - dmg_name = spl[0] + "".join(p.capitalize() for p in spl[1:]) - - if fancy is None: - try: - runHDIUtil("create", dmg_name, srcfolder="dist", format="UDBZ", volname=app_bundle_name, ov=True) - except subprocess.CalledProcessError as e: - sys.exit(e.returncode) - else: - if verbose >= 3: - print "Determining size of \"dist\"..." - size = 0 - for path, dirs, files in os.walk("dist"): - for file in files: - size += os.path.getsize(os.path.join(path, file)) - size += int(size * 0.1) - - if verbose >= 3: - print "Creating temp image for modification..." - try: - runHDIUtil("create", dmg_name + ".temp", srcfolder="dist", format="UDRW", size=size, volname=app_bundle_name, ov=True) - except subprocess.CalledProcessError as e: - sys.exit(e.returncode) - - if verbose >= 3: - print "Attaching temp image..." - try: - output = runHDIUtil("attach", dmg_name + ".temp", readwrite=True, noverify=True, noautoopen=True, capture_stdout=True) - except subprocess.CalledProcessError as e: - sys.exit(e.returncode) - - m = re.search("/Volumes/(.+$)", output) - disk_root = m.group(0) - disk_name = m.group(1) - - if verbose >= 2: - print "+ Applying fancy settings +" - - if fancy.has_key("background_picture"): - bg_path = os.path.join(disk_root, os.path.basename(fancy["background_picture"])) - if verbose >= 3: - print fancy["background_picture"], "->", bg_path - shutil.copy2(fancy["background_picture"], bg_path) - else: - bg_path = None - - if fancy.get("applications_symlink", False): - os.symlink("/Applications", os.path.join(disk_root, "Applications")) - - # The Python appscript package broke with OSX 10.8 and isn't being fixed. 
- # So we now build up an AppleScript string and use the osascript command - # to make the .dmg file pretty: - appscript = Template( """ - on run argv - tell application "Finder" - tell disk "$disk" - open - set current view of container window to icon view - set toolbar visible of container window to false - set statusbar visible of container window to false - set the bounds of container window to {$window_bounds} - set theViewOptions to the icon view options of container window - set arrangement of theViewOptions to not arranged - set icon size of theViewOptions to $icon_size - $background_commands - $items_positions - close -- close/reopen works around a bug... - open - update without registering applications - delay 5 - eject - end tell - end tell - end run - """) - - itemscript = Template('set position of item "${item}" of container window to {${position}}') - items_positions = [] - if fancy.has_key("items_position"): - for name, position in fancy["items_position"].iteritems(): - params = { "item" : name, "position" : ",".join([str(p) for p in position]) } - items_positions.append(itemscript.substitute(params)) - - params = { - "disk" : "Bitcoin-Qt", - "window_bounds" : "300,300,800,620", - "icon_size" : "96", - "background_commands" : "", - "items_positions" : "\n ".join(items_positions) - } - if fancy.has_key("window_bounds"): - params["window.bounds"] = ",".join([str(p) for p in fancy["window_bounds"]]) - if fancy.has_key("icon_size"): - params["icon_size"] = str(fancy["icon_size"]) - if bg_path is not None: - # Set background file, then call SetFile to make it invisible. - # (note: making it invisible first makes set background picture fail) - bgscript = Template("""set background picture of theViewOptions to file "$bgpic" - do shell script "SetFile -a V /Volumes/$disk/$bgpic" """) - params["background_commands"] = bgscript.substitute({"bgpic" : os.path.basename(bg_path), "disk" : params["disk"]}) - - s = appscript.substitute(params) - if verbose >= 2: - print("Running AppleScript:") - print(s) - - p = subprocess.Popen(['osascript', '-'], stdin=subprocess.PIPE) - p.communicate(input=s) - if p.returncode: - print("Error running osascript.") - - if verbose >= 2: - print "+ Finalizing .dmg disk image +" - time.sleep(5) - - try: - runHDIUtil("convert", dmg_name + ".temp", format="UDBZ", o=dmg_name + ".dmg", ov=True) - except subprocess.CalledProcessError as e: - sys.exit(e.returncode) - - os.unlink(dmg_name + ".temp.dmg") + print("+ Preparing .dmg disk image +") + + if verbose: + print("Determining size of \"dist\"...") + size = 0 + for path, dirs, files in os.walk("dist"): + for file in files: + size += os.path.getsize(os.path.join(path, file)) + size += int(size * 0.15) + + if verbose: + print("Creating temp image for modification...") + + tempname: str = appname + ".temp.dmg" + + run(["hdiutil", "create", tempname, "-srcfolder", "dist", "-format", "UDRW", "-size", str(size), "-volname", appname], check=True, universal_newlines=True) + + if verbose: + print("Attaching temp image...") + output = run(["hdiutil", "attach", tempname, "-readwrite"], check=True, universal_newlines=True, stdout=PIPE).stdout + + print("+ Finalizing .dmg disk image +") + + run(["hdiutil", "detach", f"/Volumes/{appname}"], universal_newlines=True) + + run(["hdiutil", "convert", tempname, "-format", "UDZO", "-o", appname, "-imagekey", "zlib-level=9"], check=True, universal_newlines=True) + + os.unlink(tempname) # ------------------------------------------------ -if verbose >= 2: - print "+ Done +" +print("+ 
Done +")
 
 sys.exit(0)
diff --git a/contrib/message-capture/message-capture-docs.md b/contrib/message-capture/message-capture-docs.md
new file mode 100644
index 0000000000000..730196846134f
--- /dev/null
+++ b/contrib/message-capture/message-capture-docs.md
@@ -0,0 +1,25 @@
+# Per-Peer Message Capture
+
+## Purpose
+
+This feature allows for message capture on a per-peer basis. It answers the simple question: "Can I see what messages my node is sending and receiving?"
+
+## Usage and Functionality
+
+* Run `bitcoind` with the `-capturemessages` option.
+* Look in the `message_capture` folder in your datadir.
+  * Typically this will be `~/.bitcoin/message_capture`.
+  * See that there are many folders inside, one for each peer, named by its IP address and port.
+  * Inside each peer's folder there are two `.dat` files: one is for received messages (`msgs_recv.dat`) and the other is for sent messages (`msgs_sent.dat`).
+* Run `contrib/message-capture/message-capture-parser.py` with the proper arguments.
+  * See the `-h` option for help.
+  * To see all messages, both sent and received, for all peers use:
+    ```
+    ./contrib/message-capture/message-capture-parser.py -o out.json \
+      ~/.bitcoin/message_capture/**/*.dat
+    ```
+  * Note: The messages in the given `.dat` files will be interleaved in chronological order, so passing both received and sent `.dat` files (as above with `*.dat`) produces a single chronologically ordered stream.
+  * If an output file is not provided (i.e. the `-o` option is not used), the output is printed to `stdout`.
+* View the resulting output.
+  * The output file is `JSON` formatted.
+  * Suggestion: use `jq` to view the output, with `jq . out.json`
diff --git a/contrib/message-capture/message-capture-parser.py b/contrib/message-capture/message-capture-parser.py
new file mode 100755
index 0000000000000..9988478f1b90c
--- /dev/null
+++ b/contrib/message-capture/message-capture-parser.py
@@ -0,0 +1,214 @@
+#!/usr/bin/env python3
+# Copyright (c) 2020 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Parse message capture binary files. To be used in conjunction with -capturemessages."""
+
+import argparse
+import os
+import shutil
+import sys
+from io import BytesIO
+import json
+from pathlib import Path
+from typing import Any, List, Optional
+
+sys.path.append(os.path.join(os.path.dirname(__file__), '../../test/functional'))
+
+from test_framework.messages import ser_uint256 # noqa: E402
+from test_framework.p2p import MESSAGEMAP # noqa: E402
+
+TIME_SIZE = 8
+LENGTH_SIZE = 4
+MSGTYPE_SIZE = 12
+
+# The test framework classes store hashes as large ints in many cases.
+# These are variables of type uint256 in core.
+# There isn't a way to distinguish between a large int and a large int that is actually a blob of bytes.
+# As such, they are itemized here.
+# Any variables with these names that are of type int are actually uint256 variables.
+# (These can be easily found by looking for calls to deser_uint256, deser_uint256_vector, and uint256_from_str in messages.py) +HASH_INTS = [ + "blockhash", + "block_hash", + "hash", + "hashMerkleRoot", + "hashPrevBlock", + "hashstop", + "prev_header", + "sha256", + "stop_hash", +] + +HASH_INT_VECTORS = [ + "hashes", + "headers", + "vHave", + "vHash", +] + + +class ProgressBar: + def __init__(self, total: float): + self.total = total + self.running = 0 + + def set_progress(self, progress: float): + cols = shutil.get_terminal_size()[0] + if cols <= 12: + return + max_blocks = cols - 9 + num_blocks = int(max_blocks * progress) + print('\r[ {}{} ] {:3.0f}%' + .format('#' * num_blocks, + ' ' * (max_blocks - num_blocks), + progress * 100), + end ='') + + def update(self, more: float): + self.running += more + self.set_progress(self.running / self.total) + + +def to_jsonable(obj: Any) -> Any: + if hasattr(obj, "__dict__"): + return obj.__dict__ + elif hasattr(obj, "__slots__"): + ret = {} # type: Any + for slot in obj.__slots__: + val = getattr(obj, slot, None) + if slot in HASH_INTS and isinstance(val, int): + ret[slot] = ser_uint256(val).hex() + elif slot in HASH_INT_VECTORS and isinstance(val[0], int): + ret[slot] = [ser_uint256(a).hex() for a in val] + else: + ret[slot] = to_jsonable(val) + return ret + elif isinstance(obj, list): + return [to_jsonable(a) for a in obj] + elif isinstance(obj, bytes): + return obj.hex() + else: + return obj + + +def process_file(path: str, messages: List[Any], recv: bool, progress_bar: Optional[ProgressBar]) -> None: + with open(path, 'rb') as f_in: + if progress_bar: + bytes_read = 0 + + while True: + if progress_bar: + # Update progress bar + diff = f_in.tell() - bytes_read - 1 + progress_bar.update(diff) + bytes_read = f_in.tell() - 1 + + # Read the Header + tmp_header_raw = f_in.read(TIME_SIZE + LENGTH_SIZE + MSGTYPE_SIZE) + if not tmp_header_raw: + break + tmp_header = BytesIO(tmp_header_raw) + time = int.from_bytes(tmp_header.read(TIME_SIZE), "little") # type: int + msgtype = tmp_header.read(MSGTYPE_SIZE).split(b'\x00', 1)[0] # type: bytes + length = int.from_bytes(tmp_header.read(LENGTH_SIZE), "little") # type: int + + # Start converting the message to a dictionary + msg_dict = {} + msg_dict["direction"] = "recv" if recv else "sent" + msg_dict["time"] = time + msg_dict["size"] = length # "size" is less readable here, but more readable in the output + + msg_ser = BytesIO(f_in.read(length)) + + # Determine message type + if msgtype not in MESSAGEMAP: + # Unrecognized message type + try: + msgtype_tmp = msgtype.decode() + if not msgtype_tmp.isprintable(): + raise UnicodeDecodeError + msg_dict["msgtype"] = msgtype_tmp + except UnicodeDecodeError: + msg_dict["msgtype"] = "UNREADABLE" + msg_dict["body"] = msg_ser.read().hex() + msg_dict["error"] = "Unrecognized message type." + messages.append(msg_dict) + print(f"WARNING - Unrecognized message type {msgtype} in {path}", file=sys.stderr) + continue + + # Deserialize the message + msg = MESSAGEMAP[msgtype]() + msg_dict["msgtype"] = msgtype.decode() + + try: + msg.deserialize(msg_ser) + except KeyboardInterrupt: + raise + except Exception: + # Unable to deserialize message body + msg_ser.seek(0, os.SEEK_SET) + msg_dict["body"] = msg_ser.read().hex() + msg_dict["error"] = "Unable to deserialize message." 
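
Concretely, each record in a capture `.dat` file is a fixed 24-byte header, an 8-byte little-endian timestamp, a 12-byte NUL-padded message type, and a 4-byte little-endian payload length, followed by the raw serialized message; that is what the header reads above decode. A self-contained sketch of that layout, using placeholder values rather than a real capture:

```python
# Sketch of the on-disk record layout parsed above. Values are placeholders.
from io import BytesIO

TIME_SIZE = 8
LENGTH_SIZE = 4
MSGTYPE_SIZE = 12

def write_record(f, time: int, msgtype: bytes, payload: bytes) -> None:
    f.write(time.to_bytes(TIME_SIZE, "little"))        # 8-byte little-endian timestamp
    f.write(msgtype.ljust(MSGTYPE_SIZE, b"\x00"))      # message type, NUL-padded to 12 bytes
    f.write(len(payload).to_bytes(LENGTH_SIZE, "little"))  # 4-byte little-endian length
    f.write(payload)                                   # raw serialized message body

buf = BytesIO()
write_record(buf, 1700000000000000, b"ping", b"\x01\x02\x03\x04\x05\x06\x07\x08")
buf.seek(0)

time = int.from_bytes(buf.read(TIME_SIZE), "little")
msgtype = buf.read(MSGTYPE_SIZE).split(b"\x00", 1)[0]
length = int.from_bytes(buf.read(LENGTH_SIZE), "little")
payload = buf.read(length)
assert (msgtype, length) == (b"ping", 8)
```
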
+ messages.append(msg_dict) + print(f"WARNING - Unable to deserialize message in {path}", file=sys.stderr) + continue + + # Convert body of message into a jsonable object + if length: + msg_dict["body"] = to_jsonable(msg) + messages.append(msg_dict) + + if progress_bar: + # Update the progress bar to the end of the current file + # in case we exited the loop early + f_in.seek(0, os.SEEK_END) # Go to end of file + diff = f_in.tell() - bytes_read - 1 + progress_bar.update(diff) + + +def main(): + parser = argparse.ArgumentParser( + description=__doc__, + epilog="EXAMPLE \n\t{0} -o out.json /message_capture/**/*.dat".format(sys.argv[0]), + formatter_class=argparse.RawTextHelpFormatter) + parser.add_argument( + "capturepaths", + nargs='+', + help="binary message capture files to parse.") + parser.add_argument( + "-o", "--output", + help="output file. If unset print to stdout") + parser.add_argument( + "-n", "--no-progress-bar", + action='store_true', + help="disable the progress bar. Automatically set if the output is not a terminal") + args = parser.parse_args() + capturepaths = [Path.cwd() / Path(capturepath) for capturepath in args.capturepaths] + output = Path.cwd() / Path(args.output) if args.output else False + use_progress_bar = (not args.no_progress_bar) and sys.stdout.isatty() + + messages = [] # type: List[Any] + if use_progress_bar: + total_size = sum(capture.stat().st_size for capture in capturepaths) + progress_bar = ProgressBar(total_size) + else: + progress_bar = None + + for capture in capturepaths: + process_file(str(capture), messages, "recv" in capture.stem, progress_bar) + + messages.sort(key=lambda msg: msg['time']) + + if use_progress_bar: + progress_bar.set_progress(1) + + jsonrep = json.dumps(messages) + if output: + with open(str(output), 'w+', encoding="utf8") as f_out: + f_out.write(jsonrep) + else: + print(jsonrep) + +if __name__ == "__main__": + main() diff --git a/contrib/qos/README.md b/contrib/qos/README.md index 5e0a975fc64ff..0ded87c58f4ee 100644 --- a/contrib/qos/README.md +++ b/contrib/qos/README.md @@ -1,5 +1,5 @@ -### Qos ### +### QoS (Quality of service) ### -This is a Linux bash script that will set up tc to limit the outgoing bandwidth for connections to the Bitcoin network. It limits outbound TCP traffic with a source or destination port of 8333, but not if the destination IP is within a LAN (defined as 192.168.x.x). +This is a Linux bash script that will set up tc to limit the outgoing bandwidth for connections to the Bitcoin network. It limits outbound TCP traffic with a source or destination port of 8333, but not if the destination IP is within a LAN. This means one can have an always-on bitcoind instance running, and another local bitcoind/bitcoin-qt instance which connects to this node and receives blocks from it. diff --git a/contrib/qos/tc.sh b/contrib/qos/tc.sh old mode 100644 new mode 100755 index f62060421202c..7ebcbf225175a --- a/contrib/qos/tc.sh +++ b/contrib/qos/tc.sh @@ -1,13 +1,22 @@ +#!/usr/bin/env bash +# +# Copyright (c) 2017-2021 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+ +export LC_ALL=C #network interface on which to limit traffic IF="eth0" #limit of the network interface in question LINKCEIL="1gbit" #limit outbound Bitcoin protocol traffic to this rate LIMIT="160kbit" -#defines the address space for which you wish to disable rate limiting -LOCALNET="192.168.0.0/16" +#defines the IPv4 address space for which you wish to disable rate limiting +LOCALNET_V4="192.168.0.0/16" +#defines the IPv6 address space for which you wish to disable rate limiting +LOCALNET_V6="fe80::/10" -#delete existing rules +#delete existing rules ('Error: Cannot delete qdisc with handle of zero.' means there weren't any.) tc qdisc del dev ${IF} root #add root class @@ -24,6 +33,12 @@ tc class add dev ${IF} parent 1:1 classid 1:11 htb rate ${LIMIT} ceil ${LIMIT} p tc filter add dev ${IF} parent 1: protocol ip prio 1 handle 1 fw classid 1:10 tc filter add dev ${IF} parent 1: protocol ip prio 2 handle 2 fw classid 1:11 +if [ -n "${LOCALNET_V6}" ] ; then + # v6 cannot have the same priority value as v4 + tc filter add dev ${IF} parent 1: protocol ipv6 prio 3 handle 1 fw classid 1:10 + tc filter add dev ${IF} parent 1: protocol ipv6 prio 4 handle 2 fw classid 1:11 +fi + #delete any existing rules #disable for now #ret=0 @@ -33,9 +48,15 @@ tc filter add dev ${IF} parent 1: protocol ip prio 2 handle 2 fw classid 1:11 #done #limit outgoing traffic to and from port 8333. but not when dealing with a host on the local network -# (defined by $LOCALNET) -# --set-mark marks packages matching these criteria with the number "2" -# these packages are filtered by the tc filter with "handle 2" +# (defined by $LOCALNET_V4 and $LOCALNET_V6) +# --set-mark marks packages matching these criteria with the number "2" (v4) +# --set-mark marks packages matching these criteria with the number "4" (v6) +# these packets are filtered by the tc filter with "handle 2" # this filter sends the packages into the 1:11 class, and this class is limited to ${LIMIT} -iptables -t mangle -A OUTPUT -p tcp -m tcp --dport 8333 ! -d ${LOCALNET} -j MARK --set-mark 0x2 -iptables -t mangle -A OUTPUT -p tcp -m tcp --sport 8333 ! -d ${LOCALNET} -j MARK --set-mark 0x2 +iptables -t mangle -A OUTPUT -p tcp -m tcp --dport 8333 ! -d ${LOCALNET_V4} -j MARK --set-mark 0x2 +iptables -t mangle -A OUTPUT -p tcp -m tcp --sport 8333 ! -d ${LOCALNET_V4} -j MARK --set-mark 0x2 + +if [ -n "${LOCALNET_V6}" ] ; then + ip6tables -t mangle -A OUTPUT -p tcp -m tcp --dport 8333 ! -d ${LOCALNET_V6} -j MARK --set-mark 0x4 + ip6tables -t mangle -A OUTPUT -p tcp -m tcp --sport 8333 ! 
-d ${LOCALNET_V6} -j MARK --set-mark 0x4 +fi diff --git a/contrib/qt_translations.py b/contrib/qt_translations.py deleted file mode 100755 index fd8a8b71298c6..0000000000000 --- a/contrib/qt_translations.py +++ /dev/null @@ -1,22 +0,0 @@ -#!/usr/bin/env python - -# Helpful little script that spits out a comma-separated list of -# language codes for Qt icons that should be included -# in binary bitcoin distributions - -import glob -import os -import re -import sys - -if len(sys.argv) != 3: - sys.exit("Usage: %s $QTDIR/translations $BITCOINDIR/src/qt/locale"%sys.argv[0]) - -d1 = sys.argv[1] -d2 = sys.argv[2] - -l1 = set([ re.search(r'qt_(.*).qm', f).group(1) for f in glob.glob(os.path.join(d1, 'qt_*.qm')) ]) -l2 = set([ re.search(r'bitcoin_(.*).qm', f).group(1) for f in glob.glob(os.path.join(d2, 'bitcoin_*.qm')) ]) - -print ",".join(sorted(l1.intersection(l2))) - diff --git a/contrib/seeds/.gitignore b/contrib/seeds/.gitignore new file mode 100644 index 0000000000000..e4a39d6093436 --- /dev/null +++ b/contrib/seeds/.gitignore @@ -0,0 +1 @@ +seeds_main.txt diff --git a/contrib/seeds/README.md b/contrib/seeds/README.md index f9a0c277e2e7f..c53446bfb0c34 100644 --- a/contrib/seeds/README.md +++ b/contrib/seeds/README.md @@ -1,11 +1,28 @@ -### Seeds ### +# Seeds -Utility to generate the pnSeed[] array that is compiled into the client -(see [src/net.cpp](/src/net.cpp)). +Utility to generate the seeds.txt list that is compiled into the client +(see [src/chainparamsseeds.h](/src/chainparamsseeds.h) and other utilities in [contrib/seeds](/contrib/seeds)). -The 600 seeds compiled into the 0.8 release were created from sipa's DNS seed data, like this: +Be sure to update `PATTERN_AGENT` in `makeseeds.py` to include the current version, +and remove old versions as necessary (at a minimum when GetDesirableServiceFlags +changes its default return value, as those are the services which seeds are added +to addrman with). - curl -s http://bitcoin.sipa.be/seeds.txt | head -1000 | makeseeds.py +The seeds compiled into the release are created from sipa's DNS seed data, like this: -The input to makeseeds.py is assumed to be approximately sorted from most-reliable to least-reliable, -with IP:port first on each line (lines that don't match IPv4:port are ignored). + curl https://bitcoin.sipa.be/seeds.txt.gz | gzip -dc > seeds_main.txt + python3 makeseeds.py < seeds_main.txt > nodes_main.txt + cat nodes_main_manual.txt >> nodes_main.txt + python3 generate-seeds.py . > ../../src/chainparamsseeds.h + +## Dependencies + +Ubuntu, Debian: + + sudo apt-get install python3-dnspython + +and/or for other operating systems: + + pip install dnspython + +See https://dnspython.readthedocs.io/en/latest/installation.html for more information. diff --git a/contrib/seeds/generate-seeds.py b/contrib/seeds/generate-seeds.py new file mode 100755 index 0000000000000..44345e39874ca --- /dev/null +++ b/contrib/seeds/generate-seeds.py @@ -0,0 +1,178 @@ +#!/usr/bin/env python3 +# Copyright (c) 2014-2021 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +''' +Script to generate list of seed nodes for chainparams.cpp. 
+
+This script expects two text files in the directory that is passed as an
+argument:
+
+    nodes_main.txt
+    nodes_test.txt
+
+These files must consist of lines in the format
+
+    <ip>:<port>
+    [<ipv6>]:<port>
+    <onion>.onion:<port>
+    <i2p>.b32.i2p:<port>
+
+The output will be two data structures with the peers in binary format:
+
+   static const uint8_t chainparams_seed_{main,test}[]={
+   ...
+   }
+
+These should be pasted into `src/chainparamsseeds.h`.
+'''
+
+from base64 import b32decode
+from enum import Enum
+import struct
+import sys
+import os
+import re
+
+class BIP155Network(Enum):
+    IPV4 = 1
+    IPV6 = 2
+    TORV2 = 3  # no longer supported
+    TORV3 = 4
+    I2P = 5
+    CJDNS = 6
+
+def name_to_bip155(addr):
+    '''Convert address string to BIP155 (networkID, addr) tuple.'''
+    if addr.endswith('.onion'):
+        vchAddr = b32decode(addr[0:-6], True)
+        if len(vchAddr) == 35:
+            assert vchAddr[34] == 3
+            return (BIP155Network.TORV3, vchAddr[:32])
+        elif len(vchAddr) == 10:
+            return (BIP155Network.TORV2, vchAddr)
+        else:
+            raise ValueError('Invalid onion %s' % vchAddr)
+    elif addr.endswith('.b32.i2p'):
+        vchAddr = b32decode(addr[0:-8] + '====', True)
+        if len(vchAddr) == 32:
+            return (BIP155Network.I2P, vchAddr)
+        else:
+            raise ValueError(f'Invalid I2P {vchAddr}')
+    elif '.' in addr: # IPv4
+        return (BIP155Network.IPV4, bytes((int(x) for x in addr.split('.'))))
+    elif ':' in addr: # IPv6 or CJDNS
+        sub = [[], []] # prefix, suffix
+        x = 0
+        addr = addr.split(':')
+        for i,comp in enumerate(addr):
+            if comp == '':
+                if i == 0 or i == (len(addr)-1): # skip empty component at beginning or end
+                    continue
+                x += 1 # :: skips to suffix
+                assert(x < 2)
+            else: # two bytes per component
+                val = int(comp, 16)
+                sub[x].append(val >> 8)
+                sub[x].append(val & 0xff)
+        nullbytes = 16 - len(sub[0]) - len(sub[1])
+        assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
+        addr_bytes = bytes(sub[0] + ([0] * nullbytes) + sub[1])
+        if addr_bytes[0] == 0xfc:
+            # Assume that seeds with fc00::/8 addresses belong to CJDNS,
+            # not to the publicly unroutable "Unique Local Unicast" network, see
+            # RFC4193: https://datatracker.ietf.org/doc/html/rfc4193#section-8
+            return (BIP155Network.CJDNS, addr_bytes)
+        else:
+            return (BIP155Network.IPV6, addr_bytes)
+    else:
+        raise ValueError('Could not parse address %s' % addr)
+
+def parse_spec(s):
+    '''Convert endpoint string to BIP155 (networkID, addr, port) tuple.'''
+    match = re.match(r'\[([0-9a-fA-F:]+)\](?::([0-9]+))?$', s)
+    if match: # ipv6
+        host = match.group(1)
+        port = match.group(2)
+    elif s.count(':') > 1: # ipv6, no port
+        host = s
+        port = ''
+    else:
+        (host,_,port) = s.partition(':')
+
+    if not port:
+        port = 0
+    else:
+        port = int(port)
+
+    host = name_to_bip155(host)
+
+    if host[0] == BIP155Network.TORV2:
+        return None # TORV2 is no longer supported, so we ignore it
+    else:
+        return host + (port, )
+
+def ser_compact_size(l):
+    r = b""
+    if l < 253:
+        r = struct.pack("B", l)
+    elif l < 0x10000:
+        r = struct.pack("<BH", 253, l)
+    elif l < 0x100000000:
+        r = struct.pack("<BI", 254, l)
+    else:
+        r = struct.pack("<BQ", 255, l)
+    return r
+
+def bip155_serialize(spec):
+    '''
+    Serialize (networkID, addr, port) tuple to BIP155 binary format.
+    '''
+    r = b""
+    r += struct.pack('B', spec[0].value)
+    r += ser_compact_size(len(spec[1]))
+    r += spec[1]
+    r += struct.pack('>H', spec[2])
+    return r
+
+def process_nodes(g, f, structname):
+    g.write('static const uint8_t %s[] = {\n' % structname)
+    for line in f:
+        comment = line.find('#')
+        if comment != -1:
+            line = line[0:comment]
+        line = line.strip()
+        if not line:
+            continue
+
+        spec = parse_spec(line)
+        if spec is None: # ignore this entry (e.g.
no longer supported addresses like TORV2) + continue + blob = bip155_serialize(spec) + hoststr = ','.join(('0x%02x' % b) for b in blob) + g.write(f' {hoststr},\n') + g.write('};\n') + +def main(): + if len(sys.argv)<2: + print(('Usage: %s ' % sys.argv[0]), file=sys.stderr) + sys.exit(1) + g = sys.stdout + indir = sys.argv[1] + g.write('#ifndef BITCOIN_CHAINPARAMSSEEDS_H\n') + g.write('#define BITCOIN_CHAINPARAMSSEEDS_H\n') + g.write('/**\n') + g.write(' * List of fixed seed nodes for the bitcoin network\n') + g.write(' * AUTOGENERATED by contrib/seeds/generate-seeds.py\n') + g.write(' *\n') + g.write(' * Each line contains a BIP155 serialized (networkID, addr, port) tuple.\n') + g.write(' */\n') + with open(os.path.join(indir,'nodes_main.txt'), 'r', encoding="utf8") as f: + process_nodes(g, f, 'chainparams_seed_main') + g.write('\n') + with open(os.path.join(indir,'nodes_test.txt'), 'r', encoding="utf8") as f: + process_nodes(g, f, 'chainparams_seed_test') + g.write('#endif // BITCOIN_CHAINPARAMSSEEDS_H\n') + +if __name__ == '__main__': + main() diff --git a/contrib/seeds/makeseeds.py b/contrib/seeds/makeseeds.py index 1d01fd7d209fa..78eb04a8367fe 100755 --- a/contrib/seeds/makeseeds.py +++ b/contrib/seeds/makeseeds.py @@ -1,32 +1,244 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 +# Copyright (c) 2013-2020 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. # -# Generate pnSeed[] from Pieter's DNS seeder +# Generate seeds.txt from Pieter's DNS seeder # -NSEEDS=600 - import re import sys -from subprocess import check_output +import dns.resolver +import collections +from typing import List, Dict, Union -def main(): - lines = sys.stdin.readlines() +NSEEDS=512 + +MAX_SEEDS_PER_ASN = { + 'ipv4': 2, + 'ipv6': 10, +} + +MIN_BLOCKS = 730000 + +PATTERN_IPV4 = re.compile(r"^((\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})):(\d+)$") +PATTERN_IPV6 = re.compile(r"^\[([0-9a-z:]+)\]:(\d+)$") +PATTERN_ONION = re.compile(r"^([a-z2-7]{56}\.onion):(\d+)$") +PATTERN_AGENT = re.compile( + r"^/Satoshi:(" + r"0.14.(0|1|2|3|99)|" + r"0.15.(0|1|2|99)|" + r"0.16.(0|1|2|3|99)|" + r"0.17.(0|0.1|1|2|99)|" + r"0.18.(0|1|99)|" + r"0.19.(0|1|2|99)|" + r"0.20.(0|1|2|99)|" + r"0.21.(0|1|2|99)|" + r"22.(0|99)|" + r"23.99" + r")") - ips = [] - pattern = re.compile(r"^(\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3}):8333") - for line in lines: - m = pattern.match(line) +def parseline(line: str) -> Union[dict, None]: + """ Parses a line from `seeds_main.txt` into a dictionary of details for that line. + or `None`, if the line could not be parsed. + """ + sline = line.split() + if len(sline) < 11: + # line too short to be valid, skip it. 
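
As a worked example of what `bip155_serialize` in generate-seeds.py above emits: each entry is one byte of network ID, a compact-size length, the raw address bytes, and the port in big-endian. For an illustrative IPv4 seed `1.2.3.4:8333` that gives the eight bytes checked below.

```python
# Worked example of the serialization performed by generate-seeds.py above
# for the (illustrative) seed "1.2.3.4:8333".
import struct

network_id = 1                      # BIP155Network.IPV4
addr = bytes([1, 2, 3, 4])
port = 8333

blob = struct.pack("B", network_id)          # network ID
blob += struct.pack("B", len(addr))          # compact size (< 253, single byte)
blob += addr                                 # address bytes
blob += struct.pack(">H", port)              # port, big-endian

assert blob.hex() == "010401020304208d"
print(",".join("0x%02x" % b for b in blob))  # 0x01,0x04,0x01,0x02,0x03,0x04,0x20,0x8d
```
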
+ return None + m = PATTERN_IPV4.match(sline[0]) + sortkey = None + ip = None + if m is None: + m = PATTERN_IPV6.match(sline[0]) if m is None: - continue + m = PATTERN_ONION.match(sline[0]) + if m is None: + return None + else: + net = 'onion' + ipstr = sortkey = m.group(1) + port = int(m.group(2)) + else: + net = 'ipv6' + if m.group(1) in ['::']: # Not interested in localhost + return None + ipstr = m.group(1) + sortkey = ipstr # XXX parse IPv6 into number, could use name_to_ipv6 from generate-seeds + port = int(m.group(2)) + else: + # Do IPv4 sanity check ip = 0 for i in range(0,4): - ip = ip + (int(m.group(i+1)) << (8*(i))) + if int(m.group(i+2)) < 0 or int(m.group(i+2)) > 255: + return None + ip = ip + (int(m.group(i+2)) << (8*(3-i))) if ip == 0: + return None + net = 'ipv4' + sortkey = ip + ipstr = m.group(1) + port = int(m.group(6)) + # Skip bad results. + if sline[1] == 0: + return None + # Extract uptime %. + uptime30 = float(sline[7][:-1]) + # Extract Unix timestamp of last success. + lastsuccess = int(sline[2]) + # Extract protocol version. + version = int(sline[10]) + # Extract user agent. + agent = sline[11][1:-1] + # Extract service flags. + service = int(sline[9], 16) + # Extract blocks. + blocks = int(sline[8]) + # Construct result. + return { + 'net': net, + 'ip': ipstr, + 'port': port, + 'ipnum': ip, + 'uptime': uptime30, + 'lastsuccess': lastsuccess, + 'version': version, + 'agent': agent, + 'service': service, + 'blocks': blocks, + 'sortkey': sortkey, + } + +def dedup(ips: List[Dict]) -> List[Dict]: + """ Remove duplicates from `ips` where multiple ips share address and port. """ + d = {} + for ip in ips: + d[ip['ip'],ip['port']] = ip + return list(d.values()) + +def filtermultiport(ips: List[Dict]) -> List[Dict]: + """ Filter out hosts with more nodes per IP""" + hist = collections.defaultdict(list) + for ip in ips: + hist[ip['sortkey']].append(ip) + return [value[0] for (key,value) in list(hist.items()) if len(value)==1] + +def lookup_asn(net: str, ip: str) -> Union[int, None]: + """ Look up the asn for an `ip` address by querying cymru.com + on network `net` (e.g. ipv4 or ipv6). + + Returns in integer ASN or None if it could not be found. + """ + try: + if net == 'ipv4': + ipaddr = ip + prefix = '.origin' + else: # http://www.team-cymru.com/IP-ASN-mapping.html + res = str() # 2001:4860:b002:23::68 + for nb in ip.split(':')[:4]: # pick the first 4 nibbles + for c in nb.zfill(4): # right padded with '0' + res += c + '.' # 2001 4860 b002 0023 + ipaddr = res.rstrip('.') # 2.0.0.1.4.8.6.0.b.0.0.2.0.0.2.3 + prefix = '.origin6' + + asn = int([x.to_text() for x in dns.resolver.resolve('.'.join( + reversed(ipaddr.split('.'))) + prefix + '.asn.cymru.com', + 'TXT').response.answer][0].split('\"')[1].split(' ')[0]) + return asn + except Exception as e: + sys.stderr.write(f'ERR: Could not resolve ASN for "{ip}": {e}\n') + return None + +# Based on Greg Maxwell's seed_filter.py +def filterbyasn(ips: List[Dict], max_per_asn: Dict, max_per_net: int) -> List[Dict]: + """ Prunes `ips` by + (a) trimming ips to have at most `max_per_net` ips from each net (e.g. ipv4, ipv6); and + (b) trimming ips to have at most `max_per_asn` ips from each asn in each net. 
+ """ + # Sift out ips by type + ips_ipv46 = [ip for ip in ips if ip['net'] in ['ipv4', 'ipv6']] + ips_onion = [ip for ip in ips if ip['net'] == 'onion'] + + # Filter IPv46 by ASN, and limit to max_per_net per network + result = [] + net_count: Dict[str, int] = collections.defaultdict(int) + asn_count: Dict[int, int] = collections.defaultdict(int) + + for i, ip in enumerate(ips_ipv46): + if i % 10 == 0: + # give progress update + print(f"{i:6d}/{len(ips_ipv46)} [{100*i/len(ips_ipv46):04.1f}%]\r", file=sys.stderr, end='', flush=True) + + if net_count[ip['net']] == max_per_net: + # do not add this ip as we already too many + # ips from this network + continue + asn = lookup_asn(ip['net'], ip['ip']) + if asn is None or asn_count[asn] == max_per_asn[ip['net']]: + # do not add this ip as we already have too many + # ips from this ASN on this network continue - ips.append(ip) + asn_count[asn] += 1 + net_count[ip['net']] += 1 + result.append(ip) + + # Add back Onions (up to max_per_net) + result.extend(ips_onion[0:max_per_net]) + return result + +def ip_stats(ips: List[Dict]) -> str: + """ Format and return pretty string from `ips`. """ + hist: Dict[str, int] = collections.defaultdict(int) + for ip in ips: + if ip is not None: + hist[ip['net']] += 1 + + return f"{hist['ipv4']:6d} {hist['ipv6']:6d} {hist['onion']:6d}" + +def main(): + lines = sys.stdin.readlines() + ips = [parseline(line) for line in lines] - for row in range(0, min(NSEEDS,len(ips)), 8): - print " " + ", ".join([ "0x%08x"%i for i in ips[row:row+8] ]) + "," + print('\x1b[7m IPv4 IPv6 Onion Pass \x1b[0m', file=sys.stderr) + print(f'{ip_stats(ips):s} Initial', file=sys.stderr) + # Skip entries with invalid address. + ips = [ip for ip in ips if ip is not None] + print(f'{ip_stats(ips):s} Skip entries with invalid address', file=sys.stderr) + # Skip duplicates (in case multiple seeds files were concatenated) + ips = dedup(ips) + print(f'{ip_stats(ips):s} After removing duplicates', file=sys.stderr) + # Enforce minimal number of blocks. + ips = [ip for ip in ips if ip['blocks'] >= MIN_BLOCKS] + print(f'{ip_stats(ips):s} Enforce minimal number of blocks', file=sys.stderr) + # Require service bit 1. + ips = [ip for ip in ips if (ip['service'] & 1) == 1] + print(f'{ip_stats(ips):s} Require service bit 1', file=sys.stderr) + # Require at least 50% 30-day uptime for clearnet, 10% for onion. + req_uptime = { + 'ipv4': 50, + 'ipv6': 50, + 'onion': 10, + } + ips = [ip for ip in ips if ip['uptime'] > req_uptime[ip['net']]] + print(f'{ip_stats(ips):s} Require minimum uptime', file=sys.stderr) + # Require a known and recent user agent. + ips = [ip for ip in ips if PATTERN_AGENT.match(ip['agent'])] + print(f'{ip_stats(ips):s} Require a known and recent user agent', file=sys.stderr) + # Sort by availability (and use last success as tie breaker) + ips.sort(key=lambda x: (x['uptime'], x['lastsuccess'], x['ip']), reverse=True) + # Filter out hosts with multiple bitcoin ports, these are likely abusive + ips = filtermultiport(ips) + print(f'{ip_stats(ips):s} Filter out hosts with multiple bitcoin ports', file=sys.stderr) + # Look up ASNs and limit results, both per ASN and globally. + ips = filterbyasn(ips, MAX_SEEDS_PER_ASN, NSEEDS) + print(f'{ip_stats(ips):s} Look up ASNs and limit results per ASN and per net', file=sys.stderr) + # Sort the results by IP address (for deterministic output). 
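
`lookup_asn` above resolves the origin AS by querying Team Cymru over DNS: the address is rewritten into a reversed, dot-separated form (per octet for IPv4, per nibble of the first four groups for IPv6) under `origin.asn.cymru.com` / `origin6.asn.cymru.com`, and the ASN is taken from the first field of the TXT answer. A small sketch of just the query-name construction; the addresses are illustrative, and the IPv6 one is the same example used in the code comments:

```python
# Sketch of the DNS query names that lookup_asn() above constructs for the
# Team Cymru IP-to-ASN service (addresses are illustrative).
def cymru_qname(net: str, ip: str) -> str:
    if net == 'ipv4':
        nibbles = ip.split('.')                 # reversed octets for IPv4
        suffix = '.origin.asn.cymru.com'
    else:
        nibbles = []
        for group in ip.split(':')[:4]:         # first four 16-bit groups only
            nibbles.extend(group.zfill(4))      # zero-pad, one element per hex nibble
        suffix = '.origin6.asn.cymru.com'
    return '.'.join(reversed(nibbles)) + suffix

assert cymru_qname('ipv4', '1.2.3.4') == '4.3.2.1.origin.asn.cymru.com'
assert cymru_qname('ipv6', '2001:4860:b002:23::68') == \
    '3.2.0.0.2.0.0.b.0.6.8.4.1.0.0.2.origin6.asn.cymru.com'
```
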
+ ips.sort(key=lambda x: (x['net'], x['sortkey'])) + for ip in ips: + if ip['net'] == 'ipv6': + print('[%s]:%i' % (ip['ip'], ip['port'])) + else: + print('%s:%i' % (ip['ip'], ip['port'])) if __name__ == '__main__': main() diff --git a/contrib/seeds/nodes_main.txt b/contrib/seeds/nodes_main.txt new file mode 100644 index 0000000000000..d8e34bdb602af --- /dev/null +++ b/contrib/seeds/nodes_main.txt @@ -0,0 +1,689 @@ +2.37.30.144:8777 +2.138.174.158:8333 +2.152.78.124:8333 +5.8.18.154:8333 +5.45.74.50:8333 +5.79.123.3:8333 +5.102.168.217:22220 +5.103.137.146:9333 +5.128.87.126:8333 +5.172.132.200:8333 +5.188.62.18:8333 +5.254.101.226:8334 +8.210.18.56:8333 +8.210.92.32:8333 +14.13.34.225:16181 +14.39.151.167:8333 +18.196.79.108:8333 +18.218.139.58:48333 +20.184.15.116:8433 +23.175.0.220:8333 +23.233.107.21:8333 +24.35.68.229:8333 +24.37.3.26:8333 +24.102.91.203:8333 +24.116.153.115:8333 +24.134.6.165:8333 +24.155.218.13:8333 +24.160.137.173:8333 +24.177.106.85:8333 +24.184.0.146:8333 +24.194.222.116:8333 +24.205.215.192:8333 +27.124.108.19:8333 +31.14.40.64:8333 +31.47.202.112:8333 +31.165.115.7:8333 +34.65.45.157:8333 +34.78.48.104:8333 +34.80.134.68:8333 +34.101.132.198:8333 +34.227.68.216:8333 +35.137.212.22:8333 +35.231.190.134:8333 +37.1.217.35:8333 +37.15.62.32:8333 +37.143.118.174:8333 +37.200.59.67:8333 +37.205.9.165:8333 +38.23.180.228:8333 +38.65.119.26:8333 +38.141.134.140:8333 +39.109.122.127:8444 +41.79.70.146:8333 +41.193.122.191:8333 +43.225.62.107:8333 +45.35.73.152:8333 +45.43.97.103:8333 +45.63.10.52:20008 +45.84.153.40:8333 +45.95.64.225:8333 +45.129.180.214:8333 +45.154.255.162:8333 +45.226.80.102:8333 +46.6.10.230:8333 +46.23.87.218:8333 +46.32.50.98:8333 +46.47.84.85:8333 +46.48.126.58:8333 +46.146.248.89:8333 +46.165.221.209:9333 +46.166.142.2:8333 +46.166.162.45:20001 +46.173.50.58:8333 +46.175.178.3:8333 +46.188.30.118:8333 +46.219.120.59:3673 +46.229.238.187:8333 +47.93.230.171:8333 +47.100.162.210:18332 +47.144.106.249:8333 +47.188.70.205:8333 +47.227.226.242:8333 +50.2.13.164:8333 +50.5.46.195:8333 +50.45.128.28:8333 +51.148.153.60:8333 +51.154.62.103:8333 +51.154.131.18:8333 +51.158.150.155:8333 +51.159.2.218:8333 +54.198.19.34:8333 +58.105.168.41:8333 +58.158.0.86:8333 +60.251.129.61:8336 +61.239.91.250:8333 +62.28.190.194:8333 +62.152.58.16:9421 +62.171.129.32:8333 +62.251.54.163:8333 +63.247.147.166:8333 +64.33.68.176:8333 +64.156.192.61:8333 +64.187.175.226:8333 +64.233.245.39:8333 +64.237.82.149:8333 +65.101.247.26:8333 +66.29.129.218:8333 +66.49.204.11:8333 +66.58.243.215:8333 +66.85.234.129:8333 +66.130.120.52:8333 +67.10.121.145:8333 +67.210.228.203:8333 +67.213.87.21:8333 +68.181.4.12:8333 +69.7.124.146:8333 +69.8.175.201:8333 +69.59.18.22:8333 +69.119.193.9:8333 +69.130.201.27:8333 +69.131.101.176:8333 +70.15.194.32:8333 +70.64.27.12:8333 +72.29.170.151:8333 +72.74.34.99:8333 +72.133.177.119:8333 +73.166.84.222:8333 +74.67.240.204:8333 +74.91.115.229:8333 +74.118.137.119:8333 +74.213.251.203:8333 +74.220.255.190:8333 +76.11.60.155:8333 +76.66.144.127:8333 +77.70.16.245:8333 +77.85.204.149:8333 +77.105.87.97:8333 +77.120.113.69:8433 +77.120.113.71:8433 +77.120.122.116:8433 +77.120.122.118:8433 +77.162.190.90:8333 +77.167.245.239:55544 +77.232.41.189:8333 +78.20.227.249:8333 +78.21.167.8:8333 +78.27.139.13:8333 +78.43.208.25:8333 +78.63.28.146:8333 +78.72.228.239:8333 +78.108.102.8:8333 +78.129.0.39:8333 +78.129.169.69:8333 +79.77.182.180:8333 +79.77.182.183:8333 +79.107.178.59:8333 +80.55.225.158:8333 +80.64.211.102:8333 +80.64.211.103:8333 
+80.71.57.50:8333 +80.81.3.27:8333 +80.82.55.43:8333 +80.88.172.227:64264 +80.89.203.172:8001 +80.93.213.246:8333 +80.147.82.165:8333 +80.229.28.60:8333 +80.247.233.40:8333 +80.255.8.93:8333 +81.7.17.202:8333 +81.10.241.165:8333 +81.21.86.157:8333 +81.171.22.143:8333 +81.237.206.224:8343 +82.69.23.195:8333 +82.96.96.40:8333 +82.116.50.101:8333 +82.136.99.122:8333 +82.149.97.25:17567 +82.154.24.209:8333 +82.165.241.50:8333 +82.197.218.253:8333 +82.202.68.231:8333 +83.137.41.10:8333 +83.208.6.211:8333 +83.217.8.31:44420 +83.220.110.48:8333 +83.222.138.85:8333 +83.243.191.199:8333 +84.22.139.57:8333 +84.27.155.17:8333 +84.75.28.247:8333 +84.112.60.16:8333 +84.211.7.56:8333 +84.237.7.249:8333 +85.23.51.177:8333 +85.24.145.198:8333 +85.184.138.108:8333 +85.194.238.134:8333 +85.195.54.110:8333 +85.208.71.36:8333 +85.208.71.39:8333 +85.214.136.45:8333 +85.214.161.252:8333 +85.227.245.128:8333 +86.18.34.243:8333 +86.20.50.170:8333 +86.49.105.90:8333 +86.76.7.132:8333 +86.100.26.188:8333 +86.106.143.143:55373 +86.120.58.66:8333 +86.133.251.239:8901 +86.149.8.23:8901 +87.78.197.234:8333 +87.120.8.5:20008 +87.121.37.156:8333 +88.82.181.44:8333 +88.87.93.52:1691 +88.98.235.134:8333 +88.136.187.214:8333 +88.147.244.250:8333 +88.148.153.148:8333 +88.212.45.166:8333 +88.212.55.138:8333 +89.38.96.153:9273 +89.47.161.135:8333 +89.88.62.190:8333 +89.158.32.44:8333 +89.163.145.240:8333 +89.163.249.234:3673 +89.176.196.80:8333 +89.216.21.96:8333 +90.84.227.255:8333 +90.146.130.214:8333 +90.250.9.1:8333 +91.93.194.154:8333 +91.106.188.229:8333 +91.126.40.109:8333 +91.137.127.123:8333 +91.147.232.98:8333 +91.152.123.18:8333 +91.178.17.120:8333 +91.204.99.178:8333 +91.223.175.14:8333 +92.42.110.242:8333 +92.53.90.84:8333 +92.221.155.228:8333 +93.57.81.162:8333 +93.95.88.13:8333 +93.103.13.1:8333 +93.123.180.164:8333 +93.190.117.26:8333 +94.105.125.240:8333 +94.110.23.215:8333 +94.154.159.99:8333 +94.189.161.119:8333 +94.203.255.70:8333 +94.232.173.93:8333 +95.79.122.99:8333 +95.80.1.110:8333 +95.83.73.31:8333 +95.110.133.223:8333 +95.110.234.93:8333 +95.164.65.194:8333 +95.165.8.182:8333 +95.174.219.101:8333 +95.191.130.100:8333 +95.214.53.154:8333 +95.215.205.180:8333 +96.43.130.234:8333 +98.25.201.31:8333 +98.128.247.182:8333 +98.171.21.129:8333 +99.147.135.161:8333 +101.100.163.118:8327 +102.132.245.16:8333 +102.182.204.96:8333 +102.182.235.245:8333 +103.14.245.250:8333 +103.47.192.15:8333 +103.84.84.250:8335 +103.99.168.130:8333 +103.99.168.140:8333 +103.198.192.14:20008 +103.232.104.227:8333 +104.143.2.195:8333 +104.172.235.227:8333 +104.238.220.199:8333 +107.11.115.68:8333 +107.173.166.43:8333 +108.4.212.83:8333 +109.136.73.97:8333 +109.173.98.23:8333 +109.190.68.116:8333 +109.235.246.60:8333 +109.248.206.13:8333 +110.12.64.96:8333 +111.90.140.46:8333 +111.90.159.184:50001 +113.107.201.131:8333 +115.47.141.250:8885 +116.58.171.67:8333 +116.87.57.218:8333 +116.202.161.56:8333 +117.51.159.130:8333 +118.103.126.140:28333 +121.45.190.210:8333 +121.99.193.25:8333 +122.112.148.153:8339 +122.148.135.234:8333 +128.0.190.26:8333 +128.65.194.136:8333 +129.126.172.115:8333 +129.226.125.10:8333 +131.188.40.191:8333 +134.195.185.52:8333 +135.180.44.61:8333 +136.52.114.123:8333 +136.56.170.96:8333 +137.116.213.143:8333 +137.226.34.46:8333 +138.43.233.57:8333 +139.130.41.82:8333 +140.190.12.129:8333 +142.4.105.77:8333 +142.54.181.218:8333 +143.177.231.247:8333 +143.178.64.10:8333 +144.34.161.65:18333 +146.4.124.134:8333 +146.83.56.69:8333 +146.90.193.68:8333 +146.196.55.156:28833 +148.66.50.50:8335 +148.251.1.20:8343 
+151.48.95.212:8333 +151.252.193.245:8333 +152.44.137.83:8333 +152.115.191.196:8333 +154.221.31.86:8333 +156.17.103.2:8088 +157.138.20.22:8333 +158.58.188.37:8333 +158.140.209.79:8333 +159.89.230.128:8333 +159.246.25.52:8333 +160.20.59.250:8433 +162.0.234.190:8333 +162.62.26.218:8333 +162.250.188.194:8333 +162.251.70.82:8333 +163.158.206.255:8333 +164.68.105.105:8333 +165.228.174.117:8333 +166.62.82.103:32771 +166.70.49.26:8333 +166.78.241.9:8333 +166.78.241.25:8333 +167.71.73.244:8333 +167.179.147.155:8333 +168.91.238.8:8333 +172.105.21.216:8333 +172.117.105.95:8333 +173.23.103.30:8000 +173.205.92.151:54805 +173.205.92.154:54805 +173.205.92.157:54805 +173.208.152.218:8333 +173.241.227.243:8333 +174.3.4.232:8333 +174.17.11.22:8333 +174.88.241.167:8333 +174.114.102.41:8333 +174.114.250.86:8333 +174.142.191.136:8333 +175.39.72.87:8333 +176.12.16.135:8333 +176.37.23.30:8333 +176.62.179.221:8333 +176.74.136.237:8333 +176.99.6.226:8333 +176.212.185.153:8333 +177.81.236.117:8333 +178.19.106.26:8333 +178.21.118.178:8333 +178.33.232.69:8333 +178.79.84.139:8333 +178.124.162.209:8333 +178.132.2.246:8333 +178.150.96.46:8333 +178.162.212.44:8333 +178.193.226.120:8333 +178.236.137.63:8333 +180.150.46.187:8333 +181.164.210.228:8530 +183.110.220.210:30301 +184.95.58.166:8336 +184.164.147.82:41333 +184.171.208.109:8333 +185.17.143.220:8333 +185.21.217.49:8333 +185.25.48.184:8333 +185.28.96.16:8333 +185.31.136.246:8333 +185.64.116.15:8333 +185.68.249.91:8333 +185.108.247.190:8333 +185.141.60.36:8333 +185.148.3.227:8333 +185.148.145.74:8333 +185.159.20.143:8333 +185.167.113.59:8333 +185.185.26.141:8111 +185.189.132.178:57780 +185.204.197.112:8333 +185.209.70.17:8333 +185.220.156.193:8333 +185.238.129.113:8333 +185.239.221.5:8333 +185.244.217.39:8333 +185.254.97.164:8333 +186.33.167.11:8333 +188.32.14.31:8334 +188.42.40.234:18333 +188.134.8.36:8333 +188.138.88.14:8333 +188.156.110.239:8333 +188.165.244.143:8333 +188.213.68.38:8333 +188.214.129.65:20012 +188.242.15.74:8333 +188.244.4.78:8333 +189.39.6.82:8333 +189.207.46.32:8333 +189.212.121.74:8333 +192.3.11.20:8333 +192.65.170.15:8333 +192.146.137.44:8333 +192.182.157.119:8333 +192.187.109.141:8333 +192.227.80.83:8333 +193.10.203.23:8334 +193.32.127.160:58477 +193.32.127.162:58477 +193.58.196.212:8333 +193.106.29.106:8333 +193.138.154.43:8333 +193.178.170.232:8333 +193.196.37.62:8333 +193.222.130.14:8333 +193.234.50.227:8333 +194.14.246.205:8333 +194.135.135.69:8333 +194.147.113.201:8333 +194.165.30.20:8333 +194.219.62.23:8333 +195.56.63.4:8333 +195.134.183.188:8333 +195.208.103.30:8444 +195.208.103.31:8444 +198.1.231.6:8333 +198.12.14.136:8333 +198.84.237.70:8333 +198.178.120.5:8112 +199.48.92.184:8333 +199.68.199.19:8333 +199.182.184.204:8333 +199.189.242.141:8333 +199.247.7.208:8333 +200.122.181.37:8333 +201.191.6.103:8333 +202.107.219.130:8333 +202.108.211.135:8333 +203.94.33.112:8333 +203.130.48.117:8885 +203.132.94.196:8333 +203.162.13.181:8332 +204.191.201.43:8333 +204.229.10.90:8333 +205.178.41.124:8333 +206.55.178.157:8333 +206.126.203.8:8333 +206.174.115.96:8333 +206.223.153.52:8333 +207.188.159.25:8333 +207.229.46.80:8333 +209.58.145.157:8333 +209.126.81.147:8333 +209.145.63.150:8333 +209.209.10.30:8333 +209.237.127.227:8333 +212.99.226.36:9020 +212.185.86.84:8333 +212.227.211.87:8333 +213.5.36.58:8333 +213.89.236.219:8333 +213.93.145.183:8333 +213.214.66.182:8333 +216.41.249.178:8333 +216.146.251.8:8333 +216.249.70.22:8333 +217.11.240.4:8333 +217.15.178.7:8333 +217.24.233.116:8333 +217.64.148.98:51401 +217.113.121.169:8333 +217.170.124.170:8333 
+220.132.135.54:8333 +220.221.58.25:8333 +220.233.178.199:8333 +221.219.97.105:2001 +[2001:1608:1b:f9::1]:26491 +[2001:1620:510::2]:8333 +[2001:1bc0:c1::2000]:8333 +[2001:470:1f0a:89a::2]:8333 +[2001:470:de5a::ec]:9333 +[2001:4b98:dc0:45:216:3eff:fea2:95cd]:8333 +[2001:4dd0:3564:0:fd76:c1d3:1854:5bd9]:8333 +[2001:4de8:b1b2:1:0:dead:beef:7]:8333 +[2001:638:a000:4140::ffff:191]:8333 +[2001:648:2800:131:4b1f:f6fc:20f7:f99f]:8333 +[2001:678:cc8::1:10:88]:20008 +[2001:67c:26b4:ff00::44]:8333 +[2001:67c:2db8:13::92]:8333 +[2001:7c0:2310:0:f816:3eff:fe6c:4f58]:8333 +[2001:818:ea1b:7600:f053:aade:f47b:b701]:8333 +[2001:8f1:1404:3700:8e49:715a:2e09:b634]:9444 +[2001:985:55a0:1::2]:8333 +[2001:999:270:2c2c:c8b:3a20:3f2f:318f]:8333 +[2001:b07:ac9:442b:79d6:bbbe:b37c:a783]:8333 +[2002:2f5b:a5f9::2f5b:a5f9]:8885 +[2002:b6ff:3dca::b6ff:3dca]:28364 +[2400:2410:cea2:d00:41bc:c9ea:861b:51ee]:8333 +[2400:3b00:20:c:bacb:29ff:feab:8886]:8333 +[2401:d002:3902:700:d72c:5e22:4e95:389d]:8333 +[2403:6200:88a0:fb17:f5f2:d8b5:b7ba:f4d3]:8333 +[2405:9800:b910:5f8e:1830:f630:2cc6:88fb]:8333 +[2405:9800:b970:c64c:109f:74e7:ae5f:87c7]:8333 +[2405:aa00:2::40]:8333 +[2407:8800:bc61:2202:d63d:7eff:fe6c:dc36]:8333 +[2408:8248:7004:f831::83c]:8333 +[2409:10:ca20:1df0:224:e8ff:fe1f:60d9]:8333 +[240b:11:43a1:bd00:e589:f8a7:49b:3b86]:8333 +[240d:1a:791:3400:d65d:64ff:fe28:927e]:8333 +[240d:1a:791:3400:d681:d7ff:fef6:a21e]:10050 +[2600:1700:5b2b:5f::8040]:8333 +[2600:2104:1003:c5ab:dc5e:90ff:fe18:1d08]:8333 +[2600:3c00:e002:2e32::1:14]:8333 +[2600:8805:2400:14e:12dd:b1ff:fef2:3013]:8333 +[2602:ffb8::208:72:57:200]:8333 +[2603:301f:1ebf:e000:e23f:49ff:fee7:7431]:8333 +[2603:6081:1800:6600:16dd:a9ff:feee:b2f3]:8333 +[2604:1380:1000:7400::1]:8333 +[2604:4500::2e06]:8112 +[2604:5500:c134:4000:7285:c2ff:fe4a:e143]:32797 +[2604:5500:c134:4000::3fc]:32797 +[2604:7c00:120:4b::eb24]:8333 +[2605:6400:30:f220::]:8333 +[2605:6f80:0:7:fc1b:ccff:fe8a:d822]:8333 +[2605:ae00:203::203]:8333 +[2605:c000:2a0a:1::102]:8333 +[2605:f700:c0:827:225:90ff:fee3:34a6]:8333 +[2607:9280:b:73b:250:56ff:fe14:25b5]:8333 +[2607:f2f8:ad40:bc1::1]:8333 +[2607:fa18:3a01::20]:8333 +[2620:11c:5001:1118:d267:e5ff:fee9:e673]:8333 +[2620:11c:5001:2199:d267:e5ff:fee9:e673]:8333 +[2620:6:2003:105:2d8:61ff:fe0f:853]:8333 +[2620:6e:a000:1:42:42:42:42]:8333 +[2803:cf00:af8:f200:b89e:cf34:92c7:2d26]:8333 +[2804:14c:65d1:402c:bc53:bf5d:68a:2136]:8333 +[2804:7f1:e783:d401:661c:67ff:feba:5547]:8333 +[2804:d57:5537:4800:21e:67ff:fea8:d798]:8333 +[2804:d57:5537:4800:3615:9eff:fe23:d610]:8333 +[2806:2f0:2080:62a:86f:1a01:c44f:1794]:8333 +[2a00:1028:8382:bf22:5f7f:b78f:2737:7739]:8333 +[2a00:12e0:101:99:20c:29ff:fe29:d03f]:8333 +[2a00:1328:e101:c00::163]:8333 +[2a00:1630:10:1003:0:b19:b00b:babe]:8333 +[2a00:1768:2001:27::ef6a]:8333 +[2a00:1828:a004:2::666]:8333 +[2a00:1838:2a:1400:92e2:baff:fe4a:c416]:8333 +[2a00:1c10:2:709::217]:22220 +[2a00:1f40:5001:108:5d17:7703:b0f5:4133]:8333 +[2a00:6020:15dd:ee00:c8c2:2c77:1749:35db]:8333 +[2a00:6020:b482:9200:491a:358c:d8f7:1da]:8333 +[2a00:7145:c1:1:ae29:727:2b87:f64]:5141 +[2a00:8a60:e012:a00::21]:8333 +[2a00:a040:100:f3:45a5:ac0:fea3:71e1]:8333 +[2a01:488:2000:9801::d]:8333 +[2a01:490:16:301::2]:8333 +[2a01:5200:6c:6162:7a61:746b:6f2e:736b]:8333 +[2a01:6380:fffe:73:4e3:b3cc:a871:36d1]:8333 +[2a01:7a0:2:137c::3]:8333 +[2a01:7c8:aac9:c9:5054:ff:fedf:ff95]:8333 +[2a01:7c8:d001:1c1:5054:ff:feee:3e1a]:8333 +[2a01:8740:1:ffc5::8c6a]:8333 +[2a01:cb00:d3d:7700:227:eff:fe28:c565]:8333 +[2a01:d0:0:1c::253]:8333 +[2a01:d0:bef2::12]:8333 
+[2a01:e0a:9fb:b0e0:54f8:1901:6e83:62c1]:8333 +[2a01:e0a:aa7:c8c0:9679:affa:b6e5:efc7]:8333 +[2a02:13b8:f000:101::a]:8333 +[2a02:168:6328:0:2a8:2cff:fe68:e32c]:8333 +[2a02:2780:9000:70::7]:8333 +[2a02:2e02:3900:5400:a099:e1ff:feb6:d0e]:8333 +[2a02:390:9000:0:aaa1:59ff:fe43:b57b]:8333 +[2a02:58:97:7d20::60]:8333 +[2a02:6d40:305e:601:dea6:32ff:fe44:4b25]:8333 +[2a02:7a01::91:228:45:130]:8333 +[2a02:7aa0:1619::adc:8de0]:8333 +[2a02:7b40:3e4d:998d::1]:8333 +[2a02:7b40:592f:a187::1]:8333 +[2a02:8388:e5c6:d380:201:2eff:fe82:b3cc]:8333 +[2a02:9a0:102::110]:8333 +[2a02:a311:8143:8c00::4]:8353 +[2a02:af8:fab0:808:85:234:145:132]:8333 +[2a02:e00:fff0:506::1]:8444 +[2a02:e00:fff0:506::a]:8444 +[2a02:e98:20:1504::1]:8333 +[2a03:4000:47:f1::1]:8333 +[2a03:6000:870:0:46:23:87:218]:8333 +[2a03:7380:3015:524:afc5:d3bc:7c66:8f94]:8333 +[2a03:ec0:0:928:8c00:93ff:fe84:a007]:8333 +[2a03:ec0:0:928::701]:8333 +[2a04:2180:0:2::aa]:8333 +[2a04:52c0:101:29e::]:8333 +[2a04:52c0:103:c455::1]:8333 +[2a04:bc40:1dc3:8d::2:1001]:8333 +[2a05:1500:702:0:1c00:40ff:fe00:c]:8333 +[2a06:dd00:10:3:225:90ff:fe32:64cc]:8333 +[2a06:dd00:1:22:225:90ff:fe0e:bd48]:8333 +[2a07:6b47:100:464::9357:ffda]:8333 +[2a07:a880:4601:1062:b4b4:bd2a:39d4:7acf]:51401 +[2a07:abc4::1:946]:8333 +[2a07:abc4::89:234:180:194]:8333 +[2a09:2681:102::210]:8333 +[2a0a:c801:1:7::183]:8333 +[2a0b:f300:2:6::2]:8333 +[2a0c:59c0:18::a20e]:57658 +[2a0d:5600:24:a8e::a91e]:55373 +[2a0d:eb00:8005:1::13]:8333 +[2a10:4740:45:1:a013:d1ff:fe85:36e3]:8333 +[2a10:8b40:1::103]:8335 +[2c0f:f8f0:da51:0:70c3:eea9:9717:9579]:8333 + +# manually added 2021-03 for minimal torv3 bootstrap support +2g5qfdkn2vvcbqhzcyvyiitg4ceukybxklraxjnu7atlhd22gdwywaid.onion:8333 +2jmtxvyup3ijr7u6uvu7ijtnojx4g5wodvaedivbv74w4vzntxbrhvad.onion:8333 +37m62wn7dz3uqpathpc4qfmgrbupachj52nt3jbtbjugpbu54kbud7yd.onion:8333 +5g72ppm3krkorsfopcm2bi7wlv4ohhs4u4mlseymasn7g7zhdcyjpfid.onion:8333 +7cgwjuwi5ehvcay4tazy7ya6463bndjk6xzrttw5t3xbpq4p22q6fyid.onion:8333 +7pyrpvqdhmayxggpcyqn5l3m5vqkw3qubnmgwlpya2mdo6x7pih7r7id.onion:8333 +b64xcbleqmwgq2u46bh4hegnlrzzvxntyzbmucn3zt7cssm7y4ubv3id.onion:8333 +ejxefzf5fpst4mg2rib7grksvscl7p6fvjp6agzgfc2yglxnjtxc3aid.onion:8333 +fjdyxicpm4o42xmedlwl3uvk5gmqdfs5j37wir52327vncjzvtpfv7yd.onion:8333 +fpz6r5ppsakkwypjcglz6gcnwt7ytfhxskkfhzu62tnylcknh3eq6pad.onion:8333 +fzhn4uoxfbfss7h7d6ffbn266ca432ekbbzvqtsdd55ylgxn4jucm5qd.onion:8333 +gxo5anvfnffnftfy5frkgvplq3rpga2ie3tcblo2vl754fvnhgorn5yd.onion:8333 +ifdu5qvbofrt4ekui2iyb3kbcyzcsglazhx2hn4wfskkrx2v24qxriid.onion:8333 +itz3oxsihs62muvknc237xabl5f6w6rfznfhbpayrslv2j2ubels47yd.onion:8333 +lrjh6fywjqttmlifuemq3puhvmshxzzyhoqx7uoufali57eypuenzzid.onion:8333 +m7cbpjolo662uel7rpaid46as2otcj44vvwg3gccodnvaeuwbm3anbyd.onion:8333 +opnyfyeiibe5qo5a3wbxzbb4xdiagc32bbce46owmertdknta5mi7uyd.onion:8333 +owjsdxmzla6d7lrwkbmetywqym5cyswpihciesfl5qdv2vrmwsgy4uqd.onion:8333 +q7kgmd7n7h27ds4fg7wocgniuqb3oe2zxp4nfe4skd5da6wyipibqzqd.onion:8333 +rp7k2go3s5lyj3fnj6zn62ktarlrsft2ohlsxkyd7v3e3idqyptvread.onion:8333 +sys54sv4xv3hn3sdiv3oadmzqpgyhd4u4xphv4xqk64ckvaxzm57a7yd.onion:8333 +tddeij4qigtjr6jfnrmq6btnirmq5msgwcsdpcdjr7atftm7cxlqztid.onion:8333 +vi5bnbxkleeqi6hfccjochnn65lcxlfqs4uwgmhudph554zibiusqnad.onion:8333 +xqt25cobm5zqucac3634zfght72he6u3eagfyej5ellbhcdgos7t2had.onion:8333 + +# manually added 2021-08 for minimal i2p bootstrap support +a5qsnv3maw77mlmmzlcglu6twje6ttctd3fhpbfwcbpmewx6fczq.b32.i2p:0 +bitcornrd36coazsbzsz4pdebyzvaplmsalq4kpoljmn6cg6x5zq.b32.i2p:0 +c4gfnttsuwqomiygupdqqqyy5y5emnk5c73hrfvatri67prd7vyq.b32.i2p:0 
+dhtq2p76tyhi442aidb3vd2bv7yxxjuddpb2jydnnrl2ons5bhha.b32.i2p:0 +h3r6bkn46qxftwja53pxiykntegfyfjqtnzbm6iv6r5mungmqgmq.b32.i2p:0 +hnbbyjpxx54623l555sta7pocy3se4sdgmuebi5k6reesz5rjp6q.b32.i2p:0 +jz3s4eurm5vzjresf4mwo7oni4bk36daolwxh4iqtewakylgkxmq.b32.i2p:0 +kokkmpquqlkptu5hkmzqlttsmtwxicldr4so7wqsufk6bwf32nma.b32.i2p:0 +sedndhv5vpcgdmykyi5st4yqhdxl3hpdtglta4do435wupahhx6q.b32.i2p:0 +wwbw7nqr3ahkqv62cuqfwgtneekvvpnuc4i4f6yo7tpoqjswvcwa.b32.i2p:0 +zsxwyo6qcn3chqzwxnseusqgsnuw3maqnztkiypyfxtya4snkoka.b32.i2p:0 + +# manually added 2022-01 for minimal cjdns bootstrap support +[fc32:17ea:e415:c3bf:9808:149d:b5a2:c9aa]:8333 +[fcc7:be49:ccd1:dc91:3125:f0da:457d:8ce]:8333 diff --git a/contrib/seeds/nodes_main_manual.txt b/contrib/seeds/nodes_main_manual.txt new file mode 100644 index 0000000000000..a6e0b8763a9b4 --- /dev/null +++ b/contrib/seeds/nodes_main_manual.txt @@ -0,0 +1,43 @@ + +# manually added 2021-03 for minimal torv3 bootstrap support +2g5qfdkn2vvcbqhzcyvyiitg4ceukybxklraxjnu7atlhd22gdwywaid.onion:8333 +2jmtxvyup3ijr7u6uvu7ijtnojx4g5wodvaedivbv74w4vzntxbrhvad.onion:8333 +37m62wn7dz3uqpathpc4qfmgrbupachj52nt3jbtbjugpbu54kbud7yd.onion:8333 +5g72ppm3krkorsfopcm2bi7wlv4ohhs4u4mlseymasn7g7zhdcyjpfid.onion:8333 +7cgwjuwi5ehvcay4tazy7ya6463bndjk6xzrttw5t3xbpq4p22q6fyid.onion:8333 +7pyrpvqdhmayxggpcyqn5l3m5vqkw3qubnmgwlpya2mdo6x7pih7r7id.onion:8333 +b64xcbleqmwgq2u46bh4hegnlrzzvxntyzbmucn3zt7cssm7y4ubv3id.onion:8333 +ejxefzf5fpst4mg2rib7grksvscl7p6fvjp6agzgfc2yglxnjtxc3aid.onion:8333 +fjdyxicpm4o42xmedlwl3uvk5gmqdfs5j37wir52327vncjzvtpfv7yd.onion:8333 +fpz6r5ppsakkwypjcglz6gcnwt7ytfhxskkfhzu62tnylcknh3eq6pad.onion:8333 +fzhn4uoxfbfss7h7d6ffbn266ca432ekbbzvqtsdd55ylgxn4jucm5qd.onion:8333 +gxo5anvfnffnftfy5frkgvplq3rpga2ie3tcblo2vl754fvnhgorn5yd.onion:8333 +ifdu5qvbofrt4ekui2iyb3kbcyzcsglazhx2hn4wfskkrx2v24qxriid.onion:8333 +itz3oxsihs62muvknc237xabl5f6w6rfznfhbpayrslv2j2ubels47yd.onion:8333 +lrjh6fywjqttmlifuemq3puhvmshxzzyhoqx7uoufali57eypuenzzid.onion:8333 +m7cbpjolo662uel7rpaid46as2otcj44vvwg3gccodnvaeuwbm3anbyd.onion:8333 +opnyfyeiibe5qo5a3wbxzbb4xdiagc32bbce46owmertdknta5mi7uyd.onion:8333 +owjsdxmzla6d7lrwkbmetywqym5cyswpihciesfl5qdv2vrmwsgy4uqd.onion:8333 +q7kgmd7n7h27ds4fg7wocgniuqb3oe2zxp4nfe4skd5da6wyipibqzqd.onion:8333 +rp7k2go3s5lyj3fnj6zn62ktarlrsft2ohlsxkyd7v3e3idqyptvread.onion:8333 +sys54sv4xv3hn3sdiv3oadmzqpgyhd4u4xphv4xqk64ckvaxzm57a7yd.onion:8333 +tddeij4qigtjr6jfnrmq6btnirmq5msgwcsdpcdjr7atftm7cxlqztid.onion:8333 +vi5bnbxkleeqi6hfccjochnn65lcxlfqs4uwgmhudph554zibiusqnad.onion:8333 +xqt25cobm5zqucac3634zfght72he6u3eagfyej5ellbhcdgos7t2had.onion:8333 + +# manually added 2021-08 for minimal i2p bootstrap support +a5qsnv3maw77mlmmzlcglu6twje6ttctd3fhpbfwcbpmewx6fczq.b32.i2p:0 +bitcornrd36coazsbzsz4pdebyzvaplmsalq4kpoljmn6cg6x5zq.b32.i2p:0 +c4gfnttsuwqomiygupdqqqyy5y5emnk5c73hrfvatri67prd7vyq.b32.i2p:0 +dhtq2p76tyhi442aidb3vd2bv7yxxjuddpb2jydnnrl2ons5bhha.b32.i2p:0 +h3r6bkn46qxftwja53pxiykntegfyfjqtnzbm6iv6r5mungmqgmq.b32.i2p:0 +hnbbyjpxx54623l555sta7pocy3se4sdgmuebi5k6reesz5rjp6q.b32.i2p:0 +jz3s4eurm5vzjresf4mwo7oni4bk36daolwxh4iqtewakylgkxmq.b32.i2p:0 +kokkmpquqlkptu5hkmzqlttsmtwxicldr4so7wqsufk6bwf32nma.b32.i2p:0 +sedndhv5vpcgdmykyi5st4yqhdxl3hpdtglta4do435wupahhx6q.b32.i2p:0 +wwbw7nqr3ahkqv62cuqfwgtneekvvpnuc4i4f6yo7tpoqjswvcwa.b32.i2p:0 +zsxwyo6qcn3chqzwxnseusqgsnuw3maqnztkiypyfxtya4snkoka.b32.i2p:0 + +# manually added 2022-01 for minimal cjdns bootstrap support +[fc32:17ea:e415:c3bf:9808:149d:b5a2:c9aa]:8333 +[fcc7:be49:ccd1:dc91:3125:f0da:457d:8ce]:8333 diff --git 
a/contrib/seeds/nodes_test.txt b/contrib/seeds/nodes_test.txt new file mode 100644 index 0000000000000..118bec280e218 --- /dev/null +++ b/contrib/seeds/nodes_test.txt @@ -0,0 +1,16 @@ +# List of fixed seed nodes for testnet + +# Onion nodes +35k2va6vyw4oo5ly2quvcszgdqr56kcnfgcqpnpcffut4jn3mhhwgbid.onion:18333 +blo2esfvk2rr7sr4jspmu3vt2vpgr5rigflsj645fnku7v4qmljurtid.onion:18333 +fuckcswupr5rmlvx2kqqrrosxvjyong4hatmuvxsvtcwe4dsh5rus7qd.onion:18333 +gblylyacjlitd2ywdmo2qqylwtdky7kgeqfvlhiw4zdag4x62tx54hyd.onion:18333 +gzwpduv33l7yze3bcdzj3inebiyjwddjnwvnjhh5wvnv4me76mjt2kad.onion:18333 +h3rphzofxzq52tb63mg5f6kc4my3fkcrgh3m5qryeatts43iljbawiid.onion:18333 +kf4qlhek34b3kgyxyodlmvgm4bxfrjsbjtgayyaiuyhr2eoyfgtm3bad.onion:18333 +mc7k47ndjvvhcgs54wmjzxvate4rtuybbjoryikdssjhcxlx27psbyqd.onion:18333 +mrhiniicugfo7mgrwv3wtolk3tptlcw2uq7ih6sq43fa4k4zbilut3yd.onion:18333 +uiudyws3qizgmepfoh7wwjmsoxoxut4qrmotjjhrn247xnjopr7sfcid.onion:18333 +zc2wvoqcezcrf64trji6jmhtss34a5ds5ntzdhqegzvex3ynrd7nxcad.onion:18333 +zd5m3dgdn46naj36pxvvcalfw2paecle6sdxq64ptwxtxjomkywpklqd.onion:18333 + diff --git a/contrib/shell/git-utils.bash b/contrib/shell/git-utils.bash new file mode 100644 index 0000000000000..37bac1f38d8f0 --- /dev/null +++ b/contrib/shell/git-utils.bash @@ -0,0 +1,14 @@ +#!/usr/bin/env bash + +git_root() { + git rev-parse --show-toplevel 2> /dev/null +} + +git_head_version() { + local recent_tag + if recent_tag="$(git describe --exact-match HEAD 2> /dev/null)"; then + echo "${recent_tag#v}" + else + git rev-parse --short=12 HEAD + fi +} diff --git a/contrib/shell/realpath.bash b/contrib/shell/realpath.bash new file mode 100644 index 0000000000000..389b77b56266d --- /dev/null +++ b/contrib/shell/realpath.bash @@ -0,0 +1,71 @@ +#!/usr/bin/env bash + +# Based on realpath.sh written by Michael Kropat +# Found at: https://github.com/mkropat/sh-realpath/blob/65512368b8155b176b67122aa395ac580d9acc5b/realpath.sh + +bash_realpath() { + canonicalize_path "$(resolve_symlinks "$1")" +} + +resolve_symlinks() { + _resolve_symlinks "$1" +} + +_resolve_symlinks() { + _assert_no_path_cycles "$@" || return + + local dir_context path + if path=$(readlink -- "$1"); then + dir_context=$(dirname -- "$1") + _resolve_symlinks "$(_prepend_dir_context_if_necessary "$dir_context" "$path")" "$@" + else + printf '%s\n' "$1" + fi +} + +_prepend_dir_context_if_necessary() { + if [ "$1" = . ]; then + printf '%s\n' "$2" + else + _prepend_path_if_relative "$1" "$2" + fi +} + +_prepend_path_if_relative() { + case "$2" in + /* ) printf '%s\n' "$2" ;; + * ) printf '%s\n' "$1/$2" ;; + esac +} + +_assert_no_path_cycles() { + local target path + + target=$1 + shift + + for path in "$@"; do + if [ "$path" = "$target" ]; then + return 1 + fi + done +} + +canonicalize_path() { + if [ -d "$1" ]; then + _canonicalize_dir_path "$1" + else + _canonicalize_file_path "$1" + fi +} + +_canonicalize_dir_path() { + (cd "$1" 2>/dev/null && pwd -P) +} + +_canonicalize_file_path() { + local dir file + dir=$(dirname -- "$1") + file=$(basename -- "$1") + (cd "$dir" 2>/dev/null && printf '%s/%s\n' "$(pwd -P)" "$file") +} diff --git a/contrib/signet/README.md b/contrib/signet/README.md new file mode 100644 index 0000000000000..706b296c54942 --- /dev/null +++ b/contrib/signet/README.md @@ -0,0 +1,83 @@ +Contents +======== +This directory contains tools related to Signet, both for running a Signet yourself and for using one. + +getcoins.py +=========== + +A script to call a faucet to get Signet coins. 
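+
+For example, assuming a signet node is running locally and `bitcoin-cli` is in your PATH, a session run from this directory might look like the following (the custom faucet URL, password and wallet name in the second command are purely illustrative):
+
+    # claim from the default global faucet; a fresh bech32 address is fetched via bitcoin-cli
+    ./getcoins.py
+
+    # claim from your own faucet instead, passing any extra bitcoin-cli arguments after --
+    ./getcoins.py --faucet=https://faucet.example.org/claim --password=classpass -- -signet -rpcwallet=students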
+
+Syntax: `getcoins.py [-h|--help] [-c|--cmd=] [-f|--faucet=] [-a|--addr=] [-p|--password=] [--] []`
+* `--cmd` lets you customize the bitcoin-cli path. By default it will look for `bitcoin-cli` in the PATH
+* `--faucet` lets you specify which faucet to use; the faucet is assumed to be compatible with https://github.com/kallewoof/bitcoin-faucet
+* `--addr` lets you specify a Signet address; by default, the address must be a bech32 address. This and `--cmd` above complement each other (i.e. you do not need `bitcoin-cli` if you use `--addr`)
+* `--password` lets you specify a faucet password; this is handy if you are in a classroom and set up your own faucet for your students (the faucet above does not limit by IP when a password is enabled)
+
+If using the default network, invoking the script with no arguments should be sufficient under normal
+circumstances, but if multiple people are behind the same IP address, the faucet will by default only
+accept one claim per day. See `--password` above.
+
+miner
+=====
+
+You will first need to pick a difficulty target. Since signet chains are primarily protected by a signature rather than proof of work, there is no need to spend as much energy as possible mining; however, you may wish to spend more time than the absolute minimum. The calibrate subcommand can be used to pick a target appropriate for your hardware, e.g.:
+
+    cd src/
+    MINER="../contrib/signet/miner"
+    GRIND="./bitcoin-util grind"
+    $MINER calibrate --grind-cmd="$GRIND"
+    nbits=1e00f403 for 25s average mining time
+
+It defaults to estimating an nbits value resulting in a 25s average time to find a block, but the --seconds parameter can be used to pick a different target, or the --nbits parameter can be used to estimate how long it will take for a given difficulty.
+
+To mine the first block in your custom chain, you can run:
+
+    CLI="./bitcoin-cli -conf=mysignet.conf"
+    ADDR=$($CLI -signet getnewaddress)
+    NBITS=1e00f403
+    $MINER --cli="$CLI" generate --grind-cmd="$GRIND" --address="$ADDR" --nbits=$NBITS
+
+This will mine a single block with a backdated timestamp designed to allow 100 blocks to be mined as quickly as possible, so that the first coinbase matures and transactions become possible.
+
+Adding the --ongoing parameter will then cause the signet miner to create blocks indefinitely. It will pick the time between blocks so that difficulty is adjusted to match the provided --nbits value.
+
+    $MINER --cli="$CLI" generate --grind-cmd="$GRIND" --address="$ADDR" --nbits=$NBITS --ongoing
+
+Other options
+-------------
+
+The --debug and --quiet options are available to control how noisy the signet miner's output is. Note that the --debug, --quiet and --cli parameters must all appear before the subcommand (generate, calibrate, etc.) if used.
+
+Instead of specifying --ongoing, you can specify --max-blocks=N to mine N blocks and stop.
+
+The --set-block-time option is available to manually move timestamps forward or backward (subject to the rules that blocktime must be greater than mediantime, and dates can't be more than two hours in the future). It can only be used when mining a single block (i.e., not when using --ongoing or --max-blocks greater than 1).
+
+Instead of using a single address, a ranged descriptor may be provided via the --descriptor parameter, with the reward for the block at height H being sent to the H'th address generated from the descriptor.
+
+Instead of calculating a specific nbits value, --min-nbits can be specified, in which case the minimum signet difficulty will be targeted. Signet's minimum difficulty corresponds to --nbits=1e0377ae.
+
+By default, the signet miner mines blocks at fixed intervals with minimal variation. If you want blocks to appear more randomly, as they do on mainnet, specify the --poisson option.
+
+Using the --multiminer parameter allows mining to be distributed amongst multiple miners. For example, if you have 3 miners and want to share blocks between them, specify --multiminer=1/3 on one, --multiminer=2/3 on another, and --multiminer=3/3 on the last one. If you want one to do 10% of blocks and two others to do 45% each, use --multiminer=1-10/100 on the first, and --multiminer=11-55/100 and --multiminer=56-100/100 on the others (a concrete sketch follows at the end of this section). Note that which miner mines which block is determined by the previous block hash, so occasional runs of one miner doing many blocks in a row are to be expected.
+
+When --multiminer is used, if a miner is down and does not mine a block within five minutes of when it is due, the other miners will automatically act as redundant backups, ensuring the chain does not halt. The --backup-delay parameter can be used to change how long a given miner waits, allowing one to be the primary backup (after five minutes) and another to be the secondary backup (after six minutes, for example).
+
+The --standby-delay parameter can be used to make a backup miner that only mines if a block doesn't arrive on time. This can be combined with --multiminer if desired. Setting --standby-delay also prevents the first block from being mined immediately.
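+
+As a concrete sketch of the three-way split described above (purely illustrative: each command runs on a different machine, reuses the $MINER, $CLI, $GRIND and $ADDR variables from the earlier examples, and targets the minimum difficulty via --min-nbits):
+
+    # machine 1: mines the blocks assigned to slot 1 of 3
+    $MINER --cli="$CLI" generate --grind-cmd="$GRIND" --address="$ADDR" --min-nbits --ongoing --multiminer=1/3
+    # machine 2: mines the blocks assigned to slot 2 of 3
+    $MINER --cli="$CLI" generate --grind-cmd="$GRIND" --address="$ADDR" --min-nbits --ongoing --multiminer=2/3
+    # machine 3: mines the blocks assigned to slot 3 of 3, and waits six minutes rather than the default five before backing up another miner
+    $MINER --cli="$CLI" generate --grind-cmd="$GRIND" --address="$ADDR" --min-nbits --ongoing --multiminer=3/3 --backup-delay=360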
+
+Advanced usage
+--------------
+
+The process that the generate command follows internally is to get a block template, convert that into a PSBT, sign the PSBT, move the signature from the signed PSBT into the block template's coinbase, grind proof of work for the block, and then submit the block to the network.
+
+These steps can instead be done explicitly:
+
+    $CLI -signet getblocktemplate '{"rules": ["signet","segwit"]}' |
+    $MINER --cli="$CLI" genpsbt --address="$ADDR" |
+    $CLI -signet -stdin walletprocesspsbt |
+    jq -r .psbt |
+    $MINER --cli="$CLI" solvepsbt --grind-cmd="$GRIND" |
+    $CLI -signet -stdin submitblock
+
+This is intended to allow you to replace part of the pipeline for further experimentation (e.g., to sign the block with a hardware wallet).
+
diff --git a/contrib/signet/getcoins.py b/contrib/signet/getcoins.py
new file mode 100755
index 0000000000000..147d12600df6d
--- /dev/null
+++ b/contrib/signet/getcoins.py
@@ -0,0 +1,158 @@
+#!/usr/bin/env python3
+# Copyright (c) 2020-2021 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+import argparse
+import io
+import requests
+import subprocess
+import sys
+import xml.etree.ElementTree
+
+DEFAULT_GLOBAL_FAUCET = 'https://signetfaucet.com/claim'
+DEFAULT_GLOBAL_CAPTCHA = 'https://signetfaucet.com/captcha'
+GLOBAL_FIRST_BLOCK_HASH = '00000086d6b2636cb2a392d45edc4ec544a10024d30141c9adf4bfd9de533b53'
+
+# braille unicode block
+BASE = 0x2800
+BIT_PER_PIXEL = [
+    [0x01, 0x08],
+    [0x02, 0x10],
+    [0x04, 0x20],
+    [0x40, 0x80],
+]
+BW = 2
+BH = 4
+
+# imagemagick or compatible fork (used for converting SVG)
+CONVERT = 'convert'
+
+class PPMImage:
+    '''
+    Load a PPM image (Pillow-ish API).
+ ''' + def __init__(self, f): + if f.readline() != b'P6\n': + raise ValueError('Invalid ppm format: header') + line = f.readline() + (width, height) = (int(x) for x in line.rstrip().split(b' ')) + if f.readline() != b'255\n': + raise ValueError('Invalid ppm format: color depth') + data = f.read(width * height * 3) + stride = width * 3 + self.size = (width, height) + self._grid = [[tuple(data[stride * y + 3 * x:stride * y + 3 * (x + 1)]) for x in range(width)] for y in range(height)] + + def getpixel(self, pos): + return self._grid[pos[1]][pos[0]] + +def print_image(img, threshold=128): + '''Print black-and-white image to terminal in braille unicode characters.''' + x_blocks = (img.size[0] + BW - 1) // BW + y_blocks = (img.size[1] + BH - 1) // BH + + for yb in range(y_blocks): + line = [] + for xb in range(x_blocks): + ch = BASE + for y in range(BH): + for x in range(BW): + try: + val = img.getpixel((xb * BW + x, yb * BH + y)) + except IndexError: + pass + else: + if val[0] < threshold: + ch |= BIT_PER_PIXEL[y][x] + line.append(chr(ch)) + print(''.join(line)) + +parser = argparse.ArgumentParser(description='Script to get coins from a faucet.', epilog='You may need to start with double-dash (--) when providing bitcoin-cli arguments.') +parser.add_argument('-c', '--cmd', dest='cmd', default='bitcoin-cli', help='bitcoin-cli command to use') +parser.add_argument('-f', '--faucet', dest='faucet', default=DEFAULT_GLOBAL_FAUCET, help='URL of the faucet') +parser.add_argument('-g', '--captcha', dest='captcha', default=DEFAULT_GLOBAL_CAPTCHA, help='URL of the faucet captcha, or empty if no captcha is needed') +parser.add_argument('-a', '--addr', dest='addr', default='', help='Bitcoin address to which the faucet should send') +parser.add_argument('-p', '--password', dest='password', default='', help='Faucet password, if any') +parser.add_argument('-n', '--amount', dest='amount', default='0.001', help='Amount to request (0.001-0.1, default is 0.001)') +parser.add_argument('-i', '--imagemagick', dest='imagemagick', default=CONVERT, help='Path to imagemagick convert utility') +parser.add_argument('bitcoin_cli_args', nargs='*', help='Arguments to pass on to bitcoin-cli (default: -signet)') + +args = parser.parse_args() + +if args.bitcoin_cli_args == []: + args.bitcoin_cli_args = ['-signet'] + + +def bitcoin_cli(rpc_command_and_params): + argv = [args.cmd] + args.bitcoin_cli_args + rpc_command_and_params + try: + return subprocess.check_output(argv).strip().decode() + except FileNotFoundError: + raise SystemExit(f"The binary {args.cmd} could not be found") + except subprocess.CalledProcessError: + cmdline = ' '.join(argv) + raise SystemExit(f"-----\nError while calling {cmdline} (see output above).") + + +if args.faucet.lower() == DEFAULT_GLOBAL_FAUCET: + # Get the hash of the block at height 1 of the currently active signet chain + curr_signet_hash = bitcoin_cli(['getblockhash', '1']) + if curr_signet_hash != GLOBAL_FIRST_BLOCK_HASH: + raise SystemExit('The global faucet cannot be used with a custom Signet network. Please use the global signet or setup your custom faucet to use this functionality.\n') +else: + # For custom faucets, don't request captcha by default. 
+ if args.captcha == DEFAULT_GLOBAL_CAPTCHA: + args.captcha = '' + +if args.addr == '': + # get address for receiving coins + args.addr = bitcoin_cli(['getnewaddress', 'faucet', 'bech32']) + +data = {'address': args.addr, 'password': args.password, 'amount': args.amount} + +# Store cookies +# for debugging: print(session.cookies.get_dict()) +session = requests.Session() + +if args.captcha != '': # Retrieve a captcha + try: + res = session.get(args.captcha) + res.raise_for_status() + except requests.exceptions.RequestException as e: + raise SystemExit(f"Unexpected error when contacting faucet: {e}") + + # Size limitation + svg = xml.etree.ElementTree.fromstring(res.content) + if svg.attrib.get('width') != '150' or svg.attrib.get('height') != '50': + raise SystemExit("Captcha size doesn't match expected dimensions 150x50") + + # Convert SVG image to PPM, and load it + try: + rv = subprocess.run([args.imagemagick, 'svg:-', '-depth', '8', 'ppm:-'], input=res.content, check=True, capture_output=True) + except FileNotFoundError: + raise SystemExit(f"The binary {args.imagemagick} could not be found. Please make sure ImageMagick (or a compatible fork) is installed and that the correct path is specified.") + + img = PPMImage(io.BytesIO(rv.stdout)) + + # Terminal interaction + print_image(img) + print(f"Captcha from URL {args.captcha}") + data['captcha'] = input('Enter captcha: ') + +try: + res = session.post(args.faucet, data=data) +except: + raise SystemExit(f"Unexpected error when contacting faucet: {sys.exc_info()[0]}") + +# Display the output as per the returned status code +if res: + # When the return code is in between 200 and 400 i.e. successful + print(res.text) +elif res.status_code == 404: + print('The specified faucet URL does not exist. Please check for any server issues/typo.') +elif res.status_code == 429: + print('The script does not allow for repeated transactions as the global faucet is rate-limitied to 1 request/IP/day. You can access the faucet website to get more coins manually') +else: + print(f'Returned Error Code {res.status_code}\n{res.text}\n') + print('Please check the provided arguments for their validity and/or any possible typo.') diff --git a/contrib/signet/miner b/contrib/signet/miner new file mode 100755 index 0000000000000..b366b98e2d8d9 --- /dev/null +++ b/contrib/signet/miner @@ -0,0 +1,631 @@ +#!/usr/bin/env python3 +# Copyright (c) 2020 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+ +import argparse +import base64 +import json +import logging +import math +import os +import re +import struct +import sys +import time +import subprocess + +from io import BytesIO + +PATH_BASE_CONTRIB_SIGNET = os.path.abspath(os.path.dirname(os.path.realpath(__file__))) +PATH_BASE_TEST_FUNCTIONAL = os.path.abspath(os.path.join(PATH_BASE_CONTRIB_SIGNET, "..", "..", "test", "functional")) +sys.path.insert(0, PATH_BASE_TEST_FUNCTIONAL) + +from test_framework.blocktools import WITNESS_COMMITMENT_HEADER, script_BIP34_coinbase_height # noqa: E402 +from test_framework.messages import CBlock, CBlockHeader, COutPoint, CTransaction, CTxIn, CTxInWitness, CTxOut, from_hex, deser_string, hash256, ser_compact_size, ser_string, ser_uint256, tx_from_hex, uint256_from_str # noqa: E402 +from test_framework.script import CScriptOp # noqa: E402 + +logging.basicConfig( + format='%(asctime)s %(levelname)s %(message)s', + level=logging.INFO, + datefmt='%Y-%m-%d %H:%M:%S') + +SIGNET_HEADER = b"\xec\xc7\xda\xa2" +PSBT_SIGNET_BLOCK = b"\xfc\x06signetb" # proprietary PSBT global field holding the block being signed +RE_MULTIMINER = re.compile("^(\d+)(-(\d+))?/(\d+)$") + +# #### some helpers that could go into test_framework + +# like from_hex, but without the hex part +def FromBinary(cls, stream): + """deserialize a binary stream (or bytes object) into an object""" + # handle bytes object by turning it into a stream + was_bytes = isinstance(stream, bytes) + if was_bytes: + stream = BytesIO(stream) + obj = cls() + obj.deserialize(stream) + if was_bytes: + assert len(stream.read()) == 0 + return obj + +class PSBTMap: + """Class for serializing and deserializing PSBT maps""" + + def __init__(self, map=None): + self.map = map if map is not None else {} + + def deserialize(self, f): + m = {} + while True: + k = deser_string(f) + if len(k) == 0: + break + v = deser_string(f) + if len(k) == 1: + k = k[0] + assert k not in m + m[k] = v + self.map = m + + def serialize(self): + m = b"" + for k,v in self.map.items(): + if isinstance(k, int) and 0 <= k and k <= 255: + k = bytes([k]) + m += ser_compact_size(len(k)) + k + m += ser_compact_size(len(v)) + v + m += b"\x00" + return m + +class PSBT: + """Class for serializing and deserializing PSBTs""" + + def __init__(self): + self.g = PSBTMap() + self.i = [] + self.o = [] + self.tx = None + + def deserialize(self, f): + assert f.read(5) == b"psbt\xff" + self.g = FromBinary(PSBTMap, f) + assert 0 in self.g.map + self.tx = FromBinary(CTransaction, self.g.map[0]) + self.i = [FromBinary(PSBTMap, f) for _ in self.tx.vin] + self.o = [FromBinary(PSBTMap, f) for _ in self.tx.vout] + return self + + def serialize(self): + assert isinstance(self.g, PSBTMap) + assert isinstance(self.i, list) and all(isinstance(x, PSBTMap) for x in self.i) + assert isinstance(self.o, list) and all(isinstance(x, PSBTMap) for x in self.o) + assert 0 in self.g.map + tx = FromBinary(CTransaction, self.g.map[0]) + assert len(tx.vin) == len(self.i) + assert len(tx.vout) == len(self.o) + + psbt = [x.serialize() for x in [self.g] + self.i + self.o] + return b"psbt\xff" + b"".join(psbt) + + def to_base64(self): + return base64.b64encode(self.serialize()).decode("utf8") + + @classmethod + def from_base64(cls, b64psbt): + return FromBinary(cls, base64.b64decode(b64psbt)) + +# ##### + +def create_coinbase(height, value, spk): + cb = CTransaction() + cb.vin = [CTxIn(COutPoint(0, 0xffffffff), script_BIP34_coinbase_height(height), 0xffffffff)] + cb.vout = [CTxOut(value, spk)] + return cb + +def 
get_witness_script(witness_root, witness_nonce): + commitment = uint256_from_str(hash256(ser_uint256(witness_root) + ser_uint256(witness_nonce))) + return b"\x6a" + CScriptOp.encode_op_pushdata(WITNESS_COMMITMENT_HEADER + ser_uint256(commitment)) + +def signet_txs(block, challenge): + # assumes signet solution has not been added yet so does not need + # to be removed + + txs = block.vtx[:] + txs[0] = CTransaction(txs[0]) + txs[0].vout[-1].scriptPubKey += CScriptOp.encode_op_pushdata(SIGNET_HEADER) + hashes = [] + for tx in txs: + tx.rehash() + hashes.append(ser_uint256(tx.sha256)) + mroot = block.get_merkle_root(hashes) + + sd = b"" + sd += struct.pack("> 24) & 0xff + return (nbits & 0x00ffffff) * 2**(8*(shift - 3)) + +def target_to_nbits(target): + tstr = "{0:x}".format(target) + if len(tstr) < 6: + tstr = ("000000"+tstr)[-6:] + if len(tstr) % 2 != 0: + tstr = "0" + tstr + if int(tstr[0],16) >= 0x8: + # avoid "negative" + tstr = "00" + tstr + fix = int(tstr[:6], 16) + sz = len(tstr)//2 + if tstr[6:] != "0"*(sz*2-6): + fix += 1 + + return int("%02x%06x" % (sz,fix), 16) + +def seconds_to_hms(s): + if s == 0: + return "0s" + neg = (s < 0) + if neg: + s = -s + out = "" + if s % 60 > 0: + out = "%ds" % (s % 60) + s //= 60 + if s % 60 > 0: + out = "%dm%s" % (s % 60, out) + s //= 60 + if s > 0: + out = "%dh%s" % (s, out) + if neg: + out = "-" + out + return out + +def next_block_delta(last_nbits, last_hash, ultimate_target, do_poisson): + # strategy: + # 1) work out how far off our desired target we are + # 2) cap it to a factor of 4 since that's the best we can do in a single retarget period + # 3) use that to work out the desired average interval in this retarget period + # 4) if doing poisson, use the last hash to pick a uniformly random number in [0,1), and work out a random multiplier to vary the average by + # 5) cap the resulting interval between 1 second and 1 hour to avoid extremes + + INTERVAL = 600.0*2016/2015 # 10 minutes, adjusted for the off-by-one bug + + current_target = nbits_to_target(last_nbits) + retarget_factor = ultimate_target / current_target + retarget_factor = max(0.25, min(retarget_factor, 4.0)) + + avg_interval = INTERVAL * retarget_factor + + if do_poisson: + det_rand = int(last_hash[-8:], 16) * 2**-32 + this_interval_variance = -math.log1p(-det_rand) + else: + this_interval_variance = 1 + + this_interval = avg_interval * this_interval_variance + this_interval = max(1, min(this_interval, 3600)) + + return this_interval + +def next_block_is_mine(last_hash, my_blocks): + det_rand = int(last_hash[-16:-8], 16) + return my_blocks[0] <= (det_rand % my_blocks[2]) < my_blocks[1] + +def do_generate(args): + if args.max_blocks is not None: + if args.ongoing: + logging.error("Cannot specify both --ongoing and --max-blocks") + return 1 + if args.max_blocks < 1: + logging.error("N must be a positive integer") + return 1 + max_blocks = args.max_blocks + elif args.ongoing: + max_blocks = None + else: + max_blocks = 1 + + if args.set_block_time is not None and max_blocks != 1: + logging.error("Cannot specify --ongoing or --max-blocks > 1 when using --set-block-time") + return 1 + if args.set_block_time is not None and args.set_block_time < 0: + args.set_block_time = time.time() + logging.info("Treating negative block time as current time (%d)" % (args.set_block_time)) + + if args.min_nbits: + if args.nbits is not None: + logging.error("Cannot specify --nbits and --min-nbits") + return 1 + args.nbits = "1e0377ae" + logging.info("Using nbits=%s" % (args.nbits)) + + if 
args.set_block_time is None: + if args.nbits is None or len(args.nbits) != 8: + logging.error("Must specify --nbits (use calibrate command to determine value)") + return 1 + + if args.multiminer is None: + my_blocks = (0,1,1) + else: + if not args.ongoing: + logging.error("Cannot specify --multiminer without --ongoing") + return 1 + m = RE_MULTIMINER.match(args.multiminer) + if m is None: + logging.error("--multiminer argument must be k/m or j-k/m") + return 1 + start,_,stop,total = m.groups() + if stop is None: + stop = start + start, stop, total = map(int, (start, stop, total)) + if stop < start or start <= 0 or total < stop or total == 0: + logging.error("Inconsistent values for --multiminer") + return 1 + my_blocks = (start-1, stop, total) + + ultimate_target = nbits_to_target(int(args.nbits,16)) + + mined_blocks = 0 + bestheader = {"hash": None} + lastheader = None + while max_blocks is None or mined_blocks < max_blocks: + + # current status? + bci = json.loads(args.bcli("getblockchaininfo")) + + if bestheader["hash"] != bci["bestblockhash"]: + bestheader = json.loads(args.bcli("getblockheader", bci["bestblockhash"])) + + if lastheader is None: + lastheader = bestheader["hash"] + elif bestheader["hash"] != lastheader: + next_delta = next_block_delta(int(bestheader["bits"], 16), bestheader["hash"], ultimate_target, args.poisson) + next_delta += bestheader["time"] - time.time() + next_is_mine = next_block_is_mine(bestheader["hash"], my_blocks) + logging.info("Received new block at height %d; next in %s (%s)", bestheader["height"], seconds_to_hms(next_delta), ("mine" if next_is_mine else "backup")) + lastheader = bestheader["hash"] + + # when is the next block due to be mined? + now = time.time() + if args.set_block_time is not None: + logging.debug("Setting start time to %d", args.set_block_time) + mine_time = args.set_block_time + action_time = now + is_mine = True + elif bestheader["height"] == 0: + time_delta = next_block_delta(int(bestheader["bits"], 16), bci["bestblockhash"], ultimate_target, args.poisson) + time_delta *= 100 # 100 blocks + logging.info("Backdating time for first block to %d minutes ago" % (time_delta/60)) + mine_time = now - time_delta + action_time = now + is_mine = True + else: + time_delta = next_block_delta(int(bestheader["bits"], 16), bci["bestblockhash"], ultimate_target, args.poisson) + mine_time = bestheader["time"] + time_delta + + is_mine = next_block_is_mine(bci["bestblockhash"], my_blocks) + + action_time = mine_time + if not is_mine: + action_time += args.backup_delay + + if args.standby_delay > 0: + action_time += args.standby_delay + elif mined_blocks == 0: + # for non-standby, always mine immediately on startup, + # even if the next block shouldn't be ours + action_time = now + + # don't want fractional times so round down + mine_time = int(mine_time) + action_time = int(action_time) + + # can't mine a block 2h in the future; 1h55m for some safety + action_time = max(action_time, mine_time - 6900) + + # ready to go? 
otherwise sleep and check for new block + if now < action_time: + sleep_for = min(action_time - now, 60) + if mine_time < now: + # someone else might have mined the block, + # so check frequently, so we don't end up late + # mining the next block if it's ours + sleep_for = min(20, sleep_for) + minestr = "mine" if is_mine else "backup" + logging.debug("Sleeping for %s, next block due in %s (%s)" % (seconds_to_hms(sleep_for), seconds_to_hms(mine_time - now), minestr)) + time.sleep(sleep_for) + continue + + # gbt + tmpl = json.loads(args.bcli("getblocktemplate", '{"rules":["signet","segwit"]}')) + if tmpl["previousblockhash"] != bci["bestblockhash"]: + logging.warning("GBT based off unexpected block (%s not %s), retrying", tmpl["previousblockhash"], bci["bestblockhash"]) + time.sleep(1) + continue + + logging.debug("GBT template: %s", tmpl) + + if tmpl["mintime"] > mine_time: + logging.info("Updating block time from %d to %d", mine_time, tmpl["mintime"]) + mine_time = tmpl["mintime"] + if mine_time > now: + logging.error("GBT mintime is in the future: %d is %d seconds later than %d", mine_time, (mine_time-now), now) + return 1 + + # address for reward + reward_addr, reward_spk = get_reward_addr_spk(args, tmpl["height"]) + + # mine block + logging.debug("Mining block delta=%s start=%s mine=%s", seconds_to_hms(mine_time-bestheader["time"]), mine_time, is_mine) + mined_blocks += 1 + psbt = generate_psbt(tmpl, reward_spk, blocktime=mine_time) + input_stream = os.linesep.join([psbt, "true", "ALL"]).encode('utf8') + psbt_signed = json.loads(args.bcli("-stdin", "walletprocesspsbt", input=input_stream)) + if not psbt_signed.get("complete",False): + logging.debug("Generated PSBT: %s" % (psbt,)) + sys.stderr.write("PSBT signing failed\n") + return 1 + block, signet_solution = do_decode_psbt(psbt_signed["psbt"]) + block = finish_block(block, signet_solution, args.grind_cmd) + + # submit block + r = args.bcli("-stdin", "submitblock", input=block.serialize().hex().encode('utf8')) + + # report + bstr = "block" if is_mine else "backup block" + + next_delta = next_block_delta(block.nBits, block.hash, ultimate_target, args.poisson) + next_delta += block.nTime - time.time() + next_is_mine = next_block_is_mine(block.hash, my_blocks) + + logging.debug("Block hash %s payout to %s", block.hash, reward_addr) + logging.info("Mined %s at height %d; next in %s (%s)", bstr, tmpl["height"], seconds_to_hms(next_delta), ("mine" if next_is_mine else "backup")) + if r != "": + logging.warning("submitblock returned %s for height %d hash %s", r, tmpl["height"], block.hash) + lastheader = block.hash + +def do_calibrate(args): + if args.nbits is not None and args.seconds is not None: + sys.stderr.write("Can only specify one of --nbits or --seconds\n") + return 1 + if args.nbits is not None and len(args.nbits) != 8: + sys.stderr.write("Must specify 8 hex digits for --nbits\n") + return 1 + + TRIALS = 600 # gets variance down pretty low + TRIAL_BITS = 0x1e3ea75f # takes about 5m to do 600 trials + + header = CBlockHeader() + header.nBits = TRIAL_BITS + targ = nbits_to_target(header.nBits) + + start = time.time() + count = 0 + for i in range(TRIALS): + header.nTime = i + header.nNonce = 0 + headhex = header.serialize().hex() + cmd = args.grind_cmd.split(" ") + [headhex] + newheadhex = subprocess.run(cmd, stdout=subprocess.PIPE, input=b"", check=True).stdout.strip() + + avg = (time.time() - start) * 1.0 / TRIALS + + if args.nbits is not None: + want_targ = nbits_to_target(int(args.nbits,16)) + want_time = avg*targ/want_targ + else: 
+ want_time = args.seconds if args.seconds is not None else 25 + want_targ = int(targ*(avg/want_time)) + + print("nbits=%08x for %ds average mining time" % (target_to_nbits(want_targ), want_time)) + return 0 + +def bitcoin_cli(basecmd, args, **kwargs): + cmd = basecmd + ["-signet"] + args + logging.debug("Calling bitcoin-cli: %r", cmd) + out = subprocess.run(cmd, stdout=subprocess.PIPE, **kwargs, check=True).stdout + if isinstance(out, bytes): + out = out.decode('utf8') + return out.strip() + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument("--cli", default="bitcoin-cli", type=str, help="bitcoin-cli command") + parser.add_argument("--debug", action="store_true", help="Print debugging info") + parser.add_argument("--quiet", action="store_true", help="Only print warnings/errors") + + cmds = parser.add_subparsers(help="sub-commands") + genpsbt = cmds.add_parser("genpsbt", help="Generate a block PSBT for signing") + genpsbt.set_defaults(fn=do_genpsbt) + + solvepsbt = cmds.add_parser("solvepsbt", help="Solve a signed block PSBT") + solvepsbt.set_defaults(fn=do_solvepsbt) + + generate = cmds.add_parser("generate", help="Mine blocks") + generate.set_defaults(fn=do_generate) + generate.add_argument("--ongoing", action="store_true", help="Keep mining blocks") + generate.add_argument("--max-blocks", default=None, type=int, help="Max blocks to mine (default=1)") + generate.add_argument("--set-block-time", default=None, type=int, help="Set block time (unix timestamp)") + generate.add_argument("--nbits", default=None, type=str, help="Target nBits (specify difficulty)") + generate.add_argument("--min-nbits", action="store_true", help="Target minimum nBits (use min difficulty)") + generate.add_argument("--poisson", action="store_true", help="Simulate randomised block times") + generate.add_argument("--multiminer", default=None, type=str, help="Specify which set of blocks to mine (eg: 1-40/100 for the first 40%%, 2/3 for the second 3rd)") + generate.add_argument("--backup-delay", default=300, type=int, help="Seconds to delay before mining blocks reserved for other miners (default=300)") + generate.add_argument("--standby-delay", default=0, type=int, help="Seconds to delay before mining blocks (default=0)") + + calibrate = cmds.add_parser("calibrate", help="Calibrate difficulty") + calibrate.set_defaults(fn=do_calibrate) + calibrate.add_argument("--nbits", type=str, default=None) + calibrate.add_argument("--seconds", type=int, default=None) + + for sp in [genpsbt, generate]: + sp.add_argument("--address", default=None, type=str, help="Address for block reward payment") + sp.add_argument("--descriptor", default=None, type=str, help="Descriptor for block reward payment") + + for sp in [solvepsbt, generate, calibrate]: + sp.add_argument("--grind-cmd", default=None, type=str, required=(sp==calibrate), help="Command to grind a block header for proof-of-work") + + args = parser.parse_args(sys.argv[1:]) + + args.bcli = lambda *a, input=b"", **kwargs: bitcoin_cli(args.cli.split(" "), list(a), input=input, **kwargs) + + if hasattr(args, "address") and hasattr(args, "descriptor"): + if args.address is None and args.descriptor is None: + sys.stderr.write("Must specify --address or --descriptor\n") + return 1 + elif args.address is not None and args.descriptor is not None: + sys.stderr.write("Only specify one of --address or --descriptor\n") + return 1 + args.derived_addresses = {} + + if args.debug: + logging.getLogger().setLevel(logging.DEBUG) + elif args.quiet: + 
logging.getLogger().setLevel(logging.WARNING) + else: + logging.getLogger().setLevel(logging.INFO) + + if hasattr(args, "fn"): + return args.fn(args) + else: + logging.error("Must specify command") + return 1 + +if __name__ == "__main__": + main() + + diff --git a/contrib/spendfrom/README.md b/contrib/spendfrom/README.md deleted file mode 100644 index c0a9c9ccf93d3..0000000000000 --- a/contrib/spendfrom/README.md +++ /dev/null @@ -1,35 +0,0 @@ -### SpendFrom ### - -Use the raw transactions API to send coins received on a particular -address (or addresses). - -### Usage: ### -Depends on [jsonrpc](http://json-rpc.org/). - - spendfrom.py --from=FROMADDRESS1[,FROMADDRESS2] --to=TOADDRESS --amount=amount \ - --fee=fee --datadir=/path/to/.bitcoin --testnet --dry_run - -With no arguments, outputs a list of amounts associated with addresses. - -With arguments, sends coins received by the `FROMADDRESS` addresses to the `TOADDRESS`. - -### Notes ### - -- You may explicitly specify how much fee to pay (a fee more than 1% of the amount -will fail, though, to prevent bitcoin-losing accidents). Spendfrom may fail if -it thinks the transaction would never be confirmed (if the amount being sent is -too small, or if the transaction is too many bytes for the fee). - -- If a change output needs to be created, the change will be sent to the last -`FROMADDRESS` (if you specify just one `FROMADDRESS`, change will go back to it). - -- If `--datadir` is not specified, the default datadir is used. - -- The `--dry_run` option will just create and sign the transaction and print -the transaction data (as hexadecimal), instead of broadcasting it. - -- If the transaction is created and broadcast successfully, a transaction id -is printed. - -- If this was a tool for end-users and not programmers, it would have much friendlier -error-handling. diff --git a/contrib/spendfrom/setup.py b/contrib/spendfrom/setup.py deleted file mode 100644 index 01b9768a5b06a..0000000000000 --- a/contrib/spendfrom/setup.py +++ /dev/null @@ -1,9 +0,0 @@ -from distutils.core import setup -setup(name='btcspendfrom', - version='1.0', - description='Command-line utility for bitcoin "coin control"', - author='Gavin Andresen', - author_email='gavin@bitcoinfoundation.org', - requires=['jsonrpc'], - scripts=['spendfrom.py'], - ) diff --git a/contrib/spendfrom/spendfrom.py b/contrib/spendfrom/spendfrom.py deleted file mode 100755 index 72ee0425eb236..0000000000000 --- a/contrib/spendfrom/spendfrom.py +++ /dev/null @@ -1,267 +0,0 @@ -#!/usr/bin/env python -# -# Use the raw transactions API to spend bitcoins received on particular addresses, -# and send any change back to that same address. -# -# Example usage: -# spendfrom.py # Lists available funds -# spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00 -# -# Assumes it will talk to a bitcoind or Bitcoin-Qt running -# on localhost. 
-# -# Depends on jsonrpc -# - -from decimal import * -import getpass -import math -import os -import os.path -import platform -import sys -import time -from jsonrpc import ServiceProxy, json - -BASE_FEE=Decimal("0.001") - -def check_json_precision(): - """Make sure json library being used does not lose precision converting BTC values""" - n = Decimal("20000000.00000003") - satoshis = int(json.loads(json.dumps(float(n)))*1.0e8) - if satoshis != 2000000000000003: - raise RuntimeError("JSON encode/decode loses precision") - -def determine_db_dir(): - """Return the default location of the bitcoin data directory""" - if platform.system() == "Darwin": - return os.path.expanduser("~/Library/Application Support/Bitcoin/") - elif platform.system() == "Windows": - return os.path.join(os.environ['APPDATA'], "Bitcoin") - return os.path.expanduser("~/.bitcoin") - -def read_bitcoin_config(dbdir): - """Read the bitcoin.conf file from dbdir, returns dictionary of settings""" - from ConfigParser import SafeConfigParser - - class FakeSecHead(object): - def __init__(self, fp): - self.fp = fp - self.sechead = '[all]\n' - def readline(self): - if self.sechead: - try: return self.sechead - finally: self.sechead = None - else: - s = self.fp.readline() - if s.find('#') != -1: - s = s[0:s.find('#')].strip() +"\n" - return s - - config_parser = SafeConfigParser() - config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "bitcoin.conf")))) - return dict(config_parser.items("all")) - -def connect_JSON(config): - """Connect to a bitcoin JSON-RPC server""" - testnet = config.get('testnet', '0') - testnet = (int(testnet) > 0) # 0/1 in config file, convert to True/False - if not 'rpcport' in config: - config['rpcport'] = 18332 if testnet else 8332 - connect = "http://%s:%s@127.0.0.1:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport']) - try: - result = ServiceProxy(connect) - # ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors, - # but also make sure the bitcoind we're talking to is/isn't testnet: - if result.getmininginfo()['testnet'] != testnet: - sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n") - sys.exit(1) - return result - except: - sys.stderr.write("Error connecting to RPC server at "+connect+"\n") - sys.exit(1) - -def unlock_wallet(bitcoind): - info = bitcoind.getinfo() - if 'unlocked_until' not in info: - return True # wallet is not encrypted - t = int(info['unlocked_until']) - if t <= time.time(): - try: - passphrase = getpass.getpass("Wallet is locked; enter passphrase: ") - bitcoind.walletpassphrase(passphrase, 5) - except: - sys.stderr.write("Wrong passphrase\n") - - info = bitcoind.getinfo() - return int(info['unlocked_until']) > time.time() - -def list_available(bitcoind): - address_summary = dict() - - address_to_account = dict() - for info in bitcoind.listreceivedbyaddress(0): - address_to_account[info["address"]] = info["account"] - - unspent = bitcoind.listunspent(0) - for output in unspent: - # listunspent doesn't give addresses, so: - rawtx = bitcoind.getrawtransaction(output['txid'], 1) - vout = rawtx["vout"][output['vout']] - pk = vout["scriptPubKey"] - - # This code only deals with ordinary pay-to-bitcoin-address - # or pay-to-script-hash outputs right now; anything exotic is ignored. 
- if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash": - continue - - address = pk["addresses"][0] - if address in address_summary: - address_summary[address]["total"] += vout["value"] - address_summary[address]["outputs"].append(output) - else: - address_summary[address] = { - "total" : vout["value"], - "outputs" : [output], - "account" : address_to_account.get(address, "") - } - - return address_summary - -def select_coins(needed, inputs): - # Feel free to improve this, this is good enough for my simple needs: - outputs = [] - have = Decimal("0.0") - n = 0 - while have < needed and n < len(inputs): - outputs.append({ "txid":inputs[n]["txid"], "vout":inputs[n]["vout"]}) - have += inputs[n]["amount"] - n += 1 - return (outputs, have-needed) - -def create_tx(bitcoind, fromaddresses, toaddress, amount, fee): - all_coins = list_available(bitcoind) - - total_available = Decimal("0.0") - needed = amount+fee - potential_inputs = [] - for addr in fromaddresses: - if addr not in all_coins: - continue - potential_inputs.extend(all_coins[addr]["outputs"]) - total_available += all_coins[addr]["total"] - - if total_available < needed: - sys.stderr.write("Error, only %f BTC available, need %f\n"%(total_available, needed)); - sys.exit(1) - - # - # Note: - # Python's json/jsonrpc modules have inconsistent support for Decimal numbers. - # Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode - # Decimals, I'm casting amounts to float before sending them to bitcoind. - # - outputs = { toaddress : float(amount) } - (inputs, change_amount) = select_coins(needed, potential_inputs) - if change_amount > BASE_FEE: # don't bother with zero or tiny change - change_address = fromaddresses[-1] - if change_address in outputs: - outputs[change_address] += float(change_amount) - else: - outputs[change_address] = float(change_amount) - - rawtx = bitcoind.createrawtransaction(inputs, outputs) - signed_rawtx = bitcoind.signrawtransaction(rawtx) - if not signed_rawtx["complete"]: - sys.stderr.write("signrawtransaction failed\n") - sys.exit(1) - txdata = signed_rawtx["hex"] - - return txdata - -def compute_amount_in(bitcoind, txinfo): - result = Decimal("0.0") - for vin in txinfo['vin']: - in_info = bitcoind.getrawtransaction(vin['txid'], 1) - vout = in_info['vout'][vin['vout']] - result = result + vout['value'] - return result - -def compute_amount_out(txinfo): - result = Decimal("0.0") - for vout in txinfo['vout']: - result = result + vout['value'] - return result - -def sanity_test_fee(bitcoind, txdata_hex, max_fee): - class FeeError(RuntimeError): - pass - try: - txinfo = bitcoind.decoderawtransaction(txdata_hex) - total_in = compute_amount_in(bitcoind, txinfo) - total_out = compute_amount_out(txinfo) - if total_in-total_out > max_fee: - raise FeeError("Rejecting transaction, unreasonable fee of "+str(total_in-total_out)) - - tx_size = len(txdata_hex)/2 - kb = tx_size/1000 # integer division rounds down - if kb > 1 and fee < BASE_FEE: - raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes") - if total_in < 0.01 and fee < BASE_FEE: - raise FeeError("Rejecting no-fee, tiny-amount transaction") - # Exercise for the reader: compute transaction priority, and - # warn if this is a very-low-priority transaction - - except FeeError as err: - sys.stderr.write((str(err)+"\n")) - sys.exit(1) - -def main(): - import optparse - - parser = optparse.OptionParser(usage="%prog [options]") - parser.add_option("--from", dest="fromaddresses", default=None, - help="addresses to get bitcoins 
from") - parser.add_option("--to", dest="to", default=None, - help="address to get send bitcoins to") - parser.add_option("--amount", dest="amount", default=None, - help="amount to send") - parser.add_option("--fee", dest="fee", default="0.0", - help="fee to include") - parser.add_option("--datadir", dest="datadir", default=determine_db_dir(), - help="location of bitcoin.conf file with RPC username/password (default: %default)") - parser.add_option("--testnet", dest="testnet", default=False, action="store_true", - help="Use the test network") - parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true", - help="Don't broadcast the transaction, just create and print the transaction data") - - (options, args) = parser.parse_args() - - check_json_precision() - config = read_bitcoin_config(options.datadir) - if options.testnet: config['testnet'] = True - bitcoind = connect_JSON(config) - - if options.amount is None: - address_summary = list_available(bitcoind) - for address,info in address_summary.iteritems(): - n_transactions = len(info['outputs']) - if n_transactions > 1: - print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions)) - else: - print("%s %.8f %s"%(address, info['total'], info['account'])) - else: - fee = Decimal(options.fee) - amount = Decimal(options.amount) - while unlock_wallet(bitcoind) == False: - pass # Keep asking for passphrase until they get it right - txdata = create_tx(bitcoind, options.fromaddresses.split(","), options.to, amount, fee) - sanity_test_fee(bitcoind, txdata, amount*Decimal("0.01")) - if options.dry_run: - print(txdata) - else: - txid = bitcoind.sendrawtransaction(txdata) - print(txid) - -if __name__ == '__main__': - main() diff --git a/contrib/test-patches/README.md b/contrib/test-patches/README.md deleted file mode 100644 index def40b0d6c2a5..0000000000000 --- a/contrib/test-patches/README.md +++ /dev/null @@ -1,7 +0,0 @@ -### Test Patches ### - -These patches are applied when the automated pull-tester -tests each pull and when master is tested using jenkins. -You can find more information about the tests run at -[http://jenkins.bluematt.me/pull-tester/files/ -](http://jenkins.bluematt.me/pull-tester/files/) \ No newline at end of file diff --git a/contrib/test-patches/temp-revert-2.patch b/contrib/test-patches/temp-revert-2.patch deleted file mode 100644 index 1cd043d0d7739..0000000000000 --- a/contrib/test-patches/temp-revert-2.patch +++ /dev/null @@ -1,20 +0,0 @@ -commit cfae26916dba311f6f75d444301c1f9362267c3e -Author: Matt Corallo -Date: Sun Mar 24 20:45:50 2013 -0400 - - Revert "Checkpoint at first block in 11 March chain fork" - - This reverts commit f817c496a1482d05b22c8e539de67f07db1c09d9. 
- -diff --git a/src/checkpoints.cpp b/src/checkpoints.cpp -index 62234b9..9b11f0b 100644 ---- a/src/checkpoints.cpp -+++ b/src/checkpoints.cpp -@@ -44,7 +44,6 @@ namespace Checkpoints - (193000, uint256("0x000000000000059f452a5f7340de6682a977387c17010ff6e6c3bd83ca8b1317")) - (210000, uint256("0x000000000000048b95347e83192f69cf0366076336c639f9b7228e9ba171342e")) - (216116, uint256("0x00000000000001b4f4b433e81ee46494af945cf96014816a4e2370f11b23df4e")) -- (225430, uint256("0x00000000000001c108384350f74090433e7fcf79a606b8e797f065b130575932")) - ; - static const CCheckpointData data = { - &mapCheckpoints, diff --git a/contrib/testgen/README.md b/contrib/testgen/README.md index 83624f443a7f9..2f0288df165b3 100644 --- a/contrib/testgen/README.md +++ b/contrib/testgen/README.md @@ -2,7 +2,7 @@ Utilities to generate test vectors for the data-driven Bitcoin tests. -Usage: +To use inside a scripted-diff (or just execute directly): - gen_base58_test_vectors.py valid 50 > ../../src/test/data/base58_keys_valid.json - gen_base58_test_vectors.py invalid 50 > ../../src/test/data/base58_keys_invalid.json \ No newline at end of file + ./gen_key_io_test_vectors.py valid 70 > ../../src/test/data/key_io_valid.json + ./gen_key_io_test_vectors.py invalid 70 > ../../src/test/data/key_io_invalid.json diff --git a/contrib/testgen/base58.py b/contrib/testgen/base58.py deleted file mode 100644 index b716495145f77..0000000000000 --- a/contrib/testgen/base58.py +++ /dev/null @@ -1,104 +0,0 @@ -''' -Bitcoin base58 encoding and decoding. - -Based on https://bitcointalk.org/index.php?topic=1026.0 (public domain) -''' -import hashlib - -# for compatibility with following code... -class SHA256: - new = hashlib.sha256 - -if str != bytes: - # Python 3.x - def ord(c): - return c - def chr(n): - return bytes( (n,) ) - -__b58chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz' -__b58base = len(__b58chars) -b58chars = __b58chars - -def b58encode(v): - """ encode v, which is a string of bytes, to base58. 
- """ - long_value = 0 - for (i, c) in enumerate(v[::-1]): - long_value += (256**i) * ord(c) - - result = '' - while long_value >= __b58base: - div, mod = divmod(long_value, __b58base) - result = __b58chars[mod] + result - long_value = div - result = __b58chars[long_value] + result - - # Bitcoin does a little leading-zero-compression: - # leading 0-bytes in the input become leading-1s - nPad = 0 - for c in v: - if c == '\0': nPad += 1 - else: break - - return (__b58chars[0]*nPad) + result - -def b58decode(v, length = None): - """ decode v into a string of len bytes - """ - long_value = 0 - for (i, c) in enumerate(v[::-1]): - long_value += __b58chars.find(c) * (__b58base**i) - - result = bytes() - while long_value >= 256: - div, mod = divmod(long_value, 256) - result = chr(mod) + result - long_value = div - result = chr(long_value) + result - - nPad = 0 - for c in v: - if c == __b58chars[0]: nPad += 1 - else: break - - result = chr(0)*nPad + result - if length is not None and len(result) != length: - return None - - return result - -def checksum(v): - """Return 32-bit checksum based on SHA256""" - return SHA256.new(SHA256.new(v).digest()).digest()[0:4] - -def b58encode_chk(v): - """b58encode a string, with 32-bit checksum""" - return b58encode(v + checksum(v)) - -def b58decode_chk(v): - """decode a base58 string, check and remove checksum""" - result = b58decode(v) - if result is None: - return None - h3 = checksum(result[:-4]) - if result[-4:] == checksum(result[:-4]): - return result[:-4] - else: - return None - -def get_bcaddress_version(strAddress): - """ Returns None if strAddress is invalid. Otherwise returns integer version of address. """ - addr = b58decode_chk(strAddress) - if addr is None or len(addr)!=21: return None - version = addr[0] - return ord(version) - -if __name__ == '__main__': - # Test case (from http://gitorious.org/bitcoin/python-base58.git) - assert get_bcaddress_version('15VjRaDX9zpbA8LVnbrCAFzrVzN7ixHNsC') is 0 - _ohai = 'o hai'.encode('ascii') - _tmp = b58encode(_ohai) - assert _tmp == 'DYB3oMS' - assert b58decode(_tmp, 5) == _ohai - print("Tests passed") diff --git a/contrib/testgen/gen_base58_test_vectors.py b/contrib/testgen/gen_base58_test_vectors.py deleted file mode 100755 index 181343695363a..0000000000000 --- a/contrib/testgen/gen_base58_test_vectors.py +++ /dev/null @@ -1,126 +0,0 @@ -#!/usr/bin/env python -''' -Generate valid and invalid base58 address and private key test vectors. - -Usage: - gen_base58_test_vectors.py valid 50 > ../../src/test/data/base58_keys_valid.json - gen_base58_test_vectors.py invalid 50 > ../../src/test/data/base58_keys_invalid.json -''' -# 2012 Wladimir J. 
van der Laan -# Released under MIT License -import os -from itertools import islice -from base58 import b58encode, b58decode, b58encode_chk, b58decode_chk, b58chars -import random -from binascii import b2a_hex - -# key types -PUBKEY_ADDRESS = 0 -SCRIPT_ADDRESS = 5 -PUBKEY_ADDRESS_TEST = 111 -SCRIPT_ADDRESS_TEST = 196 -PRIVKEY = 128 -PRIVKEY_TEST = 239 - -metadata_keys = ['isPrivkey', 'isTestnet', 'addrType', 'isCompressed'] -# templates for valid sequences -templates = [ - # prefix, payload_size, suffix, metadata - # None = N/A - ((PUBKEY_ADDRESS,), 20, (), (False, False, 'pubkey', None)), - ((SCRIPT_ADDRESS,), 20, (), (False, False, 'script', None)), - ((PUBKEY_ADDRESS_TEST,), 20, (), (False, True, 'pubkey', None)), - ((SCRIPT_ADDRESS_TEST,), 20, (), (False, True, 'script', None)), - ((PRIVKEY,), 32, (), (True, False, None, False)), - ((PRIVKEY,), 32, (1,), (True, False, None, True)), - ((PRIVKEY_TEST,), 32, (), (True, True, None, False)), - ((PRIVKEY_TEST,), 32, (1,), (True, True, None, True)) -] - -def is_valid(v): - '''Check vector v for validity''' - result = b58decode_chk(v) - if result is None: - return False - valid = False - for template in templates: - prefix = str(bytearray(template[0])) - suffix = str(bytearray(template[2])) - if result.startswith(prefix) and result.endswith(suffix): - if (len(result) - len(prefix) - len(suffix)) == template[1]: - return True - return False - -def gen_valid_vectors(): - '''Generate valid test vectors''' - while True: - for template in templates: - prefix = str(bytearray(template[0])) - payload = os.urandom(template[1]) - suffix = str(bytearray(template[2])) - rv = b58encode_chk(prefix + payload + suffix) - assert is_valid(rv) - metadata = dict([(x,y) for (x,y) in zip(metadata_keys,template[3]) if y is not None]) - yield (rv, b2a_hex(payload), metadata) - -def gen_invalid_vector(template, corrupt_prefix, randomize_payload_size, corrupt_suffix): - '''Generate possibly invalid vector''' - if corrupt_prefix: - prefix = os.urandom(1) - else: - prefix = str(bytearray(template[0])) - - if randomize_payload_size: - payload = os.urandom(max(int(random.expovariate(0.5)), 50)) - else: - payload = os.urandom(template[1]) - - if corrupt_suffix: - suffix = os.urandom(len(template[2])) - else: - suffix = str(bytearray(template[2])) - - return b58encode_chk(prefix + payload + suffix) - -def randbool(p = 0.5): - '''Return True with P(p)''' - return random.random() < p - -def gen_invalid_vectors(): - '''Generate invalid test vectors''' - # start with some manual edge-cases - yield "", - yield "x", - while True: - # kinds of invalid vectors: - # invalid prefix - # invalid payload length - # invalid (randomized) suffix (add random data) - # corrupt checksum - for template in templates: - val = gen_invalid_vector(template, randbool(0.2), randbool(0.2), randbool(0.2)) - if random.randint(0,10)<1: # line corruption - if randbool(): # add random character to end - val += random.choice(b58chars) - else: # replace random character in the middle - n = random.randint(0, len(val)) - val = val[0:n] + random.choice(b58chars) + val[n+1:] - if not is_valid(val): - yield val, - -if __name__ == '__main__': - import sys, json - iters = {'valid':gen_valid_vectors, 'invalid':gen_invalid_vectors} - try: - uiter = iters[sys.argv[1]] - except IndexError: - uiter = gen_valid_vectors - try: - count = int(sys.argv[2]) - except IndexError: - count = 0 - - data = list(islice(uiter(), count)) - json.dump(data, sys.stdout, sort_keys=True, indent=4) - sys.stdout.write('\n') - diff --git 
a/contrib/testgen/gen_key_io_test_vectors.py b/contrib/testgen/gen_key_io_test_vectors.py new file mode 100755 index 0000000000000..7bfb1d76a8b76 --- /dev/null +++ b/contrib/testgen/gen_key_io_test_vectors.py @@ -0,0 +1,265 @@ +#!/usr/bin/env python3 +# Copyright (c) 2012-2022 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +''' +Generate valid and invalid base58/bech32(m) address and private key test vectors. +''' + +from itertools import islice +import os +import random +import sys + +sys.path.append(os.path.join(os.path.dirname(__file__), '../../test/functional')) + +from test_framework.address import base58_to_byte, byte_to_base58, b58chars # noqa: E402 +from test_framework.script import OP_0, OP_1, OP_2, OP_3, OP_16, OP_DUP, OP_EQUAL, OP_EQUALVERIFY, OP_HASH160, OP_CHECKSIG # noqa: E402 +from test_framework.segwit_addr import bech32_encode, decode_segwit_address, convertbits, CHARSET, Encoding # noqa: E402 + +# key types +PUBKEY_ADDRESS = 0 +SCRIPT_ADDRESS = 5 +PUBKEY_ADDRESS_TEST = 111 +SCRIPT_ADDRESS_TEST = 196 +PUBKEY_ADDRESS_REGTEST = 111 +SCRIPT_ADDRESS_REGTEST = 196 +PRIVKEY = 128 +PRIVKEY_TEST = 239 +PRIVKEY_REGTEST = 239 + +# script +pubkey_prefix = (OP_DUP, OP_HASH160, 20) +pubkey_suffix = (OP_EQUALVERIFY, OP_CHECKSIG) +script_prefix = (OP_HASH160, 20) +script_suffix = (OP_EQUAL,) +p2wpkh_prefix = (OP_0, 20) +p2wsh_prefix = (OP_0, 32) +p2tr_prefix = (OP_1, 32) + +metadata_keys = ['isPrivkey', 'chain', 'isCompressed', 'tryCaseFlip'] +# templates for valid sequences +templates = [ + # prefix, payload_size, suffix, metadata, output_prefix, output_suffix + # None = N/A + ((PUBKEY_ADDRESS,), 20, (), (False, 'main', None, None), pubkey_prefix, pubkey_suffix), + ((SCRIPT_ADDRESS,), 20, (), (False, 'main', None, None), script_prefix, script_suffix), + ((PUBKEY_ADDRESS_TEST,), 20, (), (False, 'test', None, None), pubkey_prefix, pubkey_suffix), + ((SCRIPT_ADDRESS_TEST,), 20, (), (False, 'test', None, None), script_prefix, script_suffix), + ((PUBKEY_ADDRESS_TEST,), 20, (), (False, 'signet', None, None), pubkey_prefix, pubkey_suffix), + ((SCRIPT_ADDRESS_TEST,), 20, (), (False, 'signet', None, None), script_prefix, script_suffix), + ((PUBKEY_ADDRESS_REGTEST,), 20, (), (False, 'regtest', None, None), pubkey_prefix, pubkey_suffix), + ((SCRIPT_ADDRESS_REGTEST,), 20, (), (False, 'regtest', None, None), script_prefix, script_suffix), + ((PRIVKEY,), 32, (), (True, 'main', False, None), (), ()), + ((PRIVKEY,), 32, (1,), (True, 'main', True, None), (), ()), + ((PRIVKEY_TEST,), 32, (), (True, 'test', False, None), (), ()), + ((PRIVKEY_TEST,), 32, (1,), (True, 'test', True, None), (), ()), + ((PRIVKEY_TEST,), 32, (), (True, 'signet', False, None), (), ()), + ((PRIVKEY_TEST,), 32, (1,), (True, 'signet', True, None), (), ()), + ((PRIVKEY_REGTEST,), 32, (), (True, 'regtest', False, None), (), ()), + ((PRIVKEY_REGTEST,), 32, (1,), (True, 'regtest', True, None), (), ()) +] +# templates for valid bech32 sequences +bech32_templates = [ + # hrp, version, witprog_size, metadata, encoding, output_prefix + ('bc', 0, 20, (False, 'main', None, True), Encoding.BECH32, p2wpkh_prefix), + ('bc', 0, 32, (False, 'main', None, True), Encoding.BECH32, p2wsh_prefix), + ('bc', 1, 32, (False, 'main', None, True), Encoding.BECH32M, p2tr_prefix), + ('bc', 2, 2, (False, 'main', None, True), Encoding.BECH32M, (OP_2, 2)), + ('tb', 0, 20, (False, 'test', None, True), Encoding.BECH32, p2wpkh_prefix), + ('tb', 0, 
32, (False, 'test', None, True), Encoding.BECH32, p2wsh_prefix), + ('tb', 1, 32, (False, 'test', None, True), Encoding.BECH32M, p2tr_prefix), + ('tb', 3, 16, (False, 'test', None, True), Encoding.BECH32M, (OP_3, 16)), + ('tb', 0, 20, (False, 'signet', None, True), Encoding.BECH32, p2wpkh_prefix), + ('tb', 0, 32, (False, 'signet', None, True), Encoding.BECH32, p2wsh_prefix), + ('tb', 1, 32, (False, 'signet', None, True), Encoding.BECH32M, p2tr_prefix), + ('tb', 3, 32, (False, 'signet', None, True), Encoding.BECH32M, (OP_3, 32)), + ('bcrt', 0, 20, (False, 'regtest', None, True), Encoding.BECH32, p2wpkh_prefix), + ('bcrt', 0, 32, (False, 'regtest', None, True), Encoding.BECH32, p2wsh_prefix), + ('bcrt', 1, 32, (False, 'regtest', None, True), Encoding.BECH32M, p2tr_prefix), + ('bcrt', 16, 40, (False, 'regtest', None, True), Encoding.BECH32M, (OP_16, 40)) +] +# templates for invalid bech32 sequences +bech32_ng_templates = [ + # hrp, version, witprog_size, encoding, invalid_bech32, invalid_checksum, invalid_char + ('tc', 0, 20, Encoding.BECH32, False, False, False), + ('bt', 1, 32, Encoding.BECH32M, False, False, False), + ('tb', 17, 32, Encoding.BECH32M, False, False, False), + ('bcrt', 3, 1, Encoding.BECH32M, False, False, False), + ('bc', 15, 41, Encoding.BECH32M, False, False, False), + ('tb', 0, 16, Encoding.BECH32, False, False, False), + ('bcrt', 0, 32, Encoding.BECH32, True, False, False), + ('bc', 0, 16, Encoding.BECH32, True, False, False), + ('tb', 0, 32, Encoding.BECH32, False, True, False), + ('bcrt', 0, 20, Encoding.BECH32, False, False, True), + ('bc', 0, 20, Encoding.BECH32M, False, False, False), + ('tb', 0, 32, Encoding.BECH32M, False, False, False), + ('bcrt', 0, 20, Encoding.BECH32M, False, False, False), + ('bc', 1, 32, Encoding.BECH32, False, False, False), + ('tb', 2, 16, Encoding.BECH32, False, False, False), + ('bcrt', 16, 20, Encoding.BECH32, False, False, False), +] + +def is_valid(v): + '''Check vector v for validity''' + if len(set(v) - set(b58chars)) > 0: + return is_valid_bech32(v) + try: + payload, version = base58_to_byte(v) + result = bytes([version]) + payload + except ValueError: # thrown if checksum doesn't match + return is_valid_bech32(v) + for template in templates: + prefix = bytearray(template[0]) + suffix = bytearray(template[2]) + if result.startswith(prefix) and result.endswith(suffix): + if (len(result) - len(prefix) - len(suffix)) == template[1]: + return True + return is_valid_bech32(v) + +def is_valid_bech32(v): + '''Check vector v for bech32 validity''' + for hrp in ['bc', 'tb', 'bcrt']: + if decode_segwit_address(hrp, v) != (None, None): + return True + return False + +def gen_valid_base58_vector(template): + '''Generate valid base58 vector''' + prefix = bytearray(template[0]) + payload = rand_bytes(size=template[1]) + suffix = bytearray(template[2]) + dst_prefix = bytearray(template[4]) + dst_suffix = bytearray(template[5]) + assert len(prefix) == 1 + rv = byte_to_base58(payload + suffix, prefix[0]) + return rv, dst_prefix + payload + dst_suffix + +def gen_valid_bech32_vector(template): + '''Generate valid bech32 vector''' + hrp = template[0] + witver = template[1] + witprog = rand_bytes(size=template[2]) + encoding = template[4] + dst_prefix = bytearray(template[5]) + rv = bech32_encode(encoding, hrp, [witver] + convertbits(witprog, 8, 5)) + return rv, dst_prefix + witprog + +def gen_valid_vectors(): + '''Generate valid test vectors''' + glist = [gen_valid_base58_vector, gen_valid_bech32_vector] + tlist = [templates, bech32_templates] + while 
True: + for template, valid_vector_generator in [(t, g) for g, l in zip(glist, tlist) for t in l]: + rv, payload = valid_vector_generator(template) + assert is_valid(rv) + metadata = {x: y for x, y in zip(metadata_keys,template[3]) if y is not None} + hexrepr = payload.hex() + yield (rv, hexrepr, metadata) + +def gen_invalid_base58_vector(template): + '''Generate possibly invalid vector''' + # kinds of invalid vectors: + # invalid prefix + # invalid payload length + # invalid (randomized) suffix (add random data) + # corrupt checksum + corrupt_prefix = randbool(0.2) + randomize_payload_size = randbool(0.2) + corrupt_suffix = randbool(0.2) + + if corrupt_prefix: + prefix = rand_bytes(size=1) + else: + prefix = bytearray(template[0]) + + if randomize_payload_size: + payload = rand_bytes(size=max(int(random.expovariate(0.5)), 50)) + else: + payload = rand_bytes(size=template[1]) + + if corrupt_suffix: + suffix = rand_bytes(size=len(template[2])) + else: + suffix = bytearray(template[2]) + + assert len(prefix) == 1 + val = byte_to_base58(payload + suffix, prefix[0]) + if random.randint(0,10)<1: # line corruption + if randbool(): # add random character to end + val += random.choice(b58chars) + else: # replace random character in the middle + n = random.randint(0, len(val)) + val = val[0:n] + random.choice(b58chars) + val[n+1:] + + return val + +def gen_invalid_bech32_vector(template): + '''Generate possibly invalid bech32 vector''' + no_data = randbool(0.1) + to_upper = randbool(0.1) + hrp = template[0] + witver = template[1] + witprog = rand_bytes(size=template[2]) + encoding = template[3] + + if no_data: + rv = bech32_encode(encoding, hrp, []) + else: + data = [witver] + convertbits(witprog, 8, 5) + if template[4] and not no_data: + if template[2] % 5 in {2, 4}: + data[-1] |= 1 + else: + data.append(0) + rv = bech32_encode(encoding, hrp, data) + + if template[5]: + i = len(rv) - random.randrange(1, 7) + rv = rv[:i] + random.choice(CHARSET.replace(rv[i], '')) + rv[i + 1:] + if template[6]: + i = len(hrp) + 1 + random.randrange(0, len(rv) - len(hrp) - 4) + rv = rv[:i] + rv[i:i + 4].upper() + rv[i + 4:] + + if to_upper: + rv = rv.swapcase() + + return rv + +def randbool(p = 0.5): + '''Return True with P(p)''' + return random.random() < p + +def rand_bytes(*, size): + return bytearray(random.getrandbits(8) for _ in range(size)) + +def gen_invalid_vectors(): + '''Generate invalid test vectors''' + # start with some manual edge-cases + yield "", + yield "x", + glist = [gen_invalid_base58_vector, gen_invalid_bech32_vector] + tlist = [templates, bech32_ng_templates] + while True: + for template, invalid_vector_generator in [(t, g) for g, l in zip(glist, tlist) for t in l]: + val = invalid_vector_generator(template) + if not is_valid(val): + yield val, + +if __name__ == '__main__': + import json + iters = {'valid':gen_valid_vectors, 'invalid':gen_invalid_vectors} + random.seed(42) + try: + uiter = iters[sys.argv[1]] + except IndexError: + uiter = gen_valid_vectors + try: + count = int(sys.argv[2]) + except IndexError: + count = 0 + + data = list(islice(uiter(), count)) + json.dump(data, sys.stdout, sort_keys=True, indent=4) + sys.stdout.write('\n') + diff --git a/contrib/tidy_datadir.sh b/contrib/tidy_datadir.sh deleted file mode 100755 index 5d6d8264442fb..0000000000000 --- a/contrib/tidy_datadir.sh +++ /dev/null @@ -1,59 +0,0 @@ -#!/bin/bash - -if [ -d "$1" ]; then - cd "$1" -else - echo "Usage: $0 " >&2 - echo "Removes obsolete Bitcoin database files" >&2 - exit 1 -fi - -LEVEL=0 -if [ -f wallet.dat 
-a -f addr.dat -a -f blkindex.dat -a -f blk0001.dat ]; then LEVEL=1; fi -if [ -f wallet.dat -a -f peers.dat -a -f blkindex.dat -a -f blk0001.dat ]; then LEVEL=2; fi -if [ -f wallet.dat -a -f peers.dat -a -f coins/CURRENT -a -f blktree/CURRENT -a -f blocks/blk00000.dat ]; then LEVEL=3; fi -if [ -f wallet.dat -a -f peers.dat -a -f chainstate/CURRENT -a -f blocks/index/CURRENT -a -f blocks/blk00000.dat ]; then LEVEL=4; fi - -case $LEVEL in - 0) - echo "Error: no Bitcoin datadir detected." - exit 1 - ;; - 1) - echo "Detected old Bitcoin datadir (before 0.7)." - echo "Nothing to do." - exit 0 - ;; - 2) - echo "Detected Bitcoin 0.7 datadir." - ;; - 3) - echo "Detected Bitcoin pre-0.8 datadir." - ;; - 4) - echo "Detected Bitcoin 0.8 datadir." - ;; -esac - -FILES="" -DIRS="" - -if [ $LEVEL -ge 3 ]; then FILES=$(echo $FILES blk????.dat blkindex.dat); fi -if [ $LEVEL -ge 2 ]; then FILES=$(echo $FILES addr.dat); fi -if [ $LEVEL -ge 4 ]; then DIRS=$(echo $DIRS coins blktree); fi - -for FILE in $FILES; do - if [ -f $FILE ]; then - echo "Deleting: $FILE" - rm -f $FILE - fi -done - -for DIR in $DIRS; do - if [ -d $DIR ]; then - echo "Deleting: $DIR/" - rm -rf $DIR - fi -done - -echo "Done." diff --git a/contrib/tracing/README.md b/contrib/tracing/README.md new file mode 100644 index 0000000000000..a409a23ef8db8 --- /dev/null +++ b/contrib/tracing/README.md @@ -0,0 +1,288 @@ +Example scripts for User-space, Statically Defined Tracing (USDT) +================================================================= + +This directory contains scripts showcasing User-space, Statically Defined +Tracing (USDT) support for Bitcoin Core on Linux using. For more information on +USDT support in Bitcoin Core see the [USDT documentation]. + +[USDT documentation]: ../../doc/tracing.md + + +Examples for the two main eBPF front-ends, [bpftrace] and +[BPF Compiler Collection (BCC)], with support for USDT, are listed. BCC is used +for complex tools and daemons and `bpftrace` is preferred for one-liners and +shorter scripts. + +[bpftrace]: https://github.com/iovisor/bpftrace +[BPF Compiler Collection (BCC)]: https://github.com/iovisor/bcc + + +To develop and run bpftrace and BCC scripts you need to install the +corresponding packages. See [installing bpftrace] and [installing BCC] for more +information. For development there exist a [bpftrace Reference Guide], a +[BCC Reference Guide], and a [bcc Python Developer Tutorial]. + +[installing bpftrace]: https://github.com/iovisor/bpftrace/blob/master/INSTALL.md +[installing BCC]: https://github.com/iovisor/bcc/blob/master/INSTALL.md +[bpftrace Reference Guide]: https://github.com/iovisor/bpftrace/blob/master/docs/reference_guide.md +[BCC Reference Guide]: https://github.com/iovisor/bcc/blob/master/docs/reference_guide.md +[bcc Python Developer Tutorial]: https://github.com/iovisor/bcc/blob/master/docs/tutorial_bcc_python_developer.md + +## Examples + +The bpftrace examples contain a relative path to the `bitcoind` binary. By +default, the scripts should be run from the repository-root and assume a +self-compiled `bitcoind` binary. The paths in the examples can be changed, for +example, to point to release builds if needed. See the +[Bitcoin Core USDT documentation] on how to list available tracepoints in your +`bitcoind` binary. + +[Bitcoin Core USDT documentation]: ../../doc/tracing.md#listing-available-tracepoints + +**WARNING: eBPF programs require root privileges to be loaded into a Linux +kernel VM. This means the bpftrace and BCC examples must be executed with root +privileges. 
Make sure to carefully review any scripts that you run with root +privileges first!** + +### log_p2p_traffic.bt + +A bpftrace script logging information about inbound and outbound P2P network +messages. Based on the `net:inbound_message` and `net:outbound_message` +tracepoints. + +By default, `bpftrace` limits strings to 64 bytes due to the limited stack size +in the eBPF VM. For example, Tor v3 addresses exceed the string size limit which +results in the port being cut off during logging. The string size limit can be +increased with the `BPFTRACE_STRLEN` environment variable (`BPFTRACE_STRLEN=70` +works fine). + +``` +$ bpftrace contrib/tracing/log_p2p_traffic.bt +``` + +Output +``` +outbound 'ping' msg to peer 11 (outbound-full-relay, [2a02:b10c:f747:1:ef:fake:ipv6:addr]:8333) with 8 bytes +inbound 'pong' msg from peer 11 (outbound-full-relay, [2a02:b10c:f747:1:ef:fake:ipv6:addr]:8333) with 8 bytes +inbound 'inv' msg from peer 16 (outbound-full-relay, XX.XX.XXX.121:8333) with 37 bytes +outbound 'getdata' msg to peer 16 (outbound-full-relay, XX.XX.XXX.121:8333) with 37 bytes +inbound 'tx' msg from peer 16 (outbound-full-relay, XX.XX.XXX.121:8333) with 222 bytes +outbound 'inv' msg to peer 9 (outbound-full-relay, faketorv3addressa2ufa6odvoi3s77j4uegey0xb10csyfyve2t33curbyd.onion:8333) with 37 bytes +outbound 'inv' msg to peer 7 (outbound-full-relay, XX.XX.XXX.242:8333) with 37 bytes +… +``` + +### p2p_monitor.py + +A BCC Python script using curses for an interactive P2P message monitor. Based +on the `net:inbound_message` and `net:outbound_message` tracepoints. + +Inbound and outbound traffic is listed for each peer together with information +about the connection. Peers can be selected individually to view recent P2P +messages. + +``` +$ python3 contrib/tracing/p2p_monitor.py ./src/bitcoind +``` + +Lists selectable peers and traffic and connection information. +``` + P2P Message Monitor + Navigate with UP/DOWN or J/K and select a peer with ENTER or SPACE to see individual P2P messages + + PEER OUTBOUND INBOUND TYPE ADDR + 0 46 398 byte 61 1407590 byte block-relay-only XX.XX.XXX.196:8333 + 11 1156 253570 byte 3431 2394924 byte outbound-full-relay XXX.X.XX.179:8333 + 13 3425 1809620 byte 1236 305458 byte inbound XXX.X.X.X:60380 + 16 1046 241633 byte 1589 1199220 byte outbound-full-relay 4faketorv2pbfu7x.onion:8333 + 19 577 181679 byte 390 148951 byte outbound-full-relay kfake4vctorjv2o2.onion:8333 + 20 11 1248 byte 13 1283 byte block-relay-only [2600:fake:64d9:b10c:4436:aaaa:fe:bb]:8333 + 21 11 1248 byte 13 1299 byte block-relay-only XX.XXX.X.155:8333 + 22 5 103 byte 1 102 byte feeler XX.XX.XXX.173:8333 + 23 11 1248 byte 12 1255 byte block-relay-only XX.XXX.XXX.220:8333 + 24 3 103 byte 1 102 byte feeler XXX.XXX.XXX.64:8333 +… +``` + +Showing recent P2P messages between our node and a selected peer. + +``` + ---------------------------------------------------------------------- + | PEER 16 (4faketorv2pbfu7x.onion:8333) | + | OUR NODE outbound-full-relay PEER | + | <--- sendcmpct (9 bytes) | + | inv (37 byte) ---> | + | <--- ping (8 bytes) | + | pong (8 byte) ---> | + | inv (37 byte) ---> | + | <--- addr (31 bytes) | + | inv (37 byte) ---> | + | <--- getheaders (1029 bytes) | + | headers (1 byte) ---> | + | <--- feefilter (8 bytes) | + | <--- pong (8 bytes) | + | <--- headers (82 bytes) | + | <--- addr (30003 bytes) | + | inv (1261 byte) ---> | + | … | + +``` + +### log_raw_p2p_msgs.py + +A BCC Python script showcasing eBPF and USDT limitations when passing data +larger than about 32kb. 
Based on the `net:inbound_message` and +`net:outbound_message` tracepoints. + +Bitcoin P2P messages can be larger than 32kb (e.g. `tx`, `block`, ...). The +eBPF VM's stack is limited to 512 bytes, and we can't allocate more than about +32kb for a P2P message in the eBPF VM. The **message data is cut off** when the +message is larger than MAX_MSG_DATA_LENGTH (see script). This can be detected +in user-space by comparing the data length to the message length variable. The +message is cut off when the data length is smaller than the message length. +A warning is included with the printed message data. + +Data is submitted to user-space (i.e. to this script) via a ring buffer. The +throughput of the ring buffer is limited. Each p2p_message is about 32kb in +size. In- or outbound messages submitted to the ring buffer in rapid +succession fill the ring buffer faster than it can be read. Some messages are +lost. BCC prints: `Possibly lost 2 samples` on lost messages. + + +``` +$ python3 contrib/tracing/log_raw_p2p_msgs.py ./src/bitcoind +``` + +``` +Logging raw P2P messages. +Messages larger that about 32kb will be cut off! +Some messages might be lost! + outbound msg 'inv' from peer 4 (outbound-full-relay, XX.XXX.XX.4:8333) with 253 bytes: 0705000000be2245c8f844c9f763748e1a7… +… +Warning: incomplete message (only 32568 out of 53552 bytes)! inbound msg 'tx' from peer 32 (outbound-full-relay, XX.XXX.XXX.43:8333) with 53552 bytes: 020000000001fd3c01939c85ad6756ed9fc… +… +Possibly lost 2 samples +``` + +### connectblock_benchmark.bt + +A `bpftrace` script to benchmark the `ConnectBlock()` function during, for +example, a blockchain re-index. Based on the `validation:block_connected` USDT +tracepoint. + +The script takes three positional arguments. The first two arguments, the start, +and end height indicate between which blocks the benchmark should be run. The +third acts as a duration threshold in milliseconds. When the `ConnectBlock()` +function takes longer than the threshold, information about the block, is +printed. For more details, see the header comment in the script. + +The following command can be used to benchmark, for example, `ConnectBlock()` +between height 20000 and 38000 on SigNet while logging all blocks that take +longer than 25ms to connect. + +``` +$ bpftrace contrib/tracing/connectblock_benchmark.bt 20000 38000 25 +``` + +In a different terminal, starting Bitcoin Core in SigNet mode and with +re-indexing enabled. + +``` +$ ./src/bitcoind -signet -reindex +``` + +This produces the following output. +``` +Attaching 5 probes... +ConnectBlock Benchmark between height 20000 and 38000 inclusive +Logging blocks taking longer than 25 ms to connect. +Starting Connect Block Benchmark between height 20000 and 38000. 
+BENCH 39 blk/s 59 tx/s 59 inputs/s 20 sigops/s (height 20038) +Block 20492 (000000f555653bb05e2f3c6e79925e01a20dd57033f4dc7c354b46e34735d32b) 20 tx 2319 ins 2318 sigops took 38 ms +BENCH 1840 blk/s 2117 tx/s 4478 inputs/s 2471 sigops/s (height 21879) +BENCH 1816 blk/s 4972 tx/s 4982 inputs/s 125 sigops/s (height 23695) +BENCH 2095 blk/s 2890 tx/s 2910 inputs/s 152 sigops/s (height 25790) +BENCH 1684 blk/s 3979 tx/s 4053 inputs/s 288 sigops/s (height 27474) +BENCH 1155 blk/s 3216 tx/s 3252 inputs/s 115 sigops/s (height 28629) +BENCH 1797 blk/s 2488 tx/s 2503 inputs/s 111 sigops/s (height 30426) +BENCH 1849 blk/s 6318 tx/s 6569 inputs/s 12189 sigops/s (height 32275) +BENCH 946 blk/s 20209 tx/s 20775 inputs/s 83809 sigops/s (height 33221) +Block 33406 (0000002adfe4a15cfcd53bd890a89bbae836e5bb7f38bac566f61ad4548c87f6) 25 tx 2045 ins 2090 sigops took 29 ms +Block 33687 (00000073231307a9828e5607ceb8156b402efe56747271a4442e75eb5b77cd36) 52 tx 1797 ins 1826 sigops took 26 ms +BENCH 582 blk/s 21581 tx/s 27673 inputs/s 60345 sigops/s (height 33803) +BENCH 1035 blk/s 19735 tx/s 19776 inputs/s 51355 sigops/s (height 34838) +Block 35625 (0000006b00b347390c4768ea9df2655e9ff4b120f29d78594a2a702f8a02c997) 20 tx 3374 ins 3371 sigops took 49 ms +BENCH 887 blk/s 17857 tx/s 22191 inputs/s 24404 sigops/s (height 35725) +Block 35937 (000000d816d13d6e39b471cd4368db60463a764ba1f29168606b04a22b81ea57) 75 tx 3943 ins 3940 sigops took 61 ms +BENCH 823 blk/s 16298 tx/s 21031 inputs/s 18440 sigops/s (height 36548) +Block 36583 (000000c3e260556dbf42968aae3f904dba8b8c1ff96a6f6e3aa5365d2e3ad317) 24 tx 2198 ins 2194 sigops took 34 ms +Block 36700 (000000b3b173de9e65a3cfa738d976af6347aaf83fa17ab3f2a4d2ede3ddfac4) 73 tx 1615 ins 1611 sigops took 31 ms +Block 36832 (0000007859578c02c1ac37dabd1b9ec19b98f350b56935f5dd3a41e9f79f836e) 34 tx 1440 ins 1436 sigops took 26 ms +BENCH 613 blk/s 16718 tx/s 25074 inputs/s 23022 sigops/s (height 37161) +Block 37870 (000000f5c1086291ba2d943fb0c3bc82e71c5ee341ee117681d1456fbf6c6c38) 25 tx 1517 ins 1514 sigops took 29 ms +BENCH 811 blk/s 16031 tx/s 20921 inputs/s 18696 sigops/s (height 37972) + +Took 14055 ms to connect the blocks between height 20000 and 38000. + +Histogram of block connection times in milliseconds (ms). +@durations: +[0] 16838 |@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@| +[1] 882 |@@ | +[2, 4) 236 | | +[4, 8) 23 | | +[8, 16) 9 | | +[16, 32) 9 | | +[32, 64) 4 | | +``` + +### log_utxocache_flush.py + +A BCC Python script to log the UTXO cache flushes. Based on the +`utxocache:flush` tracepoint. + +```bash +$ python3 contrib/tracing/log_utxocache_flush.py ./src/bitcoind +``` + +``` +Logging utxocache flushes. Ctrl-C to end... +Duration (µs) Mode Coins Count Memory Usage Prune +730451 IF_NEEDED 22990 3323.54 kB True +637657 ALWAYS 122320 17124.80 kB False +81349 ALWAYS 0 1383.49 kB False +``` + +### log_utxos.bt + +A `bpftrace` script to log information about the coins that are added, spent, or +uncached from the UTXO set. Based on the `utxocache:add`, `utxocache:spend` and +`utxocache:uncache` tracepoints. + +```bash +$ bpftrace contrib/tracing/log_utxos.bt +``` + +This should produce an output similar to the following. If you see bpftrace +warnings like `Lost 24 events`, the eBPF perf ring-buffer is filled faster +than it is being read. You can increase the ring-buffer size by setting the +ENV variable `BPFTRACE_PERF_RB_PAGES` (default 64) at a cost of higher +memory usage. See the [bpftrace reference guide] for more information. 
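For example, a minimal sketch of such an invocation (the page count of 256 is purely illustrative and not a value recommended by the script itself; pick whatever fits your memory budget):

```bash
# Illustrative only: raise the bpftrace perf ring-buffer from its default of
# 64 pages to 256 pages before attaching the probes, to reduce lost events.
BPFTRACE_PERF_RB_PAGES=256 bpftrace contrib/tracing/log_utxos.bt
```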
+ +[bpftrace reference guide]: https://github.com/iovisor/bpftrace/blob/master/docs/reference_guide.md#98-bpftrace_perf_rb_pages + +```bash +Attaching 4 probes... +OP Outpoint Value Height Coinbase +Added 6ba9ad857e1ef2eb2a2c94f06813c414c7ab273e3d6bd7ad64e000315a887e7c:1 10000 2094512 No +Spent fa7dc4db56637a151f6649d8f26732956d1c5424c82aae400a83d02b2cc2c87b:0 182264897 2094512 No +Added eeb2f099b1af6a2a12e6ddd2eeb16fc5968582241d7f08ba202d28b60ac264c7:0 10000 2094512 No +Added eeb2f099b1af6a2a12e6ddd2eeb16fc5968582241d7f08ba202d28b60ac264c7:1 182254756 2094512 No +Added a0c7f4ec9cccef2d89672a624a4e6c8237a17572efdd4679eea9e9ee70d2db04:0 10072679 2094513 Yes +Spent 25e0df5cc1aeb1b78e6056bf403e5e8b7e41f138060ca0a50a50134df0549a5e:2 540 2094508 No +Spent 42f383c04e09c26a2378272ec33aa0c1bf4883ca5ab739e8b7e06be5a5787d61:1 3848399 2007724 No +Added f85e3b4b89270863a389395cc9a4123e417ab19384cef96533c6649abd6b0561:0 3788399 2094513 No +Added f85e3b4b89270863a389395cc9a4123e417ab19384cef96533c6649abd6b0561:2 540 2094513 No +Spent a05880b8c77971ed0b9f73062c7c4cdb0ff3856ab14cbf8bc481ed571cd34b83:1 5591281046 2094511 No +Added eb689865f7d957938978d6207918748f74e6aa074f47874724327089445b0960:0 5589696005 2094513 No +Added eb689865f7d957938978d6207918748f74e6aa074f47874724327089445b0960:1 1565556 2094513 No +``` diff --git a/contrib/tracing/connectblock_benchmark.bt b/contrib/tracing/connectblock_benchmark.bt new file mode 100755 index 0000000000000..6e7a98ef07664 --- /dev/null +++ b/contrib/tracing/connectblock_benchmark.bt @@ -0,0 +1,156 @@ +#!/usr/bin/env bpftrace + +/* + + USAGE: + + bpftrace contrib/tracing/connectblock_benchmark.bt + + - sets the height at which the benchmark should start. Setting + the start height to 0 starts the benchmark immediately, even before the + first block is connected. + - sets the height after which the benchmark should end. Setting + the end height to 0 disables the benchmark. The script only logs blocks + over . + - Threshold + + This script requires a 'bitcoind' binary compiled with eBPF support and the + 'validation:block_connected' USDT. By default, it's assumed that 'bitcoind' is + located in './src/bitcoind'. This can be modified in the script below. + + EXAMPLES: + + bpftrace contrib/tracing/connectblock_benchmark.bt 300000 680000 1000 + + When run together 'bitcoind -reindex', this benchmarks the time it takes to + connect the blocks between height 300.000 and 680.000 (inclusive) and prints + details about all blocks that take longer than 1000ms to connect. Prints a + histogram with block connection times when the benchmark is finished. + + + bpftrace contrib/tracing/connectblock_benchmark.bt 0 0 500 + + When running together 'bitcoind', all newly connected blocks that + take longer than 500ms to connect are logged. A histogram with block + connection times is shown when the script is terminated. 
+ +*/ + +BEGIN +{ + $start_height = $1; + $end_height = $2; + $logging_threshold_ms = $3; + + if ($end_height < $start_height) { + printf("Error: start height (%d) larger than end height (%d)!\n", $start_height, $end_height); + exit(); + } + + if ($end_height > 0) { + printf("ConnectBlock benchmark between height %d and %d inclusive\n", $start_height, $end_height); + } else { + printf("ConnectBlock logging starting at height %d\n", $start_height); + } + + if ($logging_threshold_ms > 0) { + printf("Logging blocks taking longer than %d ms to connect.\n", $3); + } + + if ($start_height == 0) { + @start = nsecs; + } +} + +/* + Attaches to the 'validation:block_connected' USDT and collects stats when the + connected block is between the start and end height (or the end height is + unset). +*/ +usdt:./src/bitcoind:validation:block_connected /arg1 >= $1 && (arg1 <= $2 || $2 == 0 )/ +{ + $height = arg1; + $transactions = arg2; + $inputs = arg3; + $sigops = arg4; + $duration = (uint64) arg5; + + @height = $height; + + @blocks = @blocks + 1; + @transactions = @transactions + $transactions; + @inputs = @inputs + $inputs; + @sigops = @sigops + $sigops; + + @durations = hist($duration / 1000); + + if ($height == $1 && $height != 0) { + @start = nsecs; + printf("Starting Connect Block Benchmark between height %d and %d.\n", $1, $2); + } + + if ($2 > 0 && $height >= $2) { + @end = nsecs; + $duration = @end - @start; + printf("\nTook %d ms to connect the blocks between height %d and %d.\n", $duration / 1000000, $1, $2); + exit(); + } +} + +/* + Attaches to the 'validation:block_connected' USDT and logs information about + blocks where the time it took to connect the block is above the + . +*/ +usdt:./src/bitcoind:validation:block_connected / (uint64) arg5 / 1000> $3 / +{ + $hash = arg0; + $height = (int32) arg1; + $transactions = (uint64) arg2; + $inputs = (int32) arg3; + $sigops = (int64) arg4; + $duration = (int64) arg5; + + + printf("Block %d (", $height); + /* Prints each byte of the block hash as hex in big-endian (the block-explorer format) */ + $p = $hash + 31; + unroll(32) { + $b = *(uint8*)$p; + printf("%02x", $b); + $p -= 1; + } + printf(") %4d tx %5d ins %5d sigops took %4d ms\n", $transactions, $inputs, $sigops, (uint64) $duration / 1000); +} + + +/* + Prints stats about the blocks, transactions, inputs, and sigops processed in + the last second (if any). 
+*/ +interval:s:1 { + if (@blocks > 0) { + printf("BENCH %4d blk/s %6d tx/s %7d inputs/s %8d sigops/s (height %d)\n", @blocks, @transactions, @inputs, @sigops, @height); + + zero(@blocks); + zero(@transactions); + zero(@inputs); + zero(@sigops); + } +} + +END +{ + printf("\nHistogram of block connection times in milliseconds (ms).\n"); + print(@durations); + + clear(@durations); + clear(@blocks); + clear(@transactions); + clear(@inputs); + clear(@sigops); + clear(@height); + clear(@start); + clear(@end); +} + diff --git a/contrib/tracing/log_p2p_traffic.bt b/contrib/tracing/log_p2p_traffic.bt new file mode 100755 index 0000000000000..f62956aa5e1c3 --- /dev/null +++ b/contrib/tracing/log_p2p_traffic.bt @@ -0,0 +1,28 @@ +#!/usr/bin/env bpftrace + +BEGIN +{ + printf("Logging P2P traffic\n") +} + +usdt:./src/bitcoind:net:inbound_message +{ + $peer_id = (int64) arg0; + $peer_addr = str(arg1); + $peer_type = str(arg2); + $msg_type = str(arg3); + $msg_len = arg4; + printf("inbound '%s' msg from peer %d (%s, %s) with %d bytes\n", $msg_type, $peer_id, $peer_type, $peer_addr, $msg_len); +} + +usdt:./src/bitcoind:net:outbound_message +{ + $peer_id = (int64) arg0; + $peer_addr = str(arg1); + $peer_type = str(arg2); + $msg_type = str(arg3); + $msg_len = arg4; + + printf("outbound '%s' msg to peer %d (%s, %s) with %d bytes\n", $msg_type, $peer_id, $peer_type, $peer_addr, $msg_len); +} + diff --git a/contrib/tracing/log_raw_p2p_msgs.py b/contrib/tracing/log_raw_p2p_msgs.py new file mode 100755 index 0000000000000..c0ab70410622b --- /dev/null +++ b/contrib/tracing/log_raw_p2p_msgs.py @@ -0,0 +1,183 @@ +#!/usr/bin/env python3 +# Copyright (c) 2021 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. + +""" Demonstration of eBPF limitations and the effect on USDT with the + net:inbound_message and net:outbound_message tracepoints. """ + +# This script shows a limitation of eBPF when data larger than 32kb is passed to +# user-space. It uses BCC (https://github.com/iovisor/bcc) to load a sandboxed +# eBPF program into the Linux kernel (root privileges are required). The eBPF +# program attaches to two statically defined tracepoints. The tracepoint +# 'net:inbound_message' is called when a new P2P message is received, and +# 'net:outbound_message' is called on outbound P2P messages. The eBPF program +# submits the P2P messages to this script via a BPF ring buffer. The submitted +# messages are printed. + +# eBPF Limitations: +# +# Bitcoin P2P messages can be larger than 32kb (e.g. tx, block, ...). The eBPF +# VM's stack is limited to 512 bytes, and we can't allocate more than about 32kb +# for a P2P message in the eBPF VM. The message data is cut off when the message +# is larger than MAX_MSG_DATA_LENGTH (see definition below). This can be detected +# in user-space by comparing the data length to the message length variable. The +# message is cut off when the data length is smaller than the message length. +# A warning is included with the printed message data. +# +# Data is submitted to user-space (i.e. to this script) via a ring buffer. The +# throughput of the ring buffer is limited. Each p2p_message is about 32kb in +# size. In- or outbound messages submitted to the ring buffer in rapid +# succession fill the ring buffer faster than it can be read. Some messages are +# lost. +# +# BCC prints: "Possibly lost 2 samples" on lost messages. 
+ +import sys +from bcc import BPF, USDT + +# BCC: The C program to be compiled to an eBPF program (by BCC) and loaded into +# a sandboxed Linux kernel VM. +program = """ +#include + +#define MIN(a,b) ({ __typeof__ (a) _a = (a); __typeof__ (b) _b = (b); _a < _b ? _a : _b; }) + +// Maximum possible allocation size +// from include/linux/percpu.h in the Linux kernel +#define PCPU_MIN_UNIT_SIZE (32 << 10) + +// Tor v3 addresses are 62 chars + 6 chars for the port (':12345'). +#define MAX_PEER_ADDR_LENGTH 62 + 6 +#define MAX_PEER_CONN_TYPE_LENGTH 20 +#define MAX_MSG_TYPE_LENGTH 20 +#define MAX_MSG_DATA_LENGTH PCPU_MIN_UNIT_SIZE - 200 + +struct p2p_message +{ + u64 peer_id; + char peer_addr[MAX_PEER_ADDR_LENGTH]; + char peer_conn_type[MAX_PEER_CONN_TYPE_LENGTH]; + char msg_type[MAX_MSG_TYPE_LENGTH]; + u64 msg_size; + u8 msg[MAX_MSG_DATA_LENGTH]; +}; + +// We can't store the p2p_message struct on the eBPF stack as it is limited to +// 512 bytes and P2P message can be bigger than 512 bytes. However, we can use +// an BPF-array with a length of 1 to allocate up to 32768 bytes (this is +// defined by PCPU_MIN_UNIT_SIZE in include/linux/percpu.h in the Linux kernel). +// Also see https://github.com/iovisor/bcc/issues/2306 +BPF_ARRAY(msg_arr, struct p2p_message, 1); + +// Two BPF perf buffers for pushing data (here P2P messages) to user-space. +BPF_PERF_OUTPUT(inbound_messages); +BPF_PERF_OUTPUT(outbound_messages); + +int trace_inbound_message(struct pt_regs *ctx) { + int idx = 0; + struct p2p_message *msg = msg_arr.lookup(&idx); + + // lookup() does not return a NULL pointer. However, the BPF verifier + // requires an explicit check that that the `msg` pointer isn't a NULL + // pointer. See https://github.com/iovisor/bcc/issues/2595 + if (msg == NULL) return 1; + + bpf_usdt_readarg(1, ctx, &msg->peer_id); + bpf_usdt_readarg_p(2, ctx, &msg->peer_addr, MAX_PEER_ADDR_LENGTH); + bpf_usdt_readarg_p(3, ctx, &msg->peer_conn_type, MAX_PEER_CONN_TYPE_LENGTH); + bpf_usdt_readarg_p(4, ctx, &msg->msg_type, MAX_MSG_TYPE_LENGTH); + bpf_usdt_readarg(5, ctx, &msg->msg_size); + bpf_usdt_readarg_p(6, ctx, &msg->msg, MIN(msg->msg_size, MAX_MSG_DATA_LENGTH)); + + inbound_messages.perf_submit(ctx, msg, sizeof(*msg)); + return 0; +}; + +int trace_outbound_message(struct pt_regs *ctx) { + int idx = 0; + struct p2p_message *msg = msg_arr.lookup(&idx); + + // lookup() does not return a NULL pointer. However, the BPF verifier + // requires an explicit check that that the `msg` pointer isn't a NULL + // pointer. See https://github.com/iovisor/bcc/issues/2595 + if (msg == NULL) return 1; + + bpf_usdt_readarg(1, ctx, &msg->peer_id); + bpf_usdt_readarg_p(2, ctx, &msg->peer_addr, MAX_PEER_ADDR_LENGTH); + bpf_usdt_readarg_p(3, ctx, &msg->peer_conn_type, MAX_PEER_CONN_TYPE_LENGTH); + bpf_usdt_readarg_p(4, ctx, &msg->msg_type, MAX_MSG_TYPE_LENGTH); + bpf_usdt_readarg(5, ctx, &msg->msg_size); + bpf_usdt_readarg_p(6, ctx, &msg->msg, MIN(msg->msg_size, MAX_MSG_DATA_LENGTH)); + + outbound_messages.perf_submit(ctx, msg, sizeof(*msg)); + return 0; +}; +""" + + +def print_message(event, inbound): + print(f"%s %s msg '%s' from peer %d (%s, %s) with %d bytes: %s" % + ( + f"Warning: incomplete message (only %d out of %d bytes)!" 
% ( + len(event.msg), event.msg_size) if len(event.msg) < event.msg_size else "", + "inbound" if inbound else "outbound", + event.msg_type.decode("utf-8"), + event.peer_id, + event.peer_conn_type.decode("utf-8"), + event.peer_addr.decode("utf-8"), + event.msg_size, + bytes(event.msg[:event.msg_size]).hex(), + ) + ) + + +def main(bitcoind_path): + bitcoind_with_usdts = USDT(path=str(bitcoind_path)) + + # attaching the trace functions defined in the BPF program to the tracepoints + bitcoind_with_usdts.enable_probe( + probe="inbound_message", fn_name="trace_inbound_message") + bitcoind_with_usdts.enable_probe( + probe="outbound_message", fn_name="trace_outbound_message") + bpf = BPF(text=program, usdt_contexts=[bitcoind_with_usdts]) + + # BCC: perf buffer handle function for inbound_messages + def handle_inbound(_, data, size): + """ Inbound message handler. + + Called each time a message is submitted to the inbound_messages BPF table.""" + + event = bpf["inbound_messages"].event(data) + print_message(event, True) + + # BCC: perf buffer handle function for outbound_messages + + def handle_outbound(_, data, size): + """ Outbound message handler. + + Called each time a message is submitted to the outbound_messages BPF table.""" + + event = bpf["outbound_messages"].event(data) + print_message(event, False) + + # BCC: add handlers to the inbound and outbound perf buffers + bpf["inbound_messages"].open_perf_buffer(handle_inbound) + bpf["outbound_messages"].open_perf_buffer(handle_outbound) + + print("Logging raw P2P messages.") + print("Messages larger that about 32kb will be cut off!") + print("Some messages might be lost!") + while True: + try: + bpf.perf_buffer_poll() + except KeyboardInterrupt: + exit() + + +if __name__ == "__main__": + if len(sys.argv) < 2: + print("USAGE:", sys.argv[0], "path/to/bitcoind") + exit() + path = sys.argv[1] + main(path) diff --git a/contrib/tracing/log_utxocache_flush.py b/contrib/tracing/log_utxocache_flush.py new file mode 100755 index 0000000000000..8c073bea0d181 --- /dev/null +++ b/contrib/tracing/log_utxocache_flush.py @@ -0,0 +1,107 @@ +#!/usr/bin/env python3 +# Copyright (c) 2021 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. + +import sys +import ctypes +from bcc import BPF, USDT + +"""Example logging Bitcoin Core utxo set cache flushes utilizing + the utxocache:flush tracepoint.""" + +# USAGE: ./contrib/tracing/log_utxocache_flush.py path/to/bitcoind + +# BCC: The C program to be compiled to an eBPF program (by BCC) and loaded into +# a sandboxed Linux kernel VM. +program = """ +# include + +struct data_t +{ + u64 duration; + u32 mode; + u64 coins_count; + u64 coins_mem_usage; + bool is_flush_for_prune; +}; + +// BPF perf buffer to push the data to user space. 
+BPF_PERF_OUTPUT(flush); + +int trace_flush(struct pt_regs *ctx) { + struct data_t data = {}; + bpf_usdt_readarg(1, ctx, &data.duration); + bpf_usdt_readarg(2, ctx, &data.mode); + bpf_usdt_readarg(3, ctx, &data.coins_count); + bpf_usdt_readarg(4, ctx, &data.coins_mem_usage); + bpf_usdt_readarg(5, ctx, &data.is_flush_for_prune); + flush.perf_submit(ctx, &data, sizeof(data)); + return 0; +} +""" + +FLUSH_MODES = [ + 'NONE', + 'IF_NEEDED', + 'PERIODIC', + 'ALWAYS' +] + + +class Data(ctypes.Structure): + # define output data structure corresponding to struct data_t + _fields_ = [ + ("duration", ctypes.c_uint64), + ("mode", ctypes.c_uint32), + ("coins_count", ctypes.c_uint64), + ("coins_mem_usage", ctypes.c_uint64), + ("is_flush_for_prune", ctypes.c_bool) + ] + + +def print_event(event): + print("%-15d %-10s %-15d %-15s %-8s" % ( + event.duration, + FLUSH_MODES[event.mode], + event.coins_count, + "%.2f kB" % (event.coins_mem_usage/1000), + event.is_flush_for_prune + )) + + +def main(bitcoind_path): + bitcoind_with_usdts = USDT(path=str(bitcoind_path)) + + # attaching the trace functions defined in the BPF program + # to the tracepoints + bitcoind_with_usdts.enable_probe( + probe="flush", fn_name="trace_flush") + b = BPF(text=program, usdt_contexts=[bitcoind_with_usdts]) + + def handle_flush(_, data, size): + """ Coins Flush handler. + Called each time coin caches and indexes are flushed.""" + event = ctypes.cast(data, ctypes.POINTER(Data)).contents + print_event(event) + + b["flush"].open_perf_buffer(handle_flush) + print("Logging utxocache flushes. Ctrl-C to end...") + print("%-15s %-10s %-15s %-15s %-8s" % ("Duration (µs)", "Mode", + "Coins Count", "Memory Usage", + "Flush for Prune")) + + while True: + try: + b.perf_buffer_poll() + except KeyboardInterrupt: + exit(0) + + +if __name__ == "__main__": + if len(sys.argv) < 2: + print("USAGE: ", sys.argv[0], "path/to/bitcoind") + exit(1) + + path = sys.argv[1] + main(path) diff --git a/contrib/tracing/log_utxos.bt b/contrib/tracing/log_utxos.bt new file mode 100755 index 0000000000000..54d5010f825e4 --- /dev/null +++ b/contrib/tracing/log_utxos.bt @@ -0,0 +1,86 @@ +#!/usr/bin/env bpftrace + +/* + + USAGE: + + bpftrace contrib/tracing/log_utxos.bt + + This script requires a 'bitcoind' binary compiled with eBPF support and the + 'utxocache' tracepoints. By default, it's assumed that 'bitcoind' is + located in './src/bitcoind'. This can be modified in the script below. + + NOTE: requires bpftrace v0.12.0 or above. +*/ + +BEGIN +{ + printf("%-7s %-71s %16s %7s %8s\n", + "OP", "Outpoint", "Value", "Height", "Coinbase"); +} + +/* + Attaches to the 'utxocache:add' tracepoint and prints additions to the UTXO set cache. +*/ +usdt:./src/bitcoind:utxocache:add +{ + $txid = arg0; + $index = (uint32)arg1; + $height = (uint32)arg2; + $value = (int64)arg3; + $isCoinbase = arg4; + + printf("Added "); + $p = $txid + 31; + unroll(32) { + $b = *(uint8*)$p; + printf("%02x", $b); + $p-=1; + } + + printf(":%-6d %16ld %7d %s\n", $index, $value, $height, ($isCoinbase ? "Yes" : "No" )); +} + +/* + Attaches to the 'utxocache:spent' tracepoint and prints spents from the UTXO set cache. +*/ +usdt:./src/bitcoind:utxocache:spent +{ + $txid = arg0; + $index = (uint32)arg1; + $height = (uint32)arg2; + $value = (int64)arg3; + $isCoinbase = arg4; + + printf("Spent "); + $p = $txid + 31; + unroll(32) { + $b = *(uint8*)$p; + printf("%02x", $b); + $p-=1; + } + + printf(":%-6d %16ld %7d %s\n", $index, $value, $height, ($isCoinbase ? 
"Yes" : "No" )); +} + +/* + Attaches to the 'utxocache:uncache' tracepoint and uncache UTXOs from the UTXO set cache. +*/ +usdt:./src/bitcoind:utxocache:uncache +{ + $txid = arg0; + $index = (uint32)arg1; + $height = (uint32)arg2; + $value = (int64)arg3; + $isCoinbase = arg4; + + printf("Uncache "); + $p = $txid + 31; + unroll(32) { + $b = *(uint8*)$p; + printf("%02x", $b); + $p-=1; + } + + printf(":%-6d %16ld %7d %s\n", $index, $value, $height, ($isCoinbase ? "Yes" : "No" )); +} diff --git a/contrib/tracing/p2p_monitor.py b/contrib/tracing/p2p_monitor.py new file mode 100755 index 0000000000000..4ff701cac3c8c --- /dev/null +++ b/contrib/tracing/p2p_monitor.py @@ -0,0 +1,253 @@ +#!/usr/bin/env python3 +# Copyright (c) 2021 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. + +""" Interactive bitcoind P2P network traffic monitor utilizing USDT and the + net:inbound_message and net:outbound_message tracepoints. """ + +# This script demonstrates what USDT for Bitcoin Core can enable. It uses BCC +# (https://github.com/iovisor/bcc) to load a sandboxed eBPF program into the +# Linux kernel (root privileges are required). The eBPF program attaches to two +# statically defined tracepoints. The tracepoint 'net:inbound_message' is called +# when a new P2P message is received, and 'net:outbound_message' is called on +# outbound P2P messages. The eBPF program submits the P2P messages to +# this script via a BPF ring buffer. + +import sys +import curses +from curses import wrapper, panel +from bcc import BPF, USDT + +# BCC: The C program to be compiled to an eBPF program (by BCC) and loaded into +# a sandboxed Linux kernel VM. +program = """ +#include + +// Tor v3 addresses are 62 chars + 6 chars for the port (':12345'). +// I2P addresses are 60 chars + 6 chars for the port (':12345'). +#define MAX_PEER_ADDR_LENGTH 62 + 6 +#define MAX_PEER_CONN_TYPE_LENGTH 20 +#define MAX_MSG_TYPE_LENGTH 20 + +struct p2p_message +{ + u64 peer_id; + char peer_addr[MAX_PEER_ADDR_LENGTH]; + char peer_conn_type[MAX_PEER_CONN_TYPE_LENGTH]; + char msg_type[MAX_MSG_TYPE_LENGTH]; + u64 msg_size; +}; + + +// Two BPF perf buffers for pushing data (here P2P messages) to user space. +BPF_PERF_OUTPUT(inbound_messages); +BPF_PERF_OUTPUT(outbound_messages); + +int trace_inbound_message(struct pt_regs *ctx) { + struct p2p_message msg = {}; + + bpf_usdt_readarg(1, ctx, &msg.peer_id); + bpf_usdt_readarg_p(2, ctx, &msg.peer_addr, MAX_PEER_ADDR_LENGTH); + bpf_usdt_readarg_p(3, ctx, &msg.peer_conn_type, MAX_PEER_CONN_TYPE_LENGTH); + bpf_usdt_readarg_p(4, ctx, &msg.msg_type, MAX_MSG_TYPE_LENGTH); + bpf_usdt_readarg(5, ctx, &msg.msg_size); + + inbound_messages.perf_submit(ctx, &msg, sizeof(msg)); + return 0; +}; + +int trace_outbound_message(struct pt_regs *ctx) { + struct p2p_message msg = {}; + + bpf_usdt_readarg(1, ctx, &msg.peer_id); + bpf_usdt_readarg_p(2, ctx, &msg.peer_addr, MAX_PEER_ADDR_LENGTH); + bpf_usdt_readarg_p(3, ctx, &msg.peer_conn_type, MAX_PEER_CONN_TYPE_LENGTH); + bpf_usdt_readarg_p(4, ctx, &msg.msg_type, MAX_MSG_TYPE_LENGTH); + bpf_usdt_readarg(5, ctx, &msg.msg_size); + + outbound_messages.perf_submit(ctx, &msg, sizeof(msg)); + return 0; +}; +""" + + +class Message: + """ A P2P network message. 
""" + msg_type = "" + size = 0 + data = bytes() + inbound = False + + def __init__(self, msg_type, size, inbound): + self.msg_type = msg_type + self.size = size + self.inbound = inbound + + +class Peer: + """ A P2P network peer. """ + id = 0 + address = "" + connection_type = "" + last_messages = list() + + total_inbound_msgs = 0 + total_inbound_bytes = 0 + total_outbound_msgs = 0 + total_outbound_bytes = 0 + + def __init__(self, id, address, connection_type): + self.id = id + self.address = address + self.connection_type = connection_type + self.last_messages = list() + + def add_message(self, message): + self.last_messages.append(message) + if len(self.last_messages) > 25: + self.last_messages.pop(0) + if message.inbound: + self.total_inbound_bytes += message.size + self.total_inbound_msgs += 1 + else: + self.total_outbound_bytes += message.size + self.total_outbound_msgs += 1 + + +def main(bitcoind_path): + peers = dict() + + bitcoind_with_usdts = USDT(path=str(bitcoind_path)) + + # attaching the trace functions defined in the BPF program to the tracepoints + bitcoind_with_usdts.enable_probe( + probe="inbound_message", fn_name="trace_inbound_message") + bitcoind_with_usdts.enable_probe( + probe="outbound_message", fn_name="trace_outbound_message") + bpf = BPF(text=program, usdt_contexts=[bitcoind_with_usdts]) + + # BCC: perf buffer handle function for inbound_messages + def handle_inbound(_, data, size): + """ Inbound message handler. + + Called each time a message is submitted to the inbound_messages BPF table.""" + event = bpf["inbound_messages"].event(data) + if event.peer_id not in peers: + peer = Peer(event.peer_id, event.peer_addr.decode( + "utf-8"), event.peer_conn_type.decode("utf-8")) + peers[peer.id] = peer + peers[event.peer_id].add_message( + Message(event.msg_type.decode("utf-8"), event.msg_size, True)) + + # BCC: perf buffer handle function for outbound_messages + def handle_outbound(_, data, size): + """ Outbound message handler. 
+ + Called each time a message is submitted to the outbound_messages BPF table.""" + event = bpf["outbound_messages"].event(data) + if event.peer_id not in peers: + peer = Peer(event.peer_id, event.peer_addr.decode( + "utf-8"), event.peer_conn_type.decode("utf-8")) + peers[peer.id] = peer + peers[event.peer_id].add_message( + Message(event.msg_type.decode("utf-8"), event.msg_size, False)) + + # BCC: add handlers to the inbound and outbound perf buffers + bpf["inbound_messages"].open_perf_buffer(handle_inbound) + bpf["outbound_messages"].open_perf_buffer(handle_outbound) + + wrapper(loop, bpf, peers) + + +def loop(screen, bpf, peers): + screen.nodelay(1) + cur_list_pos = 0 + win = curses.newwin(30, 70, 2, 7) + win.erase() + win.border(ord("|"), ord("|"), ord("-"), ord("-"), + ord("-"), ord("-"), ord("-"), ord("-")) + info_panel = panel.new_panel(win) + info_panel.hide() + + ROWS_AVALIABLE_FOR_LIST = curses.LINES - 5 + scroll = 0 + + while True: + try: + # BCC: poll the perf buffers for new events or timeout after 50ms + bpf.perf_buffer_poll(timeout=50) + + ch = screen.getch() + if (ch == curses.KEY_DOWN or ch == ord("j")) and cur_list_pos < len( + peers.keys()) -1 and info_panel.hidden(): + cur_list_pos += 1 + if cur_list_pos >= ROWS_AVALIABLE_FOR_LIST: + scroll += 1 + if (ch == curses.KEY_UP or ch == ord("k")) and cur_list_pos > 0 and info_panel.hidden(): + cur_list_pos -= 1 + if scroll > 0: + scroll -= 1 + if ch == ord('\n') or ch == ord(' '): + if info_panel.hidden(): + info_panel.show() + else: + info_panel.hide() + screen.erase() + render(screen, peers, cur_list_pos, scroll, ROWS_AVALIABLE_FOR_LIST, info_panel) + curses.panel.update_panels() + screen.refresh() + except KeyboardInterrupt: + exit() + + +def render(screen, peers, cur_list_pos, scroll, ROWS_AVALIABLE_FOR_LIST, info_panel): + """ renders the list of peers and details panel + + This code is unrelated to USDT, BCC and BPF. 
+ """ + header_format = "%6s %-20s %-20s %-22s %-67s" + row_format = "%6s %-5d %9d byte %-5d %9d byte %-22s %-67s" + + screen.addstr(0, 1, (" P2P Message Monitor "), curses.A_REVERSE) + screen.addstr( + 1, 0, (" Navigate with UP/DOWN or J/K and select a peer with ENTER or SPACE to see individual P2P messages"), curses.A_NORMAL) + screen.addstr(3, 0, + header_format % ("PEER", "OUTBOUND", "INBOUND", "TYPE", "ADDR"), curses.A_BOLD | curses.A_UNDERLINE) + peer_list = sorted(peers.keys())[scroll:ROWS_AVALIABLE_FOR_LIST+scroll] + for i, peer_id in enumerate(peer_list): + peer = peers[peer_id] + screen.addstr(i + 4, 0, + row_format % (peer.id, peer.total_outbound_msgs, peer.total_outbound_bytes, + peer.total_inbound_msgs, peer.total_inbound_bytes, + peer.connection_type, peer.address), + curses.A_REVERSE if i + scroll == cur_list_pos else curses.A_NORMAL) + if i + scroll == cur_list_pos: + info_window = info_panel.window() + info_window.erase() + info_window.border( + ord("|"), ord("|"), ord("-"), ord("-"), + ord("-"), ord("-"), ord("-"), ord("-")) + + info_window.addstr( + 1, 1, f"PEER {peer.id} ({peer.address})".center(68), curses.A_REVERSE | curses.A_BOLD) + info_window.addstr( + 2, 1, f" OUR NODE{peer.connection_type:^54}PEER ", + curses.A_BOLD) + for i, msg in enumerate(peer.last_messages): + if msg.inbound: + info_window.addstr( + i + 3, 1, "%68s" % + (f"<--- {msg.msg_type} ({msg.size} bytes) "), curses.A_NORMAL) + else: + info_window.addstr( + i + 3, 1, " %s (%d byte) --->" % + (msg.msg_type, msg.size), curses.A_NORMAL) + + +if __name__ == "__main__": + if len(sys.argv) < 2: + print("USAGE:", sys.argv[0], "path/to/bitcoind") + exit() + path = sys.argv[1] + main(path) diff --git a/contrib/valgrind.supp b/contrib/valgrind.supp new file mode 100644 index 0000000000000..6efe49254b90a --- /dev/null +++ b/contrib/valgrind.supp @@ -0,0 +1,137 @@ +# This valgrind suppressions file includes known Valgrind warnings in our +# dependencies that cannot be fixed in-tree. +# +# Example use: +# $ valgrind --suppressions=contrib/valgrind.supp src/test/test_bitcoin +# $ valgrind --suppressions=contrib/valgrind.supp --leak-check=full \ +# --show-leak-kinds=all src/test/test_bitcoin +# +# To create suppressions for found issues, use the --gen-suppressions=all option: +# $ valgrind --suppressions=contrib/valgrind.supp --leak-check=full \ +# --show-leak-kinds=all --gen-suppressions=all --show-reachable=yes \ +# --error-limit=no src/test/test_bitcoin +# +# Note that suppressions may depend on OS and/or library versions. +# Tested on: +# * aarch64 (Ubuntu 22.04 system libs, clang, without gui) +# * x86_64 (Ubuntu 22.04 system libs, clang, without gui) +{ + Suppress libstdc++ warning - https://gcc.gnu.org/bugzilla/show_bug.cgi?id=65434 + Memcheck:Leak + match-leak-kinds: reachable + fun:malloc + obj:*/libstdc++.* + fun:call_init.part.0 + fun:call_init + fun:_dl_init + obj:*/ld-*.so +} +{ + Suppress libdb warning - https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=662917 + Memcheck:Cond + obj:*/libdb_cxx-*.so + fun:__log_put +} +{ + Suppress libdb warning + Memcheck:Param + pwrite64(buf) + fun:pwrite + fun:__os_io +} +{ + Suppress libdb warning + Memcheck:Cond + fun:__log_putr.isra.1 +} +{ + Suppress libdb warning + Memcheck:Param + pwrite64(buf) + ... + obj:*/libdb_cxx-*.so +} +{ + Suppress uninitialized bytes warning in compat code + Memcheck:Param + ioctl(TCSET{S,SW,SF}) + fun:tcsetattr +} +{ + Suppress libdb warning + Memcheck:Leak + fun:malloc + ... 
+ obj:*/libdb_cxx-*.so +} +{ + Suppress leaks on init + Memcheck:Leak + ... + fun:_Z11AppInitMainR11NodeContext +} +{ + Suppress leaks on shutdown + Memcheck:Leak + ... + fun:_Z8ShutdownR11NodeContext +} +{ + Ignore GUI warning + Memcheck:Leak + ... + obj:/usr/lib64/libgdk-3.so.0.2404.7 +} +{ + Suppress leveldb warning (leveldb::InitModule()) - https://github.com/google/leveldb/issues/113 + Memcheck:Leak + match-leak-kinds: reachable + fun:_Znwm + fun:_ZN7leveldbL10InitModuleEv +} +{ + Suppress leveldb warning (leveldb::Env::Default()) - https://github.com/google/leveldb/issues/113 + Memcheck:Leak + match-leak-kinds: reachable + fun:_Znwm + ... + fun:_ZN7leveldbL14InitDefaultEnvEv +} +{ + Suppress leveldb leak + Memcheck:Leak + match-leak-kinds: reachable + fun:_Znwm + ... + fun:_ZN7leveldb6DBImpl14BackgroundCallEv +} +{ + Suppress leveldb leak + Memcheck:Leak + fun:_Znwm + ... + fun:GetCoin +} +{ + Suppress LogInstance still reachable memory warning + Memcheck:Leak + match-leak-kinds: reachable + fun:_Znwm + fun:_Z11LogInstancev +} +{ + Suppress secp256k1_context_create still reachable memory warning + Memcheck:Leak + match-leak-kinds: reachable + fun:malloc + ... + fun:secp256k1_context_create +} +{ + Suppress BCLog::Logger::StartLogging() still reachable memory warning + Memcheck:Leak + match-leak-kinds: reachable + fun:malloc + ... + fun:_ZN5BCLog6Logger12StartLoggingEv +} diff --git a/contrib/verify-commits/README.md b/contrib/verify-commits/README.md new file mode 100644 index 0000000000000..b8b15280bae2f --- /dev/null +++ b/contrib/verify-commits/README.md @@ -0,0 +1,57 @@ +Tooling for verification of PGP signed commits +---------------------------------------------- + +This is an incomplete work in progress, but currently includes a pre-push hook +script (`pre-push-hook.sh`) for maintainers to ensure that their own commits +are PGP signed (nearly always merge commits), as well as a Python 3 script to verify +commits against a trusted keys list. + + +Using verify-commits.py safely +------------------------------ + +Remember that you can't use an untrusted script to verify itself. This means +that checking out code, then running `verify-commits.py` against `HEAD` is +_not_ safe, because the version of `verify-commits.py` that you just ran could +be backdoored. Instead, you need to use a trusted version of verify-commits +prior to checkout to make sure you're checking out only code signed by trusted +keys: + + ```sh + git fetch origin && \ + ./contrib/verify-commits/verify-commits.py origin/master && \ + git checkout origin/master + ``` + +Note that the above isn't a good UI/UX yet, and needs significant improvements +to make it more convenient and reduce the chance of errors; pull-reqs +improving this process would be much appreciated. + +Configuration files +------------------- + +* `trusted-git-root`: This file should contain a single git commit hash which is the first unsigned git commit (hence it is the "root of trust"). +* `trusted-sha512-root-commit`: This file should contain a single git commit hash which is the first commit without a SHA512 root commitment. +* `trusted-keys`: This file should contain a \n-delimited list of all PGP fingerprints of authorized commit signers (primary, not subkeys). +* `allow-revsig-commits`: This file should contain a \n-delimited list of git commit hashes. See next section for more info. + +Import trusted keys +------------------- +In order to check the commit signatures, you must add the trusted PGP keys to your machine. 
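The fingerprints to import are the ones listed, one per line, in `contrib/verify-commits/trusted-keys`. As a rough sketch (assuming GnuPG 2.x and network access to a keyserver), they could also be fetched one at a time; the one-liner that follows does the same thing in a single command:

```sh
# Illustrative sketch only: fetch each fingerprint listed in trusted-keys
# individually from a keyserver.
while read -r fingerprint; do
    gpg --keyserver hkps://keys.openpgp.org --recv-keys "$fingerprint"
done < contrib/verify-commits/trusted-keys
```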
[GnuPG](https://gnupg.org/) may be used to import the trusted keys by running the following command: + +```sh +gpg --keyserver hkps://keys.openpgp.org --recv-keys $(/dev/null)" +else + # Note how we've disabled SHA1 with the --weak-digest option, disabling + # signatures - including selfsigs - that use SHA1. While you might think that + # collision attacks shouldn't be an issue as they'd be an attack on yourself, + # in fact because what's being signed is a commit object that's + # semi-deterministically generated by untrusted input (the pull-req) in theory + # an attacker could construct a pull-req that results in a commit object that + # they've created a collision for. Not the most likely attack, but preventing + # it is pretty easy so we do so as a "belt-and-suspenders" measure. + GPG_RES="" + for LINE in $(gpg --version); do + case "$LINE" in + "gpg (GnuPG) 1.4.1"*|"gpg (GnuPG) 2.0."*) + echo "Please upgrade to at least gpg 2.1.10 to check for weak signatures" > /dev/stderr + GPG_RES="$(printf '%s\n' "$INPUT" | gpg --trust-model always "$@" 2>/dev/null)" + ;; + # We assume if you're running 2.1+, you're probably running 2.1.10+ + # gpg will fail otherwise + # We assume if you're running 1.X, it is either 1.4.1X or 1.4.20+ + # gpg will fail otherwise + esac + done + [ "$GPG_RES" = "" ] && GPG_RES="$(printf '%s\n' "$INPUT" | gpg --trust-model always --weak-digest sha1 "$@" 2>/dev/null)" +fi +for LINE in $GPG_RES; do + case "$LINE" in + "[GNUPG:] VALIDSIG "*) + while read KEY; do + [ "${LINE#?GNUPG:? VALIDSIG * * * * * * * * * }" = "$KEY" ] && VALID=true + done < ./contrib/verify-commits/trusted-keys + ;; + "[GNUPG:] REVKEYSIG "*) + [ "$BITCOIN_VERIFY_COMMITS_ALLOW_REVSIG" != 1 ] && exit 1 + REVSIG=true + GOODREVSIG="[GNUPG:] GOODSIG ${LINE#* * *}" + ;; + "[GNUPG:] EXPKEYSIG "*) + [ "$BITCOIN_VERIFY_COMMITS_ALLOW_REVSIG" != 1 ] && exit 1 + REVSIG=true + GOODREVSIG="[GNUPG:] GOODSIG ${LINE#* * *}" + ;; + esac +done +if ! $VALID; then + exit 1 +fi +if $VALID && $REVSIG; then + printf '%s\n' "$INPUT" | gpg --trust-model always "$@" 2>/dev/null | grep "^\[GNUPG:\] \(NEWSIG\|SIG_ID\|VALIDSIG\)" + echo "$GOODREVSIG" +else + printf '%s\n' "$INPUT" | gpg --trust-model always "$@" 2>/dev/null +fi diff --git a/contrib/verify-commits/pre-push-hook.sh b/contrib/verify-commits/pre-push-hook.sh new file mode 100755 index 0000000000000..995324837887f --- /dev/null +++ b/contrib/verify-commits/pre-push-hook.sh @@ -0,0 +1,21 @@ +#!/usr/bin/env bash +# Copyright (c) 2014-2021 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. + +export LC_ALL=C +if ! [[ "$2" =~ ^(git@)?(www.)?github.com(:|/)bitcoin/bitcoin(.git)?$ ]]; then + exit 0 +fi + +while read LINE; do + set -- A "$LINE" + if [ "$4" != "refs/heads/master" ]; then + continue + fi + if ! 
./contrib/verify-commits/verify-commits.py "$3" > /dev/null 2>&1; then + echo "ERROR: A commit is not signed, can't push" + ./contrib/verify-commits/verify-commits.py + exit 1 + fi +done < /dev/stdin diff --git a/contrib/verify-commits/trusted-git-root b/contrib/verify-commits/trusted-git-root new file mode 100644 index 0000000000000..c60f8ab695e9c --- /dev/null +++ b/contrib/verify-commits/trusted-git-root @@ -0,0 +1 @@ +82bcf405f6db1d55b684a1f63a4aabad376cdad7 diff --git a/contrib/verify-commits/trusted-keys b/contrib/verify-commits/trusted-keys new file mode 100644 index 0000000000000..e83bfd7345374 --- /dev/null +++ b/contrib/verify-commits/trusted-keys @@ -0,0 +1,8 @@ +71A3B16735405025D447E8F274810B012346C9A6 +133EAC179436F14A5CF1B794860FEB804E669320 +32EE5C4C3FA15CCADB46ABE529D4BCB6416F53EC +B8B3F1C0E58C15DB6A81D30C3648A882F4316B9B +CA03882CB1FC067B5D3ACFE4D300116E1C875A3D +E777299FC265DD04793070EB944D35F9AC3DB76A +D1DBF2C4B96F2DEBF4C16654410108112E7EA81F +152812300785C96444D3334D17565732E08E5E41 diff --git a/contrib/verify-commits/trusted-sha512-root-commit b/contrib/verify-commits/trusted-sha512-root-commit new file mode 100644 index 0000000000000..7d41f90ad70ef --- /dev/null +++ b/contrib/verify-commits/trusted-sha512-root-commit @@ -0,0 +1 @@ +309bf16257b2395ce502017be627186b749ee749 diff --git a/contrib/verify-commits/verify-commits.py b/contrib/verify-commits/verify-commits.py new file mode 100755 index 0000000000000..2ff14c1f86d61 --- /dev/null +++ b/contrib/verify-commits/verify-commits.py @@ -0,0 +1,170 @@ +#!/usr/bin/env python3 +# Copyright (c) 2018-2019 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +"""Verify commits against a trusted keys list.""" +import argparse +import hashlib +import logging +import os +import subprocess +import sys +import time + +GIT = os.getenv('GIT', 'git') + +def tree_sha512sum(commit='HEAD'): + """Calculate the Tree-sha512 for the commit. + + This is copied from github-merge.py. 
See https://github.com/bitcoin-core/bitcoin-maintainer-tools.""" + + # request metadata for entire tree, recursively + files = [] + blob_by_name = {} + for line in subprocess.check_output([GIT, 'ls-tree', '--full-tree', '-r', commit]).splitlines(): + name_sep = line.index(b'\t') + metadata = line[:name_sep].split() # perms, 'blob', blobid + assert metadata[1] == b'blob' + name = line[name_sep + 1:] + files.append(name) + blob_by_name[name] = metadata[2] + + files.sort() + # open connection to git-cat-file in batch mode to request data for all blobs + # this is much faster than launching it per file + p = subprocess.Popen([GIT, 'cat-file', '--batch'], stdout=subprocess.PIPE, stdin=subprocess.PIPE) + overall = hashlib.sha512() + for f in files: + blob = blob_by_name[f] + # request blob + p.stdin.write(blob + b'\n') + p.stdin.flush() + # read header: blob, "blob", size + reply = p.stdout.readline().split() + assert reply[0] == blob and reply[1] == b'blob' + size = int(reply[2]) + # hash the blob data + intern = hashlib.sha512() + ptr = 0 + while ptr < size: + bs = min(65536, size - ptr) + piece = p.stdout.read(bs) + if len(piece) == bs: + intern.update(piece) + else: + raise IOError('Premature EOF reading git cat-file output') + ptr += bs + dig = intern.hexdigest() + assert p.stdout.read(1) == b'\n' # ignore LF that follows blob data + # update overall hash with file hash + overall.update(dig.encode("utf-8")) + overall.update(" ".encode("utf-8")) + overall.update(f) + overall.update("\n".encode("utf-8")) + p.stdin.close() + if p.wait(): + raise IOError('Non-zero return value executing git cat-file') + return overall.hexdigest() + +def main(): + + # Enable debug logging if running in CI + if 'CI' in os.environ and os.environ['CI'].lower() == "true": + logging.getLogger().setLevel(logging.DEBUG) + + # Parse arguments + parser = argparse.ArgumentParser(usage='%(prog)s [options] [commit id]') + parser.add_argument('--disable-tree-check', action='store_false', dest='verify_tree', help='disable SHA-512 tree check') + parser.add_argument('--clean-merge', type=float, dest='clean_merge', default=float('inf'), help='Only check clean merge after days ago (default: %(default)s)', metavar='NUMBER') + parser.add_argument('commit', nargs='?', default='HEAD', help='Check clean merge up to commit ') + args = parser.parse_args() + + # get directory of this program and read data files + dirname = os.path.dirname(os.path.abspath(__file__)) + print("Using verify-commits data from " + dirname) + with open(dirname + "/trusted-git-root", "r", encoding="utf8") as f: + verified_root = f.read().splitlines()[0] + with open(dirname + "/trusted-sha512-root-commit", "r", encoding="utf8") as f: + verified_sha512_root = f.read().splitlines()[0] + with open(dirname + "/allow-revsig-commits", "r", encoding="utf8") as f: + revsig_allowed = f.read().splitlines() + with open(dirname + "/allow-unclean-merge-commits", "r", encoding="utf8") as f: + unclean_merge_allowed = f.read().splitlines() + with open(dirname + "/allow-incorrect-sha512-commits", "r", encoding="utf8") as f: + incorrect_sha512_allowed = f.read().splitlines() + + # Set commit and branch and set variables + current_commit = args.commit + if ' ' in current_commit: + print("Commit must not contain spaces", file=sys.stderr) + sys.exit(1) + verify_tree = args.verify_tree + no_sha1 = True + prev_commit = "" + initial_commit = current_commit + branch = subprocess.check_output([GIT, 'show', '-s', '--format=%H', initial_commit]).decode('utf8').splitlines()[0] + + # Iterate 
through commits + while True: + + # Log a message to prevent Travis from timing out + logging.debug("verify-commits: [in-progress] processing commit {}".format(current_commit[:8])) + + if current_commit == verified_root: + print('There is a valid path from "{}" to {} where all commits are signed!'.format(initial_commit, verified_root)) + sys.exit(0) + if current_commit == verified_sha512_root: + if verify_tree: + print("All Tree-SHA512s matched up to {}".format(verified_sha512_root), file=sys.stderr) + verify_tree = False + no_sha1 = False + + os.environ['BITCOIN_VERIFY_COMMITS_ALLOW_SHA1'] = "0" if no_sha1 else "1" + os.environ['BITCOIN_VERIFY_COMMITS_ALLOW_REVSIG'] = "1" if current_commit in revsig_allowed else "0" + + # Check that the commit (and parents) was signed with a trusted key + if subprocess.call([GIT, '-c', 'gpg.program={}/gpg.sh'.format(dirname), 'verify-commit', current_commit], stdout=subprocess.DEVNULL): + if prev_commit != "": + print("No parent of {} was signed with a trusted key!".format(prev_commit), file=sys.stderr) + print("Parents are:", file=sys.stderr) + parents = subprocess.check_output([GIT, 'show', '-s', '--format=format:%P', prev_commit]).decode('utf8').splitlines()[0].split(' ') + for parent in parents: + subprocess.call([GIT, 'show', '-s', parent], stdout=sys.stderr) + else: + print("{} was not signed with a trusted key!".format(current_commit), file=sys.stderr) + sys.exit(1) + + # Check the Tree-SHA512 + if (verify_tree or prev_commit == "") and current_commit not in incorrect_sha512_allowed: + tree_hash = tree_sha512sum(current_commit) + if ("Tree-SHA512: {}".format(tree_hash)) not in subprocess.check_output([GIT, 'show', '-s', '--format=format:%B', current_commit]).decode('utf8').splitlines(): + print("Tree-SHA512 did not match for commit " + current_commit, file=sys.stderr) + sys.exit(1) + + # Merge commits should only have two parents + parents = subprocess.check_output([GIT, 'show', '-s', '--format=format:%P', current_commit]).decode('utf8').splitlines()[0].split(' ') + if len(parents) > 2: + print("Commit {} is an octopus merge".format(current_commit), file=sys.stderr) + sys.exit(1) + + # Check that the merge commit is clean + commit_time = int(subprocess.check_output([GIT, 'show', '-s', '--format=format:%ct', current_commit]).decode('utf8').splitlines()[0]) + check_merge = commit_time > time.time() - args.clean_merge * 24 * 60 * 60 # Only check commits in clean_merge days + allow_unclean = current_commit in unclean_merge_allowed + if len(parents) == 2 and check_merge and not allow_unclean: + current_tree = subprocess.check_output([GIT, 'show', '--format=%T', current_commit]).decode('utf8').splitlines()[0] + subprocess.call([GIT, 'checkout', '--force', '--quiet', parents[0]]) + subprocess.call([GIT, 'merge', '--no-ff', '--quiet', '--no-gpg-sign', parents[1]], stdout=subprocess.DEVNULL) + recreated_tree = subprocess.check_output([GIT, 'show', '--format=format:%T', 'HEAD']).decode('utf8').splitlines()[0] + if current_tree != recreated_tree: + print("Merge commit {} is not clean".format(current_commit), file=sys.stderr) + subprocess.call([GIT, 'diff', current_commit]) + subprocess.call([GIT, 'checkout', '--force', '--quiet', branch]) + sys.exit(1) + subprocess.call([GIT, 'checkout', '--force', '--quiet', branch]) + + prev_commit = current_commit + current_commit = parents[0] + +if __name__ == '__main__': + main() diff --git a/contrib/verifybinaries/README.md b/contrib/verifybinaries/README.md new file mode 100644 index 0000000000000..c50d4bef7151c --- 
/dev/null +++ b/contrib/verifybinaries/README.md @@ -0,0 +1,41 @@ +### Verify Binaries + +#### Preparation: + +Make sure you obtain the proper release signing key and verify the fingerprint with several independent sources. + +```sh +$ gpg --fingerprint "Bitcoin Core binary release signing key" +pub 4096R/36C2E964 2015-06-24 [expires: YYYY-MM-DD] + Key fingerprint = 01EA 5486 DE18 A882 D4C2 6845 90C8 019E 36C2 E964 +uid Wladimir J. van der Laan (Bitcoin Core binary release signing key) +``` + +#### Usage: + +This script attempts to download the signature file `SHA256SUMS.asc` from https://bitcoin.org. + +It first checks if the signature passes, and then downloads the files specified in the file, and checks if the hashes of these files match those that are specified in the signature file. + +The script returns 0 if everything passes the checks. It returns 1 if either the signature check or the hash check doesn't pass. If an error occurs the return value is 2. + + +```sh +./verify.py bitcoin-core-0.11.2 +./verify.py bitcoin-core-0.12.0 +./verify.py bitcoin-core-0.13.0-rc3 +``` + +If you only want to download the binaries of certain platform, add the corresponding suffix, e.g.: + +```sh +./verify.py bitcoin-core-0.11.2-osx +./verify.py 0.12.0-linux +./verify.py bitcoin-core-0.13.0-rc3-win64 +``` + +If you do not want to keep the downloaded binaries, specify anything as the second parameter. + +```sh +./verify.py bitcoin-core-0.13.0 delete +``` diff --git a/contrib/verifybinaries/verify.py b/contrib/verifybinaries/verify.py new file mode 100755 index 0000000000000..b5e4f1318b954 --- /dev/null +++ b/contrib/verifybinaries/verify.py @@ -0,0 +1,183 @@ +#!/usr/bin/env python3 +# Copyright (c) 2020-2021 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +"""Script for verifying Bitcoin Core release binaries + +This script attempts to download the signature file SHA256SUMS.asc from +bitcoincore.org and bitcoin.org and compares them. +It first checks if the signature passes, and then downloads the files +specified in the file, and checks if the hashes of these files match those +that are specified in the signature file. +The script returns 0 if everything passes the checks. It returns 1 if either +the signature check or the hash check doesn't pass. If an error occurs the +return value is >= 2. 
+""" +from hashlib import sha256 +import os +import subprocess +import sys +from textwrap import indent + +WORKINGDIR = "/tmp/bitcoin_verify_binaries" +HASHFILE = "hashes.tmp" +HOST1 = "https://bitcoincore.org" +HOST2 = "https://bitcoin.org" +VERSIONPREFIX = "bitcoin-core-" +SIGNATUREFILENAME = "SHA256SUMS.asc" + + +def parse_version_string(version_str): + if version_str.startswith(VERSIONPREFIX): # remove version prefix + version_str = version_str[len(VERSIONPREFIX):] + + parts = version_str.split('-') + version_base = parts[0] + version_rc = "" + version_os = "" + if len(parts) == 2: # "-rcN" or "version-platform" + if "rc" in parts[1]: + version_rc = parts[1] + else: + version_os = parts[1] + elif len(parts) == 3: # "-rcN-platform" + version_rc = parts[1] + version_os = parts[2] + + return version_base, version_rc, version_os + + +def download_with_wget(remote_file, local_file=None): + if local_file: + wget_args = ['wget', '-O', local_file, remote_file] + else: + # use timestamping mechanism if local filename is not explicitly set + wget_args = ['wget', '-N', remote_file] + + result = subprocess.run(wget_args, + stderr=subprocess.STDOUT, stdout=subprocess.PIPE) + return result.returncode == 0, result.stdout.decode().rstrip() + + +def files_are_equal(filename1, filename2): + with open(filename1, 'rb') as file1: + contents1 = file1.read() + with open(filename2, 'rb') as file2: + contents2 = file2.read() + return contents1 == contents2 + + +def verify_with_gpg(signature_filename, output_filename): + result = subprocess.run(['gpg', '--yes', '--decrypt', '--output', + output_filename, signature_filename], + stderr=subprocess.STDOUT, stdout=subprocess.PIPE) + return result.returncode, result.stdout.decode().rstrip() + + +def remove_files(filenames): + for filename in filenames: + os.remove(filename) + + +def main(args): + # sanity check + if len(args) < 1: + print("Error: need to specify a version on the command line") + return 3 + + # determine remote dir dependent on provided version string + version_base, version_rc, os_filter = parse_version_string(args[0]) + remote_dir = f"/bin/{VERSIONPREFIX}{version_base}/" + if version_rc: + remote_dir += f"test.{version_rc}/" + remote_sigfile = remote_dir + SIGNATUREFILENAME + + # create working directory + os.makedirs(WORKINGDIR, exist_ok=True) + os.chdir(WORKINGDIR) + + # fetch first signature file + sigfile1 = SIGNATUREFILENAME + success, output = download_with_wget(HOST1 + remote_sigfile, sigfile1) + if not success: + print("Error: couldn't fetch signature file. 
" + "Have you specified the version number in the following format?") + print(f"[{VERSIONPREFIX}][-rc[0-9]][-platform] " + f"(example: {VERSIONPREFIX}0.21.0-rc3-osx)") + print("wget output:") + print(indent(output, '\t')) + return 4 + + # fetch second signature file + sigfile2 = SIGNATUREFILENAME + ".2" + success, output = download_with_wget(HOST2 + remote_sigfile, sigfile2) + if not success: + print("bitcoin.org failed to provide signature file, " + "but bitcoincore.org did?") + print("wget output:") + print(indent(output, '\t')) + remove_files([sigfile1]) + return 5 + + # ensure that both signature files are equal + if not files_are_equal(sigfile1, sigfile2): + print("bitcoin.org and bitcoincore.org signature files were not equal?") + print(f"See files {WORKINGDIR}/{sigfile1} and {WORKINGDIR}/{sigfile2}") + return 6 + + # check signature and extract data into file + retval, output = verify_with_gpg(sigfile1, HASHFILE) + if retval != 0: + if retval == 1: + print("Bad signature.") + elif retval == 2: + print("gpg error. Do you have the Bitcoin Core binary release " + "signing key installed?") + print("gpg output:") + print(indent(output, '\t')) + remove_files([sigfile1, sigfile2, HASHFILE]) + return 1 + + # extract hashes/filenames of binaries to verify from hash file; + # each line has the following format: " " + with open(HASHFILE, 'r', encoding='utf8') as hash_file: + hashes_to_verify = [ + line.split()[:2] for line in hash_file if os_filter in line] + remove_files([HASHFILE]) + if not hashes_to_verify: + print("error: no files matched the platform specified") + return 7 + + # download binaries + for _, binary_filename in hashes_to_verify: + print(f"Downloading {binary_filename}") + download_with_wget(HOST1 + remote_dir + binary_filename) + + # verify hashes + offending_files = [] + for hash_expected, binary_filename in hashes_to_verify: + with open(binary_filename, 'rb') as binary_file: + hash_calculated = sha256(binary_file.read()).hexdigest() + if hash_calculated != hash_expected: + offending_files.append(binary_filename) + if offending_files: + print("Hashes don't match.") + print("Offending files:") + print('\n'.join(offending_files)) + return 1 + verified_binaries = [entry[1] for entry in hashes_to_verify] + + # clean up files if desired + if len(args) >= 2: + print("Clean up the binaries") + remove_files([sigfile1, sigfile2] + verified_binaries) + else: + print(f"Keep the binaries in {WORKINGDIR}") + + print("Verified hashes of") + print('\n'.join(verified_binaries)) + return 0 + + +if __name__ == '__main__': + sys.exit(main(sys.argv[1:])) diff --git a/contrib/verifysfbinaries/README.md b/contrib/verifysfbinaries/README.md deleted file mode 100644 index 8c038865bdaff..0000000000000 --- a/contrib/verifysfbinaries/README.md +++ /dev/null @@ -1,6 +0,0 @@ -### Verify SF Binaries ### -This script attempts to download the signature file `SHA256SUMS.asc` from https://bitcoin.org. - -It first checks if the signature passes, and then downloads the files specified in the file, and checks if the hashes of these files match those that are specified in the signature file. - -The script returns 0 if everything passes the checks. It returns 1 if either the signature check or the hash check doesn't pass. If an error occurs the return value is 2. 
\ No newline at end of file diff --git a/contrib/verifysfbinaries/verify.sh b/contrib/verifysfbinaries/verify.sh deleted file mode 100755 index 3eb46938835cc..0000000000000 --- a/contrib/verifysfbinaries/verify.sh +++ /dev/null @@ -1,119 +0,0 @@ -#!/bin/bash - -### This script attempts to download the signature file SHA256SUMS.asc from bitcoin.org -### It first checks if the signature passes, and then downloads the files specified in -### the file, and checks if the hashes of these files match those that are specified -### in the signature file. -### The script returns 0 if everything passes the checks. It returns 1 if either the -### signature check or the hash check doesn't pass. If an error occurs the return value is 2 - -function clean_up { - for file in $* - do - rm "$file" 2> /dev/null - done -} - -WORKINGDIR="/tmp/bitcoin" -TMPFILE="hashes.tmp" - -#this URL is used if a version number is not specified as an argument to the script -SIGNATUREFILE="https://bitcoin.org/bin/0.9.2.1/SHA256SUMS.asc" - -SIGNATUREFILENAME="SHA256SUMS.asc" -RCSUBDIR="test/" -BASEDIR="https://bitcoin.org/bin/" -VERSIONPREFIX="bitcoin-" -RCVERSIONSTRING="rc" - -if [ ! -d "$WORKINGDIR" ]; then - mkdir "$WORKINGDIR" -fi - -cd "$WORKINGDIR" - -#test if a version number has been passed as an argument -if [ -n "$1" ]; then - #let's also check if the version number includes the prefix 'bitcoin-', - # and add this prefix if it doesn't - if [[ $1 == "$VERSIONPREFIX"* ]]; then - VERSION="$1" - else - VERSION="$VERSIONPREFIX$1" - fi - - #now let's see if the version string contains "rc", and strip it off if it does - # and simultaneously add RCSUBDIR to BASEDIR, where we will look for SIGNATUREFILENAME - if [[ $VERSION == *"$RCVERSIONSTRING"* ]]; then - BASEDIR="$BASEDIR${VERSION/%-$RCVERSIONSTRING*}/" - BASEDIR="$BASEDIR$RCSUBDIR" - else - BASEDIR="$BASEDIR$VERSION/" - fi - - SIGNATUREFILE="$BASEDIR$SIGNATUREFILENAME" -else - BASEDIR="${SIGNATUREFILE%/*}/" -fi - -#first we fetch the file containing the signature -WGETOUT=$(wget -N "$BASEDIR$SIGNATUREFILENAME" 2>&1) - -#and then see if wget completed successfully -if [ $? -ne 0 ]; then - echo "Error: couldn't fetch signature file. Have you specified the version number in the following format?" - echo "[bitcoin-]-[rc[0-9]] (example: bitcoin-0.9.2-rc1)" - echo "wget output:" - echo "$WGETOUT"|sed 's/^/\t/g' - exit 2 -fi - -#then we check it -GPGOUT=$(gpg --yes --decrypt --output "$TMPFILE" "$SIGNATUREFILENAME" 2>&1) - -#return value 0: good signature -#return value 1: bad signature -#return value 2: gpg error - -RET="$?" -if [ $RET -ne 0 ]; then - if [ $RET -eq 1 ]; then - #and notify the user if it's bad - echo "Bad signature." - elif [ $RET -eq 2 ]; then - #or if a gpg error has occurred - echo "gpg error. Do you have Gavin's code signing key installed?" - fi - - echo "gpg output:" - echo "$GPGOUT"|sed 's/^/\t/g' - clean_up $SIGNATUREFILENAME $TMPFILE - exit "$RET" -fi - -#here we extract the filenames from the signature file -FILES=$(awk '{print $2}' "$TMPFILE") - -#and download these one by one -for file in in $FILES -do - wget --quiet -N "$BASEDIR$file" -done - -#check hashes -DIFF=$(diff <(sha256sum $FILES) "$TMPFILE") - -if [ $? -eq 1 ]; then - echo "Hashes don't match." - echo "Offending files:" - echo "$DIFF"|grep "^<"|awk '{print "\t"$3}' - exit 1 -elif [ $? -gt 1 ]; then - echo "Error executing 'diff'" - exit 2 -fi - -#everything matches! 
clean up the mess -clean_up $FILES $SIGNATUREFILENAME $TMPFILE - -exit 0 diff --git a/contrib/windeploy/detached-sig-create.sh b/contrib/windeploy/detached-sig-create.sh new file mode 100755 index 0000000000000..82fcf2d40684f --- /dev/null +++ b/contrib/windeploy/detached-sig-create.sh @@ -0,0 +1,36 @@ +#!/bin/sh +# Copyright (c) 2014-2021 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. + +export LC_ALL=C +if [ -z "$OSSLSIGNCODE" ]; then + OSSLSIGNCODE=osslsigncode +fi + +if [ -z "$1" ]; then + echo "usage: $0 " + echo "example: $0 -key codesign.key" + exit 1 +fi + +OUT=signature-win.tar.gz +SRCDIR=unsigned +WORKDIR=./.tmp +OUTDIR="${WORKDIR}/out" +OUTSUBDIR="${OUTDIR}/win" +TIMESERVER=http://timestamp.comodoca.com +CERTFILE="win-codesign.cert" + +mkdir -p "${OUTSUBDIR}" +# shellcheck disable=SC2046 +basename -a $(ls -1 "${SRCDIR}"/*-unsigned.exe) | while read UNSIGNED; do + echo Signing "${UNSIGNED}" + "${OSSLSIGNCODE}" sign -certs "${CERTFILE}" -t "${TIMESERVER}" -h sha256 -in "${SRCDIR}/${UNSIGNED}" -out "${WORKDIR}/${UNSIGNED}" "$@" + "${OSSLSIGNCODE}" extract-signature -pem -in "${WORKDIR}/${UNSIGNED}" -out "${OUTSUBDIR}/${UNSIGNED}.pem" && rm "${WORKDIR}/${UNSIGNED}" +done + +rm -f "${OUT}" +tar -C "${OUTDIR}" -czf "${OUT}" . +rm -rf "${WORKDIR}" +echo "Created ${OUT}" diff --git a/contrib/windeploy/win-codesign.cert b/contrib/windeploy/win-codesign.cert new file mode 100644 index 0000000000000..e763df5847875 --- /dev/null +++ b/contrib/windeploy/win-codesign.cert @@ -0,0 +1,89 @@ +-----BEGIN CERTIFICATE----- +MIIGQzCCBSugAwIBAgIQBSN7Cm16Z0UT9p7lA2jiKDANBgkqhkiG9w0BAQsFADBy +MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMTEwLwYDVQQDEyhEaWdpQ2VydCBTSEEyIEFzc3VyZWQg +SUQgQ29kZSBTaWduaW5nIENBMB4XDTIxMDUyMTAwMDAwMFoXDTIyMDUyNjIzNTk1 +OVowgYAxCzAJBgNVBAYTAlVTMREwDwYDVQQIEwhEZWxhd2FyZTEOMAwGA1UEBxMF +TGV3ZXMxJjAkBgNVBAoTHUJpdGNvaW4gQ29yZSBDb2RlIFNpZ25pbmcgTExDMSYw +JAYDVQQDEx1CaXRjb2luIENvcmUgQ29kZSBTaWduaW5nIExMQzCCAiIwDQYJKoZI +hvcNAQEBBQADggIPADCCAgoCggIBAKe6xtFgKAQ68MvxwCjNtpgPobfDQCLKvCAN +uBKGYuub6ufQB5dhCLN9fjMgfg33AyauvU3PcEUDUWD3/k925bPqgxHC3E7YqoB+ +11b/2Y7a86okqUgcGgvKhaKoHmXxElpM9EjQHjJ0yL4QAR1Lp+9CMMW3wIulBYKt +wLIArFvbuQhMO/6rxL8frpK049v//WfQzB16GXuFnzN/6fDK7oOt5IrKTg4H6EY2 +fj4+QaUj0lNX7aHnZ6Ki45h2RUPDgN1ipRIuhM67npyZ/tdzPPjI3PUgfXCccN6D ++qWWnbbbvPuOht4ziPciVnPd57PqJmAOnLI86gisDfd7VKlcpOSEaagdUGvMbU6f +uAps818GwnJzwCGllxlKASCgXDAckLLvMuit4RfYAhhdhw5R0AsaWK0HW88oHOqi +U7eWlMCbSGk34x9hBrxYl7tvcNcLPWIPYrrhFWNFpkV8bVVIoV5rUNRgWvBcdOq1 +CCPTfsJp3nEH2WCoBghZquDZLSW12wMw2UsQyEojBeGhrR1inn8uK93wSnVCC8F4 +21yWNRMNe/LQVhmZDgFOen9r/WijBsBdQw1bL8N4zGdYv8+soqkrWzW417FfSx81 +pj4j5FEXYXXV5k/4/eBpIARXVRR8xya0nGkhNJmBk0jjDGD8fPW2gFQbqnUwAQ34 +vOr8NUqHAgMBAAGjggHEMIIBwDAfBgNVHSMEGDAWgBRaxLl7KgqjpepxA8Bg+S32 +ZXUOWDAdBgNVHQ4EFgQUVSLtZnifEHvd8z3E7AyLYNuDiaMwDgYDVR0PAQH/BAQD +AgeAMBMGA1UdJQQMMAoGCCsGAQUFBwMDMHcGA1UdHwRwMG4wNaAzoDGGL2h0dHA6 +Ly9jcmwzLmRpZ2ljZXJ0LmNvbS9zaGEyLWFzc3VyZWQtY3MtZzEuY3JsMDWgM6Ax +hi9odHRwOi8vY3JsNC5kaWdpY2VydC5jb20vc2hhMi1hc3N1cmVkLWNzLWcxLmNy +bDBLBgNVHSAERDBCMDYGCWCGSAGG/WwDATApMCcGCCsGAQUFBwIBFhtodHRwOi8v +d3d3LmRpZ2ljZXJ0LmNvbS9DUFMwCAYGZ4EMAQQBMIGEBggrBgEFBQcBAQR4MHYw +JAYIKwYBBQUHMAGGGGh0dHA6Ly9vY3NwLmRpZ2ljZXJ0LmNvbTBOBggrBgEFBQcw +AoZCaHR0cDovL2NhY2VydHMuZGlnaWNlcnQuY29tL0RpZ2lDZXJ0U0hBMkFzc3Vy +ZWRJRENvZGVTaWduaW5nQ0EuY3J0MAwGA1UdEwEB/wQCMAAwDQYJKoZIhvcNAQEL 
+BQADggEBAOaJneI91NJgqghUxgc0AWQ01SAJTgN4z7xMQ3W0ZAtwGbA0byT7YRlj +j7h+j+hMX/JYkRJETTh8Nalq2tPWJBiMMEPOGFVttFER1pwouHkK9pSKyp4xRvNU +L0LPh7fE4EYMJoynys6ZTpMCHLku+X3jFat1+1moh9TJRvK5+ETZYGl0seFNU3mJ +dZzusObm4scffIGgi40kmmISKd5ZRuooRTu9FFR/3vpfbA+7Vg4RSH3CcQPo9bfk ++h/qRQhSfQInTBn7obRpIlvEcK782qivqseJGdtnTmcdVRShD5ckTVza1yv25uQz +l/yTqmG2LXlYjl5iMSdF0C1xYq6IsOA= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFMDCCBBigAwIBAgIQBAkYG1/Vu2Z1U0O1b5VQCDANBgkqhkiG9w0BAQsFADBl +MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJv +b3QgQ0EwHhcNMTMxMDIyMTIwMDAwWhcNMjgxMDIyMTIwMDAwWjByMQswCQYDVQQG +EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNl +cnQuY29tMTEwLwYDVQQDEyhEaWdpQ2VydCBTSEEyIEFzc3VyZWQgSUQgQ29kZSBT +aWduaW5nIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA+NOzHH8O +Ea9ndwfTCzFJGc/Q+0WZsTrbRPV/5aid2zLXcep2nQUut4/6kkPApfmJ1DcZ17aq +8JyGpdglrA55KDp+6dFn08b7KSfH03sjlOSRI5aQd4L5oYQjZhJUM1B0sSgmuyRp +wsJS8hRniolF1C2ho+mILCCVrhxKhwjfDPXiTWAYvqrEsq5wMWYzcT6scKKrzn/p +fMuSoeU7MRzP6vIK5Fe7SrXpdOYr/mzLfnQ5Ng2Q7+S1TqSp6moKq4TzrGdOtcT3 +jNEgJSPrCGQ+UpbB8g8S9MWOD8Gi6CxR93O8vYWxYoNzQYIH5DiLanMg0A9kczye +n6Yzqf0Z3yWT0QIDAQABo4IBzTCCAckwEgYDVR0TAQH/BAgwBgEB/wIBADAOBgNV +HQ8BAf8EBAMCAYYwEwYDVR0lBAwwCgYIKwYBBQUHAwMweQYIKwYBBQUHAQEEbTBr +MCQGCCsGAQUFBzABhhhodHRwOi8vb2NzcC5kaWdpY2VydC5jb20wQwYIKwYBBQUH +MAKGN2h0dHA6Ly9jYWNlcnRzLmRpZ2ljZXJ0LmNvbS9EaWdpQ2VydEFzc3VyZWRJ +RFJvb3RDQS5jcnQwgYEGA1UdHwR6MHgwOqA4oDaGNGh0dHA6Ly9jcmw0LmRpZ2lj +ZXJ0LmNvbS9EaWdpQ2VydEFzc3VyZWRJRFJvb3RDQS5jcmwwOqA4oDaGNGh0dHA6 +Ly9jcmwzLmRpZ2ljZXJ0LmNvbS9EaWdpQ2VydEFzc3VyZWRJRFJvb3RDQS5jcmww +TwYDVR0gBEgwRjA4BgpghkgBhv1sAAIEMCowKAYIKwYBBQUHAgEWHGh0dHBzOi8v +d3d3LmRpZ2ljZXJ0LmNvbS9DUFMwCgYIYIZIAYb9bAMwHQYDVR0OBBYEFFrEuXsq +CqOl6nEDwGD5LfZldQ5YMB8GA1UdIwQYMBaAFEXroq/0ksuCMS1Ri6enIZ3zbcgP +MA0GCSqGSIb3DQEBCwUAA4IBAQA+7A1aJLPzItEVyCx8JSl2qB1dHC06GsTvMGHX +fgtg/cM9D8Svi/3vKt8gVTew4fbRknUPUbRupY5a4l4kgU4QpO4/cY5jDhNLrddf +RHnzNhQGivecRk5c/5CxGwcOkRX7uq+1UcKNJK4kxscnKqEpKBo6cSgCPC6Ro8Al +EeKcFEehemhor5unXCBc2XGxDI+7qPjFEmifz0DLQESlE/DmZAwlCEIysjaKJAL+ +L3J+HNdJRZboWR3p+nRka7LrZkPas7CM1ekN3fYBIM6ZMWM9CBoYs4GbT8aTEAb8 +B4H6i9r5gkn3Ym6hU/oSlBiFLpKR6mhsRDKyZqHnGKSaZFHv +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDtzCCAp+gAwIBAgIQDOfg5RfYRv6P5WD8G/AwOTANBgkqhkiG9w0BAQUFADBl +MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJv +b3QgQ0EwHhcNMDYxMTEwMDAwMDAwWhcNMzExMTEwMDAwMDAwWjBlMQswCQYDVQQG +EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNl +cnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgQ0EwggEi +MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCtDhXO5EOAXLGH87dg+XESpa7c +JpSIqvTO9SA5KFhgDPiA2qkVlTJhPLWxKISKityfCgyDF3qPkKyK53lTXDGEKvYP +mDI2dsze3Tyoou9q+yHyUmHfnyDXH+Kx2f4YZNISW1/5WBg1vEfNoTb5a3/UsDg+ +wRvDjDPZ2C8Y/igPs6eD1sNuRMBhNZYW/lmci3Zt1/GiSw0r/wty2p5g0I6QNcZ4 +VYcgoc/lbQrISXwxmDNsIumH0DJaoroTghHtORedmTpyoeb6pNnVFzF1roV9Iq4/ +AUaG9ih5yLHa5FcXxH4cDrC0kqZWs72yl+2qp/C3xag/lRbQ/6GW6whfGHdPAgMB +AAGjYzBhMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW +BBRF66Kv9JLLgjEtUYunpyGd823IDzAfBgNVHSMEGDAWgBRF66Kv9JLLgjEtUYun +pyGd823IDzANBgkqhkiG9w0BAQUFAAOCAQEAog683+Lt8ONyc3pklL/3cmbYMuRC +dWKuh+vy1dneVrOfzM4UKLkNl2BcEkxY5NM9g0lFWJc1aRqoR+pWxnmrEthngYTf +fwk8lOa4JiwgvT2zKIn3X/8i4peEH+ll74fg38FnSbNd67IJKusm7Xi+fT8r87cm +NW1fiQG2SVufAQWbqz0lwcy2f8Lxb4bG+mRo64EtlOtCt/qMHt1i8b5QZ7dsvfPx 
+H2sMNgcWfzd8qVttevESRmCD1ycEvkvOl77DZypoEd+A5wwzZr8TDRRu838fYxAe ++o0bJW1sj6W3YQGx0qMmoRBxna3iw/nDmVG3KwcIzi7mULKn+gpFL6Lw8g== +-----END CERTIFICATE----- + diff --git a/contrib/zmq/zmq_sub.py b/contrib/zmq/zmq_sub.py new file mode 100755 index 0000000000000..d8087a4db387c --- /dev/null +++ b/contrib/zmq/zmq_sub.py @@ -0,0 +1,89 @@ +#!/usr/bin/env python3 +# Copyright (c) 2014-2021 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. + +""" + ZMQ example using python3's asyncio + + Bitcoin should be started with the command line arguments: + bitcoind -testnet -daemon \ + -zmqpubrawtx=tcp://127.0.0.1:28332 \ + -zmqpubrawblock=tcp://127.0.0.1:28332 \ + -zmqpubhashtx=tcp://127.0.0.1:28332 \ + -zmqpubhashblock=tcp://127.0.0.1:28332 \ + -zmqpubsequence=tcp://127.0.0.1:28332 + + We use the asyncio library here. `self.handle()` installs itself as a + future at the end of the function. Since it never returns with the event + loop having an empty stack of futures, this creates an infinite loop. An + alternative is to wrap the contents of `handle` inside `while True`. + + A blocking example using python 2.7 can be obtained from the git history: + https://github.com/bitcoin/bitcoin/blob/37a7fe9e440b83e2364d5498931253937abe9294/contrib/zmq/zmq_sub.py +""" + +import asyncio +import zmq +import zmq.asyncio +import signal +import struct +import sys + +if (sys.version_info.major, sys.version_info.minor) < (3, 5): + print("This example only works with Python 3.5 and greater") + sys.exit(1) + +port = 28332 + +class ZMQHandler(): + def __init__(self): + self.loop = asyncio.get_event_loop() + self.zmqContext = zmq.asyncio.Context() + + self.zmqSubSocket = self.zmqContext.socket(zmq.SUB) + self.zmqSubSocket.setsockopt(zmq.RCVHWM, 0) + self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "hashblock") + self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "hashtx") + self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "rawblock") + self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "rawtx") + self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "sequence") + self.zmqSubSocket.connect("tcp://127.0.0.1:%i" % port) + + async def handle(self) : + topic, body, seq = await self.zmqSubSocket.recv_multipart() + sequence = "Unknown" + if len(seq) == 4: + sequence = str(struct.unpack(' in our config.site, either one of two things needs +# to be true for the build system to work correctly: +# +# 1. If we refer to the program by name (e.g. AR=riscv64-gnu-linux-ar), the +# tool needs to be available in $PATH at all times. +# +# 2. If the tool is _**not**_ expected to be available in $PATH at all times +# (such as is the case for our native_cctools binutils tools), it needs to +# be referred to by its absolute path, such as would be output by the +# AC_PATH_{PROG,TOOL} macros. +# +# Minor note: it is also okay to refer to tools by their absolute path even if +# we expect them to be available in $PATH at all times, more specificity does +# not hurt. 
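+#
+# Purely as an illustration (hypothetical values), either of the following
+# forms would therefore be acceptable for the AR tool:
+#   AR=riscv64-gnu-linux-ar                             (by name; must be on PATH)
+#   AR=/opt/native_cctools/bin/x86_64-apple-darwin-ar   (absolute path; PATH not required)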
$(host_prefix)/share/config.site : config.site.in $(host_prefix)/.stamp_$(final_build_id) - $(AT)@mkdir -p $(@D) - $(AT)sed -e 's|@HOST@|$(host)|' \ - -e 's|@CC@|$(toolchain_path)$(host_CC)|' \ - -e 's|@CXX@|$(toolchain_path)$(host_CXX)|' \ - -e 's|@AR@|$(toolchain_path)$(host_AR)|' \ - -e 's|@RANLIB@|$(toolchain_path)$(host_RANLIB)|' \ - -e 's|@NM@|$(toolchain_path)$(host_NM)|' \ - -e 's|@STRIP@|$(toolchain_path)$(host_STRIP)|' \ + @mkdir -p $(@D) + sed -e 's|@HOST@|$(host)|' \ + -e 's|@CC@|$(host_CC)|' \ + -e 's|@CXX@|$(host_CXX)|' \ + -e 's|@AR@|$(host_AR)|' \ + -e 's|@RANLIB@|$(host_RANLIB)|' \ + -e 's|@NM@|$(host_NM)|' \ + -e 's|@STRIP@|$(host_STRIP)|' \ + -e 's|@OTOOL@|$(host_OTOOL)|' \ + -e 's|@INSTALL_NAME_TOOL@|$(host_INSTALL_NAME_TOOL)|' \ + -e 's|@DSYMUTIL@|$(host_DSYMUTIL)|' \ -e 's|@build_os@|$(build_os)|' \ -e 's|@host_os@|$(host_os)|' \ -e 's|@CFLAGS@|$(strip $(host_CFLAGS) $(host_$(release_type)_CFLAGS))|' \ -e 's|@CXXFLAGS@|$(strip $(host_CXXFLAGS) $(host_$(release_type)_CXXFLAGS))|' \ -e 's|@CPPFLAGS@|$(strip $(host_CPPFLAGS) $(host_$(release_type)_CPPFLAGS))|' \ -e 's|@LDFLAGS@|$(strip $(host_LDFLAGS) $(host_$(release_type)_LDFLAGS))|' \ + -e 's|@allow_host_packages@|$(ALLOW_HOST_PACKAGES)|' \ -e 's|@no_qt@|$(NO_QT)|' \ + -e 's|@no_qr@|$(NO_QR)|' \ + -e 's|@no_zmq@|$(NO_ZMQ)|' \ -e 's|@no_wallet@|$(NO_WALLET)|' \ + -e 's|@no_bdb@|$(NO_BDB)|' \ + -e 's|@no_sqlite@|$(NO_SQLITE)|' \ -e 's|@no_upnp@|$(NO_UPNP)|' \ + -e 's|@no_usdt@|$(NO_USDT)|' \ + -e 's|@no_natpmp@|$(NO_NATPMP)|' \ + -e 's|@multiprocess@|$(MULTIPROCESS)|' \ -e 's|@debug@|$(DEBUG)|' \ $< > $@ - $(AT)touch $@ + touch $@ + + +define check_or_remove_cached + mkdir -p $(BASE_CACHE)/$(host)/$(package) && cd $(BASE_CACHE)/$(host)/$(package); \ + $(build_SHA256SUM) -c $($(package)_cached_checksum) >/dev/null 2>/dev/null || \ + ( rm -f $($(package)_cached_checksum); \ + if test -f "$($(package)_cached)"; then echo "Checksum mismatch for $(package). Forcing rebuild.."; rm -f $($(package)_cached_checksum) $($(package)_cached); fi ) +endef + +define check_or_remove_sources + mkdir -p $($(package)_source_dir); cd $($(package)_source_dir); \ + test -f $($(package)_fetched) && ( $(build_SHA256SUM) -c $($(package)_fetched) >/dev/null 2>/dev/null || \ + ( echo "Checksum missing or mismatched for $(package) source. 
Forcing re-download."; \ + rm -f $($(package)_all_sources) $($(1)_fetched))) || true +endef + +check-packages: + @$(foreach package,$(all_packages),$(call check_or_remove_cached,$(package));) +check-sources: + @$(foreach package,$(all_packages),$(call check_or_remove_sources,$(package));) + +$(host_prefix)/share/config.site: check-packages + +check-packages: check-sources + +clean-all: clean + @rm -rf $(SOURCES_PATH) x86_64* i686* mips* arm* aarch64* powerpc* riscv32* riscv64* s390x* + +clean: + @rm -rf $(WORK_PATH) $(BASE_CACHE) $(BUILD) *.log + +install: check-packages $(host_prefix)/share/config.site + + +download-one: check-sources $(all_sources) + +download-osx: + @$(MAKE) -s HOST=x86_64-apple-darwin download-one +download-linux: + @$(MAKE) -s HOST=x86_64-unknown-linux-gnu download-one +download-win: + @$(MAKE) -s HOST=x86_64-w64-mingw32 download-one +download: download-osx download-linux download-win + +$(foreach package,$(all_packages),$(eval $(call ext_add_stages,$(package)))) -install: $(host_prefix)/share/config.site -download: $(all_sources) -.PHONY: install cached +.PHONY: install cached clean clean-all download-one download-osx download-linux download-win download check-packages check-sources +.PHONY: FORCE +$(V).SILENT: diff --git a/depends/README b/depends/README deleted file mode 100644 index fed2f9b5ab9f3..0000000000000 --- a/depends/README +++ /dev/null @@ -1,55 +0,0 @@ -This is a system of building and caching dependencies necessary for building -Bitcoin. - -There are several features that make it different from most similar systems: - -- It is designed to be builder and host agnostic - -In theory, binaries for any target OS/architecture can be created, from a -builder running any OS/architecture. In practice, build-side tools must be -specified when the defaults don't fit, and packages must be ammended to work -on new hosts. For now, a build architecture of x86_64 is assumed, either on -Linux or OSX. - -- No reliance on timestamps - -File presence is used to determine what needs to be built. This makes the -results distributable and easily digestable by automated builders. - -- Each build only has its specified dependencies available at build-time. - -For each build, the sysroot is wiped and the (recursive) dependencies are -installed. This makes each build deterministic, since there will never be any -unknown files available to cause side-effects. - -- Each package is cached and only rebuilt as needed. - -Before building, a unique build-id is generated for each package. This id -consists of a hash of all files used to build the package (Makefiles, packages, -etc), and as well as a hash of the same data for each recursive dependency. If -any portion of a package's build recipe changes, it will be rebuilt as well as -any other package that depends on it. If any of the main makefiles (Makefile, -funcs.mk, etc) are changed, all packages will be rebuilt. After building, the -results are cached into a tarball that can be re-used and distributed. - -- Package build results are (relatively) deterministic. - -Each package is configured and patched so that it will yield the same -build-results with each consequent build, within a reasonable set of -constraints. Some things like timestamp insertion are unavoidable, and are -beyond the scope of this system. Additionally, the toolchain itself must be -capable of deterministic results. When revisions are properly bumped, a cached -build should represent an exact single payload. 
- -- Sources are fetched and verified automatically - -Each package must define its source location and checksum. The build will fail -if the fetched source does not match. Sources may be pre-seeded and/or cached -as desired. - -- Self-cleaning - -Build and staging dirs are wiped after use, and any previous version of a -cached result is removed following a successful build. Automated builders -should be able to build each revision and store the results with no further -intervention. diff --git a/depends/README.md b/depends/README.md new file mode 100644 index 0000000000000..da2a74e0e7893 --- /dev/null +++ b/depends/README.md @@ -0,0 +1,135 @@ +### Usage + +To build dependencies for the current arch+OS: + + make + +To build for another arch/OS: + + make HOST=host-platform-triplet + +For example: + + make HOST=x86_64-w64-mingw32 -j4 + +**Bitcoin Core's `configure` script by default will ignore the depends output.** In +order for it to pick up libraries, tools, and settings from the depends build, +you must set the `CONFIG_SITE` environment variable to point to a `config.site` settings file. +In the above example, a file named `depends/x86_64-w64-mingw32/share/config.site` will be +created. To use it during compilation: + + CONFIG_SITE=$PWD/depends/x86_64-w64-mingw32/share/config.site ./configure + +The default install prefix when using `config.site` is `--prefix=depends/`, +so depends build outputs will be installed in that location. + +Common `host-platform-triplet`s for cross compilation are: + +- `i686-pc-linux-gnu` for Linux 32 bit +- `x86_64-pc-linux-gnu` for x86 Linux +- `x86_64-w64-mingw32` for Win64 +- `x86_64-apple-darwin` for macOS +- `arm64-apple-darwin` for ARM macOS +- `arm-linux-gnueabihf` for Linux ARM 32 bit +- `aarch64-linux-gnu` for Linux ARM 64 bit +- `powerpc64-linux-gnu` for Linux POWER 64-bit (big endian) +- `powerpc64le-linux-gnu` for Linux POWER 64-bit (little endian) +- `riscv32-linux-gnu` for Linux RISC-V 32 bit +- `riscv64-linux-gnu` for Linux RISC-V 64 bit +- `s390x-linux-gnu` for Linux S390X +- `armv7a-linux-android` for Android ARM 32 bit +- `aarch64-linux-android` for Android ARM 64 bit +- `x86_64-linux-android` for Android x86 64 bit + +The paths are automatically configured and no other options are needed unless targeting [Android](../doc/build-android.md). + +### Install the required dependencies: Ubuntu & Debian + +#### For macOS cross compilation + + sudo apt-get install curl bsdmainutils cmake libz-dev python3-setuptools libtinfo5 xorriso + +Note: You must obtain the macOS SDK before proceeding with a cross-compile. +Under the depends directory, create a subdirectory named `SDKs`. +Then, place the extracted SDK under this new directory. +For more information, see [SDK Extraction](../contrib/macdeploy/README.md#sdk-extraction). 
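+
+Putting those steps together, a macOS cross-build of the depends tree might look
+like the following (an illustrative sketch; the SDK archive name is a placeholder
+for whichever extracted SDK tarball you actually have):
+
+    mkdir -p depends/SDKs
+    tar -C depends/SDKs -xf /path/to/extracted-macOS-SDK.tar.gz
+    make -C depends HOST=x86_64-apple-darwin -j4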
+ +#### For Win64 cross compilation + +- see [build-windows.md](../doc/build-windows.md#cross-compilation-for-ubuntu-and-windows-subsystem-for-linux) + +#### For linux (including i386, ARM) cross compilation + +Common linux dependencies: + + sudo apt-get install make automake cmake curl g++-multilib libtool binutils-gold bsdmainutils pkg-config python3 patch bison + +For linux ARM cross compilation: + + sudo apt-get install g++-arm-linux-gnueabihf binutils-arm-linux-gnueabihf + +For linux AARCH64 cross compilation: + + sudo apt-get install g++-aarch64-linux-gnu binutils-aarch64-linux-gnu + +For linux POWER 64-bit cross compilation (there are no packages for 32-bit): + + sudo apt-get install g++-powerpc64-linux-gnu binutils-powerpc64-linux-gnu g++-powerpc64le-linux-gnu binutils-powerpc64le-linux-gnu + +For linux RISC-V 64-bit cross compilation (there are no packages for 32-bit): + + sudo apt-get install g++-riscv64-linux-gnu binutils-riscv64-linux-gnu + +For linux S390X cross compilation: + + sudo apt-get install g++-s390x-linux-gnu binutils-s390x-linux-gnu + +### Install the required dependencies: OpenBSD + + pkg_add bash gtar + +### Dependency Options + +The following can be set when running make: `make FOO=bar` + +- `SOURCES_PATH`: Downloaded sources will be placed here +- `BASE_CACHE`: Built packages will be placed here +- `SDK_PATH`: Path where SDKs can be found (used by macOS) +- `FALLBACK_DOWNLOAD_PATH`: If a source file can't be fetched, try here before giving up +- `NO_QT`: Don't download/build/cache Qt and its dependencies +- `NO_QR`: Don't download/build/cache packages needed for enabling qrencode +- `NO_ZMQ`: Don't download/build/cache packages needed for enabling ZeroMQ +- `NO_WALLET`: Don't download/build/cache libs needed to enable the wallet +- `NO_BDB`: Don't download/build/cache BerkeleyDB +- `NO_SQLITE`: Don't download/build/cache SQLite +- `NO_UPNP`: Don't download/build/cache packages needed for enabling UPnP +- `NO_NATPMP`: Don't download/build/cache packages needed for enabling NAT-PMP +- `ALLOW_HOST_PACKAGES`: Packages that are missed in dependencies (due to `NO_*` option or + build script logic) are searched for among the host system packages using + `pkg-config`. It allows building with packages of other (newer) versions +- `MULTIPROCESS`: Build libmultiprocess (experimental, requires CMake) +- `DEBUG`: Disable some optimizations and enable more runtime checking +- `HOST_ID_SALT`: Optional salt to use when generating host package ids +- `BUILD_ID_SALT`: Optional salt to use when generating build package ids +- `FORCE_USE_SYSTEM_CLANG`: (EXPERTS ONLY) When cross-compiling for macOS, use Clang found in the + system's `$PATH` rather than the default prebuilt release of Clang + from llvm.org. Clang 8 or later is required +- `LOG`: Use file-based logging for individual packages. During a package build its log file + resides in the `depends` directory, and the log file is printed out automatically in case + of build error. After successful build log files are moved along with package archives + +If some packages are not built, for example `make NO_WALLET=1`, the appropriate +options will be passed to bitcoin's configure. In this case, `--disable-wallet`. 
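+
+For example, a build that skips Qt and the wallet, followed by a configure run
+that picks up the resulting settings, could look like this (an illustrative
+sketch; substitute the host triplet you are actually targeting):
+
+    make -C depends HOST=x86_64-pc-linux-gnu NO_QT=1 NO_WALLET=1 -j4
+    CONFIG_SITE=$PWD/depends/x86_64-pc-linux-gnu/share/config.site ./configure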
+ +### Additional targets + + download: run 'make download' to fetch all sources without building them + download-osx: run 'make download-osx' to fetch all sources needed for macOS builds + download-win: run 'make download-win' to fetch all sources needed for win builds + download-linux: run 'make download-linux' to fetch all sources needed for linux builds + + +### Other documentation + +- [description.md](description.md): General description of the depends system +- [packages.md](packages.md): Steps for adding packages diff --git a/depends/README.packages b/depends/README.packages deleted file mode 100644 index 5ab7ed7dee006..0000000000000 --- a/depends/README.packages +++ /dev/null @@ -1,128 +0,0 @@ -Each recipe consists of 3 main parts: defining identifiers, setting build -variables, and defining build commands. - -The package "mylib" will be used here as an example - -General tips: -mylib_foo is written as $(package)_foo in order to make recipes more similar. - -Identifiers: -Each package is required to define at least these variables: - $(package)_version: - Version of the upstream library or program. If there is no version, a - placeholder such as 1.0 can be used. - $(package)_download_path: - Location of the upstream source, without the file-name. Usually http or - ftp. - $(package)_file_name: - The upstream source filename available at the download path. - $(package)_sha256_hash: - The sha256 hash of the upstream file - -These variables are optional: - $(package)_build_subdir: - cd to this dir before running configure/build/stage commands. - $(package)_download_file: - The file-name of the upstream source if it differs from how it should be - stored locally. This can be used to avoid storing file-names with strange - characters. - $(package)_dependencies: - Names of any other packages that this one depends on. - $(package)_patches: - Filenames of any patches needed to build the package - - -Build Variables: -After defining the main identifiers, build variables may be added or customized -before running the build commands. They should be added to a function called -$(package)_set_vars. For example: - -define $(package)_set_vars -... -endef - -Most variables can be prefixed with the host, architecture, or both, to make -the modifications specific to that case. For example: - - Universal: $(package)_cc=gcc - Linux only: $(package)_linux_cc=gcc - x86_64 only: $(package)_x86_64_cc = gcc - x86_64 linux only: $(package)_x86_64_linux_cc = gcc - -These variables may be set to override or append their default values. - $(package)_cc - $(package)_cxx - $(package)_objc - $(package)_objcxx - $(package)_ar - $(package)_ranlib - $(package)_libtool - $(package)_nm - $(package)_cflags - $(package)_cxxflags - $(package)_ldflags - $(package)_cppflags - $(package)_config_env - $(package)_build_env - $(package)_stage_env - $(package)_build_opts - $(package)_config_opts - -The *_env variables are used to add environment variables to the respective -commands. - -Many variables respect a debug/release suffix as well, in order to use them for -only the appropriate build config. For example: - $(package)_cflags_release = -O3 - $(package)_cflags_i686_debug = -g - $(package)_config_opts_release = --disable-debug - -These will be used in addition to the options that do not specify -debug/release. All builds are considered to be release unless DEBUG=1 is set by -the user. - -Other variables may be defined as needed. - -Build commands: - - For each build, a unique build dir and staging dir are created. 
For example, - work/build/mylib/1.0-1adac830f6e and work/staging/mylib/1.0-1adac830f6e. - - The following build commands are available for each recipe: - - $(package)_fetch_cmds: - Runs from: build dir - Fetch the source file. If undefined, it will be fetched and verified - against its hash. - $(package)_extract_cmds: - Runs from: build dir - Verify the source file against its hash and extract it. If undefined, the - source is assumed to be a tarball. - $(package)_preprocess_cmds: - Runs from: build dir/$(package)_build_subdir - Preprocess the source as necessary. If undefined, does nothing. - $(package)_config_cmds: - Runs from: build dir/$(package)_build_subdir - Configure the source. If undefined, does nothing. - $(package)_build_cmds: - Runs from: build dir/$(package)_build_subdir - Build the source. If undefined, does nothing. - $(package)_stage_cmds: - Runs from: build dir/$(package)_build_subdir - Stage the build results. If undefined, does nothing. - - The following variables are available for each recipe: - $(1)_staging_dir: package's destination sysroot path - $(1)_staging_prefix_dir: prefix path inside of the package's staging dir - $(1)_extract_dir: path to the package's extracted sources - $(1)_build_dir: path where configure/build/stage commands will be run - $(1)_patch_dir: path where the package's patches (if any) are found - -Notes on build commands: - -For packages built with autotools, $($(package)_autoconf) can be used in the -configure step to (usually) correctly configure automatically. Any -$($(package)_config_opts) will be appended. - -Most autotools projects can be properly staged using: - $(MAKE) DESTDIR=$($(package)_staging_dir) install diff --git a/depends/README.usage b/depends/README.usage deleted file mode 100644 index e768feecf76a5..0000000000000 --- a/depends/README.usage +++ /dev/null @@ -1,32 +0,0 @@ -To build dependencies for the current arch+OS: - make -To build for another arch/OS: - make HOST=host-platform-triplet && make HOST=host-platform-triplet - (For example: make HOST=i686-w64-mingw32 -j4) - -A prefix will be generated that's suitable for plugging into Bitcoin's -configure. In the above example, a dir named i686-w64-mingw32 will be -created. To use it for Bitcoin: - -./configure --prefix=`pwd`/depends/i686-w64-mingw32 - -No other options are needed, the paths are automatically configured. - -Dependency Options: -The following can be set when running make: make FOO=bar - -SOURCES_PATH: downloaded sources will be placed here -BASE_CACHE: built packages will be placed here -SDK_PATH: Path where sdk's can be found (used by OSX) -FALLBACK_DOWNLOAD_PATH: If a source file can't be fetched, try here before giving up -NO_QT: Don't download/build/cache qt and its dependencies -NO_WALLET: Don't download/build/cache libs needed to enable the wallet -NO_UPNP: Don't download/build/cache packages needed for enabling upnp -DEBUG: disable some optimizations and enable more runtime checking -USE_LINUX_STATIC_QT5: Build a static qt5 rather than shared qt4. Linux only. - -If some packages are not built, for example 'make NO_WALLET=1', the appropriate -options will be passed to bitcoin's configure. In this case, --disable-wallet. 
- -Additional targets: -download: run 'make download' to fetch sources without building them diff --git a/depends/builders/darwin.mk b/depends/builders/darwin.mk index b366460e64bc1..8ed82b276df9c 100644 --- a/depends/builders/darwin.mk +++ b/depends/builders/darwin.mk @@ -1,17 +1,18 @@ -build_darwin_CC: = $(shell xcrun -f clang) -build_darwin_CXX: = $(shell xcrun -f clang++) -build_darwin_AR: = $(shell xcrun -f ar) -build_darwin_RANLIB: = $(shell xcrun -f ranlib) -build_darwin_STRIP: = $(shell xcrun -f strip) -build_darwin_OTOOL: = $(shell xcrun -f otool) -build_darwin_NM: = $(shell xcrun -f nm) +build_darwin_CC:=$(shell xcrun -f clang) -isysroot$(shell xcrun --show-sdk-path) +build_darwin_CXX:=$(shell xcrun -f clang++) -isysroot$(shell xcrun --show-sdk-path) +build_darwin_AR:=$(shell xcrun -f ar) +build_darwin_RANLIB:=$(shell xcrun -f ranlib) +build_darwin_STRIP:=$(shell xcrun -f strip) +build_darwin_OTOOL:=$(shell xcrun -f otool) +build_darwin_NM:=$(shell xcrun -f nm) build_darwin_INSTALL_NAME_TOOL:=$(shell xcrun -f install_name_tool) -build_darwin_SHA256SUM = shasum -a 256 -build_darwin_DOWNLOAD = curl --connect-timeout $(DOWNLOAD_CONNECT_TIMEOUT) --retry $(DOWNLOAD_RETRIES) -L -o +build_darwin_DSYMUTIL:=$(shell xcrun -f dsymutil) +build_darwin_SHA256SUM=shasum -a 256 +build_darwin_DOWNLOAD=curl --location --fail --connect-timeout $(DOWNLOAD_CONNECT_TIMEOUT) --retry $(DOWNLOAD_RETRIES) -o #darwin host on darwin builder. overrides darwin host preferences. -darwin_CC=$(shell xcrun -f clang) -mmacosx-version-min=$(OSX_MIN_VERSION) -darwin_CXX:=$(shell xcrun -f clang++) -mmacosx-version-min=$(OSX_MIN_VERSION) +darwin_CC=$(shell xcrun -f clang) -mmacosx-version-min=$(OSX_MIN_VERSION) -isysroot$(shell xcrun --show-sdk-path) +darwin_CXX:=$(shell xcrun -f clang++) -mmacosx-version-min=$(OSX_MIN_VERSION) -stdlib=libc++ -isysroot$(shell xcrun --show-sdk-path) darwin_AR:=$(shell xcrun -f ar) darwin_RANLIB:=$(shell xcrun -f ranlib) darwin_STRIP:=$(shell xcrun -f strip) @@ -19,4 +20,11 @@ darwin_LIBTOOL:=$(shell xcrun -f libtool) darwin_OTOOL:=$(shell xcrun -f otool) darwin_NM:=$(shell xcrun -f nm) darwin_INSTALL_NAME_TOOL:=$(shell xcrun -f install_name_tool) +darwin_DSYMUTIL:=$(shell xcrun -f dsymutil) +darwin_native_binutils= darwin_native_toolchain= + +x86_64_darwin_CFLAGS += -arch x86_64 +x86_64_darwin_CXXFLAGS += -arch x86_64 +aarch64_darwin_CFLAGS += -arch arm64 +aarch64_darwin_CXXFLAGS += -arch arm64 diff --git a/depends/builders/default.mk b/depends/builders/default.mk index f097db65d603d..cc6dec66c2ba0 100644 --- a/depends/builders/default.mk +++ b/depends/builders/default.mk @@ -1,18 +1,17 @@ default_build_CC = gcc default_build_CXX = g++ default_build_AR = ar +default_build_TAR = tar default_build_RANLIB = ranlib default_build_STRIP = strip default_build_NM = nm -default_build_OTOOL = otool -default_build_INSTALL_NAME_TOOL = install_name_tool define add_build_tool_func build_$(build_os)_$1 ?= $$(default_build_$1) build_$(build_arch)_$(build_os)_$1 ?= $$(build_$(build_os)_$1) build_$1=$$(build_$(build_arch)_$(build_os)_$1) endef -$(foreach var,CC CXX AR RANLIB NM STRIP SHA256SUM DOWNLOAD OTOOL INSTALL_NAME_TOOL,$(eval $(call add_build_tool_func,$(var)))) +$(foreach var,CC CXX AR TAR RANLIB NM STRIP SHA256SUM DOWNLOAD OTOOL INSTALL_NAME_TOOL DSYMUTIL,$(eval $(call add_build_tool_func,$(var)))) define add_build_flags_func build_$(build_arch)_$(build_os)_$1 += $(build_$(build_os)_$1) build_$1=$$(build_$(build_arch)_$(build_os)_$1) diff --git a/depends/builders/freebsd.mk 
b/depends/builders/freebsd.mk new file mode 100644 index 0000000000000..465f58e04dc7f --- /dev/null +++ b/depends/builders/freebsd.mk @@ -0,0 +1,5 @@ +build_freebsd_CC=clang +build_freebsd_CXX=clang++ + +build_freebsd_SHA256SUM = shasum -a 256 +build_freebsd_DOWNLOAD = curl --location --fail --connect-timeout $(DOWNLOAD_CONNECT_TIMEOUT) --retry $(DOWNLOAD_RETRIES) -o diff --git a/depends/builders/linux.mk b/depends/builders/linux.mk index 98d0e9de348fc..b03f42401047a 100644 --- a/depends/builders/linux.mk +++ b/depends/builders/linux.mk @@ -1,2 +1,2 @@ build_linux_SHA256SUM = sha256sum -build_linux_DOWNLOAD = wget --timeout=$(DOWNLOAD_CONNECT_TIMEOUT) --tries=$(DOWNLOAD_RETRIES) -nv -O +build_linux_DOWNLOAD = curl --location --fail --connect-timeout $(DOWNLOAD_CONNECT_TIMEOUT) --retry $(DOWNLOAD_RETRIES) -o diff --git a/depends/builders/netbsd.mk b/depends/builders/netbsd.mk new file mode 100644 index 0000000000000..b7cf1f7514189 --- /dev/null +++ b/depends/builders/netbsd.mk @@ -0,0 +1,2 @@ +build_netbsd_SHA256SUM = shasum -a 256 +build_netbsd_DOWNLOAD = curl --location --fail --connect-timeout $(DOWNLOAD_CONNECT_TIMEOUT) --retry $(DOWNLOAD_RETRIES) -o diff --git a/depends/builders/openbsd.mk b/depends/builders/openbsd.mk new file mode 100644 index 0000000000000..44825d106acf6 --- /dev/null +++ b/depends/builders/openbsd.mk @@ -0,0 +1,7 @@ +build_openbsd_CC = clang +build_openbsd_CXX = clang++ + +build_openbsd_SHA256SUM = sha256 +build_openbsd_DOWNLOAD = curl --location --fail --connect-timeout $(DOWNLOAD_CONNECT_TIMEOUT) --retry $(DOWNLOAD_RETRIES) -o + +build_openbsd_TAR = gtar diff --git a/depends/config.guess b/depends/config.guess index 1f5c50c0d1529..dc0a6b29976a9 100755 --- a/depends/config.guess +++ b/depends/config.guess @@ -1,8 +1,8 @@ #! /bin/sh # Attempt to guess a canonical system name. -# Copyright 1992-2014 Free Software Foundation, Inc. +# Copyright 1992-2021 Free Software Foundation, Inc. -timestamp='2014-03-23' +timestamp='2021-05-24' # This file is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by @@ -15,7 +15,7 @@ timestamp='2014-03-23' # General Public License for more details. # # You should have received a copy of the GNU General Public License -# along with this program; if not, see . +# along with this program; if not, see . # # As a special exception to the GNU General Public License, if you # distribute this file as part of a program that contains a @@ -24,22 +24,22 @@ timestamp='2014-03-23' # program. This Exception is an additional permission under section 7 # of the GNU General Public License, version 3 ("GPLv3"). # -# Originally written by Per Bothner. +# Originally written by Per Bothner; maintained since 2000 by Ben Elliston. # # You can get the latest version of this script from: -# http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess;hb=HEAD +# https://git.savannah.gnu.org/cgit/config.git/plain/config.guess # -# Please send patches with a ChangeLog entry to config-patches@gnu.org. +# Please send patches to . -me=`echo "$0" | sed -e 's,.*/,,'` +me=$(echo "$0" | sed -e 's,.*/,,') usage="\ Usage: $0 [OPTION] Output the configuration name of the system \`$me' is run on. -Operation modes: +Options: -h, --help print this help, then exit -t, --time-stamp print date of last modification, then exit -v, --version print version number, then exit @@ -50,7 +50,7 @@ version="\ GNU config.guess ($timestamp) Originally written by Per Bothner. 
-Copyright 1992-2014 Free Software Foundation, Inc. +Copyright 1992-2021 Free Software Foundation, Inc. This is free software; see the source for copying conditions. There is NO warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE." @@ -84,8 +84,6 @@ if test $# != 0; then exit 1 fi -trap 'exit 1' 1 2 15 - # CC_FOR_BUILD -- compiler used by this script. Note that the use of a # compiler to aid in system detection is discouraged as it requires # temporary files to be created and, as you can see below, it is a @@ -96,66 +94,89 @@ trap 'exit 1' 1 2 15 # Portable tmp directory creation inspired by the Autoconf team. -set_cc_for_build=' -trap "exitcode=\$?; (rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null) && exit \$exitcode" 0 ; -trap "rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null; exit 1" 1 2 13 15 ; -: ${TMPDIR=/tmp} ; - { tmp=`(umask 077 && mktemp -d "$TMPDIR/cgXXXXXX") 2>/dev/null` && test -n "$tmp" && test -d "$tmp" ; } || - { test -n "$RANDOM" && tmp=$TMPDIR/cg$$-$RANDOM && (umask 077 && mkdir $tmp) ; } || - { tmp=$TMPDIR/cg-$$ && (umask 077 && mkdir $tmp) && echo "Warning: creating insecure temp directory" >&2 ; } || - { echo "$me: cannot create a temporary directory in $TMPDIR" >&2 ; exit 1 ; } ; -dummy=$tmp/dummy ; -tmpfiles="$dummy.c $dummy.o $dummy.rel $dummy" ; -case $CC_FOR_BUILD,$HOST_CC,$CC in - ,,) echo "int x;" > $dummy.c ; - for c in cc gcc c89 c99 ; do - if ($c -c -o $dummy.o $dummy.c) >/dev/null 2>&1 ; then - CC_FOR_BUILD="$c"; break ; - fi ; - done ; - if test x"$CC_FOR_BUILD" = x ; then - CC_FOR_BUILD=no_compiler_found ; - fi - ;; - ,,*) CC_FOR_BUILD=$CC ;; - ,*,*) CC_FOR_BUILD=$HOST_CC ;; -esac ; set_cc_for_build= ;' +tmp= +# shellcheck disable=SC2172 +trap 'test -z "$tmp" || rm -fr "$tmp"' 0 1 2 13 15 + +set_cc_for_build() { + # prevent multiple calls if $tmp is already set + test "$tmp" && return 0 + : "${TMPDIR=/tmp}" + # shellcheck disable=SC2039 + { tmp=$( (umask 077 && mktemp -d "$TMPDIR/cgXXXXXX") 2>/dev/null) && test -n "$tmp" && test -d "$tmp" ; } || + { test -n "$RANDOM" && tmp=$TMPDIR/cg$$-$RANDOM && (umask 077 && mkdir "$tmp" 2>/dev/null) ; } || + { tmp=$TMPDIR/cg-$$ && (umask 077 && mkdir "$tmp" 2>/dev/null) && echo "Warning: creating insecure temp directory" >&2 ; } || + { echo "$me: cannot create a temporary directory in $TMPDIR" >&2 ; exit 1 ; } + dummy=$tmp/dummy + case ${CC_FOR_BUILD-},${HOST_CC-},${CC-} in + ,,) echo "int x;" > "$dummy.c" + for driver in cc gcc c89 c99 ; do + if ($driver -c -o "$dummy.o" "$dummy.c") >/dev/null 2>&1 ; then + CC_FOR_BUILD="$driver" + break + fi + done + if test x"$CC_FOR_BUILD" = x ; then + CC_FOR_BUILD=no_compiler_found + fi + ;; + ,,*) CC_FOR_BUILD=$CC ;; + ,*,*) CC_FOR_BUILD=$HOST_CC ;; + esac +} # This is needed to find uname on a Pyramid OSx when run in the BSD universe. 
# (ghazi@noc.rutgers.edu 1994-08-24) -if (test -f /.attbin/uname) >/dev/null 2>&1 ; then +if test -f /.attbin/uname ; then PATH=$PATH:/.attbin ; export PATH fi -UNAME_MACHINE=`(uname -m) 2>/dev/null` || UNAME_MACHINE=unknown -UNAME_RELEASE=`(uname -r) 2>/dev/null` || UNAME_RELEASE=unknown -UNAME_SYSTEM=`(uname -s) 2>/dev/null` || UNAME_SYSTEM=unknown -UNAME_VERSION=`(uname -v) 2>/dev/null` || UNAME_VERSION=unknown +UNAME_MACHINE=$( (uname -m) 2>/dev/null) || UNAME_MACHINE=unknown +UNAME_RELEASE=$( (uname -r) 2>/dev/null) || UNAME_RELEASE=unknown +UNAME_SYSTEM=$( (uname -s) 2>/dev/null) || UNAME_SYSTEM=unknown +UNAME_VERSION=$( (uname -v) 2>/dev/null) || UNAME_VERSION=unknown -case "${UNAME_SYSTEM}" in +case $UNAME_SYSTEM in Linux|GNU|GNU/*) - # If the system lacks a compiler, then just pick glibc. - # We could probably try harder. - LIBC=gnu + LIBC=unknown - eval $set_cc_for_build - cat <<-EOF > $dummy.c + set_cc_for_build + cat <<-EOF > "$dummy.c" #include <features.h> #if defined(__UCLIBC__) LIBC=uclibc #elif defined(__dietlibc__) LIBC=dietlibc - #else + #elif defined(__GLIBC__) LIBC=gnu + #else + #include <stdarg.h> + /* First heuristic to detect musl libc. */ + #ifdef __DEFINED_va_list + LIBC=musl + #endif #endif EOF - eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep '^LIBC' | sed 's, ,,g'` + eval "$($CC_FOR_BUILD -E "$dummy.c" 2>/dev/null | grep '^LIBC' | sed 's, ,,g')" + + # Second heuristic to detect musl libc. + if [ "$LIBC" = unknown ] && + command -v ldd >/dev/null && + ldd --version 2>&1 | grep -q ^musl; then + LIBC=musl + fi + + # If the system lacks a compiler, then just pick glibc. + # We could probably try harder. + if [ "$LIBC" = unknown ]; then + LIBC=gnu + fi ;; esac # Note: order is significant - the case branches are not exclusive. -case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in +case $UNAME_MACHINE:$UNAME_SYSTEM:$UNAME_RELEASE:$UNAME_VERSION in *:NetBSD:*:*) # NetBSD (nbsd) targets should (where applicable) match one or # more of the tuples: *-*-netbsdelf*, *-*-netbsdaout*, @@ -167,22 +188,32 @@ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in # # Note: NetBSD doesn't particularly care about the vendor # portion of the name. We always set it to "unknown". - sysctl="sysctl -n hw.machine_arch" - UNAME_MACHINE_ARCH=`(/sbin/$sysctl 2>/dev/null || \ - /usr/sbin/$sysctl 2>/dev/null || echo unknown)` - case "${UNAME_MACHINE_ARCH}" in + UNAME_MACHINE_ARCH=$( (uname -p 2>/dev/null || \ + /sbin/sysctl -n hw.machine_arch 2>/dev/null || \ + /usr/sbin/sysctl -n hw.machine_arch 2>/dev/null || \ + echo unknown)) + case $UNAME_MACHINE_ARCH in + aarch64eb) machine=aarch64_be-unknown ;; armeb) machine=armeb-unknown ;; arm*) machine=arm-unknown ;; sh3el) machine=shl-unknown ;; sh3eb) machine=sh-unknown ;; sh5el) machine=sh5le-unknown ;; - *) machine=${UNAME_MACHINE_ARCH}-unknown ;; + earmv*) + arch=$(echo "$UNAME_MACHINE_ARCH" | sed -e 's,^e\(armv[0-9]\).*$,\1,') + endian=$(echo "$UNAME_MACHINE_ARCH" | sed -ne 's,^.*\(eb\)$,\1,p') + machine="${arch}${endian}"-unknown + ;; + *) machine="$UNAME_MACHINE_ARCH"-unknown ;; esac # The Operating System including object format, if it has switched - # to ELF recently, or will in the future. - case "${UNAME_MACHINE_ARCH}" in + # to ELF recently (or will in the future) and ABI.
+ case $UNAME_MACHINE_ARCH in + earm*) + os=netbsdelf + ;; arm*|i386|m68k|ns32k|sh3*|sparc|vax) - eval $set_cc_for_build + set_cc_for_build if echo __ELF__ | $CC_FOR_BUILD -E - 2>/dev/null \ | grep -q __ELF__ then @@ -197,117 +228,140 @@ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in os=netbsd ;; esac + # Determine ABI tags. + case $UNAME_MACHINE_ARCH in + earm*) + expr='s/^earmv[0-9]/-eabi/;s/eb$//' + abi=$(echo "$UNAME_MACHINE_ARCH" | sed -e "$expr") + ;; + esac # The OS release # Debian GNU/NetBSD machines have a different userland, and # thus, need a distinct triplet. However, they do not need # kernel version information, so it can be replaced with a # suitable tag, in the style of linux-gnu. - case "${UNAME_VERSION}" in + case $UNAME_VERSION in Debian*) release='-gnu' ;; *) - release=`echo ${UNAME_RELEASE}|sed -e 's/[-_].*/\./'` + release=$(echo "$UNAME_RELEASE" | sed -e 's/[-_].*//' | cut -d. -f1,2) ;; esac # Since CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM: # contains redundant information, the shorter form: # CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM is used. - echo "${machine}-${os}${release}" + echo "$machine-${os}${release}${abi-}" exit ;; *:Bitrig:*:*) - UNAME_MACHINE_ARCH=`arch | sed 's/Bitrig.//'` - echo ${UNAME_MACHINE_ARCH}-unknown-bitrig${UNAME_RELEASE} + UNAME_MACHINE_ARCH=$(arch | sed 's/Bitrig.//') + echo "$UNAME_MACHINE_ARCH"-unknown-bitrig"$UNAME_RELEASE" exit ;; *:OpenBSD:*:*) - UNAME_MACHINE_ARCH=`arch | sed 's/OpenBSD.//'` - echo ${UNAME_MACHINE_ARCH}-unknown-openbsd${UNAME_RELEASE} + UNAME_MACHINE_ARCH=$(arch | sed 's/OpenBSD.//') + echo "$UNAME_MACHINE_ARCH"-unknown-openbsd"$UNAME_RELEASE" + exit ;; + *:SecBSD:*:*) + UNAME_MACHINE_ARCH=$(arch | sed 's/SecBSD.//') + echo "$UNAME_MACHINE_ARCH"-unknown-secbsd"$UNAME_RELEASE" + exit ;; + *:LibertyBSD:*:*) + UNAME_MACHINE_ARCH=$(arch | sed 's/^.*BSD\.//') + echo "$UNAME_MACHINE_ARCH"-unknown-libertybsd"$UNAME_RELEASE" + exit ;; + *:MidnightBSD:*:*) + echo "$UNAME_MACHINE"-unknown-midnightbsd"$UNAME_RELEASE" exit ;; *:ekkoBSD:*:*) - echo ${UNAME_MACHINE}-unknown-ekkobsd${UNAME_RELEASE} + echo "$UNAME_MACHINE"-unknown-ekkobsd"$UNAME_RELEASE" exit ;; *:SolidBSD:*:*) - echo ${UNAME_MACHINE}-unknown-solidbsd${UNAME_RELEASE} + echo "$UNAME_MACHINE"-unknown-solidbsd"$UNAME_RELEASE" + exit ;; + *:OS108:*:*) + echo "$UNAME_MACHINE"-unknown-os108_"$UNAME_RELEASE" exit ;; macppc:MirBSD:*:*) - echo powerpc-unknown-mirbsd${UNAME_RELEASE} + echo powerpc-unknown-mirbsd"$UNAME_RELEASE" exit ;; *:MirBSD:*:*) - echo ${UNAME_MACHINE}-unknown-mirbsd${UNAME_RELEASE} + echo "$UNAME_MACHINE"-unknown-mirbsd"$UNAME_RELEASE" + exit ;; + *:Sortix:*:*) + echo "$UNAME_MACHINE"-unknown-sortix + exit ;; + *:Twizzler:*:*) + echo "$UNAME_MACHINE"-unknown-twizzler + exit ;; + *:Redox:*:*) + echo "$UNAME_MACHINE"-unknown-redox + exit ;; + mips:OSF1:*.*) + echo mips-dec-osf1 exit ;; alpha:OSF1:*:*) + # Reset EXIT trap before exiting to avoid spurious non-zero exit code. + trap '' 0 case $UNAME_RELEASE in *4.0) - UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $3}'` + UNAME_RELEASE=$(/usr/sbin/sizer -v | awk '{print $3}') ;; *5.*) - UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $4}'` + UNAME_RELEASE=$(/usr/sbin/sizer -v | awk '{print $4}') ;; esac # According to Compaq, /usr/sbin/psrinfo has been available on # OSF/1 and Tru64 systems produced since 1995. I hope that # covers most systems running today. This code pipes the CPU # types through head -n 1, so we only detect the type of CPU 0. 
- ALPHA_CPU_TYPE=`/usr/sbin/psrinfo -v | sed -n -e 's/^ The alpha \(.*\) processor.*$/\1/p' | head -n 1` - case "$ALPHA_CPU_TYPE" in + ALPHA_CPU_TYPE=$(/usr/sbin/psrinfo -v | sed -n -e 's/^ The alpha \(.*\) processor.*$/\1/p' | head -n 1) + case $ALPHA_CPU_TYPE in "EV4 (21064)") - UNAME_MACHINE="alpha" ;; + UNAME_MACHINE=alpha ;; "EV4.5 (21064)") - UNAME_MACHINE="alpha" ;; + UNAME_MACHINE=alpha ;; "LCA4 (21066/21068)") - UNAME_MACHINE="alpha" ;; + UNAME_MACHINE=alpha ;; "EV5 (21164)") - UNAME_MACHINE="alphaev5" ;; + UNAME_MACHINE=alphaev5 ;; "EV5.6 (21164A)") - UNAME_MACHINE="alphaev56" ;; + UNAME_MACHINE=alphaev56 ;; "EV5.6 (21164PC)") - UNAME_MACHINE="alphapca56" ;; + UNAME_MACHINE=alphapca56 ;; "EV5.7 (21164PC)") - UNAME_MACHINE="alphapca57" ;; + UNAME_MACHINE=alphapca57 ;; "EV6 (21264)") - UNAME_MACHINE="alphaev6" ;; + UNAME_MACHINE=alphaev6 ;; "EV6.7 (21264A)") - UNAME_MACHINE="alphaev67" ;; + UNAME_MACHINE=alphaev67 ;; "EV6.8CB (21264C)") - UNAME_MACHINE="alphaev68" ;; + UNAME_MACHINE=alphaev68 ;; "EV6.8AL (21264B)") - UNAME_MACHINE="alphaev68" ;; + UNAME_MACHINE=alphaev68 ;; "EV6.8CX (21264D)") - UNAME_MACHINE="alphaev68" ;; + UNAME_MACHINE=alphaev68 ;; "EV6.9A (21264/EV69A)") - UNAME_MACHINE="alphaev69" ;; + UNAME_MACHINE=alphaev69 ;; "EV7 (21364)") - UNAME_MACHINE="alphaev7" ;; + UNAME_MACHINE=alphaev7 ;; "EV7.9 (21364A)") - UNAME_MACHINE="alphaev79" ;; + UNAME_MACHINE=alphaev79 ;; esac # A Pn.n version is a patched version. # A Vn.n version is a released version. # A Tn.n version is a released field test version. # A Xn.n version is an unreleased experimental baselevel. # 1.2 uses "1.2" for uname -r. - echo ${UNAME_MACHINE}-dec-osf`echo ${UNAME_RELEASE} | sed -e 's/^[PVTX]//' | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'` - # Reset EXIT trap before exiting to avoid spurious non-zero exit code. - exitcode=$? - trap '' 0 - exit $exitcode ;; - Alpha\ *:Windows_NT*:*) - # How do we know it's Interix rather than the generic POSIX subsystem? - # Should we change UNAME_MACHINE based on the output of uname instead - # of the specific Alpha model? - echo alpha-pc-interix - exit ;; - 21064:Windows_NT:50:3) - echo alpha-dec-winnt3.5 + echo "$UNAME_MACHINE"-dec-osf"$(echo "$UNAME_RELEASE" | sed -e 's/^[PVTX]//' | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz)" exit ;; Amiga*:UNIX_System_V:4.0:*) echo m68k-unknown-sysv4 exit ;; *:[Aa]miga[Oo][Ss]:*:*) - echo ${UNAME_MACHINE}-unknown-amigaos + echo "$UNAME_MACHINE"-unknown-amigaos exit ;; *:[Mm]orph[Oo][Ss]:*:*) - echo ${UNAME_MACHINE}-unknown-morphos + echo "$UNAME_MACHINE"-unknown-morphos exit ;; *:OS/390:*:*) echo i370-ibm-openedition @@ -319,7 +373,7 @@ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in echo powerpc-ibm-os400 exit ;; arm:RISC*:1.[012]*:*|arm:riscix:1.[012]*:*) - echo arm-acorn-riscix${UNAME_RELEASE} + echo arm-acorn-riscix"$UNAME_RELEASE" exit ;; arm*:riscos:*:*|arm*:RISCOS:*:*) echo arm-unknown-riscos @@ -329,7 +383,7 @@ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in exit ;; Pyramid*:OSx*:*:* | MIS*:OSx*:*:* | MIS*:SMP_DC-OSx*:*:*) # akee@wpdis03.wpafb.af.mil (Earle F. Ake) contributed MIS and NILE. 
- if test "`(/bin/universe) 2>/dev/null`" = att ; then + if test "$( (/bin/universe) 2>/dev/null)" = att ; then echo pyramid-pyramid-sysv3 else echo pyramid-pyramid-bsd @@ -342,69 +396,69 @@ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in echo sparc-icl-nx6 exit ;; DRS?6000:UNIX_SV:4.2*:7* | DRS?6000:isis:4.2*:7*) - case `/usr/bin/uname -p` in + case $(/usr/bin/uname -p) in sparc) echo sparc-icl-nx7; exit ;; esac ;; s390x:SunOS:*:*) - echo ${UNAME_MACHINE}-ibm-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + echo "$UNAME_MACHINE"-ibm-solaris2"$(echo "$UNAME_RELEASE" | sed -e 's/[^.]*//')" exit ;; sun4H:SunOS:5.*:*) - echo sparc-hal-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + echo sparc-hal-solaris2"$(echo "$UNAME_RELEASE"|sed -e 's/[^.]*//')" exit ;; sun4*:SunOS:5.*:* | tadpole*:SunOS:5.*:*) - echo sparc-sun-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + echo sparc-sun-solaris2"$(echo "$UNAME_RELEASE" | sed -e 's/[^.]*//')" exit ;; i86pc:AuroraUX:5.*:* | i86xen:AuroraUX:5.*:*) - echo i386-pc-auroraux${UNAME_RELEASE} + echo i386-pc-auroraux"$UNAME_RELEASE" exit ;; i86pc:SunOS:5.*:* | i86xen:SunOS:5.*:*) - eval $set_cc_for_build - SUN_ARCH="i386" + set_cc_for_build + SUN_ARCH=i386 # If there is a compiler, see if it is configured for 64-bit objects. # Note that the Sun cc does not turn __LP64__ into 1 like gcc does. # This test works for both compilers. - if [ "$CC_FOR_BUILD" != 'no_compiler_found' ]; then + if test "$CC_FOR_BUILD" != no_compiler_found; then if (echo '#ifdef __amd64'; echo IS_64BIT_ARCH; echo '#endif') | \ - (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | \ + (CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) | \ grep IS_64BIT_ARCH >/dev/null then - SUN_ARCH="x86_64" + SUN_ARCH=x86_64 fi fi - echo ${SUN_ARCH}-pc-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + echo "$SUN_ARCH"-pc-solaris2"$(echo "$UNAME_RELEASE"|sed -e 's/[^.]*//')" exit ;; sun4*:SunOS:6*:*) # According to config.sub, this is the proper way to canonicalize # SunOS6. Hard to guess exactly what SunOS6 will be like, but # it's likely to be more like Solaris than SunOS4. - echo sparc-sun-solaris3`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + echo sparc-sun-solaris3"$(echo "$UNAME_RELEASE"|sed -e 's/[^.]*//')" exit ;; sun4*:SunOS:*:*) - case "`/usr/bin/arch -k`" in + case $(/usr/bin/arch -k) in Series*|S4*) - UNAME_RELEASE=`uname -v` + UNAME_RELEASE=$(uname -v) ;; esac # Japanese Language versions have a version number like `4.1.3-JL'. - echo sparc-sun-sunos`echo ${UNAME_RELEASE}|sed -e 's/-/_/'` + echo sparc-sun-sunos"$(echo "$UNAME_RELEASE"|sed -e 's/-/_/')" exit ;; sun3*:SunOS:*:*) - echo m68k-sun-sunos${UNAME_RELEASE} + echo m68k-sun-sunos"$UNAME_RELEASE" exit ;; sun*:*:4.2BSD:*) - UNAME_RELEASE=`(sed 1q /etc/motd | awk '{print substr($5,1,3)}') 2>/dev/null` - test "x${UNAME_RELEASE}" = "x" && UNAME_RELEASE=3 - case "`/bin/arch`" in + UNAME_RELEASE=$( (sed 1q /etc/motd | awk '{print substr($5,1,3)}') 2>/dev/null) + test "x$UNAME_RELEASE" = x && UNAME_RELEASE=3 + case $(/bin/arch) in sun3) - echo m68k-sun-sunos${UNAME_RELEASE} + echo m68k-sun-sunos"$UNAME_RELEASE" ;; sun4) - echo sparc-sun-sunos${UNAME_RELEASE} + echo sparc-sun-sunos"$UNAME_RELEASE" ;; esac exit ;; aushp:SunOS:*:*) - echo sparc-auspex-sunos${UNAME_RELEASE} + echo sparc-auspex-sunos"$UNAME_RELEASE" exit ;; # The situation for MiNT is a little confusing. 
The machine name # can be virtually everything (everything which is not @@ -415,44 +469,44 @@ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in # MiNT. But MiNT is downward compatible to TOS, so this should # be no problem. atarist[e]:*MiNT:*:* | atarist[e]:*mint:*:* | atarist[e]:*TOS:*:*) - echo m68k-atari-mint${UNAME_RELEASE} + echo m68k-atari-mint"$UNAME_RELEASE" exit ;; atari*:*MiNT:*:* | atari*:*mint:*:* | atarist[e]:*TOS:*:*) - echo m68k-atari-mint${UNAME_RELEASE} + echo m68k-atari-mint"$UNAME_RELEASE" exit ;; *falcon*:*MiNT:*:* | *falcon*:*mint:*:* | *falcon*:*TOS:*:*) - echo m68k-atari-mint${UNAME_RELEASE} + echo m68k-atari-mint"$UNAME_RELEASE" exit ;; milan*:*MiNT:*:* | milan*:*mint:*:* | *milan*:*TOS:*:*) - echo m68k-milan-mint${UNAME_RELEASE} + echo m68k-milan-mint"$UNAME_RELEASE" exit ;; hades*:*MiNT:*:* | hades*:*mint:*:* | *hades*:*TOS:*:*) - echo m68k-hades-mint${UNAME_RELEASE} + echo m68k-hades-mint"$UNAME_RELEASE" exit ;; *:*MiNT:*:* | *:*mint:*:* | *:*TOS:*:*) - echo m68k-unknown-mint${UNAME_RELEASE} + echo m68k-unknown-mint"$UNAME_RELEASE" exit ;; m68k:machten:*:*) - echo m68k-apple-machten${UNAME_RELEASE} + echo m68k-apple-machten"$UNAME_RELEASE" exit ;; powerpc:machten:*:*) - echo powerpc-apple-machten${UNAME_RELEASE} + echo powerpc-apple-machten"$UNAME_RELEASE" exit ;; RISC*:Mach:*:*) echo mips-dec-mach_bsd4.3 exit ;; RISC*:ULTRIX:*:*) - echo mips-dec-ultrix${UNAME_RELEASE} + echo mips-dec-ultrix"$UNAME_RELEASE" exit ;; VAX*:ULTRIX*:*:*) - echo vax-dec-ultrix${UNAME_RELEASE} + echo vax-dec-ultrix"$UNAME_RELEASE" exit ;; 2020:CLIX:*:* | 2430:CLIX:*:*) - echo clipper-intergraph-clix${UNAME_RELEASE} + echo clipper-intergraph-clix"$UNAME_RELEASE" exit ;; mips:*:*:UMIPS | mips:*:*:RISCos) - eval $set_cc_for_build - sed 's/^ //' << EOF >$dummy.c + set_cc_for_build + sed 's/^ //' << EOF > "$dummy.c" #ifdef __cplusplus #include /* for printf() prototype */ int main (int argc, char *argv[]) { @@ -461,23 +515,23 @@ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in #endif #if defined (host_mips) && defined (MIPSEB) #if defined (SYSTYPE_SYSV) - printf ("mips-mips-riscos%ssysv\n", argv[1]); exit (0); + printf ("mips-mips-riscos%ssysv\\n", argv[1]); exit (0); #endif #if defined (SYSTYPE_SVR4) - printf ("mips-mips-riscos%ssvr4\n", argv[1]); exit (0); + printf ("mips-mips-riscos%ssvr4\\n", argv[1]); exit (0); #endif #if defined (SYSTYPE_BSD43) || defined(SYSTYPE_BSD) - printf ("mips-mips-riscos%sbsd\n", argv[1]); exit (0); + printf ("mips-mips-riscos%sbsd\\n", argv[1]); exit (0); #endif #endif exit (-1); } EOF - $CC_FOR_BUILD -o $dummy $dummy.c && - dummyarg=`echo "${UNAME_RELEASE}" | sed -n 's/\([0-9]*\).*/\1/p'` && - SYSTEM_NAME=`$dummy $dummyarg` && + $CC_FOR_BUILD -o "$dummy" "$dummy.c" && + dummyarg=$(echo "$UNAME_RELEASE" | sed -n 's/\([0-9]*\).*/\1/p') && + SYSTEM_NAME=$("$dummy" "$dummyarg") && { echo "$SYSTEM_NAME"; exit; } - echo mips-mips-riscos${UNAME_RELEASE} + echo mips-mips-riscos"$UNAME_RELEASE" exit ;; Motorola:PowerMAX_OS:*:*) echo powerpc-motorola-powermax @@ -502,18 +556,18 @@ EOF exit ;; AViiON:dgux:*:*) # DG/UX returns AViiON for all architectures - UNAME_PROCESSOR=`/usr/bin/uname -p` - if [ $UNAME_PROCESSOR = mc88100 ] || [ $UNAME_PROCESSOR = mc88110 ] + UNAME_PROCESSOR=$(/usr/bin/uname -p) + if test "$UNAME_PROCESSOR" = mc88100 || test "$UNAME_PROCESSOR" = mc88110 then - if [ ${TARGET_BINARY_INTERFACE}x = m88kdguxelfx ] || \ - [ ${TARGET_BINARY_INTERFACE}x = x ] + if test "$TARGET_BINARY_INTERFACE"x = 
m88kdguxelfx || \ + test "$TARGET_BINARY_INTERFACE"x = x then - echo m88k-dg-dgux${UNAME_RELEASE} + echo m88k-dg-dgux"$UNAME_RELEASE" else - echo m88k-dg-dguxbcs${UNAME_RELEASE} + echo m88k-dg-dguxbcs"$UNAME_RELEASE" fi else - echo i586-dg-dgux${UNAME_RELEASE} + echo i586-dg-dgux"$UNAME_RELEASE" fi exit ;; M88*:DolphinOS:*:*) # DolphinOS (SVR3) @@ -530,26 +584,26 @@ EOF echo m68k-tektronix-bsd exit ;; *:IRIX*:*:*) - echo mips-sgi-irix`echo ${UNAME_RELEASE}|sed -e 's/-/_/g'` + echo mips-sgi-irix"$(echo "$UNAME_RELEASE"|sed -e 's/-/_/g')" exit ;; ????????:AIX?:[12].1:2) # AIX 2.2.1 or AIX 2.1.1 is RT/PC AIX. echo romp-ibm-aix # uname -m gives an 8 hex-code CPU id - exit ;; # Note that: echo "'`uname -s`'" gives 'AIX ' + exit ;; # Note that: echo "'$(uname -s)'" gives 'AIX ' i*86:AIX:*:*) echo i386-ibm-aix exit ;; ia64:AIX:*:*) - if [ -x /usr/bin/oslevel ] ; then - IBM_REV=`/usr/bin/oslevel` + if test -x /usr/bin/oslevel ; then + IBM_REV=$(/usr/bin/oslevel) else - IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE} + IBM_REV="$UNAME_VERSION.$UNAME_RELEASE" fi - echo ${UNAME_MACHINE}-ibm-aix${IBM_REV} + echo "$UNAME_MACHINE"-ibm-aix"$IBM_REV" exit ;; *:AIX:2:3) if grep bos325 /usr/include/stdio.h >/dev/null 2>&1; then - eval $set_cc_for_build - sed 's/^ //' << EOF >$dummy.c + set_cc_for_build + sed 's/^ //' << EOF > "$dummy.c" #include main() @@ -560,7 +614,7 @@ EOF exit(0); } EOF - if $CC_FOR_BUILD -o $dummy $dummy.c && SYSTEM_NAME=`$dummy` + if $CC_FOR_BUILD -o "$dummy" "$dummy.c" && SYSTEM_NAME=$("$dummy") then echo "$SYSTEM_NAME" else @@ -573,27 +627,28 @@ EOF fi exit ;; *:AIX:*:[4567]) - IBM_CPU_ID=`/usr/sbin/lsdev -C -c processor -S available | sed 1q | awk '{ print $1 }'` - if /usr/sbin/lsattr -El ${IBM_CPU_ID} | grep ' POWER' >/dev/null 2>&1; then + IBM_CPU_ID=$(/usr/sbin/lsdev -C -c processor -S available | sed 1q | awk '{ print $1 }') + if /usr/sbin/lsattr -El "$IBM_CPU_ID" | grep ' POWER' >/dev/null 2>&1; then IBM_ARCH=rs6000 else IBM_ARCH=powerpc fi - if [ -x /usr/bin/oslevel ] ; then - IBM_REV=`/usr/bin/oslevel` + if test -x /usr/bin/lslpp ; then + IBM_REV=$(/usr/bin/lslpp -Lqc bos.rte.libc | + awk -F: '{ print $3 }' | sed s/[0-9]*$/0/) else - IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE} + IBM_REV="$UNAME_VERSION.$UNAME_RELEASE" fi - echo ${IBM_ARCH}-ibm-aix${IBM_REV} + echo "$IBM_ARCH"-ibm-aix"$IBM_REV" exit ;; *:AIX:*:*) echo rs6000-ibm-aix exit ;; - ibmrt:4.4BSD:*|romp-ibm:BSD:*) + ibmrt:4.4BSD:*|romp-ibm:4.4BSD:*) echo romp-ibm-bsd4.4 exit ;; ibmrt:*BSD:*|romp-ibm:BSD:*) # covers RT/PC BSD and - echo romp-ibm-bsd${UNAME_RELEASE} # 4.3 with uname added to + echo romp-ibm-bsd"$UNAME_RELEASE" # 4.3 with uname added to exit ;; # report: romp-ibm BSD 4.3 *:BOSX:*:*) echo rs6000-bull-bosx @@ -608,28 +663,28 @@ EOF echo m68k-hp-bsd4.4 exit ;; 9000/[34678]??:HP-UX:*:*) - HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'` - case "${UNAME_MACHINE}" in - 9000/31? ) HP_ARCH=m68000 ;; - 9000/[34]?? ) HP_ARCH=m68k ;; + HPUX_REV=$(echo "$UNAME_RELEASE"|sed -e 's/[^.]*.[0B]*//') + case $UNAME_MACHINE in + 9000/31?) HP_ARCH=m68000 ;; + 9000/[34]??) 
HP_ARCH=m68k ;; 9000/[678][0-9][0-9]) - if [ -x /usr/bin/getconf ]; then - sc_cpu_version=`/usr/bin/getconf SC_CPU_VERSION 2>/dev/null` - sc_kernel_bits=`/usr/bin/getconf SC_KERNEL_BITS 2>/dev/null` - case "${sc_cpu_version}" in - 523) HP_ARCH="hppa1.0" ;; # CPU_PA_RISC1_0 - 528) HP_ARCH="hppa1.1" ;; # CPU_PA_RISC1_1 + if test -x /usr/bin/getconf; then + sc_cpu_version=$(/usr/bin/getconf SC_CPU_VERSION 2>/dev/null) + sc_kernel_bits=$(/usr/bin/getconf SC_KERNEL_BITS 2>/dev/null) + case $sc_cpu_version in + 523) HP_ARCH=hppa1.0 ;; # CPU_PA_RISC1_0 + 528) HP_ARCH=hppa1.1 ;; # CPU_PA_RISC1_1 532) # CPU_PA_RISC2_0 - case "${sc_kernel_bits}" in - 32) HP_ARCH="hppa2.0n" ;; - 64) HP_ARCH="hppa2.0w" ;; - '') HP_ARCH="hppa2.0" ;; # HP-UX 10.20 + case $sc_kernel_bits in + 32) HP_ARCH=hppa2.0n ;; + 64) HP_ARCH=hppa2.0w ;; + '') HP_ARCH=hppa2.0 ;; # HP-UX 10.20 esac ;; esac fi - if [ "${HP_ARCH}" = "" ]; then - eval $set_cc_for_build - sed 's/^ //' << EOF >$dummy.c + if test "$HP_ARCH" = ""; then + set_cc_for_build + sed 's/^ //' << EOF > "$dummy.c" #define _HPUX_SOURCE #include @@ -662,13 +717,13 @@ EOF exit (0); } EOF - (CCOPTS= $CC_FOR_BUILD -o $dummy $dummy.c 2>/dev/null) && HP_ARCH=`$dummy` + (CCOPTS="" $CC_FOR_BUILD -o "$dummy" "$dummy.c" 2>/dev/null) && HP_ARCH=$("$dummy") test -z "$HP_ARCH" && HP_ARCH=hppa fi ;; esac - if [ ${HP_ARCH} = "hppa2.0w" ] + if test "$HP_ARCH" = hppa2.0w then - eval $set_cc_for_build + set_cc_for_build # hppa2.0w-hp-hpux* has a 64-bit kernel and a compiler generating # 32-bit code. hppa64-hp-hpux* has the same kernel and a compiler @@ -679,23 +734,23 @@ EOF # $ CC_FOR_BUILD="cc +DA2.0w" ./config.guess # => hppa64-hp-hpux11.23 - if echo __LP64__ | (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | + if echo __LP64__ | (CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) | grep -q __LP64__ then - HP_ARCH="hppa2.0w" + HP_ARCH=hppa2.0w else - HP_ARCH="hppa64" + HP_ARCH=hppa64 fi fi - echo ${HP_ARCH}-hp-hpux${HPUX_REV} + echo "$HP_ARCH"-hp-hpux"$HPUX_REV" exit ;; ia64:HP-UX:*:*) - HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'` - echo ia64-hp-hpux${HPUX_REV} + HPUX_REV=$(echo "$UNAME_RELEASE"|sed -e 's/[^.]*.[0B]*//') + echo ia64-hp-hpux"$HPUX_REV" exit ;; 3050*:HI-UX:*:*) - eval $set_cc_for_build - sed 's/^ //' << EOF >$dummy.c + set_cc_for_build + sed 's/^ //' << EOF > "$dummy.c" #include int main () @@ -720,11 +775,11 @@ EOF exit (0); } EOF - $CC_FOR_BUILD -o $dummy $dummy.c && SYSTEM_NAME=`$dummy` && + $CC_FOR_BUILD -o "$dummy" "$dummy.c" && SYSTEM_NAME=$("$dummy") && { echo "$SYSTEM_NAME"; exit; } echo unknown-hitachi-hiuxwe2 exit ;; - 9000/7??:4.3bsd:*:* | 9000/8?[79]:4.3bsd:*:* ) + 9000/7??:4.3bsd:*:* | 9000/8?[79]:4.3bsd:*:*) echo hppa1.1-hp-bsd exit ;; 9000/8??:4.3bsd:*:*) @@ -733,17 +788,17 @@ EOF *9??*:MPE/iX:*:* | *3000*:MPE/iX:*:*) echo hppa1.0-hp-mpeix exit ;; - hp7??:OSF1:*:* | hp8?[79]:OSF1:*:* ) + hp7??:OSF1:*:* | hp8?[79]:OSF1:*:*) echo hppa1.1-hp-osf exit ;; hp8??:OSF1:*:*) echo hppa1.0-hp-osf exit ;; i*86:OSF1:*:*) - if [ -x /usr/sbin/sysversion ] ; then - echo ${UNAME_MACHINE}-unknown-osf1mk + if test -x /usr/sbin/sysversion ; then + echo "$UNAME_MACHINE"-unknown-osf1mk else - echo ${UNAME_MACHINE}-unknown-osf1 + echo "$UNAME_MACHINE"-unknown-osf1 fi exit ;; parisc*:Lites*:*:*) @@ -768,130 +823,123 @@ EOF echo c4-convex-bsd exit ;; CRAY*Y-MP:*:*:*) - echo ymp-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' + echo ymp-cray-unicos"$UNAME_RELEASE" | sed -e 's/\.[^.]*$/.X/' exit ;; CRAY*[A-Z]90:*:*:*) - echo ${UNAME_MACHINE}-cray-unicos${UNAME_RELEASE} \ + 
echo "$UNAME_MACHINE"-cray-unicos"$UNAME_RELEASE" \ | sed -e 's/CRAY.*\([A-Z]90\)/\1/' \ -e y/ABCDEFGHIJKLMNOPQRSTUVWXYZ/abcdefghijklmnopqrstuvwxyz/ \ -e 's/\.[^.]*$/.X/' exit ;; CRAY*TS:*:*:*) - echo t90-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' + echo t90-cray-unicos"$UNAME_RELEASE" | sed -e 's/\.[^.]*$/.X/' exit ;; CRAY*T3E:*:*:*) - echo alphaev5-cray-unicosmk${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' + echo alphaev5-cray-unicosmk"$UNAME_RELEASE" | sed -e 's/\.[^.]*$/.X/' exit ;; CRAY*SV1:*:*:*) - echo sv1-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' + echo sv1-cray-unicos"$UNAME_RELEASE" | sed -e 's/\.[^.]*$/.X/' exit ;; *:UNICOS/mp:*:*) - echo craynv-cray-unicosmp${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' + echo craynv-cray-unicosmp"$UNAME_RELEASE" | sed -e 's/\.[^.]*$/.X/' exit ;; F30[01]:UNIX_System_V:*:* | F700:UNIX_System_V:*:*) - FUJITSU_PROC=`uname -m | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'` - FUJITSU_SYS=`uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/\///'` - FUJITSU_REL=`echo ${UNAME_RELEASE} | sed -e 's/ /_/'` + FUJITSU_PROC=$(uname -m | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz) + FUJITSU_SYS=$(uname -p | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz | sed -e 's/\///') + FUJITSU_REL=$(echo "$UNAME_RELEASE" | sed -e 's/ /_/') echo "${FUJITSU_PROC}-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}" exit ;; 5000:UNIX_System_V:4.*:*) - FUJITSU_SYS=`uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/\///'` - FUJITSU_REL=`echo ${UNAME_RELEASE} | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/ /_/'` + FUJITSU_SYS=$(uname -p | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz | sed -e 's/\///') + FUJITSU_REL=$(echo "$UNAME_RELEASE" | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz | sed -e 's/ /_/') echo "sparc-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}" exit ;; i*86:BSD/386:*:* | i*86:BSD/OS:*:* | *:Ascend\ Embedded/OS:*:*) - echo ${UNAME_MACHINE}-pc-bsdi${UNAME_RELEASE} + echo "$UNAME_MACHINE"-pc-bsdi"$UNAME_RELEASE" exit ;; sparc*:BSD/OS:*:*) - echo sparc-unknown-bsdi${UNAME_RELEASE} + echo sparc-unknown-bsdi"$UNAME_RELEASE" exit ;; *:BSD/OS:*:*) - echo ${UNAME_MACHINE}-unknown-bsdi${UNAME_RELEASE} + echo "$UNAME_MACHINE"-unknown-bsdi"$UNAME_RELEASE" + exit ;; + arm:FreeBSD:*:*) + UNAME_PROCESSOR=$(uname -p) + set_cc_for_build + if echo __ARM_PCS_VFP | $CC_FOR_BUILD -E - 2>/dev/null \ + | grep -q __ARM_PCS_VFP + then + echo "${UNAME_PROCESSOR}"-unknown-freebsd"$(echo ${UNAME_RELEASE}|sed -e 's/[-(].*//')"-gnueabi + else + echo "${UNAME_PROCESSOR}"-unknown-freebsd"$(echo ${UNAME_RELEASE}|sed -e 's/[-(].*//')"-gnueabihf + fi exit ;; *:FreeBSD:*:*) - UNAME_PROCESSOR=`/usr/bin/uname -p` - case ${UNAME_PROCESSOR} in + UNAME_PROCESSOR=$(/usr/bin/uname -p) + case $UNAME_PROCESSOR in amd64) - echo x86_64-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;; - *) - echo ${UNAME_PROCESSOR}-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;; + UNAME_PROCESSOR=x86_64 ;; + i386) + UNAME_PROCESSOR=i586 ;; esac + echo "$UNAME_PROCESSOR"-unknown-freebsd"$(echo "$UNAME_RELEASE"|sed -e 's/[-(].*//')" exit ;; i*:CYGWIN*:*) - echo ${UNAME_MACHINE}-pc-cygwin + echo "$UNAME_MACHINE"-pc-cygwin exit ;; *:MINGW64*:*) - echo ${UNAME_MACHINE}-pc-mingw64 + echo "$UNAME_MACHINE"-pc-mingw64 exit ;; *:MINGW*:*) - echo ${UNAME_MACHINE}-pc-mingw32 + echo "$UNAME_MACHINE"-pc-mingw32 exit ;; *:MSYS*:*) - echo ${UNAME_MACHINE}-pc-msys - 
exit ;; - i*:windows32*:*) - # uname -m includes "-pc" on this system. - echo ${UNAME_MACHINE}-mingw32 + echo "$UNAME_MACHINE"-pc-msys exit ;; i*:PW*:*) - echo ${UNAME_MACHINE}-pc-pw32 + echo "$UNAME_MACHINE"-pc-pw32 exit ;; *:Interix*:*) - case ${UNAME_MACHINE} in + case $UNAME_MACHINE in x86) - echo i586-pc-interix${UNAME_RELEASE} + echo i586-pc-interix"$UNAME_RELEASE" exit ;; authenticamd | genuineintel | EM64T) - echo x86_64-unknown-interix${UNAME_RELEASE} + echo x86_64-unknown-interix"$UNAME_RELEASE" exit ;; IA64) - echo ia64-unknown-interix${UNAME_RELEASE} + echo ia64-unknown-interix"$UNAME_RELEASE" exit ;; esac ;; - [345]86:Windows_95:* | [345]86:Windows_98:* | [345]86:Windows_NT:*) - echo i${UNAME_MACHINE}-pc-mks - exit ;; - 8664:Windows_NT:*) - echo x86_64-pc-mks - exit ;; - i*:Windows_NT*:* | Pentium*:Windows_NT*:*) - # How do we know it's Interix rather than the generic POSIX subsystem? - # It also conflicts with pre-2.0 versions of AT&T UWIN. Should we - # UNAME_MACHINE based on the output of uname instead of i386? - echo i586-pc-interix - exit ;; i*:UWIN*:*) - echo ${UNAME_MACHINE}-pc-uwin + echo "$UNAME_MACHINE"-pc-uwin exit ;; amd64:CYGWIN*:*:* | x86_64:CYGWIN*:*:*) - echo x86_64-unknown-cygwin - exit ;; - p*:CYGWIN*:*) - echo powerpcle-unknown-cygwin + echo x86_64-pc-cygwin exit ;; prep*:SunOS:5.*:*) - echo powerpcle-unknown-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + echo powerpcle-unknown-solaris2"$(echo "$UNAME_RELEASE"|sed -e 's/[^.]*//')" exit ;; *:GNU:*:*) # the GNU system - echo `echo ${UNAME_MACHINE}|sed -e 's,[-/].*$,,'`-unknown-${LIBC}`echo ${UNAME_RELEASE}|sed -e 's,/.*$,,'` + echo "$(echo "$UNAME_MACHINE"|sed -e 's,[-/].*$,,')-unknown-$LIBC$(echo "$UNAME_RELEASE"|sed -e 's,/.*$,,')" exit ;; *:GNU/*:*:*) # other systems with GNU libc and userland - echo ${UNAME_MACHINE}-unknown-`echo ${UNAME_SYSTEM} | sed 's,^[^/]*/,,' | tr '[A-Z]' '[a-z]'``echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`-${LIBC} + echo "$UNAME_MACHINE-unknown-$(echo "$UNAME_SYSTEM" | sed 's,^[^/]*/,,' | tr "[:upper:]" "[:lower:]")$(echo "$UNAME_RELEASE"|sed -e 's/[-(].*//')-$LIBC" exit ;; - i*86:Minix:*:*) - echo ${UNAME_MACHINE}-pc-minix + *:Minix:*:*) + echo "$UNAME_MACHINE"-unknown-minix exit ;; aarch64:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; aarch64_be:Linux:*:*) UNAME_MACHINE=aarch64_be - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; alpha:Linux:*:*) - case `sed -n '/^cpu model/s/^.*: \(.*\)/\1/p' < /proc/cpuinfo` in + case $(sed -n '/^cpu model/s/^.*: \(.*\)/\1/p' /proc/cpuinfo 2>/dev/null) in EV5) UNAME_MACHINE=alphaev5 ;; EV56) UNAME_MACHINE=alphaev56 ;; PCA56) UNAME_MACHINE=alphapca56 ;; @@ -901,129 +949,182 @@ EOF EV68*) UNAME_MACHINE=alphaev68 ;; esac objdump --private-headers /bin/sh | grep -q ld.so.1 - if test "$?" = 0 ; then LIBC="gnulibc1" ; fi - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + if test "$?" 
= 0 ; then LIBC=gnulibc1 ; fi + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; - arc:Linux:*:* | arceb:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + arc:Linux:*:* | arceb:Linux:*:* | arc64:Linux:*:*) + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; arm*:Linux:*:*) - eval $set_cc_for_build + set_cc_for_build if echo __ARM_EABI__ | $CC_FOR_BUILD -E - 2>/dev/null \ | grep -q __ARM_EABI__ then - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" else if echo __ARM_PCS_VFP | $CC_FOR_BUILD -E - 2>/dev/null \ | grep -q __ARM_PCS_VFP then - echo ${UNAME_MACHINE}-unknown-linux-${LIBC}eabi + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"eabi else - echo ${UNAME_MACHINE}-unknown-linux-${LIBC}eabihf + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"eabihf fi fi exit ;; avr32*:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; cris:Linux:*:*) - echo ${UNAME_MACHINE}-axis-linux-${LIBC} + echo "$UNAME_MACHINE"-axis-linux-"$LIBC" exit ;; crisv32:Linux:*:*) - echo ${UNAME_MACHINE}-axis-linux-${LIBC} + echo "$UNAME_MACHINE"-axis-linux-"$LIBC" + exit ;; + e2k:Linux:*:*) + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; frv:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; hexagon:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; i*86:Linux:*:*) - echo ${UNAME_MACHINE}-pc-linux-${LIBC} + echo "$UNAME_MACHINE"-pc-linux-"$LIBC" exit ;; ia64:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" + exit ;; + k1om:Linux:*:*) + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" + exit ;; + loongarch32:Linux:*:* | loongarch64:Linux:*:* | loongarchx32:Linux:*:*) + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; m32r*:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; m68*:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; mips:Linux:*:* | mips64:Linux:*:*) - eval $set_cc_for_build - sed 's/^ //' << EOF >$dummy.c + set_cc_for_build + IS_GLIBC=0 + test x"${LIBC}" = xgnu && IS_GLIBC=1 + sed 's/^ //' << EOF > "$dummy.c" #undef CPU - #undef ${UNAME_MACHINE} - #undef ${UNAME_MACHINE}el + #undef mips + #undef mipsel + #undef mips64 + #undef mips64el + #if ${IS_GLIBC} && defined(_ABI64) + LIBCABI=gnuabi64 + #else + #if ${IS_GLIBC} && defined(_ABIN32) + LIBCABI=gnuabin32 + #else + LIBCABI=${LIBC} + #endif + #endif + + #if ${IS_GLIBC} && defined(__mips64) && defined(__mips_isa_rev) && __mips_isa_rev>=6 + CPU=mipsisa64r6 + #else + #if ${IS_GLIBC} && !defined(__mips64) && defined(__mips_isa_rev) && __mips_isa_rev>=6 + CPU=mipsisa32r6 + #else + #if defined(__mips64) + CPU=mips64 + #else + CPU=mips + #endif + #endif + #endif + #if defined(__MIPSEL__) || defined(__MIPSEL) || defined(_MIPSEL) || defined(MIPSEL) - CPU=${UNAME_MACHINE}el + MIPS_ENDIAN=el #else #if defined(__MIPSEB__) || defined(__MIPSEB) || defined(_MIPSEB) || defined(MIPSEB) - CPU=${UNAME_MACHINE} + MIPS_ENDIAN= #else - CPU= + MIPS_ENDIAN= #endif #endif EOF - eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep '^CPU'` - test x"${CPU}" != x && { echo "${CPU}-unknown-linux-${LIBC}"; exit; } + eval "$($CC_FOR_BUILD -E "$dummy.c" 2>/dev/null | grep '^CPU\|^MIPS_ENDIAN\|^LIBCABI')" + test "x$CPU" != x && { echo "$CPU${MIPS_ENDIAN}-unknown-linux-$LIBCABI"; exit; } ;; + 
mips64el:Linux:*:*) + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" + exit ;; openrisc*:Linux:*:*) - echo or1k-unknown-linux-${LIBC} + echo or1k-unknown-linux-"$LIBC" exit ;; or32:Linux:*:* | or1k*:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; padre:Linux:*:*) - echo sparc-unknown-linux-${LIBC} + echo sparc-unknown-linux-"$LIBC" exit ;; parisc64:Linux:*:* | hppa64:Linux:*:*) - echo hppa64-unknown-linux-${LIBC} + echo hppa64-unknown-linux-"$LIBC" exit ;; parisc:Linux:*:* | hppa:Linux:*:*) # Look for CPU level - case `grep '^cpu[^a-z]*:' /proc/cpuinfo 2>/dev/null | cut -d' ' -f2` in - PA7*) echo hppa1.1-unknown-linux-${LIBC} ;; - PA8*) echo hppa2.0-unknown-linux-${LIBC} ;; - *) echo hppa-unknown-linux-${LIBC} ;; + case $(grep '^cpu[^a-z]*:' /proc/cpuinfo 2>/dev/null | cut -d' ' -f2) in + PA7*) echo hppa1.1-unknown-linux-"$LIBC" ;; + PA8*) echo hppa2.0-unknown-linux-"$LIBC" ;; + *) echo hppa-unknown-linux-"$LIBC" ;; esac exit ;; ppc64:Linux:*:*) - echo powerpc64-unknown-linux-${LIBC} + echo powerpc64-unknown-linux-"$LIBC" exit ;; ppc:Linux:*:*) - echo powerpc-unknown-linux-${LIBC} + echo powerpc-unknown-linux-"$LIBC" exit ;; ppc64le:Linux:*:*) - echo powerpc64le-unknown-linux-${LIBC} + echo powerpc64le-unknown-linux-"$LIBC" exit ;; ppcle:Linux:*:*) - echo powerpcle-unknown-linux-${LIBC} + echo powerpcle-unknown-linux-"$LIBC" + exit ;; + riscv32:Linux:*:* | riscv32be:Linux:*:* | riscv64:Linux:*:* | riscv64be:Linux:*:*) + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; s390:Linux:*:* | s390x:Linux:*:*) - echo ${UNAME_MACHINE}-ibm-linux-${LIBC} + echo "$UNAME_MACHINE"-ibm-linux-"$LIBC" exit ;; sh64*:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; sh*:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; sparc:Linux:*:* | sparc64:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; tile*:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; vax:Linux:*:*) - echo ${UNAME_MACHINE}-dec-linux-${LIBC} + echo "$UNAME_MACHINE"-dec-linux-"$LIBC" exit ;; x86_64:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + set_cc_for_build + LIBCABI=$LIBC + if test "$CC_FOR_BUILD" != no_compiler_found; then + if (echo '#ifdef __ILP32__'; echo IS_X32; echo '#endif') | \ + (CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) | \ + grep IS_X32 >/dev/null + then + LIBCABI="$LIBC"x32 + fi + fi + echo "$UNAME_MACHINE"-pc-linux-"$LIBCABI" exit ;; xtensa*:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; i*86:DYNIX/ptx:4*:*) # ptx 4.0 does uname -s correctly, with DYNIX/ptx in there. @@ -1037,51 +1138,51 @@ EOF # I am not positive that other SVR4 systems won't match this, # I just have to hope. -- rms. # Use sysv4.2uw... so that sysv4* matches it. - echo ${UNAME_MACHINE}-pc-sysv4.2uw${UNAME_VERSION} + echo "$UNAME_MACHINE"-pc-sysv4.2uw"$UNAME_VERSION" exit ;; i*86:OS/2:*:*) # If we were able to find `uname', then EMX Unix compatibility # is probably installed. 
- echo ${UNAME_MACHINE}-pc-os2-emx + echo "$UNAME_MACHINE"-pc-os2-emx exit ;; i*86:XTS-300:*:STOP) - echo ${UNAME_MACHINE}-unknown-stop + echo "$UNAME_MACHINE"-unknown-stop exit ;; i*86:atheos:*:*) - echo ${UNAME_MACHINE}-unknown-atheos + echo "$UNAME_MACHINE"-unknown-atheos exit ;; i*86:syllable:*:*) - echo ${UNAME_MACHINE}-pc-syllable + echo "$UNAME_MACHINE"-pc-syllable exit ;; i*86:LynxOS:2.*:* | i*86:LynxOS:3.[01]*:* | i*86:LynxOS:4.[02]*:*) - echo i386-unknown-lynxos${UNAME_RELEASE} + echo i386-unknown-lynxos"$UNAME_RELEASE" exit ;; i*86:*DOS:*:*) - echo ${UNAME_MACHINE}-pc-msdosdjgpp + echo "$UNAME_MACHINE"-pc-msdosdjgpp exit ;; - i*86:*:4.*:* | i*86:SYSTEM_V:4.*:*) - UNAME_REL=`echo ${UNAME_RELEASE} | sed 's/\/MP$//'` + i*86:*:4.*:*) + UNAME_REL=$(echo "$UNAME_RELEASE" | sed 's/\/MP$//') if grep Novell /usr/include/link.h >/dev/null 2>/dev/null; then - echo ${UNAME_MACHINE}-univel-sysv${UNAME_REL} + echo "$UNAME_MACHINE"-univel-sysv"$UNAME_REL" else - echo ${UNAME_MACHINE}-pc-sysv${UNAME_REL} + echo "$UNAME_MACHINE"-pc-sysv"$UNAME_REL" fi exit ;; i*86:*:5:[678]*) # UnixWare 7.x, OpenUNIX and OpenServer 6. - case `/bin/uname -X | grep "^Machine"` in + case $(/bin/uname -X | grep "^Machine") in *486*) UNAME_MACHINE=i486 ;; *Pentium) UNAME_MACHINE=i586 ;; *Pent*|*Celeron) UNAME_MACHINE=i686 ;; esac - echo ${UNAME_MACHINE}-unknown-sysv${UNAME_RELEASE}${UNAME_SYSTEM}${UNAME_VERSION} + echo "$UNAME_MACHINE-unknown-sysv${UNAME_RELEASE}${UNAME_SYSTEM}${UNAME_VERSION}" exit ;; i*86:*:3.2:*) if test -f /usr/options/cb.name; then - UNAME_REL=`sed -n 's/.*Version //p' /dev/null >/dev/null ; then - UNAME_REL=`(/bin/uname -X|grep Release|sed -e 's/.*= //')` + UNAME_REL=$( (/bin/uname -X|grep Release|sed -e 's/.*= //')) (/bin/uname -X|grep i80486 >/dev/null) && UNAME_MACHINE=i486 (/bin/uname -X|grep '^Machine.*Pentium' >/dev/null) \ && UNAME_MACHINE=i586 @@ -1089,9 +1190,9 @@ EOF && UNAME_MACHINE=i686 (/bin/uname -X|grep '^Machine.*Pentium Pro' >/dev/null) \ && UNAME_MACHINE=i686 - echo ${UNAME_MACHINE}-pc-sco$UNAME_REL + echo "$UNAME_MACHINE"-pc-sco"$UNAME_REL" else - echo ${UNAME_MACHINE}-pc-sysv32 + echo "$UNAME_MACHINE"-pc-sysv32 fi exit ;; pc:*:*:*) @@ -1099,7 +1200,7 @@ EOF # uname -m prints for DJGPP always 'pc', but it prints nothing about # the processor, so we play safe by assuming i586. # Note: whatever this is, it MUST be the same as what config.sub - # prints for the "djgpp" host, or else GDB configury will decide that + # prints for the "djgpp" host, or else GDB configure will decide that # this is a cross-build. echo i586-pc-msdosdjgpp exit ;; @@ -1111,9 +1212,9 @@ EOF exit ;; i860:*:4.*:*) # i860-SVR4 if grep Stardent /usr/include/sys/uadmin.h >/dev/null 2>&1 ; then - echo i860-stardent-sysv${UNAME_RELEASE} # Stardent Vistra i860-SVR4 + echo i860-stardent-sysv"$UNAME_RELEASE" # Stardent Vistra i860-SVR4 else # Add other i860-SVR4 vendors below as they are discovered. 
- echo i860-unknown-sysv${UNAME_RELEASE} # Unknown i860-SVR4 + echo i860-unknown-sysv"$UNAME_RELEASE" # Unknown i860-SVR4 fi exit ;; mini*:CTIX:SYS*5:*) @@ -1131,41 +1232,41 @@ EOF 3[345]??:*:4.0:3.0 | 3[34]??A:*:4.0:3.0 | 3[34]??,*:*:4.0:3.0 | 3[34]??/*:*:4.0:3.0 | 4400:*:4.0:3.0 | 4850:*:4.0:3.0 | SKA40:*:4.0:3.0 | SDS2:*:4.0:3.0 | SHG2:*:4.0:3.0 | S7501*:*:4.0:3.0) OS_REL='' test -r /etc/.relid \ - && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid` + && OS_REL=.$(sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid) /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ - && { echo i486-ncr-sysv4.3${OS_REL}; exit; } + && { echo i486-ncr-sysv4.3"$OS_REL"; exit; } /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \ - && { echo i586-ncr-sysv4.3${OS_REL}; exit; } ;; + && { echo i586-ncr-sysv4.3"$OS_REL"; exit; } ;; 3[34]??:*:4.0:* | 3[34]??,*:*:4.0:*) /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ && { echo i486-ncr-sysv4; exit; } ;; NCR*:*:4.2:* | MPRAS*:*:4.2:*) OS_REL='.3' test -r /etc/.relid \ - && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid` + && OS_REL=.$(sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid) /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ - && { echo i486-ncr-sysv4.3${OS_REL}; exit; } + && { echo i486-ncr-sysv4.3"$OS_REL"; exit; } /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \ - && { echo i586-ncr-sysv4.3${OS_REL}; exit; } + && { echo i586-ncr-sysv4.3"$OS_REL"; exit; } /bin/uname -p 2>/dev/null | /bin/grep pteron >/dev/null \ - && { echo i586-ncr-sysv4.3${OS_REL}; exit; } ;; + && { echo i586-ncr-sysv4.3"$OS_REL"; exit; } ;; m68*:LynxOS:2.*:* | m68*:LynxOS:3.0*:*) - echo m68k-unknown-lynxos${UNAME_RELEASE} + echo m68k-unknown-lynxos"$UNAME_RELEASE" exit ;; mc68030:UNIX_System_V:4.*:*) echo m68k-atari-sysv4 exit ;; TSUNAMI:LynxOS:2.*:*) - echo sparc-unknown-lynxos${UNAME_RELEASE} + echo sparc-unknown-lynxos"$UNAME_RELEASE" exit ;; rs6000:LynxOS:2.*:*) - echo rs6000-unknown-lynxos${UNAME_RELEASE} + echo rs6000-unknown-lynxos"$UNAME_RELEASE" exit ;; PowerPC:LynxOS:2.*:* | PowerPC:LynxOS:3.[01]*:* | PowerPC:LynxOS:4.[02]*:*) - echo powerpc-unknown-lynxos${UNAME_RELEASE} + echo powerpc-unknown-lynxos"$UNAME_RELEASE" exit ;; SM[BE]S:UNIX_SV:*:*) - echo mips-dde-sysv${UNAME_RELEASE} + echo mips-dde-sysv"$UNAME_RELEASE" exit ;; RM*:ReliantUNIX-*:*:*) echo mips-sni-sysv4 @@ -1175,8 +1276,8 @@ EOF exit ;; *:SINIX-*:*:*) if uname -p 2>/dev/null >/dev/null ; then - UNAME_MACHINE=`(uname -p) 2>/dev/null` - echo ${UNAME_MACHINE}-sni-sysv4 + UNAME_MACHINE=$( (uname -p) 2>/dev/null) + echo "$UNAME_MACHINE"-sni-sysv4 else echo ns32k-sni-sysv fi @@ -1196,23 +1297,23 @@ EOF exit ;; i*86:VOS:*:*) # From Paul.Green@stratus.com. - echo ${UNAME_MACHINE}-stratus-vos + echo "$UNAME_MACHINE"-stratus-vos exit ;; *:VOS:*:*) # From Paul.Green@stratus.com. echo hppa1.1-stratus-vos exit ;; mc68*:A/UX:*:*) - echo m68k-apple-aux${UNAME_RELEASE} + echo m68k-apple-aux"$UNAME_RELEASE" exit ;; news*:NEWS-OS:6*:*) echo mips-sony-newsos6 exit ;; R[34]000:*System_V*:*:* | R4000:UNIX_SYSV:*:* | R*000:UNIX_SV:*:*) - if [ -d /usr/nec ]; then - echo mips-nec-sysv${UNAME_RELEASE} + if test -d /usr/nec; then + echo mips-nec-sysv"$UNAME_RELEASE" else - echo mips-unknown-sysv${UNAME_RELEASE} + echo mips-unknown-sysv"$UNAME_RELEASE" fi exit ;; BeBox:BeOS:*:*) # BeOS running on hardware made by Be, PPC only. 
@@ -1231,77 +1332,97 @@ EOF echo x86_64-unknown-haiku exit ;; SX-4:SUPER-UX:*:*) - echo sx4-nec-superux${UNAME_RELEASE} + echo sx4-nec-superux"$UNAME_RELEASE" exit ;; SX-5:SUPER-UX:*:*) - echo sx5-nec-superux${UNAME_RELEASE} + echo sx5-nec-superux"$UNAME_RELEASE" exit ;; SX-6:SUPER-UX:*:*) - echo sx6-nec-superux${UNAME_RELEASE} + echo sx6-nec-superux"$UNAME_RELEASE" exit ;; SX-7:SUPER-UX:*:*) - echo sx7-nec-superux${UNAME_RELEASE} + echo sx7-nec-superux"$UNAME_RELEASE" exit ;; SX-8:SUPER-UX:*:*) - echo sx8-nec-superux${UNAME_RELEASE} + echo sx8-nec-superux"$UNAME_RELEASE" exit ;; SX-8R:SUPER-UX:*:*) - echo sx8r-nec-superux${UNAME_RELEASE} + echo sx8r-nec-superux"$UNAME_RELEASE" + exit ;; + SX-ACE:SUPER-UX:*:*) + echo sxace-nec-superux"$UNAME_RELEASE" exit ;; Power*:Rhapsody:*:*) - echo powerpc-apple-rhapsody${UNAME_RELEASE} + echo powerpc-apple-rhapsody"$UNAME_RELEASE" exit ;; *:Rhapsody:*:*) - echo ${UNAME_MACHINE}-apple-rhapsody${UNAME_RELEASE} + echo "$UNAME_MACHINE"-apple-rhapsody"$UNAME_RELEASE" + exit ;; + arm64:Darwin:*:*) + echo aarch64-apple-darwin"$UNAME_RELEASE" exit ;; *:Darwin:*:*) - UNAME_PROCESSOR=`uname -p` || UNAME_PROCESSOR=unknown - eval $set_cc_for_build - if test "$UNAME_PROCESSOR" = unknown ; then - UNAME_PROCESSOR=powerpc + UNAME_PROCESSOR=$(uname -p) + case $UNAME_PROCESSOR in + unknown) UNAME_PROCESSOR=powerpc ;; + esac + if command -v xcode-select > /dev/null 2> /dev/null && \ + ! xcode-select --print-path > /dev/null 2> /dev/null ; then + # Avoid executing cc if there is no toolchain installed as + # cc will be a stub that puts up a graphical alert + # prompting the user to install developer tools. + CC_FOR_BUILD=no_compiler_found + else + set_cc_for_build fi - if test `echo "$UNAME_RELEASE" | sed -e 's/\..*//'` -le 10 ; then - if [ "$CC_FOR_BUILD" != 'no_compiler_found' ]; then - if (echo '#ifdef __LP64__'; echo IS_64BIT_ARCH; echo '#endif') | \ - (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | \ - grep IS_64BIT_ARCH >/dev/null - then - case $UNAME_PROCESSOR in - i386) UNAME_PROCESSOR=x86_64 ;; - powerpc) UNAME_PROCESSOR=powerpc64 ;; - esac - fi + if test "$CC_FOR_BUILD" != no_compiler_found; then + if (echo '#ifdef __LP64__'; echo IS_64BIT_ARCH; echo '#endif') | \ + (CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) | \ + grep IS_64BIT_ARCH >/dev/null + then + case $UNAME_PROCESSOR in + i386) UNAME_PROCESSOR=x86_64 ;; + powerpc) UNAME_PROCESSOR=powerpc64 ;; + esac + fi + # On 10.4-10.6 one might compile for PowerPC via gcc -arch ppc + if (echo '#ifdef __POWERPC__'; echo IS_PPC; echo '#endif') | \ + (CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) | \ + grep IS_PPC >/dev/null + then + UNAME_PROCESSOR=powerpc fi elif test "$UNAME_PROCESSOR" = i386 ; then - # Avoid executing cc on OS X 10.9, as it ships with a stub - # that puts up a graphical alert prompting to install - # developer tools. Any system running Mac OS X 10.7 or - # later (Darwin 11 and later) is required to have a 64-bit - # processor. This is not true of the ARM version of Darwin - # that Apple uses in portable devices. 
- UNAME_PROCESSOR=x86_64 + # uname -m returns i386 or x86_64 + UNAME_PROCESSOR=$UNAME_MACHINE fi - echo ${UNAME_PROCESSOR}-apple-darwin${UNAME_RELEASE} + echo "$UNAME_PROCESSOR"-apple-darwin"$UNAME_RELEASE" exit ;; *:procnto*:*:* | *:QNX:[0123456789]*:*) - UNAME_PROCESSOR=`uname -p` - if test "$UNAME_PROCESSOR" = "x86"; then + UNAME_PROCESSOR=$(uname -p) + if test "$UNAME_PROCESSOR" = x86; then UNAME_PROCESSOR=i386 UNAME_MACHINE=pc fi - echo ${UNAME_PROCESSOR}-${UNAME_MACHINE}-nto-qnx${UNAME_RELEASE} + echo "$UNAME_PROCESSOR"-"$UNAME_MACHINE"-nto-qnx"$UNAME_RELEASE" exit ;; *:QNX:*:4*) echo i386-pc-qnx exit ;; - NEO-?:NONSTOP_KERNEL:*:*) - echo neo-tandem-nsk${UNAME_RELEASE} + NEO-*:NONSTOP_KERNEL:*:*) + echo neo-tandem-nsk"$UNAME_RELEASE" exit ;; NSE-*:NONSTOP_KERNEL:*:*) - echo nse-tandem-nsk${UNAME_RELEASE} + echo nse-tandem-nsk"$UNAME_RELEASE" + exit ;; + NSR-*:NONSTOP_KERNEL:*:*) + echo nsr-tandem-nsk"$UNAME_RELEASE" exit ;; - NSR-?:NONSTOP_KERNEL:*:*) - echo nsr-tandem-nsk${UNAME_RELEASE} + NSV-*:NONSTOP_KERNEL:*:*) + echo nsv-tandem-nsk"$UNAME_RELEASE" + exit ;; + NSX-*:NONSTOP_KERNEL:*:*) + echo nsx-tandem-nsk"$UNAME_RELEASE" exit ;; *:NonStop-UX:*:*) echo mips-compaq-nonstopux @@ -1310,18 +1431,18 @@ EOF echo bs2000-siemens-sysv exit ;; DS/*:UNIX_System_V:*:*) - echo ${UNAME_MACHINE}-${UNAME_SYSTEM}-${UNAME_RELEASE} + echo "$UNAME_MACHINE"-"$UNAME_SYSTEM"-"$UNAME_RELEASE" exit ;; *:Plan9:*:*) # "uname -m" is not consistent, so use $cputype instead. 386 # is converted to i386 for consistency with other x86 # operating systems. - if test "$cputype" = "386"; then + if test "${cputype-}" = 386; then UNAME_MACHINE=i386 - else + elif test "x${cputype-}" != x; then UNAME_MACHINE="$cputype" fi - echo ${UNAME_MACHINE}-unknown-plan9 + echo "$UNAME_MACHINE"-unknown-plan9 exit ;; *:TOPS-10:*:*) echo pdp10-unknown-tops10 @@ -1342,14 +1463,14 @@ EOF echo pdp10-unknown-its exit ;; SEI:*:*:SEIUX) - echo mips-sei-seiux${UNAME_RELEASE} + echo mips-sei-seiux"$UNAME_RELEASE" exit ;; *:DragonFly:*:*) - echo ${UNAME_MACHINE}-unknown-dragonfly`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` + echo "$UNAME_MACHINE"-unknown-dragonfly"$(echo "$UNAME_RELEASE"|sed -e 's/[-(].*//')" exit ;; *:*VMS:*:*) - UNAME_MACHINE=`(uname -p) 2>/dev/null` - case "${UNAME_MACHINE}" in + UNAME_MACHINE=$( (uname -p) 2>/dev/null) + case $UNAME_MACHINE in A*) echo alpha-dec-vms ; exit ;; I*) echo ia64-dec-vms ; exit ;; V*) echo vax-dec-vms ; exit ;; @@ -1358,62 +1479,223 @@ EOF echo i386-pc-xenix exit ;; i*86:skyos:*:*) - echo ${UNAME_MACHINE}-pc-skyos`echo ${UNAME_RELEASE}` | sed -e 's/ .*$//' + echo "$UNAME_MACHINE"-pc-skyos"$(echo "$UNAME_RELEASE" | sed -e 's/ .*$//')" exit ;; i*86:rdos:*:*) - echo ${UNAME_MACHINE}-pc-rdos + echo "$UNAME_MACHINE"-pc-rdos exit ;; - i*86:AROS:*:*) - echo ${UNAME_MACHINE}-pc-aros + *:AROS:*:*) + echo "$UNAME_MACHINE"-unknown-aros exit ;; x86_64:VMkernel:*:*) - echo ${UNAME_MACHINE}-unknown-esx + echo "$UNAME_MACHINE"-unknown-esx + exit ;; + amd64:Isilon\ OneFS:*:*) + echo x86_64-unknown-onefs + exit ;; + *:Unleashed:*:*) + echo "$UNAME_MACHINE"-unknown-unleashed"$UNAME_RELEASE" exit ;; esac +# No uname command or uname output not recognized. 
+set_cc_for_build +cat > "$dummy.c" < +#include +#endif +#if defined(ultrix) || defined(_ultrix) || defined(__ultrix) || defined(__ultrix__) +#if defined (vax) || defined (__vax) || defined (__vax__) || defined(mips) || defined(__mips) || defined(__mips__) || defined(MIPS) || defined(__MIPS__) +#include +#if defined(_SIZE_T_) || defined(SIGLOST) +#include +#endif +#endif +#endif +main () +{ +#if defined (sony) +#if defined (MIPSEB) + /* BFD wants "bsd" instead of "newsos". Perhaps BFD should be changed, + I don't know.... */ + printf ("mips-sony-bsd\n"); exit (0); +#else +#include + printf ("m68k-sony-newsos%s\n", +#ifdef NEWSOS4 + "4" +#else + "" +#endif + ); exit (0); +#endif +#endif + +#if defined (NeXT) +#if !defined (__ARCHITECTURE__) +#define __ARCHITECTURE__ "m68k" +#endif + int version; + version=$( (hostinfo | sed -n 's/.*NeXT Mach \([0-9]*\).*/\1/p') 2>/dev/null); + if (version < 4) + printf ("%s-next-nextstep%d\n", __ARCHITECTURE__, version); + else + printf ("%s-next-openstep%d\n", __ARCHITECTURE__, version); + exit (0); +#endif + +#if defined (MULTIMAX) || defined (n16) +#if defined (UMAXV) + printf ("ns32k-encore-sysv\n"); exit (0); +#else +#if defined (CMU) + printf ("ns32k-encore-mach\n"); exit (0); +#else + printf ("ns32k-encore-bsd\n"); exit (0); +#endif +#endif +#endif + +#if defined (__386BSD__) + printf ("i386-pc-bsd\n"); exit (0); +#endif + +#if defined (sequent) +#if defined (i386) + printf ("i386-sequent-dynix\n"); exit (0); +#endif +#if defined (ns32000) + printf ("ns32k-sequent-dynix\n"); exit (0); +#endif +#endif + +#if defined (_SEQUENT_) + struct utsname un; + + uname(&un); + if (strncmp(un.version, "V2", 2) == 0) { + printf ("i386-sequent-ptx2\n"); exit (0); + } + if (strncmp(un.version, "V1", 2) == 0) { /* XXX is V1 correct? */ + printf ("i386-sequent-ptx1\n"); exit (0); + } + printf ("i386-sequent-ptx\n"); exit (0); +#endif + +#if defined (vax) +#if !defined (ultrix) +#include +#if defined (BSD) +#if BSD == 43 + printf ("vax-dec-bsd4.3\n"); exit (0); +#else +#if BSD == 199006 + printf ("vax-dec-bsd4.3reno\n"); exit (0); +#else + printf ("vax-dec-bsd\n"); exit (0); +#endif +#endif +#else + printf ("vax-dec-bsd\n"); exit (0); +#endif +#else +#if defined(_SIZE_T_) || defined(SIGLOST) + struct utsname un; + uname (&un); + printf ("vax-dec-ultrix%s\n", un.release); exit (0); +#else + printf ("vax-dec-ultrix\n"); exit (0); +#endif +#endif +#endif +#if defined(ultrix) || defined(_ultrix) || defined(__ultrix) || defined(__ultrix__) +#if defined(mips) || defined(__mips) || defined(__mips__) || defined(MIPS) || defined(__MIPS__) +#if defined(_SIZE_T_) || defined(SIGLOST) + struct utsname *un; + uname (&un); + printf ("mips-dec-ultrix%s\n", un.release); exit (0); +#else + printf ("mips-dec-ultrix\n"); exit (0); +#endif +#endif +#endif + +#if defined (alliant) && defined (i860) + printf ("i860-alliant-bsd\n"); exit (0); +#endif + + exit (1); +} +EOF + +$CC_FOR_BUILD -o "$dummy" "$dummy.c" 2>/dev/null && SYSTEM_NAME=$($dummy) && + { echo "$SYSTEM_NAME"; exit; } + +# Apollos put the system type in the environment. +test -d /usr/apollo && { echo "$ISP-apollo-$SYSTYPE"; exit; } + +echo "$0: unable to guess system type" >&2 + +case $UNAME_MACHINE:$UNAME_SYSTEM in + mips:Linux | mips64:Linux) + # If we got here on MIPS GNU/Linux, output extra information. + cat >&2 <&2 < in order to provide the needed -information to handle your system. 
+year=$(echo $timestamp | sed 's,-.*,,') +# shellcheck disable=SC2003 +if test "$(expr "$(date +%Y)" - "$year")" -lt 3 ; then + cat >&2 </dev/null || echo unknown` -uname -r = `(uname -r) 2>/dev/null || echo unknown` -uname -s = `(uname -s) 2>/dev/null || echo unknown` -uname -v = `(uname -v) 2>/dev/null || echo unknown` +uname -m = $( (uname -m) 2>/dev/null || echo unknown) +uname -r = $( (uname -r) 2>/dev/null || echo unknown) +uname -s = $( (uname -s) 2>/dev/null || echo unknown) +uname -v = $( (uname -v) 2>/dev/null || echo unknown) -/usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null` -/bin/uname -X = `(/bin/uname -X) 2>/dev/null` +/usr/bin/uname -p = $( (/usr/bin/uname -p) 2>/dev/null) +/bin/uname -X = $( (/bin/uname -X) 2>/dev/null) -hostinfo = `(hostinfo) 2>/dev/null` -/bin/universe = `(/bin/universe) 2>/dev/null` -/usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null` -/bin/arch = `(/bin/arch) 2>/dev/null` -/usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null` -/usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null` +hostinfo = $( (hostinfo) 2>/dev/null) +/bin/universe = $( (/bin/universe) 2>/dev/null) +/usr/bin/arch -k = $( (/usr/bin/arch -k) 2>/dev/null) +/bin/arch = $( (/bin/arch) 2>/dev/null) +/usr/bin/oslevel = $( (/usr/bin/oslevel) 2>/dev/null) +/usr/convex/getsysinfo = $( (/usr/convex/getsysinfo) 2>/dev/null) -UNAME_MACHINE = ${UNAME_MACHINE} -UNAME_RELEASE = ${UNAME_RELEASE} -UNAME_SYSTEM = ${UNAME_SYSTEM} -UNAME_VERSION = ${UNAME_VERSION} +UNAME_MACHINE = "$UNAME_MACHINE" +UNAME_RELEASE = "$UNAME_RELEASE" +UNAME_SYSTEM = "$UNAME_SYSTEM" +UNAME_VERSION = "$UNAME_VERSION" EOF +fi exit 1 # Local variables: -# eval: (add-hook 'write-file-hooks 'time-stamp) +# eval: (add-hook 'before-save-hook 'time-stamp) # time-stamp-start: "timestamp='" # time-stamp-format: "%:y-%02m-%02d" # time-stamp-end: "'" diff --git a/depends/config.site.in b/depends/config.site.in index 873f66018d118..03dabeea0a4a7 100644 --- a/depends/config.site.in +++ b/depends/config.site.in @@ -1,85 +1,139 @@ +# shellcheck shell=sh disable=SC2034 # Many variables set will be used in + # ./configure but shellcheck doesn't know + # that, hence: disable=SC2034 + +true # Dummy command because shellcheck treats all directives before first + # command as file-wide, and we only want to disable for one line. + # + # See: https://github.com/koalaman/shellcheck/wiki/Directive + +# shellcheck disable=SC2154 +depends_prefix="$(cd "$(dirname "$ac_site_file")/.." 
&& pwd)" + cross_compiling=maybe -host_alias=@HOST@ -ac_tool_prefix=${host_alias}- +host_alias="@HOST@" +ac_tool_prefix="${host_alias}-" -if test -z $with_boost; then - with_boost=$prefix +if test -z "$with_boost"; then + with_boost="$depends_prefix" fi -if test -z $with_qt_plugindir; then - with_qt_plugindir=$prefix/plugins +if test -z "$with_qt_plugindir"; then + with_qt_plugindir="${depends_prefix}/plugins" fi -if test -z $with_qt_translationdir; then - with_qt_translationdir=$prefix/translations +if test -z "$with_qt_translationdir"; then + with_qt_translationdir="${depends_prefix}/translations" fi -if test -z $with_qt_bindir; then - with_qt_bindir=$prefix/native/bin +if test -z "$with_qt_bindir" && test -z "@no_qt@"; then + with_qt_bindir="${depends_prefix}/native/bin" fi -if test -z $with_protoc_bindir; then - with_protoc_bindir=$prefix/native/bin -fi -if test -z $with_comparison_tool; then - with_comparison_tool=$prefix/native/share/BitcoindComparisonTool_jar/BitcoindComparisonTool.jar +if test -z "$with_mpgen" && test -n "@multiprocess@"; then + with_mpgen="${depends_prefix}/native" fi +if test -z "$with_qrencode" && test -n "@no_qr@"; then + with_qrencode=no +fi -if test -z $enable_wallet && test -n "@no_wallet@"; then +if test -z "$enable_wallet" && test -n "@no_wallet@"; then enable_wallet=no fi -if test -z $with_miniupnpc && test -n "@no_upnp@"; then +if test -z "$with_bdb" && test -n "@no_bdb@"; then + with_bdb=no +fi + +if test -z "$with_sqlite" && test -n "@no_sqlite@"; then + with_sqlite=no +fi + +if test -z "$enable_multiprocess" && test -n "@multiprocess@"; then + enable_multiprocess=yes +fi + +if test -z "$with_miniupnpc" && test -n "@no_upnp@"; then with_miniupnpc=no fi -if test -z $with_gui && test -n "@no_qt@"; then +if test -z "$with_natpmp" && test -n "@no_natpmp@"; then + with_natpmp=no +fi + +if test -z "$with_gui" && test -n "@no_qt@"; then with_gui=no fi -if test x@host_os@ = xdarwin; then - BREW=no - PORT=no +if test -n "@debug@" && test -z "@no_qt@" && test "$with_gui" != "no"; then + with_gui=qt5_debug fi -if test x@host_os@ = xmingw32; then - if test -z $with_qt_incdir; then - with_qt_incdir=$prefix/include - fi - if test -z $with_qt_libdir; then - with_qt_libdir=$prefix/lib - fi +if test -z "$enable_zmq" && test -n "@no_zmq@"; then + enable_zmq=no +fi + +if test -z "$enable_usdt" && test -n "@no_usdt@"; then + enable_usdt=no fi -PATH=$prefix/native/bin:$PATH -PKG_CONFIG="`which pkg-config` --static" +if test "@host_os@" = darwin; then + BREW=no +fi + +PKG_CONFIG="$(which pkg-config) --static" # These two need to remain exported because pkg-config does not see them # otherwise. That means they must be unexported at the end of configure.ac to # avoid ruining the cache. Sigh. 
+export PKG_CONFIG_PATH="${depends_prefix}/share/pkgconfig:${depends_prefix}/lib/pkgconfig" +if test -z "@allow_host_packages@"; then + export PKG_CONFIG_LIBDIR="${depends_prefix}/lib/pkgconfig" +fi -export PKG_CONFIG_LIBDIR=$prefix/lib/pkgconfig -export PKG_CONFIG_PATH=$prefix/share/pkgconfig - -CPPFLAGS="-I$prefix/include/ $CPPFLAGS" -LDFLAGS="-L$prefix/lib $LDFLAGS" +CPPFLAGS="-I${depends_prefix}/include/ ${CPPFLAGS}" +LDFLAGS="-L${depends_prefix}/lib ${LDFLAGS}" -CC="@CC@" -CXX="@CXX@" -OBJC="${CC}" -OBJCXX="${CXX}" -CCACHE=$prefix/native/bin/ccache +if test -n "@CC@" -a -z "${CC}"; then + CC="@CC@" +fi +if test -n "@CXX@" -a -z "${CXX}"; then + CXX="@CXX@" +fi +PYTHONPATH="${depends_prefix}/native/lib/python3/dist-packages${PYTHONPATH:+${PATH_SEPARATOR}}${PYTHONPATH}" if test -n "@AR@"; then - AR=@AR@ - ac_cv_path_ac_pt_AR=${AR} + AR="@AR@" + ac_cv_path_ac_pt_AR="${AR}" fi if test -n "@RANLIB@"; then - RANLIB=@RANLIB@ - ac_cv_path_ac_pt_RANLIB=${RANLIB} + RANLIB="@RANLIB@" + ac_cv_path_ac_pt_RANLIB="${RANLIB}" fi if test -n "@NM@"; then - NM=@NM@ - ac_cv_path_ac_pt_NM=${NM} + NM="@NM@" + ac_cv_path_ac_pt_NM="${NM}" +fi + +if test -n "@STRIP@"; then + STRIP="@STRIP@" + ac_cv_path_ac_pt_STRIP="${STRIP}" +fi + +if test "@host_os@" = darwin; then + if test -n "@OTOOL@"; then + OTOOL="@OTOOL@" + ac_cv_path_ac_pt_OTOOL="${OTOOL}" + fi + + if test -n "@INSTALL_NAME_TOOL@"; then + INSTALL_NAME_TOOL="@INSTALL_NAME_TOOL@" + ac_cv_path_ac_pt_INSTALL_NAME_TOOL="${INSTALL_NAME_TOOL}" + fi + + if test -n "@DSYMUTIL@"; then + DSYMUTIL="@DSYMUTIL@" + ac_cv_path_ac_pt_DSYMUTIL="${DSYMUTIL}" + fi fi if test -n "@debug@"; then @@ -87,14 +141,14 @@ if test -n "@debug@"; then fi if test -n "@CFLAGS@"; then - CFLAGS="@CFLAGS@ $CFLAGS" + CFLAGS="@CFLAGS@ ${CFLAGS}" fi if test -n "@CXXFLAGS@"; then - CXXFLAGS="@CXXFLAGS@ $CXXFLAGS" + CXXFLAGS="@CXXFLAGS@ ${CXXFLAGS}" fi if test -n "@CPPFLAGS@"; then - CPPFLAGS="@CPPFLAGS@ $CPPFLAGS" + CPPFLAGS="@CPPFLAGS@ ${CPPFLAGS}" fi if test -n "@LDFLAGS@"; then - LDFLAGS="@LDFLAGS@ $LDFLAGS" + LDFLAGS="@LDFLAGS@ ${LDFLAGS}" fi diff --git a/depends/config.sub b/depends/config.sub index d654d03cdcd22..7384e9198b405 100755 --- a/depends/config.sub +++ b/depends/config.sub @@ -1,8 +1,8 @@ #! /bin/sh # Configuration validation subroutine script. -# Copyright 1992-2014 Free Software Foundation, Inc. +# Copyright 1992-2021 Free Software Foundation, Inc. -timestamp='2014-05-01' +timestamp='2021-04-30' # This file is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by @@ -15,7 +15,7 @@ timestamp='2014-05-01' # General Public License for more details. # # You should have received a copy of the GNU General Public License -# along with this program; if not, see . +# along with this program; if not, see . # # As a special exception to the GNU General Public License, if you # distribute this file as part of a program that contains a @@ -25,7 +25,7 @@ timestamp='2014-05-01' # of the GNU General Public License, version 3 ("GPLv3"). -# Please send patches with a ChangeLog entry to config-patches@gnu.org. +# Please send patches to . # # Configuration subroutine to validate and canonicalize a configuration type. # Supply the specified configuration type as an argument. @@ -33,7 +33,7 @@ timestamp='2014-05-01' # Otherwise, we print the canonical config type on stdout and succeed. 
# You can get the latest version of this script from: -# http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.sub;hb=HEAD +# https://git.savannah.gnu.org/cgit/config.git/plain/config.sub # This file is supposed to be the same for all GNU packages # and recognize all the CPU types, system types and aliases @@ -50,15 +50,14 @@ timestamp='2014-05-01' # CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM # It is wrong to echo any other type of specification. -me=`echo "$0" | sed -e 's,.*/,,'` +me=$(echo "$0" | sed -e 's,.*/,,') usage="\ -Usage: $0 [OPTION] CPU-MFR-OPSYS - $0 [OPTION] ALIAS +Usage: $0 [OPTION] CPU-MFR-OPSYS or ALIAS Canonicalize a configuration name. -Operation modes: +Options: -h, --help print this help, then exit -t, --time-stamp print date of last modification, then exit -v, --version print version number, then exit @@ -68,7 +67,7 @@ Report bugs and patches to ." version="\ GNU config.sub ($timestamp) -Copyright 1992-2014 Free Software Foundation, Inc. +Copyright 1992-2021 Free Software Foundation, Inc. This is free software; see the source for copying conditions. There is NO warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE." @@ -90,12 +89,12 @@ while test $# -gt 0 ; do - ) # Use stdin as input. break ;; -* ) - echo "$me: invalid option $1$help" + echo "$me: invalid option $1$help" >&2 exit 1 ;; *local*) # First pass through any local machine types. - echo $1 + echo "$1" exit ;; * ) @@ -111,1215 +110,1173 @@ case $# in exit 1;; esac -# Separate what the user gave into CPU-COMPANY and OS or KERNEL-OS (if any). -# Here we must recognize all the valid KERNEL-OS combinations. -maybe_os=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\2/'` -case $maybe_os in - nto-qnx* | linux-gnu* | linux-android* | linux-dietlibc | linux-newlib* | \ - linux-musl* | linux-uclibc* | uclinux-uclibc* | uclinux-gnu* | kfreebsd*-gnu* | \ - knetbsd*-gnu* | netbsd*-gnu* | \ - kopensolaris*-gnu* | \ - storm-chaos* | os2-emx* | rtmk-nova*) - os=-$maybe_os - basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'` - ;; - android-linux) - os=-linux-android - basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'`-unknown - ;; - *) - basic_machine=`echo $1 | sed 's/-[^-]*$//'` - if [ $basic_machine != $1 ] - then os=`echo $1 | sed 's/.*-/-/'` - else os=; fi - ;; -esac +# Split fields of configuration type +# shellcheck disable=SC2162 +IFS="-" read field1 field2 field3 field4 <&2 + exit 1 ;; - -ptx*) - basic_machine=`echo $1 | sed -e 's/86-.*/86-sequent/'` + *-*-*-*) + basic_machine=$field1-$field2 + basic_os=$field3-$field4 ;; - -windowsnt*) - os=`echo $os | sed -e 's/windowsnt/winnt/'` + *-*-*) + # Ambiguous whether COMPANY is present, or skipped and KERNEL-OS is two + # parts + maybe_os=$field2-$field3 + case $maybe_os in + nto-qnx* | linux-* | uclinux-uclibc* \ + | uclinux-gnu* | kfreebsd*-gnu* | knetbsd*-gnu* | netbsd*-gnu* \ + | netbsd*-eabi* | kopensolaris*-gnu* | cloudabi*-eabi* \ + | storm-chaos* | os2-emx* | rtmk-nova*) + basic_machine=$field1 + basic_os=$maybe_os + ;; + android-linux) + basic_machine=$field1-unknown + basic_os=linux-android + ;; + *) + basic_machine=$field1-$field2 + basic_os=$field3 + ;; + esac ;; - -psos*) - os=-psos + *-*) + # A lone config we happen to match not fitting any pattern + case $field1-$field2 in + decstation-3100) + basic_machine=mips-dec + basic_os= + ;; + *-*) + # Second component is usually, but not always the OS + case $field2 in + # Prevent following clause from handling this valid os + sun*os*) + 
basic_machine=$field1 + basic_os=$field2 + ;; + # Manufacturers + dec* | mips* | sequent* | encore* | pc533* | sgi* | sony* \ + | att* | 7300* | 3300* | delta* | motorola* | sun[234]* \ + | unicom* | ibm* | next | hp | isi* | apollo | altos* \ + | convergent* | ncr* | news | 32* | 3600* | 3100* \ + | hitachi* | c[123]* | convex* | sun | crds | omron* | dg \ + | ultra | tti* | harris | dolphin | highlevel | gould \ + | cbm | ns | masscomp | apple | axis | knuth | cray \ + | microblaze* | sim | cisco \ + | oki | wec | wrs | winbond) + basic_machine=$field1-$field2 + basic_os= + ;; + *) + basic_machine=$field1 + basic_os=$field2 + ;; + esac + ;; + esac ;; - -mint | -mint[0-9]*) - basic_machine=m68k-atari - os=-mint + *) + # Convert single-component short-hands not valid as part of + # multi-component configurations. + case $field1 in + 386bsd) + basic_machine=i386-pc + basic_os=bsd + ;; + a29khif) + basic_machine=a29k-amd + basic_os=udi + ;; + adobe68k) + basic_machine=m68010-adobe + basic_os=scout + ;; + alliant) + basic_machine=fx80-alliant + basic_os= + ;; + altos | altos3068) + basic_machine=m68k-altos + basic_os= + ;; + am29k) + basic_machine=a29k-none + basic_os=bsd + ;; + amdahl) + basic_machine=580-amdahl + basic_os=sysv + ;; + amiga) + basic_machine=m68k-unknown + basic_os= + ;; + amigaos | amigados) + basic_machine=m68k-unknown + basic_os=amigaos + ;; + amigaunix | amix) + basic_machine=m68k-unknown + basic_os=sysv4 + ;; + apollo68) + basic_machine=m68k-apollo + basic_os=sysv + ;; + apollo68bsd) + basic_machine=m68k-apollo + basic_os=bsd + ;; + aros) + basic_machine=i386-pc + basic_os=aros + ;; + aux) + basic_machine=m68k-apple + basic_os=aux + ;; + balance) + basic_machine=ns32k-sequent + basic_os=dynix + ;; + blackfin) + basic_machine=bfin-unknown + basic_os=linux + ;; + cegcc) + basic_machine=arm-unknown + basic_os=cegcc + ;; + convex-c1) + basic_machine=c1-convex + basic_os=bsd + ;; + convex-c2) + basic_machine=c2-convex + basic_os=bsd + ;; + convex-c32) + basic_machine=c32-convex + basic_os=bsd + ;; + convex-c34) + basic_machine=c34-convex + basic_os=bsd + ;; + convex-c38) + basic_machine=c38-convex + basic_os=bsd + ;; + cray) + basic_machine=j90-cray + basic_os=unicos + ;; + crds | unos) + basic_machine=m68k-crds + basic_os= + ;; + da30) + basic_machine=m68k-da30 + basic_os= + ;; + decstation | pmax | pmin | dec3100 | decstatn) + basic_machine=mips-dec + basic_os= + ;; + delta88) + basic_machine=m88k-motorola + basic_os=sysv3 + ;; + dicos) + basic_machine=i686-pc + basic_os=dicos + ;; + djgpp) + basic_machine=i586-pc + basic_os=msdosdjgpp + ;; + ebmon29k) + basic_machine=a29k-amd + basic_os=ebmon + ;; + es1800 | OSE68k | ose68k | ose | OSE) + basic_machine=m68k-ericsson + basic_os=ose + ;; + gmicro) + basic_machine=tron-gmicro + basic_os=sysv + ;; + go32) + basic_machine=i386-pc + basic_os=go32 + ;; + h8300hms) + basic_machine=h8300-hitachi + basic_os=hms + ;; + h8300xray) + basic_machine=h8300-hitachi + basic_os=xray + ;; + h8500hms) + basic_machine=h8500-hitachi + basic_os=hms + ;; + harris) + basic_machine=m88k-harris + basic_os=sysv3 + ;; + hp300 | hp300hpux) + basic_machine=m68k-hp + basic_os=hpux + ;; + hp300bsd) + basic_machine=m68k-hp + basic_os=bsd + ;; + hppaosf) + basic_machine=hppa1.1-hp + basic_os=osf + ;; + hppro) + basic_machine=hppa1.1-hp + basic_os=proelf + ;; + i386mach) + basic_machine=i386-mach + basic_os=mach + ;; + isi68 | isi) + basic_machine=m68k-isi + basic_os=sysv + ;; + m68knommu) + basic_machine=m68k-unknown + basic_os=linux + ;; + magnum | m3230) + 
basic_machine=mips-mips + basic_os=sysv + ;; + merlin) + basic_machine=ns32k-utek + basic_os=sysv + ;; + mingw64) + basic_machine=x86_64-pc + basic_os=mingw64 + ;; + mingw32) + basic_machine=i686-pc + basic_os=mingw32 + ;; + mingw32ce) + basic_machine=arm-unknown + basic_os=mingw32ce + ;; + monitor) + basic_machine=m68k-rom68k + basic_os=coff + ;; + morphos) + basic_machine=powerpc-unknown + basic_os=morphos + ;; + moxiebox) + basic_machine=moxie-unknown + basic_os=moxiebox + ;; + msdos) + basic_machine=i386-pc + basic_os=msdos + ;; + msys) + basic_machine=i686-pc + basic_os=msys + ;; + mvs) + basic_machine=i370-ibm + basic_os=mvs + ;; + nacl) + basic_machine=le32-unknown + basic_os=nacl + ;; + ncr3000) + basic_machine=i486-ncr + basic_os=sysv4 + ;; + netbsd386) + basic_machine=i386-pc + basic_os=netbsd + ;; + netwinder) + basic_machine=armv4l-rebel + basic_os=linux + ;; + news | news700 | news800 | news900) + basic_machine=m68k-sony + basic_os=newsos + ;; + news1000) + basic_machine=m68030-sony + basic_os=newsos + ;; + necv70) + basic_machine=v70-nec + basic_os=sysv + ;; + nh3000) + basic_machine=m68k-harris + basic_os=cxux + ;; + nh[45]000) + basic_machine=m88k-harris + basic_os=cxux + ;; + nindy960) + basic_machine=i960-intel + basic_os=nindy + ;; + mon960) + basic_machine=i960-intel + basic_os=mon960 + ;; + nonstopux) + basic_machine=mips-compaq + basic_os=nonstopux + ;; + os400) + basic_machine=powerpc-ibm + basic_os=os400 + ;; + OSE68000 | ose68000) + basic_machine=m68000-ericsson + basic_os=ose + ;; + os68k) + basic_machine=m68k-none + basic_os=os68k + ;; + paragon) + basic_machine=i860-intel + basic_os=osf + ;; + parisc) + basic_machine=hppa-unknown + basic_os=linux + ;; + psp) + basic_machine=mipsallegrexel-sony + basic_os=psp + ;; + pw32) + basic_machine=i586-unknown + basic_os=pw32 + ;; + rdos | rdos64) + basic_machine=x86_64-pc + basic_os=rdos + ;; + rdos32) + basic_machine=i386-pc + basic_os=rdos + ;; + rom68k) + basic_machine=m68k-rom68k + basic_os=coff + ;; + sa29200) + basic_machine=a29k-amd + basic_os=udi + ;; + sei) + basic_machine=mips-sei + basic_os=seiux + ;; + sequent) + basic_machine=i386-sequent + basic_os= + ;; + sps7) + basic_machine=m68k-bull + basic_os=sysv2 + ;; + st2000) + basic_machine=m68k-tandem + basic_os= + ;; + stratus) + basic_machine=i860-stratus + basic_os=sysv4 + ;; + sun2) + basic_machine=m68000-sun + basic_os= + ;; + sun2os3) + basic_machine=m68000-sun + basic_os=sunos3 + ;; + sun2os4) + basic_machine=m68000-sun + basic_os=sunos4 + ;; + sun3) + basic_machine=m68k-sun + basic_os= + ;; + sun3os3) + basic_machine=m68k-sun + basic_os=sunos3 + ;; + sun3os4) + basic_machine=m68k-sun + basic_os=sunos4 + ;; + sun4) + basic_machine=sparc-sun + basic_os= + ;; + sun4os3) + basic_machine=sparc-sun + basic_os=sunos3 + ;; + sun4os4) + basic_machine=sparc-sun + basic_os=sunos4 + ;; + sun4sol2) + basic_machine=sparc-sun + basic_os=solaris2 + ;; + sun386 | sun386i | roadrunner) + basic_machine=i386-sun + basic_os= + ;; + sv1) + basic_machine=sv1-cray + basic_os=unicos + ;; + symmetry) + basic_machine=i386-sequent + basic_os=dynix + ;; + t3e) + basic_machine=alphaev5-cray + basic_os=unicos + ;; + t90) + basic_machine=t90-cray + basic_os=unicos + ;; + toad1) + basic_machine=pdp10-xkl + basic_os=tops20 + ;; + tpf) + basic_machine=s390x-ibm + basic_os=tpf + ;; + udi29k) + basic_machine=a29k-amd + basic_os=udi + ;; + ultra3) + basic_machine=a29k-nyu + basic_os=sym1 + ;; + v810 | necv810) + basic_machine=v810-nec + basic_os=none + ;; + vaxv) + basic_machine=vax-dec + 
basic_os=sysv + ;; + vms) + basic_machine=vax-dec + basic_os=vms + ;; + vsta) + basic_machine=i386-pc + basic_os=vsta + ;; + vxworks960) + basic_machine=i960-wrs + basic_os=vxworks + ;; + vxworks68) + basic_machine=m68k-wrs + basic_os=vxworks + ;; + vxworks29k) + basic_machine=a29k-wrs + basic_os=vxworks + ;; + xbox) + basic_machine=i686-pc + basic_os=mingw32 + ;; + ymp) + basic_machine=ymp-cray + basic_os=unicos + ;; + *) + basic_machine=$1 + basic_os= + ;; + esac ;; esac -# Decode aliases for certain CPU-COMPANY combinations. +# Decode 1-component or ad-hoc basic machines case $basic_machine in - # Recognize the basic CPU types without company name. - # Some are omitted here because they have special meanings below. - 1750a | 580 \ - | a29k \ - | aarch64 | aarch64_be \ - | alpha | alphaev[4-8] | alphaev56 | alphaev6[78] | alphapca5[67] \ - | alpha64 | alpha64ev[4-8] | alpha64ev56 | alpha64ev6[78] | alpha64pca5[67] \ - | am33_2.0 \ - | arc | arceb \ - | arm | arm[bl]e | arme[lb] | armv[2-8] | armv[3-8][lb] | armv7[arm] \ - | avr | avr32 \ - | be32 | be64 \ - | bfin \ - | c4x | c8051 | clipper \ - | d10v | d30v | dlx | dsp16xx \ - | epiphany \ - | fido | fr30 | frv \ - | h8300 | h8500 | hppa | hppa1.[01] | hppa2.0 | hppa2.0[nw] | hppa64 \ - | hexagon \ - | i370 | i860 | i960 | ia64 \ - | ip2k | iq2000 \ - | k1om \ - | le32 | le64 \ - | lm32 \ - | m32c | m32r | m32rle | m68000 | m68k | m88k \ - | maxq | mb | microblaze | microblazeel | mcore | mep | metag \ - | mips | mipsbe | mipseb | mipsel | mipsle \ - | mips16 \ - | mips64 | mips64el \ - | mips64octeon | mips64octeonel \ - | mips64orion | mips64orionel \ - | mips64r5900 | mips64r5900el \ - | mips64vr | mips64vrel \ - | mips64vr4100 | mips64vr4100el \ - | mips64vr4300 | mips64vr4300el \ - | mips64vr5000 | mips64vr5000el \ - | mips64vr5900 | mips64vr5900el \ - | mipsisa32 | mipsisa32el \ - | mipsisa32r2 | mipsisa32r2el \ - | mipsisa32r6 | mipsisa32r6el \ - | mipsisa64 | mipsisa64el \ - | mipsisa64r2 | mipsisa64r2el \ - | mipsisa64r6 | mipsisa64r6el \ - | mipsisa64sb1 | mipsisa64sb1el \ - | mipsisa64sr71k | mipsisa64sr71kel \ - | mipsr5900 | mipsr5900el \ - | mipstx39 | mipstx39el \ - | mn10200 | mn10300 \ - | moxie \ - | mt \ - | msp430 \ - | nds32 | nds32le | nds32be \ - | nios | nios2 | nios2eb | nios2el \ - | ns16k | ns32k \ - | open8 | or1k | or1knd | or32 \ - | pdp10 | pdp11 | pj | pjl \ - | powerpc | powerpc64 | powerpc64le | powerpcle \ - | pyramid \ - | rl78 | rx \ - | score \ - | sh | sh[1234] | sh[24]a | sh[24]aeb | sh[23]e | sh[34]eb | sheb | shbe | shle | sh[1234]le | sh3ele \ - | sh64 | sh64le \ - | sparc | sparc64 | sparc64b | sparc64v | sparc86x | sparclet | sparclite \ - | sparcv8 | sparcv9 | sparcv9b | sparcv9v \ - | spu \ - | tahoe | tic4x | tic54x | tic55x | tic6x | tic80 | tron \ - | ubicom32 \ - | v850 | v850e | v850e1 | v850e2 | v850es | v850e2v3 \ - | we32k \ - | x86 | xc16x | xstormy16 | xtensa \ - | z8k | z80) - basic_machine=$basic_machine-unknown - ;; - c54x) - basic_machine=tic54x-unknown - ;; - c55x) - basic_machine=tic55x-unknown - ;; - c6x) - basic_machine=tic6x-unknown - ;; - m6811 | m68hc11 | m6812 | m68hc12 | m68hcs12x | nvptx | picochip) - basic_machine=$basic_machine-unknown - os=-none - ;; - m88110 | m680[12346]0 | m683?2 | m68360 | m5200 | v70 | w65 | z8k) - ;; - ms1) - basic_machine=mt-unknown + # Here we handle the default manufacturer of certain CPU types. It is in + # some cases the only manufacturer, in others, it is the most popular. 
+ w89k) + cpu=hppa1.1 + vendor=winbond ;; - - strongarm | thumb | xscale) - basic_machine=arm-unknown + op50n) + cpu=hppa1.1 + vendor=oki ;; - xgate) - basic_machine=$basic_machine-unknown - os=-none + op60c) + cpu=hppa1.1 + vendor=oki ;; - xscaleeb) - basic_machine=armeb-unknown + ibm*) + cpu=i370 + vendor=ibm ;; - - xscaleel) - basic_machine=armel-unknown + orion105) + cpu=clipper + vendor=highlevel ;; - - # We use `pc' rather than `unknown' - # because (1) that's what they normally are, and - # (2) the word "unknown" tends to confuse beginning users. - i*86 | x86_64) - basic_machine=$basic_machine-pc - ;; - # Object if more than one company name word. - *-*-*) - echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2 - exit 1 + mac | mpw | mac-mpw) + cpu=m68k + vendor=apple ;; - # Recognize the basic CPU types with company name. - 580-* \ - | a29k-* \ - | aarch64-* | aarch64_be-* \ - | alpha-* | alphaev[4-8]-* | alphaev56-* | alphaev6[78]-* \ - | alpha64-* | alpha64ev[4-8]-* | alpha64ev56-* | alpha64ev6[78]-* \ - | alphapca5[67]-* | alpha64pca5[67]-* | arc-* | arceb-* \ - | arm-* | armbe-* | armle-* | armeb-* | armv*-* \ - | avr-* | avr32-* \ - | be32-* | be64-* \ - | bfin-* | bs2000-* \ - | c[123]* | c30-* | [cjt]90-* | c4x-* \ - | c8051-* | clipper-* | craynv-* | cydra-* \ - | d10v-* | d30v-* | dlx-* \ - | elxsi-* \ - | f30[01]-* | f700-* | fido-* | fr30-* | frv-* | fx80-* \ - | h8300-* | h8500-* \ - | hppa-* | hppa1.[01]-* | hppa2.0-* | hppa2.0[nw]-* | hppa64-* \ - | hexagon-* \ - | i*86-* | i860-* | i960-* | ia64-* \ - | ip2k-* | iq2000-* \ - | k1om-* \ - | le32-* | le64-* \ - | lm32-* \ - | m32c-* | m32r-* | m32rle-* \ - | m68000-* | m680[012346]0-* | m68360-* | m683?2-* | m68k-* \ - | m88110-* | m88k-* | maxq-* | mcore-* | metag-* \ - | microblaze-* | microblazeel-* \ - | mips-* | mipsbe-* | mipseb-* | mipsel-* | mipsle-* \ - | mips16-* \ - | mips64-* | mips64el-* \ - | mips64octeon-* | mips64octeonel-* \ - | mips64orion-* | mips64orionel-* \ - | mips64r5900-* | mips64r5900el-* \ - | mips64vr-* | mips64vrel-* \ - | mips64vr4100-* | mips64vr4100el-* \ - | mips64vr4300-* | mips64vr4300el-* \ - | mips64vr5000-* | mips64vr5000el-* \ - | mips64vr5900-* | mips64vr5900el-* \ - | mipsisa32-* | mipsisa32el-* \ - | mipsisa32r2-* | mipsisa32r2el-* \ - | mipsisa32r6-* | mipsisa32r6el-* \ - | mipsisa64-* | mipsisa64el-* \ - | mipsisa64r2-* | mipsisa64r2el-* \ - | mipsisa64r6-* | mipsisa64r6el-* \ - | mipsisa64sb1-* | mipsisa64sb1el-* \ - | mipsisa64sr71k-* | mipsisa64sr71kel-* \ - | mipsr5900-* | mipsr5900el-* \ - | mipstx39-* | mipstx39el-* \ - | mmix-* \ - | mt-* \ - | msp430-* \ - | nds32-* | nds32le-* | nds32be-* \ - | nios-* | nios2-* | nios2eb-* | nios2el-* \ - | none-* | np1-* | ns16k-* | ns32k-* \ - | open8-* \ - | or1k*-* \ - | orion-* \ - | pdp10-* | pdp11-* | pj-* | pjl-* | pn-* | power-* \ - | powerpc-* | powerpc64-* | powerpc64le-* | powerpcle-* \ - | pyramid-* \ - | rl78-* | romp-* | rs6000-* | rx-* \ - | sh-* | sh[1234]-* | sh[24]a-* | sh[24]aeb-* | sh[23]e-* | sh[34]eb-* | sheb-* | shbe-* \ - | shle-* | sh[1234]le-* | sh3ele-* | sh64-* | sh64le-* \ - | sparc-* | sparc64-* | sparc64b-* | sparc64v-* | sparc86x-* | sparclet-* \ - | sparclite-* \ - | sparcv8-* | sparcv9-* | sparcv9b-* | sparcv9v-* | sv1-* | sx?-* \ - | tahoe-* \ - | tic30-* | tic4x-* | tic54x-* | tic55x-* | tic6x-* | tic80-* \ - | tile*-* \ - | tron-* \ - | ubicom32-* \ - | v850-* | v850e-* | v850e1-* | v850es-* | v850e2-* | v850e2v3-* \ - | vax-* \ - | we32k-* \ - | x86-* | x86_64-* | 
xc16x-* | xps100-* \ - | xstormy16-* | xtensa*-* \ - | ymp-* \ - | z8k-* | z80-*) - ;; - # Recognize the basic CPU types without company name, with glob match. - xtensa*) - basic_machine=$basic_machine-unknown + pmac | pmac-mpw) + cpu=powerpc + vendor=apple ;; + # Recognize the various machine names and aliases which stand # for a CPU type and a company and sometimes even an OS. - 386bsd) - basic_machine=i386-unknown - os=-bsd - ;; 3b1 | 7300 | 7300-att | att-7300 | pc7300 | safari | unixpc) - basic_machine=m68000-att + cpu=m68000 + vendor=att ;; 3b*) - basic_machine=we32k-att - ;; - a29khif) - basic_machine=a29k-amd - os=-udi - ;; - abacus) - basic_machine=abacus-unknown - ;; - adobe68k) - basic_machine=m68010-adobe - os=-scout - ;; - alliant | fx80) - basic_machine=fx80-alliant - ;; - altos | altos3068) - basic_machine=m68k-altos - ;; - am29k) - basic_machine=a29k-none - os=-bsd - ;; - amd64) - basic_machine=x86_64-pc - ;; - amd64-*) - basic_machine=x86_64-`echo $basic_machine | sed 's/^[^-]*-//'` - ;; - amdahl) - basic_machine=580-amdahl - os=-sysv - ;; - amiga | amiga-*) - basic_machine=m68k-unknown - ;; - amigaos | amigados) - basic_machine=m68k-unknown - os=-amigaos - ;; - amigaunix | amix) - basic_machine=m68k-unknown - os=-sysv4 - ;; - apollo68) - basic_machine=m68k-apollo - os=-sysv - ;; - apollo68bsd) - basic_machine=m68k-apollo - os=-bsd - ;; - aros) - basic_machine=i386-pc - os=-aros - ;; - aux) - basic_machine=m68k-apple - os=-aux - ;; - balance) - basic_machine=ns32k-sequent - os=-dynix - ;; - blackfin) - basic_machine=bfin-unknown - os=-linux - ;; - blackfin-*) - basic_machine=bfin-`echo $basic_machine | sed 's/^[^-]*-//'` - os=-linux + cpu=we32k + vendor=att ;; bluegene*) - basic_machine=powerpc-ibm - os=-cnk - ;; - c54x-*) - basic_machine=tic54x-`echo $basic_machine | sed 's/^[^-]*-//'` - ;; - c55x-*) - basic_machine=tic55x-`echo $basic_machine | sed 's/^[^-]*-//'` - ;; - c6x-*) - basic_machine=tic6x-`echo $basic_machine | sed 's/^[^-]*-//'` - ;; - c90) - basic_machine=c90-cray - os=-unicos - ;; - cegcc) - basic_machine=arm-unknown - os=-cegcc - ;; - convex-c1) - basic_machine=c1-convex - os=-bsd - ;; - convex-c2) - basic_machine=c2-convex - os=-bsd - ;; - convex-c32) - basic_machine=c32-convex - os=-bsd - ;; - convex-c34) - basic_machine=c34-convex - os=-bsd - ;; - convex-c38) - basic_machine=c38-convex - os=-bsd - ;; - cray | j90) - basic_machine=j90-cray - os=-unicos - ;; - craynv) - basic_machine=craynv-cray - os=-unicosmp - ;; - cr16 | cr16-*) - basic_machine=cr16-unknown - os=-elf - ;; - crds | unos) - basic_machine=m68k-crds - ;; - crisv32 | crisv32-* | etraxfs*) - basic_machine=crisv32-axis - ;; - cris | cris-* | etrax*) - basic_machine=cris-axis - ;; - crx) - basic_machine=crx-unknown - os=-elf - ;; - da30 | da30-*) - basic_machine=m68k-da30 - ;; - decstation | decstation-3100 | pmax | pmax-* | pmin | dec3100 | decstatn) - basic_machine=mips-dec + cpu=powerpc + vendor=ibm + basic_os=cnk ;; decsystem10* | dec10*) - basic_machine=pdp10-dec - os=-tops10 + cpu=pdp10 + vendor=dec + basic_os=tops10 ;; decsystem20* | dec20*) - basic_machine=pdp10-dec - os=-tops20 + cpu=pdp10 + vendor=dec + basic_os=tops20 ;; delta | 3300 | motorola-3300 | motorola-delta \ | 3300-motorola | delta-motorola) - basic_machine=m68k-motorola - ;; - delta88) - basic_machine=m88k-motorola - os=-sysv3 - ;; - dicos) - basic_machine=i686-pc - os=-dicos + cpu=m68k + vendor=motorola ;; - djgpp) - basic_machine=i586-pc - os=-msdosdjgpp - ;; - dpx20 | dpx20-*) - basic_machine=rs6000-bull - os=-bosx - ;; 
- dpx2* | dpx2*-bull) - basic_machine=m68k-bull - os=-sysv3 - ;; - ebmon29k) - basic_machine=a29k-amd - os=-ebmon - ;; - elxsi) - basic_machine=elxsi-elxsi - os=-bsd + dpx2*) + cpu=m68k + vendor=bull + basic_os=sysv3 ;; encore | umax | mmax) - basic_machine=ns32k-encore + cpu=ns32k + vendor=encore ;; - es1800 | OSE68k | ose68k | ose | OSE) - basic_machine=m68k-ericsson - os=-ose + elxsi) + cpu=elxsi + vendor=elxsi + basic_os=${basic_os:-bsd} ;; fx2800) - basic_machine=i860-alliant + cpu=i860 + vendor=alliant ;; genix) - basic_machine=ns32k-ns - ;; - gmicro) - basic_machine=tron-gmicro - os=-sysv - ;; - go32) - basic_machine=i386-pc - os=-go32 + cpu=ns32k + vendor=ns ;; h3050r* | hiux*) - basic_machine=hppa1.1-hitachi - os=-hiuxwe2 - ;; - h8300hms) - basic_machine=h8300-hitachi - os=-hms - ;; - h8300xray) - basic_machine=h8300-hitachi - os=-xray - ;; - h8500hms) - basic_machine=h8500-hitachi - os=-hms - ;; - harris) - basic_machine=m88k-harris - os=-sysv3 - ;; - hp300-*) - basic_machine=m68k-hp - ;; - hp300bsd) - basic_machine=m68k-hp - os=-bsd - ;; - hp300hpux) - basic_machine=m68k-hp - os=-hpux + cpu=hppa1.1 + vendor=hitachi + basic_os=hiuxwe2 ;; hp3k9[0-9][0-9] | hp9[0-9][0-9]) - basic_machine=hppa1.0-hp + cpu=hppa1.0 + vendor=hp ;; hp9k2[0-9][0-9] | hp9k31[0-9]) - basic_machine=m68000-hp + cpu=m68000 + vendor=hp ;; hp9k3[2-9][0-9]) - basic_machine=m68k-hp + cpu=m68k + vendor=hp ;; hp9k6[0-9][0-9] | hp6[0-9][0-9]) - basic_machine=hppa1.0-hp + cpu=hppa1.0 + vendor=hp ;; hp9k7[0-79][0-9] | hp7[0-79][0-9]) - basic_machine=hppa1.1-hp + cpu=hppa1.1 + vendor=hp ;; hp9k78[0-9] | hp78[0-9]) # FIXME: really hppa2.0-hp - basic_machine=hppa1.1-hp + cpu=hppa1.1 + vendor=hp ;; hp9k8[67]1 | hp8[67]1 | hp9k80[24] | hp80[24] | hp9k8[78]9 | hp8[78]9 | hp9k893 | hp893) # FIXME: really hppa2.0-hp - basic_machine=hppa1.1-hp + cpu=hppa1.1 + vendor=hp ;; hp9k8[0-9][13679] | hp8[0-9][13679]) - basic_machine=hppa1.1-hp + cpu=hppa1.1 + vendor=hp ;; hp9k8[0-9][0-9] | hp8[0-9][0-9]) - basic_machine=hppa1.0-hp - ;; - hppa-next) - os=-nextstep3 - ;; - hppaosf) - basic_machine=hppa1.1-hp - os=-osf - ;; - hppro) - basic_machine=hppa1.1-hp - os=-proelf - ;; - i370-ibm* | ibm*) - basic_machine=i370-ibm + cpu=hppa1.0 + vendor=hp ;; i*86v32) - basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` - os=-sysv32 + cpu=$(echo "$1" | sed -e 's/86.*/86/') + vendor=pc + basic_os=sysv32 ;; i*86v4*) - basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` - os=-sysv4 + cpu=$(echo "$1" | sed -e 's/86.*/86/') + vendor=pc + basic_os=sysv4 ;; i*86v) - basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` - os=-sysv + cpu=$(echo "$1" | sed -e 's/86.*/86/') + vendor=pc + basic_os=sysv ;; i*86sol2) - basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` - os=-solaris2 - ;; - i386mach) - basic_machine=i386-mach - os=-mach + cpu=$(echo "$1" | sed -e 's/86.*/86/') + vendor=pc + basic_os=solaris2 ;; - i386-vsta | vsta) - basic_machine=i386-unknown - os=-vsta + j90 | j90-cray) + cpu=j90 + vendor=cray + basic_os=${basic_os:-unicos} ;; iris | iris4d) - basic_machine=mips-sgi - case $os in - -irix*) + cpu=mips + vendor=sgi + case $basic_os in + irix*) ;; *) - os=-irix4 + basic_os=irix4 ;; esac ;; - isi68 | isi) - basic_machine=m68k-isi - os=-sysv - ;; - m68knommu) - basic_machine=m68k-unknown - os=-linux - ;; - m68knommu-*) - basic_machine=m68k-`echo $basic_machine | sed 's/^[^-]*-//'` - os=-linux - ;; - m88k-omron*) - basic_machine=m88k-omron - ;; - magnum | m3230) - basic_machine=mips-mips - os=-sysv - ;; - merlin) - basic_machine=ns32k-utek - os=-sysv - ;; - 
microblaze*) - basic_machine=microblaze-xilinx - ;; - mingw64) - basic_machine=x86_64-pc - os=-mingw64 - ;; - mingw32) - basic_machine=i686-pc - os=-mingw32 - ;; - mingw32ce) - basic_machine=arm-unknown - os=-mingw32ce - ;; miniframe) - basic_machine=m68000-convergent - ;; - *mint | -mint[0-9]* | *MiNT | *MiNT[0-9]*) - basic_machine=m68k-atari - os=-mint + cpu=m68000 + vendor=convergent ;; - mips3*-*) - basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'` - ;; - mips3*) - basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'`-unknown - ;; - monitor) - basic_machine=m68k-rom68k - os=-coff - ;; - morphos) - basic_machine=powerpc-unknown - os=-morphos - ;; - msdos) - basic_machine=i386-pc - os=-msdos - ;; - ms1-*) - basic_machine=`echo $basic_machine | sed -e 's/ms1-/mt-/'` - ;; - msys) - basic_machine=i686-pc - os=-msys - ;; - mvs) - basic_machine=i370-ibm - os=-mvs - ;; - nacl) - basic_machine=le32-unknown - os=-nacl - ;; - ncr3000) - basic_machine=i486-ncr - os=-sysv4 - ;; - netbsd386) - basic_machine=i386-unknown - os=-netbsd - ;; - netwinder) - basic_machine=armv4l-rebel - os=-linux - ;; - news | news700 | news800 | news900) - basic_machine=m68k-sony - os=-newsos - ;; - news1000) - basic_machine=m68030-sony - os=-newsos + *mint | mint[0-9]* | *MiNT | *MiNT[0-9]*) + cpu=m68k + vendor=atari + basic_os=mint ;; news-3600 | risc-news) - basic_machine=mips-sony - os=-newsos - ;; - necv70) - basic_machine=v70-nec - os=-sysv - ;; - next | m*-next ) - basic_machine=m68k-next - case $os in - -nextstep* ) + cpu=mips + vendor=sony + basic_os=newsos + ;; + next | m*-next) + cpu=m68k + vendor=next + case $basic_os in + openstep*) + ;; + nextstep*) ;; - -ns2*) - os=-nextstep2 + ns2*) + basic_os=nextstep2 ;; *) - os=-nextstep3 + basic_os=nextstep3 ;; esac ;; - nh3000) - basic_machine=m68k-harris - os=-cxux - ;; - nh[45]000) - basic_machine=m88k-harris - os=-cxux - ;; - nindy960) - basic_machine=i960-intel - os=-nindy - ;; - mon960) - basic_machine=i960-intel - os=-mon960 - ;; - nonstopux) - basic_machine=mips-compaq - os=-nonstopux - ;; np1) - basic_machine=np1-gould - ;; - neo-tandem) - basic_machine=neo-tandem - ;; - nse-tandem) - basic_machine=nse-tandem - ;; - nsr-tandem) - basic_machine=nsr-tandem + cpu=np1 + vendor=gould ;; op50n-* | op60c-*) - basic_machine=hppa1.1-oki - os=-proelf - ;; - openrisc | openrisc-*) - basic_machine=or32-unknown - ;; - os400) - basic_machine=powerpc-ibm - os=-os400 - ;; - OSE68000 | ose68000) - basic_machine=m68000-ericsson - os=-ose - ;; - os68k) - basic_machine=m68k-none - os=-os68k + cpu=hppa1.1 + vendor=oki + basic_os=proelf ;; pa-hitachi) - basic_machine=hppa1.1-hitachi - os=-hiuxwe2 - ;; - paragon) - basic_machine=i860-intel - os=-osf - ;; - parisc) - basic_machine=hppa-unknown - os=-linux - ;; - parisc-*) - basic_machine=hppa-`echo $basic_machine | sed 's/^[^-]*-//'` - os=-linux + cpu=hppa1.1 + vendor=hitachi + basic_os=hiuxwe2 ;; pbd) - basic_machine=sparc-tti + cpu=sparc + vendor=tti ;; pbb) - basic_machine=m68k-tti - ;; - pc532 | pc532-*) - basic_machine=ns32k-pc532 - ;; - pc98) - basic_machine=i386-pc - ;; - pc98-*) - basic_machine=i386-`echo $basic_machine | sed 's/^[^-]*-//'` - ;; - pentium | p5 | k5 | k6 | nexgen | viac3) - basic_machine=i586-pc + cpu=m68k + vendor=tti ;; - pentiumpro | p6 | 6x86 | athlon | athlon_*) - basic_machine=i686-pc - ;; - pentiumii | pentium2 | pentiumiii | pentium3) - basic_machine=i686-pc - ;; - pentium4) - basic_machine=i786-pc - ;; - pentium-* | p5-* | k5-* | k6-* | nexgen-* | viac3-*) - 
basic_machine=i586-`echo $basic_machine | sed 's/^[^-]*-//'` - ;; - pentiumpro-* | p6-* | 6x86-* | athlon-*) - basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'` - ;; - pentiumii-* | pentium2-* | pentiumiii-* | pentium3-*) - basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'` - ;; - pentium4-*) - basic_machine=i786-`echo $basic_machine | sed 's/^[^-]*-//'` + pc532) + cpu=ns32k + vendor=pc532 ;; pn) - basic_machine=pn-gould + cpu=pn + vendor=gould ;; - power) basic_machine=power-ibm - ;; - ppc | ppcbe) basic_machine=powerpc-unknown - ;; - ppc-* | ppcbe-*) - basic_machine=powerpc-`echo $basic_machine | sed 's/^[^-]*-//'` - ;; - ppcle | powerpclittle | ppc-le | powerpc-little) - basic_machine=powerpcle-unknown - ;; - ppcle-* | powerpclittle-*) - basic_machine=powerpcle-`echo $basic_machine | sed 's/^[^-]*-//'` - ;; - ppc64) basic_machine=powerpc64-unknown - ;; - ppc64-*) basic_machine=powerpc64-`echo $basic_machine | sed 's/^[^-]*-//'` - ;; - ppc64le | powerpc64little | ppc64-le | powerpc64-little) - basic_machine=powerpc64le-unknown - ;; - ppc64le-* | powerpc64little-*) - basic_machine=powerpc64le-`echo $basic_machine | sed 's/^[^-]*-//'` + power) + cpu=power + vendor=ibm ;; ps2) - basic_machine=i386-ibm - ;; - pw32) - basic_machine=i586-unknown - os=-pw32 - ;; - rdos | rdos64) - basic_machine=x86_64-pc - os=-rdos - ;; - rdos32) - basic_machine=i386-pc - os=-rdos - ;; - rom68k) - basic_machine=m68k-rom68k - os=-coff + cpu=i386 + vendor=ibm ;; rm[46]00) - basic_machine=mips-siemens + cpu=mips + vendor=siemens ;; rtpc | rtpc-*) - basic_machine=romp-ibm - ;; - s390 | s390-*) - basic_machine=s390-ibm + cpu=romp + vendor=ibm ;; - s390x | s390x-*) - basic_machine=s390x-ibm - ;; - sa29200) - basic_machine=a29k-amd - os=-udi - ;; - sb1) - basic_machine=mipsisa64sb1-unknown + sde) + cpu=mipsisa32 + vendor=sde + basic_os=${basic_os:-elf} ;; - sb1el) - basic_machine=mipsisa64sb1el-unknown + simso-wrs) + cpu=sparclite + vendor=wrs + basic_os=vxworks ;; - sde) - basic_machine=mipsisa32-sde - os=-elf + tower | tower-32) + cpu=m68k + vendor=ncr ;; - sei) - basic_machine=mips-sei - os=-seiux + vpp*|vx|vx-*) + cpu=f301 + vendor=fujitsu ;; - sequent) - basic_machine=i386-sequent + w65) + cpu=w65 + vendor=wdc ;; - sh) - basic_machine=sh-hitachi - os=-hms + w89k-*) + cpu=hppa1.1 + vendor=winbond + basic_os=proelf ;; - sh5el) - basic_machine=sh5le-unknown + none) + cpu=none + vendor=none ;; - sh64) - basic_machine=sh64-unknown + leon|leon[3-9]) + cpu=sparc + vendor=$basic_machine ;; - sparclite-wrs | simso-wrs) - basic_machine=sparclite-wrs - os=-vxworks + leon-*|leon[3-9]-*) + cpu=sparc + vendor=$(echo "$basic_machine" | sed 's/-.*//') ;; - sps7) - basic_machine=m68k-bull - os=-sysv2 + + *-*) + # shellcheck disable=SC2162 + IFS="-" read cpu vendor <&2 - exit 1 + # Recognize the canonical CPU types that are allowed with any + # company name. 
+ case $cpu in + 1750a | 580 \ + | a29k \ + | aarch64 | aarch64_be \ + | abacus \ + | alpha | alphaev[4-8] | alphaev56 | alphaev6[78] \ + | alpha64 | alpha64ev[4-8] | alpha64ev56 | alpha64ev6[78] \ + | alphapca5[67] | alpha64pca5[67] \ + | am33_2.0 \ + | amdgcn \ + | arc | arceb | arc64 \ + | arm | arm[lb]e | arme[lb] | armv* \ + | avr | avr32 \ + | asmjs \ + | ba \ + | be32 | be64 \ + | bfin | bpf | bs2000 \ + | c[123]* | c30 | [cjt]90 | c4x \ + | c8051 | clipper | craynv | csky | cydra \ + | d10v | d30v | dlx | dsp16xx \ + | e2k | elxsi | epiphany \ + | f30[01] | f700 | fido | fr30 | frv | ft32 | fx80 \ + | h8300 | h8500 \ + | hppa | hppa1.[01] | hppa2.0 | hppa2.0[nw] | hppa64 \ + | hexagon \ + | i370 | i*86 | i860 | i960 | ia16 | ia64 \ + | ip2k | iq2000 \ + | k1om \ + | le32 | le64 \ + | lm32 \ + | loongarch32 | loongarch64 | loongarchx32 \ + | m32c | m32r | m32rle \ + | m5200 | m68000 | m680[012346]0 | m68360 | m683?2 | m68k \ + | m6811 | m68hc11 | m6812 | m68hc12 | m68hcs12x \ + | m88110 | m88k | maxq | mb | mcore | mep | metag \ + | microblaze | microblazeel \ + | mips | mipsbe | mipseb | mipsel | mipsle \ + | mips16 \ + | mips64 | mips64eb | mips64el \ + | mips64octeon | mips64octeonel \ + | mips64orion | mips64orionel \ + | mips64r5900 | mips64r5900el \ + | mips64vr | mips64vrel \ + | mips64vr4100 | mips64vr4100el \ + | mips64vr4300 | mips64vr4300el \ + | mips64vr5000 | mips64vr5000el \ + | mips64vr5900 | mips64vr5900el \ + | mipsisa32 | mipsisa32el \ + | mipsisa32r2 | mipsisa32r2el \ + | mipsisa32r3 | mipsisa32r3el \ + | mipsisa32r5 | mipsisa32r5el \ + | mipsisa32r6 | mipsisa32r6el \ + | mipsisa64 | mipsisa64el \ + | mipsisa64r2 | mipsisa64r2el \ + | mipsisa64r3 | mipsisa64r3el \ + | mipsisa64r5 | mipsisa64r5el \ + | mipsisa64r6 | mipsisa64r6el \ + | mipsisa64sb1 | mipsisa64sb1el \ + | mipsisa64sr71k | mipsisa64sr71kel \ + | mipsr5900 | mipsr5900el \ + | mipstx39 | mipstx39el \ + | mmix \ + | mn10200 | mn10300 \ + | moxie \ + | mt \ + | msp430 \ + | nds32 | nds32le | nds32be \ + | nfp \ + | nios | nios2 | nios2eb | nios2el \ + | none | np1 | ns16k | ns32k | nvptx \ + | open8 \ + | or1k* \ + | or32 \ + | orion \ + | picochip \ + | pdp10 | pdp11 | pj | pjl | pn | power \ + | powerpc | powerpc64 | powerpc64le | powerpcle | powerpcspe \ + | pru \ + | pyramid \ + | riscv | riscv32 | riscv32be | riscv64 | riscv64be \ + | rl78 | romp | rs6000 | rx \ + | s390 | s390x \ + | score \ + | sh | shl \ + | sh[1234] | sh[24]a | sh[24]ae[lb] | sh[23]e | she[lb] | sh[lb]e \ + | sh[1234]e[lb] | sh[12345][lb]e | sh[23]ele | sh64 | sh64le \ + | sparc | sparc64 | sparc64b | sparc64v | sparc86x | sparclet \ + | sparclite \ + | sparcv8 | sparcv9 | sparcv9b | sparcv9v | sv1 | sx* \ + | spu \ + | tahoe \ + | thumbv7* \ + | tic30 | tic4x | tic54x | tic55x | tic6x | tic80 \ + | tron \ + | ubicom32 \ + | v70 | v850 | v850e | v850e1 | v850es | v850e2 | v850e2v3 \ + | vax \ + | visium \ + | w65 \ + | wasm32 | wasm64 \ + | we32k \ + | x86 | x86_64 | xc16x | xgate | xps100 \ + | xstormy16 | xtensa* \ + | ymp \ + | z8k | z80) + ;; + + *) + echo Invalid configuration \`"$1"\': machine \`"$cpu-$vendor"\' not recognized 1>&2 + exit 1 + ;; + esac ;; esac # Here we canonicalize certain aliases for manufacturers. 
-case $basic_machine in - *-digital*) - basic_machine=`echo $basic_machine | sed 's/digital.*/dec/'` +case $vendor in + digital*) + vendor=dec ;; - *-commodore*) - basic_machine=`echo $basic_machine | sed 's/commodore.*/cbm/'` + commodore*) + vendor=cbm ;; *) ;; @@ -1327,200 +1284,213 @@ esac # Decode manufacturer-specific aliases for certain operating systems. -if [ x"$os" != x"" ] +if test x$basic_os != x then + +# First recognize some ad-hoc caes, or perhaps split kernel-os, or else just +# set os. +case $basic_os in + gnu/linux*) + kernel=linux + os=$(echo $basic_os | sed -e 's|gnu/linux|gnu|') + ;; + os2-emx) + kernel=os2 + os=$(echo $basic_os | sed -e 's|os2-emx|emx|') + ;; + nto-qnx*) + kernel=nto + os=$(echo $basic_os | sed -e 's|nto-qnx|qnx|') + ;; + *-*) + # shellcheck disable=SC2162 + IFS="-" read kernel os <&2 - exit 1 + # No normalization, but not necessarily accepted, that comes below. ;; esac + else # Here we handle the default operating systems that come with various machines. @@ -1533,261 +1503,361 @@ else # will signal an error saying that MANUFACTURER isn't an operating # system, and we'll never get to this point. -case $basic_machine in +kernel= +case $cpu-$vendor in score-*) - os=-elf + os=elf ;; spu-*) - os=-elf + os=elf ;; *-acorn) - os=-riscix1.2 + os=riscix1.2 ;; arm*-rebel) - os=-linux + kernel=linux + os=gnu ;; arm*-semi) - os=-aout + os=aout ;; c4x-* | tic4x-*) - os=-coff + os=coff ;; c8051-*) - os=-elf + os=elf + ;; + clipper-intergraph) + os=clix ;; hexagon-*) - os=-elf + os=elf ;; tic54x-*) - os=-coff + os=coff ;; tic55x-*) - os=-coff + os=coff ;; tic6x-*) - os=-coff + os=coff ;; # This must come before the *-dec entry. pdp10-*) - os=-tops20 + os=tops20 ;; pdp11-*) - os=-none + os=none ;; *-dec | vax-*) - os=-ultrix4.2 + os=ultrix4.2 ;; m68*-apollo) - os=-domain + os=domain ;; i386-sun) - os=-sunos4.0.2 + os=sunos4.0.2 ;; m68000-sun) - os=-sunos3 + os=sunos3 ;; m68*-cisco) - os=-aout + os=aout ;; mep-*) - os=-elf + os=elf ;; mips*-cisco) - os=-elf + os=elf ;; mips*-*) - os=-elf + os=elf ;; or32-*) - os=-coff + os=coff ;; *-tti) # must be before sparc entry or we get the wrong os. - os=-sysv3 + os=sysv3 ;; sparc-* | *-sun) - os=-sunos4.1.1 + os=sunos4.1.1 ;; - *-be) - os=-beos + pru-*) + os=elf ;; - *-haiku) - os=-haiku + *-be) + os=beos ;; *-ibm) - os=-aix + os=aix ;; *-knuth) - os=-mmixware + os=mmixware ;; *-wec) - os=-proelf + os=proelf ;; *-winbond) - os=-proelf + os=proelf ;; *-oki) - os=-proelf + os=proelf ;; *-hp) - os=-hpux + os=hpux ;; *-hitachi) - os=-hiux + os=hiux ;; i860-* | *-att | *-ncr | *-altos | *-motorola | *-convergent) - os=-sysv + os=sysv ;; *-cbm) - os=-amigaos + os=amigaos ;; *-dg) - os=-dgux + os=dgux ;; *-dolphin) - os=-sysv3 + os=sysv3 ;; m68k-ccur) - os=-rtu + os=rtu ;; m88k-omron*) - os=-luna + os=luna ;; - *-next ) - os=-nextstep + *-next) + os=nextstep ;; *-sequent) - os=-ptx + os=ptx ;; *-crds) - os=-unos + os=unos ;; *-ns) - os=-genix + os=genix ;; i370-*) - os=-mvs - ;; - *-next) - os=-nextstep3 + os=mvs ;; *-gould) - os=-sysv + os=sysv ;; *-highlevel) - os=-bsd + os=bsd ;; *-encore) - os=-bsd + os=bsd ;; *-sgi) - os=-irix + os=irix ;; *-siemens) - os=-sysv4 + os=sysv4 ;; *-masscomp) - os=-rtu + os=rtu ;; f30[01]-fujitsu | f700-fujitsu) - os=-uxpv + os=uxpv ;; *-rom68k) - os=-coff + os=coff ;; *-*bug) - os=-coff + os=coff ;; *-apple) - os=-macos + os=macos ;; *-atari*) - os=-mint + os=mint + ;; + *-wrs) + os=vxworks ;; *) - os=-none + os=none ;; esac + fi +# Now, validate our (potentially fixed-up) OS. 
+case $os in + # Sometimes we do "kernel-libc", so those need to count as OSes. + musl* | newlib* | uclibc*) + ;; + # Likewise for "kernel-abi" + eabi* | gnueabi*) + ;; + # VxWorks passes extra cpu info in the 4th filed. + simlinux | simwindows | spe) + ;; + # Now accept the basic system types. + # The portable systems comes first. + # Each alternative MUST end in a * to match a version number. + gnu* | android* | bsd* | mach* | minix* | genix* | ultrix* | irix* \ + | *vms* | esix* | aix* | cnk* | sunos | sunos[34]* \ + | hpux* | unos* | osf* | luna* | dgux* | auroraux* | solaris* \ + | sym* | plan9* | psp* | sim* | xray* | os68k* | v88r* \ + | hiux* | abug | nacl* | netware* | windows* \ + | os9* | macos* | osx* | ios* \ + | mpw* | magic* | mmixware* | mon960* | lnews* \ + | amigaos* | amigados* | msdos* | newsos* | unicos* | aof* \ + | aos* | aros* | cloudabi* | sortix* | twizzler* \ + | nindy* | vxsim* | vxworks* | ebmon* | hms* | mvs* \ + | clix* | riscos* | uniplus* | iris* | isc* | rtu* | xenix* \ + | mirbsd* | netbsd* | dicos* | openedition* | ose* \ + | bitrig* | openbsd* | secbsd* | solidbsd* | libertybsd* | os108* \ + | ekkobsd* | freebsd* | riscix* | lynxos* | os400* \ + | bosx* | nextstep* | cxux* | aout* | elf* | oabi* \ + | ptx* | coff* | ecoff* | winnt* | domain* | vsta* \ + | udi* | lites* | ieee* | go32* | aux* | hcos* \ + | chorusrdb* | cegcc* | glidix* | serenity* \ + | cygwin* | msys* | pe* | moss* | proelf* | rtems* \ + | midipix* | mingw32* | mingw64* | mint* \ + | uxpv* | beos* | mpeix* | udk* | moxiebox* \ + | interix* | uwin* | mks* | rhapsody* | darwin* \ + | openstep* | oskit* | conix* | pw32* | nonstopux* \ + | storm-chaos* | tops10* | tenex* | tops20* | its* \ + | os2* | vos* | palmos* | uclinux* | nucleus* | morphos* \ + | scout* | superux* | sysv* | rtmk* | tpf* | windiss* \ + | powermax* | dnix* | nx6 | nx7 | sei* | dragonfly* \ + | skyos* | haiku* | rdos* | toppers* | drops* | es* \ + | onefs* | tirtos* | phoenix* | fuchsia* | redox* | bme* \ + | midnightbsd* | amdhsa* | unleashed* | emscripten* | wasi* \ + | nsk* | powerunix* | genode* | zvmoe* | qnx* | emx*) + ;; + # This one is extra strict with allowed versions + sco3.2v2 | sco3.2v[4-9]* | sco5v6*) + # Don't forget version if it is 3.2v4 or newer. + ;; + none) + ;; + *) + echo Invalid configuration \`"$1"\': OS \`"$os"\' not recognized 1>&2 + exit 1 + ;; +esac + +# As a final step for OS-related things, validate the OS-kernel combination +# (given a valid OS), if there is a kernel. +case $kernel-$os in + linux-gnu* | linux-dietlibc* | linux-android* | linux-newlib* | linux-musl* | linux-uclibc* ) + ;; + uclinux-uclibc* ) + ;; + -dietlibc* | -newlib* | -musl* | -uclibc* ) + # These are just libc implementations, not actual OSes, and thus + # require a kernel. + echo "Invalid configuration \`$1': libc \`$os' needs explicit kernel." 1>&2 + exit 1 + ;; + kfreebsd*-gnu* | kopensolaris*-gnu*) + ;; + vxworks-simlinux | vxworks-simwindows | vxworks-spe) + ;; + nto-qnx*) + ;; + os2-emx) + ;; + *-eabi* | *-gnueabi*) + ;; + -*) + # Blank kernel with real OS is always fine. + ;; + *-*) + echo "Invalid configuration \`$1': Kernel \`$kernel' not known to work with OS \`$os'." 1>&2 + exit 1 + ;; +esac + # Here we handle the case where we know the os, and the CPU type, but not the # manufacturer. We pick the logical manufacturer. 
-vendor=unknown -case $basic_machine in - *-unknown) - case $os in - -riscix*) +case $vendor in + unknown) + case $cpu-$os in + *-riscix*) vendor=acorn ;; - -sunos*) + *-sunos*) vendor=sun ;; - -cnk*|-aix*) + *-cnk* | *-aix*) vendor=ibm ;; - -beos*) + *-beos*) vendor=be ;; - -hpux*) + *-hpux*) vendor=hp ;; - -mpeix*) + *-mpeix*) vendor=hp ;; - -hiux*) + *-hiux*) vendor=hitachi ;; - -unos*) + *-unos*) vendor=crds ;; - -dgux*) + *-dgux*) vendor=dg ;; - -luna*) + *-luna*) vendor=omron ;; - -genix*) + *-genix*) vendor=ns ;; - -mvs* | -opened*) + *-clix*) + vendor=intergraph + ;; + *-mvs* | *-opened*) + vendor=ibm + ;; + *-os400*) vendor=ibm ;; - -os400*) + s390-* | s390x-*) vendor=ibm ;; - -ptx*) + *-ptx*) vendor=sequent ;; - -tpf*) + *-tpf*) vendor=ibm ;; - -vxsim* | -vxworks* | -windiss*) + *-vxsim* | *-vxworks* | *-windiss*) vendor=wrs ;; - -aux*) + *-aux*) vendor=apple ;; - -hms*) + *-hms*) vendor=hitachi ;; - -mpw* | -macos*) + *-mpw* | *-macos*) vendor=apple ;; - -*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*) + *-*mint | *-mint[0-9]* | *-*MiNT | *-MiNT[0-9]*) vendor=atari ;; - -vos*) + *-vos*) vendor=stratus ;; esac - basic_machine=`echo $basic_machine | sed "s/unknown/$vendor/"` ;; esac -echo $basic_machine$os +echo "$cpu-$vendor-${kernel:+$kernel-}$os" exit # Local variables: -# eval: (add-hook 'write-file-hooks 'time-stamp) +# eval: (add-hook 'before-save-hook 'time-stamp) # time-stamp-start: "timestamp='" # time-stamp-format: "%:y-%02m-%02d" # time-stamp-end: "'" diff --git a/depends/description.md b/depends/description.md new file mode 100644 index 0000000000000..0a6f2e6442190 --- /dev/null +++ b/depends/description.md @@ -0,0 +1,53 @@ +This is a system of building and caching dependencies necessary for building Bitcoin. +There are several features that make it different from most similar systems: + +### It is designed to be builder and host agnostic + +In theory, binaries for any target OS/architecture can be created, from a +builder running any OS/architecture. In practice, build-side tools must be +specified when the defaults don't fit, and packages must be amended to work +on new hosts. For now, a build architecture of x86_64 is assumed, either on +Linux or macOS. + +### No reliance on timestamps + +File presence is used to determine what needs to be built. This makes the +results distributable and easily digestable by automated builders. + +### Each build only has its specified dependencies available at build-time. + +For each build, the sysroot is wiped and the (recursive) dependencies are +installed. This makes each build deterministic, since there will never be any +unknown files available to cause side-effects. + +### Each package is cached and only rebuilt as needed. + +Before building, a unique build-id is generated for each package. This id +consists of a hash of all files used to build the package (Makefiles, packages, +etc), and as well as a hash of the same data for each recursive dependency. If +any portion of a package's build recipe changes, it will be rebuilt as well as +any other package that depends on it. If any of the main makefiles (Makefile, +funcs.mk, etc) are changed, all packages will be rebuilt. After building, the +results are cached into a tarball that can be re-used and distributed. + +### Package build results are (relatively) deterministic. + +Each package is configured and patched so that it will yield the same +build-results with each consequent build, within a reasonable set of +constraints. 
Some things like timestamp insertion are unavoidable, and are +beyond the scope of this system. Additionally, the toolchain itself must be +capable of deterministic results. When revisions are properly bumped, a cached +build should represent an exact single payload. + +### Sources are fetched and verified automatically + +Each package must define its source location and checksum. The build will fail +if the fetched source does not match. Sources may be pre-seeded and/or cached +as desired. + +### Self-cleaning + +Build and staging dirs are wiped after use, and any previous version of a +cached result is removed following a successful build. Automated builders +should be able to build each revision and store the results with no further +intervention. diff --git a/depends/funcs.mk b/depends/funcs.mk index c1fc0a0e33169..a00f3802363ae 100644 --- a/depends/funcs.mk +++ b/depends/funcs.mk @@ -1,17 +1,23 @@ define int_vars #Set defaults for vars which may be overridden per-package -$(1)_cc=$($($(1)_type)_CC) -$(1)_cxx=$($($(1)_type)_CXX) -$(1)_objc=$($($(1)_type)_OBJC) -$(1)_objcxx=$($($(1)_type)_OBJCXX) -$(1)_ar=$($($(1)_type)_AR) -$(1)_ranlib=$($($(1)_type)_RANLIB) -$(1)_libtool=$($($(1)_type)_LIBTOOL) -$(1)_nm=$($($(1)_type)_NM) -$(1)_cflags=$($($(1)_type)_CFLAGS) $($($(1)_type)_$(release_type)_CFLAGS) -$(1)_cxxflags=$($($(1)_type)_CXXFLAGS) $($($(1)_type)_$(release_type)_CXXFLAGS) -$(1)_ldflags=$($($(1)_type)_LDFLAGS) $($($(1)_type)_$(release_type)_LDFLAGS) -L$($($(1)_type)_prefix)/lib -$(1)_cppflags=$($($(1)_type)_CPPFLAGS) $($($(1)_type)_$(release_type)_CPPFLAGS) -I$($($(1)_type)_prefix)/include +$(1)_cc=$$($$($(1)_type)_CC) +$(1)_cxx=$$($$($(1)_type)_CXX) +$(1)_objc=$$($$($(1)_type)_OBJC) +$(1)_objcxx=$$($$($(1)_type)_OBJCXX) +$(1)_ar=$$($$($(1)_type)_AR) +$(1)_ranlib=$$($$($(1)_type)_RANLIB) +$(1)_libtool=$$($$($(1)_type)_LIBTOOL) +$(1)_nm=$$($$($(1)_type)_NM) +$(1)_cflags=$$($$($(1)_type)_CFLAGS) \ + $$($$($(1)_type)_$$(release_type)_CFLAGS) +$(1)_cxxflags=$$($$($(1)_type)_CXXFLAGS) \ + $$($$($(1)_type)_$$(release_type)_CXXFLAGS) +$(1)_ldflags=$$($$($(1)_type)_LDFLAGS) \ + $$($$($(1)_type)_$$(release_type)_LDFLAGS) \ + -L$$($($(1)_type)_prefix)/lib +$(1)_cppflags=$$($$($(1)_type)_CPPFLAGS) \ + $$($$($(1)_type)_$$(release_type)_CPPFLAGS) \ + -I$$($$($(1)_type)_prefix)/include $(1)_recipe_hash:= endef @@ -19,43 +25,53 @@ define int_get_all_dependencies $(sort $(foreach dep,$(2),$(2) $(call int_get_all_dependencies,$(1),$($(dep)_dependencies)))) endef +define fetch_file_inner + ( mkdir -p $$($(1)_download_dir) && echo Fetching $(3) from $(2) && \ + $(build_DOWNLOAD) "$$($(1)_download_dir)/$(4).temp" "$(2)/$(3)" && \ + echo "$(5) $$($(1)_download_dir)/$(4).temp" > $$($(1)_download_dir)/.$(4).hash && \ + $(build_SHA256SUM) -c $$($(1)_download_dir)/.$(4).hash && \ + mv $$($(1)_download_dir)/$(4).temp $$($(1)_source_dir)/$(4) && \ + rm -rf $$($(1)_download_dir) ) +endef + define fetch_file -(test -f $(SOURCES_PATH)/$(4) || \ - ( mkdir -p $$($(1)_extract_dir) && \ - ( $(build_DOWNLOAD) "$$($(1)_extract_dir)/$(4).temp" "$(2)/$(3)" || \ - $(build_DOWNLOAD) "$$($(1)_extract_dir)/$(4).temp" "$(FALLBACK_DOWNLOAD_PATH)/$(3)" ) && \ - echo "$(5) $$($(1)_extract_dir)/$(4).temp" > $$($(1)_extract_dir)/.$(4).hash && \ - $(build_SHA256SUM) -c $$($(1)_extract_dir)/.$(4).hash && \ - mv $$($(1)_extract_dir)/$(4).temp $(SOURCES_PATH)/$(4) )) + ( test -f $$($(1)_source_dir)/$(4) || \ + ( $(call fetch_file_inner,$(1),$(2),$(3),$(4),$(5)) || \ + $(call 
fetch_file_inner,$(1),$(FALLBACK_DOWNLOAD_PATH),$(3),$(4),$(5)))) endef define int_get_build_recipe_hash -$(eval $(1)_all_file_checksums:=$(shell $(build_SHA256SUM) $(meta_depends) packages/$(1).mk $(addprefix $(PATCHES_PATH)/$(1)/,$($(1)_patches)))) -$(eval $(1)_recipe_hash:=$(shell echo -n "$($(1)_all_file_checksums)" | $(build_SHA256SUM))) +$(eval $(1)_all_file_checksums:=$(shell $(build_SHA256SUM) $(meta_depends) packages/$(1).mk $(addprefix $(PATCHES_PATH)/$(1)/,$($(1)_patches)) | cut -d" " -f1)) +$(eval $(1)_recipe_hash:=$(shell echo -n "$($(1)_all_file_checksums)" | $(build_SHA256SUM) | cut -d" " -f1)) endef define int_get_build_id $(eval $(1)_dependencies += $($(1)_$(host_arch)_$(host_os)_dependencies) $($(1)_$(host_os)_dependencies)) -$(eval $(1)_all_dependencies:=$(call int_get_all_dependencies,$(1),$($($(1)_type)_native_toolchain) $($(1)_dependencies))) +$(eval $(1)_all_dependencies:=$(call int_get_all_dependencies,$(1),$($($(1)_type)_native_toolchain) $($($(1)_type)_native_binutils) $($(1)_dependencies))) $(foreach dep,$($(1)_all_dependencies),$(eval $(1)_build_id_deps+=$(dep)-$($(dep)_version)-$($(dep)_recipe_hash))) -$(eval $(1)_build_id_long:=$(1)-$($(1)_version)-$($(1)_recipe_hash)-$(release_type) $($(1)_build_id_deps)) +$(eval $(1)_build_id_long:=$(1)-$($(1)_version)-$($(1)_recipe_hash)-$(release_type) $($(1)_build_id_deps) $($($(1)_type)_id)) $(eval $(1)_build_id:=$(shell echo -n "$($(1)_build_id_long)" | $(build_SHA256SUM) | cut -c-$(HASH_LENGTH))) final_build_id_long+=$($(package)_build_id_long) #compute package-specific paths $(1)_build_subdir?=. $(1)_download_file?=$($(1)_file_name) -$(1)_source:=$(SOURCES_PATH)/$($(1)_file_name) +$(1)_source_dir:=$(SOURCES_PATH) +$(1)_source:=$$($(1)_source_dir)/$($(1)_file_name) $(1)_staging_dir=$(base_staging_dir)/$(host)/$(1)/$($(1)_version)-$($(1)_build_id) $(1)_staging_prefix_dir:=$$($(1)_staging_dir)$($($(1)_type)_prefix) $(1)_extract_dir:=$(base_build_dir)/$(host)/$(1)/$($(1)_version)-$($(1)_build_id) +$(1)_download_dir:=$(base_download_dir)/$(1)-$($(1)_version) $(1)_build_dir:=$$($(1)_extract_dir)/$$($(1)_build_subdir) +$(1)_cached_checksum:=$(BASE_CACHE)/$(host)/$(1)/$(1)-$($(1)_version)-$($(1)_build_id).tar.gz.hash $(1)_patch_dir:=$(base_build_dir)/$(host)/$(1)/$($(1)_version)-$($(1)_build_id)/.patches-$($(1)_build_id) $(1)_prefixbin:=$($($(1)_type)_prefix)/bin/ $(1)_cached:=$(BASE_CACHE)/$(host)/$(1)/$(1)-$($(1)_version)-$($(1)_build_id).tar.gz +$(1)_build_log:=$(BASEDIR)/$(1)-$($(1)_version)-$($(1)_build_id).log +$(1)_all_sources=$($(1)_file_name) $($(1)_extra_sources) #stamps -$(1)_fetched=$$($(1)_extract_dir)/.stamp_fetched +$(1)_fetched=$(SOURCES_PATH)/download-stamps/.stamp_fetched-$(1)-$($(1)_file_name).hash $(1)_extracted=$$($(1)_extract_dir)/.stamp_extracted $(1)_preprocessed=$$($(1)_extract_dir)/.stamp_preprocessed $(1)_cleaned=$$($(1)_extract_dir)/.stamp_cleaned @@ -67,9 +83,10 @@ $(1)_download_path_fixed=$(subst :,\:,$$($(1)_download_path)) #default commands +# The default behavior for tar will try to set ownership when running as uid 0 and may not succeed, --no-same-owner disables this behavior $(1)_fetch_cmds ?= $(call fetch_file,$(1),$(subst \:,:,$$($(1)_download_path_fixed)),$$($(1)_download_file),$($(1)_file_name),$($(1)_sha256_hash)) -$(1)_extract_cmds ?= mkdir -p $$($(1)_extract_dir) && echo "$$($(1)_sha256_hash) $$($(1)_source)" > $$($(1)_extract_dir)/.$$($(1)_file_name).hash && $(build_SHA256SUM) -c $$($(1)_extract_dir)/.$$($(1)_file_name).hash && tar --strip-components=1 -xf $$($(1)_source) 
-$(1)_preprocess_cmds ?= +$(1)_extract_cmds ?= mkdir -p $$($(1)_extract_dir) && echo "$$($(1)_sha256_hash) $$($(1)_source)" > $$($(1)_extract_dir)/.$$($(1)_file_name).hash && $(build_SHA256SUM) -c $$($(1)_extract_dir)/.$$($(1)_file_name).hash && $(build_TAR) --no-same-owner --strip-components=1 -xf $$($(1)_source) +$(1)_preprocess_cmds ?= true $(1)_build_cmds ?= $(1)_config_cmds ?= $(1)_stage_cmds ?= @@ -120,11 +137,17 @@ $(1)_config_env+=$($(1)_config_env_$(host_arch)_$(host_os)) $($(1)_config_env_$( $(1)_config_env+=PKG_CONFIG_LIBDIR=$($($(1)_type)_prefix)/lib/pkgconfig $(1)_config_env+=PKG_CONFIG_PATH=$($($(1)_type)_prefix)/share/pkgconfig +$(1)_config_env+=CMAKE_MODULE_PATH=$($($(1)_type)_prefix)/lib/cmake $(1)_config_env+=PATH=$(build_prefix)/bin:$(PATH) $(1)_build_env+=PATH=$(build_prefix)/bin:$(PATH) $(1)_stage_env+=PATH=$(build_prefix)/bin:$(PATH) -$(1)_autoconf=./configure --host=$($($(1)_type)_host) --disable-dependency-tracking --prefix=$($($(1)_type)_prefix) $$($(1)_config_opts) CC="$$($(1)_cc)" CXX="$$($(1)_cxx)" +# Setting a --build type that differs from --host will explicitly enable +# cross-compilation mode. Note that --build defaults to the output of +# config.guess, which is what we set it too here. This also quells autoconf +# warnings, "If you wanted to set the --build type, don't use --host.", +# when using versions older than 2.70. +$(1)_autoconf=./configure --build=$(BUILD) --host=$($($(1)_type)_host) --prefix=$($($(1)_type)_prefix) $$($(1)_config_opts) CC="$$($(1)_cc)" CXX="$$($(1)_cxx)" ifneq ($($(1)_nm),) $(1)_autoconf += NM="$$($(1)_nm)" endif @@ -146,64 +169,97 @@ endif ifneq ($($(1)_ldflags),) $(1)_autoconf += LDFLAGS="$$($(1)_ldflags)" endif + +$(1)_cmake=env CC="$$($(1)_cc)" \ + CFLAGS="$$($(1)_cppflags) $$($(1)_cflags)" \ + CXX="$$($(1)_cxx)" \ + CXXFLAGS="$$($(1)_cppflags) $$($(1)_cxxflags)" \ + LDFLAGS="$$($(1)_ldflags)" \ + cmake -DCMAKE_INSTALL_PREFIX:PATH="$$($($(1)_type)_prefix)" $$($(1)_cmake_opts) +ifeq ($($(1)_type),build) +$(1)_cmake += -DCMAKE_INSTALL_RPATH:PATH="$$($($(1)_type)_prefix)/lib" +else +ifneq ($(host),$(build)) +$(1)_cmake += -DCMAKE_SYSTEM_NAME=$($(host_os)_cmake_system) +$(1)_cmake += -DCMAKE_C_COMPILER_TARGET=$(host) +$(1)_cmake += -DCMAKE_CXX_COMPILER_TARGET=$(host) +endif +endif endef define int_add_cmds +ifneq ($(LOG),) +$(1)_logging = >>$$($(1)_build_log) 2>&1 || { if test -f $$($(1)_build_log); then cat $$($(1)_build_log); fi; exit 1; } +endif + $($(1)_fetched): - $(AT)echo Fetching $(1)... - $(AT)mkdir -p $$(@D) $(SOURCES_PATH) - $(AT)cd $$(@D); $(call $(1)_fetch_cmds,$(1)) - $(AT)touch $$@ + mkdir -p $$(@D) $(SOURCES_PATH) + rm -f $$@ + touch $$@ + cd $$(@D); $($(1)_fetch_cmds) + cd $($(1)_source_dir); $(foreach source,$($(1)_all_sources),$(build_SHA256SUM) $(source) >> $$(@);) + touch $$@ $($(1)_extracted): | $($(1)_fetched) - $(AT)echo Extracting $(1)... - $(AT)mkdir -p $$(@D) - $(AT)cd $$(@D); $(call $(1)_extract_cmds,$(1)) - $(AT)touch $$@ -$($(1)_preprocessed): | $($(1)_dependencies) $($(1)_extracted) - $(AT)echo Preprocessing $(1)... - $(AT)mkdir -p $$(@D) $($(1)_patch_dir) - $(AT)$(foreach patch,$($(1)_patches),cd $(PATCHES_PATH)/$(1); cp $(patch) $($(1)_patch_dir) ;) - $(AT)cd $$(@D); $(call $(1)_preprocess_cmds, $(1)) - $(AT)touch $$@ -$($(1)_configured): | $($(1)_preprocessed) - $(AT)echo Configuring $(1)... 
- $(AT)rm -rf $(host_prefix); mkdir -p $(host_prefix)/lib; cd $(host_prefix); $(foreach package,$($(1)_all_dependencies), tar xf $($(package)_cached); ) - $(AT)mkdir -p $$(@D) - $(AT)+cd $$(@D); $($(1)_config_env) $(call $(1)_config_cmds, $(1)) - $(AT)touch $$@ + echo Extracting $(1)... + mkdir -p $$(@D) + cd $$(@D); $($(1)_extract_cmds) + touch $$@ +$($(1)_preprocessed): | $($(1)_extracted) + echo Preprocessing $(1)... + mkdir -p $$(@D) $($(1)_patch_dir) + $(foreach patch,$($(1)_patches),cd $(PATCHES_PATH)/$(1); cp $(patch) $($(1)_patch_dir) ;) + { cd $$(@D); $($(1)_preprocess_cmds); } $$($(1)_logging) + touch $$@ +$($(1)_configured): | $($(1)_dependencies) $($(1)_preprocessed) + echo Configuring $(1)... + rm -rf $(host_prefix); mkdir -p $(host_prefix)/lib; cd $(host_prefix); $(foreach package,$($(1)_all_dependencies), $(build_TAR) --no-same-owner -xf $($(package)_cached); ) + mkdir -p $$(@D) + +{ cd $$(@D); $($(1)_config_env) $($(1)_config_cmds); } $$($(1)_logging) + touch $$@ $($(1)_built): | $($(1)_configured) - $(AT)echo Building $(1)... - $(AT)mkdir -p $$(@D) - $(AT)+cd $$(@D); $($(1)_build_env) $(call $(1)_build_cmds, $(1)) - $(AT)touch $$@ + echo Building $(1)... + mkdir -p $$(@D) + +{ cd $$(@D); $($(1)_build_env) $($(1)_build_cmds); } $$($(1)_logging) + touch $$@ $($(1)_staged): | $($(1)_built) - $(AT)echo Staging $(1)... - $(AT)mkdir -p $($(1)_staging_dir)/$(host_prefix) - $(AT)cd $($(1)_build_dir); $($(1)_stage_env) $(call $(1)_stage_cmds, $(1)) - $(AT)rm -rf $($(1)_extract_dir) - $(AT)touch $$@ + echo Staging $(1)... + mkdir -p $($(1)_staging_dir)/$(host_prefix) + +{ cd $($(1)_build_dir); $($(1)_stage_env) $($(1)_stage_cmds); } $$($(1)_logging) + rm -rf $($(1)_extract_dir) + touch $$@ $($(1)_postprocessed): | $($(1)_staged) - $(AT)echo Postprocessing $(1)... - $(AT)cd $($(1)_staging_prefix_dir); $(call $(1)_postprocess_cmds) - $(AT)touch $$@ + echo Postprocessing $(1)... + cd $($(1)_staging_prefix_dir); $($(1)_postprocess_cmds) + touch $$@ $($(1)_cached): | $($(1)_dependencies) $($(1)_postprocessed) - $(AT)echo Caching $(1)... - $(AT)cd $$($(1)_staging_dir)/$(host_prefix); find . | sort | tar --no-recursion -czf $$($(1)_staging_dir)/$$(@F) -T - - $(AT)mkdir -p $$(@D) - $(AT)rm -rf $$(@D) && mkdir -p $$(@D) - $(AT)mv $$($(1)_staging_dir)/$$(@F) $$(@) - $(AT)rm -rf $($(1)_staging_dir) + echo Caching $(1)... + cd $$($(1)_staging_dir)/$(host_prefix); find . | sort | $(build_TAR) --no-recursion -czf $$($(1)_staging_dir)/$$(@F) -T - + mkdir -p $$(@D) + rm -rf $$(@D) && mkdir -p $$(@D) + mv $$($(1)_staging_dir)/$$(@F) $$(@) + rm -rf $($(1)_staging_dir) + if test -f $($(1)_build_log); then mv $($(1)_build_log) $$(@D); fi +$($(1)_cached_checksum): $($(1)_cached) + cd $$(@D); $(build_SHA256SUM) $$( $$(@) .PHONY: $(1) -$(1): | $($(1)_cached) -.SECONDARY: $($(1)_postprocessed) $($(1)_staged) $($(1)_built) $($(1)_configured) $($(1)_preprocessed) $($(1)_extracted) $($(1)_fetched) +$(1): | $($(1)_cached_checksum) +.SECONDARY: $($(1)_cached) $($(1)_postprocessed) $($(1)_staged) $($(1)_built) $($(1)_configured) $($(1)_preprocessed) $($(1)_extracted) $($(1)_fetched) + +endef + +stages = fetched extracted preprocessed configured built staged postprocessed cached cached_checksum +define ext_add_stages +$(foreach stage,$(stages), + $(1)_$(stage): $($(1)_$(stage)) + .PHONY: $(1)_$(stage)) endef # These functions create the build targets for each package. They must be # broken down into small steps so that each part is done for all packages # before moving on to the next step. 
Otherwise, a package's info -# (build-id for example) would only be avilable to another package if it +# (build-id for example) would only be available to another package if it # happened to be computed already. #set the type for host/build packages. @@ -214,7 +270,8 @@ $(foreach package,$(packages),$(eval $(package)_type=$(host_arch)_$(host_os))) $(foreach package,$(all_packages),$(eval $(call int_vars,$(package)))) #include package files -$(foreach package,$(all_packages),$(eval include packages/$(package).mk)) +$(foreach native_package,$(native_packages),$(eval include packages/$(native_package).mk)) +$(foreach package,$(packages),$(eval include packages/$(package).mk)) #compute a hash of all files that comprise this package's build recipe $(foreach package,$(all_packages),$(eval $(call int_get_build_recipe_hash,$(package)))) @@ -229,4 +286,4 @@ $(foreach package,$(all_packages),$(eval $(call int_config_attach_build_config,$ $(foreach package,$(all_packages),$(eval $(call int_add_cmds,$(package)))) #special exception: if a toolchain package exists, all non-native packages depend on it -$(foreach package,$(packages),$(eval $($(package)_unpacked): |$($($(host_arch)_$(host_os)_native_toolchain)_cached) )) +$(foreach package,$(packages),$(eval $($(package)_extracted): |$($($(host_arch)_$(host_os)_native_toolchain)_cached) $($($(host_arch)_$(host_os)_native_binutils)_cached) )) diff --git a/depends/gen_id b/depends/gen_id new file mode 100755 index 0000000000000..ac69ca7ee1fa2 --- /dev/null +++ b/depends/gen_id @@ -0,0 +1,74 @@ +#!/usr/bin/env bash + +# Usage: env [ CC=... ] [ CXX=... ] [ AR=... ] [ RANLIB=... ] [ STRIP=... ] \ +# [ DEBUG=... ] ./build-id [ID_SALT]... +# +# Prints to stdout a SHA256 hash representing the current toolset, used by +# depends/Makefile as a build id for caching purposes (detecting when the +# toolset has changed and the cache needs to be invalidated). +# +# If the DEBUG environment variable is non-empty and the system has `tee` +# available in its $PATH, the pre-image to the SHA256 hash will be printed to +# stderr. This is to help developers debug caching issues in depends. + +# This script explicitly does not `set -e` because id determination is mostly +# opportunistic: it is fine that things fail, as long as they fail consistently. + +# Command variables (CC/CXX/AR) which can be blank are invoked with `bash -c`, +# because the "command not found" error message printed by shells often include +# the line number, like so: +# +# ./depends/gen_id: line 43: --version: command not found +# +# By invoking with `bash -c`, we ensure that the line number is always 1 + +( + # Redirect stderr to stdout + exec 2>&1 + + echo "BEGIN ALL" + + # Include any ID salts supplied via command line + echo "BEGIN ID SALT" + echo "$@" + echo "END ID SALT" + + # GCC only prints COLLECT_LTO_WRAPPER when invoked with just "-v", but we want + # the information from "-v -E -" as well, so just include both. 
+ echo "BEGIN CC" + bash -c "${CC} -v" + bash -c "${CC} -v -E -xc -o /dev/null - < /dev/null" + bash -c "${CC} -v -E -xobjective-c -o /dev/null - < /dev/null" + echo "END CC" + + echo "BEGIN CXX" + bash -c "${CXX} -v" + bash -c "${CXX} -v -E -xc++ -o /dev/null - < /dev/null" + bash -c "${CXX} -v -E -xobjective-c++ -o /dev/null - < /dev/null" + echo "END CXX" + + echo "BEGIN AR" + bash -c "${AR} --version" + env | grep '^AR_' + echo "ZERO_AR_DATE=${ZERO_AR_DATE}" + echo "END AR" + + echo "BEGIN RANLIB" + bash -c "${RANLIB} --version" + env | grep '^RANLIB_' + echo "END RANLIB" + + echo "BEGIN STRIP" + bash -c "${STRIP} --version" + env | grep '^STRIP_' + echo "END STRIP" + + echo "END ALL" +) | if [ -n "$DEBUG" ] && command -v tee > /dev/null 2>&1; then + # When debugging and `tee` is available, output the preimage to stderr + # in addition to passing through stdin to stdout + tee >(cat 1>&2) + else + # Otherwise, passthrough stdin to stdout + cat + fi | ${SHA256SUM} - | cut -d' ' -f1 diff --git a/depends/hosts/android.mk b/depends/hosts/android.mk new file mode 100644 index 0000000000000..fcc1c4f5c3c64 --- /dev/null +++ b/depends/hosts/android.mk @@ -0,0 +1,11 @@ +ifeq ($(HOST),armv7a-linux-android) +android_CXX=$(ANDROID_TOOLCHAIN_BIN)/$(HOST)eabi$(ANDROID_API_LEVEL)-clang++ +android_CC=$(ANDROID_TOOLCHAIN_BIN)/$(HOST)eabi$(ANDROID_API_LEVEL)-clang +else +android_CXX=$(ANDROID_TOOLCHAIN_BIN)/$(HOST)$(ANDROID_API_LEVEL)-clang++ +android_CC=$(ANDROID_TOOLCHAIN_BIN)/$(HOST)$(ANDROID_API_LEVEL)-clang +endif +android_AR=$(ANDROID_TOOLCHAIN_BIN)/llvm-ar +android_RANLIB=$(ANDROID_TOOLCHAIN_BIN)/llvm-ranlib + +android_cmake_system=Android diff --git a/depends/hosts/darwin.mk b/depends/hosts/darwin.mk index 8d718eba17be8..bf9b7625f28bf 100644 --- a/depends/hosts/darwin.mk +++ b/depends/hosts/darwin.mk @@ -1,8 +1,113 @@ -OSX_MIN_VERSION=10.6 -OSX_SDK_VERSION=10.7 -OSX_SDK=$(SDK_PATH)/MacOSX$(OSX_SDK_VERSION).sdk -darwin_CC=clang -target $(host) -mmacosx-version-min=$(OSX_MIN_VERSION) --sysroot $(OSX_SDK) -darwin_CXX=clang++ -target $(host) -mmacosx-version-min=$(OSX_MIN_VERSION) --sysroot $(OSX_SDK) +OSX_MIN_VERSION=10.15 +OSX_SDK_VERSION=11.0 +XCODE_VERSION=12.2 +XCODE_BUILD_ID=12B45b +LD64_VERSION=609 + +OSX_SDK=$(SDK_PATH)/Xcode-$(XCODE_VERSION)-$(XCODE_BUILD_ID)-extracted-SDK-with-libcxx-headers + +darwin_native_binutils=native_cctools + +ifeq ($(strip $(FORCE_USE_SYSTEM_CLANG)),) +# FORCE_USE_SYSTEM_CLANG is empty, so we use our depends-managed, pinned clang +# from llvm.org + +# Clang is a dependency of native_cctools when FORCE_USE_SYSTEM_CLANG is empty +darwin_native_toolchain=native_cctools + +clang_prog=$(build_prefix)/bin/clang +clangxx_prog=$(clang_prog)++ + +clang_resource_dir=$(build_prefix)/lib/clang/$(native_clang_version) +else +# FORCE_USE_SYSTEM_CLANG is non-empty, so we use the clang from the user's +# system + +darwin_native_toolchain= + +# We can't just use $(shell command -v clang) because GNU Make handles builtins +# in a special way and doesn't know that `command` is a POSIX-standard builtin +# prior to 1af314465e5dfe3e8baa839a32a72e83c04f26ef, first released in v4.2.90. +# At the time of writing, GNU Make v4.2.1 is still being used in supported +# distro releases. 
+# +# Source: https://lists.gnu.org/archive/html/bug-make/2017-11/msg00017.html +clang_prog=$(shell $(SHELL) $(.SHELLFLAGS) "command -v clang") +clangxx_prog=$(shell $(SHELL) $(.SHELLFLAGS) "command -v clang++") + +clang_resource_dir=$(shell clang -print-resource-dir) +endif + +cctools_TOOLS=AR RANLIB STRIP NM LIBTOOL OTOOL INSTALL_NAME_TOOL DSYMUTIL + +# Make-only lowercase function +lc = $(subst A,a,$(subst B,b,$(subst C,c,$(subst D,d,$(subst E,e,$(subst F,f,$(subst G,g,$(subst H,h,$(subst I,i,$(subst J,j,$(subst K,k,$(subst L,l,$(subst M,m,$(subst N,n,$(subst O,o,$(subst P,p,$(subst Q,q,$(subst R,r,$(subst S,s,$(subst T,t,$(subst U,u,$(subst V,v,$(subst W,w,$(subst X,x,$(subst Y,y,$(subst Z,z,$1)))))))))))))))))))))))))) + +# For well-known tools provided by cctools, make sure that their well-known +# variable is set to the full path of the tool, just like how AC_PATH_{TOO,PROG} +# would. +$(foreach TOOL,$(cctools_TOOLS),$(eval darwin_$(TOOL) = $$(build_prefix)/bin/$$(host)-$(call lc,$(TOOL)))) + +# Flag explanations: +# +# -mlinker-version +# +# Ensures that modern linker features are enabled. See here for more +# details: https://github.com/bitcoin/bitcoin/pull/19407. +# +# -B$(build_prefix)/bin +# +# Explicitly point to our binaries (e.g. cctools) so that they are +# ensured to be found and preferred over other possibilities. +# +# -stdlib=libc++ -stdlib++-isystem$(OSX_SDK)/usr/include/c++/v1 +# +# Forces clang to use the libc++ headers from our SDK and completely +# forget about the libc++ headers from the standard directories +# +# -Xclang -*system \ +# -Xclang -*system \ +# -Xclang -*system ... +# +# Adds path_a, path_b, and path_c to the bottom of clang's list of +# include search paths. This is used to explicitly specify the list of +# system include search paths and its ordering, rather than rely on +# clang's autodetection routine. This routine has been shown to: +# 1. Fail to pickup libc++ headers in $SYSROOT/usr/include/c++/v1 +# when clang was built manually (see: https://github.com/bitcoin/bitcoin/pull/17919#issuecomment-656785034) +# 2. Fail to pickup C headers in $SYSROOT/usr/include when +# C_INCLUDE_DIRS was specified at configure time (see: https://gist.github.com/dongcarl/5cdc6990b7599e8a5bf6d2a9c70e82f9) +# +# Talking directly to cc1 with -Xclang here grants us access to specify +# more granular categories for these system include search paths, and we +# can use the correct categories that these search paths would have been +# placed in if the autodetection routine had worked correctly. (see: +# https://gist.github.com/dongcarl/5cdc6990b7599e8a5bf6d2a9c70e82f9#the-treatment) +# +# Furthermore, it places these search paths after any "non-Xclang" +# specified search paths. This prevents any additional clang options or +# environment variables from coming after or in between these system +# include search paths, as that would be wrong in general but would also +# break #include_next's. 
+# +darwin_CC=env -u C_INCLUDE_PATH -u CPLUS_INCLUDE_PATH \ + -u OBJC_INCLUDE_PATH -u OBJCPLUS_INCLUDE_PATH -u CPATH \ + -u LIBRARY_PATH \ + $(clang_prog) --target=$(host) -mmacosx-version-min=$(OSX_MIN_VERSION) \ + -B$(build_prefix)/bin -mlinker-version=$(LD64_VERSION) \ + -isysroot$(OSX_SDK) \ + -Xclang -internal-externc-isystem$(clang_resource_dir)/include \ + -Xclang -internal-externc-isystem$(OSX_SDK)/usr/include +darwin_CXX=env -u C_INCLUDE_PATH -u CPLUS_INCLUDE_PATH \ + -u OBJC_INCLUDE_PATH -u OBJCPLUS_INCLUDE_PATH -u CPATH \ + -u LIBRARY_PATH \ + $(clangxx_prog) --target=$(host) -mmacosx-version-min=$(OSX_MIN_VERSION) \ + -B$(build_prefix)/bin -mlinker-version=$(LD64_VERSION) \ + -isysroot$(OSX_SDK) \ + -stdlib=libc++ \ + -stdlib++-isystem$(OSX_SDK)/usr/include/c++/v1 \ + -Xclang -internal-externc-isystem$(clang_resource_dir)/include \ + -Xclang -internal-externc-isystem$(OSX_SDK)/usr/include darwin_CFLAGS=-pipe darwin_CXXFLAGS=$(darwin_CFLAGS) @@ -13,4 +118,4 @@ darwin_release_CXXFLAGS=$(darwin_release_CFLAGS) darwin_debug_CFLAGS=-O1 darwin_debug_CXXFLAGS=$(darwin_debug_CFLAGS) -darwin_native_toolchain=native_cctools +darwin_cmake_system=Darwin diff --git a/depends/hosts/default.mk b/depends/hosts/default.mk index 6f60d6b3fd004..ea60f025dece9 100644 --- a/depends/hosts/default.mk +++ b/depends/hosts/default.mk @@ -1,17 +1,28 @@ +ifneq ($(host),$(build)) +host_toolchain:=$(host)- +endif + default_host_CC = $(host_toolchain)gcc default_host_CXX = $(host_toolchain)g++ default_host_AR = $(host_toolchain)ar default_host_RANLIB = $(host_toolchain)ranlib default_host_STRIP = $(host_toolchain)strip default_host_LIBTOOL = $(host_toolchain)libtool -default_host_INSTALL_NAME_TOOL = $(host_toolchain)install_name_tool -default_host_OTOOL = $(host_toolchain)otool default_host_NM = $(host_toolchain)nm define add_host_tool_func +ifneq ($(filter $(origin $1),undefined default),) +# Do not consider the well-known var $1 if it is undefined or is taking a value +# that is predefined by "make" (e.g. 
the make variable "CC" has a predefined +# value of "cc") $(host_os)_$1?=$$(default_host_$1) $(host_arch)_$(host_os)_$1?=$$($(host_os)_$1) $(host_arch)_$(host_os)_$(release_type)_$1?=$$($(host_os)_$1) +else +$(host_os)_$1=$(or $($1),$($(host_os)_$1),$(default_host_$1)) +$(host_arch)_$(host_os)_$1=$(or $($1),$($(host_arch)_$(host_os)_$1),$$($(host_os)_$1)) +$(host_arch)_$(host_os)_$(release_type)_$1=$(or $($1),$($(host_arch)_$(host_os)_$(release_type)_$1),$$($(host_os)_$1)) +endif host_$1=$$($(host_arch)_$(host_os)_$1) endef @@ -22,5 +33,5 @@ host_$1 = $$($(host_arch)_$(host_os)_$1) host_$(release_type)_$1 = $$($(host_arch)_$(host_os)_$(release_type)_$1) endef -$(foreach tool,CC CXX AR RANLIB STRIP NM LIBTOOL OTOOL INSTALL_NAME_TOOL,$(eval $(call add_host_tool_func,$(tool)))) +$(foreach tool,CC CXX AR RANLIB STRIP NM LIBTOOL OTOOL INSTALL_NAME_TOOL DSYMUTIL,$(eval $(call add_host_tool_func,$(tool)))) $(foreach flags,CFLAGS CXXFLAGS CPPFLAGS LDFLAGS, $(eval $(call add_host_flags_func,$(flags)))) diff --git a/depends/hosts/freebsd.mk b/depends/hosts/freebsd.mk new file mode 100644 index 0000000000000..0a62347b57008 --- /dev/null +++ b/depends/hosts/freebsd.mk @@ -0,0 +1,31 @@ +freebsd_CFLAGS=-pipe +freebsd_CXXFLAGS=$(freebsd_CFLAGS) + +freebsd_release_CFLAGS=-O2 +freebsd_release_CXXFLAGS=$(freebsd_release_CFLAGS) + +freebsd_debug_CFLAGS=-O1 +freebsd_debug_CXXFLAGS=$(freebsd_debug_CFLAGS) + +ifeq (86,$(findstring 86,$(build_arch))) +i686_freebsd_CC=clang -m32 +i686_freebsd_CXX=clang++ -m32 +i686_freebsd_AR=ar +i686_freebsd_RANLIB=ranlib +i686_freebsd_NM=nm +i686_freebsd_STRIP=strip + +x86_64_freebsd_CC=clang -m64 +x86_64_freebsd_CXX=clang++ -m64 +x86_64_freebsd_AR=ar +x86_64_freebsd_RANLIB=ranlib +x86_64_freebsd_NM=nm +x86_64_freebsd_STRIP=strip +else +i686_freebsd_CC=$(default_host_CC) -m32 +i686_freebsd_CXX=$(default_host_CXX) -m32 +x86_64_freebsd_CC=$(default_host_CC) -m64 +x86_64_freebsd_CXX=$(default_host_CXX) -m64 +endif + +freebsd_cmake_system=FreeBSD diff --git a/depends/hosts/linux.mk b/depends/hosts/linux.mk index b13a0f1ad714a..07da752492c8b 100644 --- a/depends/hosts/linux.mk +++ b/depends/hosts/linux.mk @@ -7,7 +7,7 @@ linux_release_CXXFLAGS=$(linux_release_CFLAGS) linux_debug_CFLAGS=-O1 linux_debug_CXXFLAGS=$(linux_debug_CFLAGS) -linux_debug_CPPFLAGS=-D_GLIBCXX_DEBUG -D_GLIBCXX_DEBUG_PEDANTIC +linux_debug_CPPFLAGS=-D_GLIBCXX_DEBUG -D_GLIBCXX_DEBUG_PEDANTIC -D_LIBCPP_DEBUG=1 ifeq (86,$(findstring 86,$(build_arch))) i686_linux_CC=gcc -m32 @@ -29,3 +29,4 @@ i686_linux_CXX=$(default_host_CXX) -m32 x86_64_linux_CC=$(default_host_CC) -m64 x86_64_linux_CXX=$(default_host_CXX) -m64 endif +linux_cmake_system=Linux diff --git a/depends/hosts/mingw32.mk b/depends/hosts/mingw32.mk index dbfb62fdcf986..48020d71af058 100644 --- a/depends/hosts/mingw32.mk +++ b/depends/hosts/mingw32.mk @@ -1,3 +1,7 @@ +ifneq ($(shell $(SHELL) $(.SHELLFLAGS) "command -v $(host)-g++-posix"),) +mingw32_CXX := $(host)-g++-posix +endif + mingw32_CFLAGS=-pipe mingw32_CXXFLAGS=$(mingw32_CFLAGS) @@ -8,3 +12,5 @@ mingw32_debug_CFLAGS=-O1 mingw32_debug_CXXFLAGS=$(mingw32_debug_CFLAGS) mingw32_debug_CPPFLAGS=-D_GLIBCXX_DEBUG -D_GLIBCXX_DEBUG_PEDANTIC + +mingw32_cmake_system=Windows diff --git a/depends/hosts/netbsd.mk b/depends/hosts/netbsd.mk new file mode 100644 index 0000000000000..b3e4545a64e90 --- /dev/null +++ b/depends/hosts/netbsd.mk @@ -0,0 +1,31 @@ +netbsd_CFLAGS=-pipe +netbsd_CXXFLAGS=$(netbsd_CFLAGS) + +netbsd_release_CFLAGS=-O2 +netbsd_release_CXXFLAGS=$(netbsd_release_CFLAGS) + +netbsd_debug_CFLAGS=-O1 
+netbsd_debug_CXXFLAGS=$(netbsd_debug_CFLAGS) + +ifeq (86,$(findstring 86,$(build_arch))) +i686_netbsd_CC=gcc -m32 +i686_netbsd_CXX=g++ -m32 +i686_netbsd_AR=ar +i686_netbsd_RANLIB=ranlib +i686_netbsd_NM=nm +i686_netbsd_STRIP=strip + +x86_64_netbsd_CC=gcc -m64 +x86_64_netbsd_CXX=g++ -m64 +x86_64_netbsd_AR=ar +x86_64_netbsd_RANLIB=ranlib +x86_64_netbsd_NM=nm +x86_64_netbsd_STRIP=strip +else +i686_netbsd_CC=$(default_host_CC) -m32 +i686_netbsd_CXX=$(default_host_CXX) -m32 +x86_64_netbsd_CC=$(default_host_CC) -m64 +x86_64_netbsd_CXX=$(default_host_CXX) -m64 +endif + +netbsd_cmake_system=NetBSD diff --git a/depends/hosts/openbsd.mk b/depends/hosts/openbsd.mk new file mode 100644 index 0000000000000..5988f24bff10f --- /dev/null +++ b/depends/hosts/openbsd.mk @@ -0,0 +1,31 @@ +openbsd_CFLAGS=-pipe +openbsd_CXXFLAGS=$(openbsd_CFLAGS) + +openbsd_release_CFLAGS=-O2 +openbsd_release_CXXFLAGS=$(openbsd_release_CFLAGS) + +openbsd_debug_CFLAGS=-O1 +openbsd_debug_CXXFLAGS=$(openbsd_debug_CFLAGS) + +ifeq (86,$(findstring 86,$(build_arch))) +i686_openbsd_CC=clang -m32 +i686_openbsd_CXX=clang++ -m32 +i686_openbsd_AR=ar +i686_openbsd_RANLIB=ranlib +i686_openbsd_NM=nm +i686_openbsd_STRIP=strip + +x86_64_openbsd_CC=clang -m64 +x86_64_openbsd_CXX=clang++ -m64 +x86_64_openbsd_AR=ar +x86_64_openbsd_RANLIB=ranlib +x86_64_openbsd_NM=nm +x86_64_openbsd_STRIP=strip +else +i686_openbsd_CC=$(default_host_CC) -m32 +i686_openbsd_CXX=$(default_host_CXX) -m32 +x86_64_openbsd_CC=$(default_host_CC) -m64 +x86_64_openbsd_CXX=$(default_host_CXX) -m64 +endif + +openbsd_cmake_system=OpenBSD diff --git a/depends/packages.md b/depends/packages.md new file mode 100644 index 0000000000000..4158b46d28d87 --- /dev/null +++ b/depends/packages.md @@ -0,0 +1,198 @@ +Each recipe consists of 3 main parts: defining identifiers, setting build +variables, and defining build commands. + +The package "mylib" will be used here as an example + +General tips: +- mylib_foo is written as $(package)_foo in order to make recipes more similar. +- Secondary dependency packages relative to the bitcoin binaries/libraries (i.e. + those not in `ALLOWED_LIBRARIES` in `contrib/devtools/symbol-check.py`) don't + need to be shared and should be built statically whenever possible. See + [below](#secondary-dependencies) for more details. + +## Identifiers +Each package is required to define at least these variables: + + $(package)_version: + Version of the upstream library or program. If there is no version, a + placeholder such as 1.0 can be used. + + $(package)_download_path: + Location of the upstream source, without the file-name. Usually http, https + or ftp. Secure transmission options like https should be preferred if + available. + + $(package)_file_name: + The upstream source filename available at the download path. + + $(package)_sha256_hash: + The sha256 hash of the upstream file + +These variables are optional: + + $(package)_build_subdir: + cd to this dir before running configure/build/stage commands. + + $(package)_download_file: + The file-name of the upstream source if it differs from how it should be + stored locally. This can be used to avoid storing file-names with strange + characters. + + $(package)_dependencies: + Names of any other packages that this one depends on. + + $(package)_patches: + Filenames of any patches needed to build the package + + $(package)_extra_sources: + Any extra files that will be fetched via $(package)_fetch_cmds. These are + specified so that they can be fetched and verified via 'make download'. 
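+
+As a purely illustrative sketch (the download path, file name and hash below
+are placeholders rather than a real upstream), the identifiers for the
+hypothetical "mylib" package could look like:
+
+    package=mylib
+    $(package)_version=1.0
+    $(package)_download_path=https://example.com/releases
+    $(package)_file_name=$(package)-$($(package)_version).tar.gz
+    $(package)_sha256_hash=0000000000000000000000000000000000000000000000000000000000000000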
+ + +## Build Variables: +After defining the main identifiers, build variables may be added or customized +before running the build commands. They should be added to a function called +$(package)_set_vars. For example: + + define $(package)_set_vars + ... + endef + +Most variables can be prefixed with the host, architecture, or both, to make +the modifications specific to that case. For example: + + Universal: $(package)_cc=gcc + Linux only: $(package)_linux_cc=gcc + x86_64 only: $(package)_x86_64_cc = gcc + x86_64 linux only: $(package)_x86_64_linux_cc = gcc + +These variables may be set to override or append their default values. + + $(package)_cc + $(package)_cxx + $(package)_objc + $(package)_objcxx + $(package)_ar + $(package)_ranlib + $(package)_libtool + $(package)_nm + $(package)_cflags + $(package)_cxxflags + $(package)_ldflags + $(package)_cppflags + $(package)_config_env + $(package)_build_env + $(package)_stage_env + $(package)_build_opts + $(package)_config_opts + +The *_env variables are used to add environment variables to the respective +commands. + +Many variables respect a debug/release suffix as well, in order to use them for +only the appropriate build config. For example: + + $(package)_cflags_release = -O3 + $(package)_cflags_i686_debug = -g + $(package)_config_opts_release = --disable-debug + +These will be used in addition to the options that do not specify +debug/release. All builds are considered to be release unless DEBUG=1 is set by +the user. Other variables may be defined as needed. + +## Build commands: + + For each build, a unique build dir and staging dir are created. For example, + `work/build/mylib/1.0-1adac830f6e` and `work/staging/mylib/1.0-1adac830f6e`. + + The following build commands are available for each recipe: + + $(package)_fetch_cmds: + Runs from: build dir + Fetch the source file. If undefined, it will be fetched and verified + against its hash. + + $(package)_extract_cmds: + Runs from: build dir + Verify the source file against its hash and extract it. If undefined, the + source is assumed to be a tarball. + + $(package)_preprocess_cmds: + Runs from: build dir/$(package)_build_subdir + Preprocess the source as necessary. If undefined, does nothing. + + $(package)_config_cmds: + Runs from: build dir/$(package)_build_subdir + Configure the source. If undefined, does nothing. + + $(package)_build_cmds: + Runs from: build dir/$(package)_build_subdir + Build the source. If undefined, does nothing. + + $(package)_stage_cmds: + Runs from: build dir/$(package)_build_subdir + Stage the build results. If undefined, does nothing. + + The following variables are available for each recipe: + + $(1)_staging_dir: package's destination sysroot path + $(1)_staging_prefix_dir: prefix path inside of the package's staging dir + $(1)_extract_dir: path to the package's extracted sources + $(1)_build_dir: path where configure/build/stage commands will be run + $(1)_patch_dir: path where the package's patches (if any) are found + +Notes on build commands: + +For packages built with autotools, $($(package)_autoconf) can be used in the +configure step to (usually) correctly configure automatically. Any +$($(package)_config_opts) will be appended. + +Most autotools projects can be properly staged using: + + $(MAKE) DESTDIR=$($(package)_staging_dir) install + +## Build outputs: + +In general, the output of a depends package should not contain any libtool +archives. Instead, the package should output `.pc` (`pkg-config`) files where +possible. 
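+
+In practice this usually just means deleting any stray `.la` files in the
+package's postprocess step. A minimal sketch, mirroring what several recipes
+in this tree already do (postprocess commands run from the staging prefix
+directory):
+
+    define $(package)_postprocess_cmds
+      rm -rf lib/*.la
+    endef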
+ +From the [Gentoo Wiki entry](https://wiki.gentoo.org/wiki/Project:Quality_Assurance/Handling_Libtool_Archives): + +> Libtool pulls in all direct and indirect dependencies into the .la files it +> creates. This leads to massive overlinking, which is toxic to the Gentoo +> ecosystem, as it leads to a massive number of unnecessary rebuilds. + +## Secondary dependencies: + +Secondary dependency packages relative to the bitcoin binaries/libraries (i.e. +those not in `ALLOWED_LIBRARIES` in `contrib/devtools/symbol-check.py`) don't +need to be shared and should be built statically whenever possible. This +improves general build reliability as illustrated by the following example: + +When linking an executable against a shared library `libprimary` that has its +own shared dependency `libsecondary`, we may need to specify the path to +`libsecondary` on the link command using the `-rpath/-rpath-link` options, it is +not sufficient to just say `libprimary`. + +For us, it's much easier to just link a static `libsecondary` into a shared +`libprimary`. Especially because in our case, we are linking against a dummy +`libprimary` anyway that we'll throw away. We don't care if the end-user has a +static or dynamic `libsecondary`, that's not our concern. With a static +`libsecondary`, when we need to link `libprimary` into our executable, there's no +dependency chain to worry about as `libprimary` has all the symbols. + +## Build targets: + +To build an individual package (useful for debugging), following build targets are available. + + make ${package} + make ${package}_fetched + make ${package}_extracted + make ${package}_preprocessed + make ${package}_configured + make ${package}_built + make ${package}_staged + make ${package}_postprocessed + make ${package}_cached + make ${package}_cached_checksum diff --git a/depends/packages/bdb.mk b/depends/packages/bdb.mk index 68841afdb84c7..dc536fd3991e7 100644 --- a/depends/packages/bdb.mk +++ b/depends/packages/bdb.mk @@ -1,18 +1,27 @@ package=bdb $(package)_version=4.8.30 -$(package)_download_path=http://download.oracle.com/berkeley-db +$(package)_download_path=https://download.oracle.com/berkeley-db $(package)_file_name=db-$($(package)_version).NC.tar.gz $(package)_sha256_hash=12edc0df75bf9abd7f82f821795bcee50f42cb2e5f76a6a281b85732798364ef $(package)_build_subdir=build_unix +$(package)_patches=clang_cxx_11.patch define $(package)_set_vars -$(package)_config_opts=--disable-shared --enable-cxx --disable-replication +$(package)_config_opts=--disable-shared --enable-cxx --disable-replication --enable-option-checking $(package)_config_opts_mingw32=--enable-mingw $(package)_config_opts_linux=--with-pic +$(package)_config_opts_freebsd=--with-pic +$(package)_config_opts_netbsd=--with-pic +$(package)_config_opts_openbsd=--with-pic +$(package)_config_opts_android=--with-pic +$(package)_cflags+=-Wno-error=implicit-function-declaration +$(package)_cxxflags+=-std=c++17 +$(package)_cppflags_mingw32=-DUNICODE -D_UNICODE endef define $(package)_preprocess_cmds - sed -i.old 's/__atomic_compare_exchange/__atomic_compare_exchange_db/' dbinc/atomic.h + patch -p1 < $($(package)_patch_dir)/clang_cxx_11.patch && \ + cp -f $(BASEDIR)/config.guess $(BASEDIR)/config.sub dist endef define $(package)_config_cmds diff --git a/depends/packages/boost.mk b/depends/packages/boost.mk index f50828c546dae..563848c39889d 100644 --- a/depends/packages/boost.mk +++ b/depends/packages/boost.mk @@ -1,44 +1,10 @@ package=boost -$(package)_version=1_55_0 
-$(package)_download_path=http://sourceforge.net/projects/boost/files/boost/1.55.0 -$(package)_file_name=$(package)_$($(package)_version).tar.bz2 -$(package)_sha256_hash=fff00023dd79486d444c8e29922f4072e1d451fc5a4d2b6075852ead7f2b7b52 -$(package)_patches=darwin_boost_atomic-1.patch darwin_boost_atomic-2.patch - -define $(package)_set_vars -$(package)_config_opts_release=variant=release -$(package)_config_opts_debug=variant=debug -$(package)_config_opts=--layout=tagged --build-type=complete --user-config=user-config.jam -$(package)_config_opts+=threading=multi link=static -sNO_BZIP2=1 -sNO_ZLIB=1 -$(package)_config_opts_linux=threadapi=pthread runtime-link=shared -$(package)_config_opts_darwin=--toolset=darwin-4.2.1 runtime-link=shared -$(package)_config_opts_mingw32=binary-format=pe target-os=windows threadapi=win32 runtime-link=static -$(package)_config_opts_x86_64_mingw32=address-model=64 -$(package)_config_opts_i686_mingw32=address-model=32 -$(package)_config_opts_i686_linux=address-model=32 architecture=x86 -$(package)_toolset_$(host_os)=gcc -$(package)_archiver_$(host_os)=$($(package)_ar) -$(package)_toolset_darwin=darwin -$(package)_archiver_darwin=$($(package)_libtool) -$(package)_config_libraries=chrono,filesystem,program_options,system,thread,test -$(package)_cxxflags=-fvisibility=hidden -$(package)_cxxflags_linux=-fPIC -endef - -define $(package)_preprocess_cmds - patch -p2 < $($(package)_patch_dir)/darwin_boost_atomic-1.patch && \ - patch -p2 < $($(package)_patch_dir)/darwin_boost_atomic-2.patch && \ - echo "using $(boost_toolset_$(host_os)) : : $($(package)_cxx) : \"$($(package)_cxxflags) $($(package)_cppflags)\" \"$($(package)_ldflags)\" \"$(boost_archiver_$(host_os))\" \"$(host_STRIP)\" \"$(host_RANLIB)\" \"$(host_WINDRES)\" : ;" > user-config.jam -endef - -define $(package)_config_cmds - ./bootstrap.sh --without-icu --with-libraries=$(boost_config_libraries) -endef - -define $(package)_build_cmds - ./b2 -d2 -j2 -d1 --prefix=$($(package)_staging_prefix_dir) $($(package)_config_opts) stage -endef +$(package)_version=1.77.0 +$(package)_download_path=https://boostorg.jfrog.io/artifactory/main/release/$($(package)_version)/source/ +$(package)_file_name=boost_$(subst .,_,$($(package)_version)).tar.bz2 +$(package)_sha256_hash=fc9f85fc030e233142908241af7a846e60630aa7388de9a5fafb1f3a26840854 define $(package)_stage_cmds - ./b2 -d0 -j4 --prefix=$($(package)_staging_prefix_dir) $($(package)_config_opts) install + mkdir -p $($(package)_staging_prefix_dir)/include && \ + cp -r boost $($(package)_staging_prefix_dir)/include endef diff --git a/depends/packages/capnp.mk b/depends/packages/capnp.mk new file mode 100644 index 0000000000000..8a3a14810d6ed --- /dev/null +++ b/depends/packages/capnp.mk @@ -0,0 +1,19 @@ +package=capnp +$(package)_version=$(native_$(package)_version) +$(package)_download_path=$(native_$(package)_download_path) +$(package)_download_file=$(native_$(package)_download_file) +$(package)_file_name=$(native_$(package)_file_name) +$(package)_sha256_hash=$(native_$(package)_sha256_hash) +$(package)_dependencies=native_$(package) + +define $(package)_config_cmds + $($(package)_autoconf) --with-external-capnp +endef + +define $(package)_build_cmds + $(MAKE) +endef + +define $(package)_stage_cmds + $(MAKE) DESTDIR=$($(package)_staging_dir) install +endef diff --git a/depends/packages/dbus.mk b/depends/packages/dbus.mk deleted file mode 100644 index 8ac9ab742bfe4..0000000000000 --- a/depends/packages/dbus.mk +++ /dev/null @@ -1,23 +0,0 @@ -package=dbus 
-$(package)_version=1.8.6 -$(package)_download_path=http://dbus.freedesktop.org/releases/dbus -$(package)_file_name=$(package)-$($(package)_version).tar.gz -$(package)_sha256_hash=eded83ca007b719f32761e60fd8b9ffd0f5796a4caf455b01b5a5ef740ebd23f -$(package)_dependencies=expat - -define $(package)_set_vars - $(package)_config_opts=--disable-tests --disable-doxygen-docs --disable-xml-docs --disable-static --without-x -endef - -define $(package)_config_cmds - $($(package)_autoconf) -endef - -define $(package)_build_cmds - $(MAKE) -C dbus libdbus-1.la -endef - -define $(package)_stage_cmds - $(MAKE) -C dbus DESTDIR=$($(package)_staging_dir) install-libLTLIBRARIES install-dbusincludeHEADERS install-nodist_dbusarchincludeHEADERS && \ - $(MAKE) DESTDIR=$($(package)_staging_dir) install-pkgconfigDATA -endef diff --git a/depends/packages/expat.mk b/depends/packages/expat.mk index 1ac443537420b..50791ebc6e29e 100644 --- a/depends/packages/expat.mk +++ b/depends/packages/expat.mk @@ -1,11 +1,14 @@ package=expat -$(package)_version=2.1.0 -$(package)_download_path=http://sourceforge.net/projects/expat/files/expat/$($(package)_version) -$(package)_file_name=$(package)-$($(package)_version).tar.gz -$(package)_sha256_hash=823705472f816df21c8f6aa026dd162b280806838bb55b3432b0fb1fcca7eb86 +$(package)_version=2.4.1 +$(package)_download_path=https://github.com/libexpat/libexpat/releases/download/R_$(subst .,_,$($(package)_version))/ +$(package)_file_name=$(package)-$($(package)_version).tar.xz +$(package)_sha256_hash=cf032d0dba9b928636548e32b327a2d66b1aab63c4f4a13dd132c2d1d2f2fb6a define $(package)_set_vars -$(package)_config_opts=--disable-static + $(package)_config_opts=--disable-shared --without-docbook --without-tests --without-examples + $(package)_config_opts += --disable-dependency-tracking --enable-option-checking + $(package)_config_opts += --without-xmlwf + $(package)_config_opts_linux=--with-pic endef define $(package)_config_cmds @@ -19,3 +22,7 @@ endef define $(package)_stage_cmds $(MAKE) DESTDIR=$($(package)_staging_dir) install endef + +define $(package)_postprocess_cmds + rm -rf share lib/cmake lib/*.la +endef diff --git a/depends/packages/fontconfig.mk b/depends/packages/fontconfig.mk index 2cf553ed9657b..c8b2fc33d5735 100644 --- a/depends/packages/fontconfig.mk +++ b/depends/packages/fontconfig.mk @@ -1,12 +1,18 @@ package=fontconfig -$(package)_version=2.11.1 -$(package)_download_path=http://www.freedesktop.org/software/fontconfig/release/ +$(package)_version=2.12.6 +$(package)_download_path=https://www.freedesktop.org/software/fontconfig/release/ $(package)_file_name=$(package)-$($(package)_version).tar.bz2 -$(package)_sha256_hash=dc62447533bca844463a3c3fd4083b57c90f18a70506e7a9f4936b5a1e516a99 +$(package)_sha256_hash=cf0c30807d08f6a28ab46c61b8dbd55c97d2f292cf88f3a07d3384687f31f017 $(package)_dependencies=freetype expat +$(package)_patches=gperf_header_regen.patch define $(package)_set_vars - $(package)_config_opts=--disable-docs --disable-static + $(package)_config_opts=--disable-docs --disable-static --disable-libxml2 --disable-iconv + $(package)_config_opts += --disable-dependency-tracking --enable-option-checking +endef + +define $(package)_preprocess_cmds + patch -p1 < $($(package)_patch_dir)/gperf_header_regen.patch endef define $(package)_config_cmds @@ -20,3 +26,7 @@ endef define $(package)_stage_cmds $(MAKE) DESTDIR=$($(package)_staging_dir) install endef + +define $(package)_postprocess_cmds + rm -rf var lib/*.la +endef diff --git a/depends/packages/freetype.mk 
b/depends/packages/freetype.mk index f7d6e0f9fc58d..6f5dbe0f01377 100644 --- a/depends/packages/freetype.mk +++ b/depends/packages/freetype.mk @@ -1,11 +1,12 @@ package=freetype -$(package)_version=2.5.3 -$(package)_download_path=http://downloads.sourceforge.net/$(package) -$(package)_file_name=$(package)-$($(package)_version).tar.bz2 -$(package)_sha256_hash=c0848b29d52ef3ca27ad92e08351f023c5e24ce8cea7d8fe69fc96358e65f75e +$(package)_version=2.11.0 +$(package)_download_path=https://download.savannah.gnu.org/releases/$(package) +$(package)_file_name=$(package)-$($(package)_version).tar.xz +$(package)_sha256_hash=8bee39bd3968c4804b70614a0a3ad597299ad0e824bc8aad5ce8aaf48067bde7 define $(package)_set_vars - $(package)_config_opts=--without-zlib --without-png --disable-static + $(package)_config_opts=--without-zlib --without-png --without-harfbuzz --without-bzip2 --disable-static + $(package)_config_opts += --enable-option-checking --without-brotli $(package)_config_opts_linux=--with-pic endef @@ -20,3 +21,7 @@ endef define $(package)_stage_cmds $(MAKE) DESTDIR=$($(package)_staging_dir) install endef + +define $(package)_postprocess_cmds + rm -rf share/man lib/*.la +endef diff --git a/depends/packages/libICE.mk b/depends/packages/libICE.mk deleted file mode 100644 index fc60323b1c918..0000000000000 --- a/depends/packages/libICE.mk +++ /dev/null @@ -1,23 +0,0 @@ -package=libICE -$(package)_version=1.0.9 -$(package)_download_path=http://xorg.freedesktop.org/releases/individual/lib/ -$(package)_file_name=$(package)-$($(package)_version).tar.bz2 -$(package)_sha256_hash=8f7032f2c1c64352b5423f6b48a8ebdc339cc63064af34d66a6c9aa79759e202 -$(package)_dependencies=xtrans xproto - -define $(package)_set_vars - $(package)_config_opts=--disable-static --disable-docs --disable-specs --without-xsltproc - $(package)_config_opts_linux=--with-pic -endef - -define $(package)_config_cmds - $($(package)_autoconf) -endef - -define $(package)_build_cmds - $(MAKE) -endef - -define $(package)_stage_cmds - $(MAKE) DESTDIR=$($(package)_staging_dir) install -endef diff --git a/depends/packages/libSM.mk b/depends/packages/libSM.mk deleted file mode 100644 index 0f9307ca76acb..0000000000000 --- a/depends/packages/libSM.mk +++ /dev/null @@ -1,23 +0,0 @@ -package=libSM -$(package)_version=1.2.2 -$(package)_download_path=http://xorg.freedesktop.org/releases/individual/lib/ -$(package)_file_name=$(package)-$($(package)_version).tar.bz2 -$(package)_sha256_hash=0baca8c9f5d934450a70896c4ad38d06475521255ca63b717a6510fdb6e287bd -$(package)_dependencies=xtrans xproto libICE - -define $(package)_set_vars - $(package)_config_opts=--without-libuuid --without-xsltproc --disable-docs --disable-static - $(package)_config_opts_linux=--with-pic -endef - -define $(package)_config_cmds - $($(package)_autoconf) -endef - -define $(package)_build_cmds - $(MAKE) -endef - -define $(package)_stage_cmds - $(MAKE) DESTDIR=$($(package)_staging_dir) install -endef diff --git a/depends/packages/libX11.mk b/depends/packages/libX11.mk deleted file mode 100644 index 178d592ee63ff..0000000000000 --- a/depends/packages/libX11.mk +++ /dev/null @@ -1,23 +0,0 @@ -package=libX11 -$(package)_version=1.6.2 -$(package)_download_path=http://xorg.freedesktop.org/releases/individual/lib/ -$(package)_file_name=$(package)-$($(package)_version).tar.bz2 -$(package)_sha256_hash=2aa027e837231d2eeea90f3a4afe19948a6eb4c8b2bec0241eba7dbc8106bd16 -$(package)_dependencies=libxcb xtrans xextproto xproto - -define $(package)_set_vars -$(package)_config_opts=--disable-xkb 
--disable-static -$(package)_config_opts_linux=--with-pic -endef - -define $(package)_config_cmds - $($(package)_autoconf) -endef - -define $(package)_build_cmds - $(MAKE) -endef - -define $(package)_stage_cmds - $(MAKE) DESTDIR=$($(package)_staging_dir) install -endef diff --git a/depends/packages/libXau.mk b/depends/packages/libXau.mk index e87df2e4de676..b7e032c0b2d13 100644 --- a/depends/packages/libXau.mk +++ b/depends/packages/libXau.mk @@ -1,15 +1,22 @@ package=libXau -$(package)_version=1.0.8 -$(package)_download_path=http://xorg.freedesktop.org/releases/individual/lib/ +$(package)_version=1.0.9 +$(package)_download_path=https://xorg.freedesktop.org/releases/individual/lib/ $(package)_file_name=$(package)-$($(package)_version).tar.bz2 -$(package)_sha256_hash=fdd477320aeb5cdd67272838722d6b7d544887dfe7de46e1e7cc0c27c2bea4f2 +$(package)_sha256_hash=ccf8cbf0dbf676faa2ea0a6d64bcc3b6746064722b606c8c52917ed00dcb73ec $(package)_dependencies=xproto +# When updating this package, check the default value of +# --disable-xthreads. It is currently enabled. define $(package)_set_vars - $(package)_config_opts=--disable-shared + $(package)_config_opts=--disable-shared --disable-lint-library --without-lint + $(package)_config_opts += --disable-dependency-tracking --enable-option-checking $(package)_config_opts_linux=--with-pic endef +define $(package)_preprocess_cmds + cp -f $(BASEDIR)/config.guess $(BASEDIR)/config.sub . +endef + define $(package)_config_cmds $($(package)_autoconf) endef @@ -21,3 +28,7 @@ endef define $(package)_stage_cmds $(MAKE) DESTDIR=$($(package)_staging_dir) install endef + +define $(package)_postprocess_cmds + rm -rf share lib/*.la +endef diff --git a/depends/packages/libXext.mk b/depends/packages/libXext.mk deleted file mode 100644 index 4db836066f96e..0000000000000 --- a/depends/packages/libXext.mk +++ /dev/null @@ -1,22 +0,0 @@ -package=libXext -$(package)_version=1.3.2 -$(package)_download_path=http://xorg.freedesktop.org/releases/individual/lib/ -$(package)_file_name=$(package)-$($(package)_version).tar.bz2 -$(package)_sha256_hash=f829075bc646cdc085fa25d98d5885d83b1759ceb355933127c257e8e50432e0 -$(package)_dependencies=xproto xextproto libX11 libXau - -define $(package)_set_vars - $(package)_config_opts=--disable-static -endef - -define $(package)_config_cmds - $($(package)_autoconf) -endef - -define $(package)_build_cmds - $(MAKE) -endef - -define $(package)_stage_cmds - $(MAKE) DESTDIR=$($(package)_staging_dir) install -endef diff --git a/depends/packages/libevent.mk b/depends/packages/libevent.mk new file mode 100644 index 0000000000000..1efe6220d3df3 --- /dev/null +++ b/depends/packages/libevent.mk @@ -0,0 +1,41 @@ +package=libevent +$(package)_version=2.1.12-stable +$(package)_download_path=https://github.com/libevent/libevent/releases/download/release-$($(package)_version)/ +$(package)_file_name=$(package)-$($(package)_version).tar.gz +$(package)_sha256_hash=92e6de1be9ec176428fd2367677e61ceffc2ee1cb119035037a27d346b0403bb + +# When building for Windows, we set _WIN32_WINNT to target the same Windows +# version as we do in configure. Due to quirks in libevents build system, this +# is also required to enable support for ipv6. See #19375. 
+define $(package)_set_vars + $(package)_config_opts=--disable-shared --disable-openssl --disable-libevent-regress --disable-samples + $(package)_config_opts += --disable-dependency-tracking --enable-option-checking + $(package)_config_opts_release=--disable-debug-mode + $(package)_config_opts_linux=--with-pic + $(package)_config_opts_freebsd=--with-pic + $(package)_config_opts_netbsd=--with-pic + $(package)_config_opts_openbsd=--with-pic + $(package)_config_opts_android=--with-pic + $(package)_cppflags_mingw32=-D_WIN32_WINNT=0x0601 +endef + +define $(package)_preprocess_cmds + cp -f $(BASEDIR)/config.guess $(BASEDIR)/config.sub build-aux +endef + +define $(package)_config_cmds + $($(package)_autoconf) +endef + +define $(package)_build_cmds + $(MAKE) +endef + +define $(package)_stage_cmds + $(MAKE) DESTDIR=$($(package)_staging_dir) install +endef + +define $(package)_postprocess_cmds + rm lib/*.la && \ + rm include/ev*.h +endef diff --git a/depends/packages/libmultiprocess.mk b/depends/packages/libmultiprocess.mk new file mode 100644 index 0000000000000..9b66207fc5063 --- /dev/null +++ b/depends/packages/libmultiprocess.mk @@ -0,0 +1,28 @@ +package=libmultiprocess +$(package)_version=$(native_$(package)_version) +$(package)_download_path=$(native_$(package)_download_path) +$(package)_file_name=$(native_$(package)_file_name) +$(package)_sha256_hash=$(native_$(package)_sha256_hash) +$(package)_dependencies=native_$(package) capnp +ifneq ($(host),$(build)) +$(package)_dependencies += native_capnp +endif + +define $(package)_set_vars := +ifneq ($(host),$(build)) +$(package)_cmake_opts := -DCAPNP_EXECUTABLE="$$(native_capnp_prefixbin)/capnp" +$(package)_cmake_opts += -DCAPNPC_CXX_EXECUTABLE="$$(native_capnp_prefixbin)/capnpc-c++" +endif +endef + +define $(package)_config_cmds + $($(package)_cmake) . 
+endef + +define $(package)_build_cmds + $(MAKE) +endef + +define $(package)_stage_cmds + $(MAKE) DESTDIR=$($(package)_staging_dir) install +endef diff --git a/depends/packages/libnatpmp.mk b/depends/packages/libnatpmp.mk new file mode 100644 index 0000000000000..cdcf8c0bf2c18 --- /dev/null +++ b/depends/packages/libnatpmp.mk @@ -0,0 +1,22 @@ +package=libnatpmp +$(package)_version=4536032ae32268a45c073a4d5e91bbab4534773a +$(package)_download_path=https://github.com/miniupnp/libnatpmp/archive +$(package)_file_name=$($(package)_version).tar.gz +$(package)_sha256_hash=543b460aab26acf91e11d15e17d8798f845304199eea2d76c2f444ec749c5383 + +define $(package)_set_vars + $(package)_build_opts=CC="$($(package)_cc)" + $(package)_build_opts_mingw32=CPPFLAGS=-DNATPMP_STATICLIB + $(package)_build_opts_darwin=LIBTOOL="$($(package)_libtool)" + $(package)_build_env+=CFLAGS="$($(package)_cflags) $($(package)_cppflags)" AR="$($(package)_ar)" +endef + +define $(package)_build_cmds + $(MAKE) libnatpmp.a $($(package)_build_opts) +endef + +define $(package)_stage_cmds + mkdir -p $($(package)_staging_prefix_dir)/include $($(package)_staging_prefix_dir)/lib &&\ + install *.h $($(package)_staging_prefix_dir)/include &&\ + install libnatpmp.a $($(package)_staging_prefix_dir)/lib +endef diff --git a/depends/packages/libxcb.mk b/depends/packages/libxcb.mk index f29b577f8acab..fa30e80f5c087 100644 --- a/depends/packages/libxcb.mk +++ b/depends/packages/libxcb.mk @@ -1,15 +1,25 @@ package=libxcb -$(package)_version=1.10 -$(package)_download_path=http://xcb.freedesktop.org/dist -$(package)_file_name=$(package)-$($(package)_version).tar.bz2 -$(package)_sha256_hash=98d9ab05b636dd088603b64229dd1ab2d2cc02ab807892e107d674f9c3f2d5b5 -$(package)_dependencies=xcb_proto libXau xproto +$(package)_version=1.14 +$(package)_download_path=https://xcb.freedesktop.org/dist +$(package)_file_name=$(package)-$($(package)_version).tar.xz +$(package)_sha256_hash=a55ed6db98d43469801262d81dc2572ed124edc3db31059d4e9916eb9f844c34 +$(package)_dependencies=xcb_proto libXau define $(package)_set_vars -$(package)_config_opts=--disable-static +$(package)_config_opts=--disable-static --disable-devel-docs --without-doxygen --without-launchd +$(package)_config_opts += --disable-dependency-tracking --enable-option-checking +# Disable unneeded extensions. 
+# More info is available from: https://doc.qt.io/qt-5.15/linux-requirements.html +$(package)_config_opts += --disable-composite --disable-damage --disable-dpms +$(package)_config_opts += --disable-dri2 --disable-dri3 --disable-glx +$(package)_config_opts += --disable-present --disable-record --disable-resource +$(package)_config_opts += --disable-screensaver --disable-xevie --disable-xfree86-dri +$(package)_config_opts += --disable-xinput --disable-xprint --disable-selinux +$(package)_config_opts += --disable-xtest --disable-xv --disable-xvmc endef define $(package)_preprocess_cmds + cp -f $(BASEDIR)/config.guess $(BASEDIR)/config.sub build-aux && \ sed "s/pthread-stubs//" -i configure endef @@ -26,5 +36,5 @@ define $(package)_stage_cmds endef define $(package)_postprocess_cmds - rm -rf share/man share/doc + rm -rf share lib/*.la endef diff --git a/depends/packages/libxcb_util.mk b/depends/packages/libxcb_util.mk new file mode 100644 index 0000000000000..6f1b9cd7c65e0 --- /dev/null +++ b/depends/packages/libxcb_util.mk @@ -0,0 +1,32 @@ +package=libxcb_util +$(package)_version=0.4.0 +$(package)_download_path=https://xcb.freedesktop.org/dist +$(package)_file_name=xcb-util-$($(package)_version).tar.bz2 +$(package)_sha256_hash=46e49469cb3b594af1d33176cd7565def2be3fa8be4371d62271fabb5eae50e9 +$(package)_dependencies=libxcb + +define $(package)_set_vars +$(package)_config_opts = --disable-shared --disable-devel-docs --without-doxygen +$(package)_config_opts += --disable-dependency-tracking --enable-option-checking +$(package)_config_opts += --with-pic +endef + +define $(package)_preprocess_cmds + cp -f $(BASEDIR)/config.guess $(BASEDIR)/config.sub . +endef + +define $(package)_config_cmds + $($(package)_autoconf) +endef + +define $(package)_build_cmds + $(MAKE) +endef + +define $(package)_stage_cmds + $(MAKE) DESTDIR=$($(package)_staging_dir) install +endef + +define $(package)_postprocess_cmds + rm -rf share/man share/doc lib/*.la +endef diff --git a/depends/packages/libxcb_util_image.mk b/depends/packages/libxcb_util_image.mk new file mode 100644 index 0000000000000..d12d67e8e888a --- /dev/null +++ b/depends/packages/libxcb_util_image.mk @@ -0,0 +1,31 @@ +package=libxcb_util_image +$(package)_version=0.4.0 +$(package)_download_path=https://xcb.freedesktop.org/dist +$(package)_file_name=xcb-util-image-$($(package)_version).tar.bz2 +$(package)_sha256_hash=2db96a37d78831d643538dd1b595d7d712e04bdccf8896a5e18ce0f398ea2ffc +$(package)_dependencies=libxcb libxcb_util + +define $(package)_set_vars +$(package)_config_opts=--disable-static --disable-devel-docs --without-doxygen +$(package)_config_opts+= --disable-dependency-tracking --enable-option-checking +endef + +define $(package)_preprocess_cmds + cp -f $(BASEDIR)/config.guess $(BASEDIR)/config.sub . 
+endef + +define $(package)_config_cmds + $($(package)_autoconf) +endef + +define $(package)_build_cmds + $(MAKE) +endef + +define $(package)_stage_cmds + $(MAKE) DESTDIR=$($(package)_staging_dir) install +endef + +define $(package)_postprocess_cmds + rm -rf share/man share/doc lib/*.la +endef diff --git a/depends/packages/libxcb_util_keysyms.mk b/depends/packages/libxcb_util_keysyms.mk new file mode 100644 index 0000000000000..d4f72dedbea7f --- /dev/null +++ b/depends/packages/libxcb_util_keysyms.mk @@ -0,0 +1,31 @@ +package=libxcb_util_keysyms +$(package)_version=0.4.0 +$(package)_download_path=https://xcb.freedesktop.org/dist +$(package)_file_name=xcb-util-keysyms-$($(package)_version).tar.bz2 +$(package)_sha256_hash=0ef8490ff1dede52b7de533158547f8b454b241aa3e4dcca369507f66f216dd9 +$(package)_dependencies=libxcb xproto + +define $(package)_set_vars +$(package)_config_opts=--disable-static --disable-devel-docs --without-doxygen +$(package)_config_opts += --disable-dependency-tracking --enable-option-checking +endef + +define $(package)_preprocess_cmds + cp -f $(BASEDIR)/config.guess $(BASEDIR)/config.sub . +endef + +define $(package)_config_cmds + $($(package)_autoconf) +endef + +define $(package)_build_cmds + $(MAKE) +endef + +define $(package)_stage_cmds + $(MAKE) DESTDIR=$($(package)_staging_dir) install +endef + +define $(package)_postprocess_cmds + rm -rf share/man share/doc lib/*.la +endef diff --git a/depends/packages/libxcb_util_render.mk b/depends/packages/libxcb_util_render.mk new file mode 100644 index 0000000000000..28f1fb073c681 --- /dev/null +++ b/depends/packages/libxcb_util_render.mk @@ -0,0 +1,31 @@ +package=libxcb_util_render +$(package)_version=0.3.9 +$(package)_download_path=https://xcb.freedesktop.org/dist +$(package)_file_name=xcb-util-renderutil-$($(package)_version).tar.bz2 +$(package)_sha256_hash=c6e97e48fb1286d6394dddb1c1732f00227c70bd1bedb7d1acabefdd340bea5b +$(package)_dependencies=libxcb + +define $(package)_set_vars +$(package)_config_opts=--disable-static --disable-devel-docs --without-doxygen +$(package)_config_opts += --disable-dependency-tracking --enable-option-checking +endef + +define $(package)_preprocess_cmds + cp -f $(BASEDIR)/config.guess $(BASEDIR)/config.sub . +endef + +define $(package)_config_cmds + $($(package)_autoconf) +endef + +define $(package)_build_cmds + $(MAKE) +endef + +define $(package)_stage_cmds + $(MAKE) DESTDIR=$($(package)_staging_dir) install +endef + +define $(package)_postprocess_cmds + rm -rf share/man share/doc lib/*.la +endef diff --git a/depends/packages/libxcb_util_wm.mk b/depends/packages/libxcb_util_wm.mk new file mode 100644 index 0000000000000..3b905ba4ec55b --- /dev/null +++ b/depends/packages/libxcb_util_wm.mk @@ -0,0 +1,31 @@ +package=libxcb_util_wm +$(package)_version=0.4.1 +$(package)_download_path=https://xcb.freedesktop.org/dist +$(package)_file_name=xcb-util-wm-$($(package)_version).tar.bz2 +$(package)_sha256_hash=28bf8179640eaa89276d2b0f1ce4285103d136be6c98262b6151aaee1d3c2a3f +$(package)_dependencies=libxcb + +define $(package)_set_vars +$(package)_config_opts=--disable-static --disable-devel-docs --without-doxygen +$(package)_config_opts += --disable-dependency-tracking --enable-option-checking +endef + +define $(package)_preprocess_cmds + cp -f $(BASEDIR)/config.guess $(BASEDIR)/config.sub . 
+endef + +define $(package)_config_cmds + $($(package)_autoconf) +endef + +define $(package)_build_cmds + $(MAKE) +endef + +define $(package)_stage_cmds + $(MAKE) DESTDIR=$($(package)_staging_dir) install +endef + +define $(package)_postprocess_cmds + rm -rf share/man share/doc lib/*.la +endef diff --git a/depends/packages/libxkbcommon.mk b/depends/packages/libxkbcommon.mk new file mode 100644 index 0000000000000..8c6c56545f075 --- /dev/null +++ b/depends/packages/libxkbcommon.mk @@ -0,0 +1,32 @@ +package=libxkbcommon +$(package)_version=0.8.4 +$(package)_download_path=https://xkbcommon.org/download/ +$(package)_file_name=$(package)-$($(package)_version).tar.xz +$(package)_sha256_hash=60ddcff932b7fd352752d51a5c4f04f3d0403230a584df9a2e0d5ed87c486c8b +$(package)_dependencies=libxcb + +define $(package)_set_vars +$(package)_config_opts = --enable-option-checking --disable-dependency-tracking +$(package)_config_opts += --disable-static --disable-docs +endef + +define $(package)_preprocess_cmds + cp -f $(BASEDIR)/config.guess $(BASEDIR)/config.sub build-aux +endef + +define $(package)_config_cmds + $($(package)_autoconf) +endef + +define $(package)_build_cmds + $(MAKE) +endef + +define $(package)_stage_cmds + $(MAKE) DESTDIR=$($(package)_staging_dir) install +endef + +define $(package)_postprocess_cmds + rm lib/*.la +endef + diff --git a/depends/packages/miniupnpc.mk b/depends/packages/miniupnpc.mk index 00101f1b91af4..99f5b0a8dbcf2 100644 --- a/depends/packages/miniupnpc.mk +++ b/depends/packages/miniupnpc.mk @@ -1,20 +1,19 @@ package=miniupnpc -$(package)_version=1.9.20140701 -$(package)_download_path=http://miniupnp.free.fr/files +$(package)_version=2.2.2 +$(package)_download_path=https://miniupnp.tuxfamily.org/files/ $(package)_file_name=$(package)-$($(package)_version).tar.gz -$(package)_sha256_hash=26f3985bad7768b8483b793448ae49414cdc4451d0ec83e7c1944367e15f9f07 +$(package)_sha256_hash=888fb0976ba61518276fe1eda988589c700a3f2a69d71089260d75562afd3687 +$(package)_patches=dont_leak_info.patch define $(package)_set_vars $(package)_build_opts=CC="$($(package)_cc)" -$(package)_build_opts_darwin=OS=Darwin +$(package)_build_opts_darwin=LIBTOOL="$($(package)_libtool)" $(package)_build_opts_mingw32=-f Makefile.mingw $(package)_build_env+=CFLAGS="$($(package)_cflags) $($(package)_cppflags)" AR="$($(package)_ar)" endef define $(package)_preprocess_cmds - mkdir dll && \ - sed -e 's|MINIUPNPC_VERSION_STRING \"version\"|MINIUPNPC_VERSION_STRING \"$($(package)_version)\"|' -e 's|OS/version|$(host)|' miniupnpcstrings.h.in > miniupnpcstrings.h && \ - sed -i.old "s|miniupnpcstrings.h: miniupnpcstrings.h.in wingenminiupnpcstrings|miniupnpcstrings.h: miniupnpcstrings.h.in|" Makefile.mingw + patch -p1 < $($(package)_patch_dir)/dont_leak_info.patch endef define $(package)_build_cmds diff --git a/depends/packages/native_capnp.mk b/depends/packages/native_capnp.mk new file mode 100644 index 0000000000000..ed5a6deee2fdb --- /dev/null +++ b/depends/packages/native_capnp.mk @@ -0,0 +1,18 @@ +package=native_capnp +$(package)_version=0.7.0 +$(package)_download_path=https://capnproto.org/ +$(package)_download_file=capnproto-c++-$($(package)_version).tar.gz +$(package)_file_name=capnproto-cxx-$($(package)_version).tar.gz +$(package)_sha256_hash=c9a4c0bd88123064d483ab46ecee777f14d933359e23bff6fb4f4dbd28b4cd41 + +define $(package)_config_cmds + $($(package)_autoconf) +endef + +define $(package)_build_cmds + $(MAKE) +endef + +define $(package)_stage_cmds + $(MAKE) DESTDIR=$($(package)_staging_dir) install +endef diff --git 
a/depends/packages/native_ccache.mk b/depends/packages/native_ccache.mk deleted file mode 100644 index 3226e89a63eae..0000000000000 --- a/depends/packages/native_ccache.mk +++ /dev/null @@ -1,25 +0,0 @@ -package=native_ccache -$(package)_version=3.1.9 -$(package)_download_path=http://samba.org/ftp/ccache -$(package)_file_name=ccache-$($(package)_version).tar.bz2 -$(package)_sha256_hash=04d3e2e438ac8d4cc4b110b68cdd61bd59226c6588739a4a386869467f5ced7c - -define $(package)_set_vars -$(package)_config_opts= -endef - -define $(package)_config_cmds - $($(package)_autoconf) -endef - -define $(package)_build_cmds - $(MAKE) -endef - -define $(package)_stage_cmds - $(MAKE) DESTDIR=$($(package)_staging_dir) install -endef - -define $(package)_postprocess_cmds - rm -rf lib include -endef diff --git a/depends/packages/native_cctools.mk b/depends/packages/native_cctools.mk index ad989cb544bec..d169eb6723184 100644 --- a/depends/packages/native_cctools.mk +++ b/depends/packages/native_cctools.mk @@ -1,61 +1,23 @@ package=native_cctools -$(package)_version=809 -$(package)_download_path=http://www.opensource.apple.com/tarballs/cctools -$(package)_file_name=cctools-$($(package)_version).tar.gz -$(package)_sha256_hash=03ba62749b843b131c7304a044a98c6ffacd65b1399b921d69add0375f79d8ad -$(package)_build_subdir=cctools2odcctools/odcctools-$($(package)_version) -$(package)_dependencies=native_libuuid native_openssl -$(package)_ld64_download_file=ld64-127.2.tar.gz -$(package)_ld64_download_path=http://www.opensource.apple.com/tarballs/ld64 -$(package)_ld64_file_name=$($(package)_ld64_download_file) -$(package)_ld64_sha256_hash=97b75547b2bd761306ab3e15ae297f01e7ab9760b922bc657f4ef72e4e052142 -$(package)_dyld_download_file=dyld-195.5.tar.gz -$(package)_dyld_download_path=http://www.opensource.apple.com/tarballs/dyld -$(package)_dyld_file_name=$($(package)_dyld_download_file) -$(package)_dyld_sha256_hash=2cf0484c87cf79b606b351a7055a247dae84093ae92c747a74e0cde2c8c8f83c -$(package)_toolchain4_download_file=10cc648683617cca8bcbeae507888099b41b530c.tar.gz -$(package)_toolchain4_download_path=https://github.com/mingwandroid/toolchain4/archive -$(package)_toolchain4_file_name=toolchain4-1.tar.gz -$(package)_toolchain4_sha256_hash=18406961fd4a1ec5c7ea35c91d6a80a2f8bb797a2bd243a610bd75e13eff9aca -$(package)_clang_download_file=clang+llvm-3.2-x86-linux-ubuntu-12.04.tar.gz -$(package)_clang_download_path=http://llvm.org/releases/3.2 -$(package)_clang_file_name=clang-llvm-3.2-x86-linux-ubuntu-12.04.tar.gz -$(package)_clang_sha256_hash=b9d57a88f9514fa1f327a1a703756d0c1c960f4c58494a5bd80313245d13ffff - -define $(package)_fetch_cmds -$(call fetch_file,$(package),$($(package)_download_path),$($(package)_download_file),$($(package)_file_name),$($(package)_sha256_hash)) && \ -$(call fetch_file,$(package),$($(package)_ld64_download_path),$($(package)_ld64_download_file),$($(package)_ld64_file_name),$($(package)_ld64_sha256_hash)) && \ -$(call fetch_file,$(package),$($(package)_dyld_download_path),$($(package)_dyld_download_file),$($(package)_dyld_file_name),$($(package)_dyld_sha256_hash)) && \ -$(call fetch_file,$(package),$($(package)_clang_download_path),$($(package)_clang_download_file),$($(package)_clang_file_name),$($(package)_clang_sha256_hash)) && \ -$(call fetch_file,$(package),$($(package)_toolchain4_download_path),$($(package)_toolchain4_download_file),$($(package)_toolchain4_file_name),$($(package)_toolchain4_sha256_hash)) -endef +$(package)_version=2ef2e931cf641547eb8a68cfebde61003587c9fd 
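Editorial aside: the new and updated recipes in this change (libxkbcommon, miniupnpc, native_capnp and the ones that follow) all share the same depends recipe shape, and the hunks are easier to read with that template in mind. The sketch below is illustrative only: the package name, URL and hash are placeholders, and helpers such as $($(package)_autoconf) and $($(package)_staging_dir) are assumed to be supplied by the depends framework (depends/funcs.mk), not by this diff.

package=examplelib                                      # placeholder name, not a real package
$(package)_version=1.0.0
$(package)_download_path=https://example.org/releases   # placeholder URL
$(package)_file_name=$(package)-$($(package)_version).tar.gz
$(package)_sha256_hash=0000000000000000000000000000000000000000000000000000000000000000  # placeholder

define $(package)_set_vars
$(package)_config_opts=--disable-shared --enable-option-checking --disable-dependency-tracking
$(package)_config_opts_linux=--with-pic
endef

define $(package)_config_cmds
  $($(package)_autoconf)
endef

define $(package)_build_cmds
  $(MAKE)
endef

define $(package)_stage_cmds
  $(MAKE) DESTDIR=$($(package)_staging_dir) install
endef

define $(package)_postprocess_cmds
  rm -rf share lib/*.la
endef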
+$(package)_download_path=https://github.com/tpoechtrager/cctools-port/archive +$(package)_file_name=$($(package)_version).tar.gz +$(package)_sha256_hash=6b73269efdf5c58a070e7357b66ee760501388549d6a12b423723f45888b074b +$(package)_build_subdir=cctools +$(package)_dependencies=native_libtapi define $(package)_set_vars -$(package)_config_opts=--target=$(host) --with-sysroot=$(OSX_SDK) -$(package)_cflags+=-m32 -$(package)_cxxflags+=-m32 -$(package)_cppflags+=-D__DARWIN_UNIX03 -D__STDC_CONSTANT_MACROS -D__STDC_LIMIT_MACROS -$(package)_ldflags+=-m32 -Wl,-rpath=\\$$$$$$$$\$$$$$$$$ORIGIN/../lib -$(package)_ldflags+=-L$$(native_cctools_extract_dir)/clang+llvm-3.2-x86-linux-ubuntu-12.04/lib -endef -define $(package)_extract_cmds - tar --strip-components=1 -xf $(SOURCES_PATH)/$($(package)_toolchain4_file_name) && \ - ln -sf $($(package)_source) cctools2odcctools/$($(package)_file_name) && \ - ln -sf $(SOURCES_PATH)/$($(package)_ld64_file_name) cctools2odcctools/$($(package)_ld64_file_name) && \ - ln -sf $(SOURCES_PATH)/$($(package)_dyld_file_name) cctools2odcctools/$($(package)_dyld_file_name) && \ - tar xf $(SOURCES_PATH)/$($(package)_clang_file_name) && \ - mkdir -p $(SDK_PATH) sdks &&\ - cd sdks; ln -sf $(OSX_SDK) MacOSX$(OSX_SDK_VERSION).sdk + $(package)_config_opts=--target=$(host) + $(package)_ldflags+=-Wl,-rpath=\\$$$$$$$$\$$$$$$$$ORIGIN/../lib + ifeq ($(strip $(FORCE_USE_SYSTEM_CLANG)),) + $(package)_config_opts+=--enable-lto-support --with-llvm-config=$(build_prefix)/bin/llvm-config + endif + $(package)_cc=$(clang_prog) + $(package)_cxx=$(clangxx_prog) endef define $(package)_preprocess_cmds - sed -i "s|GCC_DIR|LLVM_CLANG_DIR|g" cctools2odcctools/extract.sh && \ - sed -i "s|llvmgcc42-2336.1|clang+llvm-3.2-x86-linux-ubuntu-12.04|g" cctools2odcctools/extract.sh && \ - sed -i "s|/llvmCore/include/llvm-c|/include/llvm-c \$$$${LLVM_CLANG_DIR}/include/llvm |" cctools2odcctools/extract.sh && \ - sed -i "s|fAC_INIT|AC_INIT|" cctools2odcctools/files/configure.ac && \ - sed -i 's/\# Dynamically linked LTO/\t ;\&\n\t linux*)\n# Dynamically linked LTO/' cctools2odcctools/files/configure.ac && \ - cd cctools2odcctools; ./extract.sh --osxver $(OSX_SDK_VERSION) && \ - sed -i "s|define\tPC|define\tPC_|" odcctools-809/include/architecture/sparc/reg.h + cp -f $(BASEDIR)/config.guess $(BASEDIR)/config.sub cctools endef define $(package)_config_cmds @@ -67,14 +29,9 @@ define $(package)_build_cmds endef define $(package)_stage_cmds - $(MAKE) DESTDIR=$($(package)_staging_dir) install && \ - cd ../../clang+llvm-3.2-x86-linux-ubuntu-12.04 && \ - mkdir -p $($(package)_staging_prefix_dir)/lib/clang/3.2/include && \ - mkdir -p $($(package)_staging_prefix_dir)/bin && \ - cp -P bin/clang bin/clang++ $($(package)_staging_prefix_dir)/bin/ &&\ - cp lib/libLTO.so $($(package)_staging_prefix_dir)/lib/ && \ - cp lib/clang/3.2/include/* $($(package)_staging_prefix_dir)/lib/clang/3.2/include/ && \ - echo "#!/bin/sh" > $($(package)_staging_prefix_dir)/bin/$(host)-dsymutil && \ - echo "exit 0" >> $($(package)_staging_prefix_dir)/bin/$(host)-dsymutil && \ - chmod +x $($(package)_staging_prefix_dir)/bin/$(host)-dsymutil + $(MAKE) DESTDIR=$($(package)_staging_dir) install +endef + +define $(package)_postprocess_cmds + rm -rf share endef diff --git a/depends/packages/native_cdrkit.mk b/depends/packages/native_cdrkit.mk deleted file mode 100644 index 2cc388b4b30c3..0000000000000 --- a/depends/packages/native_cdrkit.mk +++ /dev/null @@ -1,26 +0,0 @@ -package=native_cdrkit -$(package)_version=1.1.11 
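Editorial aside on the native_cctools hunk just above: the heavily escaped ldflags value has to survive make's $(eval) of the _set_vars block and then the shell before it reaches the linker, so that the linker finally records a literal $ORIGIN rpath. The reading below (the staged cctools binaries locating libtapi in the lib/ directory next to their own bin/) is an inference from the flag, not something the diff states. In an ordinary makefile the same flag would be written as:

# The doubled dollar stops make expanding ORIGIN; the single quotes stop the
# shell expanding it; the linker records -rpath=$ORIGIN/../lib verbatim.
LDFLAGS += -Wl,-rpath='$$ORIGIN/../lib'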
-$(package)_download_path=http://distro.ibiblio.org/fatdog/source/c -$(package)_file_name=cdrkit-$($(package)_version).tar.bz2 -$(package)_sha256_hash=b50d64c214a65b1a79afe3a964c691931a4233e2ba605d793eb85d0ac3652564 -$(package)_patches=cdrkit-deterministic.patch - -define $(package)_preprocess_cmds - patch -p1 < $($(package)_patch_dir)/cdrkit-deterministic.patch -endef - -define $(package)_config_cmds - cmake -DCMAKE_INSTALL_PREFIX=$(build_prefix) -endef - -define $(package)_build_cmds - $(MAKE) genisoimage -endef - -define $(package)_stage_cmds - $(MAKE) DESTDIR=$($(package)_staging_dir) -C genisoimage install -endef - -define $(package)_postprocess_cmds - rm bin/isovfy bin/isoinfo bin/isodump bin/isodebug bin/devdump -endef diff --git a/depends/packages/native_clang.mk b/depends/packages/native_clang.mk new file mode 100644 index 0000000000000..245269a9d342a --- /dev/null +++ b/depends/packages/native_clang.mk @@ -0,0 +1,25 @@ +package=native_clang +$(package)_version=10.0.1 +$(package)_download_path=https://github.com/llvm/llvm-project/releases/download/llvmorg-$($(package)_version) +ifneq (,$(findstring aarch64,$(BUILD))) +$(package)_file_name=clang+llvm-$($(package)_version)-aarch64-linux-gnu.tar.xz +$(package)_sha256_hash=90dc69a4758ca15cd0ffa45d07fbf5bf4309d47d2c7745a9f0735ecffde9c31f +else +$(package)_file_name=clang+llvm-$($(package)_version)-x86_64-linux-gnu-ubuntu-16.04.tar.xz +$(package)_sha256_hash=48b83ef827ac2c213d5b64f5ad7ed082c8bcb712b46644e0dc5045c6f462c231 +endif + +define $(package)_preprocess_cmds + rm -f $($(package)_extract_dir)/lib/libc++abi.so* +endef + +define $(package)_stage_cmds + mkdir -p $($(package)_staging_prefix_dir)/lib/clang/$($(package)_version)/include && \ + mkdir -p $($(package)_staging_prefix_dir)/bin && \ + cp bin/clang $($(package)_staging_prefix_dir)/bin/ && \ + cp -P bin/clang++ $($(package)_staging_prefix_dir)/bin/ && \ + cp bin/dsymutil $($(package)_staging_prefix_dir)/bin/$(host)-dsymutil && \ + cp bin/llvm-config $($(package)_staging_prefix_dir)/bin/ && \ + cp lib/libLTO.so $($(package)_staging_prefix_dir)/lib/ && \ + cp -r lib/clang/$($(package)_version)/include/* $($(package)_staging_prefix_dir)/lib/clang/$($(package)_version)/include/ +endef diff --git a/depends/packages/native_comparisontool.mk b/depends/packages/native_comparisontool.mk deleted file mode 100644 index 3d430d4306a58..0000000000000 --- a/depends/packages/native_comparisontool.mk +++ /dev/null @@ -1,21 +0,0 @@ -package=native_comparisontool -$(package)_version=0f7b5d8 -$(package)_download_path=https://github.com/TheBlueMatt/test-scripts/raw/38b490a2599d422b12d5ce8f165792f63fd8f54f -$(package)_file_name=pull-tests-$($(package)_version).jar -$(package)_sha256_hash=ecd43b988a8b673b483e4f69f931596360a5e90fc415c75c4c259faa690df198 -$(package)_install_dirname=BitcoindComparisonTool_jar -$(package)_install_filename=BitcoindComparisonTool.jar - -define $(package)_extract_cmds -endef - -define $(package)_configure_cmds -endef - -define $(package)_build_cmds -endef - -define $(package)_stage_cmds - mkdir -p $($(package)_staging_prefix_dir)/share/$($(package)_install_dirname) && \ - mv $(SOURCES_PATH)/$($(package)_file_name) $($(package)_staging_prefix_dir)/share/$($(package)_install_dirname)/$($(package)_install_filename) -endef diff --git a/depends/packages/native_ds_store.mk b/depends/packages/native_ds_store.mk new file mode 100644 index 0000000000000..44108925a4f3b --- /dev/null +++ b/depends/packages/native_ds_store.mk @@ -0,0 +1,15 @@ +package=native_ds_store 
+$(package)_version=1.3.0 +$(package)_download_path=https://github.com/al45tair/ds_store/archive/ +$(package)_file_name=v$($(package)_version).tar.gz +$(package)_sha256_hash=76b3280cd4e19e5179defa23fb594a9dd32643b0c80d774bd3108361d94fb46d +$(package)_install_libdir=$(build_prefix)/lib/python3/dist-packages + +define $(package)_build_cmds + python3 setup.py build +endef + +define $(package)_stage_cmds + mkdir -p $($(package)_install_libdir) && \ + python3 setup.py install --root=$($(package)_staging_dir) --prefix=$(build_prefix) --install-lib=$($(package)_install_libdir) +endef diff --git a/depends/packages/native_libdmg-hfsplus.mk b/depends/packages/native_libdmg-hfsplus.mk deleted file mode 100644 index a4ffb6046ccc4..0000000000000 --- a/depends/packages/native_libdmg-hfsplus.mk +++ /dev/null @@ -1,22 +0,0 @@ -package=native_libdmg-hfsplus -$(package)_version=0.1 -$(package)_download_path=https://github.com/theuni/libdmg-hfsplus/archive -$(package)_file_name=libdmg-hfsplus-v$($(package)_version).tar.gz -$(package)_sha256_hash=6569a02eb31c2827080d7d59001869ea14484c281efab0ae7f2b86af5c3120b3 -$(package)_build_subdir=build - -define $(package)_preprocess_cmds - mkdir build -endef - -define $(package)_config_cmds - cmake -DCMAKE_INSTALL_PREFIX:PATH=$(build_prefix)/bin .. -endef - -define $(package)_build_cmds - $(MAKE) -C dmg -endef - -define $(package)_stage_cmds - $(MAKE) DESTDIR=$($(package)_staging_dir) -C dmg install -endef diff --git a/depends/packages/native_libmultiprocess.mk b/depends/packages/native_libmultiprocess.mk new file mode 100644 index 0000000000000..6e600c5720f50 --- /dev/null +++ b/depends/packages/native_libmultiprocess.mk @@ -0,0 +1,18 @@ +package=native_libmultiprocess +$(package)_version=d576d975debdc9090bd2582f83f49c76c0061698 +$(package)_download_path=https://github.com/chaincodelabs/libmultiprocess/archive +$(package)_file_name=$($(package)_version).tar.gz +$(package)_sha256_hash=9f8b055c8bba755dc32fe799b67c20b91e7b13e67cadafbc54c0f1def057a370 +$(package)_dependencies=native_capnp + +define $(package)_config_cmds + $($(package)_cmake) . 
+endef + +define $(package)_build_cmds + $(MAKE) +endef + +define $(package)_stage_cmds + $(MAKE) DESTDIR=$($(package)_staging_dir) install +endef diff --git a/depends/packages/native_libtapi.mk b/depends/packages/native_libtapi.mk new file mode 100644 index 0000000000000..1633213a42337 --- /dev/null +++ b/depends/packages/native_libtapi.mk @@ -0,0 +1,19 @@ +package=native_libtapi +$(package)_version=664b8414f89612f2dfd35a9b679c345aa5389026 +$(package)_download_path=https://github.com/tpoechtrager/apple-libtapi/archive +$(package)_file_name=$($(package)_version).tar.gz +$(package)_sha256_hash=62e419c12d1c9fad67cc1cd523132bc00db050998337c734c15bc8d73cc02b61 + +ifeq ($(strip $(FORCE_USE_SYSTEM_CLANG)),) +$(package)_dependencies=native_clang +endif + +define $(package)_build_cmds + CC=$(clang_prog) CXX=$(clangxx_prog) INSTALLPREFIX=$($(package)_staging_prefix_dir) ./build.sh +endef + +define $(package)_stage_cmds + ./install.sh && \ + mkdir -p $($(package)_staging_prefix_dir)/include/llvm-c && \ + cp src/llvm/include/llvm-c/lto.h $($(package)_staging_prefix_dir)/include/llvm-c +endef diff --git a/depends/packages/native_libuuid.mk b/depends/packages/native_libuuid.mk deleted file mode 100644 index b25540f80dfab..0000000000000 --- a/depends/packages/native_libuuid.mk +++ /dev/null @@ -1,24 +0,0 @@ -package:=native_libuuid -$(package)_version=1.41.14 -$(package)_download_path=http://downloads.sourceforge.net/e2fsprogs -$(package)_file_name=e2fsprogs-libs-$($(package)_version).tar.gz -$(package)_sha256_hash=dbc7a138a3218d9b80a0626b5b692d76934d6746d8cbb762751be33785d8d9f5 - -define $(package)_set_vars -$(package)_config_opts=--disable-elf-shlibs --disable-uuidd -$(package)_cflags+=-m32 -$(package)_ldflags+=-m32 -$(package)_cxxflags+=-m32 -endef - -define $(package)_config_cmds - $($(package)_autoconf) -endef - -define $(package)_build_cmds - $(MAKE) -C lib/uuid -endef - -define $(package)_stage_cmds - $(MAKE) DESTDIR=$($(package)_staging_dir) -C lib/uuid install -endef diff --git a/depends/packages/native_mac_alias.mk b/depends/packages/native_mac_alias.mk new file mode 100644 index 0000000000000..783f87ca7c043 --- /dev/null +++ b/depends/packages/native_mac_alias.mk @@ -0,0 +1,15 @@ +package=native_mac_alias +$(package)_version=2.2.0 +$(package)_download_path=https://github.com/al45tair/mac_alias/archive/ +$(package)_file_name=v$($(package)_version).tar.gz +$(package)_sha256_hash=421e6d7586d1f155c7db3e7da01ca0dacc9649a509a253ad7077b70174426499 +$(package)_install_libdir=$(build_prefix)/lib/python3/dist-packages + +define $(package)_build_cmds + python3 setup.py build +endef + +define $(package)_stage_cmds + mkdir -p $($(package)_install_libdir) && \ + python3 setup.py install --root=$($(package)_staging_dir) --prefix=$(build_prefix) --install-lib=$($(package)_install_libdir) +endef diff --git a/depends/packages/native_openssl.mk b/depends/packages/native_openssl.mk deleted file mode 100644 index 1f25d6afccd60..0000000000000 --- a/depends/packages/native_openssl.mk +++ /dev/null @@ -1,21 +0,0 @@ -package=native_openssl -$(package)_version=1.0.1h -$(package)_download_path=https://www.openssl.org/source -$(package)_file_name=openssl-$($(package)_version).tar.gz -$(package)_sha256_hash=9d1c8a9836aa63e2c6adb684186cbd4371c9e9dcc01d6e3bb447abf2d4d3d093 -define $(package)_set_vars -$(package)_build_config_opts= --prefix=$(build_prefix) no-zlib no-shared no-krb5C linux-generic32 -m32 -endef - -define $(package)_config_cmds - ./Configure $($(package)_build_config_opts) &&\ - sed -i "s|engines apps 
test|engines|" Makefile -endef - -define $(package)_build_cmds - $(MAKE) -j1 -endef - -define $(package)_stage_cmds - $(MAKE) INSTALL_PREFIX=$($(package)_staging_dir) -j1 install_sw -endef diff --git a/depends/packages/native_protobuf.mk b/depends/packages/native_protobuf.mk deleted file mode 100644 index ed1a771f0d5ee..0000000000000 --- a/depends/packages/native_protobuf.mk +++ /dev/null @@ -1,25 +0,0 @@ -package=native_protobuf -$(package)_version=2.5.0 -$(package)_download_path=https://protobuf.googlecode.com/files -$(package)_file_name=protobuf-$($(package)_version).tar.bz2 -$(package)_sha256_hash=13bfc5ae543cf3aa180ac2485c0bc89495e3ae711fc6fab4f8ffe90dfb4bb677 - -define $(package)_set_vars -$(package)_config_opts=--disable-shared -endef - -define $(package)_config_cmds - $($(package)_autoconf) -endef - -define $(package)_build_cmds - $(MAKE) -C src protoc -endef - -define $(package)_stage_cmds - $(MAKE) -C src DESTDIR=$($(package)_staging_dir) install-strip -endef - -define $(package)_postprocess_cmds - rm -rf lib include -endef diff --git a/depends/packages/openssl.mk b/depends/packages/openssl.mk deleted file mode 100644 index 6d7a556c1100b..0000000000000 --- a/depends/packages/openssl.mk +++ /dev/null @@ -1,41 +0,0 @@ -package=openssl -$(package)_version=1.0.1j -$(package)_download_path=https://www.openssl.org/source -$(package)_file_name=$(package)-$($(package)_version).tar.gz -$(package)_sha256_hash=1b60ca8789ba6f03e8ef20da2293b8dc131c39d83814e775069f02d26354edf3 - -define $(package)_set_vars -$(package)_config_env=AR="$($(package)_ar)" RANLIB="$($(package)_ranlib)" CC="$($(package)_cc)" -$(package)_config_opts=--prefix=$(host_prefix) --openssldir=$(host_prefix)/etc/openssl no-zlib no-shared no-dso -$(package)_config_opts+=no-krb5 no-camellia no-capieng no-cast no-cms no-dtls1 no-gost no-gmp no-heartbeats no-idea no-jpake no-md2 -$(package)_config_opts+=no-mdc2 no-rc5 no-rdrand no-rfc3779 no-rsax no-sctp no-seed no-sha0 no-static_engine no-whirlpool no-rc2 no-rc4 no-ssl2 no-ssl3 -$(package)_config_opts+=$($(package)_cflags) $($(package)_cppflags) -$(package)_config_opts_linux=-fPIC -$(package)_config_opts_x86_64_linux=linux-x86_64 -$(package)_config_opts_i686_linux=linux-generic32 -$(package)_config_opts_arm_linux=linux-generic32 -$(package)_config_opts_x86_64_darwin=darwin64-x86_64-cc -$(package)_config_opts_x86_64_mingw32=mingw64 -$(package)_config_opts_i686_mingw32=mingw -endef - -define $(package)_preprocess_cmds - sed -i.old "/define DATE/d" crypto/Makefile && \ - sed -i.old "s|engines apps test|engines|" Makefile.org -endef - -define $(package)_config_cmds - ./Configure $($(package)_config_opts) -endef - -define $(package)_build_cmds - $(MAKE) -j1 build_libs libcrypto.pc libssl.pc openssl.pc -endef - -define $(package)_stage_cmds - $(MAKE) INSTALL_PREFIX=$($(package)_staging_dir) -j1 install_sw -endef - -define $(package)_postprocess_cmds - rm -rf share bin etc -endef diff --git a/depends/packages/packages.mk b/depends/packages/packages.mk index bbf53cc2dce7f..998cc0221c64e 100644 --- a/depends/packages/packages.mk +++ b/depends/packages/packages.mk @@ -1,23 +1,35 @@ -packages:=boost openssl -native_packages := native_ccache native_comparisontool +packages:=boost libevent -qt_native_packages = native_protobuf -qt_packages = qrencode protobuf - -qt46_linux_packages = qt46 expat dbus libxcb xcb_proto libXau xproto freetype libX11 xextproto libXext xtrans libICE libSM -qt5_linux_packages= qt expat dbus libxcb xcb_proto libXau xproto freetype fontconfig libX11 xextproto libXext 
xtrans +qrencode_linux_packages = qrencode +qrencode_android_packages = qrencode +qrencode_darwin_packages = qrencode +qrencode_mingw32_packages = qrencode +qt_linux_packages:=qt expat libxcb xcb_proto libXau xproto freetype fontconfig libxkbcommon libxcb_util libxcb_util_render libxcb_util_keysyms libxcb_util_image libxcb_util_wm +qt_android_packages=qt qt_darwin_packages=qt qt_mingw32_packages=qt -qt_linux_$(USE_LINUX_STATIC_QT5):=$(qt5_linux_packages) -qt_linux_:=$(qt46_linux_packages) -qt_linux_packages:=$(qt_linux_$(USE_LINUX_STATIC_QT5)) +bdb_packages=bdb +sqlite_packages=sqlite -wallet_packages=bdb +zmq_packages=zeromq upnp_packages=miniupnpc +natpmp_packages=libnatpmp + +multiprocess_packages = libmultiprocess capnp +multiprocess_native_packages = native_libmultiprocess native_capnp + +usdt_linux_packages=systemtap + +darwin_native_packages = native_ds_store native_mac_alias ifneq ($(build_os),darwin) -darwin_native_packages=native_libuuid native_openssl native_cctools native_cdrkit native_libdmg-hfsplus +darwin_native_packages += native_cctools native_libtapi + +ifeq ($(strip $(FORCE_USE_SYSTEM_CLANG)),) +darwin_native_packages+= native_clang +endif + endif diff --git a/depends/packages/protobuf.mk b/depends/packages/protobuf.mk deleted file mode 100644 index 5affad28375a9..0000000000000 --- a/depends/packages/protobuf.mk +++ /dev/null @@ -1,28 +0,0 @@ -package=protobuf -$(package)_version=$(native_$(package)_version) -$(package)_download_path=$(native_$(package)_download_path) -$(package)_file_name=$(native_$(package)_file_name) -$(package)_sha256_hash=$(native_$(package)_sha256_hash) -$(package)_dependencies=native_$(package) - -define $(package)_set_vars - $(package)_config_opts=--disable-shared --with-protoc=$(build_prefix)/bin/protoc - $(package)_config_opts_linux=--with-pic -endef - -define $(package)_config_cmds - $($(package)_autoconf) -endef - -define $(package)_build_cmds - $(MAKE) -C src libprotobuf.la -endef - -define $(package)_stage_cmds - $(MAKE) DESTDIR=$($(package)_staging_dir) -C src install-libLTLIBRARIES install-nobase_includeHEADERS &&\ - $(MAKE) DESTDIR=$($(package)_staging_dir) install-pkgconfigDATA -endef - -define $(package)_postprocess_cmds - rm lib/libprotoc.a -endef diff --git a/depends/packages/qrencode.mk b/depends/packages/qrencode.mk index 1ad329e94d7a1..d1687883bcd51 100644 --- a/depends/packages/qrencode.mk +++ b/depends/packages/qrencode.mk @@ -1,12 +1,19 @@ package=qrencode -$(package)_version=3.4.3 +$(package)_version=3.4.4 $(package)_download_path=https://fukuchi.org/works/qrencode/ -$(package)_file_name=qrencode-$(qrencode_version).tar.bz2 -$(package)_sha256_hash=dfd71487513c871bad485806bfd1fdb304dedc84d2b01a8fb8e0940b50597a98 +$(package)_file_name=$(package)-$($(package)_version).tar.bz2 +$(package)_sha256_hash=efe5188b1ddbcbf98763b819b146be6a90481aac30cfc8d858ab78a19cde1fa5 define $(package)_set_vars -$(package)_config_opts=--disable-shared -without-tools --disable-sdltest +$(package)_config_opts=--disable-shared --without-tools --without-tests --disable-sdltest +$(package)_config_opts += --disable-gprof --disable-gcov --disable-mudflap +$(package)_config_opts += --disable-dependency-tracking --enable-option-checking $(package)_config_opts_linux=--with-pic +$(package)_config_opts_android=--with-pic +endef + +define $(package)_preprocess_cmds + cp -f $(BASEDIR)/config.guess $(BASEDIR)/config.sub use endef define $(package)_config_cmds @@ -20,3 +27,7 @@ endef define $(package)_stage_cmds $(MAKE) DESTDIR=$($(package)_staging_dir) install endef 
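Editorial aside: packages.mk above only declares the package groups; the logic that folds them into the final build list lives in depends/Makefile, outside this hunk. The sketch below shows roughly how such groups are consumed, and the flag names NO_QT, NO_WALLET and NO_UPNP are assumptions for illustration, not definitions taken from this diff.

# Illustrative only: append optional groups for the current host_os unless the
# corresponding feature has been switched off by the caller.
ifneq ($(NO_QT),1)
packages += $(qt_$(host_os)_packages) $(qrencode_$(host_os)_packages)
endif
ifneq ($(NO_WALLET),1)
packages += $(bdb_packages) $(sqlite_packages)
endif
ifneq ($(NO_UPNP),1)
packages += $(upnp_packages)
endif
native_packages += $($(host_os)_native_packages)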
+ +define $(package)_postprocess_cmds + rm lib/*.la +endef diff --git a/depends/packages/qt.mk b/depends/packages/qt.mk index 6a8e714a489db..2bc3a81430f32 100644 --- a/depends/packages/qt.mk +++ b/depends/packages/qt.mk @@ -1,98 +1,285 @@ -PACKAGE=qt -$(package)_version=5.2.1 -$(package)_download_path=http://download.qt-project.org/official_releases/qt/5.2/$($(package)_version)/single -$(package)_file_name=$(package)-everywhere-opensource-src-$($(package)_version).tar.gz -$(package)_sha256_hash=84e924181d4ad6db00239d87250cc89868484a14841f77fb85ab1f1dbdcd7da1 -$(package)_dependencies=openssl -$(package)_linux_dependencies=freetype fontconfig dbus libxcb libX11 xproto libXext -$(package)_build_subdir=qtbase +package=qt +$(package)_version=5.15.3 +$(package)_download_path=https://download.qt.io/official_releases/qt/5.15/$($(package)_version)/submodules +$(package)_suffix=everywhere-opensource-src-$($(package)_version).tar.xz +$(package)_file_name=qtbase-$($(package)_suffix) +$(package)_sha256_hash=26394ec9375d52c1592bd7b689b1619c6b8dbe9b6f91fdd5c355589787f3a0b6 +$(package)_linux_dependencies=freetype fontconfig libxcb libxkbcommon libxcb_util libxcb_util_render libxcb_util_keysyms libxcb_util_image libxcb_util_wm $(package)_qt_libs=corelib network widgets gui plugins testlib -$(package)_patches=mac-qmake.conf fix-xcb-include-order.patch qt5-tablet-osx.patch +$(package)_linguist_tools = lrelease lupdate lconvert +$(package)_patches = qt.pro +$(package)_patches += qttools_src.pro +$(package)_patches += mac-qmake.conf +$(package)_patches += fix_qt_pkgconfig.patch +$(package)_patches += no-xlib.patch +$(package)_patches += dont_hardcode_x86_64.patch +$(package)_patches += fix_montery_include.patch +$(package)_patches += fix_android_jni_static.patch +$(package)_patches += dont_hardcode_pwd.patch +$(package)_patches += qtbase-moc-ignore-gcc-macro.patch +$(package)_patches += fix_limits_header.patch +$(package)_patches += use_android_ndk23.patch +$(package)_patches += rcc_hardcode_timestamp.patch +$(package)_patches += duplicate_lcqpafonts.patch + +$(package)_qttranslations_file_name=qttranslations-$($(package)_suffix) +$(package)_qttranslations_sha256_hash=5d7869f670a135ad0986e266813b9dd5bbae2b09577338f9cdf8904d4af52db0 + +$(package)_qttools_file_name=qttools-$($(package)_suffix) +$(package)_qttools_sha256_hash=463b2fe71a085e7ab4e39333ae360ab0ec857b966d7a08f752c427e5df55f90d + +$(package)_extra_sources = $($(package)_qttranslations_file_name) +$(package)_extra_sources += $($(package)_qttools_file_name) define $(package)_set_vars $(package)_config_opts_release = -release -$(package)_config_opts_debug = -debug -$(package)_config_opts += -opensource -confirm-license -no-audio-backend -no-sql-tds -no-glib -no-icu -$(package)_config_opts += -no-cups -no-iconv -no-gif -no-audio-backend -no-freetype -$(package)_config_opts += -no-sql-sqlite -no-nis -no-cups -no-iconv -no-pch -$(package)_config_opts += -no-gif -no-feature-style-plastique -$(package)_config_opts += -no-qml-debug -no-pch -no-nis -nomake examples -nomake tests -$(package)_config_opts += -no-feature-style-cde -no-feature-style-s60 -no-feature-style-motif -$(package)_config_opts += -no-feature-style-windowsmobile -no-feature-style-windowsce -$(package)_config_opts += -no-feature-style-cleanlooks -$(package)_config_opts += -no-sql-db2 -no-sql-ibase -no-sql-oci -no-sql-tds -no-sql-mysql -$(package)_config_opts += -no-sql-odbc -no-sql-psql -no-sql-sqlite -no-sql-sqlite2 -$(package)_config_opts += -skip qtsvg -skip qtwebkit -skip qtwebkit-examples 
-skip qtserialport -$(package)_config_opts += -skip qtdeclarative -skip qtmultimedia -skip qtimageformats -skip qtx11extras -$(package)_config_opts += -skip qtlocation -skip qtsensors -skip qtquick1 -skip qtxmlpatterns -$(package)_config_opts += -skip qtquickcontrols -skip qtactiveqt -skip qtconnectivity -skip qtmacextras -$(package)_config_opts += -skip qtwinextras -skip qtxmlpatterns -skip qtscript -skip qtdoc - -$(package)_config_opts += -prefix $(host_prefix) -bindir $(build_prefix)/bin -$(package)_config_opts += -no-c++11 -openssl-linked -v -static -silent -pkg-config -$(package)_config_opts += -qt-libpng -qt-libjpeg -qt-zlib -qt-pcre +$(package)_config_opts_release += -silent +$(package)_config_opts_debug = -debug +$(package)_config_opts_debug += -optimized-tools +$(package)_config_opts += -bindir $(build_prefix)/bin +$(package)_config_opts += -c++std c++17 +$(package)_config_opts += -confirm-license +$(package)_config_opts += -hostprefix $(build_prefix) +$(package)_config_opts += -no-compile-examples +$(package)_config_opts += -no-cups +$(package)_config_opts += -no-egl +$(package)_config_opts += -no-eglfs +$(package)_config_opts += -no-evdev +$(package)_config_opts += -no-gif +$(package)_config_opts += -no-glib +$(package)_config_opts += -no-icu +$(package)_config_opts += -no-ico +$(package)_config_opts += -no-iconv +$(package)_config_opts += -no-kms +$(package)_config_opts += -no-linuxfb +$(package)_config_opts += -no-libjpeg +$(package)_config_opts += -no-libproxy +$(package)_config_opts += -no-libudev +$(package)_config_opts += -no-mtdev +$(package)_config_opts += -no-openssl +$(package)_config_opts += -no-openvg +$(package)_config_opts += -no-reduce-relocations +$(package)_config_opts += -no-schannel +$(package)_config_opts += -no-sctp +$(package)_config_opts += -no-securetransport +$(package)_config_opts += -no-sql-db2 +$(package)_config_opts += -no-sql-ibase +$(package)_config_opts += -no-sql-oci +$(package)_config_opts += -no-sql-tds +$(package)_config_opts += -no-sql-mysql +$(package)_config_opts += -no-sql-odbc +$(package)_config_opts += -no-sql-psql +$(package)_config_opts += -no-sql-sqlite +$(package)_config_opts += -no-sql-sqlite2 +$(package)_config_opts += -no-system-proxies +$(package)_config_opts += -no-use-gold-linker +$(package)_config_opts += -no-zstd +$(package)_config_opts += -nomake examples +$(package)_config_opts += -nomake tests +$(package)_config_opts += -nomake tools +$(package)_config_opts += -opensource +$(package)_config_opts += -pkg-config +$(package)_config_opts += -prefix $(host_prefix) +$(package)_config_opts += -qt-libpng +$(package)_config_opts += -qt-pcre +$(package)_config_opts += -qt-harfbuzz +$(package)_config_opts += -qt-zlib +$(package)_config_opts += -static +$(package)_config_opts += -v +$(package)_config_opts += -no-feature-bearermanagement +$(package)_config_opts += -no-feature-colordialog +$(package)_config_opts += -no-feature-commandlineparser +$(package)_config_opts += -no-feature-concurrent +$(package)_config_opts += -no-feature-dial +$(package)_config_opts += -no-feature-fontcombobox +$(package)_config_opts += -no-feature-ftp +$(package)_config_opts += -no-feature-http +$(package)_config_opts += -no-feature-image_heuristic_mask +$(package)_config_opts += -no-feature-keysequenceedit +$(package)_config_opts += -no-feature-lcdnumber +$(package)_config_opts += -no-feature-networkdiskcache +$(package)_config_opts += -no-feature-networkproxy +$(package)_config_opts += -no-feature-pdf +$(package)_config_opts += -no-feature-printdialog 
+$(package)_config_opts += -no-feature-printer +$(package)_config_opts += -no-feature-printpreviewdialog +$(package)_config_opts += -no-feature-printpreviewwidget +$(package)_config_opts += -no-feature-sessionmanager +$(package)_config_opts += -no-feature-socks5 +$(package)_config_opts += -no-feature-sql +$(package)_config_opts += -no-feature-sqlmodel +$(package)_config_opts += -no-feature-statemachine +$(package)_config_opts += -no-feature-syntaxhighlighter +$(package)_config_opts += -no-feature-textbrowser +$(package)_config_opts += -no-feature-textmarkdownwriter +$(package)_config_opts += -no-feature-textodfwriter +$(package)_config_opts += -no-feature-topleveldomain +$(package)_config_opts += -no-feature-udpsocket +$(package)_config_opts += -no-feature-undocommand +$(package)_config_opts += -no-feature-undogroup +$(package)_config_opts += -no-feature-undostack +$(package)_config_opts += -no-feature-undoview +$(package)_config_opts += -no-feature-vnc +$(package)_config_opts += -no-feature-wizard +$(package)_config_opts += -no-feature-xml + +$(package)_config_opts_darwin = -no-dbus +$(package)_config_opts_darwin += -no-opengl +$(package)_config_opts_darwin += -pch +$(package)_config_opts_darwin += -no-feature-corewlan +$(package)_config_opts_darwin += -no-freetype +$(package)_config_opts_darwin += QMAKE_MACOSX_DEPLOYMENT_TARGET=$(OSX_MIN_VERSION) ifneq ($(build_os),darwin) -$(package)_config_opts_darwin = -xplatform macx-clang-linux -device-option MAC_SDK_PATH=$(OSX_SDK) -device-option CROSS_COMPILE="$(host)-" -$(package)_config_opts_darwin += -device-option MAC_MIN_VERSION=$(OSX_MIN_VERSION) -device-option MAC_TARGET=$(host) +$(package)_config_opts_darwin += -xplatform macx-clang-linux +$(package)_config_opts_darwin += -device-option MAC_SDK_PATH=$(OSX_SDK) +$(package)_config_opts_darwin += -device-option MAC_SDK_VERSION=$(OSX_SDK_VERSION) +$(package)_config_opts_darwin += -device-option CROSS_COMPILE="$(host)-" +$(package)_config_opts_darwin += -device-option MAC_TARGET=$(host) +$(package)_config_opts_darwin += -device-option XCODE_VERSION=$(XCODE_VERSION) endif -$(package)_config_opts_linux = -qt-xkbcommon -qt-xcb -no-eglfs -no-linuxfb -system-freetype -no-sm -fontconfig -no-xinput2 -no-libudev -no-egl -no-opengl -$(package)_config_opts_arm_linux = -platform linux-g++ -xplatform $(host) +ifneq ($(build_arch),$(host_arch)) +$(package)_config_opts_aarch64_darwin += -device-option QMAKE_APPLE_DEVICE_ARCHS=arm64 +$(package)_config_opts_x86_64_darwin += -device-option QMAKE_APPLE_DEVICE_ARCHS=x86_64 +endif + +$(package)_config_opts_linux = -xcb +$(package)_config_opts_linux += -no-xcb-xlib +$(package)_config_opts_linux += -no-feature-xlib +$(package)_config_opts_linux += -system-freetype +$(package)_config_opts_linux += -fontconfig +$(package)_config_opts_linux += -no-opengl +$(package)_config_opts_linux += -no-feature-vulkan +$(package)_config_opts_linux += -dbus-runtime +$(package)_config_opts_arm_linux += -platform linux-g++ -xplatform bitcoin-linux-g++ $(package)_config_opts_i686_linux = -xplatform linux-g++-32 -$(package)_config_opts_mingw32 = -no-opengl -xplatform win32-g++ -device-option CROSS_COMPILE="$(host)-" -$(package)_build_env = QT_RCC_TEST=1 +ifneq (,$(findstring -stdlib=libc++,$($(1)_cxx))) +$(package)_config_opts_x86_64_linux = -xplatform linux-clang-libc++ +else +$(package)_config_opts_x86_64_linux = -xplatform linux-g++-64 +endif +$(package)_config_opts_aarch64_linux = -xplatform linux-aarch64-gnu-g++ +$(package)_config_opts_powerpc64_linux = -platform linux-g++ 
-xplatform bitcoin-linux-g++ +$(package)_config_opts_powerpc64le_linux = -platform linux-g++ -xplatform bitcoin-linux-g++ +$(package)_config_opts_riscv64_linux = -platform linux-g++ -xplatform bitcoin-linux-g++ +$(package)_config_opts_s390x_linux = -platform linux-g++ -xplatform bitcoin-linux-g++ + +$(package)_config_opts_mingw32 = -no-opengl +$(package)_config_opts_mingw32 += -no-dbus +$(package)_config_opts_mingw32 += -no-freetype +$(package)_config_opts_mingw32 += -xplatform win32-g++ +$(package)_config_opts_mingw32 += "QMAKE_CFLAGS = '$($(package)_cflags) $($(package)_cppflags)'" +$(package)_config_opts_mingw32 += "QMAKE_CXX = '$($(package)_cxx)'" +$(package)_config_opts_mingw32 += "QMAKE_CXXFLAGS = '$($(package)_cflags) $($(package)_cppflags)'" +$(package)_config_opts_mingw32 += "QMAKE_LFLAGS = '$($(package)_ldflags)'" +$(package)_config_opts_mingw32 += -device-option CROSS_COMPILE="$(host)-" +$(package)_config_opts_mingw32 += -pch + +$(package)_config_opts_android = -xplatform android-clang +$(package)_config_opts_android += -android-sdk $(ANDROID_SDK) +$(package)_config_opts_android += -android-ndk $(ANDROID_NDK) +$(package)_config_opts_android += -android-ndk-platform android-$(ANDROID_API_LEVEL) +$(package)_config_opts_android += -egl +$(package)_config_opts_android += -no-dbus +$(package)_config_opts_android += -opengl es2 +$(package)_config_opts_android += -qt-freetype +$(package)_config_opts_android += -no-fontconfig +$(package)_config_opts_android += -L $(host_prefix)/lib +$(package)_config_opts_android += -I $(host_prefix)/include +$(package)_config_opts_android += -pch +$(package)_config_opts_android += -no-feature-vulkan + +$(package)_config_opts_aarch64_android += -android-arch arm64-v8a +$(package)_config_opts_armv7a_android += -android-arch armeabi-v7a +$(package)_config_opts_x86_64_android += -android-arch x86_64 +endef + +define $(package)_fetch_cmds +$(call fetch_file,$(package),$($(package)_download_path),$($(package)_download_file),$($(package)_file_name),$($(package)_sha256_hash)) && \ +$(call fetch_file,$(package),$($(package)_download_path),$($(package)_qttranslations_file_name),$($(package)_qttranslations_file_name),$($(package)_qttranslations_sha256_hash)) && \ +$(call fetch_file,$(package),$($(package)_download_path),$($(package)_qttools_file_name),$($(package)_qttools_file_name),$($(package)_qttools_sha256_hash)) +endef + +define $(package)_extract_cmds + mkdir -p $($(package)_extract_dir) && \ + echo "$($(package)_sha256_hash) $($(package)_source)" > $($(package)_extract_dir)/.$($(package)_file_name).hash && \ + echo "$($(package)_qttranslations_sha256_hash) $($(package)_source_dir)/$($(package)_qttranslations_file_name)" >> $($(package)_extract_dir)/.$($(package)_file_name).hash && \ + echo "$($(package)_qttools_sha256_hash) $($(package)_source_dir)/$($(package)_qttools_file_name)" >> $($(package)_extract_dir)/.$($(package)_file_name).hash && \ + $(build_SHA256SUM) -c $($(package)_extract_dir)/.$($(package)_file_name).hash && \ + mkdir qtbase && \ + $(build_TAR) --no-same-owner --strip-components=1 -xf $($(package)_source) -C qtbase && \ + mkdir qttranslations && \ + $(build_TAR) --no-same-owner --strip-components=1 -xf $($(package)_source_dir)/$($(package)_qttranslations_file_name) -C qttranslations && \ + mkdir qttools && \ + $(build_TAR) --no-same-owner --strip-components=1 -xf $($(package)_source_dir)/$($(package)_qttools_file_name) -C qttools endef +# Preprocessing steps work as follows: +# +# 1. Apply our patches to the extracted source. 
See each patch for more info. +# +# 2. Create a macOS-Clang-Linux mkspec using our mac-qmake.conf. +# +# 3. After making a copy of the mkspec for the linux-arm-gnueabi host, named +# bitcoin-linux-g++, replace instances of linux-arm-gnueabi with $(host). This +# way we can generically support hosts like riscv64-linux-gnu, which Qt doesn't +# ship a mkspec for. See it's usage in config_opts_* above. +# +# 4. Put our C, CXX and LD FLAGS into gcc-base.conf. Only used for non-host builds. +# +# 5. Do similar for the win32-g++ mkspec. +# +# 6. In clang.conf, swap out clang & clang++, for our compiler + flags. See #17466. +# +# 7. Adjust a regex in toolchain.prf, to accommodate Guix's usage of +# CROSS_LIBRARY_PATH. See #15277. define $(package)_preprocess_cmds - sed -i.old "s|updateqm.commands = \$$$$\$$$$LRELEASE|updateqm.commands = $($(package)_extract_dir)/qttools/bin/lrelease|" qttranslations/translations/translations.pro && \ - sed -i.old "s/src_plugins.depends = src_sql src_xml src_network/src_plugins.depends = src_xml src_network/" qtbase/src/src.pro && \ - sed -i.old "/XIproto.h/d" qtbase/src/plugins/platforms/xcb/qxcbxsettings.cpp && \ - sed -i.old 's/if \[ "$$$$XPLATFORM_MAC" = "yes" \]; then xspecvals=$$$$(macSDKify/if \[ "$$$$BUILD_ON_MAC" = "yes" \]; then xspecvals=$$$$(macSDKify/' qtbase/configure && \ + cp $($(package)_patch_dir)/qt.pro qt.pro && \ + cp $($(package)_patch_dir)/qttools_src.pro qttools/src/src.pro && \ + patch -p1 -i $($(package)_patch_dir)/dont_hardcode_pwd.patch && \ + patch -p1 -i $($(package)_patch_dir)/fix_qt_pkgconfig.patch && \ + patch -p1 -i $($(package)_patch_dir)/fix_android_jni_static.patch && \ + patch -p1 -i $($(package)_patch_dir)/no-xlib.patch && \ + patch -p1 -i $($(package)_patch_dir)/dont_hardcode_x86_64.patch && \ + patch -p1 -i $($(package)_patch_dir)/qtbase-moc-ignore-gcc-macro.patch && \ + patch -p1 -i $($(package)_patch_dir)/fix_limits_header.patch && \ + patch -p1 -i $($(package)_patch_dir)/fix_montery_include.patch && \ + patch -p1 -i $($(package)_patch_dir)/use_android_ndk23.patch && \ + patch -p1 -i $($(package)_patch_dir)/rcc_hardcode_timestamp.patch && \ + patch -p1 -i $($(package)_patch_dir)/duplicate_lcqpafonts.patch && \ mkdir -p qtbase/mkspecs/macx-clang-linux &&\ - cp -f qtbase/mkspecs/macx-clang/Info.plist.lib qtbase/mkspecs/macx-clang-linux/ &&\ - cp -f qtbase/mkspecs/macx-clang/Info.plist.app qtbase/mkspecs/macx-clang-linux/ &&\ cp -f qtbase/mkspecs/macx-clang/qplatformdefs.h qtbase/mkspecs/macx-clang-linux/ &&\ cp -f $($(package)_patch_dir)/mac-qmake.conf qtbase/mkspecs/macx-clang-linux/qmake.conf && \ - patch -p1 < $($(package)_patch_dir)/fix-xcb-include-order.patch && \ - patch -p1 < $($(package)_patch_dir)/qt5-tablet-osx.patch && \ - echo "QMAKE_CFLAGS += $($(package)_cflags) $($(package)_cppflags)" >> qtbase/mkspecs/common/gcc-base.conf && \ - echo "QMAKE_CXXFLAGS += $($(package)_cxxflags) $($(package)_cppflags)" >> qtbase/mkspecs/common/gcc-base.conf && \ - echo "QMAKE_LFLAGS += $($(package)_ldflags)" >> qtbase/mkspecs/common/gcc-base.conf && \ - sed -i.old "s|QMAKE_CFLAGS = |QMAKE_CFLAGS = $($(package)_cflags) $($(package)_cppflags) |" qtbase/mkspecs/win32-g++/qmake.conf && \ - sed -i.old "s|QMAKE_LFLAGS = |QMAKE_LFLAGS = $($(package)_ldflags) |" qtbase/mkspecs/win32-g++/qmake.conf && \ - sed -i.old "s|QMAKE_CXXFLAGS = |QMAKE_CXXFLAGS = $($(package)_cxxflags) $($(package)_cppflags) |" qtbase/mkspecs/win32-g++/qmake.conf + cp -r qtbase/mkspecs/linux-arm-gnueabi-g++ qtbase/mkspecs/bitcoin-linux-g++ && \ + sed -i.old 
"s/arm-linux-gnueabi-/$(host)-/g" qtbase/mkspecs/bitcoin-linux-g++/qmake.conf && \ + echo "!host_build: QMAKE_CFLAGS += $($(package)_cflags) $($(package)_cppflags)" >> qtbase/mkspecs/common/gcc-base.conf && \ + echo "!host_build: QMAKE_CXXFLAGS += $($(package)_cxxflags) $($(package)_cppflags)" >> qtbase/mkspecs/common/gcc-base.conf && \ + echo "!host_build: QMAKE_LFLAGS += $($(package)_ldflags)" >> qtbase/mkspecs/common/gcc-base.conf && \ + sed -i.old "s|QMAKE_CC = \$$$$\$$$${CROSS_COMPILE}clang|QMAKE_CC = $($(package)_cc)|" qtbase/mkspecs/common/clang.conf && \ + sed -i.old "s|QMAKE_CXX = \$$$$\$$$${CROSS_COMPILE}clang++|QMAKE_CXX = $($(package)_cxx)|" qtbase/mkspecs/common/clang.conf && \ + sed -i.old "s/LIBRARY_PATH/(CROSS_)?\0/g" qtbase/mkspecs/features/toolchain.prf endef define $(package)_config_cmds export PKG_CONFIG_SYSROOT_DIR=/ && \ export PKG_CONFIG_LIBDIR=$(host_prefix)/lib/pkgconfig && \ - export PKG_CONFIG_PATH=$(host_prefix)/share/pkgconfig && \ - export CPATH=$(host_prefix)/include && \ - ./configure $($(package)_config_opts) && \ - $(MAKE) sub-src-clean && \ - cd ../qttranslations && ../qtbase/bin/qmake qttranslations.pro -o Makefile && \ - cd translations && ../../qtbase/bin/qmake translations.pro -o Makefile && cd ../.. &&\ - cd qttools/src/linguist/lrelease/ && ../../../../qtbase/bin/qmake lrelease.pro -o Makefile + export QT_MAC_SDK_NO_VERSION_CHECK=1 && \ + cd qtbase && \ + ./configure -top-level $($(package)_config_opts) endef define $(package)_build_cmds - export CPATH=$(host_prefix)/include && \ - $(MAKE) -C src $(addprefix sub-,$($(package)_qt_libs)) && \ - $(MAKE) -C ../qttools/src/linguist/lrelease && \ - $(MAKE) -C ../qttranslations + $(MAKE) endef define $(package)_stage_cmds - $(MAKE) -C src INSTALL_ROOT=$($(package)_staging_dir) $(addsuffix -install_subtargets,$(addprefix sub-,$($(package)_qt_libs))) && cd .. 
&&\ - $(MAKE) -C qttools/src/linguist/lrelease INSTALL_ROOT=$($(package)_staging_dir) install_target && \ - $(MAKE) -C qttranslations INSTALL_ROOT=$($(package)_staging_dir) install_subtargets && \ - if `test -f qtbase/src/plugins/platforms/xcb/xcb-static/libxcb-static.a`; then \ - cp qtbase/src/plugins/platforms/xcb/xcb-static/libxcb-static.a $($(package)_staging_prefix_dir)/lib; \ - fi + $(MAKE) -C qtbase/src INSTALL_ROOT=$($(package)_staging_dir) $(addsuffix -install_subtargets,$(addprefix sub-,$($(package)_qt_libs))) && \ + $(MAKE) -C qttools/src/linguist INSTALL_ROOT=$($(package)_staging_dir) $(addsuffix -install_subtargets,$(addprefix sub-,$($(package)_linguist_tools))) && \ + $(MAKE) -C qttranslations INSTALL_ROOT=$($(package)_staging_dir) install_subtargets endef define $(package)_postprocess_cmds - rm -rf mkspecs/ lib/cmake/ && \ - rm lib/libQt5Bootstrap.a lib/lib*.la lib/*.prl plugins/*/*.prl + rm -rf native/mkspecs/ native/lib/ lib/cmake/ && \ + rm -f lib/lib*.la endef diff --git a/depends/packages/qt46.mk b/depends/packages/qt46.mk deleted file mode 100644 index 8fb30a5c4471c..0000000000000 --- a/depends/packages/qt46.mk +++ /dev/null @@ -1,66 +0,0 @@ -PACKAGE=qt46 -$(package)_version=4.6.4 -$(package)_download_path=http://download.qt-project.org/archive/qt/4.6/ -$(package)_file_name=qt-everywhere-opensource-src-$($(package)_version).tar.gz -$(package)_sha256_hash=9ad4d46c721b53a429ed5a2eecfd3c239a9ab566562f183f99d3125f1a234250 -$(package)_dependencies=openssl freetype dbus libX11 xproto libXext libICE libSM -$(package)_patches=stlfix.patch - -define $(package)_set_vars -$(package)_config_opts = -prefix $(host_prefix) -headerdir $(host_prefix)/include/qt4 -bindir $(build_prefix)/bin -$(package)_config_opts += -release -no-separate-debug-info -opensource -confirm-license -$(package)_config_opts += -stl -qt-zlib - -$(package)_config_opts += -nomake examples -nomake tests -nomake tools -nomake translations -nomake demos -nomake docs -$(package)_config_opts += -no-audio-backend -no-glib -no-nis -no-cups -no-iconv -no-gif -no-pch -$(package)_config_opts += -no-xkb -no-xrender -no-xrandr -no-xfixes -no-xcursor -no-xinerama -no-xsync -no-xinput -no-mitshm -no-xshape -$(package)_config_opts += -no-libtiff -no-fontconfig -openssl-linked -$(package)_config_opts += -no-sql-db2 -no-sql-ibase -no-sql-oci -no-sql-tds -no-sql-mysql -$(package)_config_opts += -no-sql-odbc -no-sql-psql -no-sql-sqlite -no-sql-sqlite2 -$(package)_config_opts += -no-xmlpatterns -no-multimedia -no-phonon -no-scripttools -no-declarative -$(package)_config_opts += -no-phonon-backend -no-webkit -no-javascript-jit -no-script -$(package)_config_opts += -no-svg -no-libjpeg -no-libtiff -no-libpng -no-libmng -no-qt3support -no-opengl - -$(package)_config_opts_x86_64_linux += -platform linux-g++-64 -$(package)_config_opts_i686_linux = -platform linux-g++-32 -$(package)_build_env = QT_RCC_TEST=1 -endef - -define $(package)_preprocess_cmds - sed -i.old "s|/include /usr/include||" config.tests/unix/freetype/freetype.pri && \ - sed -i.old "s|src_plugins.depends = src_gui src_sql src_svg|src_plugins.depends = src_gui src_sql|" src/src.pro && \ - sed -i.old "s|\.lower(|\.toLower(|g" src/network/ssl/qsslsocket_openssl.cpp && \ - sed -i.old "s|Key_BackSpace|Key_Backspace|" src/gui/itemviews/qabstractitemview.cpp && \ - sed -i.old "s|/usr/X11R6/lib64|$(host_prefix)/lib|" mkspecs/*/*.conf && \ - sed -i.old "s|/usr/X11R6/lib|$(host_prefix)/lib|" mkspecs/*/*.conf && \ - sed -i.old "s|/usr/X11R6/include|$(host_prefix)/include|" 
mkspecs/*/*.conf && \ - sed -i.old "s|QMAKE_LFLAGS_SHLIB\t+= -shared|QMAKE_LFLAGS_SHLIB\t+= -shared -Wl,--exclude-libs,ALL|" mkspecs/common/g++.conf && \ - sed -i.old "/SSLv2_client_method/d" src/network/ssl/qsslsocket_openssl.cpp src/network/ssl/qsslsocket_openssl_symbols.cpp && \ - sed -i.old "/SSLv2_server_method/d" src/network/ssl/qsslsocket_openssl.cpp src/network/ssl/qsslsocket_openssl_symbols.cpp && \ - patch -p1 < $($(package)_patch_dir)/stlfix.patch -endef - -define $(package)_config_cmds - export PKG_CONFIG_SYSROOT_DIR=/ && \ - export PKG_CONFIG_LIBDIR=$(host_prefix)/lib/pkgconfig && \ - export PKG_CONFIG_PATH=$(host_prefix)/share/pkgconfig && \ - export CPATH=$(host_prefix)/include && \ - OPENSSL_LIBS='-L$(host_prefix)/lib -lssl -lcrypto' ./configure $($(package)_config_opts) && \ - cd tools/linguist/lrelease; ../../../bin/qmake -o Makefile lrelease.pro -endef - -define $(package)_build_cmds - export CPATH=$(host_prefix)/include && \ - $(MAKE) -C src && \ - $(MAKE) -C tools/linguist/lrelease -endef - -define $(package)_stage_cmds - $(MAKE) -C src INSTALL_ROOT=$($(package)_staging_dir) install && \ - $(MAKE) -C tools/linguist/lrelease INSTALL_ROOT=$($(package)_staging_dir) install -endef - -define $(package)_postprocess_cmds - rm -rf mkspecs/ lib/cmake/ lib/*.prl lib/*.la && \ - find native/bin -type f -exec mv {} {}-qt4 \; -endef diff --git a/depends/packages/sqlite.mk b/depends/packages/sqlite.mk new file mode 100644 index 0000000000000..126781ceebb34 --- /dev/null +++ b/depends/packages/sqlite.mk @@ -0,0 +1,33 @@ +package=sqlite +$(package)_version=3320100 +$(package)_download_path=https://sqlite.org/2020/ +$(package)_file_name=sqlite-autoconf-$($(package)_version).tar.gz +$(package)_sha256_hash=486748abfb16abd8af664e3a5f03b228e5f124682b0c942e157644bf6fff7d10 + +define $(package)_set_vars +$(package)_config_opts=--disable-shared --disable-readline --disable-dynamic-extensions --enable-option-checking +$(package)_config_opts_linux=--with-pic +$(package)_config_opts_freebsd=--with-pic +$(package)_config_opts_netbsd=--with-pic +$(package)_config_opts_openbsd=--with-pic +endef + +define $(package)_preprocess_cmds + cp -f $(BASEDIR)/config.guess $(BASEDIR)/config.sub . 
+endef + +define $(package)_config_cmds + $($(package)_autoconf) +endef + +define $(package)_build_cmds + $(MAKE) libsqlite3.la +endef + +define $(package)_stage_cmds + $(MAKE) DESTDIR=$($(package)_staging_dir) install-libLTLIBRARIES install-includeHEADERS install-pkgconfigDATA +endef + +define $(package)_postprocess_cmds + rm lib/*.la +endef diff --git a/depends/packages/systemtap.mk b/depends/packages/systemtap.mk new file mode 100644 index 0000000000000..833e75b97861d --- /dev/null +++ b/depends/packages/systemtap.mk @@ -0,0 +1,12 @@ +package=systemtap +$(package)_version=4.5 +$(package)_download_path=https://sourceware.org/systemtap/ftp/releases/ +$(package)_file_name=$(package)-$($(package)_version).tar.gz +$(package)_sha256_hash=75078ed37e0dd2a769c9d1f9394170b2d9f4d7daa425f43ca80c13bad6cfc925 +$(package)_patches=remove_SDT_ASM_SECTION_AUTOGROUP_SUPPORT_check.patch + +define $(package)_preprocess_cmds + patch -p1 < $($(package)_patch_dir)/remove_SDT_ASM_SECTION_AUTOGROUP_SUPPORT_check.patch && \ + mkdir -p $($(package)_staging_prefix_dir)/include/sys && \ + cp includes/sys/sdt.h $($(package)_staging_prefix_dir)/include/sys/sdt.h +endef diff --git a/depends/packages/xcb_proto.mk b/depends/packages/xcb_proto.mk index 0c7c958d62d42..9be822506dbc9 100644 --- a/depends/packages/xcb_proto.mk +++ b/depends/packages/xcb_proto.mk @@ -1,13 +1,8 @@ package=xcb_proto -$(package)_version=1.10 -$(package)_download_path=http://xcb.freedesktop.org/dist -$(package)_file_name=xcb-proto-$($(package)_version).tar.bz2 -$(package)_sha256_hash=7ef40ddd855b750bc597d2a435da21e55e502a0fefa85b274f2c922800baaf05 - -define $(package)_set_vars - $(package)_config_opts=--disable-shared - $(package)_config_opts_linux=--with-pic -endef +$(package)_version=1.14.1 +$(package)_download_path=https://xorg.freedesktop.org/archive/individual/proto +$(package)_file_name=xcb-proto-$($(package)_version).tar.xz +$(package)_sha256_hash=f04add9a972ac334ea11d9d7eb4fc7f8883835da3e4859c9afa971efdf57fcc3 define $(package)_config_cmds $($(package)_autoconf) @@ -22,6 +17,5 @@ define $(package)_stage_cmds endef define $(package)_postprocess_cmds - find -name "*.pyc" -delete && \ - find -name "*.pyo" -delete + rm -rf lib/python*/site-packages/xcbgen/__pycache__ endef diff --git a/depends/packages/xextproto.mk b/depends/packages/xextproto.mk deleted file mode 100644 index 98a11eb4974f8..0000000000000 --- a/depends/packages/xextproto.mk +++ /dev/null @@ -1,21 +0,0 @@ -package=xextproto -$(package)_version=7.3.0 -$(package)_download_path=http://xorg.freedesktop.org/releases/individual/proto -$(package)_file_name=$(package)-$($(package)_version).tar.bz2 -$(package)_sha256_hash=f3f4b23ac8db9c3a9e0d8edb591713f3d70ef9c3b175970dd8823dfc92aa5bb0 - -define $(package)_set_vars -$(package)_config_opts=--disable-shared -endef - -define $(package)_config_cmds - $($(package)_autoconf) -endef - -define $(package)_build_cmds - $(MAKE) -endef - -define $(package)_stage_cmds - $(MAKE) DESTDIR=$($(package)_staging_dir) install -endef diff --git a/depends/packages/xproto.mk b/depends/packages/xproto.mk index 50a90b26850ac..7a43c52faf432 100644 --- a/depends/packages/xproto.mk +++ b/depends/packages/xproto.mk @@ -1,11 +1,16 @@ package=xproto -$(package)_version=7.0.26 -$(package)_download_path=http://xorg.freedesktop.org/releases/individual/proto +$(package)_version=7.0.31 +$(package)_download_path=https://xorg.freedesktop.org/releases/individual/proto $(package)_file_name=$(package)-$($(package)_version).tar.bz2 
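Editorial aside: several recipes touched above (sqlite, xproto, qrencode, zeromq and others) gain a preprocess step that copies fresh config.guess and config.sub files over the copies bundled in the release tarballs. Bundled copies are often too old to recognise newer host triplets such as aarch64 or riscv64, so the depends tree keeps current copies under $(BASEDIR) and recipes refresh them before running configure, copying into whichever directory the tarball keeps its autotools helpers (".", build-aux, config, or use in the hunks above). The opt-in is a one-line preprocess step of this form:

define $(package)_preprocess_cmds
  cp -f $(BASEDIR)/config.guess $(BASEDIR)/config.sub .
endef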
-$(package)_sha256_hash=636162c1759805a5a0114a369dffdeccb8af8c859ef6e1445f26a4e6e046514f +$(package)_sha256_hash=c6f9747da0bd3a95f86b17fb8dd5e717c8f3ab7f0ece3ba1b247899ec1ef7747 define $(package)_set_vars -$(package)_config_opts=--disable-shared +$(package)_config_opts=--without-fop --without-xmlto --without-xsltproc --disable-specs +$(package)_config_opts += --disable-dependency-tracking --enable-option-checking +endef + +define $(package)_preprocess_cmds + cp -f $(BASEDIR)/config.guess $(BASEDIR)/config.sub . endef define $(package)_config_cmds diff --git a/depends/packages/xtrans.mk b/depends/packages/xtrans.mk deleted file mode 100644 index 99eefa6d5ea24..0000000000000 --- a/depends/packages/xtrans.mk +++ /dev/null @@ -1,22 +0,0 @@ -package=xtrans -$(package)_version=1.3.4 -$(package)_download_path=http://xorg.freedesktop.org/releases/individual/lib/ -$(package)_file_name=$(package)-$($(package)_version).tar.bz2 -$(package)_sha256_hash=054d4ee3efd52508c753e9f7bc655ef185a29bd2850dd9e2fc2ccc33544f583a -$(package)_dependencies= - -define $(package)_set_vars -$(package)_config_opts_linux=--with-pic --disable-static -endef - -define $(package)_config_cmds - $($(package)_autoconf) -endef - -define $(package)_build_cmds - $(MAKE) -endef - -define $(package)_stage_cmds - $(MAKE) DESTDIR=$($(package)_staging_dir) install -endef diff --git a/depends/packages/zeromq.mk b/depends/packages/zeromq.mk new file mode 100644 index 0000000000000..c74ae15b3131d --- /dev/null +++ b/depends/packages/zeromq.mk @@ -0,0 +1,43 @@ +package=zeromq +$(package)_version=4.3.4 +$(package)_download_path=https://github.com/zeromq/libzmq/releases/download/v$($(package)_version)/ +$(package)_file_name=$(package)-$($(package)_version).tar.gz +$(package)_sha256_hash=c593001a89f5a85dd2ddf564805deb860e02471171b3f204944857336295c3e5 +$(package)_patches=remove_libstd_link.patch netbsd_kevent_void.patch + +define $(package)_set_vars + $(package)_config_opts = --without-docs --disable-shared --disable-valgrind + $(package)_config_opts += --disable-perf --disable-curve-keygen --disable-curve --disable-libbsd + $(package)_config_opts += --without-libsodium --without-libgssapi_krb5 --without-pgm --without-norm --without-vmci + $(package)_config_opts += --disable-libunwind --disable-radix-tree --without-gcov --disable-dependency-tracking + $(package)_config_opts += --disable-Werror --disable-drafts --enable-option-checking + $(package)_config_opts_linux=--with-pic + $(package)_config_opts_freebsd=--with-pic + $(package)_config_opts_netbsd=--with-pic + $(package)_config_opts_openbsd=--with-pic + $(package)_config_opts_android=--with-pic + $(package)_cxxflags+=-std=c++17 +endef + +define $(package)_preprocess_cmds + patch -p1 < $($(package)_patch_dir)/remove_libstd_link.patch && \ + patch -p1 < $($(package)_patch_dir)/netbsd_kevent_void.patch && \ + cp -f $(BASEDIR)/config.guess $(BASEDIR)/config.sub config +endef + +define $(package)_config_cmds + ./autogen.sh && \ + $($(package)_autoconf) +endef + +define $(package)_build_cmds + $(MAKE) src/libzmq.la +endef + +define $(package)_stage_cmds + $(MAKE) DESTDIR=$($(package)_staging_dir) install-libLTLIBRARIES install-includeHEADERS install-pkgconfigDATA +endef + +define $(package)_postprocess_cmds + rm -rf bin share lib/*.la +endef diff --git a/depends/patches/bdb/clang_cxx_11.patch b/depends/patches/bdb/clang_cxx_11.patch new file mode 100644 index 0000000000000..58f7ddc7d502c --- /dev/null +++ b/depends/patches/bdb/clang_cxx_11.patch @@ -0,0 +1,147 @@ +commit 
3311d68f11d1697565401eee6efc85c34f022ea7 +Author: fanquake +Date: Mon Aug 17 20:03:56 2020 +0800 + + Fix C++11 compatibility + +diff --git a/dbinc/atomic.h b/dbinc/atomic.h +index 0034dcc..7c11d4a 100644 +--- a/dbinc/atomic.h ++++ b/dbinc/atomic.h +@@ -70,7 +70,7 @@ typedef struct { + * These have no memory barriers; the caller must include them when necessary. + */ + #define atomic_read(p) ((p)->value) +-#define atomic_init(p, val) ((p)->value = (val)) ++#define atomic_init_db(p, val) ((p)->value = (val)) + + #ifdef HAVE_ATOMIC_SUPPORT + +@@ -144,7 +144,7 @@ typedef LONG volatile *interlocked_val; + #define atomic_inc(env, p) __atomic_inc(p) + #define atomic_dec(env, p) __atomic_dec(p) + #define atomic_compare_exchange(env, p, o, n) \ +- __atomic_compare_exchange((p), (o), (n)) ++ __atomic_compare_exchange_db((p), (o), (n)) + static inline int __atomic_inc(db_atomic_t *p) + { + int temp; +@@ -176,7 +176,7 @@ static inline int __atomic_dec(db_atomic_t *p) + * http://gcc.gnu.org/onlinedocs/gcc-4.1.0/gcc/Atomic-Builtins.html + * which configure could be changed to use. + */ +-static inline int __atomic_compare_exchange( ++static inline int __atomic_compare_exchange_db( + db_atomic_t *p, atomic_value_t oldval, atomic_value_t newval) + { + atomic_value_t was; +@@ -206,7 +206,7 @@ static inline int __atomic_compare_exchange( + #define atomic_dec(env, p) (--(p)->value) + #define atomic_compare_exchange(env, p, oldval, newval) \ + (DB_ASSERT(env, atomic_read(p) == (oldval)), \ +- atomic_init(p, (newval)), 1) ++ atomic_init_db(p, (newval)), 1) + #else + #define atomic_inc(env, p) __atomic_inc(env, p) + #define atomic_dec(env, p) __atomic_dec(env, p) +diff --git a/mp/mp_fget.c b/mp/mp_fget.c +index 5fdee5a..0b75f57 100644 +--- a/mp/mp_fget.c ++++ b/mp/mp_fget.c +@@ -617,7 +617,7 @@ alloc: /* Allocate a new buffer header and data space. */ + + /* Initialize enough so we can call __memp_bhfree. */ + alloc_bhp->flags = 0; +- atomic_init(&alloc_bhp->ref, 1); ++ atomic_init_db(&alloc_bhp->ref, 1); + #ifdef DIAGNOSTIC + if ((uintptr_t)alloc_bhp->buf & (sizeof(size_t) - 1)) { + __db_errx(env, +@@ -911,7 +911,7 @@ alloc: /* Allocate a new buffer header and data space. 
*/ + MVCC_MPROTECT(bhp->buf, mfp->stat.st_pagesize, + PROT_READ); + +- atomic_init(&alloc_bhp->ref, 1); ++ atomic_init_db(&alloc_bhp->ref, 1); + MUTEX_LOCK(env, alloc_bhp->mtx_buf); + alloc_bhp->priority = bhp->priority; + alloc_bhp->pgno = bhp->pgno; +diff --git a/mp/mp_mvcc.c b/mp/mp_mvcc.c +index 34467d2..f05aa0c 100644 +--- a/mp/mp_mvcc.c ++++ b/mp/mp_mvcc.c +@@ -276,7 +276,7 @@ __memp_bh_freeze(dbmp, infop, hp, bhp, need_frozenp) + #else + memcpy(frozen_bhp, bhp, SSZA(BH, buf)); + #endif +- atomic_init(&frozen_bhp->ref, 0); ++ atomic_init_db(&frozen_bhp->ref, 0); + if (mutex != MUTEX_INVALID) + frozen_bhp->mtx_buf = mutex; + else if ((ret = __mutex_alloc(env, MTX_MPOOL_BH, +@@ -428,7 +428,7 @@ __memp_bh_thaw(dbmp, infop, hp, frozen_bhp, alloc_bhp) + #endif + alloc_bhp->mtx_buf = mutex; + MUTEX_LOCK(env, alloc_bhp->mtx_buf); +- atomic_init(&alloc_bhp->ref, 1); ++ atomic_init_db(&alloc_bhp->ref, 1); + F_CLR(alloc_bhp, BH_FROZEN); + } + +diff --git a/mp/mp_region.c b/mp/mp_region.c +index e6cece9..ddbe906 100644 +--- a/mp/mp_region.c ++++ b/mp/mp_region.c +@@ -224,7 +224,7 @@ __memp_init(env, dbmp, reginfo_off, htab_buckets, max_nreg) + MTX_MPOOL_FILE_BUCKET, 0, &htab[i].mtx_hash)) != 0) + return (ret); + SH_TAILQ_INIT(&htab[i].hash_bucket); +- atomic_init(&htab[i].hash_page_dirty, 0); ++ atomic_init_db(&htab[i].hash_page_dirty, 0); + } + + /* +@@ -269,7 +269,7 @@ __memp_init(env, dbmp, reginfo_off, htab_buckets, max_nreg) + hp->mtx_hash = (mtx_base == MUTEX_INVALID) ? MUTEX_INVALID : + mtx_base + i; + SH_TAILQ_INIT(&hp->hash_bucket); +- atomic_init(&hp->hash_page_dirty, 0); ++ atomic_init_db(&hp->hash_page_dirty, 0); + #ifdef HAVE_STATISTICS + hp->hash_io_wait = 0; + hp->hash_frozen = hp->hash_thawed = hp->hash_frozen_freed = 0; +diff --git a/mutex/mut_method.c b/mutex/mut_method.c +index 2588763..5c6d516 100644 +--- a/mutex/mut_method.c ++++ b/mutex/mut_method.c +@@ -426,7 +426,7 @@ atomic_compare_exchange(env, v, oldval, newval) + MUTEX_LOCK(env, mtx); + ret = atomic_read(v) == oldval; + if (ret) +- atomic_init(v, newval); ++ atomic_init_db(v, newval); + MUTEX_UNLOCK(env, mtx); + + return (ret); +diff --git a/mutex/mut_tas.c b/mutex/mut_tas.c +index f3922e0..e40fcdf 100644 +--- a/mutex/mut_tas.c ++++ b/mutex/mut_tas.c +@@ -46,7 +46,7 @@ __db_tas_mutex_init(env, mutex, flags) + + #ifdef HAVE_SHARED_LATCHES + if (F_ISSET(mutexp, DB_MUTEX_SHARED)) +- atomic_init(&mutexp->sharecount, 0); ++ atomic_init_db(&mutexp->sharecount, 0); + else + #endif + if (MUTEX_INIT(&mutexp->tas)) { +@@ -486,7 +486,7 @@ __db_tas_mutex_unlock(env, mutex) + F_CLR(mutexp, DB_MUTEX_LOCKED); + /* Flush flag update before zeroing count */ + MEMBAR_EXIT(); +- atomic_init(&mutexp->sharecount, 0); ++ atomic_init_db(&mutexp->sharecount, 0); + } else { + DB_ASSERT(env, sharecount > 0); + MEMBAR_EXIT(); diff --git a/depends/patches/boost/darwin_boost_atomic-1.patch b/depends/patches/boost/darwin_boost_atomic-1.patch deleted file mode 100644 index 97f59cb7e4eea..0000000000000 --- a/depends/patches/boost/darwin_boost_atomic-1.patch +++ /dev/null @@ -1,35 +0,0 @@ -diff --git a/include/boost/atomic/detail/cas128strong.hpp b/include/boost/atomic/detail/cas128strong.hpp -index 906c13e..dcb4d7d 100644 ---- a/include/boost/atomic/detail/cas128strong.hpp -+++ b/include/boost/atomic/detail/cas128strong.hpp -@@ -196,15 +196,17 @@ class base_atomic - - public: - BOOST_DEFAULTED_FUNCTION(base_atomic(void), {}) -- explicit base_atomic(value_type const& v) BOOST_NOEXCEPT : v_(0) -+ explicit base_atomic(value_type const& v) 
BOOST_NOEXCEPT - { -+ memset(&v_, 0, sizeof(v_)); - memcpy(&v_, &v, sizeof(value_type)); - } - - void - store(value_type const& value, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { -- storage_type value_s = 0; -+ storage_type value_s; -+ memset(&value_s, 0, sizeof(value_s)); - memcpy(&value_s, &value, sizeof(value_type)); - platform_fence_before_store(order); - platform_store128(value_s, &v_); -@@ -247,7 +249,9 @@ class base_atomic - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { -- storage_type expected_s = 0, desired_s = 0; -+ storage_type expected_s, desired_s; -+ memset(&expected_s, 0, sizeof(expected_s)); -+ memset(&desired_s, 0, sizeof(desired_s)); - memcpy(&expected_s, &expected, sizeof(value_type)); - memcpy(&desired_s, &desired, sizeof(value_type)); - diff --git a/depends/patches/boost/darwin_boost_atomic-2.patch b/depends/patches/boost/darwin_boost_atomic-2.patch deleted file mode 100644 index ca50765200ec8..0000000000000 --- a/depends/patches/boost/darwin_boost_atomic-2.patch +++ /dev/null @@ -1,55 +0,0 @@ -diff --git a/include/boost/atomic/detail/gcc-atomic.hpp b/include/boost/atomic/detail/gcc-atomic.hpp -index a130590..4af99a1 100644 ---- a/include/boost/atomic/detail/gcc-atomic.hpp -+++ b/include/boost/atomic/detail/gcc-atomic.hpp -@@ -958,14 +958,16 @@ class base_atomic - - public: - BOOST_DEFAULTED_FUNCTION(base_atomic(void), {}) -- explicit base_atomic(value_type const& v) BOOST_NOEXCEPT : v_(0) -+ explicit base_atomic(value_type const& v) BOOST_NOEXCEPT - { -+ memset(&v_, 0, sizeof(v_)); - memcpy(&v_, &v, sizeof(value_type)); - } - - void store(value_type const& v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { -- storage_type tmp = 0; -+ storage_type tmp; -+ memset(&tmp, 0, sizeof(tmp)); - memcpy(&tmp, &v, sizeof(value_type)); - __atomic_store_n(&v_, tmp, atomics::detail::convert_memory_order_to_gcc(order)); - } -@@ -980,7 +982,8 @@ class base_atomic - - value_type exchange(value_type const& v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { -- storage_type tmp = 0; -+ storage_type tmp; -+ memset(&tmp, 0, sizeof(tmp)); - memcpy(&tmp, &v, sizeof(value_type)); - tmp = __atomic_exchange_n(&v_, tmp, atomics::detail::convert_memory_order_to_gcc(order)); - value_type res; -@@ -994,7 +997,9 @@ class base_atomic - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { -- storage_type expected_s = 0, desired_s = 0; -+ storage_type expected_s, desired_s; -+ memset(&expected_s, 0, sizeof(expected_s)); -+ memset(&desired_s, 0, sizeof(desired_s)); - memcpy(&expected_s, &expected, sizeof(value_type)); - memcpy(&desired_s, &desired, sizeof(value_type)); - const bool success = __atomic_compare_exchange_n(&v_, &expected_s, desired_s, false, -@@ -1010,7 +1015,9 @@ class base_atomic - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { -- storage_type expected_s = 0, desired_s = 0; -+ storage_type expected_s, desired_s; -+ memset(&expected_s, 0, sizeof(expected_s)); -+ memset(&desired_s, 0, sizeof(desired_s)); - memcpy(&expected_s, &expected, sizeof(value_type)); - memcpy(&desired_s, &desired, sizeof(value_type)); - const bool success = __atomic_compare_exchange_n(&v_, &expected_s, desired_s, true, diff --git a/depends/patches/fontconfig/gperf_header_regen.patch b/depends/patches/fontconfig/gperf_header_regen.patch new file mode 100644 index 0000000000000..b1a70d5fb12cf --- /dev/null +++ 
b/depends/patches/fontconfig/gperf_header_regen.patch @@ -0,0 +1,24 @@ +commit 7b6eb33ecd88768b28c67ce5d2d68a7eed5936b6 +Author: fanquake +Date: Tue Aug 25 14:34:53 2020 +0800 + + Remove rule that causes inadvertent header regeneration + + Otherwise the makefile will needlessly attempt to re-generate the + headers with gperf. This can be dropped once the upstream build is fixed. + + See #10851. + +diff --git a/src/Makefile.in b/src/Makefile.in +index f4626ad..4ae1b00 100644 +--- a/src/Makefile.in ++++ b/src/Makefile.in +@@ -912,7 +912,7 @@ + ' - > $@.tmp && \ + mv -f $@.tmp fcobjshash.gperf && touch $@ || ( $(RM) $@.tmp && false ) + +-fcobjshash.h: Makefile fcobjshash.gperf ++fcobjshash.h: + $(AM_V_GEN) $(GPERF) --pic -m 100 fcobjshash.gperf > $@.tmp && \ + mv -f $@.tmp $@ || ( $(RM) $@.tmp && false ) + diff --git a/depends/patches/miniupnpc/dont_leak_info.patch b/depends/patches/miniupnpc/dont_leak_info.patch new file mode 100644 index 0000000000000..512f9c50ea8d7 --- /dev/null +++ b/depends/patches/miniupnpc/dont_leak_info.patch @@ -0,0 +1,32 @@ +commit 8815452257437ba36607d0e2381c01142d1c7bb0 +Author: fanquake +Date: Thu Nov 19 10:51:19 2020 +0800 + + Don't leak OS and miniupnpc version info in User-Agent + +diff --git a//minisoap.c b/minisoap.c +index 7860667..775580b 100644 +--- a/minisoap.c ++++ b/minisoap.c +@@ -90,7 +90,7 @@ int soapPostSubmit(SOCKET fd, + headerssize = snprintf(headerbuf, sizeof(headerbuf), + "POST %s HTTP/%s\r\n" + "Host: %s%s\r\n" +- "User-Agent: " OS_STRING ", " UPNP_VERSION_STRING ", MiniUPnPc/" MINIUPNPC_VERSION_STRING "\r\n" ++ "User-Agent: " UPNP_VERSION_STRING "\r\n" + "Content-Length: %d\r\n" + "Content-Type: text/xml\r\n" + "SOAPAction: \"%s\"\r\n" +diff --git a/miniwget.c b/miniwget.c +index d5b7970..05aeb9c 100644 +--- a/miniwget.c ++++ b/miniwget.c +@@ -444,7 +444,7 @@ miniwget3(const char * host, + "GET %s HTTP/%s\r\n" + "Host: %s:%d\r\n" + "Connection: Close\r\n" +- "User-Agent: " OS_STRING ", " UPNP_VERSION_STRING ", MiniUPnPc/" MINIUPNPC_VERSION_STRING "\r\n" ++ "User-Agent: " UPNP_VERSION_STRING "\r\n" + + "\r\n", + path, httpversion, host, port); diff --git a/depends/patches/native_cdrkit/cdrkit-deterministic.patch b/depends/patches/native_cdrkit/cdrkit-deterministic.patch deleted file mode 100644 index 8ab0993dc4de1..0000000000000 --- a/depends/patches/native_cdrkit/cdrkit-deterministic.patch +++ /dev/null @@ -1,86 +0,0 @@ ---- cdrkit-1.1.11.old/genisoimage/tree.c 2008-10-21 19:57:47.000000000 -0400 -+++ cdrkit-1.1.11/genisoimage/tree.c 2013-12-06 00:23:18.489622668 -0500 -@@ -1139,8 +1139,9 @@ - scan_directory_tree(struct directory *this_dir, char *path, - struct directory_entry *de) - { -- DIR *current_dir; -+ int current_file; - char whole_path[PATH_MAX]; -+ struct dirent **d_list; - struct dirent *d_entry; - struct directory *parent; - int dflag; -@@ -1164,7 +1165,8 @@ - this_dir->dir_flags |= DIR_WAS_SCANNED; - - errno = 0; /* Paranoia */ -- current_dir = opendir(path); -+ //current_dir = opendir(path); -+ current_file = scandir(path, &d_list, NULL, alphasort); - d_entry = NULL; - - /* -@@ -1173,12 +1175,12 @@ - */ - old_path = path; - -- if (current_dir) { -+ if (current_file >= 0) { - errno = 0; -- d_entry = readdir(current_dir); -+ d_entry = d_list[0]; - } - -- if (!current_dir || !d_entry) { -+ if (current_file < 0 || !d_entry) { - int ret = 1; - - #ifdef USE_LIBSCHILY -@@ -1191,8 +1193,8 @@ - de->isorec.flags[0] &= ~ISO_DIRECTORY; - ret = 0; - } -- if (current_dir) -- closedir(current_dir); -+ if(d_list) -+ free(d_list); - return 
(ret); - } - #ifdef ABORT_DEEP_ISO_ONLY -@@ -1208,7 +1210,7 @@ - errmsgno(EX_BAD, "use Rock Ridge extensions via -R or -r,\n"); - errmsgno(EX_BAD, "or allow deep ISO9660 directory nesting via -D.\n"); - } -- closedir(current_dir); -+ free(d_list); - return (1); - } - #endif -@@ -1250,13 +1252,13 @@ - * The first time through, skip this, since we already asked - * for the first entry when we opened the directory. - */ -- if (dflag) -- d_entry = readdir(current_dir); -+ if (dflag && current_file >= 0) -+ d_entry = d_list[current_file]; - dflag++; - -- if (!d_entry) -+ if (current_file < 0) - break; -- -+ current_file--; - /* OK, got a valid entry */ - - /* If we do not want all files, then pitch the backups. */ -@@ -1348,7 +1350,7 @@ - insert_file_entry(this_dir, whole_path, d_entry->d_name); - #endif /* APPLE_HYB */ - } -- closedir(current_dir); -+ free(d_list); - - #ifdef APPLE_HYB - /* diff --git a/depends/patches/qt/dont_hardcode_pwd.patch b/depends/patches/qt/dont_hardcode_pwd.patch new file mode 100644 index 0000000000000..a74e9cb09872b --- /dev/null +++ b/depends/patches/qt/dont_hardcode_pwd.patch @@ -0,0 +1,27 @@ +commit 0e953866fc4672486e29e1ba6d83b4207e7b2f0b +Author: fanquake +Date: Tue Aug 18 15:09:06 2020 +0800 + + Don't hardcode pwd path + + Let a man use his builtins if he wants to! Also, removes the unnecessary + assumption that pwd lives under /bin/pwd. + + See #15581. + +diff --git a/qtbase/configure b/qtbase/configure +index 08b49a8d..faea5b55 100755 +--- a/qtbase/configure ++++ b/qtbase/configure +@@ -36,9 +36,9 @@ + relconf=`basename $0` + # the directory of this script is the "source tree" + relpath=`dirname $0` +-relpath=`(cd "$relpath"; /bin/pwd)` ++relpath=`(cd "$relpath"; pwd)` + # the current directory is the "build tree" or "object tree" +-outpath=`/bin/pwd` ++outpath=`pwd` + + WHICH="which" + diff --git a/depends/patches/qt/dont_hardcode_x86_64.patch b/depends/patches/qt/dont_hardcode_x86_64.patch new file mode 100644 index 0000000000000..5c1e030fa45b8 --- /dev/null +++ b/depends/patches/qt/dont_hardcode_x86_64.patch @@ -0,0 +1,119 @@ +macOS: Don't hard-code x86_64 as the architecture when using qmake + +Upstream commit: + - Qt 6.1: 9082cc8e8d5a6441dabe5e7a95bc0cd9085b95fe + +For other Qt branches see +https://codereview.qt-project.org/q/I70db7e4c27f0d3da5d0af33cb491d72c312d3fa8 + + +--- old/qtbase/configure.json ++++ new/qtbase/configure.json +@@ -244,11 +244,18 @@ + + "testTypeDependencies": { + "linkerSupportsFlag": [ "use_bfd_linker", "use_gold_linker", "use_lld_linker" ], +- "verifySpec": [ "shared", "use_bfd_linker", "use_gold_linker", "use_lld_linker", "compiler-flags", "qmakeargs", "commit" ], ++ "verifySpec": [ ++ "shared", ++ "use_bfd_linker", "use_gold_linker", "use_lld_linker", ++ "compiler-flags", "qmakeargs", ++ "simulator_and_device", ++ "thread", ++ "commit" ], + "compile": [ "verifyspec" ], + "detectPkgConfig": [ "cross_compile", "machineTuple" ], + "library": [ "pkg-config", "compiler-flags" ], +- "getPkgConfigVariable": [ "pkg-config" ] ++ "getPkgConfigVariable": [ "pkg-config" ], ++ "architecture" : [ "verifyspec" ] + }, + + "testTypeAliases": { +@@ -762,7 +769,7 @@ + }, + "architecture": { + "label": "Architecture", +- "output": [ "architecture" ] ++ "output": [ "architecture", "commitConfig" ] + }, + "pkg-config": { + "label": "Using pkg-config", +diff --git a/configure.pri b/configure.pri +index 49755f7abfd..8be9b10d7d4 100644 +--- old/qtbase/configure.pri ++++ new/qtbase/configure.pri +@@ -662,6 +662,13 @@ 
defineTest(qtConfOutput_commitOptions) { + write_file($$QT_BUILD_TREE/mkspecs/qdevice.pri, $${currentConfig}.output.devicePro)|error() + } + ++# Output is written after configuring each Qt module, ++# but some tests within a module might depend on the ++# configuration output of previous tests. ++defineTest(qtConfOutput_commitConfig) { ++ qtConfProcessOutput() ++} ++ + # type (empty or 'host'), option name, default value + defineTest(processQtPath) { + out_var = config.rel_input.$${2} +diff --git a/mkspecs/common/macx.conf b/mkspecs/common/macx.conf +index d16b77acb8e..4ba0a8eaa36 100644 +--- old/qtbase/mkspecs/common/macx.conf ++++ new/qtbase/mkspecs/common/macx.conf +@@ -6,7 +6,6 @@ QMAKE_PLATFORM += macos osx macx + QMAKE_MAC_SDK = macosx + + QMAKE_MACOSX_DEPLOYMENT_TARGET = 10.13 +-QMAKE_APPLE_DEVICE_ARCHS = x86_64 + + # Should be 10.15, but as long as the CI builds with + # older SDKs we have to keep this. +diff --git a/mkspecs/features/mac/default_post.prf b/mkspecs/features/mac/default_post.prf +index 92a9112bca6..d888731ec8d 100644 +--- old/qtbase/mkspecs/features/mac/default_post.prf ++++ new/qtbase/mkspecs/features/mac/default_post.prf +@@ -90,6 +90,11 @@ app_extension_api_only { + QMAKE_LFLAGS += $$QMAKE_CFLAGS_APPLICATION_EXTENSION + } + ++# Non-universal builds do not set QMAKE_APPLE_DEVICE_ARCHS, ++# so we pick it up from what the arch test resolved instead. ++isEmpty(QMAKE_APPLE_DEVICE_ARCHS): \ ++ QMAKE_APPLE_DEVICE_ARCHS = $$QT_ARCH ++ + macx-xcode { + qmake_pkginfo_typeinfo.name = QMAKE_PKGINFO_TYPEINFO + !isEmpty(QMAKE_PKGINFO_TYPEINFO): \ +@@ -145,9 +150,6 @@ macx-xcode { + simulator: VALID_SIMULATOR_ARCHS = $$QMAKE_APPLE_SIMULATOR_ARCHS + VALID_ARCHS = $$VALID_DEVICE_ARCHS $$VALID_SIMULATOR_ARCHS + +- isEmpty(VALID_ARCHS): \ +- error("QMAKE_APPLE_DEVICE_ARCHS or QMAKE_APPLE_SIMULATOR_ARCHS must contain at least one architecture") +- + single_arch: VALID_ARCHS = $$first(VALID_ARCHS) + + ACTIVE_ARCHS = $(filter $(EXPORT_VALID_ARCHS), $(ARCHS)) +diff --git a/mkspecs/features/toolchain.prf b/mkspecs/features/toolchain.prf +index efbe7c1e55b..8add6dc8043 100644 +--- old/qtbase/mkspecs/features/toolchain.prf ++++ new/qtbase/mkspecs/features/toolchain.prf +@@ -182,9 +182,14 @@ isEmpty($${target_prefix}.INCDIRS) { + # UIKit simulator platforms will see the device SDK's sysroot in + # QMAKE_DEFAULT_*DIRS, because they're handled in a single build pass. + darwin { +- # Clang doesn't pick up the architecture from the sysroot, and will +- # default to the host architecture, so we need to manually set it. +- cxx_flags += -arch $$QMAKE_APPLE_DEVICE_ARCHS ++ uikit { ++ # Clang doesn't automatically pick up the architecture, just because ++ # we're passing the iOS sysroot below, and we will end up building the ++ # test for the host architecture, resulting in linker errors when ++ # linking against the iOS libraries. We work around this by passing ++ # the architecture explicitly. ++ cxx_flags += -arch $$first(QMAKE_APPLE_DEVICE_ARCHS) ++ } + + uikit:macx-xcode: \ + cxx_flags += -isysroot $$sdk_path_device.value diff --git a/depends/patches/qt/duplicate_lcqpafonts.patch b/depends/patches/qt/duplicate_lcqpafonts.patch new file mode 100644 index 0000000000000..c460b51dcff67 --- /dev/null +++ b/depends/patches/qt/duplicate_lcqpafonts.patch @@ -0,0 +1,104 @@ +QtGui: Fix duplication of logging category lcQpaFonts + +Move it to qplatformfontdatabase.h. 
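For context (illustrative sketch, not part of the patch): the fix that follows works because a Qt logging category must be defined in exactly one translation unit and only declared everywhere else; defining lcQpaFonts in several files breaks static builds with duplicate symbols. A minimal sketch of that pattern with hypothetical names (lcExampleFonts, "qt.example.fonts"); in the actual patch the single definition moves to qplatformfontdatabase.cpp and the declaration to qplatformfontdatabase.h:

// Illustration only; these names are made up and the code is not taken from Qt.
#include <QtCore/QLoggingCategory>

Q_DECLARE_LOGGING_CATEGORY(lcExampleFonts)              // normally lives in a shared header
Q_LOGGING_CATEGORY(lcExampleFonts, "qt.example.fonts")  // single definition, in one .cpp only

int main()
{
    // Any other file that includes the header can log to the same category
    // without defining it again.
    qCDebug(lcExampleFonts, "font database initialised");
    return 0;
}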
+ +Upstream commit: + - Qt 6.0: ab01885e48873fb2ad71841a3f1627fe4d9cd835 + +--- a/qtbase/src/gui/text/qplatformfontdatabase.cpp ++++ b/qtbase/src/gui/text/qplatformfontdatabase.cpp +@@ -52,6 +52,8 @@ + + QT_BEGIN_NAMESPACE + ++Q_LOGGING_CATEGORY(lcQpaFonts, "qt.qpa.fonts") ++ + void qt_registerFont(const QString &familyname, const QString &stylename, + const QString &foundryname, int weight, + QFont::Style style, int stretch, bool antialiased, + +--- a/qtbase/src/gui/text/qplatformfontdatabase.h ++++ b/qtbase/src/gui/text/qplatformfontdatabase.h +@@ -50,6 +50,7 @@ + // + + #include ++#include + #include + #include + #include +@@ -62,6 +63,7 @@ + + QT_BEGIN_NAMESPACE + ++Q_DECLARE_LOGGING_CATEGORY(lcQpaFonts) + + class QWritingSystemsPrivate; + + +--- a/qtbase/src/platformsupport/fontdatabases/mac/qfontengine_coretext.mm ++++ b/qtbase/src/platformsupport/fontdatabases/mac/qfontengine_coretext.mm +@@ -86,8 +86,6 @@ + + QT_BEGIN_NAMESPACE + +-Q_LOGGING_CATEGORY(lcQpaFonts, "qt.qpa.fonts") +- + static float SYNTHETIC_ITALIC_SKEW = std::tan(14.f * std::acos(0.f) / 90.f); + + bool QCoreTextFontEngine::ct_getSfntTable(void *user_data, uint tag, uchar *buffer, uint *length) + +--- a/qtbase/src/platformsupport/fontdatabases/mac/qfontengine_coretext_p.h ++++ b/qtbase/src/platformsupport/fontdatabases/mac/qfontengine_coretext_p.h +@@ -64,8 +64,6 @@ + + QT_BEGIN_NAMESPACE + +-Q_DECLARE_LOGGING_CATEGORY(lcQpaFonts) +- + class QCoreTextFontEngine : public QFontEngine + { + Q_GADGET + +--- a/qtbase/src/platformsupport/fontdatabases/windows/qwindowsfontdatabase.cpp ++++ b/qtbase/src/platformsupport/fontdatabases/windows/qwindowsfontdatabase.cpp +@@ -68,8 +68,6 @@ + + QT_BEGIN_NAMESPACE + +-Q_LOGGING_CATEGORY(lcQpaFonts, "qt.qpa.fonts") +- + #ifndef QT_NO_DIRECTWRITE + // ### fixme: Consider direct linking of dwrite.dll once Windows Vista pre SP2 is dropped (QTBUG-49711) + + +--- a/qtbase/src/platformsupport/fontdatabases/windows/qwindowsfontdatabase_p.h ++++ b/qtbase/src/platformsupport/fontdatabases/windows/qwindowsfontdatabase_p.h +@@ -63,8 +63,6 @@ + + QT_BEGIN_NAMESPACE + +-Q_DECLARE_LOGGING_CATEGORY(lcQpaFonts) +- + class QWindowsFontEngineData + { + Q_DISABLE_COPY_MOVE(QWindowsFontEngineData) + +--- a/qtbase/src/platformsupport/themes/genericunix/qgenericunixthemes.cpp ++++ b/qtbase/src/platformsupport/themes/genericunix/qgenericunixthemes.cpp +@@ -40,6 +40,7 @@ + #include "qgenericunixthemes_p.h" + + #include "qpa/qplatformtheme_p.h" ++#include "qpa/qplatformfontdatabase.h" + + #include + #include +@@ -76,7 +77,6 @@ + QT_BEGIN_NAMESPACE + + Q_DECLARE_LOGGING_CATEGORY(qLcTray) +-Q_LOGGING_CATEGORY(lcQpaFonts, "qt.qpa.fonts") + + ResourceHelper::ResourceHelper() + { diff --git a/depends/patches/qt/fix-xcb-include-order.patch b/depends/patches/qt/fix-xcb-include-order.patch deleted file mode 100644 index bf6c6dca36cf8..0000000000000 --- a/depends/patches/qt/fix-xcb-include-order.patch +++ /dev/null @@ -1,21 +0,0 @@ ---- old/qtbase/src/plugins/platforms/xcb/xcb-plugin.pro 2014-07-30 18:17:27.384458441 -0400 -+++ new/qtbase/src/plugins/platforms/xcb/xcb-plugin.pro 2014-07-30 18:18:28.620459303 -0400 -@@ -101,10 +101,6 @@ - } - } - --DEFINES += $$QMAKE_DEFINES_XCB --LIBS += $$QMAKE_LIBS_XCB --QMAKE_CXXFLAGS += $$QMAKE_CFLAGS_XCB -- - CONFIG += qpa/genericunixfontdatabase - - contains(QT_CONFIG, dbus) { -@@ -141,3 +137,7 @@ - INCLUDEPATH += ../../../3rdparty/xkbcommon/xkbcommon/ - } - } -+ -+DEFINES += $$QMAKE_DEFINES_XCB -+LIBS += $$QMAKE_LIBS_XCB -+INCLUDEPATH += $$QMAKE_CFLAGS_XCB diff --git 
a/depends/patches/qt/fix_android_jni_static.patch b/depends/patches/qt/fix_android_jni_static.patch new file mode 100644 index 0000000000000..22a4d5ab0e150 --- /dev/null +++ b/depends/patches/qt/fix_android_jni_static.patch @@ -0,0 +1,18 @@ +--- old/qtbase/src/plugins/platforms/android/androidjnimain.cpp ++++ new/qtbase/src/plugins/platforms/android/androidjnimain.cpp +@@ -934,6 +934,14 @@ Q_DECL_EXPORT jint JNICALL JNI_OnLoad(JavaVM *vm, void */*reserved*/) + __android_log_print(ANDROID_LOG_FATAL, "Qt", "registerNatives failed"); + return -1; + } ++ ++ const jint ret = QT_PREPEND_NAMESPACE(QtAndroidPrivate::initJNI(vm, env)); ++ if (ret != 0) ++ { ++ __android_log_print(ANDROID_LOG_FATAL, "Qt", "initJNI failed"); ++ return ret; ++ } ++ + QWindowSystemInterfacePrivate::TabletEvent::setPlatformSynthesizesMouse(false); + + m_javaVM = vm; + diff --git a/depends/patches/qt/fix_limits_header.patch b/depends/patches/qt/fix_limits_header.patch new file mode 100644 index 0000000000000..258128c0ca13d --- /dev/null +++ b/depends/patches/qt/fix_limits_header.patch @@ -0,0 +1,33 @@ +Fix compiling with GCC 11 + +Upstream: + - bug report: https://bugreports.qt.io/browse/QTBUG-89977 + - fix in Qt 6.1: 813a928c7c3cf98670b6043149880ed5c955efb9 + +--- old/qtbase/src/corelib/text/qbytearraymatcher.h ++++ new/qtbase/src/corelib/text/qbytearraymatcher.h +@@ -42,6 +42,8 @@ + + #include + ++#include ++ + QT_BEGIN_NAMESPACE + + + +Upstream fix and backports: + - Qt 6.1: 3eab20ad382569cb2c9e6ccec2322c3d08c0f716 + - Qt 6.2: 380294a5971da85010a708dc23b0edec192cbf27 + - Qt 6.3: 2b2b3155d9f6ba1e4f859741468fbc47db09292b + +--- old/qtbase/src/corelib/tools/qoffsetstringarray_p.h ++++ new/qtbase/src/corelib/tools/qoffsetstringarray_p.h +@@ -55,6 +55,7 @@ + + #include + #include ++#include + + QT_BEGIN_NAMESPACE + diff --git a/depends/patches/qt/fix_montery_include.patch b/depends/patches/qt/fix_montery_include.patch new file mode 100644 index 0000000000000..38b700addfe7a --- /dev/null +++ b/depends/patches/qt/fix_montery_include.patch @@ -0,0 +1,21 @@ +From dece6f5840463ae2ddf927d65eb1b3680e34a547 +From: Øystein Heskestad +Date: Wed, 27 Oct 2021 13:07:46 +0200 +Subject: [PATCH] Add missing macOS header file that was indirectly included before + +See: https://bugreports.qt.io/browse/QTBUG-97855 + +Upstream Commits: + - Qt 6.2: c884bf138a21dd7320e35cef34d24e22e74d7ce0 + +diff --git a/qtbase/src/plugins/platforms/cocoa/qiosurfacegraphicsbuffer.h b/qtbase/src/plugins/platforms/cocoa/qiosurfacegraphicsbuffer.h +index e070ba97..07c75b04 100644 +--- a/qtbase/src/plugins/platforms/cocoa/qiosurfacegraphicsbuffer.h ++++ b/qtbase/src/plugins/platforms/cocoa/qiosurfacegraphicsbuffer.h +@@ -40,6 +40,7 @@ + #ifndef QIOSURFACEGRAPHICSBUFFER_H + #define QIOSURFACEGRAPHICSBUFFER_H + ++#include + #include + #include diff --git a/depends/patches/qt/fix_qt_pkgconfig.patch b/depends/patches/qt/fix_qt_pkgconfig.patch new file mode 100644 index 0000000000000..73f4d89f7354e --- /dev/null +++ b/depends/patches/qt/fix_qt_pkgconfig.patch @@ -0,0 +1,11 @@ +--- old/qtbase/mkspecs/features/qt_module.prf ++++ new/qtbase/mkspecs/features/qt_module.prf +@@ -269,7 +269,7 @@ load(qt_installs) + load(qt_targets) + + # this builds on top of qt_common +-!internal_module:if(unix|mingw):!if(darwin:debug_and_release:CONFIG(debug, debug|release)) { ++if(unix|mingw):!if(darwin:debug_and_release:CONFIG(debug, debug|release)) { + CONFIG += create_pc + QMAKE_PKGCONFIG_DESTDIR = pkgconfig + host_build: \ diff --git a/depends/patches/qt/mac-qmake.conf 
b/depends/patches/qt/mac-qmake.conf index f7302265bcd99..543407f853f9e 100644 --- a/depends/patches/qt/mac-qmake.conf +++ b/depends/patches/qt/mac-qmake.conf @@ -1,23 +1,22 @@ MAKEFILE_GENERATOR = UNIX -CONFIG += app_bundle incremental global_init_link_order lib_version_first plugin_no_soname absolute_library_soname +CONFIG += app_bundle incremental lib_version_first absolute_library_soname QMAKE_INCREMENTAL_STYLE = sublib include(../common/macx.conf) include(../common/gcc-base-mac.conf) include(../common/clang.conf) include(../common/clang-mac.conf) QMAKE_MAC_SDK_PATH=$${MAC_SDK_PATH} -QMAKE_XCODE_VERSION=4.3 +QMAKE_XCODE_VERSION = $${XCODE_VERSION} QMAKE_XCODE_DEVELOPER_PATH=/Developer -QMAKE_MACOSX_DEPLOYMENT_TARGET = $${MAC_MIN_VERSION} QMAKE_MAC_SDK=macosx -QMAKE_MAC_SDK.macosx.path = $$QMAKE_MAC_SDK_PATH +QMAKE_MAC_SDK.macosx.Path = $${MAC_SDK_PATH} QMAKE_MAC_SDK.macosx.platform_name = macosx -QMAKE_CFLAGS += -target $${MAC_TARGET} -QMAKE_OBJECTIVE_CFLAGS += $$QMAKE_CFLAGS -QMAKE_CXXFLAGS += $$QMAKE_CFLAGS -QMAKE_LFLAGS += -target $${MAC_TARGET} +QMAKE_MAC_SDK.macosx.SDKVersion = $${MAC_SDK_VERSION} +QMAKE_MAC_SDK.macosx.PlatformPath = /phony +!host_build: QMAKE_CFLAGS += -target $${MAC_TARGET} +!host_build: QMAKE_OBJECTIVE_CFLAGS += $$QMAKE_CFLAGS +!host_build: QMAKE_CXXFLAGS += $$QMAKE_CFLAGS +!host_build: QMAKE_LFLAGS += -target $${MAC_TARGET} QMAKE_AR = $${CROSS_COMPILE}ar cq QMAKE_RANLIB=$${CROSS_COMPILE}ranlib -QMAKE_LIBTOOL=$${CROSS_COMPILE}libtool -QMAKE_INSTALL_NAME_TOOL=$${CROSS_COMPILE}install_name_tool load(qt_config) diff --git a/depends/patches/qt/no-xlib.patch b/depends/patches/qt/no-xlib.patch new file mode 100644 index 0000000000000..d6846aaca2c29 --- /dev/null +++ b/depends/patches/qt/no-xlib.patch @@ -0,0 +1,69 @@ +From 9563cef873ae82e06f60708d706d054717e801ce Mon Sep 17 00:00:00 2001 +From: Carl Dong +Date: Thu, 18 Jul 2019 17:22:05 -0400 +Subject: [PATCH] Wrap xlib related code blocks in #if's + +They are not necessary to compile QT. 
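For context (illustrative sketch, not part of the patch): the hunks that follow wrap Xlib-specific code in QT_CONFIG(xcb_xlib) && QT_CONFIG(library) guards so the xcb plugin still builds when Qt is configured without Xlib. The sketch below stubs the feature macros by hand so it compiles standalone; the QT_CONFIG definition mirrors Qt's, but the feature values and the helper function are hypothetical:

// Illustration only; not code from the patch.
#define QT_FEATURE_xcb_xlib -1                        // pretend Xlib support is disabled
#define QT_FEATURE_library   1
#define QT_CONFIG(feature) (1/QT_FEATURE_##feature == 1)

#if QT_CONFIG(xcb_xlib) && QT_CONFIG(library)
#  include <X11/cursorfont.h>                         // only pulled in when Xlib is available
static int cursorIdForShape(int cshape) { return XC_arrow + cshape; }
#endif

int main()
{
#if QT_CONFIG(xcb_xlib) && QT_CONFIG(library)
    return cursorIdForShape(0);                       // Xlib-backed path
#else
    return 0;                                         // XCB-only fallback, as in the patched code
#endif
}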
+--- + qtbase/src/plugins/platforms/xcb/qxcbcursor.cpp | 8 ++++++++ + 1 file changed, 8 insertions(+) + +diff --git a/qtbase/src/plugins/platforms/xcb/qxcbcursor.cpp b/qtbase/src/plugins/platforms/xcb/qxcbcursor.cpp +index 7c62c2e2b3..c05c6c0a07 100644 +--- a/qtbase/src/plugins/platforms/xcb/qxcbcursor.cpp ++++ b/qtbase/src/plugins/platforms/xcb/qxcbcursor.cpp +@@ -49,7 +49,9 @@ + #include + #include + #include ++#if QT_CONFIG(xcb_xlib) && QT_CONFIG(library) + #include ++#endif + #include + #include + +@@ -391,6 +391,7 @@ void QXcbCursor::changeCursor(QCursor *cursor, QWindow *window) + xcb_flush(xcb_connection()); + } + ++#if QT_CONFIG(xcb_xlib) && QT_CONFIG(library) + static int cursorIdForShape(int cshape) + { + int cursorId = 0; +@@ -444,6 +445,7 @@ static int cursorIdForShape(int cshape) + } + return cursorId; + } ++#endif + + xcb_cursor_t QXcbCursor::createNonStandardCursor(int cshape) + { +@@ -556,7 +558,9 @@ static xcb_cursor_t loadCursor(void *dpy, int cshape) + xcb_cursor_t QXcbCursor::createFontCursor(int cshape) + { + xcb_connection_t *conn = xcb_connection(); ++#if QT_CONFIG(xcb_xlib) && QT_CONFIG(library) + int cursorId = cursorIdForShape(cshape); ++#endif + xcb_cursor_t cursor = XCB_NONE; + + // Try Xcursor first +@@ -586,6 +590,7 @@ xcb_cursor_t QXcbCursor::createFontCursor(int cshape) + // Non-standard X11 cursors are created from bitmaps + cursor = createNonStandardCursor(cshape); + ++#if QT_CONFIG(xcb_xlib) && QT_CONFIG(library) + // Create a glpyh cursor if everything else failed + if (!cursor && cursorId) { + cursor = xcb_generate_id(conn); +@@ -593,6 +598,7 @@ xcb_cursor_t QXcbCursor::createFontCursor(int cshape) + cursorId, cursorId + 1, + 0xFFFF, 0xFFFF, 0xFFFF, 0, 0, 0); + } ++#endif + + if (cursor && cshape >= 0 && cshape < Qt::LastCursor && connection()->hasXFixes()) { + const char *name = cursorNames[cshape].front(); +-- +2.22.0 + diff --git a/depends/patches/qt/qt.pro b/depends/patches/qt/qt.pro new file mode 100644 index 0000000000000..8f2e900a840fb --- /dev/null +++ b/depends/patches/qt/qt.pro @@ -0,0 +1,16 @@ +# Create the super cache so modules will add themselves to it. 
+cache(, super) + +!QTDIR_build: cache(CONFIG, add, $$list(QTDIR_build)) + +prl = no_install_prl +CONFIG += $$prl +cache(CONFIG, add stash, prl) + +TEMPLATE = subdirs +SUBDIRS = qtbase qttools qttranslations + +qttools.depends = qtbase +qttranslations.depends = qttools + +load(qt_configure) diff --git a/depends/patches/qt/qt5-tablet-osx.patch b/depends/patches/qt/qt5-tablet-osx.patch deleted file mode 100644 index 7deabf8d4e042..0000000000000 --- a/depends/patches/qt/qt5-tablet-osx.patch +++ /dev/null @@ -1,20 +0,0 @@ ---- old/qtbase/src/widgets/kernel/qwidgetwindow.cpp 2014-09-05 20:45:18.717570370 -0400 -+++ new/qtbase/src/widgets/kernel/qwidgetwindow.cpp 2014-09-05 20:52:38.653576561 -0400 -@@ -57,7 +57,7 @@ - Q_WIDGETS_EXPORT extern bool qt_tab_all_widgets(); - - QWidget *qt_button_down = 0; // widget got last button-down --static QWidget *qt_tablet_target = 0; -+static QPointer qt_tablet_target = 0; - - // popup control - QWidget *qt_popup_down = 0; // popup that contains the pressed widget -@@ -96,8 +96,6 @@ - - QWidgetWindow::~QWidgetWindow() - { -- if (m_widget == qt_tablet_target) -- qt_tablet_target = 0; - } - - #ifndef QT_NO_ACCESSIBILITY diff --git a/depends/patches/qt/qtbase-moc-ignore-gcc-macro.patch b/depends/patches/qt/qtbase-moc-ignore-gcc-macro.patch new file mode 100644 index 0000000000000..f0c14a9400e12 --- /dev/null +++ b/depends/patches/qt/qtbase-moc-ignore-gcc-macro.patch @@ -0,0 +1,17 @@ +The moc executable loops through headers on CPLUS_INCLUDE_PATH and stumbles +on the GCC internal _GLIBCXX_VISIBILITY macro. Tell it to ignore it as it is +not supposed to be looking there to begin with. + +Upstream report: https://bugreports.qt.io/browse/QTBUG-83160 + +diff --git a/qtbase/src/tools/moc/main.cpp b/qtbase/src/tools/moc/main.cpp +--- a/qtbase/src/tools/moc/main.cpp ++++ b/qtbase/src/tools/moc/main.cpp +@@ -238,6 +238,7 @@ int runMoc(int argc, char **argv) + dummyVariadicFunctionMacro.arguments += Symbol(0, PP_IDENTIFIER, "__VA_ARGS__"); + pp.macros["__attribute__"] = dummyVariadicFunctionMacro; + pp.macros["__declspec"] = dummyVariadicFunctionMacro; ++ pp.macros["_GLIBCXX_VISIBILITY"] = dummyVariadicFunctionMacro; + + QString filename; + QString output; diff --git a/depends/patches/qt/qttools_src.pro b/depends/patches/qt/qttools_src.pro new file mode 100644 index 0000000000000..6ef71a0942735 --- /dev/null +++ b/depends/patches/qt/qttools_src.pro @@ -0,0 +1,6 @@ +TEMPLATE = subdirs +SUBDIRS = linguist + +fb = force_bootstrap +CONFIG += $$fb +cache(CONFIG, add, fb) diff --git a/depends/patches/qt/rcc_hardcode_timestamp.patch b/depends/patches/qt/rcc_hardcode_timestamp.patch new file mode 100644 index 0000000000000..03f3897975646 --- /dev/null +++ b/depends/patches/qt/rcc_hardcode_timestamp.patch @@ -0,0 +1,24 @@ +Hardcode last modified timestamp in Qt RCC + +This change allows the already built qt package to be reused even with +the SOURCE_DATE_EPOCH variable set, e.g., for Guix builds. + + +--- old/qtbase/src/tools/rcc/rcc.cpp ++++ new/qtbase/src/tools/rcc/rcc.cpp +@@ -227,14 +227,7 @@ void RCCFileInfo::writeDataInfo(RCCResourceLibrary &lib) + + if (lib.formatVersion() >= 2) { + // last modified time stamp +- const QDateTime lastModified = m_fileInfo.lastModified(); +- quint64 lastmod = quint64(lastModified.isValid() ? 
lastModified.toMSecsSinceEpoch() : 0); +- static const quint64 sourceDate = 1000 * qgetenv("QT_RCC_SOURCE_DATE_OVERRIDE").toULongLong(); +- if (sourceDate != 0) +- lastmod = sourceDate; +- static const quint64 sourceDate2 = 1000 * qgetenv("SOURCE_DATE_EPOCH").toULongLong(); +- if (sourceDate2 != 0) +- lastmod = sourceDate2; ++ quint64 lastmod = quint64(1); + lib.writeNumber8(lastmod); + if (text || pass1) + lib.writeChar('\n'); diff --git a/depends/patches/qt/use_android_ndk23.patch b/depends/patches/qt/use_android_ndk23.patch new file mode 100644 index 0000000000000..f22367d527bec --- /dev/null +++ b/depends/patches/qt/use_android_ndk23.patch @@ -0,0 +1,13 @@ +Use Android NDK r23 LTS + +--- old/qtbase/mkspecs/features/android/default_pre.prf ++++ new/qtbase/mkspecs/features/android/default_pre.prf +@@ -76,7 +76,7 @@ else: equals(QT_ARCH, x86_64): CROSS_COMPILE = $$NDK_LLVM_PATH/bin/x86_64-linux- + else: equals(QT_ARCH, arm64-v8a): CROSS_COMPILE = $$NDK_LLVM_PATH/bin/aarch64-linux-android- + else: CROSS_COMPILE = $$NDK_LLVM_PATH/bin/arm-linux-androideabi- + +-QMAKE_RANLIB = $${CROSS_COMPILE}ranlib ++QMAKE_RANLIB = $$NDK_LLVM_PATH/bin/llvm-ranlib + QMAKE_LINK_SHLIB = $$QMAKE_LINK + QMAKE_LFLAGS = + diff --git a/depends/patches/qt46/stlfix.patch b/depends/patches/qt46/stlfix.patch deleted file mode 100644 index f8f6fb04b0b70..0000000000000 --- a/depends/patches/qt46/stlfix.patch +++ /dev/null @@ -1,10 +0,0 @@ ---- old/config.tests/unix/stl/stltest.cpp 2011-06-23 03:45:23.000000000 -0400 -+++ new/config.tests/unix/stl/stltest.cpp 2014-08-28 00:54:04.154837604 -0400 -@@ -49,6 +49,7 @@ - #include - #include - #include -+#include - - // something mean to see if the compiler and C++ standard lib are good enough - template diff --git a/depends/patches/systemtap/remove_SDT_ASM_SECTION_AUTOGROUP_SUPPORT_check.patch b/depends/patches/systemtap/remove_SDT_ASM_SECTION_AUTOGROUP_SUPPORT_check.patch new file mode 100644 index 0000000000000..eae0cf72d6ce0 --- /dev/null +++ b/depends/patches/systemtap/remove_SDT_ASM_SECTION_AUTOGROUP_SUPPORT_check.patch @@ -0,0 +1,31 @@ +commit b92d4c121486f3c6e8a2cea537c53eb09894479a +Author: 0xb10c <0xb10c@gmail.com> +Date: Tue Dec 7 11:02:07 2021 +0100 + + Remove _SDT_ASM_SECTION_AUTOGROUP_SUPPORT check + + We assume that the assembler supports "?" in .pushsection directives. + This enables us to skip configure and make. + + See https://github.com/bitcoin/bitcoin/issues/23297. + +diff --git a/includes/sys/sdt.h b/includes/sys/sdt.h +index 97766e710..352b4ee25 100644 +--- a/includes/sys/sdt.h ++++ b/includes/sys/sdt.h +@@ -230,12 +230,10 @@ __extension__ extern unsigned long long __sdt_unsp; + nice with code in COMDAT sections, which comes up in C++ code. + Without that assembler support, some combinations of probe placements + in certain kinds of C++ code may produce link-time errors. */ +-#include "sdt-config.h" +-#if _SDT_ASM_SECTION_AUTOGROUP_SUPPORT ++/* PATCH: We assume that the assembler supports the feature. This ++ enables us to skip configure and make. In turn, this means we ++ require fewer dependencies and have shorter depend build times. */ + # define _SDT_ASM_AUTOGROUP "?" 
+-#else +-# define _SDT_ASM_AUTOGROUP "" +-#endif + + #define _SDT_ASM_BODY(provider, name, pack_args, args) \ + _SDT_ASM_1(990: _SDT_NOP) \ diff --git a/depends/patches/zeromq/netbsd_kevent_void.patch b/depends/patches/zeromq/netbsd_kevent_void.patch new file mode 100644 index 0000000000000..845c6bdda6719 --- /dev/null +++ b/depends/patches/zeromq/netbsd_kevent_void.patch @@ -0,0 +1,57 @@ +commit 129137d5182967dbfcfec66bad843df2a992a78f +Author: fanquake +Date: Mon Jan 3 20:13:33 2022 +0800 + + problem: kevent udata is now void* on NetBSD Current (10) + + solution: check for the intptr_t variant in configure. + +diff --git a/configure.ac b/configure.ac +index 1a571291..402f8b86 100644 +--- a/configure.ac ++++ b/configure.ac +@@ -307,6 +307,27 @@ case "${host_os}" in + if test "x$libzmq_netbsd_has_atomic" = "xno"; then + AC_DEFINE(ZMQ_FORCE_MUTEXES, 1, [Force to use mutexes]) + fi ++ # NetBSD Current (to become 10) has changed the type of udata in it's ++ # kevent struct from intptr_t to void * to align with darwin and other ++ # BSDs, see upstream commit: ++ # https://github.com/NetBSD/src/commit/e5ead823eb916b56589d2c6c560dbcfe4a2d0afc ++ AC_MSG_CHECKING([whether kevent udata type is intptr_t]) ++ AC_LANG_PUSH([C++]) ++ AC_LINK_IFELSE([AC_LANG_PROGRAM( ++ [[#include ++ #include ++ #include ]], ++ [[struct kevent ev; ++ intptr_t udata; ++ EV_SET(&ev, 0, 0, EV_ADD, 0, 0, udata); ++ return 0;]])], ++ [libzmq_netbsd_kevent_udata_intptr_t=yes], ++ [libzmq_netbsd_kevent_udata_intptr_t=no]) ++ AC_LANG_POP([C++]) ++ AC_MSG_RESULT([$libzmq_netbsd_kevent_udata_intptr_t]) ++ if test "x$libzmq_netbsd_kevent_udata_intptr_t" = "xyes"; then ++ AC_DEFINE(ZMQ_NETBSD_KEVENT_UDATA_INTPTR_T, 1, [kevent udata type is intptr_t]) ++ fi + ;; + *openbsd*|*bitrig*) + # Define on OpenBSD to enable all library features +diff --git a/src/kqueue.cpp b/src/kqueue.cpp +index 53d82ac4..a6a7a7f2 100644 +--- a/src/kqueue.cpp ++++ b/src/kqueue.cpp +@@ -46,9 +46,9 @@ + #include "i_poll_events.hpp" + #include "likely.hpp" + +-// NetBSD defines (struct kevent).udata as intptr_t, everyone else +-// as void *. +-#if defined ZMQ_HAVE_NETBSD ++// NetBSD up to version 9 defines (struct kevent).udata as intptr_t, ++// everyone else as void *. ++#if defined ZMQ_HAVE_NETBSD && defined(ZMQ_NETBSD_KEVENT_UDATA_INTPTR_T) + #define kevent_udata_t intptr_t + #else + #define kevent_udata_t void * diff --git a/depends/patches/zeromq/remove_libstd_link.patch b/depends/patches/zeromq/remove_libstd_link.patch new file mode 100644 index 0000000000000..ddf91e6abfaba --- /dev/null +++ b/depends/patches/zeromq/remove_libstd_link.patch @@ -0,0 +1,25 @@ +commit 47d4cd12a2c051815ddda78adebdb3923b260d8a +Author: fanquake +Date: Tue Aug 18 14:45:40 2020 +0800 + + Remove needless linking against libstdc++ + + This is broken for a number of reasons, including: + - g++ understands "static-libstdc++ -lstdc++" to mean "link against + whatever libstdc++ exists, probably shared", which in itself is buggy. + - another stdlib (libc++ for example) may be in use + + See #11981. 
+ +diff --git a/src/libzmq.pc.in b/src/libzmq.pc.in +index 233bc3a..3c2bf0d 100644 +--- a/src/libzmq.pc.in ++++ b/src/libzmq.pc.in +@@ -7,6 +7,6 @@ Name: libzmq + Description: 0MQ c++ library + Version: @VERSION@ + Libs: -L${libdir} -lzmq +-Libs.private: -lstdc++ @pkg_config_libs_private@ ++Libs.private: @pkg_config_libs_private@ + Requires.private: @pkg_config_names_private@ + Cflags: -I${includedir} @pkg_config_defines@ diff --git a/doc/.gitignore b/doc/.gitignore new file mode 100644 index 0000000000000..38498103bb170 --- /dev/null +++ b/doc/.gitignore @@ -0,0 +1 @@ +Doxyfile diff --git a/doc/Doxyfile b/doc/Doxyfile deleted file mode 100644 index e0339e652eb07..0000000000000 --- a/doc/Doxyfile +++ /dev/null @@ -1,1752 +0,0 @@ -# Doxyfile 1.7.4 - -# !!! Invoke doxygen from project root using: -# doxygen doc/Doxyfile - -# This file describes the settings to be used by the documentation system -# doxygen (www.doxygen.org) for a project -# -# All text after a hash (#) is considered a comment and will be ignored -# The format is: -# TAG = value [value, ...] -# For lists items can also be appended using: -# TAG += value [value, ...] -# Values that contain spaces should be placed between quotes (" ") - -#--------------------------------------------------------------------------- -# Project related configuration options -#--------------------------------------------------------------------------- - -# This tag specifies the encoding used for all characters in the config file -# that follow. The default is UTF-8 which is also the encoding used for all -# text before the first occurrence of this tag. Doxygen uses libiconv (or the -# iconv built into libc) for the transcoding. See -# http://www.gnu.org/software/libiconv for the list of possible encodings. - -DOXYFILE_ENCODING = UTF-8 - -# The PROJECT_NAME tag is a single word (or a sequence of words surrounded -# by quotes) that should identify the project. - -PROJECT_NAME = Bitcoin - -# The PROJECT_NUMBER tag can be used to enter a project or revision number. -# This could be handy for archiving the generated documentation or -# if some version control system is used. - -PROJECT_NUMBER = 0.9.99 - -# Using the PROJECT_BRIEF tag one can provide an optional one line description -# for a project that appears at the top of each page and should give viewer -# a quick idea about the purpose of the project. Keep the description short. - -PROJECT_BRIEF = "P2P Digital Currency" - -# With the PROJECT_LOGO tag one can specify an logo or icon that is -# included in the documentation. The maximum height of the logo should not -# exceed 55 pixels and the maximum width should not exceed 200 pixels. -# Doxygen will copy the logo to the output directory. - -PROJECT_LOGO = doc/bitcoin_logo_doxygen.png - -# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) -# base path where the generated documentation will be put. -# If a relative path is entered, it will be relative to the location -# where doxygen was started. If left blank the current directory will be used. - -OUTPUT_DIRECTORY = doc/doxygen - -# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create -# 4096 sub-directories (in 2 levels) under the output directory of each output -# format and will distribute the generated files over these directories. -# Enabling this option can be useful when feeding doxygen a huge amount of -# source files, where putting all generated files in the same directory would -# otherwise cause performance problems for the file system. 
- -CREATE_SUBDIRS = NO - -# The OUTPUT_LANGUAGE tag is used to specify the language in which all -# documentation generated by doxygen is written. Doxygen will use this -# information to generate all constant output in the proper language. -# The default language is English, other supported languages are: -# Afrikaans, Arabic, Brazilian, Catalan, Chinese, Chinese-Traditional, -# Croatian, Czech, Danish, Dutch, Esperanto, Farsi, Finnish, French, German, -# Greek, Hungarian, Italian, Japanese, Japanese-en (Japanese with English -# messages), Korean, Korean-en, Lithuanian, Norwegian, Macedonian, Persian, -# Polish, Portuguese, Romanian, Russian, Serbian, Serbian-Cyrillic, Slovak, -# Slovene, Spanish, Swedish, Ukrainian, and Vietnamese. - -OUTPUT_LANGUAGE = English - -# If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will -# include brief member descriptions after the members that are listed in -# the file and class documentation (similar to JavaDoc). -# Set to NO to disable this. - -BRIEF_MEMBER_DESC = YES - -# If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend -# the brief description of a member or function before the detailed description. -# Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the -# brief descriptions will be completely suppressed. - -REPEAT_BRIEF = YES - -# This tag implements a quasi-intelligent brief description abbreviator -# that is used to form the text in various listings. Each string -# in this list, if found as the leading text of the brief description, will be -# stripped from the text and the result after processing the whole list, is -# used as the annotated text. Otherwise, the brief description is used as-is. -# If left blank, the following values are used ("$name" is automatically -# replaced with the name of the entity): "The $name class" "The $name widget" -# "The $name file" "is" "provides" "specifies" "contains" -# "represents" "a" "an" "the" - -ABBREVIATE_BRIEF = "The $name class" \ - "The $name widget" \ - "The $name file" \ - is \ - provides \ - specifies \ - contains \ - represents \ - a \ - an \ - the - -# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then -# Doxygen will generate a detailed section even if there is only a brief -# description. - -ALWAYS_DETAILED_SEC = NO - -# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all -# inherited members of a class in the documentation of that class as if those -# members were ordinary class members. Constructors, destructors and assignment -# operators of the base classes will not be shown. - -INLINE_INHERITED_MEMB = NO - -# If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full -# path before files name in the file list and in the header files. If set -# to NO the shortest path that makes the file name unique will be used. - -FULL_PATH_NAMES = YES - -# If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag -# can be used to strip a user-defined part of the path. Stripping is -# only done if one of the specified strings matches the left-hand part of -# the path. The tag can be used to show relative paths in the file list. -# If left blank the directory from which doxygen is run is used as the -# path to strip. - -STRIP_FROM_PATH = - -# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of -# the path mentioned in the documentation of a class, which tells -# the reader which header file to include in order to use a class. 
-# If left blank only the name of the header file containing the class -# definition is used. Otherwise one should specify the include paths that -# are normally passed to the compiler using the -I flag. - -STRIP_FROM_INC_PATH = - -# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter -# (but less readable) file names. This can be useful if your file system -# doesn't support long names like on DOS, Mac, or CD-ROM. - -SHORT_NAMES = NO - -# If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen -# will interpret the first line (until the first dot) of a JavaDoc-style -# comment as the brief description. If set to NO, the JavaDoc -# comments will behave just like regular Qt-style comments -# (thus requiring an explicit @brief command for a brief description.) - -JAVADOC_AUTOBRIEF = YES - -# If the QT_AUTOBRIEF tag is set to YES then Doxygen will -# interpret the first line (until the first dot) of a Qt-style -# comment as the brief description. If set to NO, the comments -# will behave just like regular Qt-style comments (thus requiring -# an explicit \brief command for a brief description.) - -QT_AUTOBRIEF = NO - -# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen -# treat a multi-line C++ special comment block (i.e. a block of //! or /// -# comments) as a brief description. This used to be the default behaviour. -# The new default is to treat a multi-line C++ comment block as a detailed -# description. Set this tag to YES if you prefer the old behaviour instead. - -MULTILINE_CPP_IS_BRIEF = NO - -# If the INHERIT_DOCS tag is set to YES (the default) then an undocumented -# member inherits the documentation from any documented member that it -# re-implements. - -INHERIT_DOCS = YES - -# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce -# a new page for each member. If set to NO, the documentation of a member will -# be part of the file/class/namespace that contains it. - -SEPARATE_MEMBER_PAGES = NO - -# The TAB_SIZE tag can be used to set the number of spaces in a tab. -# Doxygen uses this value to replace tabs by spaces in code fragments. - -TAB_SIZE = 8 - -# This tag can be used to specify a number of aliases that acts -# as commands in the documentation. An alias has the form "name=value". -# For example adding "sideeffect=\par Side Effects:\n" will allow you to -# put the command \sideeffect (or @sideeffect) in the documentation, which -# will result in a user-defined paragraph with heading "Side Effects:". -# You can put \n in the value part of an alias to insert newlines. - -ALIASES = - -# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C -# sources only. Doxygen will then generate output that is more tailored for C. -# For instance, some of the names that are used will be different. The list -# of all members will be omitted, etc. - -OPTIMIZE_OUTPUT_FOR_C = NO - -# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java -# sources only. Doxygen will then generate output that is more tailored for -# Java. For instance, namespaces will be presented as packages, qualified -# scopes will look different, etc. - -OPTIMIZE_OUTPUT_JAVA = NO - -# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran -# sources only. Doxygen will then generate output that is more tailored for -# Fortran. - -OPTIMIZE_FOR_FORTRAN = NO - -# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL -# sources. Doxygen will then generate output that is tailored for -# VHDL. 
- -OPTIMIZE_OUTPUT_VHDL = NO - -# Doxygen selects the parser to use depending on the extension of the files it -# parses. With this tag you can assign which parser to use for a given extension. -# Doxygen has a built-in mapping, but you can override or extend it using this -# tag. The format is ext=language, where ext is a file extension, and language -# is one of the parsers supported by doxygen: IDL, Java, Javascript, CSharp, C, -# C++, D, PHP, Objective-C, Python, Fortran, VHDL, C, C++. For instance to make -# doxygen treat .inc files as Fortran files (default is PHP), and .f files as C -# (default is Fortran), use: inc=Fortran f=C. Note that for custom extensions -# you also need to set FILE_PATTERNS otherwise the files are not read by doxygen. - -EXTENSION_MAPPING = - -# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want -# to include (a tag file for) the STL sources as input, then you should -# set this tag to YES in order to let doxygen match functions declarations and -# definitions whose arguments contain STL classes (e.g. func(std::string); v.s. -# func(std::string) {}). This also makes the inheritance and collaboration -# diagrams that involve STL classes more complete and accurate. - -BUILTIN_STL_SUPPORT = NO - -# If you use Microsoft's C++/CLI language, you should set this option to YES to -# enable parsing support. - -CPP_CLI_SUPPORT = NO - -# Set the SIP_SUPPORT tag to YES if your project consists of sip sources only. -# Doxygen will parse them like normal C++ but will assume all classes use public -# instead of private inheritance when no explicit protection keyword is present. - -SIP_SUPPORT = NO - -# For Microsoft's IDL there are propget and propput attributes to indicate getter -# and setter methods for a property. Setting this option to YES (the default) -# will make doxygen replace the get and set methods by a property in the -# documentation. This will only work if the methods are indeed getting or -# setting a simple type. If this is not the case, or you want to show the -# methods anyway, you should set this option to NO. - -IDL_PROPERTY_SUPPORT = YES - -# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC -# tag is set to YES, then doxygen will reuse the documentation of the first -# member in the group (if any) for the other members of the group. By default -# all members of a group must be documented explicitly. - -DISTRIBUTE_GROUP_DOC = NO - -# Set the SUBGROUPING tag to YES (the default) to allow class member groups of -# the same type (for instance a group of public functions) to be put as a -# subgroup of that type (e.g. under the Public Functions section). Set it to -# NO to prevent subgrouping. Alternatively, this can be done per class using -# the \nosubgrouping command. - -SUBGROUPING = YES - -# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and -# unions are shown inside the group in which they are included (e.g. using -# @ingroup) instead of on a separate page (for HTML and Man pages) or -# section (for LaTeX and RTF). - -INLINE_GROUPED_CLASSES = NO - -# When TYPEDEF_HIDES_STRUCT is enabled, a typedef of a struct, union, or enum -# is documented as struct, union, or enum with the name of the typedef. So -# typedef struct TypeS {} TypeT, will appear in the documentation as a struct -# with name TypeT. When disabled the typedef will appear as a member of a file, -# namespace, or class. And the struct will be named TypeS. 
This can typically -# be useful for C code in case the coding convention dictates that all compound -# types are typedef'ed and only the typedef is referenced, never the tag name. - -TYPEDEF_HIDES_STRUCT = NO - -# The SYMBOL_CACHE_SIZE determines the size of the internal cache use to -# determine which symbols to keep in memory and which to flush to disk. -# When the cache is full, less often used symbols will be written to disk. -# For small to medium size projects (<1000 input files) the default value is -# probably good enough. For larger projects a too small cache size can cause -# doxygen to be busy swapping symbols to and from disk most of the time -# causing a significant performance penalty. -# If the system has enough physical memory increasing the cache will improve the -# performance by keeping more symbols in memory. Note that the value works on -# a logarithmic scale so increasing the size by one will roughly double the -# memory usage. The cache size is given by this formula: -# 2^(16+SYMBOL_CACHE_SIZE). The valid range is 0..9, the default is 0, -# corresponding to a cache size of 2^16 = 65536 symbols - -SYMBOL_CACHE_SIZE = 0 - -#--------------------------------------------------------------------------- -# Build related configuration options -#--------------------------------------------------------------------------- - -# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in -# documentation are documented, even if no documentation was available. -# Private class members and static file members will be hidden unless -# the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES - -EXTRACT_ALL = YES - -# If the EXTRACT_PRIVATE tag is set to YES all private members of a class -# will be included in the documentation. - -EXTRACT_PRIVATE = YES - -# If the EXTRACT_STATIC tag is set to YES all static members of a file -# will be included in the documentation. - -EXTRACT_STATIC = NO - -# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) -# defined locally in source files will be included in the documentation. -# If set to NO only classes defined in header files are included. - -EXTRACT_LOCAL_CLASSES = YES - -# This flag is only useful for Objective-C code. When set to YES local -# methods, which are defined in the implementation section but not in -# the interface are included in the documentation. -# If set to NO (the default) only methods in the interface are included. - -EXTRACT_LOCAL_METHODS = NO - -# If this flag is set to YES, the members of anonymous namespaces will be -# extracted and appear in the documentation as a namespace called -# 'anonymous_namespace{file}', where file will be replaced with the base -# name of the file that contains the anonymous namespace. By default -# anonymous namespaces are hidden. - -EXTRACT_ANON_NSPACES = NO - -# If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all -# undocumented members of documented classes, files or namespaces. -# If set to NO (the default) these members will be included in the -# various overviews, but no documentation section is generated. -# This option has no effect if EXTRACT_ALL is enabled. - -HIDE_UNDOC_MEMBERS = NO - -# If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all -# undocumented classes that are normally visible in the class hierarchy. -# If set to NO (the default) these classes will be included in the various -# overviews. This option has no effect if EXTRACT_ALL is enabled. 
- -HIDE_UNDOC_CLASSES = NO - -# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all -# friend (class|struct|union) declarations. -# If set to NO (the default) these declarations will be included in the -# documentation. - -HIDE_FRIEND_COMPOUNDS = NO - -# If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any -# documentation blocks found inside the body of a function. -# If set to NO (the default) these blocks will be appended to the -# function's detailed documentation block. - -HIDE_IN_BODY_DOCS = NO - -# The INTERNAL_DOCS tag determines if documentation -# that is typed after a \internal command is included. If the tag is set -# to NO (the default) then the documentation will be excluded. -# Set it to YES to include the internal documentation. - -INTERNAL_DOCS = NO - -# If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate -# file names in lower-case letters. If set to YES upper-case letters are also -# allowed. This is useful if you have classes or files whose names only differ -# in case and if your file system supports case sensitive file names. Windows -# and Mac users are advised to set this option to NO. - -CASE_SENSE_NAMES = NO - -# If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen -# will show members with their full class and namespace scopes in the -# documentation. If set to YES the scope will be hidden. - -HIDE_SCOPE_NAMES = NO - -# If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen -# will put a list of the files that are included by a file in the documentation -# of that file. - -SHOW_INCLUDE_FILES = YES - -# If the FORCE_LOCAL_INCLUDES tag is set to YES then Doxygen -# will list include files with double quotes in the documentation -# rather than with sharp brackets. - -FORCE_LOCAL_INCLUDES = NO - -# If the INLINE_INFO tag is set to YES (the default) then a tag [inline] -# is inserted in the documentation for inline members. - -INLINE_INFO = YES - -# If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen -# will sort the (detailed) documentation of file and class members -# alphabetically by member name. If set to NO the members will appear in -# declaration order. - -SORT_MEMBER_DOCS = YES - -# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the -# brief documentation of file, namespace and class members alphabetically -# by member name. If set to NO (the default) the members will appear in -# declaration order. - -SORT_BRIEF_DOCS = NO - -# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen -# will sort the (brief and detailed) documentation of class members so that -# constructors and destructors are listed first. If set to NO (the default) -# the constructors will appear in the respective orders defined by -# SORT_MEMBER_DOCS and SORT_BRIEF_DOCS. -# This tag will be ignored for brief docs if SORT_BRIEF_DOCS is set to NO -# and ignored for detailed docs if SORT_MEMBER_DOCS is set to NO. - -SORT_MEMBERS_CTORS_1ST = NO - -# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the -# hierarchy of group names into alphabetical order. If set to NO (the default) -# the group names will appear in their defined order. - -SORT_GROUP_NAMES = NO - -# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be -# sorted by fully-qualified names, including namespaces. If set to -# NO (the default), the class list will be sorted only by class name, -# not including the namespace part. 
-# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. -# Note: This option applies only to the class list, not to the -# alphabetical list. - -SORT_BY_SCOPE_NAME = NO - -# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to -# do proper type resolution of all parameters of a function it will reject a -# match between the prototype and the implementation of a member function even -# if there is only one candidate or it is obvious which candidate to choose -# by doing a simple string match. By disabling STRICT_PROTO_MATCHING doxygen -# will still accept a match between prototype and implementation in such cases. - -STRICT_PROTO_MATCHING = NO - -# The GENERATE_TODOLIST tag can be used to enable (YES) or -# disable (NO) the todo list. This list is created by putting \todo -# commands in the documentation. - -GENERATE_TODOLIST = YES - -# The GENERATE_TESTLIST tag can be used to enable (YES) or -# disable (NO) the test list. This list is created by putting \test -# commands in the documentation. - -GENERATE_TESTLIST = YES - -# The GENERATE_BUGLIST tag can be used to enable (YES) or -# disable (NO) the bug list. This list is created by putting \bug -# commands in the documentation. - -GENERATE_BUGLIST = YES - -# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or -# disable (NO) the deprecated list. This list is created by putting -# \deprecated commands in the documentation. - -GENERATE_DEPRECATEDLIST= YES - -# The ENABLED_SECTIONS tag can be used to enable conditional -# documentation sections, marked by \if sectionname ... \endif. - -ENABLED_SECTIONS = - -# The MAX_INITIALIZER_LINES tag determines the maximum number of lines -# the initial value of a variable or macro consists of for it to appear in -# the documentation. If the initializer consists of more lines than specified -# here it will be hidden. Use a value of 0 to hide initializers completely. -# The appearance of the initializer of individual variables and macros in the -# documentation can be controlled using \showinitializer or \hideinitializer -# command in the documentation regardless of this setting. - -MAX_INITIALIZER_LINES = 30 - -# Set the SHOW_USED_FILES tag to NO to disable the list of files generated -# at the bottom of the documentation of classes and structs. If set to YES the -# list will mention the files that were used to generate the documentation. - -SHOW_USED_FILES = YES - -# If the sources in your project are distributed over multiple directories -# then setting the SHOW_DIRECTORIES tag to YES will show the directory hierarchy -# in the documentation. The default is NO. - -SHOW_DIRECTORIES = NO - -# Set the SHOW_FILES tag to NO to disable the generation of the Files page. -# This will remove the Files entry from the Quick Index and from the -# Folder Tree View (if specified). The default is YES. - -SHOW_FILES = YES - -# Set the SHOW_NAMESPACES tag to NO to disable the generation of the -# Namespaces page. This will remove the Namespaces entry from the Quick Index -# and from the Folder Tree View (if specified). The default is YES. - -SHOW_NAMESPACES = YES - -# The FILE_VERSION_FILTER tag can be used to specify a program or script that -# doxygen should invoke to get the current version for each file (typically from -# the version control system). Doxygen will invoke the program by executing (via -# popen()) the command , where is the value of -# the FILE_VERSION_FILTER tag, and is the name of an input file -# provided by doxygen. 
Whatever the program writes to standard output -# is used as the file version. See the manual for examples. - -FILE_VERSION_FILTER = - -# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed -# by doxygen. The layout file controls the global structure of the generated -# output files in an output format independent way. The create the layout file -# that represents doxygen's defaults, run doxygen with the -l option. -# You can optionally specify a file name after the option, if omitted -# DoxygenLayout.xml will be used as the name of the layout file. - -LAYOUT_FILE = - -#--------------------------------------------------------------------------- -# configuration options related to warning and progress messages -#--------------------------------------------------------------------------- - -# The QUIET tag can be used to turn on/off the messages that are generated -# by doxygen. Possible values are YES and NO. If left blank NO is used. - -QUIET = NO - -# The WARNINGS tag can be used to turn on/off the warning messages that are -# generated by doxygen. Possible values are YES and NO. If left blank -# NO is used. - -WARNINGS = YES - -# If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings -# for undocumented members. If EXTRACT_ALL is set to YES then this flag will -# automatically be disabled. - -WARN_IF_UNDOCUMENTED = YES - -# If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for -# potential errors in the documentation, such as not documenting some -# parameters in a documented function, or documenting parameters that -# don't exist or using markup commands wrongly. - -WARN_IF_DOC_ERROR = YES - -# The WARN_NO_PARAMDOC option can be enabled to get warnings for -# functions that are documented, but have no documentation for their parameters -# or return value. If set to NO (the default) doxygen will only warn about -# wrong or incomplete parameter documentation, but not about the absence of -# documentation. - -WARN_NO_PARAMDOC = NO - -# The WARN_FORMAT tag determines the format of the warning messages that -# doxygen can produce. The string should contain the $file, $line, and $text -# tags, which will be replaced by the file and line number from which the -# warning originated and the warning text. Optionally the format may contain -# $version, which will be replaced by the version of the file (if it could -# be obtained via FILE_VERSION_FILTER) - -WARN_FORMAT = "$file:$line: $text" - -# The WARN_LOGFILE tag can be used to specify a file to which warning -# and error messages should be written. If left blank the output is written -# to stderr. - -WARN_LOGFILE = - -#--------------------------------------------------------------------------- -# configuration options related to the input files -#--------------------------------------------------------------------------- - -# The INPUT tag can be used to specify the files and/or directories that contain -# documented source files. You may enter file names like "myfile.cpp" or -# directories like "/usr/src/myproject". Separate the files or directories -# with spaces. - -INPUT = src - -# This tag can be used to specify the character encoding of the source files -# that doxygen parses. Internally doxygen uses the UTF-8 encoding, which is -# also the default input encoding. Doxygen uses libiconv (or the iconv built -# into libc) for the transcoding. See http://www.gnu.org/software/libiconv for -# the list of possible encodings. 
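As a hedged illustration of the warning and file-version tags described above, one possible setup is sketched below. The log file name and the exact git invocation are assumptions for illustration, not values taken from this configuration; doxygen appends the input file name to the FILE_VERSION_FILTER command before running it.

    # Illustrative sketch only
    WARN_LOGFILE        = doxygen-warnings.log
    # Ask git for the abbreviated hash of the last commit touching each file;
    # doxygen appends the file name after the trailing "--"
    FILE_VERSION_FILTER = "git log -n 1 --pretty=format:%h --"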
- -INPUT_ENCODING = UTF-8 - -# If the value of the INPUT tag contains directories, you can use the -# FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp -# and *.h) to filter out the source-files in the directories. If left -# blank the following patterns are tested: -# *.c *.cc *.cxx *.cpp *.c++ *.d *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh -# *.hxx *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.dox *.py -# *.f90 *.f *.for *.vhd *.vhdl - -FILE_PATTERNS = *.c \ - *.cc \ - *.cxx \ - *.cpp \ - *.c++ \ - *.d \ - *.java \ - *.ii \ - *.ixx \ - *.ipp \ - *.i++ \ - *.inl \ - *.h \ - *.hh \ - *.hxx \ - *.hpp \ - *.h++ \ - *.idl \ - *.odl \ - *.cs \ - *.php \ - *.php3 \ - *.inc \ - *.m \ - *.mm \ - *.dox \ - *.py \ - *.f90 \ - *.f \ - *.for \ - *.vhd \ - *.vhdl - -# The RECURSIVE tag can be used to turn specify whether or not subdirectories -# should be searched for input files as well. Possible values are YES and NO. -# If left blank NO is used. - -RECURSIVE = YES - -# The EXCLUDE tag can be used to specify files and/or directories that should -# excluded from the INPUT source files. This way you can easily exclude a -# subdirectory from a directory tree whose root is specified with the INPUT tag. - -EXCLUDE = src/leveldb src/json src/test /src/qt/test - -# The EXCLUDE_SYMLINKS tag can be used select whether or not files or -# directories that are symbolic links (a Unix file system feature) are excluded -# from the input. - -EXCLUDE_SYMLINKS = NO - -# If the value of the INPUT tag contains directories, you can use the -# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude -# certain files from those directories. Note that the wildcards are matched -# against the file with absolute path, so to exclude all test directories -# for example use the pattern */test/* - -EXCLUDE_PATTERNS = - -# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names -# (namespaces, classes, functions, etc.) that should be excluded from the -# output. The symbol name can be a fully qualified name, a word, or if the -# wildcard * is used, a substring. Examples: ANamespace, AClass, -# AClass::ANamespace, ANamespace::*Test - -EXCLUDE_SYMBOLS = boost google - -# The EXAMPLE_PATH tag can be used to specify one or more files or -# directories that contain example code fragments that are included (see -# the \include command). - -EXAMPLE_PATH = - -# If the value of the EXAMPLE_PATH tag contains directories, you can use the -# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp -# and *.h) to filter out the source-files in the directories. If left -# blank all files are included. - -EXAMPLE_PATTERNS = * - -# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be -# searched for input files to be used with the \include or \dontinclude -# commands irrespective of the value of the RECURSIVE tag. -# Possible values are YES and NO. If left blank NO is used. - -EXAMPLE_RECURSIVE = NO - -# The IMAGE_PATH tag can be used to specify one or more files or -# directories that contain image that are included in the documentation (see -# the \image command). - -IMAGE_PATH = - -# The INPUT_FILTER tag can be used to specify a program that doxygen should -# invoke to filter for each input file. Doxygen will invoke the filter program -# by executing (via popen()) the command , where -# is the value of the INPUT_FILTER tag, and is the name of an -# input file. 
Doxygen will then use the output that the filter program writes -# to standard output. If FILTER_PATTERNS is specified, this tag will be -# ignored. - -INPUT_FILTER = - -# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern -# basis. Doxygen will compare the file name with each pattern and apply the -# filter if there is a match. The filters are a list of the form: -# pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further -# info on how filters are used. If FILTER_PATTERNS is empty or if -# non of the patterns match the file name, INPUT_FILTER is applied. - -FILTER_PATTERNS = - -# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using -# INPUT_FILTER) will be used to filter the input files when producing source -# files to browse (i.e. when SOURCE_BROWSER is set to YES). - -FILTER_SOURCE_FILES = NO - -# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file -# pattern. A pattern will override the setting for FILTER_PATTERN (if any) -# and it is also possible to disable source filtering for a specific pattern -# using *.ext= (so without naming a filter). This option only has effect when -# FILTER_SOURCE_FILES is enabled. - -FILTER_SOURCE_PATTERNS = - -#--------------------------------------------------------------------------- -# configuration options related to source browsing -#--------------------------------------------------------------------------- - -# If the SOURCE_BROWSER tag is set to YES then a list of source files will -# be generated. Documented entities will be cross-referenced with these sources. -# Note: To get rid of all source code in the generated output, make sure also -# VERBATIM_HEADERS is set to NO. - -SOURCE_BROWSER = YES - -# Setting the INLINE_SOURCES tag to YES will include the body -# of functions and classes directly in the documentation. - -INLINE_SOURCES = NO - -# Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct -# doxygen to hide any special comment blocks from generated source code -# fragments. Normal C and C++ comments will always remain visible. - -STRIP_CODE_COMMENTS = YES - -# If the REFERENCED_BY_RELATION tag is set to YES -# then for each documented function all documented -# functions referencing it will be listed. - -REFERENCED_BY_RELATION = NO - -# If the REFERENCES_RELATION tag is set to YES -# then for each documented function all documented entities -# called/used by that function will be listed. - -REFERENCES_RELATION = NO - -# If the REFERENCES_LINK_SOURCE tag is set to YES (the default) -# and SOURCE_BROWSER tag is set to YES, then the hyperlinks from -# functions in REFERENCES_RELATION and REFERENCED_BY_RELATION lists will -# link to the source code. Otherwise they will link to the documentation. - -REFERENCES_LINK_SOURCE = YES - -# If the USE_HTAGS tag is set to YES then the references to source code -# will point to the HTML generated by the htags(1) tool instead of doxygen -# built-in source browser. The htags tool is part of GNU's global source -# tagging system (see http://www.gnu.org/software/global/global.html). You -# will need version 4.8.6 or higher. - -USE_HTAGS = NO - -# If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen -# will generate a verbatim copy of the header file for each class for -# which an include is specified. Set to NO to disable this. 
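To make the filter tags above concrete, here is a minimal sketch assuming a hypothetical helper script doc/strip_generated.py that reads one source file and writes the filtered text to standard output; the script name and the pattern are illustrative assumptions.

    # Illustrative sketch only: filter Python inputs through a hypothetical script
    FILTER_PATTERNS     = *.py=doc/strip_generated.py
    # Do not apply the filter when producing the source-browser pages
    FILTER_SOURCE_FILES = NO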
- -VERBATIM_HEADERS = YES - -#--------------------------------------------------------------------------- -# configuration options related to the alphabetical class index -#--------------------------------------------------------------------------- - -# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index -# of all compounds will be generated. Enable this if the project -# contains a lot of classes, structs, unions or interfaces. - -ALPHABETICAL_INDEX = YES - -# If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then -# the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns -# in which this list will be split (can be a number in the range [1..20]) - -COLS_IN_ALPHA_INDEX = 5 - -# In case all classes in a project start with a common prefix, all -# classes will be put under the same header in the alphabetical index. -# The IGNORE_PREFIX tag can be used to specify one or more prefixes that -# should be ignored while generating the index headers. - -IGNORE_PREFIX = - -#--------------------------------------------------------------------------- -# configuration options related to the HTML output -#--------------------------------------------------------------------------- - -# If the GENERATE_HTML tag is set to YES (the default) Doxygen will -# generate HTML output. - -GENERATE_HTML = YES - -# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. -# If a relative path is entered the value of OUTPUT_DIRECTORY will be -# put in front of it. If left blank `html' will be used as the default path. - -HTML_OUTPUT = html - -# The HTML_FILE_EXTENSION tag can be used to specify the file extension for -# each generated HTML page (for example: .htm,.php,.asp). If it is left blank -# doxygen will generate files with .html extension. - -HTML_FILE_EXTENSION = .html - -# The HTML_HEADER tag can be used to specify a personal HTML header for -# each generated HTML page. If it is left blank doxygen will generate a -# standard header. Note that when using a custom header you are responsible -# for the proper inclusion of any scripts and style sheets that doxygen -# needs, which is dependent on the configuration options used. -# It is adviced to generate a default header using "doxygen -w html -# header.html footer.html stylesheet.css YourConfigFile" and then modify -# that header. Note that the header is subject to change so you typically -# have to redo this when upgrading to a newer version of doxygen or when -# changing the value of configuration settings such as GENERATE_TREEVIEW! - -HTML_HEADER = - -# The HTML_FOOTER tag can be used to specify a personal HTML footer for -# each generated HTML page. If it is left blank doxygen will generate a -# standard footer. - -HTML_FOOTER = - -# The HTML_STYLESHEET tag can be used to specify a user-defined cascading -# style sheet that is used by each HTML page. It can be used to -# fine-tune the look of the HTML output. If the tag is left blank doxygen -# will generate a default style sheet. Note that doxygen will try to copy -# the style sheet file to the HTML output directory, so don't put your own -# stylesheet in the HTML output directory as well, or it will be erased! - -HTML_STYLESHEET = - -# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or -# other source files which should be copied to the HTML output directory. Note -# that these files will be copied to the base HTML output directory. 
Use the -# $relpath$ marker in the HTML_HEADER and/or HTML_FOOTER files to load these -# files. In the HTML_STYLESHEET file, use the file name only. Also note that -# the files will be copied as-is; there are no commands or markers available. - -HTML_EXTRA_FILES = - -# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. -# Doxygen will adjust the colors in the stylesheet and background images -# according to this color. Hue is specified as an angle on a colorwheel, -# see http://en.wikipedia.org/wiki/Hue for more information. -# For instance the value 0 represents red, 60 is yellow, 120 is green, -# 180 is cyan, 240 is blue, 300 purple, and 360 is red again. -# The allowed range is 0 to 359. - -HTML_COLORSTYLE_HUE = 220 - -# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of -# the colors in the HTML output. For a value of 0 the output will use -# grayscales only. A value of 255 will produce the most vivid colors. - -HTML_COLORSTYLE_SAT = 100 - -# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to -# the luminance component of the colors in the HTML output. Values below -# 100 gradually make the output lighter, whereas values above 100 make -# the output darker. The value divided by 100 is the actual gamma applied, -# so 80 represents a gamma of 0.8, The value 220 represents a gamma of 2.2, -# and 100 does not change the gamma. - -HTML_COLORSTYLE_GAMMA = 80 - -# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML -# page will contain the date and time when the page was generated. Setting -# this to NO can help when comparing the output of multiple runs. - -HTML_TIMESTAMP = YES - -# If the HTML_ALIGN_MEMBERS tag is set to YES, the members of classes, -# files or namespaces will be aligned in HTML using tables. If set to -# NO a bullet list will be used. - -HTML_ALIGN_MEMBERS = YES - -# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML -# documentation will contain sections that can be hidden and shown after the -# page has loaded. For this to work a browser that supports -# JavaScript and DHTML is required (for instance Mozilla 1.0+, Firefox -# Netscape 6.0+, Internet explorer 5.0+, Konqueror, or Safari). - -HTML_DYNAMIC_SECTIONS = NO - -# If the GENERATE_DOCSET tag is set to YES, additional index files -# will be generated that can be used as input for Apple's Xcode 3 -# integrated development environment, introduced with OSX 10.5 (Leopard). -# To create a documentation set, doxygen will generate a Makefile in the -# HTML output directory. Running make will produce the docset in that -# directory and running "make install" will install the docset in -# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find -# it at startup. -# See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html -# for more information. - -GENERATE_DOCSET = NO - -# When GENERATE_DOCSET tag is set to YES, this tag determines the name of the -# feed. A documentation feed provides an umbrella under which multiple -# documentation sets from a single provider (such as a company or product suite) -# can be grouped. - -DOCSET_FEEDNAME = "Doxygen generated docs" - -# When GENERATE_DOCSET tag is set to YES, this tag specifies a string that -# should uniquely identify the documentation set bundle. This should be a -# reverse domain-name style string, e.g. com.mycompany.MyDocSet. Doxygen -# will append .docset to the name. 
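As a hedged example of the HTML color tags described above, the values below show one alternative palette; they are illustrative, not the values used by this configuration (which keeps hue 220, saturation 100 and gamma 80).

    # Illustrative sketch only: a slightly different palette
    HTML_COLORSTYLE_HUE   = 210
    HTML_COLORSTYLE_SAT   = 80
    HTML_COLORSTYLE_GAMMA = 100
    # Omitting the per-page timestamp makes runs easier to compare
    HTML_TIMESTAMP        = NO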
- -DOCSET_BUNDLE_ID = org.doxygen.Project - -# When GENERATE_PUBLISHER_ID tag specifies a string that should uniquely identify -# the documentation publisher. This should be a reverse domain-name style -# string, e.g. com.mycompany.MyDocSet.documentation. - -DOCSET_PUBLISHER_ID = org.doxygen.Publisher - -# The GENERATE_PUBLISHER_NAME tag identifies the documentation publisher. - -DOCSET_PUBLISHER_NAME = Publisher - -# If the GENERATE_HTMLHELP tag is set to YES, additional index files -# will be generated that can be used as input for tools like the -# Microsoft HTML help workshop to generate a compiled HTML help file (.chm) -# of the generated HTML documentation. - -GENERATE_HTMLHELP = NO - -# If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can -# be used to specify the file name of the resulting .chm file. You -# can add a path in front of the file if the result should not be -# written to the html output directory. - -CHM_FILE = - -# If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can -# be used to specify the location (absolute path including file name) of -# the HTML help compiler (hhc.exe). If non-empty doxygen will try to run -# the HTML help compiler on the generated index.hhp. - -HHC_LOCATION = - -# If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag -# controls if a separate .chi index file is generated (YES) or that -# it should be included in the master .chm file (NO). - -GENERATE_CHI = NO - -# If the GENERATE_HTMLHELP tag is set to YES, the CHM_INDEX_ENCODING -# is used to encode HtmlHelp index (hhk), content (hhc) and project file -# content. - -CHM_INDEX_ENCODING = - -# If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag -# controls whether a binary table of contents is generated (YES) or a -# normal table of contents (NO) in the .chm file. - -BINARY_TOC = NO - -# The TOC_EXPAND flag can be set to YES to add extra items for group members -# to the contents of the HTML help documentation and to the tree view. - -TOC_EXPAND = NO - -# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and -# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated -# that can be used as input for Qt's qhelpgenerator to generate a -# Qt Compressed Help (.qch) of the generated HTML documentation. - -GENERATE_QHP = NO - -# If the QHG_LOCATION tag is specified, the QCH_FILE tag can -# be used to specify the file name of the resulting .qch file. -# The path specified is relative to the HTML output folder. - -QCH_FILE = - -# The QHP_NAMESPACE tag specifies the namespace to use when generating -# Qt Help Project output. For more information please see -# http://doc.trolltech.com/qthelpproject.html#namespace - -QHP_NAMESPACE = org.doxygen.Project - -# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating -# Qt Help Project output. For more information please see -# http://doc.trolltech.com/qthelpproject.html#virtual-folders - -QHP_VIRTUAL_FOLDER = doc - -# If QHP_CUST_FILTER_NAME is set, it specifies the name of a custom filter to -# add. For more information please see -# http://doc.trolltech.com/qthelpproject.html#custom-filters - -QHP_CUST_FILTER_NAME = - -# The QHP_CUST_FILT_ATTRS tag specifies the list of the attributes of the -# custom filter to add. For more information please see -# -# Qt Help Project / Custom Filters. - -QHP_CUST_FILTER_ATTRS = - -# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this -# project's -# filter section matches. 
-# -# Qt Help Project / Filter Attributes. - -QHP_SECT_FILTER_ATTRS = - -# If the GENERATE_QHP tag is set to YES, the QHG_LOCATION tag can -# be used to specify the location of Qt's qhelpgenerator. -# If non-empty doxygen will try to run qhelpgenerator on the generated -# .qhp file. - -QHG_LOCATION = - -# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files -# will be generated, which together with the HTML files, form an Eclipse help -# plugin. To install this plugin and make it available under the help contents -# menu in Eclipse, the contents of the directory containing the HTML and XML -# files needs to be copied into the plugins directory of eclipse. The name of -# the directory within the plugins directory should be the same as -# the ECLIPSE_DOC_ID value. After copying Eclipse needs to be restarted before -# the help appears. - -GENERATE_ECLIPSEHELP = NO - -# A unique identifier for the eclipse help plugin. When installing the plugin -# the directory name containing the HTML and XML files should also have -# this name. - -ECLIPSE_DOC_ID = org.doxygen.Project - -# The DISABLE_INDEX tag can be used to turn on/off the condensed index at -# top of each HTML page. The value NO (the default) enables the index and -# the value YES disables it. - -DISABLE_INDEX = NO - -# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values -# (range [0,1..20]) that doxygen will group on one line in the generated HTML -# documentation. Note that a value of 0 will completely suppress the enum -# values from appearing in the overview section. - -ENUM_VALUES_PER_LINE = 4 - -# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index -# structure should be generated to display hierarchical information. -# If the tag value is set to YES, a side panel will be generated -# containing a tree-like index structure (just like the one that -# is generated for HTML Help). For this to work a browser that supports -# JavaScript, DHTML, CSS and frames is required (i.e. any modern browser). -# Windows users are probably better off using the HTML help feature. - -GENERATE_TREEVIEW = NO - -# By enabling USE_INLINE_TREES, doxygen will generate the Groups, Directories, -# and Class Hierarchy pages using a tree view instead of an ordered list. - -USE_INLINE_TREES = NO - -# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be -# used to set the initial width (in pixels) of the frame in which the tree -# is shown. - -TREEVIEW_WIDTH = 250 - -# When the EXT_LINKS_IN_WINDOW option is set to YES doxygen will open -# links to external symbols imported via tag files in a separate window. - -EXT_LINKS_IN_WINDOW = NO - -# Use this tag to change the font size of Latex formulas included -# as images in the HTML documentation. The default is 10. Note that -# when you change the font size after a successful doxygen run you need -# to manually remove any form_*.png images from the HTML output directory -# to force them to be regenerated. - -FORMULA_FONTSIZE = 10 - -# Use the FORMULA_TRANPARENT tag to determine whether or not the images -# generated for formulas are transparent PNGs. Transparent PNGs are -# not supported properly for IE 6.0, but are supported on all modern browsers. -# Note that when changing this option you need to delete any form_*.png files -# in the HTML output before the changes have effect. 
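A hedged sketch of the navigation tags covered above: enabling the side-panel tree while keeping the condensed top index, with the default panel width mentioned in the comments.

    # Illustrative sketch only
    GENERATE_TREEVIEW = YES
    TREEVIEW_WIDTH    = 250
    DISABLE_INDEX     = NO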
- -FORMULA_TRANSPARENT = YES - -# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax -# (see http://www.mathjax.org) which uses client side Javascript for the -# rendering instead of using prerendered bitmaps. Use this if you do not -# have LaTeX installed or if you want to formulas look prettier in the HTML -# output. When enabled you also need to install MathJax separately and -# configure the path to it using the MATHJAX_RELPATH option. - -USE_MATHJAX = NO - -# When MathJax is enabled you need to specify the location relative to the -# HTML output directory using the MATHJAX_RELPATH option. The destination -# directory should contain the MathJax.js script. For instance, if the mathjax -# directory is located at the same level as the HTML output directory, then -# MATHJAX_RELPATH should be ../mathjax. The default value points to the -# mathjax.org site, so you can quickly see the result without installing -# MathJax, but it is strongly recommended to install a local copy of MathJax -# before deployment. - -MATHJAX_RELPATH = http://www.mathjax.org/mathjax - -# When the SEARCHENGINE tag is enabled doxygen will generate a search box -# for the HTML output. The underlying search engine uses javascript -# and DHTML and should work on any modern browser. Note that when using -# HTML help (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets -# (GENERATE_DOCSET) there is already a search function so this one should -# typically be disabled. For large projects the javascript based search engine -# can be slow, then enabling SERVER_BASED_SEARCH may provide a better solution. - -SEARCHENGINE = YES - -# When the SERVER_BASED_SEARCH tag is enabled the search engine will be -# implemented using a PHP enabled web server instead of at the web client -# using Javascript. Doxygen will generate the search PHP script and index -# file to put on the web server. The advantage of the server -# based approach is that it scales better to large projects and allows -# full text search. The disadvantages are that it is more difficult to setup -# and does not have live searching capabilities. - -SERVER_BASED_SEARCH = NO - -#--------------------------------------------------------------------------- -# configuration options related to the LaTeX output -#--------------------------------------------------------------------------- - -# If the GENERATE_LATEX tag is set to YES (the default) Doxygen will -# generate Latex output. - -GENERATE_LATEX = NO - -# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. -# If a relative path is entered the value of OUTPUT_DIRECTORY will be -# put in front of it. If left blank `latex' will be used as the default path. - -LATEX_OUTPUT = latex - -# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be -# invoked. If left blank `latex' will be used as the default command name. -# Note that when enabling USE_PDFLATEX this option is only used for -# generating bitmaps for formulas in the HTML output, but not in the -# Makefile that is written to the output directory. - -LATEX_CMD_NAME = latex - -# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to -# generate index for LaTeX. If left blank `makeindex' will be used as the -# default command name. - -MAKEINDEX_CMD_NAME = makeindex - -# If the COMPACT_LATEX tag is set to YES Doxygen generates more compact -# LaTeX documents. This may be useful for small projects and may help to -# save some trees in general. 
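The MathJax tags above can be combined as in the following sketch, which assumes a local MathJax copy installed next to the HTML output directory, the layout the comment itself uses as its example.

    # Illustrative sketch only: client-side formula rendering with a local MathJax
    USE_MATHJAX     = YES
    MATHJAX_RELPATH = ../mathjax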
- -COMPACT_LATEX = NO - -# The PAPER_TYPE tag can be used to set the paper type that is used -# by the printer. Possible values are: a4, letter, legal and -# executive. If left blank a4wide will be used. - -PAPER_TYPE = a4 - -# The EXTRA_PACKAGES tag can be to specify one or more names of LaTeX -# packages that should be included in the LaTeX output. - -EXTRA_PACKAGES = - -# The LATEX_HEADER tag can be used to specify a personal LaTeX header for -# the generated latex document. The header should contain everything until -# the first chapter. If it is left blank doxygen will generate a -# standard header. Notice: only use this tag if you know what you are doing! - -LATEX_HEADER = - -# The LATEX_FOOTER tag can be used to specify a personal LaTeX footer for -# the generated latex document. The footer should contain everything after -# the last chapter. If it is left blank doxygen will generate a -# standard footer. Notice: only use this tag if you know what you are doing! - -LATEX_FOOTER = - -# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated -# is prepared for conversion to pdf (using ps2pdf). The pdf file will -# contain links (just like the HTML output) instead of page references -# This makes the output suitable for online browsing using a pdf viewer. - -PDF_HYPERLINKS = YES - -# If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of -# plain latex in the generated Makefile. Set this option to YES to get a -# higher quality PDF documentation. - -USE_PDFLATEX = YES - -# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode. -# command to the generated LaTeX files. This will instruct LaTeX to keep -# running if errors occur, instead of asking the user for help. -# This option is also used when generating formulas in HTML. - -LATEX_BATCHMODE = NO - -# If LATEX_HIDE_INDICES is set to YES then doxygen will not -# include the index chapters (such as File Index, Compound Index, etc.) -# in the output. - -LATEX_HIDE_INDICES = NO - -# If LATEX_SOURCE_CODE is set to YES then doxygen will include -# source code with syntax highlighting in the LaTeX output. -# Note that which sources are shown also depends on other settings -# such as SOURCE_BROWSER. - -LATEX_SOURCE_CODE = NO - -#--------------------------------------------------------------------------- -# configuration options related to the RTF output -#--------------------------------------------------------------------------- - -# If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output -# The RTF output is optimized for Word 97 and may not look very pretty with -# other RTF readers or editors. - -GENERATE_RTF = NO - -# The RTF_OUTPUT tag is used to specify where the RTF docs will be put. -# If a relative path is entered the value of OUTPUT_DIRECTORY will be -# put in front of it. If left blank `rtf' will be used as the default path. - -RTF_OUTPUT = rtf - -# If the COMPACT_RTF tag is set to YES Doxygen generates more compact -# RTF documents. This may be useful for small projects and may help to -# save some trees in general. - -COMPACT_RTF = NO - -# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated -# will contain hyperlink fields. The RTF file will -# contain links (just like the HTML output) instead of page references. -# This makes the output suitable for online browsing using WORD or other -# programs which support those fields. -# Note: wordpad (write) and others do not support links. - -RTF_HYPERLINKS = NO - -# Load stylesheet definitions from file. 
Syntax is similar to doxygen's -# config file, i.e. a series of assignments. You only have to provide -# replacements, missing definitions are set to their default value. - -RTF_STYLESHEET_FILE = - -# Set optional variables used in the generation of an rtf document. -# Syntax is similar to doxygen's config file. - -RTF_EXTENSIONS_FILE = - -#--------------------------------------------------------------------------- -# configuration options related to the man page output -#--------------------------------------------------------------------------- - -# If the GENERATE_MAN tag is set to YES (the default) Doxygen will -# generate man pages - -GENERATE_MAN = NO - -# The MAN_OUTPUT tag is used to specify where the man pages will be put. -# If a relative path is entered the value of OUTPUT_DIRECTORY will be -# put in front of it. If left blank `man' will be used as the default path. - -MAN_OUTPUT = man - -# The MAN_EXTENSION tag determines the extension that is added to -# the generated man pages (default is the subroutine's section .3) - -MAN_EXTENSION = .3 - -# If the MAN_LINKS tag is set to YES and Doxygen generates man output, -# then it will generate one additional man file for each entity -# documented in the real man page(s). These additional files -# only source the real man page, but without them the man command -# would be unable to find the correct page. The default is NO. - -MAN_LINKS = NO - -#--------------------------------------------------------------------------- -# configuration options related to the XML output -#--------------------------------------------------------------------------- - -# If the GENERATE_XML tag is set to YES Doxygen will -# generate an XML file that captures the structure of -# the code including all documentation. - -GENERATE_XML = NO - -# The XML_OUTPUT tag is used to specify where the XML pages will be put. -# If a relative path is entered the value of OUTPUT_DIRECTORY will be -# put in front of it. If left blank `xml' will be used as the default path. - -XML_OUTPUT = xml - -# The XML_SCHEMA tag can be used to specify an XML schema, -# which can be used by a validating XML parser to check the -# syntax of the XML files. - -XML_SCHEMA = - -# The XML_DTD tag can be used to specify an XML DTD, -# which can be used by a validating XML parser to check the -# syntax of the XML files. - -XML_DTD = - -# If the XML_PROGRAMLISTING tag is set to YES Doxygen will -# dump the program listings (including syntax highlighting -# and cross-referencing information) to the XML output. Note that -# enabling this will significantly increase the size of the XML output. - -XML_PROGRAMLISTING = YES - -#--------------------------------------------------------------------------- -# configuration options for the AutoGen Definitions output -#--------------------------------------------------------------------------- - -# If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will -# generate an AutoGen Definitions (see autogen.sf.net) file -# that captures the structure of the code including all -# documentation. Note that this feature is still experimental -# and incomplete at the moment. 
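As a hedged illustration of the XML output tags described above, a tooling-oriented configuration might look like the sketch below; disabling the program listings keeps the output size down, as the comments note.

    # Illustrative sketch only: machine-readable output for external tools
    GENERATE_XML       = YES
    XML_OUTPUT         = xml
    XML_PROGRAMLISTING = NO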
- -GENERATE_AUTOGEN_DEF = NO - -#--------------------------------------------------------------------------- -# configuration options related to the Perl module output -#--------------------------------------------------------------------------- - -# If the GENERATE_PERLMOD tag is set to YES Doxygen will -# generate a Perl module file that captures the structure of -# the code including all documentation. Note that this -# feature is still experimental and incomplete at the -# moment. - -GENERATE_PERLMOD = NO - -# If the PERLMOD_LATEX tag is set to YES Doxygen will generate -# the necessary Makefile rules, Perl scripts and LaTeX code to be able -# to generate PDF and DVI output from the Perl module output. - -PERLMOD_LATEX = NO - -# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be -# nicely formatted so it can be parsed by a human reader. This is useful -# if you want to understand what is going on. On the other hand, if this -# tag is set to NO the size of the Perl module output will be much smaller -# and Perl will parse it just the same. - -PERLMOD_PRETTY = YES - -# The names of the make variables in the generated doxyrules.make file -# are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. -# This is useful so different doxyrules.make files included by the same -# Makefile don't overwrite each other's variables. - -PERLMOD_MAKEVAR_PREFIX = - -#--------------------------------------------------------------------------- -# Configuration options related to the preprocessor -#--------------------------------------------------------------------------- - -# If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will -# evaluate all C-preprocessor directives found in the sources and include -# files. - -ENABLE_PREPROCESSING = YES - -# If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro -# names in the source code. If set to NO (the default) only conditional -# compilation will be performed. Macro expansion can be done in a controlled -# way by setting EXPAND_ONLY_PREDEF to YES. - -MACRO_EXPANSION = NO - -# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES -# then the macro expansion is limited to the macros specified with the -# PREDEFINED and EXPAND_AS_DEFINED tags. - -EXPAND_ONLY_PREDEF = NO - -# If the SEARCH_INCLUDES tag is set to YES (the default) the includes files -# pointed to by INCLUDE_PATH will be searched when a #include is found. - -SEARCH_INCLUDES = YES - -# The INCLUDE_PATH tag can be used to specify one or more directories that -# contain include files that are not input files but should be processed by -# the preprocessor. - -INCLUDE_PATH = - -# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard -# patterns (like *.h and *.hpp) to filter out the header-files in the -# directories. If left blank, the patterns specified with FILE_PATTERNS will -# be used. - -INCLUDE_FILE_PATTERNS = - -# The PREDEFINED tag can be used to specify one or more macro names that -# are defined before the preprocessor is started (similar to the -D option of -# gcc). The argument of the tag is a list of macros of the form: name -# or name=definition (no spaces). If the definition and the = are -# omitted =1 is assumed. To prevent a macro definition from being -# undefined via #undef or recursively expanded use the := operator -# instead of the = operator. 
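A minimal sketch of controlled macro expansion using the preprocessor tags above; the macro names are hypothetical and only show the plain, value and := forms of a PREDEFINED entry.

    # Illustrative sketch only: expand just a fixed set of (hypothetical) macros
    MACRO_EXPANSION    = YES
    EXPAND_ONLY_PREDEF = YES
    PREDEFINED         = HAVE_FOO \
                         "MAX_ITEMS=16" \
                         LOCKED:=1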
- -PREDEFINED = - -# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then -# this tag can be used to specify a list of macro names that should be expanded. -# The macro definition that is found in the sources will be used. -# Use the PREDEFINED tag if you want to use a different macro definition that -# overrules the definition found in the source code. - -EXPAND_AS_DEFINED = - -# If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then -# doxygen's preprocessor will remove all references to function-like macros -# that are alone on a line, have an all uppercase name, and do not end with a -# semicolon, because these will confuse the parser if not removed. - -SKIP_FUNCTION_MACROS = YES - -#--------------------------------------------------------------------------- -# Configuration::additions related to external references -#--------------------------------------------------------------------------- - -# The TAGFILES option can be used to specify one or more tagfiles. -# Optionally an initial location of the external documentation -# can be added for each tagfile. The format of a tag file without -# this location is as follows: -# TAGFILES = file1 file2 ... -# Adding location for the tag files is done as follows: -# TAGFILES = file1=loc1 "file2 = loc2" ... -# where "loc1" and "loc2" can be relative or absolute paths or -# URLs. If a location is present for each tag, the installdox tool -# does not have to be run to correct the links. -# Note that each tag file must have a unique name -# (where the name does NOT include the path) -# If a tag file is not located in the directory in which doxygen -# is run, you must also specify the path to the tagfile here. - -TAGFILES = - -# When a file name is specified after GENERATE_TAGFILE, doxygen will create -# a tag file that is based on the input files it reads. - -GENERATE_TAGFILE = - -# If the ALLEXTERNALS tag is set to YES all external classes will be listed -# in the class index. If set to NO only the inherited external classes -# will be listed. - -ALLEXTERNALS = NO - -# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed -# in the modules index. If set to NO, only the current project's groups will -# be listed. - -EXTERNAL_GROUPS = YES - -# The PERL_PATH should be the absolute path and name of the perl script -# interpreter (i.e. the result of `which perl'). - -PERL_PATH = /usr/bin/perl - -#--------------------------------------------------------------------------- -# Configuration options related to the dot tool -#--------------------------------------------------------------------------- - -# If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will -# generate a inheritance diagram (in HTML, RTF and LaTeX) for classes with base -# or super classes. Setting the tag to NO turns the diagrams off. Note that -# this option also works with HAVE_DOT disabled, but it is recommended to -# install and use dot, since it yields more powerful graphs. - -CLASS_DIAGRAMS = YES - -# You can define message sequence charts within doxygen comments using the \msc -# command. Doxygen will then run the mscgen tool (see -# http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the -# documentation. The MSCGEN_PATH tag allows you to specify the directory where -# the mscgen tool resides. If left empty the tool is assumed to be found in the -# default search path. 
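A hedged sketch of the external-reference tags above; the tag file path, the URL and the generated tag file name are hypothetical placeholders.

    # Illustrative sketch only: link against another project's documentation
    TAGFILES         = ../libexample/libexample.tag=https://example.org/libexample/docs
    # Emit a tag file so other manuals can link back to this one
    GENERATE_TAGFILE = doc/doxygen/project.tag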
- -MSCGEN_PATH = - -# If set to YES, the inheritance and collaboration graphs will hide -# inheritance and usage relations if the target is undocumented -# or is not a class. - -HIDE_UNDOC_RELATIONS = YES - -# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is -# available from the path. This tool is part of Graphviz, a graph visualization -# toolkit from AT&T and Lucent Bell Labs. The other options in this section -# have no effect if this option is set to NO (the default) - -HAVE_DOT = YES - -# The DOT_NUM_THREADS specifies the number of dot invocations doxygen is -# allowed to run in parallel. When set to 0 (the default) doxygen will -# base this on the number of processors available in the system. You can set it -# explicitly to a value larger than 0 to get control over the balance -# between CPU load and processing speed. - -DOT_NUM_THREADS = 0 - -# By default doxygen will write a font called Helvetica to the output -# directory and reference it in all dot files that doxygen generates. -# When you want a differently looking font you can specify the font name -# using DOT_FONTNAME. You need to make sure dot is able to find the font, -# which can be done by putting it in a standard location or by setting the -# DOTFONTPATH environment variable or by setting DOT_FONTPATH to the directory -# containing the font. - -DOT_FONTNAME = Helvetica - -# The DOT_FONTSIZE tag can be used to set the size of the font of dot graphs. -# The default size is 10pt. - -DOT_FONTSIZE = 10 - -# By default doxygen will tell dot to use the output directory to look for the -# FreeSans.ttf font (which doxygen will put there itself). If you specify a -# different font using DOT_FONTNAME you can set the path where dot -# can find it using this tag. - -DOT_FONTPATH = - -# If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen -# will generate a graph for each documented class showing the direct and -# indirect inheritance relations. Setting this tag to YES will force the -# the CLASS_DIAGRAMS tag to NO. - -CLASS_GRAPH = YES - -# If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen -# will generate a graph for each documented class showing the direct and -# indirect implementation dependencies (inheritance, containment, and -# class references variables) of the class with other documented classes. - -COLLABORATION_GRAPH = YES - -# If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen -# will generate a graph for groups, showing the direct groups dependencies - -GROUP_GRAPHS = YES - -# If the UML_LOOK tag is set to YES doxygen will generate inheritance and -# collaboration diagrams in a style similar to the OMG's Unified Modeling -# Language. - -UML_LOOK = NO - -# If set to YES, the inheritance and collaboration graphs will show the -# relations between templates and their instances. - -TEMPLATE_RELATIONS = NO - -# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT -# tags are set to YES then doxygen will generate a graph for each documented -# file showing the direct and indirect include dependencies of the file with -# other documented files. - -INCLUDE_GRAPH = YES - -# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and -# HAVE_DOT tags are set to YES then doxygen will generate a graph for each -# documented header file showing the documented files that directly or -# indirectly include this file. 
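As a hedged example of the dot-related tags above, the sketch below caps dot's parallelism on a resource-constrained machine instead of letting it use every available processor, and keeps the default font.

    # Illustrative sketch only
    HAVE_DOT        = YES
    DOT_NUM_THREADS = 2
    DOT_FONTNAME    = Helvetica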
- -INCLUDED_BY_GRAPH = YES - -# If the CALL_GRAPH and HAVE_DOT options are set to YES then -# doxygen will generate a call dependency graph for every global function -# or class method. Note that enabling this option will significantly increase -# the time of a run. So in most cases it will be better to enable call graphs -# for selected functions only using the \callgraph command. - -CALL_GRAPH = YES - -# If the CALLER_GRAPH and HAVE_DOT tags are set to YES then -# doxygen will generate a caller dependency graph for every global function -# or class method. Note that enabling this option will significantly increase -# the time of a run. So in most cases it will be better to enable caller -# graphs for selected functions only using the \callergraph command. - -CALLER_GRAPH = YES - -# If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen -# will generate a graphical hierarchy of all classes instead of a textual one. - -GRAPHICAL_HIERARCHY = YES - -# If the DIRECTORY_GRAPH, SHOW_DIRECTORIES and HAVE_DOT tags are set to YES -# then doxygen will show the dependencies a directory has on other directories -# in a graphical way. The dependency relations are determined by the #include -# relations between the files in the directories. - -DIRECTORY_GRAPH = YES - -# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images -# generated by dot. Possible values are svg, png, jpg, or gif. -# If left blank png will be used. - -DOT_IMAGE_FORMAT = svg - -# The tag DOT_PATH can be used to specify the path where the dot tool can be -# found. If left blank, it is assumed the dot tool can be found in the path. - -DOT_PATH = - -# The DOTFILE_DIRS tag can be used to specify one or more directories that -# contain dot files that are included in the documentation (see the -# \dotfile command). - -DOTFILE_DIRS = - -# The MSCFILE_DIRS tag can be used to specify one or more directories that -# contain msc files that are included in the documentation (see the -# \mscfile command). - -MSCFILE_DIRS = - -# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of -# nodes that will be shown in the graph. If the number of nodes in a graph -# becomes larger than this value, doxygen will truncate the graph, which is -# visualized by representing a node as a red box. Note that doxygen if the -# number of direct children of the root node in a graph is already larger than -# DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note -# that the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH. - -DOT_GRAPH_MAX_NODES = 50 - -# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the -# graphs generated by dot. A depth value of 3 means that only nodes reachable -# from the root by following a path via at most 3 edges will be shown. Nodes -# that lay further from the root node will be omitted. Note that setting this -# option to 1 or 2 may greatly reduce the computation time needed for large -# code bases. Also note that the size of a graph can be further restricted by -# DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction. - -MAX_DOT_GRAPH_DEPTH = 0 - -# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent -# background. This is disabled by default, because dot on Windows does not -# seem to support this out of the box. Warning: Depending on the platform used, -# enabling this option may lead to badly anti-aliased labels on the edges of -# a graph (i.e. they become hard to read). 
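The comments above recommend limiting the expensive graphs on large code bases; a hedged sketch of such a trade-off follows. Per-function graphs can still be requested with the \callgraph and \callergraph commands mentioned above.

    # Illustrative sketch only: bound graph generation cost
    CALL_GRAPH          = NO
    CALLER_GRAPH        = NO
    DOT_GRAPH_MAX_NODES = 50
    MAX_DOT_GRAPH_DEPTH = 2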
- -DOT_TRANSPARENT = NO - -# Set the DOT_MULTI_TARGETS tag to YES allow dot to generate multiple output -# files in one run (i.e. multiple -o and -T options on the command line). This -# makes dot run faster, but since only newer versions of dot (>1.8.10) -# support this, this feature is disabled by default. - -DOT_MULTI_TARGETS = NO - -# If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will -# generate a legend page explaining the meaning of the various boxes and -# arrows in the dot generated graphs. - -GENERATE_LEGEND = YES - -# If the DOT_CLEANUP tag is set to YES (the default) Doxygen will -# remove the intermediate dot files that are used to generate -# the various graphs. - -DOT_CLEANUP = YES diff --git a/doc/Doxyfile.in b/doc/Doxyfile.in new file mode 100644 index 0000000000000..d8fd46d1c7d0b --- /dev/null +++ b/doc/Doxyfile.in @@ -0,0 +1,2459 @@ +# Doxyfile 1.8.12 + +# This file describes the settings to be used by the documentation system +# doxygen (www.doxygen.org) for a project. +# +# All text after a double hash (##) is considered a comment and is placed in +# front of the TAG it is preceding. +# +# All text after a single hash (#) is considered a comment and will be ignored. +# The format is: +# TAG = value [value, ...] +# For lists, items can also be appended using: +# TAG += value [value, ...] +# Values that contain spaces should be placed between quotes (\" \"). + +#--------------------------------------------------------------------------- +# Project related configuration options +#--------------------------------------------------------------------------- + +# This tag specifies the encoding used for all characters in the config file +# that follow. The default is UTF-8 which is also the encoding used for all text +# before the first occurrence of this tag. Doxygen uses libiconv (or the iconv +# built into libc) for the transcoding. See http://www.gnu.org/software/libiconv +# for the list of possible encodings. +# The default value is: UTF-8. + +DOXYFILE_ENCODING = UTF-8 + +# The PROJECT_NAME tag is a single word (or a sequence of words surrounded by +# double-quotes, unless you are using Doxywizard) that should identify the +# project for which the documentation is generated. This name is used in the +# title of most generated pages and in a few other places. +# The default value is: My Project. + +PROJECT_NAME = "Bitcoin Core" + +# The PROJECT_NUMBER tag can be used to enter a project or revision number. This +# could be handy for archiving the generated documentation or if some version +# control system is used. + +PROJECT_NUMBER = @PACKAGE_VERSION@ + +# Using the PROJECT_BRIEF tag one can provide an optional one line description +# for a project that appears at the top of each page and should give viewer a +# quick idea about the purpose of the project. Keep the description short. + +PROJECT_BRIEF = "P2P Digital Currency" + +# With the PROJECT_LOGO tag one can specify a logo or an icon that is included +# in the documentation. The maximum height of the logo should not exceed 55 +# pixels and the maximum width should not exceed 200 pixels. Doxygen will copy +# the logo to the output directory. + +PROJECT_LOGO = doc/bitcoin_logo_doxygen.png + +# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) path +# into which the generated documentation will be written. If a relative path is +# entered, it will be relative to the location where doxygen was started. If +# left blank the current directory will be used. 
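Since the new file is a doc/Doxyfile.in template, the @PACKAGE_VERSION@ placeholder shown below is presumably filled in by the build system's usual .in substitution step; the concrete version number in the second line is purely illustrative.

    # Template line as it appears in doc/Doxyfile.in
    PROJECT_NUMBER = @PACKAGE_VERSION@
    # Possible result after substitution (version number is illustrative)
    PROJECT_NUMBER = 1.2.3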
+ +OUTPUT_DIRECTORY = doc/doxygen + +# If the CREATE_SUBDIRS tag is set to YES then doxygen will create 4096 sub- +# directories (in 2 levels) under the output directory of each output format and +# will distribute the generated files over these directories. Enabling this +# option can be useful when feeding doxygen a huge amount of source files, where +# putting all generated files in the same directory would otherwise causes +# performance problems for the file system. +# The default value is: NO. + +CREATE_SUBDIRS = NO + +# If the ALLOW_UNICODE_NAMES tag is set to YES, doxygen will allow non-ASCII +# characters to appear in the names of generated files. If set to NO, non-ASCII +# characters will be escaped, for example _xE3_x81_x84 will be used for Unicode +# U+3044. +# The default value is: NO. + +ALLOW_UNICODE_NAMES = NO + +# The OUTPUT_LANGUAGE tag is used to specify the language in which all +# documentation generated by doxygen is written. Doxygen will use this +# information to generate all constant output in the proper language. +# Possible values are: Afrikaans, Arabic, Armenian, Brazilian, Catalan, Chinese, +# Chinese-Traditional, Croatian, Czech, Danish, Dutch, English (United States), +# Esperanto, Farsi (Persian), Finnish, French, German, Greek, Hungarian, +# Indonesian, Italian, Japanese, Japanese-en (Japanese with English messages), +# Korean, Korean-en (Korean with English messages), Latvian, Lithuanian, +# Macedonian, Norwegian, Persian (Farsi), Polish, Portuguese, Romanian, Russian, +# Serbian, Serbian-Cyrillic, Slovak, Slovene, Spanish, Swedish, Turkish, +# Ukrainian and Vietnamese. +# The default value is: English. + +OUTPUT_LANGUAGE = English + +# If the BRIEF_MEMBER_DESC tag is set to YES, doxygen will include brief member +# descriptions after the members that are listed in the file and class +# documentation (similar to Javadoc). Set to NO to disable this. +# The default value is: YES. + +BRIEF_MEMBER_DESC = YES + +# If the REPEAT_BRIEF tag is set to YES, doxygen will prepend the brief +# description of a member or function before the detailed description +# +# Note: If both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the +# brief descriptions will be completely suppressed. +# The default value is: YES. + +REPEAT_BRIEF = YES + +# This tag implements a quasi-intelligent brief description abbreviator that is +# used to form the text in various listings. Each string in this list, if found +# as the leading text of the brief description, will be stripped from the text +# and the result, after processing the whole list, is used as the annotated +# text. Otherwise, the brief description is used as-is. If left blank, the +# following values are used ($name is automatically replaced with the name of +# the entity):The $name class, The $name widget, The $name file, is, provides, +# specifies, contains, represents, a, an and the. + +ABBREVIATE_BRIEF = "The $name class" \ + "The $name widget" \ + "The $name file" \ + is \ + provides \ + specifies \ + contains \ + represents \ + a \ + an \ + the + +# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then +# doxygen will generate a detailed section even if there is only a brief +# description. +# The default value is: NO. + +ALWAYS_DETAILED_SEC = NO + +# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all +# inherited members of a class in the documentation of that class as if those +# members were ordinary class members. 
Constructors, destructors and assignment +# operators of the base classes will not be shown. +# The default value is: NO. + +INLINE_INHERITED_MEMB = NO + +# If the FULL_PATH_NAMES tag is set to YES, doxygen will prepend the full path +# before files name in the file list and in the header files. If set to NO the +# shortest path that makes the file name unique will be used +# The default value is: YES. + +FULL_PATH_NAMES = YES + +# The STRIP_FROM_PATH tag can be used to strip a user-defined part of the path. +# Stripping is only done if one of the specified strings matches the left-hand +# part of the path. The tag can be used to show relative paths in the file list. +# If left blank the directory from which doxygen is run is used as the path to +# strip. +# +# Note that you can specify absolute paths here, but also relative paths, which +# will be relative from the directory where doxygen is started. +# This tag requires that the tag FULL_PATH_NAMES is set to YES. + +STRIP_FROM_PATH = + +# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of the +# path mentioned in the documentation of a class, which tells the reader which +# header file to include in order to use a class. If left blank only the name of +# the header file containing the class definition is used. Otherwise one should +# specify the list of include paths that are normally passed to the compiler +# using the -I flag. + +STRIP_FROM_INC_PATH = + +# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter (but +# less readable) file names. This can be useful is your file systems doesn't +# support long names like on DOS, Mac, or CD-ROM. +# The default value is: NO. + +SHORT_NAMES = NO + +# If the JAVADOC_AUTOBRIEF tag is set to YES then doxygen will interpret the +# first line (until the first dot) of a Javadoc-style comment as the brief +# description. If set to NO, the Javadoc-style will behave just like regular Qt- +# style comments (thus requiring an explicit @brief command for a brief +# description.) +# The default value is: NO. + +JAVADOC_AUTOBRIEF = YES + +# If the QT_AUTOBRIEF tag is set to YES then doxygen will interpret the first +# line (until the first dot) of a Qt-style comment as the brief description. If +# set to NO, the Qt-style will behave just like regular Qt-style comments (thus +# requiring an explicit \brief command for a brief description.) +# The default value is: NO. + +QT_AUTOBRIEF = NO + +# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make doxygen treat a +# multi-line C++ special comment block (i.e. a block of //! or /// comments) as +# a brief description. This used to be the default behavior. The new default is +# to treat a multi-line C++ comment block as a detailed description. Set this +# tag to YES if you prefer the old behavior instead. +# +# Note that setting this tag to YES also means that rational rose comments are +# not recognized any more. +# The default value is: NO. + +MULTILINE_CPP_IS_BRIEF = NO + +# If the INHERIT_DOCS tag is set to YES then an undocumented member inherits the +# documentation from any documented member that it re-implements. +# The default value is: YES. + +INHERIT_DOCS = YES + +# If the SEPARATE_MEMBER_PAGES tag is set to YES then doxygen will produce a new +# page for each member. If set to NO, the documentation of a member will be part +# of the file/class/namespace that contains it. +# The default value is: NO. + +SEPARATE_MEMBER_PAGES = NO + +# The TAB_SIZE tag can be used to set the number of spaces in a tab. 
Doxygen +# uses this value to replace tabs by spaces in code fragments. +# Minimum value: 1, maximum value: 16, default value: 4. + +TAB_SIZE = 8 + +# This tag can be used to specify a number of aliases that act as commands in +# the documentation. An alias has the form: +# name=value +# For example adding +# "sideeffect=@par Side Effects:\n" +# will allow you to put the command \sideeffect (or @sideeffect) in the +# documentation, which will result in a user-defined paragraph with heading +# "Side Effects:". You can put \n's in the value part of an alias to insert +# newlines. + +ALIASES = + +# This tag can be used to specify a number of word-keyword mappings (TCL only). +# A mapping has the form "name=value". For example adding "class=itcl::class" +# will allow you to use the command class in the itcl::class meaning. + +TCL_SUBST = + +# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources +# only. Doxygen will then generate output that is more tailored for C. For +# instance, some of the names that are used will be different. The list of all +# members will be omitted, etc. +# The default value is: NO. + +OPTIMIZE_OUTPUT_FOR_C = NO + +# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java or +# Python sources only. Doxygen will then generate output that is more tailored +# for that language. For instance, namespaces will be presented as packages, +# qualified scopes will look different, etc. +# The default value is: NO. + +OPTIMIZE_OUTPUT_JAVA = NO + +# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran +# sources. Doxygen will then generate output that is tailored for Fortran. +# The default value is: NO. + +OPTIMIZE_FOR_FORTRAN = NO + +# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL +# sources. Doxygen will then generate output that is tailored for VHDL. +# The default value is: NO. + +OPTIMIZE_OUTPUT_VHDL = NO + +# Doxygen selects the parser to use depending on the extension of the files it +# parses. With this tag you can assign which parser to use for a given +# extension. Doxygen has a built-in mapping, but you can override or extend it +# using this tag. The format is ext=language, where ext is a file extension, and +# language is one of the parsers supported by doxygen: IDL, Java, Javascript, +# C#, C, C++, D, PHP, Objective-C, Python, Fortran (fixed format Fortran: +# FortranFixed, free formatted Fortran: FortranFree, unknown formatted Fortran: +# Fortran. In the later case the parser tries to guess whether the code is fixed +# or free formatted code, this is the default for Fortran type files), VHDL. For +# instance to make doxygen treat .inc files as Fortran files (default is PHP), +# and .f files as C (default is Fortran), use: inc=Fortran f=C. +# +# Note: For files without extension you can use no_extension as a placeholder. +# +# Note that for custom extensions you also need to set FILE_PATTERNS otherwise +# the files are not read by doxygen. + +EXTENSION_MAPPING = + +# If the MARKDOWN_SUPPORT tag is enabled then doxygen pre-processes all comments +# according to the Markdown format, which allows for more readable +# documentation. See http://daringfireball.net/projects/markdown/ for details. +# The output of markdown processing is further processed by doxygen, so you can +# mix doxygen, HTML, and XML commands with Markdown formatting. Disable only in +# case of backward compatibilities issues. +# The default value is: YES. 
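As a minimal sketch of how these settings interact (CheckHeaderStub is an invented name, not part of the tree): with JAVADOC_AUTOBRIEF set to YES above, the text up to the first dot of a Javadoc-style comment becomes the brief description without an explicit @brief command, and with MARKDOWN_SUPPORT left at YES (assigned just below) the detailed text may use plain Markdown.

    /** Validates a block header stub. With JAVADOC_AUTOBRIEF = YES this first
     *  sentence, up to the first dot, is taken as the brief description.
     *
     *  The detailed description may use Markdown, for example:
     *  - *emphasis* and `inline code`
     *  - a second list item
     */
    bool CheckHeaderStub(int version);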
+ +MARKDOWN_SUPPORT = YES + +# When the TOC_INCLUDE_HEADINGS tag is set to a non-zero value, all headings up +# to that level are automatically included in the table of contents, even if +# they do not have an id attribute. +# Note: This feature currently applies only to Markdown headings. +# Minimum value: 0, maximum value: 99, default value: 0. +# This tag requires that the tag MARKDOWN_SUPPORT is set to YES. + +TOC_INCLUDE_HEADINGS = 0 + +# When enabled doxygen tries to link words that correspond to documented +# classes, or namespaces to their corresponding documentation. Such a link can +# be prevented in individual cases by putting a % sign in front of the word or +# globally by setting AUTOLINK_SUPPORT to NO. +# The default value is: YES. + +AUTOLINK_SUPPORT = YES + +# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want +# to include (a tag file for) the STL sources as input, then you should set this +# tag to YES in order to let doxygen match functions declarations and +# definitions whose arguments contain STL classes (e.g. func(std::string); +# versus func(std::string) {}). This also make the inheritance and collaboration +# diagrams that involve STL classes more complete and accurate. +# The default value is: NO. + +BUILTIN_STL_SUPPORT = NO + +# If you use Microsoft's C++/CLI language, you should set this option to YES to +# enable parsing support. +# The default value is: NO. + +CPP_CLI_SUPPORT = NO + +# Set the SIP_SUPPORT tag to YES if your project consists of sip (see: +# http://www.riverbankcomputing.co.uk/software/sip/intro) sources only. Doxygen +# will parse them like normal C++ but will assume all classes use public instead +# of private inheritance when no explicit protection keyword is present. +# The default value is: NO. + +SIP_SUPPORT = NO + +# For Microsoft's IDL there are propget and propput attributes to indicate +# getter and setter methods for a property. Setting this option to YES will make +# doxygen to replace the get and set methods by a property in the documentation. +# This will only work if the methods are indeed getting or setting a simple +# type. If this is not the case, or you want to show the methods anyway, you +# should set this option to NO. +# The default value is: YES. + +IDL_PROPERTY_SUPPORT = YES + +# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC +# tag is set to YES then doxygen will reuse the documentation of the first +# member in the group (if any) for the other members of the group. By default +# all members of a group must be documented explicitly. +# The default value is: NO. + +DISTRIBUTE_GROUP_DOC = NO + +# If one adds a struct or class to a group and this option is enabled, then also +# any nested class or struct is added to the same group. By default this option +# is disabled and one has to add nested compounds explicitly via \ingroup. +# The default value is: NO. + +GROUP_NESTED_COMPOUNDS = NO + +# Set the SUBGROUPING tag to YES to allow class member groups of the same type +# (for instance a group of public functions) to be put as a subgroup of that +# type (e.g. under the Public Functions section). Set it to NO to prevent +# subgrouping. Alternatively, this can be done per class using the +# \nosubgrouping command. +# The default value is: YES. + +SUBGROUPING = YES + +# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and unions +# are shown inside the group in which they are included (e.g. 
using \ingroup) +# instead of on a separate page (for HTML and Man pages) or section (for LaTeX +# and RTF). +# +# Note that this feature does not work in combination with +# SEPARATE_MEMBER_PAGES. +# The default value is: NO. + +INLINE_GROUPED_CLASSES = NO + +# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and unions +# with only public data fields or simple typedef fields will be shown inline in +# the documentation of the scope in which they are defined (i.e. file, +# namespace, or group documentation), provided this scope is documented. If set +# to NO, structs, classes, and unions are shown on a separate page (for HTML and +# Man pages) or section (for LaTeX and RTF). +# The default value is: NO. + +INLINE_SIMPLE_STRUCTS = NO + +# When TYPEDEF_HIDES_STRUCT tag is enabled, a typedef of a struct, union, or +# enum is documented as struct, union, or enum with the name of the typedef. So +# typedef struct TypeS {} TypeT, will appear in the documentation as a struct +# with name TypeT. When disabled the typedef will appear as a member of a file, +# namespace, or class. And the struct will be named TypeS. This can typically be +# useful for C code in case the coding convention dictates that all compound +# types are typedef'ed and only the typedef is referenced, never the tag name. +# The default value is: NO. + +TYPEDEF_HIDES_STRUCT = NO + +# The size of the symbol lookup cache can be set using LOOKUP_CACHE_SIZE. This +# cache is used to resolve symbols given their name and scope. Since this can be +# an expensive process and often the same symbol appears multiple times in the +# code, doxygen keeps a cache of pre-resolved symbols. If the cache is too small +# doxygen will become slower. If the cache is too large, memory is wasted. The +# cache size is given by this formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range +# is 0..9, the default is 0, corresponding to a cache size of 2^16=65536 +# symbols. At the end of a run doxygen will report the cache usage and suggest +# the optimal cache size from a speed point of view. +# Minimum value: 0, maximum value: 9, default value: 0. + +LOOKUP_CACHE_SIZE = 0 + +#--------------------------------------------------------------------------- +# Build related configuration options +#--------------------------------------------------------------------------- + +# If the EXTRACT_ALL tag is set to YES, doxygen will assume all entities in +# documentation are documented, even if no documentation was available. Private +# class members and static file members will be hidden unless the +# EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES. +# Note: This will also disable the warnings about undocumented members that are +# normally produced when WARNINGS is set to YES. +# The default value is: NO. + +EXTRACT_ALL = YES + +# If the EXTRACT_PRIVATE tag is set to YES, all private members of a class will +# be included in the documentation. +# The default value is: NO. + +EXTRACT_PRIVATE = YES + +# If the EXTRACT_PACKAGE tag is set to YES, all members with package or internal +# scope will be included in the documentation. +# The default value is: NO. + +EXTRACT_PACKAGE = NO + +# If the EXTRACT_STATIC tag is set to YES, all static members of a file will be +# included in the documentation. +# The default value is: NO. + +EXTRACT_STATIC = YES + +# If the EXTRACT_LOCAL_CLASSES tag is set to YES, classes (and structs) defined +# locally in source files will be included in the documentation. 
If set to NO, +# only classes defined in header files are included. Does not have any effect +# for Java sources. +# The default value is: YES. + +EXTRACT_LOCAL_CLASSES = YES + +# This flag is only useful for Objective-C code. If set to YES, local methods, +# which are defined in the implementation section but not in the interface are +# included in the documentation. If set to NO, only methods in the interface are +# included. +# The default value is: NO. + +EXTRACT_LOCAL_METHODS = NO + +# If this flag is set to YES, the members of anonymous namespaces will be +# extracted and appear in the documentation as a namespace called +# 'anonymous_namespace{file}', where file will be replaced with the base name of +# the file that contains the anonymous namespace. By default anonymous namespace +# are hidden. +# The default value is: NO. + +EXTRACT_ANON_NSPACES = NO + +# If the HIDE_UNDOC_MEMBERS tag is set to YES, doxygen will hide all +# undocumented members inside documented classes or files. If set to NO these +# members will be included in the various overviews, but no documentation +# section is generated. This option has no effect if EXTRACT_ALL is enabled. +# The default value is: NO. + +HIDE_UNDOC_MEMBERS = NO + +# If the HIDE_UNDOC_CLASSES tag is set to YES, doxygen will hide all +# undocumented classes that are normally visible in the class hierarchy. If set +# to NO, these classes will be included in the various overviews. This option +# has no effect if EXTRACT_ALL is enabled. +# The default value is: NO. + +HIDE_UNDOC_CLASSES = NO + +# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, doxygen will hide all friend +# (class|struct|union) declarations. If set to NO, these declarations will be +# included in the documentation. +# The default value is: NO. + +HIDE_FRIEND_COMPOUNDS = NO + +# If the HIDE_IN_BODY_DOCS tag is set to YES, doxygen will hide any +# documentation blocks found inside the body of a function. If set to NO, these +# blocks will be appended to the function's detailed documentation block. +# The default value is: NO. + +HIDE_IN_BODY_DOCS = NO + +# The INTERNAL_DOCS tag determines if documentation that is typed after a +# \internal command is included. If the tag is set to NO then the documentation +# will be excluded. Set it to YES to include the internal documentation. +# The default value is: NO. + +INTERNAL_DOCS = NO + +# If the CASE_SENSE_NAMES tag is set to NO then doxygen will only generate file +# names in lower-case letters. If set to YES, upper-case letters are also +# allowed. This is useful if you have classes or files whose names only differ +# in case and if your file system supports case sensitive file names. Windows +# and Mac users are advised to set this option to NO. +# The default value is: system dependent. + +CASE_SENSE_NAMES = NO + +# If the HIDE_SCOPE_NAMES tag is set to NO then doxygen will show members with +# their full class and namespace scopes in the documentation. If set to YES, the +# scope will be hidden. +# The default value is: NO. + +HIDE_SCOPE_NAMES = NO + +# If the HIDE_COMPOUND_REFERENCE tag is set to NO (default) then doxygen will +# append additional text to a page's title, such as Class Reference. If set to +# YES the compound reference will be hidden. +# The default value is: NO. + +HIDE_COMPOUND_REFERENCE= NO + +# If the SHOW_INCLUDE_FILES tag is set to YES then doxygen will put a list of +# the files that are included by a file in the documentation of that file. +# The default value is: YES. 
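A short sketch of what the extraction settings above imply (the names below are invented for illustration): with EXTRACT_ALL, EXTRACT_PRIVATE and EXTRACT_STATIC all set to YES, private members and file-static functions appear in the output even without comments, while a paragraph written after \internal stays hidden as long as INTERNAL_DOCS remains NO.

    class WalletStub
    {
    private:
        //! Appears in the output because EXTRACT_PRIVATE = YES.
        int m_version{0};
    };

    /** Returns a toy checksum.
     *  \internal Implementation notes written after \internal are omitted
     *  while INTERNAL_DOCS is NO.
     */
    static int ChecksumStub(int x) { return x * 31; } // listed because EXTRACT_STATIC = YES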
+ +SHOW_INCLUDE_FILES = YES + +# If the SHOW_GROUPED_MEMB_INC tag is set to YES then Doxygen will add for each +# grouped member an include statement to the documentation, telling the reader +# which file to include in order to use the member. +# The default value is: NO. + +SHOW_GROUPED_MEMB_INC = NO + +# If the FORCE_LOCAL_INCLUDES tag is set to YES then doxygen will list include +# files with double quotes in the documentation rather than with sharp brackets. +# The default value is: NO. + +FORCE_LOCAL_INCLUDES = NO + +# If the INLINE_INFO tag is set to YES then a tag [inline] is inserted in the +# documentation for inline members. +# The default value is: YES. + +INLINE_INFO = YES + +# If the SORT_MEMBER_DOCS tag is set to YES then doxygen will sort the +# (detailed) documentation of file and class members alphabetically by member +# name. If set to NO, the members will appear in declaration order. +# The default value is: YES. + +SORT_MEMBER_DOCS = YES + +# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the brief +# descriptions of file, namespace and class members alphabetically by member +# name. If set to NO, the members will appear in declaration order. Note that +# this will also influence the order of the classes in the class list. +# The default value is: NO. + +SORT_BRIEF_DOCS = NO + +# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen will sort the +# (brief and detailed) documentation of class members so that constructors and +# destructors are listed first. If set to NO the constructors will appear in the +# respective orders defined by SORT_BRIEF_DOCS and SORT_MEMBER_DOCS. +# Note: If SORT_BRIEF_DOCS is set to NO this option is ignored for sorting brief +# member documentation. +# Note: If SORT_MEMBER_DOCS is set to NO this option is ignored for sorting +# detailed member documentation. +# The default value is: NO. + +SORT_MEMBERS_CTORS_1ST = NO + +# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the hierarchy +# of group names into alphabetical order. If set to NO the group names will +# appear in their defined order. +# The default value is: NO. + +SORT_GROUP_NAMES = NO + +# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be sorted by +# fully-qualified names, including namespaces. If set to NO, the class list will +# be sorted only by class name, not including the namespace part. +# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. +# Note: This option applies only to the class list, not to the alphabetical +# list. +# The default value is: NO. + +SORT_BY_SCOPE_NAME = NO + +# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to do proper +# type resolution of all parameters of a function it will reject a match between +# the prototype and the implementation of a member function even if there is +# only one candidate or it is obvious which candidate to choose by doing a +# simple string match. By disabling STRICT_PROTO_MATCHING doxygen will still +# accept a match between prototype and implementation in such cases. +# The default value is: NO. + +STRICT_PROTO_MATCHING = NO + +# The GENERATE_TODOLIST tag can be used to enable (YES) or disable (NO) the todo +# list. This list is created by putting \todo commands in the documentation. +# The default value is: YES. + +GENERATE_TODOLIST = YES + +# The GENERATE_TESTLIST tag can be used to enable (YES) or disable (NO) the test +# list. This list is created by putting \test commands in the documentation. 
+# The default value is: YES. + +GENERATE_TESTLIST = YES + +# The GENERATE_BUGLIST tag can be used to enable (YES) or disable (NO) the bug +# list. This list is created by putting \bug commands in the documentation. +# The default value is: YES. + +GENERATE_BUGLIST = YES + +# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or disable (NO) +# the deprecated list. This list is created by putting \deprecated commands in +# the documentation. +# The default value is: YES. + +GENERATE_DEPRECATEDLIST= YES + +# The ENABLED_SECTIONS tag can be used to enable conditional documentation +# sections, marked by \if ... \endif and \cond +# ... \endcond blocks. + +ENABLED_SECTIONS = + +# The MAX_INITIALIZER_LINES tag determines the maximum number of lines that the +# initial value of a variable or macro / define can have for it to appear in the +# documentation. If the initializer consists of more lines than specified here +# it will be hidden. Use a value of 0 to hide initializers completely. The +# appearance of the value of individual variables and macros / defines can be +# controlled using \showinitializer or \hideinitializer command in the +# documentation regardless of this setting. +# Minimum value: 0, maximum value: 10000, default value: 30. + +MAX_INITIALIZER_LINES = 30 + +# Set the SHOW_USED_FILES tag to NO to disable the list of files generated at +# the bottom of the documentation of classes and structs. If set to YES, the +# list will mention the files that were used to generate the documentation. +# The default value is: YES. + +SHOW_USED_FILES = YES + +# Set the SHOW_FILES tag to NO to disable the generation of the Files page. This +# will remove the Files entry from the Quick Index and from the Folder Tree View +# (if specified). +# The default value is: YES. + +SHOW_FILES = YES + +# Set the SHOW_NAMESPACES tag to NO to disable the generation of the Namespaces +# page. This will remove the Namespaces entry from the Quick Index and from the +# Folder Tree View (if specified). +# The default value is: YES. + +SHOW_NAMESPACES = YES + +# The FILE_VERSION_FILTER tag can be used to specify a program or script that +# doxygen should invoke to get the current version for each file (typically from +# the version control system). Doxygen will invoke the program by executing (via +# popen()) the command command input-file, where command is the value of the +# FILE_VERSION_FILTER tag, and input-file is the name of an input file provided +# by doxygen. Whatever the program writes to standard output is used as the file +# version. For an example see the documentation. + +FILE_VERSION_FILTER = + +# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed +# by doxygen. The layout file controls the global structure of the generated +# output files in an output format independent way. To create the layout file +# that represents doxygen's defaults, run doxygen with the -l option. You can +# optionally specify a file name after the option, if omitted DoxygenLayout.xml +# will be used as the name of the layout file. +# +# Note that if you run doxygen from a directory containing a file called +# DoxygenLayout.xml, doxygen will parse it automatically even if the LAYOUT_FILE +# tag is left empty. + +LAYOUT_FILE = + +# The CITE_BIB_FILES tag can be used to specify one or more bib files containing +# the reference definitions. This must be a list of .bib files. The .bib +# extension is automatically appended if omitted. This requires the bibtex tool +# to be installed. 
See also http://en.wikipedia.org/wiki/BibTeX for more info. +# For LaTeX the style of the bibliography can be controlled using +# LATEX_BIB_STYLE. To use this feature you need bibtex and perl available in the +# search path. See also \cite for info how to create references. + +CITE_BIB_FILES = + +#--------------------------------------------------------------------------- +# Configuration options related to warning and progress messages +#--------------------------------------------------------------------------- + +# The QUIET tag can be used to turn on/off the messages that are generated to +# standard output by doxygen. If QUIET is set to YES this implies that the +# messages are off. +# The default value is: NO. + +QUIET = NO + +# The WARNINGS tag can be used to turn on/off the warning messages that are +# generated to standard error (stderr) by doxygen. If WARNINGS is set to YES +# this implies that the warnings are on. +# +# Tip: Turn warnings on while writing the documentation. +# The default value is: YES. + +WARNINGS = YES + +# If the WARN_IF_UNDOCUMENTED tag is set to YES then doxygen will generate +# warnings for undocumented members. If EXTRACT_ALL is set to YES then this flag +# will automatically be disabled. +# The default value is: YES. + +WARN_IF_UNDOCUMENTED = YES + +# If the WARN_IF_DOC_ERROR tag is set to YES, doxygen will generate warnings for +# potential errors in the documentation, such as not documenting some parameters +# in a documented function, or documenting parameters that don't exist or using +# markup commands wrongly. +# The default value is: YES. + +WARN_IF_DOC_ERROR = YES + +# This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that +# are documented, but have no documentation for their parameters or return +# value. If set to NO, doxygen will only warn about wrong or incomplete +# parameter documentation, but not about the absence of documentation. +# The default value is: NO. + +WARN_NO_PARAMDOC = NO + +# If the WARN_AS_ERROR tag is set to YES then doxygen will immediately stop when +# a warning is encountered. +# The default value is: NO. + +WARN_AS_ERROR = NO + +# The WARN_FORMAT tag determines the format of the warning messages that doxygen +# can produce. The string should contain the $file, $line, and $text tags, which +# will be replaced by the file and line number from which the warning originated +# and the warning text. Optionally the format may contain $version, which will +# be replaced by the version of the file (if it could be obtained via +# FILE_VERSION_FILTER) +# The default value is: $file:$line: $text. + +WARN_FORMAT = "$file:$line: $text" + +# The WARN_LOGFILE tag can be used to specify a file to which warning and error +# messages should be written. If left blank the output is written to standard +# error (stderr). + +WARN_LOGFILE = + +#--------------------------------------------------------------------------- +# Configuration options related to the input files +#--------------------------------------------------------------------------- + +# The INPUT tag is used to specify the files and/or directories that contain +# documented source files. You may enter file names like myfile.cpp or +# directories like /usr/src/myproject. Separate the files or directories with +# spaces. See also FILE_PATTERNS and EXTENSION_MAPPING +# Note: If this tag is empty the current directory is searched. 
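To make the warning options above concrete (ParseHexStub is a made-up stub, not a real function): WARN_IF_DOC_ERROR = YES flags @param entries that do not match an actual parameter, and the \todo line is collected onto the todo list enabled by GENERATE_TODOLIST further up.

    #include <string>

    /** Parses a hex string stub.
     *  @param[in] str     the text to parse; misspelling this name (for example
     *                     "@param strr") would trigger a WARN_IF_DOC_ERROR warning
     *  @param[in] strict  whether to reject whitespace
     *  @return true on success
     *  \todo Reject inputs longer than 100 characters.
     */
    bool ParseHexStub(const std::string& str, bool strict);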
+ +INPUT = src doc/README_doxygen.md + +# This tag can be used to specify the character encoding of the source files +# that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses +# libiconv (or the iconv built into libc) for the transcoding. See the libiconv +# documentation (see: http://www.gnu.org/software/libiconv) for the list of +# possible encodings. +# The default value is: UTF-8. + +INPUT_ENCODING = UTF-8 + +# If the value of the INPUT tag contains directories, you can use the +# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and +# *.h) to filter out the source-files in the directories. +# +# Note that for custom extensions or not directly supported extensions you also +# need to set EXTENSION_MAPPING for the extension otherwise the files are not +# read by doxygen. +# +# If left blank the following patterns are tested:*.c, *.cc, *.cxx, *.cpp, +# *.c++, *.java, *.ii, *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h, +# *.hh, *.hxx, *.hpp, *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc, +# *.m, *.markdown, *.md, *.mm, *.dox, *.py, *.pyw, *.f90, *.f95, *.f03, *.f08, +# *.f, *.for, *.tcl, *.vhd, *.vhdl, *.ucf and *.qsf. + +FILE_PATTERNS = *.c \ + *.cc \ + *.cxx \ + *.cpp \ + *.c++ \ + *.d \ + *.java \ + *.ii \ + *.ixx \ + *.ipp \ + *.i++ \ + *.inl \ + *.h \ + *.hh \ + *.hxx \ + *.hpp \ + *.h++ \ + *.idl \ + *.odl \ + *.cs \ + *.php \ + *.php3 \ + *.inc \ + *.m \ + *.mm \ + *.dox \ + *.py \ + *.f90 \ + *.f \ + *.for \ + *.vhd \ + *.vhdl + +# The RECURSIVE tag can be used to specify whether or not subdirectories should +# be searched for input files as well. +# The default value is: NO. + +RECURSIVE = YES + +# The EXCLUDE tag can be used to specify files and/or directories that should be +# excluded from the INPUT source files. This way you can easily exclude a +# subdirectory from a directory tree whose root is specified with the INPUT tag. +# +# Note that relative paths are relative to the directory from which doxygen is +# run. + +EXCLUDE = src/crc32c \ + src/leveldb \ + src/json + +# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or +# directories that are symbolic links (a Unix file system feature) are excluded +# from the input. +# The default value is: NO. + +EXCLUDE_SYMLINKS = NO + +# If the value of the INPUT tag contains directories, you can use the +# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude +# certain files from those directories. +# +# Note that the wildcards are matched against the file with absolute path, so to +# exclude all test directories for example use the pattern */test/* + +EXCLUDE_PATTERNS = + +# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names +# (namespaces, classes, functions, etc.) that should be excluded from the +# output. The symbol name can be a fully qualified name, a word, or if the +# wildcard * is used, a substring. Examples: ANamespace, AClass, +# AClass::ANamespace, ANamespace::*Test +# +# Note that the wildcards are matched against the file with absolute path, so to +# exclude all test directories use the pattern */test/* + +EXCLUDE_SYMBOLS = boost \ + google + +# The EXAMPLE_PATH tag can be used to specify one or more files or directories +# that contain example code fragments that are included (see the \include +# command). 
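As a sketch of the EXCLUDE_SYMBOLS setting above (the types are invented for illustration), the intent is to keep vendored boost:: and google:: symbols parsed from headers out of the output, while ordinary project symbols are documented as usual.

    namespace boost { struct any_stub {}; }   // dropped: matches EXCLUDE_SYMBOLS "boost"

    namespace wallet {
        //! Documented normally; "wallet" is not listed in EXCLUDE_SYMBOLS.
        struct CoinStub { int value{0}; };
    }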
+ +EXAMPLE_PATH = + +# If the value of the EXAMPLE_PATH tag contains directories, you can use the +# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp and +# *.h) to filter out the source-files in the directories. If left blank all +# files are included. + +EXAMPLE_PATTERNS = * + +# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be +# searched for input files to be used with the \include or \dontinclude commands +# irrespective of the value of the RECURSIVE tag. +# The default value is: NO. + +EXAMPLE_RECURSIVE = NO + +# The IMAGE_PATH tag can be used to specify one or more files or directories +# that contain images that are to be included in the documentation (see the +# \image command). + +IMAGE_PATH = + +# The INPUT_FILTER tag can be used to specify a program that doxygen should +# invoke to filter for each input file. Doxygen will invoke the filter program +# by executing (via popen()) the command: +# +# <filter> <input-file> +# +# where <filter> is the value of the INPUT_FILTER tag, and <input-file> is the +# name of an input file. Doxygen will then use the output that the filter +# program writes to standard output. If FILTER_PATTERNS is specified, this tag +# will be ignored. +# +# Note that the filter must not add or remove lines; it is applied before the +# code is scanned, but not when the output code is generated. If lines are added +# or removed, the anchors will not be placed correctly. +# +# Note that for custom extensions or not directly supported extensions you also +# need to set EXTENSION_MAPPING for the extension otherwise the files are not +# properly processed by doxygen. + +INPUT_FILTER = + +# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern +# basis. Doxygen will compare the file name with each pattern and apply the +# filter if there is a match. The filters are a list of the form: pattern=filter +# (like *.cpp=my_cpp_filter). See INPUT_FILTER for further information on how +# filters are used. If the FILTER_PATTERNS tag is empty or if none of the +# patterns match the file name, INPUT_FILTER is applied. +# +# Note that for custom extensions or not directly supported extensions you also +# need to set EXTENSION_MAPPING for the extension otherwise the files are not +# properly processed by doxygen. + +FILTER_PATTERNS = + +# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using +# INPUT_FILTER) will also be used to filter the input files that are used for +# producing the source files to browse (i.e. when SOURCE_BROWSER is set to YES). +# The default value is: NO. + +FILTER_SOURCE_FILES = NO + +# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file +# pattern. A pattern will override the setting for FILTER_PATTERN (if any) and +# it is also possible to disable source filtering for a specific pattern using +# *.ext= (so without naming a filter). +# This tag requires that the tag FILTER_SOURCE_FILES is set to YES. + +FILTER_SOURCE_PATTERNS = + +# If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that +# is part of the input, its contents will be placed on the main page +# (index.html). This can be useful if you have a project on for instance GitHub +# and want to reuse the introduction page also for the doxygen output. 
+ +USE_MDFILE_AS_MAINPAGE = doc/README_doxygen.md + +#--------------------------------------------------------------------------- +# Configuration options related to source browsing +#--------------------------------------------------------------------------- + +# If the SOURCE_BROWSER tag is set to YES then a list of source files will be +# generated. Documented entities will be cross-referenced with these sources. +# +# Note: To get rid of all source code in the generated output, make sure that +# also VERBATIM_HEADERS is set to NO. +# The default value is: NO. + +SOURCE_BROWSER = YES + +# Setting the INLINE_SOURCES tag to YES will include the body of functions, +# classes and enums directly into the documentation. +# The default value is: NO. + +INLINE_SOURCES = NO + +# Setting the STRIP_CODE_COMMENTS tag to YES will instruct doxygen to hide any +# special comment blocks from generated source code fragments. Normal C, C++ and +# Fortran comments will always remain visible. +# The default value is: YES. + +STRIP_CODE_COMMENTS = YES + +# If the REFERENCED_BY_RELATION tag is set to YES then for each documented +# function all documented functions referencing it will be listed. +# The default value is: NO. + +REFERENCED_BY_RELATION = NO + +# If the REFERENCES_RELATION tag is set to YES then for each documented function +# all documented entities called/used by that function will be listed. +# The default value is: NO. + +REFERENCES_RELATION = NO + +# If the REFERENCES_LINK_SOURCE tag is set to YES and SOURCE_BROWSER tag is set +# to YES then the hyperlinks from functions in REFERENCES_RELATION and +# REFERENCED_BY_RELATION lists will link to the source code. Otherwise they will +# link to the documentation. +# The default value is: YES. + +REFERENCES_LINK_SOURCE = YES + +# If SOURCE_TOOLTIPS is enabled (the default) then hovering a hyperlink in the +# source code will show a tooltip with additional information such as prototype, +# brief description and links to the definition and documentation. Since this +# will make the HTML file larger and loading of large files a bit slower, you +# can opt to disable this feature. +# The default value is: YES. +# This tag requires that the tag SOURCE_BROWSER is set to YES. + +SOURCE_TOOLTIPS = YES + +# If the USE_HTAGS tag is set to YES then the references to source code will +# point to the HTML generated by the htags(1) tool instead of doxygen built-in +# source browser. The htags tool is part of GNU's global source tagging system +# (see http://www.gnu.org/software/global/global.html). You will need version +# 4.8.6 or higher. +# +# To use it do the following: +# - Install the latest version of global +# - Enable SOURCE_BROWSER and USE_HTAGS in the config file +# - Make sure the INPUT points to the root of the source tree +# - Run doxygen as normal +# +# Doxygen will invoke htags (and that will in turn invoke gtags), so these +# tools must be available from the command line (i.e. in the search path). +# +# The result: instead of the source browser generated by doxygen, the links to +# source code will now point to the output of htags. +# The default value is: NO. +# This tag requires that the tag SOURCE_BROWSER is set to YES. + +USE_HTAGS = NO + +# If the VERBATIM_HEADERS tag is set the YES then doxygen will generate a +# verbatim copy of the header file for each class for which an include is +# specified. Set to NO to disable this. +# See also: Section \class. +# The default value is: YES. 
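A small sketch of the source-browsing options above (TwiceStub is an invented name): because SOURCE_BROWSER = YES each documented entity links to a syntax-highlighted listing, and with STRIP_CODE_COMMENTS = YES special comment blocks are removed from that listing while ordinary comments remain visible.

    /// Used for the documentation of TwiceStub(), but stripped from the
    /// generated source fragment because STRIP_CODE_COMMENTS = YES.
    int TwiceStub(int n)
    {
        // An ordinary comment: it stays visible in the source browser listing.
        return 2 * n;
    }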
+ +VERBATIM_HEADERS = YES + +#--------------------------------------------------------------------------- +# Configuration options related to the alphabetical class index +#--------------------------------------------------------------------------- + +# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index of all +# compounds will be generated. Enable this if the project contains a lot of +# classes, structs, unions or interfaces. +# The default value is: YES. + +ALPHABETICAL_INDEX = YES + +# The COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns in +# which the alphabetical index list will be split. +# Minimum value: 1, maximum value: 20, default value: 5. +# This tag requires that the tag ALPHABETICAL_INDEX is set to YES. + +COLS_IN_ALPHA_INDEX = 5 + +# In case all classes in a project start with a common prefix, all classes will +# be put under the same header in the alphabetical index. The IGNORE_PREFIX tag +# can be used to specify a prefix (or a list of prefixes) that should be ignored +# while generating the index headers. +# This tag requires that the tag ALPHABETICAL_INDEX is set to YES. + +IGNORE_PREFIX = + +#--------------------------------------------------------------------------- +# Configuration options related to the HTML output +#--------------------------------------------------------------------------- + +# If the GENERATE_HTML tag is set to YES, doxygen will generate HTML output +# The default value is: YES. + +GENERATE_HTML = YES + +# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. If a +# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of +# it. +# The default directory is: html. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_OUTPUT = html + +# The HTML_FILE_EXTENSION tag can be used to specify the file extension for each +# generated HTML page (for example: .htm, .php, .asp). +# The default value is: .html. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_FILE_EXTENSION = .html + +# The HTML_HEADER tag can be used to specify a user-defined HTML header file for +# each generated HTML page. If the tag is left blank doxygen will generate a +# standard header. +# +# To get valid HTML the header file that includes any scripts and style sheets +# that doxygen needs, which is dependent on the configuration options used (e.g. +# the setting GENERATE_TREEVIEW). It is highly recommended to start with a +# default header using +# doxygen -w html new_header.html new_footer.html new_stylesheet.css +# YourConfigFile +# and then modify the file new_header.html. See also section "Doxygen usage" +# for information on how to generate the default header that doxygen normally +# uses. +# Note: The header is subject to change so you typically have to regenerate the +# default header when upgrading to a newer version of doxygen. For a description +# of the possible markers and block names see the documentation. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_HEADER = + +# The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each +# generated HTML page. If the tag is left blank doxygen will generate a standard +# footer. See HTML_HEADER for more information on how to generate a default +# footer and what special commands can be used inside the footer. See also +# section "Doxygen usage" for information on how to generate the default footer +# that doxygen normally uses. 
+# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_FOOTER = + +# The HTML_STYLESHEET tag can be used to specify a user-defined cascading style +# sheet that is used by each HTML page. It can be used to fine-tune the look of +# the HTML output. If left blank doxygen will generate a default style sheet. +# See also section "Doxygen usage" for information on how to generate the style +# sheet that doxygen normally uses. +# Note: It is recommended to use HTML_EXTRA_STYLESHEET instead of this tag, as +# it is more robust and this tag (HTML_STYLESHEET) will in the future become +# obsolete. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_STYLESHEET = + +# The HTML_EXTRA_STYLESHEET tag can be used to specify additional user-defined +# cascading style sheets that are included after the standard style sheets +# created by doxygen. Using this option one can overrule certain style aspects. +# This is preferred over using HTML_STYLESHEET since it does not replace the +# standard style sheet and is therefore more robust against future updates. +# Doxygen will copy the style sheet files to the output directory. +# Note: The order of the extra style sheet files is of importance (e.g. the last +# style sheet in the list overrules the setting of the previous ones in the +# list). For an example see the documentation. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_EXTRA_STYLESHEET = + +# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or +# other source files which should be copied to the HTML output directory. Note +# that these files will be copied to the base HTML output directory. Use the +# $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these +# files. In the HTML_STYLESHEET file, use the file name only. Also note that the +# files will be copied as-is; there are no commands or markers available. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_EXTRA_FILES = + +# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen +# will adjust the colors in the style sheet and background images according to +# this color. Hue is specified as an angle on a colorwheel, see +# http://en.wikipedia.org/wiki/Hue for more information. For instance the value +# 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300 +# purple, and 360 is red again. +# Minimum value: 0, maximum value: 359, default value: 220. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_COLORSTYLE_HUE = 220 + +# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of the colors +# in the HTML output. For a value of 0 the output will use grayscales only. A +# value of 255 will produce the most vivid colors. +# Minimum value: 0, maximum value: 255, default value: 100. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_COLORSTYLE_SAT = 100 + +# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to the +# luminance component of the colors in the HTML output. Values below 100 +# gradually make the output lighter, whereas values above 100 make the output +# darker. The value divided by 100 is the actual gamma applied, so 80 represents +# a gamma of 0.8, The value 220 represents a gamma of 2.2, and 100 does not +# change the gamma. +# Minimum value: 40, maximum value: 240, default value: 80. +# This tag requires that the tag GENERATE_HTML is set to YES. 
+ +HTML_COLORSTYLE_GAMMA = 80 + +# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML +# page will contain the date and time when the page was generated. Setting this +# to YES can help to show when doxygen was last run and thus if the +# documentation is up to date. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_TIMESTAMP = YES + +# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML +# documentation will contain sections that can be hidden and shown after the +# page has loaded. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_DYNAMIC_SECTIONS = NO + +# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of entries +# shown in the various tree structured indices initially; the user can expand +# and collapse entries dynamically later on. Doxygen will expand the tree to +# such a level that at most the specified number of entries are visible (unless +# a fully collapsed tree already exceeds this amount). So setting the number of +# entries 1 will produce a full collapsed tree by default. 0 is a special value +# representing an infinite number of entries and will result in a full expanded +# tree by default. +# Minimum value: 0, maximum value: 9999, default value: 100. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_INDEX_NUM_ENTRIES = 100 + +# If the GENERATE_DOCSET tag is set to YES, additional index files will be +# generated that can be used as input for Apple's Xcode 3 integrated development +# environment (see: http://developer.apple.com/tools/xcode/), introduced with +# OSX 10.5 (Leopard). To create a documentation set, doxygen will generate a +# Makefile in the HTML output directory. Running make will produce the docset in +# that directory and running make install will install the docset in +# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find it at +# startup. See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html +# for more information. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_DOCSET = NO + +# This tag determines the name of the docset feed. A documentation feed provides +# an umbrella under which multiple documentation sets from a single provider +# (such as a company or product suite) can be grouped. +# The default value is: Doxygen generated docs. +# This tag requires that the tag GENERATE_DOCSET is set to YES. + +DOCSET_FEEDNAME = "Doxygen generated docs" + +# This tag specifies a string that should uniquely identify the documentation +# set bundle. This should be a reverse domain-name style string, e.g. +# com.mycompany.MyDocSet. Doxygen will append .docset to the name. +# The default value is: org.doxygen.Project. +# This tag requires that the tag GENERATE_DOCSET is set to YES. + +DOCSET_BUNDLE_ID = org.bitcoin.Bitcoin-Core + +# The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely identify +# the documentation publisher. This should be a reverse domain-name style +# string, e.g. com.mycompany.MyDocSet.documentation. +# The default value is: org.doxygen.Publisher. +# This tag requires that the tag GENERATE_DOCSET is set to YES. + +DOCSET_PUBLISHER_ID = org.bitcoin.Bitcoin-Core + +# The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher. +# The default value is: Publisher. +# This tag requires that the tag GENERATE_DOCSET is set to YES. 
+ +DOCSET_PUBLISHER_NAME = Publisher + +# If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three +# additional HTML index files: index.hhp, index.hhc, and index.hhk. The +# index.hhp is a project file that can be read by Microsoft's HTML Help Workshop +# (see: http://www.microsoft.com/en-us/download/details.aspx?id=21138) on +# Windows. +# +# The HTML Help Workshop contains a compiler that can convert all HTML output +# generated by doxygen into a single compiled HTML file (.chm). Compiled HTML +# files are now used as the Windows 98 help format, and will replace the old +# Windows help format (.hlp) on all Windows platforms in the future. Compressed +# HTML files also contain an index, a table of contents, and you can search for +# words in the documentation. The HTML workshop also contains a viewer for +# compressed HTML files. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_HTMLHELP = NO + +# The CHM_FILE tag can be used to specify the file name of the resulting .chm +# file. You can add a path in front of the file if the result should not be +# written to the html output directory. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +CHM_FILE = + +# The HHC_LOCATION tag can be used to specify the location (absolute path +# including file name) of the HTML help compiler (hhc.exe). If non-empty, +# doxygen will try to run the HTML help compiler on the generated index.hhp. +# The file has to be specified with full path. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +HHC_LOCATION = + +# The GENERATE_CHI flag controls if a separate .chi index file is generated +# (YES) or that it should be included in the master .chm file (NO). +# The default value is: NO. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +GENERATE_CHI = NO + +# The CHM_INDEX_ENCODING is used to encode HtmlHelp index (hhk), content (hhc) +# and project file content. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +CHM_INDEX_ENCODING = + +# The BINARY_TOC flag controls whether a binary table of contents is generated +# (YES) or a normal table of contents (NO) in the .chm file. Furthermore it +# enables the Previous and Next buttons. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +BINARY_TOC = NO + +# The TOC_EXPAND flag can be set to YES to add extra items for group members to +# the table of contents of the HTML help documentation and to the tree view. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +TOC_EXPAND = NO + +# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and +# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated that +# can be used as input for Qt's qhelpgenerator to generate a Qt Compressed Help +# (.qch) of the generated HTML documentation. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_QHP = NO + +# If the QHG_LOCATION tag is specified, the QCH_FILE tag can be used to specify +# the file name of the resulting .qch file. The path specified is relative to +# the HTML output folder. +# This tag requires that the tag GENERATE_QHP is set to YES. + +QCH_FILE = + +# The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help +# Project output. 
For more information please see Qt Help Project / Namespace +# (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#namespace). +# The default value is: org.doxygen.Project. +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_NAMESPACE = org.doxygen.Project + +# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating Qt +# Help Project output. For more information please see Qt Help Project / Virtual +# Folders (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#virtual- +# folders). +# The default value is: doc. +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_VIRTUAL_FOLDER = doc + +# If the QHP_CUST_FILTER_NAME tag is set, it specifies the name of a custom +# filter to add. For more information please see Qt Help Project / Custom +# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom- +# filters). +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_CUST_FILTER_NAME = + +# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the +# custom filter to add. For more information please see Qt Help Project / Custom +# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom- +# filters). +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_CUST_FILTER_ATTRS = + +# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this +# project's filter section matches. Qt Help Project / Filter Attributes (see: +# http://qt-project.org/doc/qt-4.8/qthelpproject.html#filter-attributes). +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_SECT_FILTER_ATTRS = + +# The QHG_LOCATION tag can be used to specify the location of Qt's +# qhelpgenerator. If non-empty doxygen will try to run qhelpgenerator on the +# generated .qhp file. +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHG_LOCATION = + +# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files will be +# generated, together with the HTML files, they form an Eclipse help plugin. To +# install this plugin and make it available under the help contents menu in +# Eclipse, the contents of the directory containing the HTML and XML files needs +# to be copied into the plugins directory of eclipse. The name of the directory +# within the plugins directory should be the same as the ECLIPSE_DOC_ID value. +# After copying Eclipse needs to be restarted before the help appears. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_ECLIPSEHELP = NO + +# A unique identifier for the Eclipse help plugin. When installing the plugin +# the directory name containing the HTML and XML files should also have this +# name. Each documentation set should have its own identifier. +# The default value is: org.doxygen.Project. +# This tag requires that the tag GENERATE_ECLIPSEHELP is set to YES. + +ECLIPSE_DOC_ID = org.doxygen.Project + +# If you want full control over the layout of the generated HTML pages it might +# be necessary to disable the index and replace it with your own. The +# DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) at top +# of each HTML page. A value of NO enables the index and the value YES disables +# it. Since the tabs in the index contain the same information as the navigation +# tree, you can set this option to YES if you also set GENERATE_TREEVIEW to YES. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. 
+ +DISABLE_INDEX = NO + +# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index +# structure should be generated to display hierarchical information. If the tag +# value is set to YES, a side panel will be generated containing a tree-like +# index structure (just like the one that is generated for HTML Help). For this +# to work a browser that supports JavaScript, DHTML, CSS and frames is required +# (i.e. any modern browser). Windows users are probably better off using the +# HTML help feature. Via custom style sheets (see HTML_EXTRA_STYLESHEET) one can +# further fine-tune the look of the index. As an example, the default style +# sheet generated by doxygen has an example that shows how to put an image at +# the root of the tree instead of the PROJECT_NAME. Since the tree basically has +# the same information as the tab index, you could consider setting +# DISABLE_INDEX to YES when enabling this option. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_TREEVIEW = NO + +# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values that +# doxygen will group on one line in the generated HTML documentation. +# +# Note that a value of 0 will completely suppress the enum values from appearing +# in the overview section. +# Minimum value: 0, maximum value: 20, default value: 4. +# This tag requires that the tag GENERATE_HTML is set to YES. + +ENUM_VALUES_PER_LINE = 4 + +# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be used +# to set the initial width (in pixels) of the frame in which the tree is shown. +# Minimum value: 0, maximum value: 1500, default value: 250. +# This tag requires that the tag GENERATE_HTML is set to YES. + +TREEVIEW_WIDTH = 250 + +# If the EXT_LINKS_IN_WINDOW option is set to YES, doxygen will open links to +# external symbols imported via tag files in a separate window. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +EXT_LINKS_IN_WINDOW = NO + +# Use this tag to change the font size of LaTeX formulas included as images in +# the HTML documentation. When you change the font size after a successful +# doxygen run you need to manually remove any form_*.png images from the HTML +# output directory to force them to be regenerated. +# Minimum value: 8, maximum value: 50, default value: 10. +# This tag requires that the tag GENERATE_HTML is set to YES. + +FORMULA_FONTSIZE = 10 + +# Use the FORMULA_TRANPARENT tag to determine whether or not the images +# generated for formulas are transparent PNGs. Transparent PNGs are not +# supported properly for IE 6.0, but are supported on all modern browsers. +# +# Note that when changing this option you need to delete any form_*.png files in +# the HTML output directory before the changes have effect. +# The default value is: YES. +# This tag requires that the tag GENERATE_HTML is set to YES. + +FORMULA_TRANSPARENT = YES + +# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see +# http://www.mathjax.org) which uses client side Javascript for the rendering +# instead of using pre-rendered bitmaps. Use this if you do not have LaTeX +# installed or if you want to formulas look prettier in the HTML output. When +# enabled you may also need to install MathJax separately and configure the path +# to it using the MATHJAX_RELPATH option. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. 
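As a sketch of how the formula settings above are consumed (CircleAreaStub is an invented function): a \f$ ... \f$ block in a comment is rendered as a small image whose size follows FORMULA_FONTSIZE and whose background follows FORMULA_TRANSPARENT, and it would instead be typeset client-side if USE_MATHJAX (assigned just below) were switched to YES.

    /** Returns the area of a circle of radius r.
     *  Computed as \f$ A = \pi r^2 \f$, which doxygen renders as an image
     *  here, or via MathJax when USE_MATHJAX is enabled.
     */
    double CircleAreaStub(double r) { return 3.14159265358979323846 * r * r; }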
+ +USE_MATHJAX = NO + +# When MathJax is enabled you can set the default output format to be used for +# the MathJax output. See the MathJax site (see: +# http://docs.mathjax.org/en/latest/output.html) for more details. +# Possible values are: HTML-CSS (which is slower, but has the best +# compatibility), NativeMML (i.e. MathML) and SVG. +# The default value is: HTML-CSS. +# This tag requires that the tag USE_MATHJAX is set to YES. + +MATHJAX_FORMAT = HTML-CSS + +# When MathJax is enabled you need to specify the location relative to the HTML +# output directory using the MATHJAX_RELPATH option. The destination directory +# should contain the MathJax.js script. For instance, if the mathjax directory +# is located at the same level as the HTML output directory, then +# MATHJAX_RELPATH should be ../mathjax. The default value points to the MathJax +# Content Delivery Network so you can quickly see the result without installing +# MathJax. However, it is strongly recommended to install a local copy of +# MathJax from http://www.mathjax.org before deployment. +# The default value is: http://cdn.mathjax.org/mathjax/latest. +# This tag requires that the tag USE_MATHJAX is set to YES. + +MATHJAX_RELPATH = http://www.mathjax.org/mathjax + +# The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax +# extension names that should be enabled during MathJax rendering. For example +# MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols +# This tag requires that the tag USE_MATHJAX is set to YES. + +MATHJAX_EXTENSIONS = + +# The MATHJAX_CODEFILE tag can be used to specify a file with javascript pieces +# of code that will be used on startup of the MathJax code. See the MathJax site +# (see: http://docs.mathjax.org/en/latest/output.html) for more details. For an +# example see the documentation. +# This tag requires that the tag USE_MATHJAX is set to YES. + +MATHJAX_CODEFILE = + +# When the SEARCHENGINE tag is enabled doxygen will generate a search box for +# the HTML output. The underlying search engine uses javascript and DHTML and +# should work on any modern browser. Note that when using HTML help +# (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets (GENERATE_DOCSET) +# there is already a search function so this one should typically be disabled. +# For large projects the javascript based search engine can be slow, then +# enabling SERVER_BASED_SEARCH may provide a better solution. It is possible to +# search using the keyboard; to jump to the search box use <access key> + S +# (what the <access key> is depends on the OS and browser, but it is typically +# <CTRL>, <ALT>/