diff --git a/.appveyor.yml b/.appveyor.yml deleted file mode 100644 index eeb2d6590b630..0000000000000 --- a/.appveyor.yml +++ /dev/null @@ -1,88 +0,0 @@ -version: '{branch}.{build}' -skip_tags: true -image: Visual Studio 2019 -configuration: Release -platform: x64 -clone_depth: 5 -environment: - APPVEYOR_SAVE_CACHE_ON_ERROR: true - CLCACHE_SERVER: 1 - PATH: 'C:\Python37-x64;C:\Python37-x64\Scripts;%PATH%' - PYTHONUTF8: 1 - QT_DOWNLOAD_URL: 'https://github.com/sipsorcery/qt_win_binary/releases/download/v1.6/Qt5.9.8_x64_static_vs2019.zip' - QT_DOWNLOAD_HASH: '9a8c6eb20967873785057fdcd329a657c7f922b0af08c5fde105cc597dd37e21' - QT_LOCAL_PATH: 'C:\Qt5.9.8_x64_static_vs2019' - VCPKG_INSTALL_PATH: 'C:\tools\vcpkg\installed' - VCPKG_COMMIT_ID: 'ed0df8ecc4ed7e755ea03e18aaf285fd9b4b4a74' -cache: -- C:\tools\vcpkg\installed -> build_msvc\vcpkg-packages.txt -- C:\Qt5.9.8_x64_static_vs2019 -install: -# Disable zmq test for now since python zmq library on Windows would cause Access violation sometimes. -# - cmd: pip install zmq -# Powershell block below is to install the c++ dependencies via vcpkg. The pseudo code is: -# 1. Check whether the vcpkg install directory exists (note that updating the vcpkg-packages.txt file -# will cause the appveyor cache rules to invalidate the directory) -# 2. If the directory is missing: -# a. Checkout the vcpkg source (including port files) for the specific checkout and build the vcpkg binary, -# b. Install the missing packages. -- ps: | - $env:PACKAGES = Get-Content -Path build_msvc\vcpkg-packages.txt - Write-Host "vcpkg list: $env:PACKAGES" - if(!(Test-Path -Path ($env:VCPKG_INSTALL_PATH))) { - cd c:\tools\vcpkg - $env:GIT_REDIRECT_STDERR = '2>&1' # git is writing non-errors to STDERR when doing git pull. Send to STDOUT instead. - git pull origin master - git checkout $env:VCPKG_COMMIT_ID - .\bootstrap-vcpkg.bat - Add-Content "C:\tools\vcpkg\triplets\$env:PLATFORM-windows-static.cmake" "set(VCPKG_BUILD_TYPE release)" - .\vcpkg install --triplet $env:PLATFORM-windows-static $env:PACKAGES.split() > $null - cd "$env:APPVEYOR_BUILD_FOLDER" - } - else { - Write-Host "required vcpkg packages already installed." - } - c:\tools\vcpkg\vcpkg integrate install -before_build: -# Powershell block below is to download and extract the Qt static libraries. The pseudo code is: -# 1. If the Qt destination directory exists assume it is correct and do nothing. To -# force a fresh install of the packages delete the job's appveyor cache. -# 2. Otherwise: -# a. Download the zip file with the prebuilt Qt static libraries. -# b. Check that the downloaded file matches the expected hash. -# c. Extract the zip file to the specific destination path expected by the msbuild projects. 
-- ps: | - if(!(Test-Path -Path ($env:QT_LOCAL_PATH))) { - Write-Host "Downloading Qt binaries."; - Invoke-WebRequest -Uri $env:QT_DOWNLOAD_URL -Out qtdownload.zip; - Write-Host "Qt binaries successfully downloaded, checking hash against $env:QT_DOWNLOAD_HASH..."; - if((Get-FileHash qtdownload.zip).Hash -eq $env:QT_DOWNLOAD_HASH) { - Expand-Archive qtdownload.zip -DestinationPath $env:QT_LOCAL_PATH; - Write-Host "Qt binary download matched the expected hash."; - } - else { - Write-Host "ERROR: Qt binary download did not match the expected hash."; - Exit-AppveyorBuild; - } - } - else { - Write-Host "Qt binaries already present."; - } -- cmd: python build_msvc\msvc-autogen.py -build_script: -- cmd: msbuild /p:TrackFileAccess=false build_msvc\bitcoin.sln /m /v:q /nologo -after_build: -#- 7z a bitcoin-%APPVEYOR_BUILD_VERSION%.zip %APPVEYOR_BUILD_FOLDER%\build_msvc\%platform%\%configuration%\*.exe -test_script: -- cmd: src\test_bitcoin.exe -l test_suite -- cmd: src\bench_bitcoin.exe -evals=1 -scaling=0 > NUL -- ps: python test\util\bitcoin-util-test.py -- cmd: python test\util\rpcauth-test.py -# Fee estimation test failing on appveyor with: WinError 10048] Only one usage of each socket address (protocol/network address/port) is normally permitted. -# functional tests disabled for now. See -# https://github.com/bitcoin/bitcoin/pull/18626#issuecomment-613396202 -# https://github.com/bitcoin/bitcoin/issues/18623 -# - cmd: python test\functional\test_runner.py --ci --quiet --combinedlogslen=4000 --failfast --exclude feature_fee_estimation -artifacts: -#- path: bitcoin-%APPVEYOR_BUILD_VERSION%.zip -deploy: off diff --git a/.cirrus.yml b/.cirrus.yml index b839bfa5fb33f..7f7a882ceef84 100644 --- a/.cirrus.yml +++ b/.cirrus.yml @@ -1,68 +1,213 @@ +env: # Global defaults + CIRRUS_CLONE_DEPTH: 1 + CIRRUS_LOG_TIMESTAMP: true + MAKEJOBS: "-j10" + TEST_RUNNER_PORT_MIN: "14000" # Must be larger than 12321, which is used for the http cache. See https://cirrus-ci.org/guide/writing-tasks/#http-cache + CI_FAILFAST_TEST_LEAVE_DANGLING: "1" # Cirrus CI does not care about dangling processes and setting this variable avoids killing the CI script itself on error + +# A self-hosted machine(s) can be used via Cirrus CI. It can be configured with +# multiple users to run tasks in parallel. No sudo permission is required. +# +# https://cirrus-ci.org/guide/persistent-workers/ +# +# Generally, a persistent worker must run Ubuntu 23.04+ or Debian 12+. +# +# The following specific types should exist, with the following requirements: +# - small: For an x86_64 machine, with at least 2 vCPUs and 8 GB of memory. +# - medium: For an x86_64 machine, with at least 4 vCPUs and 16 GB of memory. +# - arm64: For an aarch64 machine, with at least 2 vCPUs and 8 GB of memory. +# +# CI jobs for the latter configuration can be run on x86_64 hardware +# by installing qemu-user-static, which works out of the box with +# podman or docker. Background: https://stackoverflow.com/a/72890225/313633 +# +# The above machine types are matched to each task by their label. Refer to the +# Cirrus CI docs for more details. +# +# When a contributor maintains a fork of the repo, any pull request they make +# to their own fork, or to the main repository, will trigger two CI runs: +# one for the branch push and one for the pull request. 
+# This can be avoided by setting SKIP_BRANCH_PUSH=true as a custom env variable
+# in Cirrus repository settings, accessible from
+# https://cirrus-ci.com/github/my-organization/my-repository
+#
+# On machines that are persisted between CI jobs, RESTART_CI_DOCKER_BEFORE_RUN=1
+# ensures that previous containers and artifacts are cleared before each run.
+# This requires installing Podman instead of Docker.
+#
+# Furthermore:
+# - podman-docker-4.1+ is required due to the bugfix in 4.1
+#   (https://github.com/bitcoin/bitcoin/pull/21652#issuecomment-1657098200)
+# - The ./ci/ dependencies (with cirrus-cli) should be installed. One-liner example
+#   for a single user setup with sudo permission:
+#
+# ```
+# apt update && apt install git screen python3 bash podman-docker uidmap slirp4netns curl -y && curl -L -o cirrus "https://github.com/cirruslabs/cirrus-cli/releases/latest/download/cirrus-linux-$(dpkg --print-architecture)" && mv cirrus /usr/local/bin/cirrus && chmod +x /usr/local/bin/cirrus
+# ```
+#
+# - There are no strict requirements on the hardware. Having fewer CPU threads
+#   than recommended merely causes the CI script to run slower.
+#   To avoid rare and intermittent OOM due to short memory usage spikes,
+#   it is recommended to add (and persist) swap:
+#
+# ```
+# fallocate -l 16G /swapfile_ci && chmod 600 /swapfile_ci && mkswap /swapfile_ci && swapon /swapfile_ci && ( echo '/swapfile_ci none swap sw 0 0' | tee -a /etc/fstab )
+# ```
+#
+# - To register the persistent worker, open a `screen` session and run:
+#
+# ```
+# RESTART_CI_DOCKER_BEFORE_RUN=1 screen cirrus worker run --labels type=todo_fill_in_type --token todo_fill_in_token
+# ```
+
+# https://cirrus-ci.org/guide/tips-and-tricks/#sharing-configuration-between-tasks
+filter_template: &FILTER_TEMPLATE
+  # Allow forks to specify SKIP_BRANCH_PUSH=true and skip CI runs when a branch is pushed,
+  # but still run CI when a PR is created.
+  # https://cirrus-ci.org/guide/writing-tasks/#conditional-task-execution
+  skip: $SKIP_BRANCH_PUSH == "true" && $CIRRUS_PR == ""
+  stateful: false  # https://cirrus-ci.org/guide/writing-tasks/#stateful-tasks
+
+base_template: &BASE_TEMPLATE
+  << : *FILTER_TEMPLATE
+  merge_base_script:
+    # Require git (used in fingerprint_script).
+ - git --version || ( apt-get update && apt-get install -y git ) + - if [ "$CIRRUS_PR" = "" ]; then exit 0; fi + - git fetch --depth=1 $CIRRUS_REPO_CLONE_URL "pull/${CIRRUS_PR}/merge" + - git checkout FETCH_HEAD # Use merged changes to detect silent merge conflicts + # Also, the merge commit is used to lint COMMIT_RANGE="HEAD~..HEAD" + +main_template: &MAIN_TEMPLATE + timeout_in: 120m # https://cirrus-ci.org/faq/#instance-timed-out + ci_script: + - ./ci/test_run_all.sh + +global_task_template: &GLOBAL_TASK_TEMPLATE + << : *BASE_TEMPLATE + << : *MAIN_TEMPLATE + +compute_credits_template: &CREDITS_TEMPLATE + # https://cirrus-ci.org/pricing/#compute-credits + # Only use credits for pull requests to the main repo + use_compute_credits: $CIRRUS_REPO_FULL_NAME == 'bitcoin/bitcoin' && $CIRRUS_PR != "" + +task: + name: 'lint' + << : *BASE_TEMPLATE + container: + image: debian:bookworm + cpu: 1 + memory: 1G + # For faster CI feedback, immediately schedule the linters + << : *CREDITS_TEMPLATE + test_runner_cache: + folder: "/lint_test_runner" + fingerprint_script: echo $CIRRUS_TASK_NAME $(git rev-parse HEAD:test/lint/test_runner) + python_cache: + folder: "/python_build" + fingerprint_script: cat .python-version /etc/os-release + unshallow_script: + - git fetch --unshallow --no-tags + lint_script: + - ./ci/lint_run_all.sh + task: - name: "FreeBsd 12.1 amd64 [GOAL: install] [no depends, only system libs]" - freebsd_instance: - image_family: freebsd-12-1 # https://cirrus-ci.org/guide/FreeBSD/ - cpu: 8 - memory: 8G - timeout_in: 60m + name: 'tidy' + << : *GLOBAL_TASK_TEMPLATE + persistent_worker: + labels: + type: medium env: - MAKEJOBS: "-j9" - CONFIGURE_OPTS: "--disable-dependency-tracking" - GOAL: "install" - TEST_RUNNER_PORT_MIN: "14000" # Must be larger than 12321, which is used for the http cache. See https://cirrus-ci.org/guide/writing-tasks/#http-cache - CCACHE_SIZE: "200M" - CCACHE_COMPRESS: 1 - CCACHE_DIR: "/tmp/ccache_dir" - ccache_cache: - folder: "/tmp/ccache_dir" - install_script: - - pkg install -y autoconf automake boost-libs git gmake libevent libtool pkgconf python3 ccache - - ./contrib/install_db4.sh $(pwd) - - ccache --max-size=${CCACHE_SIZE} - configure_script: - - ./autogen.sh - - ./configure ${CONFIGURE_OPTS} BDB_LIBS="-L$(pwd)/db4/lib -ldb_cxx-4.8" BDB_CFLAGS="-I$(pwd)/db4/include" || ( cat config.log && false) - make_script: - - gmake ${MAKEJOBS} ${GOAL} || ( echo "Build failure. Verbose build follows." 
&& gmake ${GOAL} V=1 ; false ) - check_script: - - gmake check ${MAKEJOBS} VERBOSE=1 - functional_test_script: - - ./test/functional/test_runner.py --jobs 9 --ci --extended --exclude feature_dbcrash --combinedlogslen=1000 --quiet --failfast -#task: -# name: "Windows" -# windows_container: -# image: cirrusci/windowsservercore:2019 -# env: -# CIRRUS_SHELL: powershell -# PATH: 'C:\Python37;C:\Python37\Scripts;%PATH%' -# PYTHONUTF8: 1 -# QT_DOWNLOAD_URL: 'https://github.com/sipsorcery/qt_win_binary/releases/download/v1.6/Qt5.9.8_x64_static_vs2019.zip' -# QT_DOWNLOAD_HASH: '9a8c6eb20967873785057fdcd329a657c7f922b0af08c5fde105cc597dd37e21' -# QT_LOCAL_PATH: 'C:\Qt5.9.8_x64_static_vs2019' -# VCPKG_INSTALL_PATH: 'C:\tools\vcpkg\installed' -# VCPKG_COMMIT_ID: 'ed0df8ecc4ed7e755ea03e18aaf285fd9b4b4a74' -# install_script: -# - choco install python --version=3.7.7 -y + FILE_ENV: "./ci/test/00_setup_env_native_tidy.sh" + task: - name: "x86_64 Linux [GOAL: install] [bionic] [Using ./ci/ system]" - container: - image: ubuntu:18.04 - cpu: 8 - memory: 8G - timeout_in: 60m + name: 'ARM, unit tests, no functional tests' + << : *GLOBAL_TASK_TEMPLATE + persistent_worker: + labels: + type: arm64 # Use arm64 worker to sidestep qemu and avoid a slow CI: https://github.com/bitcoin/bitcoin/pull/28087#issuecomment-1649399453 env: - MAKEJOBS: "-j9" - DANGER_RUN_CI_ON_HOST: "1" - TEST_RUNNER_PORT_MIN: "14000" # Must be larger than 12321, which is used for the http cache. See https://cirrus-ci.org/guide/writing-tasks/#http-cache - CCACHE_SIZE: "200M" - CCACHE_DIR: "/tmp/ccache_dir" - ccache_cache: - folder: "/tmp/ccache_dir" - depends_built_cache: - folder: "/tmp/cirrus-ci-build/depends/built" - install_script: - - apt-get update - - apt-get -y install git bash ccache - - ccache --max-size=${CCACHE_SIZE} - ci_script: - - ./ci/test_run_all.sh + FILE_ENV: "./ci/test/00_setup_env_arm.sh" + +task: + name: 'Win64, unit tests, no gui tests, no functional tests' + << : *GLOBAL_TASK_TEMPLATE + persistent_worker: + labels: + type: small + env: + FILE_ENV: "./ci/test/00_setup_env_win64.sh" + +task: + name: '32-bit CentOS, dash, gui' + << : *GLOBAL_TASK_TEMPLATE + persistent_worker: + labels: + type: small + env: + FILE_ENV: "./ci/test/00_setup_env_i686_centos.sh" + +task: + name: 'previous releases, depends DEBUG' + << : *GLOBAL_TASK_TEMPLATE + persistent_worker: + labels: + type: small + env: + FILE_ENV: "./ci/test/00_setup_env_native_previous_releases.sh" + +task: + name: 'TSan, depends, gui' + << : *GLOBAL_TASK_TEMPLATE + persistent_worker: + labels: + type: medium + env: + FILE_ENV: "./ci/test/00_setup_env_native_tsan.sh" + +task: + name: 'MSan, depends' + << : *GLOBAL_TASK_TEMPLATE + persistent_worker: + labels: + type: small + timeout_in: 300m # Use longer timeout for the *rare* case where a full build (llvm + msan + depends + ...) needs to be done. 
+ env: + FILE_ENV: "./ci/test/00_setup_env_native_msan.sh" + +task: + name: 'fuzzer,address,undefined,integer, no depends' + << : *GLOBAL_TASK_TEMPLATE + persistent_worker: + labels: + type: medium + env: + FILE_ENV: "./ci/test/00_setup_env_native_fuzz.sh" + +task: + name: 'multiprocess, i686, DEBUG' + << : *GLOBAL_TASK_TEMPLATE + persistent_worker: + labels: + type: medium + env: + FILE_ENV: "./ci/test/00_setup_env_i686_multiprocess.sh" + +task: + name: 'no wallet, libbitcoinkernel' + << : *GLOBAL_TASK_TEMPLATE + persistent_worker: + labels: + type: small + env: + FILE_ENV: "./ci/test/00_setup_env_native_nowallet_libbitcoinkernel.sh" + +task: + name: 'macOS-cross, gui, no tests' + << : *GLOBAL_TASK_TEMPLATE + persistent_worker: + labels: + type: small + env: + FILE_ENV: "./ci/test/00_setup_env_mac_cross.sh" diff --git a/.editorconfig b/.editorconfig new file mode 100644 index 0000000000000..c5f3028c50366 --- /dev/null +++ b/.editorconfig @@ -0,0 +1,26 @@ +# This is the top-most EditorConfig file. +root = true + +# For all files. +[*] +charset = utf-8 +end_of_line = lf +indent_style = space +insert_final_newline = true +trim_trailing_whitespace = true + +# Source code files +[*.{h,cpp,rs,py,sh}] +indent_size = 4 + +# .cirrus.yml, etc. +[*.yml] +indent_size = 2 + +# Makefiles (only relevant for depends build) +[Makefile] +indent_style = tab + +# CMake files +[{CMakeLists.txt,*.cmake,*.cmake.in}] +indent_size = 2 diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md deleted file mode 100644 index 35b42424adb3f..0000000000000 --- a/.github/ISSUE_TEMPLATE.md +++ /dev/null @@ -1,23 +0,0 @@ - - - - - - - - - - - - - - - - - diff --git a/.github/ISSUE_TEMPLATE/bug.yml b/.github/ISSUE_TEMPLATE/bug.yml new file mode 100644 index 0000000000000..83922b54cbf49 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug.yml @@ -0,0 +1,93 @@ +name: Bug report +description: Submit a new bug report. +labels: [bug] +body: + - type: markdown + attributes: + value: | + ## This issue tracker is only for technical issues related to Bitcoin Core. + + * General bitcoin questions and/or support requests should use Bitcoin StackExchange at https://bitcoin.stackexchange.com. + * For reporting security issues, please read instructions at https://bitcoincore.org/en/contact/. + * If the node is "stuck" during sync or giving "block checksum mismatch" errors, please ensure your hardware is stable by running `memtest` and observe CPU temperature with a load-test tool such as `linpack` before creating an issue. + + ---- + - type: checkboxes + attributes: + label: Is there an existing issue for this? + description: Please search to see if an issue already exists for the bug you encountered. + options: + - label: I have searched the existing issues + required: true + - type: textarea + id: current-behaviour + attributes: + label: Current behaviour + description: Tell us what went wrong + validations: + required: true + - type: textarea + id: expected-behaviour + attributes: + label: Expected behaviour + description: Tell us what you expected to happen + validations: + required: true + - type: textarea + id: reproduction-steps + attributes: + label: Steps to reproduce + description: | + Tell us how to reproduce your bug. Please attach related screenshots if necessary. 
+ * Run-time or compile-time configuration options + * Actions taken + validations: + required: true + - type: textarea + id: logs + attributes: + label: Relevant log output + description: | + Please copy and paste any relevant log output or attach a debug log file. + + You can find the debug.log in your [data dir.](https://github.com/bitcoin/bitcoin/blob/master/doc/files.md#data-directory-location) + + Please be aware that the debug log might contain personally identifying information. + validations: + required: false + - type: dropdown + attributes: + label: How did you obtain Bitcoin Core + multiple: false + options: + - Compiled from source + - Pre-built binaries + - Package manager + - Other + validations: + required: true + - type: input + id: core-version + attributes: + label: What version of Bitcoin Core are you using? + description: Run `bitcoind --version` or in Bitcoin-QT use `Help > About Bitcoin Core` + placeholder: e.g. v24.0.1 or master@e1bf547 + validations: + required: true + - type: input + id: os + attributes: + label: Operating system and version + placeholder: e.g. "MacOS Ventura 13.2" or "Ubuntu 22.04 LTS" + validations: + required: true + - type: textarea + id: machine-specs + attributes: + label: Machine specifications + description: | + What are the specifications of the host machine? + e.g. OS/CPU and disk type, network connectivity + validations: + required: false + diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md deleted file mode 100644 index fb91208954ea9..0000000000000 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ /dev/null @@ -1,41 +0,0 @@ ---- -name: Bug report -about: Create a report to help us improve (use this for suspected bugs only, if not sure, open a regular issue below) -title: '' -labels: Bug -assignees: '' - ---- - - - - - -**Expected behavior** - - - -**Actual behavior** - - - -**To reproduce** - - - -**System information** - - - - - - - - - diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml new file mode 100644 index 0000000000000..40370284a6d8f --- /dev/null +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -0,0 +1,8 @@ +blank_issues_enabled: true +contact_links: + - name: Bitcoin Core Security Policy + url: https://github.com/bitcoin/bitcoin/blob/master/SECURITY.md + about: View security policy + - name: Bitcoin Core Developers + url: https://bitcoincore.org + about: Bitcoin Core homepage diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md deleted file mode 100644 index 2d5685185ea36..0000000000000 --- a/.github/ISSUE_TEMPLATE/feature_request.md +++ /dev/null @@ -1,20 +0,0 @@ ---- -name: Feature request -about: Suggest an idea for this project -title: '' -labels: Feature -assignees: '' - ---- - -**Is your feature request related to a problem? Please describe.** - - -**Describe the solution you'd like** - - -**Describe alternatives you've considered** - - -**Additional context** - diff --git a/.github/ISSUE_TEMPLATE/feature_request.yml b/.github/ISSUE_TEMPLATE/feature_request.yml new file mode 100644 index 0000000000000..4622fd9819198 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature_request.yml @@ -0,0 +1,36 @@ +name: Feature Request +description: Suggest an idea for this project. +labels: [Feature] +body: + - type: textarea + id: feature + attributes: + label: Please describe the feature you'd like to see added. + description: Attach screenshots or logs if applicable. 
+ validations: + required: true + - type: textarea + id: related-problem + attributes: + label: Is your feature related to a problem, if so please describe it. + description: Attach screenshots or logs if applicable. + validations: + required: false + - type: textarea + id: solution + attributes: + label: Describe the solution you'd like + validations: + required: false + - type: textarea + id: alternatives + attributes: + label: Describe any alternatives you've considered + validations: + required: false + - type: textarea + id: additional-context + attributes: + label: Please leave any additional context + validations: + required: false diff --git a/.github/ISSUE_TEMPLATE/good_first_issue.md b/.github/ISSUE_TEMPLATE/good_first_issue.md deleted file mode 100644 index c441f7a307a97..0000000000000 --- a/.github/ISSUE_TEMPLATE/good_first_issue.md +++ /dev/null @@ -1,21 +0,0 @@ ---- -name: Good first issue -about: '(Regular devs only): Suggest a new good first issue' -title: '' -labels: good first issue -assignees: '' - ---- - - -#### Useful skills: - - - -#### Want to work on this issue? - -The purpose of the `good first issue` label is to highlight which issues are suitable for a new contributor without a deep understanding of the codebase. - -You do not need to request permission to start working on this. You are encouraged to comment on the issue if you are planning to work on it. This will help other contributors monitor which issues are actively being addressed and is also an effective way to request assistance if and when you need it. - -For guidance on contributing, please read [CONTRIBUTING.md](https://github.com/bitcoin/bitcoin/blob/master/CONTRIBUTING.md) before opening your pull request. diff --git a/.github/ISSUE_TEMPLATE/good_first_issue.yml b/.github/ISSUE_TEMPLATE/good_first_issue.yml new file mode 100644 index 0000000000000..133937c011af6 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/good_first_issue.yml @@ -0,0 +1,44 @@ +name: Good First Issue +description: (Regular devs only) Suggest a new good first issue +labels: [good first issue] +body: + - type: markdown + attributes: + value: | + Please add the label "good first issue" manually before or after opening + + A good first issue is an uncontroversial issue, that has a relatively unique and obvious solution + + Motivate the issue and explain the solution briefly + - type: textarea + id: motivation + attributes: + label: Motivation + description: Motivate the issue + validations: + required: true + - type: textarea + id: solution + attributes: + label: Possible solution + description: Describe a possible solution + validations: + required: false + - type: textarea + id: useful-skills + attributes: + label: Useful Skills + description: For example, “`std::thread`”, “Qt5 GUI and async GUI design” or “basic understanding of Bitcoin mining and the Bitcoin Core RPC interface”. + value: | + * Compiling Bitcoin Core from source + * Running the C++ unit tests and the Python functional tests + * ... + - type: textarea + attributes: + label: Guidance for new contributors + description: Please leave this to automatically add the footer for new contributors + value: | + Want to work on this issue? + + For guidance on contributing, please read [CONTRIBUTING.md](https://github.com/bitcoin/bitcoin/blob/master/CONTRIBUTING.md) before opening your pull request. 
+ diff --git a/.github/ISSUE_TEMPLATE/gui_issue.yml b/.github/ISSUE_TEMPLATE/gui_issue.yml new file mode 100644 index 0000000000000..4fe578e9b5b9c --- /dev/null +++ b/.github/ISSUE_TEMPLATE/gui_issue.yml @@ -0,0 +1,18 @@ +name: Issue or feature request related to the GUI +description: Any report, issue or feature request related to the GUI +labels: [GUI] +body: +- type: checkboxes + id: acknowledgement + attributes: + label: Issues, reports or feature requests related to the GUI should be opened directly on the GUI repo + description: https://github.com/bitcoin-core/gui/issues/ + options: + - label: I still think this issue should be opened here + required: true +- type: textarea + id: gui-request + attributes: + label: Report + validations: + required: true diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index d2c3b233751f8..ae92fc78f2d1c 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -3,6 +3,10 @@ Pull requests without a rationale and clear improvement may be closed immediately. + +GUI-related pull requests should be opened against +https://github.com/bitcoin-core/gui +first. See CONTRIBUTING.md --> - $(WindowsTargetPlatformVersion_10).0 - $(WindowsTargetPlatformVersion_10) - - - - - Release - x64 - - - Debug - x64 - - - Release - Win32 - - - Debug - Win32 - - - - - true - false - true - v142 - Unicode - $(SolutionDir)$(Platform)\$(Configuration)\$(ProjectName)\ - $(Platform)\$(Configuration)\$(ProjectName)\ - - - false - true - false - v142 - Unicode - $(SolutionDir)$(Platform)\$(Configuration)\$(ProjectName)\ - $(Platform)\$(Configuration)\$(ProjectName)\ - - - - - MaxSpeed - true - true - true - MultiThreaded - - - true - true - - - - - - Disabled - _DEBUG;%(PreprocessorDefinitions) - true - MultiThreadedDebug - /bigobj %(AdditionalOptions) - - - - - - MaxSpeed - true - true - true - MultiThreaded - - - true - true - - - - - - Disabled - _DEBUG;%(PreprocessorDefinitions) - true - MultiThreadedDebug - /bigobj %(AdditionalOptions) - - - - - - Level3 - NotUsing - /utf-8 %(AdditionalOptions) - 4018;4221;4244;4267;4334;4715;4805;4834 - true - ZMQ_STATIC;NOMINMAX;WIN32;HAVE_CONFIG_H;_CRT_SECURE_NO_WARNINGS;_SCL_SECURE_NO_WARNINGS;_CONSOLE;_WIN32_WINNT=0x0601;%(PreprocessorDefinitions) - ..\..\src;..\..\src\univalue\include;..\..\src\secp256k1\include;..\..\src\leveldb\include;..\..\src\leveldb\helpers\memenv;%(AdditionalIncludeDirectories) - - - Console - true - Iphlpapi.lib;ws2_32.lib;Shlwapi.lib;kernel32.lib;user32.lib;gdi32.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies) - - - /ignore:4221 - - - - diff --git a/build_msvc/common.qt.init.vcxproj b/build_msvc/common.qt.init.vcxproj deleted file mode 100644 index 42150a2310377..0000000000000 --- a/build_msvc/common.qt.init.vcxproj +++ /dev/null @@ -1,16 +0,0 @@ - - - - - C:\Qt5.9.8_x64_static_vs2019 - $(QtBaseDir)\plugins - $(QtBaseDir)\lib - $(QtBaseDir)\include - $(QtIncludeDir);$(QtIncludeDir)\QtNetwork;$(QtIncludeDir)\QtCore;$(QtIncludeDir)\QtWidgets;$(QtIncludeDir)\QtGui; - .\QtGeneratedFiles\qt - $(QtBaseDir)\bin - 
$(QtPluginsLibraryDir)\platforms\qminimal.lib;$(QtPluginsLibraryDir)\platforms\qwindows.lib;$(QtLibraryDir)\qtfreetype.lib;$(QtLibraryDir)\qtharfbuzz.lib;$(QtLibraryDir)\qtlibpng.lib;$(QtLibraryDir)\qtpcre2.lib;$(QtLibraryDir)\Qt5AccessibilitySupport.lib;$(QtLibraryDir)\Qt5Core.lib;$(QtLibraryDir)\Qt5Concurrent.lib;$(QtLibraryDir)\Qt5EventDispatcherSupport.lib;$(QtLibraryDir)\Qt5FontDatabaseSupport.lib;$(QtLibraryDir)\Qt5Gui.lib;$(QtLibraryDir)\Qt5Network.lib;$(QtLibraryDir)\Qt5PlatformCompositorSupport.lib;$(QtLibraryDir)\Qt5ThemeSupport.lib;$(QtLibraryDir)\Qt5Widgets.lib;$(QtLibraryDir)\Qt5WinExtras.lib;$(QtLibraryDir)\qtmain.lib;userenv.lib;netapi32.lib;imm32.lib;Dwmapi.lib;version.lib;winmm.lib;UxTheme.lib - $(QtPluginsLibraryDir)\platforms\qwindowsd.lib;$(QtPluginsLibraryDir)\platforms\qminimald.lib;$(QtLibraryDir)\*d.lib;crypt32.lib;userenv.lib;netapi32.lib;imm32.lib;Dwmapi.lib;version.lib;winmm.lib;UxTheme.lib - - - diff --git a/build_msvc/common.vcxproj b/build_msvc/common.vcxproj deleted file mode 100644 index 4bbcc3767f63b..0000000000000 --- a/build_msvc/common.vcxproj +++ /dev/null @@ -1,12 +0,0 @@ - - -$(BuildDependsOn);CopyBuildArtifacts - - - - - - - - - diff --git a/build_msvc/libbitcoin_cli/libbitcoin_cli.vcxproj.in b/build_msvc/libbitcoin_cli/libbitcoin_cli.vcxproj.in deleted file mode 100644 index 620df72a2f764..0000000000000 --- a/build_msvc/libbitcoin_cli/libbitcoin_cli.vcxproj.in +++ /dev/null @@ -1,16 +0,0 @@ - - - - - {0667528C-D734-4009-ADF9-C0D6C4A5A5A6} - - - StaticLibrary - - -@SOURCE_FILES@ - - - - - diff --git a/build_msvc/libbitcoin_common/libbitcoin_common.vcxproj.in b/build_msvc/libbitcoin_common/libbitcoin_common.vcxproj.in deleted file mode 100644 index b47d62b29587e..0000000000000 --- a/build_msvc/libbitcoin_common/libbitcoin_common.vcxproj.in +++ /dev/null @@ -1,16 +0,0 @@ - - - - - {7C87E378-DF58-482E-AA2F-1BC129BC19CE} - - - StaticLibrary - - -@SOURCE_FILES@ - - - - - diff --git a/build_msvc/libbitcoin_crypto/libbitcoin_crypto.vcxproj.in b/build_msvc/libbitcoin_crypto/libbitcoin_crypto.vcxproj.in deleted file mode 100644 index 32cb75bf871c1..0000000000000 --- a/build_msvc/libbitcoin_crypto/libbitcoin_crypto.vcxproj.in +++ /dev/null @@ -1,16 +0,0 @@ - - - - - {6190199C-6CF4-4DAD-BFBD-93FA72A760C1} - - - StaticLibrary - - -@SOURCE_FILES@ - - - - - diff --git a/build_msvc/libbitcoin_qt/libbitcoin_qt.vcxproj b/build_msvc/libbitcoin_qt/libbitcoin_qt.vcxproj deleted file mode 100644 index 992f64ec2e01d..0000000000000 --- a/build_msvc/libbitcoin_qt/libbitcoin_qt.vcxproj +++ /dev/null @@ -1,230 +0,0 @@ - - - - - - {2B4ABFF8-D1FD-4845-88C9-1F3C0A6512BF} - StaticLibrary - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - _AMD64_;%(PreprocessorDefinitions) - $(QtIncludes);$(GeneratedFilesOutDir)\..;%(AdditionalIncludeDirectories) - - - - - - _AMD64_;%(PreprocessorDefinitions) - $(QtIncludes);$(GeneratedFilesOutDir)\..;%(AdditionalIncludeDirectories) - - - - - - _X86_;%(PreprocessorDefinitions) - $(QtIncludes);$(GeneratedFilesOutDir)\..;%(AdditionalIncludeDirectories) - - - - - - _X86_;%(PreprocessorDefinitions) - $(QtIncludes);$(GeneratedFilesOutDir)\..;%(AdditionalIncludeDirectories) - - - - - - - - - - - - - - - - - There was an error executing the libbitcoin_qt moc code include generation task. 
- - - - - - - - - - There was an error executing the libbitcoin_qt moc header generation task. - - - - - - - - - - There was an error executing the libbitcoin_qt forms header generation task. - - - - - - - - - There was an error executing the libbitcoin_qt translation file generation task. - - - - - - - - There was an error executing the libbitcoin_qt resource code generation task. - - - - - - - - - - - - - - - moccode; - mocheader; - forms; - translation; - resource; - $(BuildDependsOn); - - - - - qtclean; - $(CleanDependsOn); - - - - diff --git a/build_msvc/libbitcoin_server/libbitcoin_server.vcxproj.in b/build_msvc/libbitcoin_server/libbitcoin_server.vcxproj.in deleted file mode 100644 index 58e90dbaeb438..0000000000000 --- a/build_msvc/libbitcoin_server/libbitcoin_server.vcxproj.in +++ /dev/null @@ -1,19 +0,0 @@ - - - - - {460FEE33-1FE1-483F-B3BF-931FF8E969A5} - - - StaticLibrary - - -@SOURCE_FILES@ - - $(IntDir)wallet_init.obj - - - - - - \ No newline at end of file diff --git a/build_msvc/libbitcoin_util/libbitcoin_util.vcxproj.in b/build_msvc/libbitcoin_util/libbitcoin_util.vcxproj.in deleted file mode 100644 index 6ec40461c2adc..0000000000000 --- a/build_msvc/libbitcoin_util/libbitcoin_util.vcxproj.in +++ /dev/null @@ -1,17 +0,0 @@ - - - - - {B53A5535-EE9D-4C6F-9A26-F79EE3BC3754} - - - StaticLibrary - - - -@SOURCE_FILES@ - - - - - diff --git a/build_msvc/libbitcoin_wallet/libbitcoin_wallet.vcxproj.in b/build_msvc/libbitcoin_wallet/libbitcoin_wallet.vcxproj.in deleted file mode 100644 index 9c8279c72abad..0000000000000 --- a/build_msvc/libbitcoin_wallet/libbitcoin_wallet.vcxproj.in +++ /dev/null @@ -1,16 +0,0 @@ - - - - - {93B86837-B543-48A5-A89B-7C87ABB77DF2} - - - StaticLibrary - - -@SOURCE_FILES@ - - - - - diff --git a/build_msvc/libbitcoin_wallet_tool/libbitcoin_wallet_tool.vcxproj.in b/build_msvc/libbitcoin_wallet_tool/libbitcoin_wallet_tool.vcxproj.in deleted file mode 100644 index 1a6b7b6b92670..0000000000000 --- a/build_msvc/libbitcoin_wallet_tool/libbitcoin_wallet_tool.vcxproj.in +++ /dev/null @@ -1,16 +0,0 @@ - - - - - {F91AC55E-6F5E-4C58-9AC5-B40DB7DEEF93} - - - StaticLibrary - - -@SOURCE_FILES@ - - - - - diff --git a/build_msvc/libbitcoin_zmq/libbitcoin_zmq.vcxproj.in b/build_msvc/libbitcoin_zmq/libbitcoin_zmq.vcxproj.in deleted file mode 100644 index e86eea81e6567..0000000000000 --- a/build_msvc/libbitcoin_zmq/libbitcoin_zmq.vcxproj.in +++ /dev/null @@ -1,16 +0,0 @@ - - - - - {792D487F-F14C-49FC-A9DE-3FC150F31C3F} - - - StaticLibrary - - -@SOURCE_FILES@ - - - - - diff --git a/build_msvc/libbitcoinconsensus/libbitcoinconsensus.vcxproj b/build_msvc/libbitcoinconsensus/libbitcoinconsensus.vcxproj deleted file mode 100644 index 4cb0bdc90218a..0000000000000 --- a/build_msvc/libbitcoinconsensus/libbitcoinconsensus.vcxproj +++ /dev/null @@ -1,37 +0,0 @@ - - - - - {2B384FA8-9EE1-4544-93CB-0D733C25E8CE} - - - StaticLibrary - - - - - - - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/build_msvc/libleveldb/libleveldb.vcxproj b/build_msvc/libleveldb/libleveldb.vcxproj deleted file mode 100644 index 1610ae7d8661f..0000000000000 --- a/build_msvc/libleveldb/libleveldb.vcxproj +++ /dev/null @@ -1,61 +0,0 @@ - - - - - {18430FEF-6B61-4C53-B396-718E02850F1B} - - - StaticLibrary - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
HAVE_CRC32C=0;HAVE_SNAPPY=0;__STDC_LIMIT_MACROS;LEVELDB_IS_BIG_ENDIAN=0;_UNICODE;UNICODE;_CRT_NONSTDC_NO_DEPRECATE;LEVELDB_PLATFORM_WINDOWS;LEVELDB_ATOMIC_PRESENT;%(PreprocessorDefinitions) - 4244;4267;4312;4722; - ..\..\src\leveldb;..\..\src\leveldb\include;%(AdditionalIncludeDirectories) - - - - - - diff --git a/build_msvc/libsecp256k1/libsecp256k1.vcxproj b/build_msvc/libsecp256k1/libsecp256k1.vcxproj deleted file mode 100644 index 99fb63fb0262a..0000000000000 --- a/build_msvc/libsecp256k1/libsecp256k1.vcxproj +++ /dev/null @@ -1,22 +0,0 @@ - - - - - {BB493552-3B8C-4A8C-BF69-A6E7A51D2EA6} - - - StaticLibrary - - - - - - - ENABLE_MODULE_ECDH;ENABLE_MODULE_RECOVERY;%(PreprocessorDefinitions) - ..\..\src\secp256k1;%(AdditionalIncludeDirectories) - - - - - - \ No newline at end of file diff --git a/build_msvc/libsecp256k1_config.h b/build_msvc/libsecp256k1_config.h deleted file mode 100644 index 5187c946a0d0f..0000000000000 --- a/build_msvc/libsecp256k1_config.h +++ /dev/null @@ -1,29 +0,0 @@ -/********************************************************************** - * Copyright (c) 2013, 2014 Pieter Wuille * - * Distributed under the MIT software license, see the accompanying * - * file COPYING or http://www.opensource.org/licenses/mit-license.php.* - **********************************************************************/ - -#ifndef BITCOIN_LIBSECP256K1_CONFIG_H -#define BITCOIN_LIBSECP256K1_CONFIG_H - -#undef USE_ASM_X86_64 -#undef USE_ENDOMORPHISM -#undef USE_FIELD_10X26 -#undef USE_FIELD_5X52 -#undef USE_FIELD_INV_BUILTIN -#undef USE_FIELD_INV_NUM -#undef USE_NUM_GMP -#undef USE_NUM_NONE -#undef USE_SCALAR_4X64 -#undef USE_SCALAR_8X32 -#undef USE_SCALAR_INV_BUILTIN -#undef USE_SCALAR_INV_NUM - -#define USE_NUM_NONE 1 -#define USE_FIELD_INV_BUILTIN 1 -#define USE_SCALAR_INV_BUILTIN 1 -#define USE_FIELD_10X26 1 -#define USE_SCALAR_8X32 1 - -#endif /* BITCOIN_LIBSECP256K1_CONFIG_H */ diff --git a/build_msvc/libtest_util/libtest_util.vcxproj.in b/build_msvc/libtest_util/libtest_util.vcxproj.in deleted file mode 100644 index b5e844010e919..0000000000000 --- a/build_msvc/libtest_util/libtest_util.vcxproj.in +++ /dev/null @@ -1,16 +0,0 @@ - - - - - {868474FD-35F6-4400-8EED-30A33E7521D4} - - - StaticLibrary - - -@SOURCE_FILES@ - - - - - diff --git a/build_msvc/libunivalue/libunivalue.vcxproj b/build_msvc/libunivalue/libunivalue.vcxproj deleted file mode 100644 index 0f13a57241350..0000000000000 --- a/build_msvc/libunivalue/libunivalue.vcxproj +++ /dev/null @@ -1,19 +0,0 @@ - - - - - {5724BA7D-A09A-4BA8-800B-C4C1561B3D69} - - - StaticLibrary - - - - - - - - - - - diff --git a/build_msvc/msbuild/tasks/hexdump.targets b/build_msvc/msbuild/tasks/hexdump.targets deleted file mode 100644 index 12868a9874181..0000000000000 --- a/build_msvc/msbuild/tasks/hexdump.targets +++ /dev/null @@ -1,53 +0,0 @@ - - - - - - - - - - - - - outFileInfo.LastWriteTime) - { - using (Stream inStm = File.OpenRead(RawFilePath)) - { - using (StreamWriter sw = new StreamWriter(HeaderFilePath)) - { - sw.WriteLine(SourceHeader); - int count = 0; - int rawChar = inStm.ReadByte(); - while(rawChar != -1) - { - sw.Write("0x{0:x2}, ", rawChar); - count++; - if(count % 8 == 0) - { - sw.WriteLine(); - } - rawChar = inStm.ReadByte(); - } - sw.WriteLine(SourceFooter); - } - } - } -} -]]> - - - - \ No newline at end of file diff --git a/build_msvc/msbuild/tasks/replaceinfile.targets b/build_msvc/msbuild/tasks/replaceinfile.targets deleted file mode 100644 index 2ccb8b30e053f..0000000000000 --- 
a/build_msvc/msbuild/tasks/replaceinfile.targets +++ /dev/null @@ -1,35 +0,0 @@ - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/build_msvc/msvc-autogen.py b/build_msvc/msvc-autogen.py deleted file mode 100644 index d99b17d38127b..0000000000000 --- a/build_msvc/msvc-autogen.py +++ /dev/null @@ -1,86 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2016-2019 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. - -import os -import re -import argparse -from shutil import copyfile - -SOURCE_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'src')) -DEFAULT_PLATFORM_TOOLSET = R'v141' - -libs = [ - 'libbitcoin_cli', - 'libbitcoin_common', - 'libbitcoin_crypto', - 'libbitcoin_server', - 'libbitcoin_util', - 'libbitcoin_wallet_tool', - 'libbitcoin_wallet', - 'libbitcoin_zmq', - 'bench_bitcoin', - 'libtest_util', -] - -ignore_list = [ -] - -lib_sources = {} - - -def parse_makefile(makefile): - with open(makefile, 'r', encoding='utf-8') as file: - current_lib = '' - for line in file.read().splitlines(): - if current_lib: - source = line.split()[0] - if source.endswith('.cpp') and not source.startswith('$') and source not in ignore_list: - source_filename = source.replace('/', '\\') - object_filename = source.replace('/', '_')[:-4] + ".obj" - lib_sources[current_lib].append((source_filename, object_filename)) - if not line.endswith('\\'): - current_lib = '' - continue - for lib in libs: - _lib = lib.replace('-', '_') - if re.search(_lib + '.*_SOURCES \\= \\\\', line): - current_lib = lib - lib_sources[current_lib] = [] - break - -def set_common_properties(toolset): - with open(os.path.join(SOURCE_DIR, '../build_msvc/common.init.vcxproj'), 'r', encoding='utf-8') as rfile: - s = rfile.read() - s = re.sub('.*?', ''+toolset+'', s) - with open(os.path.join(SOURCE_DIR, '../build_msvc/common.init.vcxproj'), 'w', encoding='utf-8',newline='\n') as wfile: - wfile.write(s) - -def main(): - parser = argparse.ArgumentParser(description='Bitcoin-core msbuild configuration initialiser.') - parser.add_argument('-toolset', nargs='?',help='Optionally sets the msbuild platform toolset, e.g. v142 for Visual Studio 2019.' 
- ' default is %s.'%DEFAULT_PLATFORM_TOOLSET) - args = parser.parse_args() - if args.toolset: - set_common_properties(args.toolset) - - for makefile_name in os.listdir(SOURCE_DIR): - if 'Makefile' in makefile_name: - parse_makefile(os.path.join(SOURCE_DIR, makefile_name)) - for key, value in lib_sources.items(): - vcxproj_filename = os.path.abspath(os.path.join(os.path.dirname(__file__), key, key + '.vcxproj')) - content = '' - for source_filename, object_filename in value: - content += ' \n' - content += ' $(IntDir)' + object_filename + '\n' - content += ' \n' - with open(vcxproj_filename + '.in', 'r', encoding='utf-8') as vcxproj_in_file: - with open(vcxproj_filename, 'w', encoding='utf-8') as vcxproj_file: - vcxproj_file.write(vcxproj_in_file.read().replace( - '@SOURCE_FILES@\n', content)) - copyfile(os.path.join(SOURCE_DIR,'../build_msvc/bitcoin_config.h'), os.path.join(SOURCE_DIR, 'config/bitcoin-config.h')) - copyfile(os.path.join(SOURCE_DIR,'../build_msvc/libsecp256k1_config.h'), os.path.join(SOURCE_DIR, 'secp256k1/src/libsecp256k1-config.h')) - -if __name__ == '__main__': - main() diff --git a/build_msvc/test_bitcoin-qt/test_bitcoin-qt.vcxproj b/build_msvc/test_bitcoin-qt/test_bitcoin-qt.vcxproj deleted file mode 100644 index 2095c0c321351..0000000000000 --- a/build_msvc/test_bitcoin-qt/test_bitcoin-qt.vcxproj +++ /dev/null @@ -1,122 +0,0 @@ - - - - - - {51201D5E-D939-4854-AE9D-008F03FF518E} - Application - $(SolutionDir)$(Platform)\$(Configuration)\ - - - - - - - - - - - - - - - - - - - - - - {2b384fa8-9ee1-4544-93cb-0d733c25e8ce} - - - {0667528c-d734-4009-adf9-c0d6c4a5a5a6} - - - {7c87e378-df58-482e-aa2f-1bc129bc19ce} - - - {6190199c-6cf4-4dad-bfbd-93fa72a760c1} - - - {2b4abff8-d1fd-4845-88c9-1f3c0a6512bf} - - - {460fee33-1fe1-483f-b3bf-931ff8e969a5} - - - {b53a5535-ee9d-4c6f-9a26-f79ee3bc3754} - - - {93b86837-b543-48a5-a89b-7c87abb77df2} - - - {792d487f-f14c-49fc-a9de-3fc150f31c3f} - - - {18430fef-6b61-4c53-b396-718e02850f1b} - - - {bb493552-3b8c-4a8c-bf69-a6e7a51d2ea6} - - - {5724ba7d-a09a-4ba8-800b-c4c1561b3d69} - - - - - - - - - ..\libbitcoin_qt\$(GeneratedFilesOutDir)\..\;$(QtIncludeDir)\QtTest;$(QtIncludes);%(AdditionalIncludeDirectories) - - - $(QtLibraryDir)\Qt5Test.lib;$(QtReleaseLibraries);%(AdditionalDependencies) - /ignore:4206 - - - - - - ..\libbitcoin_qt\$(GeneratedFilesOutDir)\..\;$(QtIncludeDir)\QtTest;$(QtIncludes);%(AdditionalIncludeDirectories) - - - $(QtDebugLibraries);%(AdditionalDependencies) - /ignore:4206 - - - - - - - - - - - - - There was an error executing the test_bitcoin-qt moc code generation task. 
- - - - - - - - - - - - - moccode; - $(BuildDependsOn); - - - - - QtTestCleanGeneratedFiles; - $(CleanDependsOn); - - - diff --git a/build_msvc/test_bitcoin/test_bitcoin.vcxproj b/build_msvc/test_bitcoin/test_bitcoin.vcxproj deleted file mode 100644 index 5c4b777d5164b..0000000000000 --- a/build_msvc/test_bitcoin/test_bitcoin.vcxproj +++ /dev/null @@ -1,73 +0,0 @@ - - - - - {A56B73DB-D46D-4882-8374-1FE3FFA08F07} - - - Application - $(SolutionDir)$(Platform)\$(Configuration)\ - - - - - - - - - - - - - {2b384fa8-9ee1-4544-93cb-0d733c25e8ce} - - - {0667528c-d734-4009-adf9-c0d6c4a5a5a6} - - - {7c87e378-df58-482e-aa2f-1bc129bc19ce} - - - {6190199c-6cf4-4dad-bfbd-93fa72a760c1} - - - {460fee33-1fe1-483f-b3bf-931ff8e969a5} - - - {b53a5535-ee9d-4c6f-9a26-f79ee3bc3754} - - - {93b86837-b543-48a5-a89b-7c87abb77df2} - - - {792d487f-f14c-49fc-a9de-3fc150f31c3f} - - - {1e065f03-3566-47d0-8fa9-daa72b084e7d} - - - {5724ba7d-a09a-4ba8-800b-c4c1561b3d69} - - - {bb493552-3b8c-4a8c-bf69-a6e7a51d2ea6} - - - {18430fef-6b61-4c53-b396-718e02850f1b} - - - - - There was an error executing the JSON test header generation task. - - - - - - - - - - - - - diff --git a/build_msvc/testconsensus/testconsensus.cpp b/build_msvc/testconsensus/testconsensus.cpp deleted file mode 100644 index 5fdd97dc783e5..0000000000000 --- a/build_msvc/testconsensus/testconsensus.cpp +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright (c) 2018-2019 The Bitcoin Core developers -// Distributed under the MIT software license, see the accompanying -// file COPYING or http://www.opensource.org/licenses/mit-license.php. - -#include - -// bitcoin includes. -#include <..\src\script\bitcoinconsensus.h> -#include <..\src\primitives\transaction.h> -#include <..\src\script\script.h> -#include <..\src\streams.h> -#include <..\src\version.h> - -CMutableTransaction BuildSpendingTransaction(const CScript& scriptSig, const CScriptWitness& scriptWitness, int nValue = 0) -{ - CMutableTransaction txSpend; - txSpend.nVersion = 1; - txSpend.nLockTime = 0; - txSpend.vin.resize(1); - txSpend.vout.resize(1); - txSpend.vin[0].scriptWitness = scriptWitness; - txSpend.vin[0].prevout.hash = uint256(); - txSpend.vin[0].prevout.n = 0; - txSpend.vin[0].scriptSig = scriptSig; - txSpend.vin[0].nSequence = CTxIn::SEQUENCE_FINAL; - txSpend.vout[0].scriptPubKey = CScript(); - txSpend.vout[0].nValue = nValue; - - return txSpend; -} - -int main() -{ - std::cout << "bitcoinconsensus version: " << bitcoinconsensus_version() << std::endl; - - CScript pubKeyScript; - pubKeyScript << OP_1 << OP_0 << OP_1; - - int amount = 0; // 600000000; - - CScript scriptSig; - CScriptWitness scriptWitness; - CTransaction vanillaSpendTx = BuildSpendingTransaction(scriptSig, scriptWitness, amount); - CDataStream stream(SER_NETWORK, PROTOCOL_VERSION); - stream << vanillaSpendTx; - - bitcoinconsensus_error err; - auto op0Result = bitcoinconsensus_verify_script_with_amount(pubKeyScript.data(), pubKeyScript.size(), amount, (const unsigned char*)&stream[0], stream.size(), 0, bitcoinconsensus_SCRIPT_FLAGS_VERIFY_ALL, &err); - std::cout << "Op0 result: " << op0Result << ", error code " << err << std::endl; - - getchar(); - - return 0; -} diff --git a/build_msvc/testconsensus/testconsensus.vcxproj b/build_msvc/testconsensus/testconsensus.vcxproj deleted file mode 100644 index 776c40920abdd..0000000000000 --- a/build_msvc/testconsensus/testconsensus.vcxproj +++ /dev/null @@ -1,28 +0,0 @@ - - - - - {E78473E9-B850-456C-9120-276301E04C06} - - - Application - $(SolutionDir)$(Platform)\$(Configuration)\ - - - - - - - 
{2B384FA8-9EE1-4544-93CB-0D733C25E8CE} - - - {B53A5535-EE9D-4C6F-9A26-F79EE3BC3754} - - - {BB493552-3B8C-4A8C-BF69-A6E7A51D2EA6} - - - - - - diff --git a/build_msvc/vcpkg-packages.txt b/build_msvc/vcpkg-packages.txt deleted file mode 100644 index 307f295f089cd..0000000000000 --- a/build_msvc/vcpkg-packages.txt +++ /dev/null @@ -1 +0,0 @@ -berkeleydb boost-filesystem boost-multi-index boost-signals2 boost-test boost-thread libevent[thread] zeromq double-conversion \ No newline at end of file diff --git a/ci/README.md b/ci/README.md index d2ea255b4bad7..b4edd4b191730 100644 --- a/ci/README.md +++ b/ci/README.md @@ -1,38 +1,56 @@ -## ci scripts +## CI Scripts This directory contains scripts for each build step in each build stage. -Currently three stages `lint`, `extended_lint` and `test` are defined. Each stage has its own lifecycle, similar to the -[Travis CI lifecycle](https://docs.travis-ci.com/user/job-lifecycle#the-job-lifecycle). Every script in here is named -and numbered according to which stage and lifecycle step it belongs to. - -### Running a stage locally +### Running a Stage Locally Be aware that the tests will be built and run in-place, so please run at your own risk. If the repository is not a fresh git clone, you might have to clean files from previous builds or test runs first. The ci needs to perform various sysadmin tasks such as installing packages or writing to the user's home directory. -While most of the actions are done inside a docker container, this is not possible for all. Thus, cache directories, -such as the depends cache, previous release binaries, or ccache, are mounted as read-write into the docker container. While it should be fine to run +While it should be fine to run the ci system locally on you development box, the ci scripts can generally be assumed to have received less review and testing compared to other parts of the codebase. If you want to keep the work tree clean, you might want to run the ci system in a virtual machine with a Linux operating system of your choice. To allow for a wide range of tested environments, but also ensure reproducibility to some extent, the test stage -requires `docker` to be installed. To install all requirements on Ubuntu, run +requires `bash`, `docker`, and `python3` to be installed. To run on different architectures than the host `qemu` is also required. To install all requirements on Ubuntu, run ``` -sudo apt install docker.io bash +sudo apt install bash docker.io python3 qemu-user-static ``` -To run the default test stage, +It is recommended to run the ci system in a clean env. To run the test stage +with a specific configuration, ``` -./ci/test_run_all.sh +env -i HOME="$HOME" PATH="$PATH" USER="$USER" bash -c 'FILE_ENV="./ci/test/00_setup_env_arm.sh" ./ci/test_run_all.sh' ``` -To run the test stage with a specific configuration, +### Configurations + +The test files (`FILE_ENV`) are constructed to test a wide range of +configurations, rather than a single pass/fail. This helps to catch build +failures and logic errors that present on platforms other than the ones the +author has tested. + +Some builders use the dependency-generator in `./depends`, rather than using +the system package manager to install build dependencies. This guarantees that +the tester is using the same versions as the release builds, which also use +`./depends`. + +It is also possible to force a specific configuration without modifying the +file. 
For example, ``` -FILE_ENV="./ci/test/00_setup_env_arm.sh" ./ci/test_run_all.sh +env -i HOME="$HOME" PATH="$PATH" USER="$USER" bash -c 'MAKEJOBS="-j1" FILE_ENV="./ci/test/00_setup_env_arm.sh" ./ci/test_run_all.sh' ``` + +The files starting with `0n` (`n` greater than 0) are the scripts that are run +in order. + +### Cache + +In order to avoid rebuilding all dependencies for each build, the binaries are +cached and reused when possible. Changes in the dependency-generator will +trigger cache-invalidation and rebuilds as necessary. diff --git a/ci/lint/04_install.sh b/ci/lint/04_install.sh index 8b2d609504d5c..d899c0c67a917 100755 --- a/ci/lint/04_install.sh +++ b/ci/lint/04_install.sh @@ -1,15 +1,67 @@ #!/usr/bin/env bash # -# Copyright (c) 2018-2019 The Bitcoin Core developers +# Copyright (c) 2018-present The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. export LC_ALL=C -travis_retry pip3 install codespell==1.15.0 -travis_retry pip3 install flake8==3.7.8 -travis_retry pip3 install yq +export CI_RETRY_EXE="/ci_retry --" -SHELLCHECK_VERSION=v0.6.0 -curl -s "https://storage.googleapis.com/shellcheck/shellcheck-${SHELLCHECK_VERSION}.linux.x86_64.tar.xz" | tar --xz -xf - --directory /tmp/ -export PATH="/tmp/shellcheck-${SHELLCHECK_VERSION}:${PATH}" +pushd "/" + +${CI_RETRY_EXE} apt-get update +# Lint dependencies: +# - curl/xz-utils (to install shellcheck) +# - git (used in many lint scripts) +# - gpg (used by verify-commits) +${CI_RETRY_EXE} apt-get install -y curl xz-utils git gpg + +PYTHON_PATH="/python_build" +if [ ! -d "${PYTHON_PATH}/bin" ]; then + ( + ${CI_RETRY_EXE} git clone --depth=1 https://github.com/pyenv/pyenv.git + cd pyenv/plugins/python-build || exit 1 + ./install.sh + ) + # For dependencies see https://github.com/pyenv/pyenv/wiki#suggested-build-environment + ${CI_RETRY_EXE} apt-get install -y build-essential libssl-dev zlib1g-dev \ + libbz2-dev libreadline-dev libsqlite3-dev curl llvm \ + libncursesw5-dev xz-utils tk-dev libxml2-dev libxmlsec1-dev libffi-dev liblzma-dev \ + clang + env CC=clang python-build "$(cat "/.python-version")" "${PYTHON_PATH}" +fi +export PATH="${PYTHON_PATH}/bin:${PATH}" +command -v python3 +python3 --version + +export LINT_RUNNER_PATH="/lint_test_runner" +if [ ! 
-d "${LINT_RUNNER_PATH}" ]; then + ${CI_RETRY_EXE} apt-get install -y cargo + ( + cd "/test/lint/test_runner" || exit 1 + cargo build + mkdir -p "${LINT_RUNNER_PATH}" + mv target/debug/test_runner "${LINT_RUNNER_PATH}" + ) +fi + +${CI_RETRY_EXE} pip3 install \ + codespell==2.2.6 \ + lief==0.13.2 \ + mypy==1.4.1 \ + pyzmq==25.1.0 \ + ruff==0.5.5 \ + vulture==2.6 + +SHELLCHECK_VERSION=v0.8.0 +curl -sL "https://github.com/koalaman/shellcheck/releases/download/${SHELLCHECK_VERSION}/shellcheck-${SHELLCHECK_VERSION}.linux.x86_64.tar.xz" | \ + tar --xz -xf - --directory /tmp/ +mv "/tmp/shellcheck-${SHELLCHECK_VERSION}/shellcheck" /usr/bin/ + +MLC_VERSION=v0.18.0 +MLC_BIN=mlc-x86_64-linux +curl -sL "https://github.com/becheran/mlc/releases/download/${MLC_VERSION}/${MLC_BIN}" -o "/usr/bin/mlc" +chmod +x /usr/bin/mlc + +popd || exit diff --git a/ci/lint/05_before_script.sh b/ci/lint/05_before_script.sh deleted file mode 100755 index 2987812c8e64b..0000000000000 --- a/ci/lint/05_before_script.sh +++ /dev/null @@ -1,9 +0,0 @@ -#!/usr/bin/env bash -# -# Copyright (c) 2018-2019 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. - -export LC_ALL=C - -git fetch --unshallow diff --git a/ci/lint/06_script.sh b/ci/lint/06_script.sh index 003bdf3c29268..cdf0f60147d9c 100755 --- a/ci/lint/06_script.sh +++ b/ci/lint/06_script.sh @@ -1,26 +1,43 @@ #!/usr/bin/env bash # -# Copyright (c) 2018-2019 The Bitcoin Core developers +# Copyright (c) 2018-2022 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. export LC_ALL=C -if [ "$TRAVIS_EVENT_TYPE" = "pull_request" ]; then - test/lint/commit-script-check.sh $TRAVIS_COMMIT_RANGE +set -ex + +if [ -n "$CIRRUS_PR" ]; then + COMMIT_RANGE="HEAD~..HEAD" + if [ "$(git rev-list -1 HEAD)" != "$(git rev-list -1 --merges HEAD)" ]; then + echo "Error: The top commit must be a merge commit, usually the remote 'pull/${PR_NUMBER}/merge' branch." + false + fi +else + # Otherwise, assume that a merge commit exists. This merge commit is assumed + # to be the base, after which linting will be done. If the merge commit is + # HEAD, the range will be empty. + COMMIT_RANGE="$( git rev-list --max-count=1 --merges HEAD )..HEAD" fi +export COMMIT_RANGE -test/lint/git-subtree-check.sh src/crypto/ctaes -test/lint/git-subtree-check.sh src/secp256k1 -test/lint/git-subtree-check.sh src/univalue -test/lint/git-subtree-check.sh src/leveldb -test/lint/git-subtree-check.sh src/crc32c -test/lint/check-doc.py -test/lint/check-rpc-mappings.py . 
-test/lint/lint-all.sh +echo +git log --no-merges --oneline "$COMMIT_RANGE" +echo +test/lint/commit-script-check.sh "$COMMIT_RANGE" +RUST_BACKTRACE=1 "${LINT_RUNNER_PATH}/test_runner" -if [ "$TRAVIS_REPO_SLUG" = "bitcoin/bitcoin" ] && [ "$TRAVIS_EVENT_TYPE" = "cron" ]; then - git log --merges --before="2 days ago" -1 --format='%H' > ./contrib/verify-commits/trusted-sha512-root-commit - travis_retry gpg --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys $( ./contrib/verify-commits/trusted-sha512-root-commit + git log HEAD~10 -1 --format='%H' > ./contrib/verify-commits/trusted-git-root + mapfile -t KEYS < contrib/verify-commits/trusted-keys + git config user.email "ci@ci.ci" + git config user.name "ci" + ${CI_RETRY_EXE} gpg --keyserver hkps://keys.openpgp.org --recv-keys "${KEYS[@]}" && + ./contrib/verify-commits/verify-commits.py; fi diff --git a/ci/lint/container-entrypoint.sh b/ci/lint/container-entrypoint.sh new file mode 100755 index 0000000000000..c8519a39129cf --- /dev/null +++ b/ci/lint/container-entrypoint.sh @@ -0,0 +1,20 @@ +#!/usr/bin/env bash +# +# Copyright (c) The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or https://opensource.org/license/mit/. + +export LC_ALL=C + +# Fixes permission issues when there is a container UID/GID mismatch with the owner +# of the mounted bitcoin src dir. +git config --global --add safe.directory /bitcoin + +export PATH="/python_build/bin:${PATH}" +export LINT_RUNNER_PATH="/lint_test_runner" + +if [ -z "$1" ]; then + bash -ic "./ci/lint/06_script.sh" +else + exec "$@" +fi diff --git a/ci/lint_imagefile b/ci/lint_imagefile new file mode 100644 index 0000000000000..0e7ede5204380 --- /dev/null +++ b/ci/lint_imagefile @@ -0,0 +1,25 @@ +# Copyright (c) The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or https://opensource.org/license/mit/. + +# See test/lint/README.md for usage. + +FROM docker.io/debian:bookworm + +ENV DEBIAN_FRONTEND=noninteractive +ENV LC_ALL=C.UTF-8 + +COPY ./ci/retry/retry /ci_retry +COPY ./.python-version /.python-version +COPY ./ci/lint/container-entrypoint.sh /entrypoint.sh +COPY ./ci/lint/04_install.sh /install.sh +COPY ./test/lint/test_runner /test/lint/test_runner + +RUN /install.sh && \ + echo 'alias lint="./ci/lint/06_script.sh"' >> ~/.bashrc && \ + chmod 755 /entrypoint.sh && \ + rm -rf /var/lib/apt/lists/* + + +WORKDIR /bitcoin +ENTRYPOINT ["/entrypoint.sh"] diff --git a/ci/lint_run_all.sh b/ci/lint_run_all.sh new file mode 100755 index 0000000000000..c57261d21a699 --- /dev/null +++ b/ci/lint_run_all.sh @@ -0,0 +1,17 @@ +#!/usr/bin/env bash +# +# Copyright (c) 2019-present The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. + +export LC_ALL=C.UTF-8 + +# Only used in .cirrus.yml. Refer to test/lint/README.md on how to run locally. 
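+# Stage the retry helper, the pinned Python version, and the lint test_runner
+# sources at the root-level paths that ci/lint/04_install.sh expects, then
+# install the lint dependencies and run the lint checks.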
+ +cp "./ci/retry/retry" "/ci_retry" +cp "./.python-version" "/.python-version" +mkdir --parents "/test/lint" +cp --recursive "./test/lint/test_runner" "/test/lint/" +set -o errexit; source ./ci/lint/04_install.sh +set -o errexit +./ci/lint/06_script.sh diff --git a/ci/test/00_setup_env.sh b/ci/test/00_setup_env.sh index dae61c5e34b2f..021d5e1597624 100755 --- a/ci/test/00_setup_env.sh +++ b/ci/test/00_setup_env.sh @@ -1,16 +1,30 @@ #!/usr/bin/env bash # -# Copyright (c) 2019-2020 The Bitcoin Core developers +# Copyright (c) 2019-2022 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. export LC_ALL=C.UTF-8 -# The root dir. +set -ex + +# The source root dir, usually from git, usually read-only. # The ci system copies this folder. -# This is where the depends build is done. -BASE_ROOT_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )"/../../ >/dev/null 2>&1 && pwd ) -export BASE_ROOT_DIR +BASE_READ_ONLY_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )"/../../ >/dev/null 2>&1 && pwd ) +export BASE_READ_ONLY_DIR +# The destination root dir inside the container. +# This folder will also hold any SDKs. +# This folder only exists on the ci guest and will be a copy of BASE_READ_ONLY_DIR +export BASE_ROOT_DIR="${BASE_ROOT_DIR:-/ci_container_base}" +# The depends dir. +# This folder exists only on the ci guest, and on the ci host as a volume. +export DEPENDS_DIR=${DEPENDS_DIR:-$BASE_ROOT_DIR/depends} +# A folder for the ci system to put temporary files (build result, datadirs for tests, ...) +# This folder only exists on the ci guest. +export BASE_SCRATCH_DIR=${BASE_SCRATCH_DIR:-$BASE_ROOT_DIR/ci/scratch} +# A folder for the ci system to put executables. +# This folder only exists on the ci guest. +export BINS_SCRATCH_DIR="${BASE_SCRATCH_DIR}/bins/" echo "Setting specific values in env" if [ -n "${FILE_ENV}" ]; then @@ -22,41 +36,35 @@ fi echo "Fallback to default values in env (if not yet set)" # The number of parallel jobs to pass down to make and test_runner.py export MAKEJOBS=${MAKEJOBS:--j4} -# A folder for the ci system to put temporary files (ccache, datadirs for tests, ...) -# This folder only exists on the ci host. -export BASE_SCRATCH_DIR=${BASE_SCRATCH_DIR:-$BASE_ROOT_DIR/ci/scratch} -# What host to compile for. See also ./depends/README.md -# Tests that need cross-compilation export the appropriate HOST. -# Tests that run natively guess the host -export HOST=${HOST:-$("$BASE_ROOT_DIR/depends/config.guess")} # Whether to prefer BusyBox over GNU utilities export USE_BUSY_BOX=${USE_BUSY_BOX:-false} + export RUN_UNIT_TESTS=${RUN_UNIT_TESTS:-true} export RUN_FUNCTIONAL_TESTS=${RUN_FUNCTIONAL_TESTS:-true} -export TEST_PREVIOUS_RELEASES=${TEST_PREVIOUS_RELEASES:-false} +export RUN_TIDY=${RUN_TIDY:-false} +# By how much to scale the test_runner timeouts (option --timeout-factor). +# This is needed because some ci machines have slow CPU or disk, so sanitizers +# might be slow or a reindex might be waiting on disk IO. +export TEST_RUNNER_TIMEOUT_FACTOR=${TEST_RUNNER_TIMEOUT_FACTOR:-40} export RUN_FUZZ_TESTS=${RUN_FUZZ_TESTS:-false} -export CONTAINER_NAME=${CONTAINER_NAME:-ci_unnamed} -export DOCKER_NAME_TAG=${DOCKER_NAME_TAG:-ubuntu:18.04} + # Randomize test order. 
# See https://www.boost.org/doc/libs/1_71_0/libs/test/doc/html/boost_test/utf_reference/rt_param_reference/random.html export BOOST_TEST_RANDOM=${BOOST_TEST_RANDOM:-1} -export CCACHE_SIZE=${CCACHE_SIZE:-100M} +# See man 7 debconf +export DEBIAN_FRONTEND=noninteractive +export CCACHE_MAXSIZE=${CCACHE_MAXSIZE:-500M} export CCACHE_TEMPDIR=${CCACHE_TEMPDIR:-/tmp/.ccache-temp} export CCACHE_COMPRESS=${CCACHE_COMPRESS:-1} # The cache dir. -# This folder exists on the ci host and ci guest. Changes are propagated back and forth. -export CCACHE_DIR=${CCACHE_DIR:-$BASE_SCRATCH_DIR/.ccache} -# The depends dir. -# This folder exists on the ci host and ci guest. Changes are propagated back and forth. -export DEPENDS_DIR=${DEPENDS_DIR:-$BASE_ROOT_DIR/depends} +# This folder exists only on the ci guest, and on the ci host as a volume. +export CCACHE_DIR="${CCACHE_DIR:-$BASE_SCRATCH_DIR/ccache}" # Folder where the build result is put (bin and lib). -export BASE_OUTDIR=${BASE_OUTDIR:-$BASE_SCRATCH_DIR/out/$HOST} -# Folder where the build is done (dist and out-of-tree build). -export BASE_BUILD_DIR=${BASE_BUILD_DIR:-$BASE_SCRATCH_DIR/build} -export PREVIOUS_RELEASES_DIR=${PREVIOUS_RELEASES_DIR:-$BASE_ROOT_DIR/releases/$HOST} -export SDK_URL=${SDK_URL:-https://bitcoincore.org/depends-sources/sdks} -export DOCKER_PACKAGES=${DOCKER_PACKAGES:-build-essential libtool autotools-dev automake pkg-config bsdmainutils curl ca-certificates ccache python3 rsync git procps} +export BASE_OUTDIR=${BASE_OUTDIR:-$BASE_SCRATCH_DIR/out} +# The folder for previous release binaries. +# This folder exists only on the ci guest, and on the ci host as a volume. +export PREVIOUS_RELEASES_DIR=${PREVIOUS_RELEASES_DIR:-$BASE_ROOT_DIR/prev_releases} +export CI_BASE_PACKAGES=${CI_BASE_PACKAGES:-build-essential pkg-config curl ca-certificates ccache python3 rsync git procps bison e2fsprogs cmake} export GOAL=${GOAL:-install} export DIR_QA_ASSETS=${DIR_QA_ASSETS:-${BASE_SCRATCH_DIR}/qa-assets} -export PATH=${BASE_ROOT_DIR}/ci/retry:$PATH export CI_RETRY_EXE=${CI_RETRY_EXE:-"retry --"} diff --git a/ci/test/00_setup_env_arm.sh b/ci/test/00_setup_env_arm.sh old mode 100644 new mode 100755 index b70a581532c10..749ae86cb2ba8 --- a/ci/test/00_setup_env_arm.sh +++ b/ci/test/00_setup_env_arm.sh @@ -1,28 +1,20 @@ #!/usr/bin/env bash # -# Copyright (c) 2019-2020 The Bitcoin Core developers +# Copyright (c) 2019-present The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. export LC_ALL=C.UTF-8 export HOST=arm-linux-gnueabihf -# The host arch is unknown, so we run the tests through qemu. -# If the host is arm and wants to run the tests natively, it can set QEMU_USER_CMD to the empty string. 
-if [ -z ${QEMU_USER_CMD+x} ]; then export QEMU_USER_CMD="${QEMU_USER_CMD:-"qemu-arm -L /usr/arm-linux-gnueabihf/"}"; fi export DPKG_ADD_ARCH="armhf" export PACKAGES="python3-zmq g++-arm-linux-gnueabihf busybox libc6:armhf libstdc++6:armhf libfontconfig1:armhf libxcb1:armhf" -if [ -n "$QEMU_USER_CMD" ]; then - # Likely cross-compiling, so install the needed gcc and qemu-user - export PACKAGES="$PACKAGES qemu-user" -fi export CONTAINER_NAME=ci_arm_linux -# Use debian to avoid 404 apt errors when cross compiling -export DOCKER_NAME_TAG="debian:buster" +export CI_IMAGE_NAME_TAG="docker.io/arm64v8/debian:bookworm" # Check that https://packages.debian.org/bookworm/g++-arm-linux-gnueabihf (version 12.2, similar to guix) can cross-compile export USE_BUSY_BOX=true export RUN_UNIT_TESTS=true -export RUN_FUNCTIONAL_TESTS=true +export RUN_FUNCTIONAL_TESTS=false export GOAL="install" # -Wno-psabi is to disable ABI warnings: "note: parameter passing for argument of type ... changed in GCC 7.1" # This could be removed once the ABI change warning does not show up by default -export BITCOIN_CONFIG="--enable-glibc-back-compat --enable-reduce-exports CXXFLAGS=-Wno-psabi --enable-werror" +export BITCOIN_CONFIG="-DREDUCE_EXPORTS=ON -DCMAKE_CXX_FLAGS='-Wno-psabi -Wno-error=maybe-uninitialized'" diff --git a/ci/test/00_setup_env_i686_centos.sh b/ci/test/00_setup_env_i686_centos.sh old mode 100644 new mode 100755 index 5688799f9e57d..5604004d3a10b --- a/ci/test/00_setup_env_i686_centos.sh +++ b/ci/test/00_setup_env_i686_centos.sh @@ -1,15 +1,17 @@ #!/usr/bin/env bash # -# Copyright (c) 2020 The Bitcoin Core developers +# Copyright (c) 2020-2022 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. export LC_ALL=C.UTF-8 export HOST=i686-pc-linux-gnu -export CONTAINER_NAME=ci_i686_centos_7 -export DOCKER_NAME_TAG=centos:7 -export DOCKER_PACKAGES="gcc-c++ glibc-devel.x86_64 libstdc++-devel.x86_64 glibc-devel.i686 libstdc++-devel.i686 ccache libtool make git python3 python36-zmq which patch lbzip2 dash" +export CONTAINER_NAME=ci_i686_centos +export CI_IMAGE_NAME_TAG="quay.io/centos/amd64:stream9" +export CI_BASE_PACKAGES="gcc-c++ glibc-devel.x86_64 libstdc++-devel.x86_64 glibc-devel.i686 libstdc++-devel.i686 ccache make git python3 python3-pip which patch lbzip2 xz procps-ng dash rsync coreutils bison util-linux e2fsprogs cmake" +export PIP_PACKAGES="pyzmq" export GOAL="install" -export BITCOIN_CONFIG="--enable-zmq --with-gui=qt5 --enable-reduce-exports" +export NO_WERROR=1 # Suppress error: #warning _FORTIFY_SOURCE > 2 is treated like 2 on this platform [-Werror=cpp] +export BITCOIN_CONFIG="-DWITH_ZMQ=ON -DBUILD_GUI=ON -DREDUCE_EXPORTS=ON" export CONFIG_SHELL="/bin/dash" diff --git a/ci/test/00_setup_env_i686_multiprocess.sh b/ci/test/00_setup_env_i686_multiprocess.sh new file mode 100755 index 0000000000000..5810ae8639fa8 --- /dev/null +++ b/ci/test/00_setup_env_i686_multiprocess.sh @@ -0,0 +1,23 @@ +#!/usr/bin/env bash +# +# Copyright (c) 2020-present The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. 
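None of these per-job environment files run anything themselves; they only export HOST, PACKAGES, DEP_OPTS and BITCOIN_CONFIG, which ci/test/03_test_script.sh (later in this diff) turns into a depends build plus a CMake configure. For the arm-linux-gnueabihf job above, the consumed form is roughly this sketch (job count and build directory are placeholders):

    # Simplified from ci/test/03_test_script.sh: build the cross toolchain's
    # dependencies, then point CMake at the generated toolchain file.
    make -C depends HOST=arm-linux-gnueabihf -j4
    cmake -S . -B build \
      -DCMAKE_TOOLCHAIN_FILE=depends/arm-linux-gnueabihf/toolchain.cmake \
      -DREDUCE_EXPORTS=ON -DCMAKE_CXX_FLAGS='-Wno-psabi -Wno-error=maybe-uninitialized'
    cmake --build build -j4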
+ +export LC_ALL=C.UTF-8 + +export HOST=i686-pc-linux-gnu +export CONTAINER_NAME=ci_i686_multiprocess +export CI_IMAGE_NAME_TAG="docker.io/amd64/ubuntu:24.04" +export PACKAGES="llvm clang g++-multilib" +export DEP_OPTS="DEBUG=1 MULTIPROCESS=1" +export GOAL="install" +export TEST_RUNNER_EXTRA="--v2transport" +export BITCOIN_CONFIG="\ + -DCMAKE_BUILD_TYPE=Debug \ + -DCMAKE_C_COMPILER='clang;-m32' \ + -DCMAKE_CXX_COMPILER='clang++;-m32' \ + -DCMAKE_CXX_FLAGS='-Wno-error=documentation' \ + -DAPPEND_CPPFLAGS='-DBOOST_MULTI_INDEX_ENABLE_SAFE_MODE' \ +" +export BITCOIND=bitcoin-node # Used in functional tests diff --git a/ci/test/00_setup_env_mac.sh b/ci/test/00_setup_env_mac.sh deleted file mode 100644 index a4dc54d1c12d3..0000000000000 --- a/ci/test/00_setup_env_mac.sh +++ /dev/null @@ -1,16 +0,0 @@ -#!/usr/bin/env bash -# -# Copyright (c) 2019-2020 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. - -export LC_ALL=C.UTF-8 - -export CONTAINER_NAME=ci_macos_cross -export HOST=x86_64-apple-darwin16 -export PACKAGES="cmake imagemagick libcap-dev librsvg2-bin libz-dev libbz2-dev libtiff-tools python3-dev python3-setuptools" -export OSX_SDK=10.14 -export RUN_UNIT_TESTS=false -export RUN_FUNCTIONAL_TESTS=false -export GOAL="deploy" -export BITCOIN_CONFIG="--enable-gui --enable-reduce-exports --enable-werror" diff --git a/ci/test/00_setup_env_mac_cross.sh b/ci/test/00_setup_env_mac_cross.sh new file mode 100755 index 0000000000000..6a1e116a547b7 --- /dev/null +++ b/ci/test/00_setup_env_mac_cross.sh @@ -0,0 +1,20 @@ +#!/usr/bin/env bash +# +# Copyright (c) 2019-present The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. + +export LC_ALL=C.UTF-8 + +export SDK_URL=${SDK_URL:-https://bitcoincore.org/depends-sources/sdks} + +export CONTAINER_NAME=ci_macos_cross +export CI_IMAGE_NAME_TAG="docker.io/ubuntu:24.04" +export HOST=x86_64-apple-darwin +export PACKAGES="clang lld llvm zip" +export XCODE_VERSION=15.0 +export XCODE_BUILD_ID=15A240d +export RUN_UNIT_TESTS=false +export RUN_FUNCTIONAL_TESTS=false +export GOAL="deploy" +export BITCOIN_CONFIG="-DBUILD_GUI=ON -DREDUCE_EXPORTS=ON" diff --git a/ci/test/00_setup_env_mac_host.sh b/ci/test/00_setup_env_mac_host.sh deleted file mode 100644 index 982e38daee477..0000000000000 --- a/ci/test/00_setup_env_mac_host.sh +++ /dev/null @@ -1,16 +0,0 @@ -#!/usr/bin/env bash -# -# Copyright (c) 2019-2020 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. 
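The semicolons in -DCMAKE_C_COMPILER='clang;-m32' above use CMake's list form of the compiler variables: the first element names the compiler and the remaining elements are arguments added to every invocation, which is how this job gets a 32-bit i686 build out of a 64-bit clang. An illustrative stand-alone configure using the same mechanism (the build directory is a placeholder):

    # Every compile line in the generated build then starts with "clang -m32"
    # or "clang++ -m32".
    cmake -S . -B /tmp/build32 \
      -DCMAKE_C_COMPILER='clang;-m32' -DCMAKE_CXX_COMPILER='clang++;-m32'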
- -export LC_ALL=C.UTF-8 - -export HOST=x86_64-apple-darwin16 -export PIP_PACKAGES="zmq" -export GOAL="install" -export BITCOIN_CONFIG="--enable-gui --enable-reduce-exports --enable-werror" -export TEST_RUNNER_EXTRA="wallet_disable" # Only run wallet_disable as a smoke test, see https://github.com/bitcoin/bitcoin/pull/17240#issuecomment-546022121 why the other tests are disabled -# Run without depends -export NO_DEPENDS=1 -export OSX_SDK="" diff --git a/ci/test/00_setup_env_mac_native.sh b/ci/test/00_setup_env_mac_native.sh new file mode 100755 index 0000000000000..45d644d9cac94 --- /dev/null +++ b/ci/test/00_setup_env_mac_native.sh @@ -0,0 +1,18 @@ +#!/usr/bin/env bash +# +# Copyright (c) 2019-present The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. + +export LC_ALL=C.UTF-8 + +# Homebrew's python@3.12 is marked as externally managed (PEP 668). +# Therefore, `--break-system-packages` is needed. +export PIP_PACKAGES="--break-system-packages zmq" +export GOAL="install" +export CMAKE_GENERATOR="Ninja" +export BITCOIN_CONFIG="-DBUILD_GUI=ON -DWITH_ZMQ=ON -DWITH_MINIUPNPC=ON -DREDUCE_EXPORTS=ON" +export CI_OS_NAME="macos" +export NO_DEPENDS=1 +export OSX_SDK="" +export RUN_FUZZ_TESTS=true diff --git a/ci/test/00_setup_env_native_asan.sh b/ci/test/00_setup_env_native_asan.sh old mode 100644 new mode 100755 index 28c63f1cf6f57..dc84ef49a44f5 --- a/ci/test/00_setup_env_native_asan.sh +++ b/ci/test/00_setup_env_native_asan.sh @@ -1,14 +1,34 @@ #!/usr/bin/env bash # -# Copyright (c) 2019-2020 The Bitcoin Core developers +# Copyright (c) 2019-present The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. export LC_ALL=C.UTF-8 +export CI_IMAGE_NAME_TAG="docker.io/ubuntu:24.04" + +# Only install BCC tracing packages in CI. Container has to match the host for BCC to work. 
+if [[ "${INSTALL_BCC_TRACING_TOOLS}" == "true" ]]; then + # Required for USDT functional tests to run + BPFCC_PACKAGE="bpfcc-tools linux-headers-$(uname --kernel-release)" + export CI_CONTAINER_CAP="--privileged -v /sys/kernel:/sys/kernel:rw" +else + BPFCC_PACKAGE="" + export CI_CONTAINER_CAP="--cap-add SYS_PTRACE" # If run with (ASan + LSan), the container needs access to ptrace (https://github.com/google/sanitizers/issues/764) +fi + export CONTAINER_NAME=ci_native_asan -export PACKAGES="clang-8 llvm-8 python3-zmq qtbase5-dev qttools5-dev-tools libevent-dev bsdmainutils libboost-system-dev libboost-filesystem-dev libboost-test-dev libboost-thread-dev libdb5.3++-dev libminiupnpc-dev libzmq3-dev libqrencode-dev" -# Use clang-8 instead of default clang (which is clang-6 on Bionic) to avoid spurious segfaults when running on ppc64le +export PACKAGES="systemtap-sdt-dev clang-18 llvm-18 libclang-rt-18-dev python3-zmq qtbase5-dev qttools5-dev qttools5-dev-tools libevent-dev libboost-dev libdb5.3++-dev libminiupnpc-dev libzmq3-dev libqrencode-dev libsqlite3-dev ${BPFCC_PACKAGE}" export NO_DEPENDS=1 export GOAL="install" -export BITCOIN_CONFIG="--enable-zmq --with-incompatible-bdb --with-gui=qt5 CPPFLAGS='-DARENA_DEBUG -DDEBUG_LOCKORDER' --with-sanitizers=address,integer,undefined CC=clang-8 CXX=clang++-8" +export BITCOIN_CONFIG="\ + -DWITH_USDT=ON -DWITH_ZMQ=ON -DWITH_BDB=ON -DWARN_INCOMPATIBLE_BDB=OFF -DBUILD_GUI=ON \ + -DSANITIZERS=address,float-divide-by-zero,integer,undefined \ + -DCMAKE_C_COMPILER=clang-18 \ + -DCMAKE_CXX_COMPILER=clang++-18 \ + -DCMAKE_C_FLAGS='-ftrivial-auto-var-init=pattern' \ + -DCMAKE_CXX_FLAGS='-ftrivial-auto-var-init=pattern -Wno-error=deprecated-declarations' \ + -DAPPEND_CXXFLAGS='-std=c++23' \ + -DAPPEND_CPPFLAGS='-DARENA_DEBUG -DDEBUG_LOCKORDER' \ +" diff --git a/ci/test/00_setup_env_native_fuzz.sh b/ci/test/00_setup_env_native_fuzz.sh old mode 100644 new mode 100755 index 43ee219ef98e2..1aa24870454ca --- a/ci/test/00_setup_env_native_fuzz.sh +++ b/ci/test/00_setup_env_native_fuzz.sh @@ -1,17 +1,26 @@ #!/usr/bin/env bash # -# Copyright (c) 2019-2020 The Bitcoin Core developers +# Copyright (c) 2019-present The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
export LC_ALL=C.UTF-8 -export DOCKER_NAME_TAG="ubuntu:20.04" +export CI_IMAGE_NAME_TAG="docker.io/ubuntu:24.04" export CONTAINER_NAME=ci_native_fuzz -export PACKAGES="clang llvm python3 libevent-dev bsdmainutils libboost-system-dev libboost-filesystem-dev libboost-test-dev libboost-thread-dev" +export PACKAGES="clang-18 llvm-18 libclang-rt-18-dev libevent-dev libboost-dev libsqlite3-dev" export NO_DEPENDS=1 export RUN_UNIT_TESTS=false export RUN_FUNCTIONAL_TESTS=false export RUN_FUZZ_TESTS=true export GOAL="install" -export BITCOIN_CONFIG="--enable-fuzz --with-sanitizers=fuzzer,address,undefined CC=clang CXX=clang++" +export CI_CONTAINER_CAP="--cap-add SYS_PTRACE" # If run with (ASan + LSan), the container needs access to ptrace (https://github.com/google/sanitizers/issues/764) +export BITCOIN_CONFIG="\ + -DBUILD_FOR_FUZZING=ON \ + -DSANITIZERS=fuzzer,address,undefined,float-divide-by-zero,integer \ + -DCMAKE_C_COMPILER=clang-18 \ + -DCMAKE_CXX_COMPILER=clang++-18 \ + -DCMAKE_C_FLAGS='-ftrivial-auto-var-init=pattern' \ + -DCMAKE_CXX_FLAGS='-ftrivial-auto-var-init=pattern' \ +" +export LLVM_SYMBOLIZER_PATH="/usr/bin/llvm-symbolizer-18" diff --git a/ci/test/00_setup_env_native_fuzz_with_msan.sh b/ci/test/00_setup_env_native_fuzz_with_msan.sh new file mode 100755 index 0000000000000..cfdbc8c0142a7 --- /dev/null +++ b/ci/test/00_setup_env_native_fuzz_with_msan.sh @@ -0,0 +1,33 @@ +#!/usr/bin/env bash +# +# Copyright (c) 2020-present The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. + +export LC_ALL=C.UTF-8 + +export CI_IMAGE_NAME_TAG="docker.io/ubuntu:24.04" +LIBCXX_DIR="/msan/cxx_build/" +export MSAN_FLAGS="-fsanitize=memory -fsanitize-memory-track-origins=2 -fno-omit-frame-pointer -g -O1 -fno-optimize-sibling-calls" +LIBCXX_FLAGS="-nostdinc++ -nostdlib++ -isystem ${LIBCXX_DIR}include/c++/v1 -L${LIBCXX_DIR}lib -Wl,-rpath,${LIBCXX_DIR}lib -lc++ -lc++abi -lpthread -Wno-unused-command-line-argument" +export MSAN_AND_LIBCXX_FLAGS="${MSAN_FLAGS} ${LIBCXX_FLAGS}" + +export CONTAINER_NAME="ci_native_fuzz_msan" +export PACKAGES="ninja-build" +# BDB generates false-positives and will be removed in future +export DEP_OPTS="DEBUG=1 NO_BDB=1 NO_QT=1 CC=clang CXX=clang++ CFLAGS='${MSAN_FLAGS}' CXXFLAGS='${MSAN_AND_LIBCXX_FLAGS}'" +export GOAL="install" +# Setting CMAKE_{C,CXX}_FLAGS_DEBUG flags to an empty string ensures that the flags set in MSAN_FLAGS remain unaltered. +# _FORTIFY_SOURCE is not compatible with MSAN. +export BITCOIN_CONFIG="\ + -DCMAKE_BUILD_TYPE=Debug \ + -DCMAKE_C_FLAGS_DEBUG='' \ + -DCMAKE_CXX_FLAGS_DEBUG='' \ + -DBUILD_FOR_FUZZING=ON \ + -DSANITIZERS=fuzzer,memory \ + -DAPPEND_CPPFLAGS='-DBOOST_MULTI_INDEX_ENABLE_SAFE_MODE -U_FORTIFY_SOURCE' \ +" +export USE_MEMORY_SANITIZER="true" +export RUN_UNIT_TESTS="false" +export RUN_FUNCTIONAL_TESTS="false" +export RUN_FUZZ_TESTS=true diff --git a/ci/test/00_setup_env_native_fuzz_with_valgrind.sh b/ci/test/00_setup_env_native_fuzz_with_valgrind.sh old mode 100644 new mode 100755 index c27d52500346b..c65c05bff9ea0 --- a/ci/test/00_setup_env_native_fuzz_with_valgrind.sh +++ b/ci/test/00_setup_env_native_fuzz_with_valgrind.sh @@ -1,18 +1,24 @@ #!/usr/bin/env bash # -# Copyright (c) 2019-2020 The Bitcoin Core developers +# Copyright (c) 2019-present The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
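MSan reports are only reliable when the whole C++ standard library is instrumented as well, which is why the flags above pull in the libc++ built under /msan/cxx_build/ (see the USE_MEMORY_SANITIZER branch of ci/test/01_base_install.sh later in this diff). Expanded by hand, compiling a single file with MSAN_AND_LIBCXX_FLAGS amounts to roughly the following; example.cpp is a placeholder:

    clang++ -fsanitize=memory -fsanitize-memory-track-origins=2 -fno-omit-frame-pointer \
      -g -O1 -fno-optimize-sibling-calls \
      -nostdinc++ -nostdlib++ -isystem /msan/cxx_build/include/c++/v1 \
      -L/msan/cxx_build/lib -Wl,-rpath,/msan/cxx_build/lib -lc++ -lc++abi -lpthread \
      example.cpp -o example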
export LC_ALL=C.UTF-8 -export DOCKER_NAME_TAG="ubuntu:20.04" +export CI_IMAGE_NAME_TAG="docker.io/ubuntu:24.04" export CONTAINER_NAME=ci_native_fuzz_valgrind -export PACKAGES="clang llvm python3 libevent-dev bsdmainutils libboost-system-dev libboost-filesystem-dev libboost-test-dev libboost-thread-dev valgrind" +export PACKAGES="clang-16 llvm-16 libclang-rt-16-dev libevent-dev libboost-dev libsqlite3-dev valgrind" export NO_DEPENDS=1 export RUN_UNIT_TESTS=false export RUN_FUNCTIONAL_TESTS=false export RUN_FUZZ_TESTS=true export FUZZ_TESTS_CONFIG="--valgrind" export GOAL="install" -export BITCOIN_CONFIG="--enable-fuzz --with-sanitizers=fuzzer CC=clang CXX=clang++" +export BITCOIN_CONFIG="\ + -DBUILD_FOR_FUZZING=ON \ + -DSANITIZERS=fuzzer \ + -DCMAKE_C_COMPILER=clang-16 \ + -DCMAKE_CXX_COMPILER=clang++-16 \ +" +export LLVM_SYMBOLIZER_PATH="/usr/bin/llvm-symbolizer-16" diff --git a/ci/test/00_setup_env_native_msan.sh b/ci/test/00_setup_env_native_msan.sh new file mode 100755 index 0000000000000..c6b3d68be60b6 --- /dev/null +++ b/ci/test/00_setup_env_native_msan.sh @@ -0,0 +1,30 @@ +#!/usr/bin/env bash +# +# Copyright (c) 2020-present The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. + +export LC_ALL=C.UTF-8 + +export CI_IMAGE_NAME_TAG="docker.io/ubuntu:24.04" +LIBCXX_DIR="/msan/cxx_build/" +export MSAN_FLAGS="-fsanitize=memory -fsanitize-memory-track-origins=2 -fno-omit-frame-pointer -g -O1 -fno-optimize-sibling-calls" +LIBCXX_FLAGS="-nostdinc++ -nostdlib++ -isystem ${LIBCXX_DIR}include/c++/v1 -L${LIBCXX_DIR}lib -Wl,-rpath,${LIBCXX_DIR}lib -lc++ -lc++abi -lpthread -Wno-unused-command-line-argument" +export MSAN_AND_LIBCXX_FLAGS="${MSAN_FLAGS} ${LIBCXX_FLAGS}" + +export CONTAINER_NAME="ci_native_msan" +export PACKAGES="ninja-build" +# BDB generates false-positives and will be removed in future +export DEP_OPTS="DEBUG=1 NO_BDB=1 NO_QT=1 CC=clang CXX=clang++ CFLAGS='${MSAN_FLAGS}' CXXFLAGS='${MSAN_AND_LIBCXX_FLAGS}'" +export GOAL="install" +# Setting CMAKE_{C,CXX}_FLAGS_DEBUG flags to an empty string ensures that the flags set in MSAN_FLAGS remain unaltered. +# _FORTIFY_SOURCE is not compatible with MSAN. +export BITCOIN_CONFIG="\ + -DCMAKE_BUILD_TYPE=Debug \ + -DCMAKE_C_FLAGS_DEBUG='' \ + -DCMAKE_CXX_FLAGS_DEBUG='' \ + -DSANITIZERS=memory \ + -DAPPEND_CPPFLAGS='-U_FORTIFY_SOURCE' \ +" +export USE_MEMORY_SANITIZER="true" +export RUN_FUNCTIONAL_TESTS="false" diff --git a/ci/test/00_setup_env_native_nowallet.sh b/ci/test/00_setup_env_native_nowallet.sh deleted file mode 100644 index 9c2be4cfac8e8..0000000000000 --- a/ci/test/00_setup_env_native_nowallet.sh +++ /dev/null @@ -1,13 +0,0 @@ -#!/usr/bin/env bash -# -# Copyright (c) 2019-2020 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. 
- -export LC_ALL=C.UTF-8 - -export CONTAINER_NAME=ci_native_nowallet -export PACKAGES="python3-zmq" -export DEP_OPTS="NO_WALLET=1" -export GOAL="install" -export BITCOIN_CONFIG="--enable-glibc-back-compat --enable-reduce-exports" diff --git a/ci/test/00_setup_env_native_nowallet_libbitcoinkernel.sh b/ci/test/00_setup_env_native_nowallet_libbitcoinkernel.sh new file mode 100755 index 0000000000000..3d5d1b7745634 --- /dev/null +++ b/ci/test/00_setup_env_native_nowallet_libbitcoinkernel.sh @@ -0,0 +1,15 @@ +#!/usr/bin/env bash +# +# Copyright (c) 2019-present The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. + +export LC_ALL=C.UTF-8 + +export CONTAINER_NAME=ci_native_nowallet_libbitcoinkernel +export CI_IMAGE_NAME_TAG="docker.io/debian:bookworm" +# Use minimum supported python3.10 (or best-effort 3.11) and clang-16, see doc/dependencies.md +export PACKAGES="python3-zmq clang-16 llvm-16 libc++abi-16-dev libc++-16-dev" +export DEP_OPTS="NO_WALLET=1 CC=clang-16 CXX='clang++-16 -stdlib=libc++'" +export GOAL="install" +export BITCOIN_CONFIG="-DREDUCE_EXPORTS=ON -DBUILD_UTIL_CHAINSTATE=ON -DBUILD_KERNEL_LIB=ON -DBUILD_SHARED_LIBS=ON" diff --git a/ci/test/00_setup_env_native_previous_releases.sh b/ci/test/00_setup_env_native_previous_releases.sh new file mode 100755 index 0000000000000..717eb67a28ed6 --- /dev/null +++ b/ci/test/00_setup_env_native_previous_releases.sh @@ -0,0 +1,27 @@ +#!/usr/bin/env bash +# +# Copyright (c) 2019-present The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. + +export LC_ALL=C.UTF-8 + +export CONTAINER_NAME=ci_native_previous_releases +export CI_IMAGE_NAME_TAG="docker.io/ubuntu:22.04" +# Use minimum supported python3.10 and gcc-11, see doc/dependencies.md +export PACKAGES="gcc-11 g++-11 python3-zmq" +export DEP_OPTS="NO_UPNP=1 DEBUG=1 CC=gcc-11 CXX=g++-11" +export TEST_RUNNER_EXTRA="--previous-releases --coverage --extended --exclude feature_dbcrash" # Run extended tests so that coverage does not fail, but exclude the very slow dbcrash +export RUN_UNIT_TESTS_SEQUENTIAL="true" +export RUN_UNIT_TESTS="false" +export GOAL="install" +export DOWNLOAD_PREVIOUS_RELEASES="true" +export BITCOIN_CONFIG="\ + -DWITH_ZMQ=ON -DBUILD_GUI=ON -DREDUCE_EXPORTS=ON \ + -DCMAKE_BUILD_TYPE=Debug \ + -DCMAKE_C_FLAGS='-funsigned-char' \ + -DCMAKE_C_FLAGS_DEBUG='-g0 -O2' \ + -DCMAKE_CXX_FLAGS='-funsigned-char' \ + -DCMAKE_CXX_FLAGS_DEBUG='-g0 -O2' \ + -DAPPEND_CPPFLAGS='-DBOOST_MULTI_INDEX_ENABLE_SAFE_MODE' \ +" diff --git a/ci/test/00_setup_env_native_qt5.sh b/ci/test/00_setup_env_native_qt5.sh deleted file mode 100644 index 906687175d6b9..0000000000000 --- a/ci/test/00_setup_env_native_qt5.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/usr/bin/env bash -# -# Copyright (c) 2019-2020 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. 
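DOWNLOAD_PREVIOUS_RELEASES above is acted on by ci/test/03_test_script.sh, which fetches the historical binaries that the --previous-releases functional tests in TEST_RUNNER_EXTRA rely on. The step can also be run by hand:

    # PREVIOUS_RELEASES_DIR defaults to $BASE_ROOT_DIR/prev_releases (see
    # ci/test/00_setup_env.sh above); -b selects prebuilt binaries.
    test/get_previous_releases.py -b -t "$PREVIOUS_RELEASES_DIR"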
- -export LC_ALL=C.UTF-8 - -export CONTAINER_NAME=ci_native_qt5 -export PACKAGES="python3-zmq qtbase5-dev qttools5-dev-tools libdbus-1-dev libharfbuzz-dev" -export DEP_OPTS="NO_QT=1 NO_UPNP=1 DEBUG=1 ALLOW_HOST_PACKAGES=1" -export TEST_RUNNER_EXTRA="--coverage --extended --exclude feature_dbcrash" # Run extended tests so that coverage does not fail, but exclude the very slow dbcrash -export RUN_UNIT_TESTS_SEQUENTIAL="true" -export RUN_UNIT_TESTS="false" -export GOAL="install" -export TEST_PREVIOUS_RELEASES=true -export PREVIOUS_RELEASES_TO_DOWNLOAD="v0.15.2 v0.16.3 v0.17.1 v0.18.1 v0.19.1" -export BITCOIN_CONFIG="--enable-zmq --with-gui=qt5 --enable-glibc-back-compat --enable-reduce-exports --enable-c++17 --enable-debug CFLAGS=\"-g0 -O2 -funsigned-char\" CXXFLAGS=\"-g0 -O2 -funsigned-char\"" diff --git a/ci/test/00_setup_env_native_tidy.sh b/ci/test/00_setup_env_native_tidy.sh new file mode 100755 index 0000000000000..cc1dea09cbfdf --- /dev/null +++ b/ci/test/00_setup_env_native_tidy.sh @@ -0,0 +1,27 @@ +#!/usr/bin/env bash +# +# Copyright (c) 2023-present The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. + +export LC_ALL=C.UTF-8 + +export CI_IMAGE_NAME_TAG="docker.io/ubuntu:24.04" +export CONTAINER_NAME=ci_native_tidy +export TIDY_LLVM_V="18" +export PACKAGES="clang-${TIDY_LLVM_V} libclang-${TIDY_LLVM_V}-dev llvm-${TIDY_LLVM_V}-dev libomp-${TIDY_LLVM_V}-dev clang-tidy-${TIDY_LLVM_V} jq libevent-dev libboost-dev libminiupnpc-dev libzmq3-dev systemtap-sdt-dev qtbase5-dev qttools5-dev qttools5-dev-tools libqrencode-dev libsqlite3-dev libdb++-dev" +export NO_DEPENDS=1 +export RUN_UNIT_TESTS=false +export RUN_FUNCTIONAL_TESTS=false +export RUN_FUZZ_TESTS=false +export RUN_CHECK_DEPS=true +export RUN_TIDY=true +export GOAL="install" +export BITCOIN_CONFIG="\ + -DWITH_ZMQ=ON -DBUILD_GUI=ON -DBUILD_BENCH=ON -DWITH_MINIUPNPC=ON -DWITH_USDT=ON -DWITH_BDB=ON -DWARN_INCOMPATIBLE_BDB=OFF \ + -DENABLE_HARDENING=OFF \ + -DCMAKE_C_COMPILER=clang-${TIDY_LLVM_V} \ + -DCMAKE_CXX_COMPILER=clang++-${TIDY_LLVM_V} \ + -DCMAKE_C_FLAGS_RELWITHDEBINFO='-O0 -g0' \ + -DCMAKE_CXX_FLAGS_RELWITHDEBINFO='-O0 -g0' \ +" diff --git a/ci/test/00_setup_env_native_tsan.sh b/ci/test/00_setup_env_native_tsan.sh old mode 100644 new mode 100755 index 73ab5eebb684c..9c2da778b484b --- a/ci/test/00_setup_env_native_tsan.sh +++ b/ci/test/00_setup_env_native_tsan.sh @@ -1,14 +1,15 @@ #!/usr/bin/env bash # -# Copyright (c) 2019-2020 The Bitcoin Core developers +# Copyright (c) 2019-present The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
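The tidy job above runs no tests of its own; RUN_TIDY=true makes ci/test/03_test_script.sh export compile_commands.json, build the in-tree clang-tidy plugin from contrib/devtools/bitcoin-tidy, and load it into run-clang-tidy (followed by an include-what-you-use pass). Condensed from that script, with the job count as a placeholder:

    cmake -B /tidy-build -DLLVM_DIR=/usr/lib/llvm-18/cmake -DCMAKE_BUILD_TYPE=Release \
      -S contrib/devtools/bitcoin-tidy
    cmake --build /tidy-build -j4
    run-clang-tidy-18 -quiet -load=/tidy-build/libbitcoin-tidy.so -j4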
export LC_ALL=C.UTF-8 export CONTAINER_NAME=ci_native_tsan -export DOCKER_NAME_TAG=ubuntu:16.04 -export PACKAGES="clang-8 llvm-8 python3-zmq qtbase5-dev qttools5-dev-tools libevent-dev bsdmainutils libboost-system-dev libboost-filesystem-dev libboost-test-dev libboost-thread-dev libdb5.3++-dev libminiupnpc-dev libzmq3-dev libqrencode-dev" -export NO_DEPENDS=1 +export CI_IMAGE_NAME_TAG="docker.io/ubuntu:24.04" +export PACKAGES="clang-18 llvm-18 libclang-rt-18-dev libc++abi-18-dev libc++-18-dev python3-zmq" +export DEP_OPTS="CC=clang-18 CXX='clang++-18 -stdlib=libc++'" export GOAL="install" -export BITCOIN_CONFIG="--enable-zmq --disable-wallet --with-gui=qt5 CPPFLAGS='-DARENA_DEBUG -DDEBUG_LOCKORDER' --with-sanitizers=thread --disable-hardening --disable-asm CC=clang-8 CXX=clang++-8" +export BITCOIN_CONFIG="-DWITH_ZMQ=ON -DSANITIZERS=thread \ +-DAPPEND_CPPFLAGS='-DARENA_DEBUG -DDEBUG_LOCKORDER -DDEBUG_LOCKCONTENTION -D_LIBCPP_REMOVE_TRANSITIVE_INCLUDES'" diff --git a/ci/test/00_setup_env_native_valgrind.sh b/ci/test/00_setup_env_native_valgrind.sh old mode 100644 new mode 100755 index 153a781b0a662..3c5622cd0276c --- a/ci/test/00_setup_env_native_valgrind.sh +++ b/ci/test/00_setup_env_native_valgrind.sh @@ -1,19 +1,21 @@ #!/usr/bin/env bash # -# Copyright (c) 2019-2020 The Bitcoin Core developers +# Copyright (c) 2019-present The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. export LC_ALL=C.UTF-8 +export CI_IMAGE_NAME_TAG="docker.io/ubuntu:24.04" export CONTAINER_NAME=ci_native_valgrind -export PACKAGES="valgrind clang llvm python3-zmq libevent-dev bsdmainutils libboost-system-dev libboost-filesystem-dev libboost-test-dev libboost-thread-dev libdb5.3++-dev libminiupnpc-dev libzmq3-dev" +export PACKAGES="valgrind clang-16 llvm-16 libclang-rt-16-dev python3-zmq libevent-dev libboost-dev libdb5.3++-dev libminiupnpc-dev libzmq3-dev libsqlite3-dev" export USE_VALGRIND=1 export NO_DEPENDS=1 -if [[ "${TRAVIS}" == "true" && "${TRAVIS_REPO_SLUG}" != "bitcoin/bitcoin" ]]; then - export TEST_RUNNER_EXTRA="wallet_disable" # Only run wallet_disable as a smoke test to not hit the 50 min travis time limit -else - export TEST_RUNNER_EXTRA="--exclude rpc_bind --factor=2" # Excluded for now, see https://github.com/bitcoin/bitcoin/issues/17765#issuecomment-602068547 -fi +export TEST_RUNNER_EXTRA="--exclude feature_init,rpc_bind,feature_bind_extra" # feature_init excluded for now, see https://github.com/bitcoin/bitcoin/issues/30011 ; bind tests excluded for now, see https://github.com/bitcoin/bitcoin/issues/17765#issuecomment-602068547 export GOAL="install" -export BITCOIN_CONFIG="--enable-zmq --with-incompatible-bdb --with-gui=no CC=clang CXX=clang++" # TODO enable GUI +# TODO enable GUI +export BITCOIN_CONFIG="\ + -DWITH_ZMQ=ON -DWITH_BDB=ON -DWITH_MINIUPNPC=ON -DWARN_INCOMPATIBLE_BDB=OFF -DBUILD_GUI=OFF \ + -DCMAKE_C_COMPILER=clang-16 \ + -DCMAKE_CXX_COMPILER=clang++-16 \ +" diff --git a/ci/test/00_setup_env_s390x.sh b/ci/test/00_setup_env_s390x.sh old mode 100644 new mode 100755 index c180d023de5e8..eb12bc6fd179b --- a/ci/test/00_setup_env_s390x.sh +++ b/ci/test/00_setup_env_s390x.sh @@ -1,25 +1,16 @@ #!/usr/bin/env bash # -# Copyright (c) 2019-2020 The Bitcoin Core developers +# Copyright (c) 2019-present The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
export LC_ALL=C.UTF-8 export HOST=s390x-linux-gnu -# The host arch is unknown, so we run the tests through qemu. -# If the host is s390x and wants to run the tests natively, it can set QEMU_USER_CMD to the empty string. -if [ -z ${QEMU_USER_CMD+x} ]; then export QEMU_USER_CMD="${QEMU_USER_CMD:-"qemu-s390x"}"; fi export PACKAGES="python3-zmq" -if [ -n "$QEMU_USER_CMD" ]; then - # Likely cross-compiling, so install the needed gcc and qemu-user - export DPKG_ADD_ARCH="s390x" - export PACKAGES="$PACKAGES g++-s390x-linux-gnu qemu-user libc6:s390x libstdc++6:s390x libfontconfig1:s390x libxcb1:s390x" -fi -# Use debian to avoid 404 apt errors export CONTAINER_NAME=ci_s390x -export DOCKER_NAME_TAG="debian:buster" -export RUN_UNIT_TESTS=true +export CI_IMAGE_NAME_TAG="docker.io/s390x/ubuntu:24.04" +export TEST_RUNNER_EXTRA="--exclude rpc_bind,feature_bind_extra" # Excluded for now, see https://github.com/bitcoin/bitcoin/issues/17765#issuecomment-602068547 export RUN_FUNCTIONAL_TESTS=true export GOAL="install" -export BITCOIN_CONFIG="--enable-reduce-exports --with-incompatible-bdb" +export BITCOIN_CONFIG="-DREDUCE_EXPORTS=ON" diff --git a/ci/test/00_setup_env_win64.sh b/ci/test/00_setup_env_win64.sh old mode 100644 new mode 100755 index 8f0c62a1a6e70..15b9e407b6051 --- a/ci/test/00_setup_env_win64.sh +++ b/ci/test/00_setup_env_win64.sh @@ -1,14 +1,20 @@ #!/usr/bin/env bash # -# Copyright (c) 2019-2020 The Bitcoin Core developers +# Copyright (c) 2019-present The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. export LC_ALL=C.UTF-8 export CONTAINER_NAME=ci_win64 +export CI_IMAGE_NAME_TAG="docker.io/amd64/debian:bookworm" # Check that https://packages.debian.org/bookworm/g++-mingw-w64-x86-64-posix (version 12.2, similar to guix) can cross-compile export HOST=x86_64-w64-mingw32 -export PACKAGES="python3 nsis g++-mingw-w64-x86-64 wine-binfmt wine64" +export DPKG_ADD_ARCH="i386" +export PACKAGES="nsis g++-mingw-w64-x86-64-posix wine-binfmt wine64 wine32 file" export RUN_FUNCTIONAL_TESTS=false export GOAL="deploy" -export BITCOIN_CONFIG="--enable-reduce-exports --disable-gui-tests" +# Prior to 11.0.0, the mingw-w64 headers were missing noreturn attributes, causing warnings when +# cross-compiling for Windows. https://sourceforge.net/p/mingw-w64/bugs/306/ +# https://github.com/mingw-w64/mingw-w64/commit/1690994f515910a31b9fb7c7bd3a52d4ba987abe +export BITCOIN_CONFIG="-DREDUCE_EXPORTS=ON -DBUILD_GUI_TESTS=OFF \ +-DCMAKE_CXX_FLAGS='-Wno-error=return-type -Wno-error=maybe-uninitialized -Wno-error=array-bounds'" diff --git a/ci/test/01_base_install.sh b/ci/test/01_base_install.sh new file mode 100755 index 0000000000000..538a58cbd525f --- /dev/null +++ b/ci/test/01_base_install.sh @@ -0,0 +1,93 @@ +#!/usr/bin/env bash +# +# Copyright (c) 2018-present The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. 
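For the mingw32 host above the unit tests still run even though the binaries are Windows executables: ci/test/wrap-wine.sh (near the end of this diff) swaps each .exe for a shim that re-invokes the original under wine, retrying once after a short sleep. A generated shim looks roughly like this, with an illustrative path:

    #!/usr/bin/env bash
    ( wine "/ci_container_base/ci/scratch/out/bin/test_bitcoin.exe_orig" "$@" ) || \
      ( sleep 1 && wine "/ci_container_base/ci/scratch/out/bin/test_bitcoin.exe_orig" "$@" )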
+ +export LC_ALL=C.UTF-8 + +set -ex + +CFG_DONE="ci.base-install-done" # Use a global git setting to remember whether this script ran to avoid running it twice + +if [ "$(git config --global ${CFG_DONE})" == "true" ]; then + echo "Skip base install" + exit 0 +fi + +if [ -n "$DPKG_ADD_ARCH" ]; then + dpkg --add-architecture "$DPKG_ADD_ARCH" +fi + +if [[ $CI_IMAGE_NAME_TAG == *centos* ]]; then + bash -c "dnf -y install epel-release" + bash -c "dnf -y --allowerasing install $CI_BASE_PACKAGES $PACKAGES" +elif [ "$CI_OS_NAME" != "macos" ]; then + if [[ -n "${APPEND_APT_SOURCES_LIST}" ]]; then + echo "${APPEND_APT_SOURCES_LIST}" >> /etc/apt/sources.list + fi + ${CI_RETRY_EXE} apt-get update + ${CI_RETRY_EXE} bash -c "apt-get install --no-install-recommends --no-upgrade -y $PACKAGES $CI_BASE_PACKAGES" +fi + +if [ -n "$PIP_PACKAGES" ]; then + # shellcheck disable=SC2086 + ${CI_RETRY_EXE} pip3 install --user $PIP_PACKAGES +fi + +if [[ ${USE_MEMORY_SANITIZER} == "true" ]]; then + ${CI_RETRY_EXE} git clone --depth=1 https://github.com/llvm/llvm-project -b "llvmorg-19.1.0" /msan/llvm-project + + cmake -G Ninja -B /msan/clang_build/ \ + -DLLVM_ENABLE_PROJECTS="clang" \ + -DCMAKE_BUILD_TYPE=Release \ + -DLLVM_TARGETS_TO_BUILD=Native \ + -DLLVM_ENABLE_RUNTIMES="compiler-rt;libcxx;libcxxabi;libunwind" \ + -S /msan/llvm-project/llvm + + ninja -C /msan/clang_build/ "-j$( nproc )" # Use nproc, because MAKEJOBS is the default in docker image builds + ninja -C /msan/clang_build/ install-runtimes + + update-alternatives --install /usr/bin/clang++ clang++ /msan/clang_build/bin/clang++ 100 + update-alternatives --install /usr/bin/clang clang /msan/clang_build/bin/clang 100 + update-alternatives --install /usr/bin/llvm-symbolizer llvm-symbolizer /msan/clang_build/bin/llvm-symbolizer 100 + + cmake -G Ninja -B /msan/cxx_build/ \ + -DLLVM_ENABLE_RUNTIMES="libcxx;libcxxabi;libunwind" \ + -DCMAKE_BUILD_TYPE=Release \ + -DLLVM_USE_SANITIZER=MemoryWithOrigins \ + -DCMAKE_C_COMPILER=clang \ + -DCMAKE_CXX_COMPILER=clang++ \ + -DLLVM_TARGETS_TO_BUILD=Native \ + -DLLVM_ENABLE_PER_TARGET_RUNTIME_DIR=OFF \ + -DLIBCXXABI_USE_LLVM_UNWINDER=OFF \ + -DLIBCXX_HARDENING_MODE=debug \ + -S /msan/llvm-project/runtimes + + ninja -C /msan/cxx_build/ "-j$( nproc )" # Use nproc, because MAKEJOBS is the default in docker image builds + + # Clear no longer needed source folder + du -sh /msan/llvm-project + rm -rf /msan/llvm-project +fi + +if [[ "${RUN_TIDY}" == "true" ]]; then + ${CI_RETRY_EXE} git clone --depth=1 https://github.com/include-what-you-use/include-what-you-use -b clang_"${TIDY_LLVM_V}" /include-what-you-use + cmake -B /iwyu-build/ -G 'Unix Makefiles' -DCMAKE_PREFIX_PATH=/usr/lib/llvm-"${TIDY_LLVM_V}" -S /include-what-you-use + make -C /iwyu-build/ install "-j$( nproc )" # Use nproc, because MAKEJOBS is the default in docker image builds +fi + +mkdir -p "${DEPENDS_DIR}/SDKs" "${DEPENDS_DIR}/sdk-sources" + +OSX_SDK_BASENAME="Xcode-${XCODE_VERSION}-${XCODE_BUILD_ID}-extracted-SDK-with-libcxx-headers" + +if [ -n "$XCODE_VERSION" ] && [ ! -d "${DEPENDS_DIR}/SDKs/${OSX_SDK_BASENAME}" ]; then + OSX_SDK_FILENAME="${OSX_SDK_BASENAME}.tar.gz" + OSX_SDK_PATH="${DEPENDS_DIR}/sdk-sources/${OSX_SDK_FILENAME}" + if [ ! 
-f "$OSX_SDK_PATH" ]; then + ${CI_RETRY_EXE} curl --location --fail "${SDK_URL}/${OSX_SDK_FILENAME}" -o "$OSX_SDK_PATH" + fi + tar -C "${DEPENDS_DIR}/SDKs" -xf "$OSX_SDK_PATH" +fi + +git config --global ${CFG_DONE} "true" diff --git a/ci/test/02_run_container.sh b/ci/test/02_run_container.sh new file mode 100755 index 0000000000000..1727f9296b196 --- /dev/null +++ b/ci/test/02_run_container.sh @@ -0,0 +1,123 @@ +#!/usr/bin/env bash +# +# Copyright (c) 2018-present The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. + +export LC_ALL=C.UTF-8 +export CI_IMAGE_LABEL="bitcoin-ci-test" + +set -ex + +if [ -z "$DANGER_RUN_CI_ON_HOST" ]; then + # Export all env vars to avoid missing some. + # Though, exclude those with newlines to avoid parsing problems. + python3 -c 'import os; [print(f"{key}={value}") for key, value in os.environ.items() if "\n" not in value and "HOME" != key and "PATH" != key and "USER" != key]' | tee "/tmp/env-$USER-$CONTAINER_NAME" + # System-dependent env vars must be kept as is. So read them from the container. + docker run --rm "${CI_IMAGE_NAME_TAG}" bash -c "env | grep --extended-regexp '^(HOME|PATH|USER)='" | tee --append "/tmp/env-$USER-$CONTAINER_NAME" + echo "Creating $CI_IMAGE_NAME_TAG container to run in" + + DOCKER_BUILDKIT=1 docker build \ + --file "${BASE_READ_ONLY_DIR}/ci/test_imagefile" \ + --build-arg "CI_IMAGE_NAME_TAG=${CI_IMAGE_NAME_TAG}" \ + --build-arg "FILE_ENV=${FILE_ENV}" \ + --label="${CI_IMAGE_LABEL}" \ + --tag="${CONTAINER_NAME}" \ + "${BASE_READ_ONLY_DIR}" + + docker volume create "${CONTAINER_NAME}_ccache" || true + docker volume create "${CONTAINER_NAME}_depends" || true + docker volume create "${CONTAINER_NAME}_depends_sources" || true + docker volume create "${CONTAINER_NAME}_previous_releases" || true + + CI_CCACHE_MOUNT="type=volume,src=${CONTAINER_NAME}_ccache,dst=$CCACHE_DIR" + CI_DEPENDS_MOUNT="type=volume,src=${CONTAINER_NAME}_depends,dst=$DEPENDS_DIR/built" + CI_DEPENDS_SOURCES_MOUNT="type=volume,src=${CONTAINER_NAME}_depends_sources,dst=$DEPENDS_DIR/sources" + CI_PREVIOUS_RELEASES_MOUNT="type=volume,src=${CONTAINER_NAME}_previous_releases,dst=$PREVIOUS_RELEASES_DIR" + + if [ "$DANGER_CI_ON_HOST_CACHE_FOLDERS" ]; then + # ensure the directories exist + mkdir -p "${CCACHE_DIR}" + mkdir -p "${DEPENDS_DIR}/built" + mkdir -p "${DEPENDS_DIR}/sources" + mkdir -p "${PREVIOUS_RELEASES_DIR}" + + CI_CCACHE_MOUNT="type=bind,src=${CCACHE_DIR},dst=$CCACHE_DIR" + CI_DEPENDS_MOUNT="type=bind,src=${DEPENDS_DIR}/built,dst=$DEPENDS_DIR/built" + CI_DEPENDS_SOURCES_MOUNT="type=bind,src=${DEPENDS_DIR}/sources,dst=$DEPENDS_DIR/sources" + CI_PREVIOUS_RELEASES_MOUNT="type=bind,src=${PREVIOUS_RELEASES_DIR},dst=$PREVIOUS_RELEASES_DIR" + fi + + if [ "$DANGER_CI_ON_HOST_CCACHE_FOLDER" ]; then + if [ ! -d "${CCACHE_DIR}" ]; then + echo "Error: Directory '${CCACHE_DIR}' must be created in advance." + exit 1 + fi + CI_CCACHE_MOUNT="type=bind,src=${CCACHE_DIR},dst=${CCACHE_DIR}" + fi + + docker network create --ipv6 --subnet 1111:1111::/112 ci-ip6net || true + + if [ -n "${RESTART_CI_DOCKER_BEFORE_RUN}" ] ; then + echo "Restart docker before run to stop and clear all containers started with --rm" + podman container rm --force --all # Similar to "systemctl restart docker" + + # Still prune everything in case the filtered pruning doesn't work, or if labels were not set + # on a previous run. 
Belt and suspenders approach, should be fine to remove in the future. + # Prune images used by --external containers (e.g. build containers) when + # using podman. + echo "Prune all dangling images" + podman image prune --force --external + fi + echo "Prune all dangling $CI_IMAGE_LABEL images" + # When detecting podman-docker, `--external` should be added. + docker image prune --force --filter "label=$CI_IMAGE_LABEL" + + # Append $USER to /tmp/env to support multi-user systems and $CONTAINER_NAME + # to allow support starting multiple runs simultaneously by the same user. + # shellcheck disable=SC2086 + CI_CONTAINER_ID=$(docker run --cap-add LINUX_IMMUTABLE $CI_CONTAINER_CAP --rm --interactive --detach --tty \ + --mount "type=bind,src=$BASE_READ_ONLY_DIR,dst=$BASE_READ_ONLY_DIR,readonly" \ + --mount "${CI_CCACHE_MOUNT}" \ + --mount "${CI_DEPENDS_MOUNT}" \ + --mount "${CI_DEPENDS_SOURCES_MOUNT}" \ + --mount "${CI_PREVIOUS_RELEASES_MOUNT}" \ + --env-file /tmp/env-$USER-$CONTAINER_NAME \ + --name "$CONTAINER_NAME" \ + --network ci-ip6net \ + "$CONTAINER_NAME") + export CI_CONTAINER_ID + export CI_EXEC_CMD_PREFIX="docker exec ${CI_CONTAINER_ID}" +else + echo "Running on host system without docker wrapper" + echo "Create missing folders" + mkdir -p "${CCACHE_DIR}" + mkdir -p "${PREVIOUS_RELEASES_DIR}" +fi + +if [ "$CI_OS_NAME" == "macos" ]; then + IN_GETOPT_BIN="$(brew --prefix gnu-getopt)/bin/getopt" + export IN_GETOPT_BIN +fi + +CI_EXEC () { + $CI_EXEC_CMD_PREFIX bash -c "export PATH=\"/path_with space:${BINS_SCRATCH_DIR}:${BASE_ROOT_DIR}/ci/retry:\$PATH\" && cd \"${BASE_ROOT_DIR}\" && $*" +} +export -f CI_EXEC + +# Normalize all folders to BASE_ROOT_DIR +CI_EXEC rsync --archive --stats --human-readable "${BASE_READ_ONLY_DIR}/" "${BASE_ROOT_DIR}" || echo "Nothing to copy from ${BASE_READ_ONLY_DIR}/" +CI_EXEC "${BASE_ROOT_DIR}/ci/test/01_base_install.sh" + +# Fixes permission issues when there is a container UID/GID mismatch with the owner +# of the git source code directory. +CI_EXEC git config --global --add safe.directory \"*\" + +CI_EXEC mkdir -p "${BINS_SCRATCH_DIR}" + +CI_EXEC "${BASE_ROOT_DIR}/ci/test/03_test_script.sh" + +if [ -z "$DANGER_RUN_CI_ON_HOST" ]; then + echo "Stop and remove CI container by ID" + docker container kill "${CI_CONTAINER_ID}" +fi diff --git a/ci/test/03_before_install.sh b/ci/test/03_before_install.sh deleted file mode 100755 index e939b9eeeb296..0000000000000 --- a/ci/test/03_before_install.sh +++ /dev/null @@ -1,26 +0,0 @@ -#!/usr/bin/env bash -# -# Copyright (c) 2018-2019 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. - -export LC_ALL=C.UTF-8 - -# Add llvm-symbolizer directory to PATH. Needed to get symbolized stack traces from the sanitizers. -PATH=$PATH:/usr/lib/llvm-6.0/bin/ -export PATH - -BEGIN_FOLD () { - echo "" - CURRENT_FOLD_NAME=$1 - echo "travis_fold:start:${CURRENT_FOLD_NAME}" -} - -END_FOLD () { - RET=$? 
- echo "travis_fold:end:${CURRENT_FOLD_NAME}" - if [ $RET != 0 ]; then - echo "${CURRENT_FOLD_NAME} failed with status code ${RET}" - fi -} - diff --git a/ci/test/03_test_script.sh b/ci/test/03_test_script.sh new file mode 100755 index 0000000000000..6eab4f74676b1 --- /dev/null +++ b/ci/test/03_test_script.sh @@ -0,0 +1,191 @@ +#!/usr/bin/env bash +# +# Copyright (c) 2018-present The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. + +export LC_ALL=C.UTF-8 + +set -ex + +export ASAN_OPTIONS="detect_leaks=1:detect_stack_use_after_return=1:check_initialization_order=1:strict_init_order=1" +export LSAN_OPTIONS="suppressions=${BASE_ROOT_DIR}/test/sanitizer_suppressions/lsan" +export TSAN_OPTIONS="suppressions=${BASE_ROOT_DIR}/test/sanitizer_suppressions/tsan:halt_on_error=1" +export UBSAN_OPTIONS="suppressions=${BASE_ROOT_DIR}/test/sanitizer_suppressions/ubsan:print_stacktrace=1:halt_on_error=1:report_error_type=1" + +echo "Number of available processing units: $(nproc)" +if [ "$CI_OS_NAME" == "macos" ]; then + top -l 1 -s 0 | awk ' /PhysMem/ {print}' +else + free -m -h + echo "System info: $(uname --kernel-name --kernel-release)" + lscpu +fi +echo "Free disk space:" +df -h + +# What host to compile for. See also ./depends/README.md +# Tests that need cross-compilation export the appropriate HOST. +# Tests that run natively guess the host +export HOST=${HOST:-$("$BASE_ROOT_DIR/depends/config.guess")} + +echo "=== BEGIN env ===" +env +echo "=== END env ===" + +( + # compact->outputs[i].file_size is uninitialized memory, so reading it is UB. + # The statistic bytes_written is only used for logging, which is disabled in + # CI, so as a temporary minimal fix to work around UB and CI failures, leave + # bytes_written unmodified. + # See https://github.com/bitcoin/bitcoin/pull/28359#issuecomment-1698694748 + # Tee patch to stdout to make it clear CI is testing modified code. + tee >(patch -p1) <<'EOF' +--- a/src/leveldb/db/db_impl.cc ++++ b/src/leveldb/db/db_impl.cc +@@ -1028,9 +1028,6 @@ Status DBImpl::DoCompactionWork(CompactionState* compact) { + stats.bytes_read += compact->compaction->input(which, i)->file_size; + } + } +- for (size_t i = 0; i < compact->outputs.size(); i++) { +- stats.bytes_written += compact->outputs[i].file_size; +- } + + mutex_.Lock(); + stats_[compact->compaction->level() + 1].Add(stats); +EOF +) + +if [ "$RUN_FUZZ_TESTS" = "true" ]; then + export DIR_FUZZ_IN=${DIR_QA_ASSETS}/fuzz_corpora/ + if [ ! -d "$DIR_FUZZ_IN" ]; then + ${CI_RETRY_EXE} git clone --depth=1 https://github.com/bitcoin-core/qa-assets "${DIR_QA_ASSETS}" + fi + ( + cd "${DIR_QA_ASSETS}" + echo "Using qa-assets repo from commit ..." + git log -1 + ) +elif [ "$RUN_UNIT_TESTS" = "true" ] || [ "$RUN_UNIT_TESTS_SEQUENTIAL" = "true" ]; then + export DIR_UNIT_TEST_DATA=${DIR_QA_ASSETS}/unit_test_data/ + if [ ! 
-d "$DIR_UNIT_TEST_DATA" ]; then + mkdir -p "$DIR_UNIT_TEST_DATA" + ${CI_RETRY_EXE} curl --location --fail https://github.com/bitcoin-core/qa-assets/raw/main/unit_test_data/script_assets_test.json -o "${DIR_UNIT_TEST_DATA}/script_assets_test.json" + fi +fi + +if [ "$USE_BUSY_BOX" = "true" ]; then + echo "Setup to use BusyBox utils" + # tar excluded for now because it requires passing in the exact archive type in ./depends (fixed in later BusyBox version) + # ar excluded for now because it does not recognize the -q option in ./depends (unknown if fixed) + for util in $(busybox --list | grep -v "^ar$" | grep -v "^tar$" ); do ln -s "$(command -v busybox)" "${BINS_SCRATCH_DIR}/$util"; done + # Print BusyBox version + patch --help +fi + +# Make sure default datadir does not exist and is never read by creating a dummy file +if [ "$CI_OS_NAME" == "macos" ]; then + echo > "${HOME}/Library/Application Support/Bitcoin" +else + echo > "${HOME}/.bitcoin" +fi + +if [ -z "$NO_DEPENDS" ]; then + if [[ $CI_IMAGE_NAME_TAG == *centos* ]]; then + SHELL_OPTS="CONFIG_SHELL=/bin/dash" + else + SHELL_OPTS="CONFIG_SHELL=" + fi + bash -c "$SHELL_OPTS make $MAKEJOBS -C depends HOST=$HOST $DEP_OPTS LOG=1" +fi +if [ "$DOWNLOAD_PREVIOUS_RELEASES" = "true" ]; then + test/get_previous_releases.py -b -t "$PREVIOUS_RELEASES_DIR" +fi + +BITCOIN_CONFIG_ALL="-DBUILD_BENCH=ON -DBUILD_FUZZ_BINARY=ON" +if [ -z "$NO_DEPENDS" ]; then + BITCOIN_CONFIG_ALL="${BITCOIN_CONFIG_ALL} -DCMAKE_TOOLCHAIN_FILE=$DEPENDS_DIR/$HOST/toolchain.cmake" +fi +if [ -z "$NO_WERROR" ]; then + BITCOIN_CONFIG_ALL="${BITCOIN_CONFIG_ALL} -DWERROR=ON" +fi + +ccache --zero-stats +PRINT_CCACHE_STATISTICS="ccache --version | head -n 1 && ccache --show-stats" + +# Folder where the build is done. +BASE_BUILD_DIR=${BASE_BUILD_DIR:-$BASE_SCRATCH_DIR/build-$HOST} +mkdir -p "${BASE_BUILD_DIR}" +cd "${BASE_BUILD_DIR}" + +BITCOIN_CONFIG_ALL="$BITCOIN_CONFIG_ALL -DENABLE_EXTERNAL_SIGNER=ON -DCMAKE_INSTALL_PREFIX=$BASE_OUTDIR" + +if [[ "${RUN_TIDY}" == "true" ]]; then + BITCOIN_CONFIG_ALL="$BITCOIN_CONFIG_ALL -DCMAKE_EXPORT_COMPILE_COMMANDS=ON" +fi + +bash -c "cmake -S $BASE_ROOT_DIR $BITCOIN_CONFIG_ALL $BITCOIN_CONFIG || ( (cat $(cmake -P "${BASE_ROOT_DIR}/ci/test/GetCMakeLogFiles.cmake")) && false)" + +bash -c "cmake --build . $MAKEJOBS --target all $GOAL" || ( echo "Build failure. Verbose build follows." && cmake --build . --target all "$GOAL" --verbose ; false ) + +bash -c "${PRINT_CCACHE_STATISTICS}" +du -sh "${DEPENDS_DIR}"/*/ +du -sh "${PREVIOUS_RELEASES_DIR}" + +if [[ $HOST = *-mingw32 ]]; then + "${BASE_ROOT_DIR}/ci/test/wrap-wine.sh" +fi + +if [ -n "$USE_VALGRIND" ]; then + "${BASE_ROOT_DIR}/ci/test/wrap-valgrind.sh" +fi + +if [ "$RUN_CHECK_DEPS" = "true" ]; then + "${BASE_ROOT_DIR}/contrib/devtools/check-deps.sh" . 
+fi + +if [ "$RUN_UNIT_TESTS" = "true" ]; then + DIR_UNIT_TEST_DATA="${DIR_UNIT_TEST_DATA}" LD_LIBRARY_PATH="${DEPENDS_DIR}/${HOST}/lib" CTEST_OUTPUT_ON_FAILURE=ON ctest "${MAKEJOBS}" --timeout $(( TEST_RUNNER_TIMEOUT_FACTOR * 60 )) +fi + +if [ "$RUN_UNIT_TESTS_SEQUENTIAL" = "true" ]; then + DIR_UNIT_TEST_DATA="${DIR_UNIT_TEST_DATA}" LD_LIBRARY_PATH="${DEPENDS_DIR}/${HOST}/lib" "${BASE_OUTDIR}"/bin/test_bitcoin --catch_system_errors=no -l test_suite +fi + +if [ "$RUN_FUNCTIONAL_TESTS" = "true" ]; then + # parses TEST_RUNNER_EXTRA as an array which allows for multiple arguments such as TEST_RUNNER_EXTRA='--exclude "rpc_bind.py --ipv6"' + eval "TEST_RUNNER_EXTRA=($TEST_RUNNER_EXTRA)" + LD_LIBRARY_PATH="${DEPENDS_DIR}/${HOST}/lib" test/functional/test_runner.py --ci "${MAKEJOBS}" --tmpdirprefix "${BASE_SCRATCH_DIR}"/test_runner/ --ansi --combinedlogslen=99999999 --timeout-factor="${TEST_RUNNER_TIMEOUT_FACTOR}" "${TEST_RUNNER_EXTRA[@]}" --quiet --failfast +fi + +if [ "${RUN_TIDY}" = "true" ]; then + cmake -B /tidy-build -DLLVM_DIR=/usr/lib/llvm-"${TIDY_LLVM_V}"/cmake -DCMAKE_BUILD_TYPE=Release -S "${BASE_ROOT_DIR}"/contrib/devtools/bitcoin-tidy + cmake --build /tidy-build "$MAKEJOBS" + cmake --build /tidy-build --target bitcoin-tidy-tests "$MAKEJOBS" + + set -eo pipefail + cd "${BASE_BUILD_DIR}/src/" + if ! ( run-clang-tidy-"${TIDY_LLVM_V}" -quiet -load="/tidy-build/libbitcoin-tidy.so" "${MAKEJOBS}" | tee tmp.tidy-out.txt ); then + grep -C5 "error: " tmp.tidy-out.txt + echo "^^^ ⚠️ Failure generated from clang-tidy" + false + fi + # Filter out: + # * qt qrc and moc generated files + jq 'map(select(.file | test("src/qt/qrc_.*\\.cpp$|/moc_.*\\.cpp$") | not))' "${BASE_BUILD_DIR}/compile_commands.json" > tmp.json + mv tmp.json "${BASE_BUILD_DIR}/compile_commands.json" + cd "${BASE_ROOT_DIR}" + python3 "/include-what-you-use/iwyu_tool.py" \ + -p "${BASE_BUILD_DIR}" "${MAKEJOBS}" \ + -- -Xiwyu --cxx17ns -Xiwyu --mapping_file="${BASE_ROOT_DIR}/contrib/devtools/iwyu/bitcoin.core.imp" \ + -Xiwyu --max_line_length=160 \ + 2>&1 | tee /tmp/iwyu_ci.out + cd "${BASE_ROOT_DIR}/src" + python3 "/include-what-you-use/fix_includes.py" --nosafe_headers < /tmp/iwyu_ci.out + git --no-pager diff +fi + +if [ "$RUN_FUZZ_TESTS" = "true" ]; then + # shellcheck disable=SC2086 + LD_LIBRARY_PATH="${DEPENDS_DIR}/${HOST}/lib" test/fuzz/test_runner.py ${FUZZ_TESTS_CONFIG} "${MAKEJOBS}" -l DEBUG "${DIR_FUZZ_IN}" --empty_min_time=60 +fi diff --git a/ci/test/04_install.sh b/ci/test/04_install.sh deleted file mode 100755 index 5dbf1b82f1cf9..0000000000000 --- a/ci/test/04_install.sh +++ /dev/null @@ -1,110 +0,0 @@ -#!/usr/bin/env bash -# -# Copyright (c) 2018-2020 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. 
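The eval "TEST_RUNNER_EXTRA=($TEST_RUNNER_EXTRA)" used above before launching test_runner.py is what lets one environment string carry arguments that themselves contain spaces. Its behaviour in isolation, using the example value from the script's own comment:

    TEST_RUNNER_EXTRA='--exclude "rpc_bind.py --ipv6"'
    eval "TEST_RUNNER_EXTRA=($TEST_RUNNER_EXTRA)"
    printf '[%s]\n' "${TEST_RUNNER_EXTRA[@]}"
    # prints [--exclude] on one line and [rpc_bind.py --ipv6] on the next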
- -export LC_ALL=C.UTF-8 - -if [[ $DOCKER_NAME_TAG == centos* ]]; then - export LC_ALL=en_US.utf8 -fi -if [[ $QEMU_USER_CMD == qemu-s390* ]]; then - export LC_ALL=C -fi - -if [ "$TRAVIS_OS_NAME" == "osx" ]; then - export PATH="/usr/local/opt/ccache/libexec:$PATH" - ${CI_RETRY_EXE} pip3 install $PIP_PACKAGES -fi - -# Create folders that are mounted into the docker -mkdir -p "${CCACHE_DIR}" -mkdir -p "${PREVIOUS_RELEASES_DIR}" - -export ASAN_OPTIONS="detect_stack_use_after_return=1:check_initialization_order=1:strict_init_order=1" -export LSAN_OPTIONS="suppressions=${BASE_ROOT_DIR}/test/sanitizer_suppressions/lsan" -export TSAN_OPTIONS="suppressions=${BASE_ROOT_DIR}/test/sanitizer_suppressions/tsan:log_path=${BASE_SCRATCH_DIR}/sanitizer-output/tsan" -export UBSAN_OPTIONS="suppressions=${BASE_ROOT_DIR}/test/sanitizer_suppressions/ubsan:print_stacktrace=1:halt_on_error=1:report_error_type=1" -env | grep -E '^(BITCOIN_CONFIG|BASE_|QEMU_|CCACHE_|LC_ALL|BOOST_TEST_RANDOM|CONFIG_SHELL|(ASAN|LSAN|TSAN|UBSAN)_OPTIONS|TEST_PREVIOUS_RELEASES|PREVIOUS_RELEASES_DIR)' | tee /tmp/env -if [[ $HOST = *-mingw32 ]]; then - DOCKER_ADMIN="--cap-add SYS_ADMIN" -elif [[ $BITCOIN_CONFIG = *--with-sanitizers=*address* ]]; then # If ran with (ASan + LSan), Docker needs access to ptrace (https://github.com/google/sanitizers/issues/764) - DOCKER_ADMIN="--cap-add SYS_PTRACE" -fi - -export P_CI_DIR="$PWD" - -if [ -z "$DANGER_RUN_CI_ON_HOST" ]; then - echo "Creating $DOCKER_NAME_TAG container to run in" - ${CI_RETRY_EXE} docker pull "$DOCKER_NAME_TAG" - - DOCKER_ID=$(docker run $DOCKER_ADMIN -idt \ - --mount type=bind,src=$BASE_ROOT_DIR,dst=/ro_base,readonly \ - --mount type=bind,src=$CCACHE_DIR,dst=$CCACHE_DIR \ - --mount type=bind,src=$DEPENDS_DIR,dst=$DEPENDS_DIR \ - --mount type=bind,src=$PREVIOUS_RELEASES_DIR,dst=$PREVIOUS_RELEASES_DIR \ - -w $BASE_ROOT_DIR \ - --env-file /tmp/env \ - --name $CONTAINER_NAME \ - $DOCKER_NAME_TAG) - - DOCKER_EXEC () { - docker exec $DOCKER_ID bash -c "export PATH=$BASE_SCRATCH_DIR/bins/:\$PATH && cd $P_CI_DIR && $*" - } -else - echo "Running on host system without docker wrapper" - DOCKER_EXEC () { - bash -c "export PATH=$BASE_SCRATCH_DIR/bins/:\$PATH && cd $P_CI_DIR && $*" - } -fi -export -f DOCKER_EXEC - -if [ -n "$DPKG_ADD_ARCH" ]; then - DOCKER_EXEC dpkg --add-architecture "$DPKG_ADD_ARCH" -fi - -if [[ $DOCKER_NAME_TAG == centos* ]]; then - ${CI_RETRY_EXE} DOCKER_EXEC yum -y install epel-release - ${CI_RETRY_EXE} DOCKER_EXEC yum -y install $DOCKER_PACKAGES $PACKAGES -elif [ "$CI_USE_APT_INSTALL" != "no" ]; then - ${CI_RETRY_EXE} DOCKER_EXEC apt-get update - ${CI_RETRY_EXE} DOCKER_EXEC apt-get install --no-install-recommends --no-upgrade -y $PACKAGES $DOCKER_PACKAGES -fi - -if [ "$TRAVIS_OS_NAME" == "osx" ]; then - top -l 1 -s 0 | awk ' /PhysMem/ {print}' - echo "Number of CPUs: $(sysctl -n hw.logicalcpu)" -else - DOCKER_EXEC free -m -h - DOCKER_EXEC echo "Number of CPUs \(nproc\):" \$\(nproc\) - DOCKER_EXEC echo $(lscpu | grep Endian) - DOCKER_EXEC echo "Free disk space:" - DOCKER_EXEC df -h -fi - -if [ ! 
-d ${DIR_QA_ASSETS} ]; then - if [ "$RUN_FUZZ_TESTS" = "true" ]; then - DOCKER_EXEC git clone https://github.com/bitcoin-core/qa-assets ${DIR_QA_ASSETS} - fi -fi -export DIR_FUZZ_IN=${DIR_QA_ASSETS}/fuzz_seed_corpus/ - -DOCKER_EXEC mkdir -p "${BASE_SCRATCH_DIR}/sanitizer-output/" - -if [ -z "$DANGER_RUN_CI_ON_HOST" ]; then - echo "Create $BASE_ROOT_DIR" - DOCKER_EXEC rsync -a /ro_base/ $BASE_ROOT_DIR -fi - -if [ "$USE_BUSY_BOX" = "true" ]; then - echo "Setup to use BusyBox utils" - DOCKER_EXEC mkdir -p $BASE_SCRATCH_DIR/bins/ - # tar excluded for now because it requires passing in the exact archive type in ./depends (fixed in later BusyBox version) - # find excluded for now because it does not recognize the -delete option in ./depends (fixed in later BusyBox version) - # ar excluded for now because it does not recognize the -q option in ./depends (unknown if fixed) - # shellcheck disable=SC1010 - DOCKER_EXEC for util in \$\(busybox --list \| grep -v "^ar$" \| grep -v "^tar$" \| grep -v "^find$"\)\; do ln -s \$\(command -v busybox\) $BASE_SCRATCH_DIR/bins/\$util\; done - # Print BusyBox version - DOCKER_EXEC patch --help -fi diff --git a/ci/test/05_before_script.sh b/ci/test/05_before_script.sh deleted file mode 100755 index 36855045241f5..0000000000000 --- a/ci/test/05_before_script.sh +++ /dev/null @@ -1,42 +0,0 @@ -#!/usr/bin/env bash -# -# Copyright (c) 2018-2020 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. - -export LC_ALL=C.UTF-8 - -# Make sure default datadir does not exist and is never read by creating a dummy file -if [ "$TRAVIS_OS_NAME" == "osx" ]; then - echo > $HOME/Library/Application\ Support/Bitcoin -else - DOCKER_EXEC echo \> \$HOME/.bitcoin -fi - -DOCKER_EXEC mkdir -p ${DEPENDS_DIR}/SDKs ${DEPENDS_DIR}/sdk-sources - -if [ -n "$OSX_SDK" ] && [ ! -f ${DEPENDS_DIR}/sdk-sources/MacOSX${OSX_SDK}.sdk.tar.gz ]; then - curl --location --fail $SDK_URL/MacOSX${OSX_SDK}.sdk.tar.gz -o ${DEPENDS_DIR}/sdk-sources/MacOSX${OSX_SDK}.sdk.tar.gz -fi -if [ -n "$OSX_SDK" ] && [ -f ${DEPENDS_DIR}/sdk-sources/MacOSX${OSX_SDK}.sdk.tar.gz ]; then - DOCKER_EXEC tar -C ${DEPENDS_DIR}/SDKs -xf ${DEPENDS_DIR}/sdk-sources/MacOSX${OSX_SDK}.sdk.tar.gz -fi -if [[ $HOST = *-mingw32 ]]; then - DOCKER_EXEC update-alternatives --set $HOST-g++ \$\(which $HOST-g++-posix\) -fi -if [ -z "$NO_DEPENDS" ]; then - if [[ $DOCKER_NAME_TAG == centos* ]]; then - # CentOS has problems building the depends if the config shell is not explicitly set - # (i.e. for libevent a Makefile with an empty SHELL variable is generated, leading to - # an error as the first command is executed) - SHELL_OPTS="CONFIG_SHELL=/bin/bash" - else - SHELL_OPTS="CONFIG_SHELL=" - fi - DOCKER_EXEC $SHELL_OPTS make $MAKEJOBS -C depends HOST=$HOST $DEP_OPTS -fi -if [ -n "$PREVIOUS_RELEASES_TO_DOWNLOAD" ]; then - BEGIN_FOLD previous-versions - DOCKER_EXEC contrib/devtools/previous_release.sh -b -t "$PREVIOUS_RELEASES_DIR" "${PREVIOUS_RELEASES_TO_DOWNLOAD}" - END_FOLD -fi diff --git a/ci/test/06_script_a.sh b/ci/test/06_script_a.sh deleted file mode 100755 index b68cd9d3f8149..0000000000000 --- a/ci/test/06_script_a.sh +++ /dev/null @@ -1,48 +0,0 @@ -#!/usr/bin/env bash -# -# Copyright (c) 2018-2020 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. 
- -export LC_ALL=C.UTF-8 - -BITCOIN_CONFIG_ALL="--disable-dependency-tracking --prefix=$DEPENDS_DIR/$HOST --bindir=$BASE_OUTDIR/bin --libdir=$BASE_OUTDIR/lib" -DOCKER_EXEC "ccache --zero-stats --max-size=$CCACHE_SIZE" - -BEGIN_FOLD autogen -if [ -n "$CONFIG_SHELL" ]; then - DOCKER_EXEC "$CONFIG_SHELL" -c "./autogen.sh" -else - DOCKER_EXEC ./autogen.sh -fi -END_FOLD - -DOCKER_EXEC mkdir -p "${BASE_BUILD_DIR}" -export P_CI_DIR="${BASE_BUILD_DIR}" - -BEGIN_FOLD configure -DOCKER_EXEC "${BASE_ROOT_DIR}/configure" --cache-file=config.cache $BITCOIN_CONFIG_ALL $BITCOIN_CONFIG || ( (DOCKER_EXEC cat config.log) && false) -END_FOLD - -BEGIN_FOLD distdir -DOCKER_EXEC make distdir VERSION=$HOST -END_FOLD - -export P_CI_DIR="${BASE_BUILD_DIR}/bitcoin-$HOST" - -BEGIN_FOLD configure -DOCKER_EXEC ./configure --cache-file=../config.cache $BITCOIN_CONFIG_ALL $BITCOIN_CONFIG || ( (DOCKER_EXEC cat config.log) && false) -END_FOLD - -set -o errtrace -trap 'DOCKER_EXEC "cat ${BASE_SCRATCH_DIR}/sanitizer-output/* 2> /dev/null"' ERR - -BEGIN_FOLD build -DOCKER_EXEC make $MAKEJOBS $GOAL || ( echo "Build failure. Verbose build follows." && DOCKER_EXEC make $GOAL V=1 ; false ) -END_FOLD - -BEGIN_FOLD cache_stats -DOCKER_EXEC "ccache --version | head -n 1 && ccache --show-stats" -DOCKER_EXEC du -sh "${DEPENDS_DIR}"/*/ -DOCKER_EXEC du -sh "${PREVIOUS_RELEASES_DIR}" -END_FOLD diff --git a/ci/test/06_script_b.sh b/ci/test/06_script_b.sh deleted file mode 100755 index c8542862f29d1..0000000000000 --- a/ci/test/06_script_b.sh +++ /dev/null @@ -1,48 +0,0 @@ -#!/usr/bin/env bash -# -# Copyright (c) 2018-2020 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. - -export LC_ALL=C.UTF-8 - -if [ -n "$QEMU_USER_CMD" ]; then - BEGIN_FOLD wrap-qemu - # Generate all binaries, so that they can be wrapped - DOCKER_EXEC make $MAKEJOBS -C src/secp256k1 VERBOSE=1 - DOCKER_EXEC make $MAKEJOBS -C src/univalue VERBOSE=1 - DOCKER_EXEC "${BASE_ROOT_DIR}/ci/test/wrap-qemu.sh" - END_FOLD -fi - -if [ -n "$USE_VALGRIND" ]; then - BEGIN_FOLD wrap-valgrind - DOCKER_EXEC "${BASE_ROOT_DIR}/ci/test/wrap-valgrind.sh" - END_FOLD -fi - -bash -c "${CI_WAIT}" & # Print dots in case the tests take a long time to run - -if [ "$RUN_UNIT_TESTS" = "true" ]; then - BEGIN_FOLD unit-tests - DOCKER_EXEC LD_LIBRARY_PATH=$DEPENDS_DIR/$HOST/lib make $MAKEJOBS check VERBOSE=1 - END_FOLD -fi - -if [ "$RUN_UNIT_TESTS_SEQUENTIAL" = "true" ]; then - BEGIN_FOLD unit-tests-seq - DOCKER_EXEC LD_LIBRARY_PATH=$DEPENDS_DIR/$HOST/lib "${BASE_BUILD_DIR}/bitcoin-*/src/test/test_bitcoin*" --catch_system_errors=no -l test_suite - END_FOLD -fi - -if [ "$RUN_FUNCTIONAL_TESTS" = "true" ]; then - BEGIN_FOLD functional-tests - DOCKER_EXEC test/functional/test_runner.py --ci $MAKEJOBS --tmpdirprefix "${BASE_SCRATCH_DIR}/test_runner/" --ansi --combinedlogslen=4000 ${TEST_RUNNER_EXTRA} --quiet --failfast - END_FOLD -fi - -if [ "$RUN_FUZZ_TESTS" = "true" ]; then - BEGIN_FOLD fuzz-tests - DOCKER_EXEC test/fuzz/test_runner.py ${FUZZ_TESTS_CONFIG} -l DEBUG ${DIR_FUZZ_IN} - END_FOLD -fi diff --git a/ci/test/GetCMakeLogFiles.cmake b/ci/test/GetCMakeLogFiles.cmake new file mode 100644 index 0000000000000..80f71dcf63569 --- /dev/null +++ b/ci/test/GetCMakeLogFiles.cmake @@ -0,0 +1,11 @@ +# Copyright (c) 2024-present The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or https://opensource.org/license/mit/. 
+ +if(CMAKE_VERSION VERSION_GREATER_EQUAL 3.26) + set(log_files "CMakeFiles/CMakeConfigureLog.yaml") +else() + set(log_files "CMakeFiles/CMakeOutput.log CMakeFiles/CMakeError.log") +endif() + +execute_process(COMMAND ${CMAKE_COMMAND} -E echo ${log_files}) diff --git a/ci/test/wrap-qemu.sh b/ci/test/wrap-qemu.sh deleted file mode 100755 index be7d7fcc1fe19..0000000000000 --- a/ci/test/wrap-qemu.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/usr/bin/env bash -# -# Copyright (c) 2018-2019 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. - -export LC_ALL=C.UTF-8 - -for b_name in {"${BASE_OUTDIR}/bin"/*,src/secp256k1/*tests,src/univalue/{no_nul,test_json,unitester,object}}; do - # shellcheck disable=SC2044 - for b in $(find "${BASE_ROOT_DIR}" -executable -type f -name $(basename $b_name)); do - echo "Wrap $b ..." - mv "$b" "${b}_orig" - echo '#!/usr/bin/env bash' > "$b" - echo "$QEMU_USER_CMD \"${b}_orig\" \"\$@\"" >> "$b" - chmod +x "$b" - done -done diff --git a/ci/test/wrap-valgrind.sh b/ci/test/wrap-valgrind.sh index 6b3e6eb7e7412..27754831848bc 100755 --- a/ci/test/wrap-valgrind.sh +++ b/ci/test/wrap-valgrind.sh @@ -1,6 +1,6 @@ #!/usr/bin/env bash # -# Copyright (c) 2018-2019 The Bitcoin Core developers +# Copyright (c) 2018-2021 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. @@ -8,7 +8,7 @@ export LC_ALL=C.UTF-8 for b_name in "${BASE_OUTDIR}/bin"/*; do # shellcheck disable=SC2044 - for b in $(find "${BASE_ROOT_DIR}" -executable -type f -name $(basename $b_name)); do + for b in $(find "${BASE_ROOT_DIR}" -executable -type f -name "$(basename "$b_name")"); do echo "Wrap $b ..." mv "$b" "${b}_orig" echo '#!/usr/bin/env bash' > "$b" diff --git a/ci/test/wrap-wine.sh b/ci/test/wrap-wine.sh new file mode 100755 index 0000000000000..a0d32a93a3f9c --- /dev/null +++ b/ci/test/wrap-wine.sh @@ -0,0 +1,20 @@ +#!/usr/bin/env bash +# +# Copyright (c) 2020-2022 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. + +export LC_ALL=C.UTF-8 + +for b_name in {"${BASE_OUTDIR}/bin"/*,src/univalue/{test_json,unitester,object}}.exe; do + # shellcheck disable=SC2044 + for b in $(find "${BASE_ROOT_DIR}" -executable -type f -name "$(basename "$b_name")"); do + if (file "$b" | grep "Windows"); then + echo "Wrap $b ..." + mv "$b" "${b}_orig" + echo '#!/usr/bin/env bash' > "$b" + echo "( wine \"${b}_orig\" \"\$@\" ) || ( sleep 1 && wine \"${b}_orig\" \"\$@\" )" >> "$b" + chmod +x "$b" + fi + done +done diff --git a/ci/test_imagefile b/ci/test_imagefile new file mode 100644 index 0000000000000..f8b5eea1c88ab --- /dev/null +++ b/ci/test_imagefile @@ -0,0 +1,16 @@ +# Copyright (c) The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or https://opensource.org/license/mit/. + +# See ci/README.md for usage. 
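For clarity on the wrapper technique used by wrap-valgrind.sh and the new wrap-wine.sh hunks above: each matching binary is renamed to <name>_orig and replaced by a two-line shell script that re-runs it under the emulator, retrying once after a short sleep. For a hypothetical binary at /path/to/bin/bitcoind.exe, the generated wrapper would contain roughly:

    #!/usr/bin/env bash
    ( wine "/path/to/bin/bitcoind.exe_orig" "$@" ) || ( sleep 1 && wine "/path/to/bin/bitcoind.exe_orig" "$@" )

The path is illustrative; the real location comes from BASE_OUTDIR and the find invocation in the script.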
+ +ARG CI_IMAGE_NAME_TAG +FROM ${CI_IMAGE_NAME_TAG} + +ARG FILE_ENV +ENV FILE_ENV=${FILE_ENV} + +COPY ./ci/retry/retry /usr/bin/retry +COPY ./ci/test/00_setup_env.sh ./${FILE_ENV} ./ci/test/01_base_install.sh /ci_container_base/ci/test/ + +RUN ["bash", "-c", "cd /ci_container_base/ && set -o errexit && source ./ci/test/00_setup_env.sh && ./ci/test/01_base_install.sh"] diff --git a/ci/test_run_all.sh b/ci/test_run_all.sh index a1d4bd19524da..3afc47b23edae 100755 --- a/ci/test_run_all.sh +++ b/ci/test_run_all.sh @@ -1,14 +1,11 @@ #!/usr/bin/env bash # -# Copyright (c) 2019 The Bitcoin Core developers +# Copyright (c) 2019-present The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. export LC_ALL=C.UTF-8 set -o errexit; source ./ci/test/00_setup_env.sh -set -o errexit; source ./ci/test/03_before_install.sh -set -o errexit; source ./ci/test/04_install.sh -set -o errexit; source ./ci/test/05_before_script.sh -set -o errexit; source ./ci/test/06_script_a.sh -set -o errexit; source ./ci/test/06_script_b.sh +set -o errexit +"./ci/test/02_run_container.sh" diff --git a/cmake/bitcoin-build-config.h.in b/cmake/bitcoin-build-config.h.in new file mode 100644 index 0000000000000..094eb8040a3bd --- /dev/null +++ b/cmake/bitcoin-build-config.h.in @@ -0,0 +1,153 @@ +// Copyright (c) 2023-present The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or https://opensource.org/license/mit/. + +#ifndef BITCOIN_CONFIG_H +#define BITCOIN_CONFIG_H + +/* Version Build */ +#define CLIENT_VERSION_BUILD @CLIENT_VERSION_BUILD@ + +/* Version is release */ +#define CLIENT_VERSION_IS_RELEASE @CLIENT_VERSION_IS_RELEASE@ + +/* Major version */ +#define CLIENT_VERSION_MAJOR @CLIENT_VERSION_MAJOR@ + +/* Minor version */ +#define CLIENT_VERSION_MINOR @CLIENT_VERSION_MINOR@ + +/* Copyright holder(s) before %s replacement */ +#define COPYRIGHT_HOLDERS "@COPYRIGHT_HOLDERS@" + +/* Copyright holder(s) */ +#define COPYRIGHT_HOLDERS_FINAL "@COPYRIGHT_HOLDERS_FINAL@" + +/* Replacement for %s in copyright holders string */ +#define COPYRIGHT_HOLDERS_SUBSTITUTION "@PACKAGE_NAME@" + +/* Copyright year */ +#define COPYRIGHT_YEAR @COPYRIGHT_YEAR@ + +/* Define this symbol to build code that uses ARMv8 SHA-NI intrinsics */ +#cmakedefine ENABLE_ARM_SHANI 1 + +/* Define this symbol to build code that uses AVX2 intrinsics */ +#cmakedefine ENABLE_AVX2 1 + +/* Define if external signer support is enabled */ +#cmakedefine ENABLE_EXTERNAL_SIGNER 1 + +/* Define this symbol to build code that uses SSE4.1 intrinsics */ +#cmakedefine ENABLE_SSE41 1 + +/* Define to 1 to enable tracepoints for Userspace, Statically Defined Tracing + */ +#cmakedefine ENABLE_TRACING 1 + +/* Define to 1 to enable wallet functions. */ +#cmakedefine ENABLE_WALLET 1 + +/* Define this symbol to build code that uses x86 SHA-NI intrinsics */ +#cmakedefine ENABLE_X86_SHANI 1 + +/* Define to 1 if you have the declaration of `fork', and to 0 if you don't. + */ +#cmakedefine01 HAVE_DECL_FORK + +/* Define to 1 if you have the declaration of `freeifaddrs', and to 0 if you + don't. */ +#cmakedefine01 HAVE_DECL_FREEIFADDRS + +/* Define to 1 if you have the declaration of `getifaddrs', and to 0 if you + don't. */ +#cmakedefine01 HAVE_DECL_GETIFADDRS + +/* Define to 1 if you have the declaration of `pipe2', and to 0 if you don't. 
+ */ +#cmakedefine01 HAVE_DECL_PIPE2 + +/* Define to 1 if you have the declaration of `setsid', and to 0 if you don't. + */ +#cmakedefine01 HAVE_DECL_SETSID + +/* Define this symbol if evhttp_connection_get_peer expects const char** */ +#cmakedefine HAVE_EVHTTP_CONNECTION_GET_PEER_CONST_CHAR 1 + +/* Define to 1 if fdatasync is available. */ +#cmakedefine HAVE_FDATASYNC 1 + +/* Define this symbol if the BSD getentropy system call is available with + sys/random.h */ +#cmakedefine HAVE_GETENTROPY_RAND 1 + +/* Define this symbol if the Linux getrandom function call is available */ +#cmakedefine HAVE_GETRANDOM 1 + +/* Define this symbol if you have malloc_info */ +#cmakedefine HAVE_MALLOC_INFO 1 + +/* Define this symbol if you have mallopt with M_ARENA_MAX */ +#cmakedefine HAVE_MALLOPT_ARENA_MAX 1 + +/* Define to 1 if O_CLOEXEC flag is available. */ +#cmakedefine01 HAVE_O_CLOEXEC + +/* Define this symbol if you have posix_fallocate */ +#cmakedefine HAVE_POSIX_FALLOCATE 1 + +/* Define this symbol if platform supports unix domain sockets */ +#cmakedefine HAVE_SOCKADDR_UN 1 + +/* Define this symbol to build code that uses getauxval */ +#cmakedefine HAVE_STRONG_GETAUXVAL 1 + +/* Define this symbol if the BSD sysctl() is available */ +#cmakedefine HAVE_SYSCTL 1 + +/* Define this symbol if the BSD sysctl(KERN_ARND) is available */ +#cmakedefine HAVE_SYSCTL_ARND 1 + +/* Define to 1 if std::system or ::wsystem is available. */ +#cmakedefine HAVE_SYSTEM 1 + +/* Define to 1 if you have the header file. */ +#cmakedefine HAVE_SYS_PRCTL_H 1 + +/* Define to 1 if you have the header file. */ +#cmakedefine HAVE_SYS_RESOURCES_H 1 + +/* Define to 1 if you have the header file. */ +#cmakedefine HAVE_SYS_VMMETER_H 1 + +/* Define to 1 if you have the header file. */ +#cmakedefine HAVE_VM_VM_PARAM_H 1 + +/* Define to the address where bug reports for this package should be sent. */ +#define PACKAGE_BUGREPORT "@PACKAGE_BUGREPORT@" + +/* Define to the full name of this package. */ +#define PACKAGE_NAME "@PACKAGE_NAME@" + +/* Define to the home page for this package. */ +#define PACKAGE_URL "@PROJECT_HOMEPAGE_URL@" + +/* Define to the version of this package. */ +#define PACKAGE_VERSION "@PACKAGE_VERSION@" + +/* Define to 1 if strerror_r returns char *. */ +#cmakedefine STRERROR_R_CHAR_P 1 + +/* Define if BDB support should be compiled in */ +#cmakedefine USE_BDB 1 + +/* Define if dbus support should be compiled in */ +#cmakedefine USE_DBUS 1 + +/* Define if QR support should be compiled in */ +#cmakedefine USE_QRCODE 1 + +/* Define if sqlite support should be compiled in */ +#cmakedefine USE_SQLITE 1 + +#endif //BITCOIN_CONFIG_H diff --git a/cmake/ccache.cmake b/cmake/ccache.cmake new file mode 100644 index 0000000000000..099aa66411fb6 --- /dev/null +++ b/cmake/ccache.cmake @@ -0,0 +1,36 @@ +# Copyright (c) 2023-present The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or https://opensource.org/license/mit/. + +if(NOT MSVC) + find_program(CCACHE_EXECUTABLE ccache) + if(CCACHE_EXECUTABLE) + execute_process( + COMMAND readlink -f ${CMAKE_CXX_COMPILER} + OUTPUT_VARIABLE compiler_resolved_link + ERROR_QUIET + OUTPUT_STRIP_TRAILING_WHITESPACE + ) + if(CCACHE_EXECUTABLE STREQUAL compiler_resolved_link AND NOT WITH_CCACHE) + list(APPEND configure_warnings + "Disabling ccache was attempted using -DWITH_CCACHE=${WITH_CCACHE}, but ccache masquerades as the compiler." 
+ ) + set(WITH_CCACHE ON) + elseif(WITH_CCACHE) + list(APPEND CMAKE_C_COMPILER_LAUNCHER ${CCACHE_EXECUTABLE}) + list(APPEND CMAKE_CXX_COMPILER_LAUNCHER ${CCACHE_EXECUTABLE}) + endif() + else() + set(WITH_CCACHE OFF) + endif() + if(WITH_CCACHE) + try_append_cxx_flags("-fdebug-prefix-map=A=B" TARGET core_interface SKIP_LINK + IF_CHECK_PASSED "-fdebug-prefix-map=${PROJECT_SOURCE_DIR}=." + ) + try_append_cxx_flags("-fmacro-prefix-map=A=B" TARGET core_interface SKIP_LINK + IF_CHECK_PASSED "-fmacro-prefix-map=${PROJECT_SOURCE_DIR}=." + ) + endif() +endif() + +mark_as_advanced(CCACHE_EXECUTABLE) diff --git a/cmake/cov_tool_wrapper.sh.in b/cmake/cov_tool_wrapper.sh.in new file mode 100644 index 0000000000000..f6b7ff3419ffd --- /dev/null +++ b/cmake/cov_tool_wrapper.sh.in @@ -0,0 +1,5 @@ +# Copyright (c) 2024-present The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or https://opensource.org/license/mit/. + +exec @COV_TOOL@ "$@" diff --git a/cmake/crc32c.cmake b/cmake/crc32c.cmake new file mode 100644 index 0000000000000..16fde7f95dab9 --- /dev/null +++ b/cmake/crc32c.cmake @@ -0,0 +1,123 @@ +# Copyright (c) 2023-present The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or https://opensource.org/license/mit/. + +# This file is part of the transition from Autotools to CMake. Once CMake +# support has been merged we should switch to using the upstream CMake +# buildsystem. + +include(CheckCXXSourceCompiles) + +# Check for __builtin_prefetch support in the compiler. +check_cxx_source_compiles(" + int main() { + char data = 0; + const char* address = &data; + __builtin_prefetch(address, 0, 0); + return 0; + } + " HAVE_BUILTIN_PREFETCH +) + +# Check for _mm_prefetch support in the compiler. +check_cxx_source_compiles(" + #if defined(_MSC_VER) + #include + #else + #include + #endif + + int main() { + char data = 0; + const char* address = &data; + _mm_prefetch(address, _MM_HINT_NTA); + return 0; + } + " HAVE_MM_PREFETCH +) + +# Check for SSE4.2 support in the compiler. +if(MSVC) + set(SSE42_CXXFLAGS /arch:AVX) +else() + set(SSE42_CXXFLAGS -msse4.2) +endif() +check_cxx_source_compiles_with_flags("${SSE42_CXXFLAGS}" " + #include + #if defined(_MSC_VER) + #include + #elif defined(__GNUC__) && defined(__SSE4_2__) + #include + #endif + + int main() { + uint64_t l = 0; + l = _mm_crc32_u8(l, 0); + l = _mm_crc32_u32(l, 0); + l = _mm_crc32_u64(l, 0); + return l; + } + " HAVE_SSE42 +) + +# Check for ARMv8 w/ CRC and CRYPTO extensions support in the compiler. 
+set(ARM64_CRC_CXXFLAGS -march=armv8-a+crc+crypto) +check_cxx_source_compiles_with_flags("${ARM64_CRC_CXXFLAGS}" " + #include + #include + + int main() { + #ifdef __aarch64__ + __crc32cb(0, 0); __crc32ch(0, 0); __crc32cw(0, 0); __crc32cd(0, 0); + vmull_p64(0, 0); + #else + #error crc32c library does not support hardware acceleration on 32-bit ARM + #endif + return 0; + } + " HAVE_ARM64_CRC32C +) + +add_library(crc32c_common INTERFACE) +target_compile_definitions(crc32c_common INTERFACE + HAVE_BUILTIN_PREFETCH=$ + HAVE_MM_PREFETCH=$ + HAVE_STRONG_GETAUXVAL=$ + BYTE_ORDER_BIG_ENDIAN=$ + HAVE_SSE42=$ + HAVE_ARM64_CRC32C=$ +) +target_link_libraries(crc32c_common INTERFACE + core_interface +) + +add_library(crc32c STATIC EXCLUDE_FROM_ALL + ${PROJECT_SOURCE_DIR}/src/crc32c/src/crc32c.cc + ${PROJECT_SOURCE_DIR}/src/crc32c/src/crc32c_portable.cc +) +target_include_directories(crc32c + PUBLIC + $ +) +target_link_libraries(crc32c PRIVATE crc32c_common) +set_target_properties(crc32c PROPERTIES EXPORT_COMPILE_COMMANDS OFF) + +if(HAVE_SSE42) + add_library(crc32c_sse42 STATIC EXCLUDE_FROM_ALL + ${PROJECT_SOURCE_DIR}/src/crc32c/src/crc32c_sse42.cc + ) + target_compile_options(crc32c_sse42 PRIVATE ${SSE42_CXXFLAGS}) + target_link_libraries(crc32c_sse42 PRIVATE crc32c_common) + set_target_properties(crc32c_sse42 PROPERTIES EXPORT_COMPILE_COMMANDS OFF) + target_link_libraries(crc32c PRIVATE crc32c_sse42) +endif() + +if(HAVE_ARM64_CRC32C) + add_library(crc32c_arm64 STATIC EXCLUDE_FROM_ALL + ${PROJECT_SOURCE_DIR}/src/crc32c/src/crc32c_arm64.cc + ) + target_compile_options(crc32c_arm64 PRIVATE ${ARM64_CRC_CXXFLAGS}) + target_link_libraries(crc32c_arm64 PRIVATE crc32c_common) + set_target_properties(crc32c_arm64 PROPERTIES EXPORT_COMPILE_COMMANDS OFF) + target_link_libraries(crc32c PRIVATE crc32c_arm64) +endif() diff --git a/cmake/introspection.cmake b/cmake/introspection.cmake new file mode 100644 index 0000000000000..29c93869a73d1 --- /dev/null +++ b/cmake/introspection.cmake @@ -0,0 +1,227 @@ +# Copyright (c) 2023-present The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or https://opensource.org/license/mit/. + +include(CheckCXXSourceCompiles) +include(CheckCXXSymbolExists) +include(CheckIncludeFileCXX) + +# The following HAVE_{HEADER}_H variables go to the bitcoin-build-config.h header. 
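Concretely, the HAVE_*/ENABLE_* results produced by the checks that follow are substituted into the cmake/bitcoin-build-config.h.in template shown earlier in this patch: #cmakedefine lines become plain #define lines when the variable is true (and commented-out #undef lines otherwise), while #cmakedefine01 lines always become #define ... 0 or 1. A minimal sketch of that substitution step, assuming the configure_file() call lives in a CMakeLists.txt that is not part of this diff:

    # HAVE_SYS_PRCTL_H, ENABLE_WALLET, etc. are cache/normal variables set elsewhere
    configure_file(
      ${PROJECT_SOURCE_DIR}/cmake/bitcoin-build-config.h.in
      ${PROJECT_BINARY_DIR}/bitcoin-build-config.h
    )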
+check_include_file_cxx(sys/prctl.h HAVE_SYS_PRCTL_H) +check_include_file_cxx(sys/resources.h HAVE_SYS_RESOURCES_H) +check_include_file_cxx(sys/vmmeter.h HAVE_SYS_VMMETER_H) +check_include_file_cxx(vm/vm_param.h HAVE_VM_VM_PARAM_H) + +check_cxx_symbol_exists(O_CLOEXEC "fcntl.h" HAVE_O_CLOEXEC) +check_cxx_symbol_exists(fdatasync "unistd.h" HAVE_FDATASYNC) +check_cxx_symbol_exists(fork "unistd.h" HAVE_DECL_FORK) +check_cxx_symbol_exists(pipe2 "unistd.h" HAVE_DECL_PIPE2) +check_cxx_symbol_exists(setsid "unistd.h" HAVE_DECL_SETSID) + +check_include_file_cxx(sys/types.h HAVE_SYS_TYPES_H) +check_include_file_cxx(ifaddrs.h HAVE_IFADDRS_H) +if(HAVE_SYS_TYPES_H AND HAVE_IFADDRS_H) + include(TestAppendRequiredLibraries) + test_append_socket_library(core_interface) +endif() + +include(TestAppendRequiredLibraries) +test_append_atomic_library(core_interface) + +check_cxx_symbol_exists(std::system "cstdlib" HAVE_STD_SYSTEM) +check_cxx_symbol_exists(::_wsystem "stdlib.h" HAVE__WSYSTEM) +if(HAVE_STD_SYSTEM OR HAVE__WSYSTEM) + set(HAVE_SYSTEM 1) +endif() + +check_cxx_source_compiles(" + #include + + int main() + { + char buf[100]; + char* p{strerror_r(0, buf, sizeof buf)}; + (void)p; + } + " STRERROR_R_CHAR_P +) + +# Check for malloc_info (for memory statistics information in getmemoryinfo). +check_cxx_symbol_exists(malloc_info "malloc.h" HAVE_MALLOC_INFO) + +# Check for mallopt(M_ARENA_MAX) (to set glibc arenas). +check_cxx_source_compiles(" + #include + + int main() + { + mallopt(M_ARENA_MAX, 1); + } + " HAVE_MALLOPT_ARENA_MAX +) + +# Check for posix_fallocate(). +check_cxx_source_compiles(" + // same as in src/util/fs_helpers.cpp + #ifdef __linux__ + #ifdef _POSIX_C_SOURCE + #undef _POSIX_C_SOURCE + #endif + #define _POSIX_C_SOURCE 200112L + #endif // __linux__ + #include + + int main() + { + return posix_fallocate(0, 0, 0); + } + " HAVE_POSIX_FALLOCATE +) + +# Check for strong getauxval() support in the system headers. +check_cxx_source_compiles(" + #include + + int main() + { + getauxval(AT_HWCAP); + } + " HAVE_STRONG_GETAUXVAL +) + +# Check for UNIX sockets. +check_cxx_source_compiles(" + #include + #include + + int main() + { + struct sockaddr_un addr; + addr.sun_family = AF_UNIX; + } + " HAVE_SOCKADDR_UN +) + +# Check for different ways of gathering OS randomness: +# - Linux getrandom() +check_cxx_source_compiles(" + #include + + int main() + { + getrandom(nullptr, 32, 0); + } + " HAVE_GETRANDOM +) + +# - BSD getentropy() +check_cxx_source_compiles(" + #include + + int main() + { + getentropy(nullptr, 32); + } + " HAVE_GETENTROPY_RAND +) + + +# - BSD sysctl() +check_cxx_source_compiles(" + #include + #include + + #ifdef __linux__ + #error Don't use sysctl on Linux, it's deprecated even when it works + #endif + + int main() + { + sysctl(nullptr, 2, nullptr, nullptr, nullptr, 0); + } + " HAVE_SYSCTL +) + +# - BSD sysctl(KERN_ARND) +check_cxx_source_compiles(" + #include + #include + + #ifdef __linux__ + #error Don't use sysctl on Linux, it's deprecated even when it works + #endif + + int main() + { + static int name[2] = {CTL_KERN, KERN_ARND}; + sysctl(name, 2, nullptr, nullptr, nullptr, 0); + } + " HAVE_SYSCTL_ARND +) + +if(NOT MSVC) + include(CheckSourceCompilesAndLinks) + + # Check for SSE4.1 intrinsics. 
+ set(SSE41_CXXFLAGS -msse4.1) + check_cxx_source_compiles_with_flags("${SSE41_CXXFLAGS}" " + #include + + int main() + { + __m128i a = _mm_set1_epi32(0); + __m128i b = _mm_set1_epi32(1); + __m128i r = _mm_blend_epi16(a, b, 0xFF); + return _mm_extract_epi32(r, 3); + } + " HAVE_SSE41 + ) + set(ENABLE_SSE41 ${HAVE_SSE41}) + + # Check for AVX2 intrinsics. + set(AVX2_CXXFLAGS -mavx -mavx2) + check_cxx_source_compiles_with_flags("${AVX2_CXXFLAGS}" " + #include + + int main() + { + __m256i l = _mm256_set1_epi32(0); + return _mm256_extract_epi32(l, 7); + } + " HAVE_AVX2 + ) + set(ENABLE_AVX2 ${HAVE_AVX2}) + + # Check for x86 SHA-NI intrinsics. + set(X86_SHANI_CXXFLAGS -msse4 -msha) + check_cxx_source_compiles_with_flags("${X86_SHANI_CXXFLAGS}" " + #include + + int main() + { + __m128i i = _mm_set1_epi32(0); + __m128i j = _mm_set1_epi32(1); + __m128i k = _mm_set1_epi32(2); + return _mm_extract_epi32(_mm_sha256rnds2_epu32(i, j, k), 0); + } + " HAVE_X86_SHANI + ) + set(ENABLE_X86_SHANI ${HAVE_X86_SHANI}) + + # Check for ARMv8 SHA-NI intrinsics. + set(ARM_SHANI_CXXFLAGS -march=armv8-a+crypto) + check_cxx_source_compiles_with_flags("${ARM_SHANI_CXXFLAGS}" " + #include + + int main() + { + uint32x4_t a, b, c; + vsha256h2q_u32(a, b, c); + vsha256hq_u32(a, b, c); + vsha256su0q_u32(a, b); + vsha256su1q_u32(a, b, c); + } + " HAVE_ARM_SHANI + ) + set(ENABLE_ARM_SHANI ${HAVE_ARM_SHANI}) +endif() diff --git a/cmake/leveldb.cmake b/cmake/leveldb.cmake new file mode 100644 index 0000000000000..823a5d8e3daee --- /dev/null +++ b/cmake/leveldb.cmake @@ -0,0 +1,105 @@ +# Copyright (c) 2023-present The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or https://opensource.org/license/mit/. + +# This file is part of the transition from Autotools to CMake. Once CMake +# support has been merged we should switch to using the upstream CMake +# buildsystem. 
+ +include(CheckCXXSymbolExists) +check_cxx_symbol_exists(F_FULLFSYNC "fcntl.h" HAVE_FULLFSYNC) + +add_library(leveldb STATIC EXCLUDE_FROM_ALL + ${PROJECT_SOURCE_DIR}/src/leveldb/db/builder.cc + ${PROJECT_SOURCE_DIR}/src/leveldb/db/c.cc + ${PROJECT_SOURCE_DIR}/src/leveldb/db/db_impl.cc + ${PROJECT_SOURCE_DIR}/src/leveldb/db/db_iter.cc + ${PROJECT_SOURCE_DIR}/src/leveldb/db/dbformat.cc + ${PROJECT_SOURCE_DIR}/src/leveldb/db/dumpfile.cc + ${PROJECT_SOURCE_DIR}/src/leveldb/db/filename.cc + ${PROJECT_SOURCE_DIR}/src/leveldb/db/log_reader.cc + ${PROJECT_SOURCE_DIR}/src/leveldb/db/log_writer.cc + ${PROJECT_SOURCE_DIR}/src/leveldb/db/memtable.cc + ${PROJECT_SOURCE_DIR}/src/leveldb/db/repair.cc + ${PROJECT_SOURCE_DIR}/src/leveldb/db/table_cache.cc + ${PROJECT_SOURCE_DIR}/src/leveldb/db/version_edit.cc + ${PROJECT_SOURCE_DIR}/src/leveldb/db/version_set.cc + ${PROJECT_SOURCE_DIR}/src/leveldb/db/write_batch.cc + ${PROJECT_SOURCE_DIR}/src/leveldb/table/block.cc + ${PROJECT_SOURCE_DIR}/src/leveldb/table/block_builder.cc + ${PROJECT_SOURCE_DIR}/src/leveldb/table/filter_block.cc + ${PROJECT_SOURCE_DIR}/src/leveldb/table/format.cc + ${PROJECT_SOURCE_DIR}/src/leveldb/table/iterator.cc + ${PROJECT_SOURCE_DIR}/src/leveldb/table/merger.cc + ${PROJECT_SOURCE_DIR}/src/leveldb/table/table.cc + ${PROJECT_SOURCE_DIR}/src/leveldb/table/table_builder.cc + ${PROJECT_SOURCE_DIR}/src/leveldb/table/two_level_iterator.cc + ${PROJECT_SOURCE_DIR}/src/leveldb/util/arena.cc + ${PROJECT_SOURCE_DIR}/src/leveldb/util/bloom.cc + ${PROJECT_SOURCE_DIR}/src/leveldb/util/cache.cc + ${PROJECT_SOURCE_DIR}/src/leveldb/util/coding.cc + ${PROJECT_SOURCE_DIR}/src/leveldb/util/comparator.cc + ${PROJECT_SOURCE_DIR}/src/leveldb/util/crc32c.cc + ${PROJECT_SOURCE_DIR}/src/leveldb/util/env.cc + $<$>:${PROJECT_SOURCE_DIR}/src/leveldb/util/env_posix.cc> + $<$:${PROJECT_SOURCE_DIR}/src/leveldb/util/env_windows.cc> + ${PROJECT_SOURCE_DIR}/src/leveldb/util/filter_policy.cc + ${PROJECT_SOURCE_DIR}/src/leveldb/util/hash.cc + ${PROJECT_SOURCE_DIR}/src/leveldb/util/histogram.cc + ${PROJECT_SOURCE_DIR}/src/leveldb/util/logging.cc + ${PROJECT_SOURCE_DIR}/src/leveldb/util/options.cc + ${PROJECT_SOURCE_DIR}/src/leveldb/util/status.cc + ${PROJECT_SOURCE_DIR}/src/leveldb/helpers/memenv/memenv.cc +) + +target_compile_definitions(leveldb + PRIVATE + HAVE_SNAPPY=0 + HAVE_CRC32C=1 + HAVE_FDATASYNC=$ + HAVE_FULLFSYNC=$ + HAVE_O_CLOEXEC=$ + FALLTHROUGH_INTENDED=[[fallthrough]] + LEVELDB_IS_BIG_ENDIAN=$ + $<$>:LEVELDB_PLATFORM_POSIX> + $<$:LEVELDB_PLATFORM_WINDOWS> + $<$:_UNICODE;UNICODE> +) +if(MINGW) + target_compile_definitions(leveldb + PRIVATE + __USE_MINGW_ANSI_STDIO=1 + ) +endif() + +target_include_directories(leveldb + PRIVATE + $ + PUBLIC + $ +) + +add_library(nowarn_leveldb_interface INTERFACE) +if(MSVC) + target_compile_options(nowarn_leveldb_interface INTERFACE + /wd4722 + ) + target_compile_definitions(nowarn_leveldb_interface INTERFACE + _CRT_NONSTDC_NO_WARNINGS + ) +else() + target_compile_options(nowarn_leveldb_interface INTERFACE + -Wno-conditional-uninitialized + -Wno-suggest-override + ) +endif() + +target_link_libraries(leveldb PRIVATE + core_interface + nowarn_leveldb_interface + crc32c +) + +set_target_properties(leveldb PROPERTIES + EXPORT_COMPILE_COMMANDS OFF +) diff --git a/cmake/minisketch.cmake b/cmake/minisketch.cmake new file mode 100644 index 0000000000000..bb93c80467256 --- /dev/null +++ b/cmake/minisketch.cmake @@ -0,0 +1,91 @@ +# Copyright (c) 2023-present The Bitcoin Core developers +# Distributed under the MIT software license, see 
the accompanying +# file COPYING or https://opensource.org/license/mit/. + +# Check for clmul instructions support. +if(MSVC) + set(CLMUL_CXXFLAGS) +else() + set(CLMUL_CXXFLAGS -mpclmul) +endif() +check_cxx_source_compiles_with_flags("${CLMUL_CXXFLAGS}" " + #include + #include + + int main() + { + __m128i a = _mm_cvtsi64_si128((uint64_t)7); + __m128i b = _mm_clmulepi64_si128(a, a, 37); + __m128i c = _mm_srli_epi64(b, 41); + __m128i d = _mm_xor_si128(b, c); + uint64_t e = _mm_cvtsi128_si64(d); + return e == 0; + } + " HAVE_CLMUL +) + +add_library(minisketch_common INTERFACE) +target_compile_definitions(minisketch_common INTERFACE + DISABLE_DEFAULT_FIELDS + ENABLE_FIELD_32 +) +if(MSVC) + target_compile_options(minisketch_common INTERFACE + /wd4060 + /wd4065 + /wd4146 + /wd4244 + /wd4267 + ) +endif() + +if(HAVE_CLMUL) + add_library(minisketch_clmul OBJECT EXCLUDE_FROM_ALL + ${PROJECT_SOURCE_DIR}/src/minisketch/src/fields/clmul_1byte.cpp + ${PROJECT_SOURCE_DIR}/src/minisketch/src/fields/clmul_2bytes.cpp + ${PROJECT_SOURCE_DIR}/src/minisketch/src/fields/clmul_3bytes.cpp + ${PROJECT_SOURCE_DIR}/src/minisketch/src/fields/clmul_4bytes.cpp + ${PROJECT_SOURCE_DIR}/src/minisketch/src/fields/clmul_5bytes.cpp + ${PROJECT_SOURCE_DIR}/src/minisketch/src/fields/clmul_6bytes.cpp + ${PROJECT_SOURCE_DIR}/src/minisketch/src/fields/clmul_7bytes.cpp + ${PROJECT_SOURCE_DIR}/src/minisketch/src/fields/clmul_8bytes.cpp + ) + target_compile_definitions(minisketch_clmul PUBLIC HAVE_CLMUL) + target_compile_options(minisketch_clmul PRIVATE ${CLMUL_CXXFLAGS}) + target_link_libraries(minisketch_clmul + PRIVATE + core_interface + minisketch_common + ) + set_target_properties(minisketch_clmul PROPERTIES + EXPORT_COMPILE_COMMANDS OFF + ) +endif() + +add_library(minisketch STATIC EXCLUDE_FROM_ALL + ${PROJECT_SOURCE_DIR}/src/minisketch/src/minisketch.cpp + ${PROJECT_SOURCE_DIR}/src/minisketch/src/fields/generic_1byte.cpp + ${PROJECT_SOURCE_DIR}/src/minisketch/src/fields/generic_2bytes.cpp + ${PROJECT_SOURCE_DIR}/src/minisketch/src/fields/generic_3bytes.cpp + ${PROJECT_SOURCE_DIR}/src/minisketch/src/fields/generic_4bytes.cpp + ${PROJECT_SOURCE_DIR}/src/minisketch/src/fields/generic_5bytes.cpp + ${PROJECT_SOURCE_DIR}/src/minisketch/src/fields/generic_6bytes.cpp + ${PROJECT_SOURCE_DIR}/src/minisketch/src/fields/generic_7bytes.cpp + ${PROJECT_SOURCE_DIR}/src/minisketch/src/fields/generic_8bytes.cpp +) + +target_include_directories(minisketch + PUBLIC + $ +) + +target_link_libraries(minisketch + PRIVATE + core_interface + minisketch_common + $ +) + +set_target_properties(minisketch PROPERTIES + EXPORT_COMPILE_COMMANDS OFF +) diff --git a/cmake/module/AddBoostIfNeeded.cmake b/cmake/module/AddBoostIfNeeded.cmake new file mode 100644 index 0000000000000..ecd0d6f2aba1e --- /dev/null +++ b/cmake/module/AddBoostIfNeeded.cmake @@ -0,0 +1,78 @@ +# Copyright (c) 2023-present The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or https://opensource.org/license/mit/. + +function(add_boost_if_needed) + #[=[ + TODO: Not all targets, which will be added in the future, require + Boost. Therefore, a proper check will be appropriate here. + + Implementation notes: + Although only Boost headers are used to build Bitcoin Core, + we still leverage a standard CMake's approach to handle + dependencies, i.e., the Boost::headers "library". + A command target_link_libraries(target PRIVATE Boost::headers) + will propagate Boost::headers usage requirements to the target. 
+ For Boost::headers such usage requirements is an include + directory and other added INTERFACE properties. + ]=] + + # We cannot rely on find_package(Boost ...) to work properly without + # Boost_NO_BOOST_CMAKE set until we require a more recent Boost because + # upstream did not ship proper CMake files until 1.82.0. + # Until then, we rely on CMake's FindBoost module. + # See: https://cmake.org/cmake/help/latest/policy/CMP0167.html + if(POLICY CMP0167) + cmake_policy(SET CMP0167 OLD) + endif() + set(Boost_NO_BOOST_CMAKE ON) + find_package(Boost 1.73.0 REQUIRED) + mark_as_advanced(Boost_INCLUDE_DIR) + set_target_properties(Boost::headers PROPERTIES IMPORTED_GLOBAL TRUE) + target_compile_definitions(Boost::headers INTERFACE + # We don't use multi_index serialization. + BOOST_MULTI_INDEX_DISABLE_SERIALIZATION + ) + if(DEFINED VCPKG_TARGET_TRIPLET) + # Workaround for https://github.com/microsoft/vcpkg/issues/36955. + target_compile_definitions(Boost::headers INTERFACE + BOOST_NO_USER_CONFIG + ) + endif() + + # Prevent use of std::unary_function, which was removed in C++17, + # and will generate warnings with newer compilers for Boost + # older than 1.80. + # See: https://github.com/boostorg/config/pull/430. + set(CMAKE_REQUIRED_DEFINITIONS -DBOOST_NO_CXX98_FUNCTION_BASE) + set(CMAKE_REQUIRED_INCLUDES ${Boost_INCLUDE_DIR}) + include(CMakePushCheckState) + cmake_push_check_state() + include(TryAppendCXXFlags) + set(CMAKE_REQUIRED_FLAGS ${working_compiler_werror_flag}) + set(CMAKE_TRY_COMPILE_TARGET_TYPE STATIC_LIBRARY) + check_cxx_source_compiles(" + #include + " NO_DIAGNOSTICS_BOOST_NO_CXX98_FUNCTION_BASE + ) + cmake_pop_check_state() + if(NO_DIAGNOSTICS_BOOST_NO_CXX98_FUNCTION_BASE) + target_compile_definitions(Boost::headers INTERFACE + BOOST_NO_CXX98_FUNCTION_BASE + ) + else() + set(CMAKE_REQUIRED_DEFINITIONS) + endif() + + # Some package managers, such as vcpkg, vendor Boost.Test separately + # from the rest of the headers, so we have to check for it individually. + if(BUILD_TESTS AND DEFINED VCPKG_TARGET_TRIPLET) + list(APPEND CMAKE_REQUIRED_DEFINITIONS -DBOOST_TEST_NO_MAIN) + include(CheckIncludeFileCXX) + check_include_file_cxx(boost/test/included/unit_test.hpp HAVE_BOOST_INCLUDED_UNIT_TEST_H) + if(NOT HAVE_BOOST_INCLUDED_UNIT_TEST_H) + message(FATAL_ERROR "Building test_bitcoin executable requested but boost/test/included/unit_test.hpp header not available.") + endif() + endif() + +endfunction() diff --git a/cmake/module/AddWindowsResources.cmake b/cmake/module/AddWindowsResources.cmake new file mode 100644 index 0000000000000..a9b4f51f73d96 --- /dev/null +++ b/cmake/module/AddWindowsResources.cmake @@ -0,0 +1,14 @@ +# Copyright (c) 2024-present The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or https://opensource.org/license/mit/. + +include_guard(GLOBAL) + +macro(add_windows_resources target rc_file) + if(WIN32) + target_sources(${target} PRIVATE ${rc_file}) + set_property(SOURCE ${rc_file} + APPEND PROPERTY COMPILE_DEFINITIONS WINDRES_PREPROC + ) + endif() +endmacro() diff --git a/cmake/module/CheckSourceCompilesAndLinks.cmake b/cmake/module/CheckSourceCompilesAndLinks.cmake new file mode 100644 index 0000000000000..88c897d524330 --- /dev/null +++ b/cmake/module/CheckSourceCompilesAndLinks.cmake @@ -0,0 +1,39 @@ +# Copyright (c) 2023-present The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or https://opensource.org/license/mit/. 
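The CheckSourceCompilesAndLinks.cmake module whose body follows provides thin wrappers around check_cxx_source_compiles(): the *_with_flags variants push CMAKE_REQUIRED_FLAGS for the duration of a single check, and check_cxx_source_links() temporarily switches CMAKE_TRY_COMPILE_TARGET_TYPE back to EXECUTABLE so the linker actually runs. A minimal usage sketch, assuming cmake/module is already on CMAKE_MODULE_PATH; the result-variable name is illustrative:

    include(CheckSourceCompilesAndLinks)
    check_cxx_source_compiles_with_flags("-msse4.1" "
      #include <smmintrin.h>
      int main() { return _mm_extract_epi32(_mm_setzero_si128(), 0); }
    " EXAMPLE_HAVE_SSE41)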
+ +include_guard(GLOBAL) +include(CheckCXXSourceCompiles) +include(CMakePushCheckState) + +# This avoids running the linker. +set(CMAKE_TRY_COMPILE_TARGET_TYPE STATIC_LIBRARY) + +macro(check_cxx_source_links source) + set(CMAKE_TRY_COMPILE_TARGET_TYPE EXECUTABLE) + check_cxx_source_compiles("${source}" ${ARGN}) + set(CMAKE_TRY_COMPILE_TARGET_TYPE STATIC_LIBRARY) +endmacro() + +macro(check_cxx_source_compiles_with_flags flags source) + cmake_push_check_state(RESET) + set(CMAKE_REQUIRED_FLAGS ${flags}) + list(JOIN CMAKE_REQUIRED_FLAGS " " CMAKE_REQUIRED_FLAGS) + check_cxx_source_compiles("${source}" ${ARGN}) + cmake_pop_check_state() +endmacro() + +macro(check_cxx_source_links_with_flags flags source) + cmake_push_check_state(RESET) + set(CMAKE_REQUIRED_FLAGS ${flags}) + list(JOIN CMAKE_REQUIRED_FLAGS " " CMAKE_REQUIRED_FLAGS) + check_cxx_source_links("${source}" ${ARGN}) + cmake_pop_check_state() +endmacro() + +macro(check_cxx_source_links_with_libs libs source) + cmake_push_check_state(RESET) + set(CMAKE_REQUIRED_LIBRARIES "${libs}") + check_cxx_source_links("${source}" ${ARGN}) + cmake_pop_check_state() +endmacro() diff --git a/cmake/module/FindBerkeleyDB.cmake b/cmake/module/FindBerkeleyDB.cmake new file mode 100644 index 0000000000000..03a3cce10c53f --- /dev/null +++ b/cmake/module/FindBerkeleyDB.cmake @@ -0,0 +1,133 @@ +# Copyright (c) 2023-present The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or https://opensource.org/license/mit/. + +#[=======================================================================[ +FindBerkeleyDB +-------------- + +Finds the Berkeley DB headers and library. + +Imported Targets +^^^^^^^^^^^^^^^^ + +This module provides imported target ``BerkeleyDB::BerkeleyDB``, if +Berkeley DB has been found. + +Result Variables +^^^^^^^^^^^^^^^^ + +This module defines the following variables: + +``BerkeleyDB_FOUND`` + "True" if Berkeley DB found. + +``BerkeleyDB_VERSION`` + The MAJOR.MINOR version of Berkeley DB found. + +#]=======================================================================] + +set(_BerkeleyDB_homebrew_prefix) +if(CMAKE_HOST_APPLE) + find_program(HOMEBREW_EXECUTABLE brew) + if(HOMEBREW_EXECUTABLE) + # The Homebrew package manager installs the berkeley-db* packages as + # "keg-only", which means they are not symlinked into the default prefix. + # To find such a package, the find_path() and find_library() commands + # need additional path hints that are computed by Homebrew itself. + execute_process( + COMMAND ${HOMEBREW_EXECUTABLE} --prefix berkeley-db@4 + OUTPUT_VARIABLE _BerkeleyDB_homebrew_prefix + ERROR_QUIET + OUTPUT_STRIP_TRAILING_WHITESPACE + ) + endif() +endif() + +find_path(BerkeleyDB_INCLUDE_DIR + NAMES db_cxx.h + HINTS ${_BerkeleyDB_homebrew_prefix}/include + PATH_SUFFIXES 4.8 48 db4.8 4 db4 5.3 db5.3 5 db5 +) +mark_as_advanced(BerkeleyDB_INCLUDE_DIR) +unset(_BerkeleyDB_homebrew_prefix) + +if(NOT BerkeleyDB_LIBRARY) + if(VCPKG_TARGET_TRIPLET) + # The vcpkg package manager installs the berkeleydb package with the same name + # of release and debug libraries. Therefore, the default search paths set by + # vcpkg's toolchain file cannot be used to search libraries as the debug one + # will always be found. 
+ set(CMAKE_FIND_USE_CMAKE_PATH FALSE) + endif() + + get_filename_component(_BerkeleyDB_lib_hint "${BerkeleyDB_INCLUDE_DIR}" DIRECTORY) + + find_library(BerkeleyDB_LIBRARY_RELEASE + NAMES db_cxx-4.8 db4_cxx db48 db_cxx-5.3 db_cxx-5 db_cxx libdb48 + NAMES_PER_DIR + HINTS ${_BerkeleyDB_lib_hint} + PATH_SUFFIXES lib + ) + mark_as_advanced(BerkeleyDB_LIBRARY_RELEASE) + + find_library(BerkeleyDB_LIBRARY_DEBUG + NAMES db_cxx-4.8 db4_cxx db48 db_cxx-5.3 db_cxx-5 db_cxx libdb48 + NAMES_PER_DIR + HINTS ${_BerkeleyDB_lib_hint} + PATH_SUFFIXES debug/lib + ) + mark_as_advanced(BerkeleyDB_LIBRARY_DEBUG) + + unset(_BerkeleyDB_lib_hint) + unset(CMAKE_FIND_USE_CMAKE_PATH) + + include(SelectLibraryConfigurations) + select_library_configurations(BerkeleyDB) + # The select_library_configurations() command sets BerkeleyDB_FOUND, but we + # want the one from the find_package_handle_standard_args() command below. + unset(BerkeleyDB_FOUND) +endif() + +if(BerkeleyDB_INCLUDE_DIR) + file(STRINGS "${BerkeleyDB_INCLUDE_DIR}/db.h" _BerkeleyDB_version_strings REGEX "^#define[\t ]+DB_VERSION_(MAJOR|MINOR|PATCH)[ \t]+[0-9]+.*") + string(REGEX REPLACE ".*#define[\t ]+DB_VERSION_MAJOR[ \t]+([0-9]+).*" "\\1" _BerkeleyDB_version_major "${_BerkeleyDB_version_strings}") + string(REGEX REPLACE ".*#define[\t ]+DB_VERSION_MINOR[ \t]+([0-9]+).*" "\\1" _BerkeleyDB_version_minor "${_BerkeleyDB_version_strings}") + string(REGEX REPLACE ".*#define[\t ]+DB_VERSION_PATCH[ \t]+([0-9]+).*" "\\1" _BerkeleyDB_version_patch "${_BerkeleyDB_version_strings}") + unset(_BerkeleyDB_version_strings) + # The MAJOR.MINOR.PATCH version will be logged in the following find_package_handle_standard_args() command. + set(_BerkeleyDB_full_version ${_BerkeleyDB_version_major}.${_BerkeleyDB_version_minor}.${_BerkeleyDB_version_patch}) + set(BerkeleyDB_VERSION ${_BerkeleyDB_version_major}.${_BerkeleyDB_version_minor}) + unset(_BerkeleyDB_version_major) + unset(_BerkeleyDB_version_minor) + unset(_BerkeleyDB_version_patch) +endif() + +include(FindPackageHandleStandardArgs) +find_package_handle_standard_args(BerkeleyDB + REQUIRED_VARS BerkeleyDB_LIBRARY BerkeleyDB_INCLUDE_DIR + VERSION_VAR _BerkeleyDB_full_version +) +unset(_BerkeleyDB_full_version) + +if(BerkeleyDB_FOUND AND NOT TARGET BerkeleyDB::BerkeleyDB) + add_library(BerkeleyDB::BerkeleyDB UNKNOWN IMPORTED) + set_target_properties(BerkeleyDB::BerkeleyDB PROPERTIES + INTERFACE_INCLUDE_DIRECTORIES "${BerkeleyDB_INCLUDE_DIR}" + ) + if(BerkeleyDB_LIBRARY_RELEASE) + set_property(TARGET BerkeleyDB::BerkeleyDB APPEND PROPERTY + IMPORTED_CONFIGURATIONS RELEASE + ) + set_target_properties(BerkeleyDB::BerkeleyDB PROPERTIES + IMPORTED_LOCATION_RELEASE "${BerkeleyDB_LIBRARY_RELEASE}" + ) + endif() + if(BerkeleyDB_LIBRARY_DEBUG) + set_property(TARGET BerkeleyDB::BerkeleyDB APPEND PROPERTY + IMPORTED_CONFIGURATIONS DEBUG) + set_target_properties(BerkeleyDB::BerkeleyDB PROPERTIES + IMPORTED_LOCATION_DEBUG "${BerkeleyDB_LIBRARY_DEBUG}" + ) + endif() +endif() diff --git a/cmake/module/FindLibevent.cmake b/cmake/module/FindLibevent.cmake new file mode 100644 index 0000000000000..901a4f3bd41ec --- /dev/null +++ b/cmake/module/FindLibevent.cmake @@ -0,0 +1,80 @@ +# Copyright (c) 2024-present The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or https://opensource.org/license/mit/. + +#[=======================================================================[ +FindLibevent +------------ + +Finds the Libevent headers and libraries. 
+ +This is a wrapper around find_package()/pkg_check_modules() commands that: + - facilitates searching in various build environments + - prints a standard log message + +#]=======================================================================] + +# Check whether evhttp_connection_get_peer expects const char**. +# See https://github.com/libevent/libevent/commit/a18301a2bb160ff7c3ffaf5b7653c39ffe27b385 +function(check_evhttp_connection_get_peer target) + include(CMakePushCheckState) + cmake_push_check_state(RESET) + set(CMAKE_REQUIRED_LIBRARIES ${target}) + include(CheckCXXSourceCompiles) + check_cxx_source_compiles(" + #include + #include + + int main() + { + evhttp_connection* conn = (evhttp_connection*)1; + const char* host; + uint16_t port; + evhttp_connection_get_peer(conn, &host, &port); + } + " HAVE_EVHTTP_CONNECTION_GET_PEER_CONST_CHAR + ) + cmake_pop_check_state() + set(HAVE_EVHTTP_CONNECTION_GET_PEER_CONST_CHAR ${HAVE_EVHTTP_CONNECTION_GET_PEER_CONST_CHAR} PARENT_SCOPE) +endfunction() + + +include(FindPackageHandleStandardArgs) +if(VCPKG_TARGET_TRIPLET) + find_package(Libevent ${Libevent_FIND_VERSION} NO_MODULE QUIET + COMPONENTS extra + ) + find_package_handle_standard_args(Libevent + REQUIRED_VARS Libevent_DIR + VERSION_VAR Libevent_VERSION + ) + check_evhttp_connection_get_peer(libevent::extra) + add_library(libevent::libevent ALIAS libevent::extra) + mark_as_advanced(Libevent_DIR) + mark_as_advanced(_event_h) + mark_as_advanced(_event_lib) +else() + find_package(PkgConfig REQUIRED) + pkg_check_modules(libevent QUIET + IMPORTED_TARGET + libevent>=${Libevent_FIND_VERSION} + ) + set(_libevent_required_vars libevent_LIBRARY_DIRS libevent_FOUND) + if(NOT WIN32) + pkg_check_modules(libevent_pthreads QUIET + IMPORTED_TARGET + libevent_pthreads>=${Libevent_FIND_VERSION} + ) + list(APPEND _libevent_required_vars libevent_pthreads_FOUND) + endif() + find_package_handle_standard_args(Libevent + REQUIRED_VARS ${_libevent_required_vars} + VERSION_VAR libevent_VERSION + ) + unset(_libevent_required_vars) + check_evhttp_connection_get_peer(PkgConfig::libevent) + add_library(libevent::libevent ALIAS PkgConfig::libevent) + if(NOT WIN32) + add_library(libevent::pthreads ALIAS PkgConfig::libevent_pthreads) + endif() +endif() diff --git a/cmake/module/FindMiniUPnPc.cmake b/cmake/module/FindMiniUPnPc.cmake new file mode 100644 index 0000000000000..34b94f05f16ef --- /dev/null +++ b/cmake/module/FindMiniUPnPc.cmake @@ -0,0 +1,84 @@ +# Copyright (c) 2023-present The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or https://opensource.org/license/mit/. + +if(NOT MSVC) + find_package(PkgConfig REQUIRED) + pkg_check_modules(PC_MiniUPnPc QUIET miniupnpc) +endif() + +find_path(MiniUPnPc_INCLUDE_DIR + NAMES miniupnpc/miniupnpc.h + PATHS ${PC_MiniUPnPc_INCLUDE_DIRS} +) + +if(MiniUPnPc_INCLUDE_DIR) + file( + STRINGS "${MiniUPnPc_INCLUDE_DIR}/miniupnpc/miniupnpc.h" version_strings + REGEX "^#define[\t ]+MINIUPNPC_API_VERSION[\t ]+[0-9]+" + ) + string(REGEX REPLACE "^#define[\t ]+MINIUPNPC_API_VERSION[\t ]+([0-9]+)" "\\1" MiniUPnPc_API_VERSION "${version_strings}") + + # The minimum supported miniUPnPc API version is set to 17. This excludes + # versions with known vulnerabilities. 
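Both branches of FindLibevent.cmake above end by exposing the same imported-target names, libevent::libevent and, outside Windows, libevent::pthreads, so consumers do not need to know whether vcpkg's CMake package or pkg-config located the library. A hedged consumer sketch; the executable name and minimum version here are illustrative, not taken from this diff:

    find_package(Libevent 2.1.8 MODULE REQUIRED)
    add_executable(example_daemon main.cpp)
    target_link_libraries(example_daemon PRIVATE
      libevent::libevent
      $<$<NOT:$<PLATFORM_ID:Windows>>:libevent::pthreads>
    )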
+ if(MiniUPnPc_API_VERSION GREATER_EQUAL 17) + set(MiniUPnPc_API_VERSION_OK TRUE) + endif() +endif() + +if(MSVC) + cmake_path(GET MiniUPnPc_INCLUDE_DIR PARENT_PATH MiniUPnPc_IMPORTED_PATH) + find_library(MiniUPnPc_LIBRARY_DEBUG + NAMES miniupnpc PATHS ${MiniUPnPc_IMPORTED_PATH}/debug/lib + NO_DEFAULT_PATH + ) + find_library(MiniUPnPc_LIBRARY_RELEASE + NAMES miniupnpc PATHS ${MiniUPnPc_IMPORTED_PATH}/lib + NO_DEFAULT_PATH + ) + set(MiniUPnPc_required MiniUPnPc_IMPORTED_PATH) +else() + find_library(MiniUPnPc_LIBRARY + NAMES miniupnpc + PATHS ${PC_MiniUPnPc_LIBRARY_DIRS} + ) + set(MiniUPnPc_required MiniUPnPc_LIBRARY) +endif() + +include(FindPackageHandleStandardArgs) +find_package_handle_standard_args(MiniUPnPc + REQUIRED_VARS ${MiniUPnPc_required} MiniUPnPc_INCLUDE_DIR MiniUPnPc_API_VERSION_OK +) + +if(MiniUPnPc_FOUND AND NOT TARGET MiniUPnPc::MiniUPnPc) + add_library(MiniUPnPc::MiniUPnPc UNKNOWN IMPORTED) + set_target_properties(MiniUPnPc::MiniUPnPc PROPERTIES + INTERFACE_INCLUDE_DIRECTORIES "${MiniUPnPc_INCLUDE_DIR}" + ) + if(MSVC) + if(MiniUPnPc_LIBRARY_DEBUG) + set_property(TARGET MiniUPnPc::MiniUPnPc APPEND PROPERTY IMPORTED_CONFIGURATIONS DEBUG) + set_target_properties(MiniUPnPc::MiniUPnPc PROPERTIES + IMPORTED_LOCATION_DEBUG "${MiniUPnPc_LIBRARY_DEBUG}" + ) + endif() + if(MiniUPnPc_LIBRARY_RELEASE) + set_property(TARGET MiniUPnPc::MiniUPnPc APPEND PROPERTY IMPORTED_CONFIGURATIONS RELEASE) + set_target_properties(MiniUPnPc::MiniUPnPc PROPERTIES + IMPORTED_LOCATION_RELEASE "${MiniUPnPc_LIBRARY_RELEASE}" + ) + endif() + else() + set_target_properties(MiniUPnPc::MiniUPnPc PROPERTIES + IMPORTED_LOCATION "${MiniUPnPc_LIBRARY}" + ) + endif() + set_property(TARGET MiniUPnPc::MiniUPnPc PROPERTY + INTERFACE_COMPILE_DEFINITIONS USE_UPNP=1 $<$:MINIUPNP_STATICLIB> + ) +endif() + +mark_as_advanced( + MiniUPnPc_INCLUDE_DIR + MiniUPnPc_LIBRARY +) diff --git a/cmake/module/FindQt.cmake b/cmake/module/FindQt.cmake new file mode 100644 index 0000000000000..2e43294a99699 --- /dev/null +++ b/cmake/module/FindQt.cmake @@ -0,0 +1,66 @@ +# Copyright (c) 2024-present The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or https://opensource.org/license/mit/. + +#[=======================================================================[ +FindQt +------ + +Finds the Qt headers and libraries. + +This is a wrapper around find_package() command that: + - facilitates searching in various build environments + - prints a standard log message + +#]=======================================================================] + +set(_qt_homebrew_prefix) +if(CMAKE_HOST_APPLE) + find_program(HOMEBREW_EXECUTABLE brew) + if(HOMEBREW_EXECUTABLE) + execute_process( + COMMAND ${HOMEBREW_EXECUTABLE} --prefix qt@${Qt_FIND_VERSION_MAJOR} + OUTPUT_VARIABLE _qt_homebrew_prefix + ERROR_QUIET + OUTPUT_STRIP_TRAILING_WHITESPACE + ) + endif() +endif() + +# Save CMAKE_FIND_ROOT_PATH_MODE_LIBRARY state. +unset(_qt_find_root_path_mode_library_saved) +if(DEFINED CMAKE_FIND_ROOT_PATH_MODE_LIBRARY) + set(_qt_find_root_path_mode_library_saved ${CMAKE_FIND_ROOT_PATH_MODE_LIBRARY}) +endif() + +# The Qt config files internally use find_library() calls for all +# dependencies to ensure their availability. In turn, the find_library() +# inspects the well-known locations on the file system; therefore, it must +# be able to find platform-specific system libraries, for example: +# /usr/x86_64-w64-mingw32/lib/libm.a or /usr/arm-linux-gnueabihf/lib/libm.a. 
+set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY BOTH) + +find_package(Qt${Qt_FIND_VERSION_MAJOR} ${Qt_FIND_VERSION} + COMPONENTS ${Qt_FIND_COMPONENTS} + HINTS ${_qt_homebrew_prefix} + PATH_SUFFIXES Qt${Qt_FIND_VERSION_MAJOR} # Required on OpenBSD systems. +) +unset(_qt_homebrew_prefix) + +# Restore CMAKE_FIND_ROOT_PATH_MODE_LIBRARY state. +if(DEFINED _qt_find_root_path_mode_library_saved) + set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ${_qt_find_root_path_mode_library_saved}) + unset(_qt_find_root_path_mode_library_saved) +else() + unset(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY) +endif() + +include(FindPackageHandleStandardArgs) +find_package_handle_standard_args(Qt + REQUIRED_VARS Qt${Qt_FIND_VERSION_MAJOR}_DIR + VERSION_VAR Qt${Qt_FIND_VERSION_MAJOR}_VERSION +) + +foreach(component IN LISTS Qt_FIND_COMPONENTS ITEMS "") + mark_as_advanced(Qt${Qt_FIND_VERSION_MAJOR}${component}_DIR) +endforeach() diff --git a/cmake/module/FindUSDT.cmake b/cmake/module/FindUSDT.cmake new file mode 100644 index 0000000000000..0ba9a58fc10ac --- /dev/null +++ b/cmake/module/FindUSDT.cmake @@ -0,0 +1,64 @@ +# Copyright (c) 2024-present The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or https://opensource.org/license/mit/. + +#[=======================================================================[ +FindUSDT +-------- + +Finds the Userspace, Statically Defined Tracing header(s). + +Imported Targets +^^^^^^^^^^^^^^^^ + +This module provides imported target ``USDT::headers``, if +USDT has been found. + +Result Variables +^^^^^^^^^^^^^^^^ + +This module defines the following variables: + +``USDT_FOUND`` + "True" if USDT found. + +#]=======================================================================] + +find_path(USDT_INCLUDE_DIR + NAMES sys/sdt.h +) +mark_as_advanced(USDT_INCLUDE_DIR) + +if(USDT_INCLUDE_DIR) + include(CMakePushCheckState) + cmake_push_check_state(RESET) + + include(CheckCXXSourceCompiles) + set(CMAKE_REQUIRED_INCLUDES ${USDT_INCLUDE_DIR}) + check_cxx_source_compiles(" + #include + + int main() + { + DTRACE_PROBE(context, event); + int a, b, c, d, e, f, g; + DTRACE_PROBE7(context, event, a, b, c, d, e, f, g); + } + " HAVE_USDT_H + ) + + cmake_pop_check_state() +endif() + +include(FindPackageHandleStandardArgs) +find_package_handle_standard_args(USDT + REQUIRED_VARS USDT_INCLUDE_DIR HAVE_USDT_H +) + +if(USDT_FOUND AND NOT TARGET USDT::headers) + add_library(USDT::headers INTERFACE IMPORTED) + set_target_properties(USDT::headers PROPERTIES + INTERFACE_INCLUDE_DIRECTORIES "${USDT_INCLUDE_DIR}" + ) + set(ENABLE_TRACING TRUE) +endif() diff --git a/cmake/module/FlagsSummary.cmake b/cmake/module/FlagsSummary.cmake new file mode 100644 index 0000000000000..91d1df90d99ba --- /dev/null +++ b/cmake/module/FlagsSummary.cmake @@ -0,0 +1,74 @@ +# Copyright (c) 2024-present The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or https://opensource.org/license/mit/. + +include_guard(GLOBAL) + +function(indent_message header content indent_num) + if(indent_num GREATER 0) + string(REPEAT " " ${indent_num} indentation) + string(REPEAT "." ${indent_num} tail) + string(REGEX REPLACE "${tail}$" "" header "${header}") + endif() + message("${indentation}${header} ${content}") +endfunction() + +# Print tools' flags on best-effort. Include the abstracted +# CMake flags that we touch ourselves. 
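A note on the indent_message() helper defined just above: to keep the value column of the flags summary aligned when a configuration is nested, it prepends indent_num spaces and strips the same number of trailing dots from the header. An illustrative call and its output:

    indent_message("C++ compiler flags ....." "-O2 -g" 2)
    # prints "  C++ compiler flags ... -O2 -g"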
+function(print_flags_per_config config indent_num) + string(TOUPPER "${config}" config_uppercase) + + include(GetTargetInterface) + get_target_interface(definitions "${config}" core_interface COMPILE_DEFINITIONS) + indent_message("Preprocessor defined macros ..........." "${definitions}" ${indent_num}) + + string(STRIP "${CMAKE_CXX_COMPILER_ARG1} ${CMAKE_CXX_FLAGS}" combined_cxx_flags) + string(STRIP "${combined_cxx_flags} ${CMAKE_CXX_FLAGS_${config_uppercase}}" combined_cxx_flags) + string(STRIP "${combined_cxx_flags} ${CMAKE_CXX${CMAKE_CXX_STANDARD}_STANDARD_COMPILE_OPTION}" combined_cxx_flags) + if(CMAKE_POSITION_INDEPENDENT_CODE) + string(JOIN " " combined_cxx_flags ${combined_cxx_flags} ${CMAKE_CXX_COMPILE_OPTIONS_PIC}) + endif() + get_target_interface(core_cxx_flags "${config}" core_interface COMPILE_OPTIONS) + string(STRIP "${combined_cxx_flags} ${core_cxx_flags}" combined_cxx_flags) + string(STRIP "${combined_cxx_flags} ${APPEND_CPPFLAGS}" combined_cxx_flags) + string(STRIP "${combined_cxx_flags} ${APPEND_CXXFLAGS}" combined_cxx_flags) + indent_message("C++ compiler flags ...................." "${combined_cxx_flags}" ${indent_num}) + + string(STRIP "${CMAKE_CXX_FLAGS} ${CMAKE_CXX_FLAGS_${config_uppercase}}" combined_linker_flags) + string(STRIP "${combined_linker_flags} ${CMAKE_EXE_LINKER_FLAGS}" combined_linker_flags) + get_target_interface(common_link_options "${config}" core_interface LINK_OPTIONS) + string(STRIP "${combined_linker_flags} ${common_link_options}" combined_linker_flags) + if(CMAKE_CXX_LINK_PIE_SUPPORTED) + string(JOIN " " combined_linker_flags ${combined_linker_flags} ${CMAKE_CXX_LINK_OPTIONS_PIE}) + endif() + string(STRIP "${combined_linker_flags} ${APPEND_LDFLAGS}" combined_linker_flags) + indent_message("Linker flags .........................." "${combined_linker_flags}" ${indent_num}) +endfunction() + +function(flags_summary) + get_property(is_multi_config GLOBAL PROPERTY GENERATOR_IS_MULTI_CONFIG) + if(is_multi_config) + list(JOIN CMAKE_CONFIGURATION_TYPES ", " configs) + message("Available build configurations ........ ${configs}") + if(CMAKE_GENERATOR MATCHES "Visual Studio") + set(default_config "Debug") + else() + list(GET CMAKE_CONFIGURATION_TYPES 0 default_config) + endif() + message("Default build configuration ........... ${default_config}") + foreach(config IN LISTS CMAKE_CONFIGURATION_TYPES) + message("") + message("'${config}' build configuration:") + print_flags_per_config("${config}" 2) + endforeach() + else() + message("CMAKE_BUILD_TYPE ...................... ${CMAKE_BUILD_TYPE}") + print_flags_per_config("${CMAKE_BUILD_TYPE}" 0) + endif() + message("") + message([=[ +NOTE: The summary above may not exactly match the final applied build flags + if any additional CMAKE_* or environment variables have been modified. + To see the exact flags applied, build with the --verbose option. +]=]) +endfunction() diff --git a/cmake/module/GenerateHeaders.cmake b/cmake/module/GenerateHeaders.cmake new file mode 100644 index 0000000000000..c69007acb6849 --- /dev/null +++ b/cmake/module/GenerateHeaders.cmake @@ -0,0 +1,21 @@ +# Copyright (c) 2023-present The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or https://opensource.org/license/mit/. 
+
+function(generate_header_from_json json_source_relpath)
+  add_custom_command(
+    OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/${json_source_relpath}.h
+    COMMAND ${CMAKE_COMMAND} -DJSON_SOURCE_PATH=${CMAKE_CURRENT_SOURCE_DIR}/${json_source_relpath} -DHEADER_PATH=${CMAKE_CURRENT_BINARY_DIR}/${json_source_relpath}.h -P ${PROJECT_SOURCE_DIR}/cmake/script/GenerateHeaderFromJson.cmake
+    DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/${json_source_relpath} ${PROJECT_SOURCE_DIR}/cmake/script/GenerateHeaderFromJson.cmake
+    VERBATIM
+  )
+endfunction()
+
+function(generate_header_from_raw raw_source_relpath raw_namespace)
+  add_custom_command(
+    OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/${raw_source_relpath}.h
+    COMMAND ${CMAKE_COMMAND} -DRAW_SOURCE_PATH=${CMAKE_CURRENT_SOURCE_DIR}/${raw_source_relpath} -DHEADER_PATH=${CMAKE_CURRENT_BINARY_DIR}/${raw_source_relpath}.h -DRAW_NAMESPACE=${raw_namespace} -P ${PROJECT_SOURCE_DIR}/cmake/script/GenerateHeaderFromRaw.cmake
+    DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/${raw_source_relpath} ${PROJECT_SOURCE_DIR}/cmake/script/GenerateHeaderFromRaw.cmake
+    VERBATIM
+  )
+endfunction()
diff --git a/cmake/module/GenerateSetupNsi.cmake b/cmake/module/GenerateSetupNsi.cmake
new file mode 100644
index 0000000000000..3c358c54958b9
--- /dev/null
+++ b/cmake/module/GenerateSetupNsi.cmake
@@ -0,0 +1,18 @@
+# Copyright (c) 2023-present The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or https://opensource.org/license/mit/.
+
+function(generate_setup_nsi)
+  set(abs_top_srcdir ${PROJECT_SOURCE_DIR})
+  set(abs_top_builddir ${PROJECT_BINARY_DIR})
+  set(PACKAGE_URL ${PROJECT_HOMEPAGE_URL})
+  set(PACKAGE_TARNAME "bitcoin")
+  set(BITCOIN_GUI_NAME "bitcoin-qt")
+  set(BITCOIN_DAEMON_NAME "bitcoind")
+  set(BITCOIN_CLI_NAME "bitcoin-cli")
+  set(BITCOIN_TX_NAME "bitcoin-tx")
+  set(BITCOIN_WALLET_TOOL_NAME "bitcoin-wallet")
+  set(BITCOIN_TEST_NAME "test_bitcoin")
+  set(EXEEXT ${CMAKE_EXECUTABLE_SUFFIX})
+  configure_file(${PROJECT_SOURCE_DIR}/share/setup.nsi.in ${PROJECT_BINARY_DIR}/bitcoin-win64-setup.nsi USE_SOURCE_PERMISSIONS @ONLY)
+endfunction()
diff --git a/cmake/module/GetTargetInterface.cmake b/cmake/module/GetTargetInterface.cmake
new file mode 100644
index 0000000000000..1e455d456b419
--- /dev/null
+++ b/cmake/module/GetTargetInterface.cmake
@@ -0,0 +1,53 @@
+# Copyright (c) 2023-present The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or https://opensource.org/license/mit/.
+
+include_guard(GLOBAL)
+
+# Evaluates config-specific generator expressions in a list.
+# Recognizable patterns are:
+# - $<$<CONFIG:[config]>:[value]>
+# - $<$<NOT:$<CONFIG:[config]>>:[value]>
+function(evaluate_generator_expressions list config)
+  set(input ${${list}})
+  set(result)
+  foreach(token IN LISTS input)
+    if(token MATCHES "\\$<\\$<CONFIG:([^>]+)>:([^>]+)>")
+      if(CMAKE_MATCH_1 STREQUAL config)
+        list(APPEND result ${CMAKE_MATCH_2})
+      endif()
+    elseif(token MATCHES "\\$<\\$<NOT:\\$<CONFIG:([^>]+)>>:([^>]+)>")
+      if(NOT CMAKE_MATCH_1 STREQUAL config)
+        list(APPEND result ${CMAKE_MATCH_2})
+      endif()
+    else()
+      list(APPEND result ${token})
+    endif()
+  endforeach()
+  set(${list} ${result} PARENT_SCOPE)
+endfunction()
+
+
+# Gets target's interface properties recursively.
+function(get_target_interface var config target property)
+  get_target_property(result ${target} INTERFACE_${property})
+  if(result)
+    evaluate_generator_expressions(result "${config}")
+    list(JOIN result " " result)
+  else()
+    set(result)
+  endif()
+
+  get_target_property(dependencies ${target} INTERFACE_LINK_LIBRARIES)
+  if(dependencies)
+    evaluate_generator_expressions(dependencies "${config}")
+    foreach(dependency IN LISTS dependencies)
+      if(TARGET ${dependency})
+        get_target_interface(dep_result "${config}" ${dependency} ${property})
+        string(STRIP "${result} ${dep_result}" result)
+      endif()
+    endforeach()
+  endif()
+
+  set(${var} "${result}" PARENT_SCOPE)
+endfunction()
diff --git a/cmake/module/Maintenance.cmake b/cmake/module/Maintenance.cmake
new file mode 100644
index 0000000000000..456419b7222fc
--- /dev/null
+++ b/cmake/module/Maintenance.cmake
@@ -0,0 +1,151 @@
+# Copyright (c) 2023-present The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or https://opensource.org/license/mit/.
+
+include_guard(GLOBAL)
+
+function(setup_split_debug_script)
+  if(CMAKE_HOST_SYSTEM_NAME STREQUAL "Linux")
+    set(OBJCOPY ${CMAKE_OBJCOPY})
+    set(STRIP ${CMAKE_STRIP})
+    configure_file(
+      contrib/devtools/split-debug.sh.in split-debug.sh
+      FILE_PERMISSIONS OWNER_READ OWNER_EXECUTE
+                       GROUP_READ GROUP_EXECUTE
+                       WORLD_READ
+      @ONLY
+    )
+  endif()
+endfunction()
+
+function(add_maintenance_targets)
+  if(NOT PYTHON_COMMAND)
+    return()
+  endif()
+
+  if(CMAKE_SYSTEM_NAME STREQUAL "Darwin")
+    set(exe_format MACHO)
+  elseif(WIN32)
+    set(exe_format PE)
+  else()
+    set(exe_format ELF)
+  endif()
+
+  # In CMake, the components of the compiler invocation are separated into distinct variables:
+  # - CMAKE_CXX_COMPILER: the full path to the compiler binary itself (e.g., /usr/bin/clang++).
+  # - CMAKE_CXX_COMPILER_ARG1: a string containing initial compiler options (e.g., --target=x86_64-apple-darwin -nostdlibinc).
+  # By concatenating these variables, we form the complete command line to be passed to a Python script via the CXX environment variable.
+  string(STRIP "${CMAKE_CXX_COMPILER} ${CMAKE_CXX_COMPILER_ARG1}" cxx_compiler_command)
+  add_custom_target(test-security-check
+    COMMAND ${CMAKE_COMMAND} -E env CXX=${cxx_compiler_command} CXXFLAGS=${CMAKE_CXX_FLAGS} LDFLAGS=${CMAKE_EXE_LINKER_FLAGS} ${PYTHON_COMMAND} ${PROJECT_SOURCE_DIR}/contrib/devtools/test-security-check.py TestSecurityChecks.test_${exe_format}
+    COMMAND ${CMAKE_COMMAND} -E env CXX=${cxx_compiler_command} CXXFLAGS=${CMAKE_CXX_FLAGS} LDFLAGS=${CMAKE_EXE_LINKER_FLAGS} ${PYTHON_COMMAND} ${PROJECT_SOURCE_DIR}/contrib/devtools/test-symbol-check.py TestSymbolChecks.test_${exe_format}
+    VERBATIM
+  )
+
+  foreach(target IN ITEMS bitcoind bitcoin-qt bitcoin-cli bitcoin-tx bitcoin-util bitcoin-wallet test_bitcoin bench_bitcoin)
+    if(TARGET ${target})
+      list(APPEND executables $<TARGET_FILE:${target}>)
+    endif()
+  endforeach()
+
+  add_custom_target(check-symbols
+    COMMAND ${CMAKE_COMMAND} -E echo "Running symbol and dynamic library checks..."
+    COMMAND ${PYTHON_COMMAND} ${PROJECT_SOURCE_DIR}/contrib/devtools/symbol-check.py ${executables}
+    VERBATIM
+  )
+
+  if(ENABLE_HARDENING)
+    add_custom_target(check-security
+      COMMAND ${CMAKE_COMMAND} -E echo "Checking binary security..."
+      COMMAND ${PYTHON_COMMAND} ${PROJECT_SOURCE_DIR}/contrib/devtools/security-check.py ${executables}
+      VERBATIM
+    )
+  else()
+    add_custom_target(check-security)
+  endif()
+endfunction()
+
+function(add_windows_deploy_target)
+  if(MINGW AND TARGET bitcoin-qt AND TARGET bitcoind AND TARGET bitcoin-cli AND TARGET bitcoin-tx AND TARGET bitcoin-wallet AND TARGET bitcoin-util AND TARGET test_bitcoin)
+    # TODO: Consider replacing this code with the CPack NSIS Generator.
+    #       See https://cmake.org/cmake/help/latest/cpack_gen/nsis.html
+    include(GenerateSetupNsi)
+    generate_setup_nsi()
+    add_custom_command(
+      OUTPUT ${PROJECT_BINARY_DIR}/bitcoin-win64-setup.exe
+      COMMAND ${CMAKE_COMMAND} -E make_directory ${PROJECT_BINARY_DIR}/release
+      COMMAND ${CMAKE_STRIP} $<TARGET_FILE:bitcoin-qt> -o ${PROJECT_BINARY_DIR}/release/$<TARGET_FILE_NAME:bitcoin-qt>
+      COMMAND ${CMAKE_STRIP} $<TARGET_FILE:bitcoind> -o ${PROJECT_BINARY_DIR}/release/$<TARGET_FILE_NAME:bitcoind>
+      COMMAND ${CMAKE_STRIP} $<TARGET_FILE:bitcoin-cli> -o ${PROJECT_BINARY_DIR}/release/$<TARGET_FILE_NAME:bitcoin-cli>
+      COMMAND ${CMAKE_STRIP} $<TARGET_FILE:bitcoin-tx> -o ${PROJECT_BINARY_DIR}/release/$<TARGET_FILE_NAME:bitcoin-tx>
+      COMMAND ${CMAKE_STRIP} $<TARGET_FILE:bitcoin-wallet> -o ${PROJECT_BINARY_DIR}/release/$<TARGET_FILE_NAME:bitcoin-wallet>
+      COMMAND ${CMAKE_STRIP} $<TARGET_FILE:bitcoin-util> -o ${PROJECT_BINARY_DIR}/release/$<TARGET_FILE_NAME:bitcoin-util>
+      COMMAND ${CMAKE_STRIP} $<TARGET_FILE:test_bitcoin> -o ${PROJECT_BINARY_DIR}/release/$<TARGET_FILE_NAME:test_bitcoin>
+      COMMAND makensis -V2 ${PROJECT_BINARY_DIR}/bitcoin-win64-setup.nsi
+      VERBATIM
+    )
+    add_custom_target(deploy DEPENDS ${PROJECT_BINARY_DIR}/bitcoin-win64-setup.exe)
+  endif()
+endfunction()
+
+function(add_macos_deploy_target)
+  if(CMAKE_SYSTEM_NAME STREQUAL "Darwin" AND TARGET bitcoin-qt)
+    set(macos_app "Bitcoin-Qt.app")
+    # Populate Contents subdirectory.
+    configure_file(${PROJECT_SOURCE_DIR}/share/qt/Info.plist.in ${macos_app}/Contents/Info.plist NO_SOURCE_PERMISSIONS)
+    file(CONFIGURE OUTPUT ${macos_app}/Contents/PkgInfo CONTENT "APPL????")
+    # Populate Contents/Resources subdirectory.
+    file(CONFIGURE OUTPUT ${macos_app}/Contents/Resources/empty.lproj CONTENT "")
+    configure_file(${PROJECT_SOURCE_DIR}/src/qt/res/icons/bitcoin.icns ${macos_app}/Contents/Resources/bitcoin.icns NO_SOURCE_PERMISSIONS COPYONLY)
+    file(CONFIGURE OUTPUT ${macos_app}/Contents/Resources/Base.lproj/InfoPlist.strings
+      CONTENT "{ CFBundleDisplayName = \"@PACKAGE_NAME@\"; CFBundleName = \"@PACKAGE_NAME@\"; }"
+    )
+
+    add_custom_command(
+      OUTPUT ${PROJECT_BINARY_DIR}/${macos_app}/Contents/MacOS/Bitcoin-Qt
+      COMMAND ${CMAKE_COMMAND} --install ${PROJECT_BINARY_DIR} --config $<CONFIG> --component GUI --prefix ${macos_app}/Contents/MacOS --strip
+      COMMAND ${CMAKE_COMMAND} -E rename ${macos_app}/Contents/MacOS/bin/$<TARGET_FILE_NAME:bitcoin-qt> ${macos_app}/Contents/MacOS/Bitcoin-Qt
+      COMMAND ${CMAKE_COMMAND} -E rm -rf ${macos_app}/Contents/MacOS/bin
+      VERBATIM
+    )
+
+    string(REPLACE " " "-" osx_volname ${PACKAGE_NAME})
+    if(CMAKE_HOST_APPLE)
+      add_custom_command(
+        OUTPUT ${PROJECT_BINARY_DIR}/${osx_volname}.zip
+        COMMAND ${PYTHON_COMMAND} ${PROJECT_SOURCE_DIR}/contrib/macdeploy/macdeployqtplus ${macos_app} ${osx_volname} -translations-dir=${QT_TRANSLATIONS_DIR} -zip
+        DEPENDS ${PROJECT_BINARY_DIR}/${macos_app}/Contents/MacOS/Bitcoin-Qt
+        VERBATIM
+      )
+      add_custom_target(deploydir
+        DEPENDS ${PROJECT_BINARY_DIR}/${osx_volname}.zip
+      )
+      add_custom_target(deploy
+        DEPENDS ${PROJECT_BINARY_DIR}/${osx_volname}.zip
+      )
+    else()
+      add_custom_command(
+        OUTPUT ${PROJECT_BINARY_DIR}/dist/${macos_app}/Contents/MacOS/Bitcoin-Qt
+        COMMAND OBJDUMP=${CMAKE_OBJDUMP} ${PYTHON_COMMAND} ${PROJECT_SOURCE_DIR}/contrib/macdeploy/macdeployqtplus ${macos_app} ${osx_volname} -translations-dir=${QT_TRANSLATIONS_DIR}
+        DEPENDS ${PROJECT_BINARY_DIR}/${macos_app}/Contents/MacOS/Bitcoin-Qt
+        VERBATIM
+      )
+
+
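The deploydir/deploy targets that follow only chain these outputs together; from a user's point of view the whole flow presumably reduces to something like the following (the BUILD_GUI option name and job count are illustrative, only the deploy target name comes from this module):

    cmake -B build -DBUILD_GUI=ON
    cmake --build build -j10
    cmake --build build --target deploy   # bitcoin-win64-setup.exe on MinGW, a zipped .app bundle on macOS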
add_custom_target(deploydir + DEPENDS ${PROJECT_BINARY_DIR}/dist/${macos_app}/Contents/MacOS/Bitcoin-Qt + ) + + find_program(ZIP_COMMAND zip REQUIRED) + add_custom_command( + OUTPUT ${PROJECT_BINARY_DIR}/dist/${osx_volname}.zip + WORKING_DIRECTORY dist + COMMAND ${PROJECT_SOURCE_DIR}/cmake/script/macos_zip.sh ${ZIP_COMMAND} ${osx_volname}.zip + VERBATIM + ) + add_custom_target(deploy + DEPENDS ${PROJECT_BINARY_DIR}/dist/${osx_volname}.zip + ) + endif() + add_dependencies(deploydir bitcoin-qt) + add_dependencies(deploy deploydir) + endif() +endfunction() diff --git a/cmake/module/ProcessConfigurations.cmake b/cmake/module/ProcessConfigurations.cmake new file mode 100644 index 0000000000000..7e2fc0080e98e --- /dev/null +++ b/cmake/module/ProcessConfigurations.cmake @@ -0,0 +1,175 @@ +# Copyright (c) 2023-present The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or https://opensource.org/license/mit/. + +include_guard(GLOBAL) + +include(TryAppendCXXFlags) + +macro(normalize_string string) + string(REGEX REPLACE " +" " " ${string} "${${string}}") + string(STRIP "${${string}}" ${string}) +endmacro() + +function(are_flags_overridden flags_var result_var) + normalize_string(${flags_var}) + normalize_string(${flags_var}_INIT) + if(${flags_var} STREQUAL ${flags_var}_INIT) + set(${result_var} FALSE PARENT_SCOPE) + else() + set(${result_var} TRUE PARENT_SCOPE) + endif() +endfunction() + + +# Removes duplicated flags. The relative order of flags is preserved. +# If duplicates are encountered, only the last instance is preserved. +function(deduplicate_flags flags) + separate_arguments(${flags}) + list(REVERSE ${flags}) + list(REMOVE_DUPLICATES ${flags}) + list(REVERSE ${flags}) + list(JOIN ${flags} " " result) + set(${flags} "${result}" PARENT_SCOPE) +endfunction() + + +function(get_all_configs output) + get_property(is_multi_config GLOBAL PROPERTY GENERATOR_IS_MULTI_CONFIG) + if(is_multi_config) + set(all_configs ${CMAKE_CONFIGURATION_TYPES}) + else() + get_property(all_configs CACHE CMAKE_BUILD_TYPE PROPERTY STRINGS) + if(NOT all_configs) + # See https://cmake.org/cmake/help/latest/manual/cmake-buildsystem.7.html#default-and-custom-configurations + set(all_configs Debug Release RelWithDebInfo MinSizeRel) + endif() + endif() + set(${output} "${all_configs}" PARENT_SCOPE) +endfunction() + + +#[=[ +Set the default build configuration. + +See: https://cmake.org/cmake/help/latest/manual/cmake-buildsystem.7.html#build-configurations. + +On single-configuration generators, this function sets the CMAKE_BUILD_TYPE variable to +the default build configuration, which can be overridden by the user at configure time if needed. + +On multi-configuration generators, this function rearranges the CMAKE_CONFIGURATION_TYPES list, +ensuring that the default build configuration appears first while maintaining the order of the +remaining configurations. The user can choose a build configuration at build time. 
+]=] +function(set_default_config config) + get_all_configs(all_configs) + if(NOT ${config} IN_LIST all_configs) + message(FATAL_ERROR "The default config is \"${config}\", but must be one of ${all_configs}.") + endif() + + list(REMOVE_ITEM all_configs ${config}) + list(PREPEND all_configs ${config}) + + get_property(is_multi_config GLOBAL PROPERTY GENERATOR_IS_MULTI_CONFIG) + if(is_multi_config) + get_property(help_string CACHE CMAKE_CONFIGURATION_TYPES PROPERTY HELPSTRING) + set(CMAKE_CONFIGURATION_TYPES "${all_configs}" CACHE STRING "${help_string}" FORCE) + # Also see https://gitlab.kitware.com/cmake/cmake/-/issues/19512. + set(CMAKE_TRY_COMPILE_CONFIGURATION "${config}" PARENT_SCOPE) + else() + set_property(CACHE CMAKE_BUILD_TYPE PROPERTY + STRINGS "${all_configs}" + ) + if(NOT CMAKE_BUILD_TYPE) + message(STATUS "Setting build type to \"${config}\" as none was specified") + get_property(help_string CACHE CMAKE_BUILD_TYPE PROPERTY HELPSTRING) + set(CMAKE_BUILD_TYPE "${config}" CACHE STRING "${help_string}" FORCE) + endif() + set(CMAKE_TRY_COMPILE_CONFIGURATION "${CMAKE_BUILD_TYPE}" PARENT_SCOPE) + endif() +endfunction() + +function(remove_cxx_flag_from_all_configs flag) + get_all_configs(all_configs) + foreach(config IN LISTS all_configs) + string(TOUPPER "${config}" config_uppercase) + set(flags "${CMAKE_CXX_FLAGS_${config_uppercase}}") + separate_arguments(flags) + list(FILTER flags EXCLUDE REGEX "${flag}") + list(JOIN flags " " new_flags) + set(CMAKE_CXX_FLAGS_${config_uppercase} "${new_flags}" PARENT_SCOPE) + set(CMAKE_CXX_FLAGS_${config_uppercase} "${new_flags}" + CACHE STRING + "Flags used by the CXX compiler during ${config_uppercase} builds." + FORCE + ) + endforeach() +endfunction() + +function(replace_cxx_flag_in_config config old_flag new_flag) + string(TOUPPER "${config}" config_uppercase) + string(REGEX REPLACE "(^| )${old_flag}( |$)" "\\1${new_flag}\\2" new_flags "${CMAKE_CXX_FLAGS_${config_uppercase}}") + set(CMAKE_CXX_FLAGS_${config_uppercase} "${new_flags}" PARENT_SCOPE) + set(CMAKE_CXX_FLAGS_${config_uppercase} "${new_flags}" + CACHE STRING + "Flags used by the CXX compiler during ${config_uppercase} builds." + FORCE + ) +endfunction() + +set_default_config(RelWithDebInfo) + +# Redefine/adjust per-configuration flags. +target_compile_definitions(core_interface_debug INTERFACE + DEBUG + DEBUG_LOCKORDER + DEBUG_LOCKCONTENTION + RPC_DOC_CHECK + ABORT_ON_FAILED_ASSUME +) +# We leave assertions on. +if(MSVC) + remove_cxx_flag_from_all_configs(/DNDEBUG) +else() + remove_cxx_flag_from_all_configs(-DNDEBUG) + + # Adjust flags used by the CXX compiler during RELEASE builds. + # Prefer -O2 optimization level. (-O3 is CMake's default for Release for many compilers.) + replace_cxx_flag_in_config(Release -O3 -O2) + + are_flags_overridden(CMAKE_CXX_FLAGS_DEBUG cxx_flags_debug_overridden) + if(NOT cxx_flags_debug_overridden) + # Redefine flags used by the CXX compiler during DEBUG builds. + try_append_cxx_flags("-g3" RESULT_VAR compiler_supports_g3) + if(compiler_supports_g3) + replace_cxx_flag_in_config(Debug -g -g3) + endif() + unset(compiler_supports_g3) + + try_append_cxx_flags("-ftrapv" RESULT_VAR compiler_supports_ftrapv) + if(compiler_supports_ftrapv) + string(PREPEND CMAKE_CXX_FLAGS_DEBUG "-ftrapv ") + endif() + unset(compiler_supports_ftrapv) + + string(PREPEND CMAKE_CXX_FLAGS_DEBUG "-O0 ") + + set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG}" + CACHE STRING + "Flags used by the CXX compiler during DEBUG builds." 
+      FORCE
+    )
+  endif()
+  unset(cxx_flags_debug_overridden)
+endif()
+
+set(CMAKE_CXX_FLAGS_COVERAGE "-g -Og --coverage")
+set(CMAKE_OBJCXX_FLAGS_COVERAGE "-g -Og --coverage")
+set(CMAKE_EXE_LINKER_FLAGS_COVERAGE "--coverage")
+set(CMAKE_SHARED_LINKER_FLAGS_COVERAGE "--coverage")
+get_property(is_multi_config GLOBAL PROPERTY GENERATOR_IS_MULTI_CONFIG)
+if(is_multi_config)
+  if(NOT "Coverage" IN_LIST CMAKE_CONFIGURATION_TYPES)
+    list(APPEND CMAKE_CONFIGURATION_TYPES Coverage)
+  endif()
+endif()
diff --git a/cmake/module/TestAppendRequiredLibraries.cmake b/cmake/module/TestAppendRequiredLibraries.cmake
new file mode 100644
index 0000000000000..5352102c7a485
--- /dev/null
+++ b/cmake/module/TestAppendRequiredLibraries.cmake
@@ -0,0 +1,92 @@
+# Copyright (c) 2023-present The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or https://opensource.org/license/mit/.
+
+include_guard(GLOBAL)
+
+# Illumos/SmartOS requires linking with -lsocket if
+# using getifaddrs & freeifaddrs.
+# See:
+# - https://github.com/bitcoin/bitcoin/pull/21486
+# - https://smartos.org/man/3socket/getifaddrs
+function(test_append_socket_library target)
+  if (NOT TARGET ${target})
+    message(FATAL_ERROR "${CMAKE_CURRENT_FUNCTION}() called with non-existent target \"${target}\".")
+  endif()
+
+  set(check_socket_source "
+    #include <sys/types.h>
+    #include <ifaddrs.h>
+
+    int main() {
+      struct ifaddrs* ifaddr;
+      getifaddrs(&ifaddr);
+      freeifaddrs(ifaddr);
+    }
+  ")
+
+  include(CheckSourceCompilesAndLinks)
+  check_cxx_source_links("${check_socket_source}" IFADDR_LINKS_WITHOUT_LIBSOCKET)
+  if(NOT IFADDR_LINKS_WITHOUT_LIBSOCKET)
+    check_cxx_source_links_with_libs(socket "${check_socket_source}" IFADDR_NEEDS_LINK_TO_LIBSOCKET)
+    if(IFADDR_NEEDS_LINK_TO_LIBSOCKET)
+      target_link_libraries(${target} INTERFACE socket)
+    else()
+      message(FATAL_ERROR "Cannot figure out how to use getifaddrs/freeifaddrs.")
+    endif()
+  endif()
+  set(HAVE_DECL_GETIFADDRS TRUE PARENT_SCOPE)
+  set(HAVE_DECL_FREEIFADDRS TRUE PARENT_SCOPE)
+endfunction()
+
+# Clang, when building for 32-bit,
+# and linking against libstdc++, requires linking with
+# -latomic if using the C++ atomic library.
+# Can be tested with: clang++ -std=c++20 test.cpp -m32
+#
+# Sourced from http://bugs.debian.org/797228
+function(test_append_atomic_library target)
+  if (NOT TARGET ${target})
+    message(FATAL_ERROR "${CMAKE_CURRENT_FUNCTION}() called with non-existent target \"${target}\".")
+  endif()
+
+  set(check_atomic_source "
+    #include <atomic>
+    #include <chrono>
+    #include <cstdint>
+
+    using namespace std::chrono_literals;
+
+    int main() {
+      std::atomic<bool> lock{true};
+      lock.exchange(false);
+
+      std::atomic<std::chrono::seconds> t{0s};
+      t.store(2s);
+      auto t1 = t.load();
+      t.compare_exchange_strong(t1, 3s);
+
+      std::atomic<double> d{};
+      d.store(3.14);
+      auto d1 = d.load();
+
+      std::atomic<int64_t> a{};
+      int64_t v = 5;
+      int64_t r = a.fetch_add(v);
+      return static_cast<int>(r);
+    }
+  ")
+
+  include(CheckSourceCompilesAndLinks)
+  check_cxx_source_links("${check_atomic_source}" STD_ATOMIC_LINKS_WITHOUT_LIBATOMIC)
+  if(STD_ATOMIC_LINKS_WITHOUT_LIBATOMIC)
+    return()
+  endif()
+
+  check_cxx_source_links_with_libs(atomic "${check_atomic_source}" STD_ATOMIC_NEEDS_LINK_TO_LIBATOMIC)
+  if(STD_ATOMIC_NEEDS_LINK_TO_LIBATOMIC)
+    target_link_libraries(${target} INTERFACE atomic)
+  else()
+    message(FATAL_ERROR "Cannot figure out how to use std::atomic.")
+  endif()
+endfunction()
diff --git a/cmake/module/TryAppendCXXFlags.cmake b/cmake/module/TryAppendCXXFlags.cmake
new file mode 100644
index 0000000000000..c07455e89e0f1
--- /dev/null
+++ b/cmake/module/TryAppendCXXFlags.cmake
@@ -0,0 +1,125 @@
+# Copyright (c) 2023-present The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or https://opensource.org/license/mit/.
+
+include_guard(GLOBAL)
+include(CheckCXXSourceCompiles)
+
+#[=[
+Add language-wide flags, which will be passed to all invocations of the compiler.
+This includes invocations that drive compiling and those that drive linking.
+
+Usage examples:
+
+  try_append_cxx_flags("-Wformat -Wformat-security" VAR warn_cxx_flags)
+
+
+  try_append_cxx_flags("-Wsuggest-override" VAR warn_cxx_flags
+    SOURCE "struct A { virtual void f(); }; struct B : A { void f() final; };"
+  )
+
+
+  try_append_cxx_flags("-fsanitize=${SANITIZERS}" TARGET core_interface
+    RESULT_VAR cxx_supports_sanitizers
+  )
+  if(NOT cxx_supports_sanitizers)
+    message(FATAL_ERROR "Compiler did not accept requested flags.")
+  endif()
+
+
+  try_append_cxx_flags("-Wunused-parameter" TARGET core_interface
+    IF_CHECK_PASSED "-Wno-unused-parameter"
+  )
+
+
+In configuration output, this function prints a string by the following pattern:
+
+  -- Performing Test CXX_SUPPORTS_[flags]
+  -- Performing Test CXX_SUPPORTS_[flags] - Success
+
+]=]
+function(try_append_cxx_flags flags)
+  cmake_parse_arguments(PARSE_ARGV 1
+    TACXXF                          # prefix
+    "SKIP_LINK"                     # options
+    "TARGET;VAR;SOURCE;RESULT_VAR"  # one_value_keywords
+    "IF_CHECK_PASSED"               # multi_value_keywords
+  )
+
+  set(flags_as_string "${flags}")
+  separate_arguments(flags)
+
+  string(MAKE_C_IDENTIFIER "${flags_as_string}" id_string)
+  string(TOUPPER "${id_string}" id_string)
+
+  set(source "int main() { return 0; }")
+  if(DEFINED TACXXF_SOURCE AND NOT TACXXF_SOURCE STREQUAL source)
+    set(source "${TACXXF_SOURCE}")
+    string(SHA256 source_hash "${source}")
+    string(SUBSTRING ${source_hash} 0 4 source_hash_head)
+    string(APPEND id_string _${source_hash_head})
+  endif()
+
+  # This avoids running a linker.
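The step on the next lines, switching CMAKE_TRY_COMPILE_TARGET_TYPE to STATIC_LIBRARY so that check_cxx_source_compiles() stops after the compile step, is a stock CMake idiom; stripped out of this function it looks roughly like this (the flag and result name are illustrative):

    include(CheckCXXSourceCompiles)
    set(CMAKE_TRY_COMPILE_TARGET_TYPE STATIC_LIBRARY)   # compile only, never invoke the linker
    set(CMAKE_REQUIRED_FLAGS "-Wformat -Werror")        # flags under test, plus warnings-as-errors
    check_cxx_source_compiles("int main() { return 0; }" CXX_SUPPORTS_WFORMAT)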
+ set(CMAKE_TRY_COMPILE_TARGET_TYPE STATIC_LIBRARY) + set(CMAKE_REQUIRED_FLAGS "${flags_as_string} ${working_compiler_werror_flag}") + set(compiler_result CXX_SUPPORTS_${id_string}) + check_cxx_source_compiles("${source}" ${compiler_result}) + + if(${compiler_result}) + if(DEFINED TACXXF_IF_CHECK_PASSED) + if(DEFINED TACXXF_TARGET) + target_compile_options(${TACXXF_TARGET} INTERFACE ${TACXXF_IF_CHECK_PASSED}) + endif() + if(DEFINED TACXXF_VAR) + string(STRIP "${${TACXXF_VAR}} ${TACXXF_IF_CHECK_PASSED}" ${TACXXF_VAR}) + endif() + else() + if(DEFINED TACXXF_TARGET) + target_compile_options(${TACXXF_TARGET} INTERFACE ${flags}) + endif() + if(DEFINED TACXXF_VAR) + string(STRIP "${${TACXXF_VAR}} ${flags_as_string}" ${TACXXF_VAR}) + endif() + endif() + endif() + + if(DEFINED TACXXF_VAR) + set(${TACXXF_VAR} "${${TACXXF_VAR}}" PARENT_SCOPE) + endif() + + if(DEFINED TACXXF_RESULT_VAR) + set(${TACXXF_RESULT_VAR} "${${compiler_result}}" PARENT_SCOPE) + endif() + + if(NOT ${compiler_result} OR TACXXF_SKIP_LINK) + return() + endif() + + # This forces running a linker. + set(CMAKE_TRY_COMPILE_TARGET_TYPE EXECUTABLE) + set(CMAKE_REQUIRED_FLAGS "${flags_as_string}") + set(CMAKE_REQUIRED_LINK_OPTIONS ${working_linker_werror_flag}) + set(linker_result LINKER_SUPPORTS_${id_string}) + check_cxx_source_compiles("${source}" ${linker_result}) + + if(${linker_result}) + if(DEFINED TACXXF_IF_CHECK_PASSED) + if(DEFINED TACXXF_TARGET) + target_link_options(${TACXXF_TARGET} INTERFACE ${TACXXF_IF_CHECK_PASSED}) + endif() + else() + if(DEFINED TACXXF_TARGET) + target_link_options(${TACXXF_TARGET} INTERFACE ${flags}) + endif() + endif() + else() + message(WARNING "'${flags_as_string}' fail(s) to link.") + endif() +endfunction() + +if(MSVC) + try_append_cxx_flags("/WX /options:strict" VAR working_compiler_werror_flag SKIP_LINK) +else() + try_append_cxx_flags("-Werror" VAR working_compiler_werror_flag SKIP_LINK) +endif() diff --git a/cmake/module/TryAppendLinkerFlag.cmake b/cmake/module/TryAppendLinkerFlag.cmake new file mode 100644 index 0000000000000..8cbd83678d93d --- /dev/null +++ b/cmake/module/TryAppendLinkerFlag.cmake @@ -0,0 +1,78 @@ +# Copyright (c) 2023-present The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or https://opensource.org/license/mit/. + +include_guard(GLOBAL) +include(CheckCXXSourceCompiles) + +#[=[ +Usage example: + + try_append_linker_flag("-Wl,--major-subsystem-version,6" TARGET core_interface) + + +In configuration output, this function prints a string by the following pattern: + + -- Performing Test LINKER_SUPPORTS_[flag] + -- Performing Test LINKER_SUPPORTS_[flag] - Success + +]=] +function(try_append_linker_flag flag) + cmake_parse_arguments(PARSE_ARGV 1 + TALF # prefix + "" # options + "TARGET;VAR;SOURCE;RESULT_VAR" # one_value_keywords + "IF_CHECK_PASSED" # multi_value_keywords + ) + + string(MAKE_C_IDENTIFIER "${flag}" result) + string(TOUPPER "${result}" result) + string(PREPEND result LINKER_SUPPORTS_) + + set(source "int main() { return 0; }") + if(DEFINED TALF_SOURCE AND NOT TALF_SOURCE STREQUAL source) + set(source "${TALF_SOURCE}") + string(SHA256 source_hash "${source}") + string(SUBSTRING ${source_hash} 0 4 source_hash_head) + string(APPEND result _${source_hash_head}) + endif() + + # This forces running a linker. 
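As in the compiler-flag module above, the lines that follow force an EXECUTABLE try-compile so the probe exercises the linker as well. Both Try* modules finish by bootstrapping a warnings-as-errors flag (working_compiler_werror_flag / working_linker_werror_flag) that is appended to every later check, so an option the toolchain merely warns about still fails the probe. Calling code is expected to stay at the level of the helpers, roughly like this (the flags are purely illustrative; core_interface is the target name used in the usage examples above):

    include(TryAppendCXXFlags)
    include(TryAppendLinkerFlag)
    try_append_cxx_flags("-Wall -Wextra" TARGET core_interface)
    try_append_linker_flag("-Wl,-z,relro" TARGET core_interface)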
+ set(CMAKE_TRY_COMPILE_TARGET_TYPE EXECUTABLE) + set(CMAKE_REQUIRED_LINK_OPTIONS ${flag} ${working_linker_werror_flag}) + check_cxx_source_compiles("${source}" ${result}) + + if(${result}) + if(DEFINED TALF_IF_CHECK_PASSED) + if(DEFINED TALF_TARGET) + target_link_options(${TALF_TARGET} INTERFACE ${TALF_IF_CHECK_PASSED}) + endif() + if(DEFINED TALF_VAR) + string(STRIP "${${TALF_VAR}} ${TALF_IF_CHECK_PASSED}" ${TALF_VAR}) + endif() + else() + if(DEFINED TALF_TARGET) + target_link_options(${TALF_TARGET} INTERFACE ${flag}) + endif() + if(DEFINED TALF_VAR) + string(STRIP "${${TALF_VAR}} ${flag}" ${TALF_VAR}) + endif() + endif() + endif() + + if(DEFINED TALF_VAR) + set(${TALF_VAR} "${${TALF_VAR}}" PARENT_SCOPE) + endif() + + if(DEFINED TALF_RESULT_VAR) + set(${TALF_RESULT_VAR} "${${result}}" PARENT_SCOPE) + endif() +endfunction() + +if(MSVC) + try_append_linker_flag("/WX" VAR working_linker_werror_flag) +elseif(CMAKE_SYSTEM_NAME STREQUAL "Darwin") + try_append_linker_flag("-Wl,-fatal_warnings" VAR working_linker_werror_flag) +else() + try_append_linker_flag("-Wl,--fatal-warnings" VAR working_linker_werror_flag) +endif() diff --git a/cmake/module/WarnAboutGlobalProperties.cmake b/cmake/module/WarnAboutGlobalProperties.cmake new file mode 100644 index 0000000000000..faa56a2a7f191 --- /dev/null +++ b/cmake/module/WarnAboutGlobalProperties.cmake @@ -0,0 +1,36 @@ +# Copyright (c) 2023-present The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or https://opensource.org/license/mit/. + +include_guard(GLOBAL) + +# Avoid the directory-wide add_definitions() and add_compile_definitions() commands. +# Instead, prefer the target-specific target_compile_definitions() one. +get_directory_property(global_compile_definitions COMPILE_DEFINITIONS) +if(global_compile_definitions) + message(AUTHOR_WARNING "The directory's COMPILE_DEFINITIONS property is not empty: ${global_compile_definitions}") +endif() + +# Avoid the directory-wide add_compile_options() command. +# Instead, prefer the target-specific target_compile_options() one. +get_directory_property(global_compile_options COMPILE_OPTIONS) +if(global_compile_options) + message(AUTHOR_WARNING "The directory's COMPILE_OPTIONS property is not empty: ${global_compile_options}") +endif() + +# Avoid the directory-wide add_link_options() command. +# Instead, prefer the target-specific target_link_options() one. +get_directory_property(global_link_options LINK_OPTIONS) +if(global_link_options) + message(AUTHOR_WARNING "The directory's LINK_OPTIONS property is not empty: ${global_link_options}") +endif() + +# Avoid the directory-wide link_libraries() command. +# Instead, prefer the target-specific target_link_libraries() one. 
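The dummy object library created below exists only so the directory-level LINK_LIBRARIES property can be inspected; what it guards against is the difference between the two calls sketched here (target and library names are illustrative):

    # Discouraged: applies to every target created in this directory from now on.
    link_libraries(Threads::Threads)

    # Preferred: scoped to a single target and its consumers.
    target_link_libraries(bitcoin_node PRIVATE Threads::Threads)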
+file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/dummy_cxx_source.cpp "#error") +add_library(check_loose_linked_libraries OBJECT EXCLUDE_FROM_ALL ${CMAKE_CURRENT_BINARY_DIR}/dummy_cxx_source.cpp) +set_target_properties(check_loose_linked_libraries PROPERTIES EXPORT_COMPILE_COMMANDS OFF) +get_target_property(global_linked_libraries check_loose_linked_libraries LINK_LIBRARIES) +if(global_linked_libraries) + message(AUTHOR_WARNING "There are libraries linked with `link_libraries` commands: ${global_linked_libraries}") +endif() diff --git a/cmake/script/Coverage.cmake b/cmake/script/Coverage.cmake new file mode 100644 index 0000000000000..72587a5eb6a4b --- /dev/null +++ b/cmake/script/Coverage.cmake @@ -0,0 +1,89 @@ +# Copyright (c) 2024-present The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or https://opensource.org/license/mit/. + +include(${CMAKE_CURRENT_LIST_DIR}/CoverageInclude.cmake) + +set(functional_test_runner test/functional/test_runner.py) +if(EXTENDED_FUNCTIONAL_TESTS) + list(APPEND functional_test_runner --extended) +endif() +if(DEFINED JOBS) + list(APPEND CMAKE_CTEST_COMMAND -j ${JOBS}) + list(APPEND functional_test_runner -j ${JOBS}) +endif() + +execute_process( + COMMAND ${CMAKE_CTEST_COMMAND} --build-config Coverage + WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR} + COMMAND_ERROR_IS_FATAL ANY +) +execute_process( + COMMAND ${LCOV_COMMAND} --capture --directory src --test-name test_bitcoin --output-file test_bitcoin.info + WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR} + COMMAND_ERROR_IS_FATAL ANY +) +execute_process( + COMMAND ${LCOV_COMMAND} --zerocounters --directory src + WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR} + COMMAND_ERROR_IS_FATAL ANY +) +execute_process( + COMMAND ${LCOV_FILTER_COMMAND} test_bitcoin.info test_bitcoin_filtered.info + WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR} + COMMAND_ERROR_IS_FATAL ANY +) +execute_process( + COMMAND ${LCOV_COMMAND} --add-tracefile test_bitcoin_filtered.info --output-file test_bitcoin_filtered.info + WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR} + COMMAND_ERROR_IS_FATAL ANY +) +execute_process( + COMMAND ${LCOV_COMMAND} --add-tracefile baseline_filtered.info --add-tracefile test_bitcoin_filtered.info --output-file test_bitcoin_coverage.info + WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR} + COMMAND_ERROR_IS_FATAL ANY +) +execute_process( + COMMAND ${GENHTML_COMMAND} test_bitcoin_coverage.info --output-directory test_bitcoin.coverage + WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR} + COMMAND_ERROR_IS_FATAL ANY +) + +execute_process( + COMMAND ${functional_test_runner} + WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR} + COMMAND_ERROR_IS_FATAL ANY +) +execute_process( + COMMAND ${LCOV_COMMAND} --capture --directory src --test-name functional-tests --output-file functional_test.info + WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR} + COMMAND_ERROR_IS_FATAL ANY +) +execute_process( + COMMAND ${LCOV_COMMAND} --zerocounters --directory src + WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR} + COMMAND_ERROR_IS_FATAL ANY +) +execute_process( + COMMAND ${LCOV_FILTER_COMMAND} functional_test.info functional_test_filtered.info + WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR} + COMMAND_ERROR_IS_FATAL ANY +) +execute_process( + COMMAND ${LCOV_COMMAND} --add-tracefile functional_test_filtered.info --output-file functional_test_filtered.info + WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR} + COMMAND_ERROR_IS_FATAL ANY +) +execute_process( + COMMAND ${LCOV_COMMAND} --add-tracefile baseline_filtered.info --add-tracefile 
test_bitcoin_filtered.info --add-tracefile functional_test_filtered.info --output-file total_coverage.info + COMMAND ${GREP_EXECUTABLE} "%" + COMMAND ${AWK_EXECUTABLE} "{ print substr($3,2,50) \"/\" $5 }" + OUTPUT_FILE coverage_percent.txt + WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR} + COMMAND_ERROR_IS_FATAL ANY +) +execute_process( + COMMAND ${GENHTML_COMMAND} total_coverage.info --output-directory total.coverage + WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR} + COMMAND_ERROR_IS_FATAL ANY +) diff --git a/cmake/script/CoverageFuzz.cmake b/cmake/script/CoverageFuzz.cmake new file mode 100644 index 0000000000000..055880539475e --- /dev/null +++ b/cmake/script/CoverageFuzz.cmake @@ -0,0 +1,53 @@ +# Copyright (c) 2024-present The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or https://opensource.org/license/mit/. + +include(${CMAKE_CURRENT_LIST_DIR}/CoverageInclude.cmake) + +if(NOT DEFINED FUZZ_CORPORA_DIR) + set(FUZZ_CORPORA_DIR ${CMAKE_CURRENT_SOURCE_DIR}/qa-assets/fuzz_corpora) +endif() + +set(fuzz_test_runner test/fuzz/test_runner.py ${FUZZ_CORPORA_DIR}) +if(DEFINED JOBS) + list(APPEND fuzz_test_runner -j ${JOBS}) +endif() + +execute_process( + COMMAND ${fuzz_test_runner} --loglevel DEBUG + WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR} + COMMAND_ERROR_IS_FATAL ANY +) +execute_process( + COMMAND ${LCOV_COMMAND} --capture --directory src --test-name fuzz-tests --output-file fuzz.info + WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR} + COMMAND_ERROR_IS_FATAL ANY +) +execute_process( + COMMAND ${LCOV_COMMAND} --zerocounters --directory src + WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR} + COMMAND_ERROR_IS_FATAL ANY +) +execute_process( + COMMAND ${LCOV_FILTER_COMMAND} fuzz.info fuzz_filtered.info + WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR} + COMMAND_ERROR_IS_FATAL ANY +) +execute_process( + COMMAND ${LCOV_COMMAND} --add-tracefile fuzz_filtered.info --output-file fuzz_filtered.info + WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR} + COMMAND_ERROR_IS_FATAL ANY +) +execute_process( + COMMAND ${LCOV_COMMAND} --add-tracefile baseline_filtered.info --add-tracefile fuzz_filtered.info --output-file fuzz_coverage.info + COMMAND ${GREP_EXECUTABLE} "%" + COMMAND ${AWK_EXECUTABLE} "{ print substr($3,2,50) \"/\" $5 }" + OUTPUT_FILE coverage_percent.txt + WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR} + COMMAND_ERROR_IS_FATAL ANY +) +execute_process( + COMMAND ${GENHTML_COMMAND} fuzz_coverage.info --output-directory fuzz.coverage + WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR} + COMMAND_ERROR_IS_FATAL ANY +) diff --git a/cmake/script/CoverageInclude.cmake.in b/cmake/script/CoverageInclude.cmake.in new file mode 100644 index 0000000000000..59bf5e3af2e44 --- /dev/null +++ b/cmake/script/CoverageInclude.cmake.in @@ -0,0 +1,59 @@ +# Copyright (c) 2024-present The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or https://opensource.org/license/mit/. + +if("@CMAKE_CXX_COMPILER_ID@" STREQUAL "Clang") + find_program(LLVM_COV_EXECUTABLE llvm-cov REQUIRED) + set(COV_TOOL "${LLVM_COV_EXECUTABLE} gcov") +else() + find_program(GCOV_EXECUTABLE gcov REQUIRED) + set(COV_TOOL "${GCOV_EXECUTABLE}") +endif() + +# COV_TOOL is used to replace a placeholder. 
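CoverageInclude.cmake.in is configured into the build tree and pulled in by the Coverage.cmake and CoverageFuzz.cmake scripts above, which are meant to be run in CMake script mode against an already-built Coverage configuration, presumably along these lines (paths and job count illustrative):

    cmake -B build -DCMAKE_BUILD_TYPE=Coverage
    cmake --build build -j10
    cmake -DJOBS=10 -P build/Coverage.cmake    # or build/CoverageFuzz.cmake

The COV_TOOL placeholder mentioned just above is substituted into cov_tool_wrapper.sh by the configure_file() call that follows, so lcov's --gcov-tool can dispatch to either gcov or llvm-cov.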
+configure_file( + cmake/cov_tool_wrapper.sh.in ${CMAKE_CURRENT_LIST_DIR}/cov_tool_wrapper.sh + FILE_PERMISSIONS OWNER_READ OWNER_EXECUTE + GROUP_READ GROUP_EXECUTE + WORLD_READ + @ONLY +) + +find_program(LCOV_EXECUTABLE lcov REQUIRED) +separate_arguments(LCOV_OPTS) +set(LCOV_COMMAND ${LCOV_EXECUTABLE} --gcov-tool ${CMAKE_CURRENT_LIST_DIR}/cov_tool_wrapper.sh ${LCOV_OPTS}) + +find_program(GENHTML_EXECUTABLE genhtml REQUIRED) +set(GENHTML_COMMAND ${GENHTML_EXECUTABLE} --show-details ${LCOV_OPTS}) + +find_program(GREP_EXECUTABLE grep REQUIRED) +find_program(AWK_EXECUTABLE awk REQUIRED) + +set(LCOV_FILTER_COMMAND ./filter-lcov.py) +list(APPEND LCOV_FILTER_COMMAND -p "/usr/local/") +list(APPEND LCOV_FILTER_COMMAND -p "/usr/include/") +list(APPEND LCOV_FILTER_COMMAND -p "/usr/lib/") +list(APPEND LCOV_FILTER_COMMAND -p "/usr/lib64/") +list(APPEND LCOV_FILTER_COMMAND -p "src/leveldb/") +list(APPEND LCOV_FILTER_COMMAND -p "src/crc32c/") +list(APPEND LCOV_FILTER_COMMAND -p "src/bench/") +list(APPEND LCOV_FILTER_COMMAND -p "src/crypto/ctaes") +list(APPEND LCOV_FILTER_COMMAND -p "src/minisketch") +list(APPEND LCOV_FILTER_COMMAND -p "src/secp256k1") +list(APPEND LCOV_FILTER_COMMAND -p "depends") + +execute_process( + COMMAND ${LCOV_COMMAND} --capture --initial --directory src --output-file baseline.info + WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR} + COMMAND_ERROR_IS_FATAL ANY +) +execute_process( + COMMAND ${LCOV_FILTER_COMMAND} baseline.info baseline_filtered.info + WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR} + COMMAND_ERROR_IS_FATAL ANY +) +execute_process( + COMMAND ${LCOV_COMMAND} --add-tracefile baseline_filtered.info --output-file baseline_filtered.info + WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR} + COMMAND_ERROR_IS_FATAL ANY +) diff --git a/cmake/script/GenerateBuildInfo.cmake b/cmake/script/GenerateBuildInfo.cmake new file mode 100644 index 0000000000000..d3ee2eb062106 --- /dev/null +++ b/cmake/script/GenerateBuildInfo.cmake @@ -0,0 +1,113 @@ +# Copyright (c) 2023-present The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or https://opensource.org/license/mit/. + +macro(fatal_error) + message(FATAL_ERROR "\n" + "Usage:\n" + " cmake -D BUILD_INFO_HEADER_PATH= [-D SOURCE_DIR=] -P ${CMAKE_CURRENT_LIST_FILE}\n" + "All specified paths must be absolute ones.\n" + ) +endmacro() + +if(DEFINED BUILD_INFO_HEADER_PATH AND IS_ABSOLUTE "${BUILD_INFO_HEADER_PATH}") + if(EXISTS "${BUILD_INFO_HEADER_PATH}") + file(STRINGS ${BUILD_INFO_HEADER_PATH} INFO LIMIT_COUNT 1) + endif() +else() + fatal_error() +endif() + +if(DEFINED SOURCE_DIR) + if(IS_ABSOLUTE "${SOURCE_DIR}" AND IS_DIRECTORY "${SOURCE_DIR}") + set(WORKING_DIR ${SOURCE_DIR}) + else() + fatal_error() + endif() +else() + set(WORKING_DIR ${CMAKE_CURRENT_SOURCE_DIR}) +endif() + +set(GIT_TAG) +set(GIT_COMMIT) +if(NOT "$ENV{BITCOIN_GENBUILD_NO_GIT}" STREQUAL "1") + find_package(Git QUIET) + if(Git_FOUND) + execute_process( + COMMAND ${GIT_EXECUTABLE} rev-parse --is-inside-work-tree + WORKING_DIRECTORY ${WORKING_DIR} + OUTPUT_VARIABLE IS_INSIDE_WORK_TREE + OUTPUT_STRIP_TRAILING_WHITESPACE + ERROR_QUIET + ) + if(IS_INSIDE_WORK_TREE) + # Clean 'dirty' status of touched files that haven't been modified. 
+      execute_process(
+        COMMAND ${GIT_EXECUTABLE} diff
+        WORKING_DIRECTORY ${WORKING_DIR}
+        OUTPUT_QUIET
+        ERROR_QUIET
+      )
+
+      execute_process(
+        COMMAND ${GIT_EXECUTABLE} describe --abbrev=0
+        WORKING_DIRECTORY ${WORKING_DIR}
+        OUTPUT_VARIABLE MOST_RECENT_TAG
+        OUTPUT_STRIP_TRAILING_WHITESPACE
+        ERROR_QUIET
+      )
+
+      execute_process(
+        COMMAND ${GIT_EXECUTABLE} rev-list -1 ${MOST_RECENT_TAG}
+        WORKING_DIRECTORY ${WORKING_DIR}
+        OUTPUT_VARIABLE MOST_RECENT_TAG_COMMIT
+        OUTPUT_STRIP_TRAILING_WHITESPACE
+        ERROR_QUIET
+      )
+
+      execute_process(
+        COMMAND ${GIT_EXECUTABLE} rev-parse HEAD
+        WORKING_DIRECTORY ${WORKING_DIR}
+        OUTPUT_VARIABLE HEAD_COMMIT
+        OUTPUT_STRIP_TRAILING_WHITESPACE
+        ERROR_QUIET
+      )
+
+      execute_process(
+        COMMAND ${GIT_EXECUTABLE} diff-index --quiet HEAD --
+        WORKING_DIRECTORY ${WORKING_DIR}
+        RESULT_VARIABLE IS_DIRTY
+      )
+
+      if(HEAD_COMMIT STREQUAL MOST_RECENT_TAG_COMMIT AND NOT IS_DIRTY)
+        # If latest commit is tagged and not dirty, then use the tag name.
+        set(GIT_TAG ${MOST_RECENT_TAG})
+      else()
+        # Otherwise, generate suffix from git, i.e. string like "0e0a5173fae3-dirty".
+        execute_process(
+          COMMAND ${GIT_EXECUTABLE} rev-parse --short=12 HEAD
+          WORKING_DIRECTORY ${WORKING_DIR}
+          OUTPUT_VARIABLE GIT_COMMIT
+          OUTPUT_STRIP_TRAILING_WHITESPACE
+          ERROR_QUIET
+        )
+        if(IS_DIRTY)
+          string(APPEND GIT_COMMIT "-dirty")
+        endif()
+      endif()
+    endif()
+  endif()
+endif()
+
+if(GIT_TAG)
+  set(NEWINFO "#define BUILD_GIT_TAG \"${GIT_TAG}\"")
+elseif(GIT_COMMIT)
+  set(NEWINFO "#define BUILD_GIT_COMMIT \"${GIT_COMMIT}\"")
+else()
+  set(NEWINFO "// No build information available")
+endif()
+
+# Only update the header if necessary.
+if(NOT "${INFO}" STREQUAL "${NEWINFO}")
+  file(WRITE ${BUILD_INFO_HEADER_PATH} "${NEWINFO}\n")
+endif()
diff --git a/cmake/script/GenerateHeaderFromJson.cmake b/cmake/script/GenerateHeaderFromJson.cmake
new file mode 100644
index 0000000000000..4a3bddb323f81
--- /dev/null
+++ b/cmake/script/GenerateHeaderFromJson.cmake
@@ -0,0 +1,22 @@
+# Copyright (c) 2023-present The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or https://opensource.org/license/mit/.
+
+cmake_path(GET JSON_SOURCE_PATH STEM json_source_basename)
+
+file(READ ${JSON_SOURCE_PATH} hex_content HEX)
+string(REGEX REPLACE "................" "\\0\n" formatted_bytes "${hex_content}")
+string(REGEX REPLACE "[^\n][^\n]" "0x\\0, " formatted_bytes "${formatted_bytes}")
+
+set(header_content
+"#include <string_view>
+
+namespace json_tests {
+inline constexpr char detail_${json_source_basename}_bytes[] {
+${formatted_bytes}
+};
+
+inline constexpr std::string_view ${json_source_basename}{std::begin(detail_${json_source_basename}_bytes), std::end(detail_${json_source_basename}_bytes)};
+}
+")
+file(WRITE ${HEADER_PATH} "${header_content}")
diff --git a/cmake/script/GenerateHeaderFromRaw.cmake b/cmake/script/GenerateHeaderFromRaw.cmake
new file mode 100644
index 0000000000000..638876ecea10c
--- /dev/null
+++ b/cmake/script/GenerateHeaderFromRaw.cmake
@@ -0,0 +1,23 @@
+# Copyright (c) 2023-present The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or https://opensource.org/license/mit/.
+
+cmake_path(GET RAW_SOURCE_PATH STEM raw_source_basename)
+
+file(READ ${RAW_SOURCE_PATH} hex_content HEX)
+string(REGEX REPLACE "................" "\\0\n" formatted_bytes "${hex_content}")
+string(REGEX REPLACE "[^\n][^\n]" "std::byte{0x\\0}, " formatted_bytes "${formatted_bytes}")
+
+set(header_content
+"#include <cstddef>
+#include <span>
+
+namespace ${RAW_NAMESPACE} {
+inline constexpr std::byte detail_${raw_source_basename}_raw[] {
+${formatted_bytes}
+};
+
+inline constexpr std::span<const std::byte> ${raw_source_basename}{detail_${raw_source_basename}_raw};
+}
+")
+file(WRITE ${HEADER_PATH} "${header_content}")
diff --git a/cmake/script/macos_zip.sh b/cmake/script/macos_zip.sh
new file mode 100755
index 0000000000000..cc51699dc938a
--- /dev/null
+++ b/cmake/script/macos_zip.sh
@@ -0,0 +1,12 @@
+#!/bin/sh
+# Copyright (c) 2024-present The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or https://opensource.org/license/mit/.
+
+export LC_ALL=C
+
+if [ -n "$SOURCE_DATE_EPOCH" ]; then
+  find . -exec touch -d "@$SOURCE_DATE_EPOCH" {} +
+fi
+
+find . | sort | "$1" -X@ "$2"
diff --git a/cmake/tests.cmake b/cmake/tests.cmake
new file mode 100644
index 0000000000000..279132980017f
--- /dev/null
+++ b/cmake/tests.cmake
@@ -0,0 +1,15 @@
+# Copyright (c) 2023-present The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or https://opensource.org/license/mit/.
+
+if(TARGET bitcoin-util AND TARGET bitcoin-tx AND PYTHON_COMMAND)
+  add_test(NAME util_test_runner
+    COMMAND ${CMAKE_COMMAND} -E env BITCOINUTIL=$<TARGET_FILE:bitcoin-util> BITCOINTX=$<TARGET_FILE:bitcoin-tx> ${PYTHON_COMMAND} ${PROJECT_BINARY_DIR}/test/util/test_runner.py
+  )
+endif()
+
+if(PYTHON_COMMAND)
+  add_test(NAME util_rpcauth_test
+    COMMAND ${PYTHON_COMMAND} ${PROJECT_BINARY_DIR}/test/util/rpcauth-test.py
+  )
+endif()
diff --git a/configure.ac b/configure.ac
deleted file mode 100644
index cf04faf6d1005..0000000000000
--- a/configure.ac
+++ /dev/null
@@ -1,1736 +0,0 @@
-AC_PREREQ([2.69])
-define(_CLIENT_VERSION_MAJOR, 0)
-define(_CLIENT_VERSION_MINOR, 20)
-define(_CLIENT_VERSION_REVISION, 99)
-define(_CLIENT_VERSION_BUILD, 0)
-define(_CLIENT_VERSION_RC, 0)
-define(_CLIENT_VERSION_IS_RELEASE, false)
-define(_COPYRIGHT_YEAR, 2020)
-define(_COPYRIGHT_HOLDERS,[The %s developers])
-define(_COPYRIGHT_HOLDERS_SUBSTITUTION,[[Bitcoin Core]])
-AC_INIT([Bitcoin Core],m4_join([.], _CLIENT_VERSION_MAJOR, _CLIENT_VERSION_MINOR, _CLIENT_VERSION_REVISION, m4_if(_CLIENT_VERSION_BUILD, [0], [], _CLIENT_VERSION_BUILD))m4_if(_CLIENT_VERSION_RC, [0], [], [rc]_CLIENT_VERSION_RC),[https://github.com/bitcoin/bitcoin/issues],[bitcoin],[https://bitcoincore.org/])
-AC_CONFIG_SRCDIR([src/validation.cpp])
-AC_CONFIG_HEADERS([src/config/bitcoin-config.h])
-AC_CONFIG_AUX_DIR([build-aux])
-AC_CONFIG_MACRO_DIR([build-aux/m4])
-
-BITCOIN_DAEMON_NAME=bitcoind
-BITCOIN_GUI_NAME=bitcoin-qt
-BITCOIN_CLI_NAME=bitcoin-cli
-BITCOIN_TX_NAME=bitcoin-tx
-BITCOIN_WALLET_TOOL_NAME=bitcoin-wallet
-
-dnl Unless the user specified ARFLAGS, force it to be cr
-AC_ARG_VAR(ARFLAGS, [Flags for the archiver, defaults to <cr> if not set])
-if test "x${ARFLAGS+set}" != "xset"; then
-  ARFLAGS="cr"
-fi
-
-AC_CANONICAL_HOST
-
-AH_TOP([#ifndef BITCOIN_CONFIG_H])
-AH_TOP([#define BITCOIN_CONFIG_H])
-AH_BOTTOM([#endif //BITCOIN_CONFIG_H])
-
-dnl faketime breaks configure and is only needed for make. Disable it here.
-unset FAKETIME
-
-dnl Automake init set-up and checks
-AM_INIT_AUTOMAKE([1.13 no-define subdir-objects foreign])
-
-dnl faketime messes with timestamps and causes configure to be re-run.
-dnl --disable-maintainer-mode can be used to bypass this.
-AM_MAINTAINER_MODE([enable]) - -dnl make the compilation flags quiet unless V=1 is used -AM_SILENT_RULES([yes]) - -dnl Compiler checks (here before libtool). -if test "x${CXXFLAGS+set}" = "xset"; then - CXXFLAGS_overridden=yes -else - CXXFLAGS_overridden=no -fi -AC_PROG_CXX - -dnl By default, libtool for mingw refuses to link static libs into a dll for -dnl fear of mixing pic/non-pic objects, and import/export complications. Since -dnl we have those under control, re-enable that functionality. -case $host in - *mingw*) - lt_cv_deplibs_check_method="pass_all" - ;; -esac - -AC_ARG_ENABLE([c++17], - [AS_HELP_STRING([--enable-c++17], - [enable compilation in c++17 mode (disabled by default)])], - [use_cxx17=$enableval], - [use_cxx17=no]) - -dnl Require C++11 or C++17 compiler (no GNU extensions) -if test "x$use_cxx17" = xyes; then - AX_CXX_COMPILE_STDCXX([17], [noext], [mandatory]) -else - AX_CXX_COMPILE_STDCXX([11], [noext], [mandatory]) -fi - -dnl Check if -latomic is required for -CHECK_ATOMIC - -dnl Unless the user specified OBJCXX, force it to be the same as CXX. This ensures -dnl that we get the same -std flags for both. -m4_ifdef([AC_PROG_OBJCXX],[ -if test "x${OBJCXX+set}" = "x"; then - OBJCXX="${CXX}" -fi -AC_PROG_OBJCXX -]) - -dnl Since libtool 1.5.2 (released 2004-01-25), on Linux libtool no longer -dnl sets RPATH for any directories in the dynamic linker search path. -dnl See more: https://wiki.debian.org/RpathIssue -LT_PREREQ([1.5.2]) -dnl Libtool init checks. -LT_INIT([pic-only]) - -dnl Check/return PATH for base programs. -AC_PATH_TOOL(AR, ar) -AC_PATH_TOOL(RANLIB, ranlib) -AC_PATH_TOOL(STRIP, strip) -AC_PATH_TOOL(GCOV, gcov) -AC_PATH_PROG(LCOV, lcov) -dnl Python 3.5 is specified in .python-version and should be used if available, see doc/dependencies.md -AC_PATH_PROGS([PYTHON], [python3.5 python3.6 python3.7 python3.8 python3 python]) -AC_PATH_PROG(GENHTML, genhtml) -AC_PATH_PROG([GIT], [git]) -AC_PATH_PROG(CCACHE,ccache) -AC_PATH_PROG(XGETTEXT,xgettext) -AC_PATH_PROG(HEXDUMP,hexdump) -AC_PATH_TOOL(READELF, readelf) -AC_PATH_TOOL(CPPFILT, c++filt) -AC_PATH_TOOL(OBJCOPY, objcopy) -AC_PATH_PROG(DOXYGEN, doxygen) -if test -z "$DOXYGEN"; then - AC_MSG_WARN([Doxygen not found]) -fi -AM_CONDITIONAL([HAVE_DOXYGEN], [test -n "$DOXYGEN"]) - -AC_ARG_VAR(PYTHONPATH, Augments the default search path for python module files) - -AC_ARG_ENABLE([wallet], - [AS_HELP_STRING([--disable-wallet], - [disable wallet (enabled by default)])], - [enable_wallet=$enableval], - [enable_wallet=yes]) - -AC_ARG_WITH([miniupnpc], - [AS_HELP_STRING([--with-miniupnpc], - [enable UPNP (default is yes if libminiupnpc is found)])], - [use_upnp=$withval], - [use_upnp=auto]) - -AC_ARG_ENABLE([upnp-default], - [AS_HELP_STRING([--enable-upnp-default], - [if UPNP is enabled, turn it on at startup (default is no)])], - [use_upnp_default=$enableval], - [use_upnp_default=no]) - -AC_ARG_ENABLE(tests, - AS_HELP_STRING([--disable-tests],[do not compile tests (default is to compile)]), - [use_tests=$enableval], - [use_tests=yes]) - -AC_ARG_ENABLE(gui-tests, - AS_HELP_STRING([--disable-gui-tests],[do not compile GUI tests (default is to compile if GUI and tests enabled)]), - [use_gui_tests=$enableval], - [use_gui_tests=$use_tests]) - -AC_ARG_ENABLE(bench, - AS_HELP_STRING([--disable-bench],[do not compile benchmarks (default is to compile)]), - [use_bench=$enableval], - [use_bench=yes]) - -AC_ARG_ENABLE([extended-functional-tests], - AS_HELP_STRING([--enable-extended-functional-tests],[enable expensive functional tests when 
using lcov (default no)]), - [use_extended_functional_tests=$enableval], - [use_extended_functional_tests=no]) - -AC_ARG_ENABLE([fuzz], - AS_HELP_STRING([--enable-fuzz], - [enable building of fuzz targets (default no). enabling this will disable all other targets]), - [enable_fuzz=$enableval], - [enable_fuzz=no]) - -AC_ARG_WITH([qrencode], - [AS_HELP_STRING([--with-qrencode], - [enable QR code support (default is yes if qt is enabled and libqrencode is found)])], - [use_qr=$withval], - [use_qr=auto]) - -AC_ARG_ENABLE([hardening], - [AS_HELP_STRING([--disable-hardening], - [do not attempt to harden the resulting executables (default is to harden when possible)])], - [use_hardening=$enableval], - [use_hardening=auto]) - -AC_ARG_ENABLE([reduce-exports], - [AS_HELP_STRING([--enable-reduce-exports], - [attempt to reduce exported symbols in the resulting executables (default is no)])], - [use_reduce_exports=$enableval], - [use_reduce_exports=no]) - -AC_ARG_ENABLE([ccache], - [AS_HELP_STRING([--disable-ccache], - [do not use ccache for building (default is to use if found)])], - [use_ccache=$enableval], - [use_ccache=auto]) - -AC_ARG_ENABLE([lcov], - [AS_HELP_STRING([--enable-lcov], - [enable lcov testing (default is no)])], - [use_lcov=$enableval], - [use_lcov=no]) - -AC_ARG_ENABLE([lcov-branch-coverage], - [AS_HELP_STRING([--enable-lcov-branch-coverage], - [enable lcov testing branch coverage (default is no)])], - [use_lcov_branch=yes], - [use_lcov_branch=no]) - -AC_ARG_ENABLE([glibc-back-compat], - [AS_HELP_STRING([--enable-glibc-back-compat], - [enable backwards compatibility with glibc])], - [use_glibc_compat=$enableval], - [use_glibc_compat=no]) - -AC_ARG_ENABLE([threadlocal], - [AS_HELP_STRING([--enable-threadlocal], - [enable features that depend on the c++ thread_local keyword (currently just thread names in debug logs). 
(default is to enabled if there is platform support and glibc-back-compat is not enabled)])], - [use_thread_local=$enableval], - [use_thread_local=auto]) - -AC_ARG_ENABLE([asm], - [AS_HELP_STRING([--disable-asm], - [disable assembly routines (enabled by default)])], - [use_asm=$enableval], - [use_asm=yes]) - -if test "x$use_asm" = xyes; then - AC_DEFINE(USE_ASM, 1, [Define this symbol to build in assembly routines]) -fi - -AC_ARG_WITH([system-univalue], - [AS_HELP_STRING([--with-system-univalue], - [Build with system UniValue (default is no)])], - [system_univalue=$withval], - [system_univalue=no] -) -AC_ARG_ENABLE([zmq], - [AS_HELP_STRING([--disable-zmq], - [disable ZMQ notifications])], - [use_zmq=$enableval], - [use_zmq=yes]) - -AC_ARG_ENABLE([bip70], - [AS_HELP_STRING([--enable-bip70], - [BIP70 (payment protocol) support in the GUI (no longer supported)])], - [enable_bip70=$enableval], - [enable_bip70=no]) - -if test x$enable_bip70 != xno; then - AC_MSG_ERROR([BIP70 is no longer supported!]) -fi - -AC_ARG_ENABLE(man, - [AS_HELP_STRING([--disable-man], - [do not install man pages (default is to install)])],, - enable_man=yes) -AM_CONDITIONAL(ENABLE_MAN, test "$enable_man" != no) - -dnl Enable debug -AC_ARG_ENABLE([debug], - [AS_HELP_STRING([--enable-debug], - [use compiler flags and macros suited for debugging (default is no)])], - [enable_debug=$enableval], - [enable_debug=no]) - -dnl Enable different -fsanitize options -AC_ARG_WITH([sanitizers], - [AS_HELP_STRING([--with-sanitizers], - [comma separated list of extra sanitizers to build with (default is none enabled)])], - [use_sanitizers=$withval]) - -dnl Enable gprof profiling -AC_ARG_ENABLE([gprof], - [AS_HELP_STRING([--enable-gprof], - [use gprof profiling compiler flags (default is no)])], - [enable_gprof=$enableval], - [enable_gprof=no]) - -dnl Pass compiler & linker flags that make builds deterministic -AC_ARG_ENABLE([determinism], - [AS_HELP_STRING([--enable-determinism], - [Enable compilation flags that make builds deterministic (default is no)])], - [enable_determinism=$enableval], - [enable_determinism=no]) - -dnl Turn warnings into errors -AC_ARG_ENABLE([werror], - [AS_HELP_STRING([--enable-werror], - [Treat certain compiler warnings as errors (default is no)])], - [enable_werror=$enableval], - [enable_werror=no]) - -AC_LANG_PUSH([C++]) - -dnl Check for a flag to turn compiler warnings into errors. This is helpful for checks which may -dnl appear to succeed because by default they merely emit warnings when they fail. -dnl -dnl Note that this is not necessarily a check to see if -Werror is supported, but rather to see if -dnl a compile with -Werror can succeed. This is important because the compiler may already be -dnl warning about something unrelated, for example about some path issue. If that is the case, -dnl -Werror cannot be used because all of those warnings would be turned into errors. -AX_CHECK_COMPILE_FLAG([-Werror],[CXXFLAG_WERROR="-Werror"],[CXXFLAG_WERROR=""]) - -dnl Check for a flag to turn linker warnings into errors. When flags are passed to linkers via the -dnl compiler driver using a -Wl,-foo flag, linker warnings may be swallowed rather than bubbling up. -dnl See note above, the same applies here as well. 
-dnl -dnl LDFLAG_WERROR Should only be used when testing -Wl,* -case $host in - *darwin*) - AX_CHECK_LINK_FLAG([-Wl,-fatal_warnings],[LDFLAG_WERROR="-Wl,-fatal_warnings"],[LDFLAG_WERROR=""]) - ;; - *) - AX_CHECK_LINK_FLAG([-Wl,--fatal-warnings],[LDFLAG_WERROR="-Wl,--fatal-warnings"],[LDFLAG_WERROR=""]) - ;; -esac - -if test "x$enable_debug" = xyes; then - dnl Clear default -g -O2 flags - if test "x$CXXFLAGS_overridden" = xno; then - CXXFLAGS="" - fi - - dnl Disable all optimizations - AX_CHECK_COMPILE_FLAG([-O0], [[DEBUG_CXXFLAGS="$DEBUG_CXXFLAGS -O0"]],,[[$CXXFLAG_WERROR]]) - - dnl Prefer -g3, fall back to -g if that is unavailable. - AX_CHECK_COMPILE_FLAG( - [-g3], - [[DEBUG_CXXFLAGS="$DEBUG_CXXFLAGS -g3"]], - [AX_CHECK_COMPILE_FLAG([-g],[[DEBUG_CXXFLAGS="$DEBUG_CXXFLAGS -g"]],,[[$CXXFLAG_WERROR]])], - [[$CXXFLAG_WERROR]]) - - AX_CHECK_PREPROC_FLAG([-DDEBUG],[[DEBUG_CPPFLAGS="$DEBUG_CPPFLAGS -DDEBUG"]],,[[$CXXFLAG_WERROR]]) - AX_CHECK_PREPROC_FLAG([-DDEBUG_LOCKORDER],[[DEBUG_CPPFLAGS="$DEBUG_CPPFLAGS -DDEBUG_LOCKORDER"]],,[[$CXXFLAG_WERROR]]) - AX_CHECK_COMPILE_FLAG([-ftrapv],[DEBUG_CXXFLAGS="$DEBUG_CXXFLAGS -ftrapv"],,[[$CXXFLAG_WERROR]]) -fi - -if test x$use_sanitizers != x; then - dnl First check if the compiler accepts flags. If an incompatible pair like - dnl -fsanitize=address,thread is used here, this check will fail. This will also - dnl fail if a bad argument is passed, e.g. -fsanitize=undfeined - AX_CHECK_COMPILE_FLAG( - [[-fsanitize=$use_sanitizers]], - [[SANITIZER_CXXFLAGS=-fsanitize=$use_sanitizers]], - [AC_MSG_ERROR([compiler did not accept requested flags])]) - - dnl Some compilers (e.g. GCC) require additional libraries like libasan, - dnl libtsan, libubsan, etc. Make sure linking still works with the sanitize - dnl flag. This is a separate check so we can give a better error message when - dnl the sanitize flags are supported by the compiler but the actual sanitizer - dnl libs are missing. 
- AX_CHECK_LINK_FLAG( - [[-fsanitize=$use_sanitizers]], - [[SANITIZER_LDFLAGS=-fsanitize=$use_sanitizers]], - [AC_MSG_ERROR([linker did not accept requested flags, you are missing required libraries])], - [], - [AC_LANG_PROGRAM([[ - #include - #include - extern "C" int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) { return 0; } - __attribute__((weak)) // allow for libFuzzer linking - ]],[[]])]) -fi - -ERROR_CXXFLAGS= -if test "x$enable_werror" = "xyes"; then - if test "x$CXXFLAG_WERROR" = "x"; then - AC_MSG_ERROR("enable-werror set but -Werror is not usable") - fi - AX_CHECK_COMPILE_FLAG([-Werror=vla],[ERROR_CXXFLAGS="$ERROR_CXXFLAGS -Werror=vla"],,[[$CXXFLAG_WERROR]]) - AX_CHECK_COMPILE_FLAG([-Werror=switch],[ERROR_CXXFLAGS="$ERROR_CXXFLAGS -Werror=switch"],,[[$CXXFLAG_WERROR]]) - AX_CHECK_COMPILE_FLAG([-Werror=thread-safety-analysis],[ERROR_CXXFLAGS="$ERROR_CXXFLAGS -Werror=thread-safety-analysis"],,[[$CXXFLAG_WERROR]]) - AX_CHECK_COMPILE_FLAG([-Werror=unused-variable],[ERROR_CXXFLAGS="$ERROR_CXXFLAGS -Werror=unused-variable"],,[[$CXXFLAG_WERROR]]) - AX_CHECK_COMPILE_FLAG([-Werror=date-time],[ERROR_CXXFLAGS="$ERROR_CXXFLAGS -Werror=date-time"],,[[$CXXFLAG_WERROR]]) - AX_CHECK_COMPILE_FLAG([-Werror=return-type],[ERROR_CXXFLAGS="$ERROR_CXXFLAGS -Werror=return-type"],,[[$CXXFLAG_WERROR]]) - AX_CHECK_COMPILE_FLAG([-Werror=conditional-uninitialized],[ERROR_CXXFLAGS="$ERROR_CXXFLAGS -Werror=conditional-uninitialized"],,[[$CXXFLAG_WERROR]]) -fi - -if test "x$CXXFLAGS_overridden" = "xno"; then - AX_CHECK_COMPILE_FLAG([-Wall],[WARN_CXXFLAGS="$WARN_CXXFLAGS -Wall"],,[[$CXXFLAG_WERROR]]) - AX_CHECK_COMPILE_FLAG([-Wextra],[WARN_CXXFLAGS="$WARN_CXXFLAGS -Wextra"],,[[$CXXFLAG_WERROR]]) - AX_CHECK_COMPILE_FLAG([-Wgnu],[WARN_CXXFLAGS="$WARN_CXXFLAGS -Wgnu"],,[[$CXXFLAG_WERROR]]) - AX_CHECK_COMPILE_FLAG([-Wformat],[WARN_CXXFLAGS="$WARN_CXXFLAGS -Wformat"],,[[$CXXFLAG_WERROR]]) - AX_CHECK_COMPILE_FLAG([-Wvla],[WARN_CXXFLAGS="$WARN_CXXFLAGS -Wvla"],,[[$CXXFLAG_WERROR]]) - AX_CHECK_COMPILE_FLAG([-Wswitch],[WARN_CXXFLAGS="$WARN_CXXFLAGS -Wswitch"],,[[$CXXFLAG_WERROR]]) - AX_CHECK_COMPILE_FLAG([-Wformat-security],[WARN_CXXFLAGS="$WARN_CXXFLAGS -Wformat-security"],,[[$CXXFLAG_WERROR]]) - AX_CHECK_COMPILE_FLAG([-Wthread-safety-analysis],[WARN_CXXFLAGS="$WARN_CXXFLAGS -Wthread-safety-analysis"],,[[$CXXFLAG_WERROR]]) - AX_CHECK_COMPILE_FLAG([-Wrange-loop-analysis],[WARN_CXXFLAGS="$WARN_CXXFLAGS -Wrange-loop-analysis"],,[[$CXXFLAG_WERROR]]) - AX_CHECK_COMPILE_FLAG([-Wredundant-decls],[WARN_CXXFLAGS="$WARN_CXXFLAGS -Wredundant-decls"],,[[$CXXFLAG_WERROR]]) - AX_CHECK_COMPILE_FLAG([-Wunused-variable],[WARN_CXXFLAGS="$WARN_CXXFLAGS -Wunused-variable"],,[[$CXXFLAG_WERROR]]) - AX_CHECK_COMPILE_FLAG([-Wdate-time],[WARN_CXXFLAGS="$WARN_CXXFLAGS -Wdate-time"],,[[$CXXFLAG_WERROR]]) - AX_CHECK_COMPILE_FLAG([-Wconditional-uninitialized],[WARN_CXXFLAGS="$WARN_CXXFLAGS -Wconditional-uninitialized"],,[[$CXXFLAG_WERROR]]) - - dnl Some compilers (gcc) ignore unknown -Wno-* options, but warn about all - dnl unknown options if any other warning is produced. Test the -Wfoo case, and - dnl set the -Wno-foo case if it works. 
- AX_CHECK_COMPILE_FLAG([-Wunused-parameter],[NOWARN_CXXFLAGS="$NOWARN_CXXFLAGS -Wno-unused-parameter"],,[[$CXXFLAG_WERROR]]) - AX_CHECK_COMPILE_FLAG([-Wself-assign],[NOWARN_CXXFLAGS="$NOWARN_CXXFLAGS -Wno-self-assign"],,[[$CXXFLAG_WERROR]]) - AX_CHECK_COMPILE_FLAG([-Wunused-local-typedef],[NOWARN_CXXFLAGS="$NOWARN_CXXFLAGS -Wno-unused-local-typedef"],,[[$CXXFLAG_WERROR]]) - AX_CHECK_COMPILE_FLAG([-Wdeprecated-register],[NOWARN_CXXFLAGS="$NOWARN_CXXFLAGS -Wno-deprecated-register"],,[[$CXXFLAG_WERROR]]) - AX_CHECK_COMPILE_FLAG([-Wimplicit-fallthrough],[NOWARN_CXXFLAGS="$NOWARN_CXXFLAGS -Wno-implicit-fallthrough"],,[[$CXXFLAG_WERROR]]) -fi - -enable_sse42=no -enable_sse41=no -enable_avx2=no -enable_shani=no - -if test "x$use_asm" = "xyes"; then - -dnl Check for optional instruction set support. Enabling these does _not_ imply that all code will -dnl be compiled with them, rather that specific objects/libs may use them after checking for runtime -dnl compatibility. - -dnl x86 -AX_CHECK_COMPILE_FLAG([-msse4.2],[[SSE42_CXXFLAGS="-msse4.2"]],,[[$CXXFLAG_WERROR]]) -AX_CHECK_COMPILE_FLAG([-msse4.1],[[SSE41_CXXFLAGS="-msse4.1"]],,[[$CXXFLAG_WERROR]]) -AX_CHECK_COMPILE_FLAG([-mavx -mavx2],[[AVX2_CXXFLAGS="-mavx -mavx2"]],,[[$CXXFLAG_WERROR]]) -AX_CHECK_COMPILE_FLAG([-msse4 -msha],[[SHANI_CXXFLAGS="-msse4 -msha"]],,[[$CXXFLAG_WERROR]]) - -TEMP_CXXFLAGS="$CXXFLAGS" -CXXFLAGS="$CXXFLAGS $SSE42_CXXFLAGS" -AC_MSG_CHECKING(for SSE4.2 intrinsics) -AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[ - #include - #if defined(_MSC_VER) - #include - #elif defined(__GNUC__) && defined(__SSE4_2__) - #include - #endif - ]],[[ - uint64_t l = 0; - l = _mm_crc32_u8(l, 0); - l = _mm_crc32_u32(l, 0); - l = _mm_crc32_u64(l, 0); - return l; - ]])], - [ AC_MSG_RESULT(yes); enable_sse42=yes], - [ AC_MSG_RESULT(no)] -) -CXXFLAGS="$TEMP_CXXFLAGS" - -TEMP_CXXFLAGS="$CXXFLAGS" -CXXFLAGS="$CXXFLAGS $SSE41_CXXFLAGS" -AC_MSG_CHECKING(for SSE4.1 intrinsics) -AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[ - #include - #include - ]],[[ - __m128i l = _mm_set1_epi32(0); - return _mm_extract_epi32(l, 3); - ]])], - [ AC_MSG_RESULT(yes); enable_sse41=yes; AC_DEFINE(ENABLE_SSE41, 1, [Define this symbol to build code that uses SSE4.1 intrinsics]) ], - [ AC_MSG_RESULT(no)] -) -CXXFLAGS="$TEMP_CXXFLAGS" - -TEMP_CXXFLAGS="$CXXFLAGS" -CXXFLAGS="$CXXFLAGS $AVX2_CXXFLAGS" -AC_MSG_CHECKING(for AVX2 intrinsics) -AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[ - #include - #include - ]],[[ - __m256i l = _mm256_set1_epi32(0); - return _mm256_extract_epi32(l, 7); - ]])], - [ AC_MSG_RESULT(yes); enable_avx2=yes; AC_DEFINE(ENABLE_AVX2, 1, [Define this symbol to build code that uses AVX2 intrinsics]) ], - [ AC_MSG_RESULT(no)] -) -CXXFLAGS="$TEMP_CXXFLAGS" - -TEMP_CXXFLAGS="$CXXFLAGS" -CXXFLAGS="$CXXFLAGS $SHANI_CXXFLAGS" -AC_MSG_CHECKING(for SHA-NI intrinsics) -AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[ - #include - #include - ]],[[ - __m128i i = _mm_set1_epi32(0); - __m128i j = _mm_set1_epi32(1); - __m128i k = _mm_set1_epi32(2); - return _mm_extract_epi32(_mm_sha256rnds2_epu32(i, i, k), 0); - ]])], - [ AC_MSG_RESULT(yes); enable_shani=yes; AC_DEFINE(ENABLE_SHANI, 1, [Define this symbol to build code that uses SHA-NI intrinsics]) ], - [ AC_MSG_RESULT(no)] -) -CXXFLAGS="$TEMP_CXXFLAGS" - -# ARM -AX_CHECK_COMPILE_FLAG([-march=armv8-a+crc+crypto],[[ARM_CRC_CXXFLAGS="-march=armv8-a+crc+crypto"]],,[[$CXXFLAG_WERROR]]) - -TEMP_CXXFLAGS="$CXXFLAGS" -CXXFLAGS="$CXXFLAGS $ARM_CRC_CXXFLAGS" -AC_MSG_CHECKING(for ARM CRC32 intrinsics) -AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[ - #include - #include - 
]],[[ - __crc32cb(0, 0); __crc32ch(0, 0); __crc32cw(0, 0); __crc32cd(0, 0); - vmull_p64(0, 0); - ]])], - [ AC_MSG_RESULT(yes); enable_arm_crc=yes; ], - [ AC_MSG_RESULT(no)] -) -CXXFLAGS="$TEMP_CXXFLAGS" - -fi - -CPPFLAGS="$CPPFLAGS -DHAVE_BUILD_INFO -D__STDC_FORMAT_MACROS" - -AC_ARG_WITH([utils], - [AS_HELP_STRING([--with-utils], - [build bitcoin-cli bitcoin-tx bitcoin-wallet (default=yes)])], - [build_bitcoin_utils=$withval], - [build_bitcoin_utils=yes]) - -AC_ARG_ENABLE([util-cli], - [AS_HELP_STRING([--enable-util-cli], - [build bitcoin-cli])], - [build_bitcoin_cli=$enableval], - [build_bitcoin_cli=$build_bitcoin_utils]) - -AC_ARG_ENABLE([util-tx], - [AS_HELP_STRING([--enable-util-tx], - [build bitcoin-tx])], - [build_bitcoin_tx=$enableval], - [build_bitcoin_tx=$build_bitcoin_utils]) - -AC_ARG_ENABLE([util-wallet], - [AS_HELP_STRING([--enable-util-wallet], - [build bitcoin-wallet])], - [build_bitcoin_wallet=$enableval], - [build_bitcoin_wallet=$build_bitcoin_utils]) - -AC_ARG_WITH([libs], - [AS_HELP_STRING([--with-libs], - [build libraries (default=yes)])], - [build_bitcoin_libs=$withval], - [build_bitcoin_libs=yes]) - -AC_ARG_WITH([daemon], - [AS_HELP_STRING([--with-daemon], - [build bitcoind daemon (default=yes)])], - [build_bitcoind=$withval], - [build_bitcoind=yes]) - -use_pkgconfig=yes -case $host in - *mingw*) - - dnl pkgconfig does more harm than good with MinGW - use_pkgconfig=no - - TARGET_OS=windows - AC_CHECK_LIB([kernel32], [GetModuleFileNameA],, AC_MSG_ERROR(libkernel32 missing)) - AC_CHECK_LIB([user32], [main],, AC_MSG_ERROR(libuser32 missing)) - AC_CHECK_LIB([gdi32], [main],, AC_MSG_ERROR(libgdi32 missing)) - AC_CHECK_LIB([comdlg32], [main],, AC_MSG_ERROR(libcomdlg32 missing)) - AC_CHECK_LIB([winmm], [main],, AC_MSG_ERROR(libwinmm missing)) - AC_CHECK_LIB([shell32], [SHGetSpecialFolderPathW],, AC_MSG_ERROR(libshell32 missing)) - AC_CHECK_LIB([comctl32], [main],, AC_MSG_ERROR(libcomctl32 missing)) - AC_CHECK_LIB([ole32], [CoCreateInstance],, AC_MSG_ERROR(libole32 missing)) - AC_CHECK_LIB([oleaut32], [main],, AC_MSG_ERROR(liboleaut32 missing)) - AC_CHECK_LIB([uuid], [main],, AC_MSG_ERROR(libuuid missing)) - AC_CHECK_LIB([advapi32], [CryptAcquireContextW],, AC_MSG_ERROR(libadvapi32 missing)) - AC_CHECK_LIB([ws2_32], [WSAStartup],, AC_MSG_ERROR(libws2_32 missing)) - AC_CHECK_LIB([shlwapi], [PathRemoveFileSpecW],, AC_MSG_ERROR(libshlwapi missing)) - AC_CHECK_LIB([iphlpapi], [GetAdaptersAddresses],, AC_MSG_ERROR(libiphlpapi missing)) - - dnl -static is interpreted by libtool, where it has a different meaning. - dnl In libtool-speak, it's -all-static. - AX_CHECK_LINK_FLAG([[-static]],[LIBTOOL_APP_LDFLAGS="$LIBTOOL_APP_LDFLAGS -all-static"]) - - AC_PATH_PROG([MAKENSIS], [makensis], none) - if test x$MAKENSIS = xnone; then - AC_MSG_WARN("makensis not found. Cannot create installer.") - fi - - AC_PATH_TOOL(WINDRES, windres, none) - if test x$WINDRES = xnone; then - AC_MSG_ERROR("windres not found") - fi - - CPPFLAGS="$CPPFLAGS -D_MT -DWIN32 -D_WINDOWS -DBOOST_THREAD_USE_LIB -D_WIN32_WINNT=0x0601" - if test "x$CXXFLAGS_overridden" = "xno"; then - CXXFLAGS="$CXXFLAGS -w" - fi - - dnl libtool insists upon adding -nostdlib and a list of objects/libs to link against. - dnl That breaks our ability to build dll's with static libgcc/libstdc++/libssp. Override - dnl its command here, with the predeps/postdeps removed, and -static inserted. Postdeps are - dnl also overridden to prevent their insertion later. - dnl This should only affect dll's. 
- archive_cmds_CXX="\$CC -shared \$libobjs \$deplibs \$compiler_flags -static -o \$output_objdir/\$soname \${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker \$lib" - postdeps_CXX= - - ;; - *darwin*) - TARGET_OS=darwin - if test x$cross_compiling != xyes; then - BUILD_OS=darwin - AC_PATH_PROGS([RSVG_CONVERT], [rsvg-convert rsvg],rsvg-convert) - AC_CHECK_PROG([BREW],brew, brew) - if test x$BREW = xbrew; then - dnl These Homebrew packages may be keg-only, meaning that they won't be found - dnl in expected paths because they may conflict with system files. Ask - dnl Homebrew where each one is located, then adjust paths accordingly. - dnl It's safe to add these paths even if the functionality is disabled by - dnl the user (--without-wallet or --without-gui for example). - - bdb_prefix=$($BREW --prefix berkeley-db4 2>/dev/null) - qt5_prefix=$($BREW --prefix qt5 2>/dev/null) - if test x$bdb_prefix != x; then - CPPFLAGS="$CPPFLAGS -I$bdb_prefix/include" - LIBS="$LIBS -L$bdb_prefix/lib" - fi - if test x$qt5_prefix != x; then - PKG_CONFIG_PATH="$qt5_prefix/lib/pkgconfig:$PKG_CONFIG_PATH" - export PKG_CONFIG_PATH - fi - fi - else - case $build_os in - *darwin*) - BUILD_OS=darwin - ;; - *) - AC_PATH_TOOL([INSTALLNAMETOOL], [install_name_tool], install_name_tool) - AC_PATH_TOOL([OTOOL], [otool], otool) - AC_PATH_PROGS([GENISOIMAGE], [genisoimage mkisofs],genisoimage) - AC_PATH_PROGS([RSVG_CONVERT], [rsvg-convert rsvg],rsvg-convert) - AC_PATH_PROGS([IMAGEMAGICK_CONVERT], [convert],convert) - AC_PATH_PROGS([TIFFCP], [tiffcp],tiffcp) - - dnl libtool will try to strip the static lib, which is a problem for - dnl cross-builds because strip attempts to call a hard-coded ld, - dnl which may not exist in the path. Stripping the .a is not - dnl necessary, so just disable it. - old_striplib= - ;; - esac - fi - - AX_CHECK_LINK_FLAG([[-Wl,-headerpad_max_install_names]], [LDFLAGS="$LDFLAGS -Wl,-headerpad_max_install_names"],, [[$LDFLAG_WERROR]]) - CPPFLAGS="$CPPFLAGS -DMAC_OSX -DOBJC_OLD_DISPATCH_PROTOTYPES=0" - OBJCXXFLAGS="$CXXFLAGS" - ;; - *android*) - dnl make sure android stays above linux for hosts like *linux-android* - TARGET_OS=android - ;; - *linux*) - TARGET_OS=linux - ;; -esac - -if test x$use_pkgconfig = xyes; then - m4_ifndef([PKG_PROG_PKG_CONFIG], [AC_MSG_ERROR(PKG_PROG_PKG_CONFIG macro not found. Please install pkg-config and re-run autogen.sh.)]) - m4_ifdef([PKG_PROG_PKG_CONFIG], [ - PKG_PROG_PKG_CONFIG - if test x"$PKG_CONFIG" = "x"; then - AC_MSG_ERROR(pkg-config not found.) 
- fi - ]) -fi - -if test x$use_extended_functional_tests != xno; then - AC_SUBST(EXTENDED_FUNCTIONAL_TESTS, --extended) -fi - -if test x$use_lcov = xyes; then - if test x$LCOV = x; then - AC_MSG_ERROR("lcov testing requested but lcov not found") - fi - if test x$GCOV = x; then - AC_MSG_ERROR("lcov testing requested but gcov not found") - fi - if test x$PYTHON = x; then - AC_MSG_ERROR("lcov testing requested but python not found") - fi - if test x$GENHTML = x; then - AC_MSG_ERROR("lcov testing requested but genhtml not found") - fi - LCOV="$LCOV --gcov-tool=$GCOV" - AX_CHECK_LINK_FLAG([[--coverage]], [LDFLAGS="$LDFLAGS --coverage"], - [AC_MSG_ERROR("lcov testing requested but --coverage linker flag does not work")]) - AX_CHECK_COMPILE_FLAG([--coverage],[CXXFLAGS="$CXXFLAGS --coverage"], - [AC_MSG_ERROR("lcov testing requested but --coverage flag does not work")]) - CXXFLAGS="$CXXFLAGS -Og" -fi - -if test x$use_lcov_branch != xno; then - AC_SUBST(LCOV_OPTS, "$LCOV_OPTS --rc lcov_branch_coverage=1") -fi - -dnl Check for endianness -AC_C_BIGENDIAN - -dnl Check for pthread compile/link requirements -AX_PTHREAD - -dnl The following macro will add the necessary defines to bitcoin-config.h, but -dnl they also need to be passed down to any subprojects. Pull the results out of -dnl the cache and add them to CPPFLAGS. -AC_SYS_LARGEFILE -dnl detect POSIX or GNU variant of strerror_r -AC_FUNC_STRERROR_R - -if test x$ac_cv_sys_file_offset_bits != x && - test x$ac_cv_sys_file_offset_bits != xno && - test x$ac_cv_sys_file_offset_bits != xunknown; then - CPPFLAGS="$CPPFLAGS -D_FILE_OFFSET_BITS=$ac_cv_sys_file_offset_bits" -fi - -if test x$ac_cv_sys_large_files != x && - test x$ac_cv_sys_large_files != xno && - test x$ac_cv_sys_large_files != xunknown; then - CPPFLAGS="$CPPFLAGS -D_LARGE_FILES=$ac_cv_sys_large_files" -fi - -AX_GCC_FUNC_ATTRIBUTE([visibility]) -AX_GCC_FUNC_ATTRIBUTE([dllexport]) -AX_GCC_FUNC_ATTRIBUTE([dllimport]) - -if test x$use_glibc_compat != xno; then - - dnl __fdelt_chk's params and return type have changed from long unsigned int to long int. - dnl See which one is present here. - AC_MSG_CHECKING(__fdelt_chk type) - AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#ifdef _FORTIFY_SOURCE - #undef _FORTIFY_SOURCE - #endif - #define _FORTIFY_SOURCE 2 - #include - extern "C" long unsigned int __fdelt_warn(long unsigned int);]],[[]])], - [ fdelt_type="long unsigned int"], - [ fdelt_type="long int"]) - AC_MSG_RESULT($fdelt_type) - AC_DEFINE_UNQUOTED(FDELT_TYPE, $fdelt_type,[parameter and return value type for __fdelt_chk]) - AX_CHECK_LINK_FLAG([[-Wl,--wrap=__divmoddi4]], [COMPAT_LDFLAGS="$COMPAT_LDFLAGS -Wl,--wrap=__divmoddi4"],, [[$LDFLAG_WERROR]]) - AX_CHECK_LINK_FLAG([[-Wl,--wrap=log2f]], [COMPAT_LDFLAGS="$COMPAT_LDFLAGS -Wl,--wrap=log2f"],, [[$LDFLAG_WERROR]]) -else - AC_SEARCH_LIBS([clock_gettime],[rt]) -fi - -if test "x$enable_gprof" = xyes; then - dnl -pg is incompatible with -pie. Since hardening and profiling together doesn't make sense, - dnl we simply make them mutually exclusive here. Additionally, hardened toolchains may force - dnl -pie by default, in which case it needs to be turned off with -no-pie. - - if test x$use_hardening = xyes; then - AC_MSG_ERROR(gprof profiling is not compatible with hardening. 
Reconfigure with --disable-hardening or --disable-gprof) - fi - use_hardening=no - AX_CHECK_COMPILE_FLAG([-pg],[GPROF_CXXFLAGS="-pg"], - [AC_MSG_ERROR(gprof profiling requested but not available)], [[$CXXFLAG_WERROR]]) - - AX_CHECK_LINK_FLAG([[-no-pie]], [GPROF_LDFLAGS="-no-pie"]) - AX_CHECK_LINK_FLAG([[-pg]],[GPROF_LDFLAGS="$GPROF_LDFLAGS -pg"], - [AC_MSG_ERROR(gprof profiling requested but not available)], [[$GPROF_LDFLAGS]]) -fi - -if test x$TARGET_OS != xwindows; then - dnl All windows code is PIC, forcing it on just adds useless compile warnings - AX_CHECK_COMPILE_FLAG([-fPIC],[PIC_FLAGS="-fPIC"]) -fi - -dnl All versions of gcc that we commonly use for building are subject to bug -dnl https://gcc.gnu.org/bugzilla/show_bug.cgi?id=90348. To work around that, set -dnl -fstack-reuse=none for all gcc builds. (Only gcc understands this flag) -AX_CHECK_COMPILE_FLAG([-fstack-reuse=none],[HARDENED_CXXFLAGS="$HARDENED_CXXFLAGS -fstack-reuse=none"]) -if test x$use_hardening != xno; then - use_hardening=yes - AX_CHECK_COMPILE_FLAG([-Wstack-protector],[HARDENED_CXXFLAGS="$HARDENED_CXXFLAGS -Wstack-protector"]) - AX_CHECK_COMPILE_FLAG([-fstack-protector-all],[HARDENED_CXXFLAGS="$HARDENED_CXXFLAGS -fstack-protector-all"]) - - dnl When enable_debug is yes, all optimizations are disabled. - dnl However, FORTIFY_SOURCE requires that there is some level of optimization, otherwise it does nothing and just creates a compiler warning. - dnl Since FORTIFY_SOURCE is a no-op without optimizations, do not enable it when enable_debug is yes. - if test x$enable_debug != xyes; then - AX_CHECK_PREPROC_FLAG([-D_FORTIFY_SOURCE=2],[ - AX_CHECK_PREPROC_FLAG([-U_FORTIFY_SOURCE],[ - HARDENED_CPPFLAGS="$HARDENED_CPPFLAGS -U_FORTIFY_SOURCE" - ]) - HARDENED_CPPFLAGS="$HARDENED_CPPFLAGS -D_FORTIFY_SOURCE=2" - ]) - fi - - AX_CHECK_LINK_FLAG([[-Wl,--dynamicbase]], [HARDENED_LDFLAGS="$HARDENED_LDFLAGS -Wl,--dynamicbase"],, [[$LDFLAG_WERROR]]) - AX_CHECK_LINK_FLAG([[-Wl,--nxcompat]], [HARDENED_LDFLAGS="$HARDENED_LDFLAGS -Wl,--nxcompat"],, [[$LDFLAG_WERROR]]) - AX_CHECK_LINK_FLAG([[-Wl,--high-entropy-va]], [HARDENED_LDFLAGS="$HARDENED_LDFLAGS -Wl,--high-entropy-va"],, [[$LDFLAG_WERROR]]) - AX_CHECK_LINK_FLAG([[-Wl,-z,relro]], [HARDENED_LDFLAGS="$HARDENED_LDFLAGS -Wl,-z,relro"],, [[$LDFLAG_WERROR]]) - AX_CHECK_LINK_FLAG([[-Wl,-z,now]], [HARDENED_LDFLAGS="$HARDENED_LDFLAGS -Wl,-z,now"],, [[$LDFLAG_WERROR]]) - AX_CHECK_LINK_FLAG([[-fPIE -pie]], [PIE_FLAGS="-fPIE"; HARDENED_LDFLAGS="$HARDENED_LDFLAGS -pie"],, [[$CXXFLAG_WERROR]]) - - case $host in - *mingw*) - AC_CHECK_LIB([ssp], [main],, AC_MSG_ERROR(libssp missing)) - ;; - esac -fi - -dnl These flags are specific to ld64, and may cause issues with other linkers. -dnl For example: GNU ld will intepret -dead_strip as -de and then try and use -dnl "ad_strip" as the symbol for the entry point. 
-if test x$TARGET_OS = xdarwin; then - AX_CHECK_LINK_FLAG([[-Wl,-dead_strip]], [LDFLAGS="$LDFLAGS -Wl,-dead_strip"],, [[$LDFLAG_WERROR]]) - AX_CHECK_LINK_FLAG([[-Wl,-dead_strip_dylibs]], [LDFLAGS="$LDFLAGS -Wl,-dead_strip_dylibs"],, [[$LDFLAG_WERROR]]) - AX_CHECK_LINK_FLAG([[-Wl,-bind_at_load]], [HARDENED_LDFLAGS="$HARDENED_LDFLAGS -Wl,-bind_at_load"],, [[$LDFLAG_WERROR]]) -fi - -if test x$enable_determinism = xyes; then - if test x$TARGET_OS = xwindows; then - AX_CHECK_LINK_FLAG([[-Wl,--no-insert-timestamp]], [LDFLAGS="$LDFLAGS -Wl,--no-insert-timestamp"],, [[$LDFLAG_WERROR]]) - fi -fi - -AC_CHECK_HEADERS([endian.h sys/endian.h byteswap.h stdio.h stdlib.h unistd.h strings.h sys/types.h sys/stat.h sys/select.h sys/prctl.h sys/sysctl.h vm/vm_param.h sys/vmmeter.h sys/resources.h]) - -dnl FD_ZERO may be dependent on a declaration of memcpy, e.g. in SmartOS -dnl check that it fails to build without memcpy, then that it builds with -AC_MSG_CHECKING(FD_ZERO memcpy dependence) -AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[ - #include - #if HAVE_SYS_SELECT_H - #include - #endif - ]],[[ - #if HAVE_SYS_SELECT_H - fd_set fds; - FD_ZERO(&fds); - #endif - ]])], - [ AC_MSG_RESULT(no) ], - [ - AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[ - #include - #if HAVE_SYS_SELECT_H - #include - #endif - ]], [[ - #if HAVE_SYS_SELECT_H - fd_set fds; - FD_ZERO(&fds); - #endif - ]])], - [ AC_MSG_RESULT(yes); AC_DEFINE(HAVE_CSTRING_DEPENDENT_FD_ZERO, 1, [Define this symbol if FD_ZERO is dependent of a memcpy declaration being available]) ], - [ AC_MSG_ERROR(failed with cstring include) ] - ) - ] -) - -AC_CHECK_DECLS([getifaddrs, freeifaddrs],,, - [#include - #include ] -) -AC_CHECK_DECLS([strnlen]) - -dnl Check for daemon(3), unrelated to --with-daemon (although used by it) -AC_CHECK_DECLS([daemon]) - -AC_CHECK_DECLS([le16toh, le32toh, le64toh, htole16, htole32, htole64, be16toh, be32toh, be64toh, htobe16, htobe32, htobe64],,, - [#if HAVE_ENDIAN_H - #include - #elif HAVE_SYS_ENDIAN_H - #include - #endif]) - -AC_CHECK_DECLS([bswap_16, bswap_32, bswap_64],,, - [#if HAVE_BYTESWAP_H - #include - #endif]) - -AC_CHECK_DECLS([__builtin_clz, __builtin_clzl, __builtin_clzll]) - -dnl Check for malloc_info (for memory statistics information in getmemoryinfo) -AC_MSG_CHECKING(for getmemoryinfo) -AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include ]], - [[ int f = malloc_info(0, NULL); ]])], - [ AC_MSG_RESULT(yes); AC_DEFINE(HAVE_MALLOC_INFO, 1,[Define this symbol if you have malloc_info]) ], - [ AC_MSG_RESULT(no)] -) - -dnl Check for mallopt(M_ARENA_MAX) (to set glibc arenas) -AC_MSG_CHECKING(for mallopt M_ARENA_MAX) -AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include ]], - [[ mallopt(M_ARENA_MAX, 1); ]])], - [ AC_MSG_RESULT(yes); AC_DEFINE(HAVE_MALLOPT_ARENA_MAX, 1,[Define this symbol if you have mallopt with M_ARENA_MAX]) ], - [ AC_MSG_RESULT(no)] -) - -dnl Check for posix_fallocate -AC_MSG_CHECKING(for posix_fallocate) -AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[ - // same as in src/util/system.cpp - #ifdef __linux__ - #ifdef _POSIX_C_SOURCE - #undef _POSIX_C_SOURCE - #endif - #define _POSIX_C_SOURCE 200112L - #endif // __linux__ - #include ]], - [[ int f = posix_fallocate(0, 0, 0); ]])], - [ AC_MSG_RESULT(yes); AC_DEFINE(HAVE_POSIX_FALLOCATE, 1,[Define this symbol if you have posix_fallocate]) ], - [ AC_MSG_RESULT(no)] -) - -AC_MSG_CHECKING([for visibility attribute]) -AC_LINK_IFELSE([AC_LANG_SOURCE([ - int foo_def( void ) __attribute__((visibility("default"))); - int main(){} - ])], - [ - AC_DEFINE(HAVE_VISIBILITY_ATTRIBUTE,1,[Define if the visibility 
attribute is supported.]) - AC_MSG_RESULT(yes) - ], - [ - AC_MSG_RESULT(no) - if test x$use_reduce_exports = xyes; then - AC_MSG_ERROR([Cannot find a working visibility attribute. Use --disable-reduce-exports.]) - fi - ] -) - -dnl thread_local is currently disabled when building with glibc back compat. -dnl Our minimum supported glibc is 2.17, however support for thread_local -dnl did not arrive in glibc until 2.18. -if test "x$use_thread_local" = xyes || { test "x$use_thread_local" = xauto && test "x$use_glibc_compat" = xno; }; then - TEMP_LDFLAGS="$LDFLAGS" - LDFLAGS="$TEMP_LDFLAGS $PTHREAD_CFLAGS" - AC_MSG_CHECKING([for thread_local support]) - AC_LINK_IFELSE([AC_LANG_SOURCE([ - #include - static thread_local int foo = 0; - static void run_thread() { foo++;} - int main(){ - for(int i = 0; i < 10; i++) { std::thread(run_thread).detach();} - return foo; - } - ])], - [ - case $host in - *mingw*) - dnl mingw32's implementation of thread_local has also been shown to behave - dnl erroneously under concurrent usage; see: - dnl https://gist.github.com/jamesob/fe9a872051a88b2025b1aa37bfa98605 - AC_MSG_RESULT(no) - ;; - *freebsd*) - dnl FreeBSD's implementation of thread_local is also buggy (per - dnl https://groups.google.com/d/msg/bsdmailinglist/22ncTZAbDp4/Dii_pII5AwAJ) - AC_MSG_RESULT(no) - ;; - *) - AC_DEFINE(HAVE_THREAD_LOCAL,1,[Define if thread_local is supported.]) - AC_MSG_RESULT(yes) - ;; - esac - ], - [ - AC_MSG_RESULT(no) - ] - ) - LDFLAGS="$TEMP_LDFLAGS" -fi - -dnl check for gmtime_r(), fallback to gmtime_s() if that is unavailable -dnl fail if neither are available. -AC_MSG_CHECKING(for gmtime_r) -AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include ]], - [[ gmtime_r((const time_t *) nullptr, (struct tm *) nullptr); ]])], - [ AC_MSG_RESULT(yes); AC_DEFINE(HAVE_GMTIME_R, 1, [Define this symbol if gmtime_r is available]) ], - [ AC_MSG_RESULT(no); - AC_MSG_CHECKING(for gmtime_s); - AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include ]], - [[ gmtime_s((struct tm *) nullptr, (const time_t *) nullptr); ]])], - [ AC_MSG_RESULT(yes)], - [ AC_MSG_RESULT(no); AC_MSG_ERROR(Both gmtime_r and gmtime_s are unavailable) ] - ) - ] -) - -dnl Check for different ways of gathering OS randomness -AC_MSG_CHECKING(for Linux getrandom syscall) -AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include - #include - #include ]], - [[ syscall(SYS_getrandom, nullptr, 32, 0); ]])], - [ AC_MSG_RESULT(yes); AC_DEFINE(HAVE_SYS_GETRANDOM, 1,[Define this symbol if the Linux getrandom system call is available]) ], - [ AC_MSG_RESULT(no)] -) - -AC_MSG_CHECKING(for getentropy) -AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include ]], - [[ getentropy(nullptr, 32) ]])], - [ AC_MSG_RESULT(yes); AC_DEFINE(HAVE_GETENTROPY, 1,[Define this symbol if the BSD getentropy system call is available]) ], - [ AC_MSG_RESULT(no)] -) - -AC_MSG_CHECKING(for getentropy via random.h) -AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include - #include ]], - [[ getentropy(nullptr, 32) ]])], - [ AC_MSG_RESULT(yes); AC_DEFINE(HAVE_GETENTROPY_RAND, 1,[Define this symbol if the BSD getentropy system call is available with sys/random.h]) ], - [ AC_MSG_RESULT(no)] -) - -AC_MSG_CHECKING(for sysctl) -AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include - #include ]], - [[ #ifdef __linux__ - #error "Don't use sysctl on Linux, it's deprecated even when it works" - #endif - sysctl(nullptr, 2, nullptr, nullptr, nullptr, 0); ]])], - [ AC_MSG_RESULT(yes); AC_DEFINE(HAVE_SYSCTL, 1,[Define this symbol if the BSD sysctl() is available]) ], - [ AC_MSG_RESULT(no)] -) - -AC_MSG_CHECKING(for sysctl KERN_ARND) 
-AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include - #include ]], - [[ #ifdef __linux__ - #error "Don't use sysctl on Linux, it's deprecated even when it works" - #endif - static int name[2] = {CTL_KERN, KERN_ARND}; - sysctl(name, 2, nullptr, nullptr, nullptr, 0); ]])], - [ AC_MSG_RESULT(yes); AC_DEFINE(HAVE_SYSCTL_ARND, 1,[Define this symbol if the BSD sysctl(KERN_ARND) is available]) ], - [ AC_MSG_RESULT(no)] -) - -AC_MSG_CHECKING(for if type char equals int8_t) -AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include - #include ]], - [[ static_assert(std::is_same::value, ""); ]])], - [ AC_MSG_RESULT(yes); AC_DEFINE(CHAR_EQUALS_INT8, 1,[Define this symbol if type char equals int8_t]) ], - [ AC_MSG_RESULT(no)] -) - -dnl LevelDB platform checks -AC_MSG_CHECKING(for fdatasync) -AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include ]], - [[ fdatasync(0); ]])], - [ AC_MSG_RESULT(yes); HAVE_FDATASYNC=1 ], - [ AC_MSG_RESULT(no); HAVE_FDATASYNC=0 ] -) - -AC_MSG_CHECKING(for F_FULLFSYNC) -AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include ]], - [[ fcntl(0, F_FULLFSYNC, 0); ]])], - [ AC_MSG_RESULT(yes); HAVE_FULLFSYNC=1 ], - [ AC_MSG_RESULT(no); HAVE_FULLFSYNC=0 ] -) - -AC_MSG_CHECKING(for O_CLOEXEC) -AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include ]], - [[ open("", O_CLOEXEC); ]])], - [ AC_MSG_RESULT(yes); HAVE_O_CLOEXEC=1 ], - [ AC_MSG_RESULT(no); HAVE_O_CLOEXEC=0 ] -) - -dnl crc32c platform checks -AC_MSG_CHECKING(for __builtin_prefetch) -AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[ ]], [[ - char data = 0; - const char* address = &data; - __builtin_prefetch(address, 0, 0); - ]])], - [ AC_MSG_RESULT(yes); HAVE_BUILTIN_PREFETCH=1 ], - [ AC_MSG_RESULT(no); HAVE_BUILTIN_PREFETCH=0 ] -) - -AC_MSG_CHECKING(for _mm_prefetch) -AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include ]], [[ - char data = 0; - const char* address = &data; - _mm_prefetch(address, _MM_HINT_NTA); - ]])], - [ AC_MSG_RESULT(yes); HAVE_MM_PREFETCH=1 ], - [ AC_MSG_RESULT(no); HAVE_MM_PREFETCH=0 ] -) - -AC_MSG_CHECKING(for strong getauxval support in the system headers) -AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[ - #include - #include - #include - ]], [[ - getauxval(AT_HWCAP); - ]])], - [ AC_MSG_RESULT(yes); HAVE_STRONG_GETAUXVAL=1 ], - [ AC_MSG_RESULT(no); HAVE_STRONG_GETAUXVAL=0 ] -) - -AC_MSG_CHECKING(for weak getauxval support in the compiler) -AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[ - unsigned long getauxval(unsigned long type) __attribute__((weak)); - #define AT_HWCAP 16 - ]], [[ - getauxval(AT_HWCAP); - ]])], - [ AC_MSG_RESULT(yes); HAVE_WEAK_GETAUXVAL=1 ], - [ AC_MSG_RESULT(no); HAVE_WEAK_GETAUXVAL=0 ] -) - -dnl Check for reduced exports -if test x$use_reduce_exports = xyes; then - AX_CHECK_COMPILE_FLAG([-fvisibility=hidden],[RE_CXXFLAGS="-fvisibility=hidden"], - [AC_MSG_ERROR([Cannot set default symbol visibility. 
Use --disable-reduce-exports.])]) -fi - -AC_MSG_CHECKING([for std::system]) -AC_LINK_IFELSE( - [ AC_LANG_PROGRAM( - [[ #include ]], - [[ int nErr = std::system(""); ]] - )], - [ AC_MSG_RESULT(yes); AC_DEFINE(HAVE_STD__SYSTEM, 1, Define to 1 if std::system is available.)], - [ AC_MSG_RESULT(no) ] -) - -AC_MSG_CHECKING([for ::_wsystem]) -AC_LINK_IFELSE( - [ AC_LANG_PROGRAM( - [[ ]], - [[ int nErr = ::_wsystem(""); ]] - )], - [ AC_MSG_RESULT(yes); AC_DEFINE(HAVE_WSYSTEM, 1, Define to 1 if ::wsystem is available.)], - [ AC_MSG_RESULT(no) ] -) - -AC_DEFINE([HAVE_SYSTEM], [HAVE_STD__SYSTEM || HAVE_WSYSTEM], [std::system or ::wsystem]) - -LEVELDB_CPPFLAGS= -LIBLEVELDB= -LIBMEMENV= -AM_CONDITIONAL([EMBEDDED_LEVELDB],[true]) -AC_SUBST(LEVELDB_CPPFLAGS) -AC_SUBST(LIBLEVELDB) -AC_SUBST(LIBMEMENV) - -dnl enable-fuzz should disable all other targets -if test "x$enable_fuzz" = "xyes"; then - AC_MSG_WARN(enable-fuzz will disable all other targets) - build_bitcoin_utils=no - build_bitcoin_cli=no - build_bitcoin_tx=no - build_bitcoin_wallet=no - build_bitcoind=no - build_bitcoin_libs=no - bitcoin_enable_qt=no - bitcoin_enable_qt_test=no - bitcoin_enable_qt_dbus=no - enable_wallet=no - use_bench=no - use_upnp=no - use_zmq=no -else - BITCOIN_QT_INIT - - dnl sets $bitcoin_enable_qt, $bitcoin_enable_qt_test, $bitcoin_enable_qt_dbus - BITCOIN_QT_CONFIGURE([$use_pkgconfig]) -fi - -if test x$enable_wallet != xno; then - dnl Check for libdb_cxx only if wallet enabled - BITCOIN_FIND_BDB48 -fi - -dnl Check for libminiupnpc (optional) -if test x$use_upnp != xno; then - AC_CHECK_HEADERS( - [miniupnpc/miniwget.h miniupnpc/miniupnpc.h miniupnpc/upnpcommands.h miniupnpc/upnperrors.h], - [AC_CHECK_LIB([miniupnpc], [upnpDiscover], [MINIUPNPC_LIBS=-lminiupnpc], [have_miniupnpc=no])], - [have_miniupnpc=no] - ) -dnl The minimum supported miniUPnPc API version is set to 10. This keeps compatibility -dnl with Ubuntu 16.04 LTS and Debian 8 libminiupnpc-dev packages. -if test x$have_miniupnpc != xno; then - AC_MSG_CHECKING([whether miniUPnPc API version is supported]) - AC_PREPROC_IFELSE([AC_LANG_PROGRAM([[ - @%:@include - ]], [[ - #if MINIUPNPC_API_VERSION >= 10 - // Everything is okay - #else - # error miniUPnPc API version is too old - #endif - ]])],[ - AC_MSG_RESULT(yes) - ],[ - AC_MSG_RESULT(no) - AC_MSG_WARN([miniUPnPc API version < 10 is unsupported, disabling UPnP support.]) - have_miniupnpc=no - ]) -fi -fi - -if test x$build_bitcoin_wallet$build_bitcoin_cli$build_bitcoin_tx$build_bitcoind$bitcoin_enable_qt$use_tests$use_bench = xnonononononono; then - use_boost=no -else - use_boost=yes -fi - -if test x$use_boost = xyes; then - -dnl Minimum required Boost version -define(MINIMUM_REQUIRED_BOOST, 1.47.0) - -dnl Check for boost libs -AX_BOOST_BASE([MINIMUM_REQUIRED_BOOST]) -if test x$want_boost = xno; then - AC_MSG_ERROR([[only libbitcoinconsensus can be built without boost]]) -fi -AX_BOOST_SYSTEM -AX_BOOST_FILESYSTEM -AX_BOOST_THREAD - -dnl Boost 1.56 through 1.62 allow using std::atomic instead of its own atomic -dnl counter implementations. In 1.63 and later the std::atomic approach is default. 
-m4_pattern_allow(DBOOST_AC_USE_STD_ATOMIC) dnl otherwise it's treated like a macro -BOOST_CPPFLAGS="-DBOOST_SP_USE_STD_ATOMIC -DBOOST_AC_USE_STD_ATOMIC $BOOST_CPPFLAGS" - -if test x$use_reduce_exports = xyes; then - AC_MSG_CHECKING([for working boost reduced exports]) - TEMP_CPPFLAGS="$CPPFLAGS" - CPPFLAGS="$BOOST_CPPFLAGS $CPPFLAGS" - AC_PREPROC_IFELSE([AC_LANG_PROGRAM([[ - @%:@include - ]], [[ - #if BOOST_VERSION >= 104900 - // Everything is okay - #else - # error Boost version is too old - #endif - ]])],[ - AC_MSG_RESULT(yes) - ],[ - AC_MSG_ERROR([boost versions < 1.49 are known to be broken with reduced exports. Use --disable-reduce-exports.]) - ]) - CPPFLAGS="$TEMP_CPPFLAGS" -fi -fi - -if test x$use_reduce_exports = xyes; then - CXXFLAGS="$CXXFLAGS $RE_CXXFLAGS" - AX_CHECK_LINK_FLAG([[-Wl,--exclude-libs,ALL]], [RELDFLAGS="-Wl,--exclude-libs,ALL"],, [[$LDFLAG_WERROR]]) -fi - -if test x$use_tests = xyes; then - - if test x$HEXDUMP = x; then - AC_MSG_ERROR(hexdump is required for tests) - fi - - - if test x$use_boost = xyes; then - - AX_BOOST_UNIT_TEST_FRAMEWORK - - dnl Determine if -DBOOST_TEST_DYN_LINK is needed - AC_MSG_CHECKING([for dynamic linked boost test]) - TEMP_LIBS="$LIBS" - LIBS="$LIBS $BOOST_LDFLAGS $BOOST_UNIT_TEST_FRAMEWORK_LIB" - TEMP_CPPFLAGS="$CPPFLAGS" - CPPFLAGS="$CPPFLAGS $BOOST_CPPFLAGS" - AC_LINK_IFELSE([AC_LANG_SOURCE([ - #define BOOST_TEST_DYN_LINK - #define BOOST_TEST_MAIN - #include - - ])], - [AC_MSG_RESULT(yes)] - [TESTDEFS="$TESTDEFS -DBOOST_TEST_DYN_LINK"], - [AC_MSG_RESULT(no)]) - LIBS="$TEMP_LIBS" - CPPFLAGS="$TEMP_CPPFLAGS" - - fi -fi - -if test x$use_boost = xyes; then - -BOOST_LIBS="$BOOST_LDFLAGS $BOOST_SYSTEM_LIB $BOOST_FILESYSTEM_LIB $BOOST_THREAD_LIB" - - -dnl If boost (prior to 1.57) was built without c++11, it emulated scoped enums -dnl using c++98 constructs. Unfortunately, this implementation detail leaked into -dnl the abi. This was fixed in 1.57. - -dnl When building against that installed version using c++11, the headers pick up -dnl on the native c++11 scoped enum support and enable it, however it will fail to -dnl link. This can be worked around by disabling c++11 scoped enums if linking will -dnl fail. -dnl BOOST_NO_SCOPED_ENUMS was changed to BOOST_NO_CXX11_SCOPED_ENUMS in 1.51. 
- -TEMP_LIBS="$LIBS" -LIBS="$BOOST_LIBS $LIBS" -TEMP_CPPFLAGS="$CPPFLAGS" -CPPFLAGS="$CPPFLAGS $BOOST_CPPFLAGS" -AC_MSG_CHECKING([for mismatched boost c++11 scoped enums]) -AC_LINK_IFELSE([AC_LANG_PROGRAM([[ - #include - #include - #if !defined(BOOST_NO_SCOPED_ENUMS) && !defined(BOOST_NO_CXX11_SCOPED_ENUMS) && BOOST_VERSION < 105700 - #define BOOST_NO_SCOPED_ENUMS - #define BOOST_NO_CXX11_SCOPED_ENUMS - #define CHECK - #endif - #include - ]],[[ - #if defined(CHECK) - boost::filesystem::copy_file("foo", "bar"); - #else - choke; - #endif - ]])], - [AC_MSG_RESULT(mismatched); BOOST_CPPFLAGS="$BOOST_CPPFLAGS -DBOOST_NO_SCOPED_ENUMS -DBOOST_NO_CXX11_SCOPED_ENUMS"], [AC_MSG_RESULT(ok)]) -LIBS="$TEMP_LIBS" -CPPFLAGS="$TEMP_CPPFLAGS" - -fi - -if test x$use_pkgconfig = xyes; then - : dnl - m4_ifdef( - [PKG_CHECK_MODULES], - [ - if test x$use_qr != xno; then - BITCOIN_QT_CHECK([PKG_CHECK_MODULES([QR], [libqrencode], [have_qrencode=yes], [have_qrencode=no])]) - fi - if test x$build_bitcoin_cli$build_bitcoind$bitcoin_enable_qt$use_tests$use_bench != xnonononono; then - PKG_CHECK_MODULES([EVENT], [libevent >= 2.0.21], [use_libevent=yes], [AC_MSG_ERROR(libevent version 2.0.21 or greater not found.)]) - if test x$TARGET_OS != xwindows; then - PKG_CHECK_MODULES([EVENT_PTHREADS], [libevent_pthreads >= 2.0.21],, [AC_MSG_ERROR(libevent_pthreads version 2.0.21 or greater not found.)]) - fi - fi - - if test "x$use_zmq" = "xyes"; then - PKG_CHECK_MODULES([ZMQ],[libzmq >= 4], - [AC_DEFINE([ENABLE_ZMQ],[1],[Define to 1 to enable ZMQ functions])], - [AC_DEFINE([ENABLE_ZMQ],[0],[Define to 1 to enable ZMQ functions]) - AC_MSG_WARN([libzmq version 4.x or greater not found, disabling]) - use_zmq=no]) - else - AC_DEFINE_UNQUOTED([ENABLE_ZMQ],[0],[Define to 1 to enable ZMQ functions]) - fi - ] - ) -else - - if test x$build_bitcoin_cli$build_bitcoind$bitcoin_enable_qt$use_tests$use_bench != xnonononono; then - AC_CHECK_HEADER([event2/event.h], [use_libevent=yes], AC_MSG_ERROR(libevent headers missing),) - AC_CHECK_LIB([event],[main],EVENT_LIBS=-levent,AC_MSG_ERROR(libevent missing)) - if test x$TARGET_OS != xwindows; then - AC_CHECK_LIB([event_pthreads],[main],EVENT_PTHREADS_LIBS=-levent_pthreads,AC_MSG_ERROR(libevent_pthreads missing)) - fi - fi - - if test "x$use_zmq" = "xyes"; then - AC_CHECK_HEADER([zmq.h], - [AC_DEFINE([ENABLE_ZMQ],[1],[Define to 1 to enable ZMQ functions])], - [AC_MSG_WARN([zmq.h not found, disabling zmq support]) - use_zmq=no - AC_DEFINE([ENABLE_ZMQ],[0],[Define to 1 to enable ZMQ functions])]) - AC_CHECK_LIB([zmq],[zmq_ctx_shutdown],ZMQ_LIBS=-lzmq, - [AC_MSG_WARN([libzmq >= 4.0 not found, disabling zmq support]) - use_zmq=no - AC_DEFINE([ENABLE_ZMQ],[0],[Define to 1 to enable ZMQ functions])]) - else - AC_DEFINE_UNQUOTED([ENABLE_ZMQ],[0],[Define to 1 to enable ZMQ functions]) - fi - - if test "x$use_zmq" = "xyes"; then - dnl Assume libzmq was built for static linking - case $host in - *mingw*) - ZMQ_CFLAGS="$ZMQ_CFLAGS -DZMQ_STATIC" - ;; - esac - fi - - if test x$use_qr != xno; then - BITCOIN_QT_CHECK([AC_CHECK_LIB([qrencode], [main],[QR_LIBS=-lqrencode], [have_qrencode=no])]) - BITCOIN_QT_CHECK([AC_CHECK_HEADER([qrencode.h],, have_qrencode=no)]) - fi -fi - -dnl univalue check - -need_bundled_univalue=yes - -if test x$build_bitcoin_wallet$build_bitcoin_cli$build_bitcoin_tx$build_bitcoind$bitcoin_enable_qt$use_tests$use_bench = xnonononononono; then - need_bundled_univalue=no -else - -if test x$system_univalue != xno ; then - found_univalue=no - if test x$use_pkgconfig = xyes; then - : #NOP - 
m4_ifdef( - [PKG_CHECK_MODULES], - [ - PKG_CHECK_MODULES([UNIVALUE],[libunivalue >= 1.0.4],[found_univalue=yes],[true]) - ] - ) - else - AC_CHECK_HEADER([univalue.h],[ - AC_CHECK_LIB([univalue], [main],[ - UNIVALUE_LIBS=-lunivalue - found_univalue=yes - ],[true]) - ],[true]) - fi - - if test x$found_univalue = xyes ; then - system_univalue=yes - need_bundled_univalue=no - elif test x$system_univalue = xyes ; then - AC_MSG_ERROR([univalue not found]) - else - system_univalue=no - fi -fi - -if test x$need_bundled_univalue = xyes ; then - UNIVALUE_CFLAGS='-I$(srcdir)/univalue/include' - UNIVALUE_LIBS='univalue/libunivalue.la' -fi - -fi - -AM_CONDITIONAL([EMBEDDED_UNIVALUE],[test x$need_bundled_univalue = xyes]) -AC_SUBST(UNIVALUE_CFLAGS) -AC_SUBST(UNIVALUE_LIBS) - -AC_MSG_CHECKING([whether to build bitcoind]) -AM_CONDITIONAL([BUILD_BITCOIND], [test x$build_bitcoind = xyes]) -AC_MSG_RESULT($build_bitcoind) - -AC_MSG_CHECKING([whether to build bitcoin-cli]) -AM_CONDITIONAL([BUILD_BITCOIN_CLI], [test x$build_bitcoin_cli = xyes]) -AC_MSG_RESULT($build_bitcoin_cli) - -AC_MSG_CHECKING([whether to build bitcoin-tx]) -AM_CONDITIONAL([BUILD_BITCOIN_TX], [test x$build_bitcoin_tx = xyes]) -AC_MSG_RESULT($build_bitcoin_tx) - -AC_MSG_CHECKING([whether to build bitcoin-wallet]) -AM_CONDITIONAL([BUILD_BITCOIN_WALLET], [test x$build_bitcoin_wallet = xyes]) -AC_MSG_RESULT($build_bitcoin_wallet) - -AC_MSG_CHECKING([whether to build libraries]) -AM_CONDITIONAL([BUILD_BITCOIN_LIBS], [test x$build_bitcoin_libs = xyes]) -if test x$build_bitcoin_libs = xyes; then - AC_DEFINE(HAVE_CONSENSUS_LIB, 1, [Define this symbol if the consensus lib has been built]) - AC_CONFIG_FILES([libbitcoinconsensus.pc:libbitcoinconsensus.pc.in]) -fi -AC_MSG_RESULT($build_bitcoin_libs) - -AC_LANG_POP - -if test "x$use_ccache" != "xno"; then - AC_MSG_CHECKING(if ccache should be used) - if test x$CCACHE = x; then - if test "x$use_ccache" = "xyes"; then - AC_MSG_ERROR([ccache not found.]); - else - use_ccache=no - fi - else - use_ccache=yes - CC="$ac_cv_path_CCACHE $CC" - CXX="$ac_cv_path_CCACHE $CXX" - fi - AC_MSG_RESULT($use_ccache) -fi - -dnl enable wallet -AC_MSG_CHECKING([if wallet should be enabled]) -if test x$enable_wallet != xno; then - AC_MSG_RESULT(yes) - AC_DEFINE_UNQUOTED([ENABLE_WALLET],[1],[Define to 1 to enable wallet functions]) - -else - AC_MSG_RESULT(no) -fi - -dnl enable upnp support -AC_MSG_CHECKING([whether to build with support for UPnP]) -if test x$have_miniupnpc = xno; then - if test x$use_upnp = xyes; then - AC_MSG_ERROR("UPnP requested but cannot be built. 
Use --without-miniupnpc.") - fi - AC_MSG_RESULT(no) - use_upnp=no -else - if test x$use_upnp != xno; then - AC_MSG_RESULT(yes) - AC_MSG_CHECKING([whether to build with UPnP enabled by default]) - use_upnp=yes - upnp_setting=0 - if test x$use_upnp_default != xno; then - use_upnp_default=yes - upnp_setting=1 - fi - AC_MSG_RESULT($use_upnp_default) - AC_DEFINE_UNQUOTED([USE_UPNP],[$upnp_setting],[UPnP support not compiled if undefined, otherwise value (0 or 1) determines default state]) - if test x$TARGET_OS = xwindows; then - MINIUPNPC_CPPFLAGS="-DSTATICLIB -DMINIUPNP_STATICLIB" - fi - else - AC_MSG_RESULT(no) - fi -fi - -dnl these are only used when qt is enabled -BUILD_TEST_QT="" -if test x$bitcoin_enable_qt != xno; then - dnl enable dbus support - AC_MSG_CHECKING([whether to build GUI with support for D-Bus]) - if test x$bitcoin_enable_qt_dbus != xno; then - AC_DEFINE([USE_DBUS],[1],[Define if dbus support should be compiled in]) - fi - AC_MSG_RESULT($bitcoin_enable_qt_dbus) - - dnl enable qr support - AC_MSG_CHECKING([whether to build GUI with support for QR codes]) - if test x$have_qrencode = xno; then - if test x$use_qr = xyes; then - AC_MSG_ERROR([QR support requested but cannot be built. Use --without-qrencode]) - fi - use_qr=no - else - if test x$use_qr != xno; then - AC_DEFINE([USE_QRCODE],[1],[Define if QR support should be compiled in]) - use_qr=yes - fi - fi - AC_MSG_RESULT([$use_qr]) - - if test x$XGETTEXT = x; then - AC_MSG_WARN("xgettext is required to update qt translations") - fi - - AC_MSG_CHECKING([whether to build test_bitcoin-qt]) - if test x$use_gui_tests$bitcoin_enable_qt_test = xyesyes; then - AC_MSG_RESULT([yes]) - BUILD_TEST_QT="yes" - else - AC_MSG_RESULT([no]) - fi -fi - -AM_CONDITIONAL([ENABLE_ZMQ], [test "x$use_zmq" = "xyes"]) - -AC_MSG_CHECKING([whether to build test_bitcoin]) -if test x$use_tests = xyes; then - AC_MSG_RESULT([yes]) - BUILD_TEST="yes" -else - AC_MSG_RESULT([no]) - BUILD_TEST="" -fi - -AC_MSG_CHECKING([whether to reduce exports]) -if test x$use_reduce_exports = xyes; then - AC_MSG_RESULT([yes]) -else - AC_MSG_RESULT([no]) -fi - -if test x$build_bitcoin_wallet$build_bitcoin_cli$build_bitcoin_tx$build_bitcoin_libs$build_bitcoind$bitcoin_enable_qt$use_bench$use_tests = xnononononononono; then - AC_MSG_ERROR([No targets! 
Please specify at least one of: --with-utils --with-libs --with-daemon --with-gui --enable-bench or --enable-tests]) -fi - -AM_CONDITIONAL([TARGET_DARWIN], [test x$TARGET_OS = xdarwin]) -AM_CONDITIONAL([BUILD_DARWIN], [test x$BUILD_OS = xdarwin]) -AM_CONDITIONAL([TARGET_WINDOWS], [test x$TARGET_OS = xwindows]) -AM_CONDITIONAL([ENABLE_WALLET],[test x$enable_wallet = xyes]) -AM_CONDITIONAL([ENABLE_TESTS],[test x$BUILD_TEST = xyes]) -AM_CONDITIONAL([ENABLE_FUZZ],[test x$enable_fuzz = xyes]) -AM_CONDITIONAL([ENABLE_QT],[test x$bitcoin_enable_qt = xyes]) -AM_CONDITIONAL([ENABLE_QT_TESTS],[test x$BUILD_TEST_QT = xyes]) -AM_CONDITIONAL([ENABLE_BENCH],[test x$use_bench = xyes]) -AM_CONDITIONAL([USE_QRCODE], [test x$use_qr = xyes]) -AM_CONDITIONAL([USE_LCOV],[test x$use_lcov = xyes]) -AM_CONDITIONAL([USE_LIBEVENT],[test x$use_libevent = xyes]) -AM_CONDITIONAL([GLIBC_BACK_COMPAT],[test x$use_glibc_compat = xyes]) -AM_CONDITIONAL([HARDEN],[test x$use_hardening = xyes]) -AM_CONDITIONAL([ENABLE_SSE42],[test x$enable_sse42 = xyes]) -AM_CONDITIONAL([ENABLE_SSE41],[test x$enable_sse41 = xyes]) -AM_CONDITIONAL([ENABLE_AVX2],[test x$enable_avx2 = xyes]) -AM_CONDITIONAL([ENABLE_SHANI],[test x$enable_shani = xyes]) -AM_CONDITIONAL([ENABLE_ARM_CRC],[test x$enable_arm_crc = xyes]) -AM_CONDITIONAL([USE_ASM],[test x$use_asm = xyes]) -AM_CONDITIONAL([WORDS_BIGENDIAN],[test x$ac_cv_c_bigendian = xyes]) - -AC_DEFINE(CLIENT_VERSION_MAJOR, _CLIENT_VERSION_MAJOR, [Major version]) -AC_DEFINE(CLIENT_VERSION_MINOR, _CLIENT_VERSION_MINOR, [Minor version]) -AC_DEFINE(CLIENT_VERSION_REVISION, _CLIENT_VERSION_REVISION, [Build revision]) -AC_DEFINE(CLIENT_VERSION_BUILD, _CLIENT_VERSION_BUILD, [Version Build]) -AC_DEFINE(CLIENT_VERSION_IS_RELEASE, _CLIENT_VERSION_IS_RELEASE, [Version is release]) -AC_DEFINE(COPYRIGHT_YEAR, _COPYRIGHT_YEAR, [Copyright year]) -AC_DEFINE(COPYRIGHT_HOLDERS, "_COPYRIGHT_HOLDERS", [Copyright holder(s) before %s replacement]) -AC_DEFINE(COPYRIGHT_HOLDERS_SUBSTITUTION, "_COPYRIGHT_HOLDERS_SUBSTITUTION", [Replacement for %s in copyright holders string]) -define(_COPYRIGHT_HOLDERS_FINAL, [patsubst(_COPYRIGHT_HOLDERS, [%s], [_COPYRIGHT_HOLDERS_SUBSTITUTION])]) -AC_DEFINE(COPYRIGHT_HOLDERS_FINAL, "_COPYRIGHT_HOLDERS_FINAL", [Copyright holder(s)]) -AC_SUBST(CLIENT_VERSION_MAJOR, _CLIENT_VERSION_MAJOR) -AC_SUBST(CLIENT_VERSION_MINOR, _CLIENT_VERSION_MINOR) -AC_SUBST(CLIENT_VERSION_REVISION, _CLIENT_VERSION_REVISION) -AC_SUBST(CLIENT_VERSION_BUILD, _CLIENT_VERSION_BUILD) -AC_SUBST(CLIENT_VERSION_IS_RELEASE, _CLIENT_VERSION_IS_RELEASE) -AC_SUBST(COPYRIGHT_YEAR, _COPYRIGHT_YEAR) -AC_SUBST(COPYRIGHT_HOLDERS, "_COPYRIGHT_HOLDERS") -AC_SUBST(COPYRIGHT_HOLDERS_SUBSTITUTION, "_COPYRIGHT_HOLDERS_SUBSTITUTION") -AC_SUBST(COPYRIGHT_HOLDERS_FINAL, "_COPYRIGHT_HOLDERS_FINAL") -AC_SUBST(BITCOIN_DAEMON_NAME) -AC_SUBST(BITCOIN_GUI_NAME) -AC_SUBST(BITCOIN_CLI_NAME) -AC_SUBST(BITCOIN_TX_NAME) -AC_SUBST(BITCOIN_WALLET_TOOL_NAME) - -AC_SUBST(RELDFLAGS) -AC_SUBST(DEBUG_CPPFLAGS) -AC_SUBST(WARN_CXXFLAGS) -AC_SUBST(NOWARN_CXXFLAGS) -AC_SUBST(DEBUG_CXXFLAGS) -AC_SUBST(COMPAT_LDFLAGS) -AC_SUBST(ERROR_CXXFLAGS) -AC_SUBST(GPROF_CXXFLAGS) -AC_SUBST(GPROF_LDFLAGS) -AC_SUBST(HARDENED_CXXFLAGS) -AC_SUBST(HARDENED_CPPFLAGS) -AC_SUBST(HARDENED_LDFLAGS) -AC_SUBST(PIC_FLAGS) -AC_SUBST(PIE_FLAGS) -AC_SUBST(SANITIZER_CXXFLAGS) -AC_SUBST(SANITIZER_LDFLAGS) -AC_SUBST(SSE42_CXXFLAGS) -AC_SUBST(SSE41_CXXFLAGS) -AC_SUBST(AVX2_CXXFLAGS) -AC_SUBST(SHANI_CXXFLAGS) -AC_SUBST(ARM_CRC_CXXFLAGS) -AC_SUBST(LIBTOOL_APP_LDFLAGS) -AC_SUBST(USE_UPNP) 
-AC_SUBST(USE_QRCODE) -AC_SUBST(BOOST_LIBS) -AC_SUBST(TESTDEFS) -AC_SUBST(MINIUPNPC_CPPFLAGS) -AC_SUBST(MINIUPNPC_LIBS) -AC_SUBST(EVENT_LIBS) -AC_SUBST(EVENT_PTHREADS_LIBS) -AC_SUBST(ZMQ_LIBS) -AC_SUBST(QR_LIBS) -AC_SUBST(HAVE_GMTIME_R) -AC_SUBST(HAVE_FDATASYNC) -AC_SUBST(HAVE_FULLFSYNC) -AC_SUBST(HAVE_O_CLOEXEC) -AC_SUBST(HAVE_BUILTIN_PREFETCH) -AC_SUBST(HAVE_MM_PREFETCH) -AC_SUBST(HAVE_STRONG_GETAUXVAL) -AC_SUBST(HAVE_WEAK_GETAUXVAL) -AC_CONFIG_FILES([Makefile src/Makefile doc/man/Makefile share/setup.nsi share/qt/Info.plist test/config.ini]) -AC_CONFIG_FILES([contrib/devtools/split-debug.sh],[chmod +x contrib/devtools/split-debug.sh]) -AM_COND_IF([HAVE_DOXYGEN], [AC_CONFIG_FILES([doc/Doxyfile])]) -AC_CONFIG_LINKS([contrib/filter-lcov.py:contrib/filter-lcov.py]) -AC_CONFIG_LINKS([test/functional/test_runner.py:test/functional/test_runner.py]) -AC_CONFIG_LINKS([test/fuzz/test_runner.py:test/fuzz/test_runner.py]) -AC_CONFIG_LINKS([test/util/bitcoin-util-test.py:test/util/bitcoin-util-test.py]) -AC_CONFIG_LINKS([test/util/rpcauth-test.py:test/util/rpcauth-test.py]) - -dnl boost's m4 checks do something really nasty: they export these vars. As a -dnl result, they leak into secp256k1's configure and crazy things happen. -dnl Until this is fixed upstream and we've synced, we'll just un-export them. -CPPFLAGS_TEMP="$CPPFLAGS" -unset CPPFLAGS -CPPFLAGS="$CPPFLAGS_TEMP" - -LDFLAGS_TEMP="$LDFLAGS" -unset LDFLAGS -LDFLAGS="$LDFLAGS_TEMP" - -LIBS_TEMP="$LIBS" -unset LIBS -LIBS="$LIBS_TEMP" - -PKGCONFIG_PATH_TEMP="$PKG_CONFIG_PATH" -unset PKG_CONFIG_PATH -PKG_CONFIG_PATH="$PKGCONFIG_PATH_TEMP" - -PKGCONFIG_LIBDIR_TEMP="$PKG_CONFIG_LIBDIR" -unset PKG_CONFIG_LIBDIR -PKG_CONFIG_LIBDIR="$PKGCONFIG_LIBDIR_TEMP" - -if test x$need_bundled_univalue = xyes; then - AC_CONFIG_SUBDIRS([src/univalue]) -fi - -ac_configure_args="${ac_configure_args} --disable-shared --with-pic --enable-benchmark=no --with-bignum=no --enable-module-recovery --disable-jni" -AC_CONFIG_SUBDIRS([src/secp256k1]) - -AC_OUTPUT - -dnl Replace the BUILDDIR path with the correct Windows path if compiling on Native Windows -case ${OS} in - *Windows*) - sed 's/BUILDDIR="\/\([[a-z]]\)/BUILDDIR="\1:/' test/config.ini > test/config-2.ini - mv test/config-2.ini test/config.ini - ;; -esac - -echo -echo "Options used to compile and link:" -echo " with wallet = $enable_wallet" -echo " with gui / qt = $bitcoin_enable_qt" -if test x$bitcoin_enable_qt != xno; then - echo " with qr = $use_qr" -fi -echo " with zmq = $use_zmq" -echo " with test = $use_tests" -if test x$use_tests != xno; then - echo " with fuzz = $enable_fuzz" -fi -echo " with bench = $use_bench" -echo " with upnp = $use_upnp" -echo " use asm = $use_asm" -echo " sanitizers = $use_sanitizers" -echo " debug enabled = $enable_debug" -echo " gprof enabled = $enable_gprof" -echo " werror = $enable_werror" -echo -echo " target os = $TARGET_OS" -echo " build os = $BUILD_OS" -echo -echo " CC = $CC" -echo " CFLAGS = $CFLAGS" -echo " CPPFLAGS = $DEBUG_CPPFLAGS $HARDENED_CPPFLAGS $CPPFLAGS" -echo " CXX = $CXX" -echo " CXXFLAGS = $DEBUG_CXXFLAGS $HARDENED_CXXFLAGS $WARN_CXXFLAGS $NOWARN_CXXFLAGS $ERROR_CXXFLAGS $GPROF_CXXFLAGS $CXXFLAGS" -echo " LDFLAGS = $PTHREAD_CFLAGS $HARDENED_LDFLAGS $GPROF_LDFLAGS $LDFLAGS" -echo " ARFLAGS = $ARFLAGS" -echo diff --git a/contrib/README.md b/contrib/README.md index 361975baa4915..f375993ac4b76 100644 --- a/contrib/README.md +++ b/contrib/README.md @@ -26,23 +26,20 @@ The [Debian](/contrib/debian) subfolder contains the copyright file. 
All other packaging related files can be found in the [bitcoin-core/packaging](https://github.com/bitcoin-core/packaging) repository. -### [Gitian-descriptors](/contrib/gitian-descriptors) ### -Files used during the gitian build process. For more information about gitian, see the [the Bitcoin Core documentation repository](https://github.com/bitcoin-core/docs). - -### [Gitian-keys](/contrib/gitian-keys) -PGP keys used for signing Bitcoin Core [Gitian release](/doc/release-process.md) results. - ### [MacDeploy](/contrib/macdeploy) ### Scripts and notes for Mac builds. -### [Gitian-build](/contrib/gitian-build.py) ### -Script for running full Gitian builds. - Test and Verify Tools --------------------- ### [TestGen](/contrib/testgen) ### Utilities to generate test vectors for the data-driven Bitcoin tests. -### [Verify Binaries](/contrib/verifybinaries) ### +### [Verify-Binaries](/contrib/verify-binaries) ### This script attempts to download and verify the signature file SHA256SUMS.asc from bitcoin.org. + +Command Line Tools +--------------------- + +### [Completions](/contrib/completions) ### +Shell completions for bash and fish. diff --git a/contrib/asmap/README.md b/contrib/asmap/README.md new file mode 100644 index 0000000000000..5fab4b285e213 --- /dev/null +++ b/contrib/asmap/README.md @@ -0,0 +1,12 @@ +# ASMap Tool + +Tool for performing various operations on textual and binary asmap files, +particularly encoding/compressing the raw data to the binary format that can +be used in Bitcoin Core with the `-asmap` option. + +Example usage: +``` +python3 asmap-tool.py encode /path/to/input.file /path/to/output.file +python3 asmap-tool.py decode /path/to/input.file /path/to/output.file +python3 asmap-tool.py diff /path/to/first.file /path/to/second.file +``` diff --git a/contrib/asmap/asmap-tool.py b/contrib/asmap/asmap-tool.py new file mode 100755 index 0000000000000..33a380a2e7df0 --- /dev/null +++ b/contrib/asmap/asmap-tool.py @@ -0,0 +1,197 @@ +#!/usr/bin/env python3 +# Copyright (c) 2022 Pieter Wuille +# Distributed under the MIT software license, see the accompanying +# file LICENSE or http://www.opensource.org/licenses/mit-license.php. 
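The README above shows the command-line entry points; as an illustrative aside (not taken from the patch itself), the sketch below drives the same encode/decode round trip directly through the `asmap` Python module that this patch adds, assuming `contrib/asmap/asmap.py` is on the import path. The networks and AS numbers are documentation placeholders.

```python
# Illustrative sketch only: round-trip two text-style entries through the
# asmap module (assumes contrib/asmap/asmap.py is importable).
import ipaddress

import asmap

entries = [
    (asmap.net_to_prefix(ipaddress.ip_network("192.0.2.0/24")), 64496),   # "192.0.2.0/24 AS64496"
    (asmap.net_to_prefix(ipaddress.ip_network("2001:db8::/32")), 64497),  # "2001:db8::/32 AS64497"
]

state = asmap.ASMap()
state.update_multi(entries)              # longer prefixes take precedence
blob = state.to_binary(fill=False)       # compact binary form, as written by "encode"

decoded = asmap.ASMap.from_binary(blob)  # what "decode" starts from
for prefix, asn in decoded.to_entries(fill=False, overlapping=True):
    print(f"{asmap.prefix_to_net(prefix)} AS{asn}")
```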
+ +import argparse +import sys +import ipaddress +import json +import math +from collections import defaultdict + +import asmap + +def load_file(input_file): + try: + contents = input_file.read() + except OSError as err: + sys.exit(f"Input file '{input_file.name}' cannot be read: {err.strerror}.") + try: + bin_asmap = asmap.ASMap.from_binary(contents) + except ValueError: + bin_asmap = None + txt_error = None + entries = None + try: + txt_contents = str(contents, encoding="utf-8") + except UnicodeError: + txt_error = "invalid UTF-8" + txt_contents = None + if txt_contents is not None: + entries = [] + for line in txt_contents.split("\n"): + idx = line.find('#') + if idx >= 0: + line = line[:idx] + line = line.lstrip(' ').rstrip(' \t\r\n') + if len(line) == 0: + continue + fields = line.split(' ') + if len(fields) != 2: + txt_error = f"unparseable line '{line}'" + entries = None + break + prefix, asn = fields + if len(asn) <= 2 or asn[:2] != "AS" or any(c < '0' or c > '9' for c in asn[2:]): + txt_error = f"invalid ASN '{asn}'" + entries = None + break + try: + net = ipaddress.ip_network(prefix) + except ValueError: + txt_error = f"invalid network '{prefix}'" + entries = None + break + entries.append((asmap.net_to_prefix(net), int(asn[2:]))) + if entries is not None and bin_asmap is not None and len(contents) > 0: + sys.exit(f"Input file '{input_file.name}' is ambiguous.") + if entries is not None: + state = asmap.ASMap() + state.update_multi(entries) + return state + if bin_asmap is not None: + return bin_asmap + sys.exit(f"Input file '{input_file.name}' is neither a valid binary asmap file nor valid text input ({txt_error}).") + + +def save_binary(output_file, state, fill): + contents = state.to_binary(fill=fill) + try: + output_file.write(contents) + output_file.close() + except OSError as err: + sys.exit(f"Output file '{output_file.name}' cannot be written to: {err.strerror}.") + +def save_text(output_file, state, fill, overlapping): + for prefix, asn in state.to_entries(fill=fill, overlapping=overlapping): + net = asmap.prefix_to_net(prefix) + try: + print(f"{net} AS{asn}", file=output_file) + except OSError as err: + sys.exit(f"Output file '{output_file.name}' cannot be written to: {err.strerror}.") + try: + output_file.close() + except OSError as err: + sys.exit(f"Output file '{output_file.name}' cannot be written to: {err.strerror}.") + +def main(): + parser = argparse.ArgumentParser(description="Tool for performing various operations on textual and binary asmap files.") + subparsers = parser.add_subparsers(title="valid subcommands", dest="subcommand") + + parser_encode = subparsers.add_parser("encode", help="convert asmap data to binary format") + parser_encode.add_argument('-f', '--fill', dest="fill", default=False, action="store_true", + help="permit reassigning undefined network ranges arbitrarily to reduce size") + parser_encode.add_argument('infile', nargs='?', type=argparse.FileType('rb'), default=sys.stdin.buffer, + help="input asmap file (text or binary); default is stdin") + parser_encode.add_argument('outfile', nargs='?', type=argparse.FileType('wb'), default=sys.stdout.buffer, + help="output binary asmap file; default is stdout") + + parser_decode = subparsers.add_parser("decode", help="convert asmap data to text format") + parser_decode.add_argument('-f', '--fill', dest="fill", default=False, action="store_true", + help="permit reassigning undefined network ranges arbitrarily to reduce length") + parser_decode.add_argument('-n', '--nonoverlapping', dest="overlapping", 
default=True, action="store_false",
+                               help="output strictly non-overlapping network ranges (increases output size)")
+    parser_decode.add_argument('infile', nargs='?', type=argparse.FileType('rb'), default=sys.stdin.buffer,
+                               help="input asmap file (text or binary); default is stdin")
+    parser_decode.add_argument('outfile', nargs='?', type=argparse.FileType('w'), default=sys.stdout,
+                               help="output text file; default is stdout")
+
+    parser_diff = subparsers.add_parser("diff", help="compute the difference between two asmap files")
+    parser_diff.add_argument('-i', '--ignore-unassigned', dest="ignore_unassigned", default=False, action="store_true",
+                             help="ignore unassigned ranges in the first input (useful when second input is filled)")
+    parser_diff.add_argument('infile1', type=argparse.FileType('rb'),
+                             help="first file to compare (text or binary)")
+    parser_diff.add_argument('infile2', type=argparse.FileType('rb'),
+                             help="second file to compare (text or binary)")
+
+    parser_diff_addrs = subparsers.add_parser("diff_addrs",
+                                              help="compute difference between two asmap files for a set of addresses")
+    parser_diff_addrs.add_argument('-s', '--show-addresses', dest="show_addresses", default=False, action="store_true",
+                                   help="include reassigned addresses in the output")
+    parser_diff_addrs.add_argument("infile1", type=argparse.FileType("rb"),
+                                   help="first file to compare (text or binary)")
+    parser_diff_addrs.add_argument("infile2", type=argparse.FileType("rb"),
+                                   help="second file to compare (text or binary)")
+    parser_diff_addrs.add_argument("addrs_file", type=argparse.FileType("r"),
+                                   help="address file containing getnodeaddresses output to use in the comparison "
+                                        "(make sure to set the count parameter to zero to get all node addresses, "
+                                        "e.g. 'bitcoin-cli getnodeaddresses 0 > addrs.json')")
+    args = parser.parse_args()
+    if args.subcommand is None:
+        parser.print_help()
+    elif args.subcommand == "encode":
+        state = load_file(args.infile)
+        save_binary(args.outfile, state, fill=args.fill)
+    elif args.subcommand == "decode":
+        state = load_file(args.infile)
+        save_text(args.outfile, state, fill=args.fill, overlapping=args.overlapping)
+    elif args.subcommand == "diff":
+        state1 = load_file(args.infile1)
+        state2 = load_file(args.infile2)
+        ipv4_changed = 0
+        ipv6_changed = 0
+        for prefix, old_asn, new_asn in state1.diff(state2):
+            if args.ignore_unassigned and old_asn == 0:
+                continue
+            net = asmap.prefix_to_net(prefix)
+            if isinstance(net, ipaddress.IPv4Network):
+                ipv4_changed += 1 << (32 - net.prefixlen)
+            elif isinstance(net, ipaddress.IPv6Network):
+                ipv6_changed += 1 << (128 - net.prefixlen)
+            if new_asn == 0:
+                print(f"# {net} was AS{old_asn}")
+            elif old_asn == 0:
+                print(f"{net} AS{new_asn} # was unassigned")
+            else:
+                print(f"{net} AS{new_asn} # was AS{old_asn}")
+        ipv4_change_str = "" if ipv4_changed == 0 else f" (2^{math.log2(ipv4_changed):.2f})"
+        ipv6_change_str = "" if ipv6_changed == 0 else f" (2^{math.log2(ipv6_changed):.2f})"
+
+        print(
+            f"# {ipv4_changed}{ipv4_change_str} IPv4 addresses changed; "
+            f"{ipv6_changed}{ipv6_change_str} IPv6 addresses changed"
+        )
+    elif args.subcommand == "diff_addrs":
+        state1 = load_file(args.infile1)
+        state2 = load_file(args.infile2)
+        address_info = json.load(args.addrs_file)
+        addrs = {a["address"] for a in address_info if a["network"] in ["ipv4", "ipv6"]}
+        reassignments = defaultdict(list)
+        for addr in addrs:
+            net = ipaddress.ip_network(addr)
+            prefix = asmap.net_to_prefix(net)
+            old_asn = state1.lookup(prefix)
+            new_asn = 
state2.lookup(prefix) + if new_asn != old_asn: + reassignments[(old_asn, new_asn)].append(addr) + reassignments = sorted(reassignments.items(), key=lambda item: len(item[1]), reverse=True) + num_reassignment_type = defaultdict(int) + for (old_asn, new_asn), reassigned_addrs in reassignments: + num_reassigned = len(reassigned_addrs) + num_reassignment_type[(bool(old_asn), bool(new_asn))] += num_reassigned + old_asn_str = f"AS{old_asn}" if old_asn else "unassigned" + new_asn_str = f"AS{new_asn}" if new_asn else "unassigned" + opt = ": " + ", ".join(reassigned_addrs) if args.show_addresses else "" + print(f"{num_reassigned} address(es) reassigned from {old_asn_str} to {new_asn_str}{opt}") + num_reassignments = sum(len(addrs) for _, addrs in reassignments) + share = num_reassignments / len(addrs) if len(addrs) > 0 else 0 + print(f"Summary: {num_reassignments:,} ({share:.2%}) of {len(addrs):,} addresses were reassigned " + f"(migrations={num_reassignment_type[True, True]}, assignments={num_reassignment_type[False, True]}, " + f"unassignments={num_reassignment_type[True, False]})") + else: + parser.print_help() + sys.exit("No command provided.") + +if __name__ == '__main__': + main() diff --git a/contrib/asmap/asmap.py b/contrib/asmap/asmap.py new file mode 100644 index 0000000000000..2ae84a3f3119b --- /dev/null +++ b/contrib/asmap/asmap.py @@ -0,0 +1,818 @@ +# Copyright (c) 2022 Pieter Wuille +# Distributed under the MIT software license, see the accompanying +# file LICENSE or http://www.opensource.org/licenses/mit-license.php. + +""" +This module provides the ASNEntry and ASMap classes. +""" + +import copy +import ipaddress +import random +import unittest +from collections.abc import Callable, Iterable +from enum import Enum +from functools import total_ordering +from typing import Optional, Union, overload + +def net_to_prefix(net: Union[ipaddress.IPv4Network,ipaddress.IPv6Network]) -> list[bool]: + """ + Convert an IPv4 or IPv6 network to a prefix represented as a list of bits. + + IPv4 ranges are remapped to their IPv4-mapped IPv6 range (::ffff:0:0/96). + """ + num_bits = net.prefixlen + netrange = int.from_bytes(net.network_address.packed, 'big') + + # Map an IPv4 prefix into IPv6 space. + if isinstance(net, ipaddress.IPv4Network): + num_bits += 96 + netrange += 0xffff00000000 + + # Strip unused bottom bits. + assert (netrange & ((1 << (128 - num_bits)) - 1)) == 0 + return [((netrange >> (127 - i)) & 1) != 0 for i in range(num_bits)] + +def prefix_to_net(prefix: list[bool]) -> Union[ipaddress.IPv4Network,ipaddress.IPv6Network]: + """The reverse operation of net_to_prefix.""" + # Convert to number + netrange = sum(b << (127 - i) for i, b in enumerate(prefix)) + num_bits = len(prefix) + assert num_bits <= 128 + + # Return IPv4 range if in ::ffff:0:0/96 + if num_bits >= 96 and (netrange >> 32) == 0xffff: + return ipaddress.IPv4Network((netrange & 0xffffffff, num_bits - 96), True) + + # Return IPv6 range otherwise. + return ipaddress.IPv6Network((netrange, num_bits), True) + +# Shortcut for (prefix, ASN) entries. +ASNEntry = tuple[list[bool], int] + +# Shortcut for (prefix, old ASN, new ASN) entries. +ASNDiff = tuple[list[bool], int, int] + +class _VarLenCoder: + """ + A class representing a custom variable-length binary encoder/decoder for + integers. Each object represents a different coder, with different parameters + minval and clsbits. + + The encoding is easiest to describe using an example. Let's say minval=100 and + clsbits=[4,2,2,3]. 
In that case: + - x in [100..115]: encoded as [0] + [4-bit BE encoding of (x-100)]. + - x in [116..119]: encoded as [1,0] + [2-bit BE encoding of (x-116)]. + - x in [120..123]: encoded as [1,1,0] + [2-bit BE encoding of (x-120)]. + - x in [124..131]: encoded as [1,1,1] + [3-bit BE encoding of (x-124)]. + + In general, every number is encoded as: + - First, k "1"-bits, where k is the class the number falls in (there is one class + per element of clsbits). + - Then, a "0"-bit, unless k is the highest class, in which case there is nothing. + - Lastly, clsbits[k] bits encoding in big endian the position in its class that + number falls into. + - Every class k consists of 2^clsbits[k] consecutive integers. k=0 starts at minval, + other classes start one past the last element of the class before it. + """ + + def __init__(self, minval: int, clsbits: list[int]): + """Construct a new _VarLenCoder.""" + self._minval = minval + self._clsbits = clsbits + self._maxval = minval + sum(1 << b for b in clsbits) - 1 + + def can_encode(self, val: int) -> bool: + """Check whether value val is in the range this coder supports.""" + return self._minval <= val <= self._maxval + + def encode(self, val: int, ret: list[int]) -> None: + """Append encoding of val onto integer list ret.""" + + assert self._minval <= val <= self._maxval + val -= self._minval + bits = 0 + for k, bits in enumerate(self._clsbits): + if val >> bits: + # If the value will not fit in class k, subtract its range from v, + # emit a "1" bit and continue with the next class. + val -= 1 << bits + ret.append(1) + else: + if k + 1 < len(self._clsbits): + # Unless we're in the last class, emit a "0" bit. + ret.append(0) + break + # And then encode v (now the position within the class) in big endian. + ret.extend((val >> (bits - 1 - b)) & 1 for b in range(bits)) + + def encode_size(self, val: int) -> int: + """Compute how many bits are needed to encode val.""" + assert self._minval <= val <= self._maxval + val -= self._minval + ret = 0 + bits = 0 + for k, bits in enumerate(self._clsbits): + if val >> bits: + val -= 1 << bits + ret += 1 + else: + ret += k + 1 < len(self._clsbits) + break + return ret + bits + + def decode(self, stream, bitpos) -> tuple[int,int]: + """Decode a number starting at bitpos in stream, returning value and new bitpos.""" + val = self._minval + bits = 0 + for k, bits in enumerate(self._clsbits): + bit = 0 + if k + 1 < len(self._clsbits): + bit = stream[bitpos] + bitpos += 1 + if not bit: + break + val += 1 << bits + for i in range(bits): + bit = stream[bitpos] + bitpos += 1 + val += bit << (bits - 1 - i) + return val, bitpos + +# Variable-length encoders used in the binary asmap format. +_CODER_INS = _VarLenCoder(0, [0, 0, 1]) +_CODER_ASN = _VarLenCoder(1, list(range(15, 25))) +_CODER_MATCH = _VarLenCoder(2, list(range(1, 9))) +_CODER_JUMP = _VarLenCoder(17, list(range(5, 31))) + +class _Instruction(Enum): + """One instruction in the binary asmap format.""" + # A return instruction, encoded as [0], returns a constant ASN. It is followed by + # an integer using the ASN encoding. + RETURN = 0 + # A jump instruction, encoded as [1,0] inspects the next unused bit in the input + # and either continues execution (if 0), or skips a specified number of bits (if 1). + # It is followed by an integer, and then two subprograms. The integer uses jump encoding + # and corresponds to the length of the first subprogram (so it can be skipped). 
+ JUMP = 1 + # A match instruction, encoded as [1,1,0] inspects 1 or more of the next unused bits + # in the input with its argument. If they all match, execution continues. If they do + # not, failure is returned. If a default instruction has been executed before, instead + # of failure the default instruction's argument is returned. It is followed by an + # integer in match encoding, and a subprogram. That value is at least 2 bits and at + # most 9 bits. An n-bit value signifies matching (n-1) bits in the input with the lower + # (n-1) bits in the match value. + MATCH = 2 + # A default instruction, encoded as [1,1,1] sets the default variable to its argument, + # and continues execution. It is followed by an integer in ASN encoding, and a subprogram. + DEFAULT = 3 + # Not an actual instruction, but a way to encode the empty program that fails. In the + # encoder, it is used more generally to represent the failure case inside MATCH instructions, + # which may (if used inside the context of a DEFAULT instruction) actually correspond to + # a successful return. In this usage, they're always converted to an actual MATCH or RETURN + # before the top level is reached (see make_default below). + END = 4 + +class _BinNode: + """A class representing a (node of) the parsed binary asmap format.""" + + @overload + def __init__(self, ins: _Instruction): ... + @overload + def __init__(self, ins: _Instruction, arg1: int): ... + @overload + def __init__(self, ins: _Instruction, arg1: "_BinNode", arg2: "_BinNode"): ... + @overload + def __init__(self, ins: _Instruction, arg1: int, arg2: "_BinNode"): ... + + def __init__(self, ins: _Instruction, arg1=None, arg2=None): + """ + Construct a new asmap node. Possibilities are: + - _BinNode(_Instruction.RETURN, asn) + - _BinNode(_Instruction.JUMP, node_0, node_1) + - _BinNode(_Instruction.MATCH, val, node) + - _BinNode(_Instruction.DEFAULT, asn, node) + - _BinNode(_Instruction.END) + """ + self.ins = ins + self.arg1 = arg1 + self.arg2 = arg2 + if ins == _Instruction.RETURN: + assert isinstance(arg1, int) + assert arg2 is None + self.size = _CODER_INS.encode_size(ins.value) + _CODER_ASN.encode_size(arg1) + elif ins == _Instruction.JUMP: + assert isinstance(arg1, _BinNode) + assert isinstance(arg2, _BinNode) + self.size = (_CODER_INS.encode_size(ins.value) + _CODER_JUMP.encode_size(arg1.size) + + arg1.size + arg2.size) + elif ins == _Instruction.DEFAULT: + assert isinstance(arg1, int) + assert isinstance(arg2, _BinNode) + self.size = _CODER_INS.encode_size(ins.value) + _CODER_ASN.encode_size(arg1) + arg2.size + elif ins == _Instruction.MATCH: + assert isinstance(arg1, int) + assert isinstance(arg2, _BinNode) + self.size = (_CODER_INS.encode_size(ins.value) + _CODER_MATCH.encode_size(arg1) + + arg2.size) + elif ins == _Instruction.END: + assert arg1 is None + assert arg2 is None + self.size = 0 + else: + assert False + + @staticmethod + def make_end() -> "_BinNode": + """Constructor for a _BinNode with just an END instruction.""" + return _BinNode(_Instruction.END) + + @staticmethod + def make_leaf(val: int) -> "_BinNode": + """Constructor for a _BinNode of just a RETURN instruction.""" + assert val is not None and val > 0 + return _BinNode(_Instruction.RETURN, val) + + @staticmethod + def make_branch(node0: "_BinNode", node1: "_BinNode") -> "_BinNode": + """ + Construct a _BinNode corresponding to running either the node0 or node1 subprogram, + based on the next input bit. 
It exploits shortcuts that are possible in the encoding, + and uses either a JUMP, MATCH, or END instruction. + """ + if node0.ins == _Instruction.END and node1.ins == _Instruction.END: + return node0 + if node0.ins == _Instruction.END: + if node1.ins == _Instruction.MATCH and node1.arg1 <= 0xFF: + return _BinNode(node1.ins, node1.arg1 + (1 << node1.arg1.bit_length()), node1.arg2) + return _BinNode(_Instruction.MATCH, 3, node1) + if node1.ins == _Instruction.END: + if node0.ins == _Instruction.MATCH and node0.arg1 <= 0xFF: + return _BinNode(node0.ins, node0.arg1 + (1 << (node0.arg1.bit_length() - 1)), + node0.arg2) + return _BinNode(_Instruction.MATCH, 2, node0) + return _BinNode(_Instruction.JUMP, node0, node1) + + @staticmethod + def make_default(val: int, sub: "_BinNode") -> "_BinNode": + """ + Construct a _BinNode that corresponds to the specified subprogram, with the specified + default value. It exploits shortcuts that are possible in the encoding, and will use + either a DEFAULT or a RETURN instruction.""" + assert val is not None and val > 0 + if sub.ins == _Instruction.END: + return _BinNode(_Instruction.RETURN, val) + if sub.ins in (_Instruction.RETURN, _Instruction.DEFAULT): + return sub + return _BinNode(_Instruction.DEFAULT, val, sub) + +@total_ordering +class ASMap: + """ + A class whose objects represent a mapping from subnets to ASNs. + + Internally the mapping is stored as a binary trie, but can be converted + from/to a list of ASNEntry objects, and from/to the binary asmap file format. + + In the trie representation, nodes are represented as bare lists for efficiency + and ease of manipulation: + - [0] means an unassigned subnet (no ASN mapping for it is present) + - [int] means a subnet mapped entirely to the specified ASN. + - [node,node] means a subnet whose lower half and upper half have different + - mappings, represented by new trie nodes. + """ + + def update(self, prefix: list[bool], asn: int) -> None: + """Update this ASMap object to map prefix to the specified asn.""" + assert asn == 0 or _CODER_ASN.can_encode(asn) + + def recurse(node: list, offset: int) -> None: + if offset == len(prefix): + # Reached the end of prefix; overwrite this node. + node.clear() + node.append(asn) + return + if len(node) == 1: + # Need to descend into a leaf node; split it up. + oldasn = node[0] + node.clear() + node.append([oldasn]) + node.append([oldasn]) + # Descend into the node. + recurse(node[prefix[offset]], offset + 1) + # If the result is two identical leaf children, merge them. + if len(node[0]) == 1 and len(node[1]) == 1 and node[0] == node[1]: + oldasn = node[0][0] + node.clear() + node.append(oldasn) + recurse(self._trie, 0) + + def update_multi(self, entries: list[tuple[list[bool], int]]) -> None: + """Apply multiple update operations, where longer prefixes take precedence.""" + entries.sort(key=lambda entry: len(entry[0])) + for prefix, asn in entries: + self.update(prefix, asn) + + def _set_trie(self, trie) -> None: + """Set trie directly. 
Internal use only.""" + def recurse(node: list) -> None: + if len(node) < 2: + return + recurse(node[0]) + recurse(node[1]) + if len(node[0]) == 2: + return + if node[0] == node[1]: + if len(node[0]) == 0: + node.clear() + else: + asn = node[0][0] + node.clear() + node.append(asn) + recurse(trie) + self._trie = trie + + def __init__(self, entries: Optional[Iterable[ASNEntry]] = None) -> None: + """Construct an ASMap object from an optional list of entries.""" + self._trie = [0] + if entries is not None: + def entry_key(entry): + """Sort function that places shorter prefixes first.""" + prefix, asn = entry + return len(prefix), prefix, asn + for prefix, asn in sorted(entries, key=entry_key): + self.update(prefix, asn) + + def lookup(self, prefix: list[bool]) -> Optional[int]: + """Look up a prefix. Returns ASN, or 0 if unassigned, or None if indeterminate.""" + node = self._trie + for bit in prefix: + if len(node) == 1: + break + node = node[bit] + if len(node) == 1: + return node[0] + return None + + def _to_entries_flat(self, fill: bool = False) -> list[ASNEntry]: + """Convert an ASMap object to a list of non-overlapping (prefix, asn) objects.""" + prefix : list[bool] = [] + + def recurse(node: list) -> list[ASNEntry]: + ret = [] + if len(node) == 1: + if node[0] > 0: + ret = [(list(prefix), node[0])] + elif len(node) == 2: + prefix.append(False) + ret = recurse(node[0]) + prefix[-1] = True + ret += recurse(node[1]) + prefix.pop() + if fill and len(ret) > 1: + asns = set(x[1] for x in ret) + if len(asns) == 1: + ret = [(list(prefix), list(asns)[0])] + return ret + return recurse(self._trie) + + def _to_entries_minimal(self, fill: bool = False) -> list[ASNEntry]: + """Convert a trie to a minimal list of ASNEntry objects, exploiting overlap.""" + prefix : list[bool] = [] + + def recurse(node: list) -> (tuple[dict[Optional[int], list[ASNEntry]], bool]): + if len(node) == 1 and node[0] == 0: + return {None if fill else 0: []}, True + if len(node) == 1: + return {node[0]: [], None: [(list(prefix), node[0])]}, False + ret: dict[Optional[int], list[ASNEntry]] = {} + prefix.append(False) + left, lhole = recurse(node[0]) + prefix[-1] = True + right, rhole = recurse(node[1]) + prefix.pop() + hole = not fill and (lhole or rhole) + def candidate(ctx: Optional[int], res0: Optional[list[ASNEntry]], + res1: Optional[list[ASNEntry]]): + if res0 is not None and res1 is not None: + if ctx not in ret or len(res0) + len(res1) < len(ret[ctx]): + ret[ctx] = res0 + res1 + for ctx in set(left) | set(right): + candidate(ctx, left.get(ctx), right.get(ctx)) + candidate(ctx, left.get(None), right.get(ctx)) + candidate(ctx, left.get(ctx), right.get(None)) + if not hole: + for ctx in list(ret): + if ctx is not None: + candidate(None, [(list(prefix), ctx)], ret[ctx]) + if None in ret: + ret = {ctx:entries for ctx, entries in ret.items() + if ctx is None or len(entries) < len(ret[None])} + if hole: + ret = {ctx:entries for ctx, entries in ret.items() if ctx is None or ctx == 0} + return ret, hole + res, _ = recurse(self._trie) + return res[0] if 0 in res else res[None] + + def __str__(self) -> str: + """Convert this ASMap object to a string containing Python code constructing it.""" + return f"ASMap({self._trie})" + + def to_entries(self, overlapping: bool = True, fill: bool = False) -> list[ASNEntry]: + """ + Convert the mappings in this ASMap object to a list of ASNEntry objects. + + Arguments: + overlapping: Permit the subnets in the resulting ASNEntry to overlap. + Setting this can result in a shorter list. 
+ fill: Permit the resulting ASNEntry objects to cover subnets that + are unassigned in this ASMap object. Setting this can + result in a shorter list. + """ + if overlapping: + return self._to_entries_minimal(fill) + return self._to_entries_flat(fill) + + @staticmethod + def from_random(num_leaves: int = 10, max_asn: int = 6, + unassigned_prob: float = 0.5) -> "ASMap": + """ + Construct a random ASMap object, with specified: + - Number of leaves in its trie (at least 1) + - Maximum ASN value (at least 1) + - Probability for leaf nodes to be unassigned + + The number of leaves in the resulting object may be less than what is + requested. This method is mostly intended for testing. + """ + assert num_leaves >= 1 + assert max_asn >= 1 or unassigned_prob == 1 + assert _CODER_ASN.can_encode(max_asn) + assert 0.0 <= unassigned_prob <= 1.0 + trie: list = [] + leaves = [trie] + ret = ASMap() + for i in range(1, num_leaves): + idx = random.randrange(i) + leaf = leaves[idx] + lastleaf = leaves.pop() + if idx + 1 < i: + leaves[idx] = lastleaf + leaf.append([]) + leaf.append([]) + leaves.append(leaf[0]) + leaves.append(leaf[1]) + for leaf in leaves: + if random.random() >= unassigned_prob: + leaf.append(random.randrange(1, max_asn + 1)) + else: + leaf.append(0) + #pylint: disable=protected-access + ret._set_trie(trie) + return ret + + def _to_binnode(self, fill: bool = False) -> _BinNode: + """Convert a trie to a _BinNode object.""" + def recurse(node: list) -> tuple[dict[Optional[int], _BinNode], bool]: + if len(node) == 1 and node[0] == 0: + return {(None if fill else 0): _BinNode.make_end()}, True + if len(node) == 1: + return {None: _BinNode.make_leaf(node[0]), node[0]: _BinNode.make_end()}, False + ret: dict[Optional[int], _BinNode] = {} + left, lhole = recurse(node[0]) + right, rhole = recurse(node[1]) + hole = (lhole or rhole) and not fill + + def candidate(ctx: Optional[int], arg1, arg2, func: Callable): + if arg1 is not None and arg2 is not None: + cand = func(arg1, arg2) + if ctx not in ret or cand.size < ret[ctx].size: + ret[ctx] = cand + + union = set(left) | set(right) + sorted_union = sorted(union, key=lambda x: (x is None, x)) + for ctx in sorted_union: + candidate(ctx, left.get(ctx), right.get(ctx), _BinNode.make_branch) + candidate(ctx, left.get(None), right.get(ctx), _BinNode.make_branch) + candidate(ctx, left.get(ctx), right.get(None), _BinNode.make_branch) + if not hole: + for ctx in sorted(set(ret) - set([None])): + candidate(None, ctx, ret[ctx], _BinNode.make_default) + if None in ret: + ret = {ctx:enc for ctx, enc in ret.items() + if ctx is None or enc.size < ret[None].size} + if hole: + ret = {ctx:enc for ctx, enc in ret.items() if ctx is None or ctx == 0} + return ret, hole + res, _ = recurse(self._trie) + return res[0] if 0 in res else res[None] + + @staticmethod + def _from_binnode(binnode: _BinNode) -> "ASMap": + """Construct an ASMap object from a _BinNode. 
Internal use only.""" + def recurse(node: _BinNode, default: int) -> list: + if node.ins == _Instruction.RETURN: + return [node.arg1] + if node.ins == _Instruction.JUMP: + return [recurse(node.arg1, default), recurse(node.arg2, default)] + if node.ins == _Instruction.MATCH: + val = node.arg1 + sub = recurse(node.arg2, default) + while val >= 2: + bit = val & 1 + val >>= 1 + if bit: + sub = [[default], sub] + else: + sub = [sub, [default]] + return sub + assert node.ins == _Instruction.DEFAULT + return recurse(node.arg2, node.arg1) + ret = ASMap() + if binnode.ins != _Instruction.END: + #pylint: disable=protected-access + ret._set_trie(recurse(binnode, 0)) + return ret + + def to_binary(self, fill: bool = False) -> bytes: + """ + Convert this ASMap object to binary. + + Argument: + fill: permit the resulting binary encoder to contain mappers for + unassigned subnets in this ASMap object. Doing so may + reduce the size of the encoding. + Returns: + A bytes object with the encoding of this ASMap object. + """ + bits: list[int] = [] + + def recurse(node: _BinNode) -> None: + _CODER_INS.encode(node.ins.value, bits) + if node.ins == _Instruction.RETURN: + _CODER_ASN.encode(node.arg1, bits) + elif node.ins == _Instruction.JUMP: + _CODER_JUMP.encode(node.arg1.size, bits) + recurse(node.arg1) + recurse(node.arg2) + elif node.ins == _Instruction.DEFAULT: + _CODER_ASN.encode(node.arg1, bits) + recurse(node.arg2) + else: + assert node.ins == _Instruction.MATCH + _CODER_MATCH.encode(node.arg1, bits) + recurse(node.arg2) + + binnode = self._to_binnode(fill) + if binnode.ins != _Instruction.END: + recurse(binnode) + + val = 0 + nbits = 0 + ret = [] + for bit in bits: + val += (bit << nbits) + nbits += 1 + if nbits == 8: + ret.append(val) + val = 0 + nbits = 0 + if nbits: + ret.append(val) + return bytes(ret) + + @staticmethod + def from_binary(bindata: bytes) -> Optional["ASMap"]: + """Decode an ASMap object from the provided binary encoding.""" + + bits: list[int] = [] + for byte in bindata: + bits.extend((byte >> i) & 1 for i in range(8)) + + def recurse(bitpos: int) -> tuple[_BinNode, int]: + insval, bitpos = _CODER_INS.decode(bits, bitpos) + ins = _Instruction(insval) + if ins == _Instruction.RETURN: + asn, bitpos = _CODER_ASN.decode(bits, bitpos) + return _BinNode(ins, asn), bitpos + if ins == _Instruction.JUMP: + jump, bitpos = _CODER_JUMP.decode(bits, bitpos) + left, bitpos1 = recurse(bitpos) + if bitpos1 != bitpos + jump: + raise ValueError("Inconsistent jump") + right, bitpos = recurse(bitpos1) + return _BinNode(ins, left, right), bitpos + if ins == _Instruction.MATCH: + match, bitpos = _CODER_MATCH.decode(bits, bitpos) + sub, bitpos = recurse(bitpos) + return _BinNode(ins, match, sub), bitpos + assert ins == _Instruction.DEFAULT + asn, bitpos = _CODER_ASN.decode(bits, bitpos) + sub, bitpos = recurse(bitpos) + return _BinNode(ins, asn, sub), bitpos + + if len(bits) == 0: + binnode = _BinNode(_Instruction.END) + else: + try: + binnode, bitpos = recurse(0) + except (ValueError, IndexError): + return None + if bitpos < len(bits) - 7: + return None + if not all(bit == 0 for bit in bits[bitpos:]): + return None + + return ASMap._from_binnode(binnode) + + def __lt__(self, other: "ASMap") -> bool: + return self._trie < other._trie + + def __eq__(self, other: object) -> bool: + if isinstance(other, ASMap): + return self._trie == other._trie + return False + + def extends(self, req: "ASMap") -> bool: + """Determine whether this matches req for all subranges where req is assigned.""" + def recurse(actual: 
list, require: list) -> bool: + if len(require) == 1 and require[0] == 0: + return True + if len(require) == 1: + if len(actual) == 1: + return bool(require[0] == actual[0]) + return recurse(actual[0], require) and recurse(actual[1], require) + if len(actual) == 2: + return recurse(actual[0], require[0]) and recurse(actual[1], require[1]) + return recurse(actual, require[0]) and recurse(actual, require[1]) + assert isinstance(req, ASMap) + #pylint: disable=protected-access + return recurse(self._trie, req._trie) + + def diff(self, other: "ASMap") -> list[ASNDiff]: + """Compute the diff from self to other.""" + prefix: list[bool] = [] + ret: list[ASNDiff] = [] + + def recurse(old_node: list, new_node: list): + if len(old_node) == 1 and len(new_node) == 1: + if old_node[0] != new_node[0]: + ret.append((list(prefix), old_node[0], new_node[0])) + else: + old_left: list = old_node if len(old_node) == 1 else old_node[0] + old_right: list = old_node if len(old_node) == 1 else old_node[1] + new_left: list = new_node if len(new_node) == 1 else new_node[0] + new_right: list = new_node if len(new_node) == 1 else new_node[1] + prefix.append(False) + recurse(old_left, new_left) + prefix[-1] = True + recurse(old_right, new_right) + prefix.pop() + assert isinstance(other, ASMap) + #pylint: disable=protected-access + recurse(self._trie, other._trie) + return ret + + def __copy__(self) -> "ASMap": + """Construct a copy of this ASMap object. Its state will not be shared.""" + ret = ASMap() + #pylint: disable=protected-access + ret._set_trie(copy.deepcopy(self._trie)) + return ret + + def __deepcopy__(self, _) -> "ASMap": + # ASMap objects do not allow sharing of the _trie member, so we don't need the memoization. + return self.__copy__() + + +class TestASMap(unittest.TestCase): + """Unit tests for this module.""" + + def test_ipv6_prefix_roundtrips(self) -> None: + """Test that random IPv6 network ranges roundtrip through prefix encoding.""" + for _ in range(20): + net_bits = random.getrandbits(128) + for prefix_len in range(0, 129): + masked_bits = (net_bits >> (128 - prefix_len)) << (128 - prefix_len) + net = ipaddress.IPv6Network((masked_bits.to_bytes(16, 'big'), prefix_len)) + prefix = net_to_prefix(net) + self.assertTrue(len(prefix) <= 128) + net2 = prefix_to_net(prefix) + self.assertEqual(net, net2) + + def test_ipv4_prefix_roundtrips(self) -> None: + """Test that random IPv4 network ranges roundtrip through prefix encoding.""" + for _ in range(100): + net_bits = random.getrandbits(32) + for prefix_len in range(0, 33): + masked_bits = (net_bits >> (32 - prefix_len)) << (32 - prefix_len) + net = ipaddress.IPv4Network((masked_bits.to_bytes(4, 'big'), prefix_len)) + prefix = net_to_prefix(net) + self.assertTrue(32 <= len(prefix) <= 128) + net2 = prefix_to_net(prefix) + self.assertEqual(net, net2) + + def test_asmap_roundtrips(self) -> None: + """Test case that verifies random ASMap objects roundtrip to/from entries/binary.""" + # Iterate over the number of leaves the random test ASMap objects have. + for leaves in range(1, 20): + # Iterate over the number of bits in the AS numbers used. + for asnbits in range(0, 24): + # Iterate over the probability that leaves are unassigned. + for pct in range(101): + # Construct a random ASMap object according to the above parameters. + asmap = ASMap.from_random(num_leaves=leaves, max_asn=1 + (1 << asnbits), + unassigned_prob=0.01 * pct) + # Run tests for to_entries and construction from those entries, both + # for overlapping and non-overlapping ones. 
+ for overlapping in [False, True]: + entries = asmap.to_entries(overlapping=overlapping, fill=False) + random.shuffle(entries) + asmap2 = ASMap(entries) + assert asmap2 is not None + self.assertEqual(asmap2, asmap) + entries = asmap.to_entries(overlapping=overlapping, fill=True) + random.shuffle(entries) + asmap2 = ASMap(entries) + assert asmap2 is not None + self.assertTrue(asmap2.extends(asmap)) + + # Run tests for to_binary and construction from binary. + enc = asmap.to_binary(fill=False) + asmap3 = ASMap.from_binary(enc) + assert asmap3 is not None + self.assertEqual(asmap3, asmap) + enc = asmap.to_binary(fill=True) + asmap3 = ASMap.from_binary(enc) + assert asmap3 is not None + self.assertTrue(asmap3.extends(asmap)) + + def test_patching(self) -> None: + """Test behavior of update, lookup, extends, and diff.""" + #pylint: disable=too-many-locals,too-many-nested-blocks + # Iterate over the number of leaves the random test ASMap objects have. + for leaves in range(1, 20): + # Iterate over the number of bits in the AS numbers used. + for asnbits in range(0, 10): + # Iterate over the probability that leaves are unassigned. + for pct in range(0, 101): + # Construct a random ASMap object according to the above parameters. + asmap = ASMap.from_random(num_leaves=leaves, max_asn=1 + (1 << asnbits), + unassigned_prob=0.01 * pct) + # Make a copy of that asmap object to which patches will be applied. + # It starts off being equal to asmap. + patched = copy.copy(asmap) + # Keep a list of patches performed. + patches: list[ASNEntry] = [] + # Initially there cannot be any difference. + self.assertEqual(asmap.diff(patched), []) + # Make 5 patches, each building on top of the previous ones. + for _ in range(0, 5): + # Construct a random path and new ASN to assign it to, apply it to patched, + # and remember it in patches. + pathlen = random.randrange(5) + path = [random.getrandbits(1) != 0 for _ in range(pathlen)] + newasn = random.randrange(1 + (1 << asnbits)) + patched.update(path, newasn) + patches = [(path, newasn)] + patches + + # Compute the diff, and whether asmap extends patched, and the other way + # around. + diff = asmap.diff(patched) + self.assertEqual(asmap == patched, len(diff) == 0) + extends = asmap.extends(patched) + back_extends = patched.extends(asmap) + # Determine whether those extends results are consistent with the diff + # result. + self.assertEqual(extends, all(d[2] == 0 for d in diff)) + self.assertEqual(back_extends, all(d[1] == 0 for d in diff)) + # For every diff found: + for path, old_asn, new_asn in diff: + # Verify asmap and patched actually differ there. + self.assertTrue(old_asn != new_asn) + self.assertEqual(asmap.lookup(path), old_asn) + self.assertEqual(patched.lookup(path), new_asn) + for _ in range(2): + # Extend the path far enough that it's smaller than any mapped + # range, and check the lookup holds there too. + spec_path = list(path) + while len(spec_path) < 32: + spec_path.append(random.getrandbits(1) != 0) + self.assertEqual(asmap.lookup(spec_path), old_asn) + self.assertEqual(patched.lookup(spec_path), new_asn) + # Search through the list of performed patches to find the last one + # applying to the extended path (note that patches is in reverse + # order, so the first match should work). + found = False + for patch_path, patch_asn in patches: + if spec_path[:len(patch_path)] == patch_path: + # When found, it must match whatever the result was patched + # to. + self.assertEqual(new_asn, patch_asn) + found = True + break + # And such a patch must exist. 
+ self.assertTrue(found) + +if __name__ == '__main__': + unittest.main() diff --git a/contrib/bitcoin-cli.bash-completion b/contrib/bitcoin-cli.bash-completion deleted file mode 100644 index ddea58a05cc1a..0000000000000 --- a/contrib/bitcoin-cli.bash-completion +++ /dev/null @@ -1,141 +0,0 @@ -# bash programmable completion for bitcoin-cli(1) -# Copyright (c) 2012-2019 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. - -# call $bitcoin-cli for RPC -_bitcoin_rpc() { - # determine already specified args necessary for RPC - local rpcargs=() - for i in ${COMP_LINE}; do - case "$i" in - -conf=*|-datadir=*|-regtest|-rpc*|-testnet) - rpcargs=( "${rpcargs[@]}" "$i" ) - ;; - esac - done - $bitcoin_cli "${rpcargs[@]}" "$@" -} - -_bitcoin_cli() { - local cur prev words=() cword - local bitcoin_cli - - # save and use original argument to invoke bitcoin-cli for -help, help and RPC - # as bitcoin-cli might not be in $PATH - bitcoin_cli="$1" - - COMPREPLY=() - _get_comp_words_by_ref -n = cur prev words cword - - if ((cword > 5)); then - case ${words[cword-5]} in - sendtoaddress) - COMPREPLY=( $( compgen -W "true false" -- "$cur" ) ) - return 0 - ;; - esac - fi - - if ((cword > 4)); then - case ${words[cword-4]} in - importaddress|listtransactions|setban) - COMPREPLY=( $( compgen -W "true false" -- "$cur" ) ) - return 0 - ;; - signrawtransactionwithkey|signrawtransactionwithwallet) - COMPREPLY=( $( compgen -W "ALL NONE SINGLE ALL|ANYONECANPAY NONE|ANYONECANPAY SINGLE|ANYONECANPAY" -- "$cur" ) ) - return 0 - ;; - esac - fi - - if ((cword > 3)); then - case ${words[cword-3]} in - addmultisigaddress) - return 0 - ;; - getbalance|gettxout|importaddress|importpubkey|importprivkey|listreceivedbyaddress|listsinceblock) - COMPREPLY=( $( compgen -W "true false" -- "$cur" ) ) - return 0 - ;; - esac - fi - - if ((cword > 2)); then - case ${words[cword-2]} in - addnode) - COMPREPLY=( $( compgen -W "add remove onetry" -- "$cur" ) ) - return 0 - ;; - setban) - COMPREPLY=( $( compgen -W "add remove" -- "$cur" ) ) - return 0 - ;; - fundrawtransaction|getblock|getblockheader|getmempoolancestors|getmempooldescendants|getrawtransaction|gettransaction|listreceivedbyaddress|sendrawtransaction) - COMPREPLY=( $( compgen -W "true false" -- "$cur" ) ) - return 0 - ;; - esac - fi - - case "$prev" in - backupwallet|dumpwallet|importwallet) - _filedir - return 0 - ;; - getaddednodeinfo|getrawmempool|lockunspent) - COMPREPLY=( $( compgen -W "true false" -- "$cur" ) ) - return 0 - ;; - getbalance|getnewaddress|listtransactions|sendmany) - return 0 - ;; - esac - - case "$cur" in - -conf=*) - cur="${cur#*=}" - _filedir - return 0 - ;; - -datadir=*) - cur="${cur#*=}" - _filedir -d - return 0 - ;; - -*=*) # prevent nonsense completions - return 0 - ;; - *) - local helpopts commands - - # only parse -help if senseful - if [[ -z "$cur" || "$cur" =~ ^- ]]; then - helpopts=$($bitcoin_cli -help 2>&1 | awk '$1 ~ /^-/ { sub(/=.*/, "="); print $1 }' ) - fi - - # only parse help if senseful - if [[ -z "$cur" || "$cur" =~ ^[a-z] ]]; then - commands=$(_bitcoin_rpc help 2>/dev/null | awk '$1 ~ /^[a-z]/ { print $1; }') - fi - - COMPREPLY=( $( compgen -W "$helpopts $commands" -- "$cur" ) ) - - # Prevent space if an argument is desired - if [[ $COMPREPLY == *= ]]; then - compopt -o nospace - fi - return 0 - ;; - esac -} && -complete -F _bitcoin_cli bitcoin-cli - -# Local variables: -# mode: shell-script -# sh-basic-offset: 4 -# 
sh-indent-comment: t -# indent-tabs-mode: nil -# End: -# ex: ts=4 sw=4 et filetype=sh diff --git a/contrib/bitcoin-qt.pro b/contrib/bitcoin-qt.pro deleted file mode 100644 index 0e4eeee0a7ad9..0000000000000 --- a/contrib/bitcoin-qt.pro +++ /dev/null @@ -1,22 +0,0 @@ -FORMS += \ - ../src/qt/forms/aboutdialog.ui \ - ../src/qt/forms/addressbookpage.ui \ - ../src/qt/forms/askpassphrasedialog.ui \ - ../src/qt/forms/coincontroldialog.ui \ - ../src/qt/forms/editaddressdialog.ui \ - ../src/qt/forms/helpmessagedialog.ui \ - ../src/qt/forms/intro.ui \ - ../src/qt/forms/openuridialog.ui \ - ../src/qt/forms/optionsdialog.ui \ - ../src/qt/forms/overviewpage.ui \ - ../src/qt/forms/receivecoinsdialog.ui \ - ../src/qt/forms/receiverequestdialog.ui \ - ../src/qt/forms/debugwindow.ui \ - ../src/qt/forms/sendcoinsdialog.ui \ - ../src/qt/forms/sendcoinsentry.ui \ - ../src/qt/forms/signverifymessagedialog.ui \ - ../src/qt/forms/transactiondescdialog.ui \ - ../src/qt/forms/createwalletdialog.ui - -RESOURCES += \ - ../src/qt/bitcoin.qrc diff --git a/contrib/bitcoin-tx.bash-completion b/contrib/bitcoin-tx.bash-completion deleted file mode 100644 index a83d2979ed3a8..0000000000000 --- a/contrib/bitcoin-tx.bash-completion +++ /dev/null @@ -1,57 +0,0 @@ -# bash programmable completion for bitcoin-tx(1) -# Copyright (c) 2016 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. - -_bitcoin_tx() { - local cur prev words=() cword - local bitcoin_tx - - # save and use original argument to invoke bitcoin-tx for -help - # it might not be in $PATH - bitcoin_tx="$1" - - COMPREPLY=() - _get_comp_words_by_ref -n =: cur prev words cword - - case "$cur" in - load=*:*) - cur="${cur#load=*:}" - _filedir - return 0 - ;; - *=*) # prevent attempts to complete other arguments - return 0 - ;; - esac - - if [[ "$cword" == 1 || ( "$prev" != "-create" && "$prev" == -* ) ]]; then - # only options (or an uncompletable hex-string) allowed - # parse bitcoin-tx -help for options - local helpopts - helpopts=$($bitcoin_tx -help | sed -e '/^ -/ p' -e d ) - COMPREPLY=( $( compgen -W "$helpopts" -- "$cur" ) ) - else - # only commands are allowed - # parse -help for commands - local helpcmds - helpcmds=$($bitcoin_tx -help | sed -e '1,/Commands:/d' -e 's/=.*/=/' -e '/^ [a-z]/ p' -e d ) - COMPREPLY=( $( compgen -W "$helpcmds" -- "$cur" ) ) - fi - - # Prevent space if an argument is desired - if [[ $COMPREPLY == *= ]]; then - compopt -o nospace - fi - - return 0 -} && -complete -F _bitcoin_tx bitcoin-tx - -# Local variables: -# mode: shell-script -# sh-basic-offset: 4 -# sh-indent-comment: t -# indent-tabs-mode: nil -# End: -# ex: ts=4 sw=4 et filetype=sh diff --git a/contrib/bitcoind.bash-completion b/contrib/bitcoind.bash-completion deleted file mode 100644 index ec1d9512d4759..0000000000000 --- a/contrib/bitcoind.bash-completion +++ /dev/null @@ -1,56 +0,0 @@ -# bash programmable completion for bitcoind(1) and bitcoin-qt(1) -# Copyright (c) 2012-2019 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. 
- -_bitcoind() { - local cur prev words=() cword - local bitcoind - - # save and use original argument to invoke bitcoind for -help - # it might not be in $PATH - bitcoind="$1" - - COMPREPLY=() - _get_comp_words_by_ref -n = cur prev words cword - - case "$cur" in - -conf=*|-pid=*|-loadblock=*|-rpccookiefile=*|-wallet=*) - cur="${cur#*=}" - _filedir - return 0 - ;; - -datadir=*) - cur="${cur#*=}" - _filedir -d - return 0 - ;; - -*=*) # prevent nonsense completions - return 0 - ;; - *) - - # only parse -help if sensible - if [[ -z "$cur" || "$cur" =~ ^- ]]; then - local helpopts - helpopts=$($bitcoind -help 2>&1 | awk '$1 ~ /^-/ { sub(/=.*/, "="); print $1 }' ) - COMPREPLY=( $( compgen -W "$helpopts" -- "$cur" ) ) - fi - - # Prevent space if an argument is desired - if [[ $COMPREPLY == *= ]]; then - compopt -o nospace - fi - return 0 - ;; - esac -} && -complete -F _bitcoind bitcoind bitcoin-qt - -# Local variables: -# mode: shell-script -# sh-basic-offset: 4 -# sh-indent-comment: t -# indent-tabs-mode: nil -# End: -# ex: ts=4 sw=4 et filetype=sh diff --git a/contrib/completions/bash/bitcoin-cli.bash b/contrib/completions/bash/bitcoin-cli.bash new file mode 100644 index 0000000000000..b04fdbcb0e870 --- /dev/null +++ b/contrib/completions/bash/bitcoin-cli.bash @@ -0,0 +1,141 @@ +# bash programmable completion for bitcoin-cli(1) +# Copyright (c) 2012-2022 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. + +# call $bitcoin-cli for RPC +_bitcoin_rpc() { + # determine already specified args necessary for RPC + local rpcargs=() + for i in ${COMP_LINE}; do + case "$i" in + -conf=*|-datadir=*|-regtest|-rpc*|-testnet|-testnet4) + rpcargs=( "${rpcargs[@]}" "$i" ) + ;; + esac + done + $bitcoin_cli "${rpcargs[@]}" "$@" +} + +_bitcoin_cli() { + local cur prev words=() cword + local bitcoin_cli + + # save and use original argument to invoke bitcoin-cli for -help, help and RPC + # as bitcoin-cli might not be in $PATH + bitcoin_cli="$1" + + COMPREPLY=() + _get_comp_words_by_ref -n = cur prev words cword + + if ((cword > 5)); then + case ${words[cword-5]} in + sendtoaddress) + COMPREPLY=( $( compgen -W "true false" -- "$cur" ) ) + return 0 + ;; + esac + fi + + if ((cword > 4)); then + case ${words[cword-4]} in + importaddress|listtransactions|setban) + COMPREPLY=( $( compgen -W "true false" -- "$cur" ) ) + return 0 + ;; + signrawtransactionwithkey|signrawtransactionwithwallet) + COMPREPLY=( $( compgen -W "ALL NONE SINGLE ALL|ANYONECANPAY NONE|ANYONECANPAY SINGLE|ANYONECANPAY" -- "$cur" ) ) + return 0 + ;; + esac + fi + + if ((cword > 3)); then + case ${words[cword-3]} in + addmultisigaddress) + return 0 + ;; + getbalance|gettxout|importaddress|importpubkey|importprivkey|listreceivedbyaddress|listsinceblock) + COMPREPLY=( $( compgen -W "true false" -- "$cur" ) ) + return 0 + ;; + esac + fi + + if ((cword > 2)); then + case ${words[cword-2]} in + addnode) + COMPREPLY=( $( compgen -W "add remove onetry" -- "$cur" ) ) + return 0 + ;; + setban) + COMPREPLY=( $( compgen -W "add remove" -- "$cur" ) ) + return 0 + ;; + fundrawtransaction|getblock|getblockheader|getmempoolancestors|getmempooldescendants|getrawtransaction|gettransaction|listreceivedbyaddress|sendrawtransaction) + COMPREPLY=( $( compgen -W "true false" -- "$cur" ) ) + return 0 + ;; + esac + fi + + case "$prev" in + backupwallet|dumpwallet|importwallet) + _filedir + return 0 + ;; + getaddednodeinfo|getrawmempool|lockunspent) + COMPREPLY=( 
$( compgen -W "true false" -- "$cur" ) ) + return 0 + ;; + getbalance|getnewaddress|listtransactions|sendmany) + return 0 + ;; + esac + + case "$cur" in + -conf=*) + cur="${cur#*=}" + _filedir + return 0 + ;; + -datadir=*) + cur="${cur#*=}" + _filedir -d + return 0 + ;; + -*=*) # prevent nonsense completions + return 0 + ;; + *) + local helpopts commands + + # only parse -help if senseful + if [[ -z "$cur" || "$cur" =~ ^- ]]; then + helpopts=$($bitcoin_cli -help 2>&1 | awk '$1 ~ /^-/ { sub(/=.*/, "="); print $1 }' ) + fi + + # only parse help if senseful + if [[ -z "$cur" || "$cur" =~ ^[a-z] ]]; then + commands=$(_bitcoin_rpc help 2>/dev/null | awk '$1 ~ /^[a-z]/ { print $1; }') + fi + + COMPREPLY=( $( compgen -W "$helpopts $commands" -- "$cur" ) ) + + # Prevent space if an argument is desired + if [[ $COMPREPLY == *= ]]; then + compopt -o nospace + fi + return 0 + ;; + esac +} && +complete -F _bitcoin_cli bitcoin-cli + +# Local variables: +# mode: shell-script +# sh-basic-offset: 4 +# sh-indent-comment: t +# indent-tabs-mode: nil +# End: +# ex: ts=4 sw=4 et filetype=sh diff --git a/contrib/completions/bash/bitcoin-tx.bash b/contrib/completions/bash/bitcoin-tx.bash new file mode 100644 index 0000000000000..51a9fe31cb710 --- /dev/null +++ b/contrib/completions/bash/bitcoin-tx.bash @@ -0,0 +1,57 @@ +# bash programmable completion for bitcoin-tx(1) +# Copyright (c) 2016-2022 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. + +_bitcoin_tx() { + local cur prev words=() cword + local bitcoin_tx + + # save and use original argument to invoke bitcoin-tx for -help + # it might not be in $PATH + bitcoin_tx="$1" + + COMPREPLY=() + _get_comp_words_by_ref -n =: cur prev words cword + + case "$cur" in + load=*:*) + cur="${cur#load=*:}" + _filedir + return 0 + ;; + *=*) # prevent attempts to complete other arguments + return 0 + ;; + esac + + if [[ "$cword" == 1 || ( "$prev" != "-create" && "$prev" == -* ) ]]; then + # only options (or an uncompletable hex-string) allowed + # parse bitcoin-tx -help for options + local helpopts + helpopts=$($bitcoin_tx -help | sed -e '/^ -/ p' -e d ) + COMPREPLY=( $( compgen -W "$helpopts" -- "$cur" ) ) + else + # only commands are allowed + # parse -help for commands + local helpcmds + helpcmds=$($bitcoin_tx -help | sed -e '1,/Commands:/d' -e 's/=.*/=/' -e '/^ [a-z]/ p' -e d ) + COMPREPLY=( $( compgen -W "$helpcmds" -- "$cur" ) ) + fi + + # Prevent space if an argument is desired + if [[ $COMPREPLY == *= ]]; then + compopt -o nospace + fi + + return 0 +} && +complete -F _bitcoin_tx bitcoin-tx + +# Local variables: +# mode: shell-script +# sh-basic-offset: 4 +# sh-indent-comment: t +# indent-tabs-mode: nil +# End: +# ex: ts=4 sw=4 et filetype=sh diff --git a/contrib/completions/bash/bitcoind.bash b/contrib/completions/bash/bitcoind.bash new file mode 100644 index 0000000000000..c11d99ef3169c --- /dev/null +++ b/contrib/completions/bash/bitcoind.bash @@ -0,0 +1,56 @@ +# bash programmable completion for bitcoind(1) and bitcoin-qt(1) +# Copyright (c) 2012-2022 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+ +_bitcoind() { + local cur prev words=() cword + local bitcoind + + # save and use original argument to invoke bitcoind for -help + # it might not be in $PATH + bitcoind="$1" + + COMPREPLY=() + _get_comp_words_by_ref -n = cur prev words cword + + case "$cur" in + -conf=*|-pid=*|-loadblock=*|-rpccookiefile=*|-wallet=*) + cur="${cur#*=}" + _filedir + return 0 + ;; + -datadir=*) + cur="${cur#*=}" + _filedir -d + return 0 + ;; + -*=*) # prevent nonsense completions + return 0 + ;; + *) + + # only parse -help if sensible + if [[ -z "$cur" || "$cur" =~ ^- ]]; then + local helpopts + helpopts=$($bitcoind -help 2>&1 | awk '$1 ~ /^-/ { sub(/=.*/, "="); print $1 }' ) + COMPREPLY=( $( compgen -W "$helpopts" -- "$cur" ) ) + fi + + # Prevent space if an argument is desired + if [[ $COMPREPLY == *= ]]; then + compopt -o nospace + fi + return 0 + ;; + esac +} && +complete -F _bitcoind bitcoind bitcoin-qt + +# Local variables: +# mode: shell-script +# sh-basic-offset: 4 +# sh-indent-comment: t +# indent-tabs-mode: nil +# End: +# ex: ts=4 sw=4 et filetype=sh diff --git a/contrib/completions/fish/bitcoin-cli.fish b/contrib/completions/fish/bitcoin-cli.fish new file mode 100644 index 0000000000000..2f034c475c1d3 --- /dev/null +++ b/contrib/completions/fish/bitcoin-cli.fish @@ -0,0 +1,99 @@ +# Disable files from being included in completions by default +complete --command bitcoin-cli --no-files + +function __fish_bitcoin_cli_get_commands_helper + set --local cmd (commandline -oc) + + # Don't return commands if '-help or -?' in commandline + if string match --quiet --regex -- '^-help$|^-\?$' $cmd + return + end + + # Strip help cmd from token to avoid duplication errors + set --local cmd (string match --invert --regex -- '^help$' $cmd) + # Strip -stdin* options to avoid waiting for input while we fetch completions + # TODO: this appears to be broken when run as tab completion (requires ctrl+c to exit) + set --local cmd (string match --invert --regex -- '^-stdin.*$' $cmd) + + # Match, format and return commands + for command in ($cmd help 2>&1 | string match --invert -r '^\=\=.*' | string match --invert -r '^\\s*$') + echo $command + end +end + +function __fish_bitcoin_cli_get_commands + argparse 'nohelp' 'commandsonly' -- $argv + set --local commands + + # Exclude description, exclude help + if set -q _flag_nohelp; and set -q _flag_commandsonly + set --append commands (__fish_bitcoin_cli_get_commands_helper | string replace -r ' .*$' '' | string match --invert -r 'help') + # Include description, exclude help + else if set -q _flag_nohelp + set --append commands (__fish_bitcoin_cli_get_commands_helper | string replace ' ' \t | string match --invert -r 'help') + # Exclude description, include help + else if set -q _flag_commandsonly + set --append commands (__fish_bitcoin_cli_get_commands_helper | string replace -r ' .*$' '') + # Include description, include help + else + set --append commands (__fish_bitcoin_cli_get_commands_helper | string replace ' ' \t) + end + + if string match -q -r '^.*error.*$' $commands[1] + # RPC offline or RPC wallet not loaded + return + else + for command in $commands + echo $command + end + end +end + + +function __fish_bitcoin_cli_get_options + argparse 'nofiles' -- $argv + set --local cmd (commandline -oc) + # Don't return options if '-help or -?' 
in commandline + if string match --quiet --regex -- '^-help$|-\?$' $cmd + return + end + set --local options + + if set -q _flag_nofiles + set --append options ($cmd -help 2>&1 | string match -r '^ -.*' | string replace -r ' -' '-' | string replace -r '=.*' '=' | string match --invert -r '^.*=$') + else + set --append options ($cmd -help 2>&1 | string match -r '^ -.*' | string replace -r ' -' '-' | string replace -r '=.*' '=' | string match -r '^.*=$') + end + + for option in $options + echo $option + end +end + +# Add options with file completion +# Don't offer after a command is given +complete \ + --command bitcoin-cli \ + --no-files \ + --condition "not __fish_seen_subcommand_from (__fish_bitcoin_cli_get_commands --commandsonly)" \ + --arguments "(__fish_bitcoin_cli_get_options)" +# Enable file completions only if the commandline now contains a `*.=` style option +complete --command bitcoin-cli \ + --condition 'string match --regex -- ".*=" (commandline -pt)' \ + --force-files + +# Add options without file completion +# Don't offer after a command is given +complete \ + --command bitcoin-cli \ + --no-files \ + --condition "not __fish_seen_subcommand_from (__fish_bitcoin_cli_get_commands --commandsonly)" \ + --arguments "(__fish_bitcoin_cli_get_options --nofiles)" + +# Add commands +# Permit command completions after `bitcoin-cli help` but not after other commands +complete \ + --command bitcoin-cli \ + --no-files \ + --condition "not __fish_seen_subcommand_from (__fish_bitcoin_cli_get_commands --commandsonly --nohelp)" \ + --arguments "(__fish_bitcoin_cli_get_commands)" diff --git a/contrib/completions/fish/bitcoin-qt.fish b/contrib/completions/fish/bitcoin-qt.fish new file mode 100644 index 0000000000000..15a355ae88f5d --- /dev/null +++ b/contrib/completions/fish/bitcoin-qt.fish @@ -0,0 +1,35 @@ +# Disable files from being included in completions by default +complete --command bitcoin-qt --no-files + +# Extract options +function __fish_bitcoinqt_get_options + argparse 'nofiles' -- $argv + set --local cmd (commandline -opc)[1] + set --local options + + if set -q _flag_nofiles + set --append options ($cmd -help-debug | string match -r '^ -.*' | string replace -r ' -' '-' | string replace -r '=.*' '=' | string match --invert -r '^.*=$') + else + set --append options ($cmd -help-debug | string match -r '^ -.*' | string replace -r ' -' '-' | string replace -r '=.*' '=' | string match -r '^.*=$') + end + + for option in $options + echo $option + end +end + + +# Add options with file completion +complete \ + --command bitcoin-qt \ + --arguments "(__fish_bitcoinqt_get_options)" +# Enable file completions only if the commandline now contains a `*.=` style option +complete -c bitcoin-qt \ + --condition 'string match --regex -- ".*=" (commandline -pt)' \ + --force-files + +# Add options without file completion +complete \ + --command bitcoin-qt \ + --arguments "(__fish_bitcoinqt_get_options --nofiles)" + diff --git a/contrib/completions/fish/bitcoin-tx.fish b/contrib/completions/fish/bitcoin-tx.fish new file mode 100644 index 0000000000000..0ff262b948e49 --- /dev/null +++ b/contrib/completions/fish/bitcoin-tx.fish @@ -0,0 +1,65 @@ +# Disable files from being included in completions by default +complete --command bitcoin-tx --no-files + +# Modified version of __fish_seen_subcommand_from +# Uses regex to detect cmd= syntax +function __fish_bitcoin_seen_cmd + set -l cmd (commandline -oc) + set -e cmd[1] + for i in $cmd + for j in $argv + if string match --quiet --regex -- "^$j.*" $i + return 0 + 
end + end + end + return 1 +end + +# Extract options +function __fish_bitcoin_tx_get_options + set --local cmd (commandline -oc)[1] + if string match --quiet --regex -- '^-help$|-\?$' $cmd + return + end + + for option in ($cmd -help 2>&1 | string match -r '^ -.*' | string replace -r ' -' '-' | string replace -r '=.*' '=') + echo $option + end +end + +# Extract commands +function __fish_bitcoin_tx_get_commands + argparse 'commandsonly' -- $argv + set --local cmd (commandline -oc)[1] + set --local commands + + if set -q _flag_commandsonly + set --append commands ($cmd -help | sed -e '1,/Commands:/d' -e 's/=/=\t/' -e 's/(=/=/' -e '/^ [a-z]/ p' -e d | string replace -r '\ \ ' '' | string replace -r '=.*' '') + else + set --append commands ($cmd -help | sed -e '1,/Commands:/d' -e 's/=/=\t/' -e 's/(=/=/' -e '/^ [a-z]/ p' -e d | string replace -r '\ \ ' '') + end + + for command in $commands + echo $command + end +end + +# Add options +complete \ + --command bitcoin-tx \ + --condition "not __fish_bitcoin_seen_cmd (__fish_bitcoin_tx_get_commands --commandsonly)" \ + --arguments "(__fish_bitcoin_tx_get_options)" \ + --no-files + +# Add commands +complete \ + --command bitcoin-tx \ + --arguments "(__fish_bitcoin_tx_get_commands)" \ + --no-files + +# Add file completions for load and set commands +complete \ + --command bitcoin-tx \ + --condition 'string match --regex -- "(load|set)=" (commandline -pt)' \ + --force-files diff --git a/contrib/completions/fish/bitcoin-util.fish b/contrib/completions/fish/bitcoin-util.fish new file mode 100644 index 0000000000000..0650bf2cb6d3b --- /dev/null +++ b/contrib/completions/fish/bitcoin-util.fish @@ -0,0 +1,38 @@ +# Disable files from being included in completions by default +complete --command bitcoin-util --no-files + +# Extract options +function __fish_bitcoin_util_get_options + set --local cmd (commandline -opc)[1] + set --local options + + set --append options ($cmd -help 2>&1 | string match -r '^ -.*' | string replace -r ' -' '-' | string replace -r '=.*' '=') + + for option in $options + echo $option + end +end + +# Extract commands +function __fish_bitcoin_util_get_commands + set --local cmd (commandline -opc)[1] + set --local commands + + set --append commands ($cmd -help | sed -e '1,/Commands:/d' -e 's/=/=\t/' -e 's/(=/=/' -e '/^ [a-z]/ p' -e d | string replace -r '\ \ ' '') + for command in $commands + echo $command + end +end + +# Add options +complete \ + --command bitcoin-util \ + --condition "not __fish_seen_subcommand_from (__fish_bitcoin_util_get_commands)" \ + --arguments "(__fish_bitcoin_util_get_options)" + +# Add commands +complete \ + --command bitcoin-util \ + --condition "not __fish_seen_subcommand_from (__fish_bitcoin_util_get_commands)" \ + --arguments "(__fish_bitcoin_util_get_commands)" + diff --git a/contrib/completions/fish/bitcoin-wallet.fish b/contrib/completions/fish/bitcoin-wallet.fish new file mode 100644 index 0000000000000..82d8277c9b4f5 --- /dev/null +++ b/contrib/completions/fish/bitcoin-wallet.fish @@ -0,0 +1,35 @@ +# Disable files from being included in completions by default +complete --command bitcoin-wallet --no-files + +# Extract options +function __fish_bitcoin_wallet_get_options + set --local cmd (commandline -opc)[1] + for option in ($cmd -help 2>&1 | string match -r '^ -.*' | string replace -r ' -' '-' | string replace -r '=.*' '=') + echo $option + end +end + +# Extract commands +function __fish_bitcoin_wallet_get_commands + set --local cmd (commandline -opc)[1] + for command in ($cmd -help | sed -e 
'1,/Commands:/d' -e 's/=/=\t/' -e 's/(=/=/' -e '/^ [a-z]/ p' -e d | string replace -r '\ \ ' '') + echo $command + end +end + +# Add options +complete \ + --command bitcoin-wallet \ + --condition "not __fish_seen_subcommand_from (__fish_bitcoin_wallet_get_commands)" \ + --arguments "(__fish_bitcoin_wallet_get_options)" + +# Add commands +complete \ + --command bitcoin-wallet \ + --condition "not __fish_seen_subcommand_from (__fish_bitcoin_wallet_get_commands)" \ + --arguments "(__fish_bitcoin_wallet_get_commands)" + +# Add file completions for load and set commands +complete --command bitcoin-wallet \ + --condition "string match -r -- '(dumpfile|datadir)*=' (commandline -pt)" \ + --force-files diff --git a/contrib/completions/fish/bitcoind.fish b/contrib/completions/fish/bitcoind.fish new file mode 100644 index 0000000000000..fa245ae17f470 --- /dev/null +++ b/contrib/completions/fish/bitcoind.fish @@ -0,0 +1,35 @@ +# Disable files from being included in completions by default +complete --command bitcoind --no-files + +# Extract options +function __fish_bitcoind_get_options + argparse 'nofiles' -- $argv + set --local cmd (commandline -opc)[1] + set --local options + + if set -q _flag_nofiles + set --append options ($cmd -help-debug | string match -r '^ -.*' | string replace -r ' -' '-' | string replace -r '=.*' '=' | string match --invert -r '^.*=$') + else + set --append options ($cmd -help-debug | string match -r '^ -.*' | string replace -r ' -' '-' | string replace -r '=.*' '=' | string match -r '^.*=$') + end + + for option in $options + echo $option + end +end + + +# Add options with file completion +complete \ + --command bitcoind \ + --arguments "(__fish_bitcoind_get_options)" +# Enable file completions only if the commandline now contains a `*.=` style option +complete --command bitcoind \ + --condition 'string match --regex -- ".*=" (commandline -pt)' \ + --force-files + +# Add options without file completion +complete \ + --command bitcoind \ + --arguments "(__fish_bitcoind_get_options --nofiles)" + diff --git a/contrib/debian/copyright b/contrib/debian/copyright index 581fe712e96c2..dafc92f8ad72d 100644 --- a/contrib/debian/copyright +++ b/contrib/debian/copyright @@ -1,28 +1,20 @@ Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ Upstream-Name: Bitcoin Upstream-Contact: Satoshi Nakamoto - irc://#bitcoin@freenode.net + irc://#bitcoin-core-dev@libera.chat Source: https://github.com/bitcoin/bitcoin Files: * -Copyright: 2009-2020, Bitcoin Core Developers +Copyright: 2009-2024, Bitcoin Core Developers License: Expat -Comment: The Bitcoin Core Developers encompasses the current developers listed on bitcoin.org, - as well as the numerous contributors to the project. +Comment: The Bitcoin Core Developers encompasses all contributors to the + project, listed in the release notes or the git log. 
Files: debian/* Copyright: 2010-2011, Jonas Smedegaard 2011, Matt Corallo License: GPL-2+ -Files: src/secp256k1/build-aux/m4/ax_jni_include_dir.m4 -Copyright: 2008 Don Anderson -License: GNU-All-permissive-License - -Files: src/secp256k1/build-aux/m4/ax_prog_cc_for_build.m4 -Copyright: 2008 Paolo Bonzini -License: GNU-All-permissive-License - Files: src/qt/res/icons/add.png src/qt/res/icons/address-book.png src/qt/res/icons/chevron.png @@ -87,6 +79,10 @@ Files: src/qt/res/icons/proxy.png Copyright: Cristian Mircea Messel License: public-domain +Files: src/qt/res/fonts/RobotoMono-Bold.ttf +License: Apache-2.0 +Comment: Site: https://fonts.google.com/specimen/Roboto+Mono + License: Expat Permission is hereby granted, free of charge, to any person obtaining a @@ -108,12 +104,6 @@ License: Expat TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -License: GNU-All-permissive-License - Copying and distribution of this file, with or without modification, are - permitted in any medium without royalty provided the copyright notice - and this notice are preserved. This file is offered as-is, without any - warranty. - License: GPL-2+ This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the @@ -144,3 +134,14 @@ Comment: License: public-domain This work is in the public domain. + +License: Apache-2.0 + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/contrib/devtools/README.md b/contrib/devtools/README.md index bdff7a84b098b..56eaeef815063 100644 --- a/contrib/devtools/README.md +++ b/contrib/devtools/README.md @@ -7,7 +7,8 @@ clang-format-diff.py A script to format unified git diffs according to [.clang-format](../../src/.clang-format). -Requires `clang-format`, installed e.g. via `brew install clang-format` on macOS. +Requires `clang-format`, installed e.g. via `brew install clang-format` on macOS, +or `sudo apt install clang-format` on Debian/Ubuntu. For instance, to format the last commit with 0 lines of context, the script should be called from the git root folder as follows. @@ -75,18 +76,43 @@ year rather than two hyphenated years. If the file already has a copyright for `The Bitcoin Core developers`, the script will exit. -gen-manpages.sh +gen-manpages.py =============== A small script to automatically create manpages in ../../doc/man by running the release binaries with the -help option. This requires help2man which can be found at: https://www.gnu.org/software/help2man/ With in-tree builds this tool can be run from any directory within the -repostitory. To use this tool with out-of-tree builds set `BUILDDIR`. For +repository. To use this tool with out-of-tree builds set `BUILDDIR`. For example: ```bash -BUILDDIR=$PWD/build contrib/devtools/gen-manpages.sh +BUILDDIR=$PWD/build contrib/devtools/gen-manpages.py +``` + +headerssync-params.py +===================== + +A script to generate optimal parameters for the headerssync module (src/headerssync.cpp). 
It takes no command-line +options, as all its configuration is set at the top of the file. It runs many times faster inside PyPy. Invocation: + +```bash +pypy3 contrib/devtools/headerssync-params.py +``` + +gen-bitcoin-conf.sh +=================== + +Generates a bitcoin.conf file in `share/examples/` by parsing the output from `bitcoind --help`. This script is run during the +release process to include a bitcoin.conf with the release binaries and can also be run by users to generate a file locally. +When generating a file as part of the release process, make sure to commit the changes after running the script. + +With in-tree builds this tool can be run from any directory within the +repository. To use this tool with out-of-tree builds set `BUILDDIR`. For +example: + +```bash +BUILDDIR=$PWD/build contrib/devtools/gen-bitcoin-conf.sh ``` security-check.py and test-security-check.py @@ -97,7 +123,7 @@ Perform basic security checks on a series of executables. symbol-check.py =============== -A script to check that the executables produced by gitian only contain +A script to check that release executables only contain certain symbols and are only linked against allowed libraries. For Linux this means checking for allowed gcc, glibc and libstdc++ version symbols. @@ -105,9 +131,9 @@ This makes sure they are still compatible with the minimum supported distributio For macOS and Windows we check that the executables are only linked against libraries we allow. -Example usage after a gitian build: +Example usage: - find ../gitian-builder/build -type f -executable | xargs python3 contrib/devtools/symbol-check.py + find ../path/to/executables -type f -executable | xargs python3 contrib/devtools/symbol-check.py If no errors occur the return value will be 0 and the output will be empty. 
diff --git a/contrib/devtools/bitcoin-tidy/CMakeLists.txt b/contrib/devtools/bitcoin-tidy/CMakeLists.txt new file mode 100644 index 0000000000000..c6f683f7abaff --- /dev/null +++ b/contrib/devtools/bitcoin-tidy/CMakeLists.txt @@ -0,0 +1,67 @@ +cmake_minimum_required(VERSION 3.22) + +project(bitcoin-tidy + VERSION + 1.0.0 + DESCRIPTION "clang-tidy checks for Bitcoin Core" + LANGUAGES CXX) + +include(GNUInstallDirs) + +set(CMAKE_CXX_STANDARD 20) +set(CMAKE_CXX_STANDARD_REQUIRED True) +set(CMAKE_CXX_EXTENSIONS False) + +set(CMAKE_DISABLE_FIND_PACKAGE_CURL ON) +set(CMAKE_DISABLE_FIND_PACKAGE_FFI ON) +set(CMAKE_DISABLE_FIND_PACKAGE_LibEdit ON) +set(CMAKE_DISABLE_FIND_PACKAGE_LibXml2 ON) +set(CMAKE_DISABLE_FIND_PACKAGE_Terminfo ON) +set(CMAKE_DISABLE_FIND_PACKAGE_ZLIB ON) +set(CMAKE_DISABLE_FIND_PACKAGE_zstd ON) + +find_package(LLVM REQUIRED CONFIG) +find_program(CLANG_TIDY_EXE NAMES "clang-tidy-${LLVM_VERSION_MAJOR}" "clang-tidy" HINTS ${LLVM_TOOLS_BINARY_DIR}) +message(STATUS "Found LLVM ${LLVM_PACKAGE_VERSION}") +message(STATUS "Found clang-tidy: ${CLANG_TIDY_EXE}") + +add_library(bitcoin-tidy MODULE bitcoin-tidy.cpp nontrivial-threadlocal.cpp) +target_include_directories(bitcoin-tidy SYSTEM PRIVATE ${LLVM_INCLUDE_DIRS}) + +# Disable RTTI and exceptions as necessary +if (MSVC) + target_compile_options(bitcoin-tidy PRIVATE /GR-) +else() + target_compile_options(bitcoin-tidy PRIVATE -fno-rtti) + target_compile_options(bitcoin-tidy PRIVATE -fno-exceptions) +endif() + +if(CMAKE_HOST_APPLE) + # ld64 expects no undefined symbols by default + target_link_options(bitcoin-tidy PRIVATE -Wl,-flat_namespace) + target_link_options(bitcoin-tidy PRIVATE -Wl,-undefined -Wl,suppress) +endif() + +# Add warnings +if (MSVC) + target_compile_options(bitcoin-tidy PRIVATE /W4) +else() + target_compile_options(bitcoin-tidy PRIVATE -Wall) + target_compile_options(bitcoin-tidy PRIVATE -Wextra) +endif() + +if(CMAKE_VERSION VERSION_LESS 3.27) + set(CLANG_TIDY_COMMAND "${CLANG_TIDY_EXE}" "--load=${CMAKE_BINARY_DIR}/${CMAKE_SHARED_MODULE_PREFIX}bitcoin-tidy${CMAKE_SHARED_MODULE_SUFFIX}" "-checks=-*,bitcoin-*") +else() + # CLANG_TIDY_COMMAND supports generator expressions as of 3.27 + set(CLANG_TIDY_COMMAND "${CLANG_TIDY_EXE}" "--load=$" "-checks=-*,bitcoin-*") +endif() + +# Create a dummy library that runs clang-tidy tests as a side-effect of building +add_library(bitcoin-tidy-tests OBJECT EXCLUDE_FROM_ALL example_nontrivial-threadlocal.cpp) +add_dependencies(bitcoin-tidy-tests bitcoin-tidy) + +set_target_properties(bitcoin-tidy-tests PROPERTIES CXX_CLANG_TIDY "${CLANG_TIDY_COMMAND}") + + +install(TARGETS bitcoin-tidy LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}) diff --git a/contrib/devtools/bitcoin-tidy/README.md b/contrib/devtools/bitcoin-tidy/README.md new file mode 100644 index 0000000000000..c15e07c4ede54 --- /dev/null +++ b/contrib/devtools/bitcoin-tidy/README.md @@ -0,0 +1,11 @@ +# Bitcoin Tidy + +Example Usage: + +```bash +cmake -S . 
+
+cmake --build build -j$(nproc)
+
+cmake --build build --target bitcoin-tidy-tests -j$(nproc)
+```
diff --git a/contrib/devtools/bitcoin-tidy/bitcoin-tidy.cpp b/contrib/devtools/bitcoin-tidy/bitcoin-tidy.cpp
new file mode 100644
index 0000000000000..f2658b5a58662
--- /dev/null
+++ b/contrib/devtools/bitcoin-tidy/bitcoin-tidy.cpp
@@ -0,0 +1,22 @@
+// Copyright (c) 2023 Bitcoin Developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#include "nontrivial-threadlocal.h"
+
+#include <clang-tidy/ClangTidyModule.h>
+#include <clang-tidy/ClangTidyModuleRegistry.h>
+
+class BitcoinModule final : public clang::tidy::ClangTidyModule
+{
+public:
+    void addCheckFactories(clang::tidy::ClangTidyCheckFactories& CheckFactories) override
+    {
+        CheckFactories.registerCheck<bitcoin::NonTrivialThreadLocal>("bitcoin-nontrivial-threadlocal");
+    }
+};
+
+static clang::tidy::ClangTidyModuleRegistry::Add<BitcoinModule>
+    X("bitcoin-module", "Adds bitcoin checks.");
+
+volatile int BitcoinModuleAnchorSource = 0;
diff --git a/contrib/devtools/bitcoin-tidy/example_nontrivial-threadlocal.cpp b/contrib/devtools/bitcoin-tidy/example_nontrivial-threadlocal.cpp
new file mode 100644
index 0000000000000..2b74df5d0e838
--- /dev/null
+++ b/contrib/devtools/bitcoin-tidy/example_nontrivial-threadlocal.cpp
@@ -0,0 +1,2 @@
+#include <string>
+thread_local std::string foo;
diff --git a/contrib/devtools/bitcoin-tidy/nontrivial-threadlocal.cpp b/contrib/devtools/bitcoin-tidy/nontrivial-threadlocal.cpp
new file mode 100644
index 0000000000000..d2bc78a31b27c
--- /dev/null
+++ b/contrib/devtools/bitcoin-tidy/nontrivial-threadlocal.cpp
@@ -0,0 +1,44 @@
+// Copyright (c) 2023 Bitcoin Developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#include "nontrivial-threadlocal.h"
+
+#include <clang/AST/ASTContext.h>
+#include <clang/ASTMatchers/ASTMatchFinder.h>
+
+
+// Copied from clang-tidy's UnusedRaiiCheck
+namespace {
+AST_MATCHER(clang::CXXRecordDecl, hasNonTrivialDestructor) {
+    // TODO: If the dtor is there but empty we don't want to warn either.
+    return Node.hasDefinition() && Node.hasNonTrivialDestructor();
+}
+} // namespace
+
+namespace bitcoin {
+
+void NonTrivialThreadLocal::registerMatchers(clang::ast_matchers::MatchFinder* finder)
+{
+    using namespace clang::ast_matchers;
+
+    /*
+      thread_local std::string foo;
+    */
+
+    finder->addMatcher(
+        varDecl(
+            hasThreadStorageDuration(),
+            hasType(hasCanonicalType(recordType(hasDeclaration(cxxRecordDecl(hasNonTrivialDestructor())))))
+        ).bind("nontrivial_threadlocal"),
+        this);
+}
+
+void NonTrivialThreadLocal::check(const clang::ast_matchers::MatchFinder::MatchResult& Result)
+{
+    if (const clang::VarDecl* var = Result.Nodes.getNodeAs<clang::VarDecl>("nontrivial_threadlocal")) {
+        const auto user_diag = diag(var->getBeginLoc(), "Variable with non-trivial destructor cannot be thread_local.");
+    }
+}
+
+} // namespace bitcoin
diff --git a/contrib/devtools/bitcoin-tidy/nontrivial-threadlocal.h b/contrib/devtools/bitcoin-tidy/nontrivial-threadlocal.h
new file mode 100644
index 0000000000000..c853073467bd0
--- /dev/null
+++ b/contrib/devtools/bitcoin-tidy/nontrivial-threadlocal.h
@@ -0,0 +1,29 @@
+// Copyright (c) 2023 Bitcoin Developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#ifndef NONTRIVIAL_THREADLOCAL_CHECK_H
+#define NONTRIVIAL_THREADLOCAL_CHECK_H
+
+#include <clang-tidy/ClangTidyCheck.h>
+
+namespace bitcoin {
+
+// Warn about any thread_local variable with a non-trivial destructor.
+class NonTrivialThreadLocal final : public clang::tidy::ClangTidyCheck
+{
+public:
+    NonTrivialThreadLocal(clang::StringRef Name, clang::tidy::ClangTidyContext* Context)
+        : clang::tidy::ClangTidyCheck(Name, Context) {}
+
+    bool isLanguageVersionSupported(const clang::LangOptions& LangOpts) const override
+    {
+        return LangOpts.CPlusPlus;
+    }
+    void registerMatchers(clang::ast_matchers::MatchFinder* Finder) override;
+    void check(const clang::ast_matchers::MatchFinder::MatchResult& Result) override;
+};
+
+} // namespace bitcoin
+
+#endif // NONTRIVIAL_THREADLOCAL_CHECK_H
diff --git a/contrib/devtools/check-deps.sh b/contrib/devtools/check-deps.sh
new file mode 100755
index 0000000000000..cdfc4e7533d04
--- /dev/null
+++ b/contrib/devtools/check-deps.sh
@@ -0,0 +1,203 @@
+#!/usr/bin/env bash
+
+export LC_ALL=C
+set -Eeuo pipefail
+
+# Declare paths to libraries
+declare -A LIBS
+LIBS[cli]="libbitcoin_cli.a"
+LIBS[common]="libbitcoin_common.a"
+LIBS[consensus]="libbitcoin_consensus.a"
+LIBS[crypto]="crypto/libbitcoin_crypto.a crypto/libbitcoin_crypto_x86_shani.a crypto/libbitcoin_crypto_sse41.a crypto/libbitcoin_crypto_avx2.a"
+LIBS[node]="libbitcoin_node.a"
+LIBS[util]="util/libbitcoin_util.a"
+LIBS[wallet]="wallet/libbitcoin_wallet.a"
+
+# Declare allowed dependencies "X Y" where X is allowed to depend on Y. This
+# list is taken from doc/design/libraries.md.
+ALLOWED_DEPENDENCIES=(
+    "cli common"
+    "cli util"
+    "common consensus"
+    "common crypto"
+    "common util"
+    "consensus crypto"
+    "node common"
+    "node consensus"
+    "node crypto"
+    "node kernel"
+    "node util"
+    "util crypto"
+    "wallet common"
+    "wallet crypto"
+    "wallet util"
+)
+
+# Add minor dependencies omitted from doc/design/libraries.md to keep the
+# dependency diagram simple.
+ALLOWED_DEPENDENCIES+=(
+    "wallet consensus"
+)
+
+# Declare list of known errors that should be suppressed.
+declare -A SUPPRESS
+# init.cpp file currently calls Berkeley DB sanity check function on startup, so
+# there is an undocumented dependency of the node library on the wallet library.
+SUPPRESS["init.cpp.o bdb.cpp.o _ZN6wallet27BerkeleyDatabaseSanityCheckEv"]=1
+# init/common.cpp file calls InitError and InitWarning from interface_ui which
+# is currently part of the node library. interface_ui should just be part of the
+# common library instead, and is moved in
+# https://github.com/bitcoin/bitcoin/issues/10102
+SUPPRESS["common.cpp.o interface_ui.cpp.o _Z11InitWarningRK13bilingual_str"]=1
+SUPPRESS["common.cpp.o interface_ui.cpp.o _Z9InitErrorRK13bilingual_str"]=1
+# rpc/external_signer.cpp defines node RPC methods but is built as part of the
+# common library. It should be moved to the node library instead.
+SUPPRESS["external_signer.cpp.o server.cpp.o _ZN9CRPCTable13appendCommandERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEEPK11CRPCCommand"]=1 + +usage() { + echo "Usage: $(basename "${BASH_SOURCE[0]}") [BUILD_DIR]" +} + +# Output makefile targets, converting library .a paths to CMake targets +lib_targets() { + for lib in "${!LIBS[@]}"; do + for lib_path in ${LIBS[$lib]}; do + local name="${lib_path##*/}" + name="${name#lib}" + name="${name%.a}" + echo "$name" + done + done +} + +# Extract symbol names and object names and write to text files +extract_symbols() { + local temp_dir="$1" + for lib in "${!LIBS[@]}"; do + for lib_path in ${LIBS[$lib]}; do + nm -o "$lib_path" | { grep ' T \| W ' || true; } | awk '{print $3, $1}' >> "${temp_dir}/${lib}_exports.txt" + nm -o "$lib_path" | { grep ' U ' || true; } | awk '{print $3, $1}' >> "${temp_dir}/${lib}_imports.txt" + awk '{print $1}' "${temp_dir}/${lib}_exports.txt" | sort -u > "${temp_dir}/${lib}_exported_symbols.txt" + awk '{print $1}' "${temp_dir}/${lib}_imports.txt" | sort -u > "${temp_dir}/${lib}_imported_symbols.txt" + done + done +} + +# Lookup object name(s) corresponding to symbol name in text file +obj_names() { + local symbol="$1" + local txt_file="$2" + sed -n "s/^$symbol [^:]\\+:\\([^:]\\+\\):[^:]*\$/\\1/p" "$txt_file" | sort -u +} + +# Iterate through libraries and find disallowed dependencies +check_libraries() { + local temp_dir="$1" + local result=0 + for src in "${!LIBS[@]}"; do + for dst in "${!LIBS[@]}"; do + if [ "$src" != "$dst" ] && ! is_allowed "$src" "$dst"; then + if ! check_disallowed "$src" "$dst" "$temp_dir"; then + result=1 + fi + fi + done + done + check_not_suppressed + return $result +} + +# Return whether src library is allowed to depend on dst. +is_allowed() { + local src="$1" + local dst="$2" + for allowed in "${ALLOWED_DEPENDENCIES[@]}"; do + if [ "$src $dst" = "$allowed" ]; then + return 0 + fi + done + return 1 +} + +# Return whether src library imports any symbols from dst, assuming src is not +# allowed to depend on dst. +check_disallowed() { + local src="$1" + local dst="$2" + local temp_dir="$3" + local result=0 + + # Loop over symbol names exported by dst and imported by src + while read symbol; do + local dst_obj + dst_obj=$(obj_names "$symbol" "${temp_dir}/${dst}_exports.txt") + while read src_obj; do + if ! check_suppress "$src_obj" "$dst_obj" "$symbol"; then + echo "Error: $src_obj depends on $dst_obj symbol '$(c++filt "$symbol")', can suppress with:" + echo " SUPPRESS[\"$src_obj $dst_obj $symbol\"]=1" + result=1 + fi + done < <(obj_names "$symbol" "${temp_dir}/${src}_imports.txt") + done < <(comm -12 "${temp_dir}/${dst}_exported_symbols.txt" "${temp_dir}/${src}_imported_symbols.txt") + return $result +} + +# Declare array to track errors which were suppressed. +declare -A SUPPRESSED + +# Return whether error should be suppressed and record suppression in +# SUPPRESSED array. +check_suppress() { + local src_obj="$1" + local dst_obj="$2" + local symbol="$3" + for suppress in "${!SUPPRESS[@]}"; do + read suppress_src suppress_dst suppress_pattern <<<"$suppress" + if [[ "$src_obj" == "$suppress_src" && "$dst_obj" == "$suppress_dst" && "$symbol" =~ $suppress_pattern ]]; then + SUPPRESSED["$suppress"]=1 + return 0 + fi + done + return 1 +} + +# Warn about error which were supposed to be suppressed, but were not encountered. +check_not_suppressed() { + for suppress in "${!SUPPRESS[@]}"; do + if [[ ! 
-v SUPPRESSED[$suppress] ]]; then + echo >&2 "Warning: suppression '$suppress' was ignored, consider deleting." + fi + done +} + +# Check arguments. +if [ "$#" = 0 ]; then + BUILD_DIR="$(dirname "${BASH_SOURCE[0]}")/../../build" +elif [ "$#" = 1 ]; then + BUILD_DIR="$1" +else + echo >&2 "Error: wrong number of arguments." + usage >&2 + exit 1 +fi +if [ ! -f "$BUILD_DIR/Makefile" ]; then + echo >&2 "Error: directory '$BUILD_DIR' does not contain a makefile, please specify path to build directory for library targets." + usage >&2 + exit 1 +fi + +# Build libraries and run checks. +# shellcheck disable=SC2046 +cmake --build "$BUILD_DIR" -j"$(nproc)" -t $(lib_targets) +TEMP_DIR="$(mktemp -d)" +cd "$BUILD_DIR/src" +extract_symbols "$TEMP_DIR" +if check_libraries "$TEMP_DIR"; then + echo "Success! No unexpected dependencies were detected." + RET=0 +else + echo >&2 "Error: Unexpected dependencies were detected. Check previous output." + RET=1 +fi +rm -r "$TEMP_DIR" +exit $RET diff --git a/contrib/devtools/circular-dependencies.py b/contrib/devtools/circular-dependencies.py index bc5f09a3e260f..b742a8cea6718 100755 --- a/contrib/devtools/circular-dependencies.py +++ b/contrib/devtools/circular-dependencies.py @@ -1,5 +1,5 @@ #!/usr/bin/env python3 -# Copyright (c) 2018-2019 The Bitcoin Core developers +# Copyright (c) 2018-2020 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. @@ -32,7 +32,7 @@ def module_name(path): return None files = dict() -deps = dict() +deps: dict[str, set[str]] = dict() RE = re.compile("^#include <(.*)>") @@ -59,12 +59,12 @@ def module_name(path): deps[module].add(included_module) # Loop to find the shortest (remaining) circular dependency -have_cycle = False +have_cycle: bool = False while True: shortest_cycle = None for module in sorted(deps.keys()): # Build the transitive closure of dependencies of module - closure = dict() + closure: dict[str, list[str]] = dict() for dep in deps[module]: closure[dep] = [] while True: diff --git a/contrib/devtools/clang-format-diff.py b/contrib/devtools/clang-format-diff.py index 98eee67f43008..30e804dbe272e 100755 --- a/contrib/devtools/clang-format-diff.py +++ b/contrib/devtools/clang-format-diff.py @@ -1,166 +1,190 @@ #!/usr/bin/env python3 # -#===- clang-format-diff.py - ClangFormat Diff Reformatter ----*- python -*--===# +# ===- clang-format-diff.py - ClangFormat Diff Reformatter ----*- python -*--===# # -# The LLVM Compiler Infrastructure +# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +# See https://llvm.org/LICENSE.txt for license information. +# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception # -# This file is distributed under the University of Illinois Open Source -# License. -# -# ============================================================ -# -# University of Illinois/NCSA -# Open Source License -# -# Copyright (c) 2007-2015 University of Illinois at Urbana-Champaign. -# All rights reserved. 
-# -# Developed by: -# -# LLVM Team -# -# University of Illinois at Urbana-Champaign -# -# http://llvm.org -# -# Permission is hereby granted, free of charge, to any person obtaining a copy of -# this software and associated documentation files (the "Software"), to deal with -# the Software without restriction, including without limitation the rights to -# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -# of the Software, and to permit persons to whom the Software is furnished to do -# so, subject to the following conditions: -# -# * Redistributions of source code must retain the above copyright notice, -# this list of conditions and the following disclaimers. -# -# * Redistributions in binary form must reproduce the above copyright notice, -# this list of conditions and the following disclaimers in the -# documentation and/or other materials provided with the distribution. -# -# * Neither the names of the LLVM Team, University of Illinois at -# Urbana-Champaign, nor the names of its contributors may be used to -# endorse or promote products derived from this Software without specific -# prior written permission. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE -# SOFTWARE. -# -# ============================================================ -# -#===------------------------------------------------------------------------===# - -r""" -ClangFormat Diff Reformatter -============================ +# ===------------------------------------------------------------------------===# +""" This script reads input from a unified diff and reformats all the changed lines. This is useful to reformat all the lines touched by a specific patch. Example usage for git/svn users: - git diff -U0 HEAD^ | clang-format-diff.py -p1 -i - svn diff --diff-cmd=diff -x-U0 | clang-format-diff.py -i + git diff -U0 --no-color --relative HEAD^ | {clang_format_diff} -p1 -i + svn diff --diff-cmd=diff -x-U0 | {clang_format_diff} -i +It should be noted that the filename contained in the diff is used unmodified +to determine the source file to update. Users calling this script directly +should be careful to ensure that the path in the diff is correct relative to the +current working directory. """ +from __future__ import absolute_import, division, print_function import argparse import difflib -import io import re import subprocess import sys - -# Change this to the full path if clang-format is not on the path. -binary = 'clang-format' +from io import StringIO def main(): - parser = argparse.ArgumentParser(description= - 'Reformat changed lines in diff. 
Without -i ' - 'option just output the diff that would be ' - 'introduced.') - parser.add_argument('-i', action='store_true', default=False, - help='apply edits to files instead of displaying a diff') - parser.add_argument('-p', metavar='NUM', default=0, - help='strip the smallest prefix containing P slashes') - parser.add_argument('-regex', metavar='PATTERN', default=None, - help='custom pattern selecting file paths to reformat ' - '(case sensitive, overrides -iregex)') - parser.add_argument('-iregex', metavar='PATTERN', default= - r'.*\.(cpp|cc|c\+\+|cxx|c|cl|h|hpp|m|mm|inc|js|ts|proto' - r'|protodevel|java)', - help='custom pattern selecting file paths to reformat ' - '(case insensitive, overridden by -regex)') - parser.add_argument('-sort-includes', action='store_true', default=False, - help='let clang-format sort include blocks') - parser.add_argument('-v', '--verbose', action='store_true', - help='be more verbose, ineffective without -i') - args = parser.parse_args() - - # Extract changed lines for each file. - filename = None - lines_by_file = {} - for line in sys.stdin: - match = re.search(r'^\+\+\+\ (.*?/){%s}(\S*)' % args.p, line) - if match: - filename = match.group(2) - if filename is None: - continue - - if args.regex is not None: - if not re.match('^%s$' % args.regex, filename): - continue - else: - if not re.match('^%s$' % args.iregex, filename, re.IGNORECASE): - continue - - match = re.search(r'^@@.*\+(\d+)(,(\d+))?', line) - if match: - start_line = int(match.group(1)) - line_count = 1 - if match.group(3): - line_count = int(match.group(3)) - if line_count == 0: - continue - end_line = start_line + line_count - 1 - lines_by_file.setdefault(filename, []).extend( - ['-lines', str(start_line) + ':' + str(end_line)]) - - # Reformat files containing changes in place. 
- for filename, lines in lines_by_file.items(): - if args.i and args.verbose: - print('Formatting {}'.format(filename)) - command = [binary, filename] - if args.i: - command.append('-i') - if args.sort_includes: - command.append('-sort-includes') - command.extend(lines) - command.extend(['-style=file', '-fallback-style=none']) - p = subprocess.Popen(command, - stdout=subprocess.PIPE, - stderr=None, - stdin=subprocess.PIPE, - universal_newlines=True) - stdout, stderr = p.communicate() - if p.returncode != 0: - sys.exit(p.returncode) - - if not args.i: - with open(filename, encoding="utf8") as f: - code = f.readlines() - formatted_code = io.StringIO(stdout).readlines() - diff = difflib.unified_diff(code, formatted_code, - filename, filename, - '(before formatting)', '(after formatting)') - diff_string = ''.join(diff) - if len(diff_string) > 0: - sys.stdout.write(diff_string) - -if __name__ == '__main__': - main() + parser = argparse.ArgumentParser( + description=__doc__.format(clang_format_diff="%(prog)s"), + formatter_class=argparse.RawDescriptionHelpFormatter, + ) + parser.add_argument( + "-i", + action="store_true", + default=False, + help="apply edits to files instead of displaying a diff", + ) + parser.add_argument( + "-p", + metavar="NUM", + default=0, + help="strip the smallest prefix containing P slashes", + ) + parser.add_argument( + "-regex", + metavar="PATTERN", + default=None, + help="custom pattern selecting file paths to reformat " + "(case sensitive, overrides -iregex)", + ) + parser.add_argument( + "-iregex", + metavar="PATTERN", + default=r".*\.(?:cpp|cc|c\+\+|cxx|cppm|ccm|cxxm|c\+\+m|c|cl|h|hh|hpp" + r"|hxx|m|mm|inc|js|ts|proto|protodevel|java|cs|json|s?vh?)", + help="custom pattern selecting file paths to reformat " + "(case insensitive, overridden by -regex)", + ) + parser.add_argument( + "-sort-includes", + action="store_true", + default=False, + help="let clang-format sort include blocks", + ) + parser.add_argument( + "-v", + "--verbose", + action="store_true", + help="be more verbose, ineffective without -i", + ) + parser.add_argument( + "-style", + help="formatting style to apply (LLVM, GNU, Google, Chromium, " + "Microsoft, Mozilla, WebKit)", + ) + parser.add_argument( + "-fallback-style", + help="The name of the predefined style used as a" + "fallback in case clang-format is invoked with" + "-style=file, but can not find the .clang-format" + "file to use.", + ) + parser.add_argument( + "-binary", + default="clang-format", + help="location of binary to use for clang-format", + ) + args = parser.parse_args() + + # Extract changed lines for each file. + filename = None + lines_by_file = {} + for line in sys.stdin: + match = re.search(r"^\+\+\+\ (.*?/){%s}(\S*)" % args.p, line) + if match: + filename = match.group(2) + if filename is None: + continue + + if args.regex is not None: + if not re.match("^%s$" % args.regex, filename): + continue + else: + if not re.match("^%s$" % args.iregex, filename, re.IGNORECASE): + continue + + match = re.search(r"^@@.*\+(\d+)(?:,(\d+))?", line) + if match: + start_line = int(match.group(1)) + line_count = 1 + if match.group(2): + line_count = int(match.group(2)) + # The input is something like + # + # @@ -1, +0,0 @@ + # + # which means no lines were added. + if line_count == 0: + continue + # Also format lines range if line_count is 0 in case of deleting + # surrounding statements. 
+ end_line = start_line + if line_count != 0: + end_line += line_count - 1 + lines_by_file.setdefault(filename, []).extend( + ["-lines", str(start_line) + ":" + str(end_line)] + ) + + # Reformat files containing changes in place. + for filename, lines in lines_by_file.items(): + if args.i and args.verbose: + print("Formatting {}".format(filename)) + command = [args.binary, filename] + if args.i: + command.append("-i") + if args.sort_includes: + command.append("-sort-includes") + command.extend(lines) + if args.style: + command.extend(["-style", args.style]) + if args.fallback_style: + command.extend(["-fallback-style", args.fallback_style]) + + try: + p = subprocess.Popen( + command, + stdout=subprocess.PIPE, + stderr=None, + stdin=subprocess.PIPE, + universal_newlines=True, + ) + except OSError as e: + # Give the user more context when clang-format isn't + # found/isn't executable, etc. + raise RuntimeError( + 'Failed to run "%s" - %s"' % (" ".join(command), e.strerror) + ) + + stdout, _stderr = p.communicate() + if p.returncode != 0: + sys.exit(p.returncode) + + if not args.i: + with open(filename, encoding="utf8") as f: + code = f.readlines() + formatted_code = StringIO(stdout).readlines() + diff = difflib.unified_diff( + code, + formatted_code, + filename, + filename, + "(before formatting)", + "(after formatting)", + ) + diff_string = "".join(diff) + if len(diff_string) > 0: + sys.stdout.write(diff_string) + sys.exit(1) + + +if __name__ == "__main__": + main() diff --git a/contrib/devtools/copyright_header.py b/contrib/devtools/copyright_header.py index 084914f11a82a..3c98ee7b20d2e 100755 --- a/contrib/devtools/copyright_header.py +++ b/contrib/devtools/copyright_header.py @@ -1,5 +1,5 @@ #!/usr/bin/env python3 -# Copyright (c) 2016-2020 The Bitcoin Core developers +# Copyright (c) 2016-2022 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
@@ -19,9 +19,9 @@ 'src/qt/bitcoinstrings.cpp', 'src/chainparamsseeds.h', # other external copyrights: - 'src/reverse_iterator.h', 'src/test/fuzz/FuzzedDataProvider.h', 'src/tinyformat.h', + 'src/bench/nanobench.h', 'test/functional/test_framework/bignum.py', # python init: '*__init__.py', @@ -32,8 +32,8 @@ # git subtrees "src/crypto/ctaes/", "src/leveldb/", + "src/minisketch", "src/secp256k1/", - "src/univalue/", "src/crc32c/", ] @@ -318,15 +318,13 @@ def get_most_recent_git_change_year(filename): ################################################################################ def read_file_lines(filename): - f = open(filename, 'r', encoding="utf8") - file_lines = f.readlines() - f.close() + with open(filename, 'r', encoding="utf8") as f: + file_lines = f.readlines() return file_lines def write_file_lines(filename, file_lines): - f = open(filename, 'w', encoding="utf8") - f.write(''.join(file_lines)) - f.close() + with open(filename, 'w', encoding="utf8") as f: + f.write(''.join(file_lines)) ################################################################################ # update header years execution @@ -369,7 +367,7 @@ def create_updated_copyright_line(line, last_git_change_year): space_split = after_copyright.split(' ') year_range = space_split[0] start_year, end_year = parse_year_range(year_range) - if end_year == last_git_change_year: + if end_year >= last_git_change_year: return line return (before_copyright + copyright_splitter + year_range_to_str(start_year, last_git_change_year) + ' ' + diff --git a/contrib/devtools/gen-bitcoin-conf.sh b/contrib/devtools/gen-bitcoin-conf.sh new file mode 100755 index 0000000000000..d830852c9e0d2 --- /dev/null +++ b/contrib/devtools/gen-bitcoin-conf.sh @@ -0,0 +1,86 @@ +#!/usr/bin/env bash +# Copyright (c) 2021 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. + +export LC_ALL=C +TOPDIR=${TOPDIR:-$(git rev-parse --show-toplevel)} +BUILDDIR=${BUILDDIR:-$TOPDIR} +BINDIR=${BINDIR:-$BUILDDIR/src} +BITCOIND=${BITCOIND:-$BINDIR/bitcoind} +SHARE_EXAMPLES_DIR=${SHARE_EXAMPLES_DIR:-$TOPDIR/share/examples} +EXAMPLE_CONF_FILE=${EXAMPLE_CONF_FILE:-$SHARE_EXAMPLES_DIR/bitcoin.conf} + +[ ! -x "$BITCOIND" ] && echo "$BITCOIND not found or not executable." && exit 1 + +DIRTY="" +VERSION_OUTPUT=$($BITCOIND --version) +if [[ $VERSION_OUTPUT == *"dirty"* ]]; then + DIRTY="${DIRTY}${BITCOIND}\n" +fi + +if [ -n "$DIRTY" ] +then + echo -e "WARNING: $BITCOIND was built from a dirty tree.\n" + echo -e "To safely generate a bitcoin.conf file, please commit your changes to $BITCOIND, rebuild, then run this script again.\n" +fi + +echo 'Generating example bitcoin.conf file in share/examples/' + +# create the directory, if it doesn't exist +mkdir -p "${SHARE_EXAMPLES_DIR}" + +# create the header text +cat > "${EXAMPLE_CONF_FILE}" << 'EOF' +## +## bitcoin.conf configuration file. +## Generated by contrib/devtools/gen-bitcoin-conf.sh. +## +## Lines beginning with # are comments. +## All possible configuration options are provided. To use, copy this file +## to your data directory (default or specified by -datadir), uncomment +## options you would like to change, and save the file. 
+## + + +### Options +EOF + +# parse the output from bitcoind --help +# adding newlines is a bit funky to ensure portability for BSD +# see here for more details: https://stackoverflow.com/a/24575385 +${BITCOIND} --help \ + | sed '1,/Print this help message and exit/d' \ + | sed -E 's/^[[:space:]]{2}\-/#/' \ + | sed -E 's/^[[:space:]]{7}/# /' \ + | sed -E '/[=[:space:]]/!s/#.*$/&=1/' \ + | awk '/^#[a-z]/{x=$0;next}{if (NF==0) print x"\n",x="";else print}' \ + | sed 's,\(^[[:upper:]].*\)\:$,\ +### \1,' \ + | sed 's/[[:space:]]*$//' >> "${EXAMPLE_CONF_FILE}" + +# create the footer text +cat >> "${EXAMPLE_CONF_FILE}" << 'EOF' + +# [Sections] +# Most options will apply to all networks. To confine an option to a specific +# network, add it under the relevant section below. +# +# Note: If not specified under a network section, the options addnode, connect, +# port, bind, rpcport, rpcbind, and wallet will only apply to mainnet. + +# Options for mainnet +[main] + +# Options for testnet3 +[test] + +# Options for testnet4 +[testnet4] + +# Options for signet +[signet] + +# Options for regtest +[regtest] +EOF diff --git a/contrib/devtools/gen-manpages.py b/contrib/devtools/gen-manpages.py new file mode 100755 index 0000000000000..92acd9a40373c --- /dev/null +++ b/contrib/devtools/gen-manpages.py @@ -0,0 +1,75 @@ +#!/usr/bin/env python3 +# Copyright (c) 2022 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +import os +import subprocess +import sys +import tempfile + +BINARIES = [ +'src/bitcoind', +'src/bitcoin-cli', +'src/bitcoin-tx', +'src/bitcoin-wallet', +'src/bitcoin-util', +'src/qt/bitcoin-qt', +] + +# Paths to external utilities. +git = os.getenv('GIT', 'git') +help2man = os.getenv('HELP2MAN', 'help2man') + +# If not otherwise specified, get top directory from git. +topdir = os.getenv('TOPDIR') +if not topdir: + r = subprocess.run([git, 'rev-parse', '--show-toplevel'], stdout=subprocess.PIPE, check=True, text=True) + topdir = r.stdout.rstrip() + +# Get input and output directories. +builddir = os.getenv('BUILDDIR', topdir) +mandir = os.getenv('MANDIR', os.path.join(topdir, 'doc/man')) + +# Verify that all the required binaries are usable, and extract copyright +# message in a first pass. +versions = [] +for relpath in BINARIES: + abspath = os.path.join(builddir, relpath) + try: + r = subprocess.run([abspath, "--version"], stdout=subprocess.PIPE, check=True, text=True) + except IOError: + print(f'{abspath} not found or not an executable', file=sys.stderr) + sys.exit(1) + # take first line (which must contain version) + verstr = r.stdout.splitlines()[0] + # last word of line is the actual version e.g. v22.99.0-5c6b3d5b3508 + verstr = verstr.split()[-1] + assert verstr.startswith('v') + # remaining lines are copyright + copyright = r.stdout.split('\n')[1:] + assert copyright[0].startswith('Copyright (C)') + + versions.append((abspath, verstr, copyright)) + +if any(verstr.endswith('-dirty') for (_, verstr, _) in versions): + print("WARNING: Binaries were built from a dirty tree.") + print('man pages generated from dirty binaries should NOT be committed.') + print('To properly generate man pages, please commit your changes (or discard them), rebuild, then run this script again.') + print() + +with tempfile.NamedTemporaryFile('w', suffix='.h2m') as footer: + # Create copyright footer, and write it to a temporary include file. 
+ # Copyright is the same for all binaries, so just use the first. + footer.write('[COPYRIGHT]\n') + footer.write('\n'.join(versions[0][2]).strip()) + # Create SEE ALSO section + footer.write('\n[SEE ALSO]\n') + footer.write(', '.join(s.rpartition('/')[2] + '(1)' for s in BINARIES)) + footer.write('\n') + footer.flush() + + # Call the binaries through help2man to produce a manual page for each of them. + for (abspath, verstr, _) in versions: + outname = os.path.join(mandir, os.path.basename(abspath) + '.1') + print(f'Generating {outname}…') + subprocess.run([help2man, '-N', '--version-string=' + verstr, '--include=' + footer.name, '-o', outname, abspath], check=True) diff --git a/contrib/devtools/gen-manpages.sh b/contrib/devtools/gen-manpages.sh deleted file mode 100755 index aa65953d83858..0000000000000 --- a/contrib/devtools/gen-manpages.sh +++ /dev/null @@ -1,36 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) 2016-2019 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. - -export LC_ALL=C -TOPDIR=${TOPDIR:-$(git rev-parse --show-toplevel)} -BUILDDIR=${BUILDDIR:-$TOPDIR} - -BINDIR=${BINDIR:-$BUILDDIR/src} -MANDIR=${MANDIR:-$TOPDIR/doc/man} - -BITCOIND=${BITCOIND:-$BINDIR/bitcoind} -BITCOINCLI=${BITCOINCLI:-$BINDIR/bitcoin-cli} -BITCOINTX=${BITCOINTX:-$BINDIR/bitcoin-tx} -WALLET_TOOL=${WALLET_TOOL:-$BINDIR/bitcoin-wallet} -BITCOINQT=${BITCOINQT:-$BINDIR/qt/bitcoin-qt} - -[ ! -x $BITCOIND ] && echo "$BITCOIND not found or not executable." && exit 1 - -# The autodetected version git tag can screw up manpage output a little bit -read -r -a BTCVER <<< "$($BITCOINCLI --version | head -n1 | awk -F'[ -]' '{ print $6, $7 }')" - -# Create a footer file with copyright content. -# This gets autodetected fine for bitcoind if --version-string is not set, -# but has different outcomes for bitcoin-qt and bitcoin-cli. -echo "[COPYRIGHT]" > footer.h2m -$BITCOIND --version | sed -n '1!p' >> footer.h2m - -for cmd in $BITCOIND $BITCOINCLI $BITCOINTX $WALLET_TOOL $BITCOINQT; do - cmdname="${cmd##*/}" - help2man -N --version-string=${BTCVER[0]} --include=footer.h2m -o ${MANDIR}/${cmdname}.1 ${cmd} - sed -i "s/\\\-${BTCVER[1]}//g" ${MANDIR}/${cmdname}.1 -done - -rm -f footer.h2m diff --git a/contrib/devtools/headerssync-params.py b/contrib/devtools/headerssync-params.py new file mode 100755 index 0000000000000..f8e415532a035 --- /dev/null +++ b/contrib/devtools/headerssync-params.py @@ -0,0 +1,357 @@ +#!/usr/bin/env python3 +# Copyright (c) 2022 Pieter Wuille +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. + +"""Script to find the optimal parameters for the headerssync module through simulation.""" + +from math import log, exp, sqrt +from datetime import datetime, timedelta +import random + +# Parameters: + +# Aim for still working fine at some point in the future. [datetime] +TIME = datetime(2027, 4, 1) + +# Expected block interval. [timedelta] +BLOCK_INTERVAL = timedelta(seconds=600) + +# The number of headers corresponding to the minchainwork parameter. [headers] +MINCHAINWORK_HEADERS = 856760 + +# Combined processing bandwidth from all attackers to one victim. [bit/s] +# 6 Gbit/s is approximately the speed at which a single thread of a Ryzen 5950X CPU thread can hash +# headers. 
In practice, the victim's network bandwidth and network processing overheads probably +# impose a far lower number, but it's a useful upper bound. +ATTACK_BANDWIDTH = 6000000000 + +# How much additional permanent memory usage are attackers (jointly) allowed to cause in the victim, +# expressed as fraction of the normal memory usage due to mainchain growth, for the duration the +# attack is sustained. [unitless] +# 0.2 means that attackers, while they keep up the attack, can cause permanent memory usage due to +# headers storage to grow at 1.2 header per BLOCK_INTERVAL. +ATTACK_FRACTION = 0.2 + +# When this is set, the mapping from period size to memory usage (at optimal buffer size for that +# period) is assumed to be convex. This greatly speeds up the computation, and does not appear +# to influence the outcome. Set to False for a stronger guarantee to get the optimal result. +ASSUME_CONVEX = True + +# Explanation: +# +# The headerssync module implements a DoS protection against low-difficulty header spam which does +# not rely on checkpoints. In short it works as follows: +# +# - (initial) header synchronization is split into two phases: +# - A commitment phase, in which headers are downloaded from the peer, and a very compact +# commitment to them is remembered in per-peer memory. The commitment phase ends when the +# received chain's combined work reaches a predetermined threshold. +# - A redownload phase, during which the headers are downloaded a second time from the same peer, +# and compared against the commitment constructed in the first phase. If there is a match, the +# redownloaded headers are fed to validation and accepted into permanent storage. +# +# This separation guarantees that no headers are accepted into permanent storage without +# requiring the peer to first prove the chain actually has sufficient work. +# +# - To actually implement this commitment mechanism, the following approach is used: +# - Keep a *1 bit* commitment (constructed using a salted hash function), for every block whose +# height is a multiple of {period} plus an offset value. If RANDOMIZE_OFFSET, the offset, +# like the salt, is chosen randomly when the synchronization starts and kept fixed afterwards. +# - When redownloading, headers are fed through a per-peer queue that holds {bufsize} headers, +# before passing them to validation. All the headers in this queue are verified against the +# commitment bits created in the first phase before any header is released from it. This means +# {bufsize/period} bits are checked "on top of" each header before actually processing it, +# which results in a commitment structure with roughly {bufsize/period} bits of security, as +# once a header is modified, due to the prevhash inclusion, all future headers necessarily +# change as well. +# +# The question is what these {period} and {bufsize} parameters need to be set to. This program +# exhaustively tests a range of values to find the optimal choice, taking into account: +# +# - Minimizing the (maximum of) two scenarios that trigger per-peer memory usage: +# +# - When downloading a (likely honest) chain that reaches the chainwork threshold after {n} +# blocks, and then redownloads them, we will consume per-peer memory that is sufficient to +# store {n/period} commitment bits and {bufsize} headers. We only consider attackers without +# sufficient hashpower (as otherwise they are from a PoW perspective not attackers), which +# means {n} is restricted to the honest chain's length before reaching minchainwork. 
+#
+# - When downloading a (likely false) chain of {n} headers that never reaches the chainwork
+#   threshold, we will consume per-peer memory that is sufficient to store {n/period}
+#   commitment bits. Such a chain may be very long, by exploiting the timewarp bug to avoid
+#   ramping up difficulty. There is however an absolute limit on how long such a chain can be: 6
+#   blocks per second since genesis, due to the increasing MTP consensus rule.
+#
+# - Not gratuitously preventing synchronizing any valid chain, however difficult such a chain may
+#   be to construct. In particular, the above scenario with an enormous timewarp-exploiting chain
+#   cannot simply be ignored, as it is legal that the honest main chain is like that. We however
+#   do not bother minimizing the memory usage in that case (because a billion-header long honest
+#   chain will inevitably use far larger amounts of memory than designed for).
+#
+# - Keep the rate at which attackers can get low-difficulty headers accepted to the block index
+#   negligible. Specifically, the possibility exists for an attacker to send the honest main
+#   chain's headers during the commitment phase, but then start deviating at an attacker-chosen
+#   point by sending novel low-difficulty headers instead. Depending on how high we set the
+#   {bufsize/period} ratio, we can make the probability that such a header makes it in
+#   arbitrarily small, but at the cost of higher memory during the redownload phase. It turns out,
+#   some rate of memory usage growth is expected anyway due to chain growth, so permitting the
+#   attacker to increase that rate by a small factor isn't concerning. The attacker may start
+#   somewhat later than genesis, as long as the difficulty doesn't get too high. This reduces
+#   the attacker bandwidth required at the cost of higher PoW needed for constructing the
+#   alternate chain. This trade-off is ignored here, as it results in at most a small constant
+#   factor in attack rate.
+
+
+# System properties:
+
+# Headers in the redownload buffer are stored without prevhash. [bits]
+COMPACT_HEADER_SIZE = 48 * 8
+
+# How many bits a header uses in P2P protocol. [bits]
+NET_HEADER_SIZE = 81 * 8
+
+# How many headers are sent at once. [headers]
+HEADER_BATCH_COUNT = 2000
+
+# Whether or not the offset of which block heights get checksummed is randomized.
+RANDOMIZE_OFFSET = True
+
+# Timestamp of the genesis block
+GENESIS_TIME = datetime(2009, 1, 3)
+
+# Derived values:
+
+# What rate of headers worth of RAM attackers are allowed to cause in the victim. [headers/s]
+LIMIT_HEADERRATE = ATTACK_FRACTION / BLOCK_INTERVAL.total_seconds()
+
+# How many headers can attackers (jointly) send a victim per second. [headers/s]
+NET_HEADERRATE = ATTACK_BANDWIDTH / NET_HEADER_SIZE
+
+# What fraction of headers sent by attackers can at most be accepted by a victim [unitless]
+LIMIT_FRACTION = LIMIT_HEADERRATE / NET_HEADERRATE
+
+# How many headers we permit attackers to cause being accepted per attack. [headers/attack]
+ATTACK_HEADERS = LIMIT_FRACTION * MINCHAINWORK_HEADERS
+
+
+def find_max_headers(when):
+    """Compute the maximum number of headers a valid Bitcoin chain can have at given time."""
+    # When exploiting the timewarp attack, this can be up to 6 per second since genesis.
+    return 6 * ((when - GENESIS_TIME) // timedelta(seconds=1))
+
+
+def lambert_w(value):
+    """Solve the equation x*exp(x)=value (x > 0, value > 0)."""
+    # Initial approximation.
+    approx = max(log(value), 0.0)
+    for _ in range(10):
+        # Newton-Raphson iteration steps.
+ approx += (value * exp(-approx) - approx) / (approx + 1.0) + return approx + + +def attack_rate(period, bufsize, limit=None): + """Compute maximal accepted headers per attack in (period, bufsize) configuration. + + If limit is provided, the computation is stopped early when the result is known to exceed the + value in limit. + """ + + max_rate = None + max_honest = None + # Let the current batch 0 being received be the first one in which the attacker starts lying. + # They will only ever start doing so right after a commitment block, but where that is can be + # in a number of places. Let honest be the number of honest headers in this current batch, + # preceding the forged ones. + for honest in range(HEADER_BATCH_COUNT): + # The number of headers the attack under consideration will on average get accepted. + # This is the number being computed. + rate = 0 + + # Iterate over the possible alignments of commitments w.r.t. the first batch. In case + # the alignments are randomized, try all values. If not, the attacker can know/choose + # the alignment, and will always start forging right after a commitment. + if RANDOMIZE_OFFSET: + align_choices = list(range(period)) + else: + align_choices = [(honest - 1) % period] + # Now loop over those possible alignment values, computing the average attack rate + # over them by dividing each contribution by len(align_choices). + for align in align_choices: + # These state variables capture the situation after receiving the first batch. + # - The number of headers received after the last commitment for an honest block: + after_good_commit = HEADER_BATCH_COUNT - honest + ((honest - align - 1) % period) + # - The number of forged headers in the redownload buffer: + forged_in_buf = HEADER_BATCH_COUNT - honest + + # Now iterate over the next batches of headers received, adding contributions to the + # rate variable. + while True: + # Process the first HEADER_BATCH_COUNT headers in the buffer: + accept_forged_headers = max(forged_in_buf - bufsize, 0) + forged_in_buf -= accept_forged_headers + if accept_forged_headers: + # The probability the attack has not been detected yet at this point: + prob = 0.5 ** (after_good_commit // period) + # Update attack rate, divided by align_choices to average over the alignments. + rate += accept_forged_headers * prob / len(align_choices) + # If this means we exceed limit, bail out early (performance optimization). + if limit is not None and rate >= limit: + return rate, None + # If the maximal term being added is negligible compared to rate, stop + # iterating. 
+ if HEADER_BATCH_COUNT * prob < 1.0e-16 * rate * len(align_choices): + break + # Update state from a new incoming batch (which is all forged) + after_good_commit += HEADER_BATCH_COUNT + forged_in_buf += HEADER_BATCH_COUNT + + if max_rate is None or rate > max_rate: + max_rate = rate + max_honest = honest + + return max_rate, max_honest + + +def memory_usage(period, bufsize, when): + """How much memory (max,mainchain,timewarp) does the (period,bufsize) configuration need?""" + + # Per-peer memory usage for a timewarp chain that never meets minchainwork + mem_timewarp = find_max_headers(when) // period + # Per-peer memory usage for being fed the main chain + mem_mainchain = (MINCHAINWORK_HEADERS // period) + bufsize * COMPACT_HEADER_SIZE + # Maximum per-peer memory usage + max_mem = max(mem_timewarp, mem_mainchain) + + return max_mem, mem_mainchain, mem_timewarp + +def find_bufsize(period, attack_headers, when, max_mem=None, min_bufsize=1): + """Determine how big bufsize needs to be given a specific period length. + + Given a period, find the smallest value of bufsize such that the attack rate against the + (period, bufsize) configuration is below attack_headers. If max_mem is provided, and no + such bufsize exists that needs less than max_mem bits of memory, None is returned. + min_bufsize is the minimal result to be considered.""" + + if max_mem is None: + succ_buf = min_bufsize - 1 + fail_buf = min_bufsize + # First double iteratively until an upper bound for failure is found. + while True: + if attack_rate(period, fail_buf, attack_headers)[0] < attack_headers: + break + succ_buf, fail_buf = fail_buf, 3 * fail_buf - 2 * succ_buf + else: + # If a long low-work header chain exists that exceeds max_mem already, give up. + if find_max_headers(when) // period > max_mem: + return None + # Otherwise, verify that the maximal buffer size that permits a mainchain sync with less + # than max_mem memory is sufficient to get the attack rate below attack_headers. If not, + # also give up. + max_buf = (max_mem - (MINCHAINWORK_HEADERS // period)) // COMPACT_HEADER_SIZE + if max_buf < min_bufsize: + return None + if attack_rate(period, max_buf, attack_headers)[0] >= attack_headers: + return None + # If it is sufficient, that's an upper bound to start our search. + succ_buf = min_bufsize - 1 + fail_buf = max_buf + + # Then perform a bisection search to narrow it down. + while fail_buf > succ_buf + 1: + try_buf = (succ_buf + fail_buf) // 2 + if attack_rate(period, try_buf, attack_headers)[0] >= attack_headers: + succ_buf = try_buf + else: + fail_buf = try_buf + return fail_buf + + +def optimize(when): + """Find the best (period, bufsize) configuration.""" + + # When period*bufsize = memory_scale, the per-peer memory for a mainchain sync and a maximally + # long low-difficulty header sync are equal. + memory_scale = (find_max_headers(when) - MINCHAINWORK_HEADERS) / COMPACT_HEADER_SIZE + # Compute approximation for {bufsize/period}, using a formula for a simplified problem. + approx_ratio = lambert_w(log(4) * memory_scale / ATTACK_HEADERS**2) / log(4) + # Use those for a first attempt. 
+ print("Searching configurations:") + period = int(sqrt(memory_scale / approx_ratio) + 0.5) + bufsize = find_bufsize(period, ATTACK_HEADERS, when) + mem = memory_usage(period, bufsize, when) + best = (period, bufsize, mem) + maps = [(period, bufsize), (MINCHAINWORK_HEADERS + 1, None)] + print(f"- Initial: period={period}, buffer={bufsize}, mem={mem[0] / 8192:.3f} KiB") + + # Consider all period values between 1 and MINCHAINWORK_HEADERS, except the one just tried. + periods = [iv for iv in range(1, MINCHAINWORK_HEADERS + 1) if iv != period] + # Iterate, picking a random element from periods, computing its corresponding bufsize, and + # then using the result to shrink the period. + while True: + # Remove all periods whose memory usage for low-work long chain sync exceed the best + # memory usage we've found so far. + periods = [p for p in periods if find_max_headers(when) // p < best[2][0]] + # Stop if there is nothing left to try. + if len(periods) == 0: + break + # Pick a random remaining option for period size, and compute corresponding bufsize. + period = periods.pop(random.randrange(len(periods))) + # The buffer size (at a given attack level) cannot shrink as the period grows. Find the + # largest period smaller than the selected one we know the buffer size for, and use that + # as a lower bound to find_bufsize. + min_bufsize = max([(p, b) for p, b in maps if p < period] + [(0,0)])[1] + bufsize = find_bufsize(period, ATTACK_HEADERS, when, best[2][0], min_bufsize) + if bufsize is not None: + # We found a (period, bufsize) configuration with better memory usage than our best + # so far. Remember it for future lower bounds. + maps.append((period, bufsize)) + mem = memory_usage(period, bufsize, when) + assert mem[0] <= best[2][0] + if ASSUME_CONVEX: + # Remove all periods that are on the other side of the former best as the new + # best. + periods = [p for p in periods if (p < best[0]) == (period < best[0])] + best = (period, bufsize, mem) + print(f"- New best: period={period}, buffer={bufsize}, mem={mem[0] / 8192:.3f} KiB") + else: + # The (period, bufsize) configuration we found is worse than what we already had. + if ASSUME_CONVEX: + # Remove all periods that are on the other side of the tried configuration as the + # best one. + periods = [p for p in periods if (p < period) == (best[0] < period)] + + # Return the result. + period, bufsize, _ = best + return period, bufsize + + +def analyze(when): + """Find the best configuration and print it out.""" + + period, bufsize = optimize(when) + # Compute accurate statistics for the best found configuration. + _, mem_mainchain, mem_timewarp = memory_usage(period, bufsize, when) + headers_per_attack, _ = attack_rate(period, bufsize) + attack_volume = NET_HEADER_SIZE * MINCHAINWORK_HEADERS + # And report them. + print() + print("Optimal configuration:") + print() + print("//! Store one header commitment per HEADER_COMMITMENT_PERIOD blocks.") + print(f"constexpr size_t HEADER_COMMITMENT_PERIOD{{{period}}};") + print() + print("//! Only feed headers to validation once this many headers on top have been") + print("//! 
received and validated against commitments.") + print(f"constexpr size_t REDOWNLOAD_BUFFER_SIZE{{{bufsize}}};" + f" // {bufsize}/{period} = ~{bufsize/period:.1f} commitments") + print() + print("Properties:") + print(f"- Per-peer memory for mainchain sync: {mem_mainchain / 8192:.3f} KiB") + print(f"- Per-peer memory for timewarp attack: {mem_timewarp / 8192:.3f} KiB") + print(f"- Attack rate: {1/headers_per_attack:.1f} attacks for 1 header of memory growth") + print(f" (where each attack costs {attack_volume / 8388608:.3f} MiB bandwidth)") + + +analyze(TIME) diff --git a/contrib/devtools/iwyu/bitcoin.core.imp b/contrib/devtools/iwyu/bitcoin.core.imp new file mode 100644 index 0000000000000..befc949f18ca8 --- /dev/null +++ b/contrib/devtools/iwyu/bitcoin.core.imp @@ -0,0 +1,4 @@ +# Fixups / upstreamed changes +[ + { include: [ "", private, "", public ] }, +] diff --git a/contrib/devtools/previous_release.sh b/contrib/devtools/previous_release.sh deleted file mode 100755 index b2ecc274fbb7b..0000000000000 --- a/contrib/devtools/previous_release.sh +++ /dev/null @@ -1,152 +0,0 @@ -#!/usr/bin/env bash -# -# Copyright (c) 2018-2020 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. -# -# Build previous releases. - -export LC_ALL=C - -CONFIG_FLAGS="" -FUNCTIONAL_TESTS=0 -DELETE_EXISTING=0 -USE_DEPENDS=0 -DOWNLOAD_BINARY=0 -CONFIG_FLAGS="" -TARGET="releases" - -while getopts ":hfrdbt:" opt; do - case $opt in - h) - echo "Usage: .previous_release.sh [options] tag1 tag2" - echo " options:" - echo " -h Print this message" - echo " -f Configure for functional tests" - echo " -r Remove existing directory" - echo " -d Use depends" - echo " -b Download release binary" - echo " -t Target directory (default: releases)" - exit 0 - ;; - f) - FUNCTIONAL_TESTS=1 - CONFIG_FLAGS="$CONFIG_FLAGS --without-gui --disable-tests --disable-bench" - ;; - r) - DELETE_EXISTING=1 - ;; - d) - USE_DEPENDS=1 - ;; - b) - DOWNLOAD_BINARY=1 - ;; - t) - TARGET=$OPTARG - ;; - \?) - echo "Invalid option: -$OPTARG" >&2 - exit 1 - ;; - esac -done - -shift $((OPTIND-1)) - -if [ -z "$1" ]; then - echo "Specify release tag(s), e.g.: .previous_release v0.15.1" - exit 1 -fi - -if [ ! -d "$TARGET" ]; then - mkdir -p $TARGET -fi - -if [ "$DOWNLOAD_BINARY" -eq "1" ]; then - HOST="${HOST:-$(./depends/config.guess)}" - case "$HOST" in - x86_64-*-linux*) - PLATFORM=x86_64-linux-gnu - ;; - x86_64-apple-darwin*) - PLATFORM=osx64 - ;; - *) - echo "Not sure which binary to download for $HOST." - exit 1 - ;; - esac -fi - -echo "Releases directory: $TARGET" -pushd "$TARGET" || exit 1 -{ - for tag in "$@" - do - if [ "$DELETE_EXISTING" -eq "1" ]; then - if [ -d "$tag" ]; then - rm -r "$tag" - fi - fi - - if [ "$DOWNLOAD_BINARY" -eq "0" ]; then - - if [ ! 
-d "$tag" ]; then - if [ -z $(git tag -l "$tag") ]; then - echo "Tag $tag not found" - exit 1 - fi - - git clone https://github.com/bitcoin/bitcoin "$tag" - pushd "$tag" || exit 1 - { - git checkout "$tag" - if [ "$USE_DEPENDS" -eq "1" ]; then - pushd depends || exit 1 - { - if [ "$FUNCTIONAL_TESTS" -eq "1" ]; then - make NO_QT=1 - else - make - fi - HOST="${HOST:-$(./config.guess)}" - } - popd || exit 1 - CONFIG_FLAGS="--prefix=$PWD/depends/$HOST $CONFIG_FLAGS" - fi - ./autogen.sh - ./configure $CONFIG_FLAGS - make - # Move binaries, so they're in the same place as in the release download: - mkdir bin - mv src/bitcoind src/bitcoin-cli src/bitcoin-tx bin - if [ "$FUNCTIONAL_TESTS" -eq "0" ]; then - mv src/qt/bitcoin-qt bin - fi - } - popd || exit 1 - fi - else - if [ -d "$tag" ]; then - echo "Using cached $tag" - else - mkdir "$tag" - if [[ "$tag" =~ v(.*)(rc[0-9]+)$ ]]; then - BIN_PATH="bin/bitcoin-core-${BASH_REMATCH[1]}/test.${BASH_REMATCH[2]}" - else - BIN_PATH="bin/bitcoin-core-${tag:1}" - fi - URL="https://bitcoin.org/$BIN_PATH/bitcoin-${tag:1}-$PLATFORM.tar.gz" - echo "Fetching: $URL" - if ! curl -O -f $URL; then - echo "Download failed." - exit 1 - fi - tar -zxf "bitcoin-${tag:1}-$PLATFORM.tar.gz" -C "$tag" --strip-components=1 "bitcoin-${tag:1}" - rm "bitcoin-${tag:1}-$PLATFORM.tar.gz" - fi - fi - done -} -popd || exit 1 diff --git a/contrib/devtools/security-check.py b/contrib/devtools/security-check.py index 9444271bdce4e..4c20685b51c94 100755 --- a/contrib/devtools/security-check.py +++ b/contrib/devtools/security-check.py @@ -1,311 +1,283 @@ #!/usr/bin/env python3 -# Copyright (c) 2015-2020 The Bitcoin Core developers +# Copyright (c) 2015-2022 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. ''' Perform basic security checks on a series of executables. Exit status will be 0 if successful, and the program will be silent. Otherwise the exit status will be 1 and it will log which executables failed which checks. -Needs `readelf` (for ELF), `objdump` (for PE) and `otool` (for MACHO). ''' -import subprocess +import re import sys -import os -READELF_CMD = os.getenv('READELF', '/usr/bin/readelf') -OBJDUMP_CMD = os.getenv('OBJDUMP', '/usr/bin/objdump') -OTOOL_CMD = os.getenv('OTOOL', '/usr/bin/otool') -NONFATAL = {} # checks which are non-fatal for now but only generate a warning +import lief -def check_ELF_PIE(executable): - ''' - Check for position independent executable (PIE), allowing for address space randomization. 
- ''' - p = subprocess.Popen([READELF_CMD, '-h', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True) - (stdout, stderr) = p.communicate() - if p.returncode: - raise IOError('Error opening file') - - ok = False - for line in stdout.splitlines(): - line = line.split() - if len(line)>=2 and line[0] == 'Type:' and line[1] == 'DYN': - ok = True - return ok - -def get_ELF_program_headers(executable): - '''Return type and flags for ELF program headers''' - p = subprocess.Popen([READELF_CMD, '-l', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True) - (stdout, stderr) = p.communicate() - if p.returncode: - raise IOError('Error opening file') - in_headers = False - count = 0 - headers = [] - for line in stdout.splitlines(): - if line.startswith('Program Headers:'): - in_headers = True - if line == '': - in_headers = False - if in_headers: - if count == 1: # header line - ofs_typ = line.find('Type') - ofs_offset = line.find('Offset') - ofs_flags = line.find('Flg') - ofs_align = line.find('Align') - if ofs_typ == -1 or ofs_offset == -1 or ofs_flags == -1 or ofs_align == -1: - raise ValueError('Cannot parse elfread -lW output') - elif count > 1: - typ = line[ofs_typ:ofs_offset].rstrip() - flags = line[ofs_flags:ofs_align].rstrip() - headers.append((typ, flags)) - count += 1 - return headers - -def check_ELF_NX(executable): - ''' - Check that no sections are writable and executable (including the stack) - ''' - have_wx = False - have_gnu_stack = False - for (typ, flags) in get_ELF_program_headers(executable): - if typ == 'GNU_STACK': - have_gnu_stack = True - if 'W' in flags and 'E' in flags: # section is both writable and executable - have_wx = True - return have_gnu_stack and not have_wx - -def check_ELF_RELRO(executable): +def check_ELF_RELRO(binary) -> bool: ''' Check for read-only relocations. GNU_RELRO program header must exist Dynamic section must have BIND_NOW flag ''' have_gnu_relro = False - for (typ, flags) in get_ELF_program_headers(executable): - # Note: not checking flags == 'R': here as linkers set the permission differently - # This does not affect security: the permission flags of the GNU_RELRO program header are ignored, the PT_LOAD header determines the effective permissions. + for segment in binary.segments: + # Note: not checking p_flags == PF_R: here as linkers set the permission differently + # This does not affect security: the permission flags of the GNU_RELRO program + # header are ignored, the PT_LOAD header determines the effective permissions. # However, the dynamic linker need to write to this area so these are RW. # Glibc itself takes care of mprotecting this area R after relocations are finished. 
# See also https://marc.info/?l=binutils&m=1498883354122353 - if typ == 'GNU_RELRO': + if segment.type == lief.ELF.SEGMENT_TYPES.GNU_RELRO: have_gnu_relro = True have_bindnow = False - p = subprocess.Popen([READELF_CMD, '-d', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True) - (stdout, stderr) = p.communicate() - if p.returncode: - raise IOError('Error opening file') - for line in stdout.splitlines(): - tokens = line.split() - if len(tokens)>1 and tokens[1] == '(BIND_NOW)' or (len(tokens)>2 and tokens[1] == '(FLAGS)' and 'BIND_NOW' in tokens[2:]): + try: + flags = binary.get(lief.ELF.DYNAMIC_TAGS.FLAGS) + if flags.value & lief.ELF.DYNAMIC_FLAGS.BIND_NOW: have_bindnow = True + except Exception: + have_bindnow = False + return have_gnu_relro and have_bindnow -def check_ELF_Canary(executable): +def check_ELF_CANARY(binary) -> bool: ''' Check for use of stack canary ''' - p = subprocess.Popen([READELF_CMD, '--dyn-syms', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True) - (stdout, stderr) = p.communicate() - if p.returncode: - raise IOError('Error opening file') - ok = False - for line in stdout.splitlines(): - if '__stack_chk_fail' in line: - ok = True - return ok - -def get_PE_dll_characteristics(executable): + return binary.has_symbol('__stack_chk_fail') + +def check_ELF_SEPARATE_CODE(binary): + ''' + Check that sections are appropriately separated in virtual memory, + based on their permissions. This checks for missing -Wl,-z,separate-code + and potentially other problems. + ''' + R = lief.ELF.SEGMENT_FLAGS.R + W = lief.ELF.SEGMENT_FLAGS.W + E = lief.ELF.SEGMENT_FLAGS.X + EXPECTED_FLAGS = { + # Read + execute + '.init': R | E, + '.plt': R | E, + '.plt.got': R | E, + '.plt.sec': R | E, + '.text': R | E, + '.fini': R | E, + # Read-only data + '.interp': R, + '.note.gnu.property': R, + '.note.gnu.build-id': R, + '.note.ABI-tag': R, + '.gnu.hash': R, + '.dynsym': R, + '.dynstr': R, + '.gnu.version': R, + '.gnu.version_r': R, + '.rela.dyn': R, + '.rela.plt': R, + '.rodata': R, + '.eh_frame_hdr': R, + '.eh_frame': R, + '.qtmetadata': R, + '.gcc_except_table': R, + '.stapsdt.base': R, + # Writable data + '.init_array': R | W, + '.fini_array': R | W, + '.dynamic': R | W, + '.got': R | W, + '.data': R | W, + '.bss': R | W, + } + if binary.header.machine_type == lief.ELF.ARCH.PPC64: + # .plt is RW on ppc64 even with separate-code + EXPECTED_FLAGS['.plt'] = R | W + # For all LOAD program headers get mapping to the list of sections, + # and for each section, remember the flags of the associated program header. + flags_per_section = {} + for segment in binary.segments: + if segment.type == lief.ELF.SEGMENT_TYPES.LOAD: + for section in segment.sections: + flags_per_section[section.name] = segment.flags + # Spot-check ELF LOAD program header flags per section + # If these sections exist, check them against the expected R/W/E flags + for (section, flags) in flags_per_section.items(): + if section in EXPECTED_FLAGS: + if int(EXPECTED_FLAGS[section]) != int(flags): + return False + return True + +def check_ELF_CONTROL_FLOW(binary) -> bool: ''' - Get PE DllCharacteristics bits. - Returns a tuple (arch,bits) where arch is 'i386:x86-64' or 'i386' - and bits is the DllCharacteristics value. 
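(For context on check_ELF_SEPARATE_CODE above: it compares each section against the permission flags of the LOAD segment that maps it. The standalone sketch below, using the same LIEF calls and a placeholder binary path, dumps exactly that mapping.)

```python
# Dump section -> LOAD segment permission flags, as inspected by check_ELF_SEPARATE_CODE.
# 'src/bitcoind' is a placeholder path.
import lief

binary = lief.parse('src/bitcoind')
for segment in binary.segments:
    if segment.type == lief.ELF.SEGMENT_TYPES.LOAD:
        for section in segment.sections:
            print(f'{section.name:24} flags={int(segment.flags)}')
```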
+ Check for control flow instrumentation ''' - p = subprocess.Popen([OBJDUMP_CMD, '-x', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True) - (stdout, stderr) = p.communicate() - if p.returncode: - raise IOError('Error opening file') - arch = '' - bits = 0 - for line in stdout.splitlines(): - tokens = line.split() - if len(tokens)>=2 and tokens[0] == 'architecture:': - arch = tokens[1].rstrip(',') - if len(tokens)>=2 and tokens[0] == 'DllCharacteristics': - bits = int(tokens[1],16) - return (arch,bits) - -IMAGE_DLL_CHARACTERISTICS_HIGH_ENTROPY_VA = 0x0020 -IMAGE_DLL_CHARACTERISTICS_DYNAMIC_BASE = 0x0040 -IMAGE_DLL_CHARACTERISTICS_NX_COMPAT = 0x0100 - -def check_PE_DYNAMIC_BASE(executable): + main = binary.get_function_address('main') + content = binary.get_content_from_virtual_address(main, 4, lief.Binary.VA_TYPES.AUTO) + + if content.tolist() == [243, 15, 30, 250]: # endbr64 + return True + return False + +def check_ELF_FORTIFY(binary) -> bool: + + # bitcoin-util does not currently contain any fortified functions + if 'Bitcoin Core bitcoin-util utility version ' in binary.strings: + return True + + chk_funcs = set() + + for sym in binary.imported_symbols: + match = re.search(r'__[a-z]*_chk', sym.name) + if match: + chk_funcs.add(match.group(0)) + + # ignore stack-protector and bdb + chk_funcs.discard('__stack_chk') + chk_funcs.discard('__db_chk') + + return len(chk_funcs) >= 1 + +def check_PE_DYNAMIC_BASE(binary) -> bool: '''PIE: DllCharacteristics bit 0x40 signifies dynamicbase (ASLR)''' - (arch,bits) = get_PE_dll_characteristics(executable) - reqbits = IMAGE_DLL_CHARACTERISTICS_DYNAMIC_BASE - return (bits & reqbits) == reqbits + return lief.PE.DLL_CHARACTERISTICS.DYNAMIC_BASE in binary.optional_header.dll_characteristics_lists -# On 64 bit, must support high-entropy 64-bit address space layout randomization in addition to DYNAMIC_BASE -# to have secure ASLR. -def check_PE_HIGH_ENTROPY_VA(executable): +# Must support high-entropy 64-bit address space layout randomization +# in addition to DYNAMIC_BASE to have secure ASLR. +def check_PE_HIGH_ENTROPY_VA(binary) -> bool: '''PIE: DllCharacteristics bit 0x20 signifies high-entropy ASLR''' - (arch,bits) = get_PE_dll_characteristics(executable) - if arch == 'i386:x86-64': - reqbits = IMAGE_DLL_CHARACTERISTICS_HIGH_ENTROPY_VA - else: # Unnecessary on 32-bit - assert(arch == 'i386') - reqbits = 0 - return (bits & reqbits) == reqbits - -def check_PE_RELOC_SECTION(executable) -> bool: + return lief.PE.DLL_CHARACTERISTICS.HIGH_ENTROPY_VA in binary.optional_header.dll_characteristics_lists + +def check_PE_RELOC_SECTION(binary) -> bool: '''Check for a reloc section. 
This is required for functional ASLR.''' - p = subprocess.Popen([OBJDUMP_CMD, '-h', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True) - (stdout, stderr) = p.communicate() - if p.returncode: - raise IOError('Error opening file') - for line in stdout.splitlines(): - if '.reloc' in line: - return True - return False + return binary.has_relocations -def check_PE_NX(executable): - '''NX: DllCharacteristics bit 0x100 signifies nxcompat (DEP)''' - (arch,bits) = get_PE_dll_characteristics(executable) - return (bits & IMAGE_DLL_CHARACTERISTICS_NX_COMPAT) == IMAGE_DLL_CHARACTERISTICS_NX_COMPAT - -def get_MACHO_executable_flags(executable): - p = subprocess.Popen([OTOOL_CMD, '-vh', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True) - (stdout, stderr) = p.communicate() - if p.returncode: - raise IOError('Error opening file') - - flags = [] - for line in stdout.splitlines(): - tokens = line.split() - # filter first two header lines - if 'magic' in tokens or 'Mach' in tokens: - continue - # filter ncmds and sizeofcmds values - flags += [t for t in tokens if not t.isdigit()] - return flags - -def check_MACHO_PIE(executable) -> bool: +def check_PE_CONTROL_FLOW(binary) -> bool: ''' - Check for position independent executable (PIE), allowing for address space randomization. + Check for control flow instrumentation ''' - flags = get_MACHO_executable_flags(executable) - if 'PIE' in flags: + main = binary.get_symbol('main').value + + section_addr = binary.section_from_rva(main).virtual_address + virtual_address = binary.optional_header.imagebase + section_addr + main + + content = binary.get_content_from_virtual_address(virtual_address, 4, lief.Binary.VA_TYPES.VA) + + if content.tolist() == [243, 15, 30, 250]: # endbr64 return True return False -def check_MACHO_NOUNDEFS(executable) -> bool: +def check_PE_CANARY(binary) -> bool: + ''' + Check for use of stack canary + ''' + return binary.has_symbol('__stack_chk_fail') + +def check_MACHO_NOUNDEFS(binary) -> bool: ''' Check for no undefined references. ''' - flags = get_MACHO_executable_flags(executable) - if 'NOUNDEFS' in flags: - return True - return False + return binary.header.has(lief.MachO.HEADER_FLAGS.NOUNDEFS) + +def check_MACHO_FIXUP_CHAINS(binary) -> bool: + ''' + Check for use of chained fixups. + ''' + return binary.has_dyld_chained_fixups + +def check_MACHO_CANARY(binary) -> bool: + ''' + Check for use of stack canary + ''' + return binary.has_symbol('___stack_chk_fail') -def check_MACHO_NX(executable) -> bool: +def check_PIE(binary) -> bool: + ''' + Check for position independent executable (PIE), + allowing for address space randomization. + ''' + return binary.is_pie + +def check_NX(binary) -> bool: ''' Check for no stack execution ''' - flags = get_MACHO_executable_flags(executable) - if 'ALLOW_STACK_EXECUTION' in flags: - return False - return True + return binary.has_nx -def check_MACHO_LAZY_BINDINGS(executable) -> bool: +def check_MACHO_CONTROL_FLOW(binary) -> bool: ''' - Check for no lazy bindings. - We don't use or check for MH_BINDATLOAD. See #18295. 
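(A note on the byte patterns used by the control-flow checks above and by check_MACHO_BRANCH_PROTECTION further down: the decimal lists are simply the instruction bytes written in base 10, as the asserts below confirm.)

```python
# The same byte sequences in hex: f3 0f 1e fa is endbr64, 5f 24 03 d5 is bti (little-endian).
assert [243, 15, 30, 250] == [0xf3, 0x0f, 0x1e, 0xfa]  # endbr64 (x86-64)
assert [95, 36, 3, 213] == [0x5f, 0x24, 0x03, 0xd5]    # bti (AArch64)
```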
+ Check for control flow instrumentation ''' - p = subprocess.Popen([OTOOL_CMD, '-l', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True) - (stdout, stderr) = p.communicate() - if p.returncode: - raise IOError('Error opening file') - - for line in stdout.splitlines(): - tokens = line.split() - if 'lazy_bind_off' in tokens or 'lazy_bind_size' in tokens: - if tokens[1] != '0': - return False - return True + content = binary.get_content_from_virtual_address(binary.entrypoint, 4, lief.Binary.VA_TYPES.AUTO) -def check_MACHO_Canary(executable) -> bool: + if content.tolist() == [243, 15, 30, 250]: # endbr64 + return True + return False + +def check_MACHO_BRANCH_PROTECTION(binary) -> bool: ''' - Check for use of stack canary + Check for branch protection instrumentation ''' - p = subprocess.Popen([OTOOL_CMD, '-Iv', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True) - (stdout, stderr) = p.communicate() - if p.returncode: - raise IOError('Error opening file') - ok = False - for line in stdout.splitlines(): - if '___stack_chk_fail' in line: - ok = True - return ok + content = binary.get_content_from_virtual_address(binary.entrypoint, 4, lief.Binary.VA_TYPES.AUTO) -CHECKS = { -'ELF': [ - ('PIE', check_ELF_PIE), - ('NX', check_ELF_NX), + if content.tolist() == [95, 36, 3, 213]: # bti + return True + return False + +BASE_ELF = [ + ('PIE', check_PIE), + ('NX', check_NX), ('RELRO', check_ELF_RELRO), - ('Canary', check_ELF_Canary) -], -'PE': [ + ('CANARY', check_ELF_CANARY), + ('SEPARATE_CODE', check_ELF_SEPARATE_CODE), +] + +BASE_PE = [ + ('PIE', check_PIE), ('DYNAMIC_BASE', check_PE_DYNAMIC_BASE), ('HIGH_ENTROPY_VA', check_PE_HIGH_ENTROPY_VA), - ('NX', check_PE_NX), - ('RELOC_SECTION', check_PE_RELOC_SECTION) -], -'MACHO': [ - ('PIE', check_MACHO_PIE), + ('NX', check_NX), + ('RELOC_SECTION', check_PE_RELOC_SECTION), + ('CONTROL_FLOW', check_PE_CONTROL_FLOW), + ('CANARY', check_PE_CANARY), +] + +BASE_MACHO = [ ('NOUNDEFS', check_MACHO_NOUNDEFS), - ('NX', check_MACHO_NX), - ('LAZY_BINDINGS', check_MACHO_LAZY_BINDINGS), - ('Canary', check_MACHO_Canary) + ('CANARY', check_MACHO_CANARY), + ('FIXUP_CHAINS', check_MACHO_FIXUP_CHAINS), ] -} -def identify_executable(executable): - with open(filename, 'rb') as f: - magic = f.read(4) - if magic.startswith(b'MZ'): - return 'PE' - elif magic.startswith(b'\x7fELF'): - return 'ELF' - elif magic.startswith(b'\xcf\xfa'): - return 'MACHO' - return None +CHECKS = { + lief.EXE_FORMATS.ELF: { + lief.ARCHITECTURES.X86: BASE_ELF + [('CONTROL_FLOW', check_ELF_CONTROL_FLOW), ('FORTIFY', check_ELF_FORTIFY)], + lief.ARCHITECTURES.ARM: BASE_ELF + [('FORTIFY', check_ELF_FORTIFY)], + lief.ARCHITECTURES.ARM64: BASE_ELF + [('FORTIFY', check_ELF_FORTIFY)], + lief.ARCHITECTURES.PPC: BASE_ELF + [('FORTIFY', check_ELF_FORTIFY)], + lief.ARCHITECTURES.RISCV: BASE_ELF, # Skip FORTIFY. See https://github.com/lief-project/LIEF/issues/1082. 
+ }, + lief.EXE_FORMATS.PE: { + lief.ARCHITECTURES.X86: BASE_PE, + }, + lief.EXE_FORMATS.MACHO: { + lief.ARCHITECTURES.X86: BASE_MACHO + [('PIE', check_PIE), + ('NX', check_NX), + ('CONTROL_FLOW', check_MACHO_CONTROL_FLOW)], + lief.ARCHITECTURES.ARM64: BASE_MACHO + [('BRANCH_PROTECTION', check_MACHO_BRANCH_PROTECTION)], + } +} if __name__ == '__main__': - retval = 0 + retval: int = 0 for filename in sys.argv[1:]: - try: - etype = identify_executable(filename) - if etype is None: - print('%s: unknown format' % filename) - retval = 1 - continue - - failed = [] - warning = [] - for (name, func) in CHECKS[etype]: - if not func(filename): - if name in NONFATAL: - warning.append(name) - else: - failed.append(name) - if failed: - print('%s: failed %s' % (filename, ' '.join(failed))) - retval = 1 - if warning: - print('%s: warning %s' % (filename, ' '.join(warning))) - except IOError: - print('%s: cannot open' % filename) + binary = lief.parse(filename) + etype = binary.format + arch = binary.abstract.header.architecture + binary.concrete + + failed: list[str] = [] + for (name, func) in CHECKS[etype][arch]: + if not func(binary): + failed.append(name) + if failed: + print(f'{filename}: failed {" ".join(failed)}') retval = 1 sys.exit(retval) - diff --git a/contrib/devtools/symbol-check.py b/contrib/devtools/symbol-check.py index 6949cb7ced89e..3f6010280abd7 100755 --- a/contrib/devtools/symbol-check.py +++ b/contrib/devtools/symbol-check.py @@ -3,57 +3,92 @@ # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. ''' -A script to check that the executables produced by gitian only contain -certain symbols and are only linked against allowed libraries. +A script to check that release executables only contain certain symbols +and are only linked against allowed libraries. Example usage: - find ../gitian-builder/build -type f -executable | xargs python3 contrib/devtools/symbol-check.py + find ../path/to/binaries -type f -executable | xargs python3 contrib/devtools/symbol-check.py ''' -import subprocess -import re import sys -import os -from typing import List, Optional, Tuple -# Debian 8 (Jessie) EOL: 2020. https://wiki.debian.org/DebianReleases#Production_Releases -# -# - g++ version 4.9.2 (https://packages.debian.org/search?suite=jessie&arch=any&searchon=names&keywords=g%2B%2B) -# - libc version 2.19 (https://packages.debian.org/search?suite=jessie&arch=any&searchon=names&keywords=libc6) -# -# Ubuntu 16.04 (Xenial) EOL: 2024. https://wiki.ubuntu.com/Releases +import lief + +# Debian 11 (Bullseye) EOL: 2026. https://wiki.debian.org/LTS # -# - g++ version 5.3.1 (https://packages.ubuntu.com/search?keywords=g%2B%2B&searchon=names&suite=xenial§ion=all) -# - libc version 2.23.0 (https://packages.ubuntu.com/search?keywords=libc6&searchon=names&suite=xenial§ion=all) +# - libgcc version 10.2.1 (https://packages.debian.org/bullseye/libgcc-s1) +# - libc version 2.31 (https://packages.debian.org/source/bullseye/glibc) # -# CentOS 7 EOL: 2024. https://wiki.centos.org/FAQ/General +# Ubuntu 20.04 (Focal) EOL: 2030. https://wiki.ubuntu.com/ReleaseTeam # -# - g++ version 4.8.5 (http://mirror.centos.org/centos/7/os/x86_64/Packages/) -# - libc version 2.17 (http://mirror.centos.org/centos/7/os/x86_64/Packages/) +# - libgcc version 10.5.0 (https://packages.ubuntu.com/focal/libgcc1) +# - libc version 2.31 (https://packages.ubuntu.com/focal/libc6) # -# Taking the minimum of these as our target. +# CentOS Stream 9 EOL: 2027. 
https://www.centos.org/cl-vs-cs/#end-of-life # -# According to GNU ABI document (https://gcc.gnu.org/onlinedocs/libstdc++/manual/abi.html) this corresponds to: -# GCC 4.8.5: GCC_4.8.0 -# (glibc) GLIBC_2_17 +# - libgcc version 12.2.1 (https://mirror.stream.centos.org/9-stream/AppStream/x86_64/os/Packages/) +# - libc version 2.34 (https://mirror.stream.centos.org/9-stream/AppStream/x86_64/os/Packages/) # +# See https://gcc.gnu.org/onlinedocs/libstdc++/manual/abi.html for more info. + MAX_VERSIONS = { -'GCC': (4,8,0), -'GLIBC': (2,17), -'LIBATOMIC': (1,0) +'GCC': (4,3,0), +'GLIBC': { + lief.ELF.ARCH.x86_64: (2,31), + lief.ELF.ARCH.ARM: (2,31), + lief.ELF.ARCH.AARCH64:(2,31), + lief.ELF.ARCH.PPC64: (2,31), + lief.ELF.ARCH.RISCV: (2,31), +}, +'LIBATOMIC': (1,0), +'V': (0,5,0), # xkb (bitcoin-qt only) } -# See here for a description of _IO_stdin_used: -# https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=634261#109 # Ignore symbols that are exported as part of every executable IGNORE_EXPORTS = { -'_edata', '_end', '__end__', '_init', '__bss_start', '__bss_start__', '_bss_end__', '__bss_end__', '_fini', '_IO_stdin_used', 'stdin', 'stdout', 'stderr', -'environ', '_environ', '__environ', +'environ', '_environ', '__environ', '_fini', '_init', 'stdin', +'stdout', 'stderr', +} + +# Expected linker-loader names can be found here: +# https://sourceware.org/glibc/wiki/ABIList?action=recall&rev=16 +ELF_INTERPRETER_NAMES: dict[lief.ELF.ARCH, dict[lief.ENDIANNESS, str]] = { + lief.ELF.ARCH.x86_64: { + lief.ENDIANNESS.LITTLE: "/lib64/ld-linux-x86-64.so.2", + }, + lief.ELF.ARCH.ARM: { + lief.ENDIANNESS.LITTLE: "/lib/ld-linux-armhf.so.3", + }, + lief.ELF.ARCH.AARCH64: { + lief.ENDIANNESS.LITTLE: "/lib/ld-linux-aarch64.so.1", + }, + lief.ELF.ARCH.PPC64: { + lief.ENDIANNESS.BIG: "/lib64/ld64.so.1", + lief.ENDIANNESS.LITTLE: "/lib64/ld64.so.2", + }, + lief.ELF.ARCH.RISCV: { + lief.ENDIANNESS.LITTLE: "/lib/ld-linux-riscv64-lp64d.so.1", + }, +} + +ELF_ABIS: dict[lief.ELF.ARCH, dict[lief.ENDIANNESS, list[int]]] = { + lief.ELF.ARCH.x86_64: { + lief.ENDIANNESS.LITTLE: [3,2,0], + }, + lief.ELF.ARCH.ARM: { + lief.ENDIANNESS.LITTLE: [3,2,0], + }, + lief.ELF.ARCH.AARCH64: { + lief.ENDIANNESS.LITTLE: [3,7,0], + }, + lief.ELF.ARCH.PPC64: { + lief.ENDIANNESS.LITTLE: [3,10,0], + lief.ENDIANNESS.BIG: [3,2,0], + }, + lief.ELF.ARCH.RISCV: { + lief.ENDIANNESS.LITTLE: [4,15,0], + }, } -READELF_CMD = os.getenv('READELF', '/usr/bin/readelf') -CPPFILT_CMD = os.getenv('CPPFILT', '/usr/bin/c++filt') -OBJDUMP_CMD = os.getenv('OBJDUMP', '/usr/bin/objdump') -OTOOL_CMD = os.getenv('OTOOL', '/usr/bin/otool') # Allowed NEEDED libraries ELF_ALLOWED_LIBRARIES = { @@ -62,25 +97,33 @@ 'libc.so.6', # C library 'libpthread.so.0', # threading 'libm.so.6', # math library -'librt.so.1', # real-time (clock) 'libatomic.so.1', 'ld-linux-x86-64.so.2', # 64-bit dynamic linker 'ld-linux.so.2', # 32-bit dynamic linker 'ld-linux-aarch64.so.1', # 64-bit ARM dynamic linker 'ld-linux-armhf.so.3', # 32-bit ARM dynamic linker +'ld64.so.1', # POWER64 ABIv1 dynamic linker +'ld64.so.2', # POWER64 ABIv2 dynamic linker 'ld-linux-riscv64-lp64d.so.1', # 64-bit RISC-V dynamic linker # bitcoin-qt only 'libxcb.so.1', # part of X11 +'libxkbcommon.so.0', # keyboard keymapping +'libxkbcommon-x11.so.0', # keyboard keymapping 'libfontconfig.so.1', # font support 'libfreetype.so.6', # font parsing -'libdl.so.2' # programming interface to dynamic linker -} -ARCH_MIN_GLIBC_VER = { -'80386': (2,1), -'X86-64': (2,2,5), -'ARM': (2,4), -'AArch64':(2,17), -'RISC-V': (2,27) 
+'libdl.so.2', # programming interface to dynamic linker +'libxcb-icccm.so.4', +'libxcb-image.so.0', +'libxcb-shm.so.0', +'libxcb-keysyms.so.1', +'libxcb-randr.so.0', +'libxcb-render-util.so.0', +'libxcb-render.so.0', +'libxcb-shape.so.0', +'libxcb-sync.so.1', +'libxcb-xfixes.so.0', +'libxcb-xinerama.so.0', +'libxcb-xkb.so.1', } MACHO_ALLOWED_LIBRARIES = { @@ -91,14 +134,20 @@ 'AppKit', # user interface 'ApplicationServices', # common application tasks. 'Carbon', # deprecated c back-compat API +'ColorSync', 'CoreFoundation', # low level func, data types 'CoreGraphics', # 2D rendering 'CoreServices', # operating system services 'CoreText', # interface for laying out text and handling fonts. +'CoreVideo', # video processing 'Foundation', # base layer functionality for apps/frameworks 'ImageIO', # read and write image file formats. 'IOKit', # user-space access to hardware devices and drivers. +'IOSurface', # cross process image/drawing buffers 'libobjc.A.dylib', # Objective-C runtime library +'Metal', # 3D graphics +'Security', # access control and authentication +'QuartzCore', # animation } PE_ALLOWED_LIBRARIES = { @@ -107,200 +156,157 @@ 'KERNEL32.dll', # win32 base APIs 'msvcrt.dll', # C standard library for MSVC 'SHELL32.dll', # shell API -'USER32.dll', # user interface 'WS2_32.dll', # sockets # bitcoin-qt only 'dwmapi.dll', # desktop window manager 'GDI32.dll', # graphics device interface 'IMM32.dll', # input method editor +'NETAPI32.dll', # network management 'ole32.dll', # component object model 'OLEAUT32.dll', # OLE Automation API 'SHLWAPI.dll', # light weight shell API -'UxTheme.dll', +'USER32.dll', # user interface +'USERENV.dll', # user management +'UxTheme.dll', # visual style 'VERSION.dll', # version checking 'WINMM.dll', # WinMM audio API +'WTSAPI32.dll', # Remote Desktop } -class CPPFilt(object): - ''' - Demangle C++ symbol names. - - Use a pipe to the 'c++filt' command. - ''' - def __init__(self): - self.proc = subprocess.Popen(CPPFILT_CMD, stdin=subprocess.PIPE, stdout=subprocess.PIPE, universal_newlines=True) - - def __call__(self, mangled): - self.proc.stdin.write(mangled + '\n') - self.proc.stdin.flush() - return self.proc.stdout.readline().rstrip() - - def close(self): - self.proc.stdin.close() - self.proc.stdout.close() - self.proc.wait() - -def read_symbols(executable, imports=True) -> List[Tuple[str, str, str]]: - ''' - Parse an ELF executable and return a list of (symbol,version, arch) tuples - for dynamic, imported symbols. 
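(For illustration: how a versioned glibc import is matched against the MAX_VERSIONS table above. This mirrors check_version, defined just below; the symbol string here is only an example.)

```python
# Example: an import versioned as GLIBC_2.17 on an x86_64 build.
version = 'GLIBC_2.17'
(lib, _, ver) = version.rpartition('_')
ver = tuple(int(x) for x in ver.split('.'))
print(lib, ver)        # GLIBC (2, 17)
print(ver <= (2, 31))  # True: within the x86_64 GLIBC limit from MAX_VERSIONS
```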
- ''' - p = subprocess.Popen([READELF_CMD, '--dyn-syms', '-W', '-h', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True) - (stdout, stderr) = p.communicate() - if p.returncode: - raise IOError('Could not read symbols for {}: {}'.format(executable, stderr.strip())) - syms = [] - for line in stdout.splitlines(): - line = line.split() - if 'Machine:' in line: - arch = line[-1] - if len(line)>7 and re.match('[0-9]+:$', line[0]): - (sym, _, version) = line[7].partition('@') - is_import = line[6] == 'UND' - if version.startswith('@'): - version = version[1:] - if is_import == imports: - syms.append((sym, version, arch)) - return syms - def check_version(max_versions, version, arch) -> bool: - if '_' in version: - (lib, _, ver) = version.rpartition('_') - else: - lib = version - ver = '0' + (lib, _, ver) = version.rpartition('_') ver = tuple([int(x) for x in ver.split('.')]) if not lib in max_versions: return False - return ver <= max_versions[lib] or lib == 'GLIBC' and ver <= ARCH_MIN_GLIBC_VER[arch] - -def elf_read_libraries(filename) -> List[str]: - p = subprocess.Popen([READELF_CMD, '-d', '-W', filename], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True) - (stdout, stderr) = p.communicate() - if p.returncode: - raise IOError('Error opening file') - libraries = [] - for line in stdout.splitlines(): - tokens = line.split() - if len(tokens)>2 and tokens[1] == '(NEEDED)': - match = re.match(r'^Shared library: \[(.*)\]$', ' '.join(tokens[2:])) - if match: - libraries.append(match.group(1)) - else: - raise ValueError('Unparseable (NEEDED) specification') - return libraries - -def check_imported_symbols(filename) -> bool: - cppfilt = CPPFilt() - ok = True - for sym, version, arch in read_symbols(filename, True): - if version and not check_version(MAX_VERSIONS, version, arch): - print('{}: symbol {} from unsupported version {}'.format(filename, cppfilt(sym), version)) - ok = False + if isinstance(max_versions[lib], tuple): + return ver <= max_versions[lib] + else: + return ver <= max_versions[lib][arch] + +def check_imported_symbols(binary) -> bool: + ok: bool = True + + for symbol in binary.imported_symbols: + if not symbol.imported: + continue + + version = symbol.symbol_version if symbol.has_version else None + + if version: + aux_version = version.symbol_version_auxiliary.name if version.has_auxiliary_version else None + if aux_version and not check_version(MAX_VERSIONS, aux_version, binary.header.machine_type): + print(f'{filename}: symbol {symbol.name} from unsupported version {version}') + ok = False return ok -def check_exported_symbols(filename) -> bool: - cppfilt = CPPFilt() - ok = True - for sym,version,arch in read_symbols(filename, False): - if arch == 'RISC-V' or sym in IGNORE_EXPORTS: +def check_exported_symbols(binary) -> bool: + ok: bool = True + + for symbol in binary.dynamic_symbols: + if not symbol.exported: continue - print('{}: export of symbol {} not allowed'.format(filename, cppfilt(sym))) + name = symbol.name + if binary.header.machine_type == lief.ELF.ARCH.RISCV or name in IGNORE_EXPORTS: + continue + print(f'{binary.name}: export of symbol {name} not allowed!') ok = False return ok -def check_ELF_libraries(filename) -> bool: - ok = True - for library_name in elf_read_libraries(filename): - if library_name not in ELF_ALLOWED_LIBRARIES: - print('{}: NEEDED library {} is not allowed'.format(filename, library_name)) +def check_RUNPATH(binary) -> bool: + assert 
binary.get(lief.ELF.DYNAMIC_TAGS.RUNPATH) is None + assert binary.get(lief.ELF.DYNAMIC_TAGS.RPATH) is None + return True + +def check_ELF_libraries(binary) -> bool: + ok: bool = True + for library in binary.libraries: + if library not in ELF_ALLOWED_LIBRARIES: + print(f'{filename}: {library} is not in ALLOWED_LIBRARIES!') ok = False return ok -def macho_read_libraries(filename) -> List[str]: - p = subprocess.Popen([OTOOL_CMD, '-L', filename], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True) - (stdout, stderr) = p.communicate() - if p.returncode: - raise IOError('Error opening file') - libraries = [] - for line in stdout.splitlines(): - tokens = line.split() - if len(tokens) == 1: # skip executable name - continue - libraries.append(tokens[0].split('/')[-1]) - return libraries - -def check_MACHO_libraries(filename) -> bool: - ok = True - for dylib in macho_read_libraries(filename): - if dylib not in MACHO_ALLOWED_LIBRARIES: - print('{} is not in ALLOWED_LIBRARIES!'.format(dylib)) +def check_MACHO_libraries(binary) -> bool: + ok: bool = True + for dylib in binary.libraries: + split = dylib.name.split('/') + if split[-1] not in MACHO_ALLOWED_LIBRARIES: + print(f'{split[-1]} is not in ALLOWED_LIBRARIES!') ok = False return ok -def pe_read_libraries(filename) -> List[str]: - p = subprocess.Popen([OBJDUMP_CMD, '-x', filename], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True) - (stdout, stderr) = p.communicate() - if p.returncode: - raise IOError('Error opening file') - libraries = [] - for line in stdout.splitlines(): - if 'DLL Name:' in line: - tokens = line.split(': ') - libraries.append(tokens[1]) - return libraries - -def check_PE_libraries(filename) -> bool: - ok = True - for dylib in pe_read_libraries(filename): +def check_MACHO_min_os(binary) -> bool: + if binary.build_version.minos == [13,0,0]: + return True + return False + +def check_MACHO_sdk(binary) -> bool: + if binary.build_version.sdk == [14, 0, 0]: + return True + return False + +def check_MACHO_lld(binary) -> bool: + if binary.build_version.tools[0].version == [18, 1, 8]: + return True + return False + +def check_PE_libraries(binary) -> bool: + ok: bool = True + for dylib in binary.libraries: if dylib not in PE_ALLOWED_LIBRARIES: - print('{} is not in ALLOWED_LIBRARIES!'.format(dylib)) + print(f'{dylib} is not in ALLOWED_LIBRARIES!') ok = False return ok +def check_PE_subsystem_version(binary) -> bool: + major: int = binary.optional_header.major_subsystem_version + minor: int = binary.optional_header.minor_subsystem_version + if major == 6 and minor == 1: + return True + return False + +def check_ELF_interpreter(binary) -> bool: + expected_interpreter = ELF_INTERPRETER_NAMES[binary.header.machine_type][binary.abstract.header.endianness] + + return binary.concrete.interpreter == expected_interpreter + +def check_ELF_ABI(binary) -> bool: + expected_abi = ELF_ABIS[binary.header.machine_type][binary.abstract.header.endianness] + note = binary.concrete.get(lief.ELF.NOTE_TYPES.ABI_TAG) + assert note.details.abi == lief.ELF.NOTE_ABIS.LINUX + return note.details.version == expected_abi + CHECKS = { -'ELF': [ +lief.EXE_FORMATS.ELF: [ ('IMPORTED_SYMBOLS', check_imported_symbols), ('EXPORTED_SYMBOLS', check_exported_symbols), - ('LIBRARY_DEPENDENCIES', check_ELF_libraries) + ('LIBRARY_DEPENDENCIES', check_ELF_libraries), + ('INTERPRETER_NAME', check_ELF_interpreter), + ('ABI', check_ELF_ABI), + ('RUNPATH', check_RUNPATH), ], -'MACHO': [ - 
('DYNAMIC_LIBRARIES', check_MACHO_libraries) +lief.EXE_FORMATS.MACHO: [ + ('DYNAMIC_LIBRARIES', check_MACHO_libraries), + ('MIN_OS', check_MACHO_min_os), + ('SDK', check_MACHO_sdk), + ('LLD', check_MACHO_lld), ], -'PE' : [ - ('DYNAMIC_LIBRARIES', check_PE_libraries) +lief.EXE_FORMATS.PE: [ + ('DYNAMIC_LIBRARIES', check_PE_libraries), + ('SUBSYSTEM_VERSION', check_PE_subsystem_version), ] } -def identify_executable(executable) -> Optional[str]: - with open(filename, 'rb') as f: - magic = f.read(4) - if magic.startswith(b'MZ'): - return 'PE' - elif magic.startswith(b'\x7fELF'): - return 'ELF' - elif magic.startswith(b'\xcf\xfa'): - return 'MACHO' - return None - if __name__ == '__main__': - retval = 0 + retval: int = 0 for filename in sys.argv[1:]: - try: - etype = identify_executable(filename) - if etype is None: - print('{}: unknown format'.format(filename)) - retval = 1 - continue - - failed = [] - for (name, func) in CHECKS[etype]: - if not func(filename): - failed.append(name) - if failed: - print('{}: failed {}'.format(filename, ' '.join(failed))) - retval = 1 - except IOError: - print('{}: cannot open'.format(filename)) + binary = lief.parse(filename) + etype = binary.format + + failed: list[str] = [] + for (name, func) in CHECKS[etype]: + if not func(binary): + failed.append(name) + if failed: + print(f'{filename}: failed {" ".join(failed)}') retval = 1 sys.exit(retval) diff --git a/contrib/devtools/test-security-check.py b/contrib/devtools/test-security-check.py index ea70b279418f1..99f171608ddd2 100755 --- a/contrib/devtools/test-security-check.py +++ b/contrib/devtools/test-security-check.py @@ -1,84 +1,130 @@ #!/usr/bin/env python3 -# Copyright (c) 2015-2020 The Bitcoin Core developers +# Copyright (c) 2015-2022 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. ''' Test script for security-check.py ''' +import lief +import os import subprocess import unittest +from utils import determine_wellknown_cmd + def write_testcode(filename): with open(filename, 'w', encoding="utf8") as f: f.write(''' - #include + #include int main() { - printf("the quick brown fox jumps over the lazy god\\n"); + std::printf("the quick brown fox jumps over the lazy god\\n"); return 0; } ''') -def call_security_check(cc, source, executable, options): - subprocess.check_call([cc,source,'-o',executable] + options) - p = subprocess.Popen(['./security-check.py',executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True) - (stdout, stderr) = p.communicate() - return (p.returncode, stdout.rstrip()) +def clean_files(source, executable): + os.remove(source) + os.remove(executable) + +def env_flags() -> list[str]: + # This should behave the same as AC_TRY_LINK, so arrange well-known flags + # in the same order as autoconf would. + # + # See the definitions for ac_link in autoconf's lib/autoconf/c.m4 file for + # reference. 
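(A hypothetical example of what env_flags(), defined immediately below, collects: the environment flag variables are word-split in autoconf's link order and empty entries are dropped.)

```python
# With CXXFLAGS="-O2 -g", CPPFLAGS unset/empty and LDFLAGS="-Wl,-z,now" (example values):
import os

os.environ.update({'CXXFLAGS': '-O2 -g', 'CPPFLAGS': '', 'LDFLAGS': '-Wl,-z,now'})
flags = []
for var in ['CXXFLAGS', 'CPPFLAGS', 'LDFLAGS']:
    flags += filter(None, os.environ.get(var, '').split(' '))
assert flags == ['-O2', '-g', '-Wl,-z,now']
```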
+ flags: list[str] = [] + for var in ['CXXFLAGS', 'CPPFLAGS', 'LDFLAGS']: + flags += filter(None, os.environ.get(var, '').split(' ')) + return flags + +def call_security_check(cxx: str, source: str, executable: str, options) -> tuple: + subprocess.run([*cxx,source,'-o',executable] + env_flags() + options, check=True) + p = subprocess.run([os.path.join(os.path.dirname(__file__), 'security-check.py'), executable], stdout=subprocess.PIPE, text=True) + return (p.returncode, p.stdout.rstrip()) + +def get_arch(cxx, source, executable): + subprocess.run([*cxx, source, '-o', executable] + env_flags(), check=True) + binary = lief.parse(executable) + arch = binary.abstract.header.architecture + os.remove(executable) + return arch class TestSecurityChecks(unittest.TestCase): def test_ELF(self): - source = 'test1.c' + source = 'test1.cpp' executable = 'test1' - cc = 'gcc' + cxx = determine_wellknown_cmd('CXX', 'g++') write_testcode(source) + arch = get_arch(cxx, source, executable) + + if arch == lief.ARCHITECTURES.X86: + pass_flags = ['-D_FORTIFY_SOURCE=3', '-Wl,-znoexecstack', '-Wl,-zrelro', '-Wl,-z,now', '-pie', '-fPIE', '-Wl,-z,separate-code', '-fcf-protection=full'] + self.assertEqual(call_security_check(cxx, source, executable, pass_flags + ['-Wl,-zexecstack']), (1, executable + ': failed NX')) + self.assertEqual(call_security_check(cxx, source, executable, pass_flags + ['-no-pie','-fno-PIE']), (1, executable + ': failed PIE')) + self.assertEqual(call_security_check(cxx, source, executable, pass_flags + ['-Wl,-znorelro']), (1, executable + ': failed RELRO')) + self.assertEqual(call_security_check(cxx, source, executable, pass_flags + ['-Wl,-z,noseparate-code']), (1, executable + ': failed SEPARATE_CODE')) + self.assertEqual(call_security_check(cxx, source, executable, pass_flags + ['-fcf-protection=none']), (1, executable + ': failed CONTROL_FLOW')) + self.assertEqual(call_security_check(cxx, source, executable, pass_flags + ['-U_FORTIFY_SOURCE']), (1, executable + ': failed FORTIFY')) + self.assertEqual(call_security_check(cxx, source, executable, pass_flags), (0, '')) + else: + pass_flags = ['-D_FORTIFY_SOURCE=3', '-Wl,-znoexecstack', '-Wl,-zrelro', '-Wl,-z,now', '-pie', '-fPIE', '-Wl,-z,separate-code'] + self.assertEqual(call_security_check(cxx, source, executable, pass_flags + ['-Wl,-zexecstack']), (1, executable + ': failed NX')) + # LIEF fails to parse RISC-V with no PIE correctly, and doesn't detect the fortified function, + # so skip this test for RISC-V (for now). See https://github.com/lief-project/LIEF/issues/1082. 
+ if arch != lief.ARCHITECTURES.RISCV: + self.assertEqual(call_security_check(cxx, source, executable, pass_flags + ['-no-pie','-fno-PIE']), (1, executable + ': failed PIE')) + self.assertEqual(call_security_check(cxx, source, executable, pass_flags + ['-U_FORTIFY_SOURCE']), (1, executable + ': failed FORTIFY')) + self.assertEqual(call_security_check(cxx, source, executable, pass_flags + ['-Wl,-znorelro']), (1, executable + ': failed RELRO')) + self.assertEqual(call_security_check(cxx, source, executable, pass_flags + ['-Wl,-z,noseparate-code']), (1, executable + ': failed SEPARATE_CODE')) + self.assertEqual(call_security_check(cxx, source, executable, pass_flags), (0, '')) - self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-zexecstack','-fno-stack-protector','-Wl,-znorelro','-no-pie','-fno-PIE']), - (1, executable+': failed PIE NX RELRO Canary')) - self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fno-stack-protector','-Wl,-znorelro','-no-pie','-fno-PIE']), - (1, executable+': failed PIE RELRO Canary')) - self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-znorelro','-no-pie','-fno-PIE']), - (1, executable+': failed PIE RELRO')) - self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-znorelro','-pie','-fPIE']), - (1, executable+': failed RELRO')) - self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-zrelro','-Wl,-z,now','-pie','-fPIE']), - (0, '')) + clean_files(source, executable) def test_PE(self): - source = 'test1.c' + source = 'test1.cpp' executable = 'test1.exe' - cc = 'x86_64-w64-mingw32-gcc' + cxx = determine_wellknown_cmd('CXX', 'x86_64-w64-mingw32-g++') write_testcode(source) - self.assertEqual(call_security_check(cc, source, executable, ['-Wl,--no-nxcompat','-Wl,--no-dynamicbase','-Wl,--no-high-entropy-va','-no-pie','-fno-PIE']), - (1, executable+': failed DYNAMIC_BASE HIGH_ENTROPY_VA NX RELOC_SECTION')) - self.assertEqual(call_security_check(cc, source, executable, ['-Wl,--nxcompat','-Wl,--no-dynamicbase','-Wl,--no-high-entropy-va','-no-pie','-fno-PIE']), - (1, executable+': failed DYNAMIC_BASE HIGH_ENTROPY_VA RELOC_SECTION')) - self.assertEqual(call_security_check(cc, source, executable, ['-Wl,--nxcompat','-Wl,--dynamicbase','-Wl,--no-high-entropy-va','-no-pie','-fno-PIE']), - (1, executable+': failed HIGH_ENTROPY_VA RELOC_SECTION')) - self.assertEqual(call_security_check(cc, source, executable, ['-Wl,--nxcompat','-Wl,--dynamicbase','-Wl,--high-entropy-va','-no-pie','-fno-PIE']), - (1, executable+': failed RELOC_SECTION')) - self.assertEqual(call_security_check(cc, source, executable, ['-Wl,--nxcompat','-Wl,--dynamicbase','-Wl,--high-entropy-va','-pie','-fPIE']), - (0, '')) + pass_flags = ['-Wl,--nxcompat', '-Wl,--enable-reloc-section', '-Wl,--dynamicbase', '-Wl,--high-entropy-va', '-pie', '-fPIE', '-fcf-protection=full', '-fstack-protector-all', '-lssp'] + + self.assertEqual(call_security_check(cxx, source, executable, pass_flags + ['-fno-stack-protector']), (1, executable + ': failed CANARY')) + # https://github.com/lief-project/LIEF/issues/1076 - in future, we could test this individually. 
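(These tests pick their compilers via determine_wellknown_cmd(), added in contrib/devtools/utils.py later in this patch. The sketch below, with a hypothetical CXX value, shows the resolution order: an environment override wins and is word-split, otherwise the well-known program is looked up on PATH.)

```python
# Inline re-statement of determine_wellknown_cmd('CXX', 'g++') with an example CXX value.
import os
import shutil

os.environ['CXX'] = 'ccache g++'   # hypothetical override
maybe_env = os.getenv('CXX')
maybe_which = shutil.which('g++')
cmd = maybe_env.split(' ') if maybe_env else [maybe_which]
assert cmd == ['ccache', 'g++']    # word-splitting keeps compiler wrappers working
```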
+ # self.assertEqual(call_security_check(cxx, source, executable, pass_flags + ['-Wl,--disable-reloc-section']), (1, executable + ': failed RELOC_SECTION')) + self.assertEqual(call_security_check(cxx, source, executable, pass_flags + ['-Wl,--disable-nxcompat']), (1, executable + ': failed NX')) + self.assertEqual(call_security_check(cxx, source, executable, pass_flags + ['-Wl,--disable-dynamicbase']), (1, executable + ': failed PIE DYNAMIC_BASE HIGH_ENTROPY_VA')) # -pie -fPIE does nothing without --dynamicbase + self.assertEqual(call_security_check(cxx, source, executable, pass_flags + ['-Wl,--disable-high-entropy-va']), (1, executable + ': failed HIGH_ENTROPY_VA')) + self.assertEqual(call_security_check(cxx, source, executable, pass_flags + ['-fcf-protection=none']), (1, executable + ': failed CONTROL_FLOW')) + self.assertEqual(call_security_check(cxx, source, executable, pass_flags), (0, '')) + + clean_files(source, executable) def test_MACHO(self): - source = 'test1.c' + source = 'test1.cpp' executable = 'test1' - cc = 'clang' + cxx = determine_wellknown_cmd('CXX', 'clang++') write_testcode(source) + arch = get_arch(cxx, source, executable) - self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-no_pie','-Wl,-flat_namespace','-Wl,-allow_stack_execute','-fno-stack-protector']), - (1, executable+': failed PIE NOUNDEFS NX LAZY_BINDINGS Canary')) - self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-no_pie','-Wl,-flat_namespace','-Wl,-allow_stack_execute','-fstack-protector-all']), - (1, executable+': failed PIE NOUNDEFS NX LAZY_BINDINGS')) - self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-no_pie','-Wl,-flat_namespace','-fstack-protector-all']), - (1, executable+': failed PIE NOUNDEFS LAZY_BINDINGS')) - self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-no_pie','-fstack-protector-all']), - (1, executable+': failed PIE LAZY_BINDINGS')) - self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-no_pie','-Wl,-bind_at_load','-fstack-protector-all']), - (1, executable+': failed PIE')) - self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-pie','-Wl,-bind_at_load','-fstack-protector-all']), - (0, '')) + if arch == lief.ARCHITECTURES.X86: + pass_flags = ['-Wl,-pie', '-fstack-protector-all', '-fcf-protection=full', '-Wl,-fixup_chains'] + self.assertEqual(call_security_check(cxx, source, executable, pass_flags + ['-Wl,-no_pie', '-Wl,-no_fixup_chains']), (1, executable+': failed FIXUP_CHAINS PIE')) # -fixup_chains is incompatible with -no_pie + self.assertEqual(call_security_check(cxx, source, executable, pass_flags + ['-Wl,-no_fixup_chains']), (1, executable + ': failed FIXUP_CHAINS')) + self.assertEqual(call_security_check(cxx, source, executable, pass_flags + ['-fno-stack-protector']), (1, executable + ': failed CANARY')) + self.assertEqual(call_security_check(cxx, source, executable, pass_flags + ['-Wl,-flat_namespace']), (1, executable + ': failed NOUNDEFS')) + self.assertEqual(call_security_check(cxx, source, executable, pass_flags + ['-fcf-protection=none']), (1, executable + ': failed CONTROL_FLOW')) + self.assertEqual(call_security_check(cxx, source, executable, pass_flags), (0, '')) + else: + # arm64 darwin doesn't support non-PIE binaries or executable stacks + pass_flags = ['-fstack-protector-all', '-Wl,-fixup_chains', '-mbranch-protection=bti'] + self.assertEqual(call_security_check(cxx, source, executable, pass_flags + ['-mbranch-protection=none']), (1, executable + ': failed 
BRANCH_PROTECTION')) + self.assertEqual(call_security_check(cxx, source, executable, pass_flags + ['-Wl,-no_fixup_chains']), (1, executable + ': failed FIXUP_CHAINS')) + self.assertEqual(call_security_check(cxx, source, executable, pass_flags + ['-fno-stack-protector']), (1, executable + ': failed CANARY')) + self.assertEqual(call_security_check(cxx, source, executable, pass_flags + ['-Wl,-flat_namespace']), (1, executable + ': failed NOUNDEFS')) + self.assertEqual(call_security_check(cxx, source, executable, pass_flags), (0, '')) + + clean_files(source, executable) if __name__ == '__main__': unittest.main() - diff --git a/contrib/devtools/test-symbol-check.py b/contrib/devtools/test-symbol-check.py new file mode 100755 index 0000000000000..c75a5e1546b9e --- /dev/null +++ b/contrib/devtools/test-symbol-check.py @@ -0,0 +1,174 @@ +#!/usr/bin/env python3 +# Copyright (c) 2020-2022 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +''' +Test script for symbol-check.py +''' +import os +import subprocess +import unittest + +from utils import determine_wellknown_cmd + +def call_symbol_check(cxx: list[str], source, executable, options): + # This should behave the same as AC_TRY_LINK, so arrange well-known flags + # in the same order as autoconf would. + # + # See the definitions for ac_link in autoconf's lib/autoconf/c.m4 file for + # reference. + env_flags: list[str] = [] + for var in ['CXXFLAGS', 'CPPFLAGS', 'LDFLAGS']: + env_flags += filter(None, os.environ.get(var, '').split(' ')) + + subprocess.run([*cxx,source,'-o',executable] + env_flags + options, check=True) + p = subprocess.run([os.path.join(os.path.dirname(__file__), 'symbol-check.py'), executable], stdout=subprocess.PIPE, text=True) + os.remove(source) + os.remove(executable) + return (p.returncode, p.stdout.rstrip()) + +class TestSymbolChecks(unittest.TestCase): + def test_ELF(self): + source = 'test1.cpp' + executable = 'test1' + cxx = determine_wellknown_cmd('CXX', 'g++') + + # -lutil is part of the libc6 package so a safe bet that it's installed + # it's also out of context enough that it's unlikely to ever become a real dependency + source = 'test2.cpp' + executable = 'test2' + with open(source, 'w', encoding="utf8") as f: + f.write(''' + #include + + int main() + { + login(0); + return 0; + } + ''') + + self.assertEqual(call_symbol_check(cxx, source, executable, ['-lutil']), + (1, executable + ': libutil.so.1 is not in ALLOWED_LIBRARIES!\n' + + executable + ': failed LIBRARY_DEPENDENCIES')) + + # finally, check a simple conforming binary + source = 'test3.cpp' + executable = 'test3' + with open(source, 'w', encoding="utf8") as f: + f.write(''' + #include + + int main() + { + std::printf("42"); + return 0; + } + ''') + + self.assertEqual(call_symbol_check(cxx, source, executable, []), + (0, '')) + + def test_MACHO(self): + source = 'test1.cpp' + executable = 'test1' + cxx = determine_wellknown_cmd('CXX', 'clang++') + + with open(source, 'w', encoding="utf8") as f: + f.write(''' + #include + + int main() + { + XML_ExpatVersion(); + return 0; + } + + ''') + + self.assertEqual(call_symbol_check(cxx, source, executable, ['-lexpat', '-Wl,-platform_version','-Wl,macos', '-Wl,11.4', '-Wl,11.4']), + (1, 'libexpat.1.dylib is not in ALLOWED_LIBRARIES!\n' + + f'{executable}: failed DYNAMIC_LIBRARIES MIN_OS SDK')) + + source = 'test2.cpp' + executable = 'test2' + with open(source, 'w', encoding="utf8") as f: + f.write(''' + 
#include + + int main() + { + CGMainDisplayID(); + return 0; + } + ''') + + self.assertEqual(call_symbol_check(cxx, source, executable, ['-framework', 'CoreGraphics', '-Wl,-platform_version','-Wl,macos', '-Wl,11.4', '-Wl,11.4']), + (1, f'{executable}: failed MIN_OS SDK')) + + source = 'test3.cpp' + executable = 'test3' + with open(source, 'w', encoding="utf8") as f: + f.write(''' + int main() + { + return 0; + } + ''') + + self.assertEqual(call_symbol_check(cxx, source, executable, ['-Wl,-platform_version','-Wl,macos', '-Wl,13.0', '-Wl,11.4']), + (1, f'{executable}: failed SDK')) + + def test_PE(self): + source = 'test1.cpp' + executable = 'test1.exe' + cxx = determine_wellknown_cmd('CXX', 'x86_64-w64-mingw32-g++') + + with open(source, 'w', encoding="utf8") as f: + f.write(''' + #include + + int main() + { + PdhConnectMachineA(NULL); + return 0; + } + ''') + + self.assertEqual(call_symbol_check(cxx, source, executable, ['-lpdh', '-Wl,--major-subsystem-version', '-Wl,6', '-Wl,--minor-subsystem-version', '-Wl,1']), + (1, 'pdh.dll is not in ALLOWED_LIBRARIES!\n' + + executable + ': failed DYNAMIC_LIBRARIES')) + + source = 'test2.cpp' + executable = 'test2.exe' + + with open(source, 'w', encoding="utf8") as f: + f.write(''' + int main() + { + return 0; + } + ''') + + self.assertEqual(call_symbol_check(cxx, source, executable, ['-Wl,--major-subsystem-version', '-Wl,9', '-Wl,--minor-subsystem-version', '-Wl,9']), + (1, executable + ': failed SUBSYSTEM_VERSION')) + + source = 'test3.cpp' + executable = 'test3.exe' + with open(source, 'w', encoding="utf8") as f: + f.write(''' + #include + + int main() + { + CoFreeUnusedLibrariesEx(0,0); + return 0; + } + ''') + + self.assertEqual(call_symbol_check(cxx, source, executable, ['-lole32', '-Wl,--major-subsystem-version', '-Wl,6', '-Wl,--minor-subsystem-version', '-Wl,1']), + (0, '')) + + +if __name__ == '__main__': + unittest.main() diff --git a/contrib/devtools/test_deterministic_coverage.sh b/contrib/devtools/test_deterministic_coverage.sh index 95b1553215885..885396bb25934 100755 --- a/contrib/devtools/test_deterministic_coverage.sh +++ b/contrib/devtools/test_deterministic_coverage.sh @@ -16,23 +16,22 @@ GCOV_EXECUTABLE="gcov" NON_DETERMINISTIC_TESTS=( "blockfilter_index_tests/blockfilter_index_initial_sync" # src/checkqueue.h: In CCheckQueue::Loop(): while (queue.empty()) { ... } "coinselector_tests/knapsack_solver_test" # coinselector_tests.cpp: if (equal_sets(setCoinsRet, setCoinsRet2)) - "denialofservice_tests/DoS_mapOrphans" # denialofservice_tests.cpp: it = mapOrphanTransactions.lower_bound(InsecureRand256()); "fs_tests/fsbridge_fstream" # deterministic test failure? 
- "miner_tests/CreateNewBlock_validity" # validation.cpp: if (GetMainSignals().CallbacksPending() > 10) + "miner_tests/CreateNewBlock_validity" # validation.cpp: if (signals.CallbacksPending() > 10) "scheduler_tests/manythreads" # scheduler.cpp: CScheduler::serviceQueue() "scheduler_tests/singlethreadedscheduler_ordered" # scheduler.cpp: CScheduler::serviceQueue() - "txvalidationcache_tests/checkinputs_test" # validation.cpp: if (GetMainSignals().CallbacksPending() > 10) - "txvalidationcache_tests/tx_mempool_block_doublespend" # validation.cpp: if (GetMainSignals().CallbacksPending() > 10) - "txindex_tests/txindex_initial_sync" # validation.cpp: if (GetMainSignals().CallbacksPending() > 10) - "txvalidation_tests/tx_mempool_reject_coinbase" # validation.cpp: if (GetMainSignals().CallbacksPending() > 10) - "validation_block_tests/processnewblock_signals_ordering" # validation.cpp: if (GetMainSignals().CallbacksPending() > 10) - "wallet_tests/coin_mark_dirty_immature_credit" # validation.cpp: if (GetMainSignals().CallbacksPending() > 10) - "wallet_tests/dummy_input_size_test" # validation.cpp: if (GetMainSignals().CallbacksPending() > 10) - "wallet_tests/importmulti_rescan" # validation.cpp: if (GetMainSignals().CallbacksPending() > 10) - "wallet_tests/importwallet_rescan" # validation.cpp: if (GetMainSignals().CallbacksPending() > 10) - "wallet_tests/ListCoins" # validation.cpp: if (GetMainSignals().CallbacksPending() > 10) - "wallet_tests/scan_for_wallet_transactions" # validation.cpp: if (GetMainSignals().CallbacksPending() > 10) - "wallet_tests/wallet_disableprivkeys" # validation.cpp: if (GetMainSignals().CallbacksPending() > 10) + "txvalidationcache_tests/checkinputs_test" # validation.cpp: if (signals.CallbacksPending() > 10) + "txvalidationcache_tests/tx_mempool_block_doublespend" # validation.cpp: if (signals.CallbacksPending() > 10) + "txindex_tests/txindex_initial_sync" # validation.cpp: if (signals.CallbacksPending() > 10) + "txvalidation_tests/tx_mempool_reject_coinbase" # validation.cpp: if (signals.CallbacksPending() > 10) + "validation_block_tests/processnewblock_signals_ordering" # validation.cpp: if (signals.CallbacksPending() > 10) + "wallet_tests/coin_mark_dirty_immature_credit" # validation.cpp: if (signals.CallbacksPending() > 10) + "wallet_tests/dummy_input_size_test" # validation.cpp: if (signals.CallbacksPending() > 10) + "wallet_tests/importmulti_rescan" # validation.cpp: if (signals.CallbacksPending() > 10) + "wallet_tests/importwallet_rescan" # validation.cpp: if (signals.CallbacksPending() > 10) + "wallet_tests/ListCoins" # validation.cpp: if (signals.CallbacksPending() > 10) + "wallet_tests/scan_for_wallet_transactions" # validation.cpp: if (signals.CallbacksPending() > 10) + "wallet_tests/wallet_disableprivkeys" # validation.cpp: if (signals.CallbacksPending() > 10) ) TEST_BITCOIN_BINARY="src/test/test_bitcoin" @@ -82,7 +81,7 @@ if ! command -v gcovr > /dev/null; then fi if [[ ! -e ${TEST_BITCOIN_BINARY} ]]; then - echo "Error: Executable ${TEST_BITCOIN_BINARY} not found. Run \"./configure --enable-lcov\" and compile." + echo "Error: Executable ${TEST_BITCOIN_BINARY} not found. Run \"cmake -B build -DCMAKE_BUILD_TYPE=Coverage\" and compile." exit 1 fi @@ -91,7 +90,7 @@ get_file_suffix_count() { } if [[ $(get_file_suffix_count gcno) == 0 ]]; then - echo "Error: Could not find any *.gcno files. The *.gcno files are generated by the compiler. Run \"./configure --enable-lcov\" and re-compile." + echo "Error: Could not find any *.gcno files. 
The *.gcno files are generated by the compiler. Run \"cmake -B build -DCMAKE_BUILD_TYPE=Coverage\" and re-compile." exit 1 fi @@ -116,7 +115,7 @@ while [[ ${TEST_RUN_ID} -lt ${N_TEST_RUNS} ]]; do fi rm "${TEST_OUTPUT_TEMPFILE}" if [[ $(get_file_suffix_count gcda) == 0 ]]; then - echo "Error: Running the test suite did not create any *.gcda files. The gcda files are generated when the instrumented test programs are executed. Run \"./configure --enable-lcov\" and re-compile." + echo "Error: Running the test suite did not create any *.gcda files. The gcda files are generated when the instrumented test programs are executed. Run \"cmake -B build -DCMAKE_BUILD_TYPE=Coverage\" and re-compile." exit 1 fi GCOVR_TEMPFILE=$(mktemp) diff --git a/contrib/devtools/utils.py b/contrib/devtools/utils.py new file mode 100755 index 0000000000000..8b4c67c6c0ce2 --- /dev/null +++ b/contrib/devtools/utils.py @@ -0,0 +1,21 @@ +#!/usr/bin/env python3 +# Copyright (c) 2021 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +''' +Common utility functions +''' +import shutil +import sys +import os + + +def determine_wellknown_cmd(envvar, progname) -> list[str]: + maybe_env = os.getenv(envvar) + maybe_which = shutil.which(progname) + if maybe_env: + return maybe_env.split(' ') # Well-known vars are often meant to be word-split + elif maybe_which: + return [ maybe_which ] + else: + sys.exit(f"{progname} not found") diff --git a/contrib/devtools/utxo_snapshot.sh b/contrib/devtools/utxo_snapshot.sh deleted file mode 100755 index dee25ff67b61e..0000000000000 --- a/contrib/devtools/utxo_snapshot.sh +++ /dev/null @@ -1,44 +0,0 @@ -#!/usr/bin/env bash -# -# Copyright (c) 2019 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. -# -export LC_ALL=C - -set -ueo pipefail - -if (( $# < 3 )); then - echo 'Usage: utxo_snapshot.sh ' - echo - echo " if is '-', don't produce a snapshot file but instead print the " - echo " expected assumeutxo hash" - echo - echo 'Examples:' - echo - echo " ./contrib/devtools/utxo_snapshot.sh 570000 utxo.dat ./src/bitcoin-cli -datadir=\$(pwd)/testdata" - echo ' ./contrib/devtools/utxo_snapshot.sh 570000 - ./src/bitcoin-cli' - exit 1 -fi - -GENERATE_AT_HEIGHT="${1}"; shift; -OUTPUT_PATH="${1}"; shift; -# Most of the calls we make take a while to run, so pad with a lengthy timeout. -BITCOIN_CLI_CALL="${*} -rpcclienttimeout=9999999" - -# Block we'll invalidate/reconsider to rewind/fast-forward the chain. 
-PIVOT_BLOCKHASH=$($BITCOIN_CLI_CALL getblockhash $(( GENERATE_AT_HEIGHT + 1 )) ) - -(>&2 echo "Rewinding chain back to height ${GENERATE_AT_HEIGHT} (by invalidating ${PIVOT_BLOCKHASH}); this may take a while") -${BITCOIN_CLI_CALL} invalidateblock "${PIVOT_BLOCKHASH}" - -if [[ "${OUTPUT_PATH}" = "-" ]]; then - (>&2 echo "Generating txoutset info...") - ${BITCOIN_CLI_CALL} gettxoutsetinfo | grep hash_serialized_2 | sed 's/^.*: "\(.\+\)\+",/\1/g' -else - (>&2 echo "Generating UTXO snapshot...") - ${BITCOIN_CLI_CALL} dumptxoutset "${OUTPUT_PATH}" -fi - -(>&2 echo "Restoring chain to original height; this may take a while") -${BITCOIN_CLI_CALL} reconsiderblock "${PIVOT_BLOCKHASH}" diff --git a/contrib/filter-lcov.py b/contrib/filter-lcov.py index e005cb96da623..db780ad53bd39 100755 --- a/contrib/filter-lcov.py +++ b/contrib/filter-lcov.py @@ -1,5 +1,5 @@ #!/usr/bin/env python3 -# Copyright (c) 2017-2019 The Bitcoin Core developers +# Copyright (c) 2017-2020 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. diff --git a/contrib/gitian-build.py b/contrib/gitian-build.py deleted file mode 100755 index 4a3df93cea3d2..0000000000000 --- a/contrib/gitian-build.py +++ /dev/null @@ -1,262 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2018-2019 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. - -import argparse -import os -import subprocess -import sys - -def setup(): - global args, workdir - programs = ['ruby', 'git', 'make', 'wget', 'curl'] - if args.kvm: - programs += ['apt-cacher-ng', 'python-vm-builder', 'qemu-kvm', 'qemu-utils'] - elif args.docker and not os.path.isfile('/lib/systemd/system/docker.service'): - dockers = ['docker.io', 'docker-ce'] - for i in dockers: - return_code = subprocess.call(['sudo', 'apt-get', 'install', '-qq', i]) - if return_code == 0: - break - if return_code != 0: - print('Cannot find any way to install Docker.', file=sys.stderr) - sys.exit(1) - else: - programs += ['apt-cacher-ng', 'lxc', 'debootstrap'] - subprocess.check_call(['sudo', 'apt-get', 'install', '-qq'] + programs) - if not os.path.isdir('gitian.sigs'): - subprocess.check_call(['git', 'clone', 'https://github.com/bitcoin-core/gitian.sigs.git']) - if not os.path.isdir('bitcoin-detached-sigs'): - subprocess.check_call(['git', 'clone', 'https://github.com/bitcoin-core/bitcoin-detached-sigs.git']) - if not os.path.isdir('gitian-builder'): - subprocess.check_call(['git', 'clone', 'https://github.com/devrandom/gitian-builder.git']) - if not os.path.isdir('bitcoin'): - subprocess.check_call(['git', 'clone', 'https://github.com/bitcoin/bitcoin.git']) - os.chdir('gitian-builder') - make_image_prog = ['bin/make-base-vm', '--suite', 'bionic', '--arch', 'amd64'] - if args.docker: - make_image_prog += ['--docker'] - elif not args.kvm: - make_image_prog += ['--lxc'] - subprocess.check_call(make_image_prog) - os.chdir(workdir) - if args.is_bionic and not args.kvm and not args.docker: - subprocess.check_call(['sudo', 'sed', '-i', 's/lxcbr0/br0/', '/etc/default/lxc-net']) - print('Reboot is required') - sys.exit(0) - -def build(): - global args, workdir - - os.makedirs('bitcoin-binaries/' + args.version, exist_ok=True) - print('\nBuilding Dependencies\n') - os.chdir('gitian-builder') - os.makedirs('inputs', exist_ok=True) - - subprocess.check_call(['wget', '-O', 
'inputs/osslsigncode-2.0.tar.gz', 'https://github.com/mtrojnar/osslsigncode/archive/2.0.tar.gz']) - subprocess.check_call(["echo '5a60e0a4b3e0b4d655317b2f12a810211c50242138322b16e7e01c6fbb89d92f inputs/osslsigncode-2.0.tar.gz' | sha256sum -c"], shell=True) - subprocess.check_call(['make', '-C', '../bitcoin/depends', 'download', 'SOURCES_PATH=' + os.getcwd() + '/cache/common']) - - if args.linux: - print('\nCompiling ' + args.version + ' Linux') - subprocess.check_call(['bin/gbuild', '-j', args.jobs, '-m', args.memory, '--commit', 'bitcoin='+args.commit, '--url', 'bitcoin='+args.url, '../bitcoin/contrib/gitian-descriptors/gitian-linux.yml']) - subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-linux', '--destination', '../gitian.sigs/', '../bitcoin/contrib/gitian-descriptors/gitian-linux.yml']) - subprocess.check_call('mv build/out/bitcoin-*.tar.gz build/out/src/bitcoin-*.tar.gz ../bitcoin-binaries/'+args.version, shell=True) - - if args.windows: - print('\nCompiling ' + args.version + ' Windows') - subprocess.check_call(['bin/gbuild', '-j', args.jobs, '-m', args.memory, '--commit', 'bitcoin='+args.commit, '--url', 'bitcoin='+args.url, '../bitcoin/contrib/gitian-descriptors/gitian-win.yml']) - subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-win-unsigned', '--destination', '../gitian.sigs/', '../bitcoin/contrib/gitian-descriptors/gitian-win.yml']) - subprocess.check_call('mv build/out/bitcoin-*-win-unsigned.tar.gz inputs/', shell=True) - subprocess.check_call('mv build/out/bitcoin-*.zip build/out/bitcoin-*.exe build/out/src/bitcoin-*.tar.gz ../bitcoin-binaries/'+args.version, shell=True) - - if args.macos: - print('\nCompiling ' + args.version + ' MacOS') - subprocess.check_call(['bin/gbuild', '-j', args.jobs, '-m', args.memory, '--commit', 'bitcoin='+args.commit, '--url', 'bitcoin='+args.url, '../bitcoin/contrib/gitian-descriptors/gitian-osx.yml']) - subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-osx-unsigned', '--destination', '../gitian.sigs/', '../bitcoin/contrib/gitian-descriptors/gitian-osx.yml']) - subprocess.check_call('mv build/out/bitcoin-*-osx-unsigned.tar.gz inputs/', shell=True) - subprocess.check_call('mv build/out/bitcoin-*.tar.gz build/out/bitcoin-*.dmg build/out/src/bitcoin-*.tar.gz ../bitcoin-binaries/'+args.version, shell=True) - - os.chdir(workdir) - - if args.commit_files: - print('\nCommitting '+args.version+' Unsigned Sigs\n') - os.chdir('gitian.sigs') - subprocess.check_call(['git', 'add', args.version+'-linux/'+args.signer]) - subprocess.check_call(['git', 'add', args.version+'-win-unsigned/'+args.signer]) - subprocess.check_call(['git', 'add', args.version+'-osx-unsigned/'+args.signer]) - subprocess.check_call(['git', 'commit', '-m', 'Add '+args.version+' unsigned sigs for '+args.signer]) - os.chdir(workdir) - -def sign(): - global args, workdir - os.chdir('gitian-builder') - - if args.windows: - print('\nSigning ' + args.version + ' Windows') - subprocess.check_call('cp inputs/bitcoin-' + args.version + '-win-unsigned.tar.gz inputs/bitcoin-win-unsigned.tar.gz', shell=True) - subprocess.check_call(['bin/gbuild', '--skip-image', '--upgrade', '--commit', 'signature='+args.commit, '../bitcoin/contrib/gitian-descriptors/gitian-win-signer.yml']) - subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-win-signed', '--destination', 
'../gitian.sigs/', '../bitcoin/contrib/gitian-descriptors/gitian-win-signer.yml']) - subprocess.check_call('mv build/out/bitcoin-*win64-setup.exe ../bitcoin-binaries/'+args.version, shell=True) - - if args.macos: - print('\nSigning ' + args.version + ' MacOS') - subprocess.check_call('cp inputs/bitcoin-' + args.version + '-osx-unsigned.tar.gz inputs/bitcoin-osx-unsigned.tar.gz', shell=True) - subprocess.check_call(['bin/gbuild', '--skip-image', '--upgrade', '--commit', 'signature='+args.commit, '../bitcoin/contrib/gitian-descriptors/gitian-osx-signer.yml']) - subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-osx-signed', '--destination', '../gitian.sigs/', '../bitcoin/contrib/gitian-descriptors/gitian-osx-signer.yml']) - subprocess.check_call('mv build/out/bitcoin-osx-signed.dmg ../bitcoin-binaries/'+args.version+'/bitcoin-'+args.version+'-osx.dmg', shell=True) - - os.chdir(workdir) - - if args.commit_files: - print('\nCommitting '+args.version+' Signed Sigs\n') - os.chdir('gitian.sigs') - subprocess.check_call(['git', 'add', args.version+'-win-signed/'+args.signer]) - subprocess.check_call(['git', 'add', args.version+'-osx-signed/'+args.signer]) - subprocess.check_call(['git', 'commit', '-a', '-m', 'Add '+args.version+' signed binary sigs for '+args.signer]) - os.chdir(workdir) - -def verify(): - global args, workdir - rc = 0 - os.chdir('gitian-builder') - - print('\nVerifying v'+args.version+' Linux\n') - if subprocess.call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-linux', '../bitcoin/contrib/gitian-descriptors/gitian-linux.yml']): - print('Verifying v'+args.version+' Linux FAILED\n') - rc = 1 - - print('\nVerifying v'+args.version+' Windows\n') - if subprocess.call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-win-unsigned', '../bitcoin/contrib/gitian-descriptors/gitian-win.yml']): - print('Verifying v'+args.version+' Windows FAILED\n') - rc = 1 - - print('\nVerifying v'+args.version+' MacOS\n') - if subprocess.call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-osx-unsigned', '../bitcoin/contrib/gitian-descriptors/gitian-osx.yml']): - print('Verifying v'+args.version+' MacOS FAILED\n') - rc = 1 - - print('\nVerifying v'+args.version+' Signed Windows\n') - if subprocess.call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-win-signed', '../bitcoin/contrib/gitian-descriptors/gitian-win-signer.yml']): - print('Verifying v'+args.version+' Signed Windows FAILED\n') - rc = 1 - - print('\nVerifying v'+args.version+' Signed MacOS\n') - if subprocess.call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-osx-signed', '../bitcoin/contrib/gitian-descriptors/gitian-osx-signer.yml']): - print('Verifying v'+args.version+' Signed MacOS FAILED\n') - rc = 1 - - os.chdir(workdir) - return rc - -def main(): - global args, workdir - - parser = argparse.ArgumentParser(description='Script for running full Gitian builds.') - parser.add_argument('-c', '--commit', action='store_true', dest='commit', help='Indicate that the version argument is for a commit or branch') - parser.add_argument('-p', '--pull', action='store_true', dest='pull', help='Indicate that the version argument is the number of a github repository pull request') - parser.add_argument('-u', '--url', dest='url', default='https://github.com/bitcoin/bitcoin', help='Specify the URL of the repository. 
Default is %(default)s') - parser.add_argument('-v', '--verify', action='store_true', dest='verify', help='Verify the Gitian build') - parser.add_argument('-b', '--build', action='store_true', dest='build', help='Do a Gitian build') - parser.add_argument('-s', '--sign', action='store_true', dest='sign', help='Make signed binaries for Windows and MacOS') - parser.add_argument('-B', '--buildsign', action='store_true', dest='buildsign', help='Build both signed and unsigned binaries') - parser.add_argument('-o', '--os', dest='os', default='lwm', help='Specify which Operating Systems the build is for. Default is %(default)s. l for Linux, w for Windows, m for MacOS') - parser.add_argument('-j', '--jobs', dest='jobs', default='2', help='Number of processes to use. Default %(default)s') - parser.add_argument('-m', '--memory', dest='memory', default='2000', help='Memory to allocate in MiB. Default %(default)s') - parser.add_argument('-k', '--kvm', action='store_true', dest='kvm', help='Use KVM instead of LXC') - parser.add_argument('-d', '--docker', action='store_true', dest='docker', help='Use Docker instead of LXC') - parser.add_argument('-S', '--setup', action='store_true', dest='setup', help='Set up the Gitian building environment. Only works on Debian-based systems (Ubuntu, Debian)') - parser.add_argument('-D', '--detach-sign', action='store_true', dest='detach_sign', help='Create the assert file for detached signing. Will not commit anything.') - parser.add_argument('-n', '--no-commit', action='store_false', dest='commit_files', help='Do not commit anything to git') - parser.add_argument('signer', nargs='?', help='GPG signer to sign each build assert file') - parser.add_argument('version', nargs='?', help='Version number, commit, or branch to build. If building a commit or branch, the -c option must be specified') - - args = parser.parse_args() - workdir = os.getcwd() - - args.is_bionic = b'bionic' in subprocess.check_output(['lsb_release', '-cs']) - - if args.kvm and args.docker: - raise Exception('Error: cannot have both kvm and docker') - - # Ensure no more than one environment variable for gitian-builder (USE_LXC, USE_VBOX, USE_DOCKER) is set as they - # can interfere (e.g., USE_LXC being set shadows USE_DOCKER; for details see gitian-builder/libexec/make-clean-vm). - os.environ['USE_LXC'] = '' - os.environ['USE_VBOX'] = '' - os.environ['USE_DOCKER'] = '' - if args.docker: - os.environ['USE_DOCKER'] = '1' - elif not args.kvm: - os.environ['USE_LXC'] = '1' - if 'GITIAN_HOST_IP' not in os.environ.keys(): - os.environ['GITIAN_HOST_IP'] = '10.0.3.1' - if 'LXC_GUEST_IP' not in os.environ.keys(): - os.environ['LXC_GUEST_IP'] = '10.0.3.5' - - if args.setup: - setup() - - if args.buildsign: - args.build = True - args.sign = True - - if not args.build and not args.sign and not args.verify: - sys.exit(0) - - args.linux = 'l' in args.os - args.windows = 'w' in args.os - args.macos = 'm' in args.os - - # Disable for MacOS if no SDK found - if args.macos and not os.path.isfile('gitian-builder/inputs/MacOSX10.14.sdk.tar.gz'): - print('Cannot build for MacOS, SDK does not exist. 
Will build for other OSes') - args.macos = False - - args.sign_prog = 'true' if args.detach_sign else 'gpg --detach-sign' - - script_name = os.path.basename(sys.argv[0]) - if not args.signer: - print(script_name+': Missing signer') - print('Try '+script_name+' --help for more information') - sys.exit(1) - if not args.version: - print(script_name+': Missing version') - print('Try '+script_name+' --help for more information') - sys.exit(1) - - # Add leading 'v' for tags - if args.commit and args.pull: - raise Exception('Cannot have both commit and pull') - args.commit = ('' if args.commit else 'v') + args.version - - os.chdir('bitcoin') - if args.pull: - subprocess.check_call(['git', 'fetch', args.url, 'refs/pull/'+args.version+'/merge']) - os.chdir('../gitian-builder/inputs/bitcoin') - subprocess.check_call(['git', 'fetch', args.url, 'refs/pull/'+args.version+'/merge']) - args.commit = subprocess.check_output(['git', 'show', '-s', '--format=%H', 'FETCH_HEAD'], universal_newlines=True, encoding='utf8').strip() - args.version = 'pull-' + args.version - print(args.commit) - subprocess.check_call(['git', 'fetch']) - subprocess.check_call(['git', 'checkout', args.commit]) - os.chdir(workdir) - - os.chdir('gitian-builder') - subprocess.check_call(['git', 'pull']) - os.chdir(workdir) - - if args.build: - build() - - if args.sign: - sign() - - if args.verify: - os.chdir('gitian.sigs') - subprocess.check_call(['git', 'pull']) - os.chdir(workdir) - sys.exit(verify()) - -if __name__ == '__main__': - main() diff --git a/contrib/gitian-descriptors/assign_DISTNAME b/contrib/gitian-descriptors/assign_DISTNAME deleted file mode 100755 index a2ca768aaa70b..0000000000000 --- a/contrib/gitian-descriptors/assign_DISTNAME +++ /dev/null @@ -1,12 +0,0 @@ -# Copyright (c) 2020 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. 
-# -# A helper script to be sourced into the gitian descriptors - -if RECENT_TAG="$(git describe --exact-match HEAD)"; then - VERSION="${RECENT_TAG#v}" -else - VERSION="$(git rev-parse --short=12 HEAD)" -fi -DISTNAME="bitcoin-${VERSION}" diff --git a/contrib/gitian-descriptors/gitian-linux.yml b/contrib/gitian-descriptors/gitian-linux.yml deleted file mode 100644 index 0ed1e16f7ea90..0000000000000 --- a/contrib/gitian-descriptors/gitian-linux.yml +++ /dev/null @@ -1,187 +0,0 @@ ---- -name: "bitcoin-core-linux-0.21" -enable_cache: true -distro: "ubuntu" -suites: -- "bionic" -architectures: -- "amd64" -packages: -- "curl" -- "g++-aarch64-linux-gnu" -- "g++-8-aarch64-linux-gnu" -- "gcc-8-aarch64-linux-gnu" -- "binutils-aarch64-linux-gnu" -- "g++-arm-linux-gnueabihf" -- "g++-8-arm-linux-gnueabihf" -- "gcc-8-arm-linux-gnueabihf" -- "binutils-arm-linux-gnueabihf" -- "g++-riscv64-linux-gnu" -- "g++-8-riscv64-linux-gnu" -- "gcc-8-riscv64-linux-gnu" -- "binutils-riscv64-linux-gnu" -- "g++-8-multilib" -- "gcc-8-multilib" -- "binutils-gold" -- "git" -- "pkg-config" -- "autoconf" -- "libtool" -- "automake" -- "faketime" -- "bsdmainutils" -- "ca-certificates" -- "python3" -remotes: -- "url": "https://github.com/bitcoin/bitcoin.git" - "dir": "bitcoin" -files: [] -script: | - set -e -o pipefail - - WRAP_DIR=$HOME/wrapped - HOSTS="x86_64-linux-gnu arm-linux-gnueabihf aarch64-linux-gnu riscv64-linux-gnu" - CONFIGFLAGS="--enable-glibc-back-compat --enable-reduce-exports --disable-bench --disable-gui-tests" - FAKETIME_HOST_PROGS="gcc g++" - FAKETIME_PROGS="date ar ranlib nm" - HOST_CFLAGS="-O2 -g" - HOST_CXXFLAGS="-O2 -g" - HOST_LDFLAGS_BASE="-static-libstdc++ -Wl,-O2" - - export QT_RCC_TEST=1 - export QT_RCC_SOURCE_DATE_OVERRIDE=1 - export TZ="UTC" - export BUILD_DIR="$PWD" - mkdir -p ${WRAP_DIR} - if test -n "$GBUILD_CACHE_ENABLED"; then - export SOURCES_PATH=${GBUILD_COMMON_CACHE} - export BASE_CACHE=${GBUILD_PACKAGE_CACHE} - mkdir -p ${BASE_CACHE} ${SOURCES_PATH} - fi - - # Use $LIB in LD_PRELOAD to avoid hardcoding the dir (See `man ld.so`) - function create_global_faketime_wrappers { - for prog in ${FAKETIME_PROGS}; do - echo '#!/usr/bin/env bash' > ${WRAP_DIR}/${prog} - echo "REAL=\`which -a ${prog} | grep -v ${WRAP_DIR}/${prog} | head -1\`" >> ${WRAP_DIR}/${prog} - echo "export LD_PRELOAD='/usr/\$LIB/faketime/libfaketime.so.1'" >> ${WRAP_DIR}/${prog} - echo "export FAKETIME=\"$1\"" >> ${WRAP_DIR}/${prog} - echo "\$REAL \$@" >> $WRAP_DIR/${prog} - chmod +x ${WRAP_DIR}/${prog} - done - } - - function create_per-host_faketime_wrappers { - for i in $HOSTS; do - for prog in ${FAKETIME_HOST_PROGS}; do - if which ${i}-${prog}-8 - then - echo '#!/usr/bin/env bash' > ${WRAP_DIR}/${i}-${prog} - echo "REAL=\`which -a ${i}-${prog}-8 | grep -v ${WRAP_DIR}/${i}-${prog} | head -1\`" >> ${WRAP_DIR}/${i}-${prog} - echo "export LD_PRELOAD='/usr/\$LIB/faketime/libfaketime.so.1'" >> ${WRAP_DIR}/${i}-${prog} - echo "export FAKETIME=\"$1\"" >> ${WRAP_DIR}/${i}-${prog} - echo "\$REAL \$@" >> $WRAP_DIR/${i}-${prog} - chmod +x ${WRAP_DIR}/${i}-${prog} - fi - done - done - } - - # Faketime for depends so intermediate results are comparable - export PATH_orig=${PATH} - create_global_faketime_wrappers "2000-01-01 12:00:00" - create_per-host_faketime_wrappers "2000-01-01 12:00:00" - export PATH=${WRAP_DIR}:${PATH} - - EXTRA_INCLUDES_BASE=$WRAP_DIR/extra_includes - mkdir -p $EXTRA_INCLUDES_BASE - - # x86 needs /usr/include/i386-linux-gnu/asm pointed to /usr/include/x86_64-linux-gnu/asm, - # but we can't write there. 
Instead, create a link here and force it to be included in the - # search paths by wrapping gcc/g++. - - mkdir -p $EXTRA_INCLUDES_BASE/i686-pc-linux-gnu - rm -f $WRAP_DIR/extra_includes/i686-pc-linux-gnu/asm - ln -s /usr/include/x86_64-linux-gnu/asm $EXTRA_INCLUDES_BASE/i686-pc-linux-gnu/asm - - for prog in gcc g++; do - rm -f ${WRAP_DIR}/${prog} - cat << EOF > ${WRAP_DIR}/${prog} - #!/usr/bin/env bash - REAL="$(which -a ${prog}-8 | grep -v ${WRAP_DIR}/${prog} | head -1)" - for var in "\$@" - do - if [ "\$var" = "-m32" ]; then - export C_INCLUDE_PATH="$EXTRA_INCLUDES_BASE/i686-pc-linux-gnu" - export CPLUS_INCLUDE_PATH="$EXTRA_INCLUDES_BASE/i686-pc-linux-gnu" - break - fi - done - \$REAL \$@ - EOF - chmod +x ${WRAP_DIR}/${prog} - done - - cd bitcoin - BASEPREFIX="${PWD}/depends" - # Build dependencies for each host - for i in $HOSTS; do - EXTRA_INCLUDES="$EXTRA_INCLUDES_BASE/$i" - if [ -d "$EXTRA_INCLUDES" ]; then - export HOST_ID_SALT="$EXTRA_INCLUDES" - fi - make ${MAKEOPTS} -C ${BASEPREFIX} HOST="${i}" - unset HOST_ID_SALT - done - - # Faketime for binaries - export PATH=${PATH_orig} - create_global_faketime_wrappers "${REFERENCE_DATETIME}" - create_per-host_faketime_wrappers "${REFERENCE_DATETIME}" - export PATH=${WRAP_DIR}:${PATH} - - # Define DISTNAME variable. - # shellcheck source=contrib/gitian-descriptors/assign_DISTNAME - source contrib/gitian-descriptors/assign_DISTNAME - - GIT_ARCHIVE="${OUTDIR}/src/${DISTNAME}.tar.gz" - - # Create the source tarball - mkdir -p "$(dirname "$GIT_ARCHIVE")" - git archive --output="$GIT_ARCHIVE" HEAD - - ORIGPATH="$PATH" - # Extract the git archive into a dir for each host and build - for i in ${HOSTS}; do - export PATH=${BASEPREFIX}/${i}/native/bin:${ORIGPATH} - if [ "${i}" = "riscv64-linux-gnu" ]; then - # Workaround for https://bugs.launchpad.net/ubuntu/+source/gcc-8-cross-ports/+bug/1853740 - # TODO: remove this when no longer needed - HOST_LDFLAGS="${HOST_LDFLAGS_BASE} -Wl,-z,noexecstack" - else - HOST_LDFLAGS="${HOST_LDFLAGS_BASE}" - fi - mkdir -p distsrc-${i} - cd distsrc-${i} - INSTALLPATH="${PWD}/installed/${DISTNAME}" - mkdir -p ${INSTALLPATH} - tar -xf $GIT_ARCHIVE - - ./autogen.sh - CONFIG_SITE=${BASEPREFIX}/${i}/share/config.site ./configure --prefix=/ --disable-ccache --disable-maintainer-mode --disable-dependency-tracking ${CONFIGFLAGS} CFLAGS="${HOST_CFLAGS}" CXXFLAGS="${HOST_CXXFLAGS}" LDFLAGS="${HOST_LDFLAGS}" - make ${MAKEOPTS} - make ${MAKEOPTS} -C src check-security - make ${MAKEOPTS} -C src check-symbols - make install DESTDIR=${INSTALLPATH} - cd installed - find . -name "lib*.la" -delete - find . 
-name "lib*.a" -delete - rm -rf ${DISTNAME}/lib/pkgconfig - find ${DISTNAME}/bin -type f -executable -print0 | xargs -0 -n1 -I{} ../contrib/devtools/split-debug.sh {} {} {}.dbg - find ${DISTNAME}/lib -type f -print0 | xargs -0 -n1 -I{} ../contrib/devtools/split-debug.sh {} {} {}.dbg - cp ../README.md ${DISTNAME}/ - find ${DISTNAME} -not -name "*.dbg" | sort | tar --mtime="$REFERENCE_DATETIME" --no-recursion --mode='u+rw,go+r-w,a+X' --owner=0 --group=0 -c -T - | gzip -9n > ${OUTDIR}/${DISTNAME}-${i}.tar.gz - find ${DISTNAME} -name "*.dbg" | sort | tar --mtime="$REFERENCE_DATETIME" --no-recursion --mode='u+rw,go+r-w,a+X' --owner=0 --group=0 -c -T - | gzip -9n > ${OUTDIR}/${DISTNAME}-${i}-debug.tar.gz - cd ../../ - rm -rf distsrc-${i} - done diff --git a/contrib/gitian-descriptors/gitian-osx-signer.yml b/contrib/gitian-descriptors/gitian-osx-signer.yml deleted file mode 100644 index a4f3219c2291e..0000000000000 --- a/contrib/gitian-descriptors/gitian-osx-signer.yml +++ /dev/null @@ -1,40 +0,0 @@ ---- -name: "bitcoin-dmg-signer" -distro: "ubuntu" -suites: -- "bionic" -architectures: -- "amd64" -packages: -- "faketime" -remotes: -- "url": "https://github.com/bitcoin-core/bitcoin-detached-sigs.git" - "dir": "signature" -files: -- "bitcoin-osx-unsigned.tar.gz" -script: | - set -e -o pipefail - - WRAP_DIR=$HOME/wrapped - mkdir -p ${WRAP_DIR} - export PATH="$PWD":$PATH - FAKETIME_PROGS="dmg genisoimage" - - # Create global faketime wrappers - for prog in ${FAKETIME_PROGS}; do - echo '#!/usr/bin/env bash' > ${WRAP_DIR}/${prog} - echo "REAL=\`which -a ${prog} | grep -v ${WRAP_DIR}/${prog} | head -1\`" >> ${WRAP_DIR}/${prog} - echo "export LD_PRELOAD='/usr/\$LIB/faketime/libfaketime.so.1'" >> ${WRAP_DIR}/${prog} - echo "export FAKETIME=\"${REFERENCE_DATETIME}\"" >> ${WRAP_DIR}/${prog} - echo "\$REAL \$@" >> $WRAP_DIR/${prog} - chmod +x ${WRAP_DIR}/${prog} - done - - UNSIGNED=bitcoin-osx-unsigned.tar.gz - SIGNED=bitcoin-osx-signed.dmg - - tar -xf ${UNSIGNED} - OSX_VOLNAME="$(cat osx_volname)" - ./detached-sig-apply.sh ${UNSIGNED} signature/osx - ${WRAP_DIR}/genisoimage -no-cache-inodes -D -l -probe -V "${OSX_VOLNAME}" -no-pad -r -dir-mode 0755 -apple -o uncompressed.dmg signed-app - ${WRAP_DIR}/dmg dmg uncompressed.dmg ${OUTDIR}/${SIGNED} diff --git a/contrib/gitian-descriptors/gitian-osx.yml b/contrib/gitian-descriptors/gitian-osx.yml deleted file mode 100644 index bbae7201e5b78..0000000000000 --- a/contrib/gitian-descriptors/gitian-osx.yml +++ /dev/null @@ -1,159 +0,0 @@ ---- -name: "bitcoin-core-osx-0.21" -enable_cache: true -distro: "ubuntu" -suites: -- "bionic" -architectures: -- "amd64" -packages: -- "ca-certificates" -- "curl" -- "g++" -- "git" -- "pkg-config" -- "autoconf" -- "librsvg2-bin" -- "libtiff-tools" -- "libtool" -- "automake" -- "faketime" -- "bsdmainutils" -- "cmake" -- "imagemagick" -- "libcap-dev" -- "libz-dev" -- "libbz2-dev" -- "python3" -- "python3-dev" -- "python3-setuptools" -- "fonts-tuffy" -remotes: -- "url": "https://github.com/bitcoin/bitcoin.git" - "dir": "bitcoin" -files: -- "MacOSX10.14.sdk.tar.gz" -script: | - set -e -o pipefail - - WRAP_DIR=$HOME/wrapped - HOSTS="x86_64-apple-darwin16" - CONFIGFLAGS="--enable-reduce-exports --disable-bench --disable-gui-tests GENISOIMAGE=$WRAP_DIR/genisoimage" - FAKETIME_HOST_PROGS="" - FAKETIME_PROGS="ar ranlib date dmg genisoimage" - - export QT_RCC_TEST=1 - export QT_RCC_SOURCE_DATE_OVERRIDE=1 - export TZ="UTC" - export BUILD_DIR="$PWD" - mkdir -p ${WRAP_DIR} - if test -n "$GBUILD_CACHE_ENABLED"; then - export 
SOURCES_PATH=${GBUILD_COMMON_CACHE} - export BASE_CACHE=${GBUILD_PACKAGE_CACHE} - mkdir -p ${BASE_CACHE} ${SOURCES_PATH} - fi - - export ZERO_AR_DATE=1 - - # Use $LIB in LD_PRELOAD to avoid hardcoding the dir (See `man ld.so`) - function create_global_faketime_wrappers { - for prog in ${FAKETIME_PROGS}; do - echo '#!/usr/bin/env bash' > ${WRAP_DIR}/${prog} - echo "REAL=\`which -a ${prog} | grep -v ${WRAP_DIR}/${prog} | head -1\`" >> ${WRAP_DIR}/${prog} - echo "export LD_PRELOAD='/usr/\$LIB/faketime/libfaketime.so.1'" >> ${WRAP_DIR}/${prog} - echo "export FAKETIME=\"$1\"" >> ${WRAP_DIR}/${prog} - echo "\$REAL \$@" >> $WRAP_DIR/${prog} - chmod +x ${WRAP_DIR}/${prog} - done - } - - function create_per-host_faketime_wrappers { - for i in $HOSTS; do - for prog in ${FAKETIME_HOST_PROGS}; do - echo '#!/usr/bin/env bash' > ${WRAP_DIR}/${i}-${prog} - echo "REAL=\`which -a ${i}-${prog} | grep -v ${WRAP_DIR}/${i}-${prog} | head -1\`" >> ${WRAP_DIR}/${i}-${prog} - echo "export LD_PRELOAD='/usr/\$LIB/faketime/libfaketime.so.1'" >> ${WRAP_DIR}/${i}-${prog} - echo "export FAKETIME=\"$1\"" >> ${WRAP_DIR}/${i}-${prog} - echo "\$REAL \$@" >> $WRAP_DIR/${i}-${prog} - chmod +x ${WRAP_DIR}/${i}-${prog} - done - done - } - - # Faketime for depends so intermediate results are comparable - export PATH_orig=${PATH} - create_global_faketime_wrappers "2000-01-01 12:00:00" - create_per-host_faketime_wrappers "2000-01-01 12:00:00" - export PATH=${WRAP_DIR}:${PATH} - - cd bitcoin - BASEPREFIX="${PWD}/depends" - - mkdir -p ${BASEPREFIX}/SDKs - tar -C ${BASEPREFIX}/SDKs -xf ${BUILD_DIR}/MacOSX10.14.sdk.tar.gz - - # Build dependencies for each host - for i in $HOSTS; do - make ${MAKEOPTS} -C ${BASEPREFIX} HOST="${i}" - done - - # Faketime for binaries - export PATH=${PATH_orig} - create_global_faketime_wrappers "${REFERENCE_DATETIME}" - create_per-host_faketime_wrappers "${REFERENCE_DATETIME}" - export PATH=${WRAP_DIR}:${PATH} - - # Define DISTNAME variable. - # shellcheck source=contrib/gitian-descriptors/assign_DISTNAME - source contrib/gitian-descriptors/assign_DISTNAME - - GIT_ARCHIVE="${OUTDIR}/src/${DISTNAME}.tar.gz" - - # Create the source tarball - mkdir -p "$(dirname "$GIT_ARCHIVE")" - git archive --output="$GIT_ARCHIVE" HEAD - - ORIGPATH="$PATH" - # Extract the git archive into a dir for each host and build - for i in ${HOSTS}; do - export PATH=${BASEPREFIX}/${i}/native/bin:${ORIGPATH} - mkdir -p distsrc-${i} - cd distsrc-${i} - INSTALLPATH="${PWD}/installed/${DISTNAME}" - mkdir -p ${INSTALLPATH} - tar -xf $GIT_ARCHIVE - - ./autogen.sh - CONFIG_SITE=${BASEPREFIX}/${i}/share/config.site ./configure --prefix=/ --disable-ccache --disable-maintainer-mode --disable-dependency-tracking ${CONFIGFLAGS} - make ${MAKEOPTS} - make ${MAKEOPTS} -C src check-security - make ${MAKEOPTS} -C src check-symbols - make install-strip DESTDIR=${INSTALLPATH} - - make osx_volname - make deploydir - OSX_VOLNAME="$(cat osx_volname)" - mkdir -p unsigned-app-${i} - cp osx_volname unsigned-app-${i}/ - cp contrib/macdeploy/detached-sig-apply.sh unsigned-app-${i} - cp contrib/macdeploy/detached-sig-create.sh unsigned-app-${i} - cp ${BASEPREFIX}/${i}/native/bin/dmg ${BASEPREFIX}/${i}/native/bin/genisoimage unsigned-app-${i} - cp ${BASEPREFIX}/${i}/native/bin/${i}-codesign_allocate unsigned-app-${i}/codesign_allocate - cp ${BASEPREFIX}/${i}/native/bin/${i}-pagestuff unsigned-app-${i}/pagestuff - mv dist unsigned-app-${i} - pushd unsigned-app-${i} - find . 
| sort | tar --mtime="$REFERENCE_DATETIME" --no-recursion --mode='u+rw,go+r-w,a+X' --owner=0 --group=0 -c -T - | gzip -9n > ${OUTDIR}/${DISTNAME}-osx-unsigned.tar.gz - popd - - make deploy - ${WRAP_DIR}/dmg dmg "${OSX_VOLNAME}.dmg" ${OUTDIR}/${DISTNAME}-osx-unsigned.dmg - - cd installed - find . -name "lib*.la" -delete - find . -name "lib*.a" -delete - rm -rf ${DISTNAME}/lib/pkgconfig - find ${DISTNAME} | sort | tar --mtime="$REFERENCE_DATETIME" --no-recursion --mode='u+rw,go+r-w,a+X' --owner=0 --group=0 -c -T - | gzip -9n > ${OUTDIR}/${DISTNAME}-${i}.tar.gz - cd ../../ - done - - mv ${OUTDIR}/${DISTNAME}-x86_64-*.tar.gz ${OUTDIR}/${DISTNAME}-osx64.tar.gz diff --git a/contrib/gitian-descriptors/gitian-win-signer.yml b/contrib/gitian-descriptors/gitian-win-signer.yml deleted file mode 100644 index 6bcd126662f3a..0000000000000 --- a/contrib/gitian-descriptors/gitian-win-signer.yml +++ /dev/null @@ -1,42 +0,0 @@ ---- -name: "bitcoin-win-signer" -distro: "ubuntu" -suites: -- "bionic" -architectures: -- "amd64" -packages: -- "libssl-dev" -- "autoconf" -- "automake" -- "libtool" -- "pkg-config" -remotes: -- "url": "https://github.com/bitcoin-core/bitcoin-detached-sigs.git" - "dir": "signature" -files: -- "osslsigncode-2.0.tar.gz" -- "bitcoin-win-unsigned.tar.gz" -script: | - set -e -o pipefail - - BUILD_DIR="$PWD" - SIGDIR=${BUILD_DIR}/signature/win - UNSIGNED_DIR=${BUILD_DIR}/unsigned - - echo "5a60e0a4b3e0b4d655317b2f12a810211c50242138322b16e7e01c6fbb89d92f osslsigncode-2.0.tar.gz" | sha256sum -c - - mkdir -p ${UNSIGNED_DIR} - tar -C ${UNSIGNED_DIR} -xf bitcoin-win-unsigned.tar.gz - - tar xf osslsigncode-2.0.tar.gz - cd osslsigncode-2.0 - - ./autogen.sh - ./configure --without-gsf --without-curl --disable-dependency-tracking - make - find ${UNSIGNED_DIR} -name "*-unsigned.exe" | while read i; do - INFILE="$(basename "${i}")" - OUTFILE="${INFILE/-unsigned}" - ./osslsigncode attach-signature -in "${i}" -out "${OUTDIR}/${OUTFILE}" -sigin "${SIGDIR}/${INFILE}.pem" - done diff --git a/contrib/gitian-descriptors/gitian-win.yml b/contrib/gitian-descriptors/gitian-win.yml deleted file mode 100644 index d05b6d426da5f..0000000000000 --- a/contrib/gitian-descriptors/gitian-win.yml +++ /dev/null @@ -1,156 +0,0 @@ ---- -name: "bitcoin-core-win-0.21" -enable_cache: true -distro: "ubuntu" -suites: -- "bionic" -architectures: -- "amd64" -packages: -- "curl" -- "g++" -- "git" -- "pkg-config" -- "autoconf" -- "libtool" -- "automake" -- "faketime" -- "bsdmainutils" -- "mingw-w64" -- "g++-mingw-w64" -- "nsis" -- "zip" -- "ca-certificates" -- "python3" -remotes: -- "url": "https://github.com/bitcoin/bitcoin.git" - "dir": "bitcoin" -files: [] -script: | - set -e -o pipefail - - WRAP_DIR=$HOME/wrapped - HOSTS="x86_64-w64-mingw32" - CONFIGFLAGS="--enable-reduce-exports --disable-bench --disable-gui-tests" - FAKETIME_HOST_PROGS="ar ranlib nm windres strip objcopy" - FAKETIME_PROGS="date makensis zip" - HOST_CFLAGS="-O2 -g -fno-ident" - HOST_CXXFLAGS="-O2 -g -fno-ident" - - export QT_RCC_TEST=1 - export QT_RCC_SOURCE_DATE_OVERRIDE=1 - export TZ="UTC" - export BUILD_DIR="$PWD" - mkdir -p ${WRAP_DIR} - if test -n "$GBUILD_CACHE_ENABLED"; then - export SOURCES_PATH=${GBUILD_COMMON_CACHE} - export BASE_CACHE=${GBUILD_PACKAGE_CACHE} - mkdir -p ${BASE_CACHE} ${SOURCES_PATH} - fi - - # Use $LIB in LD_PRELOAD to avoid hardcoding the dir (See `man ld.so`) - function create_global_faketime_wrappers { - for prog in ${FAKETIME_PROGS}; do - echo '#!/usr/bin/env bash' > ${WRAP_DIR}/${prog} - echo "REAL=\`which -a ${prog} | grep -v 
${WRAP_DIR}/${prog} | head -1\`" >> ${WRAP_DIR}/${prog} - echo "export LD_PRELOAD='/usr/\$LIB/faketime/libfaketime.so.1'" >> ${WRAP_DIR}/${prog} - echo "export FAKETIME=\"$1\"" >> ${WRAP_DIR}/${prog} - echo "\$REAL \$@" >> $WRAP_DIR/${prog} - chmod +x ${WRAP_DIR}/${prog} - done - } - - function create_per-host_faketime_wrappers { - for i in $HOSTS; do - for prog in ${FAKETIME_HOST_PROGS}; do - echo '#!/usr/bin/env bash' > ${WRAP_DIR}/${i}-${prog} - echo "REAL=\`which -a ${i}-${prog} | grep -v ${WRAP_DIR}/${i}-${prog} | head -1\`" >> ${WRAP_DIR}/${i}-${prog} - echo "export LD_PRELOAD='/usr/\$LIB/faketime/libfaketime.so.1'" >> ${WRAP_DIR}/${i}-${prog} - echo "export FAKETIME=\"$1\"" >> ${WRAP_DIR}/${i}-${prog} - echo "\$REAL \$@" >> $WRAP_DIR/${i}-${prog} - chmod +x ${WRAP_DIR}/${i}-${prog} - done - done - } - - function create_per-host_compiler_wrapper { - # -posix variant is required for c++11 threading. - for i in $HOSTS; do - for prog in gcc g++; do - echo '#!/usr/bin/env bash' > ${WRAP_DIR}/${i}-${prog} - echo "REAL=\`which -a ${i}-${prog}-posix | grep -v ${WRAP_DIR}/${i}-${prog} | head -1\`" >> ${WRAP_DIR}/${i}-${prog} - echo "export LD_PRELOAD='/usr/\$LIB/faketime/libfaketime.so.1'" >> ${WRAP_DIR}/${i}-${prog} - echo "export FAKETIME=\"$1\"" >> ${WRAP_DIR}/${i}-${prog} - echo "\$REAL \$@" >> $WRAP_DIR/${i}-${prog} - chmod +x ${WRAP_DIR}/${i}-${prog} - done - done - } - - # Faketime for depends so intermediate results are comparable - export PATH_orig=${PATH} - create_global_faketime_wrappers "2000-01-01 12:00:00" - create_per-host_faketime_wrappers "2000-01-01 12:00:00" - create_per-host_compiler_wrapper "2000-01-01 12:00:00" - export PATH=${WRAP_DIR}:${PATH} - - cd bitcoin - BASEPREFIX="${PWD}/depends" - # Build dependencies for each host - for i in $HOSTS; do - make ${MAKEOPTS} -C ${BASEPREFIX} HOST="${i}" - done - - # Faketime for binaries - export PATH=${PATH_orig} - create_global_faketime_wrappers "${REFERENCE_DATETIME}" - create_per-host_faketime_wrappers "${REFERENCE_DATETIME}" - create_per-host_compiler_wrapper "${REFERENCE_DATETIME}" - export PATH=${WRAP_DIR}:${PATH} - - # Define DISTNAME variable. - # shellcheck source=contrib/gitian-descriptors/assign_DISTNAME - source contrib/gitian-descriptors/assign_DISTNAME - - GIT_ARCHIVE="${OUTDIR}/src/${DISTNAME}.tar.gz" - - # Create the source tarball - mkdir -p "$(dirname "$GIT_ARCHIVE")" - git archive --output="$GIT_ARCHIVE" HEAD - - ORIGPATH="$PATH" - # Extract the git archive into a dir for each host and build - for i in ${HOSTS}; do - export PATH=${BASEPREFIX}/${i}/native/bin:${ORIGPATH} - mkdir -p distsrc-${i} - cd distsrc-${i} - INSTALLPATH="${PWD}/installed/${DISTNAME}" - mkdir -p ${INSTALLPATH} - tar -xf $GIT_ARCHIVE - - ./autogen.sh - CONFIG_SITE=${BASEPREFIX}/${i}/share/config.site ./configure --prefix=/ --disable-ccache --disable-maintainer-mode --disable-dependency-tracking ${CONFIGFLAGS} CFLAGS="${HOST_CFLAGS}" CXXFLAGS="${HOST_CXXFLAGS}" - make ${MAKEOPTS} - make ${MAKEOPTS} -C src check-security - make ${MAKEOPTS} -C src check-symbols - make deploy BITCOIN_WIN_INSTALLER="${OUTDIR}/${DISTNAME}-win64-setup-unsigned.exe" - make install DESTDIR=${INSTALLPATH} - cd installed - mv ${DISTNAME}/bin/*.dll ${DISTNAME}/lib/ - find . -name "lib*.la" -delete - find . 
-name "lib*.a" -delete - rm -rf ${DISTNAME}/lib/pkgconfig - find ${DISTNAME}/bin -type f -executable -print0 | xargs -0 -n1 -I{} ../contrib/devtools/split-debug.sh {} {} {}.dbg - find ${DISTNAME}/lib -type f -print0 | xargs -0 -n1 -I{} ../contrib/devtools/split-debug.sh {} {} {}.dbg - cp ../doc/README_windows.txt ${DISTNAME}/readme.txt - find ${DISTNAME} -not -name "*.dbg" -type f | sort | zip -X@ ${OUTDIR}/${DISTNAME}-${i//x86_64-w64-mingw32/win64}.zip - find ${DISTNAME} -name "*.dbg" -type f | sort | zip -X@ ${OUTDIR}/${DISTNAME}-${i//x86_64-w64-mingw32/win64}-debug.zip - cd ../../ - rm -rf distsrc-${i} - done - - cp -rf contrib/windeploy $BUILD_DIR - cd $BUILD_DIR/windeploy - mkdir unsigned - cp ${OUTDIR}/${DISTNAME}-win64-setup-unsigned.exe unsigned/ - find . | sort | tar --mtime="$REFERENCE_DATETIME" --no-recursion --mode='u+rw,go+r-w,a+X' --owner=0 --group=0 -c -T - | gzip -9n > ${OUTDIR}/${DISTNAME}-win-unsigned.tar.gz diff --git a/contrib/gitian-keys/README.md b/contrib/gitian-keys/README.md deleted file mode 100644 index ffe4fb144b1ae..0000000000000 --- a/contrib/gitian-keys/README.md +++ /dev/null @@ -1,27 +0,0 @@ -## PGP keys of Gitian builders and Developers - -The file `keys.txt` contains fingerprints of the public keys of Gitian builders -and active developers. - -The associated keys are mainly used to sign git commits or the build results -of Gitian builds. - -The most recent version of each pgp key can be found on most pgp key servers. - -Fetch the latest version from the key server to see if any key was revoked in -the meantime. -To fetch the latest version of all pgp keys in your gpg homedir, - -```sh -gpg --refresh-keys -``` - -To fetch keys of Gitian builders and active developers, feed the list of -fingerprints of the primary keys into gpg: - -```sh -while read fingerprint keyholder_name; do gpg --keyserver hkp://subset.pool.sks-keyservers.net --recv-keys ${fingerprint}; done < ./keys.txt -``` - -Add your key to the list if you provided Gitian signatures for two major or -minor releases of Bitcoin Core. 
diff --git a/contrib/gitian-keys/keys.txt b/contrib/gitian-keys/keys.txt deleted file mode 100644 index 0a2c1302c82f8..0000000000000 --- a/contrib/gitian-keys/keys.txt +++ /dev/null @@ -1,36 +0,0 @@ -9D3CC86A72F8494342EA5FD10A41BDC3F4FAFF1C Aaron Clauson (sipsorcery) -617C90010B3BD370B0AC7D424BB42E31C79111B8 Akira Takizawa -E944AE667CF960B1004BC32FCA662BE18B877A60 Andreas Schildbach -152812300785C96444D3334D17565732E08E5E41 Andrew Chow -912FD3228387123DC97E0E57D5566241A0295FA9 BtcDrak -C519EBCF3B926298946783EFF6430754120EC2F4 Christian Decker (cdecker) -F20F56EF6A067F70E8A5C99FFF95FAA971697405 centaur -C060A6635913D98A3587D7DB1C2491FFEB0EF770 Cory Fields -BF6273FAEF7CC0BA1F562E50989F6B3048A116B5 Dev Random -6D3170C1DC2C6FD0AEEBCA6743811D1A26623924 Douglas Roark -9A1689B60D1B3CCE9262307A2F40A9BF167FBA47 Erik Mossberg (erkmos) -D35176BE9264832E4ACA8986BF0792FBE95DC863 fivepiece -01CDF4627A3B88AAE4A571C87588242FBE38D3A8 Gavin Andresen -D1DBF2C4B96F2DEBF4C16654410108112E7EA81F Hennadii Stepanov (hebasto) -D3CC177286005BB8FF673294C5242A1AB3936517 jl2012 -82921A4B88FD454B7EB8CE3C796C4109063D4EAF Jon Atack -32EE5C4C3FA15CCADB46ABE529D4BCB6416F53EC Jonas Schnelli -4B4E840451149DD7FB0D633477DFAB5C3108B9A8 Jorge Timon -C42AFF7C61B3E44A1454CD3557AF762DB3353322 Karl-Johan Alm (kallewoof) -E463A93F5F3117EEDE6C7316BD02942421F4889F Luke Dashjr -B8B3F1C0E58C15DB6A81D30C3648A882F4316B9B Marco Falke -07DF3E57A548CCFB7530709189BBB8663E2E65CE Matt Corallo (BlueMatt) -CA03882CB1FC067B5D3ACFE4D300116E1C875A3D MeshCollider -E777299FC265DD04793070EB944D35F9AC3DB76A Michael Ford -9692B91BBF0E8D34DFD33B1882C5C009628ECF0C Michagogo -77E72E69DA7EE0A148C06B21B34821D4944DE5F7 Nils Schneider -D62A803E27E7F43486035ADBBCD04D8E9CCCAC2A Paul Rabahy -37EC7D7B0A217CDB4B4E007E7FAB114267E4FA04 Peter Todd -D762373D24904A3E42F33B08B9A408E71DAAC974 Pieter Wuille (Location: Leuven, Belgium) -133EAC179436F14A5CF1B794860FEB804E669320 Pieter Wuille -A8FC55F3B04BA3146F3492E79303B33A305224CB Sebastian Kung (TheCharlatan) -ED9BDF7AD6A55E232E84524257FF9BDBCC301009 Sjors Provoost -9EDAFF80E080659604F4A76B2EBB056FD847F8A7 Stephan Oeste (Emzy) -AEC1884398647C47413C1C3FB1179EB7347DC10D Warren Togami -79D00BAC68B56D422F945A8F8E3A8F3247DBCBBF Willy Ko -71A3B16735405025D447E8F274810B012346C9A6 Wladimir J. van der Laan diff --git a/contrib/guix/INSTALL.md b/contrib/guix/INSTALL.md new file mode 100644 index 0000000000000..10f5835bb881e --- /dev/null +++ b/contrib/guix/INSTALL.md @@ -0,0 +1,814 @@ +# Guix Installation and Setup + +This only needs to be done once per machine. If you have already completed the +installation and setup, please proceed to [perform a build](./README.md). + +Otherwise, you may choose from one of the following options to install Guix: + +1. Using the official **shell installer script** [⤓ skip to section][install-script] + - Maintained by Guix developers + - Easiest (automatically performs *most* setup) + - Works on nearly all Linux distributions + - Only installs latest release + - Binary installation only, requires high level of trust + - Note: The script needs to be run as root, so it should be inspected before it's run +2. Using the official **binary tarball** [⤓ skip to section][install-bin-tarball] + - Maintained by Guix developers + - Normal difficulty (full manual setup required) + - Works on nearly all Linux distributions + - Installs any release + - Binary installation only, requires high level of trust +3. 
Using fanquake's **Docker image** [↗︎ external instructions][install-fanquake-docker] + - Maintained by fanquake + - Easy (automatically performs *some* setup) + - Works wherever Docker images work + - Installs any release + - Binary installation only, requires high level of trust +4. Using a **distribution-maintained package** [⤓ skip to section][install-distro-pkg] + - Maintained by distribution's Guix package maintainer + - Normal difficulty (manual setup required) + - Works only on distributions with Guix packaged, see: https://repology.org/project/guix/versions + - Installs a release decided on by package maintainer + - Source or binary installation depending on the distribution +5. Building **from source** [⤓ skip to section][install-source] + - Maintained by you + - Hard, but rewarding + - Can be made to work on most Linux distributions + - Installs any commit (more granular) + - Source installation, requires lower level of trust + +## Options 1 and 2: Using the official shell installer script or binary tarball + +The installation instructions for both the official shell installer script and +the binary tarballs can be found in the GNU Guix Manual's [Binary Installation +section](https://guix.gnu.org/manual/en/html_node/Binary-Installation.html). + +Note that running through the binary tarball installation steps is largely +equivalent to manually performing what the shell installer script does. + +Note that at the time of writing (July 5th, 2021), the shell installer script +automatically creates an `/etc/profile.d` entry which the binary tarball +installation instructions do not ask you to create. However, you will likely +need this entry for better desktop integration. Please see [this +section](#add-an-etcprofiled-entry) for instructions on how to add a +`/etc/profile.d/guix.sh` entry. + +Regardless of which installation option you chose, the changes to +`/etc/profile.d` will not take effect until the next shell or desktop session, +so you should log out and log back in. + +## Option 3: Using fanquake's Docker image + +Please refer to fanquake's instructions +[here](https://github.com/fanquake/core-review/tree/master/guix). + +## Option 4: Using a distribution-maintained package + +Note that this section is based on the distro packaging situation at the time of +writing (July 2021). Guix is expected to be more widely packaged over time. For +an up-to-date view on Guix's package status/version across distros, please see: +https://repology.org/project/guix/versions + +### Debian / Ubuntu + +Guix is available as a distribution package in [Debian +](https://packages.debian.org/search?keywords=guix) and [Ubuntu +](https://packages.ubuntu.com/search?keywords=guix). + +To install: +```sh +sudo apt install guix +``` + +### Arch Linux + +Guix is available in the AUR as +[`guix`](https://aur.archlinux.org/packages/guix/), please follow the +installation instructions in the Arch Linux Wiki ([live +link](https://wiki.archlinux.org/index.php/Guix#AUR_Package_Installation), +[2021/03/30 +permalink](https://wiki.archlinux.org/index.php?title=Guix&oldid=637559#AUR_Package_Installation)) +to install Guix. + +At the time of writing (2021/03/30), the `check` phase will fail if the path to +guix's build directory is longer than 36 characters due to an anachronistic +character limit on the shebang line. Since the `check` phase happens after the +`build` phase, which may take quite a long time, it is recommended that users +either: + +1. 
Skip the `check` phase + - For `makepkg`: `makepkg --nocheck ...` + - For `yay`: `yay --mflags="--nocheck" ...` + - For `paru`: `paru --nocheck ...` +2. Or, check their build directory's length beforehand + - For those building with `makepkg`: `pwd | wc -c` + +## Option 5: Building from source + +Building Guix from source is a rather involved process but a rewarding one for +those looking to minimize trust and maximize customizability (e.g. building a +particular commit of Guix). Previous experience with using autotools-style build +systems to build packages from source will be helpful. *hic sunt dracones.* + +I strongly urge you to at least skim through the entire section once before you +start issuing commands, as it will save you a lot of unnecessary pain and +anguish. + +### Installing common build tools + +There are a few basic build tools that are required for most things we'll build, +so let's install them now: + +Text transformation/i18n: +- `autopoint` (sometimes packaged in `gettext`) +- `help2man` +- `po4a` +- `texinfo` + +Build system tools: +- `g++` w/ C++11 support +- `libtool` +- `autoconf` +- `automake` +- `pkg-config` (sometimes packaged as `pkgconf`) +- `make` +- `cmake` + +Miscellaneous: +- `git` +- `gnupg` +- `python3` + +### Building and Installing Guix's dependencies + +In order to build Guix itself from source, we need to first make sure that the +necessary dependencies are installed and discoverable. The most up-to-date list +of Guix's dependencies is kept in the ["Requirements" +section](https://guix.gnu.org/manual/en/html_node/Requirements.html) of the Guix +Reference Manual. + +Depending on your distribution, most or all of these dependencies may already be +packaged and installable without manually building and installing. + +For reference, the graphic below outlines Guix v1.3.0's dependency graph: + +![bootstrap map](https://user-images.githubusercontent.com/6399679/125064185-a9a59880-e0b0-11eb-82c1-9b8e5dc9950d.png) + +If you do not care about building each dependency from source, and Guix is +already packaged for your distribution, you can easily install only the build +dependencies of Guix. For example, to enable deb-src and install the Guix build +dependencies on Ubuntu/Debian: + +```sh +sed -i 's|# deb-src|deb-src|g' /etc/apt/sources.list +apt update +apt-get build-dep -y guix +``` + +If this succeeded, you can likely skip to section +["Building and Installing Guix itself"](#building-and-installing-guix-itself). + +#### Guile + +###### Corner case: Multiple versions of Guile on one system + +It is recommended to only install the required version of Guile, so that build systems do +not get confused about which Guile to use. + +However, if you insist on having more versions of Guile installed on +your system, then you need to **consistently** specify +`GUILE_EFFECTIVE_VERSION=3.0` to all +`./configure` invocations for Guix and its dependencies. + +##### Installing Guile + +If your distribution splits packages into `-dev`-suffixed and +non-`-dev`-suffixed sub-packages (as is the case for Debian-derived +distributions), please make sure to install both. For example, to install Guile +v3.0 on Debian/Ubuntu: + +```sh +apt install guile-3.0 guile-3.0-dev +``` + +#### Mixing distribution packages and source-built packages + +At the time of writing, most distributions have _some_ of Guix's dependencies +packaged, but not all. This means that you may want to install the distribution +package for some dependencies, and manually build-from-source for others. 
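+
+If you are not sure which way a given dependency is currently provided on your
+system, a quick check can help (optional; the commands below assume a
+Debian-derived system, `guile-3.0` is only an example package name, and the
+second command uses Guile's `%site-dir` procedure):
+
+```sh
+# Is Guile already installed from the distribution?
+# (package name is an example; adjust for your distribution)
+dpkg -l 'guile-3.0*' 2>/dev/null | grep '^ii'
+# Where does Guile itself look for site modules by default?
+guile -c '(display (%site-dir)) (newline)'
+```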
+
+Distribution packages usually install to `/usr`, which is different from the
+default `./configure` prefix of source-built packages: `/usr/local`.
+
+This means that if you mix-and-match distribution packages and source-built
+packages and do not specify exactly `--prefix=/usr` to `./configure` for
+source-built packages, you will need to augment the `GUILE_LOAD_PATH` and
+`GUILE_LOAD_COMPILED_PATH` environment variables so that Guile will look
+under the right prefix and find your source-built packages.
+
+For example, if you are using Guile v3.0, and have Guile packages in the
+`/usr/local` prefix, either add the following lines to your `.profile` or
+`.bash_profile` so that the environment variables are properly set for all
+future shell logins, or paste the lines into a POSIX-style shell to temporarily
+modify the environment variables of your current shell session.
+
+```sh
+# Help Guile v3.0.x find packages in /usr/local
+export GUILE_LOAD_PATH="/usr/local/share/guile/site/3.0${GUILE_LOAD_PATH:+:}$GUILE_LOAD_PATH"
+export GUILE_LOAD_COMPILED_PATH="/usr/local/lib/guile/3.0/site-ccache${GUILE_LOAD_COMPILED_PATH:+:}$GUILE_LOAD_COMPILED_PATH"
+```
+
+Note that these environment variables are used to check for packages during
+`./configure`, so they should be set as soon as possible should you want to use
+a prefix other than `/usr`.
+
+#### Building and installing source-built packages
+
+***IMPORTANT**: A few dependencies have non-obvious quirks/errata which are
+documented in the sub-sections immediately below. Please read these sections
+before proceeding to build and install these packages.*
+
+Although you should always refer to the README or INSTALL files for the most
+accurate information, most of these dependencies use autoconf-style build
+systems (check if there's a `configure.ac` file), and will likely do the right
+thing with the following:
+
+Clone the repository and check out the latest release:
+```sh
+git clone <upstream URL>/<repository>.git
+cd <repository>
+git tag -l # check for the latest release
+git checkout <latest-release>
+```
+
+For autoconf-based build systems (if `./autogen.sh` or `configure.ac` exists at
+the root of the repository):
+
+```sh
+./autogen.sh || autoreconf -vfi
+./configure --prefix=<prefix>
+make
+sudo make install
+```
+
+For CMake-based build systems (if `CMakeLists.txt` exists at the root of the
+repository):
+
+```sh
+mkdir build && cd build
+cmake .. -DCMAKE_INSTALL_PREFIX=<prefix>
+sudo cmake --build . --target install
+```
+
+If you choose not to specify exactly `--prefix=/usr` to `./configure`, please
+make sure you've carefully read the [previous section] on mixing distribution
+packages and source-built packages.
+
+##### Binding packages require `-dev`-suffixed packages
+
+Relevant for:
+- Everyone
+
+When building bindings, the `-dev`-suffixed version of the original package
+needs to be installed. For example, building `Guile-zlib` on Debian-derived
+distributions requires that `zlib1g-dev` is installed.
+
+When using bindings, the `-dev`-suffixed version of the original package still
+needs to be installed. This is particularly problematic when distribution
+packages are mispackaged like `guile-sqlite3` is in Ubuntu Focal such that
+installing `guile-sqlite3` does not automatically install `libsqlite3-dev` as a
+dependency.
+
+Below is a list of relevant Guile bindings and their corresponding `-dev`
+packages in Debian at the time of writing.
+ +| Guile binding package | -dev Debian package | +|-----------------------|---------------------| +| guile-gcrypt | libgcrypt-dev | +| guile-git | libgit2-dev | +| guile-gnutls | (none) | +| guile-json | (none) | +| guile-lzlib | liblz-dev | +| guile-ssh | libssh-dev | +| guile-sqlite3 | libsqlite3-dev | +| guile-zlib | zlib1g-dev | + +##### `guile-git` actually depends on `libgit2 >= 1.1` + +Relevant for: +- Those building `guile-git` from source against `libgit2 < 1.1` +- Those installing `guile-git` from their distribution where `guile-git` is + built against `libgit2 < 1.1` + +As of v0.5.2, `guile-git` claims to only require `libgit2 >= 0.28.0`, however, +it actually requires `libgit2 >= 1.1`, otherwise, it will be confused by a +reference of `origin/keyring`: instead of interpreting the reference as "the +'keyring' branch of the 'origin' remote", the reference is interpreted as "the +branch literally named 'origin/keyring'" + +This is especially notable because Ubuntu Focal packages `libgit2 v0.28.4`, and +`guile-git` is built against it. + +Should you be in this situation, you need to build both `libgit2 v1.1.x` and +`guile-git` from source. + +Source: https://logs.guix.gnu.org/guix/2020-11-12.log#232527 + +### Building and Installing Guix itself + +Start by cloning Guix: + +``` +git clone https://git.savannah.gnu.org/git/guix.git +cd guix +``` + +You will likely want to build the latest release. +At the time of writing (November 2023), the latest release was `v1.4.0`. + +``` +git branch -a -l 'origin/version-*' # check for the latest release +git checkout +``` + +Bootstrap the build system: +``` +./bootstrap +``` + +Configure with the recommended `--localstatedir` flag: +``` +./configure --localstatedir=/var +``` + +Note: If you intend to hack on Guix in the future, you will need to supply the +same `--localstatedir=` flag for all future Guix `./configure` invocations. See +the last paragraph of this +[section](https://guix.gnu.org/manual/en/html_node/Requirements.html) for more +details. + +Build Guix (this will take a while): +``` +make -j$(nproc) +``` + +Install Guix: + +``` +sudo make install +``` + +### Post-"build from source" Setup + +#### Creating and starting a `guix-daemon-original` service with a fixed `argv[0]` + +At this point, guix will be installed to `${bindir}`, which is likely +`/usr/local/bin` if you did not override directory variables at +`./configure`-time. More information on standard Automake directory variables +can be found +[here](https://www.gnu.org/software/automake/manual/html_node/Standard-Directory-Variables.html). + +However, the Guix init scripts and service configurations for Upstart, systemd, +SysV, and OpenRC are installed (in `${libdir}`) to launch +`${localstatedir}/guix/profiles/per-user/root/current-guix/bin/guix-daemon`, +which does not yet exist, and will only exist after [`root` performs their first +`guix pull`](#guix-pull-as-root). + +We need to create a `-original` version of these init scripts that's pointed to +the binaries we just built and `make install`'ed in `${bindir}` (normally, +`/usr/local/bin`). 
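+
+If you are unsure where those init/service files ended up, a quick search can
+locate them before you edit anything (a sketch assuming the default
+`/usr/local` prefix; adjust the path to match your `--prefix`):
+
+```sh
+# Locate the service/init files that `make install` placed under ${libdir}
+find /usr/local/lib -name 'guix-daemon*' 2>/dev/null
+```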
+ +Example for `systemd`, run as `root`: + +```sh +# Create guix-daemon-original.service by modifying guix-daemon.service +libdir=# set according to your PREFIX (default is /usr/local/lib) +bindir="$(dirname $(command -v guix-daemon))" +sed -E -e "s|/\S*/guix/profiles/per-user/root/current-guix/bin/guix-daemon|${bindir}/guix-daemon|" "${libdir}"/systemd/system/guix-daemon.service > /etc/systemd/system/guix-daemon-original.service +chmod 664 /etc/systemd/system/guix-daemon-original.service + +# Make systemd recognize the new service +systemctl daemon-reload + +# Make sure that the non-working guix-daemon.service is stopped and disabled +systemctl stop guix-daemon +systemctl disable guix-daemon + +# Make sure that the working guix-daemon-original.service is started and enabled +systemctl enable guix-daemon-original +systemctl start guix-daemon-original +``` + +#### Creating `guix-daemon` users / groups + +Please see the [relevant +section](https://guix.gnu.org/manual/en/html_node/Build-Environment-Setup.html) +in the Guix Reference Manual for more details. + +## Optional setup + +At this point, you are set up to [use Guix to build Bitcoin +Core](./README.md#usage). However, if you want to polish your setup a bit and +make it "what Guix intended", then read the next few subsections. + +### Add an `/etc/profile.d` entry + +This section definitely does not apply to you if you installed Guix using: +1. The shell installer script +2. fanquake's Docker image +3. Debian's `guix` package + +#### Background + +Although Guix knows how to update itself and its packages, it does so in a +non-invasive way (it does not modify `/usr/local/bin/guix`). + +Instead, it does the following: + +- After a `guix pull`, it updates + `/var/guix/profiles/per-user/$USER/current-guix`, and creates a symlink + targeting this directory at `$HOME/.config/guix/current` + +- After a `guix install`, it updates + `/var/guix/profiles/per-user/$USER/guix-profile`, and creates a symlink + targeting this directory at `$HOME/.guix-profile` + +Therefore, in order for these operations to affect your shell/desktop sessions +(and for the principle of least astonishment to hold), their corresponding +directories have to be added to well-known environment variables like `$PATH`, +`$INFOPATH`, `$XDG_DATA_DIRS`, etc. + +In other words, if `$HOME/.config/guix/current/bin` does not exist in your +`$PATH`, a `guix pull` will have no effect on what `guix` you are using. Same +goes for `$HOME/.guix-profile/bin`, `guix install`, and installed packages. + +Helpfully, after a `guix pull` or `guix install`, a message will be printed like +so: + +``` +hint: Consider setting the necessary environment variables by running: + + GUIX_PROFILE="$HOME/.guix-profile" + . "$GUIX_PROFILE/etc/profile" + +Alternately, see `guix package --search-paths -p "$HOME/.guix-profile"'. +``` + +However, this is somewhat tedious to do for both `guix pull` and `guix install` +for each user on the system that wants to properly use `guix`. I recommend that +you instead add an entry to `/etc/profile.d` instead. This is done by default +when installing the Debian package later than 1.2.0-4 and when using the shell +script installer. 
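+
+Either way, you can confirm which `guix` a login shell actually resolves with a
+quick, optional sanity check (output will vary with your setup):
+
+```sh
+# Which guix comes first in PATH, and what does it really point at?
+command -v guix
+readlink -f "$(command -v guix)"
+```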
+ +#### Instructions + +Create `/etc/profile.d/guix.sh` with the following content: +```sh +# _GUIX_PROFILE: `guix pull` profile +_GUIX_PROFILE="$HOME/.config/guix/current" +if [ -L $_GUIX_PROFILE ]; then + export PATH="$_GUIX_PROFILE/bin${PATH:+:}$PATH" + # Export INFOPATH so that the updated info pages can be found + # and read by both /usr/bin/info and/or $GUIX_PROFILE/bin/info + # When INFOPATH is unset, add a trailing colon so that Emacs + # searches 'Info-default-directory-list'. + export INFOPATH="$_GUIX_PROFILE/share/info:$INFOPATH" +fi + +# GUIX_PROFILE: User's default profile +GUIX_PROFILE="$HOME/.guix-profile" +[ -L $GUIX_PROFILE ] || return +GUIX_LOCPATH="$GUIX_PROFILE/lib/locale" +export GUIX_PROFILE GUIX_LOCPATH + +[ -f "$GUIX_PROFILE/etc/profile" ] && . "$GUIX_PROFILE/etc/profile" + +# set XDG_DATA_DIRS to include Guix installations +export XDG_DATA_DIRS="$GUIX_PROFILE/share:${XDG_DATA_DIRS:-/usr/local/share/:/usr/share/}" +``` + +Please note that this will not take effect until the next shell or desktop +session (log out and log back in). + +### `guix pull` as root + +Before you do this, you need to read the section on [choosing your security +model][security-model] and adjust `guix` and `guix-daemon` flags according to +your choice, as invoking `guix pull` may pull substitutes from substitute +servers (which you may not want). + +As mentioned in a previous section, Guix expects +`${localstatedir}/guix/profiles/per-user/root/current-guix` to be populated with +`root`'s Guix profile, `guix pull`-ed and built by some former version of Guix. +However, this is not the case when we build from source. Therefore, we need to +perform a `guix pull` as `root`: + +```sh +sudo --login guix pull --branch=version- +# or +sudo --login guix pull --commit= +``` + +`guix pull` is quite a long process (especially if you're using +`--no-substitutes`). If you encounter build problems, please refer to the +[troubleshooting section](#troubleshooting). + +Note that running a bare `guix pull` with no commit or branch specified will +pull the latest commit on Guix's master branch, which is likely fine, but not +recommended. + +If you installed Guix from source, you may get an error like the following: +```sh +error: while creating symlink '/root/.config/guix/current' No such file or directory +``` +To resolve this, simply: +``` +sudo mkdir -p /root/.config/guix +``` +Then try the `guix pull` command again. + +After the `guix pull` finishes successfully, +`${localstatedir}/guix/profiles/per-user/root/current-guix` should be populated. + +#### Using the newly-pulled `guix` by restarting the daemon + +Depending on how you installed Guix, you should now make sure that your init +scripts and service configurations point to the newly-pulled `guix-daemon`. + +##### If you built Guix from source + +If you followed the instructions for [fixing argv\[0\]][fix-argv0], you can now +do the following: + +```sh +systemctl stop guix-daemon-original +systemctl disable guix-daemon-original + +systemctl enable guix-daemon +systemctl start guix-daemon +``` + +Remember to set `--no-substitutes` in `$libdir/systemd/system/guix-daemon.service` and other customizations if you used them for `guix-daemon-original.service`. + +##### If you installed Guix via the Debian/Ubuntu distribution packages + +You will need to create a `guix-daemon-latest` service which points to the new +`guix` rather than a pinned one. 
+ +```sh +# Create guix-daemon-latest.service by modifying guix-daemon.service +sed -E -e "s|/usr/bin/guix-daemon|/var/guix/profiles/per-user/root/current-guix/bin/guix-daemon|" /etc/systemd/system/guix-daemon.service > /lib/systemd/system/guix-daemon-latest.service +chmod 664 /lib/systemd/system/guix-daemon-latest.service + +# Make systemd recognize the new service +systemctl daemon-reload + +# Make sure that the old guix-daemon.service is stopped and disabled +systemctl stop guix-daemon +systemctl disable guix-daemon + +# Make sure that the new guix-daemon-latest.service is started and enabled +systemctl enable guix-daemon-latest +systemctl start guix-daemon-latest +``` + +##### If you installed Guix via lantw44's Arch Linux AUR package + +At the time of writing (July 5th, 2021) the systemd unit for "updated Guix" is +`guix-daemon-latest.service`, therefore, you should do the following: + +```sh +systemctl stop guix-daemon +systemctl disable guix-daemon + +systemctl enable guix-daemon-latest +systemctl start guix-daemon-latest +``` + +##### Otherwise... + +Simply do: + +```sh +systemctl restart guix-daemon +``` + +### Checking everything + +If you followed all the steps above to make your Guix setup "prim and proper," +you can check that you did everything properly by running through this +checklist. + +1. `/etc/profile.d/guix.sh` should exist and be sourced at each shell login + +2. `guix describe` should not print `guix describe: error: failed to determine + origin`, but rather something like: + + ``` + Generation 38 Feb 22 2021 16:39:31 (current) + guix f350df4 + repository URL: https://git.savannah.gnu.org/git/guix.git + branch: version-1.2.0 + commit: f350df405fbcd5b9e27e6b6aa500da7f101f41e7 + ``` + +3. `guix-daemon` should be running from `${localstatedir}/guix/profiles/per-user/root/current-guix` + +# Troubleshooting + +## Derivation failed to build + +When you see a build failure like below: + +``` +building /gnu/store/...-foo-3.6.12.drv... +/ 'check' phasenote: keeping build directory `/tmp/guix-build-foo-3.6.12.drv-0' +builder for `/gnu/store/...-foo-3.6.12.drv' failed with exit code 1 +build of /gnu/store/...-foo-3.6.12.drv failed +View build log at '/var/log/guix/drvs/../...-foo-3.6.12.drv.bz2'. +cannot build derivation `/gnu/store/...-qux-7.69.1.drv': 1 dependencies couldn't be built +cannot build derivation `/gnu/store/...-bar-3.16.5.drv': 1 dependencies couldn't be built +cannot build derivation `/gnu/store/...-baz-2.0.5.drv': 1 dependencies couldn't be built +guix time-machine: error: build of `/gnu/store/...-baz-2.0.5.drv' failed +``` + +It means that `guix` failed to build a package named `foo`, which was a +dependency of `qux`, `bar`, and `baz`. Importantly, note that the last "failed" +line is not necessarily the root cause, the first "failed" line is. + +Most of the time, the build failure is due to a spurious test failure or the +package's build system/test suite breaking when running multi-threaded. To +rebuild _just_ this derivation in a single-threaded fashion (please don't forget +to add other `guix` flags like `--no-substitutes` as appropriate): + +```sh +$ guix build --cores=1 /gnu/store/...-foo-3.6.12.drv +``` + +If the single-threaded rebuild did not succeed, you may need to dig deeper. 
+You may view `foo`'s build logs in `less` like so (please replace paths with the +path you see in the build failure output): + +```sh +$ bzcat /var/log/guix/drvs/../...-foo-3.6.12.drv.bz2 | less +``` + +`foo`'s build directory is also preserved and available at +`/tmp/guix-build-foo-3.6.12.drv-0`. However, if you fail to build `foo` multiple +times, it may be `/tmp/...drv-1` or `/tmp/...drv-2`. Always consult the build +failure output for the most accurate, up-to-date information. + +### python(-minimal): [Errno 84] Invalid or incomplete multibyte or wide character + +This error occurs when your `$TMPDIR` (default: /tmp) exists on a filesystem +which rejects characters not present in the UTF-8 character code set. An example +is ZFS with the utf8only=on option set. + +More information: https://github.com/python/cpython/issues/81765 + +### openssl-1.1.1l and openssl-1.1.1n + +OpenSSL includes tests that will fail once some certificate has expired. +The workarounds from the GnuTLS section immediately below can be used. + +For openssl-1.1.1l use 2022-05-01 as the date. + +### GnuTLS: test-suite FAIL: status-request-revoked + +*The derivation is likely identified by: `/gnu/store/vhphki5sg9xkdhh2pbc8gi6vhpfzryf0-gnutls-3.6.12.drv`* + +This unfortunate error is most common for non-substitute builders who installed +Guix v1.2.0. The problem stems from the fact that one of GnuTLS's tests uses a +hardcoded certificate which expired on 2020-10-24. + +What's more unfortunate is that this GnuTLS derivation is somewhat special in +Guix's dependency graph and is not affected by the package transformation flags +like `--without-tests=`. + +The easiest solution for those encountering this problem is to install a newer +version of Guix. However, there are ways to work around this issue: + +#### Workaround 1: Using substitutes for this single derivation + +If you've authorized the official Guix build farm's key (more info +[here](./README.md#step-1-authorize-the-signing-keys)), then you can use +substitutes just for this single derivation by invoking the following: + +```sh +guix build --substitute-urls="https://ci.guix.gnu.org" /gnu/store/vhphki5sg9xkdhh2pbc8gi6vhpfzryf0-gnutls-3.6.12.drv +``` + +See [this section](./README.md#removing-authorized-keys) for instructions on how +to remove authorized keys if you don't want to keep the build farm's key +authorized. + +#### Workaround 2: Temporarily setting the system clock back + +This workaround was described [here](https://issues.guix.gnu.org/44559#5). + +Basically: + +1. Turn off NTP +2. Set system time to 2020-10-01 +3. guix build --no-substitutes /gnu/store/vhphki5sg9xkdhh2pbc8gi6vhpfzryf0-gnutls-3.6.12.drv +4. Set system time back to accurate current time +5. Turn NTP back on + +For example, + +```sh +sudo timedatectl set-ntp no +sudo date --set "01 oct 2020 15:00:00" +guix build /gnu/store/vhphki5sg9xkdhh2pbc8gi6vhpfzryf0-gnutls-3.6.12.drv +sudo timedatectl set-ntp yes +``` + +#### Workaround 3: Disable the tests in the Guix source code for this single derivation + +If all of the above workarounds fail, you can also disable the `tests` phase of +the derivation via the `arguments` option, as described in the official +[`package` +reference](https://guix.gnu.org/manual/en/html_node/package-Reference.html). 
+ +For example, to disable the openssl-1.1 check phase: + +```diff +diff --git a/gnu/packages/tls.scm b/gnu/packages/tls.scm +index f1e844b..1077c4b 100644 +--- a/gnu/packages/tls.scm ++++ b/gnu/packages/tls.scm +@@ -494,4 +494,5 @@ (define-public openssl-1.1 + (arguments + `(#:parallel-tests? #f ++ #:tests? #f + #:test-target "test" +``` + +### coreutils: FAIL: tests/tail-2/inotify-dir-recreate + +The inotify-dir-create test fails on "remote" filesystems such as overlayfs +(Docker's default filesystem) due to the filesystem being mistakenly recognized +as non-remote. + +A relatively easy workaround to this is to make sure that a somewhat traditional +filesystem is mounted at `/tmp` (where `guix-daemon` performs its builds). For +Docker users, this might mean [using a volume][docker/volumes], [binding +mounting][docker/bind-mnt] from host, or (for those with enough RAM and swap) +[mounting a tmpfs][docker/tmpfs] using the `--tmpfs` flag. + +Please see the following links for more details: + +- An upstream coreutils bug has been filed: [debbugs#47940](https://debbugs.gnu.org/cgi/bugreport.cgi?bug=47940) +- A Guix bug detailing the underlying problem has been filed: [guix-issues#47935](https://issues.guix.gnu.org/47935), [guix-issues#49985](https://issues.guix.gnu.org/49985#5) +- A commit to skip this test in Guix has been merged into the core-updates branch: +[savannah/guix@6ba1058](https://git.savannah.gnu.org/cgit/guix.git/commit/?id=6ba1058df0c4ce5611c2367531ae5c3cdc729ab4) + + +[install-script]: #options-1-and-2-using-the-official-shell-installer-script-or-binary-tarball +[install-bin-tarball]: #options-1-and-2-using-the-official-shell-installer-script-or-binary-tarball +[install-fanquake-docker]: #option-3-using-fanquakes-docker-image +[install-distro-pkg]: #option-4-using-a-distribution-maintained-package +[install-source]: #option-5-building-from-source + +[fix-argv0]: #creating-and-starting-a-guix-daemon-original-service-with-a-fixed-argv0 +[security-model]: ./README.md#choosing-your-security-model + +[docker/volumes]: https://docs.docker.com/storage/volumes/ +[docker/bind-mnt]: https://docs.docker.com/storage/bind-mounts/ +[docker/tmpfs]: https://docs.docker.com/storage/tmpfs/ + +# Purging/Uninstalling Guix + +In the extraordinarily rare case where you messed up your Guix installation in +an irreversible way, you may want to completely purge Guix from your system and +start over. + +1. Uninstall Guix itself according to the way you installed it (e.g. `sudo apt + purge guix` for Ubuntu packaging, `sudo make uninstall` for a build from source). +2. Remove all build users and groups + + You may check for relevant users and groups using: + + ``` + getent passwd | grep guix + getent group | grep guix + ``` + + Then, you may remove users and groups using: + + ``` + sudo userdel + sudo groupdel + ``` + +3. Remove all possible Guix-related directories + - `/var/guix/` + - `/var/log/guix/` + - `/gnu/` + - `/etc/guix/` + - `/home/*/.config/guix/` + - `/home/*/.cache/guix/` + - `/home/*/.guix-profile/` + - `/root/.config/guix/` + - `/root/.cache/guix/` + - `/root/.guix-profile/` diff --git a/contrib/guix/README.md b/contrib/guix/README.md index 8ce8cb97a0d7a..5e05e1016e5ed 100644 --- a/contrib/guix/README.md +++ b/contrib/guix/README.md @@ -9,114 +9,222 @@ downloads. We achieve bootstrappability by using Guix as a functional package manager. 
-## Requirements +# Requirements -Conservatively, a x86_64 machine with: +Conservatively, you will need: -- 4GB of free disk space on the partition that /gnu/store will reside in -- 24GB of free disk space on the partition that the Bitcoin Core git repository - resides in +- 16GB of free disk space on the partition that /gnu/store will reside in +- 8GB of free disk space **per platform triple** you're planning on building + (see the `HOSTS` [environment variable description][env-vars-list]) -> Note: these requirements are slightly less onerous than those of Gitian builds +# Installation and Setup -## Setup +If you don't have Guix installed and set up, please follow the instructions in +[INSTALL.md](./INSTALL.md) -### Installing Guix +# Usage -If you're just testing this out, you can use the -[Dockerfile][fanquake/guix-docker] for convenience. It automatically speeds up -your builds by [using substitutes](#speeding-up-builds-with-substitute-servers). -If you don't want this behaviour, refer to the [next -section](#choosing-your-security-model). +If you haven't considered your security model yet, please read [the relevant +section](#choosing-your-security-model) before proceeding to perform a build. -Otherwise, follow the [Guix installation guide][guix/bin-install]. +## Making the Xcode SDK available for macOS cross-compilation -> Note: For those who like to keep their filesystems clean, Guix is designed to -> be very standalone and _will not_ conflict with your system's package -> manager/existing setup. It _only_ touches `/var/guix`, `/gnu`, and -> `~/.config/guix`. +In order to perform a build for macOS (which is included in the default set of +platform triples to build), you'll need to extract the macOS SDK tarball using +tools found in the [`macdeploy` directory](../macdeploy/README.md). -### Choosing your security model +You can then either point to the SDK using the `SDK_PATH` environment variable: -Guix allows us to achieve better binary security by using our CPU time to build -everything from scratch. However, it doesn't sacrifice user choice in pursuit of -this: users can decide whether or not to bootstrap and to use substitutes. +```sh +# Extract the SDK tarball to /path/to/parent/dir/of/extracted/SDK/Xcode---extracted-SDK-with-libcxx-headers +tar -C /path/to/parent/dir/of/extracted/SDK -xaf /path/to/Xcode---extracted-SDK-with-libcxx-headers.tar.gz -After installation, you may want to consider [adding substitute -servers](#speeding-up-builds-with-substitute-servers) to speed up your build if -that fits your security model (say, if you're just testing that this works). -This is skippable if you're using the [Dockerfile][fanquake/guix-docker]. +# Indicate where to locate the SDK tarball +export SDK_PATH=/path/to/parent/dir/of/extracted/SDK +``` -If you prefer not to use any substitutes, make sure to set -`ADDITIONAL_GUIX_ENVIRONMENT_FLAGS` like the following snippet. The first build -will take a while, but the resulting packages will be cached for future builds. +or extract it into `depends/SDKs`: ```sh -export ADDITIONAL_GUIX_ENVIRONMENT_FLAGS='--no-substitutes' +mkdir -p depends/SDKs +tar -C depends/SDKs -xaf /path/to/SDK/tarball ``` -Likewise, to perform a bootstrapped build (takes even longer): +## Building + +*The author highly recommends at least reading over the [common usage patterns +and examples](#common-guix-build-invocation-patterns-and-examples) section below +before starting a build. 
For a full list of customization options, see the +[recognized environment variables][env-vars-list] section.* + +To build Bitcoin Core reproducibly with all default options, invoke the +following from the top of a clean repository: ```sh -export ADDITIONAL_GUIX_ENVIRONMENT_FLAGS='--bootstrap --no-substitutes' +./contrib/guix/guix-build ``` -### Using a version of Guix with `guix time-machine` capabilities +## Codesigning build outputs -> Note: This entire section can be skipped if you are already using a version of -> Guix that has [the `guix time-machine` command][guix/time-machine]. +The `guix-codesign` command attaches codesignatures (produced by codesigners) to +existing non-codesigned outputs. Please see the [release process +documentation](/doc/release-process.md) for more context. -Once Guix is installed, if it doesn't have the `guix time-machine` command, pull -the latest `guix`. +It respects many of the same environment variable flags as `guix-build`, with 2 +crucial differences: + +1. Since only Windows and macOS build outputs require codesigning, the `HOSTS` + environment variable will have a sane default value of `x86_64-w64-mingw32 + x86_64-apple-darwin arm64-apple-darwin` instead of all the platforms. +2. The `guix-codesign` command ***requires*** a `DETACHED_SIGS_REPO` flag. + * _**DETACHED_SIGS_REPO**_ + + Set the directory where detached codesignatures can be found for the current + Bitcoin Core version being built. + + _REQUIRED environment variable_ + +An invocation with all default options would look like: -```sh -guix pull --max-jobs=4 # change number of jobs accordingly ``` +env DETACHED_SIGS_REPO= ./contrib/guix/guix-codesign +``` + +## Cleaning intermediate work directories -Make sure that you are using your current profile. (You are prompted to do this -at the end of the `guix pull`) +By default, `guix-build` leaves all intermediate files or "work directories" +(e.g. `depends/work`, `guix-build-*/distsrc-*`) intact at the end of a build so +that they are available to the user (to aid in debugging, etc.). However, these +directories usually take up a large amount of disk space. Therefore, a +`guix-clean` convenience script is provided which cleans the current `git` +worktree to save disk space: -```bash -export PATH="${HOME}/.config/guix/current/bin${PATH:+:}$PATH" +``` +./contrib/guix/guix-clean ``` -## Usage -### As a Development Environment +## Attesting to build outputs -For a Bitcoin Core depends development environment, simply invoke +Much like how Gitian build outputs are attested to in a `gitian.sigs` +repository, Guix build outputs are attested to in the [`guix.sigs` +repository](https://github.com/bitcoin-core/guix.sigs). + +After you've cloned the `guix.sigs` repository, to attest to the current +worktree's commit/tag: + +``` +env GUIX_SIGS_REPO= SIGNER= ./contrib/guix/guix-attest +``` + +See `./contrib/guix/guix-attest --help` for more information on the various ways +`guix-attest` can be invoked. 
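+
+For example, an invocation with concrete (purely illustrative) values,
+mirroring the examples shown by `guix-attest --help`, might look like:
+
+```sh
+env GUIX_SIGS_REPO=/home/achow101/guix.sigs SIGNER=achow101 ./contrib/guix/guix-attest
+```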
+ +## Verifying build output attestations + +After at least one other signer has uploaded their signatures to the `guix.sigs` +repository: + +``` +git -C pull +env GUIX_SIGS_REPO= ./contrib/guix/guix-verify +``` + + +## Common `guix-build` invocation patterns and examples + +### Keeping caches and SDKs outside of the worktree + +If you perform a lot of builds and have a bunch of worktrees, you may find it +more efficient to keep the depends tree's download cache, build cache, and SDKs +outside of the worktrees to avoid duplicate downloads and unnecessary builds. To +help with this situation, the `guix-build` script honours the `SOURCES_PATH`, +`BASE_CACHE`, and `SDK_PATH` environment variables and will pass them on to the +depends tree so that you can do something like: ```sh -guix environment --manifest=contrib/guix/manifest.scm +env SOURCES_PATH="$HOME/depends-SOURCES_PATH" BASE_CACHE="$HOME/depends-BASE_CACHE" SDK_PATH="$HOME/macOS-SDKs" ./contrib/guix/guix-build ``` -And you'll land back in your shell with all the build dependencies required for -a `depends` build injected into your environment. +Note that the paths that these environment variables point to **must be +directories**, and **NOT symlinks to directories**. + +See the [recognized environment variables][env-vars-list] section for more +details. + +### Building a subset of platform triples + +Sometimes you only want to build a subset of the supported platform triples, in +which case you can override the default list by setting the space-separated +`HOSTS` environment variable: + +```sh +env HOSTS='x86_64-w64-mingw32 x86_64-apple-darwin' ./contrib/guix/guix-build +``` + +See the [recognized environment variables][env-vars-list] section for more +details. + +### Controlling the number of threads used by `guix` build commands + +Depending on your system's RAM capacity, you may want to decrease the number of +threads used to decrease RAM usage or vice versa. + +By default, the scripts under `./contrib/guix` will invoke all `guix` build +commands with `--cores="$JOBS"`. Note that `$JOBS` defaults to `$(nproc)` if not +specified. However, astute manual readers will also notice that `guix` build +commands also accept a `--max-jobs=` flag (which defaults to 1 if unspecified). + +Here is the difference between `--cores=` and `--max-jobs=`: + +> Note: When I say "derivation," think "package" + +`--cores=` + + - controls the number of CPU cores to build each derivation. This is the value + passed to `make`'s `--jobs=` flag. + +`--max-jobs=` -### As a Tool for Deterministic Builds + - controls how many derivations can be built in parallel + - defaults to 1 -From the top of a clean Bitcoin Core repository: +Therefore, the default is for `guix` build commands to build one derivation at a +time, utilizing `$JOBS` threads. + +Specifying the `$JOBS` environment variable will only modify `--cores=`, but you +can also modify the value for `--max-jobs=` by specifying +`$ADDITIONAL_GUIX_COMMON_FLAGS`. For example, if you have a LOT of memory, you +may want to set: ```sh -./contrib/guix/guix-build.sh +export ADDITIONAL_GUIX_COMMON_FLAGS='--max-jobs=8' ``` -After the build finishes successfully (check the status code please), compare -hashes: +Which allows for a maximum of 8 derivations to be built at the same time, each +utilizing `$JOBS` threads. 
+ +Or, if you'd like to avoid spurious build failures caused by issues with +parallelism within a single package, but would still like to build multiple +packages when the dependency graph allows for it, you may want to try: ```sh -find output/ -type f -print0 | sort -z | xargs -r0 sha256sum +export JOBS=1 ADDITIONAL_GUIX_COMMON_FLAGS='--max-jobs=8' ``` -#### Recognized environment variables +See the [recognized environment variables][env-vars-list] section for more +details. + +## Recognized environment variables * _**HOSTS**_ Override the space-separated list of platform triples for which to perform a - bootstrappable build. _(defaults to "x86\_64-linux-gnu - arm-linux-gnueabihf aarch64-linux-gnu riscv64-linux-gnu")_ + bootstrappable build. - > Windows and OS X platform triplet support are WIP. + _(defaults to "x86\_64-linux-gnu arm-linux-gnueabihf aarch64-linux-gnu + riscv64-linux-gnu powerpc64-linux-gnu powerpc64le-linux-gnu + x86\_64-w64-mingw32 x86\_64-apple-darwin arm64-apple-darwin")_ * _**SOURCES_PATH**_ @@ -124,102 +232,205 @@ find output/ -type f -print0 | sort -z | xargs -r0 sha256sum depends tree. Setting this to the same directory across multiple builds of the depends tree can eliminate unnecessary redownloading of package sources. -* _**MAX_JOBS**_ + The path that this environment variable points to **must be a directory**, and + **NOT a symlink to a directory**. + +* _**BASE_CACHE**_ + + Set the depends tree cache for built packages. This is passed through to the + depends tree. Setting this to the same directory across multiple builds of the + depends tree can eliminate unnecessary building of packages. + + The path that this environment variable points to **must be a directory**, and + **NOT a symlink to a directory**. + +* _**SDK_PATH**_ + + Set the path where _extracted_ SDKs can be found. This is passed through to + the depends tree. Note that this is should be set to the _parent_ directory of + the actual SDK (e.g. `SDK_PATH=$HOME/Downloads/macOS-SDKs` instead of + `$HOME/Downloads/macOS-SDKs/Xcode-12.2-12B45b-extracted-SDK-with-libcxx-headers`). + + The path that this environment variable points to **must be a directory**, and + **NOT a symlink to a directory**. + +* _**JOBS**_ - Override the maximum number of jobs to run simultaneously, you might want to - do so on a memory-limited machine. This may be passed to `make` as in `make - --jobs="$MAX_JOBS"` or `xargs` as in `xargs -P"$MAX_JOBS"`. _(defaults to the - value of `nproc` outside the container)_ + Override the number of jobs to run simultaneously, you might want to do so on + a memory-limited machine. This may be passed to: + + - `guix` build commands as in `guix shell --cores="$JOBS"` + - `make` as in `make --jobs="$JOBS"` + - `cmake` as in `cmake --build build -j "$JOBS"` + - `xargs` as in `xargs -P"$JOBS"` + + See [here](#controlling-the-number-of-threads-used-by-guix-build-commands) for + more details. + + _(defaults to the value of `nproc` outside the container)_ * _**SOURCE_DATE_EPOCH**_ Override the reference UNIX timestamp used for bit-for-bit reproducibility, - the variable name conforms to [standard][r12e/source-date-epoch]. _(defaults - to the output of `$(git log --format=%at -1)`)_ + the variable name conforms to [standard][r12e/source-date-epoch]. + + _(defaults to the output of `$(git log --format=%at -1)`)_ * _**V**_ If non-empty, will pass `V=1` to all `make` invocations, making `make` output verbose. -* _**ADDITIONAL_GUIX_ENVIRONMENT_FLAGS**_ + Note that any given value is ignored. 
The variable is only checked for + emptiness. More concretely, this means that `V=` (setting `V` to the empty + string) is interpreted the same way as not setting `V` at all, and that `V=0` + has the same effect as `V=1`. - Additional flags to be passed to `guix environment`. For a fully-bootstrapped - build, set this to `--bootstrap --no-substitutes` (refer to the [security - model section](#choosing-your-security-model) for more details). Note that a - fully-bootstrapped build will take quite a long time on the first run. +* _**SUBSTITUTE_URLS**_ -## Tips and Tricks + A whitespace-delimited list of URLs from which to download pre-built packages. + A URL is only used if its signing key is authorized (refer to the [substitute + servers section](#option-1-building-with-substitutes) for more details). -### Speeding up builds with substitute servers +* _**ADDITIONAL_GUIX_COMMON_FLAGS**_ -_This whole section is automatically done in the convenience -[Dockerfiles][fanquake/guix-docker]_ + Additional flags to be passed to all `guix` commands. -For those who are used to life in the fast _(and trustful)_ lane, you can use -[substitute servers][guix/substitutes] to enable binary downloads of packages. +* _**ADDITIONAL_GUIX_TIMEMACHINE_FLAGS**_ -> For those who only want to use substitutes from the official Guix build farm -> and have authorized the build farm's signing key during Guix's installation, -> you don't need to do anything. + Additional flags to be passed to `guix time-machine`. -#### Authorize the signing keys +* _**ADDITIONAL_GUIX_ENVIRONMENT_FLAGS**_ -For the official Guix build farm at https://ci.guix.gnu.org, run as root: + Additional flags to be passed to the invocation of `guix shell` inside + `guix time-machine`. +# Choosing your security model + +No matter how you installed Guix, you need to decide on your security model for +building packages with Guix. + +Guix allows us to achieve better binary security by using our CPU time to build +everything from scratch. However, it doesn't sacrifice user choice in pursuit of +this: users can decide whether or not to use **substitutes** (pre-built +packages). + +## Option 1: Building with substitutes + +### Step 1: Authorize the signing keys + +Depending on the installation procedure you followed, you may have already +authorized the Guix build farm key. In particular, the official shell installer +script asks you if you want the key installed, and the debian distribution +package authorized the key during installation. + +You can check the current list of authorized keys at `/etc/guix/acl`. 
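+
+For example, one simple way to inspect it (assuming the file exists; on some
+setups it is only readable by root) is:
+
+```sh
+sudo cat /etc/guix/acl
+```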
+ +At the time of writing, a `/etc/guix/acl` with just the Guix build farm key +authorized looks something like: + +```lisp +(acl + (entry + (public-key + (ecc + (curve Ed25519) + (q #8D156F295D24B0D9A86FA5741A840FF2D24F60F7B6C4134814AD55625971B394#) + ) + ) + (tag + (guix import) + ) + ) + ) ``` -guix archive --authorize < ~root/.config/guix/current/share/guix/ci.guix.gnu.org.pub + +If you've determined that the official Guix build farm key hasn't been +authorized, and you would like to authorize it, run the following as root: + +``` +guix archive --authorize < /var/guix/profiles/per-user/root/current-guix/share/guix/ci.guix.gnu.org.pub +``` + +If +`/var/guix/profiles/per-user/root/current-guix/share/guix/ci.guix.gnu.org.pub` +doesn't exist, try: + +```sh +guix archive --authorize < /share/guix/ci.guix.gnu.org.pub ``` +Where `` is likely: +- `/usr` if you installed from a distribution package +- `/usr/local` if you installed Guix from source and didn't supply any + prefix-modifying flags to Guix's `./configure` + For dongcarl's substitute server at https://guix.carldong.io, run as root: ```sh wget -qO- 'https://guix.carldong.io/signing-key.pub' | guix archive --authorize ``` -#### Use the substitute servers +#### Removing authorized keys + +To remove previously authorized keys, simply edit `/etc/guix/acl` and remove the +`(entry (public-key ...))` entry. -The official Guix build farm at https://ci.guix.gnu.org is automatically used -unless the `--no-substitutes` flag is supplied. +### Step 2: Specify the substitute servers -This can be overridden for all `guix` invocations by passing the -`--substitute-urls` option to your invocation of `guix-daemon`. This can also be -overridden on a call-by-call basis by passing the same `--substitute-urls` -option to client tools such at `guix environment`. +Once its key is authorized, the official Guix build farm at +https://ci.guix.gnu.org is automatically used unless the `--no-substitutes` flag +is supplied. This default list of substitute servers is overridable both on a +`guix-daemon` level and when you invoke `guix` commands. See examples below for +the various ways of adding dongcarl's substitute server after having [authorized +his signing key](#step-1-authorize-the-signing-keys). -To use dongcarl's substitute server for Bitcoin Core builds after having -[authorized his signing key](#authorize-the-signing-keys): +Change the **default list** of substitute servers by starting `guix-daemon` with +the `--substitute-urls` option (you will likely need to edit your init script): +```sh +guix-daemon --substitute-urls='https://guix.carldong.io https://ci.guix.gnu.org' ``` -export ADDITIONAL_GUIX_ENVIRONMENT_FLAGS='--substitute-urls="https://guix.carldong.io https://ci.guix.gnu.org"' + +Override the default list of substitute servers by passing the +`--substitute-urls` option for invocations of `guix` commands: + +```sh +guix --substitute-urls='https://guix.carldong.io https://ci.guix.gnu.org' ``` -## FAQ +For scripts under `./contrib/guix`, set the `SUBSTITUTE_URLS` environment +variable: -### How can I trust the binary installation? 
+```sh +export SUBSTITUTE_URLS='https://guix.carldong.io https://ci.guix.gnu.org' +``` -As mentioned at the bottom of [this manual page][guix/bin-install]: +## Option 2: Disabling substitutes on an ad-hoc basis -> The binary installation tarballs can be (re)produced and verified simply by -> running the following command in the Guix source tree: -> -> make guix-binary.x86_64-linux.tar.xz +If you prefer not to use any substitutes, make sure to supply `--no-substitutes` +like in the following snippet. The first build will take a while, but the +resulting packages will be cached for future builds. + +For direct invocations of `guix`: +```sh +guix --no-substitutes +``` -### When will Guix be packaged in debian? +For the scripts under `./contrib/guix/`: +```sh +export ADDITIONAL_GUIX_COMMON_FLAGS='--no-substitutes' +``` -Vagrant Cascadian has been making good progress on this -[here][debian/guix-package]. We have all the pieces needed to put up an APT -repository and will likely put one up soon. +## Option 3: Disabling substitutes by default -[b17e]: http://bootstrappable.org/ -[r12e/source-date-epoch]: https://reproducible-builds.org/docs/source-date-epoch/ +`guix-daemon` accepts a `--no-substitutes` flag, which will make sure that, +unless otherwise overridden by a command line invocation, no substitutes will be +used. -[guix/install.sh]: https://git.savannah.gnu.org/cgit/guix.git/plain/etc/guix-install.sh -[guix/bin-install]: https://www.gnu.org/software/guix/manual/en/html_node/Binary-Installation.html -[guix/env-setup]: https://www.gnu.org/software/guix/manual/en/html_node/Build-Environment-Setup.html -[guix/substitutes]: https://www.gnu.org/software/guix/manual/en/html_node/Substitutes.html -[guix/substitute-server-auth]: https://www.gnu.org/software/guix/manual/en/html_node/Substitute-Server-Authorization.html -[guix/time-machine]: https://guix.gnu.org/manual/en/html_node/Invoking-guix-time_002dmachine.html +If you start `guix-daemon` using an init script, you can edit said script to +supply this flag. -[debian/guix-package]: https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=850644 -[fanquake/guix-docker]: https://github.com/fanquake/core-review/tree/master/guix +[b17e]: https://bootstrappable.org/ +[r12e/source-date-epoch]: https://reproducible-builds.org/docs/source-date-epoch/ +[env-vars-list]: #recognized-environment-variables diff --git a/contrib/guix/guix-attest b/contrib/guix/guix-attest new file mode 100755 index 0000000000000..b0ef28dc3f929 --- /dev/null +++ b/contrib/guix/guix-attest @@ -0,0 +1,263 @@ +#!/usr/bin/env bash +export LC_ALL=C +set -e -o pipefail + +# Source the common prelude, which: +# 1. Checks if we're at the top directory of the Bitcoin Core repository +# 2. 
Defines a few common functions and variables +# +# shellcheck source=libexec/prelude.bash +source "$(dirname "${BASH_SOURCE[0]}")/libexec/prelude.bash" + + +################### +## Sanity Checks ## +################### + +################ +# Required non-builtin commands should be invokable +################ + +check_tools cat env basename mkdir diff sort + +if [ -z "$NO_SIGN" ]; then + # make it possible to override the gpg binary + GPG=${GPG:-gpg} + + # $GPG can contain extra arguments passed to the binary + # so let's check only the existence of arg[0] + # shellcheck disable=SC2206 + GPG_ARRAY=($GPG) + check_tools "${GPG_ARRAY[0]}" +fi + +################ +# Required env vars should be non-empty +################ + +cmd_usage() { +cat < \\ + SIGNER=GPG_KEY_NAME[=SIGNER_NAME] \\ + [ NO_SIGN=1 ] + ./contrib/guix/guix-attest + +Example w/o overriding signing name: + + env GUIX_SIGS_REPO=/home/achow101/guix.sigs \\ + SIGNER=achow101 \\ + ./contrib/guix/guix-attest + +Example overriding signing name: + + env GUIX_SIGS_REPO=/home/dongcarl/guix.sigs \\ + SIGNER=0x96AB007F1A7ED999=dongcarl \\ + ./contrib/guix/guix-attest + +Example w/o signing, just creating SHA256SUMS: + + env GUIX_SIGS_REPO=/home/achow101/guix.sigs \\ + SIGNER=achow101 \\ + NO_SIGN=1 \\ + ./contrib/guix/guix-attest + +EOF +} + +if [ -z "$GUIX_SIGS_REPO" ] || [ -z "$SIGNER" ]; then + cmd_usage + exit 1 +fi + +################ +# GUIX_SIGS_REPO should exist as a directory +################ + +if [ ! -d "$GUIX_SIGS_REPO" ]; then +cat << EOF +ERR: The specified GUIX_SIGS_REPO is not an existent directory: + + '$GUIX_SIGS_REPO' + +Hint: Please clone the guix.sigs repository and point to it with the + GUIX_SIGS_REPO environment variable. + +EOF +cmd_usage +exit 1 +fi + +################ +# The key specified in SIGNER should be usable +################ + +IFS='=' read -r gpg_key_name signer_name <<< "$SIGNER" +if [ -z "${signer_name}" ]; then + signer_name="$gpg_key_name" +fi + +if [ -z "$NO_SIGN" ] && ! ${GPG} --dry-run --list-secret-keys "${gpg_key_name}" >/dev/null 2>&1; then + echo "ERR: GPG can't seem to find any key named '${gpg_key_name}'" + exit 1 +fi + +################ +# We should be able to find at least one output +################ + +echo "Looking for build output SHA256SUMS fragments in ${OUTDIR_BASE}" + +shopt -s nullglob +sha256sum_fragments=( "$OUTDIR_BASE"/*/SHA256SUMS.part ) # This expands to an array of directories... +shopt -u nullglob + +noncodesigned_fragments=() +codesigned_fragments=() + +if (( ${#sha256sum_fragments[@]} )); then + echo "Found build output SHA256SUMS fragments:" + for outdir in "${sha256sum_fragments[@]}"; do + echo " '$outdir'" + case "$outdir" in + "$OUTDIR_BASE"/*-codesigned/SHA256SUMS.part) + codesigned_fragments+=("$outdir") + ;; + *) + noncodesigned_fragments+=("$outdir") + ;; + esac + done + echo +else + echo "ERR: Could not find any build output SHA256SUMS fragments in ${OUTDIR_BASE}" + exit 1 +fi + +############## +## Attest ## +############## + +# Usage: out_name $outdir +# +# HOST: The output directory being attested +# +out_name() { + basename "$(dirname "$1")" +} + +shasum_already_exists() { +cat < "$temp_noncodesigned" + if [ -e noncodesigned.SHA256SUMS ]; then + # The SHA256SUMS already exists, make sure it's exactly what we + # expect, error out if not + if diff -u noncodesigned.SHA256SUMS "$temp_noncodesigned"; then + echo "A noncodesigned.SHA256SUMS file already exists for '${VERSION}' and is up-to-date." 
+ else + shasum_already_exists noncodesigned.SHA256SUMS + exit 1 + fi + else + mv "$temp_noncodesigned" noncodesigned.SHA256SUMS + fi + else + echo "ERR: No noncodesigned outputs found for '${VERSION}', exiting..." + exit 1 + fi + + temp_all="$(mktemp)" + trap 'rm -rf -- "$temp_all"' EXIT + + if (( ${#codesigned_fragments[@]} )); then + # Note: all.SHA256SUMS attests to all of $sha256sum_fragments, but is + # not needed if there are no $codesigned_fragments + cat "${sha256sum_fragments[@]}" \ + | sort -u \ + | sort -k2 \ + | basenameify_SHA256SUMS \ + > "$temp_all" + if [ -e all.SHA256SUMS ]; then + # The SHA256SUMS already exists, make sure it's exactly what we + # expect, error out if not + if diff -u all.SHA256SUMS "$temp_all"; then + echo "An all.SHA256SUMS file already exists for '${VERSION}' and is up-to-date." + else + shasum_already_exists all.SHA256SUMS + exit 1 + fi + else + mv "$temp_all" all.SHA256SUMS + fi + else + # It is fine to have the codesigned outputs be missing (perhaps the + # detached codesigs have not been published yet), just print a log + # message instead of erroring out + echo "INFO: No codesigned outputs found for '${VERSION}', skipping..." + fi + + if [ -z "$NO_SIGN" ]; then + echo "Signing SHA256SUMS to produce SHA256SUMS.asc" + for i in *.SHA256SUMS; do + if [ ! -e "$i".asc ]; then + ${GPG} --detach-sign \ + --digest-algo sha256 \ + --local-user "$gpg_key_name" \ + --armor \ + --output "$i".asc "$i" + else + echo "Signature already there" + fi + done + else + echo "Not signing SHA256SUMS as \$NO_SIGN is not empty" + fi + echo "" +) diff --git a/contrib/guix/guix-build b/contrib/guix/guix-build new file mode 100755 index 0000000000000..2ea574fe4b98c --- /dev/null +++ b/contrib/guix/guix-build @@ -0,0 +1,468 @@ +#!/usr/bin/env bash +export LC_ALL=C +set -e -o pipefail + +# Source the common prelude, which: +# 1. Checks if we're at the top directory of the Bitcoin Core repository +# 2. Defines a few common functions and variables +# +# shellcheck source=libexec/prelude.bash +source "$(dirname "${BASH_SOURCE[0]}")/libexec/prelude.bash" + + +################### +## SANITY CHECKS ## +################### + +################ +# Required non-builtin commands should be invocable +################ + +check_tools cat mkdir make getent curl git guix + +################ +# GUIX_BUILD_OPTIONS should be empty +################ +# +# GUIX_BUILD_OPTIONS is an environment variable recognized by guix commands that +# can perform builds. This seems like what we want instead of +# ADDITIONAL_GUIX_COMMON_FLAGS, but the value of GUIX_BUILD_OPTIONS is actually +# _appended_ to normal command-line options. Meaning that they will take +# precedence over the command-specific ADDITIONAL_GUIX__FLAGS. +# +# This seems like a poor user experience. Thus we check for GUIX_BUILD_OPTIONS's +# existence here and direct users of this script to use our (more flexible) +# custom environment variables. +if [ -n "$GUIX_BUILD_OPTIONS" ]; then +cat << EOF +Error: Environment variable GUIX_BUILD_OPTIONS is not empty: + '$GUIX_BUILD_OPTIONS' + +Unfortunately this script is incompatible with GUIX_BUILD_OPTIONS, please unset +GUIX_BUILD_OPTIONS and use ADDITIONAL_GUIX_COMMON_FLAGS to set build options +across guix commands or ADDITIONAL_GUIX__FLAGS to set build options for a +specific guix command. + +See contrib/guix/README.md for more details. +EOF +exit 1 +fi + +################ +# The git worktree should not be dirty +################ + +if ! 
git diff-index --quiet HEAD -- && [ -z "$FORCE_DIRTY_WORKTREE" ]; then +cat << EOF +ERR: The current git worktree is dirty, which may lead to broken builds. + + Aborting... + +Hint: To make your git worktree clean, You may want to: + 1. Commit your changes, + 2. Stash your changes, or + 3. Set the 'FORCE_DIRTY_WORKTREE' environment variable if you insist on + using a dirty worktree +EOF +exit 1 +fi + +mkdir -p "$VERSION_BASE" + +################ +# Build directories should not exist +################ + +# Default to building for all supported HOSTs (overridable by environment) +# powerpc64le-linux-gnu currently disabled due non-determinism issues across build arches. +export HOSTS="${HOSTS:-x86_64-linux-gnu arm-linux-gnueabihf aarch64-linux-gnu riscv64-linux-gnu powerpc64-linux-gnu + x86_64-w64-mingw32 + x86_64-apple-darwin arm64-apple-darwin}" + +# Usage: distsrc_for_host HOST +# +# HOST: The current platform triple we're building for +# +distsrc_for_host() { + echo "${DISTSRC_BASE}/distsrc-${VERSION}-${1}" +} + +# Accumulate a list of build directories that already exist... +hosts_distsrc_exists="" +for host in $HOSTS; do + if [ -e "$(distsrc_for_host "$host")" ]; then + hosts_distsrc_exists+=" ${host}" + fi +done + +if [ -n "$hosts_distsrc_exists" ]; then +# ...so that we can print them out nicely in an error message +cat << EOF +ERR: Build directories for this commit already exist for the following platform + triples you're attempting to build, probably because of previous builds. + Please remove, or otherwise deal with them prior to starting another build. + + Aborting... + +Hint: To blow everything away, you may want to use: + + $ ./contrib/guix/guix-clean + +Specifically, this will remove all files without an entry in the index, +excluding the SDK directory, the depends download cache, the depends built +packages cache, the garbage collector roots for Guix environments, and the +output directory. +EOF +for host in $hosts_distsrc_exists; do + echo " ${host} '$(distsrc_for_host "$host")'" +done +exit 1 +else + mkdir -p "$DISTSRC_BASE" +fi + +################ +# When building for darwin, the macOS SDK should exist +################ + +for host in $HOSTS; do + case "$host" in + *darwin*) + OSX_SDK="$(make -C "${PWD}/depends" --no-print-directory HOST="$host" print-OSX_SDK | sed 's@^[^=]\+=@@g')" + if [ -e "$OSX_SDK" ]; then + echo "Found macOS SDK at '${OSX_SDK}', using..." + break + else + echo "macOS SDK does not exist at '${OSX_SDK}', please place the extracted, untarred SDK there to perform darwin builds, or define SDK_PATH environment variable. Exiting..." + exit 1 + fi + ;; + esac +done + +################ +# VERSION_BASE should have enough space +################ + +avail_KiB="$(df -Pk "$VERSION_BASE" | sed 1d | tr -s ' ' | cut -d' ' -f4)" +total_required_KiB=0 +for host in $HOSTS; do + case "$host" in + *darwin*) required_KiB=440000 ;; + *mingw*) required_KiB=7600000 ;; + *) required_KiB=6400000 ;; + esac + total_required_KiB=$((total_required_KiB+required_KiB)) +done + +if (( total_required_KiB > avail_KiB )); then + total_required_GiB=$((total_required_KiB / 1048576)) + avail_GiB=$((avail_KiB / 1048576)) + echo "Performing a Bitcoin Core Guix build for the selected HOSTS requires ${total_required_GiB} GiB, however, only ${avail_GiB} GiB is available. Please free up some disk space before performing the build." + exit 1 +fi + +################ +# Check that we can connect to the guix-daemon +################ + +cat << EOF +Checking that we can connect to the guix-daemon... 
+ +Hint: If this hangs, you may want to try turning your guix-daemon off and on + again. + +EOF +if ! guix gc --list-failures > /dev/null; then +cat << EOF + +ERR: Failed to connect to the guix-daemon, please ensure that one is running and + reachable. +EOF +exit 1 +fi + +# Developer note: we could use `guix repl` for this check and run: +# +# (import (guix store)) (close-connection (open-connection)) +# +# However, the internal API is likely to change more than the CLI invocation + +################ +# Services database must have basic entries +################ + +if ! getent services http https ftp > /dev/null 2>&1; then +cat << EOF +ERR: Your system's C library cannot find service database entries for at least + one of the following services: http, https, ftp. + +Hint: Most likely, /etc/services does not exist yet (common for docker images + and minimal distros), or you don't have permissions to access it. + + If /etc/services does not exist yet, you may want to install the + appropriate package for your distro which provides it. + + On Debian/Ubuntu: netbase + On Arch Linux: iana-etc + + For more information, see: getent(1), services(5) + +EOF + +fi + +######### +# SETUP # +######### + +# Determine the maximum number of jobs to run simultaneously (overridable by +# environment) +JOBS="${JOBS:-$(nproc)}" + +# Usage: host_to_commonname HOST +# +# HOST: The current platform triple we're building for +# +host_to_commonname() { + case "$1" in + *darwin*) echo osx ;; + *mingw*) echo win ;; + *linux*) echo linux ;; + *) exit 1 ;; + esac +} + +# Determine the reference time used for determinism (overridable by environment) +SOURCE_DATE_EPOCH="${SOURCE_DATE_EPOCH:-$(git -c log.showSignature=false log --format=%at -1)}" + +# Precious directories are those which should not be cleaned between successive +# guix builds +depends_precious_dir_names='SOURCES_PATH BASE_CACHE SDK_PATH' +precious_dir_names="${depends_precious_dir_names} OUTDIR_BASE PROFILES_BASE" + +# Usage: contains IFS-SEPARATED-LIST ITEM +contains() { + for i in ${1}; do + if [ "$i" = "${2}" ]; then + return 0 # Found! + fi + done + return 1 +} + +# If the user explicitly specified a precious directory, create it so we +# can map it into the container +for precious_dir_name in $precious_dir_names; do + precious_dir_path="${!precious_dir_name}" + if [ -n "$precious_dir_path" ]; then + if [ ! -e "$precious_dir_path" ]; then + mkdir -p "$precious_dir_path" + elif [ -L "$precious_dir_path" ]; then + echo "ERR: ${precious_dir_name} cannot be a symbolic link" + exit 1 + elif [ ! -d "$precious_dir_path" ]; then + echo "ERR: ${precious_dir_name} must be a directory" + exit 1 + fi + fi +done + +mkdir -p "$VAR_BASE" + +# Record the _effective_ values of precious directories such that guix-clean can +# avoid clobbering them if appropriate. +# +# shellcheck disable=SC2046,SC2086 +{ + # Get depends precious dir definitions from depends + make -C "${PWD}/depends" \ + --no-print-directory \ + -- $(printf "print-%s\n" $depends_precious_dir_names) + + # Get remaining precious dir definitions from the environment + for precious_dir_name in $precious_dir_names; do + precious_dir_path="${!precious_dir_name}" + if ! 
contains "$depends_precious_dir_names" "$precious_dir_name"; then + echo "${precious_dir_name}=${precious_dir_path}" + fi + done +} > "${VAR_BASE}/precious_dirs" + +# Make sure an output directory exists for our builds +OUTDIR_BASE="${OUTDIR_BASE:-${VERSION_BASE}/output}" +mkdir -p "$OUTDIR_BASE" + +# Download the depends sources now as we won't have internet access in the build +# container +for host in $HOSTS; do + make -C "${PWD}/depends" -j"$JOBS" download-"$(host_to_commonname "$host")" ${V:+V=1} ${SOURCES_PATH:+SOURCES_PATH="$SOURCES_PATH"} +done + +# Usage: outdir_for_host HOST SUFFIX +# +# HOST: The current platform triple we're building for +# +outdir_for_host() { + echo "${OUTDIR_BASE}/${1}${2:+-${2}}" +} + +# Usage: profiledir_for_host HOST SUFFIX +# +# HOST: The current platform triple we're building for +# +profiledir_for_host() { + echo "${PROFILES_BASE}/${1}${2:+-${2}}" +} + + +######### +# BUILD # +######### + +# Function to be called when building for host ${1} and the user interrupts the +# build +int_trap() { +cat << EOF +** INT received while building ${1}, you may want to clean up the relevant + work directories (e.g. distsrc-*) before rebuilding + +Hint: To blow everything away, you may want to use: + + $ ./contrib/guix/guix-clean + +Specifically, this will remove all files without an entry in the index, +excluding the SDK directory, the depends download cache, the depends built +packages cache, the garbage collector roots for Guix environments, and the +output directory. +EOF +} + +# Deterministically build Bitcoin Core +# shellcheck disable=SC2153 +for host in $HOSTS; do + + # Display proper warning when the user interrupts the build + trap 'int_trap ${host}' INT + + ( + # Required for 'contrib/guix/manifest.scm' to output the right manifest + # for the particular $HOST we're building for + export HOST="$host" + + # shellcheck disable=SC2030 +cat << EOF +INFO: Building ${VERSION:?not set} for platform triple ${HOST:?not set}: + ...using reference timestamp: ${SOURCE_DATE_EPOCH:?not set} + ...running at most ${JOBS:?not set} jobs + ...from worktree directory: '${PWD}' + ...bind-mounted in container to: '/bitcoin' + ...in build directory: '$(distsrc_for_host "$HOST")' + ...bind-mounted in container to: '$(DISTSRC_BASE=/distsrc-base && distsrc_for_host "$HOST")' + ...outputting in: '$(outdir_for_host "$HOST")' + ...bind-mounted in container to: '$(OUTDIR_BASE=/outdir-base && outdir_for_host "$HOST")' + ADDITIONAL FLAGS (if set) + ADDITIONAL_GUIX_COMMON_FLAGS: ${ADDITIONAL_GUIX_COMMON_FLAGS} + ADDITIONAL_GUIX_ENVIRONMENT_FLAGS: ${ADDITIONAL_GUIX_ENVIRONMENT_FLAGS} + ADDITIONAL_GUIX_TIMEMACHINE_FLAGS: ${ADDITIONAL_GUIX_TIMEMACHINE_FLAGS} +EOF + + # Run the build script 'contrib/guix/libexec/build.sh' in the build + # container specified by 'contrib/guix/manifest.scm'. + # + # Explanation of `guix shell` flags: + # + # --container run command within an isolated container + # + # Running in an isolated container minimizes build-time differences + # between machines and improves reproducibility + # + # --pure unset existing environment variables + # + # Same rationale as --container + # + # --no-cwd do not share current working directory with an + # isolated container + # + # When --container is specified, the default behavior is to share + # the current working directory with the isolated container at the + # same exact path (e.g. mapping '/home/satoshi/bitcoin/' to + # '/home/satoshi/bitcoin/'). 
This means that the $PWD inside the + # container becomes a source of irreproducibility. --no-cwd disables + # this behaviour. + # + # --share=SPEC for containers, share writable host file system + # according to SPEC + # + # --share="$PWD"=/bitcoin + # + # maps our current working directory to /bitcoin + # inside the isolated container, which we later cd + # into. + # + # While we don't want to map our current working directory to the + # same exact path (as this introduces irreproducibility), we do want + # it to be at a _fixed_ path _somewhere_ inside the isolated + # container so that we have something to build. '/bitcoin' was + # chosen arbitrarily. + # + # ${SOURCES_PATH:+--share="$SOURCES_PATH"} + # + # make the downloaded depends sources path available + # inside the isolated container + # + # The isolated container has no network access as it's in a + # different network namespace from the main machine, so we have to + # make the downloaded depends sources available to it. The sources + # should have been downloaded prior to this invocation. + # + # --keep-failed keep build tree of failed builds + # + # When builds of the Guix environment itself (not Bitcoin Core) + # fail, it is useful for the build tree to be kept for debugging + # purposes. + # + # ${SUBSTITUTE_URLS:+--substitute-urls="$SUBSTITUTE_URLS"} + # + # fetch substitute from SUBSTITUTE_URLS if they are + # authorized + # + # Depending on the user's security model, it may be desirable to use + # substitutes (pre-built packages) from servers that the user trusts. + # Please read the README.md in the same directory as this file for + # more information. + # + # shellcheck disable=SC2086,SC2031 + time-machine shell --manifest="${PWD}/contrib/guix/manifest.scm" \ + --container \ + --pure \ + --no-cwd \ + --share="$PWD"=/bitcoin \ + --share="$DISTSRC_BASE"=/distsrc-base \ + --share="$OUTDIR_BASE"=/outdir-base \ + --expose="$(git rev-parse --git-common-dir)" \ + ${SOURCES_PATH:+--share="$SOURCES_PATH"} \ + ${BASE_CACHE:+--share="$BASE_CACHE"} \ + ${SDK_PATH:+--share="$SDK_PATH"} \ + --cores="$JOBS" \ + --keep-failed \ + --fallback \ + --link-profile \ + --root="$(profiledir_for_host "${HOST}")" \ + ${SUBSTITUTE_URLS:+--substitute-urls="$SUBSTITUTE_URLS"} \ + ${ADDITIONAL_GUIX_COMMON_FLAGS} ${ADDITIONAL_GUIX_ENVIRONMENT_FLAGS} \ + -- env HOST="$host" \ + DISTNAME="$DISTNAME" \ + JOBS="$JOBS" \ + SOURCE_DATE_EPOCH="${SOURCE_DATE_EPOCH:?unable to determine value}" \ + ${V:+V=1} \ + ${SOURCES_PATH:+SOURCES_PATH="$SOURCES_PATH"} \ + ${BASE_CACHE:+BASE_CACHE="$BASE_CACHE"} \ + ${SDK_PATH:+SDK_PATH="$SDK_PATH"} \ + DISTSRC="$(DISTSRC_BASE=/distsrc-base && distsrc_for_host "$HOST")" \ + OUTDIR="$(OUTDIR_BASE=/outdir-base && outdir_for_host "$HOST")" \ + DIST_ARCHIVE_BASE=/outdir-base/dist-archive \ + bash -c "cd /bitcoin && bash contrib/guix/libexec/build.sh" + ) + +done diff --git a/contrib/guix/guix-build.sh b/contrib/guix/guix-build.sh deleted file mode 100755 index 11d2c8b86727e..0000000000000 --- a/contrib/guix/guix-build.sh +++ /dev/null @@ -1,119 +0,0 @@ -#!/usr/bin/env bash -export LC_ALL=C -set -e -o pipefail - -# Determine the maximum number of jobs to run simultaneously (overridable by -# environment) -MAX_JOBS="${MAX_JOBS:-$(nproc)}" - -# Download the depends sources now as we won't have internet access in the build -# container -make -C "${PWD}/depends" -j"$MAX_JOBS" download ${V:+V=1} ${SOURCES_PATH:+SOURCES_PATH="$SOURCES_PATH"} - -# Determine the reference time used for determinism (overridable by environment) 
-SOURCE_DATE_EPOCH="${SOURCE_DATE_EPOCH:-$(git log --format=%at -1)}" - -# Execute "$@" in a pinned, possibly older version of Guix, for reproducibility -# across time. -time-machine() { - guix time-machine --url=https://github.com/dongcarl/guix.git \ - --commit=b066c25026f21fb57677aa34692a5034338e7ee3 \ - -- "$@" -} - -# Function to be called when building for host ${1} and the user interrupts the -# build -int_trap() { -cat << EOF -** INT received while building ${1}, you may want to clean up the relevant - output, deploy, and distsrc-* directories before rebuilding - -Hint: To blow everything away, you may want to use: - - $ git clean -xdff --exclude='/depends/SDKs/*' - -Specifically, this will remove all files without an entry in the index, -excluding the SDK directory. Practically speaking, this means that all ignored -and untracked files and directories will be wiped, allowing you to start anew. -EOF -} - -# Deterministically build Bitcoin Core for HOSTs (overridable by environment) -# shellcheck disable=SC2153 -for host in ${HOSTS=x86_64-linux-gnu arm-linux-gnueabihf aarch64-linux-gnu riscv64-linux-gnu x86_64-w64-mingw32}; do - - # Display proper warning when the user interrupts the build - trap 'int_trap ${host}' INT - - ( - # Required for 'contrib/guix/manifest.scm' to output the right manifest - # for the particular $HOST we're building for - export HOST="$host" - - # Run the build script 'contrib/guix/libexec/build.sh' in the build - # container specified by 'contrib/guix/manifest.scm'. - # - # Explanation of `guix environment` flags: - # - # --container run command within an isolated container - # - # Running in an isolated container minimizes build-time differences - # between machines and improves reproducibility - # - # --pure unset existing environment variables - # - # Same rationale as --container - # - # --no-cwd do not share current working directory with an - # isolated container - # - # When --container is specified, the default behavior is to share - # the current working directory with the isolated container at the - # same exact path (e.g. mapping '/home/satoshi/bitcoin/' to - # '/home/satoshi/bitcoin/'). This means that the $PWD inside the - # container becomes a source of irreproducibility. --no-cwd disables - # this behaviour. - # - # --share=SPEC for containers, share writable host file system - # according to SPEC - # - # --share="$PWD"=/bitcoin - # - # maps our current working directory to /bitcoin - # inside the isolated container, which we later cd - # into. - # - # While we don't want to map our current working directory to the - # same exact path (as this introduces irreproducibility), we do want - # it to be at a _fixed_ path _somewhere_ inside the isolated - # container so that we have something to build. '/bitcoin' was - # chosen arbitrarily. - # - # ${SOURCES_PATH:+--share="$SOURCES_PATH"} - # - # make the downloaded depends sources path available - # inside the isolated container - # - # The isolated container has no network access as it's in a - # different network namespace from the main machine, so we have to - # make the downloaded depends sources available to it. The sources - # should have been downloaded prior to this invocation. 
- # - # shellcheck disable=SC2086 - time-machine environment --manifest="${PWD}/contrib/guix/manifest.scm" \ - --container \ - --pure \ - --no-cwd \ - --share="$PWD"=/bitcoin \ - --expose="$(git rev-parse --git-common-dir)" \ - ${SOURCES_PATH:+--share="$SOURCES_PATH"} \ - ${ADDITIONAL_GUIX_ENVIRONMENT_FLAGS} \ - -- env HOST="$host" \ - MAX_JOBS="$MAX_JOBS" \ - SOURCE_DATE_EPOCH="${SOURCE_DATE_EPOCH:?unable to determine value}" \ - ${V:+V=1} \ - ${SOURCES_PATH:+SOURCES_PATH="$SOURCES_PATH"} \ - bash -c "cd /bitcoin && bash contrib/guix/libexec/build.sh" - ) - -done diff --git a/contrib/guix/guix-clean b/contrib/guix/guix-clean new file mode 100755 index 0000000000000..9af0a793cff7d --- /dev/null +++ b/contrib/guix/guix-clean @@ -0,0 +1,83 @@ +#!/usr/bin/env bash +export LC_ALL=C +set -e -o pipefail + +# Source the common prelude, which: +# 1. Checks if we're at the top directory of the Bitcoin Core repository +# 2. Defines a few common functions and variables +# +# shellcheck source=libexec/prelude.bash +source "$(dirname "${BASH_SOURCE[0]}")/libexec/prelude.bash" + + +################### +## Sanity Checks ## +################### + +################ +# Required non-builtin commands should be invokable +################ + +check_tools cat mkdir make git guix + + +############# +## Clean ## +############# + +# Usage: under_dir MAYBE_PARENT MAYBE_CHILD +# +# If MAYBE_CHILD is a subdirectory of MAYBE_PARENT, print the relative path +# from MAYBE_PARENT to MAYBE_CHILD. Otherwise, return 1 as the error code. +# +# NOTE: This does not perform any symlink-resolving or path canonicalization. +# +under_dir() { + local path_residue + path_residue="${2##"${1}"}" + if [ -z "$path_residue" ] || [ "$path_residue" = "$2" ]; then + return 1 + else + echo "$path_residue" + fi +} + +# Usage: dir_under_git_root MAYBE_CHILD +# +# If MAYBE_CHILD is under the current git repository and exists, print the +# relative path from the git repository's top-level directory to MAYBE_CHILD, +# otherwise, exit with an error code. +# +dir_under_git_root() { + local rv + rv="$(under_dir "$(git_root)" "$1")" + [ -n "$rv" ] && echo "$rv" +} + +shopt -s nullglob +found_precious_dirs_files=( "${version_base_prefix}"*/"${var_base_basename}/precious_dirs" ) # This expands to an array of directories... +shopt -u nullglob + +exclude_flags=() + +for precious_dirs_file in "${found_precious_dirs_files[@]}"; do + # Make sure the precious directories (e.g. SOURCES_PATH, BASE_CACHE, SDK_PATH) + # are excluded from git-clean + echo "Found precious_dirs file: '${precious_dirs_file}'" + + # Exclude the precious_dirs file itself + if dirs_file_exclude_fragment=$(dir_under_git_root "$(dirname "$precious_dirs_file")"); then + exclude_flags+=( --exclude="${dirs_file_exclude_fragment}/precious_dirs" ) + fi + + # Read each 'name=dir' pair from the precious_dirs file + while IFS='=' read -r name dir; do + # Add an exclusion flag if the precious directory is under the git root. + if under=$(dir_under_git_root "$dir"); then + echo "Avoiding ${name}: ${under}" + exclude_flags+=( --exclude="$under" ) + fi + done < "$precious_dirs_file" +done + +git clean -xdff "${exclude_flags[@]}" diff --git a/contrib/guix/guix-codesign b/contrib/guix/guix-codesign new file mode 100755 index 0000000000000..4694209e00d5b --- /dev/null +++ b/contrib/guix/guix-codesign @@ -0,0 +1,378 @@ +#!/usr/bin/env bash +export LC_ALL=C +set -e -o pipefail + +# Source the common prelude, which: +# 1. Checks if we're at the top directory of the Bitcoin Core repository +# 2. 
Defines a few common functions and variables +# +# shellcheck source=libexec/prelude.bash +source "$(dirname "${BASH_SOURCE[0]}")/libexec/prelude.bash" + + +################### +## SANITY CHECKS ## +################### + +################ +# Required non-builtin commands should be invocable +################ + +check_tools cat mkdir git guix + +################ +# Required env vars should be non-empty +################ + +cmd_usage() { + cat < \\ + ./contrib/guix/guix-codesign + +EOF +} + +if [ -z "$DETACHED_SIGS_REPO" ]; then + cmd_usage + exit 1 +fi + +################ +# GUIX_BUILD_OPTIONS should be empty +################ +# +# GUIX_BUILD_OPTIONS is an environment variable recognized by guix commands that +# can perform builds. This seems like what we want instead of +# ADDITIONAL_GUIX_COMMON_FLAGS, but the value of GUIX_BUILD_OPTIONS is actually +# _appended_ to normal command-line options. Meaning that they will take +# precedence over the command-specific ADDITIONAL_GUIX__FLAGS. +# +# This seems like a poor user experience. Thus we check for GUIX_BUILD_OPTIONS's +# existence here and direct users of this script to use our (more flexible) +# custom environment variables. +if [ -n "$GUIX_BUILD_OPTIONS" ]; then +cat << EOF +Error: Environment variable GUIX_BUILD_OPTIONS is not empty: + '$GUIX_BUILD_OPTIONS' + +Unfortunately this script is incompatible with GUIX_BUILD_OPTIONS, please unset +GUIX_BUILD_OPTIONS and use ADDITIONAL_GUIX_COMMON_FLAGS to set build options +across guix commands or ADDITIONAL_GUIX__FLAGS to set build options for a +specific guix command. + +See contrib/guix/README.md for more details. +EOF +exit 1 +fi + +################ +# The codesignature git worktree should not be dirty +################ + +if ! git -C "$DETACHED_SIGS_REPO" diff-index --quiet HEAD -- && [ -z "$FORCE_DIRTY_WORKTREE" ]; then + cat << EOF +ERR: The DETACHED CODESIGNATURE git worktree is dirty, which may lead to broken builds. + + Aborting... + +Hint: To make your git worktree clean, You may want to: + 1. Commit your changes, + 2. Stash your changes, or + 3. Set the 'FORCE_DIRTY_WORKTREE' environment variable if you insist on + using a dirty worktree +EOF + exit 1 +fi + +################ +# Build directories should not exist +################ + +# Default to building for all supported HOSTs (overridable by environment) +export HOSTS="${HOSTS:-x86_64-w64-mingw32 x86_64-apple-darwin arm64-apple-darwin}" + +# Usage: distsrc_for_host HOST +# +# HOST: The current platform triple we're building for +# +distsrc_for_host() { + echo "${DISTSRC_BASE}/distsrc-${VERSION}-${1}-codesigned" +} + +# Accumulate a list of build directories that already exist... +hosts_distsrc_exists="" +for host in $HOSTS; do + if [ -e "$(distsrc_for_host "$host")" ]; then + hosts_distsrc_exists+=" ${host}" + fi +done + +if [ -n "$hosts_distsrc_exists" ]; then +# ...so that we can print them out nicely in an error message +cat << EOF +ERR: Build directories for this commit already exist for the following platform + triples you're attempting to build, probably because of previous builds. + Please remove, or otherwise deal with them prior to starting another build. + + Aborting... + +Hint: To blow everything away, you may want to use: + + $ ./contrib/guix/guix-clean + +Specifically, this will remove all files without an entry in the index, +excluding the SDK directory, the depends download cache, the depends built +packages cache, the garbage collector roots for Guix environments, and the +output directory. 
+EOF +for host in $hosts_distsrc_exists; do + echo " ${host} '$(distsrc_for_host "$host")'" +done +exit 1 +else + mkdir -p "$DISTSRC_BASE" +fi + + +################ +# Unsigned tarballs SHOULD exist +################ + +# Usage: outdir_for_host HOST SUFFIX +# +# HOST: The current platform triple we're building for +# +outdir_for_host() { + echo "${OUTDIR_BASE}/${1}${2:+-${2}}" +} + + +unsigned_tarball_for_host() { + case "$1" in + *mingw*) + echo "$(outdir_for_host "$1")/${DISTNAME}-win64-unsigned.tar.gz" + ;; + *darwin*) + echo "$(outdir_for_host "$1")/${DISTNAME}-${1}-unsigned.tar.gz" + ;; + *) + exit 1 + ;; + esac +} + +# Accumulate a list of build directories that already exist... +hosts_unsigned_tarball_missing="" +for host in $HOSTS; do + if [ ! -e "$(unsigned_tarball_for_host "$host")" ]; then + hosts_unsigned_tarball_missing+=" ${host}" + fi +done + +if [ -n "$hosts_unsigned_tarball_missing" ]; then + # ...so that we can print them out nicely in an error message + cat << EOF +ERR: Unsigned tarballs do not exist +... + +EOF +for host in $hosts_unsigned_tarball_missing; do + echo " ${host} '$(unsigned_tarball_for_host "$host")'" +done +exit 1 +fi + +################ +# Check that we can connect to the guix-daemon +################ + +cat << EOF +Checking that we can connect to the guix-daemon... + +Hint: If this hangs, you may want to try turning your guix-daemon off and on + again. + +EOF +if ! guix gc --list-failures > /dev/null; then + cat << EOF + +ERR: Failed to connect to the guix-daemon, please ensure that one is running and + reachable. +EOF + exit 1 +fi + +# Developer note: we could use `guix repl` for this check and run: +# +# (import (guix store)) (close-connection (open-connection)) +# +# However, the internal API is likely to change more than the CLI invocation + + +######### +# SETUP # +######### + +# Determine the maximum number of jobs to run simultaneously (overridable by +# environment) +JOBS="${JOBS:-$(nproc)}" + +# Determine the reference time used for determinism (overridable by environment) +SOURCE_DATE_EPOCH="${SOURCE_DATE_EPOCH:-$(git -c log.showSignature=false log --format=%at -1)}" + +# Make sure an output directory exists for our builds +OUTDIR_BASE="${OUTDIR_BASE:-${VERSION_BASE}/output}" +mkdir -p "$OUTDIR_BASE" + +# Usage: profiledir_for_host HOST SUFFIX +# +# HOST: The current platform triple we're building for +# +profiledir_for_host() { + echo "${PROFILES_BASE}/${1}${2:+-${2}}" +} + +######### +# BUILD # +######### + +# Function to be called when codesigning for host ${1} and the user interrupts +# the codesign +int_trap() { +cat << EOF +** INT received while codesigning ${1}, you may want to clean up the relevant + work directories (e.g. distsrc-*) before recodesigning + +Hint: To blow everything away, you may want to use: + + $ ./contrib/guix/guix-clean + +Specifically, this will remove all files without an entry in the index, +excluding the SDK directory, the depends download cache, the depends built +packages cache, the garbage collector roots for Guix environments, and the +output directory. 
+EOF +} + +# Deterministically build Bitcoin Core +# shellcheck disable=SC2153 +for host in $HOSTS; do + + # Display proper warning when the user interrupts the build + trap 'int_trap ${host}' INT + + ( + # Required for 'contrib/guix/manifest.scm' to output the right manifest + # for the particular $HOST we're building for + export HOST="$host" + + # shellcheck disable=SC2030 +cat << EOF +INFO: Codesigning ${VERSION:?not set} for platform triple ${HOST:?not set}: + ...using reference timestamp: ${SOURCE_DATE_EPOCH:?not set} + ...from worktree directory: '${PWD}' + ...bind-mounted in container to: '/bitcoin' + ...in build directory: '$(distsrc_for_host "$HOST")' + ...bind-mounted in container to: '$(DISTSRC_BASE=/distsrc-base && distsrc_for_host "$HOST")' + ...outputting in: '$(outdir_for_host "$HOST" codesigned)' + ...bind-mounted in container to: '$(OUTDIR_BASE=/outdir-base && outdir_for_host "$HOST" codesigned)' + ...using detached signatures in: '${DETACHED_SIGS_REPO:?not set}' + ...bind-mounted in container to: '/detached-sigs' +EOF + + + # Run the build script 'contrib/guix/libexec/build.sh' in the build + # container specified by 'contrib/guix/manifest.scm'. + # + # Explanation of `guix shell` flags: + # + # --container run command within an isolated container + # + # Running in an isolated container minimizes build-time differences + # between machines and improves reproducibility + # + # --pure unset existing environment variables + # + # Same rationale as --container + # + # --no-cwd do not share current working directory with an + # isolated container + # + # When --container is specified, the default behavior is to share + # the current working directory with the isolated container at the + # same exact path (e.g. mapping '/home/satoshi/bitcoin/' to + # '/home/satoshi/bitcoin/'). This means that the $PWD inside the + # container becomes a source of irreproducibility. --no-cwd disables + # this behaviour. + # + # --share=SPEC for containers, share writable host file system + # according to SPEC + # + # --share="$PWD"=/bitcoin + # + # maps our current working directory to /bitcoin + # inside the isolated container, which we later cd + # into. + # + # While we don't want to map our current working directory to the + # same exact path (as this introduces irreproducibility), we do want + # it to be at a _fixed_ path _somewhere_ inside the isolated + # container so that we have something to build. '/bitcoin' was + # chosen arbitrarily. + # + # ${SOURCES_PATH:+--share="$SOURCES_PATH"} + # + # make the downloaded depends sources path available + # inside the isolated container + # + # The isolated container has no network access as it's in a + # different network namespace from the main machine, so we have to + # make the downloaded depends sources available to it. The sources + # should have been downloaded prior to this invocation. + # + # ${SUBSTITUTE_URLS:+--substitute-urls="$SUBSTITUTE_URLS"} + # + # fetch substitute from SUBSTITUTE_URLS if they are + # authorized + # + # Depending on the user's security model, it may be desirable to use + # substitutes (pre-built packages) from servers that the user trusts. + # Please read the README.md in the same directory as this file for + # more information. 
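One more idiom worth spelling out before the invocation below: the INFO block above and the env arguments that follow compute the *container-side* version of a path by re-running the same helper inside a command substitution with the base directory temporarily overridden. A self-contained sketch (version number and host path are made up, assuming the helpers defined earlier in this script):

    VERSION=27.0
    DISTSRC_BASE=/home/user/guix-build-27.0
    distsrc_for_host() { echo "${DISTSRC_BASE}/distsrc-${VERSION}-${1}-codesigned"; }
    echo "host side:      $(distsrc_for_host x86_64-w64-mingw32)"
    # Override the base only inside the command substitution's subshell:
    echo "container side: $(DISTSRC_BASE=/distsrc-base && distsrc_for_host x86_64-w64-mingw32)"
    echo "outer value untouched: ${DISTSRC_BASE}"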
+ # + # shellcheck disable=SC2086,SC2031 + time-machine shell --manifest="${PWD}/contrib/guix/manifest.scm" \ + --container \ + --pure \ + --no-cwd \ + --share="$PWD"=/bitcoin \ + --share="$DISTSRC_BASE"=/distsrc-base \ + --share="$OUTDIR_BASE"=/outdir-base \ + --share="$DETACHED_SIGS_REPO"=/detached-sigs \ + --expose="$(git rev-parse --git-common-dir)" \ + --expose="$(git -C "$DETACHED_SIGS_REPO" rev-parse --git-common-dir)" \ + ${SOURCES_PATH:+--share="$SOURCES_PATH"} \ + --cores="$JOBS" \ + --keep-failed \ + --fallback \ + --link-profile \ + --root="$(profiledir_for_host "${HOST}" codesigned)" \ + ${SUBSTITUTE_URLS:+--substitute-urls="$SUBSTITUTE_URLS"} \ + ${ADDITIONAL_GUIX_COMMON_FLAGS} ${ADDITIONAL_GUIX_ENVIRONMENT_FLAGS} \ + -- env HOST="$host" \ + DISTNAME="$DISTNAME" \ + JOBS="$JOBS" \ + SOURCE_DATE_EPOCH="${SOURCE_DATE_EPOCH:?unable to determine value}" \ + ${V:+V=1} \ + ${SOURCES_PATH:+SOURCES_PATH="$SOURCES_PATH"} \ + DISTSRC="$(DISTSRC_BASE=/distsrc-base && distsrc_for_host "$HOST")" \ + OUTDIR="$(OUTDIR_BASE=/outdir-base && outdir_for_host "$HOST" codesigned)" \ + DIST_ARCHIVE_BASE=/outdir-base/dist-archive \ + DETACHED_SIGS_REPO=/detached-sigs \ + UNSIGNED_TARBALL="$(OUTDIR_BASE=/outdir-base && unsigned_tarball_for_host "$HOST")" \ + bash -c "cd /bitcoin && bash contrib/guix/libexec/codesign.sh" + ) + +done diff --git a/contrib/guix/guix-verify b/contrib/guix/guix-verify new file mode 100755 index 0000000000000..02ae022741bab --- /dev/null +++ b/contrib/guix/guix-verify @@ -0,0 +1,174 @@ +#!/usr/bin/env bash +export LC_ALL=C +set -e -o pipefail + +# Source the common prelude, which: +# 1. Checks if we're at the top directory of the Bitcoin Core repository +# 2. Defines a few common functions and variables +# +# shellcheck source=libexec/prelude.bash +source "$(dirname "${BASH_SOURCE[0]}")/libexec/prelude.bash" + + +################### +## Sanity Checks ## +################### + +################ +# Required non-builtin commands should be invokable +################ + +check_tools cat diff gpg + +################ +# Required env vars should be non-empty +################ + +cmd_usage() { +cat < [ SIGNER= ] ./contrib/guix/guix-verify + +Example overriding signer's manifest to use as base + + env GUIX_SIGS_REPO=/home/dongcarl/guix.sigs SIGNER=achow101 ./contrib/guix/guix-verify + +EOF +} + +if [ -z "$GUIX_SIGS_REPO" ]; then + cmd_usage + exit 1 +fi + +################ +# GUIX_SIGS_REPO should exist as a directory +################ + +if [ ! -d "$GUIX_SIGS_REPO" ]; then +cat << EOF +ERR: The specified GUIX_SIGS_REPO is not an existent directory: + + '$GUIX_SIGS_REPO' + +Hint: Please clone the guix.sigs repository and point to it with the + GUIX_SIGS_REPO environment variable. + +EOF +cmd_usage +exit 1 +fi + +############## +## Verify ## +############## + +OUTSIGDIR_BASE="${GUIX_SIGS_REPO}/${VERSION}" +echo "Looking for signature directories in '${OUTSIGDIR_BASE}'" +echo "" + +# Usage: verify compare_manifest current_manifest +verify() { + local compare_manifest="$1" + local current_manifest="$2" + if ! gpg --quiet --batch --verify "$current_manifest".asc "$current_manifest" 1>&2; then + echo "ERR: Failed to verify GPG signature in '${current_manifest}'" + echo "" + echo "Hint: Either the signature is invalid or the public key is missing" + echo "" + failure=1 + elif ! 
diff --report-identical "$compare_manifest" "$current_manifest" 1>&2; then + echo "ERR: The SHA256SUMS attestation in these two directories differ:" + echo " '${compare_manifest}'" + echo " '${current_manifest}'" + echo "" + failure=1 + else + echo "Verified: '${current_manifest}'" + echo "" + fi +} + +shopt -s nullglob +all_noncodesigned=( "$OUTSIGDIR_BASE"/*/noncodesigned.SHA256SUMS ) +shopt -u nullglob + +echo "--------------------" +echo "" +if (( ${#all_noncodesigned[@]} )); then + compare_noncodesigned="${all_noncodesigned[0]}" + if [[ -n "$SIGNER" ]]; then + signer_noncodesigned="$OUTSIGDIR_BASE/$SIGNER/noncodesigned.SHA256SUMS" + if [[ -f "$signer_noncodesigned" ]]; then + echo "Using $SIGNER's manifest as the base to compare against" + compare_noncodesigned="$signer_noncodesigned" + else + echo "Unable to find $SIGNER's manifest, using the first one found" + fi + else + echo "No SIGNER provided, using the first manifest found" + fi + + for current_manifest in "${all_noncodesigned[@]}"; do + verify "$compare_noncodesigned" "$current_manifest" + done + + echo "DONE: Checking output signatures for noncodesigned.SHA256SUMS" + echo "" +else + echo "WARN: No signature directories with noncodesigned.SHA256SUMS found" + echo "" +fi + +shopt -s nullglob +all_all=( "$OUTSIGDIR_BASE"/*/all.SHA256SUMS ) +shopt -u nullglob + +echo "--------------------" +echo "" +if (( ${#all_all[@]} )); then + compare_all="${all_all[0]}" + if [[ -n "$SIGNER" ]]; then + signer_all="$OUTSIGDIR_BASE/$SIGNER/all.SHA256SUMS" + if [[ -f "$signer_all" ]]; then + echo "Using $SIGNER's manifest as the base to compare against" + compare_all="$signer_all" + else + echo "Unable to find $SIGNER's manifest, using the first one found" + fi + else + echo "No SIGNER provided, using the first manifest found" + fi + + for current_manifest in "${all_all[@]}"; do + verify "$compare_all" "$current_manifest" + done + + # Sanity check: there should be no entries that exist in + # noncodesigned.SHA256SUMS that doesn't exist in all.SHA256SUMS + if [[ "$(comm -23 <(sort "$compare_noncodesigned") <(sort "$compare_all") | wc -c)" -ne 0 ]]; then + echo "ERR: There are unique lines in noncodesigned.SHA256SUMS which" + echo " do not exist in all.SHA256SUMS, something went very wrong." + exit 1 + fi + + echo "DONE: Checking output signatures for all.SHA256SUMS" + echo "" +else + echo "WARN: No signature directories with all.SHA256SUMS found" + echo "" +fi + +echo "====================" +echo "" +if (( ${#all_noncodesigned[@]} + ${#all_all[@]} == 0 )); then + echo "ERR: Unable to perform any verifications as no signature directories" + echo " were found" + echo "" + exit 1 +fi + +if [ -n "$failure" ]; then + exit 1 +fi diff --git a/contrib/guix/libexec/build.sh b/contrib/guix/libexec/build.sh old mode 100644 new mode 100755 index 01f4518c73794..3184cd4afe735 --- a/contrib/guix/libexec/build.sh +++ b/contrib/guix/libexec/build.sh @@ -1,11 +1,43 @@ #!/usr/bin/env bash +# Copyright (c) 2019-2022 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. export LC_ALL=C set -e -o pipefail export TZ=UTC -# Check that environment variables assumed to be set by the environment are set -echo "Building for platform triple ${HOST:?not set} with reference timestamp ${SOURCE_DATE_EPOCH:?not set}..." -echo "At most ${MAX_JOBS:?not set} jobs will run at once..." 
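Both the lines removed here and the required-variable heredoc added below rely on the ${VAR:?message} form of parameter expansion to assert the environment handed in by the guix-* wrappers. A small stand-alone sketch (variable name illustrative): a failed expansion writes the message to stderr and makes a non-interactive shell exit, so a missing variable can never slip through to the actual build steps.

    unset HOST
    echo "Building for platform triple ${HOST:?not set}"
    # -> stderr: "HOST: not set"; the script exits non-zero here
    echo "never reached"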
+# Although Guix _does_ set umask when building its own packages (in our case, +# this is all packages in manifest.scm), it does not set it for `guix +# shell`. It does make sense for at least `guix shell --container` +# to set umask, so if that change gets merged upstream and we bump the +# time-machine to a commit which includes the aforementioned change, we can +# remove this line. +# +# This line should be placed before any commands which creates files. +umask 0022 + +if [ -n "$V" ]; then + # Print both unexpanded (-v) and expanded (-x) forms of commands as they are + # read from this file. + set -vx + # Set VERBOSE for CMake-based builds + export VERBOSE="$V" +fi + +# Check that required environment variables are set +cat << EOF +Required environment variables as seen inside the container: + DIST_ARCHIVE_BASE: ${DIST_ARCHIVE_BASE:?not set} + DISTNAME: ${DISTNAME:?not set} + HOST: ${HOST:?not set} + SOURCE_DATE_EPOCH: ${SOURCE_DATE_EPOCH:?not set} + JOBS: ${JOBS:?not set} + DISTSRC: ${DISTSRC:?not set} + OUTDIR: ${OUTDIR:?not set} +EOF + +ACTUAL_OUTDIR="${OUTDIR}" +OUTDIR="${DISTSRC}/output" ##################### # Environment Setup # @@ -15,59 +47,75 @@ echo "At most ${MAX_JOBS:?not set} jobs will run at once..." # $HOSTs after successfully building. BASEPREFIX="${PWD}/depends" -# Setup an output directory for our build -OUTDIR="${OUTDIR:-${PWD}/output}" -[ -e "$OUTDIR" ] || mkdir -p "$OUTDIR" - -# Setup the directory where our Bitcoin Core build for HOST will occur -DISTSRC="${DISTSRC:-${PWD}/distsrc-${HOST}}" -if [ -e "$DISTSRC" ]; then - echo "DISTSRC directory '${DISTSRC}' exists, probably because of previous builds... Aborting..." - exit 1 -else - mkdir -p "$DISTSRC" -fi - # Given a package name and an output name, return the path of that output in our # current guix environment store_path() { grep --extended-regexp "/[^-]{32}-${1}-[^-]+${2:+-${2}}" "${GUIX_ENVIRONMENT}/manifest" \ | head --lines=1 \ - | sed --expression='s|^[[:space:]]*"||' \ + | sed --expression='s|\x29*$||' \ + --expression='s|^[[:space:]]*"||' \ --expression='s|"[[:space:]]*$||' } -# Set environment variables to point Guix's cross-toolchain to the right + +# Set environment variables to point the NATIVE toolchain to the right +# includes/libs +NATIVE_GCC="$(store_path gcc-toolchain)" + +unset LIBRARY_PATH +unset CPATH +unset C_INCLUDE_PATH +unset CPLUS_INCLUDE_PATH +unset OBJC_INCLUDE_PATH +unset OBJCPLUS_INCLUDE_PATH + +export C_INCLUDE_PATH="${NATIVE_GCC}/include" +export CPLUS_INCLUDE_PATH="${NATIVE_GCC}/include/c++:${NATIVE_GCC}/include" + +case "$HOST" in + *darwin*) export LIBRARY_PATH="${NATIVE_GCC}/lib" ;; # Required for qt/qmake + *mingw*) export LIBRARY_PATH="${NATIVE_GCC}/lib" ;; + *) + NATIVE_GCC_STATIC="$(store_path gcc-toolchain static)" + export LIBRARY_PATH="${NATIVE_GCC}/lib:${NATIVE_GCC_STATIC}/lib" + ;; +esac + +# Set environment variables to point the CROSS toolchain to the right # includes/libs for $HOST case "$HOST" in *mingw*) # Determine output paths to use in CROSS_* environment variables CROSS_GLIBC="$(store_path "mingw-w64-x86_64-winpthreads")" CROSS_GCC="$(store_path "gcc-cross-${HOST}")" - CROSS_GCC_LIBS=( "${CROSS_GCC}/lib/gcc/${HOST}"/* ) # This expands to an array of directories... + CROSS_GCC_LIB_STORE="$(store_path "gcc-cross-${HOST}" lib)" + CROSS_GCC_LIBS=( "${CROSS_GCC_LIB_STORE}/lib/gcc/${HOST}"/* ) # This expands to an array of directories... 
CROSS_GCC_LIB="${CROSS_GCC_LIBS[0]}" # ...we just want the first one (there should only be one) - NATIVE_GCC="$(store_path gcc-glibc-2.27-toolchain)" - export LIBRARY_PATH="${NATIVE_GCC}/lib:${NATIVE_GCC}/lib64" - export CPATH="${NATIVE_GCC}/include" - + # The search path ordering is generally: + # 1. gcc-related search paths + # 2. libc-related search paths + # 2. kernel-header-related search paths (not applicable to mingw-w64 hosts) export CROSS_C_INCLUDE_PATH="${CROSS_GCC_LIB}/include:${CROSS_GCC_LIB}/include-fixed:${CROSS_GLIBC}/include" export CROSS_CPLUS_INCLUDE_PATH="${CROSS_GCC}/include/c++:${CROSS_GCC}/include/c++/${HOST}:${CROSS_GCC}/include/c++/backward:${CROSS_C_INCLUDE_PATH}" - export CROSS_LIBRARY_PATH="${CROSS_GCC}/lib:${CROSS_GCC}/${HOST}/lib:${CROSS_GCC_LIB}:${CROSS_GLIBC}/lib" + export CROSS_LIBRARY_PATH="${CROSS_GCC_LIB_STORE}/lib:${CROSS_GCC_LIB}:${CROSS_GLIBC}/lib" + ;; + *darwin*) + # The CROSS toolchain for darwin uses the SDK and ignores environment variables. + # See depends/hosts/darwin.mk for more details. ;; *linux*) CROSS_GLIBC="$(store_path "glibc-cross-${HOST}")" CROSS_GLIBC_STATIC="$(store_path "glibc-cross-${HOST}" static)" CROSS_KERNEL="$(store_path "linux-libre-headers-cross-${HOST}")" CROSS_GCC="$(store_path "gcc-cross-${HOST}")" - CROSS_GCC_LIBS=( "${CROSS_GCC}/lib/gcc/${HOST}"/* ) # This expands to an array of directories... + CROSS_GCC_LIB_STORE="$(store_path "gcc-cross-${HOST}" lib)" + CROSS_GCC_LIBS=( "${CROSS_GCC_LIB_STORE}/lib/gcc/${HOST}"/* ) # This expands to an array of directories... CROSS_GCC_LIB="${CROSS_GCC_LIBS[0]}" # ...we just want the first one (there should only be one) - # NOTE: CROSS_C_INCLUDE_PATH is missing ${CROSS_GCC_LIB}/include-fixed, because - # the limits.h in it is missing a '#include_next ' - export CROSS_C_INCLUDE_PATH="${CROSS_GCC_LIB}/include:${CROSS_GLIBC}/include:${CROSS_KERNEL}/include" + export CROSS_C_INCLUDE_PATH="${CROSS_GCC_LIB}/include:${CROSS_GCC_LIB}/include-fixed:${CROSS_GLIBC}/include:${CROSS_KERNEL}/include" export CROSS_CPLUS_INCLUDE_PATH="${CROSS_GCC}/include/c++:${CROSS_GCC}/include/c++/${HOST}:${CROSS_GCC}/include/c++/backward:${CROSS_C_INCLUDE_PATH}" - export CROSS_LIBRARY_PATH="${CROSS_GCC}/lib:${CROSS_GCC}/${HOST}/lib:${CROSS_GCC_LIB}:${CROSS_GLIBC}/lib:${CROSS_GLIBC_STATIC}/lib" + export CROSS_LIBRARY_PATH="${CROSS_GCC_LIB_STORE}/lib:${CROSS_GCC_LIB}:${CROSS_GLIBC}/lib:${CROSS_GLIBC_STATIC}/lib" ;; *) exit 1 ;; @@ -76,7 +124,7 @@ esac # Sanity check CROSS_*_PATH directories IFS=':' read -ra PATHS <<< "${CROSS_C_INCLUDE_PATH}:${CROSS_CPLUS_INCLUDE_PATH}:${CROSS_LIBRARY_PATH}" for p in "${PATHS[@]}"; do - if [ ! -d "$p" ]; then + if [ -n "$p" ] && [ ! -d "$p" ]; then echo "'$p' doesn't exist or isn't a directory... Aborting..." 
exit 1 fi @@ -97,20 +145,19 @@ case "$HOST" in *linux*) glibc_dynamic_linker=$( case "$HOST" in - i686-linux-gnu) echo /lib/ld-linux.so.2 ;; - x86_64-linux-gnu) echo /lib64/ld-linux-x86-64.so.2 ;; - arm-linux-gnueabihf) echo /lib/ld-linux-armhf.so.3 ;; - aarch64-linux-gnu) echo /lib/ld-linux-aarch64.so.1 ;; - riscv64-linux-gnu) echo /lib/ld-linux-riscv64-lp64d.so.1 ;; - *) exit 1 ;; + x86_64-linux-gnu) echo /lib64/ld-linux-x86-64.so.2 ;; + arm-linux-gnueabihf) echo /lib/ld-linux-armhf.so.3 ;; + aarch64-linux-gnu) echo /lib/ld-linux-aarch64.so.1 ;; + riscv64-linux-gnu) echo /lib/ld-linux-riscv64-lp64d.so.1 ;; + powerpc64-linux-gnu) echo /lib64/ld64.so.1;; + powerpc64le-linux-gnu) echo /lib64/ld64.so.2;; + *) exit 1 ;; esac ) ;; esac # Environment variables for determinism -export QT_RCC_TEST=1 -export QT_RCC_SOURCE_DATE_OVERRIDE=1 export TAR_OPTIONS="--owner=0 --group=0 --numeric-owner --mtime='@${SOURCE_DATE_EPOCH}' --sort=name" export TZ="UTC" @@ -119,108 +166,102 @@ export TZ="UTC" #################### # Build the depends tree, overriding variables that assume multilib gcc -make -C depends --jobs="$MAX_JOBS" HOST="$HOST" \ +make -C depends --jobs="$JOBS" HOST="$HOST" \ ${V:+V=1} \ ${SOURCES_PATH+SOURCES_PATH="$SOURCES_PATH"} \ - i686_linux_CC=i686-linux-gnu-gcc \ - i686_linux_CXX=i686-linux-gnu-g++ \ - i686_linux_AR=i686-linux-gnu-ar \ - i686_linux_RANLIB=i686-linux-gnu-ranlib \ - i686_linux_NM=i686-linux-gnu-nm \ - i686_linux_STRIP=i686-linux-gnu-strip \ + ${BASE_CACHE+BASE_CACHE="$BASE_CACHE"} \ + ${SDK_PATH+SDK_PATH="$SDK_PATH"} \ x86_64_linux_CC=x86_64-linux-gnu-gcc \ x86_64_linux_CXX=x86_64-linux-gnu-g++ \ - x86_64_linux_AR=x86_64-linux-gnu-ar \ - x86_64_linux_RANLIB=x86_64-linux-gnu-ranlib \ - x86_64_linux_NM=x86_64-linux-gnu-nm \ - x86_64_linux_STRIP=x86_64-linux-gnu-strip \ - qt_config_opts_i686_linux='-platform linux-g++ -xplatform bitcoin-linux-g++' + x86_64_linux_AR=x86_64-linux-gnu-gcc-ar \ + x86_64_linux_RANLIB=x86_64-linux-gnu-gcc-ranlib \ + x86_64_linux_NM=x86_64-linux-gnu-gcc-nm \ + x86_64_linux_STRIP=x86_64-linux-gnu-strip +case "$HOST" in + *darwin*) + # Unset now that Qt is built + unset C_INCLUDE_PATH + unset CPLUS_INCLUDE_PATH + unset LIBRARY_PATH + ;; +esac ########################### # Source Tarball Building # ########################### -# Define DISTNAME variable. -# shellcheck source=contrib/gitian-descriptors/assign_DISTNAME -source contrib/gitian-descriptors/assign_DISTNAME - -GIT_ARCHIVE="${OUTDIR}/src/${DISTNAME}.tar.gz" +GIT_ARCHIVE="${DIST_ARCHIVE_BASE}/${DISTNAME}.tar.gz" # Create the source tarball if not already there if [ ! -e "$GIT_ARCHIVE" ]; then mkdir -p "$(dirname "$GIT_ARCHIVE")" - git archive --output="$GIT_ARCHIVE" HEAD + git archive --prefix="${DISTNAME}/" --output="$GIT_ARCHIVE" HEAD fi +mkdir -p "$OUTDIR" + ########################### # Binary Tarball Building # ########################### # CONFIGFLAGS -CONFIGFLAGS="--enable-reduce-exports --disable-bench --disable-gui-tests" -case "$HOST" in - *linux*) CONFIGFLAGS+=" --enable-glibc-back-compat" ;; -esac +CONFIGFLAGS="-DREDUCE_EXPORTS=ON -DBUILD_BENCH=OFF -DBUILD_GUI_TESTS=OFF -DBUILD_FUZZ_BINARY=OFF" # CFLAGS HOST_CFLAGS="-O2 -g" +HOST_CFLAGS+=$(find /gnu/store -maxdepth 1 -mindepth 1 -type d -exec echo -n " -ffile-prefix-map={}=/usr" \;) case "$HOST" in - *linux*) HOST_CFLAGS+=" -ffile-prefix-map=${PWD}=." ;; + *linux*) HOST_CFLAGS+=" -ffile-prefix-map=${DISTSRC}/src=." 
;; *mingw*) HOST_CFLAGS+=" -fno-ident" ;; + *darwin*) unset HOST_CFLAGS ;; esac # CXXFLAGS HOST_CXXFLAGS="$HOST_CFLAGS" +case "$HOST" in + arm-linux-gnueabihf) HOST_CXXFLAGS="${HOST_CXXFLAGS} -Wno-psabi" ;; +esac + # LDFLAGS case "$HOST" in *linux*) HOST_LDFLAGS="-Wl,--as-needed -Wl,--dynamic-linker=$glibc_dynamic_linker -static-libstdc++ -Wl,-O2" ;; *mingw*) HOST_LDFLAGS="-Wl,--no-insert-timestamp" ;; esac -# Make $HOST-specific native binaries from depends available in $PATH -export PATH="${BASEPREFIX}/${HOST}/native/bin:${PATH}" +mkdir -p "$DISTSRC" ( cd "$DISTSRC" # Extract the source tarball - tar -xf "${GIT_ARCHIVE}" - - ./autogen.sh + tar --strip-components=1 -xf "${GIT_ARCHIVE}" # Configure this DISTSRC for $HOST # shellcheck disable=SC2086 - env CONFIG_SITE="${BASEPREFIX}/${HOST}/share/config.site" \ - ./configure --prefix=/ \ - --disable-ccache \ - --disable-maintainer-mode \ - --disable-dependency-tracking \ - ${CONFIGFLAGS} \ - CFLAGS="${HOST_CFLAGS}" \ - CXXFLAGS="${HOST_CXXFLAGS}" \ - ${HOST_LDFLAGS:+LDFLAGS="${HOST_LDFLAGS}"} - - sed -i.old 's/-lstdc++ //g' config.status libtool src/univalue/config.status src/univalue/libtool + env CFLAGS="${HOST_CFLAGS}" CXXFLAGS="${HOST_CXXFLAGS}" LDFLAGS="${HOST_LDFLAGS}" \ + cmake -S . -B build \ + --toolchain "${BASEPREFIX}/${HOST}/toolchain.cmake" \ + -DWITH_CCACHE=OFF \ + ${CONFIGFLAGS} # Build Bitcoin Core - make --jobs="$MAX_JOBS" ${V:+V=1} + cmake --build build -j "$JOBS" ${V:+--verbose} - # Perform basic ELF security checks on a series of executables. - make -C src --jobs=1 check-security ${V:+V=1} + # Check that symbol/security checks tools are sane. + cmake --build build --target test-security-check ${V:+--verbose} + # Perform basic security checks on a series of executables. + cmake --build build -j 1 --target check-security ${V:+--verbose} + # Check that executables only contain allowed version symbols. + cmake --build build -j 1 --target check-symbols ${V:+--verbose} - case "$HOST" in - *linux*|*mingw*) - # Check that executables only contain allowed gcc, glibc and libstdc++ - # version symbols for Linux distro back-compatibility. - make -C src --jobs=1 check-symbols ${V:+V=1} - ;; - esac + mkdir -p "$OUTDIR" # Make the os-specific installers case "$HOST" in *mingw*) - make deploy ${V:+V=1} BITCOIN_WIN_INSTALLER="${OUTDIR}/${DISTNAME}-win64-setup-unsigned.exe" + cmake --build build -j "$JOBS" -t deploy ${V:+--verbose} + mv build/bitcoin-win64-setup.exe "${OUTDIR}/${DISTNAME}-win64-setup-unsigned.exe" ;; esac @@ -230,30 +271,50 @@ export PATH="${BASEPREFIX}/${HOST}/native/bin:${PATH}" INSTALLPATH="${PWD}/installed/${DISTNAME}" mkdir -p "${INSTALLPATH}" # Install built Bitcoin Core to $INSTALLPATH - make install DESTDIR="${INSTALLPATH}" ${V:+V=1} + case "$HOST" in + *darwin*) + # This workaround can be dropped for CMake >= 3.27. + # See the upstream commit 689616785f76acd844fd448c51c5b2a0711aafa2. + find build -name 'cmake_install.cmake' -exec sed -i 's| -u -r | |g' {} + + + cmake --install build --strip --prefix "${INSTALLPATH}" ${V:+--verbose} + ;; + *) + cmake --install build --prefix "${INSTALLPATH}" ${V:+--verbose} + ;; + esac + case "$HOST" in + *darwin*) + cmake --build build --target deploy ${V:+--verbose} + mv build/dist/Bitcoin-Core.zip "${OUTDIR}/${DISTNAME}-${HOST}-unsigned.zip" + mkdir -p "unsigned-app-${HOST}" + cp --target-directory="unsigned-app-${HOST}" \ + contrib/macdeploy/detached-sig-create.sh + mv --target-directory="unsigned-app-${HOST}" build/dist + ( + cd "unsigned-app-${HOST}" + find . 
-print0 \ + | sort --zero-terminated \ + | tar --create --no-recursion --mode='u+rw,go+r-w,a+X' --null --files-from=- \ + | gzip -9n > "${OUTDIR}/${DISTNAME}-${HOST}-unsigned.tar.gz" \ + || ( rm -f "${OUTDIR}/${DISTNAME}-${HOST}-unsigned.tar.gz" && exit 1 ) + ) + ;; + esac ( cd installed case "$HOST" in - *mingw*) - mv --target-directory="$DISTNAME"/lib/ "$DISTNAME"/bin/*.dll + *darwin*) ;; + *) + # Split binaries from their debug symbols + { + find "${DISTNAME}/bin" -type f -executable -print0 + } | xargs -0 -P"$JOBS" -I{} "${DISTSRC}/build/split-debug.sh" {} {} {}.dbg ;; esac - # Prune libtool and object archives - find . -name "lib*.la" -delete - find . -name "lib*.a" -delete - - # Prune pkg-config files - rm -r "${DISTNAME}/lib/pkgconfig" - - # Split binaries and libraries from their debug symbols - { - find "${DISTNAME}/bin" -type f -executable -print0 - find "${DISTNAME}/lib" -type f -print0 - } | xargs -0 -n1 -P"$MAX_JOBS" -I{} "${DISTSRC}/contrib/devtools/split-debug.sh" {} {} {}.dbg - case "$HOST" in *mingw*) cp "${DISTSRC}/doc/README_windows.txt" "${DISTNAME}/readme.txt" @@ -263,6 +324,12 @@ export PATH="${BASEPREFIX}/${HOST}/native/bin:${PATH}" ;; esac + # copy over the example bitcoin.conf file. if contrib/devtools/gen-bitcoin-conf.sh + # has not been run before buildling, this file will be a stub + cp "${DISTSRC}/share/examples/bitcoin.conf" "${DISTNAME}/" + + cp -r "${DISTSRC}/share/rpcauth" "${DISTNAME}/share/" + # Finally, deterministically produce {non-,}debug binary tarballs ready # for release case "$HOST" in @@ -292,22 +359,44 @@ export PATH="${BASEPREFIX}/${HOST}/native/bin:${PATH}" | gzip -9n > "${OUTDIR}/${DISTNAME}-${HOST}-debug.tar.gz" \ || ( rm -f "${OUTDIR}/${DISTNAME}-${HOST}-debug.tar.gz" && exit 1 ) ;; + *darwin*) + find "${DISTNAME}" -print0 \ + | sort --zero-terminated \ + | tar --create --no-recursion --mode='u+rw,go+r-w,a+X' --null --files-from=- \ + | gzip -9n > "${OUTDIR}/${DISTNAME}-${HOST}.tar.gz" \ + || ( rm -f "${OUTDIR}/${DISTNAME}-${HOST}.tar.gz" && exit 1 ) + ;; esac - ) -) + ) # $DISTSRC/installed -case "$HOST" in - *mingw*) - cp -rf --target-directory=. contrib/windeploy - ( - cd ./windeploy - mkdir unsigned - cp --target-directory=unsigned/ "${OUTDIR}/${DISTNAME}-win64-setup-unsigned.exe" - find . -print0 \ - | sort --zero-terminated \ - | tar --create --no-recursion --mode='u+rw,go+r-w,a+X' --null --files-from=- \ - | gzip -9n > "${OUTDIR}/${DISTNAME}-win-unsigned.tar.gz" \ - || ( rm -f "${OUTDIR}/${DISTNAME}-win-unsigned.tar.gz" && exit 1 ) - ) - ;; -esac + case "$HOST" in + *mingw*) + cp -rf --target-directory=. contrib/windeploy + ( + cd ./windeploy + mkdir -p unsigned + cp --target-directory=unsigned/ "${OUTDIR}/${DISTNAME}-win64-setup-unsigned.exe" + find . 
-print0 \ + | sort --zero-terminated \ + | tar --create --no-recursion --mode='u+rw,go+r-w,a+X' --null --files-from=- \ + | gzip -9n > "${OUTDIR}/${DISTNAME}-win64-unsigned.tar.gz" \ + || ( rm -f "${OUTDIR}/${DISTNAME}-win64-unsigned.tar.gz" && exit 1 ) + ) + ;; + esac +) # $DISTSRC + +rm -rf "$ACTUAL_OUTDIR" +mv --no-target-directory "$OUTDIR" "$ACTUAL_OUTDIR" \ + || ( rm -rf "$ACTUAL_OUTDIR" && exit 1 ) + +( + cd /outdir-base + { + echo "$GIT_ARCHIVE" + find "$ACTUAL_OUTDIR" -type f + } | xargs realpath --relative-base="$PWD" \ + | xargs sha256sum \ + | sort -k2 \ + | sponge "$ACTUAL_OUTDIR"/SHA256SUMS.part +) diff --git a/contrib/guix/libexec/codesign.sh b/contrib/guix/libexec/codesign.sh new file mode 100755 index 0000000000000..b56d2a2309442 --- /dev/null +++ b/contrib/guix/libexec/codesign.sh @@ -0,0 +1,115 @@ +#!/usr/bin/env bash +# Copyright (c) 2021-2022 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +export LC_ALL=C +set -e -o pipefail +export TZ=UTC + +# Although Guix _does_ set umask when building its own packages (in our case, +# this is all packages in manifest.scm), it does not set it for `guix +# shell`. It does make sense for at least `guix shell --container` +# to set umask, so if that change gets merged upstream and we bump the +# time-machine to a commit which includes the aforementioned change, we can +# remove this line. +# +# This line should be placed before any commands which creates files. +umask 0022 + +if [ -n "$V" ]; then + # Print both unexpanded (-v) and expanded (-x) forms of commands as they are + # read from this file. + set -vx + # Set VERBOSE for CMake-based builds + export VERBOSE="$V" +fi + +# Check that required environment variables are set +cat << EOF +Required environment variables as seen inside the container: + UNSIGNED_TARBALL: ${UNSIGNED_TARBALL:?not set} + DETACHED_SIGS_REPO: ${DETACHED_SIGS_REPO:?not set} + DIST_ARCHIVE_BASE: ${DIST_ARCHIVE_BASE:?not set} + DISTNAME: ${DISTNAME:?not set} + HOST: ${HOST:?not set} + SOURCE_DATE_EPOCH: ${SOURCE_DATE_EPOCH:?not set} + DISTSRC: ${DISTSRC:?not set} + OUTDIR: ${OUTDIR:?not set} +EOF + +ACTUAL_OUTDIR="${OUTDIR}" +OUTDIR="${DISTSRC}/output" + +git_head_version() { + local recent_tag + if recent_tag="$(git -C "$1" describe --exact-match HEAD 2> /dev/null)"; then + echo "${recent_tag#v}" + else + git -C "$1" rev-parse --short=12 HEAD + fi +} + +CODESIGNATURE_GIT_ARCHIVE="${DIST_ARCHIVE_BASE}/${DISTNAME}-codesignatures-$(git_head_version "$DETACHED_SIGS_REPO").tar.gz" + +# Create the codesignature tarball if not already there +if [ ! 
-e "$CODESIGNATURE_GIT_ARCHIVE" ]; then + mkdir -p "$(dirname "$CODESIGNATURE_GIT_ARCHIVE")" + git -C "$DETACHED_SIGS_REPO" archive --output="$CODESIGNATURE_GIT_ARCHIVE" HEAD +fi + +mkdir -p "$OUTDIR" + +mkdir -p "$DISTSRC" +( + cd "$DISTSRC" + + tar -xf "$UNSIGNED_TARBALL" + + mkdir -p codesignatures + tar -C codesignatures -xf "$CODESIGNATURE_GIT_ARCHIVE" + + case "$HOST" in + *mingw*) + find "$PWD" -name "*-unsigned.exe" | while read -r infile; do + infile_base="$(basename "$infile")" + + # Codesigned *-unsigned.exe and output to OUTDIR + osslsigncode attach-signature \ + -in "$infile" \ + -out "${OUTDIR}/${infile_base/-unsigned}" \ + -CAfile "$GUIX_ENVIRONMENT/etc/ssl/certs/ca-certificates.crt" \ + -sigin codesignatures/win/"$infile_base".pem + done + ;; + *darwin*) + # Apply detached codesignatures to dist/ (in-place) + signapple apply dist/Bitcoin-Qt.app codesignatures/osx/dist + + # Make a .zip from dist/ + cd dist/ + find . -print0 \ + | xargs -0r touch --no-dereference --date="@${SOURCE_DATE_EPOCH}" + find . | sort \ + | zip -X@ "${OUTDIR}/${DISTNAME}-${HOST}.zip" + ;; + *) + exit 1 + ;; + esac +) # $DISTSRC + +rm -rf "$ACTUAL_OUTDIR" +mv --no-target-directory "$OUTDIR" "$ACTUAL_OUTDIR" \ + || ( rm -rf "$ACTUAL_OUTDIR" && exit 1 ) + +( + cd /outdir-base + { + echo "$UNSIGNED_TARBALL" + echo "$CODESIGNATURE_GIT_ARCHIVE" + find "$ACTUAL_OUTDIR" -type f + } | xargs realpath --relative-base="$PWD" \ + | xargs sha256sum \ + | sort -k2 \ + | sponge "$ACTUAL_OUTDIR"/SHA256SUMS.part +) diff --git a/contrib/guix/libexec/prelude.bash b/contrib/guix/libexec/prelude.bash new file mode 100644 index 0000000000000..428fc41e7393a --- /dev/null +++ b/contrib/guix/libexec/prelude.bash @@ -0,0 +1,82 @@ +#!/usr/bin/env bash +export LC_ALL=C +set -e -o pipefail + +# shellcheck source=contrib/shell/realpath.bash +source contrib/shell/realpath.bash + +# shellcheck source=contrib/shell/git-utils.bash +source contrib/shell/git-utils.bash + +################ +# Required non-builtin commands should be invocable +################ + +check_tools() { + for cmd in "$@"; do + if ! command -v "$cmd" > /dev/null 2>&1; then + echo "ERR: This script requires that '$cmd' is installed and available in your \$PATH" + exit 1 + fi + done +} + +check_tools cat env readlink dirname basename git + +################ +# We should be at the top directory of the repository +################ + +same_dir() { + local resolved1 resolved2 + resolved1="$(bash_realpath "${1}")" + resolved2="$(bash_realpath "${2}")" + [ "$resolved1" = "$resolved2" ] +} + +if ! same_dir "${PWD}" "$(git_root)"; then +cat << EOF +ERR: This script must be invoked from the top level of the git repository + +Hint: This may look something like: + env FOO=BAR ./contrib/guix/guix- + +EOF +exit 1 +fi + +################ +# Execute "$@" in a pinned, possibly older version of Guix, for reproducibility +# across time. 
+time-machine() { + # shellcheck disable=SC2086 + guix time-machine --url=https://git.savannah.gnu.org/git/guix.git \ + --commit=53396a22afc04536ddf75d8f82ad2eafa5082725 \ + --cores="$JOBS" \ + --keep-failed \ + --fallback \ + ${SUBSTITUTE_URLS:+--substitute-urls="$SUBSTITUTE_URLS"} \ + ${ADDITIONAL_GUIX_COMMON_FLAGS} ${ADDITIONAL_GUIX_TIMEMACHINE_FLAGS} \ + -- "$@" +} + + +################ +# Set common variables +################ + +VERSION="${FORCE_VERSION:-$(git_head_version)}" +DISTNAME="${DISTNAME:-bitcoin-${VERSION}}" + +version_base_prefix="${PWD}/guix-build-" +VERSION_BASE="${version_base_prefix}${VERSION}" # TOP + +DISTSRC_BASE="${DISTSRC_BASE:-${VERSION_BASE}}" + +OUTDIR_BASE="${OUTDIR_BASE:-${VERSION_BASE}/output}" + +var_base_basename="var" +VAR_BASE="${VAR_BASE:-${VERSION_BASE}/${var_base_basename}}" + +profiles_base_basename="profiles" +PROFILES_BASE="${PROFILES_BASE:-${VAR_BASE}/${profiles_base_basename}}" diff --git a/contrib/guix/manifest.scm b/contrib/guix/manifest.scm index 5e011ea184898..3da98cf65103a 100644 --- a/contrib/guix/manifest.scm +++ b/contrib/guix/manifest.scm @@ -1,60 +1,43 @@ -(use-modules (gnu) - (gnu packages) - (gnu packages autotools) - (gnu packages base) - (gnu packages bash) - (gnu packages check) +(use-modules (gnu packages) + ((gnu packages bash) #:select (bash-minimal)) + (gnu packages bison) + ((gnu packages certs) #:select (nss-certs)) + ((gnu packages cmake) #:select (cmake-minimal)) (gnu packages commencement) (gnu packages compression) (gnu packages cross-base) (gnu packages file) (gnu packages gawk) (gnu packages gcc) - (gnu packages installers) - (gnu packages linux) + ((gnu packages installers) #:select (nsis-x86_64)) + ((gnu packages linux) #:select (linux-libre-headers-6.1 util-linux)) + (gnu packages llvm) (gnu packages mingw) - (gnu packages perl) + (gnu packages moreutils) (gnu packages pkg-config) - (gnu packages python) - (gnu packages shells) - (gnu packages version-control) - (guix build-system gnu) + ((gnu packages python) #:select (python-minimal)) + ((gnu packages python-build) #:select (python-tomli)) + ((gnu packages python-crypto) #:select (python-asn1crypto)) + ((gnu packages tls) #:select (openssl)) + ((gnu packages version-control) #:select (git-minimal)) + (guix build-system cmake) + (guix build-system python) (guix build-system trivial) + (guix download) (guix gexp) + (guix git-download) + ((guix licenses) #:prefix license:) (guix packages) - (guix profiles) - (guix utils)) + ((guix utils) #:select (substitute-keyword-arguments))) -(define (make-ssp-fixed-gcc xgcc) - "Given a XGCC package, return a modified package that uses the SSP function -from glibc instead of from libssp.so. Our `symbol-check' script will complain if -we link against libssp.so, and thus will ensure that this works properly. +(define-syntax-rule (search-our-patches file-name ...) + "Return the list of absolute file names corresponding to each +FILE-NAME found in ./patches relative to the current file." 
+ (parameterize + ((%patch-path (list (string-append (dirname (current-filename)) "/patches")))) + (list (search-patch file-name) ...))) -Taken from: -http://www.linuxfromscratch.org/hlfs/view/development/chapter05/gcc-pass1.html" - (package - (inherit xgcc) - (arguments - (substitute-keyword-arguments (package-arguments xgcc) - ((#:make-flags flags) - `(cons "gcc_cv_libc_provides_ssp=yes" ,flags)))))) - -(define (make-gcc-rpath-link xgcc) - "Given a XGCC package, return a modified package that replace each instance of --rpath in the default system spec that's inserted by Guix with -rpath-link" - (package - (inherit xgcc) - (arguments - (substitute-keyword-arguments (package-arguments xgcc) - ((#:phases phases) - `(modify-phases ,phases - (add-after 'pre-configure 'replace-rpath-with-rpath-link - (lambda _ - (substitute* (cons "gcc/config/rs6000/sysv4.h" - (find-files "gcc/config" - "^gnu-user.*\\.h$")) - (("-rpath=") "-rpath-link=")) - #t)))))))) +(define building-on (string-append "--build=" (list-ref (string-split (%current-system) #\-) 0) "-guix-linux-gnu")) (define (make-cross-toolchain target base-gcc-for-libc @@ -71,16 +54,16 @@ http://www.linuxfromscratch.org/hlfs/view/development/chapter05/gcc-pass1.html" ;; 2. Build cross-compiled kernel headers with XGCC-SANS-LIBC, derived ;; from BASE-KERNEL-HEADERS (xkernel (cross-kernel-headers target - base-kernel-headers - xgcc-sans-libc - xbinutils)) + #:linux-headers base-kernel-headers + #:xgcc xgcc-sans-libc + #:xbinutils xbinutils)) ;; 3. Build a cross-compiled libc with XGCC-SANS-LIBC and XKERNEL, ;; derived from BASE-LIBC (xlibc (cross-libc target - base-libc - xgcc-sans-libc - xbinutils - xkernel)) + #:libc base-libc + #:xgcc xgcc-sans-libc + #:xbinutils xbinutils + #:xheaders xkernel)) ;; 4. Build a cross-compiling gcc targeting XLIBC, derived from ;; BASE-GCC (xgcc (cross-gcc target @@ -96,42 +79,58 @@ http://www.linuxfromscratch.org/hlfs/view/development/chapter05/gcc-pass1.html" (build-system trivial-build-system) (arguments '(#:builder (begin (mkdir %output) #t))) (propagated-inputs - `(("binutils" ,xbinutils) - ("libc" ,xlibc) - ("libc:static" ,xlibc "static") - ("gcc" ,xgcc))) + (list xbinutils + xlibc + xgcc + `(,xlibc "static") + `(,xgcc "lib"))) (synopsis (string-append "Complete GCC tool chain for " target)) (description (string-append "This package provides a complete GCC tool chain for " target " development.")) (home-page (package-home-page xgcc)) (license (package-license xgcc))))) +(define base-gcc gcc-12) ;; 12.4.0 + +(define base-linux-kernel-headers linux-libre-headers-6.1) + (define* (make-bitcoin-cross-toolchain target - #:key - (base-gcc-for-libc gcc-5) - (base-kernel-headers linux-libre-headers-4.19) - (base-libc glibc-2.27) - (base-gcc (make-gcc-rpath-link gcc-9))) + #:key + (base-gcc-for-libc linux-base-gcc) + (base-kernel-headers base-linux-kernel-headers) + (base-libc glibc-2.31) + (base-gcc linux-base-gcc)) "Convenience wrapper around MAKE-CROSS-TOOLCHAIN with default values desirable for building Bitcoin Core release binaries." 
(make-cross-toolchain target - base-gcc-for-libc - base-kernel-headers - base-libc - base-gcc)) + base-gcc-for-libc + base-kernel-headers + base-libc + base-gcc)) -(define (make-gcc-with-pthreads gcc) - (package-with-extra-configure-variable gcc "--enable-threads" "posix")) +(define (gcc-mingw-patches gcc) + (package-with-extra-patches gcc + (search-our-patches "gcc-remap-guix-store.patch"))) + +(define (binutils-mingw-patches binutils) + (package-with-extra-patches binutils + (search-our-patches "binutils-unaligned-default.patch"))) + +(define (winpthreads-patches mingw-w64-x86_64-winpthreads) + (package-with-extra-patches mingw-w64-x86_64-winpthreads + (search-our-patches "winpthreads-remap-guix-store.patch"))) (define (make-mingw-pthreads-cross-toolchain target) "Create a cross-compilation toolchain package for TARGET" - (let* ((xbinutils (cross-binutils target)) - (pthreads-xlibc mingw-w64-x86_64-winpthreads) - (pthreads-xgcc (make-gcc-with-pthreads - (cross-gcc target - #:xgcc (make-ssp-fixed-gcc gcc-9) + (let* ((xbinutils (binutils-mingw-patches (cross-binutils target))) + (machine (substring target 0 (string-index target #\-))) + (pthreads-xlibc (winpthreads-patches (make-mingw-w64 machine + #:xgcc (cross-gcc target #:xgcc (gcc-mingw-patches base-gcc)) + #:with-winpthreads? #t))) + (pthreads-xgcc (cross-gcc target + #:xgcc (gcc-mingw-patches mingw-w64-base-gcc) #:xbinutils xbinutils - #:libc pthreads-xlibc)))) + #:libc pthreads-xlibc))) ;; Define a meta-package that propagates the resulting XBINUTILS, XLIBC, and ;; XGCC (package @@ -141,22 +140,358 @@ desirable for building Bitcoin Core release binaries." (build-system trivial-build-system) (arguments '(#:builder (begin (mkdir %output) #t))) (propagated-inputs - `(("binutils" ,xbinutils) - ("libc" ,pthreads-xlibc) - ("gcc" ,pthreads-xgcc))) + (list xbinutils + pthreads-xlibc + pthreads-xgcc + `(,pthreads-xgcc "lib"))) (synopsis (string-append "Complete GCC tool chain for " target)) (description (string-append "This package provides a complete GCC tool chain for " target " development.")) (home-page (package-home-page pthreads-xgcc)) (license (package-license pthreads-xgcc))))) +;; While LIEF is packaged in Guix, we maintain our own package, +;; to simplify building, and more easily apply updates. +;; Moreover, the Guix's package uses cmake, which caused build +;; failure; see https://github.com/bitcoin/bitcoin/pull/27296. +(define-public python-lief + (package + (name "python-lief") + (version "0.13.2") + (source (origin + (method git-fetch) + (uri (git-reference + (url "https://github.com/lief-project/LIEF") + (commit version))) + (file-name (git-file-name name version)) + (modules '((guix build utils))) + (snippet + '(begin + ;; Configure build for Python bindings. + (substitute* "api/python/config-default.toml" + (("(ninja = )true" all m) + (string-append m "false")) + (("(parallel-jobs = )0" all m) + (string-append m (number->string (parallel-job-count))))))) + (sha256 + (base32 + "0y48x358ppig5xp97ahcphfipx7cg9chldj2q5zrmn610fmi4zll")))) + (build-system python-build-system) + (native-inputs (list cmake-minimal python-tomli)) + (arguments + (list + #:tests? 
#f ;needs network + #:phases #~(modify-phases %standard-phases + (add-before 'build 'change-directory + (lambda _ + (chdir "api/python"))) + (replace 'build + (lambda _ + (invoke "python" "setup.py" "build")))))) + (home-page "https://github.com/lief-project/LIEF") + (synopsis "Library to instrument executable formats") + (description + "@code{python-lief} is a cross platform library which can parse, modify +and abstract ELF, PE and MachO formats.") + (license license:asl2.0))) + +(define osslsigncode + (package + (name "osslsigncode") + (version "2.5") + (source (origin + (method git-fetch) + (uri (git-reference + (url "https://github.com/mtrojnar/osslsigncode") + (commit version))) + (sha256 + (base32 + "1j47vwq4caxfv0xw68kw5yh00qcpbd56d7rq6c483ma3y7s96yyz")))) + (build-system cmake-build-system) + (inputs (list openssl)) + (home-page "https://github.com/mtrojnar/osslsigncode") + (synopsis "Authenticode signing and timestamping tool") + (description "osslsigncode is a small tool that implements part of the +functionality of the Microsoft tool signtool.exe - more exactly the Authenticode +signing and timestamping. But osslsigncode is based on OpenSSL and cURL, and +thus should be able to compile on most platforms where these exist.") + (license license:gpl3+))) ; license is with openssl exception + +(define-public python-elfesteem + (let ((commit "2eb1e5384ff7a220fd1afacd4a0170acff54fe56")) + (package + (name "python-elfesteem") + (version (git-version "0.1" "1" commit)) + (source + (origin + (method git-fetch) + (uri (git-reference + (url "https://github.com/LRGH/elfesteem") + (commit commit))) + (file-name (git-file-name name commit)) + (sha256 + (base32 + "07x6p8clh11z8s1n2kdxrqwqm2almgc5qpkcr9ckb6y5ivjdr5r6")))) + (build-system python-build-system) + ;; There are no tests, but attempting to run python setup.py test leads to + ;; PYTHONPATH problems, just disable the test + (arguments '(#:tests? #f)) + (home-page "https://github.com/LRGH/elfesteem") + (synopsis "ELF/PE/Mach-O parsing library") + (description "elfesteem parses ELF, PE and Mach-O files.") + (license license:lgpl2.1)))) + +(define-public python-oscrypto + (package + (name "python-oscrypto") + (version "1.3.0") + (source + (origin + (method git-fetch) + (uri (git-reference + (url "https://github.com/wbond/oscrypto") + (commit version))) + (file-name (git-file-name name version)) + (sha256 + (base32 + "1v5wkmzcyiqy39db8j2dvkdrv2nlsc48556h73x4dzjwd6kg4q0a")) + (patches (search-our-patches "oscrypto-hard-code-openssl.patch")))) + (build-system python-build-system) + (native-search-paths + (list (search-path-specification + (variable "SSL_CERT_FILE") + (file-type 'regular) + (separator #f) ;single entry + (files '("etc/ssl/certs/ca-certificates.crt"))))) + + (propagated-inputs + (list python-asn1crypto openssl)) + (arguments + `(#:phases + (modify-phases %standard-phases + (add-after 'unpack 'hard-code-path-to-libscrypt + (lambda* (#:key inputs #:allow-other-keys) + (let ((openssl (assoc-ref inputs "openssl"))) + (substitute* "oscrypto/__init__.py" + (("@GUIX_OSCRYPTO_USE_OPENSSL@") + (string-append openssl "/lib/libcrypto.so" "," openssl "/lib/libssl.so"))) + #t))) + (add-after 'unpack 'disable-broken-tests + (lambda _ + ;; This test is broken as there is no keyboard interrupt. 
+ (substitute* "tests/test_trust_list.py" + (("^(.*)class TrustListTests" line indent) + (string-append indent + "@unittest.skip(\"Disabled by Guix\")\n" + line))) + (substitute* "tests/test_tls.py" + (("^(.*)class TLSTests" line indent) + (string-append indent + "@unittest.skip(\"Disabled by Guix\")\n" + line))) + #t)) + (replace 'check + (lambda _ + (invoke "python" "run.py" "tests") + #t))))) + (home-page "https://github.com/wbond/oscrypto") + (synopsis "Compiler-free Python crypto library backed by the OS") + (description "oscrypto is a compilation-free, always up-to-date encryption library for Python.") + (license license:expat))) + +(define-public python-oscryptotests + (package (inherit python-oscrypto) + (name "python-oscryptotests") + (propagated-inputs + (list python-oscrypto)) + (arguments + `(#:tests? #f + #:phases + (modify-phases %standard-phases + (add-after 'unpack 'hard-code-path-to-libscrypt + (lambda* (#:key inputs #:allow-other-keys) + (chdir "tests") + #t))))))) + +(define-public python-certvalidator + (let ((commit "a145bf25eb75a9f014b3e7678826132efbba6213")) + (package + (name "python-certvalidator") + (version (git-version "0.1" "1" commit)) + (source + (origin + (method git-fetch) + (uri (git-reference + (url "https://github.com/achow101/certvalidator") + (commit commit))) + (file-name (git-file-name name commit)) + (sha256 + (base32 + "1qw2k7xis53179lpqdqyylbcmp76lj7sagp883wmxg5i7chhc96k")))) + (build-system python-build-system) + (propagated-inputs + (list python-asn1crypto + python-oscrypto + python-oscryptotests)) ;; certvalidator tests import oscryptotests + (arguments + `(#:phases + (modify-phases %standard-phases + (add-after 'unpack 'disable-broken-tests + (lambda _ + (substitute* "tests/test_certificate_validator.py" + (("^(.*)class CertificateValidatorTests" line indent) + (string-append indent + "@unittest.skip(\"Disabled by Guix\")\n" + line))) + (substitute* "tests/test_crl_client.py" + (("^(.*)def test_fetch_crl" line indent) + (string-append indent + "@unittest.skip(\"Disabled by Guix\")\n" + line))) + (substitute* "tests/test_ocsp_client.py" + (("^(.*)def test_fetch_ocsp" line indent) + (string-append indent + "@unittest.skip(\"Disabled by Guix\")\n" + line))) + (substitute* "tests/test_registry.py" + (("^(.*)def test_build_paths" line indent) + (string-append indent + "@unittest.skip(\"Disabled by Guix\")\n" + line))) + (substitute* "tests/test_validate.py" + (("^(.*)def test_revocation_mode_hard" line indent) + (string-append indent + "@unittest.skip(\"Disabled by Guix\")\n" + line))) + (substitute* "tests/test_validate.py" + (("^(.*)def test_revocation_mode_soft" line indent) + (string-append indent + "@unittest.skip(\"Disabled by Guix\")\n" + line))) + #t)) + (replace 'check + (lambda _ + (invoke "python" "run.py" "tests") + #t))))) + (home-page "https://github.com/wbond/certvalidator") + (synopsis "Python library for validating X.509 certificates and paths") + (description "certvalidator is a Python library for validating X.509 +certificates or paths. 
Supports various options, including: validation at a +specific moment in time, whitelisting and revocation checks.") + (license license:expat)))) + +(define-public python-signapple + (let ((commit "62155712e7417aba07565c9780a80e452823ae6a")) + (package + (name "python-signapple") + (version (git-version "0.1" "1" commit)) + (source + (origin + (method git-fetch) + (uri (git-reference + (url "https://github.com/achow101/signapple") + (commit commit))) + (file-name (git-file-name name commit)) + (sha256 + (base32 + "1nm6rm4h4m7kbq729si4cm8rzild62mk4ni8xr5zja7l33fhv3gb")))) + (build-system python-build-system) + (propagated-inputs + (list python-asn1crypto + python-oscrypto + python-certvalidator + python-elfesteem)) + ;; There are no tests, but attempting to run python setup.py test leads to + ;; problems, just disable the test + (arguments '(#:tests? #f)) + (home-page "https://github.com/achow101/signapple") + (synopsis "Mach-O binary signature tool") + (description "signapple is a Python tool for creating, verifying, and +inspecting signatures in Mach-O binaries.") + (license license:expat)))) + +(define-public mingw-w64-base-gcc + (package + (inherit base-gcc) + (arguments + (substitute-keyword-arguments (package-arguments base-gcc) + ((#:configure-flags flags) + `(append ,flags + ;; https://gcc.gnu.org/install/configure.html + (list "--enable-threads=posix", + "--enable-default-ssp=yes", + building-on))))))) + +(define-public linux-base-gcc + (package + (inherit base-gcc) + (arguments + (substitute-keyword-arguments (package-arguments base-gcc) + ((#:configure-flags flags) + `(append ,flags + ;; https://gcc.gnu.org/install/configure.html + (list "--enable-initfini-array=yes", + "--enable-default-ssp=yes", + "--enable-default-pie=yes", + "--enable-standard-branch-protection=yes", + "--enable-cet=yes", + building-on))) + ((#:phases phases) + `(modify-phases ,phases + ;; Given a XGCC package, return a modified package that replace each instance of + ;; -rpath in the default system spec that's inserted by Guix with -rpath-link + (add-after 'pre-configure 'replace-rpath-with-rpath-link + (lambda _ + (substitute* (cons "gcc/config/rs6000/sysv4.h" + (find-files "gcc/config" + "^gnu-user.*\\.h$")) + (("-rpath=") "-rpath-link=")) + #t)))))))) + +(define-public glibc-2.31 + (let ((commit "8e30f03744837a85e33d84ccd34ed3abe30d37c3")) + (package + (inherit glibc) ;; 2.35 + (version "2.31") + (source (origin + (method git-fetch) + (uri (git-reference + (url "https://sourceware.org/git/glibc.git") + (commit commit))) + (file-name (git-file-name "glibc" commit)) + (sha256 + (base32 + "1zi0s9yy5zkisw823vivn7zlj8w6g9p3mm7lmlqiixcxdkz4dbn6")) + (patches (search-our-patches "glibc-guix-prefix.patch")))) + (arguments + (substitute-keyword-arguments (package-arguments glibc) + ((#:configure-flags flags) + `(append ,flags + ;; https://www.gnu.org/software/libc/manual/html_node/Configuring-and-compiling.html + (list "--enable-stack-protector=all", + "--enable-bind-now", + "--disable-werror", + building-on))) + ((#:phases phases) + `(modify-phases ,phases + (add-before 'configure 'set-etc-rpc-installation-directory + (lambda* (#:key outputs #:allow-other-keys) + ;; Install the rpc data base file under `$out/etc/rpc'. + ;; Otherwise build will fail with "Permission denied." + ;; Can be removed when we are building 2.32 or later. 
+ (let ((out (assoc-ref outputs "out"))) + (substitute* "sunrpc/Makefile" + (("^\\$\\(inst_sysconfdir\\)/rpc(.*)$" _ suffix) + (string-append out "/etc/rpc" suffix "\n")) + (("^install-others =.*$") + (string-append "install-others = " out "/etc/rpc\n"))))))))))))) (packages->manifest (append (list ;; The Basics bash-minimal which - coreutils + coreutils-minimal util-linux ;; File(system) inspection file @@ -167,32 +502,37 @@ chain for " target " development.")) patch gawk sed + moreutils ;; Compression and archiving tar - bzip2 gzip xz - zlib ;; Build tools + gcc-toolchain-12 + cmake-minimal gnu-make - libtool - autoconf - automake pkg-config ;; Scripting - perl - python-3.7 + python-minimal ;; (3.10) ;; Git - git - ;; Native gcc 9 toolchain targeting glibc 2.27 - (make-gcc-toolchain gcc-9 glibc-2.27)) + git-minimal + ;; Tests + python-lief) (let ((target (getenv "HOST"))) (cond ((string-suffix? "-mingw32" target) - ;; Windows - (list zip (make-mingw-pthreads-cross-toolchain "x86_64-w64-mingw32") nsis-x86_64)) - ((string-contains target "riscv64-linux-") - (list (make-bitcoin-cross-toolchain "riscv64-linux-gnu" - #:base-gcc-for-libc gcc-7))) + (list zip + (make-mingw-pthreads-cross-toolchain "x86_64-w64-mingw32") + nsis-x86_64 + nss-certs + osslsigncode)) ((string-contains target "-linux-") - (list (make-bitcoin-cross-toolchain target))) + (list bison + (list gcc-toolchain-12 "static") + (make-bitcoin-cross-toolchain target))) + ((string-contains target "darwin") + (list clang-toolchain-18 + lld-18 + (make-lld-wrapper lld-18 #:lld-as-ld? #t) + python-signapple + zip)) (else '()))))) diff --git a/contrib/guix/patches/binutils-unaligned-default.patch b/contrib/guix/patches/binutils-unaligned-default.patch new file mode 100644 index 0000000000000..d1bc71aee142d --- /dev/null +++ b/contrib/guix/patches/binutils-unaligned-default.patch @@ -0,0 +1,22 @@ +commit 6537181f59ed186a341db621812a6bc35e22eaf6 +Author: fanquake +Date: Wed Apr 10 12:15:52 2024 +0200 + + build: turn on -muse-unaligned-vector-move by default + + This allows us to avoid (more invasively) patching GCC, to avoid + unaligned instruction use. + +diff --git a/gas/config/tc-i386.c b/gas/config/tc-i386.c +index e0632681477..14a9653abdf 100644 +--- a/gas/config/tc-i386.c ++++ b/gas/config/tc-i386.c +@@ -801,7 +801,7 @@ static unsigned int no_cond_jump_promotion = 0; + static unsigned int sse2avx; + + /* Encode aligned vector move as unaligned vector move. */ +-static unsigned int use_unaligned_vector_move; ++static unsigned int use_unaligned_vector_move = 1; + + /* Encode scalar AVX instructions with specific vector length. */ + static enum diff --git a/contrib/guix/patches/gcc-remap-guix-store.patch b/contrib/guix/patches/gcc-remap-guix-store.patch new file mode 100644 index 0000000000000..a8b41d485b046 --- /dev/null +++ b/contrib/guix/patches/gcc-remap-guix-store.patch @@ -0,0 +1,20 @@ +Without ffile-prefix-map, the debug symbols will contain paths for the +guix store which will include the hashes of each package. However, the +hash for the same package will differ when on different architectures. +In order to be reproducible regardless of the architecture used to build +the package, map all guix store prefixes to something fixed, e.g. /usr. + +--- a/libgcc/Makefile.in ++++ b/libgcc/Makefile.in +@@ -854,7 +854,7 @@ endif + # libgcc_eh.a, only LIB2ADDEH matters. If we do, only LIB2ADDEHSTATIC and + # LIB2ADDEHSHARED matter. (Usually all three are identical.) 
+ +-c_flags := -fexceptions ++c_flags := -fexceptions $(shell find /gnu/store -maxdepth 1 -mindepth 1 -type d -exec echo -n " -ffile-prefix-map={}=/usr" \;) + + ifeq ($(enable_shared),yes) + +-- +2.37.0 + diff --git a/contrib/guix/patches/glibc-guix-prefix.patch b/contrib/guix/patches/glibc-guix-prefix.patch new file mode 100644 index 0000000000000..60e12ca525465 --- /dev/null +++ b/contrib/guix/patches/glibc-guix-prefix.patch @@ -0,0 +1,16 @@ +Without ffile-prefix-map, the debug symbols will contain paths for the +guix store which will include the hashes of each package. However, the +hash for the same package will differ when on different architectures. +In order to be reproducible regardless of the architecture used to build +the package, map all guix store prefixes to something fixed, e.g. /usr. + +--- a/Makeconfig ++++ b/Makeconfig +@@ -1007,6 +1007,7 @@ object-suffixes := + CPPFLAGS-.o = $(pic-default) + # libc.a must be compiled with -fPIE/-fpie for static PIE. + CFLAGS-.o = $(filter %frame-pointer,$(+cflags)) $(pie-default) ++CFLAGS-.o += `find /gnu/store -maxdepth 1 -mindepth 1 -type d -exec echo -n " -ffile-prefix-map={}=/usr" \;` + libtype.o := lib%.a + object-suffixes += .o + ifeq (yes,$(build-shared)) diff --git a/contrib/guix/patches/oscrypto-hard-code-openssl.patch b/contrib/guix/patches/oscrypto-hard-code-openssl.patch new file mode 100644 index 0000000000000..32027f2d09af1 --- /dev/null +++ b/contrib/guix/patches/oscrypto-hard-code-openssl.patch @@ -0,0 +1,13 @@ +diff --git a/oscrypto/__init__.py b/oscrypto/__init__.py +index eb27313..371ab24 100644 +--- a/oscrypto/__init__.py ++++ b/oscrypto/__init__.py +@@ -302,3 +302,8 @@ def load_order(): + 'oscrypto._win.tls', + 'oscrypto.tls', + ] ++ ++ ++paths = '@GUIX_OSCRYPTO_USE_OPENSSL@'.split(',') ++assert len(paths) == 2, 'Value for OSCRYPTO_USE_OPENSSL env var must be two paths separated by a comma' ++use_openssl(*paths) diff --git a/contrib/guix/patches/winpthreads-remap-guix-store.patch b/contrib/guix/patches/winpthreads-remap-guix-store.patch new file mode 100644 index 0000000000000..e1f1a6eba5314 --- /dev/null +++ b/contrib/guix/patches/winpthreads-remap-guix-store.patch @@ -0,0 +1,17 @@ +Without ffile-prefix-map, the debug symbols will contain paths for the +guix store which will include the hashes of each package. However, the +hash for the same package will differ when on different architectures. +In order to be reproducible regardless of the architecture used to build +the package, map all guix store prefixes to something fixed, e.g. /usr. + +--- a/mingw-w64-libraries/winpthreads/Makefile.in ++++ b/mingw-w64-libraries/winpthreads/Makefile.in +@@ -478,7 +478,7 @@ top_build_prefix = @top_build_prefix@ + top_builddir = @top_builddir@ + top_srcdir = @top_srcdir@ + SUBDIRS = . 
tests +-AM_CFLAGS = -Wall -DWIN32_LEAN_AND_MEAN $(am__append_1) ++AM_CFLAGS = -Wall -DWIN32_LEAN_AND_MEAN $(am__append_1) $(shell find /gnu/store -maxdepth 1 -mindepth 1 -type d -exec echo -n " -ffile-prefix-map={}=/usr" \;) + ACLOCAL_AMFLAGS = -I m4 + lib_LTLIBRARIES = libwinpthread.la + include_HEADERS = include/pthread.h include/sched.h include/semaphore.h include/pthread_unistd.h include/pthread_time.h include/pthread_compat.h include/pthread_signal.h diff --git a/contrib/init/README.md b/contrib/init/README.md index 306a37f75ab70..affc7c2e75063 100644 --- a/contrib/init/README.md +++ b/contrib/init/README.md @@ -1,6 +1,6 @@ Sample configuration files for: ``` -SystemD: bitcoind.service +systemd: bitcoind.service Upstart: bitcoind.conf OpenRC: bitcoind.openrc bitcoind.openrcconf @@ -9,4 +9,4 @@ macOS: org.bitcoin.bitcoind.plist ``` have been made available to assist packagers in creating node packages here. -See doc/init.md for more information. +See [doc/init.md](../../doc/init.md) for more information. diff --git a/contrib/init/bitcoind.openrc b/contrib/init/bitcoind.openrc index 86222295dbe05..013a1a607027c 100644 --- a/contrib/init/bitcoind.openrc +++ b/contrib/init/bitcoind.openrc @@ -60,16 +60,17 @@ start_pre() { "${BITCOIND_PIDDIR}" checkpath -f \ - -o ${BITCOIND_USER}:${BITCOIND_GROUP} \ + -o "${BITCOIND_USER}:${BITCOIND_GROUP}" \ -m 0660 \ - ${BITCOIND_CONFIGFILE} + "${BITCOIND_CONFIGFILE}" checkconfig || return 1 } checkconfig() { - if ! grep -qs '^rpcpassword=' "${BITCOIND_CONFIGFILE}" ; then + if grep -qs '^rpcuser=' "${BITCOIND_CONFIGFILE}" && \ + ! grep -qs '^rpcpassword=' "${BITCOIND_CONFIGFILE}" ; then eerror "" eerror "ERROR: You must set a secure rpcpassword to run bitcoind." eerror "The setting must appear in ${BITCOIND_CONFIGFILE}" diff --git a/contrib/init/bitcoind.service b/contrib/init/bitcoind.service index 8b308644b1061..ade8a05926fec 100644 --- a/contrib/init/bitcoind.service +++ b/contrib/init/bitcoind.service @@ -11,13 +11,18 @@ [Unit] Description=Bitcoin daemon -After=network.target +Documentation=https://github.com/bitcoin/bitcoin/blob/master/doc/init.md + +# https://www.freedesktop.org/wiki/Software/systemd/NetworkTarget/ +After=network-online.target +Wants=network-online.target [Service] -ExecStart=/usr/bin/bitcoind -daemon \ - -pid=/run/bitcoind/bitcoind.pid \ +ExecStart=/usr/bin/bitcoind -pid=/run/bitcoind/bitcoind.pid \ -conf=/etc/bitcoin/bitcoin.conf \ - -datadir=/var/lib/bitcoind + -datadir=/var/lib/bitcoind \ + -startupnotify='systemd-notify --ready' \ + -shutdownnotify='systemd-notify --stopping' # Make sure the config directory is readable by the service user PermissionsStartOnly=true @@ -26,9 +31,12 @@ ExecStartPre=/bin/chgrp bitcoin /etc/bitcoin # Process management #################### -Type=forking +Type=notify +NotifyAccess=all PIDFile=/run/bitcoind/bitcoind.pid + Restart=on-failure +TimeoutStartSec=infinity TimeoutStopSec=600 # Directory creation and permissions @@ -73,5 +81,8 @@ PrivateDevices=true # Deny the creation of writable and executable memory mappings. 
MemoryDenyWriteExecute=true +# Restrict ABIs to help ensure MemoryDenyWriteExecute is enforced +SystemCallArchitectures=native + [Install] WantedBy=multi-user.target diff --git a/contrib/install_db4.sh b/contrib/install_db4.sh deleted file mode 100755 index e9130a21ded67..0000000000000 --- a/contrib/install_db4.sh +++ /dev/null @@ -1,106 +0,0 @@ -#!/bin/sh -# Copyright (c) 2017-2019 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. - -# Install libdb4.8 (Berkeley DB). - -export LC_ALL=C -set -e - -if [ -z "${1}" ]; then - echo "Usage: $0 [ ...]" - echo - echo "Must specify a single argument: the directory in which db4 will be built." - echo "This is probably \`pwd\` if you're at the root of the bitcoin repository." - exit 1 -fi - -expand_path() { - cd "${1}" && pwd -P -} - -BDB_PREFIX="$(expand_path ${1})/db4"; shift; -BDB_VERSION='db-4.8.30.NC' -BDB_HASH='12edc0df75bf9abd7f82f821795bcee50f42cb2e5f76a6a281b85732798364ef' -BDB_URL="https://download.oracle.com/berkeley-db/${BDB_VERSION}.tar.gz" - -check_exists() { - command -v "$1" >/dev/null -} - -sha256_check() { - # Args: - # - if check_exists sha256sum; then - echo "${1} ${2}" | sha256sum -c - elif check_exists sha256; then - if [ "$(uname)" = "FreeBSD" ]; then - sha256 -c "${1}" "${2}" - else - echo "${1} ${2}" | sha256 -c - fi - else - echo "${1} ${2}" | shasum -a 256 -c - fi -} - -http_get() { - # Args: - # - # It's acceptable that we don't require SSL here because we manually verify - # content hashes below. - # - if [ -f "${2}" ]; then - echo "File ${2} already exists; not downloading again" - elif check_exists curl; then - curl --insecure --retry 5 "${1}" -o "${2}" - else - wget --no-check-certificate "${1}" -O "${2}" - fi - - sha256_check "${3}" "${2}" -} - -mkdir -p "${BDB_PREFIX}" -http_get "${BDB_URL}" "${BDB_VERSION}.tar.gz" "${BDB_HASH}" -tar -xzvf ${BDB_VERSION}.tar.gz -C "$BDB_PREFIX" -cd "${BDB_PREFIX}/${BDB_VERSION}/" - -# Apply a patch necessary when building with clang and c++11 (see https://community.oracle.com/thread/3952592) -CLANG_CXX11_PATCH_URL='https://gist.githubusercontent.com/LnL7/5153b251fd525fe15de69b67e63a6075/raw/7778e9364679093a32dec2908656738e16b6bdcb/clang.patch' -CLANG_CXX11_PATCH_HASH='7a9a47b03fd5fb93a16ef42235fa9512db9b0829cfc3bdf90edd3ec1f44d637c' -http_get "${CLANG_CXX11_PATCH_URL}" clang.patch "${CLANG_CXX11_PATCH_HASH}" -patch -p2 < clang.patch - -# The packaged config.guess and config.sub are ancient (2009) and can cause build issues. -# Replace them with modern versions. -# See https://github.com/bitcoin/bitcoin/issues/16064 -CONFIG_GUESS_URL='https://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess;hb=55eaf3e779455c4e5cc9f82efb5278be8f8f900b' -CONFIG_GUESS_HASH='2d1ff7bca773d2ec3c6217118129220fa72d8adda67c7d2bf79994b3129232c1' -CONFIG_SUB_URL='https://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.sub;hb=55eaf3e779455c4e5cc9f82efb5278be8f8f900b' -CONFIG_SUB_HASH='3a4befde9bcdf0fdb2763fc1bfa74e8696df94e1ad7aac8042d133c8ff1d2e32' - -rm -f "dist/config.guess" -rm -f "dist/config.sub" - -http_get "${CONFIG_GUESS_URL}" dist/config.guess "${CONFIG_GUESS_HASH}" -http_get "${CONFIG_SUB_URL}" dist/config.sub "${CONFIG_SUB_HASH}" - -cd build_unix/ - -"${BDB_PREFIX}/${BDB_VERSION}/dist/configure" \ - --enable-cxx --disable-shared --disable-replication --with-pic --prefix="${BDB_PREFIX}" \ - "${@}" - -make install - -echo -echo "db4 build complete." 
-echo -# shellcheck disable=SC2016 -echo 'When compiling bitcoind, run `./configure` in the following way:' -echo -echo " export BDB_PREFIX='${BDB_PREFIX}'" -# shellcheck disable=SC2016 -echo ' ./configure BDB_LIBS="-L${BDB_PREFIX}/lib -ldb_cxx-4.8" BDB_CFLAGS="-I${BDB_PREFIX}/include" ...' diff --git a/contrib/linearize/example-linearize.cfg b/contrib/linearize/example-linearize.cfg index 5990b9307ab47..5f566261ca674 100644 --- a/contrib/linearize/example-linearize.cfg +++ b/contrib/linearize/example-linearize.cfg @@ -13,6 +13,9 @@ port=8332 #regtest default #port=18443 +#signet default +#port=38332 + # bootstrap.dat hashlist settings (linearize-hashes) max_height=313000 @@ -33,6 +36,11 @@ input=/home/example/.bitcoin/blocks #genesis=0f9188f13cb7b2c71f2a335e3a4fc328bf5beb436012afca590b1a11466e2206 #input=/home/example/.bitcoin/regtest/blocks +# signet +#netmagic=0a03cf40 +#genesis=00000008819873e925422c1ff0f99f7cc9bbb232af63a077a480a3633bee1ef6 +#input=/home/example/.bitcoin/signet/blocks + # "output" option causes blockchain files to be written to the given location, # with "output_file" ignored. If not used, "output_file" is used instead. # output=/home/example/blockchain_directory diff --git a/contrib/linearize/linearize-data.py b/contrib/linearize/linearize-data.py index 73f54cd4885e0..74d98307056e6 100755 --- a/contrib/linearize/linearize-data.py +++ b/contrib/linearize/linearize-data.py @@ -2,7 +2,7 @@ # # linearize-data.py: Construct a linear, no-fork version of the chain. # -# Copyright (c) 2013-2020 The Bitcoin Core developers +# Copyright (c) 2013-2022 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. # @@ -17,53 +17,12 @@ import time import glob from collections import namedtuple -from binascii import unhexlify settings = {} -def hex_switchEndian(s): - """ Switches the endianness of a hex string (in pairs of hex chars) """ - pairList = [s[i:i+2].encode() for i in range(0, len(s), 2)] - return b''.join(pairList[::-1]).decode() - -def uint32(x): - return x & 0xffffffff - -def bytereverse(x): - return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) | - (((x) >> 8) & 0x0000ff00) | ((x) >> 24) )) - -def bufreverse(in_buf): - out_words = [] - for i in range(0, len(in_buf), 4): - word = struct.unpack('@I', in_buf[i:i+4])[0] - out_words.append(struct.pack('@I', bytereverse(word))) - return b''.join(out_words) - -def wordreverse(in_buf): - out_words = [] - for i in range(0, len(in_buf), 4): - out_words.append(in_buf[i:i+4]) - out_words.reverse() - return b''.join(out_words) - -def calc_hdr_hash(blk_hdr): - hash1 = hashlib.sha256() - hash1.update(blk_hdr) - hash1_o = hash1.digest() - - hash2 = hashlib.sha256() - hash2.update(hash1_o) - hash2_o = hash2.digest() - - return hash2_o - def calc_hash_str(blk_hdr): - hash = calc_hdr_hash(blk_hdr) - hash = bufreverse(hash) - hash = wordreverse(hash) - hash_str = hash.hex() - return hash_str + blk_hdr_hash = hashlib.sha256(hashlib.sha256(blk_hdr).digest()).digest() + return blk_hdr_hash[::-1].hex() def get_blk_dt(blk_hdr): members = struct.unpack(" self.maxOutSz): @@ -206,7 +183,7 @@ def fetchBlock(self, extent): '''Fetch block contents from disk given extents''' with open(self.inFileName(extent.fn), "rb") as f: f.seek(extent.offset) - return f.read(extent.size) + return self.read_xored(f, extent.size) def copyOneBlock(self): '''Find the next block to be written in the input, and copy it to the output.''' @@ -231,7 +208,7 @@ def 
run(self): print("Premature end of block data") return - inhdr = self.inF.read(8) + inhdr = self.read_xored(self.inF, 8) if (not inhdr or (inhdr[0] == "\0")): self.inF.close() self.inF = None @@ -248,7 +225,7 @@ def run(self): inLenLE = inhdr[4:] su = struct.unpack(" MacOSX10.14.sdk.tar.gz' \; +# Unpack the .xip and place the resulting Xcode.app in your current +# working directory +python3 apple-sdk-tools/extract_xcode.py -f Xcode_15.xip | cpio -d -i ``` -on macOS the process is more straightforward: +On macOS: ```bash -xip -x Xcode_10.2.1.xip -tar -s "/MacOSX.sdk/MacOSX10.14.sdk/" -C Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/ -czf MacOSX10.14.sdk.tar.gz MacOSX.sdk +xip -x Xcode_15.xip ``` -Our previously used macOS SDK (`MacOSX10.11.sdk`) can be extracted from -[Xcode 7.3.1 dmg](https://developer.apple.com/devcenter/download.action?path=/Developer_Tools/Xcode_7.3.1/Xcode_7.3.1.dmg). -The script [`extract-osx-sdk.sh`](./extract-osx-sdk.sh) automates this. First -ensure the DMG file is in the current directory, and then run the script. You -may wish to delete the `intermediate 5.hfs` file and `MacOSX10.11.sdk` (the -directory) when you've confirmed the extraction succeeded. +### Step 2: Generating the SDK tarball from `Xcode.app` + +To generate the SDK, run the script [`gen-sdk`](./gen-sdk) with the +path to `Xcode.app` (extracted in the previous stage) as the first argument. ```bash -apt-get install p7zip-full sleuthkit -contrib/macdeploy/extract-osx-sdk.sh -rm -rf 5.hfs MacOSX10.11.sdk +./contrib/macdeploy/gen-sdk '/path/to/Xcode.app' ``` -## Deterministic macOS DMG Notes -Working macOS DMGs are created in Linux by combining a recent `clang`, the Apple -`binutils` (`ld`, `ar`, etc) and DMG authoring tools. - -Apple uses `clang` extensively for development and has upstreamed the necessary -functionality so that a vanilla clang can take advantage. It supports the use of `-F`, -`-target`, `-mmacosx-version-min`, and `--sysroot`, which are all necessary when -building for macOS. - -Apple's version of `binutils` (called `cctools`) contains lots of functionality missing in the -FSF's `binutils`. In addition to extra linker options for frameworks and sysroots, several -other tools are needed as well such as `install_name_tool`, `lipo`, and `nmedit`. These -do not build under Linux, so they have been patched to do so. The work here was used as -a starting point: [mingwandroid/toolchain4](https://github.com/mingwandroid/toolchain4). +The generated archive should be: `Xcode-15.0-15A240d-extracted-SDK-with-libcxx-headers.tar.gz`. +The `sha256sum` should be `c0c2e7bb92c1fee0c4e9f3a485e4530786732d6c6dd9e9f418c282aa6892f55d`. -In order to build a working toolchain, the following source packages are needed from -Apple: `cctools`, `dyld`, and `ld64`. +## Deterministic macOS App Notes -These tools inject timestamps by default, which produce non-deterministic binaries. The -`ZERO_AR_DATE` environment variable is used to disable that. +macOS Applications are created on Linux using a recent LLVM. -This version of `cctools` has been patched to use the current version of `clang`'s headers -and its `libLTO.so` rather than those from `llvmgcc`, as it was originally done in `toolchain4`. - -To complicate things further, all builds must target an Apple SDK. These SDKs are free to -download, but not redistributable. To obtain it, register for an Apple Developer Account, -then download [Xcode 10.2.1](https://download.developer.apple.com/Developer_Tools/Xcode_10.2.1/Xcode_10.2.1.xip). 
- -This file is many gigabytes in size, but most (but not all) of what we need is -contained only in a single directory: +All builds must target an Apple SDK. These SDKs are free to download, but not redistributable. +See the SDK Extraction notes above for how to obtain it. -```bash -Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX10.14.sdk -``` +The Guix build process has been designed to avoid including the SDK's files in Guix's outputs. +All interim tarballs are fully deterministic and may be freely redistributed. -See the SDK Extraction notes above for how to obtain it. +Using an Apple-blessed key to sign binaries is a requirement to produce (distributable) macOS +binaries. Because this private key cannot be shared, we'll have to be a bit creative in order +for the build process to remain somewhat deterministic. Here's how it works: -The Gitian descriptors build 2 sets of files: Linux tools, then Apple binaries which are -created using these tools. The build process has been designed to avoid including the -SDK's files in Gitian's outputs. All interim tarballs are fully deterministic and may be freely -redistributed. - -`genisoimage` is used to create the initial DMG. It is not deterministic as-is, so it has been -patched. A system `genisoimage` will work fine, but it will not be deterministic because -the file-order will change between invocations. The patch can be seen here: [cdrkit-deterministic.patch](https://github.com/bitcoin/bitcoin/blob/master/depends/patches/native_cdrkit/cdrkit-deterministic.patch). -No effort was made to fix this cleanly, so it likely leaks memory badly, however it's only used for -a single invocation, so that's no real concern. - -`genisoimage` cannot compress DMGs, so afterwards, the DMG tool from the -`libdmg-hfsplus` project is used to compress it. There are several bugs in this tool and its -maintainer has seemingly abandoned the project. - -The DMG tool has the ability to create DMGs from scratch as well, but this functionality is -broken. Only the compression feature is currently used. Ideally, the creation could be fixed -and `genisoimage` would no longer be necessary. - -Background images and other features can be added to DMG files by inserting a -`.DS_Store` before creation. This is generated by the script `contrib/macdeploy/custom_dsstore.py`. - -As of OS X 10.9 Mavericks, using an Apple-blessed key to sign binaries is a requirement in -order to satisfy the new Gatekeeper requirements. Because this private key cannot be -shared, we'll have to be a bit creative in order for the build process to remain somewhat -deterministic. Here's how it works: - -- Builders use Gitian to create an unsigned release. This outputs an unsigned DMG which - users may choose to bless and run. It also outputs an unsigned app structure in the form - of a tarball, which also contains all of the tools that have been previously (deterministically) - built in order to create a final DMG. +- Builders use Guix to create an unsigned release. This outputs an unsigned ZIP which + users may choose to bless, self-codesign, and run. It also outputs an unsigned app structure + in the form of a tarball. - The Apple keyholder uses this unsigned app to create a detached signature, using the - script that is also included there. Detached signatures are available from this [repository](https://github.com/bitcoin-core/bitcoin-detached-sigs). -- Builders feed the unsigned app + detached signature back into Gitian. 
It uses the - pre-built tools to recombine the pieces into a deterministic DMG. + included script. Detached signatures are available from this [repository](https://github.com/bitcoin-core/bitcoin-detached-sigs). +- Builders feed the unsigned app + detached signature back into Guix, which combines the + pieces into a deterministic ZIP. diff --git a/contrib/macdeploy/background.svg b/contrib/macdeploy/background.svg deleted file mode 100644 index 9c330af45142e..0000000000000 --- a/contrib/macdeploy/background.svg +++ /dev/null @@ -1,34 +0,0 @@ - - - - - - - - - - - - - - - - PACKAGE_NAME - - - - - diff --git a/contrib/macdeploy/custom_dsstore.py b/contrib/macdeploy/custom_dsstore.py deleted file mode 100755 index dc1c1882dd5a2..0000000000000 --- a/contrib/macdeploy/custom_dsstore.py +++ /dev/null @@ -1,59 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2013-2018 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. -import biplist -from ds_store import DSStore -from mac_alias import Alias -import sys - -output_file = sys.argv[1] -package_name_ns = sys.argv[2] - -ds = DSStore.open(output_file, 'w+') -ds['.']['bwsp'] = { - 'ShowStatusBar': False, - 'WindowBounds': '{{300, 280}, {500, 343}}', - 'ContainerShowSidebar': False, - 'SidebarWidth': 0, - 'ShowTabView': False, - 'PreviewPaneVisibility': False, - 'ShowToolbar': False, - 'ShowSidebar': False, - 'ShowPathbar': True -} - -icvp = { - 'gridOffsetX': 0.0, - 'textSize': 12.0, - 'viewOptionsVersion': 1, - 'backgroundImageAlias': b'\x00\x00\x00\x00\x02\x1e\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xd1\x94\\\xb0H+\x00\x05\x00\x00\x00\x98\x0fbackground.tiff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x99\xd19\xb0\xf8\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x00\x00\r\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b.background\x00\x00\x10\x00\x08\x00\x00\xd1\x94\\\xb0\x00\x00\x00\x11\x00\x08\x00\x00\xd19\xb0\xf8\x00\x00\x00\x01\x00\x04\x00\x00\x00\x98\x00\x0e\x00 \x00\x0f\x00b\x00a\x00c\x00k\x00g\x00r\x00o\x00u\x00n\x00d\x00.\x00t\x00i\x00f\x00f\x00\x0f\x00\x02\x00\x00\x00\x12\x00\x1c/.background/background.tiff\x00\x14\x01\x06\x00\x00\x00\x00\x01\x06\x00\x02\x00\x00\x0cMacintosh HD\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xce\x97\xab\xc3H+\x00\x00\x01\x88[\x88\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02u\xab\x8d\xd1\x94\\\xb0devrddsk\xff\xff\xff\xff\x00\x00\t \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07bitcoin\x00\x00\x10\x00\x08\x00\x00\xce\x97\xab\xc3\x00\x00\x00\x11\x00\x08\x00\x00\xd1\x94\\\xb0\x00\x00\x00\x01\x00\x14\x01\x88[\x88\x00\x16\xa9\t\x00\x08\xfaR\x00\x08\xfaQ\x00\x02d\x8e\x00\x0e\x00\x02\x00\x00\x00\x0f\x00\x1a\x00\x0c\x00M\x00a\x00c\x00i\x00n\x00t\x00o\x00s\x00h\x00 \x00H\x00D\x00\x13\x00\x01/\x00\x00\x15\x00\x02\x00\x14\xff\xff\x00\x00\xff\xff\x00\x00', - 'backgroundColorBlue': 1.0, - 'iconSize': 96.0, - 'backgroundColorGreen': 1.0, - 'arrangeBy': 'none', - 'showIconPreview': True, - 'gridSpacing': 
100.0, - 'gridOffsetY': 0.0, - 'showItemInfo': False, - 'labelOnBottom': True, - 'backgroundType': 2, - 'backgroundColorRed': 1.0 -} -alias = Alias.from_bytes(icvp['backgroundImageAlias']) -alias.volume.name = package_name_ns -alias.volume.posix_path = '/Volumes/' + package_name_ns -alias.volume.disk_image_alias.target.filename = package_name_ns + '.temp.dmg' -alias.volume.disk_image_alias.target.carbon_path = 'Macintosh HD:Users:\x00bitcoinuser:\x00Documents:\x00bitcoin:\x00bitcoin:\x00' + package_name_ns + '.temp.dmg' -alias.volume.disk_image_alias.target.posix_path = 'Users/bitcoinuser/Documents/bitcoin/bitcoin/' + package_name_ns + '.temp.dmg' -alias.target.carbon_path = package_name_ns + ':.background:\x00background.tiff' -icvp['backgroundImageAlias'] = biplist.Data(alias.to_bytes()) -ds['.']['icvp'] = icvp - -ds['.']['vSrn'] = ('long', 1) - -ds['Applications']['Iloc'] = (370, 156) -ds['Bitcoin-Qt.app']['Iloc'] = (128, 156) - -ds.flush() -ds.close() diff --git a/contrib/macdeploy/detached-sig-apply.sh b/contrib/macdeploy/detached-sig-apply.sh deleted file mode 100755 index 5c5a85d3fe186..0000000000000 --- a/contrib/macdeploy/detached-sig-apply.sh +++ /dev/null @@ -1,57 +0,0 @@ -#!/bin/sh -# Copyright (c) 2014-2019 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. - -export LC_ALL=C -set -e - -UNSIGNED="$1" -SIGNATURE="$2" -ARCH=x86_64 -ROOTDIR=dist -TEMPDIR=signed.temp -OUTDIR=signed-app - -if [ -z "$UNSIGNED" ]; then - echo "usage: $0 " - exit 1 -fi - -if [ -z "$SIGNATURE" ]; then - echo "usage: $0 " - exit 1 -fi - -rm -rf ${TEMPDIR} && mkdir -p ${TEMPDIR} -tar -C ${TEMPDIR} -xf ${UNSIGNED} -cp -rf "${SIGNATURE}"/* ${TEMPDIR} - -if [ -z "${PAGESTUFF}" ]; then - PAGESTUFF=${TEMPDIR}/pagestuff -fi - -if [ -z "${CODESIGN_ALLOCATE}" ]; then - CODESIGN_ALLOCATE=${TEMPDIR}/codesign_allocate -fi - -find ${TEMPDIR} -name "*.sign" | while read i; do - SIZE=$(stat -c %s "${i}") - TARGET_FILE="$(echo "${i}" | sed 's/\.sign$//')" - - echo "Allocating space for the signature of size ${SIZE} in ${TARGET_FILE}" - ${CODESIGN_ALLOCATE} -i "${TARGET_FILE}" -a ${ARCH} ${SIZE} -o "${i}.tmp" - - OFFSET=$(${PAGESTUFF} "${i}.tmp" -p | tail -2 | grep offset | sed 's/[^0-9]*//g') - if [ -z ${QUIET} ]; then - echo "Attaching signature at offset ${OFFSET}" - fi - - dd if="$i" of="${i}.tmp" bs=1 seek=${OFFSET} count=${SIZE} 2>/dev/null - mv "${i}.tmp" "${TARGET_FILE}" - rm "${i}" - echo "Success." -done -mv ${TEMPDIR}/${ROOTDIR} ${OUTDIR} -rm -rf ${TEMPDIR} -echo "Signed: ${OUTDIR}" diff --git a/contrib/macdeploy/detached-sig-create.sh b/contrib/macdeploy/detached-sig-create.sh index 31a97f0a24c72..097a7c35ee451 100755 --- a/contrib/macdeploy/detached-sig-create.sh +++ b/contrib/macdeploy/detached-sig-create.sh @@ -1,5 +1,5 @@ #!/bin/sh -# Copyright (c) 2014-2019 The Bitcoin Core developers +# Copyright (c) 2014-2022 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
@@ -8,44 +8,23 @@ set -e ROOTDIR=dist BUNDLE="${ROOTDIR}/Bitcoin-Qt.app" -CODESIGN=codesign +BINARY="${BUNDLE}/Contents/MacOS/Bitcoin-Qt" +SIGNAPPLE=signapple TEMPDIR=sign.temp -TEMPLIST=${TEMPDIR}/signatures.txt -OUT=signature-osx.tar.gz -OUTROOT=osx +ARCH=$(${SIGNAPPLE} info ${BINARY} | head -n 1 | cut -d " " -f 1) +OUT="signature-osx-${ARCH}.tar.gz" +OUTROOT=osx/dist if [ -z "$1" ]; then - echo "usage: $0 " - echo "example: $0 -s MyIdentity" + echo "usage: $0 " + echo "example: $0 " exit 1 fi -rm -rf ${TEMPDIR} ${TEMPLIST} +rm -rf ${TEMPDIR} mkdir -p ${TEMPDIR} -${CODESIGN} -f --file-list ${TEMPLIST} "$@" "${BUNDLE}" - -grep -v CodeResources < "${TEMPLIST}" | while read i; do - TARGETFILE="${BUNDLE}/$(echo "${i}" | sed "s|.*${BUNDLE}/||")" - SIZE=$(pagestuff "$i" -p | tail -2 | grep size | sed 's/[^0-9]*//g') - OFFSET=$(pagestuff "$i" -p | tail -2 | grep offset | sed 's/[^0-9]*//g') - SIGNFILE="${TEMPDIR}/${OUTROOT}/${TARGETFILE}.sign" - DIRNAME="$(dirname "${SIGNFILE}")" - mkdir -p "${DIRNAME}" - echo "Adding detached signature for: ${TARGETFILE}. Size: ${SIZE}. Offset: ${OFFSET}" - dd if="$i" of="${SIGNFILE}" bs=1 skip=${OFFSET} count=${SIZE} 2>/dev/null -done - -grep CodeResources < "${TEMPLIST}" | while read i; do - TARGETFILE="${BUNDLE}/$(echo "${i}" | sed "s|.*${BUNDLE}/||")" - RESOURCE="${TEMPDIR}/${OUTROOT}/${TARGETFILE}" - DIRNAME="$(dirname "${RESOURCE}")" - mkdir -p "${DIRNAME}" - echo "Adding resource for: \"${TARGETFILE}\"" - cp "${i}" "${RESOURCE}" -done - -rm ${TEMPLIST} +${SIGNAPPLE} sign -f --detach "${TEMPDIR}/${OUTROOT}" "$@" "${BUNDLE}" --hardened-runtime tar -C "${TEMPDIR}" -czf "${OUT}" . rm -rf "${TEMPDIR}" diff --git a/contrib/macdeploy/extract-osx-sdk.sh b/contrib/macdeploy/extract-osx-sdk.sh deleted file mode 100755 index 3c7bdf4217c5c..0000000000000 --- a/contrib/macdeploy/extract-osx-sdk.sh +++ /dev/null @@ -1,34 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) 2016-2020 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. - -export LC_ALL=C -set -e - -INPUTFILE="Xcode_7.3.1.dmg" -HFSFILENAME="5.hfs" -SDKDIR="Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX10.11.sdk" - -7z x "${INPUTFILE}" "${HFSFILENAME}" -SDKNAME="$(basename "${SDKDIR}")" -SDKDIRINODE=$(ifind -n "${SDKDIR}" "${HFSFILENAME}") -fls "${HFSFILENAME}" -rpF ${SDKDIRINODE} | - while read type inode filename; do - inode="${inode::-1}" - if [ "${filename:0:14}" = "usr/share/man/" ]; then - continue - fi - filename="${SDKNAME}/$filename" - echo "Extracting $filename ..." - mkdir -p "$(dirname "$filename")" - if [ "$type" = "l/l" ]; then - ln -s "$(icat "${HFSFILENAME}" $inode)" "$filename" - else - icat "${HFSFILENAME}" $inode >"$filename" - fi -done -echo "Building ${SDKNAME}.tar.gz ..." -MTIME="$(istat "${HFSFILENAME}" "${SDKDIRINODE}" | perl -nle 'm/Content Modified:\s+(.*?)\s\(/ && print $1')" -find "${SDKNAME}" | sort | tar --no-recursion --mtime="${MTIME}" --mode='u+rw,go+r-w,a+X' --owner=0 --group=0 -c -T - | gzip -9n > "${SDKNAME}.tar.gz" -echo 'All done!' 
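For readers following the `detached-sig-create.sh` changes above: the script now drives `signapple` instead of `codesign`, takes a single path-to-key argument, expects the unsigned `dist/Bitcoin-Qt.app` bundle in the current working directory, and writes an architecture-tagged `signature-osx-<arch>.tar.gz`. A minimal sketch of a keyholder invocation under those assumptions; the archive name and key path below are placeholders, not values fixed by the script:

```bash
# Hypothetical keyholder workflow; file names and key path are placeholders.
# Unpack the unsigned app tarball produced by the Guix build so that
# dist/Bitcoin-Qt.app exists in the current directory.
tar -xf bitcoin-*-osx-unsigned.tar.gz

# Create the detached signatures. The single required argument is the path
# to the signing key, per the script's usage message; signapple must be on PATH.
./contrib/macdeploy/detached-sig-create.sh /path/to/codesigning.key

# The output, signature-osx-<arch>.tar.gz, is what gets published to the
# bitcoin-core/bitcoin-detached-sigs repository for builders to consume.
ls signature-osx-*.tar.gz
```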
diff --git a/contrib/macdeploy/fancy.plist b/contrib/macdeploy/fancy.plist deleted file mode 100644 index ef277a7f14ad6..0000000000000 --- a/contrib/macdeploy/fancy.plist +++ /dev/null @@ -1,32 +0,0 @@ - - - - - window_bounds - - 300 - 300 - 800 - 620 - - background_picture - background.tiff - icon_size - 96 - applications_symlink - - items_position - - Applications - - 370 - 156 - - Bitcoin-Qt.app - - 128 - 156 - - - - diff --git a/contrib/macdeploy/gen-sdk b/contrib/macdeploy/gen-sdk new file mode 100755 index 0000000000000..86a6262b5ce48 --- /dev/null +++ b/contrib/macdeploy/gen-sdk @@ -0,0 +1,96 @@ +#!/usr/bin/env python3 +import argparse +import plistlib +import pathlib +import sys +import tarfile +import gzip +import os +import contextlib + +@contextlib.contextmanager +def cd(path): + """Context manager that restores PWD even if an exception was raised.""" + old_pwd = os.getcwd() + os.chdir(str(path)) + try: + yield + finally: + os.chdir(old_pwd) + +def run(): + parser = argparse.ArgumentParser( + description=__doc__, formatter_class=argparse.RawTextHelpFormatter) + + parser.add_argument('xcode_app', metavar='XCODEAPP', nargs=1) + parser.add_argument("-o", metavar='OUTSDKTGZ', nargs=1, dest='out_sdktgz', required=False) + + args = parser.parse_args() + + xcode_app = pathlib.Path(args.xcode_app[0]).resolve() + assert xcode_app.is_dir(), "The supplied Xcode.app path '{}' either does not exist or is not a directory".format(xcode_app) + + xcode_app_plist = xcode_app.joinpath("Contents/version.plist") + with xcode_app_plist.open('rb') as fp: + pl = plistlib.load(fp) + xcode_version = pl['CFBundleShortVersionString'] + xcode_build_id = pl['ProductBuildVersion'] + print("Found Xcode (version: {xcode_version}, build id: {xcode_build_id})".format(xcode_version=xcode_version, xcode_build_id=xcode_build_id)) + + sdk_dir = xcode_app.joinpath("Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk") + sdk_plist = sdk_dir.joinpath("System/Library/CoreServices/SystemVersion.plist") + with sdk_plist.open('rb') as fp: + pl = plistlib.load(fp) + sdk_version = pl['ProductVersion'] + sdk_build_id = pl['ProductBuildVersion'] + print("Found MacOSX SDK (version: {sdk_version}, build id: {sdk_build_id})".format(sdk_version=sdk_version, sdk_build_id=sdk_build_id)) + + out_name = "Xcode-{xcode_version}-{xcode_build_id}-extracted-SDK-with-libcxx-headers".format(xcode_version=xcode_version, xcode_build_id=xcode_build_id) + + if args.out_sdktgz: + out_sdktgz_path = pathlib.Path(args.out_sdktgz_path) + else: + # Construct our own out_sdktgz if not specified on the command line + out_sdktgz_path = pathlib.Path("./{}.tar.gz".format(out_name)) + + def tarfp_add_with_base_change(tarfp, dir_to_add, alt_base_dir): + """Add all files in dir_to_add to tarfp, but prepent alt_base_dir to the files' + names + + e.g. 
if the only file under /root/bazdir is /root/bazdir/qux, invoking: + + tarfp_add_with_base_change(tarfp, "foo/bar", "/root/bazdir") + + would result in the following members being added to tarfp: + + foo/bar/ -> corresponding to /root/bazdir + foo/bar/qux -> corresponding to /root/bazdir/qux + + """ + def change_tarinfo_base(tarinfo): + if tarinfo.name and tarinfo.name.startswith("./"): + tarinfo.name = str(pathlib.Path(alt_base_dir, tarinfo.name)) + if tarinfo.linkname and tarinfo.linkname.startswith("./"): + tarinfo.linkname = str(pathlib.Path(alt_base_dir, tarinfo.linkname)) + # make metadata deterministic + tarinfo.mtime = 0 + tarinfo.uid, tarinfo.uname = 0, '' + tarinfo.gid, tarinfo.gname = 0, '' + # don't use isdir() as there are also executable files present + tarinfo.mode = 0o0755 if tarinfo.mode & 0o0100 else 0o0644 + return tarinfo + with cd(dir_to_add): + # recursion already adds entries in sorted order + tarfp.add(".", recursive=True, filter=change_tarinfo_base) + + print("Creating output .tar.gz file...") + with out_sdktgz_path.open("wb") as fp: + with gzip.GzipFile(fileobj=fp, mode='wb', compresslevel=9, mtime=0) as gzf: + with tarfile.open(mode="w", fileobj=gzf, format=tarfile.GNU_FORMAT) as tarfp: + print("Adding MacOSX SDK {} files...".format(sdk_version)) + tarfp_add_with_base_change(tarfp, sdk_dir, out_name) + print("Done! Find the resulting gzipped tarball at:") + print(out_sdktgz_path.resolve()) + +if __name__ == '__main__': + run() diff --git a/contrib/macdeploy/macdeployqtplus b/contrib/macdeploy/macdeployqtplus index d8088aa123044..eaa7b896be9f0 100755 --- a/contrib/macdeploy/macdeployqtplus +++ b/contrib/macdeploy/macdeployqtplus @@ -16,10 +16,11 @@ # along with this program. If not, see . # -import subprocess, sys, re, os, shutil, stat, os.path, time -from string import Template +import sys, re, os, platform, shutil, stat, subprocess, os.path from argparse import ArgumentParser -from typing import List, Optional +from pathlib import Path +from subprocess import PIPE, run +from typing import Optional # This is ported from the original macdeployqt with modifications @@ -49,28 +50,18 @@ class FrameworkInfo(object): return False def __str__(self): - return """ Framework name: {} - Framework directory: {} - Framework path: {} - Binary name: {} - Binary directory: {} - Binary path: {} - Version: {} - Install name: {} - Deployed install name: {} - Source file Path: {} - Deployed Directory (relative to bundle): {} -""".format(self.frameworkName, - self.frameworkDirectory, - self.frameworkPath, - self.binaryName, - self.binaryDirectory, - self.binaryPath, - self.version, - self.installName, - self.deployedInstallName, - self.sourceFilePath, - self.destinationDirectory) + return f""" Framework name: {self.frameworkName} + Framework directory: {self.frameworkDirectory} + Framework path: {self.frameworkPath} + Binary name: {self.binaryName} + Binary directory: {self.binaryDirectory} + Binary path: {self.binaryPath} + Version: {self.version} + Install name: {self.installName} + Deployed install name: {self.deployedInstallName} + Source file Path: {self.sourceFilePath} + Deployed Directory (relative to bundle): {self.destinationDirectory} +""" def isDylib(self): return self.frameworkName.endswith(".dylib") @@ -86,18 +77,18 @@ class FrameworkInfo(object): bundleBinaryDirectory = "Contents/MacOS" @classmethod - def fromOtoolLibraryLine(cls, line: str) -> Optional['FrameworkInfo']: + def fromLibraryLine(cls, line: str) -> Optional['FrameworkInfo']: # Note: line must be trimmed if 
line == "": return None - # Don't deploy system libraries (exception for libQtuitools and libQtlucene). - if line.startswith("/System/Library/") or line.startswith("@executable_path") or (line.startswith("/usr/lib/") and "libQt" not in line): + # Don't deploy system libraries + if line.startswith("/System/Library/") or line.startswith("@executable_path") or line.startswith("/usr/lib/"): return None m = cls.reOLine.match(line) if m is None: - raise RuntimeError("otool line could not be parsed: " + line) + raise RuntimeError(f"Line could not be parsed: {line}") path = m.group(1) @@ -117,7 +108,7 @@ class FrameworkInfo(object): info.version = "-" info.installName = path - info.deployedInstallName = "@executable_path/../Frameworks/" + info.binaryName + info.deployedInstallName = f"@executable_path/../Frameworks/{info.binaryName}" info.sourceFilePath = path info.destinationDirectory = cls.bundleFrameworkDirectory else: @@ -129,7 +120,7 @@ class FrameworkInfo(object): break i += 1 if i == len(parts): - raise RuntimeError("Could not find .framework or .dylib in otool line: " + line) + raise RuntimeError(f"Could not find .framework or .dylib in line: {line}") info.frameworkName = parts[i] info.frameworkDirectory = "/".join(parts[:i]) @@ -140,7 +131,7 @@ class FrameworkInfo(object): info.binaryPath = os.path.join(info.binaryDirectory, info.binaryName) info.version = parts[i+2] - info.deployedInstallName = "@executable_path/../Frameworks/" + os.path.join(info.frameworkName, info.binaryPath) + info.deployedInstallName = f"@executable_path/../Frameworks/{os.path.join(info.frameworkName, info.binaryPath)}" info.destinationDirectory = os.path.join(cls.bundleFrameworkDirectory, info.frameworkName, info.binaryDirectory) info.sourceResourcesDirectory = os.path.join(info.frameworkPath, "Resources") @@ -154,10 +145,10 @@ class FrameworkInfo(object): class ApplicationBundleInfo(object): def __init__(self, path: str): self.path = path - appName = "Bitcoin-Qt" - self.binaryPath = os.path.join(path, "Contents", "MacOS", appName) + # for backwards compatibility reasons, this must remain as Bitcoin-Qt + self.binaryPath = os.path.join(path, "Contents", "MacOS", "Bitcoin-Qt") if not os.path.exists(self.binaryPath): - raise RuntimeError("Could not find bundle binary for " + path) + raise RuntimeError(f"Could not find bundle binary for {path}") self.resourcesPath = os.path.join(path, "Contents", "Resources") self.pluginPath = os.path.join(path, "Contents", "PlugIns") @@ -181,40 +172,36 @@ class DeploymentInfo(object): self.pluginPath = pluginPath def usesFramework(self, name: str) -> bool: - nameDot = "{}.".format(name) - libNameDot = "lib{}.".format(name) for framework in self.deployedFrameworks: if framework.endswith(".framework"): - if framework.startswith(nameDot): + if framework.startswith(f"{name}."): return True elif framework.endswith(".dylib"): - if framework.startswith(libNameDot): + if framework.startswith(f"lib{name}."): return True return False -def getFrameworks(binaryPath: str, verbose: int) -> List[FrameworkInfo]: - if verbose >= 3: - print("Inspecting with otool: " + binaryPath) - otoolbin=os.getenv("OTOOL", "otool") - otool = subprocess.Popen([otoolbin, "-L", binaryPath], stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) - o_stdout, o_stderr = otool.communicate() - if otool.returncode != 0: - if verbose >= 1: - sys.stderr.write(o_stderr) - sys.stderr.flush() - raise RuntimeError("otool failed with return code {}".format(otool.returncode)) - - otoolLines = o_stdout.split("\n") - 
otoolLines.pop(0) # First line is the inspected binary +def getFrameworks(binaryPath: str, verbose: int) -> list[FrameworkInfo]: + objdump = os.getenv("OBJDUMP", "objdump") + if verbose: + print(f"Inspecting with {objdump}: {binaryPath}") + output = run([objdump, "--macho", "--dylibs-used", binaryPath], stdout=PIPE, stderr=PIPE, text=True) + if output.returncode != 0: + sys.stderr.write(output.stderr) + sys.stderr.flush() + raise RuntimeError(f"{objdump} failed with return code {output.returncode}") + + lines = output.stdout.split("\n") + lines.pop(0) # First line is the inspected binary if ".framework" in binaryPath or binaryPath.endswith(".dylib"): - otoolLines.pop(0) # Frameworks and dylibs list themselves as a dependency. + lines.pop(0) # Frameworks and dylibs list themselves as a dependency. libraries = [] - for line in otoolLines: + for line in lines: line = line.replace("@loader_path", os.path.dirname(binaryPath)) - info = FrameworkInfo.fromOtoolLibraryLine(line.strip()) + info = FrameworkInfo.fromLibraryLine(line.strip()) if info is not None: - if verbose >= 3: + if verbose: print("Found framework:") print(info) libraries.append(info) @@ -222,11 +209,11 @@ def getFrameworks(binaryPath: str, verbose: int) -> List[FrameworkInfo]: return libraries def runInstallNameTool(action: str, *args): - installnametoolbin=os.getenv("INSTALLNAMETOOL", "install_name_tool") - subprocess.check_call([installnametoolbin, "-"+action] + list(args)) + installnametoolbin=os.getenv("INSTALL_NAME_TOOL", "install_name_tool") + run([installnametoolbin, "-"+action] + list(args), check=True) def changeInstallName(oldName: str, newName: str, binaryPath: str, verbose: int): - if verbose >= 3: + if verbose: print("Using install_name_tool:") print(" in", binaryPath) print(" change reference", oldName) @@ -234,7 +221,7 @@ def changeInstallName(oldName: str, newName: str, binaryPath: str, verbose: int) runInstallNameTool("change", oldName, newName, binaryPath) def changeIdentification(id: str, binaryPath: str, verbose: int): - if verbose >= 3: + if verbose: print("Using install_name_tool:") print(" change identification in", binaryPath) print(" to", id) @@ -242,74 +229,63 @@ def changeIdentification(id: str, binaryPath: str, verbose: int): def runStrip(binaryPath: str, verbose: int): stripbin=os.getenv("STRIP", "strip") - if verbose >= 3: + if verbose: print("Using strip:") print(" stripped", binaryPath) - subprocess.check_call([stripbin, "-x", binaryPath]) + run([stripbin, "-x", binaryPath], check=True) def copyFramework(framework: FrameworkInfo, path: str, verbose: int) -> Optional[str]: if framework.sourceFilePath.startswith("Qt"): #standard place for Nokia Qt installer's frameworks - fromPath = "/Library/Frameworks/" + framework.sourceFilePath + fromPath = f"/Library/Frameworks/{framework.sourceFilePath}" else: fromPath = framework.sourceFilePath toDir = os.path.join(path, framework.destinationDirectory) toPath = os.path.join(toDir, framework.binaryName) - - if not os.path.exists(fromPath): - raise RuntimeError("No file at " + fromPath) - - if os.path.exists(toPath): - return None # Already there - - if not os.path.exists(toDir): - os.makedirs(toDir) - - shutil.copy2(fromPath, toPath) - if verbose >= 3: - print("Copied:", fromPath) - print(" to:", toPath) + + if framework.isDylib(): + if not os.path.exists(fromPath): + raise RuntimeError(f"No file at {fromPath}") + + if os.path.exists(toPath): + return None # Already there + + if not os.path.exists(toDir): + os.makedirs(toDir) + + shutil.copy2(fromPath, toPath) + 
if verbose: + print("Copied:", fromPath) + print(" to:", toPath) + else: + to_dir = os.path.join(path, "Contents", "Frameworks", framework.frameworkName) + if os.path.exists(to_dir): + return None # Already there + + from_dir = framework.frameworkPath + if not os.path.exists(from_dir): + raise RuntimeError(f"No directory at {from_dir}") + + shutil.copytree(from_dir, to_dir, symlinks=True) + if verbose: + print("Copied:", from_dir) + print(" to:", to_dir) + + headers_link = os.path.join(to_dir, "Headers") + if os.path.exists(headers_link): + os.unlink(headers_link) + + headers_dir = os.path.join(to_dir, framework.binaryDirectory, "Headers") + if os.path.exists(headers_dir): + shutil.rmtree(headers_dir) permissions = os.stat(toPath) if not permissions.st_mode & stat.S_IWRITE: os.chmod(toPath, permissions.st_mode | stat.S_IWRITE) - if not framework.isDylib(): # Copy resources for real frameworks - - linkfrom = os.path.join(path, "Contents","Frameworks", framework.frameworkName, "Versions", "Current") - linkto = framework.version - if not os.path.exists(linkfrom): - os.symlink(linkto, linkfrom) - if verbose >= 2: - print("Linked:", linkfrom, "->", linkto) - fromResourcesDir = framework.sourceResourcesDirectory - if os.path.exists(fromResourcesDir): - toResourcesDir = os.path.join(path, framework.destinationResourcesDirectory) - shutil.copytree(fromResourcesDir, toResourcesDir, symlinks=True) - if verbose >= 3: - print("Copied resources:", fromResourcesDir) - print(" to:", toResourcesDir) - fromContentsDir = framework.sourceVersionContentsDirectory - if not os.path.exists(fromContentsDir): - fromContentsDir = framework.sourceContentsDirectory - if os.path.exists(fromContentsDir): - toContentsDir = os.path.join(path, framework.destinationVersionContentsDirectory) - shutil.copytree(fromContentsDir, toContentsDir, symlinks=True) - if verbose >= 3: - print("Copied Contents:", fromContentsDir) - print(" to:", toContentsDir) - elif framework.frameworkName.startswith("libQtGui"): # Copy qt_menu.nib (applies to non-framework layout) - qtMenuNibSourcePath = os.path.join(framework.frameworkDirectory, "Resources", "qt_menu.nib") - qtMenuNibDestinationPath = os.path.join(path, "Contents", "Resources", "qt_menu.nib") - if os.path.exists(qtMenuNibSourcePath) and not os.path.exists(qtMenuNibDestinationPath): - shutil.copytree(qtMenuNibSourcePath, qtMenuNibDestinationPath, symlinks=True) - if verbose >= 3: - print("Copied for libQtGui:", qtMenuNibSourcePath) - print(" to:", qtMenuNibDestinationPath) - return toPath -def deployFrameworks(frameworks: List[FrameworkInfo], bundlePath: str, binaryPath: str, strip: bool, verbose: int, deploymentInfo: Optional[DeploymentInfo] = None) -> DeploymentInfo: +def deployFrameworks(frameworks: list[FrameworkInfo], bundlePath: str, binaryPath: str, strip: bool, verbose: int, deploymentInfo: Optional[DeploymentInfo] = None) -> DeploymentInfo: if deploymentInfo is None: deploymentInfo = DeploymentInfo() @@ -317,16 +293,14 @@ def deployFrameworks(frameworks: List[FrameworkInfo], bundlePath: str, binaryPat framework = frameworks.pop(0) deploymentInfo.deployedFrameworks.append(framework.frameworkName) - if verbose >= 2: - print("Processing", framework.frameworkName, "...") + print("Processing", framework.frameworkName, "...") # Get the Qt path from one of the Qt frameworks if deploymentInfo.qtPath is None and framework.isQtFramework(): deploymentInfo.detectQtPath(framework.frameworkDirectory) if framework.installName.startswith("@executable_path") or 
framework.installName.startswith(bundlePath): - if verbose >= 2: - print(framework.frameworkName, "already deployed, skipping.") + print(framework.frameworkName, "already deployed, skipping.") continue # install_name_tool the new id into the binary @@ -357,128 +331,32 @@ def deployFrameworks(frameworks: List[FrameworkInfo], bundlePath: str, binaryPat def deployFrameworksForAppBundle(applicationBundle: ApplicationBundleInfo, strip: bool, verbose: int) -> DeploymentInfo: frameworks = getFrameworks(applicationBundle.binaryPath, verbose) - if len(frameworks) == 0 and verbose >= 1: - print("Warning: Could not find any external frameworks to deploy in {}.".format(applicationBundle.path)) + if len(frameworks) == 0: + print(f"Warning: Could not find any external frameworks to deploy in {applicationBundle.path}.") return DeploymentInfo() else: return deployFrameworks(frameworks, applicationBundle.path, applicationBundle.binaryPath, strip, verbose) def deployPlugins(appBundleInfo: ApplicationBundleInfo, deploymentInfo: DeploymentInfo, strip: bool, verbose: int): - # Lookup available plugins, exclude unneeded plugins = [] if deploymentInfo.pluginPath is None: return for dirpath, dirnames, filenames in os.walk(deploymentInfo.pluginPath): pluginDirectory = os.path.relpath(dirpath, deploymentInfo.pluginPath) - if pluginDirectory == "designer": - # Skip designer plugins - continue - elif pluginDirectory == "printsupport": - # Skip printsupport plugins - continue - elif pluginDirectory == "imageformats": - # Skip imageformats plugins + + if pluginDirectory not in ['styles', 'platforms']: continue - elif pluginDirectory == "sqldrivers": - # Deploy the sql plugins only if QtSql is in use - if not deploymentInfo.usesFramework("QtSql"): - continue - elif pluginDirectory == "script": - # Deploy the script plugins only if QtScript is in use - if not deploymentInfo.usesFramework("QtScript"): - continue - elif pluginDirectory == "qmltooling" or pluginDirectory == "qml1tooling": - # Deploy the qml plugins only if QtDeclarative is in use - if not deploymentInfo.usesFramework("QtDeclarative"): - continue - elif pluginDirectory == "bearer": - # Deploy the bearer plugins only if QtNetwork is in use - if not deploymentInfo.usesFramework("QtNetwork"): - continue - elif pluginDirectory == "position": - # Deploy the position plugins only if QtPositioning is in use - if not deploymentInfo.usesFramework("QtPositioning"): - continue - elif pluginDirectory == "sensors" or pluginDirectory == "sensorgestures": - # Deploy the sensor plugins only if QtSensors is in use - if not deploymentInfo.usesFramework("QtSensors"): - continue - elif pluginDirectory == "audio" or pluginDirectory == "playlistformats": - # Deploy the audio plugins only if QtMultimedia is in use - if not deploymentInfo.usesFramework("QtMultimedia"): - continue - elif pluginDirectory == "mediaservice": - # Deploy the mediaservice plugins only if QtMultimediaWidgets is in use - if not deploymentInfo.usesFramework("QtMultimediaWidgets"): - continue - elif pluginDirectory == "canbus": - # Deploy the canbus plugins only if QtSerialBus is in use - if not deploymentInfo.usesFramework("QtSerialBus"): - continue - elif pluginDirectory == "webview": - # Deploy the webview plugins only if QtWebView is in use - if not deploymentInfo.usesFramework("QtWebView"): - continue - elif pluginDirectory == "gamepads": - # Deploy the webview plugins only if QtGamepad is in use - if not deploymentInfo.usesFramework("QtGamepad"): - continue - elif pluginDirectory == "geoservices": - # 
Deploy the webview plugins only if QtLocation is in use - if not deploymentInfo.usesFramework("QtLocation"): - continue - elif pluginDirectory == "texttospeech": - # Deploy the texttospeech plugins only if QtTextToSpeech is in use - if not deploymentInfo.usesFramework("QtTextToSpeech"): - continue - elif pluginDirectory == "virtualkeyboard": - # Deploy the virtualkeyboard plugins only if QtVirtualKeyboard is in use - if not deploymentInfo.usesFramework("QtVirtualKeyboard"): - continue - elif pluginDirectory == "sceneparsers": - # Deploy the virtualkeyboard plugins only if Qt3DCore is in use - if not deploymentInfo.usesFramework("Qt3DCore"): - continue - elif pluginDirectory == "renderplugins": - # Deploy the renderplugins plugins only if Qt3DCore is in use - if not deploymentInfo.usesFramework("Qt3DCore"): - continue - elif pluginDirectory == "geometryloaders": - # Deploy the geometryloaders plugins only if Qt3DCore is in use - if not deploymentInfo.usesFramework("Qt3DCore"): - continue for pluginName in filenames: pluginPath = os.path.join(pluginDirectory, pluginName) - if pluginName.endswith("_debug.dylib"): - # Skip debug plugins + + if pluginName.split('.')[0] not in ['libqminimal', 'libqcocoa', 'libqmacstyle']: continue - elif pluginPath == "imageformats/libqsvg.dylib" or pluginPath == "iconengines/libqsvgicon.dylib": - # Deploy the svg plugins only if QtSvg is in use - if not deploymentInfo.usesFramework("QtSvg"): - continue - elif pluginPath == "accessible/libqtaccessiblecompatwidgets.dylib": - # Deploy accessibility for Qt3Support only if the Qt3Support is in use - if not deploymentInfo.usesFramework("Qt3Support"): - continue - elif pluginPath == "graphicssystems/libqglgraphicssystem.dylib": - # Deploy the opengl graphicssystem plugin only if QtOpenGL is in use - if not deploymentInfo.usesFramework("QtOpenGL"): - continue - elif pluginPath == "accessible/libqtaccessiblequick.dylib": - # Deploy the accessible qtquick plugin only if QtQuick is in use - if not deploymentInfo.usesFramework("QtQuick"): - continue - elif pluginPath == "platforminputcontexts/libqtvirtualkeyboardplugin.dylib": - # Deploy the virtualkeyboardplugin plugin only if QtVirtualKeyboard is in use - if not deploymentInfo.usesFramework("QtVirtualKeyboard"): - continue plugins.append((pluginDirectory, pluginName)) for pluginDirectory, pluginName in plugins: - if verbose >= 2: - print("Processing plugin", os.path.join(pluginDirectory, pluginName), "...") + print("Processing plugin", os.path.join(pluginDirectory, pluginName), "...") sourcePath = os.path.join(deploymentInfo.pluginPath, pluginDirectory, pluginName) destinationDirectory = os.path.join(appBundleInfo.pluginPath, pluginDirectory) @@ -487,7 +365,7 @@ def deployPlugins(appBundleInfo: ApplicationBundleInfo, deploymentInfo: Deployme destinationPath = os.path.join(destinationDirectory, pluginName) shutil.copy2(sourcePath, destinationPath) - if verbose >= 3: + if verbose: print("Copied:", sourcePath) print(" to:", destinationPath) @@ -503,146 +381,50 @@ def deployPlugins(appBundleInfo: ApplicationBundleInfo, deploymentInfo: Deployme if dependency.frameworkName not in deploymentInfo.deployedFrameworks: deployFrameworks([dependency], appBundleInfo.path, destinationPath, strip, verbose, deploymentInfo) -qt_conf="""[Paths] -Translations=Resources -Plugins=PlugIns -""" - ap = ArgumentParser(description="""Improved version of macdeployqt. -Outputs a ready-to-deploy app in a folder "dist" and optionally wraps it in a .dmg file. 
+Outputs a ready-to-deploy app in a folder "dist" and optionally wraps it in a .zip file. Note, that the "dist" folder will be deleted before deploying on each run. -Optionally, Qt translation files (.qm) and additional resources can be added to the bundle. - -Also optionally signs the .app bundle; set the CODESIGNARGS environment variable to pass arguments -to the codesign tool. -E.g. CODESIGNARGS='--sign "Developer ID Application: ..." --keychain /encrypted/foo.keychain'""") +Optionally, Qt translation files (.qm) can be added to the bundle.""") ap.add_argument("app_bundle", nargs=1, metavar="app-bundle", help="application bundle to be deployed") -ap.add_argument("-verbose", type=int, nargs=1, default=[1], metavar="<0-3>", help="0 = no output, 1 = error/warning (default), 2 = normal, 3 = debug") +ap.add_argument("appname", nargs=1, metavar="appname", help="name of the app being deployed") +ap.add_argument("-verbose", nargs="?", const=True, help="Output additional debugging information") ap.add_argument("-no-plugins", dest="plugins", action="store_false", default=True, help="skip plugin deployment") ap.add_argument("-no-strip", dest="strip", action="store_false", default=True, help="don't run 'strip' on the binaries") -ap.add_argument("-sign", dest="sign", action="store_true", default=False, help="sign .app bundle with codesign tool") -ap.add_argument("-dmg", nargs="?", const="", metavar="basename", help="create a .dmg disk image; if basename is not specified, a camel-cased version of the app name is used") -ap.add_argument("-fancy", nargs=1, metavar="plist", default=[], help="make a fancy looking disk image using the given plist file with instructions; requires -dmg to work") -ap.add_argument("-add-qt-tr", nargs=1, metavar="languages", default=[], help="add Qt translation files to the bundle's resources; the language list must be separated with commas, not with whitespace") -ap.add_argument("-translations-dir", nargs=1, metavar="path", default=None, help="Path to Qt's translation files") -ap.add_argument("-add-resources", nargs="+", metavar="path", default=[], help="list of additional files or folders to be copied into the bundle's resources; must be the last argument") -ap.add_argument("-volname", nargs=1, metavar="volname", default=[], help="custom volume name for dmg") +ap.add_argument("-translations-dir", nargs=1, metavar="path", default=None, help="Path to Qt's translations. 
Base translations will automatically be added to the bundle's resources.") +ap.add_argument("-zip", nargs="?", const="", metavar="zip", help="create a .zip containing the app bundle") config = ap.parse_args() -verbose = config.verbose[0] +verbose = config.verbose # ------------------------------------------------ app_bundle = config.app_bundle[0] +appname = config.appname[0] if not os.path.exists(app_bundle): - if verbose >= 1: - sys.stderr.write("Error: Could not find app bundle \"{}\"\n".format(app_bundle)) + sys.stderr.write(f"Error: Could not find app bundle \"{app_bundle}\"\n") sys.exit(1) -app_bundle_name = os.path.splitext(os.path.basename(app_bundle))[0] - -# ------------------------------------------------ -translations_dir = None -if config.translations_dir and config.translations_dir[0]: - if os.path.exists(config.translations_dir[0]): - translations_dir = config.translations_dir[0] - else: - if verbose >= 1: - sys.stderr.write("Error: Could not find translation dir \"{}\"\n".format(translations_dir)) - sys.exit(1) -# ------------------------------------------------ - -for p in config.add_resources: - if verbose >= 3: - print("Checking for \"%s\"..." % p) - if not os.path.exists(p): - if verbose >= 1: - sys.stderr.write("Error: Could not find additional resource file \"{}\"\n".format(p)) - sys.exit(1) - -# ------------------------------------------------ - -if len(config.fancy) == 1: - if verbose >= 3: - print("Fancy: Importing plistlib...") - try: - import plistlib - except ImportError: - if verbose >= 1: - sys.stderr.write("Error: Could not import plistlib which is required for fancy disk images.\n") - sys.exit(1) - - p = config.fancy[0] - if verbose >= 3: - print("Fancy: Loading \"{}\"...".format(p)) - if not os.path.exists(p): - if verbose >= 1: - sys.stderr.write("Error: Could not find fancy disk image plist at \"{}\"\n".format(p)) - sys.exit(1) - - try: - fancy = plistlib.readPlist(p) - except: - if verbose >= 1: - sys.stderr.write("Error: Could not parse fancy disk image plist at \"{}\"\n".format(p)) - sys.exit(1) - - try: - assert "window_bounds" not in fancy or (isinstance(fancy["window_bounds"], list) and len(fancy["window_bounds"]) == 4) - assert "background_picture" not in fancy or isinstance(fancy["background_picture"], str) - assert "icon_size" not in fancy or isinstance(fancy["icon_size"], int) - assert "applications_symlink" not in fancy or isinstance(fancy["applications_symlink"], bool) - if "items_position" in fancy: - assert isinstance(fancy["items_position"], dict) - for key, value in fancy["items_position"].items(): - assert isinstance(value, list) and len(value) == 2 and isinstance(value[0], int) and isinstance(value[1], int) - except: - if verbose >= 1: - sys.stderr.write("Error: Bad format of fancy disk image plist at \"{}\"\n".format(p)) - sys.exit(1) - - if "background_picture" in fancy: - bp = fancy["background_picture"] - if verbose >= 3: - print("Fancy: Resolving background picture \"{}\"...".format(bp)) - if not os.path.exists(bp): - bp = os.path.join(os.path.dirname(p), bp) - if not os.path.exists(bp): - if verbose >= 1: - sys.stderr.write("Error: Could not find background picture at \"{}\" or \"{}\"\n".format(fancy["background_picture"], bp)) - sys.exit(1) - else: - fancy["background_picture"] = bp -else: - fancy = None - # ------------------------------------------------ if os.path.exists("dist"): - if verbose >= 2: - print("+ Removing old dist folder +") - + print("+ Removing existing dist folder +") shutil.rmtree("dist") -# 
------------------------------------------------ - -if len(config.volname) == 1: - volname = config.volname[0] -else: - volname = app_bundle_name +if os.path.exists(appname + ".zip"): + print("+ Removing existing .zip +") + os.unlink(appname + ".zip") # ------------------------------------------------ target = os.path.join("dist", "Bitcoin-Qt.app") -if verbose >= 2: - print("+ Copying source bundle +") -if verbose >= 3: +print("+ Copying source bundle +") +if verbose: print(app_bundle, "->", target) os.mkdir("dist") @@ -652,257 +434,74 @@ applicationBundle = ApplicationBundleInfo(target) # ------------------------------------------------ -if verbose >= 2: - print("+ Deploying frameworks +") +print("+ Deploying frameworks +") try: deploymentInfo = deployFrameworksForAppBundle(applicationBundle, config.strip, verbose) if deploymentInfo.qtPath is None: deploymentInfo.qtPath = os.getenv("QTDIR", None) if deploymentInfo.qtPath is None: - if verbose >= 1: - sys.stderr.write("Warning: Could not detect Qt's path, skipping plugin deployment!\n") + sys.stderr.write("Warning: Could not detect Qt's path, skipping plugin deployment!\n") config.plugins = False except RuntimeError as e: - if verbose >= 1: - sys.stderr.write("Error: {}\n".format(str(e))) + sys.stderr.write(f"Error: {str(e)}\n") sys.exit(1) # ------------------------------------------------ if config.plugins: - if verbose >= 2: - print("+ Deploying plugins +") + print("+ Deploying plugins +") try: deployPlugins(applicationBundle, deploymentInfo, config.strip, verbose) except RuntimeError as e: - if verbose >= 1: - sys.stderr.write("Error: {}\n".format(str(e))) + sys.stderr.write(f"Error: {str(e)}\n") sys.exit(1) # ------------------------------------------------ -if len(config.add_qt_tr) == 0: - add_qt_tr = [] -else: - if translations_dir is not None: - qt_tr_dir = translations_dir - else: - if deploymentInfo.qtPath is not None: - qt_tr_dir = os.path.join(deploymentInfo.qtPath, "translations") - else: - sys.stderr.write("Error: Could not find Qt translation path\n") - sys.exit(1) - add_qt_tr = ["qt_{}.qm".format(lng) for lng in config.add_qt_tr[0].split(",")] - for lng_file in add_qt_tr: - p = os.path.join(qt_tr_dir, lng_file) - if verbose >= 3: - print("Checking for \"{}\"...".format(p)) - if not os.path.exists(p): - if verbose >= 1: - sys.stderr.write("Error: Could not find Qt translation file \"{}\"\n".format(lng_file)) - sys.exit(1) - -# ------------------------------------------------ +if config.translations_dir: + if not Path(config.translations_dir[0]).exists(): + sys.stderr.write(f"Error: Could not find translation dir \"{config.translations_dir[0]}\"\n") + sys.exit(1) -if verbose >= 2: - print("+ Installing qt.conf +") +print("+ Adding Qt translations +") -with open(os.path.join(applicationBundle.resourcesPath, "qt.conf"), "wb") as f: - f.write(qt_conf.encode()) +translations = Path(config.translations_dir[0]) -# ------------------------------------------------ +regex = re.compile('qt_[a-z]*(.qm|_[A-Z]*.qm)') -if len(add_qt_tr) > 0 and verbose >= 2: - print("+ Adding Qt translations +") +lang_files = [x for x in translations.iterdir() if regex.match(x.name)] -for lng_file in add_qt_tr: - if verbose >= 3: - print(os.path.join(qt_tr_dir, lng_file), "->", os.path.join(applicationBundle.resourcesPath, lng_file)) - shutil.copy2(os.path.join(qt_tr_dir, lng_file), os.path.join(applicationBundle.resourcesPath, lng_file)) +for file in lang_files: + if verbose: + print(file.as_posix(), "->", os.path.join(applicationBundle.resourcesPath, 
file.name)) + shutil.copy2(file.as_posix(), os.path.join(applicationBundle.resourcesPath, file.name)) # ------------------------------------------------ -if len(config.add_resources) > 0 and verbose >= 2: - print("+ Adding additional resources +") +print("+ Installing qt.conf +") -for p in config.add_resources: - t = os.path.join(applicationBundle.resourcesPath, os.path.basename(p)) - if verbose >= 3: - print(p, "->", t) - if os.path.isdir(p): - shutil.copytree(p, t, symlinks=True) - else: - shutil.copy2(p, t) +qt_conf="""[Paths] +Translations=Resources +Plugins=PlugIns +""" + +with open(os.path.join(applicationBundle.resourcesPath, "qt.conf"), "wb") as f: + f.write(qt_conf.encode()) # ------------------------------------------------ -if config.sign and 'CODESIGNARGS' not in os.environ: - print("You must set the CODESIGNARGS environment variable. Skipping signing.") -elif config.sign: - if verbose >= 1: - print("Code-signing app bundle {}".format(target)) - subprocess.check_call("codesign --force {} {}".format(os.environ['CODESIGNARGS'], target), shell=True) +if platform.system() == "Darwin": + subprocess.check_call(f"codesign --deep --force --sign - {target}", shell=True) # ------------------------------------------------ -if config.dmg is not None: - - def runHDIUtil(verb: str, image_basename: str, **kwargs) -> int: - hdiutil_args = ["hdiutil", verb, image_basename + ".dmg"] - if "capture_stdout" in kwargs: - del kwargs["capture_stdout"] - run = subprocess.check_output - else: - if verbose < 2: - hdiutil_args.append("-quiet") - elif verbose >= 3: - hdiutil_args.append("-verbose") - run = subprocess.check_call - - for key, value in kwargs.items(): - hdiutil_args.append("-" + key) - if value is not True: - hdiutil_args.append(str(value)) - - return run(hdiutil_args, universal_newlines=True) - - if verbose >= 2: - if fancy is None: - print("+ Creating .dmg disk image +") - else: - print("+ Preparing .dmg disk image +") - - if config.dmg != "": - dmg_name = config.dmg - else: - spl = app_bundle_name.split(" ") - dmg_name = spl[0] + "".join(p.capitalize() for p in spl[1:]) - - if fancy is None: - try: - runHDIUtil("create", dmg_name, srcfolder="dist", format="UDBZ", volname=volname, ov=True) - except subprocess.CalledProcessError as e: - sys.exit(e.returncode) - else: - if verbose >= 3: - print("Determining size of \"dist\"...") - size = 0 - for path, dirs, files in os.walk("dist"): - for file in files: - size += os.path.getsize(os.path.join(path, file)) - size += int(size * 0.15) - - if verbose >= 3: - print("Creating temp image for modification...") - try: - runHDIUtil("create", dmg_name + ".temp", srcfolder="dist", format="UDRW", size=size, volname=volname, ov=True) - except subprocess.CalledProcessError as e: - sys.exit(e.returncode) - - if verbose >= 3: - print("Attaching temp image...") - try: - output = runHDIUtil("attach", dmg_name + ".temp", readwrite=True, noverify=True, noautoopen=True, capture_stdout=True) - except subprocess.CalledProcessError as e: - sys.exit(e.returncode) - - m = re.search(r"/Volumes/(.+$)", output) - disk_root = m.group(0) - disk_name = m.group(1) - - if verbose >= 2: - print("+ Applying fancy settings +") - - if "background_picture" in fancy: - bg_path = os.path.join(disk_root, ".background", os.path.basename(fancy["background_picture"])) - os.mkdir(os.path.dirname(bg_path)) - if verbose >= 3: - print(fancy["background_picture"], "->", bg_path) - shutil.copy2(fancy["background_picture"], bg_path) - else: - bg_path = None - - if 
fancy.get("applications_symlink", False): - os.symlink("/Applications", os.path.join(disk_root, "Applications")) - - # The Python appscript package broke with OSX 10.8 and isn't being fixed. - # So we now build up an AppleScript string and use the osascript command - # to make the .dmg file pretty: - appscript = Template( """ - on run argv - tell application "Finder" - tell disk "$disk" - open - set current view of container window to icon view - set toolbar visible of container window to false - set statusbar visible of container window to false - set the bounds of container window to {$window_bounds} - set theViewOptions to the icon view options of container window - set arrangement of theViewOptions to not arranged - set icon size of theViewOptions to $icon_size - $background_commands - $items_positions - close -- close/reopen works around a bug... - open - update without registering applications - delay 5 - eject - end tell - end tell - end run - """) - - itemscript = Template('set position of item "${item}" of container window to {${position}}') - items_positions = [] - if "items_position" in fancy: - for name, position in fancy["items_position"].items(): - params = { "item" : name, "position" : ",".join([str(p) for p in position]) } - items_positions.append(itemscript.substitute(params)) - - params = { - "disk" : volname, - "window_bounds" : "300,300,800,620", - "icon_size" : "96", - "background_commands" : "", - "items_positions" : "\n ".join(items_positions) - } - if "window_bounds" in fancy: - params["window_bounds"] = ",".join([str(p) for p in fancy["window_bounds"]]) - if "icon_size" in fancy: - params["icon_size"] = str(fancy["icon_size"]) - if bg_path is not None: - # Set background file, then call SetFile to make it invisible. - # (note: making it invisible first makes set background picture fail) - bgscript = Template("""set background picture of theViewOptions to file ".background:$bgpic" - do shell script "SetFile -a V /Volumes/$disk/.background/$bgpic" """) - params["background_commands"] = bgscript.substitute({"bgpic" : os.path.basename(bg_path), "disk" : params["disk"]}) - - s = appscript.substitute(params) - if verbose >= 2: - print("Running AppleScript:") - print(s) - - p = subprocess.Popen(['osascript', '-'], stdin=subprocess.PIPE) - p.communicate(input=s.encode('utf-8')) - if p.returncode: - print("Error running osascript.") - - if verbose >= 2: - print("+ Finalizing .dmg disk image +") - time.sleep(5) - - try: - runHDIUtil("convert", dmg_name + ".temp", format="UDBZ", o=dmg_name + ".dmg", ov=True) - except subprocess.CalledProcessError as e: - sys.exit(e.returncode) - - os.unlink(dmg_name + ".temp.dmg") +if config.zip is not None: + shutil.make_archive('{}'.format(appname), format='zip', root_dir='dist', base_dir='Bitcoin-Qt.app') # ------------------------------------------------ -if verbose >= 2: - print("+ Done +") +print("+ Done +") sys.exit(0) diff --git a/contrib/message-capture/message-capture-docs.md b/contrib/message-capture/message-capture-docs.md new file mode 100644 index 0000000000000..730196846134f --- /dev/null +++ b/contrib/message-capture/message-capture-docs.md @@ -0,0 +1,25 @@ +# Per-Peer Message Capture + +## Purpose + +This feature allows for message capture on a per-peer basis. It answers the simple question: "Can I see what messages my node is sending and receiving?" + +## Usage and Functionality + +* Run `bitcoind` with the `-capturemessages` option. +* Look in the `message_capture` folder in your datadir. 
+ * Typically this will be `~/.bitcoin/message_capture`. + * See that there are many folders inside, one for each peer named with its IP address and port. + * Inside each peer's folder there are two `.dat` files: one is for received messages (`msgs_recv.dat`) and the other is for sent messages (`msgs_sent.dat`). +* Run `contrib/message-capture/message-capture-parser.py` with the proper arguments. + * See the `-h` option for help. + * To see all messages, both sent and received, for all peers use: + ``` + ./contrib/message-capture/message-capture-parser.py -o out.json \ + ~/.bitcoin/message_capture/**/*.dat + ``` + * Note: The messages in the given `.dat` files will be interleaved in chronological order. So, giving both received and sent `.dat` files (as above with `*.dat`) will result in all messages being interleaved in chronological order. + * If an output file is not provided (i.e. the `-o` option is not used), then the output prints to `stdout`. +* View the resulting output. + * The output file is `JSON` formatted. + * Suggestion: use `jq` to view the output, with `jq . out.json` diff --git a/contrib/message-capture/message-capture-parser.py b/contrib/message-capture/message-capture-parser.py new file mode 100755 index 0000000000000..0f409717d4c8a --- /dev/null +++ b/contrib/message-capture/message-capture-parser.py @@ -0,0 +1,214 @@ +#!/usr/bin/env python3 +# Copyright (c) 2020-2022 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +"""Parse message capture binary files. To be used in conjunction with -capturemessages.""" + +import argparse +import os +import shutil +import sys +from io import BytesIO +import json +from pathlib import Path +from typing import Any, Optional + +sys.path.append(os.path.join(os.path.dirname(__file__), '../../test/functional')) + +from test_framework.messages import ser_uint256 # noqa: E402 +from test_framework.p2p import MESSAGEMAP # noqa: E402 + +TIME_SIZE = 8 +LENGTH_SIZE = 4 +MSGTYPE_SIZE = 12 + +# The test framework classes store hashes as large ints in many cases. +# These are variables of type uint256 in core. +# There isn't a way to distinguish between a large int and a large int that is actually a blob of bytes. +# As such, they are itemized here. +# Any variables with these names that are of type int are actually uint256 variables.
+# (These can be easily found by looking for calls to deser_uint256, deser_uint256_vector, and uint256_from_str in messages.py) +HASH_INTS = [ + "blockhash", + "block_hash", + "hash", + "hashMerkleRoot", + "hashPrevBlock", + "hashstop", + "prev_header", + "sha256", + "stop_hash", +] + +HASH_INT_VECTORS = [ + "hashes", + "headers", + "vHave", + "vHash", +] + + +class ProgressBar: + def __init__(self, total: float): + self.total = total + self.running = 0 + + def set_progress(self, progress: float): + cols = shutil.get_terminal_size()[0] + if cols <= 12: + return + max_blocks = cols - 9 + num_blocks = int(max_blocks * progress) + print('\r[ {}{} ] {:3.0f}%' + .format('#' * num_blocks, + ' ' * (max_blocks - num_blocks), + progress * 100), + end ='') + + def update(self, more: float): + self.running += more + self.set_progress(self.running / self.total) + + +def to_jsonable(obj: Any) -> Any: + if hasattr(obj, "__dict__"): + return obj.__dict__ + elif hasattr(obj, "__slots__"): + ret = {} # type: Any + for slot in obj.__slots__: + val = getattr(obj, slot, None) + if slot in HASH_INTS and isinstance(val, int): + ret[slot] = ser_uint256(val).hex() + elif slot in HASH_INT_VECTORS and all(isinstance(a, int) for a in val): + ret[slot] = [ser_uint256(a).hex() for a in val] + else: + ret[slot] = to_jsonable(val) + return ret + elif isinstance(obj, list): + return [to_jsonable(a) for a in obj] + elif isinstance(obj, bytes): + return obj.hex() + else: + return obj + + +def process_file(path: str, messages: list[Any], recv: bool, progress_bar: Optional[ProgressBar]) -> None: + with open(path, 'rb') as f_in: + if progress_bar: + bytes_read = 0 + + while True: + if progress_bar: + # Update progress bar + diff = f_in.tell() - bytes_read - 1 + progress_bar.update(diff) + bytes_read = f_in.tell() - 1 + + # Read the Header + tmp_header_raw = f_in.read(TIME_SIZE + LENGTH_SIZE + MSGTYPE_SIZE) + if not tmp_header_raw: + break + tmp_header = BytesIO(tmp_header_raw) + time = int.from_bytes(tmp_header.read(TIME_SIZE), "little") # type: int + msgtype = tmp_header.read(MSGTYPE_SIZE).split(b'\x00', 1)[0] # type: bytes + length = int.from_bytes(tmp_header.read(LENGTH_SIZE), "little") # type: int + + # Start converting the message to a dictionary + msg_dict = {} + msg_dict["direction"] = "recv" if recv else "sent" + msg_dict["time"] = time + msg_dict["size"] = length # "size" is less readable here, but more readable in the output + + msg_ser = BytesIO(f_in.read(length)) + + # Determine message type + if msgtype not in MESSAGEMAP: + # Unrecognized message type + try: + msgtype_tmp = msgtype.decode() + if not msgtype_tmp.isprintable(): + raise UnicodeDecodeError + msg_dict["msgtype"] = msgtype_tmp + except UnicodeDecodeError: + msg_dict["msgtype"] = "UNREADABLE" + msg_dict["body"] = msg_ser.read().hex() + msg_dict["error"] = "Unrecognized message type." + messages.append(msg_dict) + print(f"WARNING - Unrecognized message type {msgtype} in {path}", file=sys.stderr) + continue + + # Deserialize the message + msg = MESSAGEMAP[msgtype]() + msg_dict["msgtype"] = msgtype.decode() + + try: + msg.deserialize(msg_ser) + except KeyboardInterrupt: + raise + except Exception: + # Unable to deserialize message body + msg_ser.seek(0, os.SEEK_SET) + msg_dict["body"] = msg_ser.read().hex() + msg_dict["error"] = "Unable to deserialize message." 
+ messages.append(msg_dict) + print(f"WARNING - Unable to deserialize message in {path}", file=sys.stderr) + continue + + # Convert body of message into a jsonable object + if length: + msg_dict["body"] = to_jsonable(msg) + messages.append(msg_dict) + + if progress_bar: + # Update the progress bar to the end of the current file + # in case we exited the loop early + f_in.seek(0, os.SEEK_END) # Go to end of file + diff = f_in.tell() - bytes_read - 1 + progress_bar.update(diff) + + +def main(): + parser = argparse.ArgumentParser( + description=__doc__, + epilog="EXAMPLE \n\t{0} -o out.json /message_capture/**/*.dat".format(sys.argv[0]), + formatter_class=argparse.RawTextHelpFormatter) + parser.add_argument( + "capturepaths", + nargs='+', + help="binary message capture files to parse.") + parser.add_argument( + "-o", "--output", + help="output file. If unset print to stdout") + parser.add_argument( + "-n", "--no-progress-bar", + action='store_true', + help="disable the progress bar. Automatically set if the output is not a terminal") + args = parser.parse_args() + capturepaths = [Path.cwd() / Path(capturepath) for capturepath in args.capturepaths] + output = Path.cwd() / Path(args.output) if args.output else False + use_progress_bar = (not args.no_progress_bar) and sys.stdout.isatty() + + messages = [] # type: list[Any] + if use_progress_bar: + total_size = sum(capture.stat().st_size for capture in capturepaths) + progress_bar = ProgressBar(total_size) + else: + progress_bar = None + + for capture in capturepaths: + process_file(str(capture), messages, "recv" in capture.stem, progress_bar) + + messages.sort(key=lambda msg: msg['time']) + + if use_progress_bar: + progress_bar.set_progress(1) + + jsonrep = json.dumps(messages) + if output: + with open(str(output), 'w+', encoding="utf8") as f_out: + f_out.write(jsonrep) + else: + print(jsonrep) + +if __name__ == "__main__": + main() diff --git a/contrib/qos/tc.sh b/contrib/qos/tc.sh old mode 100644 new mode 100755 index 8408545a218e4..7ebcbf225175a --- a/contrib/qos/tc.sh +++ b/contrib/qos/tc.sh @@ -1,6 +1,6 @@ #!/usr/bin/env bash # -# Copyright (c) 2017-2019 The Bitcoin Core developers +# Copyright (c) 2017-2021 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. @@ -16,7 +16,7 @@ LOCALNET_V4="192.168.0.0/16" #defines the IPv6 address space for which you wish to disable rate limiting LOCALNET_V6="fe80::/10" -#delete existing rules +#delete existing rules ('Error: Cannot delete qdisc with handle of zero.' means there weren't any.) tc qdisc del dev ${IF} root #add root class diff --git a/contrib/seeds/.gitignore b/contrib/seeds/.gitignore new file mode 100644 index 0000000000000..4e0ee74a0b4f7 --- /dev/null +++ b/contrib/seeds/.gitignore @@ -0,0 +1,3 @@ +seeds_main.txt +seeds_test.txt +asmap-filled.dat diff --git a/contrib/seeds/README.md b/contrib/seeds/README.md index 502c20d0d678c..10945e5b6852b 100644 --- a/contrib/seeds/README.md +++ b/contrib/seeds/README.md @@ -4,18 +4,24 @@ Utility to generate the seeds.txt list that is compiled into the client (see [src/chainparamsseeds.h](/src/chainparamsseeds.h) and other utilities in [contrib/seeds](/contrib/seeds)). 
Be sure to update `PATTERN_AGENT` in `makeseeds.py` to include the current version, -and remove old versions as necessary (at a minimum when GetDesirableServiceFlags +and remove old versions as necessary (at a minimum when SeedsServiceFlags() changes its default return value, as those are the services which seeds are added to addrman with). -The seeds compiled into the release are created from sipa's DNS seed data, like this: +The seeds compiled into the release are created from sipa's, achow101's and luke-jr's +DNS seed, virtu's crawler, and asmap community AS map data. Run the following commands +from the `/contrib/seeds` directory: - curl -s http://bitcoin.sipa.be/seeds.txt.gz | gzip -dc > seeds_main.txt - python3 makeseeds.py < seeds_main.txt > nodes_main.txt - python3 generate-seeds.py . > ../../src/chainparamsseeds.h - -## Dependencies - -Ubuntu: - - sudo apt-get install python3-dnspython +``` +curl https://bitcoin.sipa.be/seeds.txt.gz | gzip -dc > seeds_main.txt +curl https://mainnet.achownodes.xyz/seeds.txt.gz | gzip -dc >> seeds_main.txt +curl https://21.ninja/seeds.txt.gz | gzip -dc >> seeds_main.txt +curl https://luke.dashjr.org/programs/bitcoin/files/charts/seeds.txt >> seeds_main.txt +curl https://testnet.achownodes.xyz/seeds.txt.gz | gzip -dc > seeds_test.txt +curl https://raw.githubusercontent.com/asmap/asmap-data/main/latest_asmap.dat > asmap-filled.dat +python3 makeseeds.py -a asmap-filled.dat -s seeds_main.txt > nodes_main.txt +python3 makeseeds.py -a asmap-filled.dat -s seeds_test.txt > nodes_test.txt +# TODO: Uncomment when a seeder publishes seeds.txt.gz for testnet4 +# python3 makeseeds.py -a asmap-filled.dat -s seeds_testnet4.txt -m 30000 > nodes_testnet4.txt +python3 generate-seeds.py . > ../../src/chainparamsseeds.h +``` diff --git a/contrib/seeds/generate-seeds.py b/contrib/seeds/generate-seeds.py index 7630a7a4fa255..72dd5d28cc51b 100755 --- a/contrib/seeds/generate-seeds.py +++ b/contrib/seeds/generate-seeds.py @@ -1,31 +1,27 @@ #!/usr/bin/env python3 -# Copyright (c) 2014-2017 Wladimir J. van der Laan +# Copyright (c) 2014-2021 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. ''' -Script to generate list of seed nodes for chainparams.cpp. +Script to generate list of seed nodes for kernel/chainparams.cpp. -This script expects two text files in the directory that is passed as an +This script expects three text files in the directory that is passed as an argument: nodes_main.txt nodes_test.txt + nodes_testnet4.txt These files must consist of lines in the format - <ip> <ip>:<port> - [<ipv6>] [<ipv6>]:<port> - <onion>.onion - 0xDDBBCCAA (IPv4 little-endian old pnSeeds format) + <onion>.onion:<port> + <i2p>.b32.i2p:<port> The output will be two data structures with the peers in binary format: - static SeedSpec6 pnSeed6_main[]={ - ... - } - static SeedSpec6 pnSeed6_test[]={ + static const uint8_t chainparams_seed_{main,test}[]={ ...
} @@ -33,25 +29,39 @@ ''' from base64 import b32decode -from binascii import a2b_hex +from enum import Enum import sys import os import re -# ipv4 in ipv6 prefix -pchIPv4 = bytearray([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff]) -# tor-specific ipv6 prefix -pchOnionCat = bytearray([0xFD,0x87,0xD8,0x7E,0xEB,0x43]) - -def name_to_ipv6(addr): - if len(addr)>6 and addr.endswith('.onion'): +class BIP155Network(Enum): + IPV4 = 1 + IPV6 = 2 + TORV2 = 3 # no longer supported + TORV3 = 4 + I2P = 5 + CJDNS = 6 + +def name_to_bip155(addr): + '''Convert address string to BIP155 (networkID, addr) tuple.''' + if addr.endswith('.onion'): vchAddr = b32decode(addr[0:-6], True) - if len(vchAddr) != 16-len(pchOnionCat): + if len(vchAddr) == 35: + assert vchAddr[34] == 3 + return (BIP155Network.TORV3, vchAddr[:32]) + elif len(vchAddr) == 10: + return (BIP155Network.TORV2, vchAddr) + else: raise ValueError('Invalid onion %s' % vchAddr) - return pchOnionCat + vchAddr + elif addr.endswith('.b32.i2p'): + vchAddr = b32decode(addr[0:-8] + '====', True) + if len(vchAddr) == 32: + return (BIP155Network.I2P, vchAddr) + else: + raise ValueError(f'Invalid I2P {vchAddr}') elif '.' in addr: # IPv4 - return pchIPv4 + bytearray((int(x) for x in addr.split('.'))) - elif ':' in addr: # IPv6 + return (BIP155Network.IPV4, bytes((int(x) for x in addr.split('.')))) + elif ':' in addr: # IPv6 or CJDNS sub = [[], []] # prefix, suffix x = 0 addr = addr.split(':') @@ -60,20 +70,26 @@ def name_to_ipv6(addr): if i == 0 or i == (len(addr)-1): # skip empty component at beginning or end continue x += 1 # :: skips to suffix - assert(x < 2) + assert x < 2 else: # two bytes per component val = int(comp, 16) sub[x].append(val >> 8) sub[x].append(val & 0xff) nullbytes = 16 - len(sub[0]) - len(sub[1]) - assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0)) - return bytearray(sub[0] + ([0] * nullbytes) + sub[1]) - elif addr.startswith('0x'): # IPv4-in-little-endian - return pchIPv4 + bytearray(reversed(a2b_hex(addr[2:]))) + assert (x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0) + addr_bytes = bytes(sub[0] + ([0] * nullbytes) + sub[1]) + if addr_bytes[0] == 0xfc: + # Assume that seeds with fc00::/8 addresses belong to CJDNS, + # not to the publicly unroutable "Unique Local Unicast" network, see + # RFC4193: https://datatracker.ietf.org/doc/html/rfc4193#section-8 + return (BIP155Network.CJDNS, addr_bytes) + else: + return (BIP155Network.IPV6, addr_bytes) else: raise ValueError('Could not parse address %s' % addr) -def parse_spec(s, defaultport): +def parse_spec(s): + '''Convert endpoint string to BIP155 (networkID, addr, port) tuple.''' match = re.match(r'\[([0-9a-fA-F:]+)\](?::([0-9]+))?$', s) if match: # ipv6 host = match.group(1) @@ -85,17 +101,42 @@ def parse_spec(s, defaultport): (host,_,port) = s.partition(':') if not port: - port = defaultport + port = 0 else: port = int(port) - host = name_to_ipv6(host) - - return (host,port) + host = name_to_bip155(host) -def process_nodes(g, f, structname, defaultport): - g.write('static SeedSpec6 %s[] = {\n' % structname) - first = True + if host[0] == BIP155Network.TORV2: + return None # TORV2 is no longer supported, so we ignore it + else: + return host + (port, ) + +def ser_compact_size(l): + r = b"" + if l < 253: + r = l.to_bytes(1, "little") + elif l < 0x10000: + r = (253).to_bytes(1, "little") + l.to_bytes(2, "little") + elif l < 0x100000000: + r = (254).to_bytes(1, "little") + l.to_bytes(4, "little") + else: + r = (255).to_bytes(1, "little") + l.to_bytes(8, "little") + return 
r + +def bip155_serialize(spec): + ''' + Serialize (networkID, addr, port) tuple to BIP155 binary format. + ''' + r = b"" + r += spec[0].value.to_bytes(1, "little") + r += ser_compact_size(len(spec[1])) + r += spec[1] + r += spec[2].to_bytes(2, "big") + return r + +def process_nodes(g, f, structname): + g.write('static const uint8_t %s[] = {\n' % structname) for line in f: comment = line.find('#') if comment != -1: @@ -103,14 +144,14 @@ def process_nodes(g, f, structname, defaultport): line = line.strip() if not line: continue - if not first: - g.write(',\n') - first = False - (host,port) = parse_spec(line, defaultport) - hoststr = ','.join(('0x%02x' % b) for b in host) - g.write(' {{%s}, %i}' % (hoststr, port)) - g.write('\n};\n') + spec = parse_spec(line) + if spec is None: # ignore this entry (e.g. no longer supported addresses like TORV2) + continue + blob = bip155_serialize(spec) + hoststr = ','.join(('0x%02x' % b) for b in blob) + g.write(f' {hoststr},\n') + g.write('};\n') def main(): if len(sys.argv)<2: @@ -124,14 +165,16 @@ def main(): g.write(' * List of fixed seed nodes for the bitcoin network\n') g.write(' * AUTOGENERATED by contrib/seeds/generate-seeds.py\n') g.write(' *\n') - g.write(' * Each line contains a 16-byte IPv6 address and a port.\n') - g.write(' * IPv4 as well as onion addresses are wrapped inside an IPv6 address accordingly.\n') + g.write(' * Each line contains a BIP155 serialized (networkID, addr, port) tuple.\n') g.write(' */\n') with open(os.path.join(indir,'nodes_main.txt'), 'r', encoding="utf8") as f: - process_nodes(g, f, 'pnSeed6_main', 8333) + process_nodes(g, f, 'chainparams_seed_main') g.write('\n') with open(os.path.join(indir,'nodes_test.txt'), 'r', encoding="utf8") as f: - process_nodes(g, f, 'pnSeed6_test', 18333) + process_nodes(g, f, 'chainparams_seed_test') + g.write('\n') + with open(os.path.join(indir,'nodes_testnet4.txt'), 'r', encoding="utf8") as f: + process_nodes(g, f, 'chainparams_seed_testnet4') g.write('#endif // BITCOIN_CHAINPARAMSSEEDS_H\n') if __name__ == '__main__': diff --git a/contrib/seeds/makeseeds.py b/contrib/seeds/makeseeds.py index e8698994f1c9d..0f22046625e40 100755 --- a/contrib/seeds/makeseeds.py +++ b/contrib/seeds/makeseeds.py @@ -1,31 +1,37 @@ #!/usr/bin/env python3 -# Copyright (c) 2013-2020 The Bitcoin Core developers +# Copyright (c) 2013-2022 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. # # Generate seeds.txt from Pieter's DNS seeder # +import argparse +import collections +import ipaddress +from pathlib import Path +import random import re import sys -import dns.resolver -import collections +from typing import Union -NSEEDS=512 +asmap_dir = Path(__file__).parent.parent / "asmap" +sys.path.append(str(asmap_dir)) +from asmap import ASMap, net_to_prefix # noqa: E402 -MAX_SEEDS_PER_ASN=2 - -MIN_BLOCKS = 337600 +NSEEDS=512 -# These are hosts that have been observed to be behaving strangely (e.g. -# aggressively connecting to every node). 
-with open("suspicious_hosts.txt", mode="r", encoding="utf-8") as f: - SUSPICIOUS_HOSTS = {s.strip() for s in f if s.strip()} +MAX_SEEDS_PER_ASN = { + 'ipv4': 2, + 'ipv6': 10, +} +MIN_BLOCKS = 840000 PATTERN_IPV4 = re.compile(r"^((\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})):(\d+)$") PATTERN_IPV6 = re.compile(r"^\[([0-9a-z:]+)\]:(\d+)$") -PATTERN_ONION = re.compile(r"^([abcdefghijklmnopqrstuvwxyz234567]{16}\.onion):(\d+)$") +PATTERN_ONION = re.compile(r"^([a-z2-7]{56}\.onion):(\d+)$") +PATTERN_I2P = re.compile(r"^([a-z2-7]{52}\.b32.i2p):(\d+)$") PATTERN_AGENT = re.compile( r"^/Satoshi:(" r"0.14.(0|1|2|3|99)|" @@ -33,14 +39,32 @@ r"0.16.(0|1|2|3|99)|" r"0.17.(0|0.1|1|2|99)|" r"0.18.(0|1|99)|" - r"0.19.(0|1|99)|" - r"0.20.99" + r"0.19.(0|1|2|99)|" + r"0.20.(0|1|2|99)|" + r"0.21.(0|1|2|99)|" + r"22.(0|1|99).0|" + r"23.(0|1|99).0|" + r"24.(0|1|2|99).(0|1)|" + r"25.(0|1|2|99).0|" + r"26.(0|1|99).0|" + r"27.(0|1|99).0|" + r"28.(0|99).0|" r")") -def parseline(line): +def parseline(line: str) -> Union[dict, None]: + """ Parses a line from `seeds_main.txt` into a dictionary of details for that line. + or `None`, if the line could not be parsed. + """ + if line.startswith('#'): + # Ignore line that starts with comment + return None sline = line.split() if len(sline) < 11: - return None + # line too short to be valid, skip it. + return None + # Skip bad results. + if int(sline[1]) == 0: + return None m = PATTERN_IPV4.match(sline[0]) sortkey = None ip = None @@ -49,7 +73,13 @@ def parseline(line): if m is None: m = PATTERN_ONION.match(sline[0]) if m is None: - return None + m = PATTERN_I2P.match(sline[0]) + if m is None: + return None + else: + net = 'i2p' + ipstr = sortkey = m.group(1) + port = int(m.group(2)) else: net = 'onion' ipstr = sortkey = m.group(1) @@ -59,6 +89,8 @@ def parseline(line): if m.group(1) in ['::']: # Not interested in localhost return None ipstr = m.group(1) + if ipstr.startswith("fc"): # cjdns looks like ipv6 but always begins with fc + net = "cjdns" sortkey = ipstr # XXX parse IPv6 into number, could use name_to_ipv6 from generate-seeds port = int(m.group(2)) else: @@ -74,9 +106,6 @@ def parseline(line): sortkey = ip ipstr = m.group(1) port = int(m.group(6)) - # Skip bad results. - if sline[1] == 0: - return None # Extract uptime %. uptime30 = float(sline[7][:-1]) # Extract Unix timestamp of last success. @@ -104,124 +133,134 @@ def parseline(line): 'sortkey': sortkey, } -def dedup(ips): - '''deduplicate by address,port''' +def dedup(ips: list[dict]) -> list[dict]: + """ Remove duplicates from `ips` where multiple ips share address and port. """ d = {} for ip in ips: d[ip['ip'],ip['port']] = ip return list(d.values()) -def filtermultiport(ips): - '''Filter out hosts with more nodes per IP''' +def filtermultiport(ips: list[dict]) -> list[dict]: + """ Filter out hosts with more nodes per IP""" hist = collections.defaultdict(list) for ip in ips: hist[ip['sortkey']].append(ip) return [value[0] for (key,value) in list(hist.items()) if len(value)==1] -def lookup_asn(net, ip): - ''' - Look up the asn for an IP (4 or 6) address by querying cymru.com, or None - if it could not be found. - ''' - try: - if net == 'ipv4': - ipaddr = ip - prefix = '.origin' - else: # http://www.team-cymru.com/IP-ASN-mapping.html - res = str() # 2001:4860:b002:23::68 - for nb in ip.split(':')[:4]: # pick the first 4 nibbles - for c in nb.zfill(4): # right padded with '0' - res += c + '.' 
# 2001 4860 b002 0023 - ipaddr = res.rstrip('.') # 2.0.0.1.4.8.6.0.b.0.0.2.0.0.2.3 - prefix = '.origin6' - - asn = int([x.to_text() for x in dns.resolver.query('.'.join( - reversed(ipaddr.split('.'))) + prefix + '.asn.cymru.com', - 'TXT').response.answer][0].split('\"')[1].split(' ')[0]) - return asn - except Exception: - sys.stderr.write('ERR: Could not resolve ASN for "' + ip + '"\n') - return None - # Based on Greg Maxwell's seed_filter.py -def filterbyasn(ips, max_per_asn, max_per_net): +def filterbyasn(asmap: ASMap, ips: list[dict], max_per_asn: dict, max_per_net: int) -> list[dict]: + """ Prunes `ips` by + (a) trimming ips to have at most `max_per_net` ips from each net (e.g. ipv4, ipv6); and + (b) trimming ips to have at most `max_per_asn` ips from each asn in each net. + """ # Sift out ips by type ips_ipv46 = [ip for ip in ips if ip['net'] in ['ipv4', 'ipv6']] ips_onion = [ip for ip in ips if ip['net'] == 'onion'] + ips_i2p = [ip for ip in ips if ip['net'] == 'i2p'] + ips_cjdns = [ip for ip in ips if ip["net"] == "cjdns"] # Filter IPv46 by ASN, and limit to max_per_net per network result = [] - net_count = collections.defaultdict(int) - asn_count = collections.defaultdict(int) - for ip in ips_ipv46: + net_count: dict[str, int] = collections.defaultdict(int) + asn_count: dict[int, int] = collections.defaultdict(int) + + for i, ip in enumerate(ips_ipv46): if net_count[ip['net']] == max_per_net: + # do not add this ip as we already have too many + # ips from this network continue - asn = lookup_asn(ip['net'], ip['ip']) - if asn is None or asn_count[asn] == max_per_asn: + asn = asmap.lookup(net_to_prefix(ipaddress.ip_network(ip['ip']))) + if not asn or asn_count[ip['net'], asn] == max_per_asn[ip['net']]: + # do not add this ip as we already have too many + # ips from this ASN on this network continue - asn_count[asn] += 1 + asn_count[ip['net'], asn] += 1 net_count[ip['net']] += 1 + ip['asn'] = asn result.append(ip) # Add back Onions (up to max_per_net) result.extend(ips_onion[0:max_per_net]) + result.extend(ips_i2p[0:max_per_net]) + result.extend(ips_cjdns[0:max_per_net]) return result -def ip_stats(ips): - hist = collections.defaultdict(int) +def ip_stats(ips: list[dict]) -> str: + """ Format and return pretty string from `ips`.
""" + hist: dict[str, int] = collections.defaultdict(int) for ip in ips: if ip is not None: hist[ip['net']] += 1 - return '%6d %6d %6d' % (hist['ipv4'], hist['ipv6'], hist['onion']) + return f"{hist['ipv4']:6d} {hist['ipv6']:6d} {hist['onion']:6d} {hist['i2p']:6d} {hist['cjdns']:6d}" + +def parse_args(): + argparser = argparse.ArgumentParser(description='Generate a list of bitcoin node seed ip addresses.') + argparser.add_argument("-a","--asmap", help='the location of the asmap asn database file (required)', required=True) + argparser.add_argument("-s","--seeds", help='the location of the DNS seeds file (required)', required=True) + argparser.add_argument("-m", "--minblocks", help="The minimum number of blocks each node must have", default=MIN_BLOCKS, type=int) + return argparser.parse_args() def main(): - lines = sys.stdin.readlines() + args = parse_args() + + print(f'Loading asmap database "{args.asmap}"…', end='', file=sys.stderr, flush=True) + with open(args.asmap, 'rb') as f: + asmap = ASMap.from_binary(f.read()) + print('Done.', file=sys.stderr) + + print('Loading and parsing DNS seeds…', end='', file=sys.stderr, flush=True) + with open(args.seeds, 'r', encoding='utf8') as f: + lines = f.readlines() ips = [parseline(line) for line in lines] + random.shuffle(ips) + print('Done.', file=sys.stderr) - print('\x1b[7m IPv4 IPv6 Onion Pass \x1b[0m', file=sys.stderr) - print('%s Initial' % (ip_stats(ips)), file=sys.stderr) + print('\x1b[7m IPv4 IPv6 Onion I2P CJDNS Pass \x1b[0m', file=sys.stderr) + print(f'{ip_stats(ips):s} Initial', file=sys.stderr) # Skip entries with invalid address. ips = [ip for ip in ips if ip is not None] - print('%s Skip entries with invalid address' % (ip_stats(ips)), file=sys.stderr) + print(f'{ip_stats(ips):s} Skip entries with invalid address', file=sys.stderr) # Skip duplicates (in case multiple seeds files were concatenated) ips = dedup(ips) - print('%s After removing duplicates' % (ip_stats(ips)), file=sys.stderr) - # Skip entries from suspicious hosts. - ips = [ip for ip in ips if ip['ip'] not in SUSPICIOUS_HOSTS] - print('%s Skip entries from suspicious hosts' % (ip_stats(ips)), file=sys.stderr) + print(f'{ip_stats(ips):s} After removing duplicates', file=sys.stderr) # Enforce minimal number of blocks. - ips = [ip for ip in ips if ip['blocks'] >= MIN_BLOCKS] - print('%s Enforce minimal number of blocks' % (ip_stats(ips)), file=sys.stderr) + ips = [ip for ip in ips if ip['blocks'] >= args.minblocks] + print(f'{ip_stats(ips):s} Enforce minimal number of blocks', file=sys.stderr) # Require service bit 1. ips = [ip for ip in ips if (ip['service'] & 1) == 1] - print('%s Require service bit 1' % (ip_stats(ips)), file=sys.stderr) - # Require at least 50% 30-day uptime for clearnet, 10% for onion. + print(f'{ip_stats(ips):s} Require service bit 1', file=sys.stderr) + # Require at least 50% 30-day uptime for clearnet, onion and i2p; 10% for cjdns req_uptime = { 'ipv4': 50, 'ipv6': 50, - 'onion': 10, + 'onion': 50, + 'i2p': 50, + 'cjdns': 10, } ips = [ip for ip in ips if ip['uptime'] > req_uptime[ip['net']]] - print('%s Require minimum uptime' % (ip_stats(ips)), file=sys.stderr) + print(f'{ip_stats(ips):s} Require minimum uptime', file=sys.stderr) # Require a known and recent user agent. 
ips = [ip for ip in ips if PATTERN_AGENT.match(ip['agent'])] - print('%s Require a known and recent user agent' % (ip_stats(ips)), file=sys.stderr) + print(f'{ip_stats(ips):s} Require a known and recent user agent', file=sys.stderr) # Sort by availability (and use last success as tie breaker) ips.sort(key=lambda x: (x['uptime'], x['lastsuccess'], x['ip']), reverse=True) # Filter out hosts with multiple bitcoin ports, these are likely abusive ips = filtermultiport(ips) - print('%s Filter out hosts with multiple bitcoin ports' % (ip_stats(ips)), file=sys.stderr) + print(f'{ip_stats(ips):s} Filter out hosts with multiple bitcoin ports', file=sys.stderr) # Look up ASNs and limit results, both per ASN and globally. - ips = filterbyasn(ips, MAX_SEEDS_PER_ASN, NSEEDS) - print('%s Look up ASNs and limit results per ASN and per net' % (ip_stats(ips)), file=sys.stderr) + ips = filterbyasn(asmap, ips, MAX_SEEDS_PER_ASN, NSEEDS) + print(f'{ip_stats(ips):s} Look up ASNs and limit results per ASN and per net', file=sys.stderr) # Sort the results by IP address (for deterministic output). ips.sort(key=lambda x: (x['net'], x['sortkey'])) for ip in ips: - if ip['net'] == 'ipv6': - print('[%s]:%i' % (ip['ip'], ip['port'])) + if ip['net'] == 'ipv6' or ip["net"] == "cjdns": + print(f"[{ip['ip']}]:{ip['port']}", end="") else: - print('%s:%i' % (ip['ip'], ip['port'])) + print(f"{ip['ip']}:{ip['port']}", end="") + if 'asn' in ip: + print(f" # AS{ip['asn']}", end="") + print() if __name__ == '__main__': main() diff --git a/contrib/seeds/nodes_main.txt b/contrib/seeds/nodes_main.txt index 58f6ad10b5a8a..d68a5b8d57773 100644 --- a/contrib/seeds/nodes_main.txt +++ b/contrib/seeds/nodes_main.txt @@ -1,747 +1,2056 @@ -2.39.173.126:8333 -2.57.38.133:8333 -2.92.39.39:15426 -2.230.146.163:8333 -5.2.74.175:8333 -5.8.18.29:8333 -5.39.222.39:8333 -5.103.137.146:9333 -5.128.87.126:8333 -5.149.250.76:8333 -5.182.39.200:8333 -5.187.55.242:8333 -5.188.62.24:8333 -5.188.62.33:8333 -5.188.187.130:8333 -5.189.153.179:8333 -5.198.20.227:8333 -5.199.133.193:8333 -5.254.82.130:8333 -13.237.147.15:8333 -18.27.79.17:8333 -20.184.15.116:8433 -23.17.160.159:8333 -23.175.0.212:8333 -23.245.24.154:8333 -24.76.122.108:8333 -24.96.73.156:8333 -24.96.125.57:8333 -24.155.196.27:8333 -24.203.88.167:8333 -24.233.245.188:8333 -24.246.31.205:8333 -31.6.98.94:8333 -31.14.201.156:8333 -31.25.241.224:8335 -31.43.140.190:8333 -31.134.121.223:8333 -31.173.48.61:8333 -34.203.169.172:8333 -35.178.31.4:8333 -35.185.172.62:8333 -35.206.171.89:8333 -35.208.87.203:8333 -37.61.219.34:8333 -37.143.210.19:8333 -37.143.211.83:8333 -37.235.128.11:8333 -37.252.190.88:8333 -38.102.134.85:8333 -39.109.0.150:8333 -42.200.72.205:8333 -43.229.132.102:8333 -45.36.184.6:8333 -45.58.49.35:8333 -45.76.18.47:8333 -45.115.239.108:8333 -46.23.87.218:8333 -46.28.132.34:8333 -46.32.50.98:8333 -46.36.97.10:8333 -46.38.237.108:8333 -46.39.129.82:8333 -46.160.195.121:8333 -46.166.162.45:20001 -46.188.30.118:8333 -46.229.238.187:8333 -46.254.217.169:8333 -47.52.114.198:8885 -47.88.84.126:8333 -47.108.29.152:8333 -47.108.30.165:8333 -47.222.103.234:8333 -49.245.50.224:8333 -50.53.250.162:8333 -50.225.198.67:6000 -51.154.60.34:8333 -54.242.17.7:8333 -58.146.222.198:8333 -58.158.0.86:8333 -59.149.205.197:8333 -60.251.129.61:8336 -61.155.5.4:8333 -62.45.4.139:8333 -62.97.244.242:8333 -62.109.18.23:8333 -62.133.194.156:8333 -62.138.0.217:8333 -62.152.58.16:9421 -63.143.34.98:8333 -63.211.111.122:8333 -63.224.249.240:8333 -64.182.119.36:8333 -64.229.105.111:8333 -65.27.104.112:8333 
-65.183.76.73:8333 -66.151.242.154:8335 -66.206.13.70:8333 -66.240.237.155:8333 -66.240.237.172:8333 -67.205.140.145:8333 -67.210.228.203:8333 -67.221.193.55:8333 -67.222.131.151:8333 -68.110.90.111:8333 -68.142.33.36:8333 -68.199.157.183:8333 -68.202.128.19:8333 -68.206.21.144:8333 -69.30.215.42:8333 -69.55.234.74:8333 -69.59.18.22:8333 -69.145.122.160:8333 -69.175.49.230:8333 -70.64.48.41:8333 -71.33.232.126:8333 -71.73.18.32:8333 -71.146.114.111:8333 -72.53.134.182:8333 -73.126.97.99:8333 -74.83.126.150:8333 -74.84.128.158:9333 -74.98.242.97:8333 -74.118.137.119:8333 -74.220.255.190:8333 -75.45.51.41:8333 -75.158.39.231:8333 -76.11.17.187:8333 -76.84.79.211:8333 -76.167.179.75:8333 -77.53.158.137:8333 -77.119.229.106:8333 -77.120.122.22:8433 -77.120.122.114:8433 -77.163.136.136:8333 -77.220.140.74:8333 -77.247.178.130:8333 -78.128.62.52:8333 -78.128.79.22:8333 -78.141.123.99:8333 -78.143.214.223:8333 -78.159.99.85:8333 -79.77.33.131:8333 -79.120.70.47:8333 -79.142.129.218:8333 -79.175.125.210:8333 -80.47.156.43:8333 -80.89.203.172:8001 -80.93.213.246:8333 -80.111.142.213:8333 -80.147.82.165:8333 -80.211.191.11:8333 -80.211.245.151:8333 -80.229.28.60:8333 -80.229.168.1:8333 -80.253.94.252:8333 -81.4.102.69:8333 -81.7.13.84:8333 -81.10.205.21:8333 -81.117.225.245:8333 -81.177.157.81:39993 -81.235.185.150:8333 -82.29.58.109:8333 -82.117.166.77:8333 -82.118.20.37:8333 -82.146.50.143:8333 -82.146.153.130:8333 -82.149.97.25:17567 -82.169.130.61:8333 -82.181.179.230:8333 -82.181.218.229:8333 -82.194.153.233:8333 -82.195.237.253:8333 -82.197.218.97:8333 -82.199.102.10:8333 -82.199.102.133:8333 -82.202.197.224:8333 -82.217.245.7:8333 -82.221.111.136:8333 -83.89.27.50:8333 -83.89.250.69:8333 -83.167.27.4:8333 -83.208.254.182:8333 -83.217.8.31:44420 -83.221.211.116:8335 -83.243.59.41:8333 -83.251.241.0:8333 -84.38.3.249:8333 -84.40.94.170:8333 -84.192.16.234:8333 -84.209.9.23:8333 -84.234.96.115:8333 -84.248.14.210:8333 -85.119.83.25:8333 -85.144.119.222:8333 -85.145.238.93:8333 -85.184.138.108:8333 -85.190.0.5:8333 -85.202.11.119:8333 -85.204.96.207:8333 -85.208.69.13:8333 -85.214.90.161:8333 -85.240.233.220:8333 -85.241.106.203:8333 -86.15.38.61:8333 -86.76.7.132:8333 -87.79.68.86:8333 -87.79.94.221:8333 -87.118.116.237:8333 -87.120.8.5:20008 -87.222.22.255:8333 -87.233.181.146:8333 -87.246.46.132:8333 -87.249.207.89:8333 -88.86.116.140:8333 -88.86.116.142:8333 -88.88.13.249:8333 -88.147.244.250:8333 -88.150.230.95:8333 -88.202.202.221:8333 -88.208.3.195:8333 -88.212.44.33:8333 -89.25.80.42:8333 -89.28.117.31:8333 -89.106.199.38:8333 -89.142.75.60:8333 -89.190.19.162:8333 -89.212.9.96:8333 -89.212.75.6:8333 -89.248.250.12:8333 -90.94.83.26:8333 -90.182.165.18:8333 -91.185.198.234:8333 -91.193.237.116:8333 -91.204.99.178:8333 -91.204.149.5:8333 -91.210.24.30:8333 -91.211.88.33:8333 -91.216.149.28:8333 -91.222.128.59:8333 -92.18.180.225:8333 -92.53.89.123:8333 -92.240.69.195:8333 -92.249.143.44:8333 -92.255.176.109:8333 -93.57.81.162:8333 -93.90.193.195:8330 -93.90.207.46:8333 -93.115.26.186:20004 -93.115.240.26:8333 -93.123.180.164:8333 -93.175.204.121:8333 -93.180.178.213:8333 -94.19.7.55:8333 -94.52.112.227:8333 -94.53.2.181:8333 -94.72.143.26:8333 -94.103.120.173:8333 -94.237.64.138:8333 -94.237.80.207:8333 -94.242.255.31:8333 -95.24.48.84:15426 -95.42.2.113:8333 -95.69.249.63:8333 -95.79.35.133:8333 -95.87.226.56:8333 -95.90.3.210:8333 -95.110.234.93:8333 -95.156.252.34:8333 -95.211.189.3:8333 -95.217.9.207:8333 -96.9.80.109:8333 -96.245.218.247:8333 -97.104.206.3:8333 -98.29.195.204:8333 
-99.231.196.126:8333 -101.100.174.24:8333 -101.100.174.240:8333 -103.14.244.190:8333 -103.37.205.47:8333 -103.60.109.184:20008 -103.84.84.250:8335 -103.85.190.218:20000 -103.99.168.100:8333 -103.99.168.130:8333 -103.214.146.86:8333 -104.171.242.155:8333 -104.199.184.15:8333 -104.244.223.151:8333 -105.29.76.194:8333 -107.150.45.18:8333 -107.180.77.21:8333 -108.58.252.82:8333 -108.183.77.12:8333 -109.72.83.127:8333 -109.99.63.159:8333 -109.109.36.19:8333 -109.110.81.90:8333 -109.173.112.224:8333 -109.202.107.125:8333 -109.205.109.56:8333 -109.236.84.141:8333 -109.238.81.82:8333 -109.248.206.13:8333 -111.40.4.103:8333 -111.90.140.217:8333 -111.90.158.212:8333 -112.213.103.98:8333 -113.52.135.125:8333 -115.47.141.250:8885 -115.70.110.4:8333 -116.87.15.244:8333 -119.17.151.61:8333 -119.171.134.87:8333 -121.18.238.39:8333 -121.78.223.186:8333 -121.98.205.102:8333 -122.112.148.153:8339 -122.116.42.140:8333 -124.160.119.93:8333 -125.236.215.133:8333 -129.13.189.212:8333 -129.97.243.18:8333 -130.185.77.105:8333 -131.114.10.233:8333 -131.188.40.34:8333 -132.249.239.163:8333 -134.19.186.195:8333 -134.249.187.97:8333 -136.144.215.219:8333 -137.226.34.46:8333 -139.9.249.234:8333 -141.101.8.36:8333 -143.89.121.207:8333 -143.176.224.104:8333 -144.34.161.65:18333 -147.253.70.208:8333 -148.66.50.82:8335 -153.92.127.216:8333 -153.120.115.15:8333 -154.52.98.2:8444 -155.4.116.169:8333 -156.19.19.90:8333 -156.34.178.138:8333 -157.13.61.66:8333 -157.13.61.67:8333 -158.181.226.33:8333 -159.100.242.254:8333 -159.100.248.234:8333 -159.253.98.209:8333 -160.16.0.30:8333 -160.20.145.62:8333 -162.62.18.226:8333 -162.62.26.218:8333 -162.209.88.174:8333 -162.244.80.208:8333 -163.158.202.112:8333 -163.172.181.191:8333 -166.62.100.55:8333 -167.114.35.12:8333 -168.62.167.209:8200 -168.235.74.110:8333 -168.235.90.188:8333 -170.249.37.243:8333 -172.99.120.113:8333 -173.21.218.95:8333 -173.51.177.2:8333 -173.95.72.234:8333 -173.208.128.10:8333 -173.209.44.34:8333 -173.231.57.194:8333 -173.255.204.124:8333 -174.65.135.60:8333 -174.94.155.224:8333 -174.115.120.186:8333 -176.53.160.170:8333 -176.85.188.213:8333 -176.99.2.207:8333 -176.121.14.157:8333 -176.122.157.173:8333 -176.126.85.34:8333 -176.198.120.197:8334 -178.61.141.198:8333 -178.119.183.34:8333 -178.234.29.184:8333 -178.255.42.126:8333 -179.48.251.41:8333 -180.150.73.100:8333 -181.47.220.242:8333 -181.170.139.47:8333 -183.110.220.210:30301 -183.230.93.139:8333 -184.95.58.164:8663 -184.164.147.82:41333 -185.15.92.18:20993 -185.25.60.199:8333 -185.52.3.185:8333 -185.61.138.4:8333 -185.64.116.15:8333 -185.83.110.53:8333 -185.83.214.123:8333 -185.95.219.53:8333 -185.96.94.24:8333 -185.102.71.6:8333 -185.138.35.183:8333 -185.140.252.253:8333 -185.143.145.113:8333 -185.148.3.227:8333 -185.157.160.220:8333 -185.163.44.44:8333 -185.176.221.32:8333 -185.186.208.162:8333 -185.198.58.47:8333 -185.198.59.183:8333 -185.215.224.22:8333 -185.232.28.254:8333 -185.239.236.116:8333 -185.251.161.54:8333 -188.42.40.234:18333 -188.65.212.138:8333 -188.65.212.157:8333 -188.68.45.143:8333 -188.127.229.105:8333 -188.131.177.130:8333 -188.134.8.36:8333 -188.134.88.5:8333 -188.138.17.92:8333 -188.150.157.11:8333 -188.208.111.62:8333 -188.231.177.149:8333 -190.2.145.177:8333 -190.104.249.44:8333 -191.209.21.188:8333 -192.3.11.20:8333 -192.3.11.24:8333 -192.34.56.59:8333 -192.65.170.15:8333 -192.65.170.50:8333 -192.146.137.18:8333 -192.166.47.32:8333 -192.169.94.29:8333 -192.227.80.83:8333 -192.254.65.126:8333 -193.10.203.23:8334 -193.29.57.4:8333 -193.58.196.212:8333 -193.59.41.11:8333 
-193.84.116.22:8333 -193.108.131.43:8333 -193.148.71.10:8333 -193.169.244.190:8333 -193.194.163.35:8333 -193.194.163.53:8333 -194.5.159.197:8333 -194.14.246.205:8333 -194.135.92.96:8333 -194.135.135.69:8333 -194.158.92.150:8333 -194.187.251.163:31239 -195.56.63.5:8333 -195.56.63.10:8333 -195.67.139.54:8333 -195.95.225.17:8333 -195.135.194.8:8333 -195.154.113.90:8333 -195.206.20.114:8333 -195.206.105.42:8333 -195.209.249.164:8333 -195.224.116.20:8333 -198.1.231.6:8333 -198.251.83.19:8333 -199.48.83.58:8333 -199.96.50.211:8333 -199.188.204.25:8333 -199.192.20.201:8333 -200.76.194.7:8333 -200.87.116.213:8333 -202.28.194.82:8333 -202.55.87.45:8333 -203.130.48.117:8885 -203.132.95.10:8333 -204.14.245.180:8333 -204.152.203.98:8333 -205.209.162.98:8333 -206.221.178.149:8333 -208.110.99.105:8333 -209.133.220.74:8333 -209.151.237.71:8333 -211.149.170.31:8333 -212.51.132.226:8333 -212.241.70.213:8333 -213.21.15.22:8333 -213.136.83.8:8333 -213.227.152.108:8333 -213.254.23.116:8333 -216.108.236.180:8333 -216.194.165.98:8333 -216.236.164.82:8333 -217.16.185.165:8333 -217.21.24.146:8333 -217.26.32.10:8333 -217.64.47.138:8333 -217.64.133.220:8333 -217.92.55.246:8333 -217.172.244.9:8333 -218.75.140.45:8333 -219.75.122.47:8333 -220.233.138.130:8333 -221.130.29.230:18421 -222.122.49.40:8333 -222.186.169.1:8333 -222.222.43.29:8333 -223.16.30.175:8333 -[2001:1bc0:cc::a001]:8333 -[2001:1c02:2f18:d00:b62e:99ff:fe49:d492]:8333 -[2001:250:200:7:d6a9:fcf4:e78d:2d82]:8333 -[2001:41c9:1:424::231]:8333 -[2001:41d0:1004:19b4::]:8333 -[2001:44b8:4195:1801:5c73:5d67:d2a6:9910]:8333 -[2001:470:88ff:2e::1]:8333 -[2001:470:a:c13::2]:8333 -[2001:4800:7821:101:be76:4eff:fe04:9f50]:8333 -[2001:4801:7819:74:b745:b9d5:ff10:aaec]:8333 -[2001:48d0:1:2163:0:ff:febe:5a80]:8333 -[2001:4ba0:fffa:5d::93]:8333 -[2001:638:a000:4140::ffff:191]:8333 -[2001:678:7dc:8::2]:8333 -[2001:678:ec:1:250:56ff:fea7:47e9]:8333 -[2001:67c:10ec:2a49:8000::1082]:8333 -[2001:67c:16dc:1201:5054:ff:fe17:4dac]:8333 -[2001:67c:21ec:1000::a]:8333 -[2001:67c:26b4:12:7ae3:b5ff:fe04:6f9c]:8333 -[2001:67c:2db8:6::45]:8333 -[2001:700:300:1513:29c7:2430:190e:ab59]:8333 -[2001:718:801:311:5054:ff:fe19:c483]:8333 -[2001:818:e245:f800:4df:2bdf:ecf5:eb60]:8333 -[2001:8f1:1404:3700:8e49:715a:2e09:b634]:9444 -[2001:ba8:1f1:f069::2]:8333 -[2001:bb8:4008:20:648c:5eff:fe74:ce4]:8333 -[2001:da8:d800:821:a7d5:f5a7:530d:b71e]:8333 -[2001:e42:103:100::30]:8333 -[2001:e68:7400:2:6854:419e:221c:82f3]:8333 -[2002:b610:1ca3::b610:1ca3]:8333 -[2002:b6ff:3dca::b6ff:3dca]:28364 -[2400:2651:42e0:3300:40b4:576d:d14c:65d4]:8333 -[2400:4052:e20:4f00:69fe:bb33:7b1c:a1ca]:8333 -[2401:2500:203:184::15]:8333 -[2401:3900:2:1::2]:8333 -[2401:a400:3200:5600:14ee:f361:4bdc:1f7c]:8333 -[2401:d002:4402:0:8f28:591a:6ea0:c683]:8333 -[2402:cb40:1000:504::dead]:8333 -[2405:aa00:2::40]:8333 -[2409:10:ca20:1df0:224:e8ff:fe1f:60d9]:8333 -[2409:8a15:4a1a:2830:7285:c2ff:fe70:60a4]:8333 -[2409:8a1e:6938:d2c0:2e0:70ff:fe86:cb59]:8333 -[2409:8a28:421:2580:2e0:70ff:fe8b:13e]:8333 -[2409:8a28:421:2770:2e0:70ff:fe87:fecb]:8333 -[240d:1a:759:6000:ddab:3141:4da0:8878]:8333 -[2600:3c01::f03c:91ff:fecd:1b95]:8333 -[2600:6c40:7980:27:20a:f7ff:fe69:f4d5]:8333 -[2602:ffc5::ffc5:b844]:8333 -[2604:2d80:c808:857b:8d6:9e1c:7131:4bea]:8333 -[2604:4300:a:2e:21b:21ff:fe11:392]:8333 -[2604:5500:c134:4000::3fc]:32797 -[2604:5500:c2a3:7b00:cc6:373b:44a8:caa4]:8333 -[2604:6000:6e85:4a01:a82d:f9ff:fef5:28b9]:8333 -[2604:7780:303:80::80]:8333 -[2605:4d00::50]:8333 -[2605:9880:0:777:225:90ff:fefc:8958]:8333 -[2605:ae00:203::203]:8333 
-[2605:c000:2a0a:1::102]:8333 -[2605:e000:1127:8fc:ec63:a191:32c2:633c]:8333 -[2605:e200:d202:300:20c:29ff:fef1:85ec]:8333 -[2605:f700:100:400::131:5b54]:8333 -[2606:c680:0:b:3830:34ff:fe66:6663]:8333 -[2607:4480:2:1:38:102:69:70]:8333 -[2607:9280:b:73b:250:56ff:fe21:9c2f]:8333 -[2607:f128:40:1703::2]:8333 -[2607:f188:0:4:eef4:bbff:fecc:6668]:8333 -[2607:f2c0:e1e2:11:1044:9b7a:b81e:1d74]:8333 -[2607:f470:8:1048:ae1f:6bff:fe70:7240]:8333 -[2620:11c:5001:1118:d267:e5ff:fee9:e673]:8333 -[2620:6e:a000:1:42:42:42:42]:8333 -[2804:14d:baa7:9674:21e:67ff:fea8:d799]:8333 -[2804:14d:baa7:9674:3615:9eff:fe23:d610]:8333 -[2804:39e8:ff85:a600:7285:c2ff:feae:9925]:8333 -[2804:d41:aa01:1600:5a2d:3b27:3b83:2b45]:8333 -[2a00:12d8:7001:1:46e7:6915:75be:92f9]:8333 -[2a00:1398:4:2a03:215:5dff:fed6:1033]:8333 -[2a00:1630:14::101]:8333 -[2a00:1768:2001:27::ef6a]:8333 -[2a00:1828:a004:2::666]:8333 -[2a00:1838:36:142::ec73]:8333 -[2a00:1838:36:7d::d3c6]:8333 -[2a00:1f40:2::1126]:8333 -[2a00:23a8:41d0:5800:20c:29ff:fe0d:6a75]:8333 -[2a00:23c5:fd01:9f00:6317:7c02:788f:88ea]:8333 -[2a00:6020:13c2:3800:be6a:a1c8:c9e7:65ec]:8333 -[2a00:63c2:8:88::2]:8333 -[2a00:7143:3::227]:8333 -[2a00:7b80:452:2000::138]:8333 -[2a00:8a60:e012:a00::21]:8333 -[2a00:ca8:a1f:3025:f949:e442:c940:13e8]:8333 -[2a00:d70:0:15:f816:3eff:fe73:d819]:8333 -[2a00:d880:5:331::3978]:8333 -[2a01:238:420f:9200:fa5a:1a4b:1e6a:fadf]:8333 -[2a01:430:17:1::ffff:1153]:8333 -[2a01:488:66:1000:53a9:1573:0:1]:8333 -[2a01:4f8:120:80cc::2]:8433 -[2a01:5f0:beef:5:0:3:0:1]:52101 -[2a01:79c:cebc:a630:9dd8:ef55:8374:92a1]:8333 -[2a01:7a0:2:137a::11]:8333 -[2a01:7a0:2:137c::3]:8333 -[2a01:7c8:aab6:db:5054:ff:feca:cfc8]:8333 -[2a01:8b81:6403:4700::1]:8333 -[2a01:cb00:7cd:b000:fa1f:bd1:fe0:62a6]:8333 -[2a01:cb00:d3d:7700:227:eff:fe28:c565]:8333 -[2a01:d0:bef2::12]:8333 -[2a01:d0:f34f:1:1f67:e250:6aeb:b9c4]:8333 -[2a01:e34:ee6b:2ab0:88c2:1c12:f4eb:c26c]:8333 -[2a01:e35:2fba:2e90:1:0:b:1]:8333 -[2a02:1205:505d:eb50:beae:c5ff:fe42:a973]:8333 -[2a02:120b:2c3f:a90:10dd:31ff:fe42:5079]:8333 -[2a02:130:300:1520:1::2]:8333 -[2a02:13b8:4000:1000:216:e6ff:fe92:8619]:8333 -[2a02:180:1:1::5b8f:538c]:8333 -[2a02:2168:8062:db00:96de:80ff:fea3:fd00]:8333 -[2a02:2770:5:0:21a:4aff:fe44:8370]:8333 -[2a02:2788:864:fb3:5b8a:c8f7:9fff:ae2d]:8333 -[2a02:2f0d:607:bc00:5e9a:d8ff:fe57:8bc5]:8333 -[2a02:348:9a:83b1::1]:8333 -[2a02:390:9000:0:218:7dff:fe10:be33]:8333 -[2a02:4780:8:6:2:354e:1256:7a04]:8333 -[2a02:578:4f07:24:76ad:cef7:93c1:b9b9]:8333 -[2a02:6d40:30f6:e901:89b8:bb58:25a:6050]:8333 -[2a02:750:7:c11:5054:ff:fe43:eb81]:8333 -[2a02:7aa0:1619::adc:8de0]:8333 -[2a02:7b40:4f62:19ae::1]:8333 -[2a02:8108:95bf:eae3:211:32ff:fe8e:b5b8]:8333 -[2a02:e00:fff0:23f::1]:8333 -[2a02:e00:fff0:23f::a]:8333 -[2a03:1b20:1:f410:40::3e]:16463 -[2a03:6000:870:0:46:23:87:218]:8333 -[2a03:9da0:f6:1::2]:8333 -[2a03:e2c0:1ce::2]:8333 -[2a04:2180:0:2::f2]:8333 -[2a04:2180:1:c:f000::15]:8333 -[2a04:52c0:101:97f::dcbe]:8333 -[2a04:ee41:83:50df:d908:f71d:2a86:b337]:8333 -[2a05:1700::100]:8333 -[2a05:fc87:4::2]:8333 -[2a05:fc87:4::7]:8333 -[2a07:5741:0:69d::1]:8333 -[2a07:5741:0:7cd::1]:8333 -[2a07:7200:ffff:c53f::e1:17]:8333 -[2a07:b400:1:34c::2:1002]:8333 -[2a0b:ae40:3:4a0a::15]:8333 -[2a0e:b780::55d1:f05b]:8333 -[2c0f:fce8:0:400:b7c::1]:8333 -2empatdfea6vwete.onion:8333 -34aqcwnnuiqh234f.onion:8333 -3gxqibajrtysyp5o.onion:8333 -3sami4tg4yhctjyc.onion:8333 -3w77hrilg6q64opl.onion:8333 -46xh2sbjsjiyl4fu.onion:8333 -4ee44qsamrjpywju.onion:8333 -4haplrtkprjqhm2j.onion:8333 -4u3y3zf2emynt6ui.onion:8333 
-57dytizbai7o4kq7.onion:8333 -5guaeulc7xm4g2mm.onion:8334 -5mtvd4dk62ccdk4v.onion:8333 -5pmjz6mmikyabaw5.onion:8333 -6eurcxoqsa4qpiqq.onion:8333 -6ivvkeseojsmpby4.onion:8333 -6tlha6njtcuwpfa3.onion:8333 -6ymgbvnn6d5nfmv4.onion:8333 -72y2n5rary4mywkz.onion:8333 -7b75ub5dapphemit.onion:8333 -7xaqpr7exrtlnjbb.onion:8333 -a64haiqsl76l25gv.onion:8333 -ab7ftdfw6qhdx3re.onion:8333 -aiupgbtdqpmwfpuz.onion:8333 -akeg56rzkg7rsyyg.onion:8333 -akinbo7tlegsnsxn.onion:8333 -anem5aq4cr2zl7tz.onion:8333 -at3w5qisczgguije.onion:8333 -auo4zjsp44vydv6c.onion:8333 -bowg4prf63givea4.onion:8333 -cjuek22p4vv4hzbu.onion:8333 -cklaa2xdawrb75fg.onion:8333 -coxiru76nnfw3vdj.onion:8333 -cwq2fuc54mlp3ojc.onion:8333 -dganr7dffsacayml.onion:8333 -djbsspmvlc6ijiis.onion:8333 -dmfwov5ycnpvulij.onion:8333 -dp2ekfbxubpdfrt4.onion:8333 -dw2ufbybrgtzssts.onion:4333 -edkmfeaapvavhtku.onion:8333 -ejdoey3uay3cz7bs.onion:8333 -eladlvwflaahxomr.onion:8333 -ffhx6ttq7ejbodua.onion:8333 -hbnnzteon75un65y.onion:8333 -hcyxhownxdv7yybw.onion:8333 -hdfcxll2tqs2l4jc.onion:8333 -hdld2bxyvzy45ds4.onion:8333 -hlnnhn2xj2qffqjs.onion:8333 -hnqwmqikfmnkpdja.onion:8333 -hvmjovdasoin43wn.onion:8333 -hwzcbnenp6dsp6ow.onion:8333 -i5ellwzndjuke242.onion:8333 -iapvpwzs4gpbl6fk.onion:8885 -if7fsvgyqwowxkcn.onion:8333 -ilukzjazxlxrbuwy.onion:8333 -kswfyurnglm65u7b.onion:8333 -ldu2hbiorkvdymja.onion:8333 -lvvgedppmpigudhz.onion:8333 -mk3bnep5ubou7i44.onion:8333 -muhp42ytbwi6qf62.onion:8333 -n5khsbd6whw7ooip.onion:8333 -ndmbrjcvu2s6jcom.onion:8333 -nf4iypnyjwfpcjm7.onion:8333 -nkdw6ywzt3dqwxuf.onion:8333 -o4sl5na6jeqgi3l6.onion:8333 -opencubebqqx3buj.onion:8333 -ovbkvgdllk3xxeah.onion:8333 -pg2jeh62fkq3byps.onion:8333 -pkcgxf23ws3lwqvq.onion:8333 -qdtau72ifwauot6b.onion:8333 -qidnrqy2ozz3nzqq.onion:8333 -readybit5veyche6.onion:8333 -s2epxac7ovy36ruj.onion:8333 -satofxsc3xjadxsm.onion:8333 -sv5oitfnsmfoc3wu.onion:8333 -uftbw4zi5wlzcwho.onion:8333 -uz3pvdhie3372vxw.onion:8333 -v2x7gpj3shxfnl25.onion:8333 -vov46htt6gyixdmb.onion:8333 -wg3b3qxcwcrraq2o.onion:8333 -wgeecjm4w4ko66f7.onion:8333 -wmxc6ask4a5xyaxh.onion:8333 -wqrafn4zal3bbbhr.onion:8333 -xhi5x5qc44elydk4.onion:8333 -xk6bjlmgvwojvozj.onion:8333 -xmgr7fsmp7bgburk.onion:8333 -xocvz3dzyu2kzu6f.onion:8333 -xv7pt6etwxiygss6.onion:8444 -yumx7asj7feoozic.onion:8333 -zmaddsqelw2oywfb.onion:8444 +[fc1f:22c3:95dc:a3af:4a93:8251:beb9:1858]:8333 +[fc6d:f562:86a0:791d:8a20:7aa2:8879:2176]:8333 +[fc70:de9d:7fe2:b32:5828:1a3c:d0f:83ec]:8333 +[fc95:6edb:af65:9ea3:cd27:21ef:f5e2:29c6]:8333 +[fca0:151:79ac:8992:b51e:bdc4:6ed9:41be]:8333 +[fcc7:be49:ccd1:dc91:3125:f0da:457d:8ce]:8333 +[fccb:248:11a6:1042:bca:1218:f7ce:7d3d]:8333 +[fcf1:22ff:3070:582f:a873:61bc:4bc1:81bf]:8333 +23zqyu5dxq3gilx5dihlyjh6tbkwombyaks4pbeyg2pqjvv7putq.b32.i2p:0 +26wyxyscoasntccp6k3earcilrykkgzfrdit2zfw5m4patocvuqq.b32.i2p:0 +26xo6ouiqida6fivk7cfirvyss4eqoiefoim6pecdqietfwxnulq.b32.i2p:0 +2dsyxnxzwo6ltuo3hndnoop2qc2pj2czfgmjbjnqrnnxffmr2oaa.b32.i2p:0 +2eshemvuvyahgshefs47wt3eqg3vvxw5jzlxm6lidye5d6zgclja.b32.i2p:0 +2fkjb2o7p4gdj6hjiytgkhvitqwjrn7kmdxzduwiskpycnzhvkwa.b32.i2p:0 +2hjwtokw4mxvtkm5po63kjhyo7lfsarfcgqp2hp5i4epx3qvtnua.b32.i2p:0 +2jgfxxzts6rfr5m2iwklltya2kmuejnkufpt4bavwjhi2j7fmyma.b32.i2p:0 +2jt5aukq3ik7bu4frugi5udkwlckhhgxrzjhjvfzieomkx4agxqq.b32.i2p:0 +2k3lztzfal3k6dl7yrh2jykfa3xpmou2c7cm2ewo4qctkkvbhfvq.b32.i2p:0 +2kb3aja6lo6pu2b27tos64tgo4kxzate2z4ew3mpmksqh6wzjsta.b32.i2p:0 +2muk57255tdm4g273fy7aitrmqnturut6krqojxz76jssrqm2rsa.b32.i2p:0 +2o4zoex2pqqybdyxuwoimn3ke23aqo4fnaglmr4hwhaubhneppoq.b32.i2p:0 
+2od4gsib5knkq5fhbmysp3nbehts67nfa26jv6gkesehwycv7tfa.b32.i2p:0 +2ot7eiaasbxhha2txgsjd4e6inugidh6yronjdkiyba5ss3kty2q.b32.i2p:0 +2rbmv3hyr4l42ysvj62dgozlmosdsr65x4dqgvx3axfzyw2uytnq.b32.i2p:0 +2v5nr3u3scbyxd3nkqxinw2pvqqe66y4hc3u6gotil2sgbyivruq.b32.i2p:0 +2vslelcwzyxvvm3rzr2irbvaw7eyillztwfrdd24zshhwhf5rmjq.b32.i2p:0 +2vwlmwbsnm6fiffcl5ngz4g7r4tyieu7j4xmpwlmc2xxl2myd2zq.b32.i2p:0 +2xnwvq4msaylv2aom7nhufsjyqho7qh3gpiej2wbl6imdfalyroq.b32.i2p:0 +2z4qckqjkz2xjwrohij664qlaraxo2ryascslw4ehfjxtltwolzq.b32.i2p:0 +37tmwq2dy4smqg46qkgvs5poca3hceoephgk5onjrgjxyph4ddlq.b32.i2p:0 +3ctq5jucmugxjum2xammfbzgdmsx23ak3peuk64b7vgq5w4z7hqq.b32.i2p:0 +3d5hlguo3yzbzwb7ek46uck2m67u4yuup456wywdvzhzazcu43vq.b32.i2p:0 +3gocb7wc4zvbmmebktet7gujccuux4ifk3kqilnxnj5wpdpqx2hq.b32.i2p:0 +3hziidrpzqvns4jwfqhzb7nydw4mtnnkawperfvnaru3zpup2ihq.b32.i2p:0 +3iixvywxqwns477r6axhylfcteq5bwggy7cmrbutqred54kigwoa.b32.i2p:0 +3jqjyte2k6xnoetygm6gfakisuz3eqxr3fcu4l5kmu3zifpnkhvq.b32.i2p:0 +3oa5fycx3tyechdfgootxcpap7sx7p3sawenrbzyyxrty74rt7zq.b32.i2p:0 +3r7m5mbeogkr6sy66f6g325nsf2jiimtfdn5v6wpxz6uqmsybqja.b32.i2p:0 +3s7zmamgfmlhwvrn5ov32uugbldoka7jxbetu6tjsq3bzkrb5dca.b32.i2p:0 +3uudj6lfywjqrbiplgwoaxpeb2k4ma75mlmpiq7owv3bofwlvadq.b32.i2p:0 +3y4oiw7cglzzshasapfmlte7ixbgpl4js7w2afsd4znemu4ddefa.b32.i2p:0 +42guqsvdee4tmq3qa5mjw5vjkzkkuk5eainecsrsdfex3wyf7twq.b32.i2p:0 +44qb3nfsrer2f4eyh54s7qdvwxiykjrer76jjevcbpdn3i6uayba.b32.i2p:0 +44vneaemid75co5t7rjnp3ofpar52gg2hbqy7cmolfnl2yhjcmpq.b32.i2p:0 +45eh6rxbfli4ju26cqaf63vfs5xx6zgt7hn3uzzz3nem45d4ps6q.b32.i2p:0 +46xory66pc6qxd6glapmcrxujp6wc5swghhbnhybvewxffo37gaq.b32.i2p:0 +47jrlqld6m7abax6mmly23pg4hyapckmx4hhzzsozkgaswqu3nbq.b32.i2p:0 +4b7mcou7wtazoxwxpm5tl4tk7qaalnabd6i7wvnmpv5omvo7ogla.b32.i2p:0 +4dduiytop3qjiodb2jnwvhvnyr6w3y5axz3vzk5qrfefur4fg6qq.b32.i2p:0 +4dixsgdl3bpnkiahm7uanupycpfg53hxobxgm5fsnsvpt2yhalqa.b32.i2p:0 +4lfukvzjcupsvjdtoyealabzbdsp3ar4tjbhxtwypbb2l7m2cpwq.b32.i2p:0 +4lmhn2xqfineeyquxxv5ixozqkn4eyuzcclk626q67impx5hnnhq.b32.i2p:0 +4lommnxazy6c734yycbmzqkodlzcxpwsby5wlmhnhcbygamgwrxa.b32.i2p:0 +4miplwxnwh3lnaag2pjehdyxyzgrfz6yc6vwulbug6jr4rnetzjq.b32.i2p:0 +4nxdyltgdwpfkautpralacdlolzlpwr43cj4ig4gozn5y43gk6nq.b32.i2p:0 +4ripix2seo5ac3bjb4a5louhtbycghyf3leysidy22wnbak7tzjq.b32.i2p:0 +4ro6okb3x64g7u4khhc7rqalmkwkkumbavkcx7vqpcvki2una72a.b32.i2p:0 +4s37ey3w4lncs6xxycnxksbhatfvx2ttgc3obdvpirir4zzjassq.b32.i2p:0 +57shcfzia75ketlrqrnvgxvaijhbtn3snrjw663ank5baj7qr25a.b32.i2p:0 +5agylbfkuwwqnyiqwtgxgqlybokkqfvoxwjbkc732cbot5f2z4nq.b32.i2p:0 +5bqnpm3tqlkygqgu5lnzxxuw6bbjdzqkdbbmlfbzn434qpyj7mtq.b32.i2p:0 +5emthm7lukjah2pnqourmnhjk6zhujrgzjqdevezdf6d6valclia.b32.i2p:0 +5f2sx5ne2cd5mtobr2abegmzwh2o6iie3ylrhgjdkbivjqbdmpeq.b32.i2p:0 +5ikjy5pzgjk37hh7fcknyyhx5v5lkchjqknx7hyrffrj7hv7lybq.b32.i2p:0 +5kog73uwths255tdrwee7dc2pshq6xlukbxtpgn3bbwunm52jwdq.b32.i2p:0 +5mcxlytzx54cd5w6oumos3ab7yya3wxazrqct4uc62f2d2ts3zaq.b32.i2p:0 +5nvlednvsjhp75f75pmlyzaujkzsvwckvvsyuavrslfcq5zgj3ta.b32.i2p:0 +5pjaaexikmlyhl5dihzvqbykuec6uzkyujvvs5bjoi6htlb3oxzq.b32.i2p:0 +5puvvkyv6qr2kaqb2avbqxzhdyiquhk2767omhoa2s52gcnfcx4q.b32.i2p:0 +5rojgskvcimuseeaxpjiohmb6b5r5vs6v6hheay3kemraqvmg6iq.b32.i2p:0 +5unuudrnxqkdgqsrfbsmx3i6iwwthivngtfm7sddptjizpqxphbq.b32.i2p:0 +5xsuuiufbmoncdvwk55rd4qs24oemoutharh5hn62y7pn6rzk6ea.b32.i2p:0 +62srw23f4rrxzs3253g7mb2lbop3pfpsu56g5iiptrao3qoasd4a.b32.i2p:0 +6dkvhlhwoiwpdjpebuemxq66soh6os6xa2sq42dwhfbp3d4sdzga.b32.i2p:0 +6e3bigypatmvcutfntlonhrhpfc5y4hbl4cyabgjv2wt6fhwjx3q.b32.i2p:0 +6hqodynvvfa3yflrcvmpniyn5v3idtkctx7gijdlv5wdd37xnq4a.b32.i2p:0 
+6jh4xdqs35lkhqnynxctyhqikcnyplvs4juvgbjt4zdlzrt4uphq.b32.i2p:0 +6mhlke4onpdqcfkae6yi23mefnt24xgvipbo24kqluz2yx6fcuoq.b32.i2p:0 +6mroga77egf2fzs7y46huhxwesppxx6k72jc7h2cznq2fmcaelsq.b32.i2p:0 +6nhfpjg54gdwz2jlp3qzt6mdvkk5fp7jr4bc35ph3k5q7dn6cnfq.b32.i2p:0 +6nhgac6wpmchhcmoy36ypaeu6ubc7fq4sui6phin4zpakj3mmbxa.b32.i2p:0 +6nxoamjjskfftfulrxss5mvob56yuif4fxhix6xsw25igu4h7eea.b32.i2p:0 +6rb4nnnlaatjspeoqx53gi7p2y4fagwtuchl2m2l32hherjgkaya.b32.i2p:0 +6vvxiiqmpcaazzfx5h23a6hppgtmsa75g3oeiil5f5hyvgneq2ha.b32.i2p:0 +6vxaw4dduy3hfoff4atqfkt4dc3yd73wimldeempfrfrh6qyfdxa.b32.i2p:0 +6wtsedonta5yojby77ateqk2opazkduz7bur7loo7ds3r2u6xvaq.b32.i2p:0 +6wxenlvtjz4e74cp5dcy6ew2swj4lgcw4wqt46xbeyzegwfs2eaq.b32.i2p:0 +6ycdrrj4shtvp5y5o5nagxth6e75nrlpngeswptxlzaxaq5ttqja.b32.i2p:0 +76objhdrw6swklvz62by5vfdldugdmjz7rifbkbuae72scbdoj6q.b32.i2p:0 +7767yywxn22on4vt6elw66ondhcck7lqcadp34b22mufecj2wyma.b32.i2p:0 +77dalsgr4vbjkdn3frhiklvegub6abglscaqraiuqwn33c3vtw7a.b32.i2p:0 +7ccbhxe3ojas5ek6gtyqg64mqcdcqkrze43nuwdlofeh5t3frdoq.b32.i2p:0 +7d3mfvmjbyvxtaigtz25w7rbnbqszutcgvoai4glh3zuv34glpxa.b32.i2p:0 +7dqje2cdumk5yef346ws7w5t63cm3jjiib3zmtdxxknaqv5axsua.b32.i2p:0 +7e6xz5pfnwsns43cxi7qygyasyvlfrk7gjeqlm6asl6pbn4xf3kq.b32.i2p:0 +7etpy33vv4lz6wmmn4o3o7hubggsigxnla3eoc4ymcy6deoctwhq.b32.i2p:0 +7fddpunmty7eruhumq2iqa2yw5ckcn6xpsw26pj6csnhnty3jcqa.b32.i2p:0 +7fq6qhlxkxc2zbtshlhef3fv6sodqis4sa4ojf5ehilho6d3kvnq.b32.i2p:0 +7g6nadgsvdk6helddu7nczhhspvb54wgovhxb3djkpd4vbtkydtq.b32.i2p:0 +7hpvrb7clik652nf3s6whlkx4pjxowtpidlz273uzn2st5bymzpa.b32.i2p:0 +7iiic2ikrwmchsnlvw6jn75vpvtuugx7mqebbl6ynl4yit7a6jlq.b32.i2p:0 +7jwvx7eged34ctrgyrt5y6x5p7xzkfpidvwrxjplqywqa5dqlg3a.b32.i2p:0 +7l6l47l7pwi7vfe7romlkpnoqu76dr4gphrdmic4tzdkgiifgz5q.b32.i2p:0 +7owux5a335qgy57z7levn7efo5pnzvlvmbht46ua7cwi5jq6rhwa.b32.i2p:0 +7q2p3tvy4fw335pnmpcykdujnkg273mhqc4ou2iunqsia52bgjoa.b32.i2p:0 +7qsw6acv762ofynms5z2e354tvp2k4527l6ifor7w3uklzaitd3q.b32.i2p:0 +7r4gaxkb2iwndafis2dczac3a2mfssxekexfvf53p7siizugg2wa.b32.i2p:0 +7r4ri53lby2i3xqbgpw3idvhzeku7ubhftlf72ldqkg5kde6dauq.b32.i2p:0 +7rbqtt4zauewbonbcvaiimflvxjkipb5t7gusyp7mfdqlepnkbdq.b32.i2p:0 +7trydzxax7oh34p3totxie57k3xiqvbdc3b2gbpjuda7oyeqo5ia.b32.i2p:0 +7ttjllhoemvcjwg4qzgrcrq2uy3c43ungndomb4eahcsjy25euyq.b32.i2p:0 +7utc6ztzqaljtirabf3l5ynvf3akmzofayid4caia6nd6z6a2ebq.b32.i2p:0 +7wgrywhskd24kqqjvodx6bxnjnv374xlnrwssy7zqd4wza5drqmq.b32.i2p:0 +7wp4rrff3fbrudrj5uzsvq4xofktj5tj7omnpcqw4m7rcjshs3za.b32.i2p:0 +a2zioocy4z3jijn6qzl2grifqd7hvuk6kagjv5maoqrnxwoucdoq.b32.i2p:0 +a57uc45crgyttfs6ij3fr44q4w5ij6yy6fmhbky374au7rtq4s2a.b32.i2p:0 +abxmmqkduni73ki6aazyl5z3b6ebh63cfse43zo2fgvpqo75l7rq.b32.i2p:0 +admfhd2d3gu3vevarjd6tbty7rq7fburhkj74x5uagc4oguvmtzq.b32.i2p:0 +aho2rjopjdeh33fi67pbk3ylrf7gynvxzmw6mpuwufcqyzyxgzba.b32.i2p:0 +ajol74c46igadlezendhvwt56gcmrdcahu4jdabt7o6n4vn2xv6q.b32.i2p:0 +aoop5n4khqbob6fvc2nxyyoleenefrykudtrbtjsh3acsqjjkdqa.b32.i2p:0 +aopnrdf454a46vqnvgcx7gk3k5ua5gvmhtovqodnqgzcc65hf7ha.b32.i2p:0 +aorbiqlyf3gwukti7w2cpe3msp2ak6d4qelya2umhph2gfa4d53a.b32.i2p:0 +aqj2d5wykbhure5yfaudeedomwho4gbz2lg3lhbmd6odhy47edta.b32.i2p:0 +aqtqmjzm6tvrodvmnijfwcn7guwmgshb6yccflkpfi7nacfl5fqa.b32.i2p:0 +asisbyliqgpny4tsgdov6tvmluezz7vukut4yqkpgpxkrl5da7ja.b32.i2p:0 +auceh5qut5hck4gzlh2xjtw5qodginqixew4oteqoziwtg4oc44a.b32.i2p:0 +aue5ldovw4zb2ai3nsjiesdhjhdngga2jofptptao3gdiujcqdfq.b32.i2p:0 +awwhyxxxs7kkb2pvgr563c4n4mazrivrmofw7rdx2zdu34lwzswa.b32.i2p:0 +ay4che3hbd2p6ma5qeo6kb5ghin4dt2nh4bsppcekvjwqfckn7ya.b32.i2p:0 +ayn7r3jdijhcl6t6gexmq45dw2tap2l67rn7xgoisaenkel74wha.b32.i2p:0 
+azftalqowzsrodgik4xc3hjfglff5gqxzc5midgvweyv4pruyk3a.b32.i2p:0 +b66s2rpt67dfmhwlsbhkipn6znkc27dfth5j35f3r2ps3ne65awq.b32.i2p:0 +basycxslwal7fte36essc4rp6soksye7elii6erptb2lfrnd2ysq.b32.i2p:0 +bblpml6yi6sfzopy2fr25lstes7f34lignkwue3aun5km6iubw4q.b32.i2p:0 +bfpeg5qpkber4dz5lttdbyto76nsgfno3keozwgm6tozazktaroq.b32.i2p:0 +bh54jdffg6zs3h5cyeqsj62zhi5thxpqnzv2vrhsldr4sbckshoa.b32.i2p:0 +binnt2wbflhyw2yctpudz27dhnxhg6mowmbuxx2tbh4bbpqbzzwa.b32.i2p:0 +bjjzj5jn6eb3ssrw73p52dc7ypr4vmvcf4yezmwojwmkirl72jyq.b32.i2p:0 +bmryplm5iyirjvpgqk7ifhmhbpgjw3qifnlprqmvihtimn2iovhq.b32.i2p:0 +bo6qcl6cofs3cr6ilbxeuskrofdj2ksbhugtocrebapfr3lbqs3a.b32.i2p:0 +brifkruhlkgrj65hffybrjrjqcgdgqs2r7siizb5b2232nruik3a.b32.i2p:0 +bsn5rlu4h3a4xpklhhymxorogxpdgudstloclzshghb4deiui3rq.b32.i2p:0 +btdvfwxmrgnqkhh3rpscp7szj6yybm6w7whxa676rsxia4mta7ta.b32.i2p:0 +bujcpgdp7c35xxqdmly4dyxywaovxfoif7c52j32rvcs33gefeyq.b32.i2p:0 +bwg6m4agxu2fiv3dk5ys424b27wjzbh24d3ewtsb4ed5albmk5pa.b32.i2p:0 +byoucerid5rb2dducaqwepaytvkw4b4l2wmbfkbfgicflfcd4dta.b32.i2p:0 +c3wke4uh6wbgpwzo2zs3l3rfsr2ibsfxernlhxgyrxseuglrd5za.b32.i2p:0 +c3zbgt32o4foia7wdxgc3o42mutmeghhe36glmmnyzs256g77lxq.b32.i2p:0 +c5fk6xjwbfdurepihkdjvligexlzwu64zh2e5sjd5pmhvv34kmaq.b32.i2p:0 +c6c6c6deskut33e6zmoyxwhta6aadtjattbis4pqew2ghnv4gpga.b32.i2p:0 +c6sdbzckovbl7hd5bq5pfkkwc2xtdkx7b357bk3v4vdspzmy34cq.b32.i2p:0 +carg765pk5ef25vhvpu7phus75yp2xmwf4juphulsksqrho5krsa.b32.i2p:0 +cbnfpra6rok5v6z4crifqutx7iwwnwwblczz5tfmq4lkkdj6s5ga.b32.i2p:0 +cdwp7pxcqisoet4klai4ttemesjovum7zqcp2lbcynweol3kqfna.b32.i2p:0 +ciwvimts7zzoyrknhlowmzh35mzx4cluq7uycw3ffcckuov4tjjq.b32.i2p:0 +cmfgxulstg5trnmmssjvdrq5e7taqfta6jpuiqzakqujddhljs4a.b32.i2p:0 +cqdfflsgncsl3ypnwdc6ddwdoev7zcdjseddplzp2ytxhm5wkfia.b32.i2p:0 +csielmejcwnoj6djt7oo5o4amrp6pxpzvfgphbfysf7lwd73ig4a.b32.i2p:0 +cxfxj42wboxtkwkjuakree45xcfdmthek4i7qxruz6jpyff6souq.b32.i2p:0 +cy4wamhhjqg7amanxeifn3jmes2jnuactvjtxovbqqj6x6zveqba.b32.i2p:0 +czarxkemfnh5npq3jvgeaaygz2p3dnmntgu3njz64eo5fzuv2rba.b32.i2p:0 +czr6xr2b6mxgkym6fjabegjzgcixq4ajcgexxw2mhsuu3uvhhxga.b32.i2p:0 +d4ep754ddqs4xxdhkgxzss4a7mbfa23eavmmnghgjsefnzckt6yq.b32.i2p:0 +d6dnfifawlzft2qgv2pgb2h7dekx7r4fjp6vog32hqpriy66pwka.b32.i2p:0 +day3hgxyrtwjslt54sikevbhxxs4qzo7d6vi72ipmscqtq3qmijq.b32.i2p:0 +ddq7hqixl72qxppylrwnmi57zlguqrcwjurf6myvwqmtb3l2f4ja.b32.i2p:0 +ddtuamdoynsa4r543psethu4ygfuwvcc7ftlb24ak2n35gnmekha.b32.i2p:0 +dmed6el2h56j5kno7xocalj6k7ehosyr2fbwcrk5yf62c3edcodq.b32.i2p:0 +dod6rjcelojfcjyyasqscgae5wvriru2fwdog63pxvkxmuneswoq.b32.i2p:0 +doz2ddulyddrium7kvkbfmsj65b7d2tunrj6kyzhzgsbka6ylsnq.b32.i2p:0 +dsi67lqt3t6uhkqyptlnaib3ja3y2o32umwcbluls2aiozf2qf2q.b32.i2p:0 +duivxnrpflxpwa2nd5uijndlfupqdpu6g7c4wnmxfl2uncevsocq.b32.i2p:0 +dw7oovp44knhrikivfmt3hs2lz2trlpb7xout6dlinfnzxeac3sa.b32.i2p:0 +dy2ch6am32vyobauxiv4vfdrijzzh2a3vaumaaip25m3gosmxoma.b32.i2p:0 +dzfqla37in4xpg3pa4auekc4vpjt6pa74zfd3rhiew2jurffk6aq.b32.i2p:0 +dzkszdiwq3vretzqitqabbxrdv35itzr3dlcfjskw54eya6cwcta.b32.i2p:0 +e2rqlug3nweslh2f4egq5jtbgfpfxvjwjxtxunka2ijszeuyq3ea.b32.i2p:0 +e55k6wu46rzp4pg5pk5npgbr3zz45bc3ihtzu2xcye5vwnzdy7pq.b32.i2p:0 +e6qoyaetj3zf5jmavn44avsjdynx6oc6lxdgxfsq2bq3zue4nyeq.b32.i2p:0 +eciohu5nq7vsvwjjc52epskuk75d24iccgzmhbzrwonw6lx4gdva.b32.i2p:0 +ecxzxtzwyfwvm5yyaibc5w3ui66fri5a6m7tdsoqhupzqqpsmxnq.b32.i2p:0 +edo6wfaor7oa6rmyayeisqd7kfnbyzllbysvzijhmskgxxdjw7ta.b32.i2p:0 +efj265k4vn5t3vq3vup7zkkoa7dlqbwlhhy7pvxjrqlin3n2hwoq.b32.i2p:0 +eg4j6ftn4bkovxxlazghurrgntw2pmzwvegfnbl3cocdausdawia.b32.i2p:0 +ehgib44c4gpp4oonk2tbrgsns64gx2ituv3426ztv6unryxwlhda.b32.i2p:0 
+ejq34wotitjk7o5xmxdlx6zakrxchwwtk4vggpfpxx2abyacrh2q.b32.i2p:0 +ekiqvbeqqbnl6iyf5nfk4kjwcmq23whgmpkp4kawahygzz35jfwa.b32.i2p:0 +eomxrmmgfcqcr7zt6zltzu3l2pgev7sdmfjmsp2mp5cwt3y2wfxa.b32.i2p:0 +erbey2sswwn64gurd76youjvn2eamgff5hasriomgq67ocdm67hq.b32.i2p:0 +ety7tckj2ecrjizdcgjfczkc6wmabd7lwlwkc2loel5j3yzqaqaq.b32.i2p:0 +eubmbzk6rtpu3utz4crdwvm2jy564cywwwrw6dz7l2kfzvaiyuha.b32.i2p:0 +euzazzt6gnefk4y4bx44wcfgmublb64s443iih5t2bypioljw6tq.b32.i2p:0 +eyf6do7m3decbzpcr56brke5xmkxwpaa6ohexqx7atq67ceswvra.b32.i2p:0 +ezypdnfzphgy34e5kum2vxc4jk4ufc3vyl7xpnrshbigktfahuga.b32.i2p:0 +fbiz5d5suop3oecitb5gxytiggogivc56cztyb24xxjybb5gxgea.b32.i2p:0 +fbueucoovpqcd7k4klzfeflkduquqbtmqsjewpxl33kogiamtsga.b32.i2p:0 +fegsqsam453itjjhbxdoubeczzf7imdunegspi25efiswcrqmcgq.b32.i2p:0 +filwk3wdvtdeea7hgill2jc6mtnktesmp5jgc6g6ttr4x5cfy3aq.b32.i2p:0 +fjokxa2rjo5e7prepab3sezr7ii7jcuczu6hltgvddjbxp7njnha.b32.i2p:0 +fk26lfbo2hpfhmzkn3gm6kxjsd2wcszx6arbzv3z2f7c4hs4nkea.b32.i2p:0 +fkp4yg7glca6aicbec6p6n3ubb75cnyliruc5wluczsmit3manrq.b32.i2p:0 +fmuobwciypvans7dywogex6zmb7ld3k7g34rxknwvam6jicdthua.b32.i2p:0 +fnwnlkjfysevosm3skxf6lnc2a5ff3lywvustjkpib52f5gprteq.b32.i2p:0 +foc3ole3bfsxzz6sxufnwzhx72cijco6oefwggh6a7mx7kytbk3a.b32.i2p:0 +fqirohoqilnpkpgsrmbv36urfl4rmpbtabyrujihndaes44o7yzq.b32.i2p:0 +furug6s4djbinoeocgat6rybo7pvvkyfwzbxmgsndjxm2xl4rcea.b32.i2p:0 +futppfdivsek32sxzumcnok5pozmkxmybisd42mazwtdiovmtwla.b32.i2p:0 +fviyvz6qwaktg6vltzzad4osrxciipuvepu3v66gw7lo736cohca.b32.i2p:0 +fxoblyaiu6wgo4ltza3tjvsa3kdzyhbiiinlmrlbztsiwbfmilxq.b32.i2p:0 +g26gn3gslz3qhaasluxrz2ldpa5dhqvbcwunmxafpwb5ct4ghl2a.b32.i2p:0 +g5ayhhbanipee5wyehvfsf5cuqn2djl6xvrcmuv2r7us6mrcl7da.b32.i2p:0 +g5yzfhaelbngq3zqfpufdrpntpqr5urnoteaxmnzqkcsfuqs5svq.b32.i2p:0 +g6fd7bu2oae43dui7zzw34n47bnhbt3mjokfajf46zgf6ta7wbja.b32.i2p:0 +g6zvxkdudv6jandpsod7b5ys4k5omzcm2jv5ga33ze6vunhqufra.b32.i2p:0 +ga25oahtxga27pmece3snv2vlpcim4gitvzjl42mc3oxkhclxgja.b32.i2p:0 +gd5r6jobykemsb3beip7kaham3tyfr23w7lrkjvo3zagrgbrgnjq.b32.i2p:0 +gehtac45oaghz54ypyopim64mql7oad2bqclla74l6tfeolzmodq.b32.i2p:0 +gevhyfmgocdedifop2e6chs3xesb3dufjpwwsqyhjh7mf6qx22jq.b32.i2p:0 +gfartwr6pkbz4a356heez436fw5im4thimitetn6sdsq55prb74q.b32.i2p:0 +gffp4p6gsjptgrji5bm7bxybnaqctjosmdso2i3peqpuwwdjdezq.b32.i2p:0 +gfysvso2uoeulc7t4djkgpafxdko3kr5mvucidibgja2sc3aod5q.b32.i2p:0 +ghuf53y3dxdnxgjjxast56jcsjohoxffc45mbx5wvx23mt6q6nkq.b32.i2p:0 +ghvmoyhep4sayhgen7c2pcg575aiihrwlcngm7lxxqtbymbgmiha.b32.i2p:0 +ghvxfggh6bhu4qmqkepurafpvvqmab534zsbv4qrz5x6pt6wgfuq.b32.i2p:0 +gmdyky27756xd4dcky734kuqiojgwdbhavz6ou73dpkg2xhwjrsq.b32.i2p:0 +gmsk6rh4ya4qtsniu2uhehxavxarfmeojbe7sbj22opq6g2rsmha.b32.i2p:0 +gqerl5bp6brqik4pxagwldb6d2sy5enbxav7y2a4ttgeczlk6dzq.b32.i2p:0 +gracsh4efegi54gsmgqpvkqpmqxumrw2hnjuouvawg24hl7ffbea.b32.i2p:0 +gt3w2l4fdh5xq3vtcgz2qr64lx6wi2zqgrxju7dxrroejemn4x5q.b32.i2p:0 +gya5qjekil4fezub57z73pwccsshktot2bnr7wckhmbfwx5mzmja.b32.i2p:0 +gyde2opnhd3wvwalasugxmelkhd3f7tsesyuhppo7sbzz5wsaemq.b32.i2p:0 +h4x4ur4lr5rcspnenmzy6i5pa3e3pdtsyvnff5u5lh3vedv63qvq.b32.i2p:0 +h4yswxrmlmep4m3qu4fp6od2p7lioodmjp4zripxph7bfvmy34la.b32.i2p:0 +h5rbl7ltuivi6gx4iop4icjpvkp2rtm6tnweddkrqkbcwldhz35q.b32.i2p:0 +h63rqifntenj6titygonng376oval5vgp2gtz72uuzxfzjy7g24q.b32.i2p:0 +hcv5zizlrdemafcezwzchu7klydbrquutmdvr5vwaawdcpcwouyq.b32.i2p:0 +hgb3jvlzx2lqrgwshzkzqchw3kmwdlpam5y6j6wlwajqo3sbmdba.b32.i2p:0 +hhneoshcc76hb5pa5vq2pg7va4xe3ikbnacdzurbzbdsygbihiia.b32.i2p:0 +hj6kfjp6yusezysuvaxendfv2byw2erniku6lztfka736g5ia7qq.b32.i2p:0 +hlz7kz7pkh6tr7buzfgnysrwnhkhp7mu76hbmh2nn7b22l5vs5rq.b32.i2p:0 
+hnmrabzioiwxj64ih237rxf2zbdttfndiepp7vikt2jko7ovvrkq.b32.i2p:0 +hnxxsx7stxzesijof4fgr4xxysunipnivnxc3lqj2lefs7k7dqfa.b32.i2p:0 +hpalvgj7fh3by7n2v6ha3qsv6hv7qf4jcovtegbqlttgto33fc7a.b32.i2p:0 +hvafnot34y5uycrdkkue3grpa5smiosvl3nhoeiqk6b6en325yoa.b32.i2p:0 +hxoaugx3mwnypgjkm33vidgp4vt6thpckrfaqfgifyn2bzkyjmfa.b32.i2p:0 +hxw6inru6crfmkui4xjjucwdj5pffej5ke67zynbg6hw3bgrehoq.b32.i2p:0 +hyrmdeqafuv63ydnihuwnuhaanblitletw57qu4ytgkgus5eszca.b32.i2p:0 +hyxogpxrpnrjysofu4gr5jdptftwjjlz6thqxefsafctct2hcxwq.b32.i2p:0 +i2k32sb3fmkhemu2gzm7h4m24ubcgqdohhgq44qya7zbkrgw6mua.b32.i2p:0 +i2x72j2oejknudedxsbaxl6amq6a7phifs63bfnvnuxxfd2de7xq.b32.i2p:0 +i2yx56vnsicamm5nzszd6atxgxy2ltjufva6jkfujer3a3pojcja.b32.i2p:0 +i4cebpfoieapy7uxny7rjvnz7jl7rqv53sflga54papl4qycl7ea.b32.i2p:0 +i7ygposcafodsmzuiotuup7qjgyklv3tuah2r7rzuwwk4mmjb4tq.b32.i2p:0 +if5nrcqnju2by5r6n2mxmggq6zw2qdsctj3edpqmjmvqk6ndtkaa.b32.i2p:0 +ihaiw7hs6obvpj4eho6yqagi4ecuf5ber6hlvouprif4zfxr72ga.b32.i2p:0 +ii7ra5loq7nw43kxjtr6tdhf6clvtnfqqatnaz2mtr6pf5hyelbq.b32.i2p:0 +ilsxmoyndew4ed2gglf5mbwoo4we5tt6qmp4kzvzyzvwev2isf7q.b32.i2p:0 +inwsvefcxszherj6bxn7ou7svsitty25637hz4ifyn7xzrbh74tq.b32.i2p:0 +iub2v6hygn4znn34nw2zuihupczrcpniq2bdilvslldlsucfrrqa.b32.i2p:0 +ixhgawpp5anrlwxxgfun4zl3m5xhlxassszncph4i4e6aymb5fqq.b32.i2p:0 +j225nrmndwviihpe7ib6mm5h723cg62wrb7vnwofopv472ue3zwa.b32.i2p:0 +j26gpaan5ablb56k4rtbeamv3ihd4vdzdv74dw4kkevxruapwarq.b32.i2p:0 +jh7ev4a5zfzmeyekinhtbqfi6c7xhvc4msdpyi67j3sxl6r44ukq.b32.i2p:0 +jhxhwfiqysrj5dipbtdokif25ahsj75atdqrbloy564me5lxeada.b32.i2p:0 +jkszx66voakpa72effkj2xxjn4qglsidwejmxomqychvnypgq7fa.b32.i2p:0 +jl4vr5kac7njyltivn7ut5afyq5ipzc5woxl4523emodxixci3zq.b32.i2p:0 +jlczqz5nill3vscbjx4jfm5ogxdk3fznvssytzjano2uhem4wvra.b32.i2p:0 +jndr5i4qhxs6aa2bqvufrgttq6enavyghi54dqfuku2qnstdqa5a.b32.i2p:0 +jprlg3owt37gubh33q2hlz2kx34vdxu464p5pqa4yhwafxnpupma.b32.i2p:0 +jrert3o2rhbkpquybwg7tq4bdvlxeh6lnlcr5ddexkuguvi2l2pq.b32.i2p:0 +jrgzkopi5pqtnb4weo7yti2igm7g2ene4aggzdcrrjxlg3nln7ra.b32.i2p:0 +jrvg22xvbmjyrkqg7mr622zhnda3ijtua65cqngwrven7sf5zd7q.b32.i2p:0 +jsg4dsxvgjcqz3c2qgtwi4ip3l4n4hlxodbvgukwaipycwo26zua.b32.i2p:0 +jsjuwfzebgcyehvh5m33iv6d6cdpokeakfh3vnsqvpubicc73qna.b32.i2p:0 +jvaoh33cpczp626ldwr4azh4hb5cjmxlbz3dq3cxisdlzn7z25eq.b32.i2p:0 +jwqlvqmfvodnrslde2idqt25qxiyvgqdlr2q4uod5s6lkmlempya.b32.i2p:0 +jxqyize2ct6mm3jvvzavm2zbccsoe5qd4ekqo7zgt75rzz3mni3q.b32.i2p:0 +k5oyn7higpqwarsh3ukdgmx3z6tvqcwju4xawrge6aaswrdnoo3a.b32.i2p:0 +k5vjrlzt33dhk3wossfn5depmvy5txbivpvz3wr52maaijgxhztq.b32.i2p:0 +kbawcieyelwwitrjow537zpmdkncwiq42rhjgayw5o7u562mk23a.b32.i2p:0 +khxruoaom7juockko2tbxqo3bnrjmoqjhdoady3yyr4qacz4somq.b32.i2p:0 +klsaj4g2jh7w7gxwctniktg3hqwspm7zlkwtsaxw4nu35jssjasq.b32.i2p:0 +km3b5j6cqrcqewdpuveibptw5gguwktwan36jlow4xykpg2dgr6a.b32.i2p:0 +koh74kvbfguiam33fsefdlks5htuckvb3hy36vhbmvwhrqlxpbma.b32.i2p:0 +kteedl3k6kw7kgn5gbrqbdlxvmqbgiataunqrqtni6uw7uykof6q.b32.i2p:0 +kw3v6gq5semgt4gg6itum3qtaylanyof7wn6bnbngdeby4xixuoa.b32.i2p:0 +l6o5pefljcg2ffdrbvatzc37a2dkxrnd25z7vxkim2t3zmszwshq.b32.i2p:0 +lc54ao2ovn6awezl6ogpxd3xakew46otdu7uly5agtbdmifkgp4q.b32.i2p:0 +liu75cvktv4icbctg72w7nxbk4eibt7wamizfdii4omz7gcke5vq.b32.i2p:0 +lknjmixwvxzla3cgvb5qmpbehqpfflujcapn5mhkku63xaagxgsa.b32.i2p:0 +llavu6nhg2opvap2l35ufi277z7ew2wntepvornuav4t6fmpmfza.b32.i2p:0 +lmxiapsgob5aqje6ofu4npxoc5gnjtbfdgys5ceb4hbscortlzyq.b32.i2p:0 +lpektonr2uyiohuzi35shtj3oaa77rklmqsivuydxkcxkea2dwuq.b32.i2p:0 +lrah7acdsgopybg43shadwwiv6igezaw64i6jb5muqdg7dmhj3la.b32.i2p:0 +lrfcts7yrwq4qtrabl3ai5ljxnhgbza4ojb2oknzoobdvdnzhp6q.b32.i2p:0 
+lvdbqavgdom5h5denwkmdwslfzxckf6eddwflelbog7tgo2m7usa.b32.i2p:0 +lwjrapkfexjyjqf2rrdr4ghhxmlr3jdygsonetgtheglvmru5x6q.b32.i2p:0 +lxh4dszgwvjzfjpy32eridbn2yxzmcbcfs6xham7272rd3dmitya.b32.i2p:0 +lyg26sjkcx5ied5a4a7jdxmfeplfcnux3fux5rsxzkm5mgbbsllq.b32.i2p:0 +lzanccluo7lzsg5zzlyomj44eabqvopb27vlbsargetjbib7shdq.b32.i2p:0 +lzrg3vee3wbz7qa2d4mvaijzebynhx3567rpjhdvebeevfhfrp6a.b32.i2p:0 +mbyf5d3vhlykjmqpkdiw42spbb7u4c7vu7juvs3yfbwaqtjnqn5q.b32.i2p:0 +mbz4wyfeqtllccary2crak2qfovnaq2366vdddro76ihxcaavfpq.b32.i2p:0 +mcvkdet26xzsz47hluq5zgaymyjvz2z6nisc5etmnpkoudzkyliq.b32.i2p:0 +mqvyz64lyw5okdpsfjfffabu5u2leob6usqmfd5at5eaa7r37dfq.b32.i2p:0 +mwevh5r5dkzddlo2ol6bojpdds3kno4xqoy6p6ulid3paamgrtla.b32.i2p:0 +mwyjzdrgtypbwjyulw4ifetejz6xusqstvzylztsphpg2r2zf7ua.b32.i2p:0 +mxglgq5ic4r2migtume54owf2pkolj4rwrgklv5qsd5amb5vmiza.b32.i2p:0 +n3p5dki4vkjmvq7mgp5behafe5crvl6q5pekcfchrwyts7d6dbbq.b32.i2p:0 +n3xexp2kjvcqrd5xsmgodps2ihvvskcqiupl2dozdzbrzea6jnqq.b32.i2p:0 +n5hids757nhhjm4efinnfvgb5kmgxwvt6adgbh645g5fjbphch7q.b32.i2p:0 +n7jorimlqrjfsdbsvwozme6eicpui353co3oqb2iupfew6wljnjq.b32.i2p:0 +naox7zpagk3z2zcrcnrvnjsuqbgmhstb3z5gznpndw3hlb4x5zaa.b32.i2p:0 +nc2dn6nife7jemcbkcl3igpywuuzi55stqev3svcnnuj2atjowlq.b32.i2p:0 +ndd3remf3idc47ueuyph33ce2dgqwpirxaqomvg22vn7dffw2l6a.b32.i2p:0 +ndtoi53fz7e6ml6v6jn33675nwciw7mu5msn7afzbhebprusqg7a.b32.i2p:0 +nipevh5opcg7stsmf64dvtk5bn45orckoa2r36gcntf5i3hs3jdq.b32.i2p:0 +nkj2zabj5b4vhold33nz3kgp2x3i3es2l3iz64rxx7em7rkgubaq.b32.i2p:0 +nluwsgpbk3cplcb2fpnfpjrhot2w7a6mtjggkqikudspysqlsceq.b32.i2p:0 +nm2dusg3py77m23cr3nt2j4q7ukwzvqgv6h635pw4slgwwptekfa.b32.i2p:0 +npofpbbruz2qa4bvchlal363r3uii6gjmgfpc5rregb2oi5lcakq.b32.i2p:0 +ntpnfr2cmxzlmsznvgfv32rw2pdvbz7mxut3bgphe4o24dqlvd4a.b32.i2p:0 +nuwphtuudfrswids22qjq63zgxh5wu7erafl36jyniuxhz75ikyq.b32.i2p:0 +nwfdwlblhudom3oqe32yzzuuj2sb37tjvlmimiru4coazi5freeq.b32.i2p:0 +nxhs7kh72gtex73hhu5a7tgjul5zm6q7dxejqflvdq6boq6a4miq.b32.i2p:0 +nz4lq7pmswngaevv2uqyainqzutfxlkavxgde2w2slo2p6e2kcfq.b32.i2p:0 +o2rckrq7jejk6zfxqu3zed5vyjpqagk6ogqspq767evacer2twaq.b32.i2p:0 +o4tkad7wdeyarefuew2d3ytpwc45twkca5vobgidhuv6h74r3sfq.b32.i2p:0 +o5s5j3ghhi7jipsd4j5jcb7qjbsatkwxcvotgjjygtcj3liajoya.b32.i2p:0 +oanp5s4ols5idrplaomlkjo6x6eaaji2xbu5knlouxahmwizqipq.b32.i2p:0 +oayctm7vxtagndwapn5sojqw23odgnzn3c47zlwc5xalcrnffykq.b32.i2p:0 +od6xadvvlqedl7vje5auuop4pnwf26pfj46mxxucvt34rl5x5o4a.b32.i2p:0 +odm4oxozmmg5vfhz7c7vlppikb2s7t6rnhk5ibz5vgje7m3igc5a.b32.i2p:0 +oggn6qbpmbag224gumagzgno53mgozb65tpzt5lezjbfbbiqky4q.b32.i2p:0 +ohparrugvj2lqhjd52f3bdm3wa4mme25ozjqjqgcgjnu4sswyjaa.b32.i2p:0 +oikfcbxug34vtfnkflh6ogncxpvzs5jzfhaai24xuvzynfsi3slq.b32.i2p:0 +oiusghp3k343ofsyw2ym6iejviznukx5lrhafxzdasmjky24ixba.b32.i2p:0 +okfxeoh6itu4f5f43dhbzvkqwfrvm5c66lj6lvjj4q2b35i4pk4q.b32.i2p:0 +omkilp7edzk7fpmml2fylar64pc5okaxuizv7tfqk2qmsfdokp4a.b32.i2p:0 +orzhfle7yenbiesrtg37lklyukcjyjcvfdrpk3pnrrshtl6sdcrq.b32.i2p:0 +ovkjylbsfe2756cpphzx2zvhhmtbetr5ci6swmydqc5itajqxm7a.b32.i2p:0 +owexluejb3eszx4p3b6zuxyggsxxtkxfgxoylkagfecs3bgfnpja.b32.i2p:0 +p3qxhtoxajkmt7tgc6stv55msievc5323rtrsiwstvtf37iwi4dq.b32.i2p:0 +p4tsqwvdpaitvlgaujfr2m2qbr36qiwusas5zkiut7w2wjcp3sqa.b32.i2p:0 +p6l4k3yl6hmsep4shr47nexwzd5m4t7ddz6yepl2sqmkicxvfbba.b32.i2p:0 +perwhflptmjkumfb2q3mtxqkzhkyqp723lbl4ct2zivhymupktuq.b32.i2p:0 +pleconnukdt4fp7cjcjh7af2a3nzsnkfqjgx2s7jb5xyhq3b5xaa.b32.i2p:0 +pomodyf3nnav7yeu2usrshzq7dvdqawrhoau3uxlimivhy46lmaq.b32.i2p:0 +pouelx3dkoqn5dvlb2veazhiiknwvtvadbbzkprk3nmvl4vqpcma.b32.i2p:0 +ppez53yrs6lanyvyxuxblqxiuvhnqvvtafmwaid2kgp2dx2v5iaq.b32.i2p:0 
+pqnqwxl7jsrok7vgpgcvlxwlkg6yckx33n3g4xf5lvuntwn63b6a.b32.i2p:0 +pqpjrjnvbrlzzznv3rn24pkfemv62suddrtf5kzxxl3abek5xf3q.b32.i2p:0 +pt3ikblq2ytoqecetxdui3excysnrbzj2rwr4qbxqtlcklskjo5a.b32.i2p:0 +puhesrcq5ojcvpendpjv5hi7kfxvy64hlpgcokfwiq3cekomhr6a.b32.i2p:0 +pv6g7uin653rerdiivdgtoirjvokjowi4b3fwatszdlteyos3i2a.b32.i2p:0 +pvqyvn2lpvoeyhgcgunoqtetkrkp76iegyoii2af4crnlto6gb2q.b32.i2p:0 +pxn6af3w4p6auglktieukus6uzz2pbp2eya4ouvr7lzlxh2grz4q.b32.i2p:0 +q52c3eye3gvaepqhu45nt4ihgqfuzsr5hh5m5ngqtrhqmwjejjnq.b32.i2p:0 +q7mnmpgl54cbaorenckaybhwnh5ovyakcvthmjdsmvquip5ef4na.b32.i2p:0 +qddg7myylinn4tw6kdjmmp6fsyetkosnrbp2gsjx77tmkqyqv6ua.b32.i2p:0 +qfbughfr5hhgoomasyviwk3zin24uerpl6urz5smzxc2div5ixxa.b32.i2p:0 +qksthizqtfdjjxcrahqxkl3bev75k24blagrkoszxen45zelijpa.b32.i2p:0 +qm6vfq4pfvjg3r4g22xkg47xc4d2y5c7qlsoqyhfppsvh3grjfka.b32.i2p:0 +qmqsbya7yhpdkgxyqtzcmxajihednp6smhngx7s2n4gx5murt6wa.b32.i2p:0 +qnqojlthym7z4gwizcblhpd2thy7v4a6ifme57bzyl3nxzkj6ica.b32.i2p:0 +qpa5np77mrgjmmhh72bqjnhlxq5jj3annthoqwc72sz7u4j4zkgq.b32.i2p:0 +qqmxvujwi4ktgj2cuqmw4kiujkf7ukrkoe5ryy4bjb7tyleplsja.b32.i2p:0 +qqrvyctuomsxqiqkwtzpfxtdopzrdqmaxnwnhnuzzl2lrdcw7gpa.b32.i2p:0 +qwbmjxgnwnitin27gawtu5weiybo5nnlgs4trrh5vi5225fezmiq.b32.i2p:0 +qwhvlprhk3ntswr5xntnc2hhgmvd3bbgzgkbombiibhrsj7k6gyq.b32.i2p:0 +qwtpmh6ssnkeowocnvl7uveuhfodht3hsgwwydhy3v4xmwqbwveq.b32.i2p:0 +qxwd3nu4kicwuyhoje4dagdhbif4bxem4sk5etvvigycbe52ofia.b32.i2p:0 +r6hd3knqi2p6kaw7hybrcf2q5lcarulcczs3sgcuz4yc2saj3gya.b32.i2p:0 +r7bug6wbhevqqlbavouj3ggpa7e57sbd3oivkzqeyagrtxshmjpa.b32.i2p:0 +ra7ztq7oq7jcozpui7c4zv76gh7rjwhq5fkpxp7dvw7ritick66q.b32.i2p:0 +rb7tyjd6gi7evmt5mzvtboramqip43sh72zjxnwhj5k72zfi3g6a.b32.i2p:0 +rbciwwoimo3v55a7n4xb2yhyxeruvzyt2cdfeng3h3aragq4iziq.b32.i2p:0 +refo4v727jmff6ylrpbkvd5emlfr2hamaeh7zho6oval5dmnwlta.b32.i2p:0 +rfgsjhyvqbunef5b5r2emjuoxx2i7rcsl7gathy2n4gwjyyat6bq.b32.i2p:0 +riqrkj7zhuwqbpdluelijgyuvyubeqez2ndnvi6zezmvcwqmyxsq.b32.i2p:0 +rj37gjtx2umqccclxwmtfdgdxkfswp4pgrngrvhqq4fhhjqeud7a.b32.i2p:0 +rls6o23iz5wdado2nxsdxwxskf6mctsf623ki6ukuprdf2lfcbva.b32.i2p:0 +rm6vjhtun5zji3fegbvu2zmdlo67tuvda2dbveqi2vmqqxdrbsla.b32.i2p:0 +rqmwy4cazwtmkvluwxzz7k2ly5jwcoibar6vfxpr52aamlrm5wea.b32.i2p:0 +rrm2pems425buhonptp7lbtbprmwwjhbftey4ujvk25nclx7rerq.b32.i2p:0 +rs4exny4th3a7ngigrgpllsgoesxoxzqqk5upzzu24gdfv6rjacq.b32.i2p:0 +rtwm6njef5tifwv5ols3b4nsboniig5kdw32ik5qbv3wmhoo3pba.b32.i2p:0 +s2j2rv2dcizg3ibmrwzcvqe5cgtadezawown76l6sfqdtkfnzzja.b32.i2p:0 +sbalp2doxyedtr52kj57va2rmbi5npspv4drk4vxnujag72gtpiq.b32.i2p:0 +scfyzzdnycppb2vs4iae7wwjrbtywtuf4iyydaiolkvzy2d6b3ta.b32.i2p:0 +segnvtkjqqhzo52njxma3jgerwblnpvqrojjyeuqgzav7dwjlzoa.b32.i2p:0 +senbs2vd3mjgsf2nvpjcne63pzcfolnk2vtf2jxa7pxecuepfjjq.b32.i2p:0 +sgkdf2nk62slx6gtrnrxeoiknpdhfjjznqrcetdnu2j5iiltx6ca.b32.i2p:0 +sh5hww42vwlsl57cdropaeqmmwozinnr2tg6wq4prg5wrkusvxja.b32.i2p:0 +shh2ewyegnuwnmdse5kl5toybdvzkvk2yj4zcowz6iwhhh3ykdfa.b32.i2p:0 +sirb7csejtk7xwr6qymggudtft6cwd5d4iroj4vlajhiipt2hiaq.b32.i2p:0 +sjs76j7kaulnplp6wjs5nokshdowgfb7c6npmmtnnqimgzxxrh2q.b32.i2p:0 +sle3cbbdom6rknc3drqtawctpy635ica5d5gerjjdahfymkok4ma.b32.i2p:0 +solinsdewyzdgqbe4xso6wk5i6su4gcwas6zxd4q3om4ljvjv42a.b32.i2p:0 +spncsew2zgynatyvvl5mb4tuysd73oxmaztbiwjnocleyxuexyuq.b32.i2p:0 +stltasmf4b54srrjb3mf7hjtjvmvvms26btxakccdtllgrm2qzgq.b32.i2p:0 +su7d4biurihkyr3qeea7makkxzikxr5zi4znvryh3bjespppfhxq.b32.i2p:0 +sya6ydi4i6dslzjxmllugs5gyosg7rpwwfgalrzhd247k42xrcbq.b32.i2p:0 +syhxehvl6rublw6k5ysmzcsqrzdsnd7eqrbwalfkvhgfccpu2osq.b32.i2p:0 +sykjw3jnb7n6bo574wnpiaxhp2nm4gc6hc4jh4v6trsbpboysooa.b32.i2p:0 
+szqvwf4yxnqvpprv6uzxkn7voxab2i5wega7kn56oru2i6wpzutq.b32.i2p:0 +t4notlid4bejwz2tzucpvednkeuskenpnu5sqcbdhh3lqouigqxa.b32.i2p:0 +t66iyinlubtg4znht2a6gwwcyiftjosuq7xbur4gfekkhwymxoda.b32.i2p:0 +td6ecor7qphgru56xt2ydih6k6taj3tvp77zhimoj42t3lo5u7na.b32.i2p:0 +temfakn3i7wa2dekclbg47smybf52o3kyw4cnywcmrhrq3dajnwa.b32.i2p:0 +tetoqjagsf7fpejajiwm4rosqscy5huqbz5hcqgfuha5tdfnlrnq.b32.i2p:0 +tf4tozh5unsgyzpdsmrdcpbgekw2agu7tp5jvyclzcs5kjudwwpa.b32.i2p:0 +tjfdp6mel3wzdpxkhbyhrmfrha7gekv3gghalnxezrmvnhy2ncra.b32.i2p:0 +tmc54rhj6yxnezeg6rovgtkyd7kwleinb7cj4ytdatwhdel4vswq.b32.i2p:0 +tpys7fw73bmglnjtitxebprgxthg6ew4gnhp54uc23cneef7si2q.b32.i2p:0 +trucvlawpufrszky4zzhhxtddnhio4mnqawzc47n7kik6i444m2q.b32.i2p:0 +tumvq4vpa25z5z4phsl2odrgswtf3lhayrftqsdhvejzzp7y2zla.b32.i2p:0 +tv3x4kddbu753tnlghgh3txogp26tlydt47rl5scx7eoxgnocf4a.b32.i2p:0 +twwjmbvrcfkl7s3hh473jwmxdys2zxo2ozlfivsjvbh2kobl3vsq.b32.i2p:0 +tx75t5cfpc5usmipedmn5w7sxnmeofolxyk3srcsd4yupkkgsu5q.b32.i2p:0 +txmrkbijumgxdhr35gbyqscfb4a3y7lbrl2da2nyqrdtilqpe56a.b32.i2p:0 +u4enqhila7pkwbwuyfbfd36qllbyv7y3p4nelxekrf3fizp4htza.b32.i2p:0 +u635477uxqs7z4uvwx224u6ojn3c3ewcb66f3j7qlbzqyrrevxja.b32.i2p:0 +ua4fdaez7efn74yivvlyg2qooscqskviuovygiujgh3jyn6pfama.b32.i2p:0 +ucgc3ocprcieyjj3kjlysa5y2yzfekvtpmeg46t4htvviaet5evq.b32.i2p:0 +uczz22yiczuydnoqddryai65547al4kc23ev7u6cmgjdnwxqwcna.b32.i2p:0 +ue4j4hkw6dy4bqx5l4gws5kdseqto2y6yxbadh32yipwcywsf4pa.b32.i2p:0 +uegl5ai5v2ddzfeq6eda7k3ul254ecbyjj5lqlct7nsrljjxksha.b32.i2p:0 +uf62jvb56gmhzfvy4q3rrripw3ga2gpeutp6zyplferc4uu7j2ia.b32.i2p:0 +uhkfrosqc2uqczwcktbb7ups5b5t2up7yhmicryrzqknkj334rnq.b32.i2p:0 +ujbl6syp4h7whbgmqfbshln7xvmsiorjq5xj25wvl6wpcymbwwaq.b32.i2p:0 +ujg6b4cyxhi5pf4puwvwupur3iddm23uibapigpwl4bstvlt4cva.b32.i2p:0 +ulwhvqmfei6zvxnusl5yqyjxgb6cxoxofa2egp5ecvrp5hxpmqrq.b32.i2p:0 +umlbkwpm35dpp66ggxt23tdzsqh3t2ancdyrga5nxidn2lnkdgxq.b32.i2p:0 +undzufsjeb4qlf7y5llh56tji6zlhtshlsyht4yjdsa2k4ayx7vq.b32.i2p:0 +upcwcphup3eoiq4oqcing5rplkcl64legnv63rxxr2v26at6i3hq.b32.i2p:0 +uqfpr4d4vcbs7nd5l5o4s3mzl3dptascgesfirfuuhvuadzdn26a.b32.i2p:0 +uv44mjoqrj3m3gzz5wxlnszt5pvgk3iqlc3pmqfe6un6gxays2cq.b32.i2p:0 +uyboj62kmr2qe3pma7lej42wgcxk2i6jztimkw4deb7yf55om7ba.b32.i2p:0 +v7pncuqxlofwmhxi2rebbxvjr3cqddzewbzxgsmsz57i5rsj4zea.b32.i2p:0 +vavpoevto45dv5spd4csk3ut3afe5cldtzvoceuiix4ingmfhxla.b32.i2p:0 +vcksnyuyw3i6hfviob5yzoynq7okxi677bw7224273fjhsk2kgra.b32.i2p:0 +vkz72iimesw6klnurujjyoyaphgxfj74wg7hjuiyr3jhqu7fo3rq.b32.i2p:0 +vlpktuqjkol5yvvjjtamlb5k5t2ywrssy2nivancn3er4ymnyxiq.b32.i2p:0 +vo22o2zy6gb4nsmcteelpyoa4khh3hogoe3jsgis6yglyrtxzloq.b32.i2p:0 +vq5vh45dihzuxkgvu5le6mauvdlmk3pjzfobncvaelumwf56d4wq.b32.i2p:0 +vqt22x4nnflmbgitjhenfagzhkmqekbjoim6vxjmayco4ewdnxsq.b32.i2p:0 +vran2pe26rju2bol6dllrhmgrud6eutmh6i2dotlv4gnk3yuk2yq.b32.i2p:0 +vuqk76jvxfk55bkovqcmmkvycmndee7j3tuv7ezdb2slrfpjyvoa.b32.i2p:0 +vurewltyyuo7s6xznj7udjtoqlcm65k7zbf2jdjublcmiueobauq.b32.i2p:0 +vzvt4ddkoz7scjmjzu4dvankrkxx3bstfsjuice6466bleuegjsq.b32.i2p:0 +w25qtnu5zo5s4eczoiidyxjyx2gw62nitaek3uotaawarvdj35da.b32.i2p:0 +w2mgaza75amrvfocl7wt6v7eimprlasuj3bi4xezdr2wyqhkxzwq.b32.i2p:0 +w2uruwbfqmuroj4s2konb7jctqd7mt47yqwsggy65he7fvfrsgwa.b32.i2p:0 +w3hyqnlueb4yv5lkzj3wlnrjp7fzpxnig6x6m3w5du2ptfjcm2jq.b32.i2p:0 +w3r5hrvyb2ppiwulw5s2r5wwiv6x5xvirpftdugvtvartkeifsbq.b32.i2p:0 +w4zymjmq7fxpecyc6b3pscasjbi3kj4ggfucb7gh4woeycnjsxma.b32.i2p:0 +wacfewi6ehmfxvftxqmracfh7se2t7ozl62u4hsuyd4c5xfzuajq.b32.i2p:0 +weck2efmesdewefhddub35opskzw55thubvqcjfabnmuahzuls6a.b32.i2p:0 +wfrvnaeqad6zevflyme66n3p77b4axymni33fu42bhsaei6cmrpa.b32.i2p:0 
+wl36w4dicaovalv6w4vijn3jwws6wur35vnlhid5r76hal2ou5yq.b32.i2p:0 +woc63cuawux33zg2co5fidtw3alfqfc3mov3bjk65ip4ge4dz4eq.b32.i2p:0 +wpau3xgpkr72r2ysbanhet77xhtawm5mrm6a6mvjnv4oq4kypxva.b32.i2p:0 +wsbf5tpo7ecsafusflyym72k6tbyclgvqav56qafvyc4j3spzdoq.b32.i2p:0 +wtfebbwmsxoywu6nw5cowlxqhtokxrxftve2zee6flvrpqg4j6ma.b32.i2p:0 +wuikfwkext6lbl6urhoysr2abcyff5lkm2ojr6mfenqq25nzlluq.b32.i2p:0 +wwbw7nqr3ahkqv62cuqfwgtneekvvpnuc4i4f6yo7tpoqjswvcwa.b32.i2p:0 +wwwrx7f3urm4g2b5v3rtcxcijdefjireyqycl6dcoyap6vav2sqa.b32.i2p:0 +wy2udklgydh7iknnffbzvldoiwsct6dy3o7fjcsjcdjoq4b65vha.b32.i2p:0 +wylodalll6bd3l3vjfazlvmtsymgn5bokdwydmbzp5ncww35jpsa.b32.i2p:0 +x3mabmgx32hljbx46xqjobh6l3tvrridnxgrla4q7s2bgkk63aqa.b32.i2p:0 +xasewpiqpzhyggxwsajmnkrppi2ozmapqv6673zuuymoueq67tla.b32.i2p:0 +xbnl2smr777bqkflwbzstuwmlvu7evlxl4me3ynphcs3e72rlg5a.b32.i2p:0 +xf4xjhiry3x57gs73xhcfswulggzazfouszkbg5uo7oakc7cu3oq.b32.i2p:0 +xfyzfugo3qtfpq3s5zccbbqrxfkgy3ttlqhz6t6ovmtgrsuyce7q.b32.i2p:0 +xhzoxaskarhu5xsa6sat7kbxpce6tlvc2wfq6imdmfqgwbbyqypq.b32.i2p:0 +xjtrrpbdo5dvqjzsr7dhoyjocrrljfkutoyrmcyajfzbpozlytsa.b32.i2p:0 +xnb3pxtmai6ofbarycclwwueaarn4r3zt3zgkeepo4pgmswqvfcq.b32.i2p:0 +xnivz7w6yhjhvrvytupf4d7edydnfvzmfpqv6ndhxfcpkcz46yca.b32.i2p:0 +xofaehrg3ptjydgglyzkw36cxi6cgogkdt7yc4lmbng6vpnoegxq.b32.i2p:0 +xtxh4wtmbjls76wxnehe57etubuqvsdiunwgve6kwjt4acwswnjq.b32.i2p:0 +xuivvnpcj2rhsxmkyyjzmyq6a7gwgusnqwdklgimamvkzu7rnnmq.b32.i2p:0 +xvdqgttjov3jzgmke3owof4y63jwitxtbxflspxp7gqf4t2vobvq.b32.i2p:0 +xvpzaqftlfx2etqys733mndm7jr3l2j3if3wskfljzaow5s4rrfq.b32.i2p:0 +xzkwbpxki3prsrmylrjzyg5z3akjtvw4iu5zhevmz5aedgb2nn5q.b32.i2p:0 +y3occl5rqc2mz64esu5mqzoyfzlbxop7tttf2b3gyxjust57txfq.b32.i2p:0 +y45xhqkb43ncokfwhsmr4z6fwykuit6o3p2kbso3emv7stpiwwoq.b32.i2p:0 +y7cahac3wyh6kxubwikyrobe46bm5hhe4ovxn4ex6zf2ojkkawrq.b32.i2p:0 +y7flfkirmnbzzvwjyf4l5pgeqzjgxr3h2ds63lc7klx4byineowq.b32.i2p:0 +ybdhpsgmtw4eewreo7kswclgexdztzk32a5x4pbr5ws6nm42gpoq.b32.i2p:0 +ycntiiypllboyp3ikl6njjy47gxkfycvwcvmbyikshnjx3l2bvva.b32.i2p:0 +yemrkyqmjzwwn2yast2ga6dcnsovnxwip2rjpid56grdg7itugpa.b32.i2p:0 +yf2ny2doz2qirgwlzc6gebk6eifqvzeozcesreme7u76umfmva4a.b32.i2p:0 +yj3v3ocgldnackxptsjmwasa4xzxj3it6rtuhmlpxvwlq5kmyytq.b32.i2p:0 +ykv62rivlxurq2wkecagzs76tulfor765c237bsjn7jt4436kn2q.b32.i2p:0 +yl2y3q4k63d4a7tvqjmbtq2uyzmrjqgnhl5xatcamjen6faaxroq.b32.i2p:0 +yllvqk2utimxjtoyzk7l24s4n5sqp5dbn5vwsbt3g3dd6h4dxseq.b32.i2p:0 +ylxiqtda5q3ioliyff4cswzly4bvbaaabpic6chrclwjkt33tdaq.b32.i2p:0 +yodn52qhw4v5aoaoqfsunvfecbbl6cyk5j4aq2pfm4otu63qlp6a.b32.i2p:0 +yophu2fzfwls6dmpnhy3hzpfefz3sunu63bt5oi3zx6wyfheaouq.b32.i2p:0 +yosnjs5hintkbjrwyrvbs6myc2zd7gpfsrg7t7bprntwyilqnfnq.b32.i2p:0 +ypdrun7wxb3ah5rp7cycnizg6hnkduioqac7p75lklstuczoijjq.b32.i2p:0 +yphmy6w3myyy3am5awdksfyhbmmleqmblru3pfoajhjdilktwdca.b32.i2p:0 +ywkrrbyzwgc5b2lub4i5z5iwtedfz2byghh753vzlr4lxskwrsnq.b32.i2p:0 +yx6ghqhj7c5q3dfmwtmpg46dquicigagnwhw6aw3guzig4yrdwta.b32.i2p:0 +z2jyij2spdcwjqqfz6thpa4a6bp4lyg2jbaqkniiyyd4hocblwgq.b32.i2p:0 +z34pw5tlowwi3gpj3ycb2dptgyuq65bpj7w36xahslgikggo5eaa.b32.i2p:0 +z3j2xshl4tpbxaybcprhzq7cuz6urboqoxvvpnfriv4n2lq72jsq.b32.i2p:0 +z3pgyfiwfzcd2g7v4rs6el5tvc55y7a3tai4gcbpso6flaejckea.b32.i2p:0 +z6g6lmwuhnldiazyf72zlsvtwbql5mfvpmyaipuvjfwnb6slel6a.b32.i2p:0 +z6gf7u676kcbjcf3xydssjaxe77vatqr7lr4356usgdvsuh54jpa.b32.i2p:0 +zcwgqw7hlw7437a7au6n6obljdb4arnshoibdqo6voree4xiznoq.b32.i2p:0 +zh34uj3cwyr7zs2uc4aeyrhffgumxd5uzpfpbmbuyetqkd3xy4ia.b32.i2p:0 +znt6mdvmstxcdaqeiuo67k2ug4e422gjftrnm7ov2izaeuh73d5q.b32.i2p:0 +zqhoqgf3enj2pv74sjov6dthpr6jqafw5qqzzsvnfdqzycychxpq.b32.i2p:0 
+zr744lzxu2lvzncuccwbo2p5rpocsidorh5nug2it2yfsppbcjsa.b32.i2p:0 +ztmhh3h46uvwd5pmuauzjhcsybo6tqgx5a4pmhpolwtxyeiud6xa.b32.i2p:0 +zxr2swmk37nxncdkzh3aym6bke3wnkf66zffv6k4wivce6smbreq.b32.i2p:0 +zyirlmzsnhkyhbs2lya5mnbvzbvnx2tqqnmplksa2ehs7s4yxcsq.b32.i2p:0 +2.39.163.191:8333 # AS30722 +2.83.248.77:8333 # AS3243 +2.87.72.235:8333 # AS6799 +3.9.188.44:8333 # AS16509 +3.76.143.185:8333 # AS16509 +3.231.154.195:8333 # AS14618 +5.2.23.226:8333 # AS21472 +5.45.72.11:8333 # AS50673 +5.78.71.173:8333 # AS212317 +5.78.116.127:8333 # AS212317 +5.128.87.126:8333 # AS31200 +5.144.88.83:8333 # AS61349 +5.172.132.104:8333 # AS15600 +5.186.60.13:8333 # AS44869 +5.188.62.18:8333 # AS34665 +5.255.97.92:8333 # AS60404 +8.209.70.77:8333 # AS45102 +14.203.57.50:8333 # AS7545 +15.235.82.178:8333 # AS16276 +18.183.87.240:8333 # AS8987 +20.89.243.139:8333 # AS8075 +23.134.94.82:8333 # AS63023 +23.142.145.238:8333 # AS210000 +23.175.0.222:8333 # AS395502 +24.6.89.155:8333 # AS33651 +24.36.96.14:8333 # AS7992 +24.76.199.86:8333 # AS6327 +24.116.163.227:8333 # AS11492 +24.119.41.234:8333 # AS11492 +24.146.33.13:8333 # AS7992 +24.148.52.108:8333 # AS6079 +24.156.42.154:8333 # AS19108 +27.148.206.140:8333 # AS133774 +31.19.205.19:8333 # AS3209 +31.30.59.201:8333 # AS16019 +31.189.24.188:8333 # AS24608 +35.170.77.206:8333 # AS14618 +35.208.62.130:8333 # AS15169 +35.213.159.168:8333 # AS15169 +35.228.210.193:8333 # AS396982 +35.244.9.182:8333 # AS396982 +37.60.246.82:8333 # AS51167 +37.157.192.94:8333 # AS197019 +37.221.197.208:8333 # AS197540 +38.38.192.248:8333 # AS394432 +38.40.110.66:8333 # AS398721 +38.75.215.250:8333 # AS397377 +38.75.235.97:8333 # AS397142 +38.102.85.36:8333 # AS26832 +38.111.114.155:8333 # AS62563 +38.254.140.40:8333 # AS54936 +40.160.13.7:8333 # AS16276 +43.159.61.16:8333 # AS132203 +43.198.159.17:8333 # AS8987 +43.225.143.17:8333 # AS136907 +45.45.27.233:8333 # AS5769 +45.88.106.107:8333 # AS204601 +45.129.84.136:8333 # AS206264 +45.129.181.107:8333 # AS197540 +45.130.20.177:8333 # AS3214 +45.142.235.46:8333 # AS206238 +45.150.66.10:8333 # AS200195 +45.207.43.110:8333 # AS133861 +46.10.211.143:8333 # AS8866 +46.23.87.218:8333 # AS60131 +46.28.205.68:8333 # AS197988 +46.32.178.82:8333 # AS196925 +46.39.167.49:8333 # AS31246 +46.59.40.91:8333 # AS8473 +46.148.235.36:8333 # AS49505 +46.150.161.43:8333 # AS49106 +46.175.178.3:8333 # AS56427 +46.229.238.187:8333 # AS29405 +47.12.228.122:8333 # AS20115 +47.184.159.236:8333 # AS5650 +47.254.178.44:8333 # AS45102 +50.4.18.90:8333 # AS12083 +50.30.36.140:8333 # AS30083 +50.45.128.28:8333 # AS27017 +50.79.86.113:8333 # AS33489 +50.126.96.22:8333 # AS27017 +50.144.135.40:8333 # AS33657 +51.154.26.11:8333 # AS15796 +51.174.206.76:8333 # AS29695 +51.194.13.25:8333 # AS5607 +58.229.105.38:8333 # AS9318 +59.188.108.150:8333 # AS9381 +62.12.168.100:8333 # AS15623 +62.168.65.42:8333 # AS5578 +62.177.111.78:8333 # AS29208 +62.210.207.63:8333 # AS12876 +64.20.45.42:8333 # AS19318 +64.31.61.150:8333 # AS46475 +64.42.176.234:8333 # AS63018 +64.46.58.172:8333 # AS20161 +64.68.203.11:8333 # AS16686 +64.156.192.61:8333 # AS21581 +64.185.225.26:8333 # AS18450 +64.187.168.198:8333 # AS11404 +64.253.104.118:8333 # AS4364 +66.18.13.162:8333 # AS13767 +66.29.129.233:8333 # AS22612 +66.35.84.30:8333 # AS2734 +66.194.38.45:8333 # AS35863 +66.228.28.63:8333 # AS11233 +67.145.204.18:8333 # AS19901 +67.205.190.143:8333 # AS14061 +67.210.228.203:8333 # AS7819 +67.217.61.213:8333 # AS19318 +67.251.135.17:8333 # AS12271 +68.39.190.185:8333 # AS33491 +68.75.195.2:8333 # AS17235 
+68.102.134.227:8333 # AS22773 +68.219.242.34:8333 # AS8075 +69.4.94.226:8333 # AS55286 +69.142.28.54:8333 # AS33287 +69.146.62.1:8333 # AS33588 +69.176.188.251:8333 # AS18988 +69.180.170.215:8333 # AS13367 +69.181.64.87:8333 # AS33651 +69.196.152.33:8333 # AS5645 +70.35.98.12:8333 # AS32264 +70.67.139.204:8333 # AS6327 +70.121.50.147:8333 # AS11427 +70.172.132.78:8333 # AS22773 +71.224.201.224:8333 # AS33287 +72.18.53.189:8333 # AS40545 +72.46.131.18:8333 # AS36114 +72.50.220.5:8333 # AS10242 +72.71.209.96:8333 # AS13977 +73.35.246.175:8333 # AS33650 +73.98.107.37:8333 # AS33654 +73.98.178.236:8333 # AS33660 +73.117.132.138:8333 # AS7016 +73.131.209.70:8333 # AS33660 +73.157.112.178:8333 # AS33650 +73.228.173.21:8333 # AS13367 +73.253.55.217:8333 # AS7015 +74.73.229.148:8333 # AS12271 +74.112.115.197:8333 # AS11525 +74.118.138.124:8333 # AS20326 +74.118.143.8:8333 # AS20326 +74.133.90.251:8333 # AS10796 +74.213.251.233:8333 # AS14978 +74.220.255.190:8333 # AS23175 +76.113.120.246:8333 # AS33654 +76.127.211.90:8333 # AS7015 +76.154.162.182:8333 # AS33652 +77.33.144.156:8333 # AS44869 +77.38.3.90:8333 # AS3212 +77.100.20.178:8333 # AS5089 +77.109.112.223:8333 # AS9031 +77.165.250.62:8333 # AS1136 +77.223.120.139:8333 # AS50340 +77.240.190.41:8333 # AS24641 +78.30.57.78:8333 # AS15704 +78.157.91.120:8333 # AS21211 +79.54.240.124:8333 # AS3269 +79.116.53.50:8333 # AS57269 +79.134.99.114:8333 # AS16302 +79.156.138.107:8333 # AS3352 +80.60.120.119:8333 # AS1136 +80.64.211.102:8333 # AS200295 +80.64.211.103:8333 # AS200295 +80.90.4.178:8333 # AS20546 +80.108.219.153:8333 # AS8412 +80.209.231.126:8333 # AS62282 +80.241.194.147:8333 # AS8881 +81.7.17.202:8333 # AS35366 +81.83.214.134:8333 # AS6848 +81.197.182.195:8333 # AS719 +81.229.234.87:8333 # AS3301 +82.10.250.130:8333 # AS5089 +82.64.49.27:8333 # AS12322 +82.66.10.11:8333 # AS12322 +82.71.47.216:8333 # AS13037 +82.73.188.178:8333 # AS33915 +82.85.110.20:8333 # AS8612 +82.124.33.119:8333 # AS3215 +82.174.142.202:8333 # AS50266 +82.195.237.251:8333 # AS1836 +82.218.34.162:8333 # AS8339 +83.49.10.227:8333 # AS3352 +83.78.112.142:8333 # AS3303 +83.136.232.21:8333 # AS29182 +83.144.180.88:8333 # AS2860 +83.150.2.128:8333 # AS8758 +83.168.65.186:8333 # AS31304 +83.208.193.242:8333 # AS5610 +83.218.160.161:8333 # AS31543 +84.32.248.63:8333 # AS16125 +84.75.0.178:8333 # AS6730 +84.113.129.195:8333 # AS8412 +84.195.116.26:8333 # AS6848 +84.246.200.122:8333 # AS42455 +84.255.238.120:8333 # AS34779 +85.14.79.26:8333 # AS31242 +85.146.115.136:8333 # AS50266 +85.159.237.71:8333 # AS43350 +85.195.83.50:8333 # AS20773 +85.214.161.252:8333 # AS6724 +85.214.223.36:8333 # AS6724 +85.215.75.210:8333 # AS8560 +85.234.145.132:8333 # AS29550 +85.246.38.150:8333 # AS3243 +85.252.216.146:8333 # AS2116 +86.41.130.108:8333 # AS5466 +86.101.92.93:8333 # AS21334 +86.104.228.12:8333 # AS45021 +86.104.228.36:8333 # AS45021 +86.109.12.60:8333 # AS54825 +86.111.48.71:8333 # AS50304 +86.111.48.72:8333 # AS50304 +86.124.145.184:8333 # AS8708 +86.133.133.82:8333 # AS2856 +87.120.8.239:8333 # AS34224 +87.179.122.129:8333 # AS3320 +87.236.195.198:8333 # AS35592 +88.85.88.133:8333 # AS35415 +88.88.58.251:8333 # AS2119 +88.99.71.213:8333 # AS24940 +88.119.128.36:8333 # AS8764 +88.134.41.88:8333 # AS3209 +88.210.15.24:8333 # AS25308 +88.212.53.246:8333 # AS42841 +88.218.226.91:8333 # AS48314 +89.1.104.97:8333 # AS8422 +89.39.106.26:8333 # AS49981 +89.179.240.133:8333 # AS8402 +89.207.131.19:8333 # AS62370 +89.233.207.67:8333 # AS29518 +90.26.82.45:8333 # AS3215 
+90.173.118.109:8333 # AS12479 +90.250.10.165:8333 # AS5378 +91.90.166.203:8333 # AS214815 +91.92.144.226:8333 # AS44901 +91.92.154.18:8333 # AS61098 +91.202.4.65:8333 # AS43641 +91.228.45.130:8333 # AS197895 +91.239.130.62:8333 # AS62240 +91.240.84.52:8333 # AS29182 +92.42.110.214:8333 # AS29066 +92.84.188.206:8333 # AS9050 +92.205.232.47:8333 # AS21499 +92.206.105.31:8333 # AS16202 +92.247.49.210:8333 # AS29580 +93.16.43.241:8333 # AS198949 +93.38.127.179:8333 # AS12874 +93.115.26.6:8333 # AS16125 +93.177.188.74:8333 # AS16010 +94.19.128.204:8333 # AS35807 +94.131.0.73:8333 # AS8772 +94.136.2.126:8333 # AS48943 +94.156.128.153:8333 # AS44901 +94.203.133.251:8333 # AS15802 +94.210.55.89:8333 # AS33915 +94.241.90.251:8333 # AS42908 +95.105.172.171:8333 # AS15962 +95.169.196.247:8333 # AS201133 +95.211.152.100:8333 # AS60781 +95.213.145.218:8333 # AS49505 +96.41.133.58:8333 # AS20115 +96.74.179.132:8333 # AS33491 +97.113.140.223:8333 # AS209 +98.13.77.64:8333 # AS11351 +98.156.108.211:8333 # AS11427 +99.95.54.159:8333 # AS7018 +99.229.106.225:8333 # AS812 +99.240.98.36:8333 # AS812 +101.32.127.143:8333 # AS132203 +101.100.139.249:8333 # AS9790 +103.37.205.47:8333 # AS56068 +103.45.247.202:8333 # AS41436 +103.76.205.213:8333 # AS58610 +103.99.169.110:8333 # AS54415 +103.101.203.44:8333 # AS36007 +103.231.42.36:8333 # AS18229 +103.233.83.28:8333 # AS4213 +104.128.64.58:8333 # AS36007 +104.128.201.183:8333 # AS13428 +104.172.235.227:8333 # AS20001 +104.219.214.211:8333 # AS398823 +104.221.34.226:8333 # AS5769 +104.229.101.78:8333 # AS11351 +104.238.220.199:8333 # AS23470 +107.148.56.81:8333 # AS399195 +107.148.68.174:8333 # AS394432 +107.155.67.210:8333 # AS29802 +107.191.33.82:8333 # AS20473 +108.26.149.161:8333 # AS701 +108.225.195.53:8333 # AS7018 +109.150.129.227:8333 # AS2856 +109.207.79.248:8333 # AS44709 +109.224.84.149:8333 # AS197197 +109.235.247.122:8333 # AS205950 +111.90.158.123:8333 # AS45839 +111.90.158.137:8333 # AS45839 +115.140.124.99:8333 # AS17858 +116.86.195.192:8333 # AS55430 +117.48.133.67:8333 # AS140292 +118.24.37.253:8333 # AS45090 +120.226.39.100:8333 # AS9808 +121.98.22.147:8333 # AS9790 +122.40.25.57:8333 # AS17858 +128.0.98.214:8333 # AS42652 +128.0.190.26:8333 # AS30764 +128.2.12.38:8333 # AS9 +129.10.85.103:8333 # AS156 +129.80.192.20:8333 # AS31898 +129.126.172.115:8333 # AS17547 +130.180.211.123:8333 # AS199636 +130.250.7.252:8333 # AS394901 +131.153.203.205:8333 # AS20454 +131.153.232.199:8333 # AS19437 +131.153.238.121:8333 # AS19437 +133.3.249.155:8333 # AS2504 +133.5.165.199:8333 # AS2508 +133.125.50.180:8333 # AS7684 +134.65.193.149:8333 # AS44486 +134.195.196.65:8333 # AS62563 +136.49.31.88:8333 # AS16591 +136.62.152.251:8333 # AS16591 +136.169.52.139:8333 # AS20910 +138.2.110.216:8333 # AS31898 +138.75.131.48:8333 # AS4773 +140.186.199.14:8333 # AS11232 +141.0.155.19:8333 # AS56478 +142.115.140.2:8333 # AS577 +142.202.48.124:8333 # AS63023 +143.0.142.156:8333 # AS264009 +144.2.104.35:8333 # AS57370 +144.2.104.189:8333 # AS57370 +144.126.147.252:8333 # AS40021 +147.28.211.75:8333 # AS54825 +147.32.95.62:8333 # AS2852 +149.7.216.178:8333 # AS174 +149.28.116.34:8333 # AS20473 +149.50.101.15:8333 # AS201814 +149.50.101.28:8333 # AS201814 +152.117.88.43:8333 # AS30600 +152.165.38.160:8333 # AS2527 +153.92.93.114:8333 # AS41998 +154.7.1.114:8333 # AS139646 +154.26.130.95:8333 # AS141995 +154.26.154.73:8333 # AS141995 +154.38.167.152:8333 # AS40021 +154.65.14.19:8333 # AS37628 +155.4.142.33:8333 # AS8473 +157.143.21.102:8333 # AS8758 +157.147.131.251:8333 # AS2527
+158.248.34.141:8333 # AS29695 +159.138.87.18:8333 # AS136907 +159.196.227.196:8333 # AS4764 +159.246.25.53:8333 # AS30491 +160.16.110.6:8333 # AS9370 +160.80.12.16:8333 # AS137 +160.80.97.66:8333 # AS137 +161.97.151.9:8333 # AS51167 +162.0.226.60:8333 # AS22612 +162.55.122.93:8333 # AS24940 +162.219.38.94:8333 # AS10099 +162.245.196.45:8333 # AS23314 +164.152.167.208:8333 # AS59253 +165.22.229.88:8333 # AS14061 +166.70.211.78:8333 # AS6315 +166.78.241.20:8333 # AS19994 +166.78.241.25:8333 # AS19994 +167.88.11.203:8333 # AS20278 +167.248.185.196:8333 # AS398721 +169.155.170.211:8333 # AS44486 +171.101.73.128:8333 # AS7470 +172.81.182.240:8333 # AS174 +172.92.31.45:8333 # AS11404 +172.93.106.85:8333 # AS23470 +172.233.211.171:8333 # AS63949 +172.234.95.35:8333 # AS63949 +172.241.70.236:8333 # AS7979 +173.66.197.19:8333 # AS701 +173.87.234.220:8333 # AS5650 +173.181.35.50:8333 # AS852 +173.183.130.47:8333 # AS852 +173.197.244.157:8333 # AS10838 +174.21.76.8:8333 # AS209 +174.57.136.72:8333 # AS33659 +174.63.171.76:8333 # AS33661 +174.88.243.94:8333 # AS577 +175.110.115.120:8333 # AS49981 +176.74.136.237:8333 # AS35613 +176.99.2.90:8333 # AS197695 +176.114.248.225:8333 # AS202618 +176.118.220.29:8333 # AS60042 +176.126.116.7:8333 # AS207586 +176.136.243.63:8333 # AS5410 +176.205.158.198:8333 # AS5384 +178.38.6.52:8333 # AS6730 +183.88.223.208:8333 # AS45629 +184.56.122.69:8333 # AS10796 +184.95.32.130:8333 # AS20454 +184.105.131.181:8333 # AS6939 +184.171.208.109:8333 # AS40788 +185.8.106.179:8333 # AS204770 +185.11.61.33:8333 # AS57523 +185.19.30.242:8333 # AS61098 +185.26.99.171:8333 # AS44051 +185.31.136.166:8333 # AS60414 +185.31.136.246:8333 # AS60414 +185.63.97.216:8333 # AS50825 +185.64.125.16:8333 # AS59921 +185.65.93.104:8333 # AS201730 +185.68.67.42:8333 # AS6772 +185.70.43.193:8333 # AS62371 +185.78.209.28:8333 # AS202128 +185.88.229.254:8333 # AS20963 +185.112.144.119:8333 # AS44925 +185.137.173.125:8333 # AS13030 +185.140.246.140:8333 # AS48602 +185.144.83.131:8333 # AS9009 +185.148.3.227:8333 # AS203003 +185.150.162.111:8333 # AS34197 +185.156.202.35:8333 # AS56388 +185.163.44.36:8333 # AS39798 +185.197.30.66:8333 # AS63473 +185.197.160.61:8333 # AS60144 +185.209.12.76:8333 # AS212323 +185.239.221.23:8333 # AS61282 +185.243.218.19:8333 # AS56655 +185.243.218.106:8333 # AS56655 +185.248.160.163:8333 # AS43350 +188.12.149.216:8333 # AS3269 +188.63.158.192:8333 # AS3303 +188.138.88.47:8333 # AS8972 +188.138.112.60:8333 # AS8972 +188.213.92.39:8333 # AS206238 +188.243.71.145:8333 # AS35807 +189.32.136.9:8333 # AS28573 +190.53.100.34:8333 # AS27773 +190.64.134.52:8333 # AS6057 +191.13.128.58:8333 # AS27699 +191.251.32.162:8333 # AS18881 +191.255.221.37:8333 # AS27699 +192.3.11.20:8333 # AS36352 +192.3.11.26:8333 # AS36352 +192.34.87.86:8333 # AS33083 +192.161.48.47:8333 # AS8100 +192.187.121.46:8333 # AS33387 +192.227.73.9:8333 # AS13886 +192.243.215.102:8333 # AS63297 +193.22.128.23:8333 # AS56469 +193.72.32.187:8333 # AS33965 +193.84.116.22:8333 # AS2852 +193.176.1.74:8333 # AS24961 +193.200.206.14:8333 # AS49747 +193.218.118.13:8333 # AS207656 +193.222.130.14:8333 # AS29208 +194.0.157.6:8333 # AS25099 +194.14.246.9:8333 # AS50066 +194.67.64.229:8333 # AS49352 +194.67.208.191:8333 # AS209641 +195.56.63.12:8333 # AS5483 +195.154.172.177:8333 # AS12876 +195.181.245.149:8333 # AS62282 +195.189.97.38:8333 # AS59642 +197.155.6.43:8333 # AS37199 +198.27.174.140:8333 # AS1299 +198.98.117.238:8333 # AS21949 +198.154.93.110:8333 # AS55081 +199.7.144.151:8333 # AS21949
+199.36.253.252:8333 # AS396952 +200.122.181.26:8333 # AS3790 +200.180.197.188:8333 # AS8167 +202.7.254.250:8333 # AS136994 +202.186.41.219:8333 # AS9930 +203.11.72.118:8333 # AS401199 +203.11.72.199:8333 # AS401199 +203.34.58.43:8333 # AS7545 +203.51.11.167:8333 # AS1221 +204.15.11.35:8333 # AS13331 +204.194.220.39:8333 # AS20055 +204.194.220.40:8333 # AS20055 +206.125.169.164:8333 # AS25795 +206.204.106.8:8333 # AS212947 +207.66.71.46:8333 # AS399220 +207.90.192.54:8333 # AS26832 +207.182.146.130:8333 # AS10297 +208.93.231.240:8333 # AS29893 +209.141.37.57:8333 # AS53667 +209.177.138.245:8333 # AS7832 +209.205.204.218:8333 # AS55081 +211.221.42.143:8333 # AS4766 +212.10.229.240:8333 # AS39642 +212.29.41.158:8333 # AS15763 +212.51.129.60:8333 # AS13030 +212.68.218.124:8333 # AS12392 +212.86.32.106:8333 # AS15366 +212.158.133.185:8333 # AS1299 +213.165.95.142:8333 # AS8560 +213.174.156.81:8333 # AS39572 +213.174.156.86:8333 # AS39572 +213.217.210.90:8333 # AS12709 +213.227.147.244:8333 # AS60781 +216.83.150.142:8333 # AS5048 +216.226.128.189:8333 # AS13706 +217.11.240.4:8333 # AS21430 +217.20.131.64:8333 # AS5483 +217.64.47.138:8333 # AS39324 +217.64.47.200:8333 # AS39324 +217.155.244.170:8333 # AS13037 +217.180.221.162:8333 # AS30600 +217.230.42.55:8333 # AS3320 +219.79.200.233:8333 # AS4760 +220.92.141.84:8333 # AS4766 +222.239.166.108:8333 # AS9318 +[2001:13d8:1c01:21:215:17ff:fe63:2a7e]:8333 # AS3790 +[2001:1528:111:ffff:214::207]:8333 # AS15685 +[2001:1620:542c:210::100]:8333 # AS13030 +[2001:18b8:0:100:0:b00b:420:69]:8333 # AS29789 +[2001:19f0:0:4f89:5400:4ff:fea0:3837]:8333 # AS20473 +[2001:19f0:4401:e8a:5400:4ff:fe8e:d398]:8333 # AS20473 +[2001:19f0:5000:1a80:5400:4ff:fe71:aac5]:8333 # AS20473 +[2001:19f0:5:2b12:5400:4ff:fe6e:3afe]:8333 # AS20473 +[2001:19f0:5:5b81:5e6f:69ff:fe57:94d0]:8333 # AS20473 +[2001:19f0:6801:6ec:2::1]:8333 # AS20473 +[2001:19f0:c800:2ce5:5400:4ff:fed7:663d]:8333 # AS20473 +[2001:1bc0:c1::2000]:8333 # AS29686 +[2001:1c03:3911:d900:4b0c:5f2:138a:5212]:8333 # AS33915 +[2001:1c04:1308:3100::985]:8333 # AS33915 +[2001:1c04:4008:6300:8a5f:2678:114b:a660]:8333 # AS33915 +[2001:4060:4419:8001::42]:8333 # AS6772 +[2001:41d0:1004:24be::]:8333 # AS16276 +[2001:41d0:203:52c2::]:8333 # AS16276 +[2001:41d0:203:ba6c::]:8333 # AS16276 +[2001:41d0:303:146e::]:8333 # AS16276 +[2001:41d0:403:c20::]:8333 # AS16276 +[2001:41d0:700:1c4d::]:8333 # AS16276 +[2001:41d0:700:70a4::]:8333 # AS16276 +[2001:41d0:c:4b8::102]:8333 # AS16276 +[2001:428:5002:600:80be:7bff:fec4:9c43]:8333 # AS209 +[2001:470:1a34:2:a804:86ff:fec2:863a]:8333 # AS6939 +[2001:470:26:472::b7c]:8333 # AS6939 +[2001:470:28:b17::2]:8333 # AS6939 +[2001:470:71:358:6083:be0b:cbaa:a97]:8333 # AS6939 +[2001:470:7:b74::2]:8333 # AS6939 +[2001:470:88ff:2e::1]:8333 # AS6939 +[2001:470:8a71:2::200]:8333 # AS6939 +[2001:470:a:c13::2]:8333 # AS6939 +[2001:470:da72::2:3]:8333 # AS6939 +[2001:4dd0:3564:0:30b7:1d7b:6fec:4c5c]:8333 # AS8422 +[2001:4dd0:3564:0:88e:b4ff:2ad0:699b]:8333 # AS8422 +[2001:4dd0:3564:0:9c1c:cc31:9fe8:5505]:8333 # AS8422 +[2001:4dd0:3564:0:a0c4:d41f:4c4:1bb0]:8333 # AS8422 +[2001:4dd0:3564:0:fd76:c1d3:1854:5bd9]:8333 # AS8422 +[2001:4dd0:3564:1::7676:8090]:8333 # AS8422 +[2001:4dd0:3564:1:b977:bd71:4612:8e40]:8333 # AS8422 +[2001:4dd0:af0e:3564:0:69:90:8333]:8333 # AS8422 +[2001:4dd0:af0e:3564::69:1]:8333 # AS8422 +[2001:4dd0:af0e:3564::69:90]:8333 # AS8422 +[2001:569:5079:abd2::c9]:8333 # AS852 +[2001:5a8:40c7:f500:2601:7cdc:1a2d:661c]:8333 # AS1299 +[2001:5a8:40c7:f501:e0af:1c:1468:e272]:8333 # AS1299 
+[2001:5a8:40db:2000:8668:a702:c89:bdd4]:8333 # AS1299 +[2001:5a8:40db:2000:867b:8aa6:2010:879a]:8333 # AS1299 +[2001:5a8:60c0:d500::7840]:8333 # AS1299 +[2001:638:a000:4140::ffff:47]:8333 # AS680 +[2001:638:a000:b101::2b3d]:8333 # AS680 +[2001:648:2800:131:4b1f:f6fc:20f7:f99f]:8333 # AS5470 +[2001:678:68c:fffb::195]:8333 # AS13259 +[2001:678:d78:22d0:5065:c1ff:fef2:3f65]:8333 # AS8298 +[2001:67c:1220:808::93e5:81f]:8333 # AS197451 +[2001:67c:1254:d1:a7b7::1]:8333 # AS4455 +[2001:67c:1254:d2:6b9c::1]:8333 # AS4455 +[2001:67c:26b4:ff00::44]:8333 # AS57672 +[2001:67c:2db8:6::36]:8333 # AS39798 +[2001:7c0:2310:0:f816:3eff:fe6c:4f58]:8333 # AS34878 +[2001:7d0:8410:df00::1488]:8333 # AS3249 +[2001:8003:d117:3500:bfc5:7e90:9da5:a8c0]:8333 # AS1221 +[2001:818:df59:5800:f8a4:ceff:fefd:d63a]:8333 # AS12353 +[2001:871:23d:d5d1:5a47:caff:fe71:c8d]:8333 # AS8447 +[2001:871:25f:ef0d:5e55:dad6:fe43:1e63]:8333 # AS8447 +[2001:8b0:1301:1000::60]:8333 # AS20712 +[2001:8e0:140b::94]:8333 # AS8758 +[2001:8f8:1b69:15e5:da9e:f3ff:fe75:d10d]:8333 # AS8966 +[2001:910:109d:2c03:d217:c2ff:fe07:2cd9]:8333 # AS20766 +[2001:b011:8013:3a06:e6a:7f23:65d3:f1df]:8333 # AS3462 +[2001:b030:2422::208d]:8333 # AS3462 +[2001:b07:2e9:5bb0:a2d5:2db3:8af4:4bf]:8333 # AS12874 +[2001:b07:5d26:7fb3:6ad1:350a:b65e:88dd]:8333 # AS12874 +[2001:b07:6461:7811:489:d2da:e07:1af7]:8333 # AS12874 +[2001:b07:646d:caf:3c06:693f:73a9:e71b]:8333 # AS12874 +[2001:b07:6474:51d8:c27e:427e:fe37:6356]:8333 # AS12874 +[2001:b07:6474:51d8:f31f:fdc:1c90:67ac]:8333 # AS12874 +[2001:b07:aa7:f93a:21b8:eee1:4973:edf9]:8333 # AS12874 +[2001:bc8:1201:701:ca1f:66ff:fec9:221c]:8333 # AS12876 +[2001:bc8:1201:715:ca1f:66ff:fec9:5ff0]:8333 # AS12876 +[2001:bc8:1201:71a:2e59:e5ff:fe42:52f4]:8333 # AS12876 +[2001:bc8:1600:0:208:a2ff:fe0c:8a2e]:8333 # AS12876 +[2001:bc8:3e54:6b02::1]:8333 # AS12876 +[2001:bc8:6005:1d:208:a2ff:fe0c:6cc2]:8333 # AS12876 +[2001:bc8:610:9:46a8:42ff:fe0c:d385]:8333 # AS12876 +[2001:bc8:700:2313::1]:8333 # AS12876 +[2001:bc8:701:409:b683:51ff:fe06:75f4]:8333 # AS12876 +[2001:bc8:701:40d:ae16:2dff:fea6:e868]:8333 # AS12876 +[2001:d08:d9:7a02:9e6b:ff:fe56:e9b3]:8333 # AS9534 +[2001:df0:a280:1001::5:1]:8333 # AS139225 +[2001:e68:5425:2834:618e:9069:a977:5c66]:8333 # AS4788 +[2001:ee0:4b4f:d480:2e0:4cff:fe08:8998]:8333 # AS45899 +[2001:f40:95c:8d55:7038:f146:d2d4:118]:8333 # AS9930 +[2001:f40:95c:8d55:7270:fcff:fe05:3cd]:8333 # AS9930 +[2001:f40:987:1182:6f:a46e:ec5a:48aa]:8333 # AS9930 +[2003:d5:b703:eb00:b241:6fff:fe10:312c]:8333 # AS3320 +[2003:dc:2f4a:c200:4ecc:6aff:fe25:c9a3]:8333 # AS3320 +[2003:e6:7f42:a900:e65f:1ff:feac:cbfc]:8333 # AS3320 +[2003:ec:2f04:b100:211:32ff:fef7:beef]:8333 # AS3320 +[2003:f0:df08:ec02:aaa1:59ff:fe57:7779]:8333 # AS3320 +[2400:2411:3e05:cc00:46a:1744:4d0f:d26b]:8333 # AS17676 +[2400:2411:a3e1:4900:85c8:62de:e8cc:6875]:8333 # AS17676 +[2400:4053:1203:3f00:1:1:1:134]:8333 # AS4713 +[2400:6180:0:d1::14e:b001]:8333 # AS14061 +[2400:8901::f03c:92ff:fe3e:e1d6]:8333 # AS63949 +[2400:8901::f03c:92ff:fe4e:95f3]:8333 # AS63949 +[2400:8901::f03c:93ff:fe5a:685c]:8333 # AS63949 +[2400:8905::f03c:94ff:fecc:1466]:8333 # AS48337 +[2400:8907::f03c:94ff:fed9:9696]:8333 # AS63949 +[2401:2500:204:1149:133:125:50:180]:8333 # AS7684 +[2401:b140:1::100:220]:8333 # AS54415 +[2401:b140:3::44:110]:8333 # AS54415 +[2401:b140:3::44:120]:8333 # AS54415 +[2401:d002:2103:400:211:32ff:fe9e:7ae3]:8333 # AS38195 +[2401:d002:3902:700:d72c:5e22:4e95:389d]:8333 # AS38195 +[2401:d002:602:7800:289b:8dab:d50e:dea2]:8333 # AS38195 
+[2402:a7c0:8100:a015::6f2:79a5]:8333 # AS59253 +[2402:b801:287c:800:1218:45cf:2d05:cbe4]:8333 # AS18371 +[2403:580c:c505:0:6955:67d3:6229:88e7]:8333 # AS4764 +[2403:5816:c8a3:0:2677:3ff:fe03:9422]:8333 # AS4764 +[2403:71c0:2000:b3e0::100]:8333 # AS3258 +[2403:71c0:2000:b3e0::101]:8333 # AS3258 +[2403:71c0:2000:b3e0::7693]:8333 # AS3258 +[2404:4408:6397:8201::250]:8333 # AS9790 +[2405:6582:de0:4400:8ce:2b80:2960:7b4e]:8333 # AS4685 +[2405:6582:de0:4400:f:854d:5057:4fc9]:8333 # AS4685 +[2405:aa00:2::40]:8333 # AS7712 +[2406:3003:2002:3a69:387e:3c9a:8166:106b]:8333 # AS55430 +[2406:3400:216:8b00:211:32ff:feca:336b]:8333 # AS10143 +[2406:8c00:0:3422:133:18:228:108]:8333 # AS24282 +[2406:da12:ce1:f000:afcd:2e3a:11f6:67c0]:8333 # AS16509 +[2406:da12:ce1:f001:2f6e:891:55f0:55e1]:8333 # AS16509 +[2406:da12:ce1:f001:8974:8c19:bae2:5213]:8333 # AS16509 +[2406:da12:ce1:f001:a791:ac49:22a4:fd2]:8333 # AS16509 +[2406:da1a:5b2:ea00:f17a:1319:baed:1582]:8333 # AS16509 +[2406:da1a:5b2:ea01:58c:5fb:ae7c:484c]:8333 # AS16509 +[2407:3640:2107:1278::1]:8333 # AS141995 +[2407:c800:4f12:5e7:95e3:4bf5:b37:86f7]:8333 # AS9365 +[2408:8207:5455:8d00::704]:8333 # AS4808 +[2409:8a28:ec1:6840:f7a3:88c8:cacc:fe0a]:8333 # AS56041 +[2409:8a7c:1e42:2a50:45d9:11d8:b04:cbd7]:8333 # AS9808 +[240d:1a:4b1:e700:19:d9ef:7f3:8e75]:8333 # AS2527 +[240e:38a:3e3a:cf00:a771:2e91:51a7:2fb4]:8333 # AS4134 +[2600:1700:3101:b00f:2c96:27a0:452c:f186]:8333 # AS7018 +[2600:1700:38d5:140f:f64d:30ff:fe63:504e]:8333 # AS7018 +[2600:1700:488:10::421]:8333 # AS7018 +[2600:1700:539e:b00f:5054:ff:fe1b:2913]:8333 # AS7018 +[2600:1700:5453:69e::109]:8333 # AS7018 +[2600:1700:5af3:2c10:46a8:42ff:fe08:5835]:8333 # AS7018 +[2600:1700:6b0:d200::ff]:8333 # AS7018 +[2600:1700:944c:e00f:2a27:f664:1801:599f]:8333 # AS7018 +[2600:1700:ec7b:5730::46]:8333 # AS7018 +[2600:1700:ec7b:5730:f2b6:1eff:fe70:7583]:8333 # AS7018 +[2600:1900:4000:4cc4:0:13::]:8333 # AS396982 +[2600:1900:4020:65f:0:1::]:8333 # AS396982 +[2600:1900:4030:a25e::]:8333 # AS396982 +[2600:1900:40b0:3af2:0:5::]:8333 # AS396982 +[2600:1900:40b0:3af2::]:8333 # AS396982 +[2600:1900:40c0:2210::]:8333 # AS396982 +[2600:1900:40e0:41fa:0:4::]:8333 # AS396982 +[2600:1900:41a0:af83::]:8333 # AS396982 +[2600:1900:5400:6e4:0:3::]:8333 # AS396982 +[2600:1901:8180:5c1::]:8333 # AS396982 +[2600:1f14:40e:e300:cea2:19ef:fa3a:e125]:8333 # AS16509 +[2600:1f16:a08:b900:2321:e069:cde4:67fb]:8333 # AS16509 +[2600:1f18:64d9:1603:4436:871e:2bfe:7403]:8333 # AS14618 +[2600:1f18:66fc:d700:9b71:f45:f3c9:c43b]:8333 # AS14618 +[2600:1f18:66fc:d701:d68d:ea70:77a0:4072]:8333 # AS14618 +[2600:1f18:719a:e301:773d:1375:24fb:bf24]:8333 # AS14618 +[2600:1f18:719a:e302:2758:8042:929f:a384]:8333 # AS14618 +[2600:1f18:719a:e302:4c90:e1e6:2a59:82c4]:8333 # AS14618 +[2600:1f18:719a:e302:e6c0:8878:401c:27c2]:8333 # AS14618 +[2600:1f18:719a:e302:ef0c:40ab:f8f:6d6c]:8333 # AS14618 +[2600:1f18:719a:e302:fa9e:86ad:e463:ae17]:8333 # AS14618 +[2600:1f1c:2d3:2401:6989:b1fd:d2a6:fbc8]:8333 # AS16509 +[2600:2104:1003:c5ab:dc5e:90ff:fe18:1d08]:8333 # AS11404 +[2600:3c00::f03c:92ff:fe92:2745]:8333 # AS63949 +[2600:3c00::f03c:94ff:feb7:4dd7]:8333 # AS63949 +[2600:3c00::f03c:94ff:fed1:1d3d]:8333 # AS63949 +[2600:3c01::f03c:93ff:fee6:2146]:8333 # AS63949 +[2600:3c02::f03c:92ff:fe5d:9fb]:8333 # AS63949 +[2600:3c02::f03c:94ff:fe12:8938]:8333 # AS63949 +[2600:6c54:7100:1ad1:c92e:36d:651:bd18]:8333 # AS20115 +[2600:6c67:2100:670:b179:be4c:8cca:e8f0]:8333 # AS33588 +[2600:6c67:8a3f:e191:4261:86ff:fe4f:aac]:8333 # AS33588 +[2600:70ff:eaad:beef::2]:8333 
# AS6939 +[2600:8801:2f80:b2::141c]:8333 # AS22773 +[2600:8806:2300:460:a0e6:7e10:c3a8:bb47]:8333 # AS22773 +[2601:152:497f:2a38::7b]:8333 # AS33657 +[2601:185:8302:12f0:1ab2:2840:9de4:1550]:8333 # AS7015 +[2601:18c:9002:3de5:219:d1ff:fe75:dc2f]:8333 # AS7015 +[2601:19c:417e:3a11:20e7:b3ff:fecf:a99]:8333 # AS7015 +[2601:243:820:5824:4216:8de:e04a:fb54]:8333 # AS33491 +[2601:280:5c00:43d:4aba:4eff:fef8:6e5d]:8333 # AS33652 +[2601:603:5300:83b7:0:ff:fe00:4209]:8333 # AS33650 +[2602:fec3:0:1::69]:8333 # AS62563 +[2602:fec3:2:5::5:73]:8333 # AS62563 +[2602:ffb6:4:4d3d:f816:3eff:fec6:c15]:8333 # AS174 +[2602:ffb6:4:739e:f816:3eff:fe00:c2b3]:8333 # AS174 +[2602:ffb6:4:7b8e:f816:3eff:fe9d:9dc2]:8333 # AS174 +[2602:ffc5:200:1e01:241d:e589:9650:c773]:8333 # AS20473 +[2603:3003:11b:e100:20c:29ff:fe38:bbc0]:8333 # AS33657 +[2603:3003:11b:e100::f202]:8333 # AS33657 +[2603:3004:717:5800:485b:39ff:feab:1d54]:8333 # AS33490 +[2603:3005:1503:700::8af2]:8333 # AS7015 +[2603:3007:701:8000:b7d9:9e1e:8e0d:52fa]:8333 # AS7016 +[2603:300a:912:627a:be24:11ff:fe7b:39c3]:8333 # AS33491 +[2603:3015:e21:6800:15b3:d692:a536:12ff]:8333 # AS33668 +[2603:3024:1c07:e00::8c2d]:8333 # AS33651 +[2603:3024:2005:8000::f41b]:8333 # AS33651 +[2603:6011:af41:7214::5]:8333 # AS10796 +[2603:6080:9003:2fbc:60b1:6a4e:e364:c85d]:8333 # AS11426 +[2603:8001:3300:7d75::c48]:8333 # AS20001 +[2603:8080:1f07:6fdd:7de2:d969:78c9:b7ea]:8333 # AS11427 +[2603:8081:6c00:77:215:5dff:fe02:1555]:8333 # AS11427 +[2603:80a0:700:1886::39]:8333 # AS11427 +[2604:4080:1036:80b1:50e1:43ff:fe0e:9df5]:8333 # AS11404 +[2604:4500:6:285::18]:8333 # AS29802 +[2604:86c0:3001:5::12:73]:8333 # AS63023 +[2604:a00:3:2098:216:3eff:fe28:c447]:8333 # AS19318 +[2604:a00:50:39:c514:becd:bece:ad3a]:8333 # AS19318 +[2604:a880:400:d0::1c96:e001]:8333 # AS14061 +[2604:a880:400:d0::1d46:c001]:8333 # AS14061 +[2604:a880:400:d0::1def:8001]:8333 # AS14061 +[2604:a880:400:d0::1dfe:a001]:8333 # AS14061 +[2604:a880:400:d1::849:6001]:8333 # AS14061 +[2604:a880:4:1d0::31:e000]:8333 # AS14061 +[2604:a880:4:1d0::c5:6000]:8333 # AS14061 +[2605:1080:0:f00::70]:8333 # AS23367 +[2605:21c0:2000:11:204:194:220:40]:8333 # AS20055 +[2605:59c8:1800:2596:c26c:e780:26fd:fcf8]:8333 # AS14593 +[2605:59c8:2a99:9d00:5377:7333:efde:d32]:8333 # AS14593 +[2605:59c8:61f:3900:2efd:a1ff:fedc:f8d4]:8333 # AS14593 +[2605:6400:30:f220::]:8333 # AS53667 +[2605:6440:3001:2f:3eec:efff:fe91:f840]:8333 # AS396356 +[2605:6440:3001:2f::2]:8333 # AS396356 +[2605:6440:3001:49:7ec2:55ff:fea8:31cc]:8333 # AS396356 +[2605:6440:3001:49::2]:8333 # AS396356 +[2605:6f80:0:7:fc1b:ccff:fe8a:d822]:8333 # AS36114 +[2605:ae00:203::203]:8333 # AS7819 +[2606:6d00:100:5102:3d2:f06a:c2e8:a54]:8333 # AS1403 +[2607:5300:203:46ea::]:8333 # AS16276 +[2607:5300:60:314c::1]:8333 # AS16276 +[2607:9280:b:73b:250:56ff:fe14:25b5]:8333 # AS395502 +[2607:9280:b:73b:250:56ff:fe21:9c2f]:8333 # AS395502 +[2607:9280:b:73b:250:56ff:fe21:bf32]:8333 # AS395502 +[2607:9280:b:73b:250:56ff:fe33:4d1b]:8333 # AS395502 +[2607:9280:b:73b:250:56ff:fe3d:401]:8333 # AS395502 +[2607:f2c0:e045:f2e0:fe4c:fbb:6c99:1220]:8333 # AS5645 +[2607:f2c0:f00e:300::54]:8333 # AS5645 +[2607:f2f8:ad40:ea11::1]:8333 # AS25795 +[2607:fdc0:5:9::2ca]:8333 # AS20326 +[2607:fea8:601e:7d01:be24:11ff:fe89:27f3]:8333 # AS812 +[2620:11c:5001:1118:d267:e5ff:fee9:e673]:8333 # AS13331 +[2620:11c:5001:2199:d267:e5ff:fee9:e673]:8333 # AS13331 +[2620:6:2003:105:67c:16ff:fe51:58bf]:8333 # AS395460 +[2620:6e:a000:1:42:42:42:42]:8333 # AS397444 +[2620:a6:2000:1:1:0:9:742a]:8333 # AS27566 
+[2620:a6:2000:1:1:0:d:7f1d]:8333 # AS27566 +[2620:a6:2000:1:2:0:6:49c8]:8333 # AS27566 +[2620:a6:2000:1:2:0:b:388a]:8333 # AS27566 +[2620:ca:a000:beef::1]:8333 # AS401199 +[2620:ca:a000:beef::10]:8333 # AS401199 +[2620:ca:a000:beef::2]:8333 # AS401199 +[2620:ca:a000:beef::3]:8333 # AS401199 +[2620:ca:a000:beef::6]:8333 # AS401199 +[2800:150:11d:1093:c9e3:1ef4:bc4:250d]:8333 # AS22047 +[2800:40:17:24f:4d95:e130:7f97:90f2]:8333 # AS16814 +[2800:40:18:7d1:a236:bcff:fe58:b6ec]:8333 # AS16814 +[2800:40:74:4b8b:f673:db63:6f6f:2310]:8333 # AS16814 +[2803:5180:4100:4000::2]:8333 # AS263762 +[2803:9800:9447:84bb:b87f:1df2:ff01:1c28]:8333 # AS19037 +[2803:9800:a007:8391:1fd7:c263:a55b:b9fb]:8333 # AS19037 +[2804:14c:65d7:8ea5:6060:2102:6ba6:5614]:8333 # AS28573 +[2804:14d:7e33:83b0:6e41:1ccc:cf20:aff9]:8333 # AS28573 +[2804:229c:8200:18d6:14a:d8c8:b4dd:3f25]:8333 # AS264111 +[2804:431:e038:cd01:aaa1:59ff:fe0d:44b8]:8333 # AS27699 +[2804:d41:e028:5900:aea:4236:2d76:1eb9]:8333 # AS7738 +[2804:d45:cb22:ef00:f89c:d038:2a36:a3fa]:8333 # AS7738 +[2804:d57:4b3d:7d00:752d:1d78:6b32:f71c]:8333 # AS8167 +[2804:d57:5949:1800:2a0:98ff:fe79:339b]:8333 # AS8167 +[2804:fec:d2d8:6100:fa63:2ecd:48a0:2f34]:8333 # AS262493 +[2806:103e:1b:4835:a787:2ea5:9025:aaa7]:8333 # AS8151 +[2806:267:148a:1d10:dc4b:3694:423b:b6b4]:8333 # AS13999 +[2806:2f0:80e1:e17b:3529:8aa:90c:509a]:8333 # AS17072 +[2806:2f0:a481:c585:603:478e:57f9:409e]:8333 # AS17072 +[2a00:1169:114:dc00::]:8333 # AS29066 +[2a00:1190:c013::be:1337]:8333 # AS16302 +[2a00:11c0:60:294:c48f:beff:fe15:a97f]:8333 # AS197540 +[2a00:1298:8001::6542]:8333 # AS5578 +[2a00:12e0:101:99:20c:29ff:fe29:d03f]:8333 # AS6798 +[2a00:1398:4:2a03:3eec:efff:fe05:d93e]:8333 # AS34878 +[2a00:1398:4:2a03::bc03]:8333 # AS34878 +[2a00:13a0:3015:1:85:14:79:26]:8333 # AS31242 +[2a00:1768:2001:27::ef6a]:8333 # AS43350 +[2a00:1a08:ffff:5::11]:8333 # AS25534 +[2a00:1f40:5001:108:5d17:7703:b0f5:4133]:8333 # AS42864 +[2a00:1f40:5001:386:dead:beef:b1ac:c0fe]:8333 # AS42864 +[2a00:23c5:fe80:7301:d6ae:52ff:fed5:56a5]:8333 # AS2856 +[2a00:23c6:5c8a:5c00:c05a:4dff:fe65:9d69]:8333 # AS2856 +[2a00:4d80::1]:8333 # AS43150 +[2a00:5980:93::135]:8333 # AS197869 +[2a00:6020:4914:5700:db31:ca50:797:c468]:8333 # AS60294 +[2a00:6020:509e:a400:211:32ff:fe5c:369c]:8333 # AS60294 +[2a00:6020:b406:e00:79e:9ad6:9181:ebb8]:8333 # AS60294 +[2a00:6020:b489:2000:42:c0ff:fea8:b209]:8333 # AS60294 +[2a00:7c80:0:4e::2]:8333 # AS49981 +[2a00:7c80:0:4e:b7c0::2001]:8333 # AS49981 +[2a00:8a60:e012:a00::9001]:8333 # AS47610 +[2a00:a040:199:84fa:1ac0:4dff:fe41:3e93]:8333 # AS12849 +[2a00:bba0:1204:3700:21e:6ff:fe4a:5378]:8333 # AS207375 +[2a00:bbe0:0:221f::246]:8333 # AS60414 +[2a00:d4e0:ff:fc02:9e6b:ff:fe17:6115]:8333 # AS15600 +[2a00:d880:5:c2::d329]:8333 # AS198203 +[2a00:ee2:4d00:600::1]:8333 # AS5603 +[2a00:ee2:4d00:6b0::1000:1]:8333 # AS5603 +[2a01:238:425f:4600:bbb8:16d6:e907:cb6f]:8333 # AS6724 +[2a01:239:265:ad00::1]:8333 # AS8560 +[2a01:261:218:3f00:8d0f:2105:c657:4ae7]:8333 # AS34779 +[2a01:4b00:807c:3100:a36:c9ff:fe7e:de5f]:8333 # AS56478 +[2a01:4b00:b906:f100:d812:4f64:a931:19b7]:8333 # AS56478 +[2a01:4b00:b906:f100:dea6:32ff:fed5:f142]:8333 # AS56478 +[2a01:4b00:bf1b:7200:d826:7d6f:b13:276c]:8333 # AS56478 +[2a01:4f8:171:16af::2]:8333 # AS24940 +[2a01:4f8:171:1f16::2]:8333 # AS24940 +[2a01:4f8:202:4205::2]:8333 # AS24940 +[2a01:4f8:242:4246::2]:8333 # AS24940 +[2a01:4f8:252:1ceb::2]:8333 # AS24940 +[2a01:4f8:271:5ca8::2]:8333 # AS24940 +[2a01:4f9:1a:aad4::2]:8333 # AS24940 +[2a01:4f9:4a:515b::2]:8333 # AS24940 
+[2a01:4f9:5a:16cb:876a:bce7:b3c8:118a]:8333 # AS24940 +[2a01:4f9:5a:25c2::2]:8333 # AS24940 +[2a01:4ff:1f0:8517::1]:8333 # AS212317 +[2a01:4ff:1f0:91ad::1]:8333 # AS212317 +[2a01:4ff:1f0:ec0d::1]:8333 # AS212317 +[2a01:4ff:f0:cc2a::1]:8333 # AS213230 +[2a01:4ff:f0:e4e9::1]:8333 # AS213230 +[2a01:5a8:303:13ac::1]:8333 # AS8866 +[2a01:5a8:308:4333:4074:6aff:fe9c:f5d2]:8333 # AS8866 +[2a01:7a7:2:2804:ae1f:6bff:fe9d:6c94]:8333 # AS29066 +[2a01:7c8:aaac:89:5054:ff:feb7:f5cb]:8333 # AS20857 +[2a01:7c8:aac2:180:5054:ff:fe56:8d10]:8333 # AS20857 +[2a01:8740:1:753::e5cb]:8333 # AS203380 +[2a01:8740:1:ff2e::9428]:8333 # AS203380 +[2a01:8740:1:ffc5::8c6a]:8333 # AS203380 +[2a01:cb00:139e:a00:142d:fec1:d0df:da18]:8333 # AS3215 +[2a01:cb00:1428:ea00:8a4c:1b72:f959:ca4]:8333 # AS3215 +[2a01:cb10:249:7300:b:c:b:c]:8333 # AS3215 +[2a01:e0a:1c1:a3e0:443b:bcab:7778:b03b]:8333 # AS12322 +[2a01:e0a:252:6bd0::2]:8333 # AS12322 +[2a01:e0a:3b3:1420:7ca0:3a9a:5cc3:b644]:8333 # AS12322 +[2a01:e0a:57b:a0:7039:12e3:6547:2849]:8333 # AS12322 +[2a01:e0a:83d:dd30:246a:4af7:53f4:8d65]:8333 # AS12322 +[2a01:e0a:9e9:c240:8e3a:af64:4f0:8f79]:8333 # AS12322 +[2a01:e0a:b7:7db0:24e:1ff:feaa:e183]:8333 # AS12322 +[2a01:e0a:db3:66a0:92e1:4c4:8ce9:2b5d]:8333 # AS12322 +[2a01:e0a:df:b9a0:b62e:99ff:fece:1395]:8333 # AS12322 +[2a01:e0a:e6e:6bb0:2e0:4cff:fe68:232]:8333 # AS12322 +[2a01:e11:100c:70:39f3:e3c9:832f:37a]:8333 # AS29447 +[2a02:1210:1c04:d900:86e5:2135:4f88:82e]:8333 # AS3303 +[2a02:1210:200a:3c00:b559:6c65:10cb:3765]:8333 # AS3303 +[2a02:1210:3c3a:5600:61e6:a811:ef0b:f9c2]:8333 # AS3303 +[2a02:1210:4857:ed00:4c86:3d1c:db1a:460d]:8333 # AS3303 +[2a02:1210:4aba:e800:21ec:346:a29f:90be]:8333 # AS3303 +[2a02:1210:60e0:800:8d6e:134d:a0ca:ef24]:8333 # AS3303 +[2a02:1210:7823:de00:211:32ff:feae:152d]:8333 # AS3303 +[2a02:1210:84ea:f600:503e:6f19:c2c1:9ca6]:8333 # AS3303 +[2a02:13b8:f000:101::a]:8333 # AS15614 +[2a02:168:2000:97::26]:8333 # AS13030 +[2a02:168:420b:7::7]:8333 # AS13030 +[2a02:168:420b:a::20]:8333 # AS13030 +[2a02:168:62a7::b1c]:8333 # AS13030 +[2a02:168:675e:0:e65f:1ff:fe09:3591]:8333 # AS13030 +[2a02:168:b5cf:4::]:8333 # AS13030 +[2a02:1748:f7df:95b1:96c6:91ff:fe1d:e0b6]:8333 # AS51184 +[2a02:21b4:2089:9100:106b:c6b:c328:be4e]:8333 # AS57370 +[2a02:21b4:c820:c300:3126:c960:f356:aab5]:8333 # AS57370 +[2a02:22a0:bbb3:dc10:50e1:57ff:fe70:9492]:8333 # AS28685 +[2a02:247a:215:3e00::1]:8333 # AS8560 +[2a02:247a:22d:c000:1::1]:8333 # AS8560 +[2a02:247a:243:7b00::1]:8333 # AS8560 +[2a02:2780:9000:70::7]:8333 # AS35434 +[2a02:2780:9000:70::f]:8333 # AS35434 +[2a02:29b8:dc01:3750::c4d]:8333 # AS51852 +[2a02:2f05:6307:100:fd:ac4b:7f1a:1d95]:8333 # AS8708 +[2a02:3102:4d5c:f000:dea6:32ff:febb:b9cb]:8333 # AS6805 +[2a02:3102:c324:1049:ca5:9dff:fea9:1cbb]:8333 # AS6805 +[2a02:390:9000:0:aaa1:59ff:fe43:b57b]:8333 # AS12496 +[2a02:6d40:3055:b201:dea6:32ff:fe44:4b25]:8333 # AS42652 +[2a02:6ea0:d14a::a921:e257]:8333 # AS60068 +[2a02:768:f92b:db46:5e46:772b:71d:29b7]:8333 # AS44489 +[2a02:7a01::91:228:45:130]:8333 # AS197895 +[2a02:7b40:50d1:e77e::1]:8333 # AS62282 +[2a02:7b40:5928:89::1]:8333 # AS62282 +[2a02:7b40:b0df:8f88::1]:8333 # AS62282 +[2a02:7b40:b945:3599::1]:8333 # AS62282 +[2a02:7b40:c3b5:f595::1]:8333 # AS62282 +[2a02:7b40:d418:69b4::1]:8333 # AS62282 +[2a02:7b40:d418:6dfe::1]:8333 # AS62282 +[2a02:8070:f181:f600:bcb:2d1:d790:78ff]:8333 # AS3209 +[2a02:8071:6380:c500:d250:99ff:fe14:afb2]:8333 # AS3209 +[2a02:8084:2021:7393::66e6]:8333 # AS6830 +[2a02:8108:28c0:5d60:da3a:ddff:fe45:4cb5]:8333 # AS3209 
+[2a02:8108:8ac0:5db:d250:99ff:fe9e:792a]:8333 # AS3209 +[2a02:810b:181f:fa8e:1cc7:c528:4a59:6334]:8333 # AS3209 +[2a02:8308:8188:5100:6d8b:4531:4331:eee2]:8333 # AS16019 +[2a02:8388:e302:7980:6f85:a0b3:4b4d:8b0f]:8333 # AS8412 +[2a02:8388:e5c3:4a80:201:2eff:fe82:b3cc]:8333 # AS8412 +[2a02:908:c200:6d00:caad:5e32:35e7:3157]:8333 # AS3209 +[2a02:a457:1a1b:ff04::1033]:8333 # AS1136 +[2a02:a45a:94cd:f00d::1]:8333 # AS1136 +[2a02:a465:80f4:1:f369:4ef5:aa12:7566]:8333 # AS1136 +[2a02:a466:4d4f:1:8471:fe5d:cff:d524]:8333 # AS1136 +[2a02:a468:61f8:1::2]:8333 # AS1136 +[2a02:a469:2d51:1:921b:eff:fe8c:7975]:8333 # AS1136 +[2a02:a469:3eda:1:7e83:34ff:feb6:13f3]:8333 # AS1136 +[2a02:ab88:20b:ce00:223:24ff:fe56:6202]:8333 # AS21334 +[2a02:ab8:201:403::126]:8333 # AS48943 +[2a02:ab8:201:403:b87a:46a1:aece:21ed]:8333 # AS48943 +[2a02:af8:fab0:808:85:234:145:132]:8333 # AS29550 +[2a02:b48:207:2:8333::4]:8333 # AS39572 +[2a02:b48:207:2:8333::5]:8333 # AS39572 +[2a02:c206:2131:403::1]:8333 # AS51167 +[2a02:c206:2161:3107::1]:8333 # AS51167 +[2a02:c206:2170:3718::1]:8333 # AS51167 +[2a02:c206:2172:2852::1]:8333 # AS51167 +[2a02:c206:2179:6690::1]:8333 # AS51167 +[2a02:c206:2181:6405::1]:8333 # AS51167 +[2a02:c206:2188:9353::1]:8333 # AS51167 +[2a02:c206:3013:3626::1]:8333 # AS51167 +[2a02:c207:2043:5542::1]:8333 # AS51167 +[2a02:c207:3002:7468::1]:8333 # AS51167 +[2a02:cb43:4000::178]:8333 # AS20546 +[2a02:e5e:1:10::27]:8333 # AS25057 +[2a02:e98:20:1504::1]:8333 # AS24641 +[2a03:4000:28:68:7411:53ff:fe4c:21d]:8333 # AS197540 +[2a03:4000:2:1e3:c8b7:eeff:feb0:d26c]:8333 # AS197540 +[2a03:4000:5d:bd4:a8bf:78ff:fe98:7ea4]:8333 # AS197540 +[2a03:4000:5d:ea7:944c:6bff:fead:b1f1]:8333 # AS197540 +[2a03:4000:5f:cfc:14c3:eff:feb5:1c1a]:8333 # AS197540 +[2a03:4000:63:dc7:d418:2dff:fef3:94d9]:8333 # AS197540 +[2a03:4000:9:7d9:c88f:1dff:fe4e:44d]:8333 # AS197540 +[2a03:6000:870:0:46:23:87:218]:8333 # AS60131 +[2a03:b0c0:1:e0::368:d001]:8333 # AS14061 +[2a03:b0c0:1:e0::6aa:7001]:8333 # AS14061 +[2a03:cfc0:8000:2a::9532:6507]:8333 # AS201814 +[2a03:cfc0:8000:2a::9532:650f]:8333 # AS201814 +[2a03:cfc0:8000:2a::9532:6514]:8333 # AS201814 +[2a03:cfc0:8000:2a::9532:6516]:8333 # AS201814 +[2a03:cfc0:8000:2a::9532:651b]:8333 # AS201814 +[2a03:cfc0:8000:2a::9532:651c]:8333 # AS201814 +[2a03:cfc0:8000:2a::9532:6520]:8333 # AS201814 +[2a03:cfc0:8000:2a::9532:6522]:8333 # AS201814 +[2a03:cfc0:8000:2a::9532:6523]:8333 # AS201814 +[2a03:cfc0:8000:2a::9532:659c]:8333 # AS201814 +[2a03:ec0:0:928::701:701]:8333 # AS199669 +[2a04:3543:1000:2310:e878:79ff:fe3c:1729]:8333 # AS202053 +[2a04:52c0:102:2219::1]:8333 # AS60404 +[2a04:52c0:102:49af::1]:8333 # AS60404 +[2a04:52c0:103:c455::1]:8333 # AS60404 +[2a04:52c0:104:160c::1]:8333 # AS60404 +[2a05:3580:d101:3700::]:8333 # AS35807 +[2a05:4cc0:0:332::2]:8333 # AS8772 +[2a05:6d40:b94e:d100:230:48ff:fedf:1432]:8333 # AS202128 +[2a05:d01e:1b1:6c03:5369:fd23:e62f:a257]:8333 # AS16509 +[2a05:f480:2c00:100c:5400:4ff:fed7:dead]:8333 # AS20473 +[2a05:f480:3000:2b4e:5400:4ff:fed7:4206]:8333 # AS20473 +[2a06:dd00:10:0:225:90ff:fe33:56e8]:8333 # AS56694 +[2a06:dd01::36:0:0:1]:8333 # AS42474 +[2a06:e881:3408:2::2]:8333 # AS205165 +[2a07:7200:ffff:0:3016:d5ff:fe5e:1114]:8333 # AS34197 +[2a07:7200:ffff:0:60d1:eff:fe09:3886]:8333 # AS34197 +[2a07:7200:ffff:0:b0a5:23ff:fe34:d292]:8333 # AS34197 +[2a07:7200:ffff:0:c43e:80ff:fe3c:e0cd]:8333 # AS34197 +[2a07:7200:ffff:0:f4d3:aff:febe:ad99]:8333 # AS34197 +[2a07:b242:1000:1300:f250:8f0a:cdba:4d76]:8333 # AS202618 +[2a07:d884::127e]:8333 # AS23959 
+[2a09:2681:1001::23]:8333 # AS61282 +[2a0a:31c0:100:0:888f:90ff:fe2c:761b]:8333 # AS62098 +[2a0a:4580:101d::1]:8333 # AS29670 +[2a0a:4cc0:100:37b:c44a:2fff:fe10:2d3c]:8333 # AS197540 +[2a0a:4cc0:1:340:1460:fdff:feb2:2994]:8333 # AS197540 +[2a0b:4880::266e:96ff:fedb:7cdc]:8333 # AS48614 +[2a0b:f300:2:6::2]:8333 # AS62240 +[2a0b:f4c0:c1:920e:b25a:daff:fe87:77b4]:8333 # AS205100 +[2a0c:b641:6f0:193::]:8333 # AS203394 +[2a0e:8f02:21d1:144::101]:8333 # AS203528 +[2a0e:cb00:700b:0:cb0f:cba:41b2:28b3]:8333 # AS60377 +[2a0e:e701:103e::4]:8333 # AS2027 +[2a0f:b780:300:1::2]:8333 # AS49095 +[2a0f:df00:0:2010::162]:8333 # AS41281 +[2a0f:e586:f:f::13]:8333 # AS207656 +[2a10:3781:2c19::1]:8333 # AS206238 +[2a10:3781:3a73:25::25]:8333 # AS206238 +[2a10:3781:3a73:25:a177:ad25:b14a:176a]:8333 # AS206238 +[2a10:3781:3fff::1]:8333 # AS206238 +[2a10:3781:84b:1:8002:99d3:191f:c738]:8333 # AS206238 +[2a10:c941:100:24::2:1001]:8333 # AS35277 +[2a11:d540:531:b00b::5]:8333 # AS207586 +[2a12:8e40:5668:e40b::1]:8333 # AS45021 +[2a12:8e40:5668:e40c::1]:8333 # AS45021 +[2a12:8e40:5668:e410::1]:8333 # AS45021 +[2a12:8e40:5668:e412::1]:8333 # AS45021 +[2a12:8e40:5668:e416::1]:8333 # AS45021 +[2a12:8e40:5668:e417::1]:8333 # AS45021 +[2a12:8e40:5668:e41d::1]:8333 # AS45021 +[2a12:8e40:5668:e41e::1]:8333 # AS45021 +[2a12:8e40:5668:e422::1]:8333 # AS45021 +[2a12:8e40:5668:e424::1]:8333 # AS45021 +[2c0f:fb18:402:5::3]:8333 # AS37199 +22pz2giqglrjahnebbeedjyfhmy26wifq7d6wnioioqv3kbctddraead.onion:8333 +244f4ods2jyhxjbzmjoo74hvdgzxkkf4xysnwlin4b66zactsve44mad.onion:8333 +25l62iysmquzrl2xgocvipsb2qhwxizahgfo55ssj2ducahghmc4rlyd.onion:8333 +25nwqmtbqicqrprsfv75tghg432c4viqlps7ov52pb2uj2u2w4ek2dqd.onion:8333 +25zmfaqaki3f34pwra24yl6eqgsrhmcxwa2vjbfxzddgalegee6e6sid.onion:8333 +26jfbxrdnsbxlai7qonraijp3sa2sybizmx75ry6dyr3s3dampmwufyd.onion:8333 +27nda43gq3qie4mw2gscszxw45cktlkzb2dsehanwqrv3il45bc5xmid.onion:8333 +2b5myuudry2fc5abwur67yxenqkjbp3gwy4xkxly6elurxwpq6my6pyd.onion:8333 +2bu7aca5dslx53okedetkovivsfzgjku5krxnxv6jcwpovrqceyktgid.onion:8333 +2frnyqoym5gqwo7px6blri2p6cqxpqwjz6fxtruwbo6hcfbuh7r6qxad.onion:8333 +2g7f4dst4ga2mwtavj3uhixhwtktas53rutzhf3dl4lkeb3x6i7ollqd.onion:8333 +2hyo2nphjdhk7srnkj722pnynbtd4qcu2kaqqsqfqn4du3qi45ydkgid.onion:8333 +2hyxmlakirtpgbxmyoylskwm3q3p45rseqz4bezpwocx5zk2rzgdsoyd.onion:8333 +2jnduapgnsztxqlw7an263mjjrd2hh2okgpg6s4pxltkwsawyfrabvid.onion:8333 +2nbfeuyzysqhw2aawievujdc7s3pvri3gygufownc73pbbq7pwoa3aad.onion:8333 +2ockgj6yxkytfiup3rlutk7j62ttnvndivrsqq423gi72bg5puqyacad.onion:8333 +2oyz7zkpukhlodz4hcdsfkb7f3tdopyk2h65nwtaiviuwcv4lku53myd.onion:8333 +2qmfh2ay6zvpvvsc2fgvcarwi6ny7voht36uwqzh2s2lckktxde663id.onion:8333 +2rpqi26jltpqxghloljkfvpywqyiztklvqe2j6g535ct4qc5cfi6wcid.onion:8333 +2rpxcrfxrtdcs7f7kdba6j7r3ej6k2xiwcrbbncvuyo3dms3iqscadad.onion:8333 +3bzy4vmqovhimund35clac4tx3isy4fzpnqq7chzioktr5in4a246lad.onion:8333 +3d37tkrobqlm74estbowv34vegntpby4o35f4cp4adopksrhvqpbgaid.onion:8333 +3dxq5z3rmhzhr5jaaiwzn34dlxjashwdfyezp3pxi2bokf6ljlrysuid.onion:8333 +3f44ddpy2c6wytq37d73ro7j3hl24njgmkhknxyekd3nfi6fi62adhad.onion:8333 +3f6x3accjzo72tcxf3fwzotwt2wi4f6jqimuodd5td3tkla7tvs25lyd.onion:8333 +3k5pwv5mqjo4cgekyy2ncv6ioyduqudk6gjo6rq3ozdqatgdma5n2ead.onion:8333 +3lurimig54ozppuvqbh5lorxcm4z65l6ybyrst3ksdjfioyic6nms6ad.onion:8333 +3m4o7fccoizgqe3iybwetbqtm3vsb7dpoodgd5gtanyxz7g3usg7lgyd.onion:8333 +3qggxbp2bggve2x2evcs6nynoaxryuew5mcr4llsknczewi3occ7bqad.onion:8333 +3sap4vo5v42zf6uvbeoxgj2tmqwfxnsm6lltrcexhed7zbi67rvkbwid.onion:8333 
+3tofha4w7b7y5amgwhj3hssjr7z3rzrptlpm6wgzgkrruqw4jy2msqqd.onion:8333 +3tpbeteemou53yfzifzbbmhgq7avlrumtacstk62hlukw4vgt7ip54ad.onion:8333 +3wldmps5hoxc76qff2of3adp6fqda3vobcenodoqwwn2grlgsnurn5qd.onion:8333 +42qfhqoyebkpddesxi7zbm3ujcfzmattclannvkdp6g57gkbg5l6nuqd.onion:8333 +42ukdi6zsgq5hcf5d4b6xxdj77ffismhz2sm7gevibcnwhzfgunluayd.onion:8333 +47u4lbj7qmyllkxj3jbzfjoag4z5yiuoypsyp4354aprosmunkaszvyd.onion:8333 +4l2kbv6dyaghiqrzcuylzuhwjcag3vbujr4cre2f2f3a5gbf7s2t3bid.onion:8333 +4ns7eu3lxa3b5zqvajvzrigo24ltbjmeiqm2parheihatd4n6mqkbuid.onion:8333 +4pm2bfb3aekl3lkpf4rhjjbdnfcn2xcnqkk4de3pz6ekvhf5ym35gjid.onion:8333 +4q6xkttr2vvycn6e4uw55feflsq3oyrmazn7tfz45yxni73yhjj42jid.onion:8333 +4rf36sj773fhwxbq2smywp4xkpdxviy6azro4epcl5vtjf7fsjn5c6ad.onion:8333 +4v5mywuxbpdqyfwwo4bvygbqyvne27ekguavq266ap6o75jbcjlme4id.onion:8333 +4vp2psi73qfspc6neodpxoxe4hexphfzq5llosrah5gk4njwhn7hzlqd.onion:8333 +52mfjouhwxf3puu2af4dsoksjfhnvuf4mgpkcblw6gdmsjgrbprxyzad.onion:8333 +53bpgsfyljzx5uztq5zu2ybm2oqjiba2ecmmcwjqz54fslxr2j67txad.onion:8333 +56ff7zlhr3slha3d2f62iexx7gn3jxrjvkb3e7nx7u2jtnuc2s7xxgid.onion:8333 +5bxpoorrgyhsc5746rg4advtjmxkpfwo4vhokze7xv7vcaa3anetujid.onion:8333 +5gikcukg56x7tygwtjpexvd7ca3mihjptzfrcxbrlh32veywo4lr6tqd.onion:8333 +5kggaeljugzw53t7mirx2hqmscuc5fab23pfg57mivrbww3pe5corlid.onion:8333 +5ngcxrrpkcciev6uagutm6zjoi7d6l3nlhiktii3z3mhzpcb5qkiisad.onion:8333 +5pia4po7brgcabehjypingeufxwayoj4min6jga5qyzacn5lp4c2cwyd.onion:8333 +5pmr2qluaafemlifmsrd3dtxhgtzkhc2u7kpszqvl5h3lwb54jmnloyd.onion:8333 +5sl6ffjj3mecnskhs6ltphzq7z4vxdhfsm6f7xv4fmyqjtqlbzz6evad.onion:8333 +5tm6ubis2atpjieggptfit5zk4dnmoewcclj3gbjfcpnosywve7isbad.onion:8333 +5vikh7wzmyktdbj4otnizwsoetpq7t3swag526lvvyzw5wjmoijx2uad.onion:8333 +5vllnjopzyhmlfpsakb6vzqr4fpgbqeilcurxmi5ktamtvkhqduc57id.onion:8333 +5vpbnwmkqjmeos4qfta5o76nsui4iissafgzl3qdktmsznzbe33i7wyd.onion:8333 +5wcgiu7egpkqpn66vy6dz435r7ovqsp2y7efpjy6nabb5bhtnigvj6yd.onion:8333 +5yenarpzihhwz75lbcrt22wj2qtql44k5pzb2tyev6fxlbpzmp64ytad.onion:8333 +5zi4cuchmzuwnkg6li7ith3persinyvomcuch5fmxyuzt6enf2c7maqd.onion:8333 +62tw7sc55265w2sqhhkbncoumgxbixxtkakiuog7zcqyug7iebbxzzid.onion:8333 +63cquq2e3gpcgfti5pettx4sbg5tg6xsjl7neskl2w6wnqgik6k4koid.onion:8333 +65jiaxxehmakn64nsyeax2xk3dqkegohu33fi77gjhukzw7dm5urwkid.onion:8333 +6bzt2mdeo6afhflfxmilnhokkvw6edxue4lgcdvjib6fn5dmy7hx4tid.onion:8333 +6hobuesgn3fk4b6xkkgjj4yabn4rdkqxhor2yg5sdflprmtjjuo6n6yd.onion:8333 +6irbiwo6m3vyooxia6gsyxvx5cxufdpvnb3pv5wwy4eoloo5dmah3ead.onion:8333 +6jya753mqzrkmgkop2nyx65cxqfoup22mdapj5tkk6gqwh7m6zej5byd.onion:8333 +6m5667klqvbya6uqnmcqspo7aeoreozcipzzb5f222vlrjhhodronyyd.onion:8333 +6ovl23cbwhc2dpm5rg4rs2xrqg7r6v2d4btsvs6teh3j3fwi5y7drvad.onion:8333 +6q2qwfnphyov635sq4j6hiiy55wi4ode5jbcd6wociu4t5cn7mm52jyd.onion:8333 +6qk6btc23vtuldtnnmkvrqvmzcjpoullfwdmnnctvlvekxhkdjlsobyd.onion:8333 +6voevyc6f7ng2axpgzw3irmu677os3b6l65ljhuozf7hfkar2fpbxxyd.onion:8333 +6wyq2i4gsjvidvz7umsdnwrgxvksjs7wqvvuf4zdjxsjfjwg7wil5zid.onion:8333 +6znucpvx7k6zasghyu2dqfp5thsrr7a56cdhb3yqoagre3uv52od6gid.onion:8333 +74oj4sd4e5zf7mub55pem4kjcqo6udugo533mlnahxmx6kliwmhurkad.onion:8333 +76yijbvce4sblwgr32q2se2hakcbxesccnzjmlpoptm2d2txmsepd2qd.onion:8333 +77xulqs6goaawmm625koq2jm3uhnd5dccr5cthgins3jpuywncq3unid.onion:8333 +7baui46dvqf374kpi7aaffa7xnh36xndc5mjmqf4kzbxcuuxa5ojlaqd.onion:8333 +7bjwdnx5umef3bchtn4coz4qawx2st4752ckrmwqr27ueg45w6mouaid.onion:8333 +7d7kfcywf53obtguqshs344lgthjac26hndditvodqlh5rocy3cjplad.onion:8333 +7dje2pijmwanoj5thrg7xmc6hfsld2tgrrknsuem3dfx2l6o4f2cdoyd.onion:8333 
+7e3rx4potc2rt345yeathbegxnd5pknju7iw2lwi2cz3ap5onnk3ktid.onion:8333 +7g6vpzfqmzbx6cbqu27euah2ync2d35wfega5uco7xq7ywrkdsjtlxad.onion:8333 +7hefggwwkbpyxsq42idogvxpfbdayayf4vgiteog5ekthegmy3dl6fqd.onion:8333 +7mlt6sj3gcjoo5jpf2ctc5ozvljizu6vw2cs2dw3oyzomeaoiw3uqcyd.onion:8333 +7nnpzpyfbqqzpmmfen6w6y7gr22dey25zvrav3icfmlyb2nfwxticlad.onion:8333 +7pyrpvqdhmayxggpcyqn5l3m5vqkw3qubnmgwlpya2mdo6x7pih7r7id.onion:8333 +7qwkvlpilk5fel2biif4nstoubxdedxirvqujcyptxgy6bf6dzsj5kyd.onion:8333 +7r362setmugzrgptpjz2cgzm7qv4nhx3r6dhbabki7xsv4qqwumzz4id.onion:8333 +7r52rv5epyoq4ni4qhqxspnfyajxhfl6rynw6c4a2p27r4vnq64nizid.onion:8333 +7rpz563sgtfza2qmaj7ltamswufnmdxv72vcqloxfksmfr7fudjsuyqd.onion:8333 +7sm42kkqswwp6axdpz53ssefbmgrag7exwiis2ifdg7wfio6bxnu7cad.onion:8333 +7twzyh323ott7mpbo6y5qkyk3eugwitnsgwkxegqfg45u3crxoa6e7ad.onion:8333 +7wakp4xjxksgreekwjeonul2gj4omcn67g2qkiazfq44hgqit4z7rryd.onion:8333 +7zgfoegeed6jotgmuf2wkotbb2drdkuk223v3moomc4akiuy3yincsyd.onion:8333 +7zlknyoxay3rbdb4bj2zdsnagldpzjlzzlrmwihpb6yutklzqkhpwmid.onion:8333 +a4dkz55hsbef26ja7bz2tfsvlgjigg55ocnhycxq34efwpuzgtgfwwid.onion:8333 +aec4fcjyfr5g4eqsejmvc7t3eit6nbnv4voxnykmw2orn2smhixfrfid.onion:8333 +af3uv4mrxrqenu6a3kbfeovapp6k4v2p2ymxv44jw3i7szagcmxf3bad.onion:8333 +afn2rl2wdprisevyhxea5nrbflt3vwdh5crg4hqb2c4iukaxdw34j2yd.onion:8333 +afseqft4jxpkyeiz4c6v57nxwttwte7r4xas4lnkuxckfn4ohpycb4ad.onion:8333 +ahwkawuxyowyesovtvriaghqdvud7ksy3kt6rjhkcadsff5rllpnjtad.onion:8333 +ajws3x7xec4kdoprrljsdsnv2grzqng7yb2p2mp4gm735hly2rdfatqd.onion:8333 +ak226w3yvth4s4fs4ia4rcdkhkzoyrkjk25kk34qgxeoglvuho7dj6id.onion:8333 +alhwf5hus5gfkeq6nsqxhrxlq3nxherufycamaf7xnjwpnexuf2mziid.onion:8333 +alsnycayqigth3b5vrjtftlowxdnkne4qmnqawjw3qmhdoig4spqlayd.onion:8333 +atygn6lucla7t6coygbfd62n533kashhwavubkiwz5qgjmt5nkk2vkid.onion:8333 +awznblcc4yvfocchm73gwgtig7ohxxszmwx5ft5rpbeno2i6fwllukqd.onion:8333 +azf3zr4ldr74nemt5z342melrvrffm5qw4qx4p47zfokwqupw3gxl6yd.onion:8333 +azvij5u6agee2dl2k7eh2eliqu3f3d5gldzxodoo7qtjfiky6ewufmyd.onion:8333 +b3jgrfpicqpymo62sdgfmvgk65gw47rhr2iywrusj2rfs3piwwhsboad.onion:8333 +b6fljee4jrarq23x4ppnwulldtsysqviknpgtdu5aizpkxomkgvtsxyd.onion:8333 +bacwevhzgtzgxvdkkwyvkaxsxwhkhqwa6hb3cvuizjsihwklqhslmiyd.onion:8333 +badjfbjiddjpnru7nemmscmtshyicdwmmj4tce23v3iihjh5tsu6zqid.onion:8333 +bap5i44uxcoomakmwcjls4spwh3uipliitfs6mfkrczdnsyc27gwf4ad.onion:8333 +bb4n3x3uacfdd4o7n6xlgomnott533kjjdrjnpo6ptbigpe5h26fm5id.onion:8333 +bbf7tqqbgnk5kat6ltxjlaxeruqeyyovbxsc3baucjar7aerlq6pv6qd.onion:8333 +bdrm5qxuox7ksubnmweootuxhb4v3qpvp7fkkwadavzqvgyputydfaad.onion:8333 +bgimz47rqq6nr7yzjaucrx4llfbtrytooxkpy57okjhubz6kcxlh6cad.onion:8333 +bipshow4n2uhgfb7my5ddmeqn3ysxgtsfwlzp2yhqg6ex7z7guzkkwqd.onion:8333 +blfpqvmciektsqzy46bse4mzyxpsb3dz4z7wpoufjr4d3tvzigtlbmid.onion:8333 +botxev6ve7ushdwdkbgsizb5k5ugozfst4x5ptsat3munr5eyqxx3jqd.onion:8333 +brlajuce7fehga3y2fq75rwm63ecvnyh3evsydyah62d5x7mzzmwbayd.onion:8333 +bszynsgeze7m5lknibgenz5rl4zcfel7lojxe6j4y6aecfttftowp7id.onion:8333 +bu2bpe3ucyws6fbzekadnbfdxjq4z3tqxsgpnvm36nf5tes3u37gyjyd.onion:8333 +bu3xgmjbtkfkk5jqy3ckdivwfgufefpc43yczszpriggnksbeapx3tid.onion:8333 +bx6z6hkku5ksqq66fuqrghbaz4bkw2qu4uiv4qxajfmqnrfejtmm44yd.onion:8333 +bzb74unsgxuv3wrkjzee4nxe7knmf44a3tbwsf5ljvfci7hjidmlflid.onion:8333 +bznwam37uhpeuodct2ppxkoe4h6xs37vjb64cpb22aiafh75vabqujqd.onion:8333 +c64xcctcx7aelj5yy2sxa7jinpi7wzhzzvdydr5aqllzu2fzgufkuzad.onion:8333 +c6fok2ng5z3jvk3bbujnprs2cl2htlfpht5hdethnkkfyfhg6wiz6fqd.onion:8333 +c6k3i4245y6honrsxfcm2l5gcnn5rlcatxzyyubf3kubhzdvgxop3rqd.onion:8333 
+cbgpdxoree2eg43xmd4szoaawstbxqvo5kb23ky5bez2pq75k7vpi3id.onion:8333 +cbnuxupqql42wefzspje5wqkaauk2mk6ptqb6khle443attsannqvzyd.onion:8333 +ccwhslmsgslatdy37tsqn75ojxtweg3a66phlxm4o7s3g6byj4x4ghyd.onion:8333 +ccwupmup76qqjmtzuiv3hpi3u7akqte3d6wzuvee5spe4lcdzeldzuyd.onion:8333 +cfgoe62d76nn46h2dp7ut4oigb35kkphi5ghzn7hmsxly5qfijlpzwad.onion:8333 +crbqu3nsuag7y3fty2s2o6khgxl4k5wfbn6mguy2kvdbybvtbqzgkoyd.onion:8333 +crqe5di5puk2ehjiw2ruuqaqmefc6hwryo5cpicyydkxekdbzno4rtyd.onion:8333 +ct2is77gyqeckpqu3khdvzvq5bm3cowixixdlgadafkvdzeviwyrw2ad.onion:8333 +ctto73iocvgjkru6iw62a6hre4lubq2kluupye2dnp4raxr2urgu6oyd.onion:8333 +cy4utydmbvspzfbz2vdlzuuxkyyzhjuumra7klk6ojiptv7y6cup75yd.onion:8333 +d2mlhomjbqjxv6mn6xx3xir5x55jrmx37pz2swm5m3arctpt7n24kaid.onion:8333 +d4acgxoszymwjrjjcyclht2zzq47pomhavsafxiervejqyzipd5uxbid.onion:8333 +d4clcpzl66owm7t3pm7czb7zzgsqefpdh5plrnfypba7f2mikfv4u2yd.onion:8333 +d4jnim6os7bshrjgpfecsg4i2g3vt4azycssuqahiupsd7hwtjpfjsqd.onion:8333 +d5sd6yiuwt5rts6xghywihgxpbfih7rcrv2ubt5gh6nf67dfxq5g23yd.onion:8333 +d7nslqez2k4rhvxbg2kxedlzyoyt2g3nvelcniyg7xuc2fidtgstdwid.onion:8333 +damegzcogdp5lvypiy5p5fo6lfom5s4fvuwa2wjlm2cmsnhhpcadtyqd.onion:8333 +db5atsyx3n63dyc5upujumsp2b5jihafxpw4tp4k4pnlmbahilicaeyd.onion:8333 +dgezrh3kfcngsvufx5lrp6nodcshy2fs3w4ltalwt2etr4qvvylvrkyd.onion:8333 +dhipk4h5xsan2jjjfci2pljnyy3ip34tmuzyt2rh7zlmezzdcqesxnyd.onion:8333 +dje3novlozm5ceky6npbt7qjnf4emui6f4ygwfhhy57ddkasvy73bqid.onion:8333 +dni2rjvqe53gpmdr3ykhnsuiscjy655j6ir55uegdgiogkwz7cdgqcad.onion:8333 +dnyvygq2dmvevz6ajcf3asd3d5cqmpevo44qyt3km57nysyt75rpwlyd.onion:8333 +dqaeyhf5v4472dygabraclznfhtyytwy4hdvrjqarwij7qu375sidoad.onion:8333 +dujcffazdttews7bernuv7weox3asklnwuzqsdoyzqxy3logfupzwayd.onion:8333 +dv4dtz5zu3te3g2ccrz3nrgcfzozqesgmgtb5aozhkgonqwg3dicgtid.onion:8333 +dvzwybrkcynzfjg4jpckt5kungd4nsqbwylshblesuwzflvocm4rgbyd.onion:8333 +dxkkytqbvl4hhwh5i6u77rx2rkicau6dyygt6xewgy3apbolw6w4mlqd.onion:8333 +dzhhqpetvyq5g6xhgy7gkuwey23dso6sc7jb5irlcfs7cbfaec3gqlid.onion:8333 +e26a4bpxijuh4nmeqm5g6d2ir6h2cwyw7i7nroxy7s4yo2eog4vvd3id.onion:8333 +e4hdaommmm4y6q62g5jvh6mtcot3vhqj42wmopm32bu3etrdo5vbsoad.onion:8333 +e5mzbzdvpngxjkbfq76hdpfti3wirwqezugqaksprdctb3p3unw36oqd.onion:8333 +eaqxyujymqzwamyf5qtbbnvstu4sg3bv5umpkzxjbsaxamzfu6xbi5ad.onion:8333 +ecx4pf7gwdpllaf3sz6nseb42ojgp3cto5xo5j6vn7afu4spccub5mad.onion:8333 +ed4imq444il5pp5gks3yimgh3j7v3vh7qydzyzksvv222ebudoyvfsqd.onion:8333 +eheqmoscz2jqa63b47255pkknl655xkjbgwhpeuiryfrj6hkf4vureqd.onion:8333 +eiywuxh2hwhk2qalfvqtfvj4ljuez3o2d4deqmowabci4o3evmgjpjyd.onion:8333 +ejgeelmm62ygi6ruynkzt5trh7ys2csgvkxzct4qptlwqcdnpqiu7yyd.onion:8333 +ejrykyejku5dlmuz6on2pi7ysjg6c5puuownbytwmq7mvw6xyqbze6ad.onion:8333 +enc4qxoyoap5ndr7duevmjhv2zcpor3c347523yafo6ct4tzj457baad.onion:8333 +et2jf6nekem3x53tyzcheqo7ar73wg4by3ucvzyjfliul7357cei5uad.onion:8333 +eta57alktm4zhvh3avor2j5hbhdaelb3r7jy2hipraffsyp6bzo6ahyd.onion:8333 +etcabymwunkkvnx4fwghc47qf2qf4ix3jtzfhp4gohyhcdibg6sbeuyd.onion:8333 +etj2w3zby7hfaldy34dsuttvjtimywhvqjitk3w75ufprsqe47vr6vyd.onion:8333 +fbuvbm4kekb3wze6xocibjkxnkyj257ot6d52342bzvvs4uzk2ykljid.onion:8333 +fig7txolf3throetkrs2zbhev46rq2qd75zr7pdnmewgngmcseu4udad.onion:8333 +flgedyhmormdyrxqtayz65kpmgw4bmgpr6steg4r7hjixnjowdanf3id.onion:8333 +fliooz7kq6lj6es45jg32n66jq6myliqbnzg2iyusun5dt5tyxg7tmid.onion:8333 +fwykc7ow6i4o2m7vygmgfdkevyqvw2gspdf6lxrfhdemalb34dbkjtid.onion:8333 +g3rzugt4b4rv2yq266jeaykvbv76nnqi6735nghovcsnvyqwmfoqsdqd.onion:8333 +g4ojra3inm4l25p7zl263vz2lz2qufwtjwlehwzffi3vfruef2kfg2qd.onion:8333 
+g5bsayc3x23fkkssptmllu2qn4kg7jkpcl4fx5edjbecojymiukikead.onion:8333 +g5r2uda6whsgs65yf7v7yoqmqokbkqiwvgwjigej64yskk3skleticad.onion:8333 +g5uuvidgkvdf2cmoekggzbifzbojh7dn75e75y7v2ig5jflkykm7niyd.onion:8333 +g7wnozndrisbuu664xzbwwzuui7b5xkuzfew3xmfmld5yw6ypswtpgqd.onion:8333 +gf6vyqezos2s52kxwz3itpma5wnb63ol7qg3kuwxg537sejtu5jnmnid.onion:8333 +gfoqfn4vlnzk4ocrwegqrgmzmfriwrzgnx7jdpinqkybidtyt7kehqid.onion:8333 +gkgud3alqw7vqywtawsssovmvkhslqr4xenrmrg6om6iljub5o2fh6ad.onion:8333 +gkvgggf5guqcygywlenme5ml7dgtkpjmbtemsblwdex6jv5kvpct6dad.onion:8333 +glp5z36juz4zfh7touj5tbchprcfbdptdhci7shyyimd6bbjlcijjeqd.onion:8333 +gotjkzjvun4p3etaf33p26vlwtmlceytyynsbgiknb4tra3snl2wigid.onion:8333 +gptcyzjkqzt56r4a43ergixiljheby53t5xtb7dcqx6hfkpt5f62stqd.onion:8333 +gpzwpzigfrhkyhc3l5sllo24duxa6ousnk5or44jsrtdj24dstol6syd.onion:8333 +gqcqsp6ktamk2wad7sy2quysxivgu3gjxitx7krpwgsh37csllx4xoid.onion:8333 +grkruzba44rm5m4gkyejjn35wxblxos4ba2vsi7ddzjub43ydnrjhqqd.onion:8333 +gx3hsyxf2jznihgprtkagqjnpwcp4hl4k5kde55go2vtfczh2t2zcmid.onion:8333 +gzte3h7x5qjzdbyibrprffzf2mzmhmkhzymq7zs4ksgwstdxtdpzeuqd.onion:8333 +h7kfqg6lf7v5dywswdzka2eepdyiqira6mamj6l7razuxejlspopleqd.onion:8333 +h7rors5zihhghcytr6lh7wgmioaht2bubslski4o3nas4lnqwq4i5byd.onion:8333 +hagkd2gq3v6a42lralbpmhpcjjlgfq6ledege3abfpwvj5ln7ydyciqd.onion:8333 +hakum7yuy4qpjzu7czyl5j2nnli5mw3fx2fxjcwfjs3gr5vkglrgs4qd.onion:8333 +hckgp7as2kephoosoqe42v2ibt5o3kgjeyrgcbipilxnuhqrhxmlg2id.onion:8333 +hg6syx4oyvrcraxytqymj2blngpwji5rhfbjnglo7tvzfpmc5sobdjid.onion:8333 +hh5d6gja23hlwmtrpmwbd7zn266zro43hzxjpwa23p4xnq64wibazsad.onion:8333 +hibgbt66hb4tvmhkmyzwlxirdmpkg2aa46xznnswwsuy3w4foitwedqd.onion:8333 +hl2fornwqseckgkkqpx2r2r22nstdg45amytkfkffcgnlgyayyodtyid.onion:8333 +hl5z56lx7zyq5zwqy7htw6w7skbb3lgrj3dglj46dynsygncs73meuad.onion:8333 +hlinnnjusoqzc5taurrf7oe2k3m33re66clnbpmme5ild3cebctbukad.onion:8333 +hlsihfuyafpoxchsrhlh4wdkda77iv4wmpnz2sgofonswq4kn6ns4pid.onion:8333 +hmnvzj67qtygqbe4uultm6gzhpwg7bd5auww4piraenmbqp54plzyqad.onion:8333 +hpieksjfcnba4js2rcuf3dl3igt2njsm54ztf4iux7jhrr4zmgqltdqd.onion:8333 +hqvdx6ny4p6lruba7wlaaycndmenzygc3nnjwjd7lhfnnkuznriyb5ad.onion:8333 +hrmvwr7x5vdhpfeuo46a2ry3grmt2mul7a7rhjpaaq6einsmrunuvpyd.onion:8333 +hsjtdnlt676ufkgas6ozkrwcjrag26awhkm7xejov4nu776lk2lb3oid.onion:8333 +hvl5d5gesz7vq6uyktalim3ypwqfdytrhg2nlqx34yq2u65hgvb4iiid.onion:8333 +hvp5reyglzzbyzfetg2wibpz3sk3dgpii44bpqgzulk3i753kqj2veid.onion:8333 +hymhpwqwoi4atw2pj2kqlkzpzyy4pegj7e5titydrlenpg7sh3oi34qd.onion:8333 +hyygg2luyb554u5dstu3yxj3akkooayzdu2iwx36y2d3wiyrawffznid.onion:8333 +hzhmvmupvgcawbcs5yqeejti2il5x66rwtlf6aplrwhx2begwfhpesid.onion:8333 +i2e44r55u4obbtvjq7p5edhrnhwvo6xxudhq4qkx6dxupcdkvon56xad.onion:8333 +i44vl2trbapr54ocbr5zaqhctuabklnugsoofmxpw2to4gr5e2tr6qid.onion:8333 +i5auigbdj6fwx4c7r6wxa4iuwkjozapzety2ndm7yn7qdxq6frmgkwyd.onion:8333 +i7wicebmued4xlwqejvoexi5uhw3ttzxkjwyqldonk2bccpd3p46q7id.onion:8333 +i7z3b4cwoswz7sknczkdnwmhbkn7yk5uvxjwtrtezner5t4mbi2rs7qd.onion:8333 +ibeldkbgecvvapqmltodznun3hocnt2u6lpocmtwfklp76xp6edwb7qd.onion:8333 +icyzujf4jopznrgpsagmfucv47wwb45tsruwdjuepp7hha2hy2hy24id.onion:8333 +id4pu6h47tfnmpmhurq5womypqmxw5ri4jxbp2pqt56zq4litdbxxjqd.onion:8333 +id7ca4f4yb5yrf5itoqczqp2bsyinqjkwuo5mvzx6245ef3nmj2wodid.onion:8333 +iehll6sixysyqtfripbtc6ob5zaa2rqlnlthuxkygmamo2hj3fm3rlqd.onion:8333 +ifdlnn2cxerjvc4egepebsv5gnkgqtovwutfja37osq2g24gows65uyd.onion:8333 +igns5t6crnyqxlos7lnvhcpltiocus4ox4lbw475ugl7yctzj3yz54yd.onion:8333 +iioiaikirvhrwoixgql4tbnkamb47yamsrzih7npit7bumnc7be2j7id.onion:8333 
+ijj7n53gnbmieojkeq6ptz2ffxtauguqqsluuy2rrjoi2najk3yjlqid.onion:8333 +iltm5bpnfaoqgwbzfyzo22vqhgpfqjtkd7xw6cpzjtrmanaetgxgwlqd.onion:8333 +itt3znksrv5dbodyvshvmltsnw4ihzdlfcepgbcr3pzrjxm6dpzvvpyd.onion:8333 +iuvdsszxh6ec52vfz4w2wsaswy2nhvptvoqp42ti4kuv22mryhza36id.onion:8333 +j2xonbopjepbjnuhq7su4hpwbcykx7sqrml5n2n7lvqahyx7s4bhkhad.onion:8333 +j3t6pxfjlbklt6vvsrxapk2ctwxsqwkxhvon7i4c4y2m3u4ibzh7wqyd.onion:8333 +j3tdqjirxly4fdw2nuearkjinmmwq6wup3uq2xnkixy4xvspz7jhqdyd.onion:8333 +j67ve57vc5dn5xcorufb5kg7yxyzub27sgzasn2vwhpysvrcfkfehvyd.onion:8333 +j6h4k4lymw3zmtin5n7vm2vp6bnjr42vlk2kgv7dbuoagpzih3dlppid.onion:8333 +jagbaxijmydmaxjv2tq23ybmtqdqiydm2qabkdnuscohsm5moowja7id.onion:8333 +jclj32gvup4rk3kq7gh5yhdyze7b2isfegxafbtxg4k2czs2kzvo4mqd.onion:8333 +jeqj462sr274gnhng7d5jpveuuj5b7gcp6hsm7skum3gio5abhcv45id.onion:8333 +jguxnkcfn4r4pgyoj7cvisyfwqaqpkd7o2wdqztsfl6sh4bcc63ngqyd.onion:8333 +jhztcjkmp23adfz4dimyuyzjkkfbno3nfgeq4ndp44amcnfxrc7vgoyd.onion:8333 +jj6fehlqile22m4flh4ujcn2f475ckm5zsfnu6f4qyfbgr75pquf2ryd.onion:8333 +jli6qkv745s4wxfob6ruxua262mghtj7cu7qghdfo6wygryk6qi425ad.onion:8333 +jlkvkyalczcidry6eq4jxywv6sz3cb46a6ynqvl4fia6ah5vtzhtocad.onion:8333 +jmahatsxjgfg3iz7hkel5lkf73k2switbletcahhajzhrtndj2w7a7qd.onion:8333 +jnetopradsod3332aswwycxwtwdk24bo5gqv34dk3hzx3crfclougfqd.onion:8333 +jow6ete5u5cs2z3ntgbgygqssirexaoet5c4hsu3t42n3uvtcct4q3yd.onion:8333 +jtdbt54n6tccbwpbvqay3n3pfnrmu4kagqztghfal626wimuu6ynfpqd.onion:8333 +jxkgl7uxgyk7fy7bd6hjlkycc6dbtqiypvatm4r3y4eldj7asqhcgpqd.onion:8333 +jxlhkqbtkumhra652u2b65nbminvpqvzj57z7ihwnd7xbqr7oocbjzad.onion:8333 +k4tlmd5ifhbxw7j3g2eeqqmfudae3j2iwj7ytxgv4aq56zmiwxnp4tid.onion:8333 +k5ukx467c6kgf7svvohlecnde7veu7z3tpk7ow46ryo2f25xgdzczqqd.onion:8333 +k6bsyqwkmd5umwsa4okocd5qqkdine76ennwd66jlzip64hfindn6ead.onion:8333 +kamatyg2sqhbcgxnlbih3vg2prwtk4yqhnbefkl6cdmrhhbug3hhu4qd.onion:8333 +kcy4qzf3zznqsejy4w7tfhx6vdryk4u3wcodb377hjxzcolbfnycq3ad.onion:8333 +kjnokdj37cimgseydldmehyquvgo5d6uqce7plauwtdpwzpmfkgrizyd.onion:8333 +ko7igyyk4dr6ofshs75mbhiwgim6eppyspda755y6exlqdkymnw2qsyd.onion:8333 +kovsmudoognktyjhucswkm2poshl7vsilpsbptxugz72a27hokopu2ad.onion:8333 +kqsnps2opo3eajaucdys7ofss63p3oirdesiqvaqdiqmzj6ctfitwayd.onion:8333 +kr5gulo3vhxqk22pctlbpcukl2bplmuegl5dwigc6oti2zqapmu2ddyd.onion:8333 +kymtxayvuvaddtahyx3kpxntaki3gmlmlexukgxw7ib2qtwuqbat66qd.onion:8333 +l2uhir4t3mq6cj2ldyxziydskyh7k2nx5oxhq223bryenvygs6iwlgqd.onion:8333 +l4djzf7rkj2nkkfget3p2r2sh466uddwqpb7knndb7bqlawf3bovjhyd.onion:8333 +lacfexi6mowtpmfrnddvthsczivqa4nyvv6ng23htkn4sfxfqsbp5vqd.onion:8333 +lb45zexgtnb72v752s5l2t56eiq4duhqvrzk6irsvpp4h6xxgoc5e7qd.onion:8333 +le4k76j3ysbyy3ithq3oyhhaesch7mbwfqqfcze6z7txjdymznpo7gad.onion:8333 +lgilelzyhm5em7gnmqm46b6u3g2iam2dxbc3ywktlbq6gpibphy7ayqd.onion:8333 +lh3myis4neyqoz6er6av4cwffoy5hdgirmvnz4ywy5mvi2gggv2tl4qd.onion:8333 +lizmefe3njrrelmdzynyhhtye2jiffh42x277xysq7zqbevcfkyphcad.onion:8333 +lj2ur7xjnwl4qeijrcj2644khjlzczc6wipfcshevuynvlbgfcullzyd.onion:8333 +lkhigyl7ky7zvgj3aiitbnlsqxt2zhk4nygs3mi2fokrbp2saw2jghqd.onion:8333 +llxd4aegg4bf42kbrdsgt43azlazu2k4ssgzgwtgdajv3pwxtdtyg4ad.onion:8333 +lrjqlddfglvcw4ahzcosg7mqtzy6bahymqxc6k33qqzk65z4jy2uhuqd.onion:8333 +lrysypmxkxxhgay74gkgucnfg7ppupfxgio4v3dpzbhoeyyzygwbztqd.onion:8333 +lvwcux6i5qekbpz6njlt7ghk6v5dvl3f3arc4tprbkuxmtk4jo34mvad.onion:8333 +lw3fr3synjzzqptoonwftim7wpbhpie2yf7pyj67vdnouz2ovbdyl7id.onion:8333 +lws7kt2jgjy3kurxxzqxww6r7mrc4grjmkekhgcpfw4tbgcn4kts2zyd.onion:8333 +lxrasz7dvra6e7tnulyw5wvh6h6oien4arihaohjeak4bslx5oihorad.onion:8333 
+lykf5rgcaocbyfmxhhcq2aqi7e2jv5rixmmxisyrfubb7phaujimhgyd.onion:8333 +lypt4weazmqrm7y44ohdc3ey23n3xeei6yerofknbg7e2flfkakabzad.onion:8333 +m2lpxaib55uttwv57g4mjfjrr7dqt7cdw4aq7s4pg3zxyywuorr74zad.onion:8333 +m3haeigwafocrug6fi3dczpaxmowwg3dspdbnt5fla4qa3hvsnhifmad.onion:8333 +m7zjjsdog4qeo75x5j2sqi7eet3kepiqr7uciiknwnxetpzksojpeqyd.onion:8333 +mdum4stqqbjs4ups45rqghxpxee2qwhkqb4kizyyzpa2jqryxtmj4eid.onion:8333 +mfla23r3bodslvrduvethu4ituej5k56vldpp7m5pu5bfykvq2oquvad.onion:8333 +mh6okc4axtxpna2vsv6madexepg2rhyeiqnqrfw2pktgyjbmnn76ivyd.onion:8333 +mhqf4lnddyujweijl2hlvyqfeehn2whtnlaxdj7m4rkjxffvzzis4oid.onion:8333 +mkchot4yopdk3buttae2chruenu23xg5srd3r7gb52gkg4eznpavv5yd.onion:8333 +mortxv4zzlyupri7vhvmi2cd4uyhletlj2uth4sy4rxtm26m47dyzxqd.onion:8333 +mpbsa7gm3tpp5nasl453ggh4l5tsc5y5ky3ppmlgf2cgbtzs3dbceuid.onion:8333 +mrvayqa6w654g2p6kgsr5lrshn4bahmac2o26s6yetvmxqwllo7fwfad.onion:8333 +mthicrguvyzgyfxfn6tmdswnmldpvxwcib25xg3ghdna5qttod5huuid.onion:8333 +mvjotqstesnrivtjtmtpl2qurvkyf3gsfgftmjm2upkyodxkwhmrnpad.onion:8333 +mweq6yuerljpnvwlkvnh7duofgd4q7yjtbls7g3muz747pdnddt4cnad.onion:8333 +mxv3rijxa4g2mgafhwyax7lcfyrhhila5g4rnm46cwemileo7okfnpyd.onion:8333 +n2buii4clktnynlfp3ccmuxsgwvngugzhpgbggjmrxv7yqi7gejhgaid.onion:8333 +n3jzmnv7pss45lrbrc57tsy3gznnr6asyosdkdlprilc6queug6e4bid.onion:8333 +n5zxq26im25j4qgymyohaa5i5tcpihxfpfwkwfook5tivipoa24yy5ad.onion:8333 +n6465c2af6s2fzz25ljpv7xjvbj2gthqail7ns6wd6v6tgkkofgedmad.onion:8333 +n65mfok2vfir64opzuxqqpmxnhubcvpyite4kxhf3grs76hzbjyaxjqd.onion:8333 +n6wythsibolbz6hdv3kdxbo3bdhf5wtefgd4tksb224zk577mxytnaid.onion:8333 +ncolh5aclnjsjtthsmn7qscvwx2mf6jq2z75cx4snbeqt7mpbjzmcoad.onion:8333 +ndme2euw6yve7bj5ya3awkhvaznb3gauaidw5ov6qmf4kqghsa2wvmqd.onion:8333 +ngupd4e7h6eldd6tn3a7yvarujoomtvj27dcfwgz6umcta52bygjguqd.onion:8333 +nhondlor7mswosksexpy22qvrmd2gmskxohdzt3szv73zvkmlw53nzid.onion:8333 +nhqdw4pprxgvxxqxtid3frx5t73tcmmdk5sax7a43c5vd6nqw6fxpxad.onion:8333 +ni4u52jpbfjs7vkwndlornh3u5ezfl7e6i4vjccpoczmx4wqxee4wxad.onion:8333 +ni7gpz5a3nkn6gxo4xgcsset6ws37ju75e2z5c6azmavcvs5wxrbzyad.onion:8333 +nir3jv7w3hleurdz7yfwr3f4bmyw7ce7u3rnlf6z7tbxoi76jwnelnad.onion:8333 +njeci67qcye45cl5pp4wptml7gk7wlqqjnez6ob7ps74wkpfdjhsmuid.onion:8333 +nklnhb6lpiwtpxjxkdfthnckpcashewdubutl563mvph2xjdbaxkctqd.onion:8333 +nlsk62k2xyvermr7b7f7cwtxdrgk4npnewmr3omk7uaggoshcdz5tyyd.onion:8333 +noeuviuxcwwlkplsncm3pfu5qtx7b3kncfxe5tfq4v7iv362skor6oid.onion:8333 +nq2fwdqt3qf3fkfhlnrlnscmrucnirsunsezr3kfsc7jrj4nbzsephid.onion:8333 +ns5p2fa3afrainrynk4dzjfbsvknw6jao5sgsvvv2esaw5b43cieenid.onion:8333 +nwmjxig6azspof2ahmvaekxeb6kywdbvf3d5rxldyormrmvh2rfz4pid.onion:8333 +nzrp7w2xr7chajnwiv72oea6kd4pvle5mbqqhnvhkh4egnpytynosjqd.onion:8333 +o7rmuyo6u6gnvnqy5lmxvvqmb3d7i7txrdrt2ns4vjrostcvbdrwpvad.onion:8333 +oe2ipczaggoszamekva5an5idscmbwflde6yqymyyvexxyrwzxylceyd.onion:8333 +offbdtz3vbdyvjfddp47o4vkocl5bjcc7lhoxast74raakgd37j2gtid.onion:8333 +ogiz2gvxv3gjr3tn2os2y3pscvq37fbgkrfjg4c5q7x6mmeg52duoaad.onion:8333 +ognns2u5bdmwu66oktrluycronq4jsqxqs7dqnzzjoskobfpgz6nqcad.onion:8333 +ohcp4dgbobbe3y327wxys6gdolluhezxvbojhw5ixhbuun2rmzofxfyd.onion:8333 +ohf5ytsaptvjfmrgtgesypb3osa6z27mj55rnlxsmrisxrbnx6v3czqd.onion:8333 +ok2owqkjhsslywvo7pytpcjf6443tdvgs5sh5vzvpqoakfmo44a5bpad.onion:8333 +olp6o4vhznaftlmnlhibjxpoyaopdurzsiqvchh55kh3ceqiaiwm2yyd.onion:8333 +onlybestsir3tjebxjh2h52q2q4xgt2rpx3z56adzjzyo6rk6jo3rtid.onion:8333 +osfazvr657jukhdyj7rostfg57lzy6c7gvaxbh7ul3jakppktisckbyd.onion:8333 +otlq5dpuoexvu2ufu23z2rnx3vbetoi3kytxlatmh7loukwolrhn25id.onion:8333 
+owvvw6jozvhzsgg5bwcukzvnpfsowaw4pmz3btzetqtwtdt5nbiqunyd.onion:8333 +ox36ix2aclqlilqnr5j6xj3erdenrby754a2r44vw5xlibqlwbemacad.onion:8333 +oxfqjol6335es6zabisfhjljaykd7si3sni4nkjwong5duzw7tw6jjqd.onion:8333 +ozynvhwue5n7kqkivx7y6o2jqe6atusrutpu5c4u2duydzywewjgheyd.onion:8333 +p3chmg2yfray3kuhdoe7nh4zkxmq2opp3zu7rznb7v3gyqaryrxfgmid.onion:8333 +p3x6cqcik3hwdhevmqnwruhsidgdsqvxwbcmylaoup472rpcorptv6id.onion:8333 +p6cdmihgjpyu5ds5sbpfwjscf2w5ltyqgtwobeud74e4rxamtdlhoyad.onion:8333 +pcxlvkgabsowmrx54b5bgqglershgfchr6xavrhbfngridplzhf2pwqd.onion:8333 +pev4gqbesjnma22gnhgcp64hjdentn7cf2zuwmclfe4w5vdhujb5dkad.onion:8333 +pfqr7kal3efztxigrbrdb5qvhozpzxqqawt5dymbloicbre2mp7fvuyd.onion:8333 +pk5wtrhot5qa4fjcsd433u6j6vm7nws2kif6xwby7antjhz3jh3ab5ad.onion:8333 +pnimgn35ybiyje377slm5wi7a4mekzrfjioqboolj7fhoa6bitpl7yid.onion:8333 +ppujyg4f66wpy2rwtdiu3dosq563mv5hcimmaoaeyn5riau4kq45akyd.onion:8333 +ppvvdgnhirjtdrleg3nytysjd75gjsksd3s4bpqwv74qrobzl5arxiid.onion:8333 +prwpkeljx7embshafpgq4qrfkanvlqfgwzwmhzfsdrv3xvsw3b463qyd.onion:8333 +pw2oq7uvf3xg2cg4444ouq6aqrkmf2taqw5oiznghebiquhxvywsnkid.onion:8333 +pyuqmbjilfbju55vbaqxhcggvs2rmwsgg5im2gcbwsmnbvkride5iuyd.onion:8333 +pzltogfefm4zdsj7mtxdq5cp7rw4o7nizyoteta5zdbj7oy72rk4hnqd.onion:8333 +q4waeljubnwct7u4olw4wr7ptuduuprzw4dtvxaioxyhjhudpp5h5dad.onion:8333 +q5iracp7tn57fkhhcewahux4vofnzkqmfjldtoasq6h45vxv526evrqd.onion:8333 +qannow2dbksedv7wgi4j5gpgpf2qfgajjzlenmc7vmrfy7xku25pz6qd.onion:8333 +qdi5bvzkmysxe2a7u3nwytwqqrboujxg7dsmukukff2mjhcqsjm6qvad.onion:8333 +qhar6p3u6oyepbngfbxcsxdyzk7nmmxw7xnyk4sjiutjmnx7zvnsd7qd.onion:8333 +qi37t7cbq2bkfofqvmimodblq2qmo4jmn7nuiv5eyhtxrbj23l3feuqd.onion:8333 +qilhe2eagh2wj4itojf4x7muoakf3tb7n4rvb5tx5cihqhr6hkgx2vyd.onion:8333 +qnmxlslrhoqosnuf2v45rsifgwnsdj4e7zwkvloi6tubvkndg7akadad.onion:8333 +qox2gvzjm2bhzzl736kvhkqo5dnrzadruyfzalczh2x4g3fu52o5ksyd.onion:8333 +qumhzryq2oghxbrusaltdo466x7spsq6hgdevmdzqihqopceqksdqjqd.onion:8333 +qvfefulvat26zdnkot5xykjdm7nsoubmlrg3xrtwzqms5ns3ug2ovbyd.onion:8333 +qvfhewhezgbiepbc7bu7olsmoboint3pkry67wumhshyqosnpbthyjid.onion:8333 +qxgwkwwmul3lxfdgetsvvleeiz2bdhiq5h4523rq7xf6c2cfxxczodyd.onion:8333 +qz26vwgeqexbzepivswq7bycpujdnhtd43prsm427rfu6e3avwgxcnqd.onion:8333 +r2vq7zrtapss4dx2fbx7fm3xyg7gwdgq6mr32fcf6uovi3ft6fdigwqd.onion:8333 +r4nmrs2ewv2bfs6z4z7a6ypcey6ndmyweuw5puktnlpcesl3e25pauqd.onion:8333 +r7zbw2sx5sd3gkkop2po2fd53y6xsz7lncpwzcgyv66gtsrq7yaj4cqd.onion:8333 +rc352vcqyom5dp2k6jqqwfwbe6e27jwazk3itojqxrzuccrjcqr4azqd.onion:8333 +rc37l6as2mzvu6rssgpusy23uhtdzerd5cd3noaevtiktv5ndhjzazyd.onion:8333 +rcav4e6uz77o3o6q6rxexsiqnlvv7tfzsc3so2wcw4wqrsipo76c5myd.onion:8333 +rduriat5h4skczq7yeuy7cv2ecqrysz3bblfuo5z7u4p37ga7vabszad.onion:8333 +rekpfdhbguudic6ws5scnpieixjz6yi2heg7jeh3lj3yk2cxm27hplid.onion:8333 +rep3zta43guqsxuaem54jkjy6yrgsipxix3qhhvnt2kiek7fohjalpid.onion:8333 +rf6v6ew7gj3btll2hhbrfvzfbqk3arve5iwd3yjmcymmxyweiz6dlhad.onion:8333 +rgfftmtoaiyrub35duua4yovzjkfumuwukfgtagyk73yoqselezaioid.onion:8333 +rjsvlw34hal764kqvs2me6adx5ucwf4e5u77ilq5azql2vbetcieypid.onion:8333 +rn7qxkbh4puk66rnbxhhx3f6ntfc6vjzl5anknzichwpgq3ulz6knfqd.onion:8333 +ro2liqfgbjpybc65auyprcmhwffi6wi7kcmdicdzv3y6ueurky47sbad.onion:8333 +rq4mvn4xvntnmw7somr2biuswtik35vxogneb2rwq6spou5fy3n4oyqd.onion:8333 +rq5alkdxudjedsloei5u5x374eastaqoxwd4p5sdj3ewse54rgkkuzid.onion:8333 +rrxjpptqxvyxeq6q7ei7e6q6p4bqh75vtholcte3s45lg2hrwoqujaqd.onion:8333 +rvpwevsgpdipz2ewgmujeflni3hl7fbxl4lkltjcxjszfrczdxy65tyd.onion:8333 +rxejubww67egcw6speu4jvn3uh4h5dcrokd3bedoxa2qb4w2w27gn4ad.onion:8333 
+s2h2gdkz5mk4qrfcgiecocbkkoapshuzsf5mxn5cl67ptzgtrqv4vzyd.onion:8333 +s3kcvocfgvhflaz4ook3zxyhqlt3kcz5zwnesxu3qu22xmkmxkairrad.onion:8333 +s4sazpnes67rqgxklf5l6qrgzizayuymqd2cehr5z3dnwqj2tonkn3qd.onion:8333 +s4t6eoru6b4xs7uyj4p7bhqob373a2suht7ggbeacevilwj752lqhoqd.onion:8333 +s7uoggkmsvuhff4k2r5jntcci5jwrwnjggb2k2zwg3s4em7xh5itncid.onion:8333 +sajwhnvra2pyb3s63pinuglaliun76wcxxhzfua5j57iyn3x7wc336qd.onion:8333 +sayaua65hiacjjwvxxkjwnpd6ecjwow2tv6k7iup3v3sreqa5cq2wxyd.onion:8333 +sm4me5bhu2wy6bu4w53e2wffkialasbh6fnjivf32bqjhjnjlb6ys7qd.onion:8333 +spzwladgktg6tqb4muptylfudoqaxszgz4holksftryymnmtoq652rad.onion:8333 +srmcvqymewfeynldgohku4pr6wrxotudfy34lfrjsooet2tvsrudfvad.onion:8333 +st6ctsi5uzvobfqq7raqc2wbzk7tpnegsvledwopmbegyfxhpk2feeyd.onion:8333 +suwdv5bah5udmttir35xmyws26aghffy6od4ejosxfh5l7ggqeb7evad.onion:8333 +svo4wrmgp6mwc64jlcz3moaxhhczwbd4yuwmrik4ssbasbslp65ma3yd.onion:8333 +sywwdc7yn42qohpqukbhdnxn2ptmv6434lu5fxieuoqq37cvq7tmyyqd.onion:8333 +szd2nyb5v3wwshb6jqdkafz4kuouvudq3zn3m3vc367radxdi6dhnbid.onion:8333 +t35zxzi6bmtq22mwdmygyjophnvt7wevvrxcmnc2p6g4ojpwhtop5yqd.onion:8333 +tcorvsyyhizume456jxit4mwxq2tgqkk222snmrz4piezalej5vojhad.onion:8333 +tet4eei5uiltcdd3t6gwieu5hrfmf22jek5xpnzrusekjhjztar7p4qd.onion:8333 +tggbmakxzrd7jmoft5knvkgrq5vgqg5iar4dfhbqj43ehvimxbvdmrqd.onion:8333 +thtzju4iz5vytcj5k32gixt3zagqsauyk3gvqcwxymcbf2uh2dnovjid.onion:8333 +thxoas6jir5jmkroiziwmiecvrktuadr6l3e5kiphx7e6awh5ijn7iid.onion:8333 +tiglomyomrtwigmox4lazdv5bfqcucsq2xjimitls7vv2c7ldofbttad.onion:8333 +tkxx2fvz4dwim3ksq64jbc4ezotdn7tmahpcgguvdnprdxjgm27haqqd.onion:8333 +tpjlymsdalyawl6qyw4ktcaj46mnb5slzgilo6moiwhk6yhssjuvzeid.onion:8333 +txaahqhk65mnxzdnboq6hw7b7gqnjvt4aspqo2iku53lk57vkd7giuid.onion:8333 +tzxfkfejmzl634qqybcp6cfkw74hzry6fb372bsy6lc37semeqgknsqd.onion:8333 +u243bnf3p54ule4un5kdszc3n5dcvpj4oofdgbu5zqkwqrckejsf5mad.onion:8333 +u2njpxmuzsteyympxqz5zpm7a2r3cbnwpc7unfgk5q2y3ltwy4kdrpid.onion:8333 +u3edrtzedv5tmzizamlj6n4kmpfrn4ekvxfjpzauxoy4jps3orzftiyd.onion:8333 +u3qhxwdbmjks2hbfouqcvoewbfadjotcgfnl656ahb33imy4ana4zgqd.onion:8333 +u4cdrgrbmdnxzwcrfcejbcpkoc3ugueoul52bircs6hfejlgfpksi7id.onion:8333 +uctbvktn6rprd5ra3bzlmx2nlgor3mnel2ju66iejykftlroflysdnad.onion:8333 +uegjypkyta55u2h5psaqlucpo3sm3hrumsyg4y7q24kxw3lxtkkwnvid.onion:8333 +ugkq3kl6unjm5vf7t3l4cenjs73ciasqqurdqmyc4zi5jtb57x2g2iad.onion:8333 +ui4i2ltehfchk3fkn6fssar4qo33v7qye3teozkmyghjvgqbwb2fwfqd.onion:8333 +un7aswzx4gjeym2jg4n65hp3nmgfvh54phkip7qjpudeajmdkdhqlpqd.onion:8333 +usk6s3ykfzq2mow7cmx65zyaqfckdjczkk6amm3oeqcsrily6vwsppad.onion:8333 +uslkjewqh5yaqptrf3ldmgijwunx2xzypaatyysokfsbhfknqor3load.onion:8333 +ust72z3wsgefcjkfybb2bbhyup2zcdidzdxzs4amtly7ubv2ljglkrad.onion:8333 +uzxiqhes6ksfbml5x744tv5bpktjehoed67l3fbeeof3xqyxq4pvx5yd.onion:8333 +v327cnzsw7fheim63ge2o54votgzaryprve4usxzqavozecwcunyhuad.onion:8333 +vabgp4i7lum6kckx6tnbb5gbnc2svd3ffeob3dcorvroqcxme3ppsuid.onion:8333 +vakmqacvrkv3vfooq6s7se3q6rtytuv5pk7oho56r6b4qhl4mqfp52yd.onion:8333 +vcoc7ldllc6ygydkiae6mopxfhunedgcpjbhs63mkm6v7trivd4ivxad.onion:8333 +vczxwtjlkilethuw7yrqfncd5lkfz6jfseklxcq3b4yyeg6xlvlpv3ad.onion:8333 +vegl6cmizlodquwawwn2gjtfxloap2hnqhc4ypw4mqzt7qlhklocfdyd.onion:8333 +vfabvpb5s3cbr7lfcnim5c6jpvls53p5f4vnzelbu75xgumg5quwd4yd.onion:8333 +vfr3vp3ifrsya5uzfwr6kopolohglhr4aso5htmo7js5eyno3fr5eqid.onion:8333 +vgsa2r2loouede4gr7dvfdlsq52rvbpddiltz6r5rwpjqwpdmzmtzmqd.onion:8333 +vm6siv5jl5whb22qqrxe65cnut3utcmgao7tajknfdmt4qbksduyz7id.onion:8333 +vmobjbqi4cos26uh4pntgielxzsm6sw34zhqlivjaegznplqjls3olqd.onion:8333 
+vmyzflrxe53lrzjfi6cnsf57i4dovnitypjhyovkh5t6t7ln3l6dbgad.onion:8333 +vo673kizhvk62wxdsiu6cf5rgmapktxx2l7ztxvzfsndu6spb5ujkcyd.onion:8333 +vokml5p54goq73fudpo2rzrq2fgxeknoaj64b64lqc4hvqohelsjavqd.onion:8333 +vqx2th7tt6ngg3nucixdzlvw45b7na4j5srkwcqlgo4vixv5ki5rzmad.onion:8333 +vthdpsloo4bvdqlgymhz7rqfofw63jv2ieueqaw6inxrt5och7djl5yd.onion:8333 +vujnfef63nhvm3fdyf2fwkrgwdbgx4mov5lfpsswnpimbhyv2qy2g3ad.onion:8333 +vvjpadfmgwock3qhk6mdyzricilgsbgazkgvwt75nfyhxil5wogwnbyd.onion:8333 +vxysqr5f3wmcnnlkct3j7lppjinyy7vminf7qe2ttwpsfxrxtjbtc4qd.onion:8333 +vzleatncxglazfyw6f6keqi3276f2hewgisopidxh2mx3uxu4iflw2id.onion:8333 +w2qvjxn2eljr5g2s6n4pnyq27jwyydtesfdj2zl6ksz3caqoqmi4zjid.onion:8333 +w3bk6j5k53fmx4yf47ii6lja6vstgk265ixuyrglhpil54circo6oryd.onion:8333 +w4pcxveq6mssnk7egg6durw7ndznt7cd7pfn5q3f6usq5qgmx4tnr7ad.onion:8333 +w7uxnbpo7n5ekrljtn7ayhrmrbjqedf7zixpyljsrw2bh67fvbf4rdad.onion:8333 +wajgqp2kx32h2web3qdbsa4ruin4z3plxcg7xdtfe75uosgmw3pfvfid.onion:8333 +wbp2wqy3q4uzzpis26f7hveaokvbf5jnk3z42sjbv6uauwq6oy3fh3qd.onion:8333 +wdsyieovtttjog2hddbc4noq3xdiytis7nhul5ks2rie4plefe3x4fid.onion:8333 +wfufce72bnkhae75iyndpbihzr76xc6nlehhzihrnmqdqy2xmru4fpyd.onion:8333 +wh3cdsnjylvrrhbnis73fyrsy53fyfkajhbf7dlobjccjp5j5ev6ykqd.onion:8333 +wh5iploisetyxiqzutfd5ka2xfpnfsszthwecwommysip3jcba37vfyd.onion:8333 +wnhrfj3jvpfowqa4yf4m35haci2jxgxq2tugjrih7t2k7izj3672jwqd.onion:8333 +wockkiso5afh47gppalqlyrqddpjafgtq5sysupkdiwwsqn6hgqytlyd.onion:8333 +wrhikxkpwssh2jn3cmicrgzpiwagvx3wfvrbrojara3neblwa2hklvqd.onion:8333 +ws52h2nbknubs5hwrlkad7trtrpu3a5kqqjtiig7eulwkg7775vytryd.onion:8333 +wtdzgw46xbydiu7c4x5ojyvc4z6upmytuaghey7k6nhpaantmmwcynad.onion:8333 +wxnqkadi7mrhabjt722ltvj3o5zumdljeawekf7gk5slcqc2fdbazbid.onion:8333 +x2my6gggeyq2kddxr2bad32x6luabusxwdbac5juqp4zwzjqxyosg5yd.onion:8333 +x2n7skbsacubrzsnvdj5zstr6sjsgzvn66pd2tkmzbdzz7z5phm5hxid.onion:8333 +x3scsk6x5fos6hnoil52m3weehh3feefqwjv7tqrfjerosoqdvdkfiid.onion:8333 +x4mwiwksj5pel672lgdmsxctltnealbzda34cd2ur6xnmh4o3hgmt5id.onion:8333 +xbqby56ecqhzqw5e25aqj6jd7b3t7bkxo7khvh37jggjehspyh7btnid.onion:8333 +xbxhtvgzy4ikywim7zleucubsnl7efqowdoqukfjg4mkzs4sr6vphoqd.onion:8333 +ximsnnqifspqkrvkdebsrxmggzlu64hsxjp3qw55uosflvbgp5kz2qyd.onion:8333 +xl3sljm3pnulbj7nrzppkc4fnwtxetmpy24rzluqxr4uyp3auntlupid.onion:8333 +xmr5epdwuvylwsevzq32owihkxkwaiyghrb4mjusfob7daamjmyiload.onion:8333 +xn4crbhjyycbj2pa3quv6333i2xpmscbzshmssbhuchc74d6uphblxad.onion:8333 +xpdst6l2cblapbr4ubi2vbvn2hbrr5m4sjl5d7irpzb5ylzo22kkbayd.onion:8333 +xrd4o7bs7272pbhqttqzzudfpdzjvzopcbtdkxexejeubhtigitftvid.onion:8333 +xt5pt2gn2qnprz5vvdbet4p4wgf3ejbq77guay5fzjlxgn7dmnzqooid.onion:8333 +xti4kdl4ojss45oio5harzghee7zr2whzo5gn47f74rgh4h3vlq244yd.onion:8333 +xz463kgdshepjf7gtnsxibjaomtbuksh2wc447xgawmrl4bbfbzg4dyd.onion:8333 +ybji2ssvc4je4cqzbad2pczrhancohnyy66ogsbpg3d6bspvh6au7iqd.onion:8333 +yfgo33tiu6w3nrvnfyw2nmpcwtfozinctn57566y5l4hfcttuvgff7id.onion:8333 +yftuiua74fspnqanvait246qn6vmab4aoympjd3omppu46jkq777axid.onion:8333 +yj3gs4nvumsfztawn7ku4cmner2zlmcn5kke33g4j2vywxsrqmbd7kyd.onion:8333 +ykh3t44zjojpj76ouqeo662ljhyc2t6e3mike76iqpelcdztgaxuw5id.onion:8333 +ylaprgwti4dsumx5xvlhul3esf3yi3ebyyubb2dwfgwe3zysv3e6nyad.onion:8333 +ymt2hdfgdxo2awvj2k4y5tbpvapj75j7v34tvvlrqy44lsvvdcnjx3qd.onion:8333 +yqrnkmnwp23adyswjvnvw7uyss7vtbfcickq6ug7i35dafmgfjd3bnyd.onion:8333 +yse66djcdrlmjvxlyibyye3s7r5hv474iteas3tplq2p2famsxkczzid.onion:8333 +ytbeafalpctlsx2s2qphnawkabtu2jb3nvarkwv57bqdmxgmhe2jx3qd.onion:8333 +yti2ofsqg7k4zbln6aqx4zqaor2zpmlsebyljxlu7jczsjqtd4ctd5yd.onion:8333 
+ytm5ihwofux4ktxhtxa45shbjaelybzmnb7uqillpx2iym5tkbnzfkad.onion:8333 +yu6ilgxjfffdb3fq7srwwmz6ymp3fxberstpl6vdocmxbrrf5e3tw2qd.onion:8333 +yux32z6r5leyhbdrgdpti4sggxyoy336qb73ia6sk5egfli5qsvkxvyd.onion:8333 +yzo2vz4kuitm53zwynkm7ddfdw5escsp5fp7rgf6uspphe653mvdekqd.onion:8333 +z2eky4p4ozlkrrmuq2jqcz6w2k22re6hxrm3um4ypcnmzmpsi4p6l2id.onion:8333 +z4gvqymmj5jmg3dnao7k3vc245pzepqcrevzyh4s5xn5fcc6ape22sqd.onion:8333 +z6d7llz7maqgnooab4b6sogf7eszagl7higrvks2eykwogifwh3g6gid.onion:8333 +z6soa5spkni5ze6p2xg6lnehu3d7hnqn5ddiy34jtedwegf4syqnnaad.onion:8333 +z7f7lrvup632wkbzinvbkyyrhmguaf4fzt6zleiynk2vuvwdwt6ysnqd.onion:8333 +z7jtpaghm4na5po5p27fwbdh2aq3z4rkpwhhzemlrhsoefru2z5eyvyd.onion:8333 +zbj3cbl6t7oj2f6neql7bhpqlmimp6if5pbzmczji7l3uzyhpjcdeaqd.onion:8333 +zcdv7z3vetrpeo7souyie7u3vyy6w43cuautaogldhs4n2fitytsihyd.onion:8333 +zcw6xk4bfnl46fx3mkrmlnknbwnpssxsp7hlllj34fdrkutzts6yepyd.onion:8333 +zdoyjklv576u3w4paxyjb4jmdhx2id7xoqkd7zn3sspodasmmazbbpqd.onion:8333 +zike5s7xtcgp6frbjgzjhcz6gsvd3pghkl2jin3sdasrek3vy7l4ibqd.onion:8333 +zjy7d5t6rxljc64pvta3hqi6yxzfsnixtbslafamoxiiw6ptcmv3kfyd.onion:8333 +zkisbl3pv7dsz3szdawti4r7mhrhssqxf3uopi3vrib5dpt6u2u7ckyd.onion:8333 +zlcqdqzbgq4dhaspf6d6ehkhzjx5vwd72iospxxbdobjaxi4eo6ggoyd.onion:8333 +zmasrnq5gqwvg7yv62ts6pywn5pijnohgfqbo6jytiuikqdufp6v3zyd.onion:8333 +zogiy54bertbyhcs5vye7xjspuwvyggomehc6n2tbkwwdzyp3hwrhpyd.onion:8333 +zpx2tq27yviwzbpc4nyjfqtlhigoa5k7cwl676nft7bkajo6jq224hid.onion:8333 +ztcmlrstdkp7coheeolpwj75gp3imzgiw26xnoiucquwhxxcofao63ad.onion:8333 +zx5ttamyvo4vz3x666eqaez327buy7t2mxl6tfivbc46cdhrtkrrqpqd.onion:8333 diff --git a/contrib/seeds/nodes_test.txt b/contrib/seeds/nodes_test.txt index 98365ee505ced..5339f33f86d3f 100644 --- a/contrib/seeds/nodes_test.txt +++ b/contrib/seeds/nodes_test.txt @@ -1,11 +1,311 @@ -# List of fixed seed nodes for testnet - -# Onion nodes -thfsmmn2jbitcoin.onion -it2pj4f7657g3rhi.onion -nkf5e6b7pl4jfd4a.onion -4zhkir2ofl7orfom.onion -t6xj6wilh4ytvcs7.onion -i6y6ivorwakd7nw3.onion -ubqj4rsu3nqtxmtp.onion - +[fccb:248:11a6:1042:bca:1218:f7ce:7d3d]:18333 +2.87.72.235:18333 # AS6799 +3.253.163.14:18333 # AS16509 +5.182.4.106:18333 # AS49505 +5.188.119.196:18333 # AS49505 +5.189.175.92:18333 # AS51167 +5.252.21.232:18333 # AS50673 +5.255.97.91:18333 # AS60404 +5.255.99.130:18333 # AS60404 +8.222.228.217:18333 # AS45102 +18.143.108.213:18333 # AS16509 +23.93.89.199:18333 # AS46375 +23.94.96.134:18333 # AS23352 +23.137.57.100:18333 # AS16904 +31.220.99.65:18333 # AS12430 +35.192.191.229:18333 # AS396982 +35.200.201.79:18333 # AS15169 +35.233.152.219:18333 # AS15169 +37.27.58.134:18333 # AS3209 +37.27.116.87:18333 # AS3209 +40.118.228.187:18333 # AS8075 +43.247.184.50:18333 # AS4134 +45.77.25.14:18333 # AS20473 +45.129.182.59:18333 # AS47147 +47.254.127.252:18333 # AS45102 +51.77.42.234:18333 # AS16276 +51.79.82.75:18333 # AS16276 +51.250.75.48:18333 # AS200350 +52.174.187.17:18333 # AS8075 +62.72.27.212:18333 # AS32641 +62.168.65.42:18333 # AS5578 +62.210.207.63:18333 # AS12876 +62.210.222.73:18333 # AS12876 +65.108.39.171:18333 # AS24940 +66.85.145.134:18333 # AS12189 +66.135.29.243:18333 # AS20473 +66.183.0.205:18333 # AS395570 +67.4.82.9:18333 # AS209 +68.197.203.181:18333 # AS6128 +69.59.18.23:18333 # AS397444 +69.61.32.242:18333 # AS141518 +69.197.185.106:18333 # AS32097 +71.8.29.12:18333 # AS20115 +71.13.92.62:18333 # AS20115 +71.171.123.161:18333 # AS701 +72.46.129.50:18333 # AS53340 +73.22.9.231:18333 # AS7922 +73.53.42.105:18333 # AS7922 +74.213.175.99:18333 # AS21949 +77.163.221.171:18333 # AS1136 
+79.192.39.105:18333 # AS3320 +80.79.4.249:18333 # AS49981 +80.93.179.252:18333 # AS50340 +80.241.194.147:18333 # AS12964 +81.17.102.136:18333 # AS15598 +83.231.240.3:18333 # AS2914 +84.24.77.191:18333 # AS6830 +84.155.113.177:18333 # AS3320 +84.247.164.103:18333 # AS49788 +85.203.53.89:18333 # AS39351 +85.203.53.149:18333 # AS39351 +85.208.69.12:18333 # AS25091 +85.208.69.13:18333 # AS25091 +88.80.148.215:18333 # AS44901 +89.117.19.191:18333 # AS7029 +89.153.161.16:18333 # AS2860 +89.155.239.107:18333 # AS2860 +91.123.182.164:18333 # AS51648 +91.148.141.210:18333 # AS203380 +101.100.139.249:18333 # AS133579 +104.237.131.138:18333 # AS63949 +109.233.109.26:18333 # AS41798 +122.208.117.197:18333 # AS2519 +124.236.16.91:18333 # AS4134 +129.226.198.211:18333 # AS132203 +129.226.198.246:18333 # AS132203 +131.188.40.47:18333 # AS680 +132.226.61.215:18333 # AS31898 +135.84.136.157:18333 # AS174 +137.184.2.124:18333 # AS14061 +138.2.100.114:18333 # AS31898 +141.98.219.142:18333 # AS20326 +141.98.219.199:18333 # AS20326 +149.50.101.27:18333 # AS174 +149.154.176.47:18333 # AS3257 +152.53.17.53:18333 # AS81 +152.53.18.109:18333 # AS81 +158.178.228.41:18333 # AS39550 +160.80.11.66:18333 # AS137 +162.0.208.90:18333 # AS22612 +162.244.80.218:18333 # AS19624 +164.92.140.21:18333 # AS14061 +169.155.45.180:18333 # AS3356 +169.155.171.252:18333 # AS3356 +172.172.62.86:18333 # AS7018 +175.209.228.141:18333 # AS4766 +176.108.193.97:18333 # AS47914 +178.21.118.82:18333 # AS49544 +178.21.118.96:18333 # AS49544 +178.63.87.163:18333 # AS24940 +184.74.240.157:18333 # AS7843 +185.28.96.16:18333 # AS8220 +185.70.43.192:18333 # AS19905 +185.107.68.135:18333 # AS43350 +185.132.177.104:18333 # AS49981 +185.186.208.124:18333 # AS206428 +185.190.24.72:18333 # AS9002 +185.209.223.195:18333 # AS51167 +185.210.125.33:18333 # AS205671 +185.232.70.226:18333 # AS47147 +186.154.207.228:18333 # AS19429 +188.117.132.82:18333 # AS31242 +188.213.90.149:18333 # AS43414 +188.246.168.144:18333 # AS8595 +192.198.81.243:18333 # AS31863 +193.198.34.24:18333 # AS2108 +194.95.66.129:18333 # AS680 +194.110.169.133:18333 # AS9121 +194.233.91.153:18333 # AS141995 +195.123.244.121:18333 # AS24971 +195.179.230.180:18333 # AS12874 +198.58.102.18:18333 # AS63949 +203.132.94.196:18333 # AS38195 +205.209.119.150:18333 # AS19318 +206.204.104.7:18333 # AS6939 +[2001:19f0:4400:63c7:5400:4ff:fecc:fc1e]:18333 # AS20473 +[2001:41d0:303:2dbe::]:18333 # AS16276 +[2001:41d0:303:87b9::]:18333 # AS16276 +[2001:41d0:601:2000::1f70]:18333 # AS16276 +[2001:41d0:602:2dea::]:18333 # AS16276 +[2001:41d0:700:544c::]:18333 # AS16276 +[2001:41d0:800:1d55::]:18333 # AS16276 +[2001:41d0:800:3d4c::]:18333 # AS16276 +[2001:470:1f05:4e5::2020]:18333 # AS6939 +[2001:470:1f07:6f2:1671:5a1c:2433:b517]:18333 # AS6939 +[2001:470:1f07:6f2:2e58:b9ff:fe18:e093]:18333 # AS6939 +[2001:470:1f07:6f2:4dfe:d8fe:3dc7:63f]:18333 # AS6939 +[2001:470:1f07:6f2:6a34:c61d:97b1:c98c]:18333 # AS6939 +[2001:638:a000:4140::ffff:47]:18333 # AS680 +[2001:728:1000:402:546e:98ff:fe16:68c6]:18333 # AS2914 +[2001:bc8:1201:409:1618:77ff:fe5f:b12]:18333 # AS12876 +[2401:c080:1000:4cb2:3eec:efff:feb9:8604]:18333 # AS20473 +[2401:d002:3902:700:8708:37c4:e231:d3d8]:18333 # AS38195 +[2402:1f00:8101:713::]:18333 # AS16276 +[2600:3c00::f03c:91ff:fe5b:4cf3]:18333 # AS63949 +[2600:3c00::f03c:91ff:fe9e:7f03]:18333 # AS63949 +[2601:603:5300:83b7:0:ff:fe00:420a]:18333 # AS7922 +[2604:1380:4531:1700::5]:18333 # AS54825 +[2604:a880:2:d0::65:c001]:18333 # AS14061 +[2605:a143:2162:7067::1]:18333 # AS174 
+[2607:5300:203:540a::]:18333 # AS16276 +[2607:5300:60:85a9::]:18333 # AS16276 +[2620:6e:a000:1:43:43:43:43]:18333 # AS397444 +[2804:431:e038:cd01:aaa1:59ff:fe0d:44b8]:18333 # AS27699 +[2806:2f0:90a0:45fe:dd25:f6df:4741:4a3f]:18333 # AS3356 +[2a00:1298:8001::6542]:18333 # AS5578 +[2a01:4f8:173:230a::2]:18333 # AS24940 +[2a01:4f8:190:4026::2]:18333 # AS24940 +[2a01:4f8:231:1eaa::2]:18333 # AS24940 +[2a01:4f8:242:4c1b::2]:18333 # AS24940 +[2a01:4f9:1a:97e8::2]:18333 # AS24940 +[2a01:4f9:1a:9a57::2]:18333 # AS24940 +[2a01:4f9:2a:2ddd::2]:18333 # AS24940 +[2a01:4f9:3070:266e::2]:18333 # AS24940 +[2a01:4f9:3071:219d::2]:18333 # AS24940 +[2a01:4f9:3080:358c::2]:18333 # AS24940 +[2a01:e0a:3b3:1420:7ca0:3a9a:5cc3:b644]:18333 # AS12322 +[2a02:4780:10:402f::1]:18333 # AS47583 +[2a02:c202:2189:3702::1]:18333 # AS51167 +[2a02:c206:2075:3352::1]:18333 # AS51167 +[2a02:c206:2196:799::1]:18333 # AS51167 +[2a03:4000:2a:514::]:18333 # AS47147 +[2a03:cfc0:8000:2a::9532:651b]:18333 # AS201814 +[2a04:52c0:102:2219::1]:18333 # AS60404 +[2a04:52c0:102:49af::1]:18333 # AS60404 +[2a04:52c0:104:160c::1]:18333 # AS60404 +[2a05:d01c:392:c900:78ea:ba95:334c:1d7c]:18333 # AS16509 +2lsncqdflwk272dhydrxf7ikfy23ppnmm54dnynyxiym6lqf3wowrmqd.onion:18333 +36fwktckggarkclbpu2pumsdpck46ahe6cwpozd2gm6q7kgdqljclmad.onion:18333 +3rlrfxaj4sphzedozs27qrefuw6u3v2cqu2ctbrbvhsc3wn34iezxjad.onion:18333 +3tao6wnydihsqndjw5pqbko25rxk23wb53gtyv7vmbmc2j7vw74cr2ad.onion:18333 +44sgcv5dvpplt32enlneddyl4gd4z3tbezl2scedwccndyzrrp6lcgyd.onion:18333 +4w3f2mxe4ftodocermsazs3qlpo37igkdgne6ka2p6wnnrgwpzqw65yd.onion:18333 +67l6l2k7mqbl2btyvo5h5lki3kxcrgbunlk7brcloyaoaftbs5mnsuad.onion:18333 +6m7khktveimc35yjusgeprndw43grjexs5bpruwkogbh5lovv47drlid.onion:18333 +6rnqpqqcpllqhjoa4gwrtq5yi6fdch6uqzapdee2gq67gxgsl73v4uqd.onion:18333 +7ph7mrc24te57mvppajfkfj4mk7zuz4teukymt3wgpdpw6vpndeivtyd.onion:18333 +7poqajl6svz4vr3aqi7vdtar2t56crbrtj6yi75ydrt3ighyx7q6qvyd.onion:18333 +7zlqrihb5do5ebbmjwgspxigqfdmkfslkqtg2ngdc6ypsunzb4iootqd.onion:18333 +adjnsxivlsb5m4e4bfthvvccjfv7zkpyjyebvatlhyqycykd7ibjyoqd.onion:18333 +adstabjz7ec2y3jt4w2dvummowzv7g6m2f3kajeejffuaz7ojwj6epqd.onion:18333 +aesy6tfufadkut6flu2bsqgnw2422ur2ynjalguxlzuzuktg3zehttqd.onion:18333 +ailib3qk4brh4z2nvmmadoi6gbsw7fihfxtr7rnm4z5d6qcspr7ky2qd.onion:18333 +ayx35r2mhwydczzoqu7b6dl3sup4oht74sgnlrjkxybzh2hmfnaix3qd.onion:18333 +bek652lfs7mje46zauhfxtdh2zohe2jpbsmlhulusuzao27uwpwd76ad.onion:18333 +bkzo7mpxuar7rhsbiwdaxqcymixarcbdmb3sdaqtv6yb2svqttz2s6ad.onion:18333 +bwyudvl3if3r7ese2caymfnbxqztc3mgr2i5hurjttlabhl3dfgyzead.onion:18333 +bzn63lsmsuvzlg4uqadyylxaggdcrzkb56muw2b43ft5qkynvzfopbyd.onion:18333 +c7zrq2yqt6rtn5dxkczp5bv62k2jncv3mscoo24c24rljbplr2dvhsid.onion:18333 +cqhut2deas6savhop2yue7vebu77fd3einx36tlgxr7bb6vlef3lb2yd.onion:18333 +cv5z3izcu7fmtby4hyjliphp6bh2a3zmm27tfendycu34tw2tka72syd.onion:18333 +cyl7eujkxl3s3cbuhmouhkxt4cdfplpbha2z6j5adacdaq2rjj2dlzyd.onion:18333 +d63vf45ta4chrewnruyvypm7ybfxtut327crtq6qfyrvcwx434rli5qd.onion:18333 +ddsxs3zdkspqkcszmx74rkw7gqjgpz2ufb4qqh4dz5iaysoe3fvtpdyd.onion:18333 +duuwdfnlrudonwgdvvaud3i52zdxg2bjey7jmzskgcauiyejyffrh5qd.onion:18333 +dwb47cmqa2tjpmvjaear7gdcars2lez6niefhi4qf22qehtyta6577qd.onion:18333 +e27nbjkou2mkquoytjil27g5h34wkgjtrwupn3muc2c2b76dfdkvkiyd.onion:18333 +e7tkrf54ng3q5vcn5gn77zwjwm74lkfav4mwdux3pvon6yvqg3tf46qd.onion:18333 +eev3vuh55dymhn3z4lmhargcy2xzm6l4yyd5b4bgfstx2sf75ms6mdid.onion:18333 +eyjahrgj7qadzcotanxfbhshsjm7yj3p6tssiupiub7k5s55nxc5xwad.onion:18333 +f6awicobzr3x4fz3dnxbroarbuleq26jzltnl6wknkph2luxruwijiqd.onion:18333 
+fbimesnyhzubbzqc3uaufzkbyfmnkxvypoxaveaub7rzpzh2foxrn2yd.onion:18333 +fjcr55gg3vv7qik64sxnihheuvkfytpvzm5jmgz55cp6427yvyjsotid.onion:18333 +fpzycmcz4drh46fhcfdkgibnp7qgioo3upquxyjwk6zegirbcd4xqcid.onion:18333 +fqls7cvum7lpawad7n5zrvmrqwrd76quc5r7pblndaod3wqjlpfzxgid.onion:18333 +fwswqt2rbdbstx5kza4gb7aok3gezazfgtokadqeiwgkgdkwwdmmidyd.onion:18333 +fx7erwmkuhj7p2eiuisjwg3g3a3aw6uglepa47gwc76dlrwpc3nn5uid.onion:18333 +ghqbqp3dirya7espnj3m3kh6mhalqxjv75muum4seukopedtwysxc2qd.onion:18333 +gpvbbdsv673zfg2mpbzxexnmv3oqakgqooyvyzrlhu6mucylfob4whid.onion:18333 +gri2j4tudvoxaz26jbzmn4kloo5z7qsi3vfi3c5ok5tunm3yzmfgwyqd.onion:18333 +gsw6sn27quwf6u3swgra6o7lrp5qau6kt3ymuyoxgkth6wntzm2bjwyd.onion:18333 +gy6nih4pmp5esyvvnhlj6qvk7zkbjuoswkxffyiip3dbkvsfxwz5zcqd.onion:18333 +h4xr7yovy3kgrohdlb5zljtyweelyctfgf5q24nlhyoxbmjmbl7bfjyd.onion:18333 +h4yusgljraifanij4pxkw4qks4iilqbq35absi24cinuvuym56ge55ad.onion:18333 +hba3mphxhpuy4x5gjqyq64bwtlzlfuscxc7n5lh4k4oiwrgcz5ujwtid.onion:18333 +hj2txlxajdlh7jsfwqxtp3cltlptsr33ctjkbxhbzbyirkmcpfhsnyad.onion:18333 +hmtbnymouaimhnxazujw7i5b7hqdixrlibdyn3td4bdw4sj7sbnb7fqd.onion:18333 +htkiqaqoql7pjstenmw6v4blm6l57d6hl6mewxkmfi3qumozflx3gzyd.onion:18333 +hvbmmzvqrpgps2x5u4ip4ksf3e5m2fneac754gtnhjn2rsevni6cz3ad.onion:18333 +hvtxymvdb55u7lhdw6775akqy46inunl3uup27gnsxm2bqmv55wbacqd.onion:18333 +i5fjp7ggx62zlfkibzn7s6glpjp3h2ypfom3lmqixig7qbdgej35wfyd.onion:18333 +i5gphw2d224tniqkjebxdwz5ygbbo5gcushoyevv7x7o454b6qlrzeid.onion:18333 +ihalgm3hs54h7aq37hbryaxcdlowmlms54i4cz6hqbz4n2qjjiw5uwid.onion:18333 +ipbzs2lbe7lab2xaikvkdkwxian6t3nakaoltwbshpevbszdvoyyjiad.onion:18333 +iysx2dl4tpojiclh32iyhel6z7h227c7o7buzk5wuqw6qhuytvsu66yd.onion:18333 +jbgve7zi4vx5l7654ih4nzutspu36dvs66xlegciur2cpym4gdjigbyd.onion:18333 +jjfuyj7krgzkmpxvn3b2j2hwlzkmze3ezy3ifwk7dnswwawgmzqhjrqd.onion:18333 +jnphftehkfstwbko34idxlpo5fdw6lromkmeukxh3xclthlstehpg4ad.onion:18333 +jpujgmcr2ftklxrr7uwxh3jg43uqatys7wgosfbpfbx5txw2nz7wgnid.onion:18333 +jsc4frvvnl2d3bhzyofsc72xpztgm23nl4fnb4dwkzsxr6fhij2q5iyd.onion:18333 +jun6gdmbgasr57bhr4zs3dfhc6pzpaw27nqb4dadbai7yrbq55zskeid.onion:18333 +kamanho5clcpn5l3sc6ih3vb4skmfhgxw3h2mgs7e5idfezu5dw5oxyd.onion:18333 +kejyzttreyd7nj2sxx25nk4qwgxtuqybylxuqrjroe5mbhlr532dplad.onion:18333 +knkkqbxhdit4xilksb5ys3qnqv63tah5uvu3n2imu4bycipu354ambyd.onion:18333 +kpsy7bbqxiotk55aacjt2hiaj6h5pxjjvsb63hgos4ayfzwrmnsqgsad.onion:18333 +kwjxlauwjtecjfsiwopbl5pvn5n6z5rz76uk6osmlurd3uyuymcw7aid.onion:18333 +m27hd27n7dmnygddnp5bnf6vl5uks77jrq2uhtxxvgewclzjwsixz3ad.onion:18333 +m33zm5qtzgaf65ahaxbom75zoehyzr3lbqf2obtpzzzjebv7yd73fsyd.onion:18333 +mjs6pdnd5h4ycmsgruww4crsesqzdw3femdinsi6oghmjld7loycomid.onion:18333 +n23anw4zb7f3paroac626oc2sqy3yn5kjfpcqvb3lvdyx7dzzpovd3id.onion:18333 +n4e6fp6xoyo2glsafjwbaq3wirhldpjw4mskn5lzkm4o5lhh5n5k7uqd.onion:18333 +noyzwtpfqybyhemrmxzd7nf4il5mhsolupp7xtmrjosat7qdmq4xgrad.onion:18333 +oln7ybci53wk4g5n42nipyixvyjxbludsbrfsmhnirb6tk7ovlikd5id.onion:18333 +oobwtn5csi4auevb2ns2b5rrjnxmhlzdmay4zxtzvkj7tayrgwfddiqd.onion:18333 +oond3qmndkbdg76ryyjztbmsmvoj43u64gjhr2nvqriujyteeu43irqd.onion:18333 +oqxivxpit74wsoptsb6kr3k2xeqpzfdozvw4m2wvvujrflxo4ykmzmqd.onion:18333 +oxkyujnjbl6k7kcxactyf4akp3ejtftnbmztk6utxt4zy4fhhtk3x5qd.onion:18333 +p2oitystyas5pxeo262mox6aof4tukybnl7cj7fcwoshqesvfk2yzhad.onion:18333 +pj2rxq2sfbwtsqqcbnfnwkg36d72yce56qmp2sjjmb7hvzpzai3ndead.onion:18333 +pu3zcecujzbsdaogf4qgo3ue67livoza62awlffjnqnribgmvyqfoxad.onion:18333 +q5qox756id5nfdu6ht4dghc35jezls4bgjcdnrds3pwndvdx32zmwlad.onion:18333 
+qe2jbe447he6panfvpyqhyntf7346gmuf55bxrmdzggmgwyjsyknhxyd.onion:18333 +qf2taqwtz3pstt73hgbrfvtilgecsc556lizkzk2cisdqi7lrnst5byd.onion:18333 +qiep4hvuovedbbc36hl7nwslwi6ah6uw4nnseyjdtc73cc5rfdauvnad.onion:18333 +qqzemertqrw5qdfakjoay35weu6qshr2l5h4smlnqafi3zqfztx2mhyd.onion:18333 +qzx5f2ig2rk2ssrbopz2zlljq5mkeh3izhhx3t3cepse5qnhvpo2tzid.onion:18333 +r2bdfkchj2z6nrb56alubzmb4twzdncckbcxsvkszjc6mrinibakj3ad.onion:18333 +rfi2hdpjhhknalltbwdvyuhskh5yjs66rqzpy6ley7xtu4kvbeob6fqd.onion:18333 +rra7q7x3euc3ozyealiv3tsywrz34l466faxohmttwtf5dts73musjid.onion:18333 +sf2fkr6hgfezefxaqfqqn5rengrlkvwytrzepsavwotehp2lkvuys6id.onion:18333 +shph2uqcvbe2hc25q3ln73qsche5ddqb4u5tkmgqv73aicrmmkkj4kid.onion:18333 +slpigm5ep4zan3a65bybne67phjiyesbuk5s3bodefuhnx2cfdtit6qd.onion:18333 +sxm6kwrdp6f2ggaozhrl47xuy3es3sgm3ivppmh4xm3hfmetvwbyrcad.onion:18333 +t3z7sscjdbnsydohxcs5nmoxfi35p6zv4jv5jsltsl4datztkefumbid.onion:18333 +tdnctk4lx7jnklojisdczp6h4b4ejtuetyh5cbemzmcno7vvo7whvaad.onion:18333 +trxouvgcwrc3rkewpzvkxbfvpltfwmgl37woiujj4vn63zwhvk4xwbid.onion:18333 +tyo6jvqjh34r2b7i477xlcrwt7rq2fwuwpax5s3nw3tfbtwbox7aakyd.onion:18333 +u3znfxh6yjvclu3l646zzmgclqkmmrrrwtb5urtr4wd6svhrhdgrdxid.onion:18333 +uc25t5mdmnpxn52qib3cps4qyc4ghbntitetzia2vkqaawivyqzdhkad.onion:18333 +ud55gevhzhhcjsaghfzqq7wywxmw7n6syhwsds2uw4j7oj2oh5zxb6qd.onion:18333 +ueyykmnyqk2bnmbgvmqrb4jfbjvpgeew6cuq4gaqe7v2oiyxaubkx4qd.onion:18333 +uw7if6wybpzzwrawp6rdtkcript7vmuo4bptlg4den7t2o733j4hwrid.onion:18333 +vctlwaqgmu53eutz2hewuakcipfgtyljsd7czut4dd62xr3rp6fqezad.onion:18333 +vqnkdmpmecc5kondj32jwbemmruhmd5gkmfngkscetibzozryp4elkad.onion:18333 +w2nnsjp3o5ndh4reorwsudmkjvy3bou5gt7fmusvp3ne2fojgypapmyd.onion:18333 +wnxgjgjgplv5iu4mssyuunycvku4qnqr5t4q6cfdt47k7uwrfifuirad.onion:18333 +wou7tjvuqmql27mqmxbtsrrdxn4dqbti3jbpnklkruqjrztty2jmj4qd.onion:18333 +wpsahjbejxehpz772kzxonj777pejol2akcicduqov2r7ktm2b6l5pid.onion:18333 +wui5m2y5b7cms36ai6shzyqsg6qteejwmxsf26nyms6vcykxbvmh4uad.onion:18333 +wve6wksecq6eeu62phkkrjo5vydreenazzp5c6gttto5ynuyqjevddad.onion:18333 +x4wvxqotarjnii6ue4tfvqt5rxlc27p2c5imkipsu27oprfxstmdjgyd.onion:18333 +xafcyh6mxwebvcsgw4wcbuw7n4v7vbqhcznhwqcpqcu6dww2mjuw4nad.onion:18333 +xmzegggz4kmjmyip2dogvhj3a2m3s4mkimdjdqkwql5n3crqetddabid.onion:18333 +xr4vm6rxolszjw7m73mii35ppnyt7aqp7w53yyg7smcpbo2mg3fazpqd.onion:18333 +xr6damsupgddwwziixaeaogj67hnjdpjtey5bcnhs5ydppul4w5hnjyd.onion:18333 +xuhmq42du7dckfophyr2rmf75aqkp3bk6y3wyyc6jxw7jmpp66zlm3id.onion:18333 +y4eud6iabao4666vcq3qch6kvg6lg5q5hazzetk4jnwr6vcdyjdv2yyd.onion:18333 +y5h652jz6sgmb4bkcfnfxnj24gnczqf7lhd5lqbvdhf5mpll4gye5kid.onion:18333 +ya6s7ov7myixz3ql23u45hty7moxo3r4d26qainw55z656vtau676xid.onion:18333 +yda7kwpii33j2qpq32ftf6lp22znknswipjwaccvsqj7l337jvfesnid.onion:18333 +ykaknq35wnwbobp4hgsxdd3ve575bsflst6xppo5b4wc44fp3xpqavad.onion:18333 +yqlk3ooxjpjd6u7b4kabe3gidclf25yuqbcpbkwnedmljrmxm3smqrid.onion:18333 +zcsn3j6aswnrf56xj2n5jn4tlwmyoq4benjn5ujcgz46co2y5tm642id.onion:18333 +zefnna2a3ga4ez2nutvypma7my35prw3ycinbqwva7v4pf3aurqhjcyd.onion:18333 +zevf5zwmkq2mvwuwyfavq62hjapkcdfslewiyi7mlrbx7vzidtij3xad.onion:18333 +zhiju2obxifqpjbcm6xtlgjdbof7jhoctvw3x57vhiftstb5hi3gmsqd.onion:18333 +zii3p3plpqt34xxd4mr7ofvvixnipt7ikohgvi3gtjlnzofjj37byiid.onion:18333 +zkqddzui5pkrqvjj2zwsf5kln7stlbmw5pmn6ut46fobvzyv2sn2ryid.onion:18333 +zmvizz7fd5hdue6wt3lwqumd6qwt4ijymmmotfzh75curq3mzjm53hyd.onion:18333 +zmxlrzoxg4fmso6l2xuq5tdxmlyakdqellzujh3a23iuzg4zlatnogqd.onion:18333 +zv5egkfmdpunhdm3whdaxgzpiapbkerfqd24juq647hmzhec7q7uxlid.onion:18333 diff --git a/contrib/seeds/nodes_testnet4.txt 
b/contrib/seeds/nodes_testnet4.txt new file mode 100644 index 0000000000000..0be4b9ee3ddd1 --- /dev/null +++ b/contrib/seeds/nodes_testnet4.txt @@ -0,0 +1,9 @@ +18.189.156.102:48333 # AS16509 +18.201.207.55:48333 # AS16509 +51.158.248.8:48333 # AS12876 +57.128.176.163:48333 # AS8220 +82.67.102.15:48333 # AS12322 +88.99.248.50:48333 # AS24940 +95.217.73.162:48333 # AS24940 +103.99.171.212:48333 # AS54415 +103.165.192.210:48333 # AS54415 diff --git a/contrib/seeds/suspicious_hosts.txt b/contrib/seeds/suspicious_hosts.txt deleted file mode 100644 index 13385cc816209..0000000000000 --- a/contrib/seeds/suspicious_hosts.txt +++ /dev/null @@ -1,16 +0,0 @@ -130.211.129.106 -148.251.238.178 -176.9.46.6 -178.63.107.226 -54.173.72.127 -54.174.10.182 -54.183.64.54 -54.194.231.211 -54.66.214.167 -54.66.220.137 -54.67.33.14 -54.77.251.214 -54.94.195.96 -54.94.200.247 -83.81.130.26 -88.198.17.7 \ No newline at end of file diff --git a/contrib/shell/git-utils.bash b/contrib/shell/git-utils.bash new file mode 100644 index 0000000000000..37bac1f38d8f0 --- /dev/null +++ b/contrib/shell/git-utils.bash @@ -0,0 +1,14 @@ +#!/usr/bin/env bash + +git_root() { + git rev-parse --show-toplevel 2> /dev/null +} + +git_head_version() { + local recent_tag + if recent_tag="$(git describe --exact-match HEAD 2> /dev/null)"; then + echo "${recent_tag#v}" + else + git rev-parse --short=12 HEAD + fi +} diff --git a/contrib/shell/realpath.bash b/contrib/shell/realpath.bash new file mode 100644 index 0000000000000..389b77b56266d --- /dev/null +++ b/contrib/shell/realpath.bash @@ -0,0 +1,71 @@ +#!/usr/bin/env bash + +# Based on realpath.sh written by Michael Kropat +# Found at: https://github.com/mkropat/sh-realpath/blob/65512368b8155b176b67122aa395ac580d9acc5b/realpath.sh + +bash_realpath() { + canonicalize_path "$(resolve_symlinks "$1")" +} + +resolve_symlinks() { + _resolve_symlinks "$1" +} + +_resolve_symlinks() { + _assert_no_path_cycles "$@" || return + + local dir_context path + if path=$(readlink -- "$1"); then + dir_context=$(dirname -- "$1") + _resolve_symlinks "$(_prepend_dir_context_if_necessary "$dir_context" "$path")" "$@" + else + printf '%s\n' "$1" + fi +} + +_prepend_dir_context_if_necessary() { + if [ "$1" = . ]; then + printf '%s\n' "$2" + else + _prepend_path_if_relative "$1" "$2" + fi +} + +_prepend_path_if_relative() { + case "$2" in + /* ) printf '%s\n' "$2" ;; + * ) printf '%s\n' "$1/$2" ;; + esac +} + +_assert_no_path_cycles() { + local target path + + target=$1 + shift + + for path in "$@"; do + if [ "$path" = "$target" ]; then + return 1 + fi + done +} + +canonicalize_path() { + if [ -d "$1" ]; then + _canonicalize_dir_path "$1" + else + _canonicalize_file_path "$1" + fi +} + +_canonicalize_dir_path() { + (cd "$1" 2>/dev/null && pwd -P) +} + +_canonicalize_file_path() { + local dir file + dir=$(dirname -- "$1") + file=$(basename -- "$1") + (cd "$dir" 2>/dev/null && printf '%s/%s\n' "$(pwd -P)" "$file") +} diff --git a/contrib/signet/README.md b/contrib/signet/README.md new file mode 100644 index 0000000000000..5fcd8944e6468 --- /dev/null +++ b/contrib/signet/README.md @@ -0,0 +1,82 @@ +Contents +======== +This directory contains tools related to Signet, both for running a Signet yourself and for using one. + +getcoins.py +=========== + +A script to call a faucet to get Signet coins. + +Syntax: `getcoins.py [-h|--help] [-c|--cmd=] [-f|--faucet=] [-a|--addr=] [-p|--password=] [--] []` + +* `--cmd` lets you customize the bitcoin-cli path. 
By default it will look for it in the PATH +* `--faucet` lets you specify which faucet to use; the faucet is assumed to be compatible with https://github.com/kallewoof/bitcoin-faucet +* `--addr` lets you specify a Signet address; by default, the address must be a bech32 address. This and `--cmd` above complement each other (i.e. you do not need `bitcoin-cli` if you use `--addr`) +* `--password` lets you specify a faucet password; this is handy if you are in a classroom and set up your own faucet for your students; (above faucet does not limit by IP when password is enabled) + +If using the default network, invoking the script with no arguments should be sufficient under normal +circumstances, but if multiple people are behind the same IP address, the faucet will by default only +accept one claim per day. See `--password` above. + +miner +===== + +You will first need to pick a difficulty target. Since signet chains are primarily protected by a signature rather than proof of work, there is no need to spend as much energy as possible mining, however you may wish to choose to spend more time than the absolute minimum. The calibrate subcommand can be used to pick a target appropriate for your hardware, eg: + + MINER="./contrib/signet/miner" + GRIND="./build/src/bitcoin-util grind" + $MINER calibrate --grind-cmd="$GRIND" + nbits=1e00f403 for 25s average mining time + +It defaults to estimating an nbits value resulting in 25s average time to find a block, but the --seconds parameter can be used to pick a different target, or the --nbits parameter can be used to estimate how long it will take for a given difficulty. + +To mine the first block in your custom chain, you can run: + + CLI="./build/src/bitcoin-cli -conf=mysignet.conf" + ADDR=$($CLI -signet getnewaddress) + NBITS=1e00f403 + $MINER --cli="$CLI" generate --grind-cmd="$GRIND" --address="$ADDR" --nbits=$NBITS + +This will mine a single block with a backdated timestamp designed to allow 100 blocks to be mined as quickly as possible, so that it is possible to do transactions. + +Adding the --ongoing parameter will then cause the signet miner to create blocks indefinitely. It will pick the time between blocks so that difficulty is adjusted to match the provided --nbits value. + + $MINER --cli="$CLI" generate --grind-cmd="$GRIND" --address="$ADDR" --nbits=$NBITS --ongoing + +Other options +------------- + +The --debug and --quiet options are available to control how noisy the signet miner's output is. Note that the --debug, --quiet and --cli parameters must all appear before the subcommand (generate, calibrate, etc) if used. + +Instead of specifying --ongoing, you can specify --max-blocks=N to mine N blocks and stop. + +The --set-block-time option is available to manually move timestamps forward or backward (subject to the rules that blocktime must be greater than mediantime, and dates can't be more than two hours in the future). It can only be used when mining a single block (ie, not when using --ongoing or --max-blocks greater than 1). + +Instead of using a single address, a ranged descriptor may be provided via the --descriptor parameter, with the reward for the block at height H being sent to the H'th address generated from the descriptor. + +Instead of calculating a specific nbits value, --min-nbits can be specified instead, in which case the minimum signet difficulty will be targeted. Signet's minimum difficulty corresponds to --nbits=1e0377ae. + +By default, the signet miner mines blocks at fixed intervals with minimal variation. 
If you want blocks to appear more randomly, as they do in mainnet, specify the --poisson option. + +Using the --multiminer parameter allows mining to be distributed amongst multiple miners. For example, if you have 3 miners and want to share blocks between them, specify --multiminer=1/3 on one, --multiminer=2/3 on another, and --multiminer=3/3 on the last one. If you want one to do 10% of blocks and two others to do 45% each, --multiminer=1-10/100 on the first, and --multiminer=11-55 and --multiminer=56-100 on the others. Note that which miner mines which block is determined by the previous block hash, so occasional runs of one miner doing many blocks in a row is to be expected. + +When --multiminer is used, if a miner is down and does not mine a block within five minutes of when it is due, the other miners will automatically act as redundant backups ensuring the chain does not halt. The --backup-delay parameter can be used to change how long a given miner waits, allowing one to be the primary backup (after five minutes) and another to be the secondary backup (after six minutes, eg). + +The --standby-delay parameter can be used to make a backup miner that only mines if a block doesn't arrive on time. This can be combined with --multiminer if desired. Setting --standby-delay also prevents the first block from being mined immediately. + +Advanced usage +-------------- + +The process generate follows internally is to get a block template, convert that into a PSBT, sign the PSBT, move the signature from the signed PSBT into the block template's coinbase, grind proof of work for the block, and then submit the block to the network. + +These steps can instead be done explicitly: + + $CLI -signet getblocktemplate '{"rules": ["signet","segwit"]}' | + $MINER --cli="$CLI" genpsbt --address="$ADDR" | + $CLI -signet -stdin walletprocesspsbt | + jq -r .psbt | + $MINER --cli="$CLI" solvepsbt --grind-cmd="$GRIND" | + $CLI -signet -stdin submitblock + +This is intended to allow you to replace part of the pipeline for further experimentation (eg, to sign the block with a hardware wallet). + diff --git a/contrib/signet/getcoins.py b/contrib/signet/getcoins.py new file mode 100755 index 0000000000000..19751ae2695f9 --- /dev/null +++ b/contrib/signet/getcoins.py @@ -0,0 +1,158 @@ +#!/usr/bin/env python3 +# Copyright (c) 2020-2022 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. + +import argparse +import io +import requests +import subprocess +import sys +import xml.etree.ElementTree + +DEFAULT_GLOBAL_FAUCET = 'https://signetfaucet.com/claim' +DEFAULT_GLOBAL_CAPTCHA = 'https://signetfaucet.com/captcha' +GLOBAL_FIRST_BLOCK_HASH = '00000086d6b2636cb2a392d45edc4ec544a10024d30141c9adf4bfd9de533b53' + +# braille unicode block +BASE = 0x2800 +BIT_PER_PIXEL = [ + [0x01, 0x08], + [0x02, 0x10], + [0x04, 0x20], + [0x40, 0x80], +] +BW = 2 +BH = 4 + +# imagemagick or compatible fork (used for converting SVG) +CONVERT = 'convert' + +class PPMImage: + ''' + Load a PPM image (Pillow-ish API). 
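+    Only the binary "P6" format with a maximum color value of 255 is accepted;
+    any other header raises ValueError.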
+ ''' + def __init__(self, f): + if f.readline() != b'P6\n': + raise ValueError('Invalid ppm format: header') + line = f.readline() + (width, height) = (int(x) for x in line.rstrip().split(b' ')) + if f.readline() != b'255\n': + raise ValueError('Invalid ppm format: color depth') + data = f.read(width * height * 3) + stride = width * 3 + self.size = (width, height) + self._grid = [[tuple(data[stride * y + 3 * x:stride * y + 3 * (x + 1)]) for x in range(width)] for y in range(height)] + + def getpixel(self, pos): + return self._grid[pos[1]][pos[0]] + +def print_image(img, threshold=128): + '''Print black-and-white image to terminal in braille unicode characters.''' + x_blocks = (img.size[0] + BW - 1) // BW + y_blocks = (img.size[1] + BH - 1) // BH + + for yb in range(y_blocks): + line = [] + for xb in range(x_blocks): + ch = BASE + for y in range(BH): + for x in range(BW): + try: + val = img.getpixel((xb * BW + x, yb * BH + y)) + except IndexError: + pass + else: + if val[0] < threshold: + ch |= BIT_PER_PIXEL[y][x] + line.append(chr(ch)) + print(''.join(line)) + +parser = argparse.ArgumentParser(description='Script to get coins from a faucet.', epilog='You may need to start with double-dash (--) when providing bitcoin-cli arguments.') +parser.add_argument('-c', '--cmd', dest='cmd', default='bitcoin-cli', help='bitcoin-cli command to use') +parser.add_argument('-f', '--faucet', dest='faucet', default=DEFAULT_GLOBAL_FAUCET, help='URL of the faucet') +parser.add_argument('-g', '--captcha', dest='captcha', default=DEFAULT_GLOBAL_CAPTCHA, help='URL of the faucet captcha, or empty if no captcha is needed') +parser.add_argument('-a', '--addr', dest='addr', default='', help='Bitcoin address to which the faucet should send') +parser.add_argument('-p', '--password', dest='password', default='', help='Faucet password, if any') +parser.add_argument('-n', '--amount', dest='amount', default='0.001', help='Amount to request (0.001-0.1, default is 0.001)') +parser.add_argument('-i', '--imagemagick', dest='imagemagick', default=CONVERT, help='Path to imagemagick convert utility') +parser.add_argument('bitcoin_cli_args', nargs='*', help='Arguments to pass on to bitcoin-cli (default: -signet)') + +args = parser.parse_args() + +if args.bitcoin_cli_args == []: + args.bitcoin_cli_args = ['-signet'] + + +def bitcoin_cli(rpc_command_and_params): + argv = [args.cmd] + args.bitcoin_cli_args + rpc_command_and_params + try: + return subprocess.check_output(argv).strip().decode() + except FileNotFoundError: + raise SystemExit(f"The binary {args.cmd} could not be found") + except subprocess.CalledProcessError: + cmdline = ' '.join(argv) + raise SystemExit(f"-----\nError while calling {cmdline} (see output above).") + + +if args.faucet.lower() == DEFAULT_GLOBAL_FAUCET: + # Get the hash of the block at height 1 of the currently active signet chain + curr_signet_hash = bitcoin_cli(['getblockhash', '1']) + if curr_signet_hash != GLOBAL_FIRST_BLOCK_HASH: + raise SystemExit('The global faucet cannot be used with a custom Signet network. Please use the global signet or setup your custom faucet to use this functionality.\n') +else: + # For custom faucets, don't request captcha by default. 
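+    # (passing -g/--captcha explicitly still enables the captcha step for a custom faucet)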
+ if args.captcha == DEFAULT_GLOBAL_CAPTCHA: + args.captcha = '' + +if args.addr == '': + # get address for receiving coins + args.addr = bitcoin_cli(['getnewaddress', 'faucet', 'bech32']) + +data = {'address': args.addr, 'password': args.password, 'amount': args.amount} + +# Store cookies +# for debugging: print(session.cookies.get_dict()) +session = requests.Session() + +if args.captcha != '': # Retrieve a captcha + try: + res = session.get(args.captcha) + res.raise_for_status() + except requests.exceptions.RequestException as e: + raise SystemExit(f"Unexpected error when contacting faucet: {e}") + + # Size limitation + svg = xml.etree.ElementTree.fromstring(res.content) + if svg.attrib.get('width') != '150' or svg.attrib.get('height') != '50': + raise SystemExit("Captcha size doesn't match expected dimensions 150x50") + + # Convert SVG image to PPM, and load it + try: + rv = subprocess.run([args.imagemagick, 'svg:-', '-depth', '8', 'ppm:-'], input=res.content, check=True, capture_output=True) + except FileNotFoundError: + raise SystemExit(f"The binary {args.imagemagick} could not be found. Please make sure ImageMagick (or a compatible fork) is installed and that the correct path is specified.") + + img = PPMImage(io.BytesIO(rv.stdout)) + + # Terminal interaction + print_image(img) + print(f"Captcha from URL {args.captcha}") + data['captcha'] = input('Enter captcha: ') + +try: + res = session.post(args.faucet, data=data) +except Exception: + raise SystemExit(f"Unexpected error when contacting faucet: {sys.exc_info()[0]}") + +# Display the output as per the returned status code +if res: + # When the return code is in between 200 and 400 i.e. successful + print(res.text) +elif res.status_code == 404: + print('The specified faucet URL does not exist. Please check for any server issues/typo.') +elif res.status_code == 429: + print('The script does not allow for repeated transactions as the global faucet is rate-limitied to 1 request/IP/day. You can access the faucet website to get more coins manually') +else: + print(f'Returned Error Code {res.status_code}\n{res.text}\n') + print('Please check the provided arguments for their validity and/or any possible typo.') diff --git a/contrib/signet/miner b/contrib/signet/miner new file mode 100755 index 0000000000000..3c90fe96a1d8b --- /dev/null +++ b/contrib/signet/miner @@ -0,0 +1,571 @@ +#!/usr/bin/env python3 +# Copyright (c) 2020 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. 
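+
+# High-level flow of the "generate" subcommand (see contrib/signet/README.md):
+# fetch a block template, convert it into a PSBT, have the wallet sign it,
+# move the signature into the coinbase, grind proof of work, then submit the
+# block. The "genpsbt" and "solvepsbt" subcommands expose the individual steps
+# of that pipeline.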
+ +import argparse +import json +import logging +import math +import os +import re +import struct +import sys +import time +import subprocess + +PATH_BASE_CONTRIB_SIGNET = os.path.abspath(os.path.dirname(os.path.realpath(__file__))) +PATH_BASE_TEST_FUNCTIONAL = os.path.abspath(os.path.join(PATH_BASE_CONTRIB_SIGNET, "..", "..", "test", "functional")) +sys.path.insert(0, PATH_BASE_TEST_FUNCTIONAL) + +from test_framework.blocktools import get_witness_script, script_BIP34_coinbase_height # noqa: E402 +from test_framework.messages import CBlock, CBlockHeader, COutPoint, CTransaction, CTxIn, CTxInWitness, CTxOut, from_binary, from_hex, ser_string, ser_uint256, tx_from_hex # noqa: E402 +from test_framework.psbt import PSBT, PSBTMap, PSBT_GLOBAL_UNSIGNED_TX, PSBT_IN_FINAL_SCRIPTSIG, PSBT_IN_FINAL_SCRIPTWITNESS, PSBT_IN_NON_WITNESS_UTXO, PSBT_IN_SIGHASH_TYPE # noqa: E402 +from test_framework.script import CScript, CScriptOp # noqa: E402 + +logging.basicConfig( + format='%(asctime)s %(levelname)s %(message)s', + level=logging.INFO, + datefmt='%Y-%m-%d %H:%M:%S') + +SIGNET_HEADER = b"\xec\xc7\xda\xa2" +PSBT_SIGNET_BLOCK = b"\xfc\x06signetb" # proprietary PSBT global field holding the block being signed +RE_MULTIMINER = re.compile(r"^(\d+)(-(\d+))?/(\d+)$") + +def signet_txs(block, challenge): + # assumes signet solution has not been added yet so does not need + # to be removed + + txs = block.vtx[:] + txs[0] = CTransaction(txs[0]) + txs[0].vout[-1].scriptPubKey += CScriptOp.encode_op_pushdata(SIGNET_HEADER) + hashes = [] + for tx in txs: + tx.rehash() + hashes.append(ser_uint256(tx.sha256)) + mroot = block.get_merkle_root(hashes) + + sd = b"" + sd += block.nVersion.to_bytes(4, "little", signed=True) + sd += ser_uint256(block.hashPrevBlock) + sd += ser_uint256(mroot) + sd += block.nTime.to_bytes(4, "little") + + to_spend = CTransaction() + to_spend.version = 0 + to_spend.nLockTime = 0 + to_spend.vin = [CTxIn(COutPoint(0, 0xFFFFFFFF), b"\x00" + CScriptOp.encode_op_pushdata(sd), 0)] + to_spend.vout = [CTxOut(0, challenge)] + to_spend.rehash() + + spend = CTransaction() + spend.version = 0 + spend.nLockTime = 0 + spend.vin = [CTxIn(COutPoint(to_spend.sha256, 0), b"", 0)] + spend.vout = [CTxOut(0, b"\x6a")] + + return spend, to_spend + +def decode_psbt(b64psbt): + psbt = PSBT.from_base64(b64psbt) + + assert len(psbt.tx.vin) == 1 + assert len(psbt.tx.vout) == 1 + assert PSBT_SIGNET_BLOCK in psbt.g.map + + scriptSig = psbt.i[0].map.get(PSBT_IN_FINAL_SCRIPTSIG, b"") + scriptWitness = psbt.i[0].map.get(PSBT_IN_FINAL_SCRIPTWITNESS, b"\x00") + + return from_binary(CBlock, psbt.g.map[PSBT_SIGNET_BLOCK]), ser_string(scriptSig) + scriptWitness + +def finish_block(block, signet_solution, grind_cmd): + block.vtx[0].vout[-1].scriptPubKey += CScriptOp.encode_op_pushdata(SIGNET_HEADER + signet_solution) + block.vtx[0].rehash() + block.hashMerkleRoot = block.calc_merkle_root() + if grind_cmd is None: + block.solve() + else: + headhex = CBlockHeader.serialize(block).hex() + cmd = grind_cmd.split(" ") + [headhex] + newheadhex = subprocess.run(cmd, stdout=subprocess.PIPE, input=b"", check=True).stdout.strip() + newhead = from_hex(CBlockHeader(), newheadhex.decode('utf8')) + block.nNonce = newhead.nNonce + block.rehash() + return block + +def generate_psbt(tmpl, reward_spk, *, blocktime=None, poolid=None): + signet_spk = tmpl["signet_challenge"] + signet_spk_bin = bytes.fromhex(signet_spk) + + scriptSig = script_BIP34_coinbase_height(tmpl["height"]) + if poolid is not None: + scriptSig = CScript(b"" + scriptSig + 
CScriptOp.encode_op_pushdata(poolid)) + + cbtx = CTransaction() + cbtx.vin = [CTxIn(COutPoint(0, 0xffffffff), scriptSig, 0xffffffff)] + cbtx.vout = [CTxOut(tmpl["coinbasevalue"], reward_spk)] + cbtx.vin[0].nSequence = 2**32-2 + cbtx.rehash() + + block = CBlock() + block.nVersion = tmpl["version"] + block.hashPrevBlock = int(tmpl["previousblockhash"], 16) + block.nTime = tmpl["curtime"] if blocktime is None else blocktime + if block.nTime < tmpl["mintime"]: + block.nTime = tmpl["mintime"] + block.nBits = int(tmpl["bits"], 16) + block.nNonce = 0 + block.vtx = [cbtx] + [tx_from_hex(t["data"]) for t in tmpl["transactions"]] + + witnonce = 0 + witroot = block.calc_witness_merkle_root() + cbwit = CTxInWitness() + cbwit.scriptWitness.stack = [ser_uint256(witnonce)] + block.vtx[0].wit.vtxinwit = [cbwit] + block.vtx[0].vout.append(CTxOut(0, bytes(get_witness_script(witroot, witnonce)))) + + signme, spendme = signet_txs(block, signet_spk_bin) + + psbt = PSBT() + psbt.g = PSBTMap( {PSBT_GLOBAL_UNSIGNED_TX: signme.serialize(), + PSBT_SIGNET_BLOCK: block.serialize() + } ) + psbt.i = [ PSBTMap( {PSBT_IN_NON_WITNESS_UTXO: spendme.serialize(), + PSBT_IN_SIGHASH_TYPE: bytes([1,0,0,0])}) + ] + psbt.o = [ PSBTMap() ] + return psbt.to_base64() + +def get_poolid(args): + if args.poolid is not None: + return args.poolid.encode('utf8') + elif args.poolnum is not None: + return b"/signet:%d/" % (args.poolnum) + else: + return None + +def get_reward_addr_spk(args, height): + assert args.address is not None or args.descriptor is not None + + if hasattr(args, "reward_spk"): + return args.address, args.reward_spk + + if args.address is not None: + reward_addr = args.address + elif '*' not in args.descriptor: + reward_addr = args.address = json.loads(args.bcli("deriveaddresses", args.descriptor))[0] + else: + remove = [k for k in args.derived_addresses.keys() if k+20 <= height] + for k in remove: + del args.derived_addresses[k] + if height not in args.derived_addresses: + addrs = json.loads(args.bcli("deriveaddresses", args.descriptor, "[%d,%d]" % (height, height+20))) + for k, a in enumerate(addrs): + args.derived_addresses[height+k] = a + reward_addr = args.derived_addresses[height] + + reward_spk = bytes.fromhex(json.loads(args.bcli("getaddressinfo", reward_addr))["scriptPubKey"]) + if args.address is not None: + # will always be the same, so cache + args.reward_spk = reward_spk + + return reward_addr, reward_spk + +def do_genpsbt(args): + poolid = get_poolid(args) + tmpl = json.load(sys.stdin) + _, reward_spk = get_reward_addr_spk(args, tmpl["height"]) + psbt = generate_psbt(tmpl, reward_spk, poolid=poolid) + print(psbt) + +def do_solvepsbt(args): + block, signet_solution = decode_psbt(sys.stdin.read()) + block = finish_block(block, signet_solution, args.grind_cmd) + print(block.serialize().hex()) + +def nbits_to_target(nbits): + shift = (nbits >> 24) & 0xff + return (nbits & 0x00ffffff) * 2**(8*(shift - 3)) + +def target_to_nbits(target): + tstr = "{0:x}".format(target) + if len(tstr) < 6: + tstr = ("000000"+tstr)[-6:] + if len(tstr) % 2 != 0: + tstr = "0" + tstr + if int(tstr[0],16) >= 0x8: + # avoid "negative" + tstr = "00" + tstr + fix = int(tstr[:6], 16) + sz = len(tstr)//2 + if tstr[6:] != "0"*(sz*2-6): + fix += 1 + + return int("%02x%06x" % (sz,fix), 16) + +def seconds_to_hms(s): + if s == 0: + return "0s" + neg = (s < 0) + if neg: + s = -s + out = "" + if s % 60 > 0: + out = "%ds" % (s % 60) + s //= 60 + if s % 60 > 0: + out = "%dm%s" % (s % 60, out) + s //= 60 + if s > 0: + out = "%dh%s" % (s, out) + if 
neg: + out = "-" + out + return out + +class Generate: + INTERVAL = 600.0*2016/2015 # 10 minutes, adjusted for the off-by-one bug + + + def __init__(self, multiminer=None, ultimate_target=None, poisson=False, max_interval=1800, + standby_delay=0, backup_delay=0, set_block_time=None, + poolid=None): + if multiminer is None: + multiminer = (0, 1, 1) + (self.multi_low, self.multi_high, self.multi_period) = multiminer + self.ultimate_target = ultimate_target + self.poisson = poisson + self.max_interval = max_interval + self.standby_delay = standby_delay + self.backup_delay = backup_delay + self.set_block_time = set_block_time + self.poolid = poolid + + def next_block_delta(self, last_nbits, last_hash): + # strategy: + # 1) work out how far off our desired target we are + # 2) cap it to a factor of 4 since that's the best we can do in a single retarget period + # 3) use that to work out the desired average interval in this retarget period + # 4) if doing poisson, use the last hash to pick a uniformly random number in [0,1), and work out a random multiplier to vary the average by + # 5) cap the resulting interval between 1 second and 1 hour to avoid extremes + + current_target = nbits_to_target(last_nbits) + retarget_factor = self.ultimate_target / current_target + retarget_factor = max(0.25, min(retarget_factor, 4.0)) + + avg_interval = self.INTERVAL * retarget_factor + + if self.poisson: + det_rand = int(last_hash[-8:], 16) * 2**-32 + this_interval_variance = -math.log1p(-det_rand) + else: + this_interval_variance = 1 + + this_interval = avg_interval * this_interval_variance + this_interval = max(1, min(this_interval, self.max_interval)) + + return this_interval + + def next_block_is_mine(self, last_hash): + det_rand = int(last_hash[-16:-8], 16) + return self.multi_low <= (det_rand % self.multi_period) < self.multi_high + + def next_block_time(self, now, bestheader, is_first_block): + if self.set_block_time is not None: + logging.debug("Setting start time to %d", self.set_block_time) + self.mine_time = self.set_block_time + self.action_time = now + self.is_mine = True + elif bestheader["height"] == 0: + time_delta = self.INTERVAL * 100 # plenty of time to mine 100 blocks + logging.info("Backdating time for first block to %d minutes ago" % (time_delta/60)) + self.mine_time = now - time_delta + self.action_time = now + self.is_mine = True + else: + time_delta = self.next_block_delta(int(bestheader["bits"], 16), bestheader["hash"]) + self.mine_time = bestheader["time"] + time_delta + + self.is_mine = self.next_block_is_mine(bestheader["hash"]) + + self.action_time = self.mine_time + if not self.is_mine: + self.action_time += self.backup_delay + + if self.standby_delay > 0: + self.action_time += self.standby_delay + elif is_first_block: + # for non-standby, always mine immediately on startup, + # even if the next block shouldn't be ours + self.action_time = now + + # don't want fractional times so round down + self.mine_time = int(self.mine_time) + self.action_time = int(self.action_time) + + # can't mine a block 2h in the future; 1h55m for some safety + self.action_time = max(self.action_time, self.mine_time - 6900) + + def gbt(self, bcli, bestblockhash, now): + tmpl = json.loads(bcli("getblocktemplate", '{"rules":["signet","segwit"]}')) + if tmpl["previousblockhash"] != bestblockhash: + logging.warning("GBT based off unexpected block (%s not %s), retrying", tmpl["previousblockhash"], bci["bestblockhash"]) + time.sleep(1) + return None + + if tmpl["mintime"] > self.mine_time: + 
logging.info("Updating block time from %d to %d", self.mine_time, tmpl["mintime"]) + self.mine_time = tmpl["mintime"] + if self.mine_time > now: + logging.error("GBT mintime is in the future: %d is %d seconds later than %d", self.mine_time, (self.mine_time-now), now) + return None + + return tmpl + + def mine(self, bcli, grind_cmd, tmpl, reward_spk): + psbt = generate_psbt(tmpl, reward_spk, blocktime=self.mine_time, poolid=self.poolid) + input_stream = os.linesep.join([psbt, "true", "ALL"]).encode('utf8') + psbt_signed = json.loads(bcli("-stdin", "walletprocesspsbt", input=input_stream)) + if not psbt_signed.get("complete",False): + logging.debug("Generated PSBT: %s" % (psbt,)) + sys.stderr.write("PSBT signing failed\n") + return None + block, signet_solution = decode_psbt(psbt_signed["psbt"]) + return finish_block(block, signet_solution, grind_cmd) + +def do_generate(args): + if args.set_block_time is not None: + max_blocks = 1 + elif args.max_blocks is not None: + if args.max_blocks < 1: + logging.error("--max_blocks must specify a positive integer") + return 1 + max_blocks = args.max_blocks + elif args.ongoing: + max_blocks = None + else: + max_blocks = 1 + + if args.set_block_time is not None and args.set_block_time < 0: + args.set_block_time = time.time() + logging.info("Treating negative block time as current time (%d)" % (args.set_block_time)) + + if args.min_nbits: + args.nbits = "1e0377ae" + logging.info("Using nbits=%s" % (args.nbits)) + + if args.set_block_time is None: + if args.nbits is None or len(args.nbits) != 8: + logging.error("Must specify --nbits (use calibrate command to determine value)") + return 1 + + if args.multiminer is None: + my_blocks = (0,1,1) + else: + if not args.ongoing: + logging.error("Cannot specify --multiminer without --ongoing") + return 1 + m = RE_MULTIMINER.match(args.multiminer) + if m is None: + logging.error("--multiminer argument must be k/m or j-k/m") + return 1 + start,_,stop,total = m.groups() + if stop is None: + stop = start + start, stop, total = map(int, (start, stop, total)) + if stop < start or start <= 0 or total < stop or total == 0: + logging.error("Inconsistent values for --multiminer") + return 1 + my_blocks = (start-1, stop, total) + + if args.max_interval < 960: + logging.error("--max-interval must be at least 960 (16 minutes)") + return 1 + + poolid = get_poolid(args) + + ultimate_target = nbits_to_target(int(args.nbits,16)) + + gen = Generate(multiminer=my_blocks, ultimate_target=ultimate_target, poisson=args.poisson, max_interval=args.max_interval, + standby_delay=args.standby_delay, backup_delay=args.backup_delay, set_block_time=args.set_block_time, poolid=poolid) + + mined_blocks = 0 + bestheader = {"hash": None} + lastheader = None + while max_blocks is None or mined_blocks < max_blocks: + + # current status? + bci = json.loads(args.bcli("getblockchaininfo")) + + if bestheader["hash"] != bci["bestblockhash"]: + bestheader = json.loads(args.bcli("getblockheader", bci["bestblockhash"])) + + if lastheader is None: + lastheader = bestheader["hash"] + elif bestheader["hash"] != lastheader: + next_delta = gen.next_block_delta(int(bestheader["bits"], 16), bestheader["hash"]) + next_delta += bestheader["time"] - time.time() + next_is_mine = gen.next_block_is_mine(bestheader["hash"]) + logging.info("Received new block at height %d; next in %s (%s)", bestheader["height"], seconds_to_hms(next_delta), ("mine" if next_is_mine else "backup")) + lastheader = bestheader["hash"] + + # when is the next block due to be mined? 
+ now = time.time() + gen.next_block_time(now, bestheader, (mined_blocks == 0)) + + # ready to go? otherwise sleep and check for new block + if now < gen.action_time: + sleep_for = min(gen.action_time - now, 60) + if gen.mine_time < now: + # someone else might have mined the block, + # so check frequently, so we don't end up late + # mining the next block if it's ours + sleep_for = min(20, sleep_for) + minestr = "mine" if gen.is_mine else "backup" + logging.debug("Sleeping for %s, next block due in %s (%s)" % (seconds_to_hms(sleep_for), seconds_to_hms(gen.mine_time - now), minestr)) + time.sleep(sleep_for) + continue + + # gbt + tmpl = gen.gbt(args.bcli, bci["bestblockhash"], now) + if tmpl is None: + continue + + logging.debug("GBT template: %s", tmpl) + + # address for reward + reward_addr, reward_spk = get_reward_addr_spk(args, tmpl["height"]) + + # mine block + logging.debug("Mining block delta=%s start=%s mine=%s", seconds_to_hms(gen.mine_time-bestheader["time"]), gen.mine_time, gen.is_mine) + mined_blocks += 1 + block = gen.mine(args.bcli, args.grind_cmd, tmpl, reward_spk) + if block is None: + return 1 + + # submit block + r = args.bcli("-stdin", "submitblock", input=block.serialize().hex().encode('utf8')) + + # report + bstr = "block" if gen.is_mine else "backup block" + + next_delta = gen.next_block_delta(block.nBits, block.hash) + next_delta += block.nTime - time.time() + next_is_mine = gen.next_block_is_mine(block.hash) + + logging.debug("Block hash %s payout to %s", block.hash, reward_addr) + logging.info("Mined %s at height %d; next in %s (%s)", bstr, tmpl["height"], seconds_to_hms(next_delta), ("mine" if next_is_mine else "backup")) + if r != "": + logging.warning("submitblock returned %s for height %d hash %s", r, tmpl["height"], block.hash) + lastheader = block.hash + +def do_calibrate(args): + if args.nbits is not None and args.seconds is not None: + sys.stderr.write("Can only specify one of --nbits or --seconds\n") + return 1 + if args.nbits is not None and len(args.nbits) != 8: + sys.stderr.write("Must specify 8 hex digits for --nbits\n") + return 1 + + TRIALS = 600 # gets variance down pretty low + TRIAL_BITS = 0x1e3ea75f # takes about 5m to do 600 trials + + header = CBlockHeader() + header.nBits = TRIAL_BITS + targ = nbits_to_target(header.nBits) + + start = time.time() + count = 0 + for i in range(TRIALS): + header.nTime = i + header.nNonce = 0 + headhex = header.serialize().hex() + cmd = args.grind_cmd.split(" ") + [headhex] + newheadhex = subprocess.run(cmd, stdout=subprocess.PIPE, input=b"", check=True).stdout.strip() + + avg = (time.time() - start) * 1.0 / TRIALS + + if args.nbits is not None: + want_targ = nbits_to_target(int(args.nbits,16)) + want_time = avg*targ/want_targ + else: + want_time = args.seconds if args.seconds is not None else 25 + want_targ = int(targ*(avg/want_time)) + + print("nbits=%08x for %ds average mining time" % (target_to_nbits(want_targ), want_time)) + return 0 + +def bitcoin_cli(basecmd, args, **kwargs): + cmd = basecmd + ["-signet"] + args + logging.debug("Calling bitcoin-cli: %r", cmd) + out = subprocess.run(cmd, stdout=subprocess.PIPE, **kwargs, check=True).stdout + if isinstance(out, bytes): + out = out.decode('utf8') + return out.strip() + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument("--cli", default="bitcoin-cli", type=str, help="bitcoin-cli command") + parser.add_argument("--debug", action="store_true", help="Print debugging info") + parser.add_argument("--quiet", action="store_true", help="Only print 
warnings/errors") + + cmds = parser.add_subparsers(help="sub-commands") + genpsbt = cmds.add_parser("genpsbt", help="Generate a block PSBT for signing") + genpsbt.set_defaults(fn=do_genpsbt) + + solvepsbt = cmds.add_parser("solvepsbt", help="Solve a signed block PSBT") + solvepsbt.set_defaults(fn=do_solvepsbt) + + generate = cmds.add_parser("generate", help="Mine blocks") + generate.set_defaults(fn=do_generate) + howmany = generate.add_mutually_exclusive_group() + howmany.add_argument("--ongoing", action="store_true", help="Keep mining blocks") + howmany.add_argument("--max-blocks", default=None, type=int, help="Max blocks to mine (default=1)") + howmany.add_argument("--set-block-time", default=None, type=int, help="Set block time (unix timestamp); implies --max-blocks=1") + nbit_target = generate.add_mutually_exclusive_group() + nbit_target.add_argument("--nbits", default=None, type=str, help="Target nBits (specify difficulty)") + nbit_target.add_argument("--min-nbits", action="store_true", help="Target minimum nBits (use min difficulty)") + generate.add_argument("--poisson", action="store_true", help="Simulate randomised block times") + generate.add_argument("--multiminer", default=None, type=str, help="Specify which set of blocks to mine (eg: 1-40/100 for the first 40%%, 2/3 for the second 3rd)") + generate.add_argument("--backup-delay", default=300, type=int, help="Seconds to delay before mining blocks reserved for other miners (default=300)") + generate.add_argument("--standby-delay", default=0, type=int, help="Seconds to delay before mining blocks (default=0)") + generate.add_argument("--max-interval", default=1800, type=int, help="Maximum interblock interval (seconds)") + + calibrate = cmds.add_parser("calibrate", help="Calibrate difficulty") + calibrate.set_defaults(fn=do_calibrate) + calibrate_by = calibrate.add_mutually_exclusive_group() + calibrate_by.add_argument("--nbits", type=str, default=None) + calibrate_by.add_argument("--seconds", type=int, default=None) + + for sp in [genpsbt, generate]: + payto = sp.add_mutually_exclusive_group(required=True) + payto.add_argument("--address", default=None, type=str, help="Address for block reward payment") + payto.add_argument("--descriptor", default=None, type=str, help="Descriptor for block reward payment") + pool = sp.add_mutually_exclusive_group() + pool.add_argument("--poolnum", default=None, type=int, help="Identify blocks that you mine") + pool.add_argument("--poolid", default=None, type=str, help="Identify blocks that you mine (eg: /signet:1/)") + + for sp in [solvepsbt, generate, calibrate]: + sp.add_argument("--grind-cmd", default=None, type=str, required=(sp==calibrate), help="Command to grind a block header for proof-of-work") + + args = parser.parse_args(sys.argv[1:]) + + args.bcli = lambda *a, input=b"", **kwargs: bitcoin_cli(args.cli.split(" "), list(a), input=input, **kwargs) + + if hasattr(args, "address") and hasattr(args, "descriptor"): + args.derived_addresses = {} + + if args.debug: + logging.getLogger().setLevel(logging.DEBUG) + elif args.quiet: + logging.getLogger().setLevel(logging.WARNING) + else: + logging.getLogger().setLevel(logging.INFO) + + if hasattr(args, "fn"): + return args.fn(args) + else: + logging.error("Must specify command") + return 1 + +if __name__ == "__main__": + main() diff --git a/contrib/testgen/README.md b/contrib/testgen/README.md index 573a71a675312..2f0288df165b3 100644 --- a/contrib/testgen/README.md +++ b/contrib/testgen/README.md @@ -2,7 +2,7 @@ Utilities to generate test vectors for 
the data-driven Bitcoin tests. -Usage: +To use inside a scripted-diff (or just execute directly): - PYTHONPATH=../../test/functional/test_framework ./gen_key_io_test_vectors.py valid 50 > ../../src/test/data/key_io_keys_valid.json - PYTHONPATH=../../test/functional/test_framework ./gen_key_io_test_vectors.py invalid 50 > ../../src/test/data/key_io_keys_invalid.json + ./gen_key_io_test_vectors.py valid 70 > ../../src/test/data/key_io_valid.json + ./gen_key_io_test_vectors.py invalid 70 > ../../src/test/data/key_io_invalid.json diff --git a/contrib/testgen/base58.py b/contrib/testgen/base58.py deleted file mode 100644 index da67cb2d9052e..0000000000000 --- a/contrib/testgen/base58.py +++ /dev/null @@ -1,115 +0,0 @@ -# Copyright (c) 2012-2018 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. -''' -Bitcoin base58 encoding and decoding. - -Based on https://bitcointalk.org/index.php?topic=1026.0 (public domain) -''' -import hashlib - -# for compatibility with following code... -class SHA256: - new = hashlib.sha256 - -if str != bytes: - # Python 3.x - def ord(c): - return c - def chr(n): - return bytes( (n,) ) - -__b58chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz' -__b58base = len(__b58chars) -b58chars = __b58chars - -def b58encode(v): - """ encode v, which is a string of bytes, to base58. - """ - long_value = 0 - for (i, c) in enumerate(v[::-1]): - if isinstance(c, str): - c = ord(c) - long_value += (256**i) * c - - result = '' - while long_value >= __b58base: - div, mod = divmod(long_value, __b58base) - result = __b58chars[mod] + result - long_value = div - result = __b58chars[long_value] + result - - # Bitcoin does a little leading-zero-compression: - # leading 0-bytes in the input become leading-1s - nPad = 0 - for c in v: - if c == 0: - nPad += 1 - else: - break - - return (__b58chars[0]*nPad) + result - -def b58decode(v, length = None): - """ decode v into a string of len bytes - """ - long_value = 0 - for i, c in enumerate(v[::-1]): - pos = __b58chars.find(c) - assert pos != -1 - long_value += pos * (__b58base**i) - - result = bytes() - while long_value >= 256: - div, mod = divmod(long_value, 256) - result = chr(mod) + result - long_value = div - result = chr(long_value) + result - - nPad = 0 - for c in v: - if c == __b58chars[0]: - nPad += 1 - continue - break - - result = bytes(nPad) + result - if length is not None and len(result) != length: - return None - - return result - -def checksum(v): - """Return 32-bit checksum based on SHA256""" - return SHA256.new(SHA256.new(v).digest()).digest()[0:4] - -def b58encode_chk(v): - """b58encode a string, with 32-bit checksum""" - return b58encode(v + checksum(v)) - -def b58decode_chk(v): - """decode a base58 string, check and remove checksum""" - result = b58decode(v) - if result is None: - return None - if result[-4:] == checksum(result[:-4]): - return result[:-4] - else: - return None - -def get_bcaddress_version(strAddress): - """ Returns None if strAddress is invalid. Otherwise returns integer version of address. 
""" - addr = b58decode_chk(strAddress) - if addr is None or len(addr)!=21: - return None - version = addr[0] - return ord(version) - -if __name__ == '__main__': - # Test case (from http://gitorious.org/bitcoin/python-base58.git) - assert get_bcaddress_version('15VjRaDX9zpbA8LVnbrCAFzrVzN7ixHNsC') is 0 - _ohai = 'o hai'.encode('ascii') - _tmp = b58encode(_ohai) - assert _tmp == 'DYB3oMS' - assert b58decode(_tmp, 5) == _ohai - print("Tests passed") diff --git a/contrib/testgen/gen_key_io_test_vectors.py b/contrib/testgen/gen_key_io_test_vectors.py index a00acb1f41fe4..7bfb1d76a8b76 100755 --- a/contrib/testgen/gen_key_io_test_vectors.py +++ b/contrib/testgen/gen_key_io_test_vectors.py @@ -1,22 +1,21 @@ #!/usr/bin/env python3 -# Copyright (c) 2012-2018 The Bitcoin Core developers +# Copyright (c) 2012-2022 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. ''' -Generate valid and invalid base58 address and private key test vectors. - -Usage: - PYTHONPATH=../../test/functional/test_framework ./gen_key_io_test_vectors.py valid 50 > ../../src/test/data/key_io_valid.json - PYTHONPATH=../../test/functional/test_framework ./gen_key_io_test_vectors.py invalid 50 > ../../src/test/data/key_io_invalid.json +Generate valid and invalid base58/bech32(m) address and private key test vectors. ''' -# 2012 Wladimir J. van der Laan -# Released under MIT License -import os + from itertools import islice -from base58 import b58encode_chk, b58decode_chk, b58chars +import os import random -from binascii import b2a_hex -from segwit_addr import bech32_encode, decode, convertbits, CHARSET +import sys + +sys.path.append(os.path.join(os.path.dirname(__file__), '../../test/functional')) + +from test_framework.address import base58_to_byte, byte_to_base58, b58chars # noqa: E402 +from test_framework.script import OP_0, OP_1, OP_2, OP_3, OP_16, OP_DUP, OP_EQUAL, OP_EQUALVERIFY, OP_HASH160, OP_CHECKSIG # noqa: E402 +from test_framework.segwit_addr import bech32_encode, decode_segwit_address, convertbits, CHARSET, Encoding # noqa: E402 # key types PUBKEY_ADDRESS = 0 @@ -30,21 +29,13 @@ PRIVKEY_REGTEST = 239 # script -OP_0 = 0x00 -OP_1 = 0x51 -OP_2 = 0x52 -OP_16 = 0x60 -OP_DUP = 0x76 -OP_EQUAL = 0x87 -OP_EQUALVERIFY = 0x88 -OP_HASH160 = 0xa9 -OP_CHECKSIG = 0xac pubkey_prefix = (OP_DUP, OP_HASH160, 20) pubkey_suffix = (OP_EQUALVERIFY, OP_CHECKSIG) script_prefix = (OP_HASH160, 20) script_suffix = (OP_EQUAL,) p2wpkh_prefix = (OP_0, 20) p2wsh_prefix = (OP_0, 32) +p2tr_prefix = (OP_1, 32) metadata_keys = ['isPrivkey', 'chain', 'isCompressed', 'tryCaseFlip'] # templates for valid sequences @@ -55,48 +46,68 @@ ((SCRIPT_ADDRESS,), 20, (), (False, 'main', None, None), script_prefix, script_suffix), ((PUBKEY_ADDRESS_TEST,), 20, (), (False, 'test', None, None), pubkey_prefix, pubkey_suffix), ((SCRIPT_ADDRESS_TEST,), 20, (), (False, 'test', None, None), script_prefix, script_suffix), + ((PUBKEY_ADDRESS_TEST,), 20, (), (False, 'signet', None, None), pubkey_prefix, pubkey_suffix), + ((SCRIPT_ADDRESS_TEST,), 20, (), (False, 'signet', None, None), script_prefix, script_suffix), ((PUBKEY_ADDRESS_REGTEST,), 20, (), (False, 'regtest', None, None), pubkey_prefix, pubkey_suffix), ((SCRIPT_ADDRESS_REGTEST,), 20, (), (False, 'regtest', None, None), script_prefix, script_suffix), ((PRIVKEY,), 32, (), (True, 'main', False, None), (), ()), ((PRIVKEY,), 32, (1,), (True, 'main', True, None), (), ()), ((PRIVKEY_TEST,), 32, (), (True, 'test', 
False, None), (), ()), ((PRIVKEY_TEST,), 32, (1,), (True, 'test', True, None), (), ()), + ((PRIVKEY_TEST,), 32, (), (True, 'signet', False, None), (), ()), + ((PRIVKEY_TEST,), 32, (1,), (True, 'signet', True, None), (), ()), ((PRIVKEY_REGTEST,), 32, (), (True, 'regtest', False, None), (), ()), ((PRIVKEY_REGTEST,), 32, (1,), (True, 'regtest', True, None), (), ()) ] # templates for valid bech32 sequences bech32_templates = [ - # hrp, version, witprog_size, metadata, output_prefix - ('bc', 0, 20, (False, 'main', None, True), p2wpkh_prefix), - ('bc', 0, 32, (False, 'main', None, True), p2wsh_prefix), - ('bc', 1, 2, (False, 'main', None, True), (OP_1, 2)), - ('tb', 0, 20, (False, 'test', None, True), p2wpkh_prefix), - ('tb', 0, 32, (False, 'test', None, True), p2wsh_prefix), - ('tb', 2, 16, (False, 'test', None, True), (OP_2, 16)), - ('bcrt', 0, 20, (False, 'regtest', None, True), p2wpkh_prefix), - ('bcrt', 0, 32, (False, 'regtest', None, True), p2wsh_prefix), - ('bcrt', 16, 40, (False, 'regtest', None, True), (OP_16, 40)) + # hrp, version, witprog_size, metadata, encoding, output_prefix + ('bc', 0, 20, (False, 'main', None, True), Encoding.BECH32, p2wpkh_prefix), + ('bc', 0, 32, (False, 'main', None, True), Encoding.BECH32, p2wsh_prefix), + ('bc', 1, 32, (False, 'main', None, True), Encoding.BECH32M, p2tr_prefix), + ('bc', 2, 2, (False, 'main', None, True), Encoding.BECH32M, (OP_2, 2)), + ('tb', 0, 20, (False, 'test', None, True), Encoding.BECH32, p2wpkh_prefix), + ('tb', 0, 32, (False, 'test', None, True), Encoding.BECH32, p2wsh_prefix), + ('tb', 1, 32, (False, 'test', None, True), Encoding.BECH32M, p2tr_prefix), + ('tb', 3, 16, (False, 'test', None, True), Encoding.BECH32M, (OP_3, 16)), + ('tb', 0, 20, (False, 'signet', None, True), Encoding.BECH32, p2wpkh_prefix), + ('tb', 0, 32, (False, 'signet', None, True), Encoding.BECH32, p2wsh_prefix), + ('tb', 1, 32, (False, 'signet', None, True), Encoding.BECH32M, p2tr_prefix), + ('tb', 3, 32, (False, 'signet', None, True), Encoding.BECH32M, (OP_3, 32)), + ('bcrt', 0, 20, (False, 'regtest', None, True), Encoding.BECH32, p2wpkh_prefix), + ('bcrt', 0, 32, (False, 'regtest', None, True), Encoding.BECH32, p2wsh_prefix), + ('bcrt', 1, 32, (False, 'regtest', None, True), Encoding.BECH32M, p2tr_prefix), + ('bcrt', 16, 40, (False, 'regtest', None, True), Encoding.BECH32M, (OP_16, 40)) ] # templates for invalid bech32 sequences bech32_ng_templates = [ - # hrp, version, witprog_size, invalid_bech32, invalid_checksum, invalid_char - ('tc', 0, 20, False, False, False), - ('tb', 17, 32, False, False, False), - ('bcrt', 3, 1, False, False, False), - ('bc', 15, 41, False, False, False), - ('tb', 0, 16, False, False, False), - ('bcrt', 0, 32, True, False, False), - ('bc', 0, 16, True, False, False), - ('tb', 0, 32, False, True, False), - ('bcrt', 0, 20, False, False, True) + # hrp, version, witprog_size, encoding, invalid_bech32, invalid_checksum, invalid_char + ('tc', 0, 20, Encoding.BECH32, False, False, False), + ('bt', 1, 32, Encoding.BECH32M, False, False, False), + ('tb', 17, 32, Encoding.BECH32M, False, False, False), + ('bcrt', 3, 1, Encoding.BECH32M, False, False, False), + ('bc', 15, 41, Encoding.BECH32M, False, False, False), + ('tb', 0, 16, Encoding.BECH32, False, False, False), + ('bcrt', 0, 32, Encoding.BECH32, True, False, False), + ('bc', 0, 16, Encoding.BECH32, True, False, False), + ('tb', 0, 32, Encoding.BECH32, False, True, False), + ('bcrt', 0, 20, Encoding.BECH32, False, False, True), + ('bc', 0, 20, Encoding.BECH32M, False, False, False), + 
('tb', 0, 32, Encoding.BECH32M, False, False, False), + ('bcrt', 0, 20, Encoding.BECH32M, False, False, False), + ('bc', 1, 32, Encoding.BECH32, False, False, False), + ('tb', 2, 16, Encoding.BECH32, False, False, False), + ('bcrt', 16, 20, Encoding.BECH32, False, False, False), ] def is_valid(v): '''Check vector v for validity''' if len(set(v) - set(b58chars)) > 0: return is_valid_bech32(v) - result = b58decode_chk(v) - if result is None: + try: + payload, version = base58_to_byte(v) + result = bytes([version]) + payload + except ValueError: # thrown if checksum doesn't match return is_valid_bech32(v) for template in templates: prefix = bytearray(template[0]) @@ -109,27 +120,29 @@ def is_valid(v): def is_valid_bech32(v): '''Check vector v for bech32 validity''' for hrp in ['bc', 'tb', 'bcrt']: - if decode(hrp, v) != (None, None): + if decode_segwit_address(hrp, v) != (None, None): return True return False def gen_valid_base58_vector(template): '''Generate valid base58 vector''' prefix = bytearray(template[0]) - payload = bytearray(os.urandom(template[1])) + payload = rand_bytes(size=template[1]) suffix = bytearray(template[2]) dst_prefix = bytearray(template[4]) dst_suffix = bytearray(template[5]) - rv = b58encode_chk(prefix + payload + suffix) + assert len(prefix) == 1 + rv = byte_to_base58(payload + suffix, prefix[0]) return rv, dst_prefix + payload + dst_suffix def gen_valid_bech32_vector(template): '''Generate valid bech32 vector''' hrp = template[0] witver = template[1] - witprog = bytearray(os.urandom(template[2])) - dst_prefix = bytearray(template[4]) - rv = bech32_encode(hrp, [witver] + convertbits(witprog, 8, 5)) + witprog = rand_bytes(size=template[2]) + encoding = template[4] + dst_prefix = bytearray(template[5]) + rv = bech32_encode(encoding, hrp, [witver] + convertbits(witprog, 8, 5)) return rv, dst_prefix + witprog def gen_valid_vectors(): @@ -141,9 +154,7 @@ def gen_valid_vectors(): rv, payload = valid_vector_generator(template) assert is_valid(rv) metadata = {x: y for x, y in zip(metadata_keys,template[3]) if y is not None} - hexrepr = b2a_hex(payload) - if isinstance(hexrepr, bytes): - hexrepr = hexrepr.decode('utf8') + hexrepr = payload.hex() yield (rv, hexrepr, metadata) def gen_invalid_base58_vector(template): @@ -158,21 +169,22 @@ def gen_invalid_base58_vector(template): corrupt_suffix = randbool(0.2) if corrupt_prefix: - prefix = os.urandom(1) + prefix = rand_bytes(size=1) else: prefix = bytearray(template[0]) if randomize_payload_size: - payload = os.urandom(max(int(random.expovariate(0.5)), 50)) + payload = rand_bytes(size=max(int(random.expovariate(0.5)), 50)) else: - payload = os.urandom(template[1]) + payload = rand_bytes(size=template[1]) if corrupt_suffix: - suffix = os.urandom(len(template[2])) + suffix = rand_bytes(size=len(template[2])) else: suffix = bytearray(template[2]) - val = b58encode_chk(prefix + payload + suffix) + assert len(prefix) == 1 + val = byte_to_base58(payload + suffix, prefix[0]) if random.randint(0,10)<1: # line corruption if randbool(): # add random character to end val += random.choice(b58chars) @@ -188,23 +200,24 @@ def gen_invalid_bech32_vector(template): to_upper = randbool(0.1) hrp = template[0] witver = template[1] - witprog = bytearray(os.urandom(template[2])) + witprog = rand_bytes(size=template[2]) + encoding = template[3] if no_data: - rv = bech32_encode(hrp, []) + rv = bech32_encode(encoding, hrp, []) else: data = [witver] + convertbits(witprog, 8, 5) - if template[3] and not no_data: + if template[4] and not no_data: if 
template[2] % 5 in {2, 4}: data[-1] |= 1 else: data.append(0) - rv = bech32_encode(hrp, data) + rv = bech32_encode(encoding, hrp, data) - if template[4]: + if template[5]: i = len(rv) - random.randrange(1, 7) rv = rv[:i] + random.choice(CHARSET.replace(rv[i], '')) + rv[i + 1:] - if template[5]: + if template[6]: i = len(hrp) + 1 + random.randrange(0, len(rv) - len(hrp) - 4) rv = rv[:i] + rv[i:i + 4].upper() + rv[i + 4:] @@ -217,6 +230,9 @@ def randbool(p = 0.5): '''Return True with P(p)''' return random.random() < p +def rand_bytes(*, size): + return bytearray(random.getrandbits(8) for _ in range(size)) + def gen_invalid_vectors(): '''Generate invalid test vectors''' # start with some manual edge-cases @@ -231,9 +247,9 @@ def gen_invalid_vectors(): yield val, if __name__ == '__main__': - import sys import json iters = {'valid':gen_valid_vectors, 'invalid':gen_invalid_vectors} + random.seed(42) try: uiter = iters[sys.argv[1]] except IndexError: diff --git a/contrib/tracing/README.md b/contrib/tracing/README.md new file mode 100644 index 0000000000000..c471770a7d73b --- /dev/null +++ b/contrib/tracing/README.md @@ -0,0 +1,338 @@ +Example scripts for User-space, Statically Defined Tracing (USDT) +================================================================= + +This directory contains scripts showcasing User-space, Statically Defined +Tracing (USDT) support for Bitcoin Core on Linux using eBPF. For more information on +USDT support in Bitcoin Core see the [USDT documentation]. + +[USDT documentation]: ../../doc/tracing.md + + +Examples for the two main eBPF front-ends, [bpftrace] and +[BPF Compiler Collection (BCC)], with support for USDT, are listed. BCC is used +for complex tools and daemons and `bpftrace` is preferred for one-liners and +shorter scripts. + +[bpftrace]: https://github.com/iovisor/bpftrace +[BPF Compiler Collection (BCC)]: https://github.com/iovisor/bcc + + +To develop and run bpftrace and BCC scripts you need to install the +corresponding packages. See [installing bpftrace] and [installing BCC] for more +information. For development there exist a [bpftrace Reference Guide], a +[BCC Reference Guide], and a [bcc Python Developer Tutorial]. + +[installing bpftrace]: https://github.com/iovisor/bpftrace/blob/master/INSTALL.md +[installing BCC]: https://github.com/iovisor/bcc/blob/master/INSTALL.md +[bpftrace Reference Guide]: https://github.com/iovisor/bpftrace/blob/master/docs/reference_guide.md +[BCC Reference Guide]: https://github.com/iovisor/bcc/blob/master/docs/reference_guide.md +[bcc Python Developer Tutorial]: https://github.com/iovisor/bcc/blob/master/docs/tutorial_bcc_python_developer.md + +## Examples + +The bpftrace examples contain a relative path to the `bitcoind` binary. By +default, the scripts should be run from the repository root and assume a +self-compiled `bitcoind` binary. The paths in the examples can be changed, for +example, to point to release builds if needed. See the +[Bitcoin Core USDT documentation] on how to list available tracepoints in your +`bitcoind` binary. + +[Bitcoin Core USDT documentation]: ../../doc/tracing.md#listing-available-tracepoints + +**WARNING: eBPF programs require root privileges to be loaded into a Linux +kernel VM. This means the bpftrace and BCC examples must be executed with root +privileges. Make sure to carefully review any scripts that you run with root +privileges first!** + +### log_p2p_traffic.bt + +A bpftrace script logging information about inbound and outbound P2P network +messages.
Based on the `net:inbound_message` and `net:outbound_message` +tracepoints. + +By default, `bpftrace` limits strings to 64 bytes due to the limited stack size +in the eBPF VM. For example, Tor v3 addresses exceed the string size limit which +results in the port being cut off during logging. The string size limit can be +increased with the `BPFTRACE_STRLEN` environment variable (`BPFTRACE_STRLEN=70` +works fine). + +``` +$ bpftrace contrib/tracing/log_p2p_traffic.bt +``` + +Output +``` +outbound 'ping' msg to peer 11 (outbound-full-relay, [2a02:b10c:f747:1:ef:fake:ipv6:addr]:8333) with 8 bytes +inbound 'pong' msg from peer 11 (outbound-full-relay, [2a02:b10c:f747:1:ef:fake:ipv6:addr]:8333) with 8 bytes +inbound 'inv' msg from peer 16 (outbound-full-relay, XX.XX.XXX.121:8333) with 37 bytes +outbound 'getdata' msg to peer 16 (outbound-full-relay, XX.XX.XXX.121:8333) with 37 bytes +inbound 'tx' msg from peer 16 (outbound-full-relay, XX.XX.XXX.121:8333) with 222 bytes +outbound 'inv' msg to peer 9 (outbound-full-relay, faketorv3addressa2ufa6odvoi3s77j4uegey0xb10csyfyve2t33curbyd.onion:8333) with 37 bytes +outbound 'inv' msg to peer 7 (outbound-full-relay, XX.XX.XXX.242:8333) with 37 bytes +… +``` + +### p2p_monitor.py + +A BCC Python script using curses for an interactive P2P message monitor. Based +on the `net:inbound_message` and `net:outbound_message` tracepoints. + +Inbound and outbound traffic is listed for each peer together with information +about the connection. Peers can be selected individually to view recent P2P +messages. + +``` +$ python3 contrib/tracing/p2p_monitor.py ./build/src/bitcoind +``` + +Lists selectable peers and traffic and connection information. +``` + P2P Message Monitor + Navigate with UP/DOWN or J/K and select a peer with ENTER or SPACE to see individual P2P messages + + PEER OUTBOUND INBOUND TYPE ADDR + 0 46 398 byte 61 1407590 byte block-relay-only XX.XX.XXX.196:8333 + 11 1156 253570 byte 3431 2394924 byte outbound-full-relay XXX.X.XX.179:8333 + 13 3425 1809620 byte 1236 305458 byte inbound XXX.X.X.X:60380 + 16 1046 241633 byte 1589 1199220 byte outbound-full-relay 4faketorv2pbfu7x.onion:8333 + 19 577 181679 byte 390 148951 byte outbound-full-relay kfake4vctorjv2o2.onion:8333 + 20 11 1248 byte 13 1283 byte block-relay-only [2600:fake:64d9:b10c:4436:aaaa:fe:bb]:8333 + 21 11 1248 byte 13 1299 byte block-relay-only XX.XXX.X.155:8333 + 22 5 103 byte 1 102 byte feeler XX.XX.XXX.173:8333 + 23 11 1248 byte 12 1255 byte block-relay-only XX.XXX.XXX.220:8333 + 24 3 103 byte 1 102 byte feeler XXX.XXX.XXX.64:8333 +… +``` + +Showing recent P2P messages between our node and a selected peer. + +``` + ---------------------------------------------------------------------- + | PEER 16 (4faketorv2pbfu7x.onion:8333) | + | OUR NODE outbound-full-relay PEER | + | <--- sendcmpct (9 bytes) | + | inv (37 byte) ---> | + | <--- ping (8 bytes) | + | pong (8 byte) ---> | + | inv (37 byte) ---> | + | <--- addr (31 bytes) | + | inv (37 byte) ---> | + | <--- getheaders (1029 bytes) | + | headers (1 byte) ---> | + | <--- feefilter (8 bytes) | + | <--- pong (8 bytes) | + | <--- headers (82 bytes) | + | <--- addr (30003 bytes) | + | inv (1261 byte) ---> | + | … | + +``` + +### log_raw_p2p_msgs.py + +A BCC Python script showcasing eBPF and USDT limitations when passing data +larger than about 32kb. Based on the `net:inbound_message` and +`net:outbound_message` tracepoints. + +Bitcoin P2P messages can be larger than 32kb (e.g. `tx`, `block`, ...). 
The +eBPF VM's stack is limited to 512 bytes, and we can't allocate more than about +32kb for a P2P message in the eBPF VM. The **message data is cut off** when the +message is larger than MAX_MSG_DATA_LENGTH (see script). This can be detected +in user-space by comparing the data length to the message length variable. The +message is cut off when the data length is smaller than the message length. +A warning is included with the printed message data. + +Data is submitted to user-space (i.e. to this script) via a ring buffer. The +throughput of the ring buffer is limited. Each p2p_message is about 32kb in +size. In- or outbound messages submitted to the ring buffer in rapid +succession fill the ring buffer faster than it can be read. Some messages are +lost. BCC prints: `Possibly lost 2 samples` on lost messages. + + +``` +$ python3 contrib/tracing/log_raw_p2p_msgs.py ./build/src/bitcoind +``` + +``` +Logging raw P2P messages. +Messages larger that about 32kb will be cut off! +Some messages might be lost! + outbound msg 'inv' from peer 4 (outbound-full-relay, XX.XXX.XX.4:8333) with 253 bytes: 0705000000be2245c8f844c9f763748e1a7… +… +Warning: incomplete message (only 32568 out of 53552 bytes)! inbound msg 'tx' from peer 32 (outbound-full-relay, XX.XXX.XXX.43:8333) with 53552 bytes: 020000000001fd3c01939c85ad6756ed9fc… +… +Possibly lost 2 samples +``` + +### connectblock_benchmark.bt + +A `bpftrace` script to benchmark the `ConnectBlock()` function during, for +example, a blockchain re-index. Based on the `validation:block_connected` USDT +tracepoint. + +The script takes three positional arguments. The first two arguments, the start +and end height, indicate between which blocks the benchmark should be run. The +third acts as a duration threshold in milliseconds. When the `ConnectBlock()` +function takes longer than the threshold, information about the block is +printed. For more details, see the header comment in the script. + +The following command can be used to benchmark, for example, `ConnectBlock()` +between height 20000 and 38000 on SigNet while logging all blocks that take +longer than 25ms to connect. + +``` +$ bpftrace contrib/tracing/connectblock_benchmark.bt 20000 38000 25 +``` + +In a different terminal, start Bitcoin Core in SigNet mode with +re-indexing enabled. + +``` +$ ./build/src/bitcoind -signet -reindex +``` + +This produces the following output. +``` +Attaching 5 probes... +ConnectBlock Benchmark between height 20000 and 38000 inclusive +Logging blocks taking longer than 25 ms to connect. +Starting Connect Block Benchmark between height 20000 and 38000.
+BENCH 39 blk/s 59 tx/s 59 inputs/s 20 sigops/s (height 20038) +Block 20492 (000000f555653bb05e2f3c6e79925e01a20dd57033f4dc7c354b46e34735d32b) 20 tx 2319 ins 2318 sigops took 38 ms +BENCH 1840 blk/s 2117 tx/s 4478 inputs/s 2471 sigops/s (height 21879) +BENCH 1816 blk/s 4972 tx/s 4982 inputs/s 125 sigops/s (height 23695) +BENCH 2095 blk/s 2890 tx/s 2910 inputs/s 152 sigops/s (height 25790) +BENCH 1684 blk/s 3979 tx/s 4053 inputs/s 288 sigops/s (height 27474) +BENCH 1155 blk/s 3216 tx/s 3252 inputs/s 115 sigops/s (height 28629) +BENCH 1797 blk/s 2488 tx/s 2503 inputs/s 111 sigops/s (height 30426) +BENCH 1849 blk/s 6318 tx/s 6569 inputs/s 12189 sigops/s (height 32275) +BENCH 946 blk/s 20209 tx/s 20775 inputs/s 83809 sigops/s (height 33221) +Block 33406 (0000002adfe4a15cfcd53bd890a89bbae836e5bb7f38bac566f61ad4548c87f6) 25 tx 2045 ins 2090 sigops took 29 ms +Block 33687 (00000073231307a9828e5607ceb8156b402efe56747271a4442e75eb5b77cd36) 52 tx 1797 ins 1826 sigops took 26 ms +BENCH 582 blk/s 21581 tx/s 27673 inputs/s 60345 sigops/s (height 33803) +BENCH 1035 blk/s 19735 tx/s 19776 inputs/s 51355 sigops/s (height 34838) +Block 35625 (0000006b00b347390c4768ea9df2655e9ff4b120f29d78594a2a702f8a02c997) 20 tx 3374 ins 3371 sigops took 49 ms +BENCH 887 blk/s 17857 tx/s 22191 inputs/s 24404 sigops/s (height 35725) +Block 35937 (000000d816d13d6e39b471cd4368db60463a764ba1f29168606b04a22b81ea57) 75 tx 3943 ins 3940 sigops took 61 ms +BENCH 823 blk/s 16298 tx/s 21031 inputs/s 18440 sigops/s (height 36548) +Block 36583 (000000c3e260556dbf42968aae3f904dba8b8c1ff96a6f6e3aa5365d2e3ad317) 24 tx 2198 ins 2194 sigops took 34 ms +Block 36700 (000000b3b173de9e65a3cfa738d976af6347aaf83fa17ab3f2a4d2ede3ddfac4) 73 tx 1615 ins 1611 sigops took 31 ms +Block 36832 (0000007859578c02c1ac37dabd1b9ec19b98f350b56935f5dd3a41e9f79f836e) 34 tx 1440 ins 1436 sigops took 26 ms +BENCH 613 blk/s 16718 tx/s 25074 inputs/s 23022 sigops/s (height 37161) +Block 37870 (000000f5c1086291ba2d943fb0c3bc82e71c5ee341ee117681d1456fbf6c6c38) 25 tx 1517 ins 1514 sigops took 29 ms +BENCH 811 blk/s 16031 tx/s 20921 inputs/s 18696 sigops/s (height 37972) + +Took 14055 ms to connect the blocks between height 20000 and 38000. + +Histogram of block connection times in milliseconds (ms). +@durations: +[0] 16838 |@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@| +[1] 882 |@@ | +[2, 4) 236 | | +[4, 8) 23 | | +[8, 16) 9 | | +[16, 32) 9 | | +[32, 64) 4 | | +``` + +### log_utxocache_flush.py + +A BCC Python script to log the UTXO cache flushes. Based on the +`utxocache:flush` tracepoint. + +```bash +$ python3 contrib/tracing/log_utxocache_flush.py ./build/src/bitcoind +``` + +``` +Logging utxocache flushes. Ctrl-C to end... +Duration (µs) Mode Coins Count Memory Usage Prune +730451 IF_NEEDED 22990 3323.54 kB True +637657 ALWAYS 122320 17124.80 kB False +81349 ALWAYS 0 1383.49 kB False +``` + +### log_utxos.bt + +A `bpftrace` script to log information about the coins that are added, spent, or +uncached from the UTXO set. Based on the `utxocache:add`, `utxocache:spend` and +`utxocache:uncache` tracepoints. + +```bash +$ bpftrace contrib/tracing/log_utxos.bt +``` + +This should produce an output similar to the following. If you see bpftrace +warnings like `Lost 24 events`, the eBPF perf ring-buffer is filled faster +than it is being read. You can increase the ring-buffer size by setting the +ENV variable `BPFTRACE_PERF_RB_PAGES` (default 64) at a cost of higher +memory usage. See the [bpftrace reference guide] for more information. 
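For example, one way to enlarge the buffer for a single run is to set the variable inline, as in the sketch below. The value of 256 pages (roughly 1 MB per CPU) is an arbitrary illustration rather than a recommendation; tune it to your event rate and available memory.

```bash
# Hypothetical invocation: raise bpftrace's per-CPU perf ring buffer from the
# default of 64 pages to 256 pages for this run only, then attach the probes.
BPFTRACE_PERF_RB_PAGES=256 bpftrace contrib/tracing/log_utxos.bt
```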
+ +[bpftrace reference guide]: https://github.com/iovisor/bpftrace/blob/master/docs/reference_guide.md#98-bpftrace_perf_rb_pages + +```bash +Attaching 4 probes... +OP Outpoint Value Height Coinbase +Added 6ba9ad857e1ef2eb2a2c94f06813c414c7ab273e3d6bd7ad64e000315a887e7c:1 10000 2094512 No +Spent fa7dc4db56637a151f6649d8f26732956d1c5424c82aae400a83d02b2cc2c87b:0 182264897 2094512 No +Added eeb2f099b1af6a2a12e6ddd2eeb16fc5968582241d7f08ba202d28b60ac264c7:0 10000 2094512 No +Added eeb2f099b1af6a2a12e6ddd2eeb16fc5968582241d7f08ba202d28b60ac264c7:1 182254756 2094512 No +Added a0c7f4ec9cccef2d89672a624a4e6c8237a17572efdd4679eea9e9ee70d2db04:0 10072679 2094513 Yes +Spent 25e0df5cc1aeb1b78e6056bf403e5e8b7e41f138060ca0a50a50134df0549a5e:2 540 2094508 No +Spent 42f383c04e09c26a2378272ec33aa0c1bf4883ca5ab739e8b7e06be5a5787d61:1 3848399 2007724 No +Added f85e3b4b89270863a389395cc9a4123e417ab19384cef96533c6649abd6b0561:0 3788399 2094513 No +Added f85e3b4b89270863a389395cc9a4123e417ab19384cef96533c6649abd6b0561:2 540 2094513 No +Spent a05880b8c77971ed0b9f73062c7c4cdb0ff3856ab14cbf8bc481ed571cd34b83:1 5591281046 2094511 No +Added eb689865f7d957938978d6207918748f74e6aa074f47874724327089445b0960:0 5589696005 2094513 No +Added eb689865f7d957938978d6207918748f74e6aa074f47874724327089445b0960:1 1565556 2094513 No +``` + +### mempool_monitor.py + +A BCC Python script producing mempool statistics and an event log. Based on the +`mempool:added`, `mempool:removed`, `mempool:replaced`, and `mempool:rejected` +tracepoints. + +Statistics include incidence and rate for each event type since the script was +started (`total`) as well as during the last minute (`1 min`) and ten minutes +(`10 min`). The event log shows mempool events in real time, each entry +comprising a timestamp along with all event data available via the event's +tracepoint. + +```console +$ python3 contrib/tracing/mempool_monitor.py ./build/src/bitcoind +``` + +``` + Mempool Monitor + Press CTRL-C to stop. 
+ + ┌─Event count───────────────────────┐ ┌─Event rate──────────────────────────┐ + │ Event total 1 min 10 min │ │ Event total 1 min 10 min │ + │ added 1425tx 201tx 1425tx │ │ added 4.7tx/s 3.4tx/s 4.7tx/s │ + │ removed 35tx 4tx 35tx │ │ removed 0.1tx/s 0.1tx/s 0.1tx/s │ + │ replaced 35tx 4tx 35tx │ │ replaced 0.1tx/s 0.1tx/s 0.1tx/s │ + │ rejected 0tx 0tx 0tx │ │ rejected 0.0tx/s 0.0tx/s 0.0tx/s │ + └───────────────────────────────────┘ └─────────────────────────────────────┘ + + ┌─Event log────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┐ + │ 13:10:30Z added f9064ca5bfc87cdd191faa42bf697217cd920b2b94838c1f1192e4f06c4fd217 with feerate 8.92 sat/vB (981 sat, 110 vbytes) │ + │ 13:10:30Z added 53ffa3afbe57b1bfe423e1755ca2b52c5b6cb4aa91b8b7ee9cb694953f47f234 with feerate 5.00 sat/vB (550 sat, 110 vbytes) │ + │ 13:10:30Z added 4177df5e19465eb5e53c3f8b6830a293f57474921bc6c2ae89375e0986e1f0f9 with feerate 2.98 sat/vB (429 sat, 144 vbytes) │ + │ 13:10:30Z added 931a10d83f0a268768da75dc4b9e199f2f055f12979ae5491cc304ee10f890ea with feerate 3.55 sat/vB (500 sat, 141 vbytes) │ + │ 13:10:30Z added 4cf32b295723cc4ab73f2a2e51d4bb276c0042760a4c00a3eb9595b8ebb24721 with feerate 89.21 sat/vB (12668 sat, 142 vbytes) │ + │ 13:10:31Z replaced d1eecf9d662121322f4f31f0c2267a752d14bb3956e6016ba96e87f47890e1db with feerate 27.12 sat/vB received 23.3 seconds ago (7213 sat, 266 vbytes) with c412db908│ + │ 9b7ed53f3e5e36d2819dd291278b59ccaabaeb17fd37c3d87fdcd57 with feerate 28.12 sat/vB (8351 sat, 297 vbytes) │ + │ 13:10:31Z added c412db9089b7ed53f3e5e36d2819dd291278b59ccaabaeb17fd37c3d87fdcd57 with feerate 28.12 sat/vB (8351 sat, 297 vbytes) │ + │ 13:10:31Z added b8388a5bdc421b11460bdf477d5a85a1a39c2784e7dd7bffabe688740424ea57 with feerate 25.21 sat/vB (3554 sat, 141 vbytes) │ + │ 13:10:31Z added 4ddb88bc90a122cd9eae8a664e73bdf5bebe75f3ef901241b4a251245854a98e with feerate 24.15 sat/vB (5072 sat, 210 vbytes) │ + │ 13:10:31Z added 19101e4161bca5271ad5d03e7747f2faec7793b274dc2f3c4cf516b7cef1aac3 with feerate 7.06 sat/vB (1080 sat, 153 vbytes) │ + │ 13:10:31Z removed d1eecf9d662121322f4f31f0c2267a752d14bb3956e6016ba96e87f47890e1db with feerate 27.12 sat/vB (7213 sat, 266 vbytes): replaced │ + │ 13:10:31Z added 6c511c60d9b95b9eff81df6ecba5c86780f513fe62ce3ad6be2c5340d957025a with feerate 4.00 sat/vB (440 sat, 110 vbytes) │ + │ 13:10:31Z added 44d66f7f004bd52c46be4dff3067cab700e51c7866a84282bd8aab560a5bfb79 with feerate 3.15 sat/vB (448 sat, 142 vbytes) │ + │ 13:10:31Z added b17b7c9ec5acfbbf12f0eeef8e29826fad3105bb95eef7a47d2f1f22b4784643 with feerate 4.10 sat/vB (1348 sat, 329 vbytes) │ + │ 13:10:31Z added b7a4ad93554e57454e8a8049bfc0bd803fa962bd3f0a08926aa72e7cb23e2276 with feerate 1.01 sat/vB (205 sat, 202 vbytes) │ + │ 13:10:32Z added c78e87be86c828137a6e7e00a177c03b52202ce4c39029b99904c2a094b9da87 with feerate 11.00 sat/vB (1562 sat, 142 vbytes) │ + │ │ + └──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘ +``` diff --git a/contrib/tracing/connectblock_benchmark.bt b/contrib/tracing/connectblock_benchmark.bt new file mode 100755 index 0000000000000..de112af639cba --- /dev/null +++ b/contrib/tracing/connectblock_benchmark.bt @@ -0,0 +1,156 @@ +#!/usr/bin/env bpftrace + +/* + + USAGE: + + bpftrace contrib/tracing/connectblock_benchmark.bt + + - sets the height at which the 
benchmark should start. Setting + the start height to 0 starts the benchmark immediately, even before the + first block is connected. + - sets the height after which the benchmark should end. Setting + the end height to 0 disables the benchmark. The script only logs blocks + over . + - Threshold + + This script requires a 'bitcoind' binary compiled with eBPF support and the + 'validation:block_connected' USDT. By default, it's assumed that 'bitcoind' is + located in './build/src/bitcoind'. This can be modified in the script below. + + EXAMPLES: + + bpftrace contrib/tracing/connectblock_benchmark.bt 300000 680000 1000 + + When run together 'bitcoind -reindex', this benchmarks the time it takes to + connect the blocks between height 300.000 and 680.000 (inclusive) and prints + details about all blocks that take longer than 1000ms to connect. Prints a + histogram with block connection times when the benchmark is finished. + + + bpftrace contrib/tracing/connectblock_benchmark.bt 0 0 500 + + When running together 'bitcoind', all newly connected blocks that + take longer than 500ms to connect are logged. A histogram with block + connection times is shown when the script is terminated. + +*/ + +BEGIN +{ + $start_height = $1; + $end_height = $2; + $logging_threshold_ms = $3; + + if ($end_height < $start_height) { + printf("Error: start height (%d) larger than end height (%d)!\n", $start_height, $end_height); + exit(); + } + + if ($end_height > 0) { + printf("ConnectBlock benchmark between height %d and %d inclusive\n", $start_height, $end_height); + } else { + printf("ConnectBlock logging starting at height %d\n", $start_height); + } + + if ($logging_threshold_ms > 0) { + printf("Logging blocks taking longer than %d ms to connect.\n", $3); + } + + if ($start_height == 0) { + @start = nsecs; + } +} + +/* + Attaches to the 'validation:block_connected' USDT and collects stats when the + connected block is between the start and end height (or the end height is + unset). +*/ +usdt:./build/src/bitcoind:validation:block_connected /arg1 >= $1 && (arg1 <= $2 || $2 == 0 )/ +{ + $height = arg1; + $transactions = arg2; + $inputs = arg3; + $sigops = arg4; + $duration = (uint64) arg5; + + @height = $height; + + @blocks = @blocks + 1; + @transactions = @transactions + $transactions; + @inputs = @inputs + $inputs; + @sigops = @sigops + $sigops; + + @durations = hist($duration / 1e6); + + if ($height == $1 && $height != 0) { + @start = nsecs; + printf("Starting Connect Block Benchmark between height %d and %d.\n", $1, $2); + } + + if ($2 > 0 && $height >= $2) { + @end = nsecs; + $duration = @end - @start; + printf("\nTook %d ms to connect the blocks between height %d and %d.\n", $duration / 1e9, $1, $2); + exit(); + } +} + +/* + Attaches to the 'validation:block_connected' USDT and logs information about + blocks where the time it took to connect the block is above the + . 
+*/ +usdt:./build/src/bitcoind:validation:block_connected / (uint64) arg5 / 1e6 > $3 / +{ + $hash = arg0; + $height = (int32) arg1; + $transactions = (uint64) arg2; + $inputs = (int32) arg3; + $sigops = (int64) arg4; + $duration = (int64) arg5; + + + printf("Block %d (", $height); + /* Prints each byte of the block hash as hex in big-endian (the block-explorer format) */ + $p = $hash + 31; + unroll(32) { + $b = *(uint8*)$p; + printf("%02x", $b); + $p -= 1; + } + printf(") %4d tx %5d ins %5d sigops took %4d ms\n", $transactions, $inputs, $sigops, (uint64) $duration / 1e6); +} + + +/* + Prints stats about the blocks, transactions, inputs, and sigops processed in + the last second (if any). +*/ +interval:s:1 { + if (@blocks > 0) { + printf("BENCH %4d blk/s %6d tx/s %7d inputs/s %8d sigops/s (height %d)\n", @blocks, @transactions, @inputs, @sigops, @height); + + zero(@blocks); + zero(@transactions); + zero(@inputs); + zero(@sigops); + } +} + +END +{ + printf("\nHistogram of block connection times in milliseconds (ms).\n"); + print(@durations); + + clear(@durations); + clear(@blocks); + clear(@transactions); + clear(@inputs); + clear(@sigops); + clear(@height); + clear(@start); + clear(@end); +} + diff --git a/contrib/tracing/log_p2p_traffic.bt b/contrib/tracing/log_p2p_traffic.bt new file mode 100755 index 0000000000000..89e5b777be98d --- /dev/null +++ b/contrib/tracing/log_p2p_traffic.bt @@ -0,0 +1,28 @@ +#!/usr/bin/env bpftrace + +BEGIN +{ + printf("Logging P2P traffic\n") +} + +usdt:./build/src/bitcoind:net:inbound_message +{ + $peer_id = (int64) arg0; + $peer_addr = str(arg1); + $peer_type = str(arg2); + $msg_type = str(arg3); + $msg_len = arg4; + printf("inbound '%s' msg from peer %d (%s, %s) with %d bytes\n", $msg_type, $peer_id, $peer_type, $peer_addr, $msg_len); +} + +usdt:./build/src/bitcoind:net:outbound_message +{ + $peer_id = (int64) arg0; + $peer_addr = str(arg1); + $peer_type = str(arg2); + $msg_type = str(arg3); + $msg_len = arg4; + + printf("outbound '%s' msg to peer %d (%s, %s) with %d bytes\n", $msg_type, $peer_id, $peer_type, $peer_addr, $msg_len); +} + diff --git a/contrib/tracing/log_raw_p2p_msgs.py b/contrib/tracing/log_raw_p2p_msgs.py new file mode 100755 index 0000000000000..c0ab70410622b --- /dev/null +++ b/contrib/tracing/log_raw_p2p_msgs.py @@ -0,0 +1,183 @@ +#!/usr/bin/env python3 +# Copyright (c) 2021 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. + +""" Demonstration of eBPF limitations and the effect on USDT with the + net:inbound_message and net:outbound_message tracepoints. """ + +# This script shows a limitation of eBPF when data larger than 32kb is passed to +# user-space. It uses BCC (https://github.com/iovisor/bcc) to load a sandboxed +# eBPF program into the Linux kernel (root privileges are required). The eBPF +# program attaches to two statically defined tracepoints. The tracepoint +# 'net:inbound_message' is called when a new P2P message is received, and +# 'net:outbound_message' is called on outbound P2P messages. The eBPF program +# submits the P2P messages to this script via a BPF ring buffer. The submitted +# messages are printed. + +# eBPF Limitations: +# +# Bitcoin P2P messages can be larger than 32kb (e.g. tx, block, ...). The eBPF +# VM's stack is limited to 512 bytes, and we can't allocate more than about 32kb +# for a P2P message in the eBPF VM. 
The message data is cut off when the message +# is larger than MAX_MSG_DATA_LENGTH (see definition below). This can be detected +# in user-space by comparing the data length to the message length variable. The +# message is cut off when the data length is smaller than the message length. +# A warning is included with the printed message data. +# +# Data is submitted to user-space (i.e. to this script) via a ring buffer. The +# throughput of the ring buffer is limited. Each p2p_message is about 32kb in +# size. In- or outbound messages submitted to the ring buffer in rapid +# succession fill the ring buffer faster than it can be read. Some messages are +# lost. +# +# BCC prints: "Possibly lost 2 samples" on lost messages. + +import sys +from bcc import BPF, USDT + +# BCC: The C program to be compiled to an eBPF program (by BCC) and loaded into +# a sandboxed Linux kernel VM. +program = """ +#include + +#define MIN(a,b) ({ __typeof__ (a) _a = (a); __typeof__ (b) _b = (b); _a < _b ? _a : _b; }) + +// Maximum possible allocation size +// from include/linux/percpu.h in the Linux kernel +#define PCPU_MIN_UNIT_SIZE (32 << 10) + +// Tor v3 addresses are 62 chars + 6 chars for the port (':12345'). +#define MAX_PEER_ADDR_LENGTH 62 + 6 +#define MAX_PEER_CONN_TYPE_LENGTH 20 +#define MAX_MSG_TYPE_LENGTH 20 +#define MAX_MSG_DATA_LENGTH PCPU_MIN_UNIT_SIZE - 200 + +struct p2p_message +{ + u64 peer_id; + char peer_addr[MAX_PEER_ADDR_LENGTH]; + char peer_conn_type[MAX_PEER_CONN_TYPE_LENGTH]; + char msg_type[MAX_MSG_TYPE_LENGTH]; + u64 msg_size; + u8 msg[MAX_MSG_DATA_LENGTH]; +}; + +// We can't store the p2p_message struct on the eBPF stack as it is limited to +// 512 bytes and P2P message can be bigger than 512 bytes. However, we can use +// an BPF-array with a length of 1 to allocate up to 32768 bytes (this is +// defined by PCPU_MIN_UNIT_SIZE in include/linux/percpu.h in the Linux kernel). +// Also see https://github.com/iovisor/bcc/issues/2306 +BPF_ARRAY(msg_arr, struct p2p_message, 1); + +// Two BPF perf buffers for pushing data (here P2P messages) to user-space. +BPF_PERF_OUTPUT(inbound_messages); +BPF_PERF_OUTPUT(outbound_messages); + +int trace_inbound_message(struct pt_regs *ctx) { + int idx = 0; + struct p2p_message *msg = msg_arr.lookup(&idx); + + // lookup() does not return a NULL pointer. However, the BPF verifier + // requires an explicit check that that the `msg` pointer isn't a NULL + // pointer. See https://github.com/iovisor/bcc/issues/2595 + if (msg == NULL) return 1; + + bpf_usdt_readarg(1, ctx, &msg->peer_id); + bpf_usdt_readarg_p(2, ctx, &msg->peer_addr, MAX_PEER_ADDR_LENGTH); + bpf_usdt_readarg_p(3, ctx, &msg->peer_conn_type, MAX_PEER_CONN_TYPE_LENGTH); + bpf_usdt_readarg_p(4, ctx, &msg->msg_type, MAX_MSG_TYPE_LENGTH); + bpf_usdt_readarg(5, ctx, &msg->msg_size); + bpf_usdt_readarg_p(6, ctx, &msg->msg, MIN(msg->msg_size, MAX_MSG_DATA_LENGTH)); + + inbound_messages.perf_submit(ctx, msg, sizeof(*msg)); + return 0; +}; + +int trace_outbound_message(struct pt_regs *ctx) { + int idx = 0; + struct p2p_message *msg = msg_arr.lookup(&idx); + + // lookup() does not return a NULL pointer. However, the BPF verifier + // requires an explicit check that that the `msg` pointer isn't a NULL + // pointer. 
See https://github.com/iovisor/bcc/issues/2595 + if (msg == NULL) return 1; + + bpf_usdt_readarg(1, ctx, &msg->peer_id); + bpf_usdt_readarg_p(2, ctx, &msg->peer_addr, MAX_PEER_ADDR_LENGTH); + bpf_usdt_readarg_p(3, ctx, &msg->peer_conn_type, MAX_PEER_CONN_TYPE_LENGTH); + bpf_usdt_readarg_p(4, ctx, &msg->msg_type, MAX_MSG_TYPE_LENGTH); + bpf_usdt_readarg(5, ctx, &msg->msg_size); + bpf_usdt_readarg_p(6, ctx, &msg->msg, MIN(msg->msg_size, MAX_MSG_DATA_LENGTH)); + + outbound_messages.perf_submit(ctx, msg, sizeof(*msg)); + return 0; +}; +""" + + +def print_message(event, inbound): + print(f"%s %s msg '%s' from peer %d (%s, %s) with %d bytes: %s" % + ( + f"Warning: incomplete message (only %d out of %d bytes)!" % ( + len(event.msg), event.msg_size) if len(event.msg) < event.msg_size else "", + "inbound" if inbound else "outbound", + event.msg_type.decode("utf-8"), + event.peer_id, + event.peer_conn_type.decode("utf-8"), + event.peer_addr.decode("utf-8"), + event.msg_size, + bytes(event.msg[:event.msg_size]).hex(), + ) + ) + + +def main(bitcoind_path): + bitcoind_with_usdts = USDT(path=str(bitcoind_path)) + + # attaching the trace functions defined in the BPF program to the tracepoints + bitcoind_with_usdts.enable_probe( + probe="inbound_message", fn_name="trace_inbound_message") + bitcoind_with_usdts.enable_probe( + probe="outbound_message", fn_name="trace_outbound_message") + bpf = BPF(text=program, usdt_contexts=[bitcoind_with_usdts]) + + # BCC: perf buffer handle function for inbound_messages + def handle_inbound(_, data, size): + """ Inbound message handler. + + Called each time a message is submitted to the inbound_messages BPF table.""" + + event = bpf["inbound_messages"].event(data) + print_message(event, True) + + # BCC: perf buffer handle function for outbound_messages + + def handle_outbound(_, data, size): + """ Outbound message handler. + + Called each time a message is submitted to the outbound_messages BPF table.""" + + event = bpf["outbound_messages"].event(data) + print_message(event, False) + + # BCC: add handlers to the inbound and outbound perf buffers + bpf["inbound_messages"].open_perf_buffer(handle_inbound) + bpf["outbound_messages"].open_perf_buffer(handle_outbound) + + print("Logging raw P2P messages.") + print("Messages larger that about 32kb will be cut off!") + print("Some messages might be lost!") + while True: + try: + bpf.perf_buffer_poll() + except KeyboardInterrupt: + exit() + + +if __name__ == "__main__": + if len(sys.argv) < 2: + print("USAGE:", sys.argv[0], "path/to/bitcoind") + exit() + path = sys.argv[1] + main(path) diff --git a/contrib/tracing/log_utxocache_flush.py b/contrib/tracing/log_utxocache_flush.py new file mode 100755 index 0000000000000..6c568998e9923 --- /dev/null +++ b/contrib/tracing/log_utxocache_flush.py @@ -0,0 +1,107 @@ +#!/usr/bin/env python3 +# Copyright (c) 2021-2022 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. + +import sys +import ctypes +from bcc import BPF, USDT + +"""Example logging Bitcoin Core utxo set cache flushes utilizing + the utxocache:flush tracepoint.""" + +# USAGE: ./contrib/tracing/log_utxocache_flush.py path/to/bitcoind + +# BCC: The C program to be compiled to an eBPF program (by BCC) and loaded into +# a sandboxed Linux kernel VM. 
+program = """ +# include + +struct data_t +{ + u64 duration; + u32 mode; + u64 coins_count; + u64 coins_mem_usage; + bool is_flush_for_prune; +}; + +// BPF perf buffer to push the data to user space. +BPF_PERF_OUTPUT(flush); + +int trace_flush(struct pt_regs *ctx) { + struct data_t data = {}; + bpf_usdt_readarg(1, ctx, &data.duration); + bpf_usdt_readarg(2, ctx, &data.mode); + bpf_usdt_readarg(3, ctx, &data.coins_count); + bpf_usdt_readarg(4, ctx, &data.coins_mem_usage); + bpf_usdt_readarg(5, ctx, &data.is_flush_for_prune); + flush.perf_submit(ctx, &data, sizeof(data)); + return 0; +} +""" + +FLUSH_MODES = [ + 'NONE', + 'IF_NEEDED', + 'PERIODIC', + 'ALWAYS' +] + + +class Data(ctypes.Structure): + # define output data structure corresponding to struct data_t + _fields_ = [ + ("duration", ctypes.c_uint64), + ("mode", ctypes.c_uint32), + ("coins_count", ctypes.c_uint64), + ("coins_mem_usage", ctypes.c_uint64), + ("is_flush_for_prune", ctypes.c_bool) + ] + + +def print_event(event): + print("%-15d %-10s %-15d %-15s %-8s" % ( + event.duration, + FLUSH_MODES[event.mode], + event.coins_count, + "%.2f kB" % (event.coins_mem_usage/1000), + event.is_flush_for_prune + )) + + +def main(bitcoind_path): + bitcoind_with_usdts = USDT(path=str(bitcoind_path)) + + # attaching the trace functions defined in the BPF program + # to the tracepoints + bitcoind_with_usdts.enable_probe( + probe="flush", fn_name="trace_flush") + b = BPF(text=program, usdt_contexts=[bitcoind_with_usdts]) + + def handle_flush(_, data, size): + """ Coins Flush handler. + Called each time coin caches and indexes are flushed.""" + event = ctypes.cast(data, ctypes.POINTER(Data)).contents + print_event(event) + + b["flush"].open_perf_buffer(handle_flush) + print("Logging utxocache flushes. Ctrl-C to end...") + print("%-15s %-10s %-15s %-15s %-8s" % ("Duration (µs)", "Mode", + "Coins Count", "Memory Usage", + "Flush for Prune")) + + while True: + try: + b.perf_buffer_poll() + except KeyboardInterrupt: + exit(0) + + +if __name__ == "__main__": + if len(sys.argv) < 2: + print("USAGE: ", sys.argv[0], "path/to/bitcoind") + exit(1) + + path = sys.argv[1] + main(path) diff --git a/contrib/tracing/log_utxos.bt b/contrib/tracing/log_utxos.bt new file mode 100755 index 0000000000000..a04f22115791a --- /dev/null +++ b/contrib/tracing/log_utxos.bt @@ -0,0 +1,86 @@ +#!/usr/bin/env bpftrace + +/* + + USAGE: + + bpftrace contrib/tracing/log_utxos.bt + + This script requires a 'bitcoind' binary compiled with eBPF support and the + 'utxocache' tracepoints. By default, it's assumed that 'bitcoind' is + located in './build/src/bitcoind'. This can be modified in the script below. + + NOTE: requires bpftrace v0.12.0 or above. +*/ + +BEGIN +{ + printf("%-7s %-71s %16s %7s %8s\n", + "OP", "Outpoint", "Value", "Height", "Coinbase"); +} + +/* + Attaches to the 'utxocache:add' tracepoint and prints additions to the UTXO set cache. +*/ +usdt:./build/src/bitcoind:utxocache:add +{ + $txid = arg0; + $index = (uint32)arg1; + $height = (uint32)arg2; + $value = (int64)arg3; + $isCoinbase = arg4; + + printf("Added "); + $p = $txid + 31; + unroll(32) { + $b = *(uint8*)$p; + printf("%02x", $b); + $p-=1; + } + + printf(":%-6d %16ld %7d %s\n", $index, $value, $height, ($isCoinbase ? "Yes" : "No" )); +} + +/* + Attaches to the 'utxocache:spent' tracepoint and prints spents from the UTXO set cache. 
+*/ +usdt:./build/src/bitcoind:utxocache:spent +{ + $txid = arg0; + $index = (uint32)arg1; + $height = (uint32)arg2; + $value = (int64)arg3; + $isCoinbase = arg4; + + printf("Spent "); + $p = $txid + 31; + unroll(32) { + $b = *(uint8*)$p; + printf("%02x", $b); + $p-=1; + } + + printf(":%-6d %16ld %7d %s\n", $index, $value, $height, ($isCoinbase ? "Yes" : "No" )); +} + +/* + Attaches to the 'utxocache:uncache' tracepoint and uncache UTXOs from the UTXO set cache. +*/ +usdt:./build/src/bitcoind:utxocache:uncache +{ + $txid = arg0; + $index = (uint32)arg1; + $height = (uint32)arg2; + $value = (int64)arg3; + $isCoinbase = arg4; + + printf("Uncache "); + $p = $txid + 31; + unroll(32) { + $b = *(uint8*)$p; + printf("%02x", $b); + $p-=1; + } + + printf(":%-6d %16ld %7d %s\n", $index, $value, $height, ($isCoinbase ? "Yes" : "No" )); +} diff --git a/contrib/tracing/mempool_monitor.py b/contrib/tracing/mempool_monitor.py new file mode 100755 index 0000000000000..afb5e603722b1 --- /dev/null +++ b/contrib/tracing/mempool_monitor.py @@ -0,0 +1,372 @@ +#!/usr/bin/env python3 +# Copyright (c) 2022 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. + +""" Example logging Bitcoin Core mempool events using the mempool:added, + mempool:removed, mempool:replaced, and mempool:rejected tracepoints. """ + +import curses +import sys +from datetime import datetime, timezone + +from bcc import BPF, USDT + +# BCC: The C program to be compiled to an eBPF program (by BCC) and loaded into +# a sandboxed Linux kernel VM. +PROGRAM = """ +# include + +// The longest rejection reason is 118 chars and is generated in case of SCRIPT_ERR_EVAL_FALSE by +// strprintf("mandatory-script-verify-flag-failed (%s)", ScriptErrorString(check.GetScriptError())) +#define MAX_REJECT_REASON_LENGTH 118 +// The longest string returned by RemovalReasonToString() is 'sizelimit' +#define MAX_REMOVAL_REASON_LENGTH 9 +#define HASH_LENGTH 32 + +struct added_event +{ + u8 hash[HASH_LENGTH]; + s32 vsize; + s64 fee; +}; + +struct removed_event +{ + u8 hash[HASH_LENGTH]; + char reason[MAX_REMOVAL_REASON_LENGTH]; + s32 vsize; + s64 fee; + u64 entry_time; +}; + +struct rejected_event +{ + u8 hash[HASH_LENGTH]; + char reason[MAX_REJECT_REASON_LENGTH]; +}; + +struct replaced_event +{ + u8 replaced_hash[HASH_LENGTH]; + s32 replaced_vsize; + s64 replaced_fee; + u64 replaced_entry_time; + u8 replacement_hash[HASH_LENGTH]; + s32 replacement_vsize; + s64 replacement_fee; +}; + +// BPF perf buffer to push the data to user space. 
+BPF_PERF_OUTPUT(added_events); +BPF_PERF_OUTPUT(removed_events); +BPF_PERF_OUTPUT(rejected_events); +BPF_PERF_OUTPUT(replaced_events); + +int trace_added(struct pt_regs *ctx) { + struct added_event added = {}; + + bpf_usdt_readarg_p(1, ctx, &added.hash, HASH_LENGTH); + bpf_usdt_readarg(2, ctx, &added.vsize); + bpf_usdt_readarg(3, ctx, &added.fee); + + added_events.perf_submit(ctx, &added, sizeof(added)); + return 0; +} + +int trace_removed(struct pt_regs *ctx) { + struct removed_event removed = {}; + + bpf_usdt_readarg_p(1, ctx, &removed.hash, HASH_LENGTH); + bpf_usdt_readarg_p(2, ctx, &removed.reason, MAX_REMOVAL_REASON_LENGTH); + bpf_usdt_readarg(3, ctx, &removed.vsize); + bpf_usdt_readarg(4, ctx, &removed.fee); + bpf_usdt_readarg(5, ctx, &removed.entry_time); + + removed_events.perf_submit(ctx, &removed, sizeof(removed)); + return 0; +} + +int trace_rejected(struct pt_regs *ctx) { + struct rejected_event rejected = {}; + + bpf_usdt_readarg_p(1, ctx, &rejected.hash, HASH_LENGTH); + bpf_usdt_readarg_p(2, ctx, &rejected.reason, MAX_REJECT_REASON_LENGTH); + + rejected_events.perf_submit(ctx, &rejected, sizeof(rejected)); + return 0; +} + +int trace_replaced(struct pt_regs *ctx) { + struct replaced_event replaced = {}; + + bpf_usdt_readarg_p(1, ctx, &replaced.replaced_hash, HASH_LENGTH); + bpf_usdt_readarg(2, ctx, &replaced.replaced_vsize); + bpf_usdt_readarg(3, ctx, &replaced.replaced_fee); + bpf_usdt_readarg(4, ctx, &replaced.replaced_entry_time); + bpf_usdt_readarg_p(5, ctx, &replaced.replacement_hash, HASH_LENGTH); + bpf_usdt_readarg(6, ctx, &replaced.replacement_vsize); + bpf_usdt_readarg(7, ctx, &replaced.replacement_fee); + + replaced_events.perf_submit(ctx, &replaced, sizeof(replaced)); + return 0; +} +""" + + +def main(bitcoind_path): + bitcoind_with_usdts = USDT(path=str(bitcoind_path)) + + # attaching the trace functions defined in the BPF program + # to the tracepoints + bitcoind_with_usdts.enable_probe(probe="mempool:added", fn_name="trace_added") + bitcoind_with_usdts.enable_probe(probe="mempool:removed", fn_name="trace_removed") + bitcoind_with_usdts.enable_probe(probe="mempool:replaced", fn_name="trace_replaced") + bitcoind_with_usdts.enable_probe(probe="mempool:rejected", fn_name="trace_rejected") + bpf = BPF(text=PROGRAM, usdt_contexts=[bitcoind_with_usdts]) + + events = [] + + def get_timestamp(): + return datetime.now(timezone.utc) + + def handle_added(_, data, size): + event = bpf["added_events"].event(data) + events.append((get_timestamp(), "added", event)) + + def handle_removed(_, data, size): + event = bpf["removed_events"].event(data) + events.append((get_timestamp(), "removed", event)) + + def handle_rejected(_, data, size): + event = bpf["rejected_events"].event(data) + events.append((get_timestamp(), "rejected", event)) + + def handle_replaced(_, data, size): + event = bpf["replaced_events"].event(data) + events.append((get_timestamp(), "replaced", event)) + + bpf["added_events"].open_perf_buffer(handle_added) + # By default, open_perf_buffer uses eight pages for a buffer, making for a total + # buffer size of 32k on most machines. In practice, this size is insufficient: + # Each `mempool:removed` event takes up 57 bytes in the buffer (32 bytes for txid, + # 9 bytes for removal reason, and 8 bytes each for vsize and fee). Full blocks + # contain around 2k transactions, requiring a buffer size of around 114kB. To cover + # this amount, 32 4k pages are required. 
+ bpf["removed_events"].open_perf_buffer(handle_removed, page_cnt=32) + bpf["rejected_events"].open_perf_buffer(handle_rejected) + bpf["replaced_events"].open_perf_buffer(handle_replaced) + + curses.wrapper(loop, bpf, events) + + +def loop(screen, bpf, events): + dashboard = Dashboard(screen) + while True: + try: + bpf.perf_buffer_poll(timeout=50) + dashboard.render(events) + except KeyboardInterrupt: + exit() + + +class Dashboard: + """Visualization of mempool state using ncurses.""" + + INFO_WIN_HEIGHT = 2 + EVENT_WIN_HEIGHT = 7 + + def __init__(self, screen): + screen.nodelay(True) + curses.curs_set(False) + self._screen = screen + self._time_started = datetime.now(timezone.utc) + self._timestamps = {"added": [], "removed": [], "rejected": [], "replaced": []} + self._event_history = {"added": 0, "removed": 0, "rejected": 0, "replaced": 0} + self._init_windows() + + def _init_windows(self): + """Initialize all windows.""" + self._init_info_win() + self._init_event_count_win() + self._init_event_rate_win() + self._init_event_log_win() + + @staticmethod + def create_win(x, y, height, width, title=None): + """Helper function to create generic windows and decorate them with box and title if requested.""" + win = curses.newwin(height, width, x, y) + if title: + win.box() + win.addstr(0, 2, title, curses.A_BOLD) + return win + + def _init_info_win(self): + """Create and populate the info window.""" + self._info_win = Dashboard.create_win( + x=0, y=1, height=Dashboard.INFO_WIN_HEIGHT, width=22 + ) + self._info_win.addstr(0, 0, "Mempool Monitor", curses.A_REVERSE) + self._info_win.addstr(1, 0, "Press CTRL-C to stop.", curses.A_NORMAL) + self._info_win.refresh() + + def _init_event_count_win(self): + """Create and populate the event count window.""" + self._event_count_win = Dashboard.create_win( + x=3, y=1, height=Dashboard.EVENT_WIN_HEIGHT, width=37, title="Event count" + ) + header = " {:<8} {:>8} {:>7} {:>7} " + self._event_count_win.addstr( + 1, 1, header.format("Event", "total", "1 min", "10 min"), curses.A_UNDERLINE + ) + self._event_count_win.refresh() + + def _init_event_rate_win(self): + """Create and populate the event rate window.""" + self._event_rate_win = Dashboard.create_win( + x=3, y=40, height=Dashboard.EVENT_WIN_HEIGHT, width=42, title="Event rate" + ) + header = " {:<8} {:>9} {:>9} {:>9} " + self._event_rate_win.addstr( + 1, 1, header.format("Event", "total", "1 min", "10 min"), curses.A_UNDERLINE + ) + self._event_rate_win.refresh() + + def _init_event_log_win(self): + """Create windows showing event log. 
This comprises a dummy boxed window and an + inset window so line breaks don't overwrite box.""" + # dummy boxed window + num_rows, num_cols = self._screen.getmaxyx() + space_above = Dashboard.INFO_WIN_HEIGHT + 1 + Dashboard.EVENT_WIN_HEIGHT + 1 + box_win_height = num_rows - space_above + box_win_width = num_cols - 2 + win_box = Dashboard.create_win( + x=space_above, + y=1, + height=box_win_height, + width=box_win_width, + title="Event log", + ) + # actual logging window + log_lines = box_win_height - 2 # top and bottom box lines + log_line_len = box_win_width - 2 - 1 # box lines and left padding + win = win_box.derwin(log_lines, log_line_len, 1, 2) + win.idlok(True) + win.scrollok(True) + win_box.refresh() + win.refresh() + self._event_log_win_box = win_box + self._event_log_win = win + + def calculate_metrics(self, events): + """Calculate count and rate metrics.""" + count, rate = {}, {} + for event_ts, event_type, event_data in events: + self._timestamps[event_type].append(event_ts) + for event_type, ts in self._timestamps.items(): + # remove timestamps older than ten minutes but keep track of their + # count for the 'total' metric + # + self._event_history[event_type] += len( + [t for t in ts if Dashboard.timestamp_age(t) >= 600] + ) + ts = [t for t in ts if Dashboard.timestamp_age(t) < 600] + self._timestamps[event_type] = ts + # count metric + count_1m = len([t for t in ts if Dashboard.timestamp_age(t) < 60]) + count_10m = len(ts) + count_total = self._event_history[event_type] + len(ts) + count[event_type] = (count_total, count_1m, count_10m) + # rate metric + runtime = Dashboard.timestamp_age(self._time_started) + rate_1m = count_1m / min(60, runtime) + rate_10m = count_10m / min(600, runtime) + rate_total = count_total / runtime + rate[event_type] = (rate_total, rate_1m, rate_10m) + return count, rate + + def _update_event_count(self, count): + """Update the event count window.""" + w = self._event_count_win + row_format = " {:<8} {:>6}tx {:>5}tx {:>5}tx " + for line, metric in enumerate(["added", "removed", "replaced", "rejected"]): + w.addstr(2 + line, 1, row_format.format(metric, *count[metric])) + w.refresh() + + def _update_event_rate(self, rate): + """Update the event rate window.""" + w = self._event_rate_win + row_format = " {:<8} {:>5.1f}tx/s {:>5.1f}tx/s {:>5.1f}tx/s " + for line, metric in enumerate(["added", "removed", "replaced", "rejected"]): + w.addstr(2 + line, 1, row_format.format(metric, *rate[metric])) + w.refresh() + + def _update_event_log(self, events): + """Update the event log window.""" + w = self._event_log_win + for event in events: + w.addstr(Dashboard.parse_event(event) + "\n") + w.refresh() + + def render(self, events): + """Render the dashboard.""" + count, rate = self.calculate_metrics(events) + self._update_event_count(count) + self._update_event_rate(rate) + self._update_event_log(events) + events.clear() + + @staticmethod + def parse_event(event): + """Converts events into human-readable messages""" + + ts_dt, type_, data = event + ts = ts_dt.strftime("%H:%M:%SZ") + if type_ == "added": + return ( + f"{ts} added {bytes(data.hash)[::-1].hex()}" + f" with feerate {data.fee/data.vsize:.2f} sat/vB" + f" ({data.fee} sat, {data.vsize} vbytes)" + ) + + if type_ == "removed": + return ( + f"{ts} removed {bytes(data.hash)[::-1].hex()}" + f" with feerate {data.fee/data.vsize:.2f} sat/vB" + f" ({data.fee} sat, {data.vsize} vbytes)" + f" received {ts_dt.timestamp()-data.entry_time:.1f} seconds ago" + f": {data.reason.decode('UTF-8')}" + ) + + if type_ == 
"rejected": + return ( + f"{ts} rejected {bytes(data.hash)[::-1].hex()}" + f": {data.reason.decode('UTF-8')}" + ) + + if type_ == "replaced": + return ( + f"{ts} replaced {bytes(data.replaced_hash)[::-1].hex()}" + f" with feerate {data.replaced_fee/data.replaced_vsize:.2f} sat/vB" + f" received {ts_dt.timestamp()-data.replaced_entry_time:.1f} seconds ago" + f" ({data.replaced_fee} sat, {data.replaced_vsize} vbytes)" + f" with {bytes(data.replacement_hash)[::-1].hex()}" + f" with feerate {data.replacement_fee/data.replacement_vsize:.2f} sat/vB" + f" ({data.replacement_fee} sat, {data.replacement_vsize} vbytes)" + ) + + raise NotImplementedError("Unsupported event type: {type_}") + + @staticmethod + def timestamp_age(timestamp): + """Return age of timestamp in seconds.""" + return (datetime.now(timezone.utc) - timestamp).total_seconds() + + +if __name__ == "__main__": + if len(sys.argv) < 2: + print("USAGE: ", sys.argv[0], "path/to/bitcoind") + exit(1) + + path = sys.argv[1] + main(path) diff --git a/contrib/tracing/p2p_monitor.py b/contrib/tracing/p2p_monitor.py new file mode 100755 index 0000000000000..4ff701cac3c8c --- /dev/null +++ b/contrib/tracing/p2p_monitor.py @@ -0,0 +1,253 @@ +#!/usr/bin/env python3 +# Copyright (c) 2021 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. + +""" Interactive bitcoind P2P network traffic monitor utilizing USDT and the + net:inbound_message and net:outbound_message tracepoints. """ + +# This script demonstrates what USDT for Bitcoin Core can enable. It uses BCC +# (https://github.com/iovisor/bcc) to load a sandboxed eBPF program into the +# Linux kernel (root privileges are required). The eBPF program attaches to two +# statically defined tracepoints. The tracepoint 'net:inbound_message' is called +# when a new P2P message is received, and 'net:outbound_message' is called on +# outbound P2P messages. The eBPF program submits the P2P messages to +# this script via a BPF ring buffer. + +import sys +import curses +from curses import wrapper, panel +from bcc import BPF, USDT + +# BCC: The C program to be compiled to an eBPF program (by BCC) and loaded into +# a sandboxed Linux kernel VM. +program = """ +#include + +// Tor v3 addresses are 62 chars + 6 chars for the port (':12345'). +// I2P addresses are 60 chars + 6 chars for the port (':12345'). +#define MAX_PEER_ADDR_LENGTH 62 + 6 +#define MAX_PEER_CONN_TYPE_LENGTH 20 +#define MAX_MSG_TYPE_LENGTH 20 + +struct p2p_message +{ + u64 peer_id; + char peer_addr[MAX_PEER_ADDR_LENGTH]; + char peer_conn_type[MAX_PEER_CONN_TYPE_LENGTH]; + char msg_type[MAX_MSG_TYPE_LENGTH]; + u64 msg_size; +}; + + +// Two BPF perf buffers for pushing data (here P2P messages) to user space. 
+BPF_PERF_OUTPUT(inbound_messages); +BPF_PERF_OUTPUT(outbound_messages); + +int trace_inbound_message(struct pt_regs *ctx) { + struct p2p_message msg = {}; + + bpf_usdt_readarg(1, ctx, &msg.peer_id); + bpf_usdt_readarg_p(2, ctx, &msg.peer_addr, MAX_PEER_ADDR_LENGTH); + bpf_usdt_readarg_p(3, ctx, &msg.peer_conn_type, MAX_PEER_CONN_TYPE_LENGTH); + bpf_usdt_readarg_p(4, ctx, &msg.msg_type, MAX_MSG_TYPE_LENGTH); + bpf_usdt_readarg(5, ctx, &msg.msg_size); + + inbound_messages.perf_submit(ctx, &msg, sizeof(msg)); + return 0; +}; + +int trace_outbound_message(struct pt_regs *ctx) { + struct p2p_message msg = {}; + + bpf_usdt_readarg(1, ctx, &msg.peer_id); + bpf_usdt_readarg_p(2, ctx, &msg.peer_addr, MAX_PEER_ADDR_LENGTH); + bpf_usdt_readarg_p(3, ctx, &msg.peer_conn_type, MAX_PEER_CONN_TYPE_LENGTH); + bpf_usdt_readarg_p(4, ctx, &msg.msg_type, MAX_MSG_TYPE_LENGTH); + bpf_usdt_readarg(5, ctx, &msg.msg_size); + + outbound_messages.perf_submit(ctx, &msg, sizeof(msg)); + return 0; +}; +""" + + +class Message: + """ A P2P network message. """ + msg_type = "" + size = 0 + data = bytes() + inbound = False + + def __init__(self, msg_type, size, inbound): + self.msg_type = msg_type + self.size = size + self.inbound = inbound + + +class Peer: + """ A P2P network peer. """ + id = 0 + address = "" + connection_type = "" + last_messages = list() + + total_inbound_msgs = 0 + total_inbound_bytes = 0 + total_outbound_msgs = 0 + total_outbound_bytes = 0 + + def __init__(self, id, address, connection_type): + self.id = id + self.address = address + self.connection_type = connection_type + self.last_messages = list() + + def add_message(self, message): + self.last_messages.append(message) + if len(self.last_messages) > 25: + self.last_messages.pop(0) + if message.inbound: + self.total_inbound_bytes += message.size + self.total_inbound_msgs += 1 + else: + self.total_outbound_bytes += message.size + self.total_outbound_msgs += 1 + + +def main(bitcoind_path): + peers = dict() + + bitcoind_with_usdts = USDT(path=str(bitcoind_path)) + + # attaching the trace functions defined in the BPF program to the tracepoints + bitcoind_with_usdts.enable_probe( + probe="inbound_message", fn_name="trace_inbound_message") + bitcoind_with_usdts.enable_probe( + probe="outbound_message", fn_name="trace_outbound_message") + bpf = BPF(text=program, usdt_contexts=[bitcoind_with_usdts]) + + # BCC: perf buffer handle function for inbound_messages + def handle_inbound(_, data, size): + """ Inbound message handler. + + Called each time a message is submitted to the inbound_messages BPF table.""" + event = bpf["inbound_messages"].event(data) + if event.peer_id not in peers: + peer = Peer(event.peer_id, event.peer_addr.decode( + "utf-8"), event.peer_conn_type.decode("utf-8")) + peers[peer.id] = peer + peers[event.peer_id].add_message( + Message(event.msg_type.decode("utf-8"), event.msg_size, True)) + + # BCC: perf buffer handle function for outbound_messages + def handle_outbound(_, data, size): + """ Outbound message handler. 
+ + Called each time a message is submitted to the outbound_messages BPF table.""" + event = bpf["outbound_messages"].event(data) + if event.peer_id not in peers: + peer = Peer(event.peer_id, event.peer_addr.decode( + "utf-8"), event.peer_conn_type.decode("utf-8")) + peers[peer.id] = peer + peers[event.peer_id].add_message( + Message(event.msg_type.decode("utf-8"), event.msg_size, False)) + + # BCC: add handlers to the inbound and outbound perf buffers + bpf["inbound_messages"].open_perf_buffer(handle_inbound) + bpf["outbound_messages"].open_perf_buffer(handle_outbound) + + wrapper(loop, bpf, peers) + + +def loop(screen, bpf, peers): + screen.nodelay(1) + cur_list_pos = 0 + win = curses.newwin(30, 70, 2, 7) + win.erase() + win.border(ord("|"), ord("|"), ord("-"), ord("-"), + ord("-"), ord("-"), ord("-"), ord("-")) + info_panel = panel.new_panel(win) + info_panel.hide() + + ROWS_AVALIABLE_FOR_LIST = curses.LINES - 5 + scroll = 0 + + while True: + try: + # BCC: poll the perf buffers for new events or timeout after 50ms + bpf.perf_buffer_poll(timeout=50) + + ch = screen.getch() + if (ch == curses.KEY_DOWN or ch == ord("j")) and cur_list_pos < len( + peers.keys()) -1 and info_panel.hidden(): + cur_list_pos += 1 + if cur_list_pos >= ROWS_AVALIABLE_FOR_LIST: + scroll += 1 + if (ch == curses.KEY_UP or ch == ord("k")) and cur_list_pos > 0 and info_panel.hidden(): + cur_list_pos -= 1 + if scroll > 0: + scroll -= 1 + if ch == ord('\n') or ch == ord(' '): + if info_panel.hidden(): + info_panel.show() + else: + info_panel.hide() + screen.erase() + render(screen, peers, cur_list_pos, scroll, ROWS_AVALIABLE_FOR_LIST, info_panel) + curses.panel.update_panels() + screen.refresh() + except KeyboardInterrupt: + exit() + + +def render(screen, peers, cur_list_pos, scroll, ROWS_AVALIABLE_FOR_LIST, info_panel): + """ renders the list of peers and details panel + + This code is unrelated to USDT, BCC and BPF. 
+ """ + header_format = "%6s %-20s %-20s %-22s %-67s" + row_format = "%6s %-5d %9d byte %-5d %9d byte %-22s %-67s" + + screen.addstr(0, 1, (" P2P Message Monitor "), curses.A_REVERSE) + screen.addstr( + 1, 0, (" Navigate with UP/DOWN or J/K and select a peer with ENTER or SPACE to see individual P2P messages"), curses.A_NORMAL) + screen.addstr(3, 0, + header_format % ("PEER", "OUTBOUND", "INBOUND", "TYPE", "ADDR"), curses.A_BOLD | curses.A_UNDERLINE) + peer_list = sorted(peers.keys())[scroll:ROWS_AVALIABLE_FOR_LIST+scroll] + for i, peer_id in enumerate(peer_list): + peer = peers[peer_id] + screen.addstr(i + 4, 0, + row_format % (peer.id, peer.total_outbound_msgs, peer.total_outbound_bytes, + peer.total_inbound_msgs, peer.total_inbound_bytes, + peer.connection_type, peer.address), + curses.A_REVERSE if i + scroll == cur_list_pos else curses.A_NORMAL) + if i + scroll == cur_list_pos: + info_window = info_panel.window() + info_window.erase() + info_window.border( + ord("|"), ord("|"), ord("-"), ord("-"), + ord("-"), ord("-"), ord("-"), ord("-")) + + info_window.addstr( + 1, 1, f"PEER {peer.id} ({peer.address})".center(68), curses.A_REVERSE | curses.A_BOLD) + info_window.addstr( + 2, 1, f" OUR NODE{peer.connection_type:^54}PEER ", + curses.A_BOLD) + for i, msg in enumerate(peer.last_messages): + if msg.inbound: + info_window.addstr( + i + 3, 1, "%68s" % + (f"<--- {msg.msg_type} ({msg.size} bytes) "), curses.A_NORMAL) + else: + info_window.addstr( + i + 3, 1, " %s (%d byte) --->" % + (msg.msg_type, msg.size), curses.A_NORMAL) + + +if __name__ == "__main__": + if len(sys.argv) < 2: + print("USAGE:", sys.argv[0], "path/to/bitcoind") + exit() + path = sys.argv[1] + main(path) diff --git a/contrib/valgrind.supp b/contrib/valgrind.supp index f232bb62c2c39..1ec5b8d20d4c3 100644 --- a/contrib/valgrind.supp +++ b/contrib/valgrind.supp @@ -1,30 +1,20 @@ -# Valgrind suppressions file for Bitcoin. -# -# Includes known Valgrind warnings in our dependencies that cannot be fixed -# in-tree. +# This valgrind suppressions file includes known Valgrind warnings in our +# dependencies that cannot be fixed in-tree. # # Example use: -# $ valgrind --suppressions=contrib/valgrind.supp src/test/test_bitcoin +# $ valgrind --suppressions=contrib/valgrind.supp build/src/test/test_bitcoin # $ valgrind --suppressions=contrib/valgrind.supp --leak-check=full \ -# --show-leak-kinds=all src/test/test_bitcoin +# --show-leak-kinds=all build/src/test/test_bitcoin # # To create suppressions for found issues, use the --gen-suppressions=all option: # $ valgrind --suppressions=contrib/valgrind.supp --leak-check=full \ # --show-leak-kinds=all --gen-suppressions=all --show-reachable=yes \ -# --error-limit=no src/test/test_bitcoin +# --error-limit=no build/src/test/test_bitcoin # # Note that suppressions may depend on OS and/or library versions. -{ - Suppress libstdc++ warning - https://gcc.gnu.org/bugzilla/show_bug.cgi?id=65434 - Memcheck:Leak - match-leak-kinds: reachable - fun:malloc - obj:*/libstdc++.* - fun:call_init.part.0 - fun:call_init - fun:_dl_init - obj:*/ld-*.so -} +# Tested on: +# * aarch64 (Ubuntu Noble system libs, clang, without gui) +# * x86_64 (Ubuntu Noble system libs, clang, without gui) { Suppress libdb warning - https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=662917 Memcheck:Cond @@ -47,8 +37,7 @@ Suppress libdb warning Memcheck:Param pwrite64(buf) - fun:pwrite - fun:__os_io + ... obj:*/libdb_cxx-*.so } { @@ -64,39 +53,12 @@ ... obj:*/libdb_cxx-*.so } -{ - Suppress leaks on init - Memcheck:Leak - ... 
- fun:_Z11AppInitMainR11NodeContext -} { Suppress leaks on shutdown Memcheck:Leak ... fun:_Z8ShutdownR11NodeContext } -{ - Ignore GUI warning - Memcheck:Leak - ... - obj:/usr/lib64/libgdk-3.so.0.2404.7 -} -{ - Suppress leveldb warning (leveldb::InitModule()) - https://github.com/google/leveldb/issues/113 - Memcheck:Leak - match-leak-kinds: reachable - fun:_Znwm - fun:_ZN7leveldbL10InitModuleEv -} -{ - Suppress leveldb warning (leveldb::Env::Default()) - https://github.com/google/leveldb/issues/113 - Memcheck:Leak - match-leak-kinds: reachable - fun:_Znwm - ... - fun:_ZN7leveldbL14InitDefaultEnvEv -} { Suppress leveldb leak Memcheck:Leak @@ -112,55 +74,6 @@ ... fun:GetCoin } -{ - Suppress wcsnrtombs glibc SSE4 warning (could be related: https://stroika.atlassian.net/browse/STK-626) - Memcheck:Addr16 - fun:__wcsnlen_sse4_1 - fun:wcsnrtombs -} -{ - Suppress wcsnrtombs warning (remove after removing boost::fs) - Memcheck:Cond - ... - fun:_ZN5boost10filesystem6detail11unique_pathERKNS0_4pathEPNS_6system10error_codeE - fun:unique_path -} -{ - Suppress boost warning - Memcheck:Leak - fun:_Znwm - ... - fun:_ZN5boost9unit_test9framework5state17execute_test_treeEmjPKNS2_23random_generator_helperE - fun:_ZN5boost9unit_test9framework3runEmb - fun:_ZN5boost9unit_test14unit_test_mainEPFbvEiPPc - fun:main -} -{ - Suppress boost::filesystem warning (fixed in boost 1.70: https://github.com/boostorg/filesystem/commit/bbe9d1771e5d679b3f10c42a58fc81f7e8c024a9) - Memcheck:Cond - fun:_ZN5boost10filesystem6detail28directory_iterator_incrementERNS0_18directory_iteratorEPNS_6system10error_codeE - ... - obj:*/libboost_filesystem.so.* -} -{ - Suppress boost::filesystem warning (could be related: https://stackoverflow.com/questions/9830182/function-boostfilesystemcomplete-being-reported-as-possible-memory-leak-by-v) - Memcheck:Leak - match-leak-kinds: reachable - fun:_Znwm - ... - fun:_ZN5boost10filesystem8absoluteERKNS0_4pathES3_ -} -{ - Suppress boost still reachable memory warning - Memcheck:Leak - match-leak-kinds: reachable - fun:_Znwm - ... - fun:_M_construct_aux - fun:_M_construct - fun:basic_string - fun:path -} { Suppress LogInstance still reachable memory warning Memcheck:Leak @@ -168,14 +81,6 @@ fun:_Znwm fun:_Z11LogInstancev } -{ - Suppress secp256k1_context_create still reachable memory warning - Memcheck:Leak - match-leak-kinds: reachable - fun:malloc - ... - fun:secp256k1_context_create -} { Suppress BCLog::Logger::StartLogging() still reachable memory warning Memcheck:Leak @@ -184,3 +89,9 @@ ... fun:_ZN5BCLog6Logger12StartLoggingEv } +{ + Suppress https://bugs.kde.org/show_bug.cgi?id=472219 - fixed in Valgrind 3.22. + Memcheck:Param + ppoll(ufds.events) + obj:/lib/ld-musl-aarch64.so.1 +} diff --git a/contrib/verify-binaries/README.md b/contrib/verify-binaries/README.md new file mode 100644 index 0000000000000..0f3e16a5bc9d3 --- /dev/null +++ b/contrib/verify-binaries/README.md @@ -0,0 +1,90 @@ +### Verify Binaries + +#### Preparation + +As of Bitcoin Core v22.0, releases are signed by a number of public keys on the basis +of the [guix.sigs repository](https://github.com/bitcoin-core/guix.sigs/). When +verifying binary downloads, you (the end user) decide which of these public keys you +trust and then use that trust model to evaluate the signature on a file that contains +hashes of the release binaries. The downloaded binaries are then hashed and compared to +the signed checksum file. + +First, you have to figure out which public keys to recognize. 
Browse the [list of frequent +builder-keys](https://github.com/bitcoin-core/guix.sigs/tree/main/builder-keys) and +decide which of these keys you would like to trust. For each key you want to trust, you +must obtain that key for your local GPG installation. + +You can obtain these keys + - through a browser, using a key server (e.g. keyserver.ubuntu.com), + - manually, using the `gpg --keyserver <url> --recv-keys <key>` command, or + - by running the packaged `verify.py --import-keys ...` script to + have it automatically retrieve unrecognized keys. + +#### Usage + +This script attempts to download the checksum file (`SHA256SUMS`) and corresponding +signature file `SHA256SUMS.asc` from https://bitcoincore.org and https://bitcoin.org. + +It first checks if the checksum file is valid based upon a minimum number of trusted +signatures, and then downloads the release files specified in the checksum file, and checks if the +hashes of the release files are as expected. + +If we encounter pubkeys in the signature file that we do not recognize, the script +can prompt the user as to whether they'd like to download the pubkeys. To enable +this behavior, use the `--import-keys` flag. + +The script returns 0 if everything passes the checks. It returns 1 if either the +signature check or the hash check doesn't pass. An exit code of >2 indicates an error. + +Run `./contrib/verify-binaries/verify.py --help` to see all available options. + +#### Examples + +Validate releases with default settings: +```sh +./contrib/verify-binaries/verify.py pub 22.0 +./contrib/verify-binaries/verify.py pub 22.0-rc3 +``` + +Get JSON output and don't prompt for user input (no auto key import): + +```sh +./contrib/verify-binaries/verify.py --json pub 22.0-x86 +./contrib/verify-binaries/verify.py --json pub 23.0-rc5-linux-gnu +``` + +Rely only on local GPG state and manually specified keys, while requiring a +threshold of at least 10 trusted signatures: +```sh +./contrib/verify-binaries/verify.py \ + --trusted-keys 74E2DEF5D77260B98BC19438099BAD163C70FBFA,9D3CC86A72F8494342EA5FD10A41BDC3F4FAFF1C \ + --min-good-sigs 10 pub 22.0-linux +``` + +If you only want to download the binaries for a certain architecture and/or platform, add the corresponding suffix, e.g.: + +```sh +./contrib/verify-binaries/verify.py pub 25.2-x86_64-linux +./contrib/verify-binaries/verify.py pub 24.1-rc1-darwin +./contrib/verify-binaries/verify.py pub 27.0-win64-setup.exe +``` + +If you do not want to keep the downloaded binaries, specify the cleanup option.
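The binaries are kept in a temporary working directory (typically `/tmp/bitcoin_verify_binaries.<version>`); passing `--cleanup` removes that directory once verification finishes: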
+ +```sh +./contrib/verify-binaries/verify.py pub --cleanup 22.0 +``` + +Use the bin subcommand to verify all files listed in a local checksum file + +```sh +./contrib/verify-binaries/verify.py bin SHA256SUMS +``` + +Verify only a subset of the files listed in a local checksum file + +```sh +./contrib/verify-binaries/verify.py bin ~/Downloads/SHA256SUMS \ + ~/Downloads/bitcoin-24.0.1-x86_64-linux-gnu.tar.gz \ + ~/Downloads/bitcoin-24.0.1-arm-linux-gnueabihf.tar.gz +``` diff --git a/contrib/verify-binaries/test.py b/contrib/verify-binaries/test.py new file mode 100755 index 0000000000000..875606ec22678 --- /dev/null +++ b/contrib/verify-binaries/test.py @@ -0,0 +1,74 @@ +#!/usr/bin/env python3 + +import json +import sys +import subprocess +from pathlib import Path + + +def main(): + """Tests ordered roughly from faster to slower.""" + expect_code(run_verify("", "pub", '0.32'), 4, "Nonexistent version should fail") + expect_code(run_verify("", "pub", '0.32.awefa.12f9h'), 11, "Malformed version should fail") + expect_code(run_verify('--min-good-sigs 20', "pub", "22.0"), 9, "--min-good-sigs 20 should fail") + + print("- testing verification (22.0-x86_64-linux-gnu.tar.gz)", flush=True) + _220_x86_64_linux_gnu = run_verify("--json", "pub", "22.0-x86_64-linux-gnu.tar.gz") + try: + result = json.loads(_220_x86_64_linux_gnu.stdout.decode()) + except Exception: + print("failed on 22.0-x86_64-linux-gnu.tar.gz --json:") + print_process_failure(_220_x86_64_linux_gnu) + raise + + expect_code(_220_x86_64_linux_gnu, 0, "22.0-x86_64-linux-gnu.tar.gz should succeed") + v = result['verified_binaries'] + assert result['good_trusted_sigs'] + assert len(v) == 1 + assert v['bitcoin-22.0-x86_64-linux-gnu.tar.gz'] == '59ebd25dd82a51638b7a6bb914586201e67db67b919b2a1ff08925a7936d1b16' + + print("- testing verification (22.0)", flush=True) + _220 = run_verify("--json", "pub", "22.0") + try: + result = json.loads(_220.stdout.decode()) + except Exception: + print("failed on 22.0 --json:") + print_process_failure(_220) + raise + + expect_code(_220, 0, "22.0 should succeed") + v = result['verified_binaries'] + assert result['good_trusted_sigs'] + assert v['bitcoin-22.0-aarch64-linux-gnu.tar.gz'] == 'ac718fed08570a81b3587587872ad85a25173afa5f9fbbd0c03ba4d1714cfa3e' + assert v['bitcoin-22.0-osx64.tar.gz'] == '2744d199c3343b2d94faffdfb2c94d75a630ba27301a70e47b0ad30a7e0155e9' + assert v['bitcoin-22.0-x86_64-linux-gnu.tar.gz'] == '59ebd25dd82a51638b7a6bb914586201e67db67b919b2a1ff08925a7936d1b16' + + +def run_verify(global_args: str, command: str, command_args: str) -> subprocess.CompletedProcess: + maybe_here = Path.cwd() / 'verify.py' + path = maybe_here if maybe_here.exists() else Path.cwd() / 'contrib' / 'verify-binaries' / 'verify.py' + + if command == "pub": + command += " --cleanup" + + return subprocess.run( + f"{path} {global_args} {command} {command_args}", + stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) + + +def expect_code(completed: subprocess.CompletedProcess, expected_code: int, msg: str): + if completed.returncode != expected_code: + print(f"{msg!r} failed: got code {completed.returncode}, expected {expected_code}") + print_process_failure(completed) + sys.exit(1) + else: + print(f"✓ {msg!r} passed") + + +def print_process_failure(completed: subprocess.CompletedProcess): + print(f"stdout:\n{completed.stdout.decode()}") + print(f"stderr:\n{completed.stderr.decode()}") + + +if __name__ == '__main__': + main() diff --git a/contrib/verify-binaries/verify.py b/contrib/verify-binaries/verify.py new file 
mode 100755 index 0000000000000..6c07b36c9d8cd --- /dev/null +++ b/contrib/verify-binaries/verify.py @@ -0,0 +1,709 @@ +#!/usr/bin/env python3 +# Copyright (c) 2020-2021 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +"""Script for verifying Bitcoin Core release binaries. + +This script attempts to download the sum file SHA256SUMS and corresponding +signature file SHA256SUMS.asc from bitcoincore.org and bitcoin.org and +compares them. + +The sum-signature file is signed by a number of builder keys. This script +ensures that there is a minimum threshold of signatures from pubkeys that +we trust. This trust is articulated on the basis of configuration options +here, but by default is based upon local GPG trust settings. + +The builder keys are available in the guix.sigs repo: + + https://github.com/bitcoin-core/guix.sigs/tree/main/builder-keys + +If a minimum good, trusted signature threshold is met on the sum file, we then +download the files specified in SHA256SUMS, and check if the hashes of these +files match those that are specified. The script returns 0 if everything passes +the checks. It returns 1 if either the signature check or the hash check +doesn't pass. If an error occurs the return value is >= 2. + +Logging output goes to stderr and final binary verification data goes to stdout. + +JSON output can be obtained by setting env BINVERIFY_JSON=1. +""" +import argparse +import difflib +import json +import logging +import os +import subprocess +import typing as t +import re +import sys +import shutil +import tempfile +import textwrap +import urllib.request +import urllib.error +import enum +from hashlib import sha256 +from pathlib import PurePath, Path + +# The primary host; this will fail if we can't retrieve files from here.
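# Both hosts are queried for the sums and signature files, and the retrieved copies must be byte-for-byte identical (see get_files_from_hosts_and_compare() below); the secondary host is only required to respond when --require-all-hosts is passed.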
+HOST1 = "https://bitcoincore.org" +HOST2 = "https://bitcoin.org" +VERSIONPREFIX = "bitcoin-core-" +SUMS_FILENAME = 'SHA256SUMS' +SIGNATUREFILENAME = f"{SUMS_FILENAME}.asc" + + +class ReturnCode(enum.IntEnum): + SUCCESS = 0 + INTEGRITY_FAILURE = 1 + FILE_GET_FAILED = 4 + FILE_MISSING_FROM_ONE_HOST = 5 + FILES_NOT_EQUAL = 6 + NO_BINARIES_MATCH = 7 + NOT_ENOUGH_GOOD_SIGS = 9 + BINARY_DOWNLOAD_FAILED = 10 + BAD_VERSION = 11 + + +def set_up_logger(is_verbose: bool = True) -> logging.Logger: + """Set up a logger that writes to stderr.""" + log = logging.getLogger(__name__) + log.setLevel(logging.INFO if is_verbose else logging.WARNING) + console = logging.StreamHandler(sys.stderr) # log to stderr + console.setLevel(logging.DEBUG) + formatter = logging.Formatter('[%(levelname)s] %(message)s') + console.setFormatter(formatter) + log.addHandler(console) + return log + + +log = set_up_logger() + + +def indent(output: str) -> str: + return textwrap.indent(output, ' ') + + +def bool_from_env(key, default=False) -> bool: + if key not in os.environ: + return default + raw = os.environ[key] + + if raw.lower() in ('1', 'true'): + return True + elif raw.lower() in ('0', 'false'): + return False + raise ValueError(f"Unrecognized environment value {key}={raw!r}") + + +VERSION_FORMAT = ".[.][-rc[0-9]][-platform]" +VERSION_EXAMPLE = "22.0 or 23.1-rc1-darwin.dmg or 27.0-x86_64-linux-gnu" + +def parse_version_string(version_str): + # "[-rcN][-platform]" + version_base, _, platform = version_str.partition('-') + rc = "" + if platform.startswith("rc"): # "-rcN[-platform]" + rc, _, platform = platform.partition('-') + # else "" or "-platform" + + return version_base, rc, platform + + +def download_with_wget(remote_file, local_file): + result = subprocess.run(['wget', '-O', local_file, remote_file], + stderr=subprocess.STDOUT, stdout=subprocess.PIPE) + return result.returncode == 0, result.stdout.decode().rstrip() + + +def download_lines_with_urllib(url) -> tuple[bool, list[str]]: + """Get (success, text lines of a file) over HTTP.""" + try: + return (True, [ + line.strip().decode() for line in urllib.request.urlopen(url).readlines()]) + except urllib.error.HTTPError as e: + log.warning(f"HTTP request to {url} failed (HTTPError): {e}") + except Exception as e: + log.warning(f"HTTP request to {url} failed ({e})") + return (False, []) + + +def verify_with_gpg( + filename, + signature_filename, + output_filename: t.Optional[str] = None +) -> tuple[int, str]: + with tempfile.NamedTemporaryFile() as status_file: + args = [ + 'gpg', '--yes', '--verify', '--verify-options', 'show-primary-uid-only', "--status-file", status_file.name, + '--output', output_filename if output_filename else '', signature_filename, filename] + + env = dict(os.environ, LANGUAGE='en') + result = subprocess.run(args, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, env=env) + + gpg_data = status_file.read().decode().rstrip() + + log.debug(f'Result from GPG ({result.returncode}): {result.stdout.decode()}') + log.debug(f"{gpg_data}") + return result.returncode, gpg_data + + +def remove_files(filenames): + for filename in filenames: + os.remove(filename) + + +class SigData: + """GPG signature data as parsed from GPG stdout.""" + def __init__(self): + self.key = None + self.name = "" + self.trusted = False + self.status = "" + + def __bool__(self): + return self.key is not None + + def __repr__(self): + return ( + "SigData(%r, %r, trusted=%s, status=%r)" % + (self.key, self.name, self.trusted, self.status)) + + +def parse_gpg_result( + output: 
list[str] +) -> tuple[list[SigData], list[SigData], list[SigData]]: + """Returns good, unknown, and bad signatures from GPG stdout.""" + good_sigs: list[SigData] = [] + unknown_sigs: list[SigData] = [] + bad_sigs: list[SigData] = [] + total_resolved_sigs = 0 + + # Ensure that all lines we match on include a prefix that prevents malicious input + # from fooling the parser. + def line_begins_with(patt: str, line: str) -> t.Optional[re.Match]: + return re.match(r'^(\[GNUPG:\])\s+' + patt, line) + + curr_sigs = unknown_sigs + curr_sigdata = SigData() + + for line in output: + if line_begins_with(r"NEWSIG(?:\s|$)", line): + total_resolved_sigs += 1 + if curr_sigdata: + curr_sigs.append(curr_sigdata) + curr_sigdata = SigData() + newsig_split = line.split() + if len(newsig_split) == 3: + curr_sigdata.name = newsig_split[2] + + elif line_begins_with(r"GOODSIG(?:\s|$)", line): + curr_sigdata.key, curr_sigdata.name = line.split(maxsplit=3)[2:4] + curr_sigs = good_sigs + + elif line_begins_with(r"EXPKEYSIG(?:\s|$)", line): + curr_sigdata.key, curr_sigdata.name = line.split(maxsplit=3)[2:4] + curr_sigs = good_sigs + curr_sigdata.status = "expired" + + elif line_begins_with(r"REVKEYSIG(?:\s|$)", line): + curr_sigdata.key, curr_sigdata.name = line.split(maxsplit=3)[2:4] + curr_sigs = good_sigs + curr_sigdata.status = "revoked" + + elif line_begins_with(r"BADSIG(?:\s|$)", line): + curr_sigdata.key, curr_sigdata.name = line.split(maxsplit=3)[2:4] + curr_sigs = bad_sigs + + elif line_begins_with(r"ERRSIG(?:\s|$)", line): + curr_sigdata.key, _, _, _, _, _ = line.split()[2:8] + curr_sigs = unknown_sigs + + elif line_begins_with(r"TRUST_(UNDEFINED|NEVER)(?:\s|$)", line): + curr_sigdata.trusted = False + + elif line_begins_with(r"TRUST_(MARGINAL|FULLY|ULTIMATE)(?:\s|$)", line): + curr_sigdata.trusted = True + + # The last one won't have been added, so add it now + assert curr_sigdata + curr_sigs.append(curr_sigdata) + + all_found = len(good_sigs + bad_sigs + unknown_sigs) + if all_found != total_resolved_sigs: + raise RuntimeError( + f"failed to evaluate all signatures: found {all_found} " + f"but expected {total_resolved_sigs}") + + return (good_sigs, unknown_sigs, bad_sigs) + + +def files_are_equal(filename1, filename2): + with open(filename1, 'rb') as file1: + contents1 = file1.read() + with open(filename2, 'rb') as file2: + contents2 = file2.read() + eq = contents1 == contents2 + + if not eq: + with open(filename1, 'r', encoding='utf-8') as f1, \ + open(filename2, 'r', encoding='utf-8') as f2: + f1lines = f1.readlines() + f2lines = f2.readlines() + + diff = indent( + ''.join(difflib.unified_diff(f1lines, f2lines))) + log.warning(f"found diff in files ({filename1}, {filename2}):\n{diff}\n") + + return eq + + +def get_files_from_hosts_and_compare( + hosts: list[str], path: str, filename: str, require_all: bool = False +) -> ReturnCode: + """ + Retrieve the same file from a number of hosts and ensure they have the same contents. + The first host given will be treated as the "primary" host, and is required to succeed. + + Args: + filename: for writing the file locally. + """ + assert len(hosts) > 1 + primary_host = hosts[0] + other_hosts = hosts[1:] + got_files = [] + + def join_url(host: str) -> str: + return host.rstrip('/') + '/' + path.lstrip('/') + + url = join_url(primary_host) + success, output = download_with_wget(url, filename) + if not success: + log.error( + f"couldn't fetch file ({url}). 
" + "Have you specified the version number in the following format?\n" + f"{VERSION_FORMAT} " + f"(example: {VERSION_EXAMPLE})\n" + f"wget output:\n{indent(output)}") + return ReturnCode.FILE_GET_FAILED + else: + log.info(f"got file {url} as {filename}") + got_files.append(filename) + + for i, host in enumerate(other_hosts): + url = join_url(host) + fname = filename + f'.{i + 2}' + success, output = download_with_wget(url, fname) + + if require_all and not success: + log.error( + f"{host} failed to provide file ({url}), but {primary_host} did?\n" + f"wget output:\n{indent(output)}") + return ReturnCode.FILE_MISSING_FROM_ONE_HOST + elif not success: + log.warning( + f"{host} failed to provide file ({url}). " + f"Continuing based solely upon {primary_host}.") + else: + log.info(f"got file {url} as {fname}") + got_files.append(fname) + + for i, got_file in enumerate(got_files): + if got_file == got_files[-1]: + break # break on last file, nothing after it to compare to + + compare_to = got_files[i + 1] + if not files_are_equal(got_file, compare_to): + log.error(f"files not equal: {got_file} and {compare_to}") + return ReturnCode.FILES_NOT_EQUAL + + return ReturnCode.SUCCESS + + +def check_multisig(sums_file: str, sigfilename: str, args: argparse.Namespace) -> tuple[int, str, list[SigData], list[SigData], list[SigData]]: + # check signature + # + # We don't write output to a file because this command will almost certainly + # fail with GPG exit code '2' (and so not writing to --output) because of the + # likely presence of multiple untrusted signatures. + retval, output = verify_with_gpg(sums_file, sigfilename) + + if args.verbose: + log.info(f"gpg output:\n{indent(output)}") + + good, unknown, bad = parse_gpg_result(output.splitlines()) + + if unknown and args.import_keys: + # Retrieve unknown keys and then try GPG again. + for unsig in unknown: + if prompt_yn(f" ? Retrieve key {unsig.key} ({unsig.name})? (y/N) "): + ran = subprocess.run( + ["gpg", "--keyserver", args.keyserver, "--recv-keys", unsig.key]) + + if ran.returncode != 0: + log.warning(f"failed to retrieve key {unsig.key}") + + # Reparse the GPG output now that we have more keys + retval, output = verify_with_gpg(sums_file, sigfilename) + good, unknown, bad = parse_gpg_result(output.splitlines()) + + return retval, output, good, unknown, bad + + +def prompt_yn(prompt) -> bool: + """Return true if the user inputs 'y'.""" + got = '' + while got not in ['y', 'n']: + got = input(prompt).lower() + return got == 'y' + +def verify_shasums_signature( + signature_file_path: str, sums_file_path: str, args: argparse.Namespace +) -> tuple[ + ReturnCode, list[SigData], list[SigData], list[SigData], list[SigData] +]: + min_good_sigs = args.min_good_sigs + gpg_allowed_codes = [0, 2] # 2 is returned when untrusted signatures are present. + + gpg_retval, gpg_output, good, unknown, bad = check_multisig(sums_file_path, signature_file_path, args) + + if gpg_retval not in gpg_allowed_codes: + if gpg_retval == 1: + log.critical(f"Bad signature (code: {gpg_retval}).") + else: + log.critical(f"unexpected GPG exit code ({gpg_retval})") + + log.error(f"gpg output:\n{indent(gpg_output)}") + return (ReturnCode.INTEGRITY_FAILURE, [], [], [], []) + + # Decide which keys we trust, though not "trust" in the GPG sense, but rather + # which pubkeys convince us that this sums file is legitimate. In other words, + # which pubkeys within the Bitcoin community do we trust for the purposes of + # binary verification? 
+ trusted_keys = set() + if args.trusted_keys: + trusted_keys |= set(args.trusted_keys.split(',')) + + # Tally signatures and make sure we have enough goods to fulfill + # our threshold. + good_trusted = [sig for sig in good if sig.trusted or sig.key in trusted_keys] + good_untrusted = [sig for sig in good if sig not in good_trusted] + num_trusted = len(good_trusted) + len(good_untrusted) + log.info(f"got {num_trusted} good signatures") + + if num_trusted < min_good_sigs: + log.info("Maybe you need to import " + f"(`gpg --keyserver {args.keyserver} --recv-keys `) " + "some of the following keys: ") + log.info('') + for sig in unknown: + log.info(f" {sig.key} ({sig.name})") + log.info('') + log.error( + "not enough trusted sigs to meet threshold " + f"({num_trusted} vs. {min_good_sigs})") + + return (ReturnCode.NOT_ENOUGH_GOOD_SIGS, [], [], [], []) + + for sig in good_trusted: + log.info(f"GOOD SIGNATURE: {sig}") + + for sig in good_untrusted: + log.info(f"GOOD SIGNATURE (untrusted): {sig}") + + for sig in [sig for sig in good if sig.status == 'expired']: + log.warning(f"key {sig.key} for {sig.name} is expired") + + for sig in bad: + log.warning(f"BAD SIGNATURE: {sig}") + + for sig in unknown: + log.warning(f"UNKNOWN SIGNATURE: {sig}") + + return (ReturnCode.SUCCESS, good_trusted, good_untrusted, unknown, bad) + + +def parse_sums_file(sums_file_path: str, filename_filter: list[str]) -> list[list[str]]: + # extract hashes/filenames of binaries to verify from hash file; + # each line has the following format: " " + with open(sums_file_path, 'r', encoding='utf8') as hash_file: + return [line.split()[:2] for line in hash_file if len(filename_filter) == 0 or any(f in line for f in filename_filter)] + + +def verify_binary_hashes(hashes_to_verify: list[list[str]]) -> tuple[ReturnCode, dict[str, str]]: + offending_files = [] + files_to_hashes = {} + + for hash_expected, binary_filename in hashes_to_verify: + with open(binary_filename, 'rb') as binary_file: + hash_calculated = sha256(binary_file.read()).hexdigest() + if hash_calculated != hash_expected: + offending_files.append(binary_filename) + else: + files_to_hashes[binary_filename] = hash_calculated + + if offending_files: + joined_files = '\n'.join(offending_files) + log.critical( + "Hashes don't match.\n" + f"Offending files:\n{joined_files}") + return (ReturnCode.INTEGRITY_FAILURE, files_to_hashes) + + return (ReturnCode.SUCCESS, files_to_hashes) + + +def verify_published_handler(args: argparse.Namespace) -> ReturnCode: + WORKINGDIR = Path(tempfile.gettempdir()) / f"bitcoin_verify_binaries.{args.version}" + + def cleanup(): + log.info("cleaning up files") + os.chdir(Path.home()) + shutil.rmtree(WORKINGDIR) + + # determine remote dir dependent on provided version string + try: + version_base, version_rc, os_filter = parse_version_string(args.version) + version_tuple = [int(i) for i in version_base.split('.')] + except Exception as e: + log.debug(e) + log.error(f"unable to parse version; expected format is {VERSION_FORMAT}") + log.error(f" e.g. 
{VERSION_EXAMPLE}") + return ReturnCode.BAD_VERSION + + remote_dir = f"/bin/{VERSIONPREFIX}{version_base}/" + if version_rc: + remote_dir += f"test.{version_rc}/" + remote_sigs_path = remote_dir + SIGNATUREFILENAME + remote_sums_path = remote_dir + SUMS_FILENAME + + # create working directory + os.makedirs(WORKINGDIR, exist_ok=True) + os.chdir(WORKINGDIR) + + hosts = [HOST1, HOST2] + + got_sig_status = get_files_from_hosts_and_compare( + hosts, remote_sigs_path, SIGNATUREFILENAME, args.require_all_hosts) + if got_sig_status != ReturnCode.SUCCESS: + return got_sig_status + + # Multi-sig verification is available after 22.0. + if version_tuple[0] < 22: + log.error("Version too old - single sig not supported. Use a previous " + "version of this script from the repo.") + return ReturnCode.BAD_VERSION + + got_sums_status = get_files_from_hosts_and_compare( + hosts, remote_sums_path, SUMS_FILENAME, args.require_all_hosts) + if got_sums_status != ReturnCode.SUCCESS: + return got_sums_status + + # Verify the signature on the SHA256SUMS file + sigs_status, good_trusted, good_untrusted, unknown, bad = verify_shasums_signature(SIGNATUREFILENAME, SUMS_FILENAME, args) + if sigs_status != ReturnCode.SUCCESS: + if sigs_status == ReturnCode.INTEGRITY_FAILURE: + cleanup() + return sigs_status + + # Extract hashes and filenames + hashes_to_verify = parse_sums_file(SUMS_FILENAME, [os_filter]) + if not hashes_to_verify: + available_versions = ["-".join(line[1].split("-")[2:]) for line in parse_sums_file(SUMS_FILENAME, [])] + closest_match = difflib.get_close_matches(os_filter, available_versions, cutoff=0, n=1)[0] + log.error(f"No files matched the platform specified. Did you mean: {closest_match}") + return ReturnCode.NO_BINARIES_MATCH + + # remove binaries that are known not to be hosted by bitcoincore.org + fragments_to_remove = ['-unsigned', '-debug', '-codesignatures'] + for fragment in fragments_to_remove: + nobinaries = [i for i in hashes_to_verify if fragment in i[1]] + if nobinaries: + remove_str = ', '.join(i[1] for i in nobinaries) + log.info( + f"removing *{fragment} binaries ({remove_str}) from verification " + f"since {HOST1} does not host *{fragment} binaries") + hashes_to_verify = [i for i in hashes_to_verify if fragment not in i[1]] + + # download binaries + for _, binary_filename in hashes_to_verify: + log.info(f"downloading {binary_filename} to {WORKINGDIR}") + success, output = download_with_wget( + HOST1 + remote_dir + binary_filename, binary_filename) + + if not success: + log.error( + f"failed to download {binary_filename}\n" + f"wget output:\n{indent(output)}") + return ReturnCode.BINARY_DOWNLOAD_FAILED + + # verify hashes + hashes_status, files_to_hashes = verify_binary_hashes(hashes_to_verify) + if hashes_status != ReturnCode.SUCCESS: + return hashes_status + + + if args.cleanup: + cleanup() + else: + log.info(f"did not clean up {WORKINGDIR}") + + if args.json: + output = { + 'good_trusted_sigs': [str(s) for s in good_trusted], + 'good_untrusted_sigs': [str(s) for s in good_untrusted], + 'unknown_sigs': [str(s) for s in unknown], + 'bad_sigs': [str(s) for s in bad], + 'verified_binaries': files_to_hashes, + } + print(json.dumps(output, indent=2)) + else: + for filename in files_to_hashes: + print(f"VERIFIED: {filename}") + + return ReturnCode.SUCCESS + + +def verify_binaries_handler(args: argparse.Namespace) -> ReturnCode: + binary_to_basename = {} + for file in args.binary: + binary_to_basename[PurePath(file).name] = file + + sums_sig_path = None + if args.sums_sig_file: + 
sums_sig_path = Path(args.sums_sig_file) + else: + log.info(f"No signature file specified, assuming it is {args.sums_file}.asc") + sums_sig_path = Path(args.sums_file).with_suffix(".asc") + + # Verify the signature on the SHA256SUMS file + sigs_status, good_trusted, good_untrusted, unknown, bad = verify_shasums_signature(str(sums_sig_path), args.sums_file, args) + if sigs_status != ReturnCode.SUCCESS: + return sigs_status + + # Extract hashes and filenames + hashes_to_verify = parse_sums_file(args.sums_file, [k for k, n in binary_to_basename.items()]) + if not hashes_to_verify: + log.error(f"No files in {args.sums_file} match the specified binaries") + return ReturnCode.NO_BINARIES_MATCH + + # Make sure all files are accounted for + sums_file_path = Path(args.sums_file) + missing_files = [] + files_to_hash = [] + if len(binary_to_basename) > 0: + for file_hash, file in hashes_to_verify: + files_to_hash.append([file_hash, binary_to_basename[file]]) + del binary_to_basename[file] + if len(binary_to_basename) > 0: + log.error(f"Not all specified binaries are in {args.sums_file}") + return ReturnCode.NO_BINARIES_MATCH + else: + log.info(f"No binaries specified, assuming all files specified in {args.sums_file} are located relatively") + for file_hash, file in hashes_to_verify: + file_path = Path(sums_file_path.parent.joinpath(file)) + if file_path.exists(): + files_to_hash.append([file_hash, str(file_path)]) + else: + missing_files.append(file) + + # verify hashes + hashes_status, files_to_hashes = verify_binary_hashes(files_to_hash) + if hashes_status != ReturnCode.SUCCESS: + return hashes_status + + if args.json: + output = { + 'good_trusted_sigs': [str(s) for s in good_trusted], + 'good_untrusted_sigs': [str(s) for s in good_untrusted], + 'unknown_sigs': [str(s) for s in unknown], + 'bad_sigs': [str(s) for s in bad], + 'verified_binaries': files_to_hashes, + "missing_binaries": missing_files, + } + print(json.dumps(output, indent=2)) + else: + for filename in files_to_hashes: + print(f"VERIFIED: {filename}") + for filename in missing_files: + print(f"MISSING: {filename}") + + return ReturnCode.SUCCESS + + +def main(): + parser = argparse.ArgumentParser(description=__doc__) + parser.add_argument( + '-v', '--verbose', action='store_true', + default=bool_from_env('BINVERIFY_VERBOSE'), + ) + parser.add_argument( + '-q', '--quiet', action='store_true', + default=bool_from_env('BINVERIFY_QUIET'), + ) + parser.add_argument( + '--import-keys', action='store_true', + default=bool_from_env('BINVERIFY_IMPORTKEYS'), + help='if specified, ask to import each unknown builder key' + ) + parser.add_argument( + '--min-good-sigs', type=int, action='store', nargs='?', + default=int(os.environ.get('BINVERIFY_MIN_GOOD_SIGS', 3)), + help=( + 'The minimum number of good signatures to require successful termination.'), + ) + parser.add_argument( + '--keyserver', action='store', nargs='?', + default=os.environ.get('BINVERIFY_KEYSERVER', 'hkps://keys.openpgp.org'), + help='which keyserver to use', + ) + parser.add_argument( + '--trusted-keys', action='store', nargs='?', + default=os.environ.get('BINVERIFY_TRUSTED_KEYS', ''), + help='A list of trusted signer GPG keys, separated by commas. 
Not "trusted keys" in the GPG sense.', + ) + parser.add_argument( + '--json', action='store_true', + default=bool_from_env('BINVERIFY_JSON'), + help='If set, output the result as JSON', + ) + + subparsers = parser.add_subparsers(title="Commands", required=True, dest="command") + + pub_parser = subparsers.add_parser("pub", help="Verify a published release.") + pub_parser.set_defaults(func=verify_published_handler) + pub_parser.add_argument( + 'version', type=str, help=( + f'version of the bitcoin release to download; of the format ' + f'{VERSION_FORMAT}. Example: {VERSION_EXAMPLE}') + ) + pub_parser.add_argument( + '--cleanup', action='store_true', + default=bool_from_env('BINVERIFY_CLEANUP'), + help='if specified, clean up files afterwards' + ) + pub_parser.add_argument( + '--require-all-hosts', action='store_true', + default=bool_from_env('BINVERIFY_REQUIRE_ALL_HOSTS'), + help=( + f'If set, require all hosts ({HOST1}, {HOST2}) to provide signatures. ' + '(Sometimes bitcoin.org lags behind bitcoincore.org.)') + ) + + bin_parser = subparsers.add_parser("bin", help="Verify local binaries.") + bin_parser.set_defaults(func=verify_binaries_handler) + bin_parser.add_argument("--sums-sig-file", "-s", help="Path to the SHA256SUMS.asc file to verify") + bin_parser.add_argument("sums_file", help="Path to the SHA256SUMS file to verify") + bin_parser.add_argument( + "binary", nargs="*", + help="Path to a binary distribution file to verify. Can be specified multiple times for multiple files to verify." + ) + + args = parser.parse_args() + if args.quiet: + log.setLevel(logging.WARNING) + + return args.func(args) + + +if __name__ == '__main__': + sys.exit(main()) diff --git a/contrib/verify-commits/README.md b/contrib/verify-commits/README.md index e95a57586ff1c..020890c3660ee 100644 --- a/contrib/verify-commits/README.md +++ b/contrib/verify-commits/README.md @@ -27,6 +27,10 @@ Note that the above isn't a good UI/UX yet, and needs significant improvements to make it more convenient and reduce the chance of errors; pull-reqs improving this process would be much appreciated. +Unless `--clean-merge 0` is specified, `verify-commits.py` will attempt to verify that +each merge commit applies cleanly (with some exceptions). This requires using at least +git v2.38.0. + Configuration files ------------------- @@ -40,7 +44,7 @@ Import trusted keys In order to check the commit signatures, you must add the trusted PGP keys to your machine. [GnuPG](https://gnupg.org/) may be used to import the trusted keys by running the following command: ```sh -gpg --keyserver hkp://keyserver.ubuntu.com --recv-keys $(/dev/null)" + printf '%s\n' "$INPUT" | gpg --trust-model always "$@" 2>/dev/null + exit $? else # Note how we've disabled SHA1 with the --weak-digest option, disabling # signatures - including selfsigs - that use SHA1. While you might think that @@ -20,12 +17,12 @@ else # an attacker could construct a pull-req that results in a commit object that # they've created a collision for. Not the most likely attack, but preventing # it is pretty easy so we do so as a "belt-and-suspenders" measure. - GPG_RES="" for LINE in $(gpg --version); do case "$LINE" in "gpg (GnuPG) 1.4.1"*|"gpg (GnuPG) 2.0."*) echo "Please upgrade to at least gpg 2.1.10 to check for weak signatures" > /dev/stderr - GPG_RES="$(printf '%s\n' "$INPUT" | gpg --trust-model always "$@" 2>/dev/null)" + printf '%s\n' "$INPUT" | gpg --trust-model always "$@" 2>/dev/null + exit $? 
;; # We assume if you're running 2.1+, you're probably running 2.1.10+ # gpg will fail otherwise @@ -33,33 +30,6 @@ else # gpg will fail otherwise esac done - [ "$GPG_RES" = "" ] && GPG_RES="$(printf '%s\n' "$INPUT" | gpg --trust-model always --weak-digest sha1 "$@" 2>/dev/null)" -fi -for LINE in $GPG_RES; do - case "$LINE" in - "[GNUPG:] VALIDSIG "*) - while read KEY; do - [ "${LINE#?GNUPG:? VALIDSIG * * * * * * * * * }" = "$KEY" ] && VALID=true - done < ./contrib/verify-commits/trusted-keys - ;; - "[GNUPG:] REVKEYSIG "*) - [ "$BITCOIN_VERIFY_COMMITS_ALLOW_REVSIG" != 1 ] && exit 1 - REVSIG=true - GOODREVSIG="[GNUPG:] GOODSIG ${LINE#* * *}" - ;; - "[GNUPG:] EXPKEYSIG "*) - [ "$BITCOIN_VERIFY_COMMITS_ALLOW_REVSIG" != 1 ] && exit 1 - REVSIG=true - GOODREVSIG="[GNUPG:] GOODSIG ${LINE#* * *}" - ;; - esac -done -if ! $VALID; then - exit 1 -fi -if $VALID && $REVSIG; then - printf '%s\n' "$INPUT" | gpg --trust-model always "$@" 2>/dev/null | grep "^\[GNUPG:\] \(NEWSIG\|SIG_ID\|VALIDSIG\)" - echo "$GOODREVSIG" -else - printf '%s\n' "$INPUT" | gpg --trust-model always "$@" 2>/dev/null + printf '%s\n' "$INPUT" | gpg --trust-model always --weak-digest sha1 "$@" 2>/dev/null + exit $? fi diff --git a/contrib/verify-commits/pre-push-hook.sh b/contrib/verify-commits/pre-push-hook.sh index 78873dc0c38eb..995324837887f 100755 --- a/contrib/verify-commits/pre-push-hook.sh +++ b/contrib/verify-commits/pre-push-hook.sh @@ -1,5 +1,5 @@ #!/usr/bin/env bash -# Copyright (c) 2014-2020 The Bitcoin Core developers +# Copyright (c) 2014-2021 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. @@ -9,11 +9,11 @@ if ! [[ "$2" =~ ^(git@)?(www.)?github.com(:|/)bitcoin/bitcoin(.git)?$ ]]; then fi while read LINE; do - set -- A $LINE + set -- A "$LINE" if [ "$4" != "refs/heads/master" ]; then continue fi - if ! ./contrib/verify-commits/verify-commits.py $3 > /dev/null 2>&1; then + if ! 
./contrib/verify-commits/verify-commits.py "$3" > /dev/null 2>&1; then echo "ERROR: A commit is not signed, can't push" ./contrib/verify-commits/verify-commits.py exit 1 diff --git a/contrib/verify-commits/trusted-git-root b/contrib/verify-commits/trusted-git-root index c60f8ab695e9c..7ec318e1ea79b 100644 --- a/contrib/verify-commits/trusted-git-root +++ b/contrib/verify-commits/trusted-git-root @@ -1 +1 @@ -82bcf405f6db1d55b684a1f63a4aabad376cdad7 +437dfe1c26e752c280014a30f809e62c684ad99e diff --git a/contrib/verify-commits/trusted-keys b/contrib/verify-commits/trusted-keys index 27fede6277616..f25486776f9f2 100644 --- a/contrib/verify-commits/trusted-keys +++ b/contrib/verify-commits/trusted-keys @@ -1,6 +1,5 @@ -71A3B16735405025D447E8F274810B012346C9A6 -133EAC179436F14A5CF1B794860FEB804E669320 -32EE5C4C3FA15CCADB46ABE529D4BCB6416F53EC -B8B3F1C0E58C15DB6A81D30C3648A882F4316B9B -CA03882CB1FC067B5D3ACFE4D300116E1C875A3D E777299FC265DD04793070EB944D35F9AC3DB76A +D1DBF2C4B96F2DEBF4C16654410108112E7EA81F +152812300785C96444D3334D17565732E08E5E41 +6B002C6EA3F91B1B0DF0C9BC8F617F1200A6D25C +4D1B3D5ECBA1A7E05371EEBE46800E30FC748A66 diff --git a/contrib/verify-commits/verify-commits.py b/contrib/verify-commits/verify-commits.py index 7e46c6fd47733..a1fe78a6436c1 100755 --- a/contrib/verify-commits/verify-commits.py +++ b/contrib/verify-commits/verify-commits.py @@ -1,5 +1,5 @@ #!/usr/bin/env python3 -# Copyright (c) 2018-2019 The Bitcoin Core developers +# Copyright (c) 2018-2022 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Verify commits against a trusted keys list.""" @@ -82,13 +82,20 @@ def main(): # get directory of this program and read data files dirname = os.path.dirname(os.path.abspath(__file__)) print("Using verify-commits data from " + dirname) - verified_root = open(dirname + "/trusted-git-root", "r", encoding="utf8").read().splitlines()[0] - verified_sha512_root = open(dirname + "/trusted-sha512-root-commit", "r", encoding="utf8").read().splitlines()[0] - revsig_allowed = open(dirname + "/allow-revsig-commits", "r", encoding="utf-8").read().splitlines() - unclean_merge_allowed = open(dirname + "/allow-unclean-merge-commits", "r", encoding="utf-8").read().splitlines() - incorrect_sha512_allowed = open(dirname + "/allow-incorrect-sha512-commits", "r", encoding="utf-8").read().splitlines() - - # Set commit and branch and set variables + with open(dirname + "/trusted-git-root", "r", encoding="utf8") as f: + verified_root = f.read().splitlines()[0] + with open(dirname + "/trusted-sha512-root-commit", "r", encoding="utf8") as f: + verified_sha512_root = f.read().splitlines()[0] + with open(dirname + "/allow-revsig-commits", "r", encoding="utf8") as f: + revsig_allowed = f.read().splitlines() + with open(dirname + "/allow-unclean-merge-commits", "r", encoding="utf8") as f: + unclean_merge_allowed = f.read().splitlines() + with open(dirname + "/allow-incorrect-sha512-commits", "r", encoding="utf8") as f: + incorrect_sha512_allowed = f.read().splitlines() + with open(dirname + "/trusted-keys", "r", encoding="utf8") as f: + trusted_keys = f.read().splitlines() + + # Set commit and variables current_commit = args.commit if ' ' in current_commit: print("Commit must not contain spaces", file=sys.stderr) @@ -97,7 +104,6 @@ def main(): no_sha1 = True prev_commit = "" initial_commit = current_commit - branch = subprocess.check_output([GIT, 'show', '-s', '--format=%H', 
initial_commit]).decode('utf8').splitlines()[0] # Iterate through commits while True: @@ -108,17 +114,41 @@ def main(): if current_commit == verified_root: print('There is a valid path from "{}" to {} where all commits are signed!'.format(initial_commit, verified_root)) sys.exit(0) - if current_commit == verified_sha512_root: - if verify_tree: + else: + # Make sure this commit isn't older than trusted roots + check_root_older_res = subprocess.run([GIT, "merge-base", "--is-ancestor", verified_root, current_commit]) + if check_root_older_res.returncode != 0: + print(f"\"{current_commit}\" predates the trusted root, stopping!") + sys.exit(0) + + if verify_tree: + if current_commit == verified_sha512_root: print("All Tree-SHA512s matched up to {}".format(verified_sha512_root), file=sys.stderr) - verify_tree = False - no_sha1 = False + verify_tree = False + no_sha1 = False + else: + # Skip the tree check if we are older than the trusted root + check_root_older_res = subprocess.run([GIT, "merge-base", "--is-ancestor", verified_sha512_root, current_commit]) + if check_root_older_res.returncode != 0: + print(f"\"{current_commit}\" predates the trusted SHA512 root, disabling tree verification.") + verify_tree = False + no_sha1 = False + os.environ['BITCOIN_VERIFY_COMMITS_ALLOW_SHA1'] = "0" if no_sha1 else "1" - os.environ['BITCOIN_VERIFY_COMMITS_ALLOW_REVSIG'] = "1" if current_commit in revsig_allowed else "0" + allow_revsig = current_commit in revsig_allowed # Check that the commit (and parents) was signed with a trusted key - if subprocess.call([GIT, '-c', 'gpg.program={}/gpg.sh'.format(dirname), 'verify-commit', current_commit], stdout=subprocess.DEVNULL): + valid_sig = False + verify_res = subprocess.run([GIT, '-c', 'gpg.program={}/gpg.sh'.format(dirname), 'verify-commit', "--raw", current_commit], capture_output=True) + for line in verify_res.stderr.decode().splitlines(): + if line.startswith("[GNUPG:] VALIDSIG "): + key = line.split(" ")[-1] + valid_sig = key in trusted_keys + elif (line.startswith("[GNUPG:] REVKEYSIG ") or line.startswith("[GNUPG:] EXPKEYSIG ")) and not allow_revsig: + valid_sig = False + break + if not valid_sig: if prev_commit != "": print("No parent of {} was signed with a trusted key!".format(prev_commit), file=sys.stderr) print("Parents are:", file=sys.stderr) @@ -148,15 +178,24 @@ def main(): allow_unclean = current_commit in unclean_merge_allowed if len(parents) == 2 and check_merge and not allow_unclean: current_tree = subprocess.check_output([GIT, 'show', '--format=%T', current_commit]).decode('utf8').splitlines()[0] - subprocess.call([GIT, 'checkout', '--force', '--quiet', parents[0]]) - subprocess.call([GIT, 'merge', '--no-ff', '--quiet', '--no-gpg-sign', parents[1]], stdout=subprocess.DEVNULL) - recreated_tree = subprocess.check_output([GIT, 'show', '--format=format:%T', 'HEAD']).decode('utf8').splitlines()[0] + + # This merge-tree functionality requires git >= 2.38. The + # --write-tree option was added in order to opt-in to the new + # behavior. Older versions of git will not recognize the option and + # will instead exit with code 128. 
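            # Roughly, the check below amounts to (commit ids are placeholders):
            #   git merge-tree --write-tree <parent1> <parent2>   # first output line: recreated tree id
            #   git show -s --format=%T <merge-commit>            # tree recorded in the merge commit
            # and requiring that the two tree ids match.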
+ try: + recreated_tree = subprocess.check_output([GIT, "merge-tree", "--write-tree", parents[0], parents[1]]).decode('utf8').splitlines()[0] + except subprocess.CalledProcessError as e: + if e.returncode == 128: + print("git v2.38+ is required for this functionality.", file=sys.stderr) + sys.exit(1) + else: + raise e + if current_tree != recreated_tree: print("Merge commit {} is not clean".format(current_commit), file=sys.stderr) - subprocess.call([GIT, 'diff', current_commit]) - subprocess.call([GIT, 'checkout', '--force', '--quiet', branch]) + subprocess.call([GIT, 'diff', recreated_tree, current_tree]) sys.exit(1) - subprocess.call([GIT, 'checkout', '--force', '--quiet', branch]) prev_commit = current_commit current_commit = parents[0] diff --git a/contrib/verifybinaries/README.md b/contrib/verifybinaries/README.md deleted file mode 100644 index 4209fdb3644e5..0000000000000 --- a/contrib/verifybinaries/README.md +++ /dev/null @@ -1,41 +0,0 @@ -### Verify Binaries - -#### Preparation: - -Make sure you obtain the proper release signing key and verify the fingerprint with several independent sources. - -```sh -$ gpg --fingerprint "Bitcoin Core binary release signing key" -pub 4096R/36C2E964 2015-06-24 [expires: YYYY-MM-DD] - Key fingerprint = 01EA 5486 DE18 A882 D4C2 6845 90C8 019E 36C2 E964 -uid Wladimir J. van der Laan (Bitcoin Core binary release signing key) -``` - -#### Usage: - -This script attempts to download the signature file `SHA256SUMS.asc` from https://bitcoin.org. - -It first checks if the signature passes, and then downloads the files specified in the file, and checks if the hashes of these files match those that are specified in the signature file. - -The script returns 0 if everything passes the checks. It returns 1 if either the signature check or the hash check doesn't pass. If an error occurs the return value is 2. - - -```sh -./verify.sh bitcoin-core-0.11.2 -./verify.sh bitcoin-core-0.12.0 -./verify.sh bitcoin-core-0.13.0-rc3 -``` - -If you only want to download the binaries of certain platform, add the corresponding suffix, e.g.: - -```sh -./verify.sh bitcoin-core-0.11.2-osx -./verify.sh 0.12.0-linux -./verify.sh bitcoin-core-0.13.0-rc3-win64 -``` - -If you do not want to keep the downloaded binaries, specify anything as the second parameter. - -```sh -./verify.sh bitcoin-core-0.13.0 delete -``` diff --git a/contrib/verifybinaries/verify.sh b/contrib/verifybinaries/verify.sh deleted file mode 100755 index 4296998631cff..0000000000000 --- a/contrib/verifybinaries/verify.sh +++ /dev/null @@ -1,177 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) 2016-2019 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. - -### This script attempts to download the signature file SHA256SUMS.asc from -### bitcoincore.org and bitcoin.org and compares them. -### It first checks if the signature passes, and then downloads the files specified in -### the file, and checks if the hashes of these files match those that are specified -### in the signature file. -### The script returns 0 if everything passes the checks. It returns 1 if either the -### signature check or the hash check doesn't pass. 
If an error occurs the return value is 2 - -export LC_ALL=C -function clean_up { - for file in "$@" - do - rm "$file" 2> /dev/null - done -} - -WORKINGDIR="/tmp/bitcoin_verify_binaries" -TMPFILE="hashes.tmp" - -SIGNATUREFILENAME="SHA256SUMS.asc" -RCSUBDIR="test" -HOST1="https://bitcoincore.org" -HOST2="https://bitcoin.org" -BASEDIR="/bin/" -VERSIONPREFIX="bitcoin-core-" -RCVERSIONSTRING="rc" - -if [ ! -d "$WORKINGDIR" ]; then - mkdir "$WORKINGDIR" -fi - -cd "$WORKINGDIR" || exit 1 - -#test if a version number has been passed as an argument -if [ -n "$1" ]; then - #let's also check if the version number includes the prefix 'bitcoin-', - # and add this prefix if it doesn't - if [[ $1 == "$VERSIONPREFIX"* ]]; then - VERSION="$1" - else - VERSION="$VERSIONPREFIX$1" - fi - - STRIPPEDLAST="${VERSION%-*}" - - #now let's see if the version string contains "rc" or a platform name (e.g. "osx") - if [[ "$STRIPPEDLAST-" == "$VERSIONPREFIX" ]]; then - BASEDIR="$BASEDIR$VERSION/" - else - # let's examine the last part to see if it's rc and/or platform name - STRIPPEDNEXTTOLAST="${STRIPPEDLAST%-*}" - if [[ "$STRIPPEDNEXTTOLAST-" == "$VERSIONPREFIX" ]]; then - - LASTSUFFIX="${VERSION##*-}" - VERSION="$STRIPPEDLAST" - - if [[ $LASTSUFFIX == *"$RCVERSIONSTRING"* ]]; then - RCVERSION="$LASTSUFFIX" - else - PLATFORM="$LASTSUFFIX" - fi - - else - RCVERSION="${STRIPPEDLAST##*-}" - PLATFORM="${VERSION##*-}" - - VERSION="$STRIPPEDNEXTTOLAST" - fi - - BASEDIR="$BASEDIR$VERSION/" - if [[ $RCVERSION == *"$RCVERSIONSTRING"* ]]; then - BASEDIR="$BASEDIR$RCSUBDIR.$RCVERSION/" - fi - fi -else - echo "Error: need to specify a version on the command line" - exit 2 -fi - -if ! WGETOUT=$(wget -N "$HOST1$BASEDIR$SIGNATUREFILENAME" 2>&1); then - echo "Error: couldn't fetch signature file. Have you specified the version number in the following format?" - # shellcheck disable=SC1087 - echo "[$VERSIONPREFIX]-[$RCVERSIONSTRING[0-9]] (example: ${VERSIONPREFIX}0.10.4-${RCVERSIONSTRING}1)" - echo "wget output:" - # shellcheck disable=SC2001 - echo "$WGETOUT"|sed 's/^/\t/g' - exit 2 -fi - -if ! WGETOUT=$(wget -N -O "$SIGNATUREFILENAME.2" "$HOST2$BASEDIR$SIGNATUREFILENAME" 2>&1); then - echo "bitcoin.org failed to provide signature file, but bitcoincore.org did?" - echo "wget output:" - # shellcheck disable=SC2001 - echo "$WGETOUT"|sed 's/^/\t/g' - clean_up $SIGNATUREFILENAME - exit 3 -fi - -SIGFILEDIFFS="$(diff $SIGNATUREFILENAME $SIGNATUREFILENAME.2)" -if [ "$SIGFILEDIFFS" != "" ]; then - echo "bitcoin.org and bitcoincore.org signature files were not equal?" - clean_up $SIGNATUREFILENAME $SIGNATUREFILENAME.2 - exit 4 -fi - -#then we check it -GPGOUT=$(gpg --yes --decrypt --output "$TMPFILE" "$SIGNATUREFILENAME" 2>&1) - -#return value 0: good signature -#return value 1: bad signature -#return value 2: gpg error - -RET="$?" -if [ $RET -ne 0 ]; then - if [ $RET -eq 1 ]; then - #and notify the user if it's bad - echo "Bad signature." - elif [ $RET -eq 2 ]; then - #or if a gpg error has occurred - echo "gpg error. Do you have the Bitcoin Core binary release signing key installed?" 
- fi - - echo "gpg output:" - # shellcheck disable=SC2001 - echo "$GPGOUT"|sed 's/^/\t/g' - clean_up $SIGNATUREFILENAME $SIGNATUREFILENAME.2 $TMPFILE - exit "$RET" -fi - -if [ -n "$PLATFORM" ]; then - grep $PLATFORM $TMPFILE > "$TMPFILE-plat" - TMPFILESIZE=$(stat -c%s "$TMPFILE-plat") - if [ $TMPFILESIZE -eq 0 ]; then - echo "error: no files matched the platform specified" && exit 3 - fi - mv "$TMPFILE-plat" $TMPFILE -fi - -#here we extract the filenames from the signature file -FILES=$(awk '{print $2}' "$TMPFILE") - -#and download these one by one -for file in $FILES -do - echo "Downloading $file" - wget --quiet -N "$HOST1$BASEDIR$file" -done - -#check hashes -DIFF=$(diff <(sha256sum $FILES) "$TMPFILE") - -if [ $? -eq 1 ]; then - echo "Hashes don't match." - echo "Offending files:" - echo "$DIFF"|grep "^<"|awk '{print "\t"$3}' - exit 1 -elif [ $? -gt 1 ]; then - echo "Error executing 'diff'" - exit 2 -fi - -if [ -n "$2" ]; then - echo "Clean up the binaries" - clean_up $FILES $SIGNATUREFILENAME $SIGNATUREFILENAME.2 $TMPFILE -else - echo "Keep the binaries in $WORKINGDIR" - clean_up $TMPFILE -fi - -echo -e "Verified hashes of \n$FILES" - -exit 0 diff --git a/contrib/windeploy/detached-sig-create.sh b/contrib/windeploy/detached-sig-create.sh index 31720e72e7081..82fcf2d40684f 100755 --- a/contrib/windeploy/detached-sig-create.sh +++ b/contrib/windeploy/detached-sig-create.sh @@ -1,5 +1,5 @@ #!/bin/sh -# Copyright (c) 2014-2019 The Bitcoin Core developers +# Copyright (c) 2014-2021 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. @@ -23,9 +23,10 @@ TIMESERVER=http://timestamp.comodoca.com CERTFILE="win-codesign.cert" mkdir -p "${OUTSUBDIR}" +# shellcheck disable=SC2046 basename -a $(ls -1 "${SRCDIR}"/*-unsigned.exe) | while read UNSIGNED; do echo Signing "${UNSIGNED}" - "${OSSLSIGNCODE}" sign -certs "${CERTFILE}" -t "${TIMESERVER}" -in "${SRCDIR}/${UNSIGNED}" -out "${WORKDIR}/${UNSIGNED}" "$@" + "${OSSLSIGNCODE}" sign -certs "${CERTFILE}" -t "${TIMESERVER}" -h sha256 -in "${SRCDIR}/${UNSIGNED}" -out "${WORKDIR}/${UNSIGNED}" "$@" "${OSSLSIGNCODE}" extract-signature -pem -in "${WORKDIR}/${UNSIGNED}" -out "${OUTSUBDIR}/${UNSIGNED}.pem" && rm "${WORKDIR}/${UNSIGNED}" done diff --git a/contrib/windeploy/win-codesign.cert b/contrib/windeploy/win-codesign.cert index 4023a5b638f89..ec60a3018d91f 100644 --- a/contrib/windeploy/win-codesign.cert +++ b/contrib/windeploy/win-codesign.cert @@ -1,100 +1,112 @@ +-----BEGIN CERTIFICATE----- +MIIHeTCCBWGgAwIBAgIQBzR46J2yq3g++NbQS/BBVDANBgkqhkiG9w0BAQsFADBp +MQswCQYDVQQGEwJVUzEXMBUGA1UEChMORGlnaUNlcnQsIEluYy4xQTA/BgNVBAMT +OERpZ2lDZXJ0IFRydXN0ZWQgRzQgQ29kZSBTaWduaW5nIFJTQTQwOTYgU0hBMzg0 +IDIwMjEgQ0ExMB4XDTI0MDUyMjAwMDAwMFoXDTI3MDUzMTIzNTk1OVowgYAxCzAJ +BgNVBAYTAlVTMREwDwYDVQQIEwhEZWxhd2FyZTEOMAwGA1UEBxMFTGV3ZXMxJjAk +BgNVBAoTHUJpdGNvaW4gQ29yZSBDb2RlIFNpZ25pbmcgTExDMSYwJAYDVQQDEx1C +aXRjb2luIENvcmUgQ29kZSBTaWduaW5nIExMQzCCAiIwDQYJKoZIhvcNAQEBBQAD +ggIPADCCAgoCggIBALewxfjztuRTDNAGf7zkqqWNEt28CZmVJHoYltVRxtE1BP45 +BfmptH5eM1JC/XosTPytHRFeOkO4YVAtiELxK9S/82OZlKA7Mx7PW6vv1184u8+m +P3WpTN/KAZTaW9fB0ELTSCuqsvXq2crM2T7NudJnSyWh2VBjLfPPCAcYwzyGKQbl +jQWjFEJDJWFK83t9mK/v0WQgA3jGJeaz+V6CYXMS7UgpdG8dUhg9o63gYJZAW5pY +RIsNRcJCM5LHhwEMW5329UsTmYCfP7/53emepbQ0n8ijVZjgaJ+LZ8NspBLSeCiF +9UPCKX82uWiQAUTbYHCfSi3I0f3wQidXL9ZY+PXmalM7BMuQ+c2xEcl97CnhrDzx +EBwZvvOC9wGoG+8+epV4TjUZWf+7QN1ZYeg1rai7c7c8u9ILogE8su2xVoz333TH 
+CDvScIgnQXmk+cbKMBtg9kM0F+aLWsN2xVf0uAj3U7sdXLrfJeW0DZIktWtTBQzX +O/OE4Ka+1WFnDg0HJIih0cTjl9YYvfe53L4pCGy+qGt/XGBRqCMfXp3g+H9FGR5r +pensVVcsrv3GbTfYdlpdmp9OHH5G57GTAZueobCZg7r7RKK0zPU9EiTLJxzyXuai +v/Ksd8eIhHRjewMaQuAtQM1tO+oKAbLF0v2M7v7/aVT76X32JllYAizm3zjvAgMB +AAGjggIDMIIB/zAfBgNVHSMEGDAWgBRoN+Drtjv4XxGG+/5hewiIZfROQjAdBgNV +HQ4EFgQUvCpU58PIuofv0kHJ3Ty0YDKEy3cwPgYDVR0gBDcwNTAzBgZngQwBBAEw +KTAnBggrBgEFBQcCARYbaHR0cDovL3d3dy5kaWdpY2VydC5jb20vQ1BTMA4GA1Ud +DwEB/wQEAwIHgDATBgNVHSUEDDAKBggrBgEFBQcDAzCBtQYDVR0fBIGtMIGqMFOg +UaBPhk1odHRwOi8vY3JsMy5kaWdpY2VydC5jb20vRGlnaUNlcnRUcnVzdGVkRzRD +b2RlU2lnbmluZ1JTQTQwOTZTSEEzODQyMDIxQ0ExLmNybDBToFGgT4ZNaHR0cDov +L2NybDQuZGlnaWNlcnQuY29tL0RpZ2lDZXJ0VHJ1c3RlZEc0Q29kZVNpZ25pbmdS +U0E0MDk2U0hBMzg0MjAyMUNBMS5jcmwwgZQGCCsGAQUFBwEBBIGHMIGEMCQGCCsG +AQUFBzABhhhodHRwOi8vb2NzcC5kaWdpY2VydC5jb20wXAYIKwYBBQUHMAKGUGh0 +dHA6Ly9jYWNlcnRzLmRpZ2ljZXJ0LmNvbS9EaWdpQ2VydFRydXN0ZWRHNENvZGVT +aWduaW5nUlNBNDA5NlNIQTM4NDIwMjFDQTEuY3J0MAkGA1UdEwQCMAAwDQYJKoZI +hvcNAQELBQADggIBADdniG9IY9oOfw1e3+uc2lR4hoZqquJQRrCnbWJ1npnCTavI +CfcEEMuQ5ztg4TR7tQNj2KcaHWKuPYxEz2bg8HpSPG27lnXaz4pLgfqvjdZWNH2v +W6DGRUAwuMQHSV0qhuRcJPZuhwSFx/8y4r++jIcBxCbt/Jprt/bqc8vZZZzTDPfG +M6cGaKMDvF//OkUPVzh4s557kV7+LoaX8CigiACZky3Zj3tkQfJYkEvdQseNvX49 +CMJ+cjN+fGweshbn/DszAT5oXW5l2PXeceyGrE+5Ex1ELXCPqNj8ZSn+S9IKZOag +zDFBA93RTVD438peXPz//xgusgnmSqSPS5tCp9KSvew81acu4v/+egg9EgSSx5Ho +9fkOX7JuygvN3r3UZqsddxdwf2dPvBDYlMdieF8qsR7H5DQPQoaTVrIhW4TFtJl/ +UPjVlnDwu+yvMC70F+CaVgQs01uZ0VKuG3KNkkEj6+V/SM54NVVgcY/Q7llKIFA8 +Qk8Ip8/83cVBptKW+OU+i2ZwoYskLbdfDE31X2knUIouNZgBBMhzc5WjJCEGXAPm +9xYZMn87cc+ejxCw6/WC4b6tDCziO8drq76Pl6LTNPOtRkEVqt12p8Uqi9PgznUB +bdHeoF5XHt1Ca2ySpSYuMz5djwIC2ws8kiMm44/AyTm6dwRcesiOTqnaRc+t +-----END CERTIFICATE----- -----BEGIN CERTIFICATE----- -MIIFdDCCBFygAwIBAgIRAL98pqZb/N9LuNaNxKsHNGQwDQYJKoZIhvcNAQELBQAw -fDELMAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4G -A1UEBxMHU2FsZm9yZDEYMBYGA1UEChMPU2VjdGlnbyBMaW1pdGVkMSQwIgYDVQQD -ExtTZWN0aWdvIFJTQSBDb2RlIFNpZ25pbmcgQ0EwHhcNMjAwMzI0MDAwMDAwWhcN -MjEwMzI0MjM1OTU5WjCBtzELMAkGA1UEBhMCQ0gxDTALBgNVBBEMBDgwMDUxDjAM -BgNVBAgMBVN0YXRlMRAwDgYDVQQHDAdaw7xyaWNoMRcwFQYDVQQJDA5NYXR0ZW5n -YXNzZSAyNzEuMCwGA1UECgwlQml0Y29pbiBDb3JlIENvZGUgU2lnbmluZyBBc3Nv -Y2lhdGlvbjEuMCwGA1UEAwwlQml0Y29pbiBDb3JlIENvZGUgU2lnbmluZyBBc3Nv -Y2lhdGlvbjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMtxC8N4r/jE -OGOdFy/0UtiUvEczPZf9WYZz/7paAkc75XopHIE5/ssmoEX27gG9K00tf3Q62QAx -inZUPWkNTh8X0l+6uSGiIBFIV7dDgztIxnPcxaqw0k7Q2TEqKJvb5qm16zX6WfXJ -R2r6O5utUdQ3AarHnQq9fwdM1j5+ywS5u52te74ENgDMTMKUuB2J3KH1ASg5PAtO -CjPqPL+ZXJ7eT3M0Z+Lbu5ISZSqZB48BcCwOo/fOO0dAiLT9FE1iVtaCpBKHqGmd -glRjPzZdgDv8g28etRmk8wQ5pQmfL2gBjt/LtIgMPTdHHETKLxJO5H3y0CNx1vzL -ql7xNMxELxkCAwEAAaOCAbMwggGvMB8GA1UdIwQYMBaAFA7hOqhTOjHVir7Bu61n -GgOFrTQOMB0GA1UdDgQWBBSHBbl82FUJiUkXyyYJog1awYRsxjAOBgNVHQ8BAf8E -BAMCB4AwDAYDVR0TAQH/BAIwADATBgNVHSUEDDAKBggrBgEFBQcDAzARBglghkgB -hvhCAQEEBAMCBBAwQAYDVR0gBDkwNzA1BgwrBgEEAbIxAQIBAwIwJTAjBggrBgEF -BQcCARYXaHR0cHM6Ly9zZWN0aWdvLmNvbS9DUFMwQwYDVR0fBDwwOjA4oDagNIYy -aHR0cDovL2NybC5zZWN0aWdvLmNvbS9TZWN0aWdvUlNBQ29kZVNpZ25pbmdDQS5j -cmwwcwYIKwYBBQUHAQEEZzBlMD4GCCsGAQUFBzAChjJodHRwOi8vY3J0LnNlY3Rp -Z28uY29tL1NlY3RpZ29SU0FDb2RlU2lnbmluZ0NBLmNydDAjBggrBgEFBQcwAYYX -aHR0cDovL29jc3Auc2VjdGlnby5jb20wKwYDVR0RBCQwIoEgam9uYXNAYml0Y29p -bmNvcmVjb2Rlc2lnbmluZy5vcmcwDQYJKoZIhvcNAQELBQADggEBAAU59qJzQ2ED -aTMIQTsU01zIhZJ/xwQh78i0v2Mnr46RvzYrZOev+btF3SyUYD8veNnbYlY6yEYq -Vb+/PQnE3t1xlqR80qiTZCk/Wmxx/qKvQuWeRL5QQgvsCmWBpycQ7PNfwzOWxbPE -b0Hb2/VFFZfR9iltkfeInRUrzS96CJGYtm7dMf2JtnXYBcwpn1N8BSMH4nXVyN8g 
-VEE5KyjE7+/awYiSST7+e6Y7FE5AJ4f3FjqnRm+2XetTVqITwMLKZMoV283nSEeH -fA4FNAMGz9QeV38ol65NNqFP2vSSgVoPK79orqH9OOW2LSobt2qun+euddJIQeYV -CMP90b/2WPc= +MIIGsDCCBJigAwIBAgIQCK1AsmDSnEyfXs2pvZOu2TANBgkqhkiG9w0BAQwFADBi +MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSEwHwYDVQQDExhEaWdpQ2VydCBUcnVzdGVkIFJvb3Qg +RzQwHhcNMjEwNDI5MDAwMDAwWhcNMzYwNDI4MjM1OTU5WjBpMQswCQYDVQQGEwJV +UzEXMBUGA1UEChMORGlnaUNlcnQsIEluYy4xQTA/BgNVBAMTOERpZ2lDZXJ0IFRy +dXN0ZWQgRzQgQ29kZSBTaWduaW5nIFJTQTQwOTYgU0hBMzg0IDIwMjEgQ0ExMIIC +IjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA1bQvQtAorXi3XdU5WRuxiEL1 +M4zrPYGXcMW7xIUmMJ+kjmjYXPXrNCQH4UtP03hD9BfXHtr50tVnGlJPDqFX/IiZ +wZHMgQM+TXAkZLON4gh9NH1MgFcSa0OamfLFOx/y78tHWhOmTLMBICXzENOLsvsI +8IrgnQnAZaf6mIBJNYc9URnokCF4RS6hnyzhGMIazMXuk0lwQjKP+8bqHPNlaJGi +TUyCEUhSaN4QvRRXXegYE2XFf7JPhSxIpFaENdb5LpyqABXRN/4aBpTCfMjqGzLm +ysL0p6MDDnSlrzm2q2AS4+jWufcx4dyt5Big2MEjR0ezoQ9uo6ttmAaDG7dqZy3S +vUQakhCBj7A7CdfHmzJawv9qYFSLScGT7eG0XOBv6yb5jNWy+TgQ5urOkfW+0/tv +k2E0XLyTRSiDNipmKF+wc86LJiUGsoPUXPYVGUztYuBeM/Lo6OwKp7ADK5GyNnm+ +960IHnWmZcy740hQ83eRGv7bUKJGyGFYmPV8AhY8gyitOYbs1LcNU9D4R+Z1MI3s +MJN2FKZbS110YU0/EpF23r9Yy3IQKUHw1cVtJnZoEUETWJrcJisB9IlNWdt4z4FK +PkBHX8mBUHOFECMhWWCKZFTBzCEa6DgZfGYczXg4RTCZT/9jT0y7qg0IU0F8WD1H +s/q27IwyCQLMbDwMVhECAwEAAaOCAVkwggFVMBIGA1UdEwEB/wQIMAYBAf8CAQAw +HQYDVR0OBBYEFGg34Ou2O/hfEYb7/mF7CIhl9E5CMB8GA1UdIwQYMBaAFOzX44LS +cV1kTN8uZz/nupiuHA9PMA4GA1UdDwEB/wQEAwIBhjATBgNVHSUEDDAKBggrBgEF +BQcDAzB3BggrBgEFBQcBAQRrMGkwJAYIKwYBBQUHMAGGGGh0dHA6Ly9vY3NwLmRp +Z2ljZXJ0LmNvbTBBBggrBgEFBQcwAoY1aHR0cDovL2NhY2VydHMuZGlnaWNlcnQu +Y29tL0RpZ2lDZXJ0VHJ1c3RlZFJvb3RHNC5jcnQwQwYDVR0fBDwwOjA4oDagNIYy +aHR0cDovL2NybDMuZGlnaWNlcnQuY29tL0RpZ2lDZXJ0VHJ1c3RlZFJvb3RHNC5j +cmwwHAYDVR0gBBUwEzAHBgVngQwBAzAIBgZngQwBBAEwDQYJKoZIhvcNAQEMBQAD +ggIBADojRD2NCHbuj7w6mdNW4AIapfhINPMstuZ0ZveUcrEAyq9sMCcTEp6QRJ9L +/Z6jfCbVN7w6XUhtldU/SfQnuxaBRVD9nL22heB2fjdxyyL3WqqQz/WTauPrINHV +UHmImoqKwba9oUgYftzYgBoRGRjNYZmBVvbJ43bnxOQbX0P4PpT/djk9ntSZz0rd +KOtfJqGVWEjVGv7XJz/9kNF2ht0csGBc8w2o7uCJob054ThO2m67Np375SFTWsPK +6Wrxoj7bQ7gzyE84FJKZ9d3OVG3ZXQIUH0AzfAPilbLCIXVzUstG2MQ0HKKlS43N +b3Y3LIU/Gs4m6Ri+kAewQ3+ViCCCcPDMyu/9KTVcH4k4Vfc3iosJocsL6TEa/y4Z +XDlx4b6cpwoG1iZnt5LmTl/eeqxJzy6kdJKt2zyknIYf48FWGysj/4+16oh7cGvm +oLr9Oj9FpsToFpFSi0HASIRLlk2rREDjjfAVKM7t8RhWByovEMQMCGQ8M4+uKIw8 +y4+ICw2/O/TOHnuO77Xry7fwdxPm5yg/rBKupS8ibEH5glwVZsxsDsrFhsP2JjMM +B0ug0wcCampAMEhLNKhRILutG4UI4lkNbcoFUCvqShyepf2gpx8GdOfy1lKQ/a+F +SCH5Vzu0nAPthkX0tGFuv2jiJmCG6sivqf6UHedjGzqGVnhO -----END CERTIFICATE----- -----BEGIN CERTIFICATE----- -MIIF3jCCA8agAwIBAgIQAf1tMPyjylGoG7xkDjUDLTANBgkqhkiG9w0BAQwFADCB -iDELMAkGA1UEBhMCVVMxEzARBgNVBAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0pl -cnNleSBDaXR5MR4wHAYDVQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNV -BAMTJVVTRVJUcnVzdCBSU0EgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMTAw -MjAxMDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBiDELMAkGA1UEBhMCVVMxEzARBgNV -BAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNleSBDaXR5MR4wHAYDVQQKExVU -aGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMTJVVTRVJUcnVzdCBSU0EgQ2Vy -dGlmaWNhdGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIK -AoICAQCAEmUXNg7D2wiz0KxXDXbtzSfTTK1Qg2HiqiBNCS1kCdzOiZ/MPans9s/B -3PHTsdZ7NygRK0faOca8Ohm0X6a9fZ2jY0K2dvKpOyuR+OJv0OwWIJAJPuLodMkY -tJHUYmTbf6MG8YgYapAiPLz+E/CHFHv25B+O1ORRxhFnRghRy4YUVD+8M/5+bJz/ -Fp0YvVGONaanZshyZ9shZrHUm3gDwFA66Mzw3LyeTP6vBZY1H1dat//O+T23LLb2 -VN3I5xI6Ta5MirdcmrS3ID3KfyI0rn47aGYBROcBTkZTmzNg95S+UzeQc0PzMsNT -79uq/nROacdrjGCT3sTHDN/hMq7MkztReJVni+49Vv4M0GkPGw/zJSZrM233bkf6 -c0Plfg6lZrEpfDKEY1WJxA3Bk1QwGROs0303p+tdOmw1XNtB1xLaqUkL39iAigmT 
-Yo61Zs8liM2EuLE/pDkP2QKe6xJMlXzzawWpXhaDzLhn4ugTncxbgtNMs+1b/97l -c6wjOy0AvzVVdAlJ2ElYGn+SNuZRkg7zJn0cTRe8yexDJtC/QV9AqURE9JnnV4ee -UB9XVKg+/XRjL7FQZQnmWEIuQxpMtPAlR1n6BB6T1CZGSlCBst6+eLf8ZxXhyVeE -Hg9j1uliutZfVS7qXMYoCAQlObgOK6nyTJccBz8NUvXt7y+CDwIDAQABo0IwQDAd -BgNVHQ4EFgQUU3m/WqorSs9UgOHYm8Cd8rIDZsswDgYDVR0PAQH/BAQDAgEGMA8G -A1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEMBQADggIBAFzUfA3P9wF9QZllDHPF -Up/L+M+ZBn8b2kMVn54CVVeWFPFSPCeHlCjtHzoBN6J2/FNQwISbxmtOuowhT6KO -VWKR82kV2LyI48SqC/3vqOlLVSoGIG1VeCkZ7l8wXEskEVX/JJpuXior7gtNn3/3 -ATiUFJVDBwn7YKnuHKsSjKCaXqeYalltiz8I+8jRRa8YFWSQEg9zKC7F4iRO/Fjs -8PRF/iKz6y+O0tlFYQXBl2+odnKPi4w2r78NBc5xjeambx9spnFixdjQg3IM8WcR -iQycE0xyNN+81XHfqnHd4blsjDwSXWXavVcStkNr/+XeTWYRUc+ZruwXtuhxkYze -Sf7dNXGiFSeUHM9h4ya7b6NnJSFd5t0dCy5oGzuCr+yDZ4XUmFF0sbmZgIn/f3gZ -XHlKYC6SQK5MNyosycdiyA5d9zZbyuAlJQG03RoHnHcAP9Dc1ew91Pq7P8yF1m9/ -qS3fuQL39ZeatTXaw2ewh0qpKJ4jjv9cJ2vhsE/zB+4ALtRZh8tSQZXq9EfX7mRB -VXyNWQKV3WKdwrnuWih0hKWbt5DHDAff9Yk2dDLWKMGwsAvgnEzDHNb842m1R0aB -L6KCq9NjRHDEjf8tM7qtj3u1cIiuPhnPQCjY/MiQu12ZIvVS5ljFH4gxQ+6IHdfG -jjxDah2nGN59PRbxYvnKkKj9 ------END CERTIFICATE----- ------BEGIN CERTIFICATE----- -MIIF9TCCA92gAwIBAgIQHaJIMG+bJhjQguCWfTPTajANBgkqhkiG9w0BAQwFADCB -iDELMAkGA1UEBhMCVVMxEzARBgNVBAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0pl -cnNleSBDaXR5MR4wHAYDVQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNV -BAMTJVVTRVJUcnVzdCBSU0EgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMTgx -MTAyMDAwMDAwWhcNMzAxMjMxMjM1OTU5WjB8MQswCQYDVQQGEwJHQjEbMBkGA1UE -CBMSR3JlYXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHEwdTYWxmb3JkMRgwFgYDVQQK -Ew9TZWN0aWdvIExpbWl0ZWQxJDAiBgNVBAMTG1NlY3RpZ28gUlNBIENvZGUgU2ln -bmluZyBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAIYijTKFehif -SfCWL2MIHi3cfJ8Uz+MmtiVmKUCGVEZ0MWLFEO2yhyemmcuVMMBW9aR1xqkOUGKl -UZEQauBLYq798PgYrKf/7i4zIPoMGYmobHutAMNhodxpZW0fbieW15dRhqb0J+V8 -aouVHltg1X7XFpKcAC9o95ftanK+ODtj3o+/bkxBXRIgCFnoOc2P0tbPBrRXBbZO -oT5Xax+YvMRi1hsLjcdmG0qfnYHEckC14l/vC0X/o84Xpi1VsLewvFRqnbyNVlPG -8Lp5UEks9wO5/i9lNfIi6iwHr0bZ+UYc3Ix8cSjz/qfGFN1VkW6KEQ3fBiSVfQ+n -oXw62oY1YdMCAwEAAaOCAWQwggFgMB8GA1UdIwQYMBaAFFN5v1qqK0rPVIDh2JvA -nfKyA2bLMB0GA1UdDgQWBBQO4TqoUzox1Yq+wbutZxoDha00DjAOBgNVHQ8BAf8E -BAMCAYYwEgYDVR0TAQH/BAgwBgEB/wIBADAdBgNVHSUEFjAUBggrBgEFBQcDAwYI -KwYBBQUHAwgwEQYDVR0gBAowCDAGBgRVHSAAMFAGA1UdHwRJMEcwRaBDoEGGP2h0 -dHA6Ly9jcmwudXNlcnRydXN0LmNvbS9VU0VSVHJ1c3RSU0FDZXJ0aWZpY2F0aW9u -QXV0aG9yaXR5LmNybDB2BggrBgEFBQcBAQRqMGgwPwYIKwYBBQUHMAKGM2h0dHA6 -Ly9jcnQudXNlcnRydXN0LmNvbS9VU0VSVHJ1c3RSU0FBZGRUcnVzdENBLmNydDAl -BggrBgEFBQcwAYYZaHR0cDovL29jc3AudXNlcnRydXN0LmNvbTANBgkqhkiG9w0B -AQwFAAOCAgEATWNQ7Uc0SmGk295qKoyb8QAAHh1iezrXMsL2s+Bjs/thAIiaG20Q -BwRPvrjqiXgi6w9G7PNGXkBGiRL0C3danCpBOvzW9Ovn9xWVM8Ohgyi33i/klPeF -M4MtSkBIv5rCT0qxjyT0s4E307dksKYjalloUkJf/wTr4XRleQj1qZPea3FAmZa6 -ePG5yOLDCBaxq2NayBWAbXReSnV+pbjDbLXP30p5h1zHQE1jNfYw08+1Cg4LBH+g -S667o6XQhACTPlNdNKUANWlsvp8gJRANGftQkGG+OY96jk32nw4e/gdREmaDJhlI -lc5KycF/8zoFm/lv34h/wCOe0h5DekUxwZxNqfBZslkZ6GqNKQQCd3xLS81wvjqy -VVp4Pry7bwMQJXcVNIr5NsxDkuS6T/FikyglVyn7URnHoSVAaoRXxrKdsbwcCtp8 -Z359LukoTBh+xHsxQXGaSynsCz1XUNLK3f2eBVHlRHjdAd6xdZgNVCT98E7j4viD -vXK6yz067vBeF5Jobchh+abxKgoLpbn0nu6YMgWFnuv5gynTxix9vTp3Los3QqBq -gu07SqqUEKThDfgXxbZaeTMYkuO1dfih6Y4KJR7kHvGfWocj/5+kUZ77OYARzdu1 -xKeogG/lU9Tg46LC0lsa+jImLWpXcBw8pFguo/NbSwfcMlnzh6cabVg= +MIIFkDCCA3igAwIBAgIQBZsbV56OITLiOQe9p3d1XDANBgkqhkiG9w0BAQwFADBi +MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSEwHwYDVQQDExhEaWdpQ2VydCBUcnVzdGVkIFJvb3Qg +RzQwHhcNMTMwODAxMTIwMDAwWhcNMzgwMTE1MTIwMDAwWjBiMQswCQYDVQQGEwJV 
+UzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQu +Y29tMSEwHwYDVQQDExhEaWdpQ2VydCBUcnVzdGVkIFJvb3QgRzQwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQC/5pBzaN675F1KPDAiMGkz7MKnJS7JIT3y +ithZwuEppz1Yq3aaza57G4QNxDAf8xukOBbrVsaXbR2rsnnyyhHS5F/WBTxSD1If +xp4VpX6+n6lXFllVcq9ok3DCsrp1mWpzMpTREEQQLt+C8weE5nQ7bXHiLQwb7iDV +ySAdYyktzuxeTsiT+CFhmzTrBcZe7FsavOvJz82sNEBfsXpm7nfISKhmV1efVFiO +DCu3T6cw2Vbuyntd463JT17lNecxy9qTXtyOj4DatpGYQJB5w3jHtrHEtWoYOAMQ +jdjUN6QuBX2I9YI+EJFwq1WCQTLX2wRzKm6RAXwhTNS8rhsDdV14Ztk6MUSaM0C/ +CNdaSaTC5qmgZ92kJ7yhTzm1EVgX9yRcRo9k98FpiHaYdj1ZXUJ2h4mXaXpI8OCi +EhtmmnTK3kse5w5jrubU75KSOp493ADkRSWJtppEGSt+wJS00mFt6zPZxd9LBADM +fRyVw4/3IbKyEbe7f/LVjHAsQWCqsWMYRJUadmJ+9oCw++hkpjPRiQfhvbfmQ6QY +uKZ3AeEPlAwhHbJUKSWJbOUOUlFHdL4mrLZBdd56rF+NP8m800ERElvlEFDrMcXK +chYiCd98THU/Y+whX8QgUWtvsauGi0/C1kVfnSD8oR7FwI+isX4KJpn15GkvmB0t +9dmpsh3lGwIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB +hjAdBgNVHQ4EFgQU7NfjgtJxXWRM3y5nP+e6mK4cD08wDQYJKoZIhvcNAQEMBQAD +ggIBALth2X2pbL4XxJEbw6GiAI3jZGgPVs93rnD5/ZpKmbnJeFwMDF/k5hQpVgs2 +SV1EY+CtnJYYZhsjDT156W1r1lT40jzBQ0CuHVD1UvyQO7uYmWlrx8GnqGikJ9yd ++SeuMIW59mdNOj6PWTkiU0TryF0Dyu1Qen1iIQqAyHNm0aAFYF/opbSnr6j3bTWc +fFqK1qI4mfN4i/RN0iAL3gTujJtHgXINwBQy7zBZLq7gcfJW5GqXb5JQbZaNaHqa +sjYUegbyJLkJEVDXCLG4iXqEI2FCKeWjzaIgQdfRnGTZ6iahixTXTBmyUEFxPT9N +cCOGDErcgdLMMpSEDQgJlxxPwO5rIHQw0uA5NBCFIRUBCOhVMt5xSdkoF1BN5r5N +0XWs0Mr7QbhDparTwwVETyw2m+L64kW4I1NsBm9nVX9GtUw/bihaeSbSpKhil9Ie +4u1Ki7wb/UdKDd9nZn6yW0HQO+T0O/QEY+nvwlQAUaCKKsnOeMzV6ocEGLPOr0mI +r/OSmbaz5mEP0oUA51Aa5BuVnRmhuZyxm7EAHu/QD09CbMkKvO5D+jpxpchNJqU1 +/YldvIViHTLSoCtU7ZpXwdv6EM8Zt4tKG48BtieVU+i2iW1bvGjUI+iLUaJW+fCm +gKDWHrO8Dw9TdSmq6hN35N6MgSGtBxBHEa2HPQfRdbzP82Z+ -----END CERTIFICATE----- diff --git a/contrib/zmq/zmq_sub.py b/contrib/zmq/zmq_sub.py old mode 100644 new mode 100755 index 06893407f5fef..d8087a4db387c --- a/contrib/zmq/zmq_sub.py +++ b/contrib/zmq/zmq_sub.py @@ -1,5 +1,5 @@ #!/usr/bin/env python3 -# Copyright (c) 2014-2018 The Bitcoin Core developers +# Copyright (c) 2014-2021 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. @@ -11,7 +11,8 @@ -zmqpubrawtx=tcp://127.0.0.1:28332 \ -zmqpubrawblock=tcp://127.0.0.1:28332 \ -zmqpubhashtx=tcp://127.0.0.1:28332 \ - -zmqpubhashblock=tcp://127.0.0.1:28332 + -zmqpubhashblock=tcp://127.0.0.1:28332 \ + -zmqpubsequence=tcp://127.0.0.1:28332 We use the asyncio library here. `self.handle()` installs itself as a future at the end of the function. 
Since it never returns with the event @@ -22,7 +23,6 @@ https://github.com/bitcoin/bitcoin/blob/37a7fe9e440b83e2364d5498931253937abe9294/contrib/zmq/zmq_sub.py """ -import binascii import asyncio import zmq import zmq.asyncio @@ -47,28 +47,32 @@ def __init__(self): self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "hashtx") self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "rawblock") self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "rawtx") + self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "sequence") self.zmqSubSocket.connect("tcp://127.0.0.1:%i" % port) async def handle(self) : - msg = await self.zmqSubSocket.recv_multipart() - topic = msg[0] - body = msg[1] + topic, body, seq = await self.zmqSubSocket.recv_multipart() sequence = "Unknown" - if len(msg[-1]) == 4: - msgSequence = struct.unpack('/dev/null) -build_id_string+=$(shell $(build_AR) --version 2>/dev/null) -build_id_string+=$(shell $(build_CXX) --version 2>/dev/null) -build_id_string+=$(shell $(build_RANLIB) --version 2>/dev/null) -build_id_string+=$(shell $(build_STRIP) --version 2>/dev/null) - -$(host_arch)_$(host_os)_id_string:=$(HOST_ID_SALT) -$(host_arch)_$(host_os)_id_string+=$(shell $(host_CC) --version 2>/dev/null) -$(host_arch)_$(host_os)_id_string+=$(shell $(host_AR) --version 2>/dev/null) -$(host_arch)_$(host_os)_id_string+=$(shell $(host_CXX) --version 2>/dev/null) -$(host_arch)_$(host_os)_id_string+=$(shell $(host_RANLIB) --version 2>/dev/null) -$(host_arch)_$(host_os)_id_string+=$(shell $(host_STRIP) --version 2>/dev/null) - -qrencode_packages_$(NO_QR) = $(qrencode_packages) +# Previously, we directly invoked the well-known programs using $(shell ...) +# to construct build_id_string. However, that was problematic because: +# +# 1. When invoking a shell, GNU Make special-cases exit code 127 (command not +# found) by not capturing the output but instead passing it through. This is +# not done for any other exit code. +# +# 2. Characters like '#' (from these programs' output) would end up in make +# variables like build_id_string, which would be wrongly interpreted by make +# when these variables were used. +# +# Therefore, we should avoid having arbitrary strings in make variables where +# possible. The gen_id script used here hashes the output to construct a +# "make-safe" id. +# +# Also note that these lines need to be: +# +# 1. After including {hosts,builders}/*.mk, since they rely on the tool +# variables (e.g. build_CC, host_STRIP, etc.) to be set. +# +# 2. 
Before including packages/*.mk (excluding packages/packages.mk), since +# they rely on the build_id variables +# +build_id:=$(shell env CC='$(build_CC)' C_STANDARD='$(C_STANDARD)' CXX='$(build_CXX)' CXX_STANDARD='$(CXX_STANDARD)' AR='$(build_AR) 'NM='$(build_NM)' RANLIB='$(build_RANLIB)' STRIP='$(build_STRIP)' SHA256SUM='$(build_SHA256SUM)' DEBUG='$(DEBUG)' LTO='$(LTO)' NO_HARDEN='$(NO_HARDEN)' ./gen_id '$(BUILD_ID_SALT)' 'GUIX_ENVIRONMENT=$(realpath $(GUIX_ENVIRONMENT))') +$(host_arch)_$(host_os)_id:=$(shell env CC='$(host_CC)' C_STANDARD='$(C_STANDARD)' CXX='$(host_CXX)' CXX_STANDARD='$(CXX_STANDARD)' AR='$(host_AR)' NM='$(host_NM)' RANLIB='$(host_RANLIB)' STRIP='$(host_STRIP)' SHA256SUM='$(build_SHA256SUM)' DEBUG='$(DEBUG)' LTO='$(LTO)' NO_HARDEN='$(NO_HARDEN)' ./gen_id '$(HOST_ID_SALT)' 'GUIX_ENVIRONMENT=$(realpath $(GUIX_ENVIRONMENT))') + +boost_packages_$(NO_BOOST) = $(boost_packages) + +libevent_packages_$(NO_LIBEVENT) = $(libevent_packages) + +qrencode_packages_$(NO_QR) = $(qrencode_$(host_os)_packages) qt_packages_$(NO_QT) = $(qt_packages) $(qt_$(host_os)_packages) $(qt_$(host_arch)_$(host_os)_packages) $(qrencode_packages_) -wallet_packages_$(NO_WALLET) = $(wallet_packages) +bdb_packages_$(NO_BDB) = $(bdb_packages) +sqlite_packages_$(NO_SQLITE) = $(sqlite_packages) +wallet_packages_$(NO_WALLET) = $(bdb_packages_) $(sqlite_packages_) + upnp_packages_$(NO_UPNP) = $(upnp_packages) + zmq_packages_$(NO_ZMQ) = $(zmq_packages) +multiprocess_packages_$(MULTIPROCESS) = $(multiprocess_packages) +usdt_packages_$(NO_USDT) = $(usdt_$(host_os)_packages) -packages += $($(host_arch)_$(host_os)_packages) $($(host_os)_packages) $(qt_packages_) $(wallet_packages_) $(upnp_packages_) +packages += $($(host_arch)_$(host_os)_packages) $($(host_os)_packages) $(boost_packages_) $(libevent_packages_) $(qt_packages_) $(wallet_packages_) $(upnp_packages_) $(usdt_packages_) native_packages += $($(host_arch)_$(host_os)_native_packages) $($(host_os)_native_packages) ifneq ($(zmq_packages_),) packages += $(zmq_packages) endif -all_packages = $(packages) $(native_packages) +ifeq ($(multiprocess_packages_),) +packages += $(multiprocess_packages) +native_packages += $(multiprocess_native_packages) +endif -meta_depends = Makefile funcs.mk builders/default.mk hosts/default.mk hosts/$(host_os).mk builders/$(build_os).mk +all_packages = $(packages) $(native_packages) -$(host_arch)_$(host_os)_native_toolchain?=$($(host_os)_native_toolchain) +meta_depends = Makefile config.guess config.sub funcs.mk builders/default.mk hosts/default.mk hosts/$(host_os).mk builders/$(build_os).mk include funcs.mk -toolchain_path=$($($(host_arch)_$(host_os)_native_toolchain)_prefixbin) -final_build_id_long+=$(shell $(build_SHA256SUM) config.site.in) +final_build_id_long+=$(shell $(build_SHA256SUM) toolchain.cmake.in) final_build_id+=$(shell echo -n "$(final_build_id_long)" | $(build_SHA256SUM) | cut -c-$(HASH_LENGTH)) $(host_prefix)/.stamp_$(final_build_id): $(native_packages) $(packages) - $(AT)rm -rf $(@D) - $(AT)mkdir -p $(@D) - $(AT)echo copying packages: $^ - $(AT)echo to: $(@D) - $(AT)cd $(@D); $(foreach package,$^, tar xf $($(package)_cached); ) - $(AT)touch $@ - -$(host_prefix)/share/config.site : config.site.in $(host_prefix)/.stamp_$(final_build_id) - $(AT)@mkdir -p $(@D) - $(AT)sed -e 's|@HOST@|$(host)|' \ - -e 's|@CC@|$(toolchain_path)$(host_CC)|' \ - -e 's|@CXX@|$(toolchain_path)$(host_CXX)|' \ - -e 's|@AR@|$(toolchain_path)$(host_AR)|' \ - -e 's|@RANLIB@|$(toolchain_path)$(host_RANLIB)|' \ - -e 
's|@NM@|$(toolchain_path)$(host_NM)|' \ - -e 's|@STRIP@|$(toolchain_path)$(host_STRIP)|' \ - -e 's|@build_os@|$(build_os)|' \ - -e 's|@host_os@|$(host_os)|' \ - -e 's|@CFLAGS@|$(strip $(host_CFLAGS) $(host_$(release_type)_CFLAGS))|' \ - -e 's|@CXXFLAGS@|$(strip $(host_CXXFLAGS) $(host_$(release_type)_CXXFLAGS))|' \ - -e 's|@CPPFLAGS@|$(strip $(host_CPPFLAGS) $(host_$(release_type)_CPPFLAGS))|' \ - -e 's|@LDFLAGS@|$(strip $(host_LDFLAGS) $(host_$(release_type)_LDFLAGS))|' \ - -e 's|@allow_host_packages@|$(ALLOW_HOST_PACKAGES)|' \ - -e 's|@no_qt@|$(NO_QT)|' \ - -e 's|@no_qr@|$(NO_QR)|' \ - -e 's|@no_zmq@|$(NO_ZMQ)|' \ - -e 's|@no_wallet@|$(NO_WALLET)|' \ - -e 's|@no_upnp@|$(NO_UPNP)|' \ - -e 's|@debug@|$(DEBUG)|' \ - $< > $@ - $(AT)touch $@ + rm -rf $(@D) + mkdir -p $(@D) + echo copying packages: $^ + echo to: $(@D) + cd $(@D); $(foreach package,$^, $(build_TAR) xf $($(package)_cached); ) + echo To build Bitcoin Core with these packages, pass \'--toolchain $(@D)/toolchain.cmake\' to the first CMake invocation. + touch $@ + +ifeq ($(host),$(build)) + crosscompiling=FALSE +else + crosscompiling=TRUE +endif +$(host_prefix)/toolchain.cmake : toolchain.cmake.in $(host_prefix)/.stamp_$(final_build_id) + @mkdir -p $(@D) + sed -e 's|@depends_crosscompiling@|$(crosscompiling)|' \ + -e 's|@host_system_name@|$($(host_os)_cmake_system_name)|' \ + -e 's|@host_system_version@|$($(host_os)_cmake_system_version)|' \ + -e 's|@host_arch@|$(host_arch)|' \ + -e 's|@CC@|$(host_CC)|' \ + -e 's|@CXX@|$(host_CXX)|' \ + -e 's|@OSX_SDK@|$(OSX_SDK)|' \ + -e 's|@AR@|$(host_AR)|' \ + -e 's|@RANLIB@|$(host_RANLIB)|' \ + -e 's|@STRIP@|$(host_STRIP)|' \ + -e 's|@OBJCOPY@|$(host_OBJCOPY)|' \ + -e 's|@OBJDUMP@|$(host_OBJDUMP)|' \ + -e 's|@depends_prefix@|$(host_prefix)|' \ + -e 's|@CFLAGS@|$(strip $(host_CFLAGS))|' \ + -e 's|@CFLAGS_RELEASE@|$(strip $(host_release_CFLAGS))|' \ + -e 's|@CFLAGS_DEBUG@|$(strip $(host_debug_CFLAGS))|' \ + -e 's|@CXXFLAGS@|$(strip $(host_CXXFLAGS))|' \ + -e 's|@CXXFLAGS_RELEASE@|$(strip $(host_release_CXXFLAGS))|' \ + -e 's|@CXXFLAGS_DEBUG@|$(strip $(host_debug_CXXFLAGS))|' \ + -e 's|@CPPFLAGS@|$(strip $(host_CPPFLAGS))|' \ + -e 's|@CPPFLAGS_RELEASE@|$(strip $(host_release_CPPFLAGS))|' \ + -e 's|@CPPFLAGS_DEBUG@|$(strip $(host_debug_CPPFLAGS))|' \ + -e 's|@LDFLAGS@|$(strip $(host_LDFLAGS))|' \ + -e 's|@LDFLAGS_RELEASE@|$(strip $(host_release_LDFLAGS))|' \ + -e 's|@LDFLAGS_DEBUG@|$(strip $(host_debug_LDFLAGS))|' \ + -e 's|@qt_packages@|$(qt_packages_)|' \ + -e 's|@qrencode_packages@|$(qrencode_packages_)|' \ + -e 's|@zmq_packages@|$(zmq_packages_)|' \ + -e 's|@wallet_packages@|$(wallet_packages_)|' \ + -e 's|@bdb_packages@|$(bdb_packages_)|' \ + -e 's|@sqlite_packages@|$(sqlite_packages_)|' \ + -e 's|@upnp_packages@|$(upnp_packages_)|' \ + -e 's|@usdt_packages@|$(usdt_packages_)|' \ + -e 's|@no_harden@|$(NO_HARDEN)|' \ + -e 's|@multiprocess@|$(MULTIPROCESS)|' \ + $< > $@ + touch $@ define check_or_remove_cached mkdir -p $(BASE_CACHE)/$(host)/$(package) && cd $(BASE_CACHE)/$(host)/$(package); \ @@ -179,7 +257,7 @@ check-packages: check-sources: @$(foreach package,$(all_packages),$(call check_or_remove_sources,$(package));) -$(host_prefix)/share/config.site: check-packages +$(host_prefix)/toolchain.cmake: check-packages check-packages: check-sources @@ -187,15 +265,15 @@ clean-all: clean @rm -rf $(SOURCES_PATH) x86_64* i686* mips* arm* aarch64* powerpc* riscv32* riscv64* s390x* clean: - @rm -rf $(WORK_PATH) $(BASE_CACHE) $(BUILD) + @rm -rf $(WORK_PATH) $(BASE_CACHE) $(BUILD) *.log -install: 
check-packages $(host_prefix)/share/config.site +install: check-packages $(host_prefix)/toolchain.cmake download-one: check-sources $(all_sources) download-osx: - @$(MAKE) -s HOST=x86_64-apple-darwin14 download-one + @$(MAKE) -s HOST=x86_64-apple-darwin download-one download-linux: @$(MAKE) -s HOST=x86_64-unknown-linux-gnu download-one download-win: @@ -205,3 +283,5 @@ download: download-osx download-linux download-win $(foreach package,$(all_packages),$(eval $(call ext_add_stages,$(package)))) .PHONY: install cached clean clean-all download-one download-osx download-linux download-win download check-packages check-sources +.PHONY: FORCE +$(V).SILENT: diff --git a/depends/README.md b/depends/README.md index 79865ff011575..4ef7247ea4e92 100644 --- a/depends/README.md +++ b/depends/README.md @@ -12,20 +12,21 @@ For example: make HOST=x86_64-w64-mingw32 -j4 -**Bitcoin Core's configure script by default will ignore the depends output.** In +**When configuring Bitcoin Core, CMake by default will ignore the depends output.** In order for it to pick up libraries, tools, and settings from the depends build, -you must point it at the appropriate `--prefix` directory generated by the -build. In the above example, a prefix dir named x86_64-w64-mingw32 will be -created. To use it for Bitcoin: +you must specify the toolchain file. +In the above example, a file named `depends/x86_64-w64-mingw32/toolchain.cmake` will be +created. To use it during configuring Bitcoin Core: - ./configure --prefix=$PWD/depends/x86_64-w64-mingw32 + cmake -B build --toolchain depends/x86_64-w64-mingw32/toolchain.cmake -Common `host-platform-triplets` for cross compilation are: +Common `host-platform-triplet`s for cross compilation are: - `i686-pc-linux-gnu` for Linux 32 bit - `x86_64-pc-linux-gnu` for x86 Linux - `x86_64-w64-mingw32` for Win64 -- `x86_64-apple-darwin16` for macOS +- `x86_64-apple-darwin` for macOS +- `arm64-apple-darwin` for ARM macOS - `arm-linux-gnueabihf` for Linux ARM 32 bit - `aarch64-linux-gnu` for Linux ARM 64 bit - `powerpc64-linux-gnu` for Linux POWER 64-bit (big endian) @@ -33,28 +34,33 @@ Common `host-platform-triplets` for cross compilation are: - `riscv32-linux-gnu` for Linux RISC-V 32 bit - `riscv64-linux-gnu` for Linux RISC-V 64 bit - `s390x-linux-gnu` for Linux S390X -- `armv7a-linux-android` for Android ARM 32 bit -- `aarch64-linux-android` for Android ARM 64 bit -- `i686-linux-android` for Android x86 32 bit -- `x86_64-linux-android` for Android x86 64 bit -The paths are automatically configured and no other options are needed unless targeting [Android](#Android). +The paths are automatically configured and no other options are needed. ### Install the required dependencies: Ubuntu & Debian +#### Common + + apt install bison cmake curl make patch pkg-config python3 xz-utils + #### For macOS cross compilation - sudo apt-get install curl librsvg2-bin libtiff-tools bsdmainutils cmake imagemagick libcap-dev libz-dev libbz2-dev python3-setuptools + apt install clang lld llvm g++ zip + +Clang 18 or later is required. You must also obtain the macOS SDK before +proceeding with a cross-compile. Under the depends directory, create a +subdirectory named `SDKs`. Then, place the extracted SDK under this new directory. +For more information, see [SDK Extraction](../contrib/macdeploy/README.md#sdk-extraction). 
#### For Win64 cross compilation -- see [build-windows.md](../doc/build-windows.md#cross-compilation-for-ubuntu-and-windows-subsystem-for-linux) + apt install g++-mingw-w64-x86-64-posix #### For linux (including i386, ARM) cross compilation Common linux dependencies: - sudo apt-get install make automake cmake curl g++-multilib libtool binutils-gold bsdmainutils pkg-config python3 patch + sudo apt-get install g++-multilib binutils For linux ARM cross compilation: @@ -72,31 +78,54 @@ For linux RISC-V 64-bit cross compilation (there are no packages for 32-bit): sudo apt-get install g++-riscv64-linux-gnu binutils-riscv64-linux-gnu -RISC-V known issue: gcc-7.3.0 and gcc-7.3.1 result in a broken `test_bitcoin` executable (see https://github.com/bitcoin/bitcoin/pull/13543), -this is apparently fixed in gcc-8.1.0. - For linux S390X cross compilation: sudo apt-get install g++-s390x-linux-gnu binutils-s390x-linux-gnu +### Install the required dependencies: FreeBSD + + pkg install bash + +### Install the required dependencies: NetBSD + + pkgin install bash gmake + +### Install the required dependencies: OpenBSD + + pkg_add bash gmake gtar + ### Dependency Options -The following can be set when running make: make FOO=bar - - SOURCES_PATH: downloaded sources will be placed here - BASE_CACHE: built packages will be placed here - SDK_PATH: Path where sdk's can be found (used by macOS) - FALLBACK_DOWNLOAD_PATH: If a source file can't be fetched, try here before giving up - NO_QT: Don't download/build/cache qt and its dependencies - NO_QR: Don't download/build/cache packages needed for enabling qrencode - NO_ZMQ: Don't download/build/cache packages needed for enabling zeromq - NO_WALLET: Don't download/build/cache libs needed to enable the wallet - NO_UPNP: Don't download/build/cache packages needed for enabling upnp - DEBUG: disable some optimizations and enable more runtime checking - HOST_ID_SALT: Optional salt to use when generating host package ids - BUILD_ID_SALT: Optional salt to use when generating build package ids - -If some packages are not built, for example `make NO_WALLET=1`, the appropriate -options will be passed to bitcoin's configure. In this case, `--disable-wallet`. + +The following can be set when running make: `make FOO=bar` + +- `SOURCES_PATH`: Downloaded sources will be placed here +- `BASE_CACHE`: Built packages will be placed here +- `SDK_PATH`: Path where SDKs can be found (used by macOS) +- `FALLBACK_DOWNLOAD_PATH`: If a source file can't be fetched, try here before giving up +- `C_STANDARD`: Set the C standard version used. Defaults to `c11`. +- `CXX_STANDARD`: Set the C++ standard version used. Defaults to `c++20`. 
+- `NO_BOOST`: Don't download/build/cache Boost +- `NO_LIBEVENT`: Don't download/build/cache Libevent +- `NO_QT`: Don't download/build/cache Qt and its dependencies +- `NO_QR`: Don't download/build/cache packages needed for enabling qrencode +- `NO_ZMQ`: Don't download/build/cache packages needed for enabling ZeroMQ +- `NO_WALLET`: Don't download/build/cache libs needed to enable the wallet +- `NO_BDB`: Don't download/build/cache BerkeleyDB +- `NO_SQLITE`: Don't download/build/cache SQLite +- `NO_UPNP`: Don't download/build/cache packages needed for enabling UPnP +- `NO_USDT`: Don't download/build/cache packages needed for enabling USDT tracepoints +- `MULTIPROCESS`: Build libmultiprocess (experimental) +- `DEBUG`: Disable some optimizations and enable more runtime checking +- `HOST_ID_SALT`: Optional salt to use when generating host package ids +- `BUILD_ID_SALT`: Optional salt to use when generating build package ids +- `LOG`: Use file-based logging for individual packages. During a package build its log file + resides in the `depends` directory, and the log file is printed out automatically in case + of build error. After successful build log files are moved along with package archives +- `LTO`: Enable options needed for LTO. Does not add `-flto` related options to *FLAGS. +- `NO_HARDEN=1`: Don't use hardening options when building packages + +If some packages are not built, for example `make NO_WALLET=1`, the appropriate CMake cache +variables will be set when generating the Bitcoin Core buildsystem. In this case, `-DENABLE_WALLET=OFF`. ### Additional targets @@ -106,20 +135,7 @@ options will be passed to bitcoin's configure. In this case, `--disable-wallet`. download-linux: run 'make download-linux' to fetch all sources needed for linux builds -### Android - -Before proceeding with an Android build one needs to get the [Android SDK](https://developer.android.com/studio) and use the "SDK Manager" tool to download the NDK and one or more "Platform packages" (these are Android versions and have a corresponding API level). -In order to build `ANDROID_API_LEVEL` (API level corresponding to the Android version targeted, e.g. Android 9.0 Pie is 28 and its "Platform package" needs to be available) and `ANDROID_TOOLCHAIN_BIN` (path to toolchain binaries depending on the platform the build is being performed on) need to be set. - -API levels from 24 to 29 have been tested to work. - -If the build includes Qt, environment variables `ANDROID_SDK` and `ANDROID_NDK` need to be set as well but can otherwise be omitted. 
-This is an example command for a default build with no disabled dependencies: - - ANDROID_SDK=/home/user/Android/Sdk ANDROID_NDK=/home/user/Android/Sdk/ndk-bundle make HOST=aarch64-linux-android ANDROID_API_LEVEL=28 ANDROID_TOOLCHAIN_BIN=/home/user/Android/Sdk/ndk-bundle/toolchains/llvm/prebuilt/linux-x86_64/bin - ### Other documentation - [description.md](description.md): General description of the depends system - [packages.md](packages.md): Steps for adding packages - diff --git a/depends/builders/darwin.mk b/depends/builders/darwin.mk index 69c394ec1dbdf..2b59353e84f31 100644 --- a/depends/builders/darwin.mk +++ b/depends/builders/darwin.mk @@ -1,22 +1,25 @@ -build_darwin_CC:=$(shell xcrun -f clang) --sysroot $(shell xcrun --show-sdk-path) -build_darwin_CXX:=$(shell xcrun -f clang++) --sysroot $(shell xcrun --show-sdk-path) +build_darwin_CC:=$(shell xcrun -f clang) -isysroot$(shell xcrun --show-sdk-path) +build_darwin_CXX:=$(shell xcrun -f clang++) -isysroot$(shell xcrun --show-sdk-path) build_darwin_AR:=$(shell xcrun -f ar) build_darwin_RANLIB:=$(shell xcrun -f ranlib) build_darwin_STRIP:=$(shell xcrun -f strip) -build_darwin_OTOOL:=$(shell xcrun -f otool) +build_darwin_OBJDUMP:=$(shell xcrun -f objdump) build_darwin_NM:=$(shell xcrun -f nm) -build_darwin_INSTALL_NAME_TOOL:=$(shell xcrun -f install_name_tool) +build_darwin_DSYMUTIL:=$(shell xcrun -f dsymutil) build_darwin_SHA256SUM=shasum -a 256 build_darwin_DOWNLOAD=curl --location --fail --connect-timeout $(DOWNLOAD_CONNECT_TIMEOUT) --retry $(DOWNLOAD_RETRIES) -o #darwin host on darwin builder. overrides darwin host preferences. -darwin_CC=$(shell xcrun -f clang) -mmacosx-version-min=$(OSX_MIN_VERSION) --sysroot $(shell xcrun --show-sdk-path) -darwin_CXX:=$(shell xcrun -f clang++) -mmacosx-version-min=$(OSX_MIN_VERSION) -stdlib=libc++ --sysroot $(shell xcrun --show-sdk-path) +darwin_CC=$(shell xcrun -f clang) -isysroot$(shell xcrun --show-sdk-path) +darwin_CXX:=$(shell xcrun -f clang++) -stdlib=libc++ -isysroot$(shell xcrun --show-sdk-path) darwin_AR:=$(shell xcrun -f ar) darwin_RANLIB:=$(shell xcrun -f ranlib) darwin_STRIP:=$(shell xcrun -f strip) -darwin_LIBTOOL:=$(shell xcrun -f libtool) -darwin_OTOOL:=$(shell xcrun -f otool) +darwin_OBJDUMP:=$(shell xcrun -f objdump) darwin_NM:=$(shell xcrun -f nm) -darwin_INSTALL_NAME_TOOL:=$(shell xcrun -f install_name_tool) -darwin_native_toolchain= +darwin_DSYMUTIL:=$(shell xcrun -f dsymutil) + +x86_64_darwin_CFLAGS += -arch x86_64 +x86_64_darwin_CXXFLAGS += -arch x86_64 +aarch64_darwin_CFLAGS += -arch arm64 +aarch64_darwin_CXXFLAGS += -arch arm64 diff --git a/depends/builders/default.mk b/depends/builders/default.mk index f097db65d603d..2a1709d98ad21 100644 --- a/depends/builders/default.mk +++ b/depends/builders/default.mk @@ -1,18 +1,19 @@ default_build_CC = gcc default_build_CXX = g++ default_build_AR = ar +default_build_OBJDUMP = objdump +default_build_TAR = tar default_build_RANLIB = ranlib default_build_STRIP = strip default_build_NM = nm -default_build_OTOOL = otool -default_build_INSTALL_NAME_TOOL = install_name_tool +default_build_TOUCH = touch -h -m -t 200001011200 define add_build_tool_func build_$(build_os)_$1 ?= $$(default_build_$1) build_$(build_arch)_$(build_os)_$1 ?= $$(build_$(build_os)_$1) build_$1=$$(build_$(build_arch)_$(build_os)_$1) endef -$(foreach var,CC CXX AR RANLIB NM STRIP SHA256SUM DOWNLOAD OTOOL INSTALL_NAME_TOOL,$(eval $(call add_build_tool_func,$(var)))) +$(foreach var,CC CXX AR TAR RANLIB NM STRIP SHA256SUM DOWNLOAD OBJDUMP DSYMUTIL TOUCH,$(eval $(call 
add_build_tool_func,$(var)))) define add_build_flags_func build_$(build_arch)_$(build_os)_$1 += $(build_$(build_os)_$1) build_$1=$$(build_$(build_arch)_$(build_os)_$1) diff --git a/depends/builders/freebsd.mk b/depends/builders/freebsd.mk new file mode 100644 index 0000000000000..465f58e04dc7f --- /dev/null +++ b/depends/builders/freebsd.mk @@ -0,0 +1,5 @@ +build_freebsd_CC=clang +build_freebsd_CXX=clang++ + +build_freebsd_SHA256SUM = shasum -a 256 +build_freebsd_DOWNLOAD = curl --location --fail --connect-timeout $(DOWNLOAD_CONNECT_TIMEOUT) --retry $(DOWNLOAD_RETRIES) -o diff --git a/depends/builders/netbsd.mk b/depends/builders/netbsd.mk new file mode 100644 index 0000000000000..b7cf1f7514189 --- /dev/null +++ b/depends/builders/netbsd.mk @@ -0,0 +1,2 @@ +build_netbsd_SHA256SUM = shasum -a 256 +build_netbsd_DOWNLOAD = curl --location --fail --connect-timeout $(DOWNLOAD_CONNECT_TIMEOUT) --retry $(DOWNLOAD_RETRIES) -o diff --git a/depends/builders/openbsd.mk b/depends/builders/openbsd.mk new file mode 100644 index 0000000000000..9c94c4baae7a3 --- /dev/null +++ b/depends/builders/openbsd.mk @@ -0,0 +1,9 @@ +build_openbsd_CC = clang +build_openbsd_CXX = clang++ + +build_openbsd_SHA256SUM = sha256 +build_openbsd_DOWNLOAD = curl --location --fail --connect-timeout $(DOWNLOAD_CONNECT_TIMEOUT) --retry $(DOWNLOAD_RETRIES) -o + +build_openbsd_TAR = gtar +# openBSD touch doesn't understand -h +build_openbsd_TOUCH = touch -m -t 200001011200 diff --git a/depends/config.guess b/depends/config.guess index 7f9ebbe31097d..cdfc4392047ce 100755 --- a/depends/config.guess +++ b/depends/config.guess @@ -1,12 +1,14 @@ #! /bin/sh # Attempt to guess a canonical system name. -# Copyright 1992-2019 Free Software Foundation, Inc. +# Copyright 1992-2023 Free Software Foundation, Inc. -timestamp='2019-09-10' +# shellcheck disable=SC2006,SC2268 # see below for rationale + +timestamp='2023-08-22' # This file is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 3 of the License, or +# the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, but @@ -27,17 +29,25 @@ timestamp='2019-09-10' # Originally written by Per Bothner; maintained since 2000 by Ben Elliston. # # You can get the latest version of this script from: -# https://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess +# https://git.savannah.gnu.org/cgit/config.git/plain/config.guess # # Please send patches to . +# The "shellcheck disable" line above the timestamp inhibits complaints +# about features and limitations of the classic Bourne shell that were +# superseded or lifted in POSIX. However, this script identifies a wide +# variety of pre-POSIX systems that do not have POSIX shells at all, and +# even some reasonably current systems (Solaris 10 as case-in-point) still +# have a pre-POSIX /bin/sh. + + me=`echo "$0" | sed -e 's,.*/,,'` usage="\ Usage: $0 [OPTION] -Output the configuration name of the system \`$me' is run on. +Output the configuration name of the system '$me' is run on. Options: -h, --help print this help, then exit @@ -50,13 +60,13 @@ version="\ GNU config.guess ($timestamp) Originally written by Per Bothner. -Copyright 1992-2019 Free Software Foundation, Inc. +Copyright 1992-2023 Free Software Foundation, Inc. This is free software; see the source for copying conditions. 
There is NO warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE." help=" -Try \`$me --help' for more information." +Try '$me --help' for more information." # Parse command line while test $# -gt 0 ; do @@ -84,13 +94,16 @@ if test $# != 0; then exit 1 fi +# Just in case it came from the environment. +GUESS= + # CC_FOR_BUILD -- compiler used by this script. Note that the use of a # compiler to aid in system detection is discouraged as it requires # temporary files to be created and, as you can see below, it is a # headache to deal with in a portable fashion. -# Historically, `CC_FOR_BUILD' used to be named `HOST_CC'. We still -# use `HOST_CC' if defined, but it is deprecated. +# Historically, 'CC_FOR_BUILD' used to be named 'HOST_CC'. We still +# use 'HOST_CC' if defined, but it is deprecated. # Portable tmp directory creation inspired by the Autoconf team. @@ -99,8 +112,10 @@ tmp= trap 'test -z "$tmp" || rm -fr "$tmp"' 0 1 2 13 15 set_cc_for_build() { + # prevent multiple calls if $tmp is already set + test "$tmp" && return 0 : "${TMPDIR=/tmp}" - # shellcheck disable=SC2039 + # shellcheck disable=SC2039,SC3028 { tmp=`(umask 077 && mktemp -d "$TMPDIR/cgXXXXXX") 2>/dev/null` && test -n "$tmp" && test -d "$tmp" ; } || { test -n "$RANDOM" && tmp=$TMPDIR/cg$$-$RANDOM && (umask 077 && mkdir "$tmp" 2>/dev/null) ; } || { tmp=$TMPDIR/cg-$$ && (umask 077 && mkdir "$tmp" 2>/dev/null) && echo "Warning: creating insecure temp directory" >&2 ; } || @@ -110,7 +125,7 @@ set_cc_for_build() { ,,) echo "int x;" > "$dummy.c" for driver in cc gcc c89 c99 ; do if ($driver -c -o "$dummy.o" "$dummy.c") >/dev/null 2>&1 ; then - CC_FOR_BUILD="$driver" + CC_FOR_BUILD=$driver break fi done @@ -131,40 +146,55 @@ fi UNAME_MACHINE=`(uname -m) 2>/dev/null` || UNAME_MACHINE=unknown UNAME_RELEASE=`(uname -r) 2>/dev/null` || UNAME_RELEASE=unknown -UNAME_SYSTEM=`(uname -s) 2>/dev/null` || UNAME_SYSTEM=unknown +UNAME_SYSTEM=`(uname -s) 2>/dev/null` || UNAME_SYSTEM=unknown UNAME_VERSION=`(uname -v) 2>/dev/null` || UNAME_VERSION=unknown -case "$UNAME_SYSTEM" in +case $UNAME_SYSTEM in Linux|GNU|GNU/*) - # If the system lacks a compiler, then just pick glibc. - # We could probably try harder. - LIBC=gnu + LIBC=unknown set_cc_for_build cat <<-EOF > "$dummy.c" + #if defined(__ANDROID__) + LIBC=android + #else #include #if defined(__UCLIBC__) LIBC=uclibc #elif defined(__dietlibc__) LIBC=dietlibc - #else + #elif defined(__GLIBC__) LIBC=gnu + #else + #include + /* First heuristic to detect musl libc. */ + #ifdef __DEFINED_va_list + LIBC=musl + #endif + #endif #endif EOF - eval "`$CC_FOR_BUILD -E "$dummy.c" 2>/dev/null | grep '^LIBC' | sed 's, ,,g'`" + cc_set_libc=`$CC_FOR_BUILD -E "$dummy.c" 2>/dev/null | grep '^LIBC' | sed 's, ,,g'` + eval "$cc_set_libc" - # If ldd exists, use it to detect musl libc. - if command -v ldd >/dev/null && \ - ldd --version 2>&1 | grep -q ^musl - then - LIBC=musl + # Second heuristic to detect musl libc. + if [ "$LIBC" = unknown ] && + command -v ldd >/dev/null && + ldd --version 2>&1 | grep -q ^musl; then + LIBC=musl + fi + + # If the system lacks a compiler, then just pick glibc. + # We could probably try harder. + if [ "$LIBC" = unknown ]; then + LIBC=gnu fi ;; esac # Note: order is significant - the case branches are not exclusive. 
-case "$UNAME_MACHINE:$UNAME_SYSTEM:$UNAME_RELEASE:$UNAME_VERSION" in +case $UNAME_MACHINE:$UNAME_SYSTEM:$UNAME_RELEASE:$UNAME_VERSION in *:NetBSD:*:*) # NetBSD (nbsd) targets should (where applicable) match one or # more of the tuples: *-*-netbsdelf*, *-*-netbsdaout*, @@ -176,12 +206,12 @@ case "$UNAME_MACHINE:$UNAME_SYSTEM:$UNAME_RELEASE:$UNAME_VERSION" in # # Note: NetBSD doesn't particularly care about the vendor # portion of the name. We always set it to "unknown". - sysctl="sysctl -n hw.machine_arch" UNAME_MACHINE_ARCH=`(uname -p 2>/dev/null || \ - "/sbin/$sysctl" 2>/dev/null || \ - "/usr/sbin/$sysctl" 2>/dev/null || \ + /sbin/sysctl -n hw.machine_arch 2>/dev/null || \ + /usr/sbin/sysctl -n hw.machine_arch 2>/dev/null || \ echo unknown)` - case "$UNAME_MACHINE_ARCH" in + case $UNAME_MACHINE_ARCH in + aarch64eb) machine=aarch64_be-unknown ;; armeb) machine=armeb-unknown ;; arm*) machine=arm-unknown ;; sh3el) machine=shl-unknown ;; @@ -190,13 +220,13 @@ case "$UNAME_MACHINE:$UNAME_SYSTEM:$UNAME_RELEASE:$UNAME_VERSION" in earmv*) arch=`echo "$UNAME_MACHINE_ARCH" | sed -e 's,^e\(armv[0-9]\).*$,\1,'` endian=`echo "$UNAME_MACHINE_ARCH" | sed -ne 's,^.*\(eb\)$,\1,p'` - machine="${arch}${endian}"-unknown + machine=${arch}${endian}-unknown ;; - *) machine="$UNAME_MACHINE_ARCH"-unknown ;; + *) machine=$UNAME_MACHINE_ARCH-unknown ;; esac # The Operating System including object format, if it has switched # to ELF recently (or will in the future) and ABI. - case "$UNAME_MACHINE_ARCH" in + case $UNAME_MACHINE_ARCH in earm*) os=netbsdelf ;; @@ -217,7 +247,7 @@ case "$UNAME_MACHINE:$UNAME_SYSTEM:$UNAME_RELEASE:$UNAME_VERSION" in ;; esac # Determine ABI tags. - case "$UNAME_MACHINE_ARCH" in + case $UNAME_MACHINE_ARCH in earm*) expr='s/^earmv[0-9]/-eabi/;s/eb$//' abi=`echo "$UNAME_MACHINE_ARCH" | sed -e "$expr"` @@ -228,7 +258,7 @@ case "$UNAME_MACHINE:$UNAME_SYSTEM:$UNAME_RELEASE:$UNAME_VERSION" in # thus, need a distinct triplet. However, they do not need # kernel version information, so it can be replaced with a # suitable tag, in the style of linux-gnu. - case "$UNAME_VERSION" in + case $UNAME_VERSION in Debian*) release='-gnu' ;; @@ -239,51 +269,57 @@ case "$UNAME_MACHINE:$UNAME_SYSTEM:$UNAME_RELEASE:$UNAME_VERSION" in # Since CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM: # contains redundant information, the shorter form: # CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM is used. 
- echo "$machine-${os}${release}${abi-}" - exit ;; + GUESS=$machine-${os}${release}${abi-} + ;; *:Bitrig:*:*) UNAME_MACHINE_ARCH=`arch | sed 's/Bitrig.//'` - echo "$UNAME_MACHINE_ARCH"-unknown-bitrig"$UNAME_RELEASE" - exit ;; + GUESS=$UNAME_MACHINE_ARCH-unknown-bitrig$UNAME_RELEASE + ;; *:OpenBSD:*:*) UNAME_MACHINE_ARCH=`arch | sed 's/OpenBSD.//'` - echo "$UNAME_MACHINE_ARCH"-unknown-openbsd"$UNAME_RELEASE" - exit ;; + GUESS=$UNAME_MACHINE_ARCH-unknown-openbsd$UNAME_RELEASE + ;; + *:SecBSD:*:*) + UNAME_MACHINE_ARCH=`arch | sed 's/SecBSD.//'` + GUESS=$UNAME_MACHINE_ARCH-unknown-secbsd$UNAME_RELEASE + ;; *:LibertyBSD:*:*) UNAME_MACHINE_ARCH=`arch | sed 's/^.*BSD\.//'` - echo "$UNAME_MACHINE_ARCH"-unknown-libertybsd"$UNAME_RELEASE" - exit ;; + GUESS=$UNAME_MACHINE_ARCH-unknown-libertybsd$UNAME_RELEASE + ;; *:MidnightBSD:*:*) - echo "$UNAME_MACHINE"-unknown-midnightbsd"$UNAME_RELEASE" - exit ;; + GUESS=$UNAME_MACHINE-unknown-midnightbsd$UNAME_RELEASE + ;; *:ekkoBSD:*:*) - echo "$UNAME_MACHINE"-unknown-ekkobsd"$UNAME_RELEASE" - exit ;; + GUESS=$UNAME_MACHINE-unknown-ekkobsd$UNAME_RELEASE + ;; *:SolidBSD:*:*) - echo "$UNAME_MACHINE"-unknown-solidbsd"$UNAME_RELEASE" - exit ;; + GUESS=$UNAME_MACHINE-unknown-solidbsd$UNAME_RELEASE + ;; *:OS108:*:*) - echo "$UNAME_MACHINE"-unknown-os108_"$UNAME_RELEASE" - exit ;; + GUESS=$UNAME_MACHINE-unknown-os108_$UNAME_RELEASE + ;; macppc:MirBSD:*:*) - echo powerpc-unknown-mirbsd"$UNAME_RELEASE" - exit ;; + GUESS=powerpc-unknown-mirbsd$UNAME_RELEASE + ;; *:MirBSD:*:*) - echo "$UNAME_MACHINE"-unknown-mirbsd"$UNAME_RELEASE" - exit ;; + GUESS=$UNAME_MACHINE-unknown-mirbsd$UNAME_RELEASE + ;; *:Sortix:*:*) - echo "$UNAME_MACHINE"-unknown-sortix - exit ;; + GUESS=$UNAME_MACHINE-unknown-sortix + ;; *:Twizzler:*:*) - echo "$UNAME_MACHINE"-unknown-twizzler - exit ;; + GUESS=$UNAME_MACHINE-unknown-twizzler + ;; *:Redox:*:*) - echo "$UNAME_MACHINE"-unknown-redox - exit ;; + GUESS=$UNAME_MACHINE-unknown-redox + ;; mips:OSF1:*.*) - echo mips-dec-osf1 - exit ;; + GUESS=mips-dec-osf1 + ;; alpha:OSF1:*:*) + # Reset EXIT trap before exiting to avoid spurious non-zero exit code. + trap '' 0 case $UNAME_RELEASE in *4.0) UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $3}'` @@ -297,7 +333,7 @@ case "$UNAME_MACHINE:$UNAME_SYSTEM:$UNAME_RELEASE:$UNAME_VERSION" in # covers most systems running today. This code pipes the CPU # types through head -n 1, so we only detect the type of CPU 0. ALPHA_CPU_TYPE=`/usr/sbin/psrinfo -v | sed -n -e 's/^ The alpha \(.*\) processor.*$/\1/p' | head -n 1` - case "$ALPHA_CPU_TYPE" in + case $ALPHA_CPU_TYPE in "EV4 (21064)") UNAME_MACHINE=alpha ;; "EV4.5 (21064)") @@ -334,117 +370,121 @@ case "$UNAME_MACHINE:$UNAME_SYSTEM:$UNAME_RELEASE:$UNAME_VERSION" in # A Tn.n version is a released field test version. # A Xn.n version is an unreleased experimental baselevel. # 1.2 uses "1.2" for uname -r. - echo "$UNAME_MACHINE"-dec-osf"`echo "$UNAME_RELEASE" | sed -e 's/^[PVTX]//' | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz`" - # Reset EXIT trap before exiting to avoid spurious non-zero exit code. - exitcode=$? 
- trap '' 0 - exit $exitcode ;; + OSF_REL=`echo "$UNAME_RELEASE" | sed -e 's/^[PVTX]//' | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz` + GUESS=$UNAME_MACHINE-dec-osf$OSF_REL + ;; Amiga*:UNIX_System_V:4.0:*) - echo m68k-unknown-sysv4 - exit ;; + GUESS=m68k-unknown-sysv4 + ;; *:[Aa]miga[Oo][Ss]:*:*) - echo "$UNAME_MACHINE"-unknown-amigaos - exit ;; + GUESS=$UNAME_MACHINE-unknown-amigaos + ;; *:[Mm]orph[Oo][Ss]:*:*) - echo "$UNAME_MACHINE"-unknown-morphos - exit ;; + GUESS=$UNAME_MACHINE-unknown-morphos + ;; *:OS/390:*:*) - echo i370-ibm-openedition - exit ;; + GUESS=i370-ibm-openedition + ;; *:z/VM:*:*) - echo s390-ibm-zvmoe - exit ;; + GUESS=s390-ibm-zvmoe + ;; *:OS400:*:*) - echo powerpc-ibm-os400 - exit ;; + GUESS=powerpc-ibm-os400 + ;; arm:RISC*:1.[012]*:*|arm:riscix:1.[012]*:*) - echo arm-acorn-riscix"$UNAME_RELEASE" - exit ;; + GUESS=arm-acorn-riscix$UNAME_RELEASE + ;; arm*:riscos:*:*|arm*:RISCOS:*:*) - echo arm-unknown-riscos - exit ;; + GUESS=arm-unknown-riscos + ;; SR2?01:HI-UX/MPP:*:* | SR8000:HI-UX/MPP:*:*) - echo hppa1.1-hitachi-hiuxmpp - exit ;; + GUESS=hppa1.1-hitachi-hiuxmpp + ;; Pyramid*:OSx*:*:* | MIS*:OSx*:*:* | MIS*:SMP_DC-OSx*:*:*) # akee@wpdis03.wpafb.af.mil (Earle F. Ake) contributed MIS and NILE. - if test "`(/bin/universe) 2>/dev/null`" = att ; then - echo pyramid-pyramid-sysv3 - else - echo pyramid-pyramid-bsd - fi - exit ;; + case `(/bin/universe) 2>/dev/null` in + att) GUESS=pyramid-pyramid-sysv3 ;; + *) GUESS=pyramid-pyramid-bsd ;; + esac + ;; NILE*:*:*:dcosx) - echo pyramid-pyramid-svr4 - exit ;; + GUESS=pyramid-pyramid-svr4 + ;; DRS?6000:unix:4.0:6*) - echo sparc-icl-nx6 - exit ;; + GUESS=sparc-icl-nx6 + ;; DRS?6000:UNIX_SV:4.2*:7* | DRS?6000:isis:4.2*:7*) case `/usr/bin/uname -p` in - sparc) echo sparc-icl-nx7; exit ;; - esac ;; + sparc) GUESS=sparc-icl-nx7 ;; + esac + ;; s390x:SunOS:*:*) - echo "$UNAME_MACHINE"-ibm-solaris2"`echo "$UNAME_RELEASE" | sed -e 's/[^.]*//'`" - exit ;; + SUN_REL=`echo "$UNAME_RELEASE" | sed -e 's/[^.]*//'` + GUESS=$UNAME_MACHINE-ibm-solaris2$SUN_REL + ;; sun4H:SunOS:5.*:*) - echo sparc-hal-solaris2"`echo "$UNAME_RELEASE"|sed -e 's/[^.]*//'`" - exit ;; + SUN_REL=`echo "$UNAME_RELEASE" | sed -e 's/[^.]*//'` + GUESS=sparc-hal-solaris2$SUN_REL + ;; sun4*:SunOS:5.*:* | tadpole*:SunOS:5.*:*) - echo sparc-sun-solaris2"`echo "$UNAME_RELEASE" | sed -e 's/[^.]*//'`" - exit ;; + SUN_REL=`echo "$UNAME_RELEASE" | sed -e 's/[^.]*//'` + GUESS=sparc-sun-solaris2$SUN_REL + ;; i86pc:AuroraUX:5.*:* | i86xen:AuroraUX:5.*:*) - echo i386-pc-auroraux"$UNAME_RELEASE" - exit ;; + GUESS=i386-pc-auroraux$UNAME_RELEASE + ;; i86pc:SunOS:5.*:* | i86xen:SunOS:5.*:*) set_cc_for_build SUN_ARCH=i386 # If there is a compiler, see if it is configured for 64-bit objects. # Note that the Sun cc does not turn __LP64__ into 1 like gcc does. # This test works for both compilers. - if [ "$CC_FOR_BUILD" != no_compiler_found ]; then + if test "$CC_FOR_BUILD" != no_compiler_found; then if (echo '#ifdef __amd64'; echo IS_64BIT_ARCH; echo '#endif') | \ - (CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) | \ + (CCOPTS="" $CC_FOR_BUILD -m64 -E - 2>/dev/null) | \ grep IS_64BIT_ARCH >/dev/null then SUN_ARCH=x86_64 fi fi - echo "$SUN_ARCH"-pc-solaris2"`echo "$UNAME_RELEASE"|sed -e 's/[^.]*//'`" - exit ;; + SUN_REL=`echo "$UNAME_RELEASE" | sed -e 's/[^.]*//'` + GUESS=$SUN_ARCH-pc-solaris2$SUN_REL + ;; sun4*:SunOS:6*:*) # According to config.sub, this is the proper way to canonicalize # SunOS6. 
Hard to guess exactly what SunOS6 will be like, but # it's likely to be more like Solaris than SunOS4. - echo sparc-sun-solaris3"`echo "$UNAME_RELEASE"|sed -e 's/[^.]*//'`" - exit ;; + SUN_REL=`echo "$UNAME_RELEASE" | sed -e 's/[^.]*//'` + GUESS=sparc-sun-solaris3$SUN_REL + ;; sun4*:SunOS:*:*) - case "`/usr/bin/arch -k`" in + case `/usr/bin/arch -k` in Series*|S4*) UNAME_RELEASE=`uname -v` ;; esac - # Japanese Language versions have a version number like `4.1.3-JL'. - echo sparc-sun-sunos"`echo "$UNAME_RELEASE"|sed -e 's/-/_/'`" - exit ;; + # Japanese Language versions have a version number like '4.1.3-JL'. + SUN_REL=`echo "$UNAME_RELEASE" | sed -e 's/-/_/'` + GUESS=sparc-sun-sunos$SUN_REL + ;; sun3*:SunOS:*:*) - echo m68k-sun-sunos"$UNAME_RELEASE" - exit ;; + GUESS=m68k-sun-sunos$UNAME_RELEASE + ;; sun*:*:4.2BSD:*) UNAME_RELEASE=`(sed 1q /etc/motd | awk '{print substr($5,1,3)}') 2>/dev/null` test "x$UNAME_RELEASE" = x && UNAME_RELEASE=3 - case "`/bin/arch`" in + case `/bin/arch` in sun3) - echo m68k-sun-sunos"$UNAME_RELEASE" + GUESS=m68k-sun-sunos$UNAME_RELEASE ;; sun4) - echo sparc-sun-sunos"$UNAME_RELEASE" + GUESS=sparc-sun-sunos$UNAME_RELEASE ;; esac - exit ;; + ;; aushp:SunOS:*:*) - echo sparc-auspex-sunos"$UNAME_RELEASE" - exit ;; + GUESS=sparc-auspex-sunos$UNAME_RELEASE + ;; # The situation for MiNT is a little confusing. The machine name # can be virtually everything (everything which is not # "atarist" or "atariste" at least should have a processor @@ -454,41 +494,41 @@ case "$UNAME_MACHINE:$UNAME_SYSTEM:$UNAME_RELEASE:$UNAME_VERSION" in # MiNT. But MiNT is downward compatible to TOS, so this should # be no problem. atarist[e]:*MiNT:*:* | atarist[e]:*mint:*:* | atarist[e]:*TOS:*:*) - echo m68k-atari-mint"$UNAME_RELEASE" - exit ;; + GUESS=m68k-atari-mint$UNAME_RELEASE + ;; atari*:*MiNT:*:* | atari*:*mint:*:* | atarist[e]:*TOS:*:*) - echo m68k-atari-mint"$UNAME_RELEASE" - exit ;; + GUESS=m68k-atari-mint$UNAME_RELEASE + ;; *falcon*:*MiNT:*:* | *falcon*:*mint:*:* | *falcon*:*TOS:*:*) - echo m68k-atari-mint"$UNAME_RELEASE" - exit ;; + GUESS=m68k-atari-mint$UNAME_RELEASE + ;; milan*:*MiNT:*:* | milan*:*mint:*:* | *milan*:*TOS:*:*) - echo m68k-milan-mint"$UNAME_RELEASE" - exit ;; + GUESS=m68k-milan-mint$UNAME_RELEASE + ;; hades*:*MiNT:*:* | hades*:*mint:*:* | *hades*:*TOS:*:*) - echo m68k-hades-mint"$UNAME_RELEASE" - exit ;; + GUESS=m68k-hades-mint$UNAME_RELEASE + ;; *:*MiNT:*:* | *:*mint:*:* | *:*TOS:*:*) - echo m68k-unknown-mint"$UNAME_RELEASE" - exit ;; + GUESS=m68k-unknown-mint$UNAME_RELEASE + ;; m68k:machten:*:*) - echo m68k-apple-machten"$UNAME_RELEASE" - exit ;; + GUESS=m68k-apple-machten$UNAME_RELEASE + ;; powerpc:machten:*:*) - echo powerpc-apple-machten"$UNAME_RELEASE" - exit ;; + GUESS=powerpc-apple-machten$UNAME_RELEASE + ;; RISC*:Mach:*:*) - echo mips-dec-mach_bsd4.3 - exit ;; + GUESS=mips-dec-mach_bsd4.3 + ;; RISC*:ULTRIX:*:*) - echo mips-dec-ultrix"$UNAME_RELEASE" - exit ;; + GUESS=mips-dec-ultrix$UNAME_RELEASE + ;; VAX*:ULTRIX*:*:*) - echo vax-dec-ultrix"$UNAME_RELEASE" - exit ;; + GUESS=vax-dec-ultrix$UNAME_RELEASE + ;; 2020:CLIX:*:* | 2430:CLIX:*:*) - echo clipper-intergraph-clix"$UNAME_RELEASE" - exit ;; + GUESS=clipper-intergraph-clix$UNAME_RELEASE + ;; mips:*:*:UMIPS | mips:*:*:RISCos) set_cc_for_build sed 's/^ //' << EOF > "$dummy.c" @@ -516,75 +556,76 @@ EOF dummyarg=`echo "$UNAME_RELEASE" | sed -n 's/\([0-9]*\).*/\1/p'` && SYSTEM_NAME=`"$dummy" "$dummyarg"` && { echo "$SYSTEM_NAME"; exit; } - echo mips-mips-riscos"$UNAME_RELEASE" - exit ;; + 
GUESS=mips-mips-riscos$UNAME_RELEASE + ;; Motorola:PowerMAX_OS:*:*) - echo powerpc-motorola-powermax - exit ;; + GUESS=powerpc-motorola-powermax + ;; Motorola:*:4.3:PL8-*) - echo powerpc-harris-powermax - exit ;; + GUESS=powerpc-harris-powermax + ;; Night_Hawk:*:*:PowerMAX_OS | Synergy:PowerMAX_OS:*:*) - echo powerpc-harris-powermax - exit ;; + GUESS=powerpc-harris-powermax + ;; Night_Hawk:Power_UNIX:*:*) - echo powerpc-harris-powerunix - exit ;; + GUESS=powerpc-harris-powerunix + ;; m88k:CX/UX:7*:*) - echo m88k-harris-cxux7 - exit ;; + GUESS=m88k-harris-cxux7 + ;; m88k:*:4*:R4*) - echo m88k-motorola-sysv4 - exit ;; + GUESS=m88k-motorola-sysv4 + ;; m88k:*:3*:R3*) - echo m88k-motorola-sysv3 - exit ;; + GUESS=m88k-motorola-sysv3 + ;; AViiON:dgux:*:*) # DG/UX returns AViiON for all architectures UNAME_PROCESSOR=`/usr/bin/uname -p` - if [ "$UNAME_PROCESSOR" = mc88100 ] || [ "$UNAME_PROCESSOR" = mc88110 ] + if test "$UNAME_PROCESSOR" = mc88100 || test "$UNAME_PROCESSOR" = mc88110 then - if [ "$TARGET_BINARY_INTERFACE"x = m88kdguxelfx ] || \ - [ "$TARGET_BINARY_INTERFACE"x = x ] + if test "$TARGET_BINARY_INTERFACE"x = m88kdguxelfx || \ + test "$TARGET_BINARY_INTERFACE"x = x then - echo m88k-dg-dgux"$UNAME_RELEASE" + GUESS=m88k-dg-dgux$UNAME_RELEASE else - echo m88k-dg-dguxbcs"$UNAME_RELEASE" + GUESS=m88k-dg-dguxbcs$UNAME_RELEASE fi else - echo i586-dg-dgux"$UNAME_RELEASE" + GUESS=i586-dg-dgux$UNAME_RELEASE fi - exit ;; + ;; M88*:DolphinOS:*:*) # DolphinOS (SVR3) - echo m88k-dolphin-sysv3 - exit ;; + GUESS=m88k-dolphin-sysv3 + ;; M88*:*:R3*:*) # Delta 88k system running SVR3 - echo m88k-motorola-sysv3 - exit ;; + GUESS=m88k-motorola-sysv3 + ;; XD88*:*:*:*) # Tektronix XD88 system running UTekV (SVR3) - echo m88k-tektronix-sysv3 - exit ;; + GUESS=m88k-tektronix-sysv3 + ;; Tek43[0-9][0-9]:UTek:*:*) # Tektronix 4300 system running UTek (BSD) - echo m68k-tektronix-bsd - exit ;; + GUESS=m68k-tektronix-bsd + ;; *:IRIX*:*:*) - echo mips-sgi-irix"`echo "$UNAME_RELEASE"|sed -e 's/-/_/g'`" - exit ;; + IRIX_REL=`echo "$UNAME_RELEASE" | sed -e 's/-/_/g'` + GUESS=mips-sgi-irix$IRIX_REL + ;; ????????:AIX?:[12].1:2) # AIX 2.2.1 or AIX 2.1.1 is RT/PC AIX. 
- echo romp-ibm-aix # uname -m gives an 8 hex-code CPU id - exit ;; # Note that: echo "'`uname -s`'" gives 'AIX ' + GUESS=romp-ibm-aix # uname -m gives an 8 hex-code CPU id + ;; # Note that: echo "'`uname -s`'" gives 'AIX ' i*86:AIX:*:*) - echo i386-ibm-aix - exit ;; + GUESS=i386-ibm-aix + ;; ia64:AIX:*:*) - if [ -x /usr/bin/oslevel ] ; then + if test -x /usr/bin/oslevel ; then IBM_REV=`/usr/bin/oslevel` else - IBM_REV="$UNAME_VERSION.$UNAME_RELEASE" + IBM_REV=$UNAME_VERSION.$UNAME_RELEASE fi - echo "$UNAME_MACHINE"-ibm-aix"$IBM_REV" - exit ;; + GUESS=$UNAME_MACHINE-ibm-aix$IBM_REV + ;; *:AIX:2:3) if grep bos325 /usr/include/stdio.h >/dev/null 2>&1; then set_cc_for_build @@ -601,16 +642,16 @@ EOF EOF if $CC_FOR_BUILD -o "$dummy" "$dummy.c" && SYSTEM_NAME=`"$dummy"` then - echo "$SYSTEM_NAME" + GUESS=$SYSTEM_NAME else - echo rs6000-ibm-aix3.2.5 + GUESS=rs6000-ibm-aix3.2.5 fi elif grep bos324 /usr/include/stdio.h >/dev/null 2>&1; then - echo rs6000-ibm-aix3.2.4 + GUESS=rs6000-ibm-aix3.2.4 else - echo rs6000-ibm-aix3.2 + GUESS=rs6000-ibm-aix3.2 fi - exit ;; + ;; *:AIX:*:[4567]) IBM_CPU_ID=`/usr/sbin/lsdev -C -c processor -S available | sed 1q | awk '{ print $1 }'` if /usr/sbin/lsattr -El "$IBM_CPU_ID" | grep ' POWER' >/dev/null 2>&1; then @@ -618,56 +659,56 @@ EOF else IBM_ARCH=powerpc fi - if [ -x /usr/bin/lslpp ] ; then - IBM_REV=`/usr/bin/lslpp -Lqc bos.rte.libc | + if test -x /usr/bin/lslpp ; then + IBM_REV=`/usr/bin/lslpp -Lqc bos.rte.libc | \ awk -F: '{ print $3 }' | sed s/[0-9]*$/0/` else - IBM_REV="$UNAME_VERSION.$UNAME_RELEASE" + IBM_REV=$UNAME_VERSION.$UNAME_RELEASE fi - echo "$IBM_ARCH"-ibm-aix"$IBM_REV" - exit ;; + GUESS=$IBM_ARCH-ibm-aix$IBM_REV + ;; *:AIX:*:*) - echo rs6000-ibm-aix - exit ;; + GUESS=rs6000-ibm-aix + ;; ibmrt:4.4BSD:*|romp-ibm:4.4BSD:*) - echo romp-ibm-bsd4.4 - exit ;; + GUESS=romp-ibm-bsd4.4 + ;; ibmrt:*BSD:*|romp-ibm:BSD:*) # covers RT/PC BSD and - echo romp-ibm-bsd"$UNAME_RELEASE" # 4.3 with uname added to - exit ;; # report: romp-ibm BSD 4.3 + GUESS=romp-ibm-bsd$UNAME_RELEASE # 4.3 with uname added to + ;; # report: romp-ibm BSD 4.3 *:BOSX:*:*) - echo rs6000-bull-bosx - exit ;; + GUESS=rs6000-bull-bosx + ;; DPX/2?00:B.O.S.:*:*) - echo m68k-bull-sysv3 - exit ;; + GUESS=m68k-bull-sysv3 + ;; 9000/[34]??:4.3bsd:1.*:*) - echo m68k-hp-bsd - exit ;; + GUESS=m68k-hp-bsd + ;; hp300:4.4BSD:*:* | 9000/[34]??:4.3bsd:2.*:*) - echo m68k-hp-bsd4.4 - exit ;; + GUESS=m68k-hp-bsd4.4 + ;; 9000/[34678]??:HP-UX:*:*) - HPUX_REV=`echo "$UNAME_RELEASE"|sed -e 's/[^.]*.[0B]*//'` - case "$UNAME_MACHINE" in + HPUX_REV=`echo "$UNAME_RELEASE" | sed -e 's/[^.]*.[0B]*//'` + case $UNAME_MACHINE in 9000/31?) HP_ARCH=m68000 ;; 9000/[34]??) 
HP_ARCH=m68k ;; 9000/[678][0-9][0-9]) - if [ -x /usr/bin/getconf ]; then + if test -x /usr/bin/getconf; then sc_cpu_version=`/usr/bin/getconf SC_CPU_VERSION 2>/dev/null` sc_kernel_bits=`/usr/bin/getconf SC_KERNEL_BITS 2>/dev/null` - case "$sc_cpu_version" in + case $sc_cpu_version in 523) HP_ARCH=hppa1.0 ;; # CPU_PA_RISC1_0 528) HP_ARCH=hppa1.1 ;; # CPU_PA_RISC1_1 532) # CPU_PA_RISC2_0 - case "$sc_kernel_bits" in + case $sc_kernel_bits in 32) HP_ARCH=hppa2.0n ;; 64) HP_ARCH=hppa2.0w ;; '') HP_ARCH=hppa2.0 ;; # HP-UX 10.20 esac ;; esac fi - if [ "$HP_ARCH" = "" ]; then + if test "$HP_ARCH" = ""; then set_cc_for_build sed 's/^ //' << EOF > "$dummy.c" @@ -706,7 +747,7 @@ EOF test -z "$HP_ARCH" && HP_ARCH=hppa fi ;; esac - if [ "$HP_ARCH" = hppa2.0w ] + if test "$HP_ARCH" = hppa2.0w then set_cc_for_build @@ -727,12 +768,12 @@ EOF HP_ARCH=hppa64 fi fi - echo "$HP_ARCH"-hp-hpux"$HPUX_REV" - exit ;; + GUESS=$HP_ARCH-hp-hpux$HPUX_REV + ;; ia64:HP-UX:*:*) - HPUX_REV=`echo "$UNAME_RELEASE"|sed -e 's/[^.]*.[0B]*//'` - echo ia64-hp-hpux"$HPUX_REV" - exit ;; + HPUX_REV=`echo "$UNAME_RELEASE" | sed -e 's/[^.]*.[0B]*//'` + GUESS=ia64-hp-hpux$HPUX_REV + ;; 3050*:HI-UX:*:*) set_cc_for_build sed 's/^ //' << EOF > "$dummy.c" @@ -762,36 +803,36 @@ EOF EOF $CC_FOR_BUILD -o "$dummy" "$dummy.c" && SYSTEM_NAME=`"$dummy"` && { echo "$SYSTEM_NAME"; exit; } - echo unknown-hitachi-hiuxwe2 - exit ;; + GUESS=unknown-hitachi-hiuxwe2 + ;; 9000/7??:4.3bsd:*:* | 9000/8?[79]:4.3bsd:*:*) - echo hppa1.1-hp-bsd - exit ;; + GUESS=hppa1.1-hp-bsd + ;; 9000/8??:4.3bsd:*:*) - echo hppa1.0-hp-bsd - exit ;; + GUESS=hppa1.0-hp-bsd + ;; *9??*:MPE/iX:*:* | *3000*:MPE/iX:*:*) - echo hppa1.0-hp-mpeix - exit ;; + GUESS=hppa1.0-hp-mpeix + ;; hp7??:OSF1:*:* | hp8?[79]:OSF1:*:*) - echo hppa1.1-hp-osf - exit ;; + GUESS=hppa1.1-hp-osf + ;; hp8??:OSF1:*:*) - echo hppa1.0-hp-osf - exit ;; + GUESS=hppa1.0-hp-osf + ;; i*86:OSF1:*:*) - if [ -x /usr/sbin/sysversion ] ; then - echo "$UNAME_MACHINE"-unknown-osf1mk + if test -x /usr/sbin/sysversion ; then + GUESS=$UNAME_MACHINE-unknown-osf1mk else - echo "$UNAME_MACHINE"-unknown-osf1 + GUESS=$UNAME_MACHINE-unknown-osf1 fi - exit ;; + ;; parisc*:Lites*:*:*) - echo hppa1.1-hp-lites - exit ;; + GUESS=hppa1.1-hp-lites + ;; C1*:ConvexOS:*:* | convex:ConvexOS:C1*:*) - echo c1-convex-bsd - exit ;; + GUESS=c1-convex-bsd + ;; C2*:ConvexOS:*:* | convex:ConvexOS:C2*:*) if getsysinfo -f scalar_acc then echo c32-convex-bsd @@ -799,17 +840,18 @@ EOF fi exit ;; C34*:ConvexOS:*:* | convex:ConvexOS:C34*:*) - echo c34-convex-bsd - exit ;; + GUESS=c34-convex-bsd + ;; C38*:ConvexOS:*:* | convex:ConvexOS:C38*:*) - echo c38-convex-bsd - exit ;; + GUESS=c38-convex-bsd + ;; C4*:ConvexOS:*:* | convex:ConvexOS:C4*:*) - echo c4-convex-bsd - exit ;; + GUESS=c4-convex-bsd + ;; CRAY*Y-MP:*:*:*) - echo ymp-cray-unicos"$UNAME_RELEASE" | sed -e 's/\.[^.]*$/.X/' - exit ;; + CRAY_REL=`echo "$UNAME_RELEASE" | sed -e 's/\.[^.]*$/.X/'` + GUESS=ymp-cray-unicos$CRAY_REL + ;; CRAY*[A-Z]90:*:*:*) echo "$UNAME_MACHINE"-cray-unicos"$UNAME_RELEASE" \ | sed -e 's/CRAY.*\([A-Z]90\)/\1/' \ @@ -817,114 +859,155 @@ EOF -e 's/\.[^.]*$/.X/' exit ;; CRAY*TS:*:*:*) - echo t90-cray-unicos"$UNAME_RELEASE" | sed -e 's/\.[^.]*$/.X/' - exit ;; + CRAY_REL=`echo "$UNAME_RELEASE" | sed -e 's/\.[^.]*$/.X/'` + GUESS=t90-cray-unicos$CRAY_REL + ;; CRAY*T3E:*:*:*) - echo alphaev5-cray-unicosmk"$UNAME_RELEASE" | sed -e 's/\.[^.]*$/.X/' - exit ;; + CRAY_REL=`echo "$UNAME_RELEASE" | sed -e 's/\.[^.]*$/.X/'` + GUESS=alphaev5-cray-unicosmk$CRAY_REL + ;; CRAY*SV1:*:*:*) - 
echo sv1-cray-unicos"$UNAME_RELEASE" | sed -e 's/\.[^.]*$/.X/' - exit ;; + CRAY_REL=`echo "$UNAME_RELEASE" | sed -e 's/\.[^.]*$/.X/'` + GUESS=sv1-cray-unicos$CRAY_REL + ;; *:UNICOS/mp:*:*) - echo craynv-cray-unicosmp"$UNAME_RELEASE" | sed -e 's/\.[^.]*$/.X/' - exit ;; + CRAY_REL=`echo "$UNAME_RELEASE" | sed -e 's/\.[^.]*$/.X/'` + GUESS=craynv-cray-unicosmp$CRAY_REL + ;; F30[01]:UNIX_System_V:*:* | F700:UNIX_System_V:*:*) FUJITSU_PROC=`uname -m | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz` FUJITSU_SYS=`uname -p | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz | sed -e 's/\///'` FUJITSU_REL=`echo "$UNAME_RELEASE" | sed -e 's/ /_/'` - echo "${FUJITSU_PROC}-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}" - exit ;; + GUESS=${FUJITSU_PROC}-fujitsu-${FUJITSU_SYS}${FUJITSU_REL} + ;; 5000:UNIX_System_V:4.*:*) FUJITSU_SYS=`uname -p | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz | sed -e 's/\///'` FUJITSU_REL=`echo "$UNAME_RELEASE" | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz | sed -e 's/ /_/'` - echo "sparc-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}" - exit ;; + GUESS=sparc-fujitsu-${FUJITSU_SYS}${FUJITSU_REL} + ;; i*86:BSD/386:*:* | i*86:BSD/OS:*:* | *:Ascend\ Embedded/OS:*:*) - echo "$UNAME_MACHINE"-pc-bsdi"$UNAME_RELEASE" - exit ;; + GUESS=$UNAME_MACHINE-pc-bsdi$UNAME_RELEASE + ;; sparc*:BSD/OS:*:*) - echo sparc-unknown-bsdi"$UNAME_RELEASE" - exit ;; + GUESS=sparc-unknown-bsdi$UNAME_RELEASE + ;; *:BSD/OS:*:*) - echo "$UNAME_MACHINE"-unknown-bsdi"$UNAME_RELEASE" - exit ;; + GUESS=$UNAME_MACHINE-unknown-bsdi$UNAME_RELEASE + ;; arm:FreeBSD:*:*) UNAME_PROCESSOR=`uname -p` set_cc_for_build if echo __ARM_PCS_VFP | $CC_FOR_BUILD -E - 2>/dev/null \ | grep -q __ARM_PCS_VFP then - echo "${UNAME_PROCESSOR}"-unknown-freebsd"`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`"-gnueabi + FREEBSD_REL=`echo "$UNAME_RELEASE" | sed -e 's/[-(].*//'` + GUESS=$UNAME_PROCESSOR-unknown-freebsd$FREEBSD_REL-gnueabi else - echo "${UNAME_PROCESSOR}"-unknown-freebsd"`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`"-gnueabihf + FREEBSD_REL=`echo "$UNAME_RELEASE" | sed -e 's/[-(].*//'` + GUESS=$UNAME_PROCESSOR-unknown-freebsd$FREEBSD_REL-gnueabihf fi - exit ;; + ;; *:FreeBSD:*:*) - UNAME_PROCESSOR=`/usr/bin/uname -p` - case "$UNAME_PROCESSOR" in + UNAME_PROCESSOR=`uname -p` + case $UNAME_PROCESSOR in amd64) UNAME_PROCESSOR=x86_64 ;; i386) UNAME_PROCESSOR=i586 ;; esac - echo "$UNAME_PROCESSOR"-unknown-freebsd"`echo "$UNAME_RELEASE"|sed -e 's/[-(].*//'`" - exit ;; + FREEBSD_REL=`echo "$UNAME_RELEASE" | sed -e 's/[-(].*//'` + GUESS=$UNAME_PROCESSOR-unknown-freebsd$FREEBSD_REL + ;; i*:CYGWIN*:*) - echo "$UNAME_MACHINE"-pc-cygwin - exit ;; + GUESS=$UNAME_MACHINE-pc-cygwin + ;; *:MINGW64*:*) - echo "$UNAME_MACHINE"-pc-mingw64 - exit ;; + GUESS=$UNAME_MACHINE-pc-mingw64 + ;; *:MINGW*:*) - echo "$UNAME_MACHINE"-pc-mingw32 - exit ;; + GUESS=$UNAME_MACHINE-pc-mingw32 + ;; *:MSYS*:*) - echo "$UNAME_MACHINE"-pc-msys - exit ;; + GUESS=$UNAME_MACHINE-pc-msys + ;; i*:PW*:*) - echo "$UNAME_MACHINE"-pc-pw32 - exit ;; + GUESS=$UNAME_MACHINE-pc-pw32 + ;; + *:SerenityOS:*:*) + GUESS=$UNAME_MACHINE-pc-serenity + ;; *:Interix*:*) - case "$UNAME_MACHINE" in + case $UNAME_MACHINE in x86) - echo i586-pc-interix"$UNAME_RELEASE" - exit ;; + GUESS=i586-pc-interix$UNAME_RELEASE + ;; authenticamd | genuineintel | EM64T) - echo x86_64-unknown-interix"$UNAME_RELEASE" - exit ;; + GUESS=x86_64-unknown-interix$UNAME_RELEASE + ;; IA64) - echo ia64-unknown-interix"$UNAME_RELEASE" - exit ;; + GUESS=ia64-unknown-interix$UNAME_RELEASE + ;; esac 
;; i*:UWIN*:*) - echo "$UNAME_MACHINE"-pc-uwin - exit ;; + GUESS=$UNAME_MACHINE-pc-uwin + ;; amd64:CYGWIN*:*:* | x86_64:CYGWIN*:*:*) - echo x86_64-pc-cygwin - exit ;; + GUESS=x86_64-pc-cygwin + ;; prep*:SunOS:5.*:*) - echo powerpcle-unknown-solaris2"`echo "$UNAME_RELEASE"|sed -e 's/[^.]*//'`" - exit ;; + SUN_REL=`echo "$UNAME_RELEASE" | sed -e 's/[^.]*//'` + GUESS=powerpcle-unknown-solaris2$SUN_REL + ;; *:GNU:*:*) # the GNU system - echo "`echo "$UNAME_MACHINE"|sed -e 's,[-/].*$,,'`-unknown-$LIBC`echo "$UNAME_RELEASE"|sed -e 's,/.*$,,'`" - exit ;; + GNU_ARCH=`echo "$UNAME_MACHINE" | sed -e 's,[-/].*$,,'` + GNU_REL=`echo "$UNAME_RELEASE" | sed -e 's,/.*$,,'` + GUESS=$GNU_ARCH-unknown-$LIBC$GNU_REL + ;; *:GNU/*:*:*) # other systems with GNU libc and userland - echo "$UNAME_MACHINE-unknown-`echo "$UNAME_SYSTEM" | sed 's,^[^/]*/,,' | tr "[:upper:]" "[:lower:]"``echo "$UNAME_RELEASE"|sed -e 's/[-(].*//'`-$LIBC" - exit ;; + GNU_SYS=`echo "$UNAME_SYSTEM" | sed 's,^[^/]*/,,' | tr "[:upper:]" "[:lower:]"` + GNU_REL=`echo "$UNAME_RELEASE" | sed -e 's/[-(].*//'` + GUESS=$UNAME_MACHINE-unknown-$GNU_SYS$GNU_REL-$LIBC + ;; + x86_64:[Mm]anagarm:*:*|i?86:[Mm]anagarm:*:*) + GUESS="$UNAME_MACHINE-pc-managarm-mlibc" + ;; + *:[Mm]anagarm:*:*) + GUESS="$UNAME_MACHINE-unknown-managarm-mlibc" + ;; *:Minix:*:*) - echo "$UNAME_MACHINE"-unknown-minix - exit ;; + GUESS=$UNAME_MACHINE-unknown-minix + ;; aarch64:Linux:*:*) - echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" - exit ;; + set_cc_for_build + CPU=$UNAME_MACHINE + LIBCABI=$LIBC + if test "$CC_FOR_BUILD" != no_compiler_found; then + ABI=64 + sed 's/^ //' << EOF > "$dummy.c" + #ifdef __ARM_EABI__ + #ifdef __ARM_PCS_VFP + ABI=eabihf + #else + ABI=eabi + #endif + #endif +EOF + cc_set_abi=`$CC_FOR_BUILD -E "$dummy.c" 2>/dev/null | grep '^ABI' | sed 's, ,,g'` + eval "$cc_set_abi" + case $ABI in + eabi | eabihf) CPU=armv8l; LIBCABI=$LIBC$ABI ;; + esac + fi + GUESS=$CPU-unknown-linux-$LIBCABI + ;; aarch64_be:Linux:*:*) UNAME_MACHINE=aarch64_be - echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" - exit ;; + GUESS=$UNAME_MACHINE-unknown-linux-$LIBC + ;; alpha:Linux:*:*) - case `sed -n '/^cpu model/s/^.*: \(.*\)/\1/p' < /proc/cpuinfo` in + case `sed -n '/^cpu model/s/^.*: \(.*\)/\1/p' /proc/cpuinfo 2>/dev/null` in EV5) UNAME_MACHINE=alphaev5 ;; EV56) UNAME_MACHINE=alphaev56 ;; PCA56) UNAME_MACHINE=alphapca56 ;; @@ -935,60 +1018,72 @@ EOF esac objdump --private-headers /bin/sh | grep -q ld.so.1 if test "$?" 
= 0 ; then LIBC=gnulibc1 ; fi - echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" - exit ;; - arc:Linux:*:* | arceb:Linux:*:*) - echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" - exit ;; + GUESS=$UNAME_MACHINE-unknown-linux-$LIBC + ;; + arc:Linux:*:* | arceb:Linux:*:* | arc32:Linux:*:* | arc64:Linux:*:*) + GUESS=$UNAME_MACHINE-unknown-linux-$LIBC + ;; arm*:Linux:*:*) set_cc_for_build if echo __ARM_EABI__ | $CC_FOR_BUILD -E - 2>/dev/null \ | grep -q __ARM_EABI__ then - echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" + GUESS=$UNAME_MACHINE-unknown-linux-$LIBC else if echo __ARM_PCS_VFP | $CC_FOR_BUILD -E - 2>/dev/null \ | grep -q __ARM_PCS_VFP then - echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"eabi + GUESS=$UNAME_MACHINE-unknown-linux-${LIBC}eabi else - echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"eabihf + GUESS=$UNAME_MACHINE-unknown-linux-${LIBC}eabihf fi fi - exit ;; + ;; avr32*:Linux:*:*) - echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" - exit ;; + GUESS=$UNAME_MACHINE-unknown-linux-$LIBC + ;; cris:Linux:*:*) - echo "$UNAME_MACHINE"-axis-linux-"$LIBC" - exit ;; + GUESS=$UNAME_MACHINE-axis-linux-$LIBC + ;; crisv32:Linux:*:*) - echo "$UNAME_MACHINE"-axis-linux-"$LIBC" - exit ;; + GUESS=$UNAME_MACHINE-axis-linux-$LIBC + ;; e2k:Linux:*:*) - echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" - exit ;; + GUESS=$UNAME_MACHINE-unknown-linux-$LIBC + ;; frv:Linux:*:*) - echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" - exit ;; + GUESS=$UNAME_MACHINE-unknown-linux-$LIBC + ;; hexagon:Linux:*:*) - echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" - exit ;; + GUESS=$UNAME_MACHINE-unknown-linux-$LIBC + ;; i*86:Linux:*:*) - echo "$UNAME_MACHINE"-pc-linux-"$LIBC" - exit ;; + GUESS=$UNAME_MACHINE-pc-linux-$LIBC + ;; ia64:Linux:*:*) - echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" - exit ;; + GUESS=$UNAME_MACHINE-unknown-linux-$LIBC + ;; k1om:Linux:*:*) - echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" - exit ;; + GUESS=$UNAME_MACHINE-unknown-linux-$LIBC + ;; + kvx:Linux:*:*) + GUESS=$UNAME_MACHINE-unknown-linux-$LIBC + ;; + kvx:cos:*:*) + GUESS=$UNAME_MACHINE-unknown-cos + ;; + kvx:mbr:*:*) + GUESS=$UNAME_MACHINE-unknown-mbr + ;; + loongarch32:Linux:*:* | loongarch64:Linux:*:*) + GUESS=$UNAME_MACHINE-unknown-linux-$LIBC + ;; m32r*:Linux:*:*) - echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" - exit ;; + GUESS=$UNAME_MACHINE-unknown-linux-$LIBC + ;; m68*:Linux:*:*) - echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" - exit ;; + GUESS=$UNAME_MACHINE-unknown-linux-$LIBC + ;; mips:Linux:*:* | mips64:Linux:*:*) set_cc_for_build IS_GLIBC=0 @@ -1033,113 +1128,135 @@ EOF #endif #endif EOF - eval "`$CC_FOR_BUILD -E "$dummy.c" 2>/dev/null | grep '^CPU\|^MIPS_ENDIAN\|^LIBCABI'`" + cc_set_vars=`$CC_FOR_BUILD -E "$dummy.c" 2>/dev/null | grep '^CPU\|^MIPS_ENDIAN\|^LIBCABI'` + eval "$cc_set_vars" test "x$CPU" != x && { echo "$CPU${MIPS_ENDIAN}-unknown-linux-$LIBCABI"; exit; } ;; mips64el:Linux:*:*) - echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" - exit ;; + GUESS=$UNAME_MACHINE-unknown-linux-$LIBC + ;; openrisc*:Linux:*:*) - echo or1k-unknown-linux-"$LIBC" - exit ;; + GUESS=or1k-unknown-linux-$LIBC + ;; or32:Linux:*:* | or1k*:Linux:*:*) - echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" - exit ;; + GUESS=$UNAME_MACHINE-unknown-linux-$LIBC + ;; padre:Linux:*:*) - echo sparc-unknown-linux-"$LIBC" - exit ;; + GUESS=sparc-unknown-linux-$LIBC + ;; parisc64:Linux:*:* | hppa64:Linux:*:*) - echo hppa64-unknown-linux-"$LIBC" - exit ;; + GUESS=hppa64-unknown-linux-$LIBC + ;; parisc:Linux:*:* | hppa:Linux:*:*) # Look for CPU level case `grep '^cpu[^a-z]*:' /proc/cpuinfo 2>/dev/null | cut -d' 
' -f2` in - PA7*) echo hppa1.1-unknown-linux-"$LIBC" ;; - PA8*) echo hppa2.0-unknown-linux-"$LIBC" ;; - *) echo hppa-unknown-linux-"$LIBC" ;; + PA7*) GUESS=hppa1.1-unknown-linux-$LIBC ;; + PA8*) GUESS=hppa2.0-unknown-linux-$LIBC ;; + *) GUESS=hppa-unknown-linux-$LIBC ;; esac - exit ;; + ;; ppc64:Linux:*:*) - echo powerpc64-unknown-linux-"$LIBC" - exit ;; + GUESS=powerpc64-unknown-linux-$LIBC + ;; ppc:Linux:*:*) - echo powerpc-unknown-linux-"$LIBC" - exit ;; + GUESS=powerpc-unknown-linux-$LIBC + ;; ppc64le:Linux:*:*) - echo powerpc64le-unknown-linux-"$LIBC" - exit ;; + GUESS=powerpc64le-unknown-linux-$LIBC + ;; ppcle:Linux:*:*) - echo powerpcle-unknown-linux-"$LIBC" - exit ;; - riscv32:Linux:*:* | riscv64:Linux:*:*) - echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" - exit ;; + GUESS=powerpcle-unknown-linux-$LIBC + ;; + riscv32:Linux:*:* | riscv32be:Linux:*:* | riscv64:Linux:*:* | riscv64be:Linux:*:*) + GUESS=$UNAME_MACHINE-unknown-linux-$LIBC + ;; s390:Linux:*:* | s390x:Linux:*:*) - echo "$UNAME_MACHINE"-ibm-linux-"$LIBC" - exit ;; + GUESS=$UNAME_MACHINE-ibm-linux-$LIBC + ;; sh64*:Linux:*:*) - echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" - exit ;; + GUESS=$UNAME_MACHINE-unknown-linux-$LIBC + ;; sh*:Linux:*:*) - echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" - exit ;; + GUESS=$UNAME_MACHINE-unknown-linux-$LIBC + ;; sparc:Linux:*:* | sparc64:Linux:*:*) - echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" - exit ;; + GUESS=$UNAME_MACHINE-unknown-linux-$LIBC + ;; tile*:Linux:*:*) - echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" - exit ;; + GUESS=$UNAME_MACHINE-unknown-linux-$LIBC + ;; vax:Linux:*:*) - echo "$UNAME_MACHINE"-dec-linux-"$LIBC" - exit ;; + GUESS=$UNAME_MACHINE-dec-linux-$LIBC + ;; x86_64:Linux:*:*) - echo "$UNAME_MACHINE"-pc-linux-"$LIBC" - exit ;; + set_cc_for_build + CPU=$UNAME_MACHINE + LIBCABI=$LIBC + if test "$CC_FOR_BUILD" != no_compiler_found; then + ABI=64 + sed 's/^ //' << EOF > "$dummy.c" + #ifdef __i386__ + ABI=x86 + #else + #ifdef __ILP32__ + ABI=x32 + #endif + #endif +EOF + cc_set_abi=`$CC_FOR_BUILD -E "$dummy.c" 2>/dev/null | grep '^ABI' | sed 's, ,,g'` + eval "$cc_set_abi" + case $ABI in + x86) CPU=i686 ;; + x32) LIBCABI=${LIBC}x32 ;; + esac + fi + GUESS=$CPU-pc-linux-$LIBCABI + ;; xtensa*:Linux:*:*) - echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" - exit ;; + GUESS=$UNAME_MACHINE-unknown-linux-$LIBC + ;; i*86:DYNIX/ptx:4*:*) # ptx 4.0 does uname -s correctly, with DYNIX/ptx in there. # earlier versions are messed up and put the nodename in both # sysname and nodename. - echo i386-sequent-sysv4 - exit ;; + GUESS=i386-sequent-sysv4 + ;; i*86:UNIX_SV:4.2MP:2.*) # Unixware is an offshoot of SVR4, but it has its own version # number series starting with 2... # I am not positive that other SVR4 systems won't match this, # I just have to hope. -- rms. # Use sysv4.2uw... so that sysv4* matches it. - echo "$UNAME_MACHINE"-pc-sysv4.2uw"$UNAME_VERSION" - exit ;; + GUESS=$UNAME_MACHINE-pc-sysv4.2uw$UNAME_VERSION + ;; i*86:OS/2:*:*) - # If we were able to find `uname', then EMX Unix compatibility + # If we were able to find 'uname', then EMX Unix compatibility # is probably installed. 
- echo "$UNAME_MACHINE"-pc-os2-emx - exit ;; + GUESS=$UNAME_MACHINE-pc-os2-emx + ;; i*86:XTS-300:*:STOP) - echo "$UNAME_MACHINE"-unknown-stop - exit ;; + GUESS=$UNAME_MACHINE-unknown-stop + ;; i*86:atheos:*:*) - echo "$UNAME_MACHINE"-unknown-atheos - exit ;; + GUESS=$UNAME_MACHINE-unknown-atheos + ;; i*86:syllable:*:*) - echo "$UNAME_MACHINE"-pc-syllable - exit ;; + GUESS=$UNAME_MACHINE-pc-syllable + ;; i*86:LynxOS:2.*:* | i*86:LynxOS:3.[01]*:* | i*86:LynxOS:4.[02]*:*) - echo i386-unknown-lynxos"$UNAME_RELEASE" - exit ;; + GUESS=i386-unknown-lynxos$UNAME_RELEASE + ;; i*86:*DOS:*:*) - echo "$UNAME_MACHINE"-pc-msdosdjgpp - exit ;; + GUESS=$UNAME_MACHINE-pc-msdosdjgpp + ;; i*86:*:4.*:*) UNAME_REL=`echo "$UNAME_RELEASE" | sed 's/\/MP$//'` if grep Novell /usr/include/link.h >/dev/null 2>/dev/null; then - echo "$UNAME_MACHINE"-univel-sysv"$UNAME_REL" + GUESS=$UNAME_MACHINE-univel-sysv$UNAME_REL else - echo "$UNAME_MACHINE"-pc-sysv"$UNAME_REL" + GUESS=$UNAME_MACHINE-pc-sysv$UNAME_REL fi - exit ;; + ;; i*86:*:5:[678]*) # UnixWare 7.x, OpenUNIX and OpenServer 6. case `/bin/uname -X | grep "^Machine"` in @@ -1147,12 +1264,12 @@ EOF *Pentium) UNAME_MACHINE=i586 ;; *Pent*|*Celeron) UNAME_MACHINE=i686 ;; esac - echo "$UNAME_MACHINE-unknown-sysv${UNAME_RELEASE}${UNAME_SYSTEM}${UNAME_VERSION}" - exit ;; + GUESS=$UNAME_MACHINE-unknown-sysv${UNAME_RELEASE}${UNAME_SYSTEM}${UNAME_VERSION} + ;; i*86:*:3.2:*) if test -f /usr/options/cb.name; then UNAME_REL=`sed -n 's/.*Version //p' /dev/null >/dev/null ; then UNAME_REL=`(/bin/uname -X|grep Release|sed -e 's/.*= //')` (/bin/uname -X|grep i80486 >/dev/null) && UNAME_MACHINE=i486 @@ -1162,11 +1279,11 @@ EOF && UNAME_MACHINE=i686 (/bin/uname -X|grep '^Machine.*Pentium Pro' >/dev/null) \ && UNAME_MACHINE=i686 - echo "$UNAME_MACHINE"-pc-sco"$UNAME_REL" + GUESS=$UNAME_MACHINE-pc-sco$UNAME_REL else - echo "$UNAME_MACHINE"-pc-sysv32 + GUESS=$UNAME_MACHINE-pc-sysv32 fi - exit ;; + ;; pc:*:*:*) # Left here for compatibility: # uname -m prints for DJGPP always 'pc', but it prints nothing about @@ -1174,31 +1291,31 @@ EOF # Note: whatever this is, it MUST be the same as what config.sub # prints for the "djgpp" host, or else GDB configure will decide that # this is a cross-build. - echo i586-pc-msdosdjgpp - exit ;; + GUESS=i586-pc-msdosdjgpp + ;; Intel:Mach:3*:*) - echo i386-pc-mach3 - exit ;; + GUESS=i386-pc-mach3 + ;; paragon:*:*:*) - echo i860-intel-osf1 - exit ;; + GUESS=i860-intel-osf1 + ;; i860:*:4.*:*) # i860-SVR4 if grep Stardent /usr/include/sys/uadmin.h >/dev/null 2>&1 ; then - echo i860-stardent-sysv"$UNAME_RELEASE" # Stardent Vistra i860-SVR4 + GUESS=i860-stardent-sysv$UNAME_RELEASE # Stardent Vistra i860-SVR4 else # Add other i860-SVR4 vendors below as they are discovered. 
- echo i860-unknown-sysv"$UNAME_RELEASE" # Unknown i860-SVR4 + GUESS=i860-unknown-sysv$UNAME_RELEASE # Unknown i860-SVR4 fi - exit ;; + ;; mini*:CTIX:SYS*5:*) # "miniframe" - echo m68010-convergent-sysv - exit ;; + GUESS=m68010-convergent-sysv + ;; mc68k:UNIX:SYSTEM5:3.51m) - echo m68k-convergent-sysv - exit ;; + GUESS=m68k-convergent-sysv + ;; M680?0:D-NIX:5.3:*) - echo m68k-diab-dnix - exit ;; + GUESS=m68k-diab-dnix + ;; M68*:*:R3V[5678]*:*) test -r /sysV68 && { echo 'm68k-motorola-sysv'; exit; } ;; 3[345]??:*:4.0:3.0 | 3[34]??A:*:4.0:3.0 | 3[34]??,*:*:4.0:3.0 | 3[34]??/*:*:4.0:3.0 | 4400:*:4.0:3.0 | 4850:*:4.0:3.0 | SKA40:*:4.0:3.0 | SDS2:*:4.0:3.0 | SHG2:*:4.0:3.0 | S7501*:*:4.0:3.0) @@ -1223,113 +1340,119 @@ EOF /bin/uname -p 2>/dev/null | /bin/grep pteron >/dev/null \ && { echo i586-ncr-sysv4.3"$OS_REL"; exit; } ;; m68*:LynxOS:2.*:* | m68*:LynxOS:3.0*:*) - echo m68k-unknown-lynxos"$UNAME_RELEASE" - exit ;; + GUESS=m68k-unknown-lynxos$UNAME_RELEASE + ;; mc68030:UNIX_System_V:4.*:*) - echo m68k-atari-sysv4 - exit ;; + GUESS=m68k-atari-sysv4 + ;; TSUNAMI:LynxOS:2.*:*) - echo sparc-unknown-lynxos"$UNAME_RELEASE" - exit ;; + GUESS=sparc-unknown-lynxos$UNAME_RELEASE + ;; rs6000:LynxOS:2.*:*) - echo rs6000-unknown-lynxos"$UNAME_RELEASE" - exit ;; + GUESS=rs6000-unknown-lynxos$UNAME_RELEASE + ;; PowerPC:LynxOS:2.*:* | PowerPC:LynxOS:3.[01]*:* | PowerPC:LynxOS:4.[02]*:*) - echo powerpc-unknown-lynxos"$UNAME_RELEASE" - exit ;; + GUESS=powerpc-unknown-lynxos$UNAME_RELEASE + ;; SM[BE]S:UNIX_SV:*:*) - echo mips-dde-sysv"$UNAME_RELEASE" - exit ;; + GUESS=mips-dde-sysv$UNAME_RELEASE + ;; RM*:ReliantUNIX-*:*:*) - echo mips-sni-sysv4 - exit ;; + GUESS=mips-sni-sysv4 + ;; RM*:SINIX-*:*:*) - echo mips-sni-sysv4 - exit ;; + GUESS=mips-sni-sysv4 + ;; *:SINIX-*:*:*) if uname -p 2>/dev/null >/dev/null ; then UNAME_MACHINE=`(uname -p) 2>/dev/null` - echo "$UNAME_MACHINE"-sni-sysv4 + GUESS=$UNAME_MACHINE-sni-sysv4 else - echo ns32k-sni-sysv + GUESS=ns32k-sni-sysv fi - exit ;; - PENTIUM:*:4.0*:*) # Unisys `ClearPath HMP IX 4000' SVR4/MP effort + ;; + PENTIUM:*:4.0*:*) # Unisys 'ClearPath HMP IX 4000' SVR4/MP effort # says - echo i586-unisys-sysv4 - exit ;; + GUESS=i586-unisys-sysv4 + ;; *:UNIX_System_V:4*:FTX*) # From Gerald Hewes . # How about differentiating between stratus architectures? -djm - echo hppa1.1-stratus-sysv4 - exit ;; + GUESS=hppa1.1-stratus-sysv4 + ;; *:*:*:FTX*) # From seanf@swdc.stratus.com. - echo i860-stratus-sysv4 - exit ;; + GUESS=i860-stratus-sysv4 + ;; i*86:VOS:*:*) # From Paul.Green@stratus.com. - echo "$UNAME_MACHINE"-stratus-vos - exit ;; + GUESS=$UNAME_MACHINE-stratus-vos + ;; *:VOS:*:*) # From Paul.Green@stratus.com. - echo hppa1.1-stratus-vos - exit ;; + GUESS=hppa1.1-stratus-vos + ;; mc68*:A/UX:*:*) - echo m68k-apple-aux"$UNAME_RELEASE" - exit ;; + GUESS=m68k-apple-aux$UNAME_RELEASE + ;; news*:NEWS-OS:6*:*) - echo mips-sony-newsos6 - exit ;; + GUESS=mips-sony-newsos6 + ;; R[34]000:*System_V*:*:* | R4000:UNIX_SYSV:*:* | R*000:UNIX_SV:*:*) - if [ -d /usr/nec ]; then - echo mips-nec-sysv"$UNAME_RELEASE" + if test -d /usr/nec; then + GUESS=mips-nec-sysv$UNAME_RELEASE else - echo mips-unknown-sysv"$UNAME_RELEASE" + GUESS=mips-unknown-sysv$UNAME_RELEASE fi - exit ;; + ;; BeBox:BeOS:*:*) # BeOS running on hardware made by Be, PPC only. - echo powerpc-be-beos - exit ;; + GUESS=powerpc-be-beos + ;; BeMac:BeOS:*:*) # BeOS running on Mac or Mac clone, PPC only. - echo powerpc-apple-beos - exit ;; + GUESS=powerpc-apple-beos + ;; BePC:BeOS:*:*) # BeOS running on Intel PC compatible. 
- echo i586-pc-beos - exit ;; + GUESS=i586-pc-beos + ;; BePC:Haiku:*:*) # Haiku running on Intel PC compatible. - echo i586-pc-haiku - exit ;; - x86_64:Haiku:*:*) - echo x86_64-unknown-haiku - exit ;; + GUESS=i586-pc-haiku + ;; + ppc:Haiku:*:*) # Haiku running on Apple PowerPC + GUESS=powerpc-apple-haiku + ;; + *:Haiku:*:*) # Haiku modern gcc (not bound by BeOS compat) + GUESS=$UNAME_MACHINE-unknown-haiku + ;; SX-4:SUPER-UX:*:*) - echo sx4-nec-superux"$UNAME_RELEASE" - exit ;; + GUESS=sx4-nec-superux$UNAME_RELEASE + ;; SX-5:SUPER-UX:*:*) - echo sx5-nec-superux"$UNAME_RELEASE" - exit ;; + GUESS=sx5-nec-superux$UNAME_RELEASE + ;; SX-6:SUPER-UX:*:*) - echo sx6-nec-superux"$UNAME_RELEASE" - exit ;; + GUESS=sx6-nec-superux$UNAME_RELEASE + ;; SX-7:SUPER-UX:*:*) - echo sx7-nec-superux"$UNAME_RELEASE" - exit ;; + GUESS=sx7-nec-superux$UNAME_RELEASE + ;; SX-8:SUPER-UX:*:*) - echo sx8-nec-superux"$UNAME_RELEASE" - exit ;; + GUESS=sx8-nec-superux$UNAME_RELEASE + ;; SX-8R:SUPER-UX:*:*) - echo sx8r-nec-superux"$UNAME_RELEASE" - exit ;; + GUESS=sx8r-nec-superux$UNAME_RELEASE + ;; SX-ACE:SUPER-UX:*:*) - echo sxace-nec-superux"$UNAME_RELEASE" - exit ;; + GUESS=sxace-nec-superux$UNAME_RELEASE + ;; Power*:Rhapsody:*:*) - echo powerpc-apple-rhapsody"$UNAME_RELEASE" - exit ;; + GUESS=powerpc-apple-rhapsody$UNAME_RELEASE + ;; *:Rhapsody:*:*) - echo "$UNAME_MACHINE"-apple-rhapsody"$UNAME_RELEASE" - exit ;; + GUESS=$UNAME_MACHINE-apple-rhapsody$UNAME_RELEASE + ;; + arm64:Darwin:*:*) + GUESS=aarch64-apple-darwin$UNAME_RELEASE + ;; *:Darwin:*:*) UNAME_PROCESSOR=`uname -p` case $UNAME_PROCESSOR in @@ -1344,7 +1467,7 @@ EOF else set_cc_for_build fi - if [ "$CC_FOR_BUILD" != no_compiler_found ]; then + if test "$CC_FOR_BUILD" != no_compiler_found; then if (echo '#ifdef __LP64__'; echo IS_64BIT_ARCH; echo '#endif') | \ (CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) | \ grep IS_64BIT_ARCH >/dev/null @@ -1365,109 +1488,119 @@ EOF # uname -m returns i386 or x86_64 UNAME_PROCESSOR=$UNAME_MACHINE fi - echo "$UNAME_PROCESSOR"-apple-darwin"$UNAME_RELEASE" - exit ;; + GUESS=$UNAME_PROCESSOR-apple-darwin$UNAME_RELEASE + ;; *:procnto*:*:* | *:QNX:[0123456789]*:*) UNAME_PROCESSOR=`uname -p` if test "$UNAME_PROCESSOR" = x86; then UNAME_PROCESSOR=i386 UNAME_MACHINE=pc fi - echo "$UNAME_PROCESSOR"-"$UNAME_MACHINE"-nto-qnx"$UNAME_RELEASE" - exit ;; + GUESS=$UNAME_PROCESSOR-$UNAME_MACHINE-nto-qnx$UNAME_RELEASE + ;; *:QNX:*:4*) - echo i386-pc-qnx - exit ;; + GUESS=i386-pc-qnx + ;; NEO-*:NONSTOP_KERNEL:*:*) - echo neo-tandem-nsk"$UNAME_RELEASE" - exit ;; + GUESS=neo-tandem-nsk$UNAME_RELEASE + ;; NSE-*:NONSTOP_KERNEL:*:*) - echo nse-tandem-nsk"$UNAME_RELEASE" - exit ;; + GUESS=nse-tandem-nsk$UNAME_RELEASE + ;; NSR-*:NONSTOP_KERNEL:*:*) - echo nsr-tandem-nsk"$UNAME_RELEASE" - exit ;; + GUESS=nsr-tandem-nsk$UNAME_RELEASE + ;; NSV-*:NONSTOP_KERNEL:*:*) - echo nsv-tandem-nsk"$UNAME_RELEASE" - exit ;; + GUESS=nsv-tandem-nsk$UNAME_RELEASE + ;; NSX-*:NONSTOP_KERNEL:*:*) - echo nsx-tandem-nsk"$UNAME_RELEASE" - exit ;; + GUESS=nsx-tandem-nsk$UNAME_RELEASE + ;; *:NonStop-UX:*:*) - echo mips-compaq-nonstopux - exit ;; + GUESS=mips-compaq-nonstopux + ;; BS2000:POSIX*:*:*) - echo bs2000-siemens-sysv - exit ;; + GUESS=bs2000-siemens-sysv + ;; DS/*:UNIX_System_V:*:*) - echo "$UNAME_MACHINE"-"$UNAME_SYSTEM"-"$UNAME_RELEASE" - exit ;; + GUESS=$UNAME_MACHINE-$UNAME_SYSTEM-$UNAME_RELEASE + ;; *:Plan9:*:*) # "uname -m" is not consistent, so use $cputype instead. 386 # is converted to i386 for consistency with other x86 # operating systems. 
- # shellcheck disable=SC2154 - if test "$cputype" = 386; then + if test "${cputype-}" = 386; then UNAME_MACHINE=i386 - else - UNAME_MACHINE="$cputype" + elif test "x${cputype-}" != x; then + UNAME_MACHINE=$cputype fi - echo "$UNAME_MACHINE"-unknown-plan9 - exit ;; + GUESS=$UNAME_MACHINE-unknown-plan9 + ;; *:TOPS-10:*:*) - echo pdp10-unknown-tops10 - exit ;; + GUESS=pdp10-unknown-tops10 + ;; *:TENEX:*:*) - echo pdp10-unknown-tenex - exit ;; + GUESS=pdp10-unknown-tenex + ;; KS10:TOPS-20:*:* | KL10:TOPS-20:*:* | TYPE4:TOPS-20:*:*) - echo pdp10-dec-tops20 - exit ;; + GUESS=pdp10-dec-tops20 + ;; XKL-1:TOPS-20:*:* | TYPE5:TOPS-20:*:*) - echo pdp10-xkl-tops20 - exit ;; + GUESS=pdp10-xkl-tops20 + ;; *:TOPS-20:*:*) - echo pdp10-unknown-tops20 - exit ;; + GUESS=pdp10-unknown-tops20 + ;; *:ITS:*:*) - echo pdp10-unknown-its - exit ;; + GUESS=pdp10-unknown-its + ;; SEI:*:*:SEIUX) - echo mips-sei-seiux"$UNAME_RELEASE" - exit ;; + GUESS=mips-sei-seiux$UNAME_RELEASE + ;; *:DragonFly:*:*) - echo "$UNAME_MACHINE"-unknown-dragonfly"`echo "$UNAME_RELEASE"|sed -e 's/[-(].*//'`" - exit ;; + DRAGONFLY_REL=`echo "$UNAME_RELEASE" | sed -e 's/[-(].*//'` + GUESS=$UNAME_MACHINE-unknown-dragonfly$DRAGONFLY_REL + ;; *:*VMS:*:*) UNAME_MACHINE=`(uname -p) 2>/dev/null` - case "$UNAME_MACHINE" in - A*) echo alpha-dec-vms ; exit ;; - I*) echo ia64-dec-vms ; exit ;; - V*) echo vax-dec-vms ; exit ;; + case $UNAME_MACHINE in + A*) GUESS=alpha-dec-vms ;; + I*) GUESS=ia64-dec-vms ;; + V*) GUESS=vax-dec-vms ;; esac ;; *:XENIX:*:SysV) - echo i386-pc-xenix - exit ;; + GUESS=i386-pc-xenix + ;; i*86:skyos:*:*) - echo "$UNAME_MACHINE"-pc-skyos"`echo "$UNAME_RELEASE" | sed -e 's/ .*$//'`" - exit ;; + SKYOS_REL=`echo "$UNAME_RELEASE" | sed -e 's/ .*$//'` + GUESS=$UNAME_MACHINE-pc-skyos$SKYOS_REL + ;; i*86:rdos:*:*) - echo "$UNAME_MACHINE"-pc-rdos - exit ;; - i*86:AROS:*:*) - echo "$UNAME_MACHINE"-pc-aros - exit ;; + GUESS=$UNAME_MACHINE-pc-rdos + ;; + i*86:Fiwix:*:*) + GUESS=$UNAME_MACHINE-pc-fiwix + ;; + *:AROS:*:*) + GUESS=$UNAME_MACHINE-unknown-aros + ;; x86_64:VMkernel:*:*) - echo "$UNAME_MACHINE"-unknown-esx - exit ;; + GUESS=$UNAME_MACHINE-unknown-esx + ;; amd64:Isilon\ OneFS:*:*) - echo x86_64-unknown-onefs - exit ;; + GUESS=x86_64-unknown-onefs + ;; *:Unleashed:*:*) - echo "$UNAME_MACHINE"-unknown-unleashed"$UNAME_RELEASE" - exit ;; + GUESS=$UNAME_MACHINE-unknown-unleashed$UNAME_RELEASE + ;; esac +# Do we have a guess based on uname results? +if test "x$GUESS" != x; then + echo "$GUESS" + exit +fi + # No uname command or uname output not recognized. set_cc_for_build cat > "$dummy.c" </dev/null && SYSTEM_NAME=`$dummy` && +$CC_FOR_BUILD -o "$dummy" "$dummy.c" 2>/dev/null && SYSTEM_NAME=`"$dummy"` && { echo "$SYSTEM_NAME"; exit; } # Apollos put the system type in the environment. @@ -1607,7 +1740,7 @@ test -d /usr/apollo && { echo "$ISP-apollo-$SYSTYPE"; exit; } echo "$0: unable to guess system type" >&2 -case "$UNAME_MACHINE:$UNAME_SYSTEM" in +case $UNAME_MACHINE:$UNAME_SYSTEM in mips:Linux | mips64:Linux) # If we got here on MIPS GNU/Linux, output extra information. 
cat >&2 <&2 <&2 + echo "Invalid configuration '$1': more than four components" >&2 exit 1 ;; *-*-*-*) basic_machine=$field1-$field2 - os=$field3-$field4 + basic_os=$field3-$field4 ;; *-*-*) # Ambiguous whether COMPANY is present, or skipped and KERNEL-OS is two # parts maybe_os=$field2-$field3 case $maybe_os in - nto-qnx* | linux-gnu* | linux-android* | linux-dietlibc \ - | linux-newlib* | linux-musl* | linux-uclibc* | uclinux-uclibc* \ + nto-qnx* | linux-* | uclinux-uclibc* \ | uclinux-gnu* | kfreebsd*-gnu* | knetbsd*-gnu* | netbsd*-gnu* \ | netbsd*-eabi* | kopensolaris*-gnu* | cloudabi*-eabi* \ - | storm-chaos* | os2-emx* | rtmk-nova*) + | storm-chaos* | os2-emx* | rtmk-nova* | managarm-* \ + | windows-* ) basic_machine=$field1 - os=$maybe_os + basic_os=$maybe_os ;; android-linux) basic_machine=$field1-unknown - os=linux-android + basic_os=linux-android ;; *) basic_machine=$field1-$field2 - os=$field3 + basic_os=$field3 ;; esac ;; @@ -154,7 +165,7 @@ case $1 in case $field1-$field2 in decstation-3100) basic_machine=mips-dec - os= + basic_os= ;; *-*) # Second component is usually, but not always the OS @@ -162,7 +173,11 @@ case $1 in # Prevent following clause from handling this valid os sun*os*) basic_machine=$field1 - os=$field2 + basic_os=$field2 + ;; + zephyr*) + basic_machine=$field1-unknown + basic_os=$field2 ;; # Manufacturers dec* | mips* | sequent* | encore* | pc533* | sgi* | sony* \ @@ -175,11 +190,11 @@ case $1 in | microblaze* | sim | cisco \ | oki | wec | wrs | winbond) basic_machine=$field1-$field2 - os= + basic_os= ;; *) basic_machine=$field1 - os=$field2 + basic_os=$field2 ;; esac ;; @@ -191,447 +206,451 @@ case $1 in case $field1 in 386bsd) basic_machine=i386-pc - os=bsd + basic_os=bsd ;; a29khif) basic_machine=a29k-amd - os=udi + basic_os=udi ;; adobe68k) basic_machine=m68010-adobe - os=scout + basic_os=scout ;; alliant) basic_machine=fx80-alliant - os= + basic_os= ;; altos | altos3068) basic_machine=m68k-altos - os= + basic_os= ;; am29k) basic_machine=a29k-none - os=bsd + basic_os=bsd ;; amdahl) basic_machine=580-amdahl - os=sysv + basic_os=sysv ;; amiga) basic_machine=m68k-unknown - os= + basic_os= ;; amigaos | amigados) basic_machine=m68k-unknown - os=amigaos + basic_os=amigaos ;; amigaunix | amix) basic_machine=m68k-unknown - os=sysv4 + basic_os=sysv4 ;; apollo68) basic_machine=m68k-apollo - os=sysv + basic_os=sysv ;; apollo68bsd) basic_machine=m68k-apollo - os=bsd + basic_os=bsd ;; aros) basic_machine=i386-pc - os=aros + basic_os=aros ;; aux) basic_machine=m68k-apple - os=aux + basic_os=aux ;; balance) basic_machine=ns32k-sequent - os=dynix + basic_os=dynix ;; blackfin) basic_machine=bfin-unknown - os=linux + basic_os=linux ;; cegcc) basic_machine=arm-unknown - os=cegcc + basic_os=cegcc ;; convex-c1) basic_machine=c1-convex - os=bsd + basic_os=bsd ;; convex-c2) basic_machine=c2-convex - os=bsd + basic_os=bsd ;; convex-c32) basic_machine=c32-convex - os=bsd + basic_os=bsd ;; convex-c34) basic_machine=c34-convex - os=bsd + basic_os=bsd ;; convex-c38) basic_machine=c38-convex - os=bsd + basic_os=bsd ;; cray) basic_machine=j90-cray - os=unicos + basic_os=unicos ;; crds | unos) basic_machine=m68k-crds - os= + basic_os= ;; da30) basic_machine=m68k-da30 - os= + basic_os= ;; decstation | pmax | pmin | dec3100 | decstatn) basic_machine=mips-dec - os= + basic_os= ;; delta88) basic_machine=m88k-motorola - os=sysv3 + basic_os=sysv3 ;; dicos) basic_machine=i686-pc - os=dicos + basic_os=dicos ;; djgpp) basic_machine=i586-pc - os=msdosdjgpp + basic_os=msdosdjgpp ;; ebmon29k) 
basic_machine=a29k-amd - os=ebmon + basic_os=ebmon ;; es1800 | OSE68k | ose68k | ose | OSE) basic_machine=m68k-ericsson - os=ose + basic_os=ose ;; gmicro) basic_machine=tron-gmicro - os=sysv + basic_os=sysv ;; go32) basic_machine=i386-pc - os=go32 + basic_os=go32 ;; h8300hms) basic_machine=h8300-hitachi - os=hms + basic_os=hms ;; h8300xray) basic_machine=h8300-hitachi - os=xray + basic_os=xray ;; h8500hms) basic_machine=h8500-hitachi - os=hms + basic_os=hms ;; harris) basic_machine=m88k-harris - os=sysv3 + basic_os=sysv3 ;; hp300 | hp300hpux) basic_machine=m68k-hp - os=hpux + basic_os=hpux ;; hp300bsd) basic_machine=m68k-hp - os=bsd + basic_os=bsd ;; hppaosf) basic_machine=hppa1.1-hp - os=osf + basic_os=osf ;; hppro) basic_machine=hppa1.1-hp - os=proelf + basic_os=proelf ;; i386mach) basic_machine=i386-mach - os=mach + basic_os=mach ;; isi68 | isi) basic_machine=m68k-isi - os=sysv + basic_os=sysv ;; m68knommu) basic_machine=m68k-unknown - os=linux + basic_os=linux ;; magnum | m3230) basic_machine=mips-mips - os=sysv + basic_os=sysv ;; merlin) basic_machine=ns32k-utek - os=sysv + basic_os=sysv ;; mingw64) basic_machine=x86_64-pc - os=mingw64 + basic_os=mingw64 ;; mingw32) basic_machine=i686-pc - os=mingw32 + basic_os=mingw32 ;; mingw32ce) basic_machine=arm-unknown - os=mingw32ce + basic_os=mingw32ce ;; monitor) basic_machine=m68k-rom68k - os=coff + basic_os=coff ;; morphos) basic_machine=powerpc-unknown - os=morphos + basic_os=morphos ;; moxiebox) basic_machine=moxie-unknown - os=moxiebox + basic_os=moxiebox ;; msdos) basic_machine=i386-pc - os=msdos + basic_os=msdos ;; msys) basic_machine=i686-pc - os=msys + basic_os=msys ;; mvs) basic_machine=i370-ibm - os=mvs + basic_os=mvs ;; nacl) basic_machine=le32-unknown - os=nacl + basic_os=nacl ;; ncr3000) basic_machine=i486-ncr - os=sysv4 + basic_os=sysv4 ;; netbsd386) basic_machine=i386-pc - os=netbsd + basic_os=netbsd ;; netwinder) basic_machine=armv4l-rebel - os=linux + basic_os=linux ;; news | news700 | news800 | news900) basic_machine=m68k-sony - os=newsos + basic_os=newsos ;; news1000) basic_machine=m68030-sony - os=newsos + basic_os=newsos ;; necv70) basic_machine=v70-nec - os=sysv + basic_os=sysv ;; nh3000) basic_machine=m68k-harris - os=cxux + basic_os=cxux ;; nh[45]000) basic_machine=m88k-harris - os=cxux + basic_os=cxux ;; nindy960) basic_machine=i960-intel - os=nindy + basic_os=nindy ;; mon960) basic_machine=i960-intel - os=mon960 + basic_os=mon960 ;; nonstopux) basic_machine=mips-compaq - os=nonstopux + basic_os=nonstopux ;; os400) basic_machine=powerpc-ibm - os=os400 + basic_os=os400 ;; OSE68000 | ose68000) basic_machine=m68000-ericsson - os=ose + basic_os=ose ;; os68k) basic_machine=m68k-none - os=os68k + basic_os=os68k ;; paragon) basic_machine=i860-intel - os=osf + basic_os=osf ;; parisc) basic_machine=hppa-unknown - os=linux + basic_os=linux + ;; + psp) + basic_machine=mipsallegrexel-sony + basic_os=psp ;; pw32) basic_machine=i586-unknown - os=pw32 + basic_os=pw32 ;; rdos | rdos64) basic_machine=x86_64-pc - os=rdos + basic_os=rdos ;; rdos32) basic_machine=i386-pc - os=rdos + basic_os=rdos ;; rom68k) basic_machine=m68k-rom68k - os=coff + basic_os=coff ;; sa29200) basic_machine=a29k-amd - os=udi + basic_os=udi ;; sei) basic_machine=mips-sei - os=seiux + basic_os=seiux ;; sequent) basic_machine=i386-sequent - os= + basic_os= ;; sps7) basic_machine=m68k-bull - os=sysv2 + basic_os=sysv2 ;; st2000) basic_machine=m68k-tandem - os= + basic_os= ;; stratus) basic_machine=i860-stratus - os=sysv4 + basic_os=sysv4 ;; sun2) 
basic_machine=m68000-sun - os= + basic_os= ;; sun2os3) basic_machine=m68000-sun - os=sunos3 + basic_os=sunos3 ;; sun2os4) basic_machine=m68000-sun - os=sunos4 + basic_os=sunos4 ;; sun3) basic_machine=m68k-sun - os= + basic_os= ;; sun3os3) basic_machine=m68k-sun - os=sunos3 + basic_os=sunos3 ;; sun3os4) basic_machine=m68k-sun - os=sunos4 + basic_os=sunos4 ;; sun4) basic_machine=sparc-sun - os= + basic_os= ;; sun4os3) basic_machine=sparc-sun - os=sunos3 + basic_os=sunos3 ;; sun4os4) basic_machine=sparc-sun - os=sunos4 + basic_os=sunos4 ;; sun4sol2) basic_machine=sparc-sun - os=solaris2 + basic_os=solaris2 ;; sun386 | sun386i | roadrunner) basic_machine=i386-sun - os= + basic_os= ;; sv1) basic_machine=sv1-cray - os=unicos + basic_os=unicos ;; symmetry) basic_machine=i386-sequent - os=dynix + basic_os=dynix ;; t3e) basic_machine=alphaev5-cray - os=unicos + basic_os=unicos ;; t90) basic_machine=t90-cray - os=unicos + basic_os=unicos ;; toad1) basic_machine=pdp10-xkl - os=tops20 + basic_os=tops20 ;; tpf) basic_machine=s390x-ibm - os=tpf + basic_os=tpf ;; udi29k) basic_machine=a29k-amd - os=udi + basic_os=udi ;; ultra3) basic_machine=a29k-nyu - os=sym1 + basic_os=sym1 ;; v810 | necv810) basic_machine=v810-nec - os=none + basic_os=none ;; vaxv) basic_machine=vax-dec - os=sysv + basic_os=sysv ;; vms) basic_machine=vax-dec - os=vms + basic_os=vms ;; vsta) basic_machine=i386-pc - os=vsta + basic_os=vsta ;; vxworks960) basic_machine=i960-wrs - os=vxworks + basic_os=vxworks ;; vxworks68) basic_machine=m68k-wrs - os=vxworks + basic_os=vxworks ;; vxworks29k) basic_machine=a29k-wrs - os=vxworks + basic_os=vxworks ;; xbox) basic_machine=i686-pc - os=mingw32 + basic_os=mingw32 ;; ymp) basic_machine=ymp-cray - os=unicos + basic_os=unicos ;; *) basic_machine=$1 - os= + basic_os= ;; esac ;; @@ -683,17 +702,17 @@ case $basic_machine in bluegene*) cpu=powerpc vendor=ibm - os=cnk + basic_os=cnk ;; decsystem10* | dec10*) cpu=pdp10 vendor=dec - os=tops10 + basic_os=tops10 ;; decsystem20* | dec20*) cpu=pdp10 vendor=dec - os=tops20 + basic_os=tops20 ;; delta | 3300 | motorola-3300 | motorola-delta \ | 3300-motorola | delta-motorola) @@ -703,7 +722,7 @@ case $basic_machine in dpx2*) cpu=m68k vendor=bull - os=sysv3 + basic_os=sysv3 ;; encore | umax | mmax) cpu=ns32k @@ -712,7 +731,7 @@ case $basic_machine in elxsi) cpu=elxsi vendor=elxsi - os=${os:-bsd} + basic_os=${basic_os:-bsd} ;; fx2800) cpu=i860 @@ -725,7 +744,7 @@ case $basic_machine in h3050r* | hiux*) cpu=hppa1.1 vendor=hitachi - os=hiuxwe2 + basic_os=hiuxwe2 ;; hp3k9[0-9][0-9] | hp9[0-9][0-9]) cpu=hppa1.0 @@ -768,36 +787,36 @@ case $basic_machine in i*86v32) cpu=`echo "$1" | sed -e 's/86.*/86/'` vendor=pc - os=sysv32 + basic_os=sysv32 ;; i*86v4*) cpu=`echo "$1" | sed -e 's/86.*/86/'` vendor=pc - os=sysv4 + basic_os=sysv4 ;; i*86v) cpu=`echo "$1" | sed -e 's/86.*/86/'` vendor=pc - os=sysv + basic_os=sysv ;; i*86sol2) cpu=`echo "$1" | sed -e 's/86.*/86/'` vendor=pc - os=solaris2 + basic_os=solaris2 ;; j90 | j90-cray) cpu=j90 vendor=cray - os=${os:-unicos} + basic_os=${basic_os:-unicos} ;; iris | iris4d) cpu=mips vendor=sgi - case $os in + case $basic_os in irix*) ;; *) - os=irix4 + basic_os=irix4 ;; esac ;; @@ -808,26 +827,26 @@ case $basic_machine in *mint | mint[0-9]* | *MiNT | *MiNT[0-9]*) cpu=m68k vendor=atari - os=mint + basic_os=mint ;; news-3600 | risc-news) cpu=mips vendor=sony - os=newsos + basic_os=newsos ;; next | m*-next) cpu=m68k vendor=next - case $os in + case $basic_os in openstep*) ;; nextstep*) ;; ns2*) - os=nextstep2 + basic_os=nextstep2 ;; *) 
- os=nextstep3 + basic_os=nextstep3 ;; esac ;; @@ -838,12 +857,12 @@ case $basic_machine in op50n-* | op60c-*) cpu=hppa1.1 vendor=oki - os=proelf + basic_os=proelf ;; pa-hitachi) cpu=hppa1.1 vendor=hitachi - os=hiuxwe2 + basic_os=hiuxwe2 ;; pbd) cpu=sparc @@ -880,12 +899,12 @@ case $basic_machine in sde) cpu=mipsisa32 vendor=sde - os=${os:-elf} + basic_os=${basic_os:-elf} ;; simso-wrs) cpu=sparclite vendor=wrs - os=vxworks + basic_os=vxworks ;; tower | tower-32) cpu=m68k @@ -902,7 +921,7 @@ case $basic_machine in w89k-*) cpu=hppa1.1 vendor=winbond - os=proelf + basic_os=proelf ;; none) cpu=none @@ -919,11 +938,13 @@ case $basic_machine in *-*) # shellcheck disable=SC2162 + saved_IFS=$IFS IFS="-" read cpu vendor <&2 + echo "Invalid configuration '$1': machine '$cpu-$vendor' not recognized" 1>&2 exit 1 ;; esac @@ -1275,8 +1285,54 @@ esac # Decode manufacturer-specific aliases for certain operating systems. -if [ x$os != x ] +if test x"$basic_os" != x then + +# First recognize some ad-hoc cases, or perhaps split kernel-os, or else just +# set os. +obj= +case $basic_os in + gnu/linux*) + kernel=linux + os=`echo "$basic_os" | sed -e 's|gnu/linux|gnu|'` + ;; + os2-emx) + kernel=os2 + os=`echo "$basic_os" | sed -e 's|os2-emx|emx|'` + ;; + nto-qnx*) + kernel=nto + os=`echo "$basic_os" | sed -e 's|nto-qnx|qnx|'` + ;; + *-*) + # shellcheck disable=SC2162 + saved_IFS=$IFS + IFS="-" read kernel os <&2 - exit 1 + # No normalization, but not necessarily accepted, that comes below. ;; esac + else # Here we handle the default operating systems that come with various machines. @@ -1528,42 +1517,54 @@ else # will signal an error saying that MANUFACTURER isn't an operating # system, and we'll never get to this point. +kernel= +obj= case $cpu-$vendor in score-*) - os=elf + os= + obj=elf ;; spu-*) - os=elf + os= + obj=elf ;; *-acorn) os=riscix1.2 ;; arm*-rebel) - os=linux + kernel=linux + os=gnu ;; arm*-semi) - os=aout + os= + obj=aout ;; c4x-* | tic4x-*) - os=coff + os= + obj=coff ;; c8051-*) - os=elf + os= + obj=elf ;; clipper-intergraph) os=clix ;; hexagon-*) - os=elf + os= + obj=elf ;; tic54x-*) - os=coff + os= + obj=coff ;; tic55x-*) - os=coff + os= + obj=coff ;; tic6x-*) - os=coff + os= + obj=coff ;; # This must come before the *-dec entry. pdp10-*) @@ -1585,19 +1586,24 @@ case $cpu-$vendor in os=sunos3 ;; m68*-cisco) - os=aout + os= + obj=aout ;; mep-*) - os=elf + os= + obj=elf ;; mips*-cisco) - os=elf + os= + obj=elf ;; mips*-*) - os=elf + os= + obj=elf ;; or32-*) - os=coff + os= + obj=coff ;; *-tti) # must be before sparc entry or we get the wrong os. os=sysv3 @@ -1606,7 +1612,8 @@ case $cpu-$vendor in os=sunos4.1.1 ;; pru-*) - os=elf + os= + obj=elf ;; *-be) os=beos @@ -1687,10 +1694,12 @@ case $cpu-$vendor in os=uxpv ;; *-rom68k) - os=coff + os= + obj=coff ;; *-*bug) - os=coff + os= + obj=coff ;; *-apple) os=macos @@ -1705,84 +1714,242 @@ case $cpu-$vendor in os=none ;; esac + fi +# Now, validate our (potentially fixed-up) individual pieces (OS, OBJ). + +case $os in + # Sometimes we do "kernel-libc", so those need to count as OSes. + musl* | newlib* | relibc* | uclibc*) + ;; + # Likewise for "kernel-abi" + eabi* | gnueabi*) + ;; + # VxWorks passes extra cpu info in the 4th filed. + simlinux | simwindows | spe) + ;; + # See `case $cpu-$os` validation below + ghcjs) + ;; + # Now accept the basic system types. + # The portable systems comes first. + # Each alternative MUST end in a * to match a version number. 
+ gnu* | android* | bsd* | mach* | minix* | genix* | ultrix* | irix* \ + | *vms* | esix* | aix* | cnk* | sunos | sunos[34]* \ + | hpux* | unos* | osf* | luna* | dgux* | auroraux* | solaris* \ + | sym* | plan9* | psp* | sim* | xray* | os68k* | v88r* \ + | hiux* | abug | nacl* | netware* | windows* \ + | os9* | macos* | osx* | ios* | tvos* | watchos* \ + | mpw* | magic* | mmixware* | mon960* | lnews* \ + | amigaos* | amigados* | msdos* | newsos* | unicos* | aof* \ + | aos* | aros* | cloudabi* | sortix* | twizzler* \ + | nindy* | vxsim* | vxworks* | ebmon* | hms* | mvs* \ + | clix* | riscos* | uniplus* | iris* | isc* | rtu* | xenix* \ + | mirbsd* | netbsd* | dicos* | openedition* | ose* \ + | bitrig* | openbsd* | secbsd* | solidbsd* | libertybsd* | os108* \ + | ekkobsd* | freebsd* | riscix* | lynxos* | os400* \ + | bosx* | nextstep* | cxux* | oabi* \ + | ptx* | ecoff* | winnt* | domain* | vsta* \ + | udi* | lites* | ieee* | go32* | aux* | hcos* \ + | chorusrdb* | cegcc* | glidix* | serenity* \ + | cygwin* | msys* | moss* | proelf* | rtems* \ + | midipix* | mingw32* | mingw64* | mint* \ + | uxpv* | beos* | mpeix* | udk* | moxiebox* \ + | interix* | uwin* | mks* | rhapsody* | darwin* \ + | openstep* | oskit* | conix* | pw32* | nonstopux* \ + | storm-chaos* | tops10* | tenex* | tops20* | its* \ + | os2* | vos* | palmos* | uclinux* | nucleus* | morphos* \ + | scout* | superux* | sysv* | rtmk* | tpf* | windiss* \ + | powermax* | dnix* | nx6 | nx7 | sei* | dragonfly* \ + | skyos* | haiku* | rdos* | toppers* | drops* | es* \ + | onefs* | tirtos* | phoenix* | fuchsia* | redox* | bme* \ + | midnightbsd* | amdhsa* | unleashed* | emscripten* | wasi* \ + | nsk* | powerunix* | genode* | zvmoe* | qnx* | emx* | zephyr* \ + | fiwix* | mlibc* | cos* | mbr* ) + ;; + # This one is extra strict with allowed versions + sco3.2v2 | sco3.2v[4-9]* | sco5v6*) + # Don't forget version if it is 3.2v4 or newer. + ;; + none) + ;; + kernel* | msvc* ) + # Restricted further below + ;; + '') + if test x"$obj" = x + then + echo "Invalid configuration '$1': Blank OS only allowed with explicit machine code file format" 1>&2 + fi + ;; + *) + echo "Invalid configuration '$1': OS '$os' not recognized" 1>&2 + exit 1 + ;; +esac + +case $obj in + aout* | coff* | elf* | pe*) + ;; + '') + # empty is fine + ;; + *) + echo "Invalid configuration '$1': Machine code format '$obj' not recognized" 1>&2 + exit 1 + ;; +esac + +# Here we handle the constraint that a (synthetic) cpu and os are +# valid only in combination with each other and nowhere else. +case $cpu-$os in + # The "javascript-unknown-ghcjs" triple is used by GHC; we + # accept it here in order to tolerate that, but reject any + # variations. + javascript-ghcjs) + ;; + javascript-* | *-ghcjs) + echo "Invalid configuration '$1': cpu '$cpu' is not valid with os '$os$obj'" 1>&2 + exit 1 + ;; +esac + +# As a final step for OS-related things, validate the OS-kernel combination +# (given a valid OS), if there is a kernel. +case $kernel-$os-$obj in + linux-gnu*- | linux-dietlibc*- | linux-android*- | linux-newlib*- \ + | linux-musl*- | linux-relibc*- | linux-uclibc*- | linux-mlibc*- ) + ;; + uclinux-uclibc*- ) + ;; + managarm-mlibc*- | managarm-kernel*- ) + ;; + windows*-msvc*-) + ;; + -dietlibc*- | -newlib*- | -musl*- | -relibc*- | -uclibc*- | -mlibc*- ) + # These are just libc implementations, not actual OSes, and thus + # require a kernel. + echo "Invalid configuration '$1': libc '$os' needs explicit kernel." 
1>&2 + exit 1 + ;; + -kernel*- ) + echo "Invalid configuration '$1': '$os' needs explicit kernel." 1>&2 + exit 1 + ;; + *-kernel*- ) + echo "Invalid configuration '$1': '$kernel' does not support '$os'." 1>&2 + exit 1 + ;; + *-msvc*- ) + echo "Invalid configuration '$1': '$os' needs 'windows'." 1>&2 + exit 1 + ;; + kfreebsd*-gnu*- | kopensolaris*-gnu*-) + ;; + vxworks-simlinux- | vxworks-simwindows- | vxworks-spe-) + ;; + nto-qnx*-) + ;; + os2-emx-) + ;; + *-eabi*- | *-gnueabi*-) + ;; + none--*) + # None (no kernel, i.e. freestanding / bare metal), + # can be paired with an machine code file format + ;; + -*-) + # Blank kernel with real OS is always fine. + ;; + --*) + # Blank kernel and OS with real machine code file format is always fine. + ;; + *-*-*) + echo "Invalid configuration '$1': Kernel '$kernel' not known to work with OS '$os'." 1>&2 + exit 1 + ;; +esac + # Here we handle the case where we know the os, and the CPU type, but not the # manufacturer. We pick the logical manufacturer. case $vendor in unknown) - case $os in - riscix*) + case $cpu-$os in + *-riscix*) vendor=acorn ;; - sunos*) + *-sunos*) vendor=sun ;; - cnk*|-aix*) + *-cnk* | *-aix*) vendor=ibm ;; - beos*) + *-beos*) vendor=be ;; - hpux*) + *-hpux*) vendor=hp ;; - mpeix*) + *-mpeix*) vendor=hp ;; - hiux*) + *-hiux*) vendor=hitachi ;; - unos*) + *-unos*) vendor=crds ;; - dgux*) + *-dgux*) vendor=dg ;; - luna*) + *-luna*) vendor=omron ;; - genix*) + *-genix*) vendor=ns ;; - clix*) + *-clix*) vendor=intergraph ;; - mvs* | opened*) + *-mvs* | *-opened*) + vendor=ibm + ;; + *-os400*) vendor=ibm ;; - os400*) + s390-* | s390x-*) vendor=ibm ;; - ptx*) + *-ptx*) vendor=sequent ;; - tpf*) + *-tpf*) vendor=ibm ;; - vxsim* | vxworks* | windiss*) + *-vxsim* | *-vxworks* | *-windiss*) vendor=wrs ;; - aux*) + *-aux*) vendor=apple ;; - hms*) + *-hms*) vendor=hitachi ;; - mpw* | macos*) + *-mpw* | *-macos*) vendor=apple ;; - *mint | mint[0-9]* | *MiNT | MiNT[0-9]*) + *-*mint | *-mint[0-9]* | *-*MiNT | *-MiNT[0-9]*) vendor=atari ;; - vos*) + *-vos*) vendor=stratus ;; esac ;; esac -echo "$cpu-$vendor-$os" +echo "$cpu-$vendor${kernel:+-$kernel}${os:+-$os}${obj:+-$obj}" exit # Local variables: diff --git a/depends/description.md b/depends/description.md index 0a6f2e6442190..fa345bfe85947 100644 --- a/depends/description.md +++ b/depends/description.md @@ -6,13 +6,12 @@ There are several features that make it different from most similar systems: In theory, binaries for any target OS/architecture can be created, from a builder running any OS/architecture. In practice, build-side tools must be specified when the defaults don't fit, and packages must be amended to work -on new hosts. For now, a build architecture of x86_64 is assumed, either on -Linux or macOS. +on new hosts. ### No reliance on timestamps File presence is used to determine what needs to be built. This makes the -results distributable and easily digestable by automated builders. +results distributable and easily digestible by automated builders. ### Each build only has its specified dependencies available at build-time. @@ -28,7 +27,7 @@ etc), and as well as a hash of the same data for each recursive dependency. If any portion of a package's build recipe changes, it will be rebuilt as well as any other package that depends on it. If any of the main makefiles (Makefile, funcs.mk, etc) are changed, all packages will be rebuilt. After building, the -results are cached into a tarball that can be re-used and distributed. 
+results are cached into a tarball that can be reused and distributed. ### Package build results are (relatively) deterministic. diff --git a/depends/funcs.mk b/depends/funcs.mk index a4434b5167ad9..a2f760bd0e726 100644 --- a/depends/funcs.mk +++ b/depends/funcs.mk @@ -1,17 +1,22 @@ define int_vars #Set defaults for vars which may be overridden per-package -$(1)_cc=$($($(1)_type)_CC) -$(1)_cxx=$($($(1)_type)_CXX) -$(1)_objc=$($($(1)_type)_OBJC) -$(1)_objcxx=$($($(1)_type)_OBJCXX) -$(1)_ar=$($($(1)_type)_AR) -$(1)_ranlib=$($($(1)_type)_RANLIB) -$(1)_libtool=$($($(1)_type)_LIBTOOL) -$(1)_nm=$($($(1)_type)_NM) -$(1)_cflags=$($($(1)_type)_CFLAGS) $($($(1)_type)_$(release_type)_CFLAGS) -$(1)_cxxflags=$($($(1)_type)_CXXFLAGS) $($($(1)_type)_$(release_type)_CXXFLAGS) -$(1)_ldflags=$($($(1)_type)_LDFLAGS) $($($(1)_type)_$(release_type)_LDFLAGS) -L$($($(1)_type)_prefix)/lib -$(1)_cppflags=$($($(1)_type)_CPPFLAGS) $($($(1)_type)_$(release_type)_CPPFLAGS) -I$($($(1)_type)_prefix)/include +$(1)_cc=$$($$($(1)_type)_CC) +$(1)_cxx=$$($$($(1)_type)_CXX) +$(1)_objc=$$($$($(1)_type)_OBJC) +$(1)_objcxx=$$($$($(1)_type)_OBJCXX) +$(1)_ar=$$($$($(1)_type)_AR) +$(1)_ranlib=$$($$($(1)_type)_RANLIB) +$(1)_nm=$$($$($(1)_type)_NM) +$(1)_cflags=$$($$($(1)_type)_CFLAGS) \ + $$($$($(1)_type)_$$(release_type)_CFLAGS) +$(1)_cxxflags=$$($$($(1)_type)_CXXFLAGS) \ + $$($$($(1)_type)_$$(release_type)_CXXFLAGS) +$(1)_ldflags=$$($$($(1)_type)_LDFLAGS) \ + $$($$($(1)_type)_$$(release_type)_LDFLAGS) \ + -L$$($($(1)_type)_prefix)/lib +$(1)_cppflags=$$($$($(1)_type)_CPPFLAGS) \ + $$($$($(1)_type)_$$(release_type)_CPPFLAGS) \ + -I$$($$($(1)_type)_prefix)/include $(1)_recipe_hash:= endef @@ -41,9 +46,9 @@ endef define int_get_build_id $(eval $(1)_dependencies += $($(1)_$(host_arch)_$(host_os)_dependencies) $($(1)_$(host_os)_dependencies)) -$(eval $(1)_all_dependencies:=$(call int_get_all_dependencies,$(1),$($($(1)_type)_native_toolchain) $($(1)_dependencies))) +$(eval $(1)_all_dependencies:=$(call int_get_all_dependencies,$(1),$($(1)_dependencies))) $(foreach dep,$($(1)_all_dependencies),$(eval $(1)_build_id_deps+=$(dep)-$($(dep)_version)-$($(dep)_recipe_hash))) -$(eval $(1)_build_id_long:=$(1)-$($(1)_version)-$($(1)_recipe_hash)-$(release_type) $($(1)_build_id_deps) $($($(1)_type)_id_string)) +$(eval $(1)_build_id_long:=$(1)-$($(1)_version)-$($(1)_recipe_hash)-$(release_type) $($(1)_build_id_deps) $($($(1)_type)_id)) $(eval $(1)_build_id:=$(shell echo -n "$($(1)_build_id_long)" | $(build_SHA256SUM) | cut -c-$(HASH_LENGTH))) final_build_id_long+=$($(package)_build_id_long) @@ -61,6 +66,7 @@ $(1)_cached_checksum:=$(BASE_CACHE)/$(host)/$(1)/$(1)-$($(1)_version)-$($(1)_bui $(1)_patch_dir:=$(base_build_dir)/$(host)/$(1)/$($(1)_version)-$($(1)_build_id)/.patches-$($(1)_build_id) $(1)_prefixbin:=$($($(1)_type)_prefix)/bin/ $(1)_cached:=$(BASE_CACHE)/$(host)/$(1)/$(1)-$($(1)_version)-$($(1)_build_id).tar.gz +$(1)_build_log:=$(BASEDIR)/$(1)-$($(1)_version)-$($(1)_build_id).log $(1)_all_sources=$($(1)_file_name) $($(1)_extra_sources) #stamps @@ -69,7 +75,7 @@ $(1)_extracted=$$($(1)_extract_dir)/.stamp_extracted $(1)_preprocessed=$$($(1)_extract_dir)/.stamp_preprocessed $(1)_cleaned=$$($(1)_extract_dir)/.stamp_cleaned $(1)_built=$$($(1)_build_dir)/.stamp_built -$(1)_configured=$$($(1)_build_dir)/.stamp_configured +$(1)_configured=$(host_prefix)/.$(1)_stamp_configured $(1)_staged=$$($(1)_staging_dir)/.stamp_staged $(1)_postprocessed=$$($(1)_staging_prefix_dir)/.stamp_postprocessed $(1)_download_path_fixed=$(subst :,\:,$$($(1)_download_path)) 
@@ -78,11 +84,11 @@ $(1)_download_path_fixed=$(subst :,\:,$$($(1)_download_path)) #default commands # The default behavior for tar will try to set ownership when running as uid 0 and may not succeed, --no-same-owner disables this behavior $(1)_fetch_cmds ?= $(call fetch_file,$(1),$(subst \:,:,$$($(1)_download_path_fixed)),$$($(1)_download_file),$($(1)_file_name),$($(1)_sha256_hash)) -$(1)_extract_cmds ?= mkdir -p $$($(1)_extract_dir) && echo "$$($(1)_sha256_hash) $$($(1)_source)" > $$($(1)_extract_dir)/.$$($(1)_file_name).hash && $(build_SHA256SUM) -c $$($(1)_extract_dir)/.$$($(1)_file_name).hash && tar --no-same-owner --strip-components=1 -xf $$($(1)_source) -$(1)_preprocess_cmds ?= -$(1)_build_cmds ?= -$(1)_config_cmds ?= -$(1)_stage_cmds ?= +$(1)_extract_cmds ?= mkdir -p $$($(1)_extract_dir) && echo "$$($(1)_sha256_hash) $$($(1)_source)" > $$($(1)_extract_dir)/.$$($(1)_file_name).hash && $(build_SHA256SUM) -c $$($(1)_extract_dir)/.$$($(1)_file_name).hash && $(build_TAR) --no-same-owner --strip-components=1 -xf $$($(1)_source) +$(1)_preprocess_cmds ?= true +$(1)_build_cmds ?= true +$(1)_config_cmds ?= true +$(1)_stage_cmds ?= true $(1)_set_vars ?= @@ -130,11 +136,18 @@ $(1)_config_env+=$($(1)_config_env_$(host_arch)_$(host_os)) $($(1)_config_env_$( $(1)_config_env+=PKG_CONFIG_LIBDIR=$($($(1)_type)_prefix)/lib/pkgconfig $(1)_config_env+=PKG_CONFIG_PATH=$($($(1)_type)_prefix)/share/pkgconfig -$(1)_config_env+=PATH=$(build_prefix)/bin:$(PATH) -$(1)_build_env+=PATH=$(build_prefix)/bin:$(PATH) -$(1)_stage_env+=PATH=$(build_prefix)/bin:$(PATH) -$(1)_autoconf=./configure --host=$($($(1)_type)_host) --prefix=$($($(1)_type)_prefix) $$($(1)_config_opts) CC="$$($(1)_cc)" CXX="$$($(1)_cxx)" - +$(1)_config_env+=PKG_CONFIG_SYSROOT_DIR=/ +$(1)_config_env+=CMAKE_MODULE_PATH=$($($(1)_type)_prefix)/lib/cmake +$(1)_config_env+=PATH="$(build_prefix)/bin:$(PATH)" +$(1)_build_env+=PATH="$(build_prefix)/bin:$(PATH)" +$(1)_stage_env+=PATH="$(build_prefix)/bin:$(PATH)" + +# Setting a --build type that differs from --host will explicitly enable +# cross-compilation mode. Note that --build defaults to the output of +# config.guess, which is what we set it too here. This also quells autoconf +# warnings, "If you wanted to set the --build type, don't use --host.", +# when using versions older than 2.70. +$(1)_autoconf=./configure --build=$(BUILD) --host=$($($(1)_type)_host) --prefix=$($($(1)_type)_prefix) --with-pic $$($(1)_config_opts) CC="$$($(1)_cc)" CXX="$$($(1)_cxx)" ifneq ($($(1)_nm),) $(1)_autoconf += NM="$$($(1)_nm)" endif @@ -156,57 +169,91 @@ endif ifneq ($($(1)_ldflags),) $(1)_autoconf += LDFLAGS="$$($(1)_ldflags)" endif + +# We hardcode the library install path to "lib" to match the PKG_CONFIG_PATH +# setting in depends/toolchain.cmake.in, which also hardcodes "lib". +# Without this setting, CMake by default would use the OS library +# directory, which might be "lib64" or something else, not "lib", on multiarch systems. 
+$(1)_cmake=env CC="$$($(1)_cc)" \ + CFLAGS="$$($(1)_cppflags) $$($(1)_cflags)" \ + CXX="$$($(1)_cxx)" \ + CXXFLAGS="$$($(1)_cppflags) $$($(1)_cxxflags)" \ + LDFLAGS="$$($(1)_ldflags)" \ + cmake -DCMAKE_INSTALL_PREFIX:PATH="$$($($(1)_type)_prefix)" \ + -DCMAKE_AR=`which $$($(1)_ar)` \ + -DCMAKE_NM=`which $$($(1)_nm)` \ + -DCMAKE_RANLIB=`which $$($(1)_ranlib)` \ + -DCMAKE_INSTALL_LIBDIR=lib/ \ + -DCMAKE_POSITION_INDEPENDENT_CODE=ON \ + -DCMAKE_VERBOSE_MAKEFILE:BOOL=$(V) \ + $$($(1)_config_opts) +ifeq ($($(1)_type),build) +$(1)_cmake += -DCMAKE_INSTALL_RPATH:PATH="$$($($(1)_type)_prefix)/lib" +else +ifneq ($(host),$(build)) +$(1)_cmake += -DCMAKE_SYSTEM_NAME=$($(host_os)_cmake_system_name) +$(1)_cmake += -DCMAKE_C_COMPILER_TARGET=$(host) +$(1)_cmake += -DCMAKE_CXX_COMPILER_TARGET=$(host) +endif +endif endef define int_add_cmds +ifneq ($(LOG),) +$(1)_logging = >>$$($(1)_build_log) 2>&1 || { if test -f $$($(1)_build_log); then cat $$($(1)_build_log); fi; exit 1; } +endif + $($(1)_fetched): - $(AT)mkdir -p $$(@D) $(SOURCES_PATH) - $(AT)rm -f $$@ - $(AT)touch $$@ - $(AT)cd $$(@D); $(call $(1)_fetch_cmds,$(1)) - $(AT)cd $($(1)_source_dir); $(foreach source,$($(1)_all_sources),$(build_SHA256SUM) $(source) >> $$(@);) - $(AT)touch $$@ + mkdir -p $$(@D) $(SOURCES_PATH) + rm -f $$@ + touch $$@ + cd $$(@D); $($(1)_fetch_cmds) + cd $($(1)_source_dir); $(foreach source,$($(1)_all_sources),$(build_SHA256SUM) $(source) >> $$(@);) + touch $$@ $($(1)_extracted): | $($(1)_fetched) - $(AT)echo Extracting $(1)... - $(AT)mkdir -p $$(@D) - $(AT)cd $$(@D); $(call $(1)_extract_cmds,$(1)) - $(AT)touch $$@ + echo Extracting $(1)... + mkdir -p $$(@D) + cd $$(@D); $($(1)_extract_cmds) + touch $$@ $($(1)_preprocessed): | $($(1)_extracted) - $(AT)echo Preprocessing $(1)... - $(AT)mkdir -p $$(@D) $($(1)_patch_dir) - $(AT)$(foreach patch,$($(1)_patches),cd $(PATCHES_PATH)/$(1); cp $(patch) $($(1)_patch_dir) ;) - $(AT)cd $$(@D); $(call $(1)_preprocess_cmds, $(1)) - $(AT)touch $$@ + echo Preprocessing $(1)... + mkdir -p $$(@D) $($(1)_patch_dir) + $(foreach patch,$($(1)_patches),cd $(PATCHES_PATH)/$(1); cp $(patch) $($(1)_patch_dir) ;) + { cd $$(@D); $($(1)_preprocess_cmds); } $$($(1)_logging) + touch $$@ $($(1)_configured): | $($(1)_dependencies) $($(1)_preprocessed) - $(AT)echo Configuring $(1)... - $(AT)rm -rf $(host_prefix); mkdir -p $(host_prefix)/lib; cd $(host_prefix); $(foreach package,$($(1)_all_dependencies), tar --no-same-owner -xf $($(package)_cached); ) - $(AT)mkdir -p $$(@D) - $(AT)+cd $$(@D); $($(1)_config_env) $(call $(1)_config_cmds, $(1)) - $(AT)touch $$@ + echo Configuring $(1)... + rm -rf $(host_prefix); mkdir -p $(host_prefix)/lib; cd $(host_prefix); $(foreach package,$($(1)_all_dependencies), $(build_TAR) --no-same-owner -xf $($(package)_cached); ) + mkdir -p $$($(1)_build_dir) + +{ cd $$($(1)_build_dir); export $($(1)_config_env); $($(1)_config_cmds); } $$($(1)_logging) + touch $$@ $($(1)_built): | $($(1)_configured) - $(AT)echo Building $(1)... - $(AT)mkdir -p $$(@D) - $(AT)+cd $$(@D); $($(1)_build_env) $(call $(1)_build_cmds, $(1)) - $(AT)touch $$@ + echo Building $(1)... + mkdir -p $$(@D) + +{ cd $$(@D); export $($(1)_build_env); $($(1)_build_cmds); } $$($(1)_logging) + touch $$@ $($(1)_staged): | $($(1)_built) - $(AT)echo Staging $(1)... - $(AT)mkdir -p $($(1)_staging_dir)/$(host_prefix) - $(AT)cd $($(1)_build_dir); $($(1)_stage_env) $(call $(1)_stage_cmds, $(1)) - $(AT)rm -rf $($(1)_extract_dir) - $(AT)touch $$@ + echo Staging $(1)... 
+ mkdir -p $($(1)_staging_dir)/$(host_prefix) + +{ cd $($(1)_build_dir); export $($(1)_stage_env); $($(1)_stage_cmds); } $$($(1)_logging) + rm -rf $($(1)_extract_dir) + touch $$@ $($(1)_postprocessed): | $($(1)_staged) - $(AT)echo Postprocessing $(1)... - $(AT)cd $($(1)_staging_prefix_dir); $(call $(1)_postprocess_cmds) - $(AT)touch $$@ + echo Postprocessing $(1)... + cd $($(1)_staging_prefix_dir); $($(1)_postprocess_cmds) + touch $$@ $($(1)_cached): | $($(1)_dependencies) $($(1)_postprocessed) - $(AT)echo Caching $(1)... - $(AT)cd $$($(1)_staging_dir)/$(host_prefix); find . | sort | tar --no-recursion -czf $$($(1)_staging_dir)/$$(@F) -T - - $(AT)mkdir -p $$(@D) - $(AT)rm -rf $$(@D) && mkdir -p $$(@D) - $(AT)mv $$($(1)_staging_dir)/$$(@F) $$(@) - $(AT)rm -rf $($(1)_staging_dir) + echo Caching $(1)... + cd $$($(1)_staging_dir)/$(host_prefix); \ + find . ! -name '.stamp_postprocessed' -print0 | TZ=UTC xargs -0r $(build_TOUCH); \ + find . ! -name '.stamp_postprocessed' | LC_ALL=C sort | $(build_TAR) --numeric-owner --no-recursion -czf $$($(1)_staging_dir)/$$(@F) -T - + mkdir -p $$(@D) + rm -rf $$(@D) && mkdir -p $$(@D) + mv $$($(1)_staging_dir)/$$(@F) $$(@) + rm -rf $($(1)_staging_dir) + if test -f $($(1)_build_log); then mv $($(1)_build_log) $$(@D); fi $($(1)_cached_checksum): $($(1)_cached) - $(AT)cd $$(@D); $(build_SHA256SUM) $$( $$(@) + cd $$(@D); $(build_SHA256SUM) $$( $$(@) .PHONY: $(1) $(1): | $($(1)_cached_checksum) @@ -236,7 +283,8 @@ $(foreach package,$(packages),$(eval $(package)_type=$(host_arch)_$(host_os))) $(foreach package,$(all_packages),$(eval $(call int_vars,$(package)))) #include package files -$(foreach package,$(all_packages),$(eval include packages/$(package).mk)) +$(foreach native_package,$(native_packages),$(eval include packages/$(native_package).mk)) +$(foreach package,$(packages),$(eval include packages/$(package).mk)) #compute a hash of all files that comprise this package's build recipe $(foreach package,$(all_packages),$(eval $(call int_get_build_recipe_hash,$(package)))) @@ -249,6 +297,3 @@ $(foreach package,$(all_packages),$(eval $(call int_config_attach_build_config,$ #create build targets $(foreach package,$(all_packages),$(eval $(call int_add_cmds,$(package)))) - -#special exception: if a toolchain package exists, all non-native packages depend on it -$(foreach package,$(packages),$(eval $($(package)_unpacked): |$($($(host_arch)_$(host_os)_native_toolchain)_cached) )) diff --git a/depends/gen_id b/depends/gen_id new file mode 100755 index 0000000000000..e2e2273b2d676 --- /dev/null +++ b/depends/gen_id @@ -0,0 +1,89 @@ +#!/usr/bin/env bash + +# Usage: env [ CC=... ] [ C_STANDARD=...] [ CXX=... ] [CXX_STANDARD=...] \ +# [ AR=... ] [ NM=... ] [ RANLIB=... ] [ STRIP=... ] [ DEBUG=... ] \ +# [ LTO=... ] [ NO_HARDEN=... ] ./build-id [ID_SALT]... +# +# Prints to stdout a SHA256 hash representing the current toolset, used by +# depends/Makefile as a build id for caching purposes (detecting when the +# toolset has changed and the cache needs to be invalidated). +# +# If the DEBUG environment variable is non-empty and the system has `tee` +# available in its $PATH, the pre-image to the SHA256 hash will be printed to +# stderr. This is to help developers debug caching issues in depends. + +# This script explicitly does not `set -e` because id determination is mostly +# opportunistic: it is fine that things fail, as long as they fail consistently. 
+ +# Command variables (CC/CXX/AR) which can be blank are invoked with `bash -c`, +# because the "command not found" error message printed by shells often include +# the line number, like so: +# +# ./depends/gen_id: line 43: --version: command not found +# +# By invoking with `bash -c`, we ensure that the line number is always 1 + +( + # Redirect stderr to stdout + exec 2>&1 + + echo "BEGIN ALL" + + # Include any ID salts supplied via command line + echo "BEGIN ID SALT" + echo "$@" + echo "END ID SALT" + + # GCC only prints COLLECT_LTO_WRAPPER when invoked with just "-v", but we want + # the information from "-v -E -" as well, so just include both. + echo "BEGIN CC" + bash -c "${CC} -v" + bash -c "${CC} -v -E -xc -o /dev/null - < /dev/null" + bash -c "${CC} -v -E -xobjective-c -o /dev/null - < /dev/null" + echo "C_STANDARD=${C_STANDARD}" + echo "END CC" + + echo "BEGIN CXX" + bash -c "${CXX} -v" + bash -c "${CXX} -v -E -xc++ -o /dev/null - < /dev/null" + bash -c "${CXX} -v -E -xobjective-c++ -o /dev/null - < /dev/null" + echo "CXX_STANDARD=${CXX_STANDARD}" + echo "END CXX" + + echo "BEGIN AR" + bash -c "${AR} --version" + env | grep '^AR_' + echo "END AR" + + echo "BEGIN NM" + bash -c "${NM} --version" + env | grep '^NM_' + echo "END NM" + + echo "BEGIN RANLIB" + bash -c "${RANLIB} --version" + env | grep '^RANLIB_' + echo "END RANLIB" + + echo "BEGIN STRIP" + bash -c "${STRIP} --version" + env | grep '^STRIP_' + echo "END STRIP" + + echo "BEGIN LTO" + echo "LTO=${LTO}" + echo "END LTO" + + echo "BEGIN NO_HARDEN" + echo "NO_HARDEN=${NO_HARDEN}" + echo "END NO_HARDEN" + + echo "END ALL" +) | if [ -n "$DEBUG" ] && command -v tee > /dev/null 2>&1; then + # When debugging and `tee` is available, output the preimage to stderr + # in addition to passing through stdin to stdout + tee >(cat 1>&2) + else + # Otherwise, passthrough stdin to stdout + cat + fi | ${SHA256SUM} - | cut -d' ' -f1 diff --git a/depends/hosts/android.mk b/depends/hosts/android.mk deleted file mode 100644 index 969ec2a1cb047..0000000000000 --- a/depends/hosts/android.mk +++ /dev/null @@ -1,11 +0,0 @@ -ifeq ($(HOST),armv7a-linux-android) -android_AR=$(ANDROID_TOOLCHAIN_BIN)/arm-linux-androideabi-ar -android_CXX=$(ANDROID_TOOLCHAIN_BIN)/$(HOST)eabi$(ANDROID_API_LEVEL)-clang++ -android_CC=$(ANDROID_TOOLCHAIN_BIN)/$(HOST)eabi$(ANDROID_API_LEVEL)-clang -android_RANLIB=$(ANDROID_TOOLCHAIN_BIN)/arm-linux-androideabi-ranlib -else -android_AR=$(ANDROID_TOOLCHAIN_BIN)/$(HOST)-ar -android_CXX=$(ANDROID_TOOLCHAIN_BIN)/$(HOST)$(ANDROID_API_LEVEL)-clang++ -android_CC=$(ANDROID_TOOLCHAIN_BIN)/$(HOST)$(ANDROID_API_LEVEL)-clang -android_RANLIB=$(ANDROID_TOOLCHAIN_BIN)/$(HOST)-ranlib -endif diff --git a/depends/hosts/darwin.mk b/depends/hosts/darwin.mk index 1bc4fb81896f6..4659d52912c1b 100644 --- a/depends/hosts/darwin.mk +++ b/depends/hosts/darwin.mk @@ -1,16 +1,81 @@ -OSX_MIN_VERSION=10.12 -OSX_SDK_VERSION=10.14 -OSX_SDK=$(SDK_PATH)/MacOSX$(OSX_SDK_VERSION).sdk -darwin_CC=clang -target $(host) -mmacosx-version-min=$(OSX_MIN_VERSION) --sysroot $(OSX_SDK) -darwin_CXX=clang++ -target $(host) -mmacosx-version-min=$(OSX_MIN_VERSION) --sysroot $(OSX_SDK) -stdlib=libc++ +OSX_MIN_VERSION=13.0 +OSX_SDK_VERSION=14.0 +XCODE_VERSION=15.0 +XCODE_BUILD_ID=15A240d +LLD_VERSION=711 -darwin_CFLAGS=-pipe -darwin_CXXFLAGS=$(darwin_CFLAGS) +OSX_SDK=$(SDK_PATH)/Xcode-$(XCODE_VERSION)-$(XCODE_BUILD_ID)-extracted-SDK-with-libcxx-headers + +# We can't just use $(shell command -v clang) because GNU Make handles builtins +# in a special way and doesn't know that 
`command` is a POSIX-standard builtin +# prior to 1af314465e5dfe3e8baa839a32a72e83c04f26ef, first released in v4.2.90. +# At the time of writing, GNU Make v4.2.1 is still being used in supported +# distro releases. +# +# Source: https://lists.gnu.org/archive/html/bug-make/2017-11/msg00017.html +clang_prog=$(shell $(SHELL) $(.SHELLFLAGS) "command -v clang") +clangxx_prog=$(shell $(SHELL) $(.SHELLFLAGS) "command -v clang++") + +darwin_AR=$(shell $(SHELL) $(.SHELLFLAGS) "command -v llvm-ar") +darwin_DSYMUTIL=$(shell $(SHELL) $(.SHELLFLAGS) "command -v dsymutil") +darwin_NM=$(shell $(SHELL) $(.SHELLFLAGS) "command -v llvm-nm") +darwin_OBJDUMP=$(shell $(SHELL) $(.SHELLFLAGS) "command -v llvm-objdump") +darwin_RANLIB=$(shell $(SHELL) $(.SHELLFLAGS) "command -v llvm-ranlib") +darwin_STRIP=$(shell $(SHELL) $(.SHELLFLAGS) "command -v llvm-strip") + +# Flag explanations: +# +# -mlinker-version +# +# Ensures that modern linker features are enabled. See here for more +# details: https://github.com/bitcoin/bitcoin/pull/19407. +# +# -isysroot$(OSX_SDK) -nostdlibinc +# +# Disable default include paths built into the compiler as well as +# those normally included for libc and libc++. The only path that +# remains implicitly is the clang resource dir. +# +# -iwithsysroot / -iframeworkwithsysroot +# +# Adds the desired paths from the SDK +# +# -platform_version +# +# Indicate to the linker the platform, the oldest supported version, +# and the SDK used. +# +# -no_adhoc_codesign +# +# Disable adhoc codesigning (for now) when using LLVM tooling, to avoid +# non-determinism issues with the Identifier field. + +darwin_CC=$(clang_prog) --target=$(host) \ + -isysroot$(OSX_SDK) -nostdlibinc \ + -iwithsysroot/usr/include -iframeworkwithsysroot/System/Library/Frameworks + +darwin_CXX=$(clangxx_prog) --target=$(host) \ + -isysroot$(OSX_SDK) -nostdlibinc \ + -iwithsysroot/usr/include/c++/v1 \ + -iwithsysroot/usr/include -iframeworkwithsysroot/System/Library/Frameworks + +darwin_CFLAGS=-pipe -std=$(C_STANDARD) -mmacos-version-min=$(OSX_MIN_VERSION) +darwin_CXXFLAGS=-pipe -std=$(CXX_STANDARD) -mmacos-version-min=$(OSX_MIN_VERSION) +darwin_LDFLAGS=-Wl,-platform_version,macos,$(OSX_MIN_VERSION),$(OSX_SDK_VERSION) + +ifneq ($(build_os),darwin) +darwin_CFLAGS += -mlinker-version=$(LLD_VERSION) +darwin_CXXFLAGS += -mlinker-version=$(LLD_VERSION) +darwin_LDFLAGS += -Wl,-no_adhoc_codesign -fuse-ld=lld +endif darwin_release_CFLAGS=-O2 darwin_release_CXXFLAGS=$(darwin_release_CFLAGS) -darwin_debug_CFLAGS=-O1 +darwin_debug_CFLAGS=-O1 -g darwin_debug_CXXFLAGS=$(darwin_debug_CFLAGS) -darwin_native_toolchain=native_cctools +darwin_cmake_system_name=Darwin +# Darwin version, which corresponds to OSX_MIN_VERSION. 
+# See https://en.wikipedia.org/wiki/Darwin_(operating_system) +darwin_cmake_system_version=20.1 diff --git a/depends/hosts/default.mk b/depends/hosts/default.mk index 144e5f88b7820..24689da45bc19 100644 --- a/depends/hosts/default.mk +++ b/depends/hosts/default.mk @@ -7,24 +7,36 @@ default_host_CXX = $(host_toolchain)g++ default_host_AR = $(host_toolchain)ar default_host_RANLIB = $(host_toolchain)ranlib default_host_STRIP = $(host_toolchain)strip -default_host_LIBTOOL = $(host_toolchain)libtool -default_host_INSTALL_NAME_TOOL = $(host_toolchain)install_name_tool -default_host_OTOOL = $(host_toolchain)otool default_host_NM = $(host_toolchain)nm +default_host_OBJCOPY = $(host_toolchain)objcopy define add_host_tool_func +ifneq ($(filter $(origin $1),undefined default),) +# Do not consider the well-known var $1 if it is undefined or is taking a value +# that is predefined by "make" (e.g. the make variable "CC" has a predefined +# value of "cc") $(host_os)_$1?=$$(default_host_$1) $(host_arch)_$(host_os)_$1?=$$($(host_os)_$1) $(host_arch)_$(host_os)_$(release_type)_$1?=$$($(host_os)_$1) +else +$(host_os)_$1=$(or $($1),$($(host_os)_$1),$(default_host_$1)) +$(host_arch)_$(host_os)_$1=$(or $($1),$($(host_arch)_$(host_os)_$1),$$($(host_os)_$1)) +$(host_arch)_$(host_os)_$(release_type)_$1=$(or $($1),$($(host_arch)_$(host_os)_$(release_type)_$1),$$($(host_os)_$1)) +endif host_$1=$$($(host_arch)_$(host_os)_$1) endef define add_host_flags_func +ifeq ($(filter $(origin $1),undefined default),) +$(host_arch)_$(host_os)_$1 = $($1) +$(host_arch)_$(host_os)_$(release_type)_$1 = +else $(host_arch)_$(host_os)_$1 += $($(host_os)_$1) $(host_arch)_$(host_os)_$(release_type)_$1 += $($(host_os)_$(release_type)_$1) +endif host_$1 = $$($(host_arch)_$(host_os)_$1) host_$(release_type)_$1 = $$($(host_arch)_$(host_os)_$(release_type)_$1) endef -$(foreach tool,CC CXX AR RANLIB STRIP NM LIBTOOL OTOOL INSTALL_NAME_TOOL,$(eval $(call add_host_tool_func,$(tool)))) +$(foreach tool,CC CXX AR RANLIB STRIP NM OBJCOPY OBJDUMP DSYMUTIL,$(eval $(call add_host_tool_func,$(tool)))) $(foreach flags,CFLAGS CXXFLAGS CPPFLAGS LDFLAGS, $(eval $(call add_host_flags_func,$(flags)))) diff --git a/depends/hosts/freebsd.mk b/depends/hosts/freebsd.mk new file mode 100644 index 0000000000000..8cef32e231cff --- /dev/null +++ b/depends/hosts/freebsd.mk @@ -0,0 +1,31 @@ +freebsd_CFLAGS=-pipe -std=$(C_STANDARD) +freebsd_CXXFLAGS=-pipe -std=$(CXX_STANDARD) + +freebsd_release_CFLAGS=-O2 +freebsd_release_CXXFLAGS=$(freebsd_release_CFLAGS) + +freebsd_debug_CFLAGS=-O1 +freebsd_debug_CXXFLAGS=$(freebsd_debug_CFLAGS) + +ifeq (86,$(findstring 86,$(build_arch))) +i686_freebsd_CC=clang -m32 +i686_freebsd_CXX=clang++ -m32 +i686_freebsd_AR=ar +i686_freebsd_RANLIB=ranlib +i686_freebsd_NM=nm +i686_freebsd_STRIP=strip + +x86_64_freebsd_CC=clang -m64 +x86_64_freebsd_CXX=clang++ -m64 +x86_64_freebsd_AR=ar +x86_64_freebsd_RANLIB=ranlib +x86_64_freebsd_NM=nm +x86_64_freebsd_STRIP=strip +else +i686_freebsd_CC=$(default_host_CC) -m32 +i686_freebsd_CXX=$(default_host_CXX) -m32 +x86_64_freebsd_CC=$(default_host_CC) -m64 +x86_64_freebsd_CXX=$(default_host_CXX) -m64 +endif + +freebsd_cmake_system_name=FreeBSD diff --git a/depends/hosts/linux.mk b/depends/hosts/linux.mk index b13a0f1ad714a..e2f34265d153e 100644 --- a/depends/hosts/linux.mk +++ b/depends/hosts/linux.mk @@ -1,14 +1,24 @@ -linux_CFLAGS=-pipe -linux_CXXFLAGS=$(linux_CFLAGS) +linux_CFLAGS=-pipe -std=$(C_STANDARD) +linux_CXXFLAGS=-pipe -std=$(CXX_STANDARD) + +ifneq ($(LTO),) +linux_AR = $(host_toolchain)gcc-ar 
+linux_NM = $(host_toolchain)gcc-nm +linux_RANLIB = $(host_toolchain)gcc-ranlib +endif linux_release_CFLAGS=-O2 linux_release_CXXFLAGS=$(linux_release_CFLAGS) -linux_debug_CFLAGS=-O1 +linux_debug_CFLAGS=-O1 -g linux_debug_CXXFLAGS=$(linux_debug_CFLAGS) +# https://gcc.gnu.org/onlinedocs/libstdc++/manual/debug_mode.html linux_debug_CPPFLAGS=-D_GLIBCXX_DEBUG -D_GLIBCXX_DEBUG_PEDANTIC +# https://libcxx.llvm.org/Hardening.html +linux_debug_CPPFLAGS+=-D_LIBCPP_HARDENING_MODE=_LIBCPP_HARDENING_MODE_DEBUG + ifeq (86,$(findstring 86,$(build_arch))) i686_linux_CC=gcc -m32 i686_linux_CXX=g++ -m32 @@ -29,3 +39,7 @@ i686_linux_CXX=$(default_host_CXX) -m32 x86_64_linux_CC=$(default_host_CC) -m64 x86_64_linux_CXX=$(default_host_CXX) -m64 endif + +linux_cmake_system_name=Linux +# Refer to doc/dependencies.md for the minimum required kernel. +linux_cmake_system_version=3.17.0 diff --git a/depends/hosts/mingw32.mk b/depends/hosts/mingw32.mk index dbfb62fdcf986..755d7aebe4c5e 100644 --- a/depends/hosts/mingw32.mk +++ b/depends/hosts/mingw32.mk @@ -1,10 +1,27 @@ -mingw32_CFLAGS=-pipe -mingw32_CXXFLAGS=$(mingw32_CFLAGS) +ifneq ($(shell $(SHELL) $(.SHELLFLAGS) "command -v $(host)-gcc-posix"),) +mingw32_CC := $(host)-gcc-posix +endif +ifneq ($(shell $(SHELL) $(.SHELLFLAGS) "command -v $(host)-g++-posix"),) +mingw32_CXX := $(host)-g++-posix +endif + +mingw32_CFLAGS=-pipe -std=$(C_STANDARD) +mingw32_CXXFLAGS=-pipe -std=$(CXX_STANDARD) + +ifneq ($(LTO),) +mingw32_AR = $(host_toolchain)gcc-ar +mingw32_NM = $(host_toolchain)gcc-nm +mingw32_RANLIB = $(host_toolchain)gcc-ranlib +endif mingw32_release_CFLAGS=-O2 mingw32_release_CXXFLAGS=$(mingw32_release_CFLAGS) -mingw32_debug_CFLAGS=-O1 +mingw32_debug_CFLAGS=-O1 -g mingw32_debug_CXXFLAGS=$(mingw32_debug_CFLAGS) mingw32_debug_CPPFLAGS=-D_GLIBCXX_DEBUG -D_GLIBCXX_DEBUG_PEDANTIC + +mingw32_cmake_system_name=Windows +# Windows 7 (NT 6.1). 
+mingw32_cmake_system_version=6.1 diff --git a/depends/hosts/netbsd.mk b/depends/hosts/netbsd.mk new file mode 100644 index 0000000000000..16dff92d428a3 --- /dev/null +++ b/depends/hosts/netbsd.mk @@ -0,0 +1,39 @@ +netbsd_CFLAGS=-pipe -std=$(C_STANDARD) +netbsd_CXXFLAGS=-pipe -std=$(CXX_STANDARD) + +ifneq ($(LTO),) +netbsd_AR = $(host_toolchain)gcc-ar +netbsd_NM = $(host_toolchain)gcc-nm +netbsd_RANLIB = $(host_toolchain)gcc-ranlib +endif + +netbsd_CXXFLAGS=$(netbsd_CFLAGS) + +netbsd_release_CFLAGS=-O2 +netbsd_release_CXXFLAGS=$(netbsd_release_CFLAGS) + +netbsd_debug_CFLAGS=-O1 +netbsd_debug_CXXFLAGS=$(netbsd_debug_CFLAGS) + +ifeq (86,$(findstring 86,$(build_arch))) +i686_netbsd_CC=gcc -m32 +i686_netbsd_CXX=g++ -m32 +i686_netbsd_AR=ar +i686_netbsd_RANLIB=ranlib +i686_netbsd_NM=nm +i686_netbsd_STRIP=strip + +x86_64_netbsd_CC=gcc -m64 +x86_64_netbsd_CXX=g++ -m64 +x86_64_netbsd_AR=ar +x86_64_netbsd_RANLIB=ranlib +x86_64_netbsd_NM=nm +x86_64_netbsd_STRIP=strip +else +i686_netbsd_CC=$(default_host_CC) -m32 +i686_netbsd_CXX=$(default_host_CXX) -m32 +x86_64_netbsd_CC=$(default_host_CC) -m64 +x86_64_netbsd_CXX=$(default_host_CXX) -m64 +endif + +netbsd_cmake_system_name=NetBSD diff --git a/depends/hosts/openbsd.mk b/depends/hosts/openbsd.mk new file mode 100644 index 0000000000000..63f6d73d55598 --- /dev/null +++ b/depends/hosts/openbsd.mk @@ -0,0 +1,31 @@ +openbsd_CFLAGS=-pipe -std=$(C_STANDARD) +openbsd_CXXFLAGS=-pipe -std=$(CXX_STANDARD) + +openbsd_release_CFLAGS=-O2 +openbsd_release_CXXFLAGS=$(openbsd_release_CFLAGS) + +openbsd_debug_CFLAGS=-O1 +openbsd_debug_CXXFLAGS=$(openbsd_debug_CFLAGS) + +ifeq (86,$(findstring 86,$(build_arch))) +i686_openbsd_CC=clang -m32 +i686_openbsd_CXX=clang++ -m32 +i686_openbsd_AR=ar +i686_openbsd_RANLIB=ranlib +i686_openbsd_NM=nm +i686_openbsd_STRIP=strip + +x86_64_openbsd_CC=clang -m64 +x86_64_openbsd_CXX=clang++ -m64 +x86_64_openbsd_AR=ar +x86_64_openbsd_RANLIB=ranlib +x86_64_openbsd_NM=nm +x86_64_openbsd_STRIP=strip +else +i686_openbsd_CC=$(default_host_CC) -m32 +i686_openbsd_CXX=$(default_host_CXX) -m32 +x86_64_openbsd_CC=$(default_host_CC) -m64 +x86_64_openbsd_CXX=$(default_host_CXX) -m64 +endif + +openbsd_cmake_system_name=OpenBSD diff --git a/depends/packages.md b/depends/packages.md index 7ed20ea1297dc..7a7a42afa10be 100644 --- a/depends/packages.md +++ b/depends/packages.md @@ -74,7 +74,6 @@ These variables may be set to override or append their default values. $(package)_objcxx $(package)_ar $(package)_ranlib - $(package)_libtool $(package)_nm $(package)_cflags $(package)_cxxflags @@ -163,6 +162,9 @@ From the [Gentoo Wiki entry](https://wiki.gentoo.org/wiki/Project:Quality_Assura > creates. This leads to massive overlinking, which is toxic to the Gentoo > ecosystem, as it leads to a massive number of unnecessary rebuilds. +Where possible, packages are built with Position Independent Code. Either using +the Autotools `--with-pic` flag, or `CMAKE_POSITION_INDEPENDENT_CODE` with CMake. + ## Secondary dependencies: Secondary dependency packages relative to the bitcoin binaries/libraries (i.e. @@ -178,8 +180,8 @@ not sufficient to just say `libprimary`. For us, it's much easier to just link a static `libsecondary` into a shared `libprimary`. Especially because in our case, we are linking against a dummy `libprimary` anyway that we'll throw away. We don't care if the end-user has a -static or dynamic `libseconday`, that's not our concern. 
With a static -`libseconday`, when we need to link `libprimary` into our executable, there's no +static or dynamic `libsecondary`, that's not our concern. With a static +`libsecondary`, when we need to link `libprimary` into our executable, there's no dependency chain to worry about as `libprimary` has all the symbols. ## Build targets: diff --git a/depends/packages/bdb.mk b/depends/packages/bdb.mk index b679438c6f65a..be82b0d309817 100644 --- a/depends/packages/bdb.mk +++ b/depends/packages/bdb.mk @@ -4,18 +4,19 @@ $(package)_download_path=https://download.oracle.com/berkeley-db $(package)_file_name=db-$($(package)_version).NC.tar.gz $(package)_sha256_hash=12edc0df75bf9abd7f82f821795bcee50f42cb2e5f76a6a281b85732798364ef $(package)_build_subdir=build_unix +$(package)_patches=clang_cxx_11.patch define $(package)_set_vars $(package)_config_opts=--disable-shared --enable-cxx --disable-replication --enable-option-checking $(package)_config_opts_mingw32=--enable-mingw -$(package)_config_opts_linux=--with-pic -$(package)_cxxflags=-std=c++11 +$(package)_cflags+=-Wno-error=implicit-function-declaration -Wno-error=format-security -Wno-error=implicit-int +$(package)_cppflags_freebsd=-D_XOPEN_SOURCE=600 -D__BSD_VISIBLE=1 +$(package)_cppflags_netbsd=-D_XOPEN_SOURCE=600 $(package)_cppflags_mingw32=-DUNICODE -D_UNICODE endef define $(package)_preprocess_cmds - sed -i.old 's/__atomic_compare_exchange/__atomic_compare_exchange_db/' dbinc/atomic.h && \ - sed -i.old 's/atomic_init/atomic_init_db/' dbinc/atomic.h mp/mp_region.c mp/mp_mvcc.c mp/mp_fget.c mutex/mut_method.c mutex/mut_tas.c && \ + patch -p1 < $($(package)_patch_dir)/clang_cxx_11.patch && \ cp -f $(BASEDIR)/config.guess $(BASEDIR)/config.sub dist endef diff --git a/depends/packages/boost.mk b/depends/packages/boost.mk index cbe4fe4d970bf..938e9971ba88c 100644 --- a/depends/packages/boost.mk +++ b/depends/packages/boost.mk @@ -1,45 +1,10 @@ package=boost -$(package)_version=1_70_0 -$(package)_download_path=https://dl.bintray.com/boostorg/release/1.70.0/source/ -$(package)_file_name=$(package)_$($(package)_version).tar.bz2 -$(package)_sha256_hash=430ae8354789de4fd19ee52f3b1f739e1fba576f0aded0897c3c2bc00fb38778 - -define $(package)_set_vars -$(package)_config_opts_release=variant=release -$(package)_config_opts_debug=variant=debug -$(package)_config_opts=--layout=tagged --build-type=complete --user-config=user-config.jam -$(package)_config_opts+=threading=multi link=static -sNO_BZIP2=1 -sNO_ZLIB=1 -$(package)_config_opts_linux=threadapi=pthread runtime-link=shared -$(package)_config_opts_darwin=--toolset=clang-darwin runtime-link=shared -$(package)_config_opts_mingw32=binary-format=pe target-os=windows threadapi=win32 runtime-link=static -$(package)_config_opts_x86_64_mingw32=address-model=64 -$(package)_config_opts_i686_mingw32=address-model=32 -$(package)_config_opts_i686_linux=address-model=32 architecture=x86 -$(package)_config_opts_i686_android=address-model=32 -$(package)_config_opts_aarch64_android=address-model=64 -$(package)_config_opts_x86_64_android=address-model=64 -$(package)_config_opts_armv7a_android=address-model=32 -$(package)_toolset_$(host_os)=gcc -$(package)_archiver_$(host_os)=$($(package)_ar) -$(package)_toolset_darwin=clang-darwin -$(package)_config_libraries=filesystem,system,thread,test -$(package)_cxxflags=-std=c++11 -fvisibility=hidden -$(package)_cxxflags_linux=-fPIC -$(package)_cxxflags_android=-fPIC -endef - -define $(package)_preprocess_cmds - echo "using $($(package)_toolset_$(host_os)) : : $($(package)_cxx) : 
\"$($(package)_cxxflags) $($(package)_cppflags)\" \"$($(package)_ldflags)\" \"$($(package)_archiver_$(host_os))\" \"$(host_STRIP)\" \"$(host_RANLIB)\" \"$(host_WINDRES)\" : ;" > user-config.jam -endef - -define $(package)_config_cmds - ./bootstrap.sh --without-icu --with-libraries=$($(package)_config_libraries) -endef - -define $(package)_build_cmds - ./b2 -d2 -j2 -d1 --prefix=$($(package)_staging_prefix_dir) $($(package)_config_opts) stage -endef +$(package)_version=1.81.0 +$(package)_download_path=https://archives.boost.io/release/$($(package)_version)/source/ +$(package)_file_name=boost_$(subst .,_,$($(package)_version)).tar.gz +$(package)_sha256_hash=205666dea9f6a7cfed87c7a6dfbeb52a2c1b9de55712c9c1a87735d7181452b6 define $(package)_stage_cmds - ./b2 -d0 -j4 --prefix=$($(package)_staging_prefix_dir) $($(package)_config_opts) install + mkdir -p $($(package)_staging_prefix_dir)/include && \ + cp -r boost $($(package)_staging_prefix_dir)/include endef diff --git a/depends/packages/capnp.mk b/depends/packages/capnp.mk new file mode 100644 index 0000000000000..0c211cbc455df --- /dev/null +++ b/depends/packages/capnp.mk @@ -0,0 +1,29 @@ +package=capnp +$(package)_version=$(native_$(package)_version) +$(package)_download_path=$(native_$(package)_download_path) +$(package)_download_file=$(native_$(package)_download_file) +$(package)_file_name=$(native_$(package)_file_name) +$(package)_sha256_hash=$(native_$(package)_sha256_hash) + +define $(package)_set_vars := + $(package)_config_opts := -DBUILD_TESTING=OFF + $(package)_config_opts += -DWITH_OPENSSL=OFF + $(package)_config_opts += -DWITH_ZLIB=OFF + $(package)_cxxflags += -ffile-prefix-map=$$($(package)_extract_dir)=/usr +endef + +define $(package)_config_cmds + $($(package)_cmake) . +endef + +define $(package)_build_cmds + $(MAKE) +endef + +define $(package)_stage_cmds + $(MAKE) DESTDIR=$($(package)_staging_dir) install +endef + +define $(package)_postprocess_cmds + rm -rf lib/pkgconfig +endef diff --git a/depends/packages/expat.mk b/depends/packages/expat.mk index 902fe43be2679..fb7d50993834d 100644 --- a/depends/packages/expat.mk +++ b/depends/packages/expat.mk @@ -1,17 +1,27 @@ package=expat -$(package)_version=2.2.7 -$(package)_download_path=https://github.com/libexpat/libexpat/releases/download/R_2_2_7/ -$(package)_file_name=$(package)-$($(package)_version).tar.bz2 -$(package)_sha256_hash=cbc9102f4a31a8dafd42d642e9a3aa31e79a0aedaa1f6efd2795ebc83174ec18 +$(package)_version=2.4.8 +$(package)_download_path=https://github.com/libexpat/libexpat/releases/download/R_$(subst .,_,$($(package)_version))/ +$(package)_file_name=$(package)-$($(package)_version).tar.xz +$(package)_sha256_hash=f79b8f904b749e3e0d20afeadecf8249c55b2e32d4ebb089ae378df479dcaf25 +$(package)_build_subdir=build +$(package)_patches += cmake_minimum.patch +# -D_DEFAULT_SOURCE defines __USE_MISC, which exposes additional +# definitions in endian.h, which are required for a working +# endianness check in configure when building with -flto. 
define $(package)_set_vars - $(package)_config_opts=--disable-shared --without-docbook --without-tests --without-examples - $(package)_config_opts += --disable-dependency-tracking --enable-option-checking - $(package)_config_opts_linux=--with-pic + $(package)_config_opts := -DCMAKE_BUILD_TYPE=None -DEXPAT_BUILD_TOOLS=OFF + $(package)_config_opts += -DEXPAT_BUILD_EXAMPLES=OFF -DEXPAT_BUILD_TESTS=OFF + $(package)_config_opts += -DBUILD_SHARED_LIBS=OFF + $(package)_cppflags += -D_DEFAULT_SOURCE +endef + +define $(package)_preprocess_cmds + patch -p1 < $($(package)_patch_dir)/cmake_minimum.patch endef define $(package)_config_cmds - $($(package)_autoconf) + $($(package)_cmake) -S .. -B . endef define $(package)_build_cmds @@ -23,5 +33,5 @@ define $(package)_stage_cmds endef define $(package)_postprocess_cmds - rm lib/*.la + rm -rf share lib/cmake endef diff --git a/depends/packages/fontconfig.mk b/depends/packages/fontconfig.mk index 128599ba77332..6baaecc55a76b 100644 --- a/depends/packages/fontconfig.mk +++ b/depends/packages/fontconfig.mk @@ -1,26 +1,26 @@ package=fontconfig -$(package)_version=2.12.1 +$(package)_version=2.12.6 $(package)_download_path=https://www.freedesktop.org/software/fontconfig/release/ -$(package)_file_name=$(package)-$($(package)_version).tar.bz2 -$(package)_sha256_hash=b449a3e10c47e1d1c7a6ec6e2016cca73d3bd68fbbd4f0ae5cc6b573f7d6c7f3 +$(package)_file_name=$(package)-$($(package)_version).tar.gz +$(package)_sha256_hash=064b9ebf060c9e77011733ac9dc0e2ce92870b574cca2405e11f5353a683c334 $(package)_dependencies=freetype expat +$(package)_patches=gperf_header_regen.patch define $(package)_set_vars $(package)_config_opts=--disable-docs --disable-static --disable-libxml2 --disable-iconv $(package)_config_opts += --disable-dependency-tracking --enable-option-checking + $(package)_cflags += -Wno-implicit-function-declaration +endef + +define $(package)_preprocess_cmds + patch -p1 < $($(package)_patch_dir)/gperf_header_regen.patch endef define $(package)_config_cmds $($(package)_autoconf) endef -# 2.12.1 uses CHAR_WIDTH which is reserved and clashes with some glibc versions, but newer versions of fontconfig -# have broken makefiles which needlessly attempt to re-generate headers with gperf. -# Instead, change all uses of CHAR_WIDTH, and disable the rule that forces header re-generation. -# This can be removed once the upstream build is fixed. 
define $(package)_build_cmds - sed -i 's/CHAR_WIDTH/CHARWIDTH/g' fontconfig/fontconfig.h src/fcobjshash.gperf src/fcobjs.h src/fcobjshash.h && \ - sed -i 's/fcobjshash.h: fcobjshash.gperf/fcobjshash.h:/' src/Makefile && \ $(MAKE) endef @@ -29,5 +29,5 @@ define $(package)_stage_cmds endef define $(package)_postprocess_cmds - rm lib/*.la + rm -rf var lib/*.la endef diff --git a/depends/packages/freetype.mk b/depends/packages/freetype.mk index a1584608e19f7..fef0beaa7b498 100644 --- a/depends/packages/freetype.mk +++ b/depends/packages/freetype.mk @@ -1,17 +1,19 @@ package=freetype -$(package)_version=2.7.1 +$(package)_version=2.11.0 $(package)_download_path=https://download.savannah.gnu.org/releases/$(package) -$(package)_file_name=$(package)-$($(package)_version).tar.bz2 -$(package)_sha256_hash=3a3bb2c4e15ffb433f2032f50a5b5a92558206822e22bfe8cbe339af4aa82f88 +$(package)_file_name=$(package)-$($(package)_version).tar.xz +$(package)_sha256_hash=8bee39bd3968c4804b70614a0a3ad597299ad0e824bc8aad5ce8aaf48067bde7 +$(package)_build_subdir=build define $(package)_set_vars - $(package)_config_opts=--without-zlib --without-png --without-harfbuzz --without-bzip2 --disable-static - $(package)_config_opts += --enable-option-checking - $(package)_config_opts_linux=--with-pic + $(package)_config_opts := -DCMAKE_BUILD_TYPE=None -DBUILD_SHARED_LIBS=TRUE + $(package)_config_opts += -DCMAKE_DISABLE_FIND_PACKAGE_ZLIB=TRUE -DCMAKE_DISABLE_FIND_PACKAGE_PNG=TRUE + $(package)_config_opts += -DCMAKE_DISABLE_FIND_PACKAGE_HarfBuzz=TRUE -DCMAKE_DISABLE_FIND_PACKAGE_BZip2=TRUE + $(package)_config_opts += -DCMAKE_DISABLE_FIND_PACKAGE_BrotliDec=TRUE endef define $(package)_config_cmds - $($(package)_autoconf) + $($(package)_cmake) -S .. -B . endef define $(package)_build_cmds @@ -22,6 +24,3 @@ define $(package)_stage_cmds $(MAKE) DESTDIR=$($(package)_staging_dir) install endef -define $(package)_postprocess_cmds - rm lib/*.la -endef diff --git a/depends/packages/libXau.mk b/depends/packages/libXau.mk index 4c55c2df04b5c..6bafc4f41a677 100644 --- a/depends/packages/libXau.mk +++ b/depends/packages/libXau.mk @@ -1,8 +1,8 @@ package=libXau -$(package)_version=1.0.8 +$(package)_version=1.0.9 $(package)_download_path=https://xorg.freedesktop.org/releases/individual/lib/ -$(package)_file_name=$(package)-$($(package)_version).tar.bz2 -$(package)_sha256_hash=fdd477320aeb5cdd67272838722d6b7d544887dfe7de46e1e7cc0c27c2bea4f2 +$(package)_file_name=$(package)-$($(package)_version).tar.gz +$(package)_sha256_hash=1f123d8304b082ad63a9e89376400a3b1d4c29e67e3ea07b3f659cccca690eea $(package)_dependencies=xproto # When updating this package, check the default value of @@ -10,7 +10,6 @@ $(package)_dependencies=xproto define $(package)_set_vars $(package)_config_opts=--disable-shared --disable-lint-library --without-lint $(package)_config_opts += --disable-dependency-tracking --enable-option-checking - $(package)_config_opts_linux=--with-pic endef define $(package)_preprocess_cmds @@ -30,5 +29,5 @@ define $(package)_stage_cmds endef define $(package)_postprocess_cmds - rm lib/*.la + rm -rf share lib/*.la endef diff --git a/depends/packages/libevent.mk b/depends/packages/libevent.mk index eb45e14f6fb95..4c05e8a0a7425 100644 --- a/depends/packages/libevent.mk +++ b/depends/packages/libevent.mk @@ -1,23 +1,34 @@ package=libevent -$(package)_version=2.1.11-stable -$(package)_download_path=https://github.com/libevent/libevent/archive/ -$(package)_file_name=release-$($(package)_version).tar.gz 
-$(package)_sha256_hash=229393ab2bf0dc94694f21836846b424f3532585bac3468738b7bf752c03901e +$(package)_version=2.1.12-stable +$(package)_download_path=https://github.com/libevent/libevent/releases/download/release-$($(package)_version)/ +$(package)_file_name=$(package)-$($(package)_version).tar.gz +$(package)_sha256_hash=92e6de1be9ec176428fd2367677e61ceffc2ee1cb119035037a27d346b0403bb +$(package)_patches=cmake_fixups.patch +$(package)_patches+=fix_mingw_link.patch +$(package)_build_subdir=build -define $(package)_preprocess_cmds - ./autogen.sh +# When building for Windows, we set _WIN32_WINNT to target the same Windows +# version as we do in configure. Due to quirks in libevents build system, this +# is also required to enable support for ipv6. See #19375. +define $(package)_set_vars + $(package)_config_opts=-DEVENT__DISABLE_BENCHMARK=ON -DEVENT__DISABLE_OPENSSL=ON + $(package)_config_opts+=-DEVENT__DISABLE_SAMPLES=ON -DEVENT__DISABLE_REGRESS=ON + $(package)_config_opts+=-DEVENT__DISABLE_TESTS=ON -DEVENT__LIBRARY_TYPE=STATIC + $(package)_cppflags += -D_GNU_SOURCE + $(package)_cppflags_mingw32=-D_WIN32_WINNT=0x0601 + + ifeq ($(NO_HARDEN),) + $(package)_cppflags+=-D_FORTIFY_SOURCE=3 + endif endef -define $(package)_set_vars - $(package)_config_opts=--disable-shared --disable-openssl --disable-libevent-regress --disable-samples - $(package)_config_opts += --disable-dependency-tracking --enable-option-checking - $(package)_config_opts_release=--disable-debug-mode - $(package)_config_opts_linux=--with-pic - $(package)_config_opts_android=--with-pic +define $(package)_preprocess_cmds + patch -p1 < $($(package)_patch_dir)/cmake_fixups.patch && \ + patch -p1 < $($(package)_patch_dir)/fix_mingw_link.patch endef define $(package)_config_cmds - $($(package)_autoconf) + $($(package)_cmake) -S .. -B . endef define $(package)_build_cmds @@ -29,5 +40,7 @@ define $(package)_stage_cmds endef define $(package)_postprocess_cmds - rm lib/*.la + rm -rf bin && \ + rm include/ev*.h && \ + rm include/event2/*_compat.h endef diff --git a/depends/packages/libmultiprocess.mk b/depends/packages/libmultiprocess.mk new file mode 100644 index 0000000000000..a181e05100bba --- /dev/null +++ b/depends/packages/libmultiprocess.mk @@ -0,0 +1,29 @@ +package=libmultiprocess +$(package)_version=$(native_$(package)_version) +$(package)_download_path=$(native_$(package)_download_path) +$(package)_file_name=$(native_$(package)_file_name) +$(package)_sha256_hash=$(native_$(package)_sha256_hash) +$(package)_dependencies=native_$(package) capnp +ifneq ($(host),$(build)) +$(package)_dependencies += native_capnp +endif + +define $(package)_set_vars := +ifneq ($(host),$(build)) +$(package)_config_opts := -DCAPNP_EXECUTABLE="$$(native_capnp_prefixbin)/capnp" +$(package)_config_opts += -DCAPNPC_CXX_EXECUTABLE="$$(native_capnp_prefixbin)/capnpc-c++" +endif +$(package)_cxxflags += -ffile-prefix-map=$$($(package)_extract_dir)=/usr +endef + +define $(package)_config_cmds + $($(package)_cmake) . 
+endef + +define $(package)_build_cmds + $(MAKE) multiprocess +endef + +define $(package)_stage_cmds + $(MAKE) DESTDIR=$($(package)_staging_dir) install-lib +endef diff --git a/depends/packages/libxcb.mk b/depends/packages/libxcb.mk index 2204b381958b8..036eaf6560a11 100644 --- a/depends/packages/libxcb.mk +++ b/depends/packages/libxcb.mk @@ -1,39 +1,31 @@ package=libxcb -$(package)_version=1.10 +$(package)_version=1.14 $(package)_download_path=https://xcb.freedesktop.org/dist -$(package)_file_name=$(package)-$($(package)_version).tar.bz2 -$(package)_sha256_hash=98d9ab05b636dd088603b64229dd1ab2d2cc02ab807892e107d674f9c3f2d5b5 +$(package)_file_name=$(package)-$($(package)_version).tar.xz +$(package)_sha256_hash=a55ed6db98d43469801262d81dc2572ed124edc3db31059d4e9916eb9f844c34 $(package)_dependencies=xcb_proto libXau +$(package)_patches = remove_pthread_stubs.patch define $(package)_set_vars -$(package)_config_opts=--disable-static --disable-build-docs --without-doxygen --without-launchd +$(package)_config_opts=--disable-static --disable-devel-docs --without-doxygen --without-launchd $(package)_config_opts += --disable-dependency-tracking --enable-option-checking -# Because we pass -qt-xcb to Qt, it will compile in a set of xcb helper libraries and extensions, -# so we skip building all of the extensions here. -# More info is available from: https://doc.qt.io/qt-5.9/linux-requirements.html +# Disable unneeded extensions. +# More info is available from: https://doc.qt.io/qt-5.15/linux-requirements.html $(package)_config_opts += --disable-composite --disable-damage --disable-dpms $(package)_config_opts += --disable-dri2 --disable-dri3 --disable-glx -$(package)_config_opts += --disable-present --disable-randr --disable-record -$(package)_config_opts += --disable-render --disable-resource --disable-screensaver -$(package)_config_opts += --disable-shape --disable-shm --disable-sync -$(package)_config_opts += --disable-xevie --disable-xfixes --disable-xfree86-dri -$(package)_config_opts += --disable-xinerama --disable-xinput --disable-xkb -$(package)_config_opts += --disable-xprint --disable-selinux --disable-xtest -$(package)_config_opts += --disable-xv --disable-xvmc +$(package)_config_opts += --disable-present --disable-record --disable-resource +$(package)_config_opts += --disable-screensaver --disable-xevie --disable-xfree86-dri +$(package)_config_opts += --disable-xinput --disable-xprint --disable-selinux +$(package)_config_opts += --disable-xtest --disable-xv --disable-xvmc endef define $(package)_preprocess_cmds - cp -f $(BASEDIR)/config.guess $(BASEDIR)/config.sub build-aux &&\ - sed "s/pthread-stubs//" -i configure + cp -f $(BASEDIR)/config.guess $(BASEDIR)/config.sub build-aux && \ + patch -p1 -i $($(package)_patch_dir)/remove_pthread_stubs.patch endef -# Don't install xcb headers to the default path in order to work around a qt -# build issue: https://bugreports.qt.io/browse/QTBUG-34748 -# When using qt's internal libxcb, it may end up finding the real headers in -# depends staging. Use a non-default path to avoid that. 
- define $(package)_config_cmds - $($(package)_autoconf) --includedir=$(host_prefix)/include/xcb-shared + $($(package)_autoconf) endef define $(package)_build_cmds @@ -45,5 +37,5 @@ define $(package)_stage_cmds endef define $(package)_postprocess_cmds - rm -rf share/man share/doc lib/*.la + rm -rf share lib/*.la endef diff --git a/depends/packages/libxcb_util.mk b/depends/packages/libxcb_util.mk new file mode 100644 index 0000000000000..dc4456f85c84e --- /dev/null +++ b/depends/packages/libxcb_util.mk @@ -0,0 +1,31 @@ +package=libxcb_util +$(package)_version=0.4.0 +$(package)_download_path=https://xcb.freedesktop.org/dist +$(package)_file_name=xcb-util-$($(package)_version).tar.gz +$(package)_sha256_hash=0ed0934e2ef4ddff53fcc70fc64fb16fe766cd41ee00330312e20a985fd927a7 +$(package)_dependencies=libxcb + +define $(package)_set_vars +$(package)_config_opts = --disable-shared --disable-devel-docs --without-doxygen +$(package)_config_opts += --disable-dependency-tracking --enable-option-checking +endef + +define $(package)_preprocess_cmds + cp -f $(BASEDIR)/config.guess $(BASEDIR)/config.sub . +endef + +define $(package)_config_cmds + $($(package)_autoconf) +endef + +define $(package)_build_cmds + $(MAKE) +endef + +define $(package)_stage_cmds + $(MAKE) DESTDIR=$($(package)_staging_dir) install +endef + +define $(package)_postprocess_cmds + rm -rf share/man share/doc lib/*.la +endef diff --git a/depends/packages/libxcb_util_image.mk b/depends/packages/libxcb_util_image.mk new file mode 100644 index 0000000000000..2228250fecb16 --- /dev/null +++ b/depends/packages/libxcb_util_image.mk @@ -0,0 +1,31 @@ +package=libxcb_util_image +$(package)_version=0.4.0 +$(package)_download_path=https://xcb.freedesktop.org/dist +$(package)_file_name=xcb-util-image-$($(package)_version).tar.gz +$(package)_sha256_hash=cb2c86190cf6216260b7357a57d9100811bb6f78c24576a3a5bfef6ad3740a42 +$(package)_dependencies=libxcb libxcb_util + +define $(package)_set_vars +$(package)_config_opts=--disable-static --disable-devel-docs --without-doxygen +$(package)_config_opts+= --disable-dependency-tracking --enable-option-checking +endef + +define $(package)_preprocess_cmds + cp -f $(BASEDIR)/config.guess $(BASEDIR)/config.sub . +endef + +define $(package)_config_cmds + $($(package)_autoconf) +endef + +define $(package)_build_cmds + $(MAKE) +endef + +define $(package)_stage_cmds + $(MAKE) DESTDIR=$($(package)_staging_dir) install +endef + +define $(package)_postprocess_cmds + rm -rf share/man share/doc lib/*.la +endef diff --git a/depends/packages/libxcb_util_keysyms.mk b/depends/packages/libxcb_util_keysyms.mk new file mode 100644 index 0000000000000..56bc33d258da4 --- /dev/null +++ b/depends/packages/libxcb_util_keysyms.mk @@ -0,0 +1,31 @@ +package=libxcb_util_keysyms +$(package)_version=0.4.0 +$(package)_download_path=https://xcb.freedesktop.org/dist +$(package)_file_name=xcb-util-keysyms-$($(package)_version).tar.gz +$(package)_sha256_hash=0807cf078fbe38489a41d755095c58239e1b67299f14460dec2ec811e96caa96 +$(package)_dependencies=libxcb xproto + +define $(package)_set_vars +$(package)_config_opts=--disable-static --disable-devel-docs --without-doxygen +$(package)_config_opts += --disable-dependency-tracking --enable-option-checking +endef + +define $(package)_preprocess_cmds + cp -f $(BASEDIR)/config.guess $(BASEDIR)/config.sub . 
+endef + +define $(package)_config_cmds + $($(package)_autoconf) +endef + +define $(package)_build_cmds + $(MAKE) +endef + +define $(package)_stage_cmds + $(MAKE) DESTDIR=$($(package)_staging_dir) install +endef + +define $(package)_postprocess_cmds + rm -rf share/man share/doc lib/*.la +endef diff --git a/depends/packages/libxcb_util_render.mk b/depends/packages/libxcb_util_render.mk new file mode 100644 index 0000000000000..ee2883fedaa1c --- /dev/null +++ b/depends/packages/libxcb_util_render.mk @@ -0,0 +1,31 @@ +package=libxcb_util_render +$(package)_version=0.3.9 +$(package)_download_path=https://xcb.freedesktop.org/dist +$(package)_file_name=xcb-util-renderutil-$($(package)_version).tar.gz +$(package)_sha256_hash=55eee797e3214fe39d0f3f4d9448cc53cffe06706d108824ea37bb79fcedcad5 +$(package)_dependencies=libxcb + +define $(package)_set_vars +$(package)_config_opts=--disable-static --disable-devel-docs --without-doxygen +$(package)_config_opts += --disable-dependency-tracking --enable-option-checking +endef + +define $(package)_preprocess_cmds + cp -f $(BASEDIR)/config.guess $(BASEDIR)/config.sub . +endef + +define $(package)_config_cmds + $($(package)_autoconf) +endef + +define $(package)_build_cmds + $(MAKE) +endef + +define $(package)_stage_cmds + $(MAKE) DESTDIR=$($(package)_staging_dir) install +endef + +define $(package)_postprocess_cmds + rm -rf share/man share/doc lib/*.la +endef diff --git a/depends/packages/libxcb_util_wm.mk b/depends/packages/libxcb_util_wm.mk new file mode 100644 index 0000000000000..a68fd23f8a779 --- /dev/null +++ b/depends/packages/libxcb_util_wm.mk @@ -0,0 +1,31 @@ +package=libxcb_util_wm +$(package)_version=0.4.1 +$(package)_download_path=https://xcb.freedesktop.org/dist +$(package)_file_name=xcb-util-wm-$($(package)_version).tar.gz +$(package)_sha256_hash=038b39c4bdc04a792d62d163ba7908f4bb3373057208c07110be73c1b04b8334 +$(package)_dependencies=libxcb + +define $(package)_set_vars +$(package)_config_opts=--disable-static --disable-devel-docs --without-doxygen +$(package)_config_opts += --disable-dependency-tracking --enable-option-checking +endef + +define $(package)_preprocess_cmds + cp -f $(BASEDIR)/config.guess $(BASEDIR)/config.sub . +endef + +define $(package)_config_cmds + $($(package)_autoconf) +endef + +define $(package)_build_cmds + $(MAKE) +endef + +define $(package)_stage_cmds + $(MAKE) DESTDIR=$($(package)_staging_dir) install +endef + +define $(package)_postprocess_cmds + rm -rf share/man share/doc lib/*.la +endef diff --git a/depends/packages/libxkbcommon.mk b/depends/packages/libxkbcommon.mk new file mode 100644 index 0000000000000..bcdcf671f71ad --- /dev/null +++ b/depends/packages/libxkbcommon.mk @@ -0,0 +1,37 @@ +package=libxkbcommon +$(package)_version=0.8.4 +$(package)_download_path=https://xkbcommon.org/download/ +$(package)_file_name=$(package)-$($(package)_version).tar.xz +$(package)_sha256_hash=60ddcff932b7fd352752d51a5c4f04f3d0403230a584df9a2e0d5ed87c486c8b +$(package)_dependencies=libxcb + +# This package explicitly enables -Werror=array-bounds, which causes build failures +# with GCC 12.1+. Work around that by turning errors back into warnings. 
+# This workaround could be dropped once the package is updated; however, updating would +# require switching to a different build system (Meson). +define $(package)_set_vars +$(package)_config_opts = --enable-option-checking --disable-dependency-tracking +$(package)_config_opts += --disable-static --disable-docs +$(package)_cflags += -Wno-error=array-bounds +endef + +define $(package)_preprocess_cmds + cp -f $(BASEDIR)/config.guess $(BASEDIR)/config.sub build-aux +endef + +define $(package)_config_cmds + $($(package)_autoconf) +endef + +define $(package)_build_cmds + $(MAKE) +endef + +define $(package)_stage_cmds + $(MAKE) DESTDIR=$($(package)_staging_dir) install +endef + +define $(package)_postprocess_cmds + rm lib/*.la +endef + diff --git a/depends/packages/miniupnpc.mk b/depends/packages/miniupnpc.mk index fdbe22cda63d4..f9e114b49546f 100644 --- a/depends/packages/miniupnpc.mk +++ b/depends/packages/miniupnpc.mk @@ -1,28 +1,36 @@ package=miniupnpc -$(package)_version=2.0.20180203 +$(package)_version=2.2.7 $(package)_download_path=https://miniupnp.tuxfamily.org/files/ $(package)_file_name=$(package)-$($(package)_version).tar.gz -$(package)_sha256_hash=90dda8c7563ca6cd4a83e23b3c66dbbea89603a1675bfdb852897c2c9cc220b7 +$(package)_sha256_hash=b0c3a27056840fd0ec9328a5a9bac3dc5e0ec6d2e8733349cf577b0aa1e70ac1 +$(package)_patches=dont_leak_info.patch cmake_get_src_addr.patch fix_windows_snprintf.patch +$(package)_build_subdir=build define $(package)_set_vars -$(package)_build_opts=CC="$($(package)_cc)" -$(package)_build_opts_darwin=LIBTOOL="$($(package)_libtool)" -$(package)_build_opts_mingw32=-f Makefile.mingw -$(package)_build_env+=CFLAGS="$($(package)_cflags) $($(package)_cppflags)" AR="$($(package)_ar)" +$(package)_config_opts = -DUPNPC_BUILD_SAMPLE=OFF -DUPNPC_BUILD_SHARED=OFF +$(package)_config_opts += -DUPNPC_BUILD_STATIC=ON -DUPNPC_BUILD_TESTS=OFF +$(package)_config_opts_mingw32 += -DMINIUPNPC_TARGET_WINDOWS_VERSION=0x0601 endef define $(package)_preprocess_cmds - mkdir dll && \ - sed -e 's|MINIUPNPC_VERSION_STRING \"version\"|MINIUPNPC_VERSION_STRING \"$($(package)_version)\"|' -e 's|OS/version|$(host)|' miniupnpcstrings.h.in > miniupnpcstrings.h && \ - sed -i.old "s|miniupnpcstrings.h: miniupnpcstrings.h.in wingenminiupnpcstrings|miniupnpcstrings.h: miniupnpcstrings.h.in|" Makefile.mingw + patch -p1 < $($(package)_patch_dir)/dont_leak_info.patch && \ + patch -p1 < $($(package)_patch_dir)/cmake_get_src_addr.patch && \ + patch -p1 < $($(package)_patch_dir)/fix_windows_snprintf.patch +endef + +define $(package)_config_cmds + $($(package)_cmake) -S .. -B . endef define $(package)_build_cmds - $(MAKE) libminiupnpc.a $($(package)_build_opts) + $(MAKE) endef define $(package)_stage_cmds - mkdir -p $($(package)_staging_prefix_dir)/include/miniupnpc $($(package)_staging_prefix_dir)/lib &&\ - install *.h $($(package)_staging_prefix_dir)/include/miniupnpc &&\ - install libminiupnpc.a $($(package)_staging_prefix_dir)/lib + cmake --install .
--prefix $($(package)_staging_prefix_dir) +endef + +define $(package)_postprocess_cmds + rm -rf bin && \ + rm -rf share endef diff --git a/depends/packages/native_biplist.mk b/depends/packages/native_biplist.mk deleted file mode 100644 index c3054cbd1a16e..0000000000000 --- a/depends/packages/native_biplist.mk +++ /dev/null @@ -1,15 +0,0 @@ -package=native_biplist -$(package)_version=1.0.3 -$(package)_download_path=https://bitbucket.org/wooster/biplist/downloads -$(package)_file_name=biplist-$($(package)_version).tar.gz -$(package)_sha256_hash=4c0549764c5fe50b28042ec21aa2e14fe1a2224e239a1dae77d9e7f3932aa4c6 -$(package)_install_libdir=$(build_prefix)/lib/python3/dist-packages - -define $(package)_build_cmds - python3 setup.py build -endef - -define $(package)_stage_cmds - mkdir -p $($(package)_install_libdir) && \ - python3 setup.py install --root=$($(package)_staging_dir) --prefix=$(build_prefix) --install-lib=$($(package)_install_libdir) -endef diff --git a/depends/packages/native_capnp.mk b/depends/packages/native_capnp.mk new file mode 100644 index 0000000000000..484e78d5d906b --- /dev/null +++ b/depends/packages/native_capnp.mk @@ -0,0 +1,28 @@ +package=native_capnp +$(package)_version=1.0.1 +$(package)_download_path=https://capnproto.org/ +$(package)_download_file=capnproto-c++-$($(package)_version).tar.gz +$(package)_file_name=capnproto-cxx-$($(package)_version).tar.gz +$(package)_sha256_hash=0f7f4b8a76a2cdb284fddef20de8306450df6dd031a47a15ac95bc43c3358e09 + +define $(package)_set_vars + $(package)_config_opts := -DBUILD_TESTING=OFF + $(package)_config_opts += -DWITH_OPENSSL=OFF + $(package)_config_opts += -DWITH_ZLIB=OFF +endef + +define $(package)_config_cmds + $($(package)_cmake) . +endef + +define $(package)_build_cmds + $(MAKE) +endef + +define $(package)_stage_cmds + $(MAKE) DESTDIR=$($(package)_staging_dir) install +endef + +define $(package)_postprocess_cmds + rm -rf lib/pkgconfig +endef diff --git a/depends/packages/native_cctools.mk b/depends/packages/native_cctools.mk deleted file mode 100644 index 4195230b4003e..0000000000000 --- a/depends/packages/native_cctools.mk +++ /dev/null @@ -1,78 +0,0 @@ -package=native_cctools -$(package)_version=3764b223c011574971ee3ae09ce968ba5dc2f00f -$(package)_download_path=https://github.com/tpoechtrager/cctools-port/archive -$(package)_file_name=$($(package)_version).tar.gz -$(package)_sha256_hash=3e35907bf376269a844df08e03cbb43e345c88125374f2228e03724b5f9a2a04 -$(package)_build_subdir=cctools -$(package)_clang_version=6.0.1 -$(package)_clang_download_path=https://releases.llvm.org/$($(package)_clang_version) -$(package)_clang_download_file=clang+llvm-$($(package)_clang_version)-x86_64-linux-gnu-ubuntu-14.04.tar.xz -$(package)_clang_file_name=clang-llvm-$($(package)_clang_version)-x86_64-linux-gnu-ubuntu-14.04.tar.xz -$(package)_clang_sha256_hash=fa5416553ca94a8c071a27134c094a5fb736fe1bd0ecc5ef2d9bc02754e1bef0 - -$(package)_libtapi_version=3efb201881e7a76a21e0554906cf306432539cef -$(package)_libtapi_download_path=https://github.com/tpoechtrager/apple-libtapi/archive -$(package)_libtapi_download_file=$($(package)_libtapi_version).tar.gz -$(package)_libtapi_file_name=$($(package)_libtapi_version).tar.gz -$(package)_libtapi_sha256_hash=380c1ca37cfa04a8699d0887a8d3ee1ad27f3d08baba78887c73b09485c0fbd3 - -$(package)_extra_sources=$($(package)_clang_file_name) -$(package)_extra_sources += $($(package)_libtapi_file_name) - -define $(package)_fetch_cmds -$(call 
fetch_file,$(package),$($(package)_download_path),$($(package)_download_file),$($(package)_file_name),$($(package)_sha256_hash)) && \ -$(call fetch_file,$(package),$($(package)_clang_download_path),$($(package)_clang_download_file),$($(package)_clang_file_name),$($(package)_clang_sha256_hash)) && \ -$(call fetch_file,$(package),$($(package)_libtapi_download_path),$($(package)_libtapi_download_file),$($(package)_libtapi_file_name),$($(package)_libtapi_sha256_hash)) -endef - -define $(package)_extract_cmds - mkdir -p $($(package)_extract_dir) && \ - echo "$($(package)_sha256_hash) $($(package)_source)" > $($(package)_extract_dir)/.$($(package)_file_name).hash && \ - echo "$($(package)_clang_sha256_hash) $($(package)_source_dir)/$($(package)_clang_file_name)" >> $($(package)_extract_dir)/.$($(package)_file_name).hash && \ - echo "$($(package)_libtapi_sha256_hash) $($(package)_source_dir)/$($(package)_libtapi_file_name)" >> $($(package)_extract_dir)/.$($(package)_file_name).hash && \ - $(build_SHA256SUM) -c $($(package)_extract_dir)/.$($(package)_file_name).hash && \ - mkdir -p toolchain/bin toolchain/lib/clang/$($(package)_clang_version)/include && \ - mkdir -p libtapi && \ - tar --no-same-owner --strip-components=1 -C libtapi -xf $($(package)_source_dir)/$($(package)_libtapi_file_name) && \ - tar --no-same-owner --strip-components=1 -C toolchain -xf $($(package)_source_dir)/$($(package)_clang_file_name) && \ - rm -f toolchain/lib/libc++abi.so* && \ - tar --no-same-owner --strip-components=1 -xf $($(package)_source) -endef - -define $(package)_set_vars - $(package)_config_opts=--target=$(host) --disable-lto-support --with-libtapi=$($(package)_extract_dir) - $(package)_ldflags+=-Wl,-rpath=\\$$$$$$$$\$$$$$$$$ORIGIN/../lib - $(package)_cc=$($(package)_extract_dir)/toolchain/bin/clang - $(package)_cxx=$($(package)_extract_dir)/toolchain/bin/clang++ -endef - -define $(package)_preprocess_cmds - CC=$($(package)_cc) CXX=$($(package)_cxx) INSTALLPREFIX=$($(package)_extract_dir) ./libtapi/build.sh && \ - CC=$($(package)_cc) CXX=$($(package)_cxx) INSTALLPREFIX=$($(package)_extract_dir) ./libtapi/install.sh && \ - sed -i.old "/define HAVE_PTHREADS/d" $($(package)_build_subdir)/ld64/src/ld/InputFiles.h -endef - -define $(package)_config_cmds - $($(package)_autoconf) -endef - -define $(package)_build_cmds - $(MAKE) -endef - -define $(package)_stage_cmds - $(MAKE) DESTDIR=$($(package)_staging_dir) install && \ - mkdir -p $($(package)_staging_prefix_dir)/lib/ && \ - cd $($(package)_extract_dir) && \ - cp lib/libtapi.so.6 $($(package)_staging_prefix_dir)/lib/ && \ - cd $($(package)_extract_dir)/toolchain && \ - mkdir -p $($(package)_staging_prefix_dir)/lib/clang/$($(package)_clang_version)/include && \ - mkdir -p $($(package)_staging_prefix_dir)/bin $($(package)_staging_prefix_dir)/include && \ - cp bin/clang $($(package)_staging_prefix_dir)/bin/ &&\ - cp -P bin/clang++ $($(package)_staging_prefix_dir)/bin/ &&\ - cp lib/libLTO.so $($(package)_staging_prefix_dir)/lib/ && \ - cp -rf lib/clang/$($(package)_clang_version)/include/* $($(package)_staging_prefix_dir)/lib/clang/$($(package)_clang_version)/include/ && \ - cp bin/llvm-dsymutil $($(package)_staging_prefix_dir)/bin/$(host)-dsymutil && \ - if `test -d include/c++/`; then cp -rf include/c++/ $($(package)_staging_prefix_dir)/include/; fi && \ - if `test -d lib/c++/`; then cp -rf lib/c++/ $($(package)_staging_prefix_dir)/lib/; fi -endef diff --git a/depends/packages/native_cdrkit.mk b/depends/packages/native_cdrkit.mk deleted file mode 100644 index 
8243458ec858e..0000000000000 --- a/depends/packages/native_cdrkit.mk +++ /dev/null @@ -1,26 +0,0 @@ -package=native_cdrkit -$(package)_version=1.1.11 -$(package)_download_path=https://distro.ibiblio.org/fatdog/source/600/c -$(package)_file_name=cdrkit-$($(package)_version).tar.bz2 -$(package)_sha256_hash=b50d64c214a65b1a79afe3a964c691931a4233e2ba605d793eb85d0ac3652564 -$(package)_patches=cdrkit-deterministic.patch - -define $(package)_preprocess_cmds - patch -p1 < $($(package)_patch_dir)/cdrkit-deterministic.patch -endef - -define $(package)_config_cmds - cmake -DCMAKE_INSTALL_PREFIX=$(build_prefix) -endef - -define $(package)_build_cmds - $(MAKE) genisoimage -endef - -define $(package)_stage_cmds - $(MAKE) DESTDIR=$($(package)_staging_dir) -C genisoimage install -endef - -define $(package)_postprocess_cmds - rm bin/isovfy bin/isoinfo bin/isodump bin/isodebug bin/devdump -endef diff --git a/depends/packages/native_ds_store.mk b/depends/packages/native_ds_store.mk deleted file mode 100644 index f99b689ecdc74..0000000000000 --- a/depends/packages/native_ds_store.mk +++ /dev/null @@ -1,16 +0,0 @@ -package=native_ds_store -$(package)_version=1.1.2 -$(package)_download_path=https://github.com/al45tair/ds_store/archive/ -$(package)_file_name=v$($(package)_version).tar.gz -$(package)_sha256_hash=3b3ecb7bf0a5157f5b6010bc3af7c141fb0ad3527084e63336220d22744bc20c -$(package)_install_libdir=$(build_prefix)/lib/python3/dist-packages -$(package)_dependencies=native_biplist - -define $(package)_build_cmds - python3 setup.py build -endef - -define $(package)_stage_cmds - mkdir -p $($(package)_install_libdir) && \ - python3 setup.py install --root=$($(package)_staging_dir) --prefix=$(build_prefix) --install-lib=$($(package)_install_libdir) -endef diff --git a/depends/packages/native_libdmg-hfsplus.mk b/depends/packages/native_libdmg-hfsplus.mk deleted file mode 100644 index c0f0ce74de8b6..0000000000000 --- a/depends/packages/native_libdmg-hfsplus.mk +++ /dev/null @@ -1,24 +0,0 @@ -package=native_libdmg-hfsplus -$(package)_version=7ac55ec64c96f7800d9818ce64c79670e7f02b67 -$(package)_download_path=https://github.com/planetbeing/libdmg-hfsplus/archive -$(package)_file_name=$($(package)_version).tar.gz -$(package)_sha256_hash=56fbdc48ec110966342f0ecddd6f8f89202f4143ed2a3336e42bbf88f940850c -$(package)_build_subdir=build -$(package)_patches=remove-libcrypto-dependency.patch - -define $(package)_preprocess_cmds - patch -p1 < $($(package)_patch_dir)/remove-libcrypto-dependency.patch && \ - mkdir build -endef - -define $(package)_config_cmds - cmake -DCMAKE_INSTALL_PREFIX:PATH=$(build_prefix) -DCMAKE_C_FLAGS="-Wl,--build-id=none" .. -endef - -define $(package)_build_cmds - $(MAKE) -C dmg -endef - -define $(package)_stage_cmds - $(MAKE) DESTDIR=$($(package)_staging_dir) -C dmg install -endef diff --git a/depends/packages/native_libmultiprocess.mk b/depends/packages/native_libmultiprocess.mk new file mode 100644 index 0000000000000..3fa5faa4ba1ed --- /dev/null +++ b/depends/packages/native_libmultiprocess.mk @@ -0,0 +1,18 @@ +package=native_libmultiprocess +$(package)_version=015e95f7ebaa47619a213a19801e7fffafc56864 +$(package)_download_path=https://github.com/chaincodelabs/libmultiprocess/archive +$(package)_file_name=$($(package)_version).tar.gz +$(package)_sha256_hash=4b1266b121337f3f6f37e1863fba91c1a5ee9ad126bcffc6fe6b9ca47ad050a1 +$(package)_dependencies=native_capnp + +define $(package)_config_cmds + $($(package)_cmake) . 
+endef + +define $(package)_build_cmds + $(MAKE) +endef + +define $(package)_stage_cmds + $(MAKE) DESTDIR=$($(package)_staging_dir) install-bin +endef diff --git a/depends/packages/native_mac_alias.mk b/depends/packages/native_mac_alias.mk deleted file mode 100644 index e60b99dccc98c..0000000000000 --- a/depends/packages/native_mac_alias.mk +++ /dev/null @@ -1,15 +0,0 @@ -package=native_mac_alias -$(package)_version=2.0.7 -$(package)_download_path=https://github.com/al45tair/mac_alias/archive/ -$(package)_file_name=v$($(package)_version).tar.gz -$(package)_sha256_hash=6f606d3b6bccd2112aeabf1a063f5b5ece87005a5d7e97c8faca23b916e88838 -$(package)_install_libdir=$(build_prefix)/lib/python3/dist-packages - -define $(package)_build_cmds - python3 setup.py build -endef - -define $(package)_stage_cmds - mkdir -p $($(package)_install_libdir) && \ - python3 setup.py install --root=$($(package)_staging_dir) --prefix=$(build_prefix) --install-lib=$($(package)_install_libdir) -endef diff --git a/depends/packages/packages.mk b/depends/packages/packages.mk index 42dbaa77a60d7..08a91cbcbd012 100644 --- a/depends/packages/packages.mk +++ b/depends/packages/packages.mk @@ -1,23 +1,25 @@ -packages:=boost libevent +packages:= -qt_packages = zlib +boost_packages = boost -qrencode_packages = qrencode +libevent_packages = libevent -qt_linux_packages:=qt expat libxcb xcb_proto libXau xproto freetype fontconfig -qt_android_packages=qt +qrencode_linux_packages = qrencode +qrencode_darwin_packages = qrencode +qrencode_mingw32_packages = qrencode +qt_linux_packages:=qt expat libxcb xcb_proto libXau xproto freetype fontconfig libxkbcommon libxcb_util libxcb_util_render libxcb_util_keysyms libxcb_util_image libxcb_util_wm qt_darwin_packages=qt qt_mingw32_packages=qt -wallet_packages=bdb +bdb_packages=bdb +sqlite_packages=sqlite zmq_packages=zeromq upnp_packages=miniupnpc -darwin_native_packages = native_biplist native_ds_store native_mac_alias +multiprocess_packages = libmultiprocess capnp +multiprocess_native_packages = native_libmultiprocess native_capnp -ifneq ($(build_os),darwin) -darwin_native_packages += native_cctools native_cdrkit native_libdmg-hfsplus -endif +usdt_linux_packages=systemtap diff --git a/depends/packages/qrencode.mk b/depends/packages/qrencode.mk index d1687883bcd51..e3f614091db0b 100644 --- a/depends/packages/qrencode.mk +++ b/depends/packages/qrencode.mk @@ -1,23 +1,24 @@ package=qrencode -$(package)_version=3.4.4 +$(package)_version=4.1.1 $(package)_download_path=https://fukuchi.org/works/qrencode/ -$(package)_file_name=$(package)-$($(package)_version).tar.bz2 -$(package)_sha256_hash=efe5188b1ddbcbf98763b819b146be6a90481aac30cfc8d858ab78a19cde1fa5 +$(package)_file_name=$(package)-$($(package)_version).tar.gz +$(package)_sha256_hash=da448ed4f52aba6bcb0cd48cac0dd51b8692bccc4cd127431402fca6f8171e8e +$(package)_patches=cmake_fixups.patch define $(package)_set_vars -$(package)_config_opts=--disable-shared --without-tools --without-tests --disable-sdltest -$(package)_config_opts += --disable-gprof --disable-gcov --disable-mudflap -$(package)_config_opts += --disable-dependency-tracking --enable-option-checking -$(package)_config_opts_linux=--with-pic -$(package)_config_opts_android=--with-pic +$(package)_config_opts := -DWITH_TOOLS=NO -DWITH_TESTS=NO -DGPROF=OFF -DCOVERAGE=OFF +$(package)_config_opts += -DCMAKE_DISABLE_FIND_PACKAGE_PNG=TRUE -DWITHOUT_PNG=ON +$(package)_config_opts += -DCMAKE_DISABLE_FIND_PACKAGE_ICONV=TRUE +$(package)_cflags += -Wno-int-conversion -Wno-implicit-function-declaration 
endef define $(package)_preprocess_cmds - cp -f $(BASEDIR)/config.guess $(BASEDIR)/config.sub use + patch -p1 < $($(package)_patch_dir)/cmake_fixups.patch endef + define $(package)_config_cmds - $($(package)_autoconf) + $($(package)_cmake) -S . -B . endef define $(package)_build_cmds @@ -29,5 +30,5 @@ define $(package)_stage_cmds endef define $(package)_postprocess_cmds - rm lib/*.la + rm -rf share endef diff --git a/depends/packages/qt.mk b/depends/packages/qt.mk index 366b1d0c423ae..917e17993228f 100644 --- a/depends/packages/qt.mk +++ b/depends/packages/qt.mk @@ -1,36 +1,55 @@ -PACKAGE=qt -$(package)_version=5.9.8 -$(package)_download_path=https://download.qt.io/official_releases/qt/5.9/$($(package)_version)/submodules -$(package)_suffix=opensource-src-$($(package)_version).tar.xz +package=qt +$(package)_version=5.15.14 +$(package)_download_path=https://download.qt.io/official_releases/qt/5.15/$($(package)_version)/submodules +$(package)_suffix=everywhere-opensource-src-$($(package)_version).tar.xz $(package)_file_name=qtbase-$($(package)_suffix) -$(package)_sha256_hash=9b9dec1f67df1f94bce2955c5604de992d529dde72050239154c56352da0907d -$(package)_dependencies=zlib -$(package)_linux_dependencies=freetype fontconfig libxcb -$(package)_build_subdir=qtbase +$(package)_sha256_hash=500d3b390048e9538c28b5f523dfea6936f9c2e10d24ab46580ff57d430b98be +$(package)_linux_dependencies=freetype fontconfig libxcb libxkbcommon libxcb_util libxcb_util_render libxcb_util_keysyms libxcb_util_image libxcb_util_wm $(package)_qt_libs=corelib network widgets gui plugins testlib -$(package)_patches=fix_qt_pkgconfig.patch mac-qmake.conf fix_configure_mac.patch fix_no_printer.patch fix_rcc_determinism.patch fix_riscv64_arch.patch xkb-default.patch no-xlib.patch fix_android_qmake_conf.patch fix_android_jni_static.patch +$(package)_linguist_tools = lrelease lupdate lconvert +$(package)_patches = qt.pro +$(package)_patches += qttools_src.pro +$(package)_patches += mac-qmake.conf +$(package)_patches += fix_qt_pkgconfig.patch +$(package)_patches += no-xlib.patch +$(package)_patches += dont_hardcode_pwd.patch +$(package)_patches += qtbase-moc-ignore-gcc-macro.patch +$(package)_patches += no_warnings_for_symbols.patch +$(package)_patches += rcc_hardcode_timestamp.patch +$(package)_patches += duplicate_lcqpafonts.patch +$(package)_patches += guix_cross_lib_path.patch +$(package)_patches += fix-macos-linker.patch +$(package)_patches += memory_resource.patch +$(package)_patches += clang_18_libpng.patch +$(package)_patches += utc_from_string_no_optimize.patch +$(package)_patches += windows_lto.patch +$(package)_patches += darwin_no_libm.patch +$(package)_patches += zlib-timebits64.patch $(package)_qttranslations_file_name=qttranslations-$($(package)_suffix) -$(package)_qttranslations_sha256_hash=fb5a47799754af73d3bf501fe513342cfe2fc37f64e80df5533f6110e804220c +$(package)_qttranslations_sha256_hash=5b94d1a11b566908622fcca2f8b799744d2f8a68da20be4caa5953ed63b10489 $(package)_qttools_file_name=qttools-$($(package)_suffix) -$(package)_qttools_sha256_hash=a97556eb7b2f30252cdd8a598c396cfce2b2f79d2bae883af6d3b26a2cdcc63c +$(package)_qttools_sha256_hash=12061a85baf5f4de8fbc795e1d3872b706f340211b9e70962caeffc6f5e89563 $(package)_extra_sources = $($(package)_qttranslations_file_name) $(package)_extra_sources += $($(package)_qttools_file_name) define $(package)_set_vars +$(package)_config_env = QT_MAC_SDK_NO_VERSION_CHECK=1 $(package)_config_opts_release = -release +$(package)_config_opts_release += -silent $(package)_config_opts_debug = 
-debug +$(package)_config_opts_debug += -optimized-tools $(package)_config_opts += -bindir $(build_prefix)/bin -$(package)_config_opts += -c++std c++11 +$(package)_config_opts += -c++std c++2a $(package)_config_opts += -confirm-license $(package)_config_opts += -hostprefix $(build_prefix) $(package)_config_opts += -no-compile-examples $(package)_config_opts += -no-cups $(package)_config_opts += -no-egl $(package)_config_opts += -no-eglfs -$(package)_config_opts += -no-freetype +$(package)_config_opts += -no-evdev $(package)_config_opts += -no-gif $(package)_config_opts += -no-glib $(package)_config_opts += -no-icu @@ -41,11 +60,12 @@ $(package)_config_opts += -no-linuxfb $(package)_config_opts += -no-libjpeg $(package)_config_opts += -no-libproxy $(package)_config_opts += -no-libudev +$(package)_config_opts += -no-mimetype-database $(package)_config_opts += -no-mtdev $(package)_config_opts += -no-openssl $(package)_config_opts += -no-openvg $(package)_config_opts += -no-reduce-relocations -$(package)_config_opts += -no-qml-debug +$(package)_config_opts += -no-schannel $(package)_config_opts += -no-sctp $(package)_config_opts += -no-securetransport $(package)_config_opts += -no-sql-db2 @@ -59,27 +79,24 @@ $(package)_config_opts += -no-sql-sqlite $(package)_config_opts += -no-sql-sqlite2 $(package)_config_opts += -no-system-proxies $(package)_config_opts += -no-use-gold-linker -$(package)_config_opts += -no-xinput2 +$(package)_config_opts += -no-zstd $(package)_config_opts += -nomake examples $(package)_config_opts += -nomake tests +$(package)_config_opts += -nomake tools $(package)_config_opts += -opensource -$(package)_config_opts += -optimized-tools -$(package)_config_opts += -pch $(package)_config_opts += -pkg-config $(package)_config_opts += -prefix $(host_prefix) $(package)_config_opts += -qt-libpng $(package)_config_opts += -qt-pcre $(package)_config_opts += -qt-harfbuzz -$(package)_config_opts += -system-zlib +$(package)_config_opts += -qt-zlib $(package)_config_opts += -static -$(package)_config_opts += -silent $(package)_config_opts += -v $(package)_config_opts += -no-feature-bearermanagement $(package)_config_opts += -no-feature-colordialog $(package)_config_opts += -no-feature-commandlineparser $(package)_config_opts += -no-feature-concurrent $(package)_config_opts += -no-feature-dial -$(package)_config_opts += -no-feature-filesystemwatcher $(package)_config_opts += -no-feature-fontcombobox $(package)_config_opts += -no-feature-ftp $(package)_config_opts += -no-feature-http @@ -93,13 +110,14 @@ $(package)_config_opts += -no-feature-printdialog $(package)_config_opts += -no-feature-printer $(package)_config_opts += -no-feature-printpreviewdialog $(package)_config_opts += -no-feature-printpreviewwidget -$(package)_config_opts += -no-feature-regularexpression $(package)_config_opts += -no-feature-sessionmanager $(package)_config_opts += -no-feature-socks5 $(package)_config_opts += -no-feature-sql +$(package)_config_opts += -no-feature-sqlmodel $(package)_config_opts += -no-feature-statemachine $(package)_config_opts += -no-feature-syntaxhighlighter $(package)_config_opts += -no-feature-textbrowser +$(package)_config_opts += -no-feature-textmarkdownwriter $(package)_config_opts += -no-feature-textodfwriter $(package)_config_opts += -no-feature-topleveldomain $(package)_config_opts += -no-feature-udpsocket @@ -113,60 +131,62 @@ $(package)_config_opts += -no-feature-xml $(package)_config_opts_darwin = -no-dbus $(package)_config_opts_darwin += -no-opengl +$(package)_config_opts_darwin 
+= -pch +$(package)_config_opts_darwin += -no-feature-corewlan +$(package)_config_opts_darwin += -no-freetype +$(package)_config_opts_darwin += QMAKE_MACOSX_DEPLOYMENT_TARGET=$(OSX_MIN_VERSION) ifneq ($(build_os),darwin) $(package)_config_opts_darwin += -xplatform macx-clang-linux $(package)_config_opts_darwin += -device-option MAC_SDK_PATH=$(OSX_SDK) $(package)_config_opts_darwin += -device-option MAC_SDK_VERSION=$(OSX_SDK_VERSION) -$(package)_config_opts_darwin += -device-option CROSS_COMPILE="$(host)-" -$(package)_config_opts_darwin += -device-option MAC_MIN_VERSION=$(OSX_MIN_VERSION) +$(package)_config_opts_darwin += -device-option CROSS_COMPILE="llvm-" $(package)_config_opts_darwin += -device-option MAC_TARGET=$(host) +$(package)_config_opts_darwin += -device-option XCODE_VERSION=$(XCODE_VERSION) endif -$(package)_config_opts_linux = -qt-xkbcommon-x11 -$(package)_config_opts_linux += -qt-xcb +ifneq ($(build_arch),$(host_arch)) +$(package)_config_opts_aarch64_darwin += -device-option QMAKE_APPLE_DEVICE_ARCHS=arm64 +$(package)_config_opts_x86_64_darwin += -device-option QMAKE_APPLE_DEVICE_ARCHS=x86_64 +endif + +$(package)_config_opts_linux = -xcb $(package)_config_opts_linux += -no-xcb-xlib $(package)_config_opts_linux += -no-feature-xlib $(package)_config_opts_linux += -system-freetype $(package)_config_opts_linux += -fontconfig $(package)_config_opts_linux += -no-opengl +$(package)_config_opts_linux += -no-feature-vulkan $(package)_config_opts_linux += -dbus-runtime -$(package)_config_opts_arm_linux += -platform linux-g++ -xplatform bitcoin-linux-g++ -$(package)_config_opts_i686_linux = -xplatform linux-g++-32 -$(package)_config_opts_x86_64_linux = -xplatform linux-g++-64 -$(package)_config_opts_aarch64_linux = -xplatform linux-aarch64-gnu-g++ -$(package)_config_opts_powerpc64_linux = -platform linux-g++ -xplatform bitcoin-linux-g++ -$(package)_config_opts_powerpc64le_linux = -platform linux-g++ -xplatform bitcoin-linux-g++ -$(package)_config_opts_riscv64_linux = -platform linux-g++ -xplatform bitcoin-linux-g++ -$(package)_config_opts_s390x_linux = -platform linux-g++ -xplatform bitcoin-linux-g++ +ifneq ($(LTO),) +$(package)_config_opts_linux += -ltcg +endif + +ifneq (,$(findstring clang,$($(package)_cxx))) + ifneq (,$(findstring -stdlib=libc++,$($(package)_cxx))) + $(package)_config_opts_linux += -platform linux-clang-libc++ -xplatform linux-clang-libc++ + else + $(package)_config_opts_linux += -platform linux-clang -xplatform linux-clang + endif +else + $(package)_config_opts_linux += -platform linux-g++ -xplatform bitcoin-linux-g++ +endif $(package)_config_opts_mingw32 = -no-opengl $(package)_config_opts_mingw32 += -no-dbus +$(package)_config_opts_mingw32 += -no-freetype $(package)_config_opts_mingw32 += -xplatform win32-g++ +$(package)_config_opts_mingw32 += "QMAKE_CFLAGS = '$($(package)_cflags) $($(package)_cppflags)'" +$(package)_config_opts_mingw32 += "QMAKE_CXX = '$($(package)_cxx)'" +$(package)_config_opts_mingw32 += "QMAKE_CXXFLAGS = '$($(package)_cxxflags) $($(package)_cppflags)'" +$(package)_config_opts_mingw32 += "QMAKE_LINK = '$($(package)_cxx)'" +$(package)_config_opts_mingw32 += "QMAKE_LFLAGS = '$($(package)_ldflags)'" +$(package)_config_opts_mingw32 += "QMAKE_LIB = '$($(package)_ar) rc'" $(package)_config_opts_mingw32 += -device-option CROSS_COMPILE="$(host)-" - -$(package)_config_opts_android = -xplatform android-clang -$(package)_config_opts_android += -android-sdk $(ANDROID_SDK) -$(package)_config_opts_android += -android-ndk $(ANDROID_NDK) 
-$(package)_config_opts_android += -android-ndk-platform android-$(ANDROID_API_LEVEL) -$(package)_config_opts_android += -device-option CROSS_COMPILE="$(host)-" -$(package)_config_opts_android += -egl -$(package)_config_opts_android += -qpa xcb -$(package)_config_opts_android += -no-eglfs -$(package)_config_opts_android += -no-dbus -$(package)_config_opts_android += -opengl es2 -$(package)_config_opts_android += -qt-freetype -$(package)_config_opts_android += -no-fontconfig -$(package)_config_opts_android += -L $(host_prefix)/lib -$(package)_config_opts_android += -I $(host_prefix)/include - -$(package)_config_opts_aarch64_android += -android-arch arm64-v8a -$(package)_config_opts_armv7a_android += -android-arch armeabi-v7a -$(package)_config_opts_x86_64_android += -android-arch x86_64 -$(package)_config_opts_i686_android += -android-arch i686 - -$(package)_build_env = QT_RCC_TEST=1 -$(package)_build_env += QT_RCC_SOURCE_DATE_OVERRIDE=1 +$(package)_config_opts_mingw32 += -pch +ifneq ($(LTO),) +$(package)_config_opts_mingw32 += -ltcg +endif endef define $(package)_fetch_cmds @@ -182,80 +202,77 @@ define $(package)_extract_cmds echo "$($(package)_qttools_sha256_hash) $($(package)_source_dir)/$($(package)_qttools_file_name)" >> $($(package)_extract_dir)/.$($(package)_file_name).hash && \ $(build_SHA256SUM) -c $($(package)_extract_dir)/.$($(package)_file_name).hash && \ mkdir qtbase && \ - tar --no-same-owner --strip-components=1 -xf $($(package)_source) -C qtbase && \ + $(build_TAR) --no-same-owner --strip-components=1 -xf $($(package)_source) -C qtbase && \ mkdir qttranslations && \ - tar --no-same-owner --strip-components=1 -xf $($(package)_source_dir)/$($(package)_qttranslations_file_name) -C qttranslations && \ + $(build_TAR) --no-same-owner --strip-components=1 -xf $($(package)_source_dir)/$($(package)_qttranslations_file_name) -C qttranslations && \ mkdir qttools && \ - tar --no-same-owner --strip-components=1 -xf $($(package)_source_dir)/$($(package)_qttools_file_name) -C qttools + $(build_TAR) --no-same-owner --strip-components=1 -xf $($(package)_source_dir)/$($(package)_qttools_file_name) -C qttools endef +# Preprocessing steps work as follows: +# +# 1. Apply our patches to the extracted source. See each patch for more info. +# +# 2. Create a macOS-Clang-Linux mkspec using our mac-qmake.conf. +# +# 3. After making a copy of the mkspec for the linux-arm-gnueabi host, named +# bitcoin-linux-g++, replace tool names with $($($(package)_type)_TOOL). +# +# 4. Put our C, CXX and LD FLAGS into gcc-base.conf. Only used for non-host builds. +# +# 5. In clang.conf, swap out clang & clang++, for our compiler + flags. See #17466. 
define $(package)_preprocess_cmds - sed -i.old "s|FT_Get_Font_Format|FT_Get_X11_Font_Format|" qtbase/src/platformsupport/fontdatabases/freetype/qfontengine_ft.cpp && \ - sed -i.old "s|updateqm.commands = \$$$$\$$$$LRELEASE|updateqm.commands = $($(package)_extract_dir)/qttools/bin/lrelease|" qttranslations/translations/translations.pro && \ - sed -i.old "/updateqm.depends =/d" qttranslations/translations/translations.pro && \ - sed -i.old "s/src_plugins.depends = src_sql src_network/src_plugins.depends = src_network/" qtbase/src/src.pro && \ - sed -i.old -e 's/if \[ "$$$$XPLATFORM_MAC" = "yes" \]; then xspecvals=$$$$(macSDKify/if \[ "$$$$BUILD_ON_MAC" = "yes" \]; then xspecvals=$$$$(macSDKify/' -e 's|/bin/pwd|pwd|' qtbase/configure && \ + cp $($(package)_patch_dir)/qt.pro qt.pro && \ + cp $($(package)_patch_dir)/qttools_src.pro qttools/src/src.pro && \ + patch -p1 -i $($(package)_patch_dir)/fix-macos-linker.patch && \ + patch -p1 -i $($(package)_patch_dir)/dont_hardcode_pwd.patch && \ + patch -p1 -i $($(package)_patch_dir)/fix_qt_pkgconfig.patch && \ + patch -p1 -i $($(package)_patch_dir)/no-xlib.patch && \ + patch -p1 -i $($(package)_patch_dir)/qtbase-moc-ignore-gcc-macro.patch && \ + patch -p1 -i $($(package)_patch_dir)/memory_resource.patch && \ + patch -p1 -i $($(package)_patch_dir)/no_warnings_for_symbols.patch && \ + patch -p1 -i $($(package)_patch_dir)/clang_18_libpng.patch && \ + patch -p1 -i $($(package)_patch_dir)/rcc_hardcode_timestamp.patch && \ + patch -p1 -i $($(package)_patch_dir)/duplicate_lcqpafonts.patch && \ + patch -p1 -i $($(package)_patch_dir)/utc_from_string_no_optimize.patch && \ + patch -p1 -i $($(package)_patch_dir)/guix_cross_lib_path.patch && \ + patch -p1 -i $($(package)_patch_dir)/windows_lto.patch && \ + patch -p1 -i $($(package)_patch_dir)/darwin_no_libm.patch && \ + patch -p1 -i $($(package)_patch_dir)/zlib-timebits64.patch && \ mkdir -p qtbase/mkspecs/macx-clang-linux &&\ - cp -f qtbase/mkspecs/macx-clang/Info.plist.lib qtbase/mkspecs/macx-clang-linux/ &&\ - cp -f qtbase/mkspecs/macx-clang/Info.plist.app qtbase/mkspecs/macx-clang-linux/ &&\ cp -f qtbase/mkspecs/macx-clang/qplatformdefs.h qtbase/mkspecs/macx-clang-linux/ &&\ cp -f $($(package)_patch_dir)/mac-qmake.conf qtbase/mkspecs/macx-clang-linux/qmake.conf && \ cp -r qtbase/mkspecs/linux-arm-gnueabi-g++ qtbase/mkspecs/bitcoin-linux-g++ && \ - sed -i.old "s/arm-linux-gnueabi-/$(host)-/g" qtbase/mkspecs/bitcoin-linux-g++/qmake.conf && \ - patch -p1 -i $($(package)_patch_dir)/fix_qt_pkgconfig.patch &&\ - patch -p1 -i $($(package)_patch_dir)/fix_configure_mac.patch &&\ - patch -p1 -i $($(package)_patch_dir)/fix_no_printer.patch &&\ - patch -p1 -i $($(package)_patch_dir)/fix_rcc_determinism.patch &&\ - patch -p1 -i $($(package)_patch_dir)/xkb-default.patch &&\ - patch -p1 -i $($(package)_patch_dir)/fix_android_qmake_conf.patch &&\ - patch -p1 -i $($(package)_patch_dir)/fix_android_jni_static.patch &&\ + sed -i.old "s|arm-linux-gnueabi-gcc|$($($(package)_type)_CC)|" qtbase/mkspecs/bitcoin-linux-g++/qmake.conf && \ + sed -i.old "s|arm-linux-gnueabi-g++|$($($(package)_type)_CXX)|" qtbase/mkspecs/bitcoin-linux-g++/qmake.conf && \ + sed -i.old "s|arm-linux-gnueabi-ar|$($($(package)_type)_AR)|" qtbase/mkspecs/bitcoin-linux-g++/qmake.conf && \ + sed -i.old "s|arm-linux-gnueabi-objcopy|$($($(package)_type)_OBJCOPY)|" qtbase/mkspecs/bitcoin-linux-g++/qmake.conf && \ + sed -i.old "s|arm-linux-gnueabi-nm|$($($(package)_type)_NM)|" qtbase/mkspecs/bitcoin-linux-g++/qmake.conf && \ + sed -i.old 
"s|arm-linux-gnueabi-strip|$($($(package)_type)_STRIP)|" qtbase/mkspecs/bitcoin-linux-g++/qmake.conf && \ echo "!host_build: QMAKE_CFLAGS += $($(package)_cflags) $($(package)_cppflags)" >> qtbase/mkspecs/common/gcc-base.conf && \ echo "!host_build: QMAKE_CXXFLAGS += $($(package)_cxxflags) $($(package)_cppflags)" >> qtbase/mkspecs/common/gcc-base.conf && \ echo "!host_build: QMAKE_LFLAGS += $($(package)_ldflags)" >> qtbase/mkspecs/common/gcc-base.conf && \ - patch -p1 -i $($(package)_patch_dir)/fix_riscv64_arch.patch &&\ - patch -p1 -i $($(package)_patch_dir)/no-xlib.patch &&\ - echo "QMAKE_LINK_OBJECT_MAX = 10" >> qtbase/mkspecs/win32-g++/qmake.conf &&\ - echo "QMAKE_LINK_OBJECT_SCRIPT = object_script" >> qtbase/mkspecs/win32-g++/qmake.conf &&\ - sed -i.old "s|QMAKE_CFLAGS += |!host_build: QMAKE_CFLAGS = $($(package)_cflags) $($(package)_cppflags) |" qtbase/mkspecs/win32-g++/qmake.conf && \ - sed -i.old "s|QMAKE_CXXFLAGS += |!host_build: QMAKE_CXXFLAGS = $($(package)_cxxflags) $($(package)_cppflags) |" qtbase/mkspecs/win32-g++/qmake.conf && \ - sed -i.old "0,/^QMAKE_LFLAGS_/s|^QMAKE_LFLAGS_|!host_build: QMAKE_LFLAGS = $($(package)_ldflags)\n&|" qtbase/mkspecs/win32-g++/qmake.conf && \ - sed -i.old "s|QMAKE_CC = clang|QMAKE_CC = $($(package)_cc)|" qtbase/mkspecs/common/clang.conf && \ - sed -i.old "s|QMAKE_CXX = clang++|QMAKE_CXX = $($(package)_cxx)|" qtbase/mkspecs/common/clang.conf && \ - sed -i.old "s/LIBRARY_PATH/(CROSS_)?\0/g" qtbase/mkspecs/features/toolchain.prf + sed -i.old "s|QMAKE_CC = \$$$$\$$$${CROSS_COMPILE}clang|QMAKE_CC = $($(package)_cc)|" qtbase/mkspecs/common/clang.conf && \ + sed -i.old "s|QMAKE_CXX = \$$$$\$$$${CROSS_COMPILE}clang++|QMAKE_CXX = $($(package)_cxx)|" qtbase/mkspecs/common/clang.conf endef define $(package)_config_cmds - export PKG_CONFIG_SYSROOT_DIR=/ && \ - export PKG_CONFIG_LIBDIR=$(host_prefix)/lib/pkgconfig && \ - export PKG_CONFIG_PATH=$(host_prefix)/share/pkgconfig && \ - ./configure $($(package)_config_opts) && \ - echo "host_build: QT_CONFIG ~= s/system-zlib/zlib" >> mkspecs/qconfig.pri && \ - echo "CONFIG += force_bootstrap" >> mkspecs/qconfig.pri && \ - $(MAKE) sub-src-clean && \ - cd ../qttranslations && ../qtbase/bin/qmake qttranslations.pro -o Makefile && \ - cd translations && ../../qtbase/bin/qmake translations.pro -o Makefile && cd ../.. && \ - cd qttools/src/linguist/lrelease/ && ../../../../qtbase/bin/qmake lrelease.pro -o Makefile && \ - cd ../lupdate/ && ../../../../qtbase/bin/qmake lupdate.pro -o Makefile && cd ../../../.. + cd qtbase && \ + ./configure -top-level $($(package)_config_opts) endef define $(package)_build_cmds - $(MAKE) -C src $(addprefix sub-,$($(package)_qt_libs)) && \ - $(MAKE) -C ../qttools/src/linguist/lrelease && \ - $(MAKE) -C ../qttools/src/linguist/lupdate && \ - $(MAKE) -C ../qttranslations + $(MAKE) endef +# TODO: Investigate whether specific targets can be used here to minimize the amount of files/components installed. define $(package)_stage_cmds - $(MAKE) -C src INSTALL_ROOT=$($(package)_staging_dir) $(addsuffix -install_subtargets,$(addprefix sub-,$($(package)_qt_libs))) && cd .. 
&& \ - $(MAKE) -C qttools/src/linguist/lrelease INSTALL_ROOT=$($(package)_staging_dir) install_target && \ - $(MAKE) -C qttools/src/linguist/lupdate INSTALL_ROOT=$($(package)_staging_dir) install_target && \ - $(MAKE) -C qttranslations INSTALL_ROOT=$($(package)_staging_dir) install_subtargets && \ - if `test -f qtbase/src/plugins/platforms/xcb/xcb-static/libxcb-static.a`; then \ - cp qtbase/src/plugins/platforms/xcb/xcb-static/libxcb-static.a $($(package)_staging_prefix_dir)/lib; \ - fi + $(MAKE) -C qtbase INSTALL_ROOT=$($(package)_staging_dir) install && \ + $(MAKE) -C qttools INSTALL_ROOT=$($(package)_staging_dir) install && \ + $(MAKE) -C qttranslations INSTALL_ROOT=$($(package)_staging_dir) install_subtargets endef define $(package)_postprocess_cmds - rm -rf native/mkspecs/ native/lib/ lib/cmake/ && \ - rm -f lib/lib*.la lib/*.prl plugins/*/*.prl + rm -rf doc/ native/lib/ && \ + rm -f lib/lib*.la endef diff --git a/depends/packages/sqlite.mk b/depends/packages/sqlite.mk new file mode 100644 index 0000000000000..15bc0f4d7a3ca --- /dev/null +++ b/depends/packages/sqlite.mk @@ -0,0 +1,35 @@ +package=sqlite +$(package)_version=3380500 +$(package)_download_path=https://sqlite.org/2022/ +$(package)_file_name=sqlite-autoconf-$($(package)_version).tar.gz +$(package)_sha256_hash=5af07de982ba658fd91a03170c945f99c971f6955bc79df3266544373e39869c + +define $(package)_set_vars +$(package)_config_opts=--disable-shared --disable-readline --disable-dynamic-extensions --enable-option-checking +$(package)_config_opts+= --disable-rtree --disable-fts4 --disable-fts5 +# We avoid using `--enable-debug` because it overrides CFLAGS, a behavior we want to prevent. +$(package)_cppflags_debug += -DSQLITE_DEBUG +$(package)_cppflags+=-DSQLITE_DQS=0 -DSQLITE_DEFAULT_MEMSTATUS=0 -DSQLITE_OMIT_DEPRECATED +$(package)_cppflags+=-DSQLITE_OMIT_SHARED_CACHE -DSQLITE_OMIT_JSON -DSQLITE_LIKE_DOESNT_MATCH_BLOBS +$(package)_cppflags+=-DSQLITE_OMIT_DECLTYPE -DSQLITE_OMIT_PROGRESS_CALLBACK -DSQLITE_OMIT_AUTOINIT +endef + +define $(package)_preprocess_cmds + cp -f $(BASEDIR)/config.guess $(BASEDIR)/config.sub . 
+endef + +define $(package)_config_cmds + $($(package)_autoconf) +endef + +define $(package)_build_cmds + $(MAKE) libsqlite3.la +endef + +define $(package)_stage_cmds + $(MAKE) DESTDIR=$($(package)_staging_dir) install-libLTLIBRARIES install-includeHEADERS install-pkgconfigDATA +endef + +define $(package)_postprocess_cmds + rm lib/*.la +endef diff --git a/depends/packages/systemtap.mk b/depends/packages/systemtap.mk new file mode 100644 index 0000000000000..c912e18c31e77 --- /dev/null +++ b/depends/packages/systemtap.mk @@ -0,0 +1,12 @@ +package=systemtap +$(package)_version=4.8 +$(package)_download_path=https://sourceware.org/ftp/systemtap/releases/ +$(package)_file_name=$(package)-$($(package)_version).tar.gz +$(package)_sha256_hash=cbd50a4eba5b261394dc454c12448ddec73e55e6742fda7f508f9fbc1331c223 +$(package)_patches=remove_SDT_ASM_SECTION_AUTOGROUP_SUPPORT_check.patch + +define $(package)_preprocess_cmds + patch -p1 < $($(package)_patch_dir)/remove_SDT_ASM_SECTION_AUTOGROUP_SUPPORT_check.patch && \ + mkdir -p $($(package)_staging_prefix_dir)/include/sys && \ + cp includes/sys/sdt.h $($(package)_staging_prefix_dir)/include/sys/sdt.h +endef diff --git a/depends/packages/xcb_proto.mk b/depends/packages/xcb_proto.mk index 01203a0718af9..6e1c5a10a87ba 100644 --- a/depends/packages/xcb_proto.mk +++ b/depends/packages/xcb_proto.mk @@ -1,8 +1,8 @@ package=xcb_proto -$(package)_version=1.10 -$(package)_download_path=https://xcb.freedesktop.org/dist -$(package)_file_name=xcb-proto-$($(package)_version).tar.bz2 -$(package)_sha256_hash=7ef40ddd855b750bc597d2a435da21e55e502a0fefa85b274f2c922800baaf05 +$(package)_version=1.15.2 +$(package)_download_path=https://xorg.freedesktop.org/archive/individual/proto +$(package)_file_name=xcb-proto-$($(package)_version).tar.xz +$(package)_sha256_hash=7072beb1f680a2fe3f9e535b797c146d22528990c72f63ddb49d2f350a3653ed define $(package)_config_cmds $($(package)_autoconf) @@ -17,6 +17,5 @@ define $(package)_stage_cmds endef define $(package)_postprocess_cmds - find -name "*.pyc" -delete && \ - find -name "*.pyo" -delete + rm -rf lib/python*/site-packages/xcbgen/__pycache__ endef diff --git a/depends/packages/xproto.mk b/depends/packages/xproto.mk index 6bd867d02b20f..29c349a21b5b7 100644 --- a/depends/packages/xproto.mk +++ b/depends/packages/xproto.mk @@ -1,8 +1,8 @@ package=xproto -$(package)_version=7.0.26 +$(package)_version=7.0.31 $(package)_download_path=https://xorg.freedesktop.org/releases/individual/proto -$(package)_file_name=$(package)-$($(package)_version).tar.bz2 -$(package)_sha256_hash=636162c1759805a5a0114a369dffdeccb8af8c859ef6e1445f26a4e6e046514f +$(package)_file_name=$(package)-$($(package)_version).tar.gz +$(package)_sha256_hash=6d755eaae27b45c5cc75529a12855fed5de5969b367ed05003944cf901ed43c7 define $(package)_set_vars $(package)_config_opts=--without-fop --without-xmlto --without-xsltproc --disable-specs diff --git a/depends/packages/zeromq.mk b/depends/packages/zeromq.mk index 6f35ede24850a..67a0dd88e5def 100644 --- a/depends/packages/zeromq.mk +++ b/depends/packages/zeromq.mk @@ -1,39 +1,52 @@ package=zeromq -$(package)_version=4.3.1 +$(package)_version=4.3.5 $(package)_download_path=https://github.com/zeromq/libzmq/releases/download/v$($(package)_version)/ $(package)_file_name=$(package)-$($(package)_version).tar.gz -$(package)_sha256_hash=bcbabe1e2c7d0eec4ed612e10b94b112dd5f06fcefa994a0c79a45d835cd21eb -$(package)_patches=0001-fix-build-with-older-mingw64.patch 0002-disable-pthread_set_name_np.patch 
+$(package)_sha256_hash=6653ef5910f17954861fe72332e68b03ca6e4d9c7160eb3a8de5a5a913bfab43 +$(package)_build_subdir=build +$(package)_patches = remove_libstd_link.patch +$(package)_patches += macos_mktemp_check.patch +$(package)_patches += builtin_sha1.patch +$(package)_patches += fix_have_windows.patch +$(package)_patches += openbsd_kqueue_headers.patch +$(package)_patches += cmake_minimum.patch +$(package)_patches += cacheline_undefined.patch +$(package)_patches += no_librt.patch +$(package)_patches += fix_mingw_link.patch define $(package)_set_vars - $(package)_config_opts=--without-docs --disable-shared --disable-curve --disable-curve-keygen --disable-perf - $(package)_config_opts += --without-libsodium --without-libgssapi_krb5 --without-pgm --without-norm --without-vmci - $(package)_config_opts += --disable-libunwind --disable-radix-tree --without-gcov --disable-dependency-tracking - $(package)_config_opts += --disable-Werror --disable-drafts --enable-option-checking - $(package)_config_opts_linux=--with-pic - $(package)_config_opts_android=--with-pic - $(package)_cxxflags=-std=c++11 + $(package)_config_opts := -DCMAKE_BUILD_TYPE=None -DWITH_DOCS=OFF -DWITH_LIBSODIUM=OFF + $(package)_config_opts += -DWITH_LIBBSD=OFF -DENABLE_CURVE=OFF -DENABLE_CPACK=OFF + $(package)_config_opts += -DBUILD_SHARED=OFF -DBUILD_TESTS=OFF -DZMQ_BUILD_TESTS=OFF + $(package)_config_opts += -DENABLE_DRAFTS=OFF -DZMQ_BUILD_TESTS=OFF + $(package)_cxxflags += -ffile-prefix-map=$($(package)_extract_dir)=/usr + $(package)_config_opts_mingw32 += -DZMQ_WIN32_WINNT=0x0601 -DZMQ_HAVE_IPC=OFF endef define $(package)_preprocess_cmds - patch -p1 < $($(package)_patch_dir)/0001-fix-build-with-older-mingw64.patch && \ - patch -p1 < $($(package)_patch_dir)/0002-disable-pthread_set_name_np.patch && \ - cp -f $(BASEDIR)/config.guess $(BASEDIR)/config.sub config + patch -p1 < $($(package)_patch_dir)/remove_libstd_link.patch && \ + patch -p1 < $($(package)_patch_dir)/macos_mktemp_check.patch && \ + patch -p1 < $($(package)_patch_dir)/builtin_sha1.patch && \ + patch -p1 < $($(package)_patch_dir)/cacheline_undefined.patch && \ + patch -p1 < $($(package)_patch_dir)/fix_have_windows.patch && \ + patch -p1 < $($(package)_patch_dir)/openbsd_kqueue_headers.patch && \ + patch -p1 < $($(package)_patch_dir)/cmake_minimum.patch && \ + patch -p1 < $($(package)_patch_dir)/no_librt.patch && \ + patch -p1 < $($(package)_patch_dir)/fix_mingw_link.patch endef define $(package)_config_cmds - $($(package)_autoconf) + $($(package)_cmake) -S .. -B . 
endef define $(package)_build_cmds - $(MAKE) src/libzmq.la + $(MAKE) endef define $(package)_stage_cmds - $(MAKE) DESTDIR=$($(package)_staging_dir) install-libLTLIBRARIES install-includeHEADERS install-pkgconfigDATA + $(MAKE) DESTDIR=$($(package)_staging_dir) install endef define $(package)_postprocess_cmds - sed -i.old "s/ -lstdc++//" lib/pkgconfig/libzmq.pc && \ - rm -rf bin share lib/*.la + rm -rf share endef diff --git a/depends/packages/zlib.mk b/depends/packages/zlib.mk deleted file mode 100644 index acb02020a80b2..0000000000000 --- a/depends/packages/zlib.mk +++ /dev/null @@ -1,31 +0,0 @@ -package=zlib -$(package)_version=1.2.11 -$(package)_download_path=https://www.zlib.net -$(package)_file_name=$(package)-$($(package)_version).tar.gz -$(package)_sha256_hash=c3e5e9fdd5004dcb542feda5ee4f0ff0744628baf8ed2dd5d66f8ca1197cb1a1 - -define $(package)_set_vars -$(package)_config_opts= CC="$($(package)_cc)" -$(package)_config_opts+=CFLAGS="$($(package)_cflags) $($(package)_cppflags) -fPIC" -$(package)_config_opts+=RANLIB="$($(package)_ranlib)" -$(package)_config_opts+=AR="$($(package)_ar)" -$(package)_config_opts_darwin+=AR="$($(package)_libtool)" -$(package)_config_opts_darwin+=ARFLAGS="-o" -$(package)_config_opts_android+=CHOST=$(host) -endef - -# zlib has its own custom configure script that takes in options like CC, -# CFLAGS, RANLIB, AR, and ARFLAGS from the environment rather than from -# command-line arguments. -define $(package)_config_cmds - env $($(package)_config_opts) ./configure --static --prefix=$(host_prefix) -endef - -define $(package)_build_cmds - $(MAKE) libz.a -endef - -define $(package)_stage_cmds - $(MAKE) DESTDIR=$($(package)_staging_dir) install -endef - diff --git a/depends/patches/bdb/clang_cxx_11.patch b/depends/patches/bdb/clang_cxx_11.patch new file mode 100644 index 0000000000000..58f7ddc7d502c --- /dev/null +++ b/depends/patches/bdb/clang_cxx_11.patch @@ -0,0 +1,147 @@ +commit 3311d68f11d1697565401eee6efc85c34f022ea7 +Author: fanquake +Date: Mon Aug 17 20:03:56 2020 +0800 + + Fix C++11 compatibility + +diff --git a/dbinc/atomic.h b/dbinc/atomic.h +index 0034dcc..7c11d4a 100644 +--- a/dbinc/atomic.h ++++ b/dbinc/atomic.h +@@ -70,7 +70,7 @@ typedef struct { + * These have no memory barriers; the caller must include them when necessary. + */ + #define atomic_read(p) ((p)->value) +-#define atomic_init(p, val) ((p)->value = (val)) ++#define atomic_init_db(p, val) ((p)->value = (val)) + + #ifdef HAVE_ATOMIC_SUPPORT + +@@ -144,7 +144,7 @@ typedef LONG volatile *interlocked_val; + #define atomic_inc(env, p) __atomic_inc(p) + #define atomic_dec(env, p) __atomic_dec(p) + #define atomic_compare_exchange(env, p, o, n) \ +- __atomic_compare_exchange((p), (o), (n)) ++ __atomic_compare_exchange_db((p), (o), (n)) + static inline int __atomic_inc(db_atomic_t *p) + { + int temp; +@@ -176,7 +176,7 @@ static inline int __atomic_dec(db_atomic_t *p) + * http://gcc.gnu.org/onlinedocs/gcc-4.1.0/gcc/Atomic-Builtins.html + * which configure could be changed to use. 
+ */ +-static inline int __atomic_compare_exchange( ++static inline int __atomic_compare_exchange_db( + db_atomic_t *p, atomic_value_t oldval, atomic_value_t newval) + { + atomic_value_t was; +@@ -206,7 +206,7 @@ static inline int __atomic_compare_exchange( + #define atomic_dec(env, p) (--(p)->value) + #define atomic_compare_exchange(env, p, oldval, newval) \ + (DB_ASSERT(env, atomic_read(p) == (oldval)), \ +- atomic_init(p, (newval)), 1) ++ atomic_init_db(p, (newval)), 1) + #else + #define atomic_inc(env, p) __atomic_inc(env, p) + #define atomic_dec(env, p) __atomic_dec(env, p) +diff --git a/mp/mp_fget.c b/mp/mp_fget.c +index 5fdee5a..0b75f57 100644 +--- a/mp/mp_fget.c ++++ b/mp/mp_fget.c +@@ -617,7 +617,7 @@ alloc: /* Allocate a new buffer header and data space. */ + + /* Initialize enough so we can call __memp_bhfree. */ + alloc_bhp->flags = 0; +- atomic_init(&alloc_bhp->ref, 1); ++ atomic_init_db(&alloc_bhp->ref, 1); + #ifdef DIAGNOSTIC + if ((uintptr_t)alloc_bhp->buf & (sizeof(size_t) - 1)) { + __db_errx(env, +@@ -911,7 +911,7 @@ alloc: /* Allocate a new buffer header and data space. */ + MVCC_MPROTECT(bhp->buf, mfp->stat.st_pagesize, + PROT_READ); + +- atomic_init(&alloc_bhp->ref, 1); ++ atomic_init_db(&alloc_bhp->ref, 1); + MUTEX_LOCK(env, alloc_bhp->mtx_buf); + alloc_bhp->priority = bhp->priority; + alloc_bhp->pgno = bhp->pgno; +diff --git a/mp/mp_mvcc.c b/mp/mp_mvcc.c +index 34467d2..f05aa0c 100644 +--- a/mp/mp_mvcc.c ++++ b/mp/mp_mvcc.c +@@ -276,7 +276,7 @@ __memp_bh_freeze(dbmp, infop, hp, bhp, need_frozenp) + #else + memcpy(frozen_bhp, bhp, SSZA(BH, buf)); + #endif +- atomic_init(&frozen_bhp->ref, 0); ++ atomic_init_db(&frozen_bhp->ref, 0); + if (mutex != MUTEX_INVALID) + frozen_bhp->mtx_buf = mutex; + else if ((ret = __mutex_alloc(env, MTX_MPOOL_BH, +@@ -428,7 +428,7 @@ __memp_bh_thaw(dbmp, infop, hp, frozen_bhp, alloc_bhp) + #endif + alloc_bhp->mtx_buf = mutex; + MUTEX_LOCK(env, alloc_bhp->mtx_buf); +- atomic_init(&alloc_bhp->ref, 1); ++ atomic_init_db(&alloc_bhp->ref, 1); + F_CLR(alloc_bhp, BH_FROZEN); + } + +diff --git a/mp/mp_region.c b/mp/mp_region.c +index e6cece9..ddbe906 100644 +--- a/mp/mp_region.c ++++ b/mp/mp_region.c +@@ -224,7 +224,7 @@ __memp_init(env, dbmp, reginfo_off, htab_buckets, max_nreg) + MTX_MPOOL_FILE_BUCKET, 0, &htab[i].mtx_hash)) != 0) + return (ret); + SH_TAILQ_INIT(&htab[i].hash_bucket); +- atomic_init(&htab[i].hash_page_dirty, 0); ++ atomic_init_db(&htab[i].hash_page_dirty, 0); + } + + /* +@@ -269,7 +269,7 @@ __memp_init(env, dbmp, reginfo_off, htab_buckets, max_nreg) + hp->mtx_hash = (mtx_base == MUTEX_INVALID) ? 
MUTEX_INVALID : + mtx_base + i; + SH_TAILQ_INIT(&hp->hash_bucket); +- atomic_init(&hp->hash_page_dirty, 0); ++ atomic_init_db(&hp->hash_page_dirty, 0); + #ifdef HAVE_STATISTICS + hp->hash_io_wait = 0; + hp->hash_frozen = hp->hash_thawed = hp->hash_frozen_freed = 0; +diff --git a/mutex/mut_method.c b/mutex/mut_method.c +index 2588763..5c6d516 100644 +--- a/mutex/mut_method.c ++++ b/mutex/mut_method.c +@@ -426,7 +426,7 @@ atomic_compare_exchange(env, v, oldval, newval) + MUTEX_LOCK(env, mtx); + ret = atomic_read(v) == oldval; + if (ret) +- atomic_init(v, newval); ++ atomic_init_db(v, newval); + MUTEX_UNLOCK(env, mtx); + + return (ret); +diff --git a/mutex/mut_tas.c b/mutex/mut_tas.c +index f3922e0..e40fcdf 100644 +--- a/mutex/mut_tas.c ++++ b/mutex/mut_tas.c +@@ -46,7 +46,7 @@ __db_tas_mutex_init(env, mutex, flags) + + #ifdef HAVE_SHARED_LATCHES + if (F_ISSET(mutexp, DB_MUTEX_SHARED)) +- atomic_init(&mutexp->sharecount, 0); ++ atomic_init_db(&mutexp->sharecount, 0); + else + #endif + if (MUTEX_INIT(&mutexp->tas)) { +@@ -486,7 +486,7 @@ __db_tas_mutex_unlock(env, mutex) + F_CLR(mutexp, DB_MUTEX_LOCKED); + /* Flush flag update before zeroing count */ + MEMBAR_EXIT(); +- atomic_init(&mutexp->sharecount, 0); ++ atomic_init_db(&mutexp->sharecount, 0); + } else { + DB_ASSERT(env, sharecount > 0); + MEMBAR_EXIT(); diff --git a/depends/patches/expat/cmake_minimum.patch b/depends/patches/expat/cmake_minimum.patch new file mode 100644 index 0000000000000..a849a82a33758 --- /dev/null +++ b/depends/patches/expat/cmake_minimum.patch @@ -0,0 +1,13 @@ +build: set minimum required CMake to 3.16 + +--- a/CMakeLists.txt ++++ b/CMakeLists.txt +@@ -33,7 +33,7 @@ + # Unlike most of Expat, + # this file is copyrighted under the BSD-license for buildsystem files of KDE. + +-cmake_minimum_required(VERSION 3.1.3) ++cmake_minimum_required(VERSION 3.16) + + # This allows controlling documented build time switches + # when Expat is pulled in using the add_subdirectory function, e.g. diff --git a/depends/patches/fontconfig/gperf_header_regen.patch b/depends/patches/fontconfig/gperf_header_regen.patch new file mode 100644 index 0000000000000..b1a70d5fb12cf --- /dev/null +++ b/depends/patches/fontconfig/gperf_header_regen.patch @@ -0,0 +1,24 @@ +commit 7b6eb33ecd88768b28c67ce5d2d68a7eed5936b6 +Author: fanquake +Date: Tue Aug 25 14:34:53 2020 +0800 + + Remove rule that causes inadvertent header regeneration + + Otherwise the makefile will needlessly attempt to re-generate the + headers with gperf. This can be dropped once the upstream build is fixed. + + See #10851. + +diff --git a/src/Makefile.in b/src/Makefile.in +index f4626ad..4ae1b00 100644 +--- a/src/Makefile.in ++++ b/src/Makefile.in +@@ -912,7 +912,7 @@ + ' - > $@.tmp && \ + mv -f $@.tmp fcobjshash.gperf && touch $@ || ( $(RM) $@.tmp && false ) + +-fcobjshash.h: Makefile fcobjshash.gperf ++fcobjshash.h: + $(AM_V_GEN) $(GPERF) --pic -m 100 fcobjshash.gperf > $@.tmp && \ + mv -f $@.tmp $@ || ( $(RM) $@.tmp && false ) + diff --git a/depends/patches/libevent/cmake_fixups.patch b/depends/patches/libevent/cmake_fixups.patch new file mode 100644 index 0000000000000..d80c1a94898c5 --- /dev/null +++ b/depends/patches/libevent/cmake_fixups.patch @@ -0,0 +1,35 @@ +cmake: set minimum version to 3.5 + +Fix generated pkg-config files, see +https://github.com/libevent/libevent/pull/1165. 
+ +--- a/CMakeLists.txt ++++ b/CMakeLists.txt +@@ -19,7 +19,7 @@ + # start libevent.sln + # + +-cmake_minimum_required(VERSION 3.1 FATAL_ERROR) ++cmake_minimum_required(VERSION 3.5 FATAL_ERROR) + + if (POLICY CMP0054) + cmake_policy(SET CMP0054 NEW) +diff --git a/cmake/AddEventLibrary.cmake b/cmake/AddEventLibrary.cmake +index 04f5837e..d8ea42c4 100644 +--- a/cmake/AddEventLibrary.cmake ++++ b/cmake/AddEventLibrary.cmake +@@ -20,12 +20,12 @@ macro(generate_pkgconfig LIB_NAME) + + set(LIBS "") + foreach (LIB ${LIB_PLATFORM}) +- set(LIBS "${LIBS} -L${LIB}") ++ set(LIBS "${LIBS} -l${LIB}") + endforeach() + + set(OPENSSL_LIBS "") + foreach(LIB ${OPENSSL_LIBRARIES}) +- set(OPENSSL_LIBS "${OPENSSL_LIBS} -L${LIB}") ++ set(OPENSSL_LIBS "${OPENSSL_LIBS} -l${LIB}") + endforeach() + + configure_file("lib${LIB_NAME}.pc.in" "lib${LIB_NAME}.pc" @ONLY) diff --git a/depends/patches/libevent/fix_mingw_link.patch b/depends/patches/libevent/fix_mingw_link.patch new file mode 100644 index 0000000000000..41cbd463c9122 --- /dev/null +++ b/depends/patches/libevent/fix_mingw_link.patch @@ -0,0 +1,25 @@ +commit d108099913c5fdbe518f3f4d711f248f8522bd10 +Author: Hennadii Stepanov <32963518+hebasto@users.noreply.github.com> +Date: Mon Apr 22 06:39:35 2024 +0100 + + build: Add `Iphlpapi` to `Libs.private` in `*.pc` files on Windows + + It has been required since https://github.com/libevent/libevent/pull/923 + at least for the `if_nametoindex` call. + + See https://github.com/libevent/libevent/pull/1622. + + +diff --git a/configure.ac b/configure.ac +index d00e063a..cd1fce37 100644 +--- a/CMakeLists.txt ++++ b/CMakeLists.txt +@@ -906,7 +906,7 @@ if(WIN32) + list(APPEND HDR_PRIVATE WIN32-Code/getopt.h) + + set(EVENT__DNS_USE_FTIME_FOR_ID 1) +- set(LIB_PLATFORM ws2_32 shell32 advapi32) ++ set(LIB_PLATFORM ws2_32 shell32 advapi32 iphlpapi) + add_definitions( + -D_CRT_SECURE_NO_WARNINGS + -D_CRT_NONSTDC_NO_DEPRECATE) diff --git a/depends/patches/libxcb/remove_pthread_stubs.patch b/depends/patches/libxcb/remove_pthread_stubs.patch new file mode 100644 index 0000000000000..1f32dea527e25 --- /dev/null +++ b/depends/patches/libxcb/remove_pthread_stubs.patch @@ -0,0 +1,12 @@ +Remove uneeded pthread-stubs dependency +--- a/configure ++++ b/configure +@@ -19695,7 +19695,7 @@ fi + NEEDED="xau >= 0.99.2" + case $host_os in + linux*) ;; +- *) NEEDED="$NEEDED pthread-stubs" ;; ++ *) NEEDED="$NEEDED" ;; + esac + + pkg_failed=no diff --git a/depends/patches/miniupnpc/cmake_get_src_addr.patch b/depends/patches/miniupnpc/cmake_get_src_addr.patch new file mode 100644 index 0000000000000..bae1b738f3f31 --- /dev/null +++ b/depends/patches/miniupnpc/cmake_get_src_addr.patch @@ -0,0 +1,22 @@ +commit cb2026239c2a3aff393952ccb0ee1c448189402d +Author: fanquake +Date: Fri Mar 22 14:03:54 2024 +0000 + + build: add MINIUPNPC_GET_SRC_ADDR to CMake build + + This mirrors the autotools build. + + See https://github.com/miniupnp/miniupnp/pull/721. 
+ +diff --git a/CMakeLists.txt b/CMakeLists.txt +index 1aa95a8..0cacf3e 100644 +--- a/CMakeLists.txt ++++ b/CMakeLists.txt +@@ -31,6 +31,7 @@ endif () + if (NOT WIN32) + target_compile_definitions(miniupnpc-private INTERFACE + MINIUPNPC_SET_SOCKET_TIMEOUT ++ MINIUPNPC_GET_SRC_ADDR + _BSD_SOURCE _DEFAULT_SOURCE) + if (NOT APPLE AND NOT CMAKE_SYSTEM_NAME MATCHES ".*BSD" AND NOT CMAKE_SYSTEM_NAME STREQUAL "SunOS") + # add_definitions (-D_POSIX_C_SOURCE=200112L) diff --git a/depends/patches/miniupnpc/dont_leak_info.patch b/depends/patches/miniupnpc/dont_leak_info.patch new file mode 100644 index 0000000000000..95a09a26dce1d --- /dev/null +++ b/depends/patches/miniupnpc/dont_leak_info.patch @@ -0,0 +1,32 @@ +commit 51f6dd991c29af66fb4f64c6feb2787cce23a1a7 +Author: fanquake +Date: Mon Jan 8 11:21:40 2024 +0000 + + Don't leak OS and miniupnpc version info in User-Agent + +diff --git a/src/minisoap.c b/src/minisoap.c +index 903ac5f..046e0ea 100644 +--- a/src/minisoap.c ++++ b/src/minisoap.c +@@ -90,7 +90,7 @@ int soapPostSubmit(SOCKET fd, + headerssize = snprintf(headerbuf, sizeof(headerbuf), + "POST %s HTTP/%s\r\n" + "Host: %s%s\r\n" +- "User-Agent: " OS_STRING " " UPNP_VERSION_STRING " MiniUPnPc/" MINIUPNPC_VERSION_STRING "\r\n" ++ "User-Agent: " UPNP_VERSION_STRING "\r\n" + "Content-Length: %d\r\n" + #if (UPNP_VERSION_MAJOR == 1) && (UPNP_VERSION_MINOR == 0) + "Content-Type: text/xml\r\n" +diff --git a/src/miniwget.c b/src/miniwget.c +index e76a5e5..0cc36fe 100644 +--- a/src/miniwget.c ++++ b/src/miniwget.c +@@ -444,7 +444,7 @@ miniwget3(const char * host, + "GET %s HTTP/%s\r\n" + "Host: %s:%d\r\n" + "Connection: Close\r\n" +- "User-Agent: " OS_STRING " " UPNP_VERSION_STRING " MiniUPnPc/" MINIUPNPC_VERSION_STRING "\r\n" ++ "User-Agent: " UPNP_VERSION_STRING "\r\n" + + "\r\n", + path, httpversion, host, port); diff --git a/depends/patches/miniupnpc/fix_windows_snprintf.patch b/depends/patches/miniupnpc/fix_windows_snprintf.patch new file mode 100644 index 0000000000000..ff9e26231e739 --- /dev/null +++ b/depends/patches/miniupnpc/fix_windows_snprintf.patch @@ -0,0 +1,25 @@ +commit a1e9de80ab99b4c956a6a4e21d3e0de6f7a1014d +Author: Hennadii Stepanov <32963518+hebasto@users.noreply.github.com> +Date: Sat Apr 20 15:14:47 2024 +0100 + + Fix macro expression that guards `snprintf` for Windows + + Otherwise, the `snprintf` is still wrongly emulated for the following + cases: + - mingw-w64 6.0.0 or new with ucrt + - mingw-w64 8.0.0 or new with iso c ext + +--- a/src/win32_snprintf.h ++++ b/src/win32_snprintf.h +@@ -23,9 +23,9 @@ + (defined(_MSC_VER) && _MSC_VER < 1900) /* Visual Studio older than 2015 */ || \ + (defined(__MINGW32__) && !defined(__MINGW64_VERSION_MAJOR) && defined(__NO_ISOCEXT)) /* mingw32 without iso c ext */ || \ + (defined(__MINGW64_VERSION_MAJOR) && /* mingw-w64 not ... */ !( \ +- (defined (__USE_MINGW_ANSI_STDIO) && __USE_MINGW_ANSI_STDIO != 0)) /* ... with ansi stdio */ || \ ++ (defined (__USE_MINGW_ANSI_STDIO) && __USE_MINGW_ANSI_STDIO != 0) /* ... with ansi stdio */ || \ + (__MINGW64_VERSION_MAJOR >= 6 && defined(_UCRT)) /* ... at least 6.0.0 with ucrt */ || \ +- (__MINGW64_VERSION_MAJOR >= 8 && !defined(__NO_ISOCEXT)) /* ... at least 8.0.0 with iso c ext */ || \ ++ (__MINGW64_VERSION_MAJOR >= 8 && !defined(__NO_ISOCEXT))) /* ... 
at least 8.0.0 with iso c ext */ || \ + 0) || \ + 0) + diff --git a/depends/patches/native_cdrkit/cdrkit-deterministic.patch b/depends/patches/native_cdrkit/cdrkit-deterministic.patch deleted file mode 100644 index 8ab0993dc4de1..0000000000000 --- a/depends/patches/native_cdrkit/cdrkit-deterministic.patch +++ /dev/null @@ -1,86 +0,0 @@ ---- cdrkit-1.1.11.old/genisoimage/tree.c 2008-10-21 19:57:47.000000000 -0400 -+++ cdrkit-1.1.11/genisoimage/tree.c 2013-12-06 00:23:18.489622668 -0500 -@@ -1139,8 +1139,9 @@ - scan_directory_tree(struct directory *this_dir, char *path, - struct directory_entry *de) - { -- DIR *current_dir; -+ int current_file; - char whole_path[PATH_MAX]; -+ struct dirent **d_list; - struct dirent *d_entry; - struct directory *parent; - int dflag; -@@ -1164,7 +1165,8 @@ - this_dir->dir_flags |= DIR_WAS_SCANNED; - - errno = 0; /* Paranoia */ -- current_dir = opendir(path); -+ //current_dir = opendir(path); -+ current_file = scandir(path, &d_list, NULL, alphasort); - d_entry = NULL; - - /* -@@ -1173,12 +1175,12 @@ - */ - old_path = path; - -- if (current_dir) { -+ if (current_file >= 0) { - errno = 0; -- d_entry = readdir(current_dir); -+ d_entry = d_list[0]; - } - -- if (!current_dir || !d_entry) { -+ if (current_file < 0 || !d_entry) { - int ret = 1; - - #ifdef USE_LIBSCHILY -@@ -1191,8 +1193,8 @@ - de->isorec.flags[0] &= ~ISO_DIRECTORY; - ret = 0; - } -- if (current_dir) -- closedir(current_dir); -+ if(d_list) -+ free(d_list); - return (ret); - } - #ifdef ABORT_DEEP_ISO_ONLY -@@ -1208,7 +1210,7 @@ - errmsgno(EX_BAD, "use Rock Ridge extensions via -R or -r,\n"); - errmsgno(EX_BAD, "or allow deep ISO9660 directory nesting via -D.\n"); - } -- closedir(current_dir); -+ free(d_list); - return (1); - } - #endif -@@ -1250,13 +1252,13 @@ - * The first time through, skip this, since we already asked - * for the first entry when we opened the directory. - */ -- if (dflag) -- d_entry = readdir(current_dir); -+ if (dflag && current_file >= 0) -+ d_entry = d_list[current_file]; - dflag++; - -- if (!d_entry) -+ if (current_file < 0) - break; -- -+ current_file--; - /* OK, got a valid entry */ - - /* If we do not want all files, then pitch the backups. 
*/ -@@ -1348,7 +1350,7 @@ - insert_file_entry(this_dir, whole_path, d_entry->d_name); - #endif /* APPLE_HYB */ - } -- closedir(current_dir); -+ free(d_list); - - #ifdef APPLE_HYB - /* diff --git a/depends/patches/native_libdmg-hfsplus/remove-libcrypto-dependency.patch b/depends/patches/native_libdmg-hfsplus/remove-libcrypto-dependency.patch deleted file mode 100644 index f346c8f2cff8b..0000000000000 --- a/depends/patches/native_libdmg-hfsplus/remove-libcrypto-dependency.patch +++ /dev/null @@ -1,45 +0,0 @@ -From 3e5fd3fb56bc9ff03beb535979e33dcf83fe1f70 Mon Sep 17 00:00:00 2001 -From: Cory Fields -Date: Thu, 8 May 2014 12:39:42 -0400 -Subject: [PATCH] dmg: remove libcrypto dependency - ---- - dmg/CMakeLists.txt | 16 ---------------- - 1 file changed, 16 deletions(-) - -diff --git a/dmg/CMakeLists.txt b/dmg/CMakeLists.txt -index eec62d6..3969f64 100644 ---- a/dmg/CMakeLists.txt -+++ b/dmg/CMakeLists.txt -@@ -1,12 +1,5 @@ --INCLUDE(FindOpenSSL) - INCLUDE(FindZLIB) - --FIND_LIBRARY(CRYPTO_LIBRARIES crypto -- PATHS -- /usr/lib -- /usr/local/lib -- ) -- - IF(NOT ZLIB_FOUND) - message(FATAL_ERROR "zlib is required for dmg!") - ENDIF(NOT ZLIB_FOUND) -@@ -18,15 +11,6 @@ link_directories(${PROJECT_BINARY_DIR}/common ${PROJECT_BINARY_DIR}/hfs) - - add_library(dmg adc.c base64.c checksum.c dmgfile.c dmglib.c filevault.c io.c partition.c resources.c udif.c) - --IF(OPENSSL_FOUND) -- add_definitions(-DHAVE_CRYPT) -- include_directories(${OPENSSL_INCLUDE_DIR}) -- target_link_libraries(dmg ${CRYPTO_LIBRARIES}) -- IF(WIN32) -- TARGET_LINK_LIBRARIES(dmg gdi32) -- ENDIF(WIN32) --ENDIF(OPENSSL_FOUND) -- - target_link_libraries(dmg common hfs z) - - add_executable(dmg-bin dmg.c) --- -2.22.0 - diff --git a/depends/patches/qrencode/cmake_fixups.patch b/depends/patches/qrencode/cmake_fixups.patch new file mode 100644 index 0000000000000..7518d756cb28f --- /dev/null +++ b/depends/patches/qrencode/cmake_fixups.patch @@ -0,0 +1,23 @@ +cmake: set minimum version to 3.5 + +Correct some dev warning output. + +diff --git a/CMakeLists.txt b/CMakeLists.txt +index 773e037..a558145 100644 +--- a/CMakeLists.txt ++++ b/CMakeLists.txt +@@ -1,4 +1,4 @@ +-cmake_minimum_required(VERSION 3.1.0) ++cmake_minimum_required(VERSION 3.5) + + project(QRencode VERSION 4.1.1 LANGUAGES C) + +@@ -20,7 +20,7 @@ list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake") + set(CMAKE_THREAD_PREFER_PTHREAD ON) + find_package(Threads) + find_package(PNG) +-find_package(Iconv) ++find_package(ICONV) + + if(CMAKE_USE_PTHREADS_INIT) + add_definitions(-DHAVE_LIBPTHREAD=1) diff --git a/depends/patches/qt/clang_18_libpng.patch b/depends/patches/qt/clang_18_libpng.patch new file mode 100644 index 0000000000000..e807905b321a5 --- /dev/null +++ b/depends/patches/qt/clang_18_libpng.patch @@ -0,0 +1,40 @@ +fix Qt macOS build with Clang 18 + + See: + https://github.com/pnggroup/libpng/commit/893b8113f04d408cc6177c6de19c9889a48faa24. + + In a similar manner as zlib (madler/zlib#895), + libpng contains a header configuration that's no longer valid and + hasn't been exercised for the macOS target. + + - The target OS conditional macros are misused. Specifically + `TARGET_OS_MAC` covers all Apple targets, including iOS, and it + should not be checked with `#if defined` as they would always be + defined (to either 1 or 0) on Apple platforms. + - `#include ` no longer works for the macOS target and results + in a compilation failure. macOS ships all required functions in + `math.h`, and clients should use `math.h` instead. 
+ +--- a/qtbase/src/3rdparty/libpng/pngpriv.h ++++ b/qtbase/src/3rdparty/libpng/pngpriv.h +@@ -514,18 +514,8 @@ + */ + # include + +-# if (defined(__MWERKS__) && defined(macintosh)) || defined(applec) || \ +- defined(THINK_C) || defined(__SC__) || defined(TARGET_OS_MAC) +- /* We need to check that hasn't already been included earlier +- * as it seems it doesn't agree with , yet we should really use +- * if possible. +- */ +-# if !defined(__MATH_H__) && !defined(__MATH_H) && !defined(__cmath__) +-# include +-# endif +-# else +-# include +-# endif ++# include ++ + # if defined(_AMIGA) && defined(__SASC) && defined(_M68881) + /* Amiga SAS/C: We must include builtin FPU functions when compiling using + * MATH=68881 diff --git a/depends/patches/qt/darwin_no_libm.patch b/depends/patches/qt/darwin_no_libm.patch new file mode 100644 index 0000000000000..38a94beeb7a69 --- /dev/null +++ b/depends/patches/qt/darwin_no_libm.patch @@ -0,0 +1,17 @@ +build: remove explicit -lm link from qttools + +This causes issues with at least the macOS cross build, and shouldn't +actually be required anywhere else. GCC with libstdc++ will already get libm. + +--- a/qtbase/src/corelib/tools/tools.pri ++++ b/qtbase/src/corelib/tools/tools.pri +@@ -111,9 +111,6 @@ qtConfig(easingcurve) { + tools/qtimeline.cpp + } + +-# Note: libm should be present by default becaue this is C++ +-unix:!macx-icc:!vxworks:!haiku:!integrity:!wasm: LIBS_PRIVATE += -lm +- + TR_EXCLUDE += ../3rdparty/* + + # MIPS DSP diff --git a/depends/patches/qt/dont_hardcode_pwd.patch b/depends/patches/qt/dont_hardcode_pwd.patch new file mode 100644 index 0000000000000..f6955b2f20ce4 --- /dev/null +++ b/depends/patches/qt/dont_hardcode_pwd.patch @@ -0,0 +1,27 @@ +Do not assume FHS in scripts + +On systems that do not follow the Filesystem Hierarchy Standard, such as +guix, the hardcoded `/bin/pwd` will fail to be found so that the script +will fail. + +Use `pwd`, instead, so that the command can be found through the normal +path search mechanism. + +See https://github.com/qt/qtbase/commit/3388de698bfb9bbc456c08f03e83bf3e749df35c. + +diff --git a/qtbase/configure b/qtbase/configure +index 08b49a8d..faea5b55 100755 +--- a/qtbase/configure ++++ b/qtbase/configure +@@ -36,9 +36,9 @@ + relconf=`basename $0` + # the directory of this script is the "source tree" + relpath=`dirname $0` +-relpath=`(cd "$relpath"; /bin/pwd)` ++relpath=`(cd "$relpath"; pwd)` + # the current directory is the "build tree" or "object tree" +-outpath=`/bin/pwd` ++outpath=`pwd` + + WHICH="which" + diff --git a/depends/patches/qt/duplicate_lcqpafonts.patch b/depends/patches/qt/duplicate_lcqpafonts.patch new file mode 100644 index 0000000000000..c460b51dcff67 --- /dev/null +++ b/depends/patches/qt/duplicate_lcqpafonts.patch @@ -0,0 +1,104 @@ +QtGui: Fix duplication of logging category lcQpaFonts + +Move it to qplatformfontdatabase.h. 
+ +Upstream commit: + - Qt 6.0: ab01885e48873fb2ad71841a3f1627fe4d9cd835 + +--- a/qtbase/src/gui/text/qplatformfontdatabase.cpp ++++ b/qtbase/src/gui/text/qplatformfontdatabase.cpp +@@ -52,6 +52,8 @@ + + QT_BEGIN_NAMESPACE + ++Q_LOGGING_CATEGORY(lcQpaFonts, "qt.qpa.fonts") ++ + void qt_registerFont(const QString &familyname, const QString &stylename, + const QString &foundryname, int weight, + QFont::Style style, int stretch, bool antialiased, + +--- a/qtbase/src/gui/text/qplatformfontdatabase.h ++++ b/qtbase/src/gui/text/qplatformfontdatabase.h +@@ -50,6 +50,7 @@ + // + + #include ++#include + #include + #include + #include +@@ -62,6 +63,7 @@ + + QT_BEGIN_NAMESPACE + ++Q_DECLARE_LOGGING_CATEGORY(lcQpaFonts) + + class QWritingSystemsPrivate; + + +--- a/qtbase/src/platformsupport/fontdatabases/mac/qfontengine_coretext.mm ++++ b/qtbase/src/platformsupport/fontdatabases/mac/qfontengine_coretext.mm +@@ -86,8 +86,6 @@ + + QT_BEGIN_NAMESPACE + +-Q_LOGGING_CATEGORY(lcQpaFonts, "qt.qpa.fonts") +- + static float SYNTHETIC_ITALIC_SKEW = std::tan(14.f * std::acos(0.f) / 90.f); + + bool QCoreTextFontEngine::ct_getSfntTable(void *user_data, uint tag, uchar *buffer, uint *length) + +--- a/qtbase/src/platformsupport/fontdatabases/mac/qfontengine_coretext_p.h ++++ b/qtbase/src/platformsupport/fontdatabases/mac/qfontengine_coretext_p.h +@@ -64,8 +64,6 @@ + + QT_BEGIN_NAMESPACE + +-Q_DECLARE_LOGGING_CATEGORY(lcQpaFonts) +- + class QCoreTextFontEngine : public QFontEngine + { + Q_GADGET + +--- a/qtbase/src/platformsupport/fontdatabases/windows/qwindowsfontdatabase.cpp ++++ b/qtbase/src/platformsupport/fontdatabases/windows/qwindowsfontdatabase.cpp +@@ -68,8 +68,6 @@ + + QT_BEGIN_NAMESPACE + +-Q_LOGGING_CATEGORY(lcQpaFonts, "qt.qpa.fonts") +- + #ifndef QT_NO_DIRECTWRITE + // ### fixme: Consider direct linking of dwrite.dll once Windows Vista pre SP2 is dropped (QTBUG-49711) + + +--- a/qtbase/src/platformsupport/fontdatabases/windows/qwindowsfontdatabase_p.h ++++ b/qtbase/src/platformsupport/fontdatabases/windows/qwindowsfontdatabase_p.h +@@ -63,8 +63,6 @@ + + QT_BEGIN_NAMESPACE + +-Q_DECLARE_LOGGING_CATEGORY(lcQpaFonts) +- + class QWindowsFontEngineData + { + Q_DISABLE_COPY_MOVE(QWindowsFontEngineData) + +--- a/qtbase/src/platformsupport/themes/genericunix/qgenericunixthemes.cpp ++++ b/qtbase/src/platformsupport/themes/genericunix/qgenericunixthemes.cpp +@@ -40,6 +40,7 @@ + #include "qgenericunixthemes_p.h" + + #include "qpa/qplatformtheme_p.h" ++#include "qpa/qplatformfontdatabase.h" + + #include + #include +@@ -76,7 +77,6 @@ + QT_BEGIN_NAMESPACE + + Q_DECLARE_LOGGING_CATEGORY(qLcTray) +-Q_LOGGING_CATEGORY(lcQpaFonts, "qt.qpa.fonts") + + ResourceHelper::ResourceHelper() + { diff --git a/depends/patches/qt/fix-macos-linker.patch b/depends/patches/qt/fix-macos-linker.patch new file mode 100644 index 0000000000000..e439685656550 --- /dev/null +++ b/depends/patches/qt/fix-macos-linker.patch @@ -0,0 +1,55 @@ +qmake: Don't error out if QMAKE_DEFAULT_LIBDIRS is empty on macOS + +The new linker in Xcode 15 doesn't provide any default linker or +framework paths when requested via -v, but still seems to use the +default paths documented in the ld man page. + +We trust that linker will do the right thing, even if we don't +know of its default linker paths. + +We also need to opt out of the default fallback logic to +set the libdirs to /lib and /usr/lib. 
+ +This may result in UnixMakefileGenerator::findLibraries finding +different libraries than expected, if additional paths are +passed with -L, which will then take precedence for qmake, +even if the linker itself will use the library from the +SDK's default paths. This should hopefully not be an issue +in practice, as we don't turn -lFoo into absolute paths in +qmake, so the only risk is that we're picking up the wrong +prl files and adding additional dependencies that the lib +in the SDK doesn't have. + +Upstream commits: + - Qt 5.15.16: Not yet publicly available. + - Qt dev: cdf64b0e47115cc473e1afd1472b4b09e130b2a5 + +For other Qt branches see +https://codereview.qt-project.org/q/I2347b26e2df0828471373b0e15b8c9089274c65d + +--- old/qtbase/mkspecs/features/toolchain.prf ++++ new/qtbase/mkspecs/features/toolchain.prf +@@ -288,9 +288,12 @@ isEmpty($${target_prefix}.INCDIRS) { + } + } + } +- isEmpty(QMAKE_DEFAULT_LIBDIRS)|isEmpty(QMAKE_DEFAULT_INCDIRS): \ ++ isEmpty(QMAKE_DEFAULT_INCDIRS): \ + !integrity: \ +- error("failed to parse default search paths from compiler output") ++ error("failed to parse default include paths from compiler output") ++ isEmpty(QMAKE_DEFAULT_LIBDIRS): \ ++ !integrity:!darwin: \ ++ error("failed to parse default library paths from compiler output") + QMAKE_DEFAULT_LIBDIRS = $$unique(QMAKE_DEFAULT_LIBDIRS) + } else: ghs { + cmd = $$QMAKE_CXX $$QMAKE_CXXFLAGS -$${LITERAL_HASH} -o /tmp/fake_output /tmp/fake_input.cpp +@@ -412,7 +415,7 @@ isEmpty($${target_prefix}.INCDIRS) { + QMAKE_DEFAULT_INCDIRS = $$split(INCLUDE, $$QMAKE_DIRLIST_SEP) + } + +- unix:if(!cross_compile|host_build) { ++ unix:!darwin:if(!cross_compile|host_build) { + isEmpty(QMAKE_DEFAULT_INCDIRS): QMAKE_DEFAULT_INCDIRS = /usr/include /usr/local/include + isEmpty(QMAKE_DEFAULT_LIBDIRS): QMAKE_DEFAULT_LIBDIRS = /lib /usr/lib + } diff --git a/depends/patches/qt/fix_android_jni_static.patch b/depends/patches/qt/fix_android_jni_static.patch deleted file mode 100644 index 2f6ff00f40c25..0000000000000 --- a/depends/patches/qt/fix_android_jni_static.patch +++ /dev/null @@ -1,18 +0,0 @@ ---- old/qtbase/src/plugins/platforms/android/androidjnimain.cpp -+++ new/qtbase/src/plugins/platforms/android/androidjnimain.cpp -@@ -890,6 +890,14 @@ - __android_log_print(ANDROID_LOG_FATAL, "Qt", "registerNatives failed"); - return -1; - } -+ -+ const jint ret = QT_PREPEND_NAMESPACE(QtAndroidPrivate::initJNI(vm, env)); -+ if (ret != 0) -+ { -+ __android_log_print(ANDROID_LOG_FATAL, "Qt", "initJNI failed"); -+ return ret; -+ } -+ - QWindowSystemInterfacePrivate::TabletEvent::setPlatformSynthesizesMouse(false); - - m_javaVM = vm; - diff --git a/depends/patches/qt/fix_android_qmake_conf.patch b/depends/patches/qt/fix_android_qmake_conf.patch deleted file mode 100644 index 13bfff9776474..0000000000000 --- a/depends/patches/qt/fix_android_qmake_conf.patch +++ /dev/null @@ -1,20 +0,0 @@ ---- old/qtbase/mkspecs/android-clang/qmake.conf -+++ new/qtbase/mkspecs/android-clang/qmake.conf -@@ -30,7 +30,7 @@ - QMAKE_CFLAGS += -target mips64el-none-linux-android - - QMAKE_CFLAGS += -gcc-toolchain $$NDK_TOOLCHAIN_PATH --QMAKE_LINK = $$QMAKE_CXX $$QMAKE_CFLAGS -Wl,--exclude-libs,libgcc.a -+QMAKE_LINK = $$QMAKE_CXX $$QMAKE_CFLAGS -Wl,--exclude-libs,libgcc.a -nostdlib++ - QMAKE_CFLAGS += -DANDROID_HAS_WSTRING --sysroot=$$NDK_ROOT/sysroot \ - -isystem $$NDK_ROOT/sysroot/usr/include/$$NDK_TOOLS_PREFIX \ - -isystem $$NDK_ROOT/sources/cxx-stl/llvm-libc++/include \ -@@ -40,7 +40,7 @@ - ANDROID_SOURCES_CXX_STL_LIBDIR = 
$$NDK_ROOT/sources/cxx-stl/llvm-libc++/libs/$$ANDROID_TARGET_ARCH - - ANDROID_STDCPP_PATH = $$ANDROID_SOURCES_CXX_STL_LIBDIR/libc++_shared.so --ANDROID_CXX_STL_LIBS = -lc++ -+ANDROID_CXX_STL_LIBS = -lc++_shared - - QMAKE_ARM_CFLAGS_RELEASE = -Oz - QMAKE_ARM_CFLAGS_RELEASE_WITH_DEBUGINFO = -g -Oz diff --git a/depends/patches/qt/fix_configure_mac.patch b/depends/patches/qt/fix_configure_mac.patch deleted file mode 100644 index 0d7dd647debca..0000000000000 --- a/depends/patches/qt/fix_configure_mac.patch +++ /dev/null @@ -1,50 +0,0 @@ ---- old/qtbase/mkspecs/features/mac/sdk.prf 2018-02-08 10:24:48.000000000 -0800 -+++ new/qtbase/mkspecs/features/mac/sdk.prf 2018-03-23 10:38:56.000000000 -0700 -@@ -8,21 +8,21 @@ - defineReplace(xcodeSDKInfo) { - info = $$1 - equals(info, "Path"): \ -- info = --show-sdk-path -+ infoarg = --show-sdk-path - equals(info, "PlatformPath"): \ -- info = --show-sdk-platform-path -+ infoarg = --show-sdk-platform-path - equals(info, "SDKVersion"): \ -- info = --show-sdk-version -+ infoarg = --show-sdk-version - sdk = $$2 - isEmpty(sdk): \ - sdk = $$QMAKE_MAC_SDK - - isEmpty(QMAKE_MAC_SDK.$${sdk}.$${info}) { -- QMAKE_MAC_SDK.$${sdk}.$${info} = $$system("/usr/bin/xcrun --sdk $$sdk $$info 2>/dev/null") -+ QMAKE_MAC_SDK.$${sdk}.$${info} = $$system("/usr/bin/xcrun --sdk $$sdk $$infoarg 2>/dev/null") - # --show-sdk-platform-path won't work for Command Line Tools; this is fine - # only used by the XCTest backend to testlib -- isEmpty(QMAKE_MAC_SDK.$${sdk}.$${info}):if(!isEmpty(QMAKE_XCODEBUILD_PATH)|!equals(info, "--show-sdk-platform-path")): \ -- error("Could not resolve SDK $$info for \'$$sdk\'") -+ isEmpty(QMAKE_MAC_SDK.$${sdk}.$${info}):if(!isEmpty(QMAKE_XCODEBUILD_PATH)|!equals(infoarg, "--show-sdk-platform-path")): \ -+ error("Could not resolve SDK $$info for \'$$sdk\' using $$infoarg") - cache(QMAKE_MAC_SDK.$${sdk}.$${info}, set stash, QMAKE_MAC_SDK.$${sdk}.$${info}) - } - ---- old/qtbase/configure 2018-02-08 10:24:48.000000000 -0800 -+++ new/qtbase/configure 2018-03-23 05:42:29.000000000 -0700 -@@ -232,8 +232,13 @@ - - sdk=$(getSingleQMakeVariable "QMAKE_MAC_SDK" "$1") - if [ -z "$sdk" ]; then echo "QMAKE_MAC_SDK must be set when building on Mac" >&2; exit 1; fi -- sysroot=$(/usr/bin/xcrun --sdk $sdk --show-sdk-path 2>/dev/null) -- if [ -z "$sysroot" ]; then echo "Failed to resolve SDK path for '$sdk'" >&2; exit 1; fi -+ sysroot=$(getSingleQMakeVariable "QMAKE_MAC_SDK_PATH" "$1") -+ -+ echo "sysroot pre-configured as $sysroot"; -+ if [ -z "$sysroot" ]; then -+ sysroot=$(/usr/bin/xcrun --sdk $sdk --show-sdk-path 2>/dev/null) -+ if [ -z "$sysroot" ]; then echo "Failed to resolve SDK path for '$sdk'" >&2; exit 1; fi -+ fi - - case "$sdk" in - macosx*) - - diff --git a/depends/patches/qt/fix_no_printer.patch b/depends/patches/qt/fix_no_printer.patch deleted file mode 100644 index f868ca2577554..0000000000000 --- a/depends/patches/qt/fix_no_printer.patch +++ /dev/null @@ -1,19 +0,0 @@ ---- x/qtbase/src/plugins/platforms/cocoa/qprintengine_mac_p.h -+++ y/qtbase/src/plugins/platforms/cocoa/qprintengine_mac_p.h -@@ -52,6 +52,7 @@ - // - - #include -+#include - - #ifndef QT_NO_PRINTER - ---- x/qtbase/src/plugins/plugins.pro -+++ y/qtbase/src/plugins/plugins.pro -@@ -8,6 +8,3 @@ qtHaveModule(gui) { - qtConfig(imageformatplugin): SUBDIRS *= imageformats - !android:qtConfig(library): SUBDIRS *= generic - } -- --!winrt:qtHaveModule(printsupport): \ -- SUBDIRS += printsupport diff --git a/depends/patches/qt/fix_qt_pkgconfig.patch b/depends/patches/qt/fix_qt_pkgconfig.patch index 
34302a9f2d2eb..73f4d89f7354e 100644 --- a/depends/patches/qt/fix_qt_pkgconfig.patch +++ b/depends/patches/qt/fix_qt_pkgconfig.patch @@ -1,11 +1,11 @@ --- old/qtbase/mkspecs/features/qt_module.prf +++ new/qtbase/mkspecs/features/qt_module.prf -@@ -245,7 +245,7 @@ +@@ -269,7 +269,7 @@ load(qt_installs) load(qt_targets) # this builds on top of qt_common --!internal_module:!lib_bundle:if(unix|mingw) { -+unix|mingw { +-!internal_module:if(unix|mingw):!if(darwin:debug_and_release:CONFIG(debug, debug|release)) { ++if(unix|mingw):!if(darwin:debug_and_release:CONFIG(debug, debug|release)) { CONFIG += create_pc QMAKE_PKGCONFIG_DESTDIR = pkgconfig host_build: \ diff --git a/depends/patches/qt/fix_rcc_determinism.patch b/depends/patches/qt/fix_rcc_determinism.patch deleted file mode 100644 index c1b07fe23afdb..0000000000000 --- a/depends/patches/qt/fix_rcc_determinism.patch +++ /dev/null @@ -1,15 +0,0 @@ ---- old/qtbase/src/tools/rcc/rcc.cpp -+++ new/qtbase/src/tools/rcc/rcc.cpp -@@ -207,7 +207,11 @@ void RCCFileInfo::writeDataInfo(RCCResourceLibrary &lib) - if (lib.formatVersion() >= 2) { - // last modified time stamp - const QDateTime lastModified = m_fileInfo.lastModified(); -- lib.writeNumber8(quint64(lastModified.isValid() ? lastModified.toMSecsSinceEpoch() : 0)); -+ quint64 lastmod = quint64(lastModified.isValid() ? lastModified.toMSecsSinceEpoch() : 0); -+ static const quint64 sourceDate = 1000 * qgetenv("QT_RCC_SOURCE_DATE_OVERRIDE").toULongLong(); -+ if (sourceDate != 0) -+ lastmod = sourceDate; -+ lib.writeNumber8(lastmod); - if (text || pass1) - lib.writeChar('\n'); - } diff --git a/depends/patches/qt/fix_riscv64_arch.patch b/depends/patches/qt/fix_riscv64_arch.patch deleted file mode 100644 index e7f29f01f9cd7..0000000000000 --- a/depends/patches/qt/fix_riscv64_arch.patch +++ /dev/null @@ -1,14 +0,0 @@ -diff --git a/qtbase/src/3rdparty/double-conversion/include/double-conversion/utils.h b/qtbase/src/3rdparty/double-conversion/include/double-conversion/utils.h -index 20bfd36..93729fa 100644 ---- a/qtbase/src/3rdparty/double-conversion/include/double-conversion/utils.h -+++ b/qtbase/src/3rdparty/double-conversion/include/double-conversion/utils.h -@@ -65,7 +65,8 @@ - defined(__sparc__) || defined(__sparc) || defined(__s390__) || \ - defined(__SH4__) || defined(__alpha__) || \ - defined(_MIPS_ARCH_MIPS32R2) || \ -- defined(__AARCH64EL__) -+ defined(__AARCH64EL__) || defined(__aarch64__) || \ -+ defined(__riscv) - #define DOUBLE_CONVERSION_CORRECT_DOUBLE_OPERATIONS 1 - #elif defined(_M_IX86) || defined(__i386__) || defined(__i386) - #if defined(_WIN32) diff --git a/depends/patches/qt/guix_cross_lib_path.patch b/depends/patches/qt/guix_cross_lib_path.patch new file mode 100644 index 0000000000000..7911dc21d7dba --- /dev/null +++ b/depends/patches/qt/guix_cross_lib_path.patch @@ -0,0 +1,17 @@ +Facilitate guix building with CROSS_LIBRARY_PATH + +See discussion in https://github.com/bitcoin/bitcoin/pull/15277. 
+ +--- a/qtbase/mkspecs/features/toolchain.prf ++++ b/qtbase/mkspecs/features/toolchain.prf +@@ -236,8 +236,8 @@ isEmpty($${target_prefix}.INCDIRS) { + add_libraries = false + for (line, output) { + line ~= s/^[ \\t]*// # remove leading spaces +- contains(line, "LIBRARY_PATH=.*") { +- line ~= s/^LIBRARY_PATH=// # remove leading LIBRARY_PATH= ++ contains(line, "(CROSS_)?LIBRARY_PATH=.*") { ++ line ~= s/^(CROSS_)?LIBRARY_PATH=// # remove leading (CROSS_)?LIBRARY_PATH= + equals(QMAKE_HOST.os, Windows): \ + paths = $$split(line, ;) + else: \ diff --git a/depends/patches/qt/mac-qmake.conf b/depends/patches/qt/mac-qmake.conf index 4cd96df29ff30..a29db20004bbe 100644 --- a/depends/patches/qt/mac-qmake.conf +++ b/depends/patches/qt/mac-qmake.conf @@ -1,26 +1,23 @@ MAKEFILE_GENERATOR = UNIX -CONFIG += app_bundle incremental global_init_link_order lib_version_first plugin_no_soname absolute_library_soname +CONFIG += app_bundle incremental lib_version_first absolute_library_soname QMAKE_INCREMENTAL_STYLE = sublib include(../common/macx.conf) include(../common/gcc-base-mac.conf) include(../common/clang.conf) include(../common/clang-mac.conf) QMAKE_MAC_SDK_PATH=$${MAC_SDK_PATH} -QMAKE_XCODE_VERSION=4.3 +QMAKE_XCODE_VERSION = $${XCODE_VERSION} QMAKE_XCODE_DEVELOPER_PATH=/Developer -QMAKE_MACOSX_DEPLOYMENT_TARGET = $${MAC_MIN_VERSION} QMAKE_MAC_SDK=macosx QMAKE_MAC_SDK.macosx.Path = $${MAC_SDK_PATH} QMAKE_MAC_SDK.macosx.platform_name = macosx QMAKE_MAC_SDK.macosx.SDKVersion = $${MAC_SDK_VERSION} QMAKE_MAC_SDK.macosx.PlatformPath = /phony -QMAKE_APPLE_DEVICE_ARCHS=x86_64 +QMAKE_CXXFLAGS += -fuse-ld=lld !host_build: QMAKE_CFLAGS += -target $${MAC_TARGET} !host_build: QMAKE_OBJECTIVE_CFLAGS += $$QMAKE_CFLAGS -!host_build: QMAKE_CXXFLAGS += $$QMAKE_CFLAGS +!host_build: QMAKE_CXXFLAGS += -target $${MAC_TARGET} !host_build: QMAKE_LFLAGS += -target $${MAC_TARGET} QMAKE_AR = $${CROSS_COMPILE}ar cq QMAKE_RANLIB=$${CROSS_COMPILE}ranlib -QMAKE_LIBTOOL=$${CROSS_COMPILE}libtool -QMAKE_INSTALL_NAME_TOOL=$${CROSS_COMPILE}install_name_tool load(qt_config) diff --git a/depends/patches/qt/memory_resource.patch b/depends/patches/qt/memory_resource.patch new file mode 100644 index 0000000000000..312f0669f6311 --- /dev/null +++ b/depends/patches/qt/memory_resource.patch @@ -0,0 +1,49 @@ +Fix unusable memory_resource on macos + +See https://bugreports.qt.io/browse/QTBUG-117484 +and https://bugreports.qt.io/browse/QTBUG-114316 + +--- a/qtbase/src/corelib/tools/qduplicatetracker_p.h ++++ b/qtbase/src/corelib/tools/qduplicatetracker_p.h +@@ -52,7 +52,7 @@ + + #include + +-#if QT_HAS_INCLUDE() && __cplusplus > 201402L ++#ifdef __cpp_lib_memory_resource + # include + # include + #else + +--- a/qtbase/src/corelib/global/qcompilerdetection.h ++++ b/qtbase/src/corelib/global/qcompilerdetection.h +@@ -1055,16 +1055,22 @@ + # endif // !_HAS_CONSTEXPR + # endif // !__GLIBCXX__ && !_LIBCPP_VERSION + # endif // Q_OS_QNX +-# if (defined(Q_CC_CLANG) || defined(Q_CC_INTEL)) && defined(Q_OS_MAC) && defined(__GNUC_LIBSTD__) \ +- && ((__GNUC_LIBSTD__-0) * 100 + __GNUC_LIBSTD_MINOR__-0 <= 402) ++# if (defined(Q_CC_CLANG) || defined(Q_CC_INTEL)) && defined(Q_OS_MAC) ++# if defined(__GNUC_LIBSTD__) && ((__GNUC_LIBSTD__-0) * 100 + __GNUC_LIBSTD_MINOR__-0 <= 402) + // Apple has not updated libstdc++ since 2007, which means it does not have + // or std::move. 
Let's disable these features +-# undef Q_COMPILER_INITIALIZER_LISTS +-# undef Q_COMPILER_RVALUE_REFS +-# undef Q_COMPILER_REF_QUALIFIERS ++# undef Q_COMPILER_INITIALIZER_LISTS ++# undef Q_COMPILER_RVALUE_REFS ++# undef Q_COMPILER_REF_QUALIFIERS + // Also disable , since it's clearly not there +-# undef Q_COMPILER_ATOMICS +-# endif ++# undef Q_COMPILER_ATOMICS ++# endif ++# if defined(__cpp_lib_memory_resource) \ ++ && ((defined(__MAC_OS_X_VERSION_MIN_REQUIRED) && __MAC_OS_X_VERSION_MIN_REQUIRED < 140000) \ ++ || (defined(__IPHONE_OS_VERSION_MIN_REQUIRED) && __IPHONE_OS_VERSION_MIN_REQUIRED < 170000)) ++# undef __cpp_lib_memory_resource // Only supported on macOS 14 and iOS 17 ++# endif ++# endif // (defined(Q_CC_CLANG) || defined(Q_CC_INTEL)) && defined(Q_OS_MAC) + # if defined(Q_CC_CLANG) && defined(Q_CC_INTEL) && Q_CC_INTEL >= 1500 + // ICC 15.x and 16.0 have their own implementation of std::atomic, which is activated when in Clang mode + // (probably because libc++'s on OS X failed to compile), but they're missing some diff --git a/depends/patches/qt/no-xlib.patch b/depends/patches/qt/no-xlib.patch index fe82c2c73cb09..0f7965d2ea8b7 100644 --- a/depends/patches/qt/no-xlib.patch +++ b/depends/patches/qt/no-xlib.patch @@ -4,12 +4,7 @@ Date: Thu, 18 Jul 2019 17:22:05 -0400 Subject: [PATCH] Wrap xlib related code blocks in #if's They are not necessary to compile QT. ---- - qtbase/src/plugins/platforms/xcb/qxcbcursor.cpp | 8 ++++++++ - 1 file changed, 8 insertions(+) -diff --git a/qtbase/src/plugins/platforms/xcb/qxcbcursor.cpp b/qtbase/src/plugins/platforms/xcb/qxcbcursor.cpp -index 7c62c2e2b3..c05c6c0a07 100644 --- a/qtbase/src/plugins/platforms/xcb/qxcbcursor.cpp +++ b/qtbase/src/plugins/platforms/xcb/qxcbcursor.cpp @@ -49,7 +49,9 @@ @@ -22,15 +17,15 @@ index 7c62c2e2b3..c05c6c0a07 100644 #include #include -@@ -384,6 +386,7 @@ void QXcbCursor::changeCursor(QCursor *cursor, QWindow *widget) - w->setCursor(c, isBitmapCursor); +@@ -391,6 +393,7 @@ void QXcbCursor::changeCursor(QCursor *cursor, QWindow *window) + xcb_flush(xcb_connection()); } +#if QT_CONFIG(xcb_xlib) && QT_CONFIG(library) static int cursorIdForShape(int cshape) { int cursorId = 0; -@@ -437,6 +440,7 @@ static int cursorIdForShape(int cshape) +@@ -444,6 +447,7 @@ static int cursorIdForShape(int cshape) } return cursorId; } @@ -47,8 +42,8 @@ index 7c62c2e2b3..c05c6c0a07 100644 +#endif xcb_cursor_t cursor = XCB_NONE; - // Try Xcursor first -@@ -589,6 +595,7 @@ xcb_cursor_t QXcbCursor::createFontCursor(int cshape) + #if QT_CONFIG(xcb_xlib) && QT_CONFIG(library) +@@ -590,6 +596,7 @@ xcb_cursor_t QXcbCursor::createFontCursor(int cshape) // Non-standard X11 cursors are created from bitmaps cursor = createNonStandardCursor(cshape); @@ -56,14 +51,11 @@ index 7c62c2e2b3..c05c6c0a07 100644 // Create a glpyh cursor if everything else failed if (!cursor && cursorId) { cursor = xcb_generate_id(conn); -@@ -596,6 +603,7 @@ xcb_cursor_t QXcbCursor::createFontCursor(int cshape) +@@ -597,6 +604,7 @@ xcb_cursor_t QXcbCursor::createFontCursor(int cshape) cursorId, cursorId + 1, 0xFFFF, 0xFFFF, 0xFFFF, 0, 0, 0); } +#endif if (cursor && cshape >= 0 && cshape < Qt::LastCursor && connection()->hasXFixes()) { - const char *name = cursorNames[cshape]; --- -2.22.0 - + const char *name = cursorNames[cshape].front(); diff --git a/depends/patches/qt/no_warnings_for_symbols.patch b/depends/patches/qt/no_warnings_for_symbols.patch new file mode 100644 index 0000000000000..11cdc599ed3d6 --- /dev/null +++ b/depends/patches/qt/no_warnings_for_symbols.patch @@ 
-0,0 +1,11 @@ +--- a/qtbase/mkspecs/features/mac/no_warn_empty_obj_files.prf ++++ b/qtbase/mkspecs/features/mac/no_warn_empty_obj_files.prf +@@ -1,7 +1,7 @@ + # Prevent warnings about object files without any symbols. This is a common + # thing in Qt as we tend to build files unconditionally, and then use ifdefs + # to compile out parts that are not relevant. +-QMAKE_RANLIB += -no_warning_for_no_symbols ++# QMAKE_RANLIB += -no_warning_for_no_symbols + + # We have to tell 'ar' to not run ranlib by itself + QMAKE_AR += -S diff --git a/depends/patches/qt/qt.pro b/depends/patches/qt/qt.pro new file mode 100644 index 0000000000000..6d8b7fdb6a2ca --- /dev/null +++ b/depends/patches/qt/qt.pro @@ -0,0 +1,12 @@ +# Create the super cache so modules will add themselves to it. +cache(, super) + +!QTDIR_build: cache(CONFIG, add, $$list(QTDIR_build)) + +TEMPLATE = subdirs +SUBDIRS = qtbase qttools qttranslations + +qttools.depends = qtbase +qttranslations.depends = qttools + +load(qt_configure) diff --git a/depends/patches/qt/qtbase-moc-ignore-gcc-macro.patch b/depends/patches/qt/qtbase-moc-ignore-gcc-macro.patch new file mode 100644 index 0000000000000..f0c14a9400e12 --- /dev/null +++ b/depends/patches/qt/qtbase-moc-ignore-gcc-macro.patch @@ -0,0 +1,17 @@ +The moc executable loops through headers on CPLUS_INCLUDE_PATH and stumbles +on the GCC internal _GLIBCXX_VISIBILITY macro. Tell it to ignore it as it is +not supposed to be looking there to begin with. + +Upstream report: https://bugreports.qt.io/browse/QTBUG-83160 + +diff --git a/qtbase/src/tools/moc/main.cpp b/qtbase/src/tools/moc/main.cpp +--- a/qtbase/src/tools/moc/main.cpp ++++ b/qtbase/src/tools/moc/main.cpp +@@ -238,6 +238,7 @@ int runMoc(int argc, char **argv) + dummyVariadicFunctionMacro.arguments += Symbol(0, PP_IDENTIFIER, "__VA_ARGS__"); + pp.macros["__attribute__"] = dummyVariadicFunctionMacro; + pp.macros["__declspec"] = dummyVariadicFunctionMacro; ++ pp.macros["_GLIBCXX_VISIBILITY"] = dummyVariadicFunctionMacro; + + QString filename; + QString output; diff --git a/depends/patches/qt/qttools_src.pro b/depends/patches/qt/qttools_src.pro new file mode 100644 index 0000000000000..6ef71a0942735 --- /dev/null +++ b/depends/patches/qt/qttools_src.pro @@ -0,0 +1,6 @@ +TEMPLATE = subdirs +SUBDIRS = linguist + +fb = force_bootstrap +CONFIG += $$fb +cache(CONFIG, add, fb) diff --git a/depends/patches/qt/rcc_hardcode_timestamp.patch b/depends/patches/qt/rcc_hardcode_timestamp.patch new file mode 100644 index 0000000000000..03f3897975646 --- /dev/null +++ b/depends/patches/qt/rcc_hardcode_timestamp.patch @@ -0,0 +1,24 @@ +Hardcode last modified timestamp in Qt RCC + +This change allows the already built qt package to be reused even with +the SOURCE_DATE_EPOCH variable set, e.g., for Guix builds. + + +--- old/qtbase/src/tools/rcc/rcc.cpp ++++ new/qtbase/src/tools/rcc/rcc.cpp +@@ -227,14 +227,7 @@ void RCCFileInfo::writeDataInfo(RCCResourceLibrary &lib) + + if (lib.formatVersion() >= 2) { + // last modified time stamp +- const QDateTime lastModified = m_fileInfo.lastModified(); +- quint64 lastmod = quint64(lastModified.isValid() ? 
lastModified.toMSecsSinceEpoch() : 0); +- static const quint64 sourceDate = 1000 * qgetenv("QT_RCC_SOURCE_DATE_OVERRIDE").toULongLong(); +- if (sourceDate != 0) +- lastmod = sourceDate; +- static const quint64 sourceDate2 = 1000 * qgetenv("SOURCE_DATE_EPOCH").toULongLong(); +- if (sourceDate2 != 0) +- lastmod = sourceDate2; ++ quint64 lastmod = quint64(1); + lib.writeNumber8(lastmod); + if (text || pass1) + lib.writeChar('\n'); diff --git a/depends/patches/qt/utc_from_string_no_optimize.patch b/depends/patches/qt/utc_from_string_no_optimize.patch new file mode 100644 index 0000000000000..533ef59b37960 --- /dev/null +++ b/depends/patches/qt/utc_from_string_no_optimize.patch @@ -0,0 +1,84 @@ +Modify optimisation flags for various functions. +This fixes non-determinism issues in the asm produced for +these function when cross-compiling on x86_64 and aarch64 for +the arm64-apple-darwin HOST. + +--- a/qtbase/src/corelib/itemmodels/qitemselectionmodel.cpp ++++ b/qtbase/src/corelib/itemmodels/qitemselectionmodel.cpp +@@ -1078,9 +1078,9 @@ void QItemSelectionModelPrivate::_q_layoutChanged(const QList &parents = QList(), QAbstractItemModel::LayoutChangeHint hint = QAbstractItemModel::NoLayoutChangeHint); +- void _q_layoutChanged(const QList &parents = QList(), QAbstractItemModel::LayoutChangeHint hint = QAbstractItemModel::NoLayoutChangeHint); ++ __attribute__ ((optnone)) void _q_layoutChanged(const QList &parents = QList(), QAbstractItemModel::LayoutChangeHint hint = QAbstractItemModel::NoLayoutChangeHint); + + inline void remove(QList &r) + { + +--- a/qtbase/src/corelib/time/qdatetimeparser_p.h ++++ b/qtbase/src/corelib/time/qdatetimeparser_p.h +@@ -215,7 +215,7 @@ private: + : value(ok == Invalid ? -1 : val), used(read), zeroes(zs), state(ok) + {} + }; +- ParsedSection parseSection(const QDateTime ¤tValue, int sectionIndex, ++ __attribute__ ((optnone)) ParsedSection parseSection(const QDateTime ¤tValue, int sectionIndex, + int offset, QString *text) const; + int findMonth(const QString &str1, int monthstart, int sectionIndex, + int year, QString *monthName = nullptr, int *used = nullptr) const; + +--- a/qtbase/src/corelib/time/qtimezoneprivate_p.h ++++ b/qtbase/src/corelib/time/qtimezoneprivate_p.h +@@ -191,7 +191,7 @@ public: + virtual ~QUtcTimeZonePrivate(); + + // Fall-back for UTC[+-]\d+(:\d+){,2} IDs. 
+- static qint64 offsetFromUtcString(const QByteArray &id); ++ static __attribute__ ((optnone)) qint64 offsetFromUtcString(const QByteArray &id); + + QUtcTimeZonePrivate *clone() const override; + +--- a/qtbase/src/widgets/widgets/qcalendarwidget.cpp ++++ b/qtbase/src/widgets/widgets/qcalendarwidget.cpp +@@ -329,13 +329,13 @@ class QCalendarYearValidator : public QCalendarDateSectionValidator + + public: + QCalendarYearValidator(); +- virtual Section handleKey(int key) override; ++ __attribute__ ((optnone)) virtual Section handleKey(int key) override; + virtual QDate applyToDate(QDate date, QCalendar cal) const override; + virtual void setDate(QDate date, QCalendar cal) override; + virtual QString text() const override; + virtual QString text(QDate date, QCalendar cal, int repeat) const override; + private: +- int pow10(int n); ++ __attribute__ ((optnone)) int pow10(int n); + int m_pos; + int m_year; + int m_oldYear; diff --git a/depends/patches/qt/windows_lto.patch b/depends/patches/qt/windows_lto.patch new file mode 100644 index 0000000000000..ea379a60f14a1 --- /dev/null +++ b/depends/patches/qt/windows_lto.patch @@ -0,0 +1,31 @@ +Qt (for Windows) fails to build under LTO, due to multiple definition issues, i.e + +multiple definition of `QAccessibleLineEdit::~QAccessibleLineEdit()'; + +Possibly related to https://gcc.gnu.org/bugzilla/show_bug.cgi?id=94156. + +diff --git a/qtbase/src/widgets/accessible/simplewidgets.cpp b/qtbase/src/widgets/accessible/simplewidgets.cpp +index 107fd729fe..0e61878f39 100644 +--- a/qtbase/src/widgets/accessible/simplewidgets.cpp ++++ b/qtbase/src/widgets/accessible/simplewidgets.cpp +@@ -109,6 +109,8 @@ QString qt_accHotKey(const QString &text); + \ingroup accessibility + */ + ++QAccessibleLineEdit::~QAccessibleLineEdit(){}; ++ + /*! + Creates a QAccessibleButton object for \a w. 
+ */ +diff --git a/qtbase/src/widgets/accessible/simplewidgets_p.h b/qtbase/src/widgets/accessible/simplewidgets_p.h +index 73572e3059..658da86143 100644 +--- a/qtbase/src/widgets/accessible/simplewidgets_p.h ++++ b/qtbase/src/widgets/accessible/simplewidgets_p.h +@@ -155,6 +155,7 @@ class QAccessibleLineEdit : public QAccessibleWidget, public QAccessibleTextInte + public: + explicit QAccessibleLineEdit(QWidget *o, const QString &name = QString()); + ++ ~QAccessibleLineEdit(); + QString text(QAccessible::Text t) const override; + void setText(QAccessible::Text t, const QString &text) override; + QAccessible::State state() const override; diff --git a/depends/patches/qt/xkb-default.patch b/depends/patches/qt/xkb-default.patch deleted file mode 100644 index 165abf3e2e7a6..0000000000000 --- a/depends/patches/qt/xkb-default.patch +++ /dev/null @@ -1,26 +0,0 @@ ---- old/qtbase/src/gui/configure.pri 2018-06-06 17:28:10.000000000 -0400 -+++ new/qtbase/src/gui/configure.pri 2018-08-17 18:43:01.589384567 -0400 -@@ -43,18 +43,11 @@ - } - - defineTest(qtConfTest_xkbConfigRoot) { -- qtConfTest_getPkgConfigVariable($${1}): return(true) -- -- for (dir, $$list("/usr/share/X11/xkb", "/usr/local/share/X11/xkb")) { -- exists($$dir) { -- $${1}.value = $$dir -- export($${1}.value) -- $${1}.cache += value -- export($${1}.cache) -- return(true) -- } -- } -- return(false) -+ $${1}.value = "/usr/share/X11/xkb" -+ export($${1}.value) -+ $${1}.cache += value -+ export($${1}.cache) -+ return(true) - } - - defineTest(qtConfTest_qpaDefaultPlatform) { diff --git a/depends/patches/qt/zlib-timebits64.patch b/depends/patches/qt/zlib-timebits64.patch new file mode 100644 index 0000000000000..139c1dfa77f3e --- /dev/null +++ b/depends/patches/qt/zlib-timebits64.patch @@ -0,0 +1,31 @@ +From a566e156b3fa07b566ddbf6801b517a9dba04fa3 Mon Sep 17 00:00:00 2001 +From: Mark Adler +Date: Sat, 29 Jul 2023 22:13:09 -0700 +Subject: [PATCH] Avoid compiler complaints if _TIME_BITS defined when building + zlib. + +zlib does not use time_t, so _TIME_BITS is irrelevant. However it +may be defined anyway as part of a sledgehammer indiscriminately +applied to all builds. + +From https://github.com/madler/zlib/commit/a566e156b3fa07b566ddbf6801b517a9dba04fa3.patch +--- + qtbase/src/3rdparty/zlib/src/gzguts.h | 5 ++--- + 1 file changed, 2 insertions(+), 3 deletions(-) + +diff --git a/qtbase/src/3rdparty/zlib/src/gzguts.h b/qtbase/src/3rdparty/zlib/src/gzguts.h +index e23f831f5..f9375047e 100644 +--- a/qtbase/src/3rdparty/zlib/src/gzguts.h ++++ b/qtbase/src/3rdparty/zlib/src/gzguts.h +@@ -26,9 +26,8 @@ + # ifndef _LARGEFILE_SOURCE + # define _LARGEFILE_SOURCE 1 + # endif +-# ifdef _FILE_OFFSET_BITS +-# undef _FILE_OFFSET_BITS +-# endif ++# undef _FILE_OFFSET_BITS ++# undef _TIME_BITS + #endif + + #ifdef HAVE_HIDDEN diff --git a/depends/patches/systemtap/remove_SDT_ASM_SECTION_AUTOGROUP_SUPPORT_check.patch b/depends/patches/systemtap/remove_SDT_ASM_SECTION_AUTOGROUP_SUPPORT_check.patch new file mode 100644 index 0000000000000..c115bc43e81f2 --- /dev/null +++ b/depends/patches/systemtap/remove_SDT_ASM_SECTION_AUTOGROUP_SUPPORT_check.patch @@ -0,0 +1,27 @@ +Remove _SDT_ASM_SECTION_AUTOGROUP_SUPPORT check + +We assume that the assembler supports "?" in .pushsection directives. +This enables us to skip configure and make. + +See https://github.com/bitcoin/bitcoin/issues/23297. 
+ +diff --git a/includes/sys/sdt.h b/includes/sys/sdt.h +index ca0162b..f96e0ee 100644 +--- a/includes/sys/sdt.h ++++ b/includes/sys/sdt.h +@@ -241,12 +241,10 @@ __extension__ extern unsigned long long __sdt_unsp; + nice with code in COMDAT sections, which comes up in C++ code. + Without that assembler support, some combinations of probe placements + in certain kinds of C++ code may produce link-time errors. */ +-#include "sdt-config.h" +-#if _SDT_ASM_SECTION_AUTOGROUP_SUPPORT ++/* PATCH: We assume that the assembler supports the feature. This ++ enables us to skip configure and make. In turn, this means we ++ require fewer dependencies and have shorter depend build times. */ + # define _SDT_ASM_AUTOGROUP "?" +-#else +-# define _SDT_ASM_AUTOGROUP "" +-#endif + + #define _SDT_DEF_MACROS \ + _SDT_ASM_1(.altmacro) \ diff --git a/depends/patches/zeromq/0001-fix-build-with-older-mingw64.patch b/depends/patches/zeromq/0001-fix-build-with-older-mingw64.patch deleted file mode 100644 index b911ac56724b1..0000000000000 --- a/depends/patches/zeromq/0001-fix-build-with-older-mingw64.patch +++ /dev/null @@ -1,30 +0,0 @@ -From f6866b0f166ad168618aae64c7fbee8775d3eb23 Mon Sep 17 00:00:00 2001 -From: mruddy <6440430+mruddy@users.noreply.github.com> -Date: Sat, 30 Jun 2018 09:44:58 -0400 -Subject: [PATCH] fix build with older mingw64 - ---- - src/windows.hpp | 7 +++++++ - 1 file changed, 7 insertions(+) - -diff --git a/src/windows.hpp b/src/windows.hpp -index 6c3839fd..2c32ec79 100644 ---- a/src/windows.hpp -+++ b/src/windows.hpp -@@ -58,6 +58,13 @@ - #include - #include - #include -+ -+#if defined __MINGW64_VERSION_MAJOR && __MINGW64_VERSION_MAJOR < 4 -+// Workaround for mingw-w64 < v4.0 which did not include ws2ipdef.h in iphlpapi.h. -+// Fixed in mingw-w64 by 9bd8fe9148924840d315b4c915dd099955ea89d1. -+#include -+#include -+#endif - #include - - #if !defined __MINGW32__ --- -2.17.1 - diff --git a/depends/patches/zeromq/0002-disable-pthread_set_name_np.patch b/depends/patches/zeromq/0002-disable-pthread_set_name_np.patch deleted file mode 100644 index b1c6f78a7046d..0000000000000 --- a/depends/patches/zeromq/0002-disable-pthread_set_name_np.patch +++ /dev/null @@ -1,35 +0,0 @@ -From c9bbdd6581d07acfe8971e4bcebe278a3676cf03 Mon Sep 17 00:00:00 2001 -From: mruddy <6440430+mruddy@users.noreply.github.com> -Date: Sat, 30 Jun 2018 09:57:18 -0400 -Subject: [PATCH] disable pthread_set_name_np - -pthread_set_name_np adds a Glibc requirement on >= 2.12. ---- - src/thread.cpp | 4 +++- - 1 file changed, 3 insertions(+), 1 deletion(-) - -diff --git a/src/thread.cpp b/src/thread.cpp -index a1086b0c..9943f354 100644 ---- a/src/thread.cpp -+++ b/src/thread.cpp -@@ -308,7 +308,7 @@ void zmq::thread_t::setThreadName (const char *name_) - */ - if (!name_) - return; -- -+#if 0 - #if defined(ZMQ_HAVE_PTHREAD_SETNAME_1) - int rc = pthread_setname_np (name_); - if (rc) -@@ -324,6 +324,8 @@ void zmq::thread_t::setThreadName (const char *name_) - #elif defined(ZMQ_HAVE_PTHREAD_SET_NAME) - pthread_set_name_np (_descriptor, name_); - #endif -+#endif -+ return; - } - - #endif --- -2.17.1 - diff --git a/depends/patches/zeromq/builtin_sha1.patch b/depends/patches/zeromq/builtin_sha1.patch new file mode 100644 index 0000000000000..5481c9dbddef8 --- /dev/null +++ b/depends/patches/zeromq/builtin_sha1.patch @@ -0,0 +1,17 @@ +Don't use builtin sha1 if not using ws + +The builtin SHA1 (ZMQ_USE_BUILTIN_SHA1) is only used in the websocket +engine (ws_engine.cpp). +Upstreamed in https://github.com/zeromq/libzmq/pull/4670. 
+ +--- a/CMakeLists.txt ++++ b/CMakeLists.txt +@@ -234,7 +234,7 @@ if(NOT ZMQ_USE_GNUTLS) + endif() + endif() + endif() +- if(NOT ZMQ_USE_NSS) ++ if(ENABLE_WS AND NOT ZMQ_USE_NSS) + list(APPEND sources ${CMAKE_CURRENT_SOURCE_DIR}/external/sha1/sha1.c + ${CMAKE_CURRENT_SOURCE_DIR}/external/sha1/sha1.h) + message(STATUS "Using builtin sha1") diff --git a/depends/patches/zeromq/cacheline_undefined.patch b/depends/patches/zeromq/cacheline_undefined.patch new file mode 100644 index 0000000000000..02bd2a5fe5d5a --- /dev/null +++ b/depends/patches/zeromq/cacheline_undefined.patch @@ -0,0 +1,15 @@ +Use proper STREQUAL instead of EQUAL to compare strings.txt + +See: https://github.com/zeromq/libzmq/pull/4711. + +--- a/CMakeLists.txt ++++ b/CMakeLists.txt +@@ -476,7 +476,7 @@ execute_process( + if(CACHELINE_SIZE STREQUAL "" + OR CACHELINE_SIZE EQUAL 0 + OR CACHELINE_SIZE EQUAL -1 +- OR CACHELINE_SIZE EQUAL "undefined") ++ OR CACHELINE_SIZE STREQUAL "undefined") + set(ZMQ_CACHELINE_SIZE 64) + else() + set(ZMQ_CACHELINE_SIZE ${CACHELINE_SIZE}) diff --git a/depends/patches/zeromq/cmake_minimum.patch b/depends/patches/zeromq/cmake_minimum.patch new file mode 100644 index 0000000000000..d6b6b5fae7ba8 --- /dev/null +++ b/depends/patches/zeromq/cmake_minimum.patch @@ -0,0 +1,18 @@ +Set a more sane cmake_minimum_required. + +--- a/CMakeLists.txt ++++ b/CMakeLists.txt +@@ -1,12 +1,7 @@ + # CMake build script for ZeroMQ ++cmake_minimum_required(VERSION 3.16) + project(ZeroMQ) + +-if(${CMAKE_SYSTEM_NAME} STREQUAL Darwin) +- cmake_minimum_required(VERSION 3.0.2) +-else() +- cmake_minimum_required(VERSION 2.8.12) +-endif() +- + include(CheckIncludeFiles) + include(CheckCCompilerFlag) + include(CheckCXXCompilerFlag) diff --git a/depends/patches/zeromq/fix_have_windows.patch b/depends/patches/zeromq/fix_have_windows.patch new file mode 100644 index 0000000000000..e77ef31adfae6 --- /dev/null +++ b/depends/patches/zeromq/fix_have_windows.patch @@ -0,0 +1,54 @@ +This fixes several instances where _MSC_VER was +used to determine whether to use afunix.h or not. + +See https://github.com/zeromq/libzmq/pull/4678. 
+--- a/src/ipc_address.hpp ++++ b/src/ipc_address.hpp +@@ -7,7 +7,7 @@ + + #include + +-#if defined _MSC_VER ++#if defined ZMQ_HAVE_WINDOWS + #include + #else + #include +diff --git a/src/ipc_connecter.cpp b/src/ipc_connecter.cpp +index 3f988745..ed2a0645 100644 +--- a/src/ipc_connecter.cpp ++++ b/src/ipc_connecter.cpp +@@ -16,7 +16,7 @@ + #include "ipc_address.hpp" + #include "session_base.hpp" + +-#ifdef _MSC_VER ++#if defined ZMQ_HAVE_WINDOWS + #include + #else + #include +diff --git a/src/ipc_listener.cpp b/src/ipc_listener.cpp +index 50126040..5428579b 100644 +--- a/src/ipc_listener.cpp ++++ b/src/ipc_listener.cpp +@@ -17,7 +17,7 @@ + #include "socket_base.hpp" + #include "address.hpp" + +-#ifdef _MSC_VER ++#ifdef ZMQ_HAVE_WINDOWS + #ifdef ZMQ_IOTHREAD_POLLER_USE_SELECT + #error On Windows, IPC does not work with POLLER=select, use POLLER=epoll instead, or disable IPC transport + #endif +diff --git a/tests/testutil.cpp b/tests/testutil.cpp +index bdc80283..6f21e8f6 100644 +--- a/tests/testutil.cpp ++++ b/tests/testutil.cpp +@@ -7,7 +7,7 @@ + + #if defined _WIN32 + #include "../src/windows.hpp" +-#if defined _MSC_VER ++#if defined ZMQ_HAVE_WINDOWS + #if defined ZMQ_HAVE_IPC + #include + #include diff --git a/depends/patches/zeromq/fix_mingw_link.patch b/depends/patches/zeromq/fix_mingw_link.patch new file mode 100644 index 0000000000000..1434557dc72cd --- /dev/null +++ b/depends/patches/zeromq/fix_mingw_link.patch @@ -0,0 +1,31 @@ +Fix CMake-generated `libzmq.pc` file + +This change mirrors the Autotools-based build system behavior for +cross-compiling for Windows with static linking. + +See https://github.com/zeromq/libzmq/pull/4706. + + +diff --git a/CMakeLists.txt b/CMakeLists.txt +index 03462271..0315e606 100644 +--- a/CMakeLists.txt ++++ b/CMakeLists.txt +@@ -546,12 +546,18 @@ if(ZMQ_HAVE_WINDOWS) + # Cannot use check_library_exists because the symbol is always declared as char(*)(void) + set(CMAKE_REQUIRED_LIBRARIES "ws2_32.lib") + check_cxx_symbol_exists(WSAStartup "winsock2.h" HAVE_WS2_32) ++ if(HAVE_WS2_32) ++ set(pkg_config_libs_private "${pkg_config_libs_private} -lws2_32") ++ endif() + + set(CMAKE_REQUIRED_LIBRARIES "rpcrt4.lib") + check_cxx_symbol_exists(UuidCreateSequential "rpc.h" HAVE_RPCRT4) + + set(CMAKE_REQUIRED_LIBRARIES "iphlpapi.lib") + check_cxx_symbol_exists(GetAdaptersAddresses "winsock2.h;iphlpapi.h" HAVE_IPHLAPI) ++ if(HAVE_IPHLAPI) ++ set(pkg_config_libs_private "${pkg_config_libs_private} -liphlpapi") ++ endif() + check_cxx_symbol_exists(if_nametoindex "iphlpapi.h" HAVE_IF_NAMETOINDEX) + + set(CMAKE_REQUIRED_LIBRARIES "") diff --git a/depends/patches/zeromq/macos_mktemp_check.patch b/depends/patches/zeromq/macos_mktemp_check.patch new file mode 100644 index 0000000000000..c703abcd718e2 --- /dev/null +++ b/depends/patches/zeromq/macos_mktemp_check.patch @@ -0,0 +1,16 @@ +build: fix mkdtemp check on macOS + +On macOS, mkdtemp is in unistd.h. Fix the CMake check so that is works. +Upstreamed in https://github.com/zeromq/libzmq/pull/4668. 
+ +--- a/CMakeLists.txt ++++ b/CMakeLists.txt +@@ -599,7 +599,7 @@ if(NOT MSVC) + + check_cxx_symbol_exists(fork unistd.h HAVE_FORK) + check_cxx_symbol_exists(gethrtime sys/time.h HAVE_GETHRTIME) +- check_cxx_symbol_exists(mkdtemp stdlib.h HAVE_MKDTEMP) ++ check_cxx_symbol_exists(mkdtemp "stdlib.h;unistd.h" HAVE_MKDTEMP) + check_cxx_symbol_exists(accept4 sys/socket.h HAVE_ACCEPT4) + check_cxx_symbol_exists(strnlen string.h HAVE_STRNLEN) + else() diff --git a/depends/patches/zeromq/no_librt.patch b/depends/patches/zeromq/no_librt.patch new file mode 100644 index 0000000000000..b63854c95b281 --- /dev/null +++ b/depends/patches/zeromq/no_librt.patch @@ -0,0 +1,54 @@ +We don't use librt, so don't try and link against it. + +Related to: https://github.com/zeromq/libzmq/pull/4702. + +diff --git a/CMakeLists.txt b/CMakeLists.txt +index 03462271..87ceab3c 100644 +--- a/CMakeLists.txt ++++ b/CMakeLists.txt +@@ -564,13 +564,6 @@ else() + check_cxx_symbol_exists(SO_BUSY_POLL sys/socket.h ZMQ_HAVE_BUSY_POLL) + endif() + +-if(NOT MINGW) +- find_library(RT_LIBRARY rt) +- if(RT_LIBRARY) +- set(pkg_config_libs_private "${pkg_config_libs_private} -lrt") +- endif() +-endif() +- + find_package(Threads) + + if(WIN32 AND NOT CYGWIN) +@@ -588,9 +581,7 @@ if(WIN32 AND NOT CYGWIN) + endif() + + if(NOT MSVC) +- set(CMAKE_REQUIRED_LIBRARIES rt) + check_cxx_symbol_exists(clock_gettime time.h HAVE_CLOCK_GETTIME) +- set(CMAKE_REQUIRED_LIBRARIES) + + check_cxx_symbol_exists(fork unistd.h HAVE_FORK) + check_cxx_symbol_exists(gethrtime sys/time.h HAVE_GETHRTIME) +@@ -1503,10 +1494,6 @@ if(BUILD_SHARED) + target_link_libraries(libzmq iphlpapi) + endif() + +- if(RT_LIBRARY) +- target_link_libraries(libzmq -lrt) +- endif() +- + if(norm_FOUND) + target_link_libraries(libzmq norm::norm) + endif() +@@ -1553,10 +1540,6 @@ if(BUILD_STATIC) + target_link_libraries(libzmq-static iphlpapi) + endif() + +- if(RT_LIBRARY) +- target_link_libraries(libzmq-static -lrt) +- endif() +- + if(CMAKE_SYSTEM_NAME MATCHES "QNX") + add_definitions(-DUNITY_EXCLUDE_MATH_H) + endif() diff --git a/depends/patches/zeromq/openbsd_kqueue_headers.patch b/depends/patches/zeromq/openbsd_kqueue_headers.patch new file mode 100644 index 0000000000000..7000e209fe468 --- /dev/null +++ b/depends/patches/zeromq/openbsd_kqueue_headers.patch @@ -0,0 +1,24 @@ +commit ff231d267370493814f933d151441866bf1e200b +Author: Min RK +Date: Fri Feb 23 13:21:08 2024 +0100 + + Problem: cmake search for kqueue missing headers + + Solution: include sys/types.h and sys/time.h as documented by kqueue + and used in autotools + + fixes kqueue detection on openbsd + +diff --git a/CMakeLists.txt b/CMakeLists.txt +index f956f3fd..814d5d46 100644 +--- a/CMakeLists.txt ++++ b/CMakeLists.txt +@@ -380,7 +380,7 @@ endif(WIN32) + + if(NOT MSVC) + if(POLLER STREQUAL "") +- check_cxx_symbol_exists(kqueue sys/event.h HAVE_KQUEUE) ++ check_cxx_symbol_exists(kqueue "sys/types.h;sys/event.h;sys/time.h" HAVE_KQUEUE) + if(HAVE_KQUEUE) + set(POLLER "kqueue") + endif() diff --git a/depends/patches/zeromq/remove_libstd_link.patch b/depends/patches/zeromq/remove_libstd_link.patch new file mode 100644 index 0000000000000..ddf91e6abfaba --- /dev/null +++ b/depends/patches/zeromq/remove_libstd_link.patch @@ -0,0 +1,25 @@ +commit 47d4cd12a2c051815ddda78adebdb3923b260d8a +Author: fanquake +Date: Tue Aug 18 14:45:40 2020 +0800 + + Remove needless linking against libstdc++ + + This is broken for a number of reasons, including: + - g++ understands "static-libstdc++ -lstdc++" to mean "link against + whatever 
libstdc++ exists, probably shared", which in itself is buggy. + - another stdlib (libc++ for example) may be in use + + See #11981. + +diff --git a/src/libzmq.pc.in b/src/libzmq.pc.in +index 233bc3a..3c2bf0d 100644 +--- a/src/libzmq.pc.in ++++ b/src/libzmq.pc.in +@@ -7,6 +7,6 @@ Name: libzmq + Description: 0MQ c++ library + Version: @VERSION@ + Libs: -L${libdir} -lzmq +-Libs.private: -lstdc++ @pkg_config_libs_private@ ++Libs.private: @pkg_config_libs_private@ + Requires.private: @pkg_config_names_private@ + Cflags: -I${includedir} @pkg_config_defines@ diff --git a/depends/toolchain.cmake.in b/depends/toolchain.cmake.in new file mode 100644 index 0000000000000..735ebc8ea423f --- /dev/null +++ b/depends/toolchain.cmake.in @@ -0,0 +1,168 @@ +# Copyright (c) 2023-present The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or https://opensource.org/license/mit/. + +# This file is expected to be highly volatile and may still change substantially. + +# If CMAKE_SYSTEM_NAME is set within a toolchain file, CMake will also +# set CMAKE_CROSSCOMPILING to TRUE, even if CMAKE_SYSTEM_NAME matches +# CMAKE_HOST_SYSTEM_NAME. To avoid potential misconfiguration of CMake, +# it is best not to touch CMAKE_SYSTEM_NAME unless cross-compiling is +# intended. +if(@depends_crosscompiling@) + set(CMAKE_SYSTEM_NAME @host_system_name@) + set(CMAKE_SYSTEM_VERSION @host_system_version@) + set(CMAKE_SYSTEM_PROCESSOR @host_arch@) +endif() + +if(NOT DEFINED CMAKE_C_FLAGS_INIT) + set(CMAKE_C_FLAGS_INIT "@CFLAGS@") +endif() +if(NOT DEFINED CMAKE_C_FLAGS_RELWITHDEBINFO_INIT) + set(CMAKE_C_FLAGS_RELWITHDEBINFO_INIT "@CFLAGS_RELEASE@") +endif() +if(NOT DEFINED CMAKE_C_FLAGS_DEBUG_INIT) + set(CMAKE_C_FLAGS_DEBUG_INIT "@CFLAGS_DEBUG@") +endif() + +if(NOT DEFINED CMAKE_C_COMPILER) + set(CMAKE_C_COMPILER @CC@) +endif() + +if(NOT DEFINED CMAKE_CXX_FLAGS_INIT) + set(CMAKE_CXX_FLAGS_INIT "@CXXFLAGS@") + set(CMAKE_OBJCXX_FLAGS_INIT "@CXXFLAGS@") +endif() +if(NOT DEFINED CMAKE_CXX_FLAGS_RELWITHDEBINFO_INIT) + set(CMAKE_CXX_FLAGS_RELWITHDEBINFO_INIT "@CXXFLAGS_RELEASE@") + set(CMAKE_OBJCXX_FLAGS_RELWITHDEBINFO_INIT "@CXXFLAGS_RELEASE@") +endif() +if(NOT DEFINED CMAKE_CXX_FLAGS_DEBUG_INIT) + set(CMAKE_CXX_FLAGS_DEBUG_INIT "@CXXFLAGS_DEBUG@") + set(CMAKE_OBJCXX_FLAGS_DEBUG_INIT "@CXXFLAGS_DEBUG@") +endif() + +if(NOT DEFINED CMAKE_CXX_COMPILER) + set(CMAKE_CXX_COMPILER @CXX@) + set(CMAKE_OBJCXX_COMPILER ${CMAKE_CXX_COMPILER}) +endif() + +# The DEPENDS_COMPILE_DEFINITIONS* variables are to be treated as lists. +set(DEPENDS_COMPILE_DEFINITIONS @CPPFLAGS@) +set(DEPENDS_COMPILE_DEFINITIONS_RELWITHDEBINFO @CPPFLAGS_RELEASE@) +set(DEPENDS_COMPILE_DEFINITIONS_DEBUG @CPPFLAGS_DEBUG@) + +if(NOT DEFINED CMAKE_EXE_LINKER_FLAGS_INIT) + set(CMAKE_EXE_LINKER_FLAGS_INIT "@LDFLAGS@") +endif() +if(NOT DEFINED CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO_INIT) + set(CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO_INIT "@LDFLAGS_RELEASE@") +endif() +if(NOT DEFINED CMAKE_EXE_LINKER_FLAGS_DEBUG_INIT) + set(CMAKE_EXE_LINKER_FLAGS_DEBUG_INIT "@LDFLAGS_DEBUG@") +endif() + +set(CMAKE_AR "@AR@") +set(CMAKE_RANLIB "@RANLIB@") +set(CMAKE_STRIP "@STRIP@") +set(CMAKE_OBJCOPY "@OBJCOPY@") +set(CMAKE_OBJDUMP "@OBJDUMP@") + +# Using our own built dependencies should not be +# affected by a potentially random environment. 
+set(CMAKE_FIND_USE_CMAKE_ENVIRONMENT_PATH OFF) + +set(CMAKE_FIND_ROOT_PATH "@depends_prefix@") +set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER) +set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY) +set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY) +set(CMAKE_FIND_ROOT_PATH_MODE_PACKAGE ONLY) +set(QT_TRANSLATIONS_DIR "@depends_prefix@/translations") + +if(CMAKE_SYSTEM_NAME STREQUAL "Darwin" AND NOT CMAKE_HOST_APPLE) + # The find_package(Qt ...) function internally uses find_library() + # calls for all dependencies to ensure their availability. + # In turn, the find_library() inspects the well-known locations + # on the file system; therefore, a hint is required. + set(CMAKE_FRAMEWORK_PATH "@OSX_SDK@/System/Library/Frameworks") +endif() + + +# Customize pkg-config behaviour. +cmake_path(APPEND CMAKE_FIND_ROOT_PATH "lib" "pkgconfig" OUTPUT_VARIABLE pkg_config_path) +set(ENV{PKG_CONFIG_PATH} ${pkg_config_path}) +set(ENV{PKG_CONFIG_LIBDIR} ${pkg_config_path}) +unset(pkg_config_path) +set(PKG_CONFIG_ARGN --static) + + +# Set configuration options for the main build system. +set(qt_packages @qt_packages@) +if("${qt_packages}" STREQUAL "") + set(BUILD_GUI OFF CACHE BOOL "") +else() + set(BUILD_GUI ON CACHE BOOL "") +endif() + +set(qrencode_packages @qrencode_packages@) +if("${qrencode_packages}" STREQUAL "") + set(WITH_QRENCODE OFF CACHE BOOL "") +else() + set(WITH_QRENCODE ON CACHE BOOL "") +endif() + +set(zmq_packages @zmq_packages@) +if("${zmq_packages}" STREQUAL "") + set(WITH_ZMQ OFF CACHE BOOL "") +else() + set(WITH_ZMQ ON CACHE BOOL "") +endif() + +set(wallet_packages @wallet_packages@) +if("${wallet_packages}" STREQUAL "") + set(ENABLE_WALLET OFF CACHE BOOL "") +else() + set(ENABLE_WALLET ON CACHE BOOL "") +endif() + +set(bdb_packages @bdb_packages@) +if("${wallet_packages}" STREQUAL "" OR "${bdb_packages}" STREQUAL "") + set(WITH_BDB OFF CACHE BOOL "") +else() + set(WITH_BDB ON CACHE BOOL "") +endif() + +set(sqlite_packages @sqlite_packages@) +if("${wallet_packages}" STREQUAL "" OR "${sqlite_packages}" STREQUAL "") + set(WITH_SQLITE OFF CACHE BOOL "") +else() + set(WITH_SQLITE ON CACHE BOOL "") +endif() + +set(upnp_packages @upnp_packages@) +if("${upnp_packages}" STREQUAL "") + set(WITH_MINIUPNPC OFF CACHE BOOL "") +else() + set(WITH_MINIUPNPC ON CACHE BOOL "") +endif() + +set(usdt_packages @usdt_packages@) +if("${usdt_packages}" STREQUAL "") + set(WITH_USDT OFF CACHE BOOL "") +else() + set(WITH_USDT ON CACHE BOOL "") +endif() + +if("@no_harden@") + set(ENABLE_HARDENING OFF CACHE BOOL "") +else() + set(ENABLE_HARDENING ON CACHE BOOL "") +endif() + +if("@multiprocess@" STREQUAL "1") + set(WITH_MULTIPROCESS ON CACHE BOOL "") + set(Libmultiprocess_ROOT "${CMAKE_CURRENT_LIST_DIR}" CACHE PATH "") + set(LibmultiprocessNative_ROOT "${CMAKE_CURRENT_LIST_DIR}/native" CACHE PATH "") +else() + set(WITH_MULTIPROCESS OFF CACHE BOOL "") +endif() diff --git a/doc/.gitignore b/doc/.gitignore deleted file mode 100644 index 38498103bb170..0000000000000 --- a/doc/.gitignore +++ /dev/null @@ -1 +0,0 @@ -Doxyfile diff --git a/doc/CMakeLists.txt b/doc/CMakeLists.txt new file mode 100644 index 0000000000000..310a90612bb6d --- /dev/null +++ b/doc/CMakeLists.txt @@ -0,0 +1,25 @@ +# Copyright (c) 2024-present The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or https://opensource.org/license/mit/. 
+ +find_package(Doxygen COMPONENTS dot) + +if(DOXYGEN_FOUND) + set(doxyfile ${CMAKE_CURRENT_BINARY_DIR}/Doxyfile) + configure_file(Doxyfile.in ${doxyfile} USE_SOURCE_PERMISSIONS) + + # In CMake 3.27, The FindDoxygen module's doxygen_add_docs() + # command gained a CONFIG_FILE option to specify a custom doxygen + # configuration file. + # TODO: Consider using it. + add_custom_target(docs + COMMAND Doxygen::doxygen ${doxyfile} + WORKING_DIRECTORY ${PROJECT_SOURCE_DIR} + COMMENT "Generating developer documentation" + VERBATIM USES_TERMINAL + ) +else() + add_custom_target(docs + COMMAND ${CMAKE_COMMAND} -E echo "Error: Doxygen not found" + ) +endif() diff --git a/doc/Doxyfile.in b/doc/Doxyfile.in index 7e307ab7c84b0..ccaf31170a175 100644 --- a/doc/Doxyfile.in +++ b/doc/Doxyfile.in @@ -58,7 +58,7 @@ PROJECT_LOGO = doc/bitcoin_logo_doxygen.png # entered, it will be relative to the location where doxygen was started. If # left blank the current directory will be used. -OUTPUT_DIRECTORY = doc/doxygen +OUTPUT_DIRECTORY = @PROJECT_BINARY_DIR@/doc/doxygen # If the CREATE_SUBDIRS tag is set to YES then doxygen will create 4096 sub- # directories (in 2 levels) under the output directory of each output format and @@ -863,9 +863,7 @@ RECURSIVE = YES EXCLUDE = src/crc32c \ src/leveldb \ - src/json \ - src/test \ - src/qt/test + src/json # The EXCLUDE_SYMLINKS tag can be used to select whether or not files or # directories that are symbolic links (a Unix file system feature) are excluded @@ -2073,7 +2071,7 @@ INCLUDE_FILE_PATTERNS = # recursively expanded use the := operator instead of the = operator. # This tag requires that the tag ENABLE_PREPROCESSING is set to YES. -PREDEFINED = +PREDEFINED = ENABLE_EXTERNAL_SIGNER # If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then this # tag can be used to specify a list of macro names that should be expanded. The diff --git a/doc/JSON-RPC-interface.md b/doc/JSON-RPC-interface.md index a0cfe84a3e50a..10d8ee52ebe8d 100644 --- a/doc/JSON-RPC-interface.md +++ b/doc/JSON-RPC-interface.md @@ -5,6 +5,63 @@ The headless daemon `bitcoind` has the JSON-RPC API enabled by default, the GUI option. In the GUI it is possible to execute RPC methods in the Debug Console Dialog. +## Endpoints + +There are two JSON-RPC endpoints on the server: + +1. `/` +2. `/wallet//` + +### `/` endpoint + +This endpoint is always active. +It can always service non-wallet requests and can service wallet requests when +exactly one wallet is loaded. + +### `/wallet//` endpoint + +This endpoint is only activated when the wallet component has been compiled in. +It can service both wallet and non-wallet requests. +It MUST be used for wallet requests when two or more wallets are loaded. + +This is the endpoint used by bitcoin-cli when a `-rpcwallet=` parameter is passed in. + +Best practice would dictate using the `/wallet//` endpoint for ALL +requests when multiple wallets are in use. 
+ +### Examples + +```sh +# Get block count from the / endpoint when rpcuser=alice and rpcport=38332 +$ curl --user alice --data-binary '{"jsonrpc": "2.0", "id": "0", "method": "getblockcount", "params": []}' -H 'content-type: application/json' localhost:38332/ + +# Get balance from the /wallet/walletname endpoint when rpcuser=alice, rpcport=38332 and rpcwallet=desc-wallet +$ curl --user alice --data-binary '{"jsonrpc": "2.0", "id": "0", "method": "getbalance", "params": []}' -H 'content-type: application/json' localhost:38332/wallet/desc-wallet + +``` + +## Parameter passing + +The JSON-RPC server supports both _by-position_ and _by-name_ [parameter +structures](https://www.jsonrpc.org/specification#parameter_structures) +described in the JSON-RPC specification. For extra convenience, to avoid the +need to name every parameter value, all RPC methods accept a named parameter +called `args`, which can be set to an array of initial positional values that +are combined with named values. + +Examples: + +```sh +# "params": ["mywallet", false, false, "", false, false, true] +bitcoin-cli createwallet mywallet false false "" false false true + +# "params": {"wallet_name": "mywallet", "load_on_startup": true} +bitcoin-cli -named createwallet wallet_name=mywallet load_on_startup=true + +# "params": {"args": ["mywallet"], "load_on_startup": true} +bitcoin-cli -named createwallet mywallet load_on_startup=true +``` + ## Versioning The RPC interface might change from one major version of Bitcoin Core to the @@ -17,6 +74,22 @@ major version via the `-deprecatedrpc=` command line option. The release notes of a new major release come with detailed instructions on what RPC features were deprecated and how to re-enable them temporarily. +## JSON-RPC 1.1 vs 2.0 + +The server recognizes [JSON-RPC v2.0](https://www.jsonrpc.org/specification) requests +and responds accordingly. A 2.0 request is identified by the presence of +`"jsonrpc": "2.0"` in the request body. If that key + value is not present in a request, +the legacy JSON-RPC v1.1 protocol is followed instead, which was the only available +protocol in v27.0 and prior releases. + +|| 1.1 | 2.0 | +|-|-|-| +| Request marker | `"version": "1.1"` (or none) | `"jsonrpc": "2.0"` | +| Response marker | (none) | `"jsonrpc": "2.0"` | +| `"error"` and `"result"` fields in response | both present | only one is present | +| HTTP codes in response | `200` unless there is any kind of RPC error (invalid parameters, method not found, etc) | Always `200` unless there is an actual HTTP server error (request parsing error, endpoint not found, etc) | +| Notifications: requests that get no reply | (not supported) | Supported for requests that exclude the "id" field. Returns HTTP status `204` "No Content" | + ## Security The RPC interface allows other programs to control Bitcoin Core, @@ -60,7 +133,7 @@ RPC interface will be abused. are sent as clear text that can be read by anyone on your network path. Additionally, the RPC interface has not been hardened to withstand arbitrary Internet traffic, so changing the above settings - to expose it to the Internet (even using something like a Tor hidden + to expose it to the Internet (even using something like a Tor onion service) could expose you to unconsidered vulnerabilities. See `bitcoind -help` for more information about these settings and other settings described in this document. @@ -71,7 +144,7 @@ RPC interface will be abused. 
Instead, expose it only on the host system's localhost, for example: `-p 127.0.0.1:8332:8332` -- **Secure authentication:** By default, Bitcoin Core generates unique +- **Secure authentication:** By default, when no `rpcpassword` is specified, Bitcoin Core generates unique login credentials each time it restarts and puts them into a file readable only by the user that started Bitcoin Core, allowing any of that user's RPC clients with read access to the file to login @@ -88,13 +161,14 @@ RPC interface will be abused. - **Secure string handling:** The RPC interface does not guarantee any escaping of data beyond what's necessary to encode it as JSON, although it does usually provide serialized data using a hex - representation of the bytes. If you use RPC data in your programs or - provide its data to other programs, you must ensure any problem - strings are properly escaped. For example, multiple websites have - been manipulated because they displayed decoded hex strings that - included HTML `" - << "
" - << ""; + auto it = map_label_priority.find(str); + if (it == map_label_priority.end()) throw std::runtime_error(strprintf("Unknown priority level %s", str)); + return it->second; } - -benchmark::BenchRunner::BenchmarkMap& benchmark::BenchRunner::benchmarks() +BenchRunner::BenchmarkMap& BenchRunner::benchmarks() { - static std::map benchmarks_map; + static BenchmarkMap benchmarks_map; return benchmarks_map; } -benchmark::BenchRunner::BenchRunner(std::string name, benchmark::BenchFunction func, uint64_t num_iters_for_one_second) +BenchRunner::BenchRunner(std::string name, BenchFunction func, PriorityLevel level) { - benchmarks().insert(std::make_pair(name, Bench{func, num_iters_for_one_second})); + benchmarks().insert(std::make_pair(name, std::make_pair(func, level))); } -void benchmark::BenchRunner::RunAll(Printer& printer, uint64_t num_evals, double scaling, const std::string& filter, bool is_list_only) +void BenchRunner::RunAll(const Args& args) { - if (!std::ratio_less_equal::value) { - std::cerr << "WARNING: Clock precision is worse than microsecond - benchmarks may be less accurate!\n"; - } -#ifdef DEBUG - std::cerr << "WARNING: This is a debug build - may result in slower benchmarks.\n"; -#endif - - std::regex reFilter(filter); + std::regex reFilter(args.regex_filter); std::smatch baseMatch; - printer.header(); + if (args.sanity_check) { + std::cout << "Running with -sanity-check option, output is being suppressed as benchmark results will be useless." << std::endl; + } + + std::vector benchmarkResults; + for (const auto& [name, bench_func] : benchmarks()) { + const auto& [func, priority_level] = bench_func; - for (const auto& p : benchmarks()) { - if (!std::regex_match(p.first, baseMatch, reFilter)) { + if (!(priority_level & args.priority)) { continue; } - uint64_t num_iters = static_cast(p.second.num_iters_for_one_second * scaling); - if (0 == num_iters) { - num_iters = 1; + if (!std::regex_match(name, baseMatch, reFilter)) { + continue; } - State state(p.first, num_evals, num_iters, printer); - if (!is_list_only) { - p.second.func(state); + + if (args.is_list_only) { + std::cout << name << std::endl; + continue; } - printer.result(state); - } - printer.footer(); -} + Bench bench; + if (args.sanity_check) { + bench.epochs(1).epochIterations(1); + bench.output(nullptr); + } + bench.name(name); + if (args.min_time > 0ms) { + // convert to nanos before dividing to reduce rounding errors + std::chrono::nanoseconds min_time_ns = args.min_time; + bench.minEpochTime(min_time_ns / bench.epochs()); + } -bool benchmark::State::UpdateTimer(const benchmark::time_point current_time) -{ - if (m_start_time != time_point()) { - std::chrono::duration diff = current_time - m_start_time; - m_elapsed_results.push_back(diff.count() / m_num_iters); + if (args.asymptote.empty()) { + func(bench); + } else { + for (auto n : args.asymptote) { + bench.complexityN(n); + func(bench); + } + std::cout << bench.complexityBigO() << std::endl; + } - if (m_elapsed_results.size() == m_num_evals) { - return false; + if (!bench.results().empty()) { + benchmarkResults.push_back(bench.results().back()); } } - m_num_iters_left = m_num_iters - 1; - return true; + GenerateTemplateResults(benchmarkResults, args.output_csv, "# Benchmark, evals, iterations, total, min, max, median\n" + "{{#result}}{{name}}, {{epochs}}, {{average(iterations)}}, {{sumProduct(iterations, elapsed)}}, {{minimum(elapsed)}}, {{maximum(elapsed)}}, {{median(elapsed)}}\n" + "{{/result}}"); + GenerateTemplateResults(benchmarkResults, 
args.output_json, ankerl::nanobench::templates::json()); } + +} // namespace benchmark diff --git a/src/bench/bench.h b/src/bench/bench.h index 629bca9a73d0d..f0705f4fed88f 100644 --- a/src/bench/bench.h +++ b/src/bench/bench.h @@ -1,141 +1,83 @@ -// Copyright (c) 2015-2020 The Bitcoin Core developers +// Copyright (c) 2015-2022 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #ifndef BITCOIN_BENCH_BENCH_H #define BITCOIN_BENCH_BENCH_H +#include // IWYU pragma: export +#include +#include + #include +#include #include #include #include +#include #include -#include -#include - -// Simple micro-benchmarking framework; API mostly matches a subset of the Google Benchmark -// framework (see https://github.com/google/benchmark) -// Why not use the Google Benchmark framework? Because adding Yet Another Dependency -// (that uses cmake as its build system and has lots of features we don't need) isn't -// worth it. - /* * Usage: -static void CODE_TO_TIME(benchmark::State& state) +static void NameOfYourBenchmarkFunction(benchmark::Bench& bench) { - ... do any setup needed... - while (state.KeepRunning()) { - ... do stuff you want to time... - } - ... do any cleanup needed... + ...do any setup needed... + + bench.run([&] { + ...do stuff you want to time; refer to src/bench/nanobench.h + for more information and the options that can be passed here... + }); + + ...do any cleanup needed... } -// default to running benchmark for 5000 iterations -BENCHMARK(CODE_TO_TIME, 5000); +BENCHMARK(NameOfYourBenchmarkFunction); */ namespace benchmark { -// In case high_resolution_clock is steady, prefer that, otherwise use steady_clock. -struct best_clock { - using hi_res_clock = std::chrono::high_resolution_clock; - using steady_clock = std::chrono::steady_clock; - using type = std::conditional::type; -}; -using clock = best_clock::type; -using time_point = clock::time_point; -using duration = clock::duration; -class Printer; +using ankerl::nanobench::Bench; -class State +typedef std::function BenchFunction; + +enum PriorityLevel : uint8_t { -public: - std::string m_name; - uint64_t m_num_iters_left; - const uint64_t m_num_iters; - const uint64_t m_num_evals; - std::vector m_elapsed_results; - time_point m_start_time; - - bool UpdateTimer(time_point finish_time); - - State(std::string name, uint64_t num_evals, double num_iters, Printer& printer) : m_name(name), m_num_iters_left(0), m_num_iters(num_iters), m_num_evals(num_evals) - { - } - - inline bool KeepRunning() - { - if (m_num_iters_left--) { - return true; - } - - bool result = UpdateTimer(clock::now()); - // measure again so runtime of UpdateTimer is not included - m_start_time = clock::now(); - return result; - } + LOW = 1 << 0, + HIGH = 1 << 2, }; -typedef std::function BenchFunction; +// List priority labels, comma-separated and sorted by increasing priority +std::string ListPriorities(); +uint8_t StringToPriority(const std::string& str); + +struct Args { + bool is_list_only; + bool sanity_check; + std::chrono::milliseconds min_time; + std::vector asymptote; + fs::path output_csv; + fs::path output_json; + std::string regex_filter; + uint8_t priority; +}; class BenchRunner { - struct Bench { - BenchFunction func; - uint64_t num_iters_for_one_second; - }; - typedef std::map BenchmarkMap; + // maps from "name" -> (function, priority_level) + typedef std::map> BenchmarkMap; static BenchmarkMap& benchmarks(); public: - BenchRunner(std::string 
name, BenchFunction func, uint64_t num_iters_for_one_second); + BenchRunner(std::string name, BenchFunction func, PriorityLevel level); - static void RunAll(Printer& printer, uint64_t num_evals, double scaling, const std::string& filter, bool is_list_only); + static void RunAll(const Args& args); }; +} // namespace benchmark -// interface to output benchmark results. -class Printer -{ -public: - virtual ~Printer() {} - virtual void header() = 0; - virtual void result(const State& state) = 0; - virtual void footer() = 0; -}; - -// default printer to console, shows min, max, median. -class ConsolePrinter : public Printer -{ -public: - void header() override; - void result(const State& state) override; - void footer() override; -}; - -// creates box plot with plotly.js -class PlotlyPrinter : public Printer -{ -public: - PlotlyPrinter(std::string plotly_url, int64_t width, int64_t height); - void header() override; - void result(const State& state) override; - void footer() override; - -private: - std::string m_plotly_url; - int64_t m_width; - int64_t m_height; -}; -} - - -// BENCHMARK(foo, num_iters_for_one_second) expands to: benchmark::BenchRunner bench_11foo("foo", num_iterations); -// Choose a num_iters_for_one_second that takes roughly 1 second. The goal is that all benchmarks should take approximately -// the same time, and scaling factor can be used that the total time is appropriate for your system. -#define BENCHMARK(n, num_iters_for_one_second) \ - benchmark::BenchRunner BOOST_PP_CAT(bench_, BOOST_PP_CAT(__LINE__, n))(BOOST_PP_STRINGIZE(n), n, (num_iters_for_one_second)); +// BENCHMARK(foo) expands to: benchmark::BenchRunner bench_11foo("foo", foo, priority_level); +#define BENCHMARK(n, priority_level) \ + benchmark::BenchRunner PASTE2(bench_, PASTE2(__LINE__, n))(STRINGIZE(n), n, priority_level); #endif // BITCOIN_BENCH_BENCH_H diff --git a/src/bench/bench_bitcoin.cpp b/src/bench/bench_bitcoin.cpp index 1b75854210520..555dca7d5982f 100644 --- a/src/bench/bench_bitcoin.cpp +++ b/src/bench/bench_bitcoin.cpp @@ -1,40 +1,70 @@ -// Copyright (c) 2015-2020 The Bitcoin Core developers +// Copyright (c) 2015-2022 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include +#include +#include +#include +#include +#include -#include -#include +#include +#include +#include +#include +#include +#include +#include -#include +using util::SplitString; -static const int64_t DEFAULT_BENCH_EVALUATIONS = 5; static const char* DEFAULT_BENCH_FILTER = ".*"; -static const char* DEFAULT_BENCH_SCALING = "1.0"; -static const char* DEFAULT_BENCH_PRINTER = "console"; -static const char* DEFAULT_PLOT_PLOTLYURL = "https://cdn.plot.ly/plotly-latest.min.js"; -static const int64_t DEFAULT_PLOT_WIDTH = 1024; -static const int64_t DEFAULT_PLOT_HEIGHT = 768; +static constexpr int64_t DEFAULT_MIN_TIME_MS{10}; +/** Priority level default value, run "all" priority levels */ +static const std::string DEFAULT_PRIORITY{"all"}; static void SetupBenchArgs(ArgsManager& argsman) { SetupHelpOptions(argsman); - argsman.AddArg("-list", "List benchmarks without executing them. Can be combined with -scaling and -filter", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); - argsman.AddArg("-evals=", strprintf("Number of measurement evaluations to perform. 
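// Illustrative sketch (not part of this patch): a minimal benchmark written against
// the new bench.h interface shown above. The function takes benchmark::Bench& and
// wraps the timed work in bench.run(); the BENCHMARK macro registers it together
// with a priority level, which is what -priority-level later filters on. The name
// "SortReversedVector" is made up for this example.
#include <bench/bench.h>

#include <algorithm>
#include <numeric>
#include <vector>

static void SortReversedVector(benchmark::Bench& bench)
{
    std::vector<int> data(1000);
    bench.run([&] {
        // Refill with the same reversed sequence so every call does identical work.
        std::iota(data.rbegin(), data.rend(), 0);
        std::sort(data.begin(), data.end());
    });
}

BENCHMARK(SortReversedVector, benchmark::PriorityLevel::HIGH);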
(default: %u)", DEFAULT_BENCH_EVALUATIONS), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); + argsman.AddArg("-asymptote=", "Test asymptotic growth of the runtime of an algorithm, if supported by the benchmark", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); argsman.AddArg("-filter=", strprintf("Regular expression filter to select benchmark by name (default: %s)", DEFAULT_BENCH_FILTER), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); - argsman.AddArg("-scaling=", strprintf("Scaling factor for benchmark's runtime (default: %u)", DEFAULT_BENCH_SCALING), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); - argsman.AddArg("-printer=(console|plot)", strprintf("Choose printer format. console: print data to console. plot: Print results as HTML graph (default: %s)", DEFAULT_BENCH_PRINTER), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); - argsman.AddArg("-plot-plotlyurl=", strprintf("URL to use for plotly.js (default: %s)", DEFAULT_PLOT_PLOTLYURL), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); - argsman.AddArg("-plot-width=", strprintf("Plot width in pixel (default: %u)", DEFAULT_PLOT_WIDTH), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); - argsman.AddArg("-plot-height=", strprintf("Plot height in pixel (default: %u)", DEFAULT_PLOT_HEIGHT), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); + argsman.AddArg("-list", "List benchmarks without executing them", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); + argsman.AddArg("-min-time=", strprintf("Minimum runtime per benchmark, in milliseconds (default: %d)", DEFAULT_MIN_TIME_MS), ArgsManager::ALLOW_ANY | ArgsManager::DISALLOW_NEGATION, OptionsCategory::OPTIONS); + argsman.AddArg("-output-csv=", "Generate CSV file with the most important benchmark results", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); + argsman.AddArg("-output-json=", "Generate JSON file with all benchmark results", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); + argsman.AddArg("-sanity-check", "Run benchmarks for only one iteration with no output", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); + argsman.AddArg("-priority-level=", strprintf("Run benchmarks of one or multiple priority level(s) (%s), default: '%s'", + benchmark::ListPriorities(), DEFAULT_PRIORITY), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); +} + +// parses a comma separated list like "10,20,30,50" +static std::vector parseAsymptote(const std::string& str) { + std::stringstream ss(str); + std::vector numbers; + double d; + char c; + while (ss >> d) { + numbers.push_back(d); + ss >> c; + } + return numbers; +} + +static uint8_t parsePriorityLevel(const std::string& str) { + uint8_t levels{0}; + for (const auto& level: SplitString(str, ',')) { + levels |= benchmark::StringToPriority(level); + } + return levels; } int main(int argc, char** argv) { ArgsManager argsman; SetupBenchArgs(argsman); + SHA256AutoDetect(); std::string error; if (!argsman.ParseParameters(argc, argv, error)) { tfm::format(std::cerr, "Error parsing command line arguments: %s\n", error); @@ -42,39 +72,71 @@ int main(int argc, char** argv) } if (HelpRequested(argsman)) { - std::cout << argsman.GetHelpMessage(); + std::cout << "Usage: bench_bitcoin [options]\n" + "\n" + << argsman.GetHelpMessage() + << "Description:\n" + "\n" + " bench_bitcoin executes microbenchmarks. The quality of the benchmark results\n" + " highly depend on the stability of the machine. 
It can sometimes be difficult\n" + " to get stable, repeatable results, so here are a few tips:\n" + "\n" + " * Use pyperf [1] to disable frequency scaling, turbo boost etc. For best\n" + " results, use CPU pinning and CPU isolation (see [2]).\n" + "\n" + " * Each call of run() should do exactly the same work. E.g. inserting into\n" + " a std::vector doesn't do that as it will reallocate on certain calls. Make\n" + " sure each run has exactly the same preconditions.\n" + "\n" + " * If results are still not reliable, increase runtime with e.g.\n" + " -min-time=5000 to let a benchmark run for at least 5 seconds.\n" + "\n" + " * bench_bitcoin uses nanobench [3] for which there is extensive\n" + " documentation available online.\n" + "\n" + "Environment Variables:\n" + "\n" + " To attach a profiler you can run a benchmark in endless mode. This can be\n" + " done with the environment variable NANOBENCH_ENDLESS. E.g. like so:\n" + "\n" + " NANOBENCH_ENDLESS=MuHash ./bench_bitcoin -filter=MuHash\n" + "\n" + " In rare cases it can be useful to suppress stability warnings. This can be\n" + " done with the environment variable NANOBENCH_SUPPRESS_WARNINGS, e.g:\n" + "\n" + " NANOBENCH_SUPPRESS_WARNINGS=1 ./bench_bitcoin\n" + "\n" + "Notes:\n" + "\n" + " 1. pyperf\n" + " https://github.com/psf/pyperf\n" + "\n" + " 2. CPU pinning & isolation\n" + " https://pyperf.readthedocs.io/en/latest/system.html\n" + "\n" + " 3. nanobench\n" + " https://github.com/martinus/nanobench\n" + "\n"; return EXIT_SUCCESS; } - int64_t evaluations = argsman.GetArg("-evals", DEFAULT_BENCH_EVALUATIONS); - std::string regex_filter = argsman.GetArg("-filter", DEFAULT_BENCH_FILTER); - std::string scaling_str = argsman.GetArg("-scaling", DEFAULT_BENCH_SCALING); - bool is_list_only = argsman.GetBoolArg("-list", false); + try { + benchmark::Args args; + args.asymptote = parseAsymptote(argsman.GetArg("-asymptote", "")); + args.is_list_only = argsman.GetBoolArg("-list", false); + args.min_time = std::chrono::milliseconds(argsman.GetIntArg("-min-time", DEFAULT_MIN_TIME_MS)); + args.output_csv = argsman.GetPathArg("-output-csv"); + args.output_json = argsman.GetPathArg("-output-json"); + args.regex_filter = argsman.GetArg("-filter", DEFAULT_BENCH_FILTER); + args.sanity_check = argsman.GetBoolArg("-sanity-check", false); + args.priority = parsePriorityLevel(argsman.GetArg("-priority-level", DEFAULT_PRIORITY)); - if (evaluations == 0) { - return EXIT_SUCCESS; - } else if (evaluations < 0) { - tfm::format(std::cerr, "Error parsing evaluations argument: %d\n", evaluations); - return EXIT_FAILURE; - } + benchmark::BenchRunner::RunAll(args); - double scaling_factor; - if (!ParseDouble(scaling_str, &scaling_factor)) { - tfm::format(std::cerr, "Error parsing scaling factor as double: %s\n", scaling_str); + return EXIT_SUCCESS; + } catch (const std::exception& e) { + tfm::format(std::cerr, "Error: %s\n", e.what()); return EXIT_FAILURE; } - - std::unique_ptr printer = MakeUnique(); - std::string printer_arg = argsman.GetArg("-printer", DEFAULT_BENCH_PRINTER); - if ("plot" == printer_arg) { - printer.reset(new benchmark::PlotlyPrinter( - argsman.GetArg("-plot-plotlyurl", DEFAULT_PLOT_PLOTLYURL), - argsman.GetArg("-plot-width", DEFAULT_PLOT_WIDTH), - argsman.GetArg("-plot-height", DEFAULT_PLOT_HEIGHT))); - } - - benchmark::BenchRunner::RunAll(*printer, evaluations, scaling_factor, regex_filter, is_list_only); - - return EXIT_SUCCESS; } diff --git a/src/bench/bip324_ecdh.cpp b/src/bench/bip324_ecdh.cpp new file mode 100644 index 
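// Illustrative sketch (not part of this patch) of the "same work per call" tip from
// the help text above: the first variant reallocates on some iterations and not on
// others, so its timings are noisy; the second does identical work on every call of
// the lambda passed to bench.run(). Both names are made up for this example.
#include <bench/bench.h>

#include <vector>

static void NoisyPushBack(benchmark::Bench& bench)
{
    std::vector<int> v;
    bench.run([&] {
        v.push_back(1); // occasionally triggers a reallocation -> unstable results
    });
}

static void SteadyPushBack(benchmark::Bench& bench)
{
    std::vector<int> v;
    v.reserve(1024);
    bench.run([&] {
        v.clear();
        for (int i = 0; i < 1024; ++i) v.push_back(i); // identical work every call
    });
}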
0000000000000..e6a614dadf3b7 --- /dev/null +++ b/src/bench/bip324_ecdh.cpp @@ -0,0 +1,49 @@ +// Copyright (c) 2022 The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. + +#include +#include +#include +#include +#include + +#include +#include +#include + +static void BIP324_ECDH(benchmark::Bench& bench) +{ + ECC_Context ecc_context{}; + FastRandomContext rng; + + std::array key_data; + std::array our_ellswift_data; + std::array their_ellswift_data; + + rng.fillrand(key_data); + rng.fillrand(our_ellswift_data); + rng.fillrand(their_ellswift_data); + + bench.batch(1).unit("ecdh").run([&] { + CKey key; + key.Set(key_data.data(), key_data.data() + 32, true); + EllSwiftPubKey our_ellswift(our_ellswift_data); + EllSwiftPubKey their_ellswift(their_ellswift_data); + + auto ret = key.ComputeBIP324ECDHSecret(their_ellswift, our_ellswift, true); + + // To make sure that the computation is not the same on every iteration (ellswift decoding + // is variable-time), distribute bytes from the shared secret over the 3 inputs. The most + // important one is their_ellswift, because that one is actually decoded, so it's given most + // bytes. The data is copied into the middle, so that both halves are affected: + // - Copy 8 bytes from the resulting shared secret into middle of the private key. + std::copy(ret.begin(), ret.begin() + 8, key_data.begin() + 12); + // - Copy 8 bytes from the resulting shared secret into the middle of our ellswift key. + std::copy(ret.begin() + 8, ret.begin() + 16, our_ellswift_data.begin() + 28); + // - Copy 16 bytes from the resulting shared secret into the middle of their ellswift key. + std::copy(ret.begin() + 16, ret.end(), their_ellswift_data.begin() + 24); + }); +} + +BENCHMARK(BIP324_ECDH, benchmark::PriorityLevel::HIGH); diff --git a/src/bench/block_assemble.cpp b/src/bench/block_assemble.cpp index 268f67cadafde..4005701caea6b 100644 --- a/src/bench/block_assemble.cpp +++ b/src/bench/block_assemble.cpp @@ -1,62 +1,68 @@ -// Copyright (c) 2011-2019 The Bitcoin Core developers +// Copyright (c) 2011-2022 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include -#include -#include +#include +#include +#include +#include +#include + + + +
+ + + +)DELIM"; +} + +char const* pyperf() noexcept { + return R"DELIM({ + "benchmarks": [ + { + "runs": [ + { + "values": [ +{{#measurement}} {{elapsed}}{{^-last}}, +{{/last}}{{/measurement}} + ] + } + ] + } + ], + "metadata": { + "loops": {{sum(iterations)}}, + "inner_loops": {{batch}}, + "name": "{{title}}", + "unit": "second" + }, + "version": "1.0" +})DELIM"; +} + +char const* json() noexcept { + return R"DELIM({ + "results": [ +{{#result}} { + "title": "{{title}}", + "name": "{{name}}", + "unit": "{{unit}}", + "batch": {{batch}}, + "complexityN": {{complexityN}}, + "epochs": {{epochs}}, + "clockResolution": {{clockResolution}}, + "clockResolutionMultiple": {{clockResolutionMultiple}}, + "maxEpochTime": {{maxEpochTime}}, + "minEpochTime": {{minEpochTime}}, + "minEpochIterations": {{minEpochIterations}}, + "epochIterations": {{epochIterations}}, + "warmup": {{warmup}}, + "relative": {{relative}}, + "median(elapsed)": {{median(elapsed)}}, + "medianAbsolutePercentError(elapsed)": {{medianAbsolutePercentError(elapsed)}}, + "median(instructions)": {{median(instructions)}}, + "medianAbsolutePercentError(instructions)": {{medianAbsolutePercentError(instructions)}}, + "median(cpucycles)": {{median(cpucycles)}}, + "median(contextswitches)": {{median(contextswitches)}}, + "median(pagefaults)": {{median(pagefaults)}}, + "median(branchinstructions)": {{median(branchinstructions)}}, + "median(branchmisses)": {{median(branchmisses)}}, + "totalTime": {{sumProduct(iterations, elapsed)}}, + "measurements": [ +{{#measurement}} { + "iterations": {{iterations}}, + "elapsed": {{elapsed}}, + "pagefaults": {{pagefaults}}, + "cpucycles": {{cpucycles}}, + "contextswitches": {{contextswitches}}, + "instructions": {{instructions}}, + "branchinstructions": {{branchinstructions}}, + "branchmisses": {{branchmisses}} + }{{^-last}},{{/-last}} +{{/measurement}} ] + }{{^-last}},{{/-last}} +{{/result}} ] +})DELIM"; +} + +ANKERL_NANOBENCH(IGNORE_PADDED_PUSH) +struct Node { + enum class Type { tag, content, section, inverted_section }; + + char const* begin; + char const* end; + std::vector children; + Type type; + + template + // NOLINTNEXTLINE(hicpp-avoid-c-arrays,modernize-avoid-c-arrays,cppcoreguidelines-avoid-c-arrays) + bool operator==(char const (&str)[N]) const noexcept { + // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-array-to-pointer-decay) + return static_cast(std::distance(begin, end) + 1) == N && 0 == strncmp(str, begin, N - 1); + } +}; +ANKERL_NANOBENCH(IGNORE_PADDED_POP) + +// NOLINTNEXTLINE(misc-no-recursion) +static std::vector parseMustacheTemplate(char const** tpl) { + std::vector nodes; + + while (true) { + auto const* begin = std::strstr(*tpl, "{{"); + auto const* end = begin; + if (begin != nullptr) { + // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic) + begin += 2; + end = std::strstr(begin, "}}"); + } + + if (begin == nullptr || end == nullptr) { + // nothing found, finish node + // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic) + nodes.emplace_back(Node{*tpl, *tpl + std::strlen(*tpl), std::vector{}, Node::Type::content}); + return nodes; + } + + // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic) + nodes.emplace_back(Node{*tpl, begin - 2, std::vector{}, Node::Type::content}); + + // we found a tag + // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic) + *tpl = end + 2; + switch (*begin) { + case '/': + // finished! 
bail out + return nodes; + + case '#': + // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic) + nodes.emplace_back(Node{begin + 1, end, parseMustacheTemplate(tpl), Node::Type::section}); + break; + + case '^': + // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic) + nodes.emplace_back(Node{begin + 1, end, parseMustacheTemplate(tpl), Node::Type::inverted_section}); + break; + + default: + nodes.emplace_back(Node{begin, end, std::vector{}, Node::Type::tag}); + break; + } + } +} + +static bool generateFirstLast(Node const& n, size_t idx, size_t size, std::ostream& out) { + ANKERL_NANOBENCH_LOG("n.type=" << static_cast(n.type)); + bool const matchFirst = n == "-first"; + bool const matchLast = n == "-last"; + if (!matchFirst && !matchLast) { + return false; + } + + bool doWrite = false; + if (n.type == Node::Type::section) { + doWrite = (matchFirst && idx == 0) || (matchLast && idx == size - 1); + } else if (n.type == Node::Type::inverted_section) { + doWrite = (matchFirst && idx != 0) || (matchLast && idx != size - 1); + } + + if (doWrite) { + for (auto const& child : n.children) { + if (child.type == Node::Type::content) { + out.write(child.begin, std::distance(child.begin, child.end)); + } + } + } + return true; +} + +static bool matchCmdArgs(std::string const& str, std::vector& matchResult) { + matchResult.clear(); + auto idxOpen = str.find('('); + auto idxClose = str.find(')', idxOpen); + if (idxClose == std::string::npos) { + return false; + } + + matchResult.emplace_back(str.substr(0, idxOpen)); + + // split by comma + matchResult.emplace_back(); + for (size_t i = idxOpen + 1; i != idxClose; ++i) { + if (str[i] == ' ' || str[i] == '\t') { + // skip whitespace + continue; + } + if (str[i] == ',') { + // got a comma => new string + matchResult.emplace_back(); + continue; + } + // no whitespace no comma, append + matchResult.back() += str[i]; + } + return true; +} + +static bool generateConfigTag(Node const& n, Config const& config, std::ostream& out) { + using detail::d; + + if (n == "title") { + out << config.mBenchmarkTitle; + return true; + } + if (n == "name") { + out << config.mBenchmarkName; + return true; + } + if (n == "unit") { + out << config.mUnit; + return true; + } + if (n == "batch") { + out << config.mBatch; + return true; + } + if (n == "complexityN") { + out << config.mComplexityN; + return true; + } + if (n == "epochs") { + out << config.mNumEpochs; + return true; + } + if (n == "clockResolution") { + out << d(detail::clockResolution()); + return true; + } + if (n == "clockResolutionMultiple") { + out << config.mClockResolutionMultiple; + return true; + } + if (n == "maxEpochTime") { + out << d(config.mMaxEpochTime); + return true; + } + if (n == "minEpochTime") { + out << d(config.mMinEpochTime); + return true; + } + if (n == "minEpochIterations") { + out << config.mMinEpochIterations; + return true; + } + if (n == "epochIterations") { + out << config.mEpochIterations; + return true; + } + if (n == "warmup") { + out << config.mWarmup; + return true; + } + if (n == "relative") { + out << config.mIsRelative; + return true; + } + return false; +} + +// NOLINTNEXTLINE(readability-function-cognitive-complexity) +static std::ostream& generateResultTag(Node const& n, Result const& r, std::ostream& out) { + if (generateConfigTag(n, r.config(), out)) { + return out; + } + // match e.g. 
"median(elapsed)" + // g++ 4.8 doesn't implement std::regex :( + // static std::regex const regOpArg1("^([a-zA-Z]+)\\(([a-zA-Z]*)\\)$"); + // std::cmatch matchResult; + // if (std::regex_match(n.begin, n.end, matchResult, regOpArg1)) { + std::vector matchResult; + if (matchCmdArgs(std::string(n.begin, n.end), matchResult)) { + if (matchResult.size() == 2) { + if (matchResult[0] == "context") { + return out << r.context(matchResult[1]); + } + + auto m = Result::fromString(matchResult[1]); + if (m == Result::Measure::_size) { + return out << 0.0; + } + + if (matchResult[0] == "median") { + return out << r.median(m); + } + if (matchResult[0] == "average") { + return out << r.average(m); + } + if (matchResult[0] == "medianAbsolutePercentError") { + return out << r.medianAbsolutePercentError(m); + } + if (matchResult[0] == "sum") { + return out << r.sum(m); + } + if (matchResult[0] == "minimum") { + return out << r.minimum(m); + } + if (matchResult[0] == "maximum") { + return out << r.maximum(m); + } + } else if (matchResult.size() == 3) { + auto m1 = Result::fromString(matchResult[1]); + auto m2 = Result::fromString(matchResult[2]); + if (m1 == Result::Measure::_size || m2 == Result::Measure::_size) { + return out << 0.0; + } + + if (matchResult[0] == "sumProduct") { + return out << r.sumProduct(m1, m2); + } + } + } + + // match e.g. "sumProduct(elapsed, iterations)" + // static std::regex const regOpArg2("^([a-zA-Z]+)\\(([a-zA-Z]*)\\s*,\\s+([a-zA-Z]*)\\)$"); + + // nothing matches :( + throw std::runtime_error("command '" + std::string(n.begin, n.end) + "' not understood"); +} + +static void generateResultMeasurement(std::vector const& nodes, size_t idx, Result const& r, std::ostream& out) { + for (auto const& n : nodes) { + if (!generateFirstLast(n, idx, r.size(), out)) { + ANKERL_NANOBENCH_LOG("n.type=" << static_cast(n.type)); + switch (n.type) { + case Node::Type::content: + out.write(n.begin, std::distance(n.begin, n.end)); + break; + + case Node::Type::inverted_section: + throw std::runtime_error("got a inverted section inside measurement"); + + case Node::Type::section: + throw std::runtime_error("got a section inside measurement"); + + case Node::Type::tag: { + auto m = Result::fromString(std::string(n.begin, n.end)); + if (m == Result::Measure::_size || !r.has(m)) { + out << 0.0; + } else { + out << r.get(idx, m); + } + break; + } + } + } + } +} + +static void generateResult(std::vector const& nodes, size_t idx, std::vector const& results, std::ostream& out) { + auto const& r = results[idx]; + for (auto const& n : nodes) { + if (!generateFirstLast(n, idx, results.size(), out)) { + ANKERL_NANOBENCH_LOG("n.type=" << static_cast(n.type)); + switch (n.type) { + case Node::Type::content: + out.write(n.begin, std::distance(n.begin, n.end)); + break; + + case Node::Type::inverted_section: + throw std::runtime_error("got a inverted section inside result"); + + case Node::Type::section: + if (n == "measurement") { + for (size_t i = 0; i < r.size(); ++i) { + generateResultMeasurement(n.children, i, r, out); + } + } else { + throw std::runtime_error("got a section inside result"); + } + break; + + case Node::Type::tag: + generateResultTag(n, r, out); + break; + } + } + } +} + +} // namespace templates + +// helper stuff that only intended to be used internally +namespace detail { + +char const* getEnv(char const* name); +bool isEndlessRunning(std::string const& name); +bool isWarningsEnabled(); + +template +T parseFile(std::string const& filename, bool* fail); + +void 
gatherStabilityInformation(std::vector& warnings, std::vector& recommendations); +void printStabilityInformationOnce(std::ostream* outStream); + +// remembers the last table settings used. When it changes, a new table header is automatically written for the new entry. +uint64_t& singletonHeaderHash() noexcept; + +// determines resolution of the given clock. This is done by measuring multiple times and returning the minimum time difference. +Clock::duration calcClockResolution(size_t numEvaluations) noexcept; + +// formatting utilities +namespace fmt { + +// adds thousands separator to numbers +ANKERL_NANOBENCH(IGNORE_PADDED_PUSH) +class NumSep : public std::numpunct { +public: + explicit NumSep(char sep); + char do_thousands_sep() const override; + std::string do_grouping() const override; + +private: + char mSep; +}; +ANKERL_NANOBENCH(IGNORE_PADDED_POP) + +// RAII to save & restore a stream's state +ANKERL_NANOBENCH(IGNORE_PADDED_PUSH) +class StreamStateRestorer { +public: + explicit StreamStateRestorer(std::ostream& s); + ~StreamStateRestorer(); + + // sets back all stream info that we remembered at construction + void restore(); + + // don't allow copying / moving + StreamStateRestorer(StreamStateRestorer const&) = delete; + StreamStateRestorer& operator=(StreamStateRestorer const&) = delete; + StreamStateRestorer(StreamStateRestorer&&) = delete; + StreamStateRestorer& operator=(StreamStateRestorer&&) = delete; + +private: + std::ostream& mStream; + std::locale mLocale; + std::streamsize const mPrecision; + std::streamsize const mWidth; + std::ostream::char_type const mFill; + std::ostream::fmtflags const mFmtFlags; +}; +ANKERL_NANOBENCH(IGNORE_PADDED_POP) + +// Number formatter +class Number { +public: + Number(int width, int precision, double value); + Number(int width, int precision, int64_t value); + ANKERL_NANOBENCH(NODISCARD) std::string to_s() const; + +private: + friend std::ostream& operator<<(std::ostream& os, Number const& n); + std::ostream& write(std::ostream& os) const; + + int mWidth; + int mPrecision; + double mValue; +}; + +// helper replacement for std::to_string of signed/unsigned numbers so we are locale independent +std::string to_s(uint64_t n); + +std::ostream& operator<<(std::ostream& os, Number const& n); + +class MarkDownColumn { +public: + MarkDownColumn(int w, int prec, std::string tit, std::string suff, double val) noexcept; + ANKERL_NANOBENCH(NODISCARD) std::string title() const; + ANKERL_NANOBENCH(NODISCARD) std::string separator() const; + ANKERL_NANOBENCH(NODISCARD) std::string invalid() const; + ANKERL_NANOBENCH(NODISCARD) std::string value() const; + +private: + int mWidth; + int mPrecision; + std::string mTitle; + std::string mSuffix; + double mValue; +}; + +// Formats any text as markdown code, escaping backticks. 
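// Illustrative worked example (not part of nanobench) for the backtick escaping
// described above: any backtick inside the text is doubled and the whole string is
// wrapped in single backticks, so the output stays valid markdown code formatting.
//
//     std::ostringstream oss;
//     oss << MarkDownCode("a`b");
//     assert(oss.str() == "`a``b`");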
+class MarkDownCode { +public: + explicit MarkDownCode(std::string const& what); + +private: + friend std::ostream& operator<<(std::ostream& os, MarkDownCode const& mdCode); + std::ostream& write(std::ostream& os) const; + + std::string mWhat{}; +}; + +std::ostream& operator<<(std::ostream& os, MarkDownCode const& mdCode); + +} // namespace fmt +} // namespace detail +} // namespace nanobench +} // namespace ankerl + +// implementation ///////////////////////////////////////////////////////////////////////////////// + +namespace ankerl { +namespace nanobench { + +// NOLINTNEXTLINE(readability-function-cognitive-complexity) +void render(char const* mustacheTemplate, std::vector const& results, std::ostream& out) { + detail::fmt::StreamStateRestorer const restorer(out); + + out.precision(std::numeric_limits::digits10); + auto nodes = templates::parseMustacheTemplate(&mustacheTemplate); + + for (auto const& n : nodes) { + ANKERL_NANOBENCH_LOG("n.type=" << static_cast(n.type)); + switch (n.type) { + case templates::Node::Type::content: + out.write(n.begin, std::distance(n.begin, n.end)); + break; + + case templates::Node::Type::inverted_section: + throw std::runtime_error("unknown list '" + std::string(n.begin, n.end) + "'"); + + case templates::Node::Type::section: + if (n == "result") { + const size_t nbResults = results.size(); + for (size_t i = 0; i < nbResults; ++i) { + generateResult(n.children, i, results, out); + } + } else if (n == "measurement") { + if (results.size() != 1) { + throw std::runtime_error( + "render: can only use section 'measurement' here if there is a single result, but there are " + + detail::fmt::to_s(results.size())); + } + // when we only have a single result, we can immediately go into its measurement. + auto const& r = results.front(); + for (size_t i = 0; i < r.size(); ++i) { + generateResultMeasurement(n.children, i, r, out); + } + } else { + throw std::runtime_error("render: unknown section '" + std::string(n.begin, n.end) + "'"); + } + break; + + case templates::Node::Type::tag: + if (results.size() == 1) { + // result & config are both supported there + generateResultTag(n, results.front(), out); + } else { + // This just uses the last result's config. 
+ if (!generateConfigTag(n, results.back().config(), out)) { + throw std::runtime_error("unknown tag '" + std::string(n.begin, n.end) + "'"); + } + } + break; + } + } +} + +void render(std::string const& mustacheTemplate, std::vector const& results, std::ostream& out) { + render(mustacheTemplate.c_str(), results, out); +} + +void render(char const* mustacheTemplate, const Bench& bench, std::ostream& out) { + render(mustacheTemplate, bench.results(), out); +} + +void render(std::string const& mustacheTemplate, const Bench& bench, std::ostream& out) { + render(mustacheTemplate.c_str(), bench.results(), out); +} + +namespace detail { + +PerformanceCounters& performanceCounters() { +# if defined(__clang__) +# pragma clang diagnostic push +# pragma clang diagnostic ignored "-Wexit-time-destructors" +# endif + static PerformanceCounters pc; +# if defined(__clang__) +# pragma clang diagnostic pop +# endif + return pc; +} + +// Windows version of doNotOptimizeAway +// see https://github.com/google/benchmark/blob/v1.7.1/include/benchmark/benchmark.h#L514 +// see https://github.com/facebook/folly/blob/v2023.01.30.00/folly/lang/Hint-inl.h#L54-L58 +// see https://learn.microsoft.com/en-us/cpp/preprocessor/optimize +# if defined(_MSC_VER) +# pragma optimize("", off) +void doNotOptimizeAwaySink(void const*) {} +# pragma optimize("", on) +# endif + +template +T parseFile(std::string const& filename, bool* fail) { + std::ifstream fin(filename); // NOLINT(misc-const-correctness) + T num{}; + fin >> num; + if (fail != nullptr) { + *fail = fin.fail(); + } + return num; +} + +char const* getEnv(char const* name) { +# if defined(_MSC_VER) +# pragma warning(push) +# pragma warning(disable : 4996) // getenv': This function or variable may be unsafe. +# endif + return std::getenv(name); // NOLINT(concurrency-mt-unsafe) +# if defined(_MSC_VER) +# pragma warning(pop) +# endif +} + +bool isEndlessRunning(std::string const& name) { + auto const* const endless = getEnv("NANOBENCH_ENDLESS"); + return nullptr != endless && endless == name; +} + +// True when environment variable NANOBENCH_SUPPRESS_WARNINGS is either not set at all, or set to "0" +bool isWarningsEnabled() { + auto const* const suppression = getEnv("NANOBENCH_SUPPRESS_WARNINGS"); + return nullptr == suppression || suppression == std::string("0"); +} + +void gatherStabilityInformation(std::vector& warnings, std::vector& recommendations) { + warnings.clear(); + recommendations.clear(); + +# if defined(DEBUG) + warnings.emplace_back("DEBUG defined"); + bool const recommendCheckFlags = true; +# else + bool const recommendCheckFlags = false; +# endif + + bool recommendPyPerf = false; +# if defined(__linux__) + auto nprocs = sysconf(_SC_NPROCESSORS_CONF); + if (nprocs <= 0) { + warnings.emplace_back("couldn't figure out number of processors - no governor, turbo check possible"); + } else { + // check frequency scaling + for (long id = 0; id < nprocs; ++id) { + auto idStr = detail::fmt::to_s(static_cast(id)); + auto sysCpu = "/sys/devices/system/cpu/cpu" + idStr; + auto minFreq = parseFile(sysCpu + "/cpufreq/scaling_min_freq", nullptr); + auto maxFreq = parseFile(sysCpu + "/cpufreq/scaling_max_freq", nullptr); + if (minFreq != maxFreq) { + auto minMHz = d(minFreq) / 1000.0; + auto maxMHz = d(maxFreq) / 1000.0; + warnings.emplace_back("CPU frequency scaling enabled: CPU " + idStr + " between " + + detail::fmt::Number(1, 1, minMHz).to_s() + " and " + detail::fmt::Number(1, 1, maxMHz).to_s() + + " MHz"); + recommendPyPerf = true; + break; + } + } + + auto fail = 
false; + auto currentGovernor = parseFile("/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor", &fail); + if (!fail && "performance" != currentGovernor) { + warnings.emplace_back("CPU governor is '" + currentGovernor + "' but should be 'performance'"); + recommendPyPerf = true; + } + + auto noTurbo = parseFile("/sys/devices/system/cpu/intel_pstate/no_turbo", &fail); + if (!fail && noTurbo == 0) { + warnings.emplace_back("Turbo is enabled, CPU frequency will fluctuate"); + recommendPyPerf = true; + } + } +# endif + + if (recommendCheckFlags) { + recommendations.emplace_back("Make sure you compile for Release"); + } + if (recommendPyPerf) { + recommendations.emplace_back("Use 'pyperf system tune' before benchmarking. See https://github.com/psf/pyperf"); + } +} + +void printStabilityInformationOnce(std::ostream* outStream) { + static bool shouldPrint = true; + if (shouldPrint && (nullptr != outStream) && isWarningsEnabled()) { + auto& os = *outStream; + shouldPrint = false; + std::vector warnings; + std::vector recommendations; + gatherStabilityInformation(warnings, recommendations); + if (warnings.empty()) { + return; + } + + os << "Warning, results might be unstable:" << std::endl; + for (auto const& w : warnings) { + os << "* " << w << std::endl; + } + + os << std::endl << "Recommendations" << std::endl; + for (auto const& r : recommendations) { + os << "* " << r << std::endl; + } + } +} + +// remembers the last table settings used. When it changes, a new table header is automatically written for the new entry. +uint64_t& singletonHeaderHash() noexcept { + static uint64_t sHeaderHash{}; + return sHeaderHash; +} + +ANKERL_NANOBENCH_NO_SANITIZE("integer", "undefined") +inline uint64_t hash_combine(uint64_t seed, uint64_t val) { + return seed ^ (val + UINT64_C(0x9e3779b9) + (seed << 6U) + (seed >> 2U)); +} + +// determines resolution of the given clock. This is done by measuring multiple times and returning the minimum time difference. 
+Clock::duration calcClockResolution(size_t numEvaluations) noexcept { + auto bestDuration = Clock::duration::max(); + Clock::time_point tBegin; + Clock::time_point tEnd; + for (size_t i = 0; i < numEvaluations; ++i) { + tBegin = Clock::now(); + do { + tEnd = Clock::now(); + } while (tBegin == tEnd); + bestDuration = (std::min)(bestDuration, tEnd - tBegin); + } + return bestDuration; +} + +// Calculates clock resolution once, and remembers the result +Clock::duration clockResolution() noexcept { + static Clock::duration const sResolution = calcClockResolution(20); + return sResolution; +} + +ANKERL_NANOBENCH(IGNORE_PADDED_PUSH) +struct IterationLogic::Impl { + enum class State { warmup, upscaling_runtime, measuring, endless }; + + explicit Impl(Bench const& bench) + : mBench(bench) + , mResult(bench.config()) { + printStabilityInformationOnce(mBench.output()); + + // determine target runtime per epoch + mTargetRuntimePerEpoch = detail::clockResolution() * mBench.clockResolutionMultiple(); + if (mTargetRuntimePerEpoch > mBench.maxEpochTime()) { + mTargetRuntimePerEpoch = mBench.maxEpochTime(); + } + if (mTargetRuntimePerEpoch < mBench.minEpochTime()) { + mTargetRuntimePerEpoch = mBench.minEpochTime(); + } + + if (isEndlessRunning(mBench.name())) { + std::cerr << "NANOBENCH_ENDLESS set: running '" << mBench.name() << "' endlessly" << std::endl; + mNumIters = (std::numeric_limits::max)(); + mState = State::endless; + } else if (0 != mBench.warmup()) { + mNumIters = mBench.warmup(); + mState = State::warmup; + } else if (0 != mBench.epochIterations()) { + // exact number of iterations + mNumIters = mBench.epochIterations(); + mState = State::measuring; + } else { + mNumIters = mBench.minEpochIterations(); + mState = State::upscaling_runtime; + } + } + + // directly calculates new iters based on elapsed&iters, and adds a 10% noise. Makes sure we don't underflow. + ANKERL_NANOBENCH(NODISCARD) uint64_t calcBestNumIters(std::chrono::nanoseconds elapsed, uint64_t iters) noexcept { + auto doubleElapsed = d(elapsed); + auto doubleTargetRuntimePerEpoch = d(mTargetRuntimePerEpoch); + auto doubleNewIters = doubleTargetRuntimePerEpoch / doubleElapsed * d(iters); + + auto doubleMinEpochIters = d(mBench.minEpochIterations()); + if (doubleNewIters < doubleMinEpochIters) { + doubleNewIters = doubleMinEpochIters; + } + doubleNewIters *= 1.0 + 0.2 * mRng.uniform01(); + + // +0.5 for correct rounding when casting + // NOLINTNEXTLINE(bugprone-incorrect-roundings) + return static_cast(doubleNewIters + 0.5); + } + + ANKERL_NANOBENCH_NO_SANITIZE("integer", "undefined") void upscale(std::chrono::nanoseconds elapsed) { + if (elapsed * 10 < mTargetRuntimePerEpoch) { + // we are far below the target runtime. Multiply iterations by 10 (with overflow check) + if (mNumIters * 10 < mNumIters) { + // overflow :-( + showResult("iterations overflow. Maybe your code got optimized away?"); + mNumIters = 0; + return; + } + mNumIters *= 10; + } else { + mNumIters = calcBestNumIters(elapsed, mNumIters); + } + } + + void add(std::chrono::nanoseconds elapsed, PerformanceCounters const& pc) noexcept { +# if defined(ANKERL_NANOBENCH_LOG_ENABLED) + auto oldIters = mNumIters; +# endif + + switch (mState) { + case State::warmup: + if (isCloseEnoughForMeasurements(elapsed)) { + // if elapsed is close enough, we can skip upscaling and go right to measurements + // still, we don't add the result to the measurements. 
+ mState = State::measuring; + mNumIters = calcBestNumIters(elapsed, mNumIters); + } else { + // not close enough: switch to upscaling + mState = State::upscaling_runtime; + upscale(elapsed); + } + break; + + case State::upscaling_runtime: + if (isCloseEnoughForMeasurements(elapsed)) { + // if we are close enough, add measurement and switch to always measuring + mState = State::measuring; + mTotalElapsed += elapsed; + mTotalNumIters += mNumIters; + mResult.add(elapsed, mNumIters, pc); + mNumIters = calcBestNumIters(mTotalElapsed, mTotalNumIters); + } else { + upscale(elapsed); + } + break; + + case State::measuring: + // just add measurements - no questions asked. Even when runtime is low. But we can't ignore + // that fluctuation, or else we would bias the result + mTotalElapsed += elapsed; + mTotalNumIters += mNumIters; + mResult.add(elapsed, mNumIters, pc); + if (0 != mBench.epochIterations()) { + mNumIters = mBench.epochIterations(); + } else { + mNumIters = calcBestNumIters(mTotalElapsed, mTotalNumIters); + } + break; + + case State::endless: + mNumIters = (std::numeric_limits::max)(); + break; + } + + if (static_cast(mResult.size()) == mBench.epochs()) { + // we got all the results that we need, finish it + showResult(""); + mNumIters = 0; + } + + ANKERL_NANOBENCH_LOG(mBench.name() << ": " << detail::fmt::Number(20, 3, d(elapsed.count())) << " elapsed, " + << detail::fmt::Number(20, 3, d(mTargetRuntimePerEpoch.count())) << " target. oldIters=" + << oldIters << ", mNumIters=" << mNumIters << ", mState=" << static_cast(mState)); + } + + // NOLINTNEXTLINE(readability-function-cognitive-complexity) + void showResult(std::string const& errorMessage) const { + ANKERL_NANOBENCH_LOG(errorMessage); + + if (mBench.output() != nullptr) { + // prepare column data /////// + std::vector columns; + + auto rMedian = mResult.median(Result::Measure::elapsed); + + if (mBench.relative()) { + double d = 100.0; + if (!mBench.results().empty()) { + d = rMedian <= 0.0 ? 0.0 : mBench.results().front().median(Result::Measure::elapsed) / rMedian * 100.0; + } + columns.emplace_back(11, 1, "relative", "%", d); + } + + if (mBench.complexityN() > 0) { + columns.emplace_back(14, 0, "complexityN", "", mBench.complexityN()); + } + + columns.emplace_back(22, 2, mBench.timeUnitName() + "/" + mBench.unit(), "", + rMedian / (mBench.timeUnit().count() * mBench.batch())); + columns.emplace_back(22, 2, mBench.unit() + "/s", "", rMedian <= 0.0 ? 0.0 : mBench.batch() / rMedian); + + double const rErrorMedian = mResult.medianAbsolutePercentError(Result::Measure::elapsed); + columns.emplace_back(10, 1, "err%", "%", rErrorMedian * 100.0); + + double rInsMedian = -1.0; + if (mBench.performanceCounters() && mResult.has(Result::Measure::instructions)) { + rInsMedian = mResult.median(Result::Measure::instructions); + columns.emplace_back(18, 2, "ins/" + mBench.unit(), "", rInsMedian / mBench.batch()); + } + + double rCycMedian = -1.0; + if (mBench.performanceCounters() && mResult.has(Result::Measure::cpucycles)) { + rCycMedian = mResult.median(Result::Measure::cpucycles); + columns.emplace_back(18, 2, "cyc/" + mBench.unit(), "", rCycMedian / mBench.batch()); + } + if (rInsMedian > 0.0 && rCycMedian > 0.0) { + columns.emplace_back(9, 3, "IPC", "", rCycMedian <= 0.0 ? 
0.0 : rInsMedian / rCycMedian); + } + if (mBench.performanceCounters() && mResult.has(Result::Measure::branchinstructions)) { + double const rBraMedian = mResult.median(Result::Measure::branchinstructions); + columns.emplace_back(17, 2, "bra/" + mBench.unit(), "", rBraMedian / mBench.batch()); + if (mResult.has(Result::Measure::branchmisses)) { + double p = 0.0; + if (rBraMedian >= 1e-9) { + p = 100.0 * mResult.median(Result::Measure::branchmisses) / rBraMedian; + } + columns.emplace_back(10, 1, "miss%", "%", p); + } + } + + columns.emplace_back(12, 2, "total", "", mResult.sumProduct(Result::Measure::iterations, Result::Measure::elapsed)); + + // write everything + auto& os = *mBench.output(); + + // combine all elements that are relevant for printing the header + uint64_t hash = 0; + hash = hash_combine(std::hash{}(mBench.unit()), hash); + hash = hash_combine(std::hash{}(mBench.title()), hash); + hash = hash_combine(std::hash{}(mBench.timeUnitName()), hash); + hash = hash_combine(std::hash{}(mBench.timeUnit().count()), hash); + hash = hash_combine(std::hash{}(mBench.relative()), hash); + hash = hash_combine(std::hash{}(mBench.performanceCounters()), hash); + + if (hash != singletonHeaderHash()) { + singletonHeaderHash() = hash; + + // no result yet, print header + os << std::endl; + for (auto const& col : columns) { + os << col.title(); + } + os << "| " << mBench.title() << std::endl; + + for (auto const& col : columns) { + os << col.separator(); + } + os << "|:" << std::string(mBench.title().size() + 1U, '-') << std::endl; + } + + if (!errorMessage.empty()) { + for (auto const& col : columns) { + os << col.invalid(); + } + os << "| :boom: " << fmt::MarkDownCode(mBench.name()) << " (" << errorMessage << ')' << std::endl; + } else { + for (auto const& col : columns) { + os << col.value(); + } + os << "| "; + auto showUnstable = isWarningsEnabled() && rErrorMedian >= 0.05; + if (showUnstable) { + os << ":wavy_dash: "; + } + os << fmt::MarkDownCode(mBench.name()); + if (showUnstable) { + auto avgIters = d(mTotalNumIters) / d(mBench.epochs()); + // NOLINTNEXTLINE(bugprone-incorrect-roundings) + auto suggestedIters = static_cast(avgIters * 10 + 0.5); + + os << " (Unstable with ~" << detail::fmt::Number(1, 1, avgIters) + << " iters. Increase `minEpochIterations` to e.g. 
" << suggestedIters << ")"; + } + os << std::endl; + } + } + } + + ANKERL_NANOBENCH(NODISCARD) bool isCloseEnoughForMeasurements(std::chrono::nanoseconds elapsed) const noexcept { + return elapsed * 3 >= mTargetRuntimePerEpoch * 2; + } + + uint64_t mNumIters = 1; // NOLINT(misc-non-private-member-variables-in-classes) + Bench const& mBench; // NOLINT(misc-non-private-member-variables-in-classes) + std::chrono::nanoseconds mTargetRuntimePerEpoch{}; // NOLINT(misc-non-private-member-variables-in-classes) + Result mResult; // NOLINT(misc-non-private-member-variables-in-classes) + Rng mRng{123}; // NOLINT(misc-non-private-member-variables-in-classes) + std::chrono::nanoseconds mTotalElapsed{}; // NOLINT(misc-non-private-member-variables-in-classes) + uint64_t mTotalNumIters = 0; // NOLINT(misc-non-private-member-variables-in-classes) + State mState = State::upscaling_runtime; // NOLINT(misc-non-private-member-variables-in-classes) +}; +ANKERL_NANOBENCH(IGNORE_PADDED_POP) + +IterationLogic::IterationLogic(Bench const& bench) + : mPimpl(new Impl(bench)) {} + +IterationLogic::~IterationLogic() { + delete mPimpl; +} + +uint64_t IterationLogic::numIters() const noexcept { + ANKERL_NANOBENCH_LOG(mPimpl->mBench.name() << ": mNumIters=" << mPimpl->mNumIters); + return mPimpl->mNumIters; +} + +void IterationLogic::add(std::chrono::nanoseconds elapsed, PerformanceCounters const& pc) noexcept { + mPimpl->add(elapsed, pc); +} + +void IterationLogic::moveResultTo(std::vector& results) noexcept { + results.emplace_back(std::move(mPimpl->mResult)); +} + +# if ANKERL_NANOBENCH(PERF_COUNTERS) + +ANKERL_NANOBENCH(IGNORE_PADDED_PUSH) +class LinuxPerformanceCounters { +public: + struct Target { + Target(uint64_t* targetValue_, bool correctMeasuringOverhead_, bool correctLoopOverhead_) + : targetValue(targetValue_) + , correctMeasuringOverhead(correctMeasuringOverhead_) + , correctLoopOverhead(correctLoopOverhead_) {} + + uint64_t* targetValue{}; // NOLINT(misc-non-private-member-variables-in-classes) + bool correctMeasuringOverhead{}; // NOLINT(misc-non-private-member-variables-in-classes) + bool correctLoopOverhead{}; // NOLINT(misc-non-private-member-variables-in-classes) + }; + + LinuxPerformanceCounters() = default; + LinuxPerformanceCounters(LinuxPerformanceCounters const&) = delete; + LinuxPerformanceCounters(LinuxPerformanceCounters&&) = delete; + LinuxPerformanceCounters& operator=(LinuxPerformanceCounters const&) = delete; + LinuxPerformanceCounters& operator=(LinuxPerformanceCounters&&) = delete; + ~LinuxPerformanceCounters(); + + // quick operation + inline void start() {} + + inline void stop() {} + + bool monitor(perf_sw_ids swId, Target target); + bool monitor(perf_hw_id hwId, Target target); + + ANKERL_NANOBENCH(NODISCARD) bool hasError() const noexcept { + return mHasError; + } + + // Just reading data is faster than enable & disabling. + // we subtract data ourselves. 
+ inline void beginMeasure() { + if (mHasError) { + return; + } + + // NOLINTNEXTLINE(hicpp-signed-bitwise,cppcoreguidelines-pro-type-vararg) + mHasError = -1 == ioctl(mFd, PERF_EVENT_IOC_RESET, PERF_IOC_FLAG_GROUP); + if (mHasError) { + return; + } + + // NOLINTNEXTLINE(hicpp-signed-bitwise,cppcoreguidelines-pro-type-vararg) + mHasError = -1 == ioctl(mFd, PERF_EVENT_IOC_ENABLE, PERF_IOC_FLAG_GROUP); + } + + inline void endMeasure() { + if (mHasError) { + return; + } + + // NOLINTNEXTLINE(hicpp-signed-bitwise,cppcoreguidelines-pro-type-vararg) + mHasError = (-1 == ioctl(mFd, PERF_EVENT_IOC_DISABLE, PERF_IOC_FLAG_GROUP)); + if (mHasError) { + return; + } + + auto const numBytes = sizeof(uint64_t) * mCounters.size(); + auto ret = read(mFd, mCounters.data(), numBytes); + mHasError = ret != static_cast(numBytes); + } + + void updateResults(uint64_t numIters); + + // rounded integer division + template + static inline T divRounded(T a, T divisor) { + return (a + divisor / 2) / divisor; + } + + ANKERL_NANOBENCH_NO_SANITIZE("integer", "undefined") + static inline uint32_t mix(uint32_t x) noexcept { + x ^= x << 13U; + x ^= x >> 17U; + x ^= x << 5U; + return x; + } + + template + ANKERL_NANOBENCH_NO_SANITIZE("integer", "undefined") + void calibrate(Op&& op) { + // clear current calibration data, + for (auto& v : mCalibratedOverhead) { + v = UINT64_C(0); + } + + // create new calibration data + auto newCalibration = mCalibratedOverhead; + for (auto& v : newCalibration) { + v = (std::numeric_limits::max)(); + } + for (size_t iter = 0; iter < 100; ++iter) { + beginMeasure(); + op(); + endMeasure(); + if (mHasError) { + return; + } + + for (size_t i = 0; i < newCalibration.size(); ++i) { + auto diff = mCounters[i]; + if (newCalibration[i] > diff) { + newCalibration[i] = diff; + } + } + } + + mCalibratedOverhead = std::move(newCalibration); + + { + // calibrate loop overhead. For branches & instructions this makes sense, not so much for everything else like cycles. + // marsaglia's xorshift: mov, sal/shr, xor. Times 3. + // This has the nice property that the compiler doesn't seem to be able to optimize multiple calls any further. + // see https://godbolt.org/z/49RVQ5 + uint64_t const numIters = 100000U + (std::random_device{}() & 3U); + uint64_t n = numIters; + uint32_t x = 1234567; + + beginMeasure(); + while (n-- > 0) { + x = mix(x); + } + endMeasure(); + detail::doNotOptimizeAway(x); + auto measure1 = mCounters; + + n = numIters; + beginMeasure(); + while (n-- > 0) { + // we now run *twice* so we can easily calculate the overhead + x = mix(x); + x = mix(x); + } + endMeasure(); + detail::doNotOptimizeAway(x); + auto measure2 = mCounters; + + for (size_t i = 0; i < mCounters.size(); ++i) { + // factor 2 because we have two instructions per loop + auto m1 = measure1[i] > mCalibratedOverhead[i] ? measure1[i] - mCalibratedOverhead[i] : 0; + auto m2 = measure2[i] > mCalibratedOverhead[i] ? measure2[i] - mCalibratedOverhead[i] : 0; + auto overhead = m1 * 2 > m2 ? 
m1 * 2 - m2 : 0; + + mLoopOverhead[i] = divRounded(overhead, numIters); + } + } + } + +private: + bool monitor(uint32_t type, uint64_t eventid, Target target); + + std::map mIdToTarget{}; + + // start with minimum size of 3 for read_format + std::vector mCounters{3}; + std::vector mCalibratedOverhead{3}; + std::vector mLoopOverhead{3}; + + uint64_t mTimeEnabledNanos = 0; + uint64_t mTimeRunningNanos = 0; + int mFd = -1; + bool mHasError = false; +}; +ANKERL_NANOBENCH(IGNORE_PADDED_POP) + +LinuxPerformanceCounters::~LinuxPerformanceCounters() { + if (-1 != mFd) { + close(mFd); + } +} + +bool LinuxPerformanceCounters::monitor(perf_sw_ids swId, LinuxPerformanceCounters::Target target) { + return monitor(PERF_TYPE_SOFTWARE, swId, target); +} + +bool LinuxPerformanceCounters::monitor(perf_hw_id hwId, LinuxPerformanceCounters::Target target) { + return monitor(PERF_TYPE_HARDWARE, hwId, target); +} + +// overflow is ok, it's checked +ANKERL_NANOBENCH_NO_SANITIZE("integer", "undefined") +void LinuxPerformanceCounters::updateResults(uint64_t numIters) { + // clear old data + for (auto& id_value : mIdToTarget) { + *id_value.second.targetValue = UINT64_C(0); + } + + if (mHasError) { + return; + } + + mTimeEnabledNanos = mCounters[1] - mCalibratedOverhead[1]; + mTimeRunningNanos = mCounters[2] - mCalibratedOverhead[2]; + + for (uint64_t i = 0; i < mCounters[0]; ++i) { + auto idx = static_cast(3 + i * 2 + 0); + auto id = mCounters[idx + 1U]; + + auto it = mIdToTarget.find(id); + if (it != mIdToTarget.end()) { + + auto& tgt = it->second; + *tgt.targetValue = mCounters[idx]; + if (tgt.correctMeasuringOverhead) { + if (*tgt.targetValue >= mCalibratedOverhead[idx]) { + *tgt.targetValue -= mCalibratedOverhead[idx]; + } else { + *tgt.targetValue = 0U; + } + } + if (tgt.correctLoopOverhead) { + auto correctionVal = mLoopOverhead[idx] * numIters; + if (*tgt.targetValue >= correctionVal) { + *tgt.targetValue -= correctionVal; + } else { + *tgt.targetValue = 0U; + } + } + } + } +} + +bool LinuxPerformanceCounters::monitor(uint32_t type, uint64_t eventid, Target target) { + *target.targetValue = (std::numeric_limits::max)(); + if (mHasError) { + return false; + } + + auto pea = perf_event_attr(); + std::memset(&pea, 0, sizeof(perf_event_attr)); + pea.type = type; + pea.size = sizeof(perf_event_attr); + pea.config = eventid; + pea.disabled = 1; // start counter as disabled + pea.exclude_kernel = 1; + pea.exclude_hv = 1; + + // NOLINTNEXTLINE(hicpp-signed-bitwise) + pea.read_format = PERF_FORMAT_GROUP | PERF_FORMAT_ID | PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING; + + const int pid = 0; // the current process + const int cpu = -1; // all CPUs +# if defined(PERF_FLAG_FD_CLOEXEC) // since Linux 3.14 + const unsigned long flags = PERF_FLAG_FD_CLOEXEC; +# else + const unsigned long flags = 0; +# endif + + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-vararg) + auto fd = static_cast(syscall(__NR_perf_event_open, &pea, pid, cpu, mFd, flags)); + if (-1 == fd) { + return false; + } + if (-1 == mFd) { + // first call: set to fd, and use this from now on + mFd = fd; + } + uint64_t id = 0; + // NOLINTNEXTLINE(hicpp-signed-bitwise,cppcoreguidelines-pro-type-vararg) + if (-1 == ioctl(fd, PERF_EVENT_IOC_ID, &id)) { + // couldn't get id + return false; + } + + // insert into map, rely on the fact that map's references are constant. 
+ mIdToTarget.emplace(id, target); + + // prepare readformat with the correct size (after the insert) + auto size = 3 + 2 * mIdToTarget.size(); + mCounters.resize(size); + mCalibratedOverhead.resize(size); + mLoopOverhead.resize(size); + + return true; +} + +PerformanceCounters::PerformanceCounters() + : mPc(new LinuxPerformanceCounters()) + , mVal() + , mHas() { + + // HW events + mHas.cpuCycles = mPc->monitor(PERF_COUNT_HW_REF_CPU_CYCLES, LinuxPerformanceCounters::Target(&mVal.cpuCycles, true, false)); + if (!mHas.cpuCycles) { + // Fallback to cycles counter, reference cycles not available in many systems. + mHas.cpuCycles = mPc->monitor(PERF_COUNT_HW_CPU_CYCLES, LinuxPerformanceCounters::Target(&mVal.cpuCycles, true, false)); + } + mHas.instructions = mPc->monitor(PERF_COUNT_HW_INSTRUCTIONS, LinuxPerformanceCounters::Target(&mVal.instructions, true, true)); + mHas.branchInstructions = + mPc->monitor(PERF_COUNT_HW_BRANCH_INSTRUCTIONS, LinuxPerformanceCounters::Target(&mVal.branchInstructions, true, false)); + mHas.branchMisses = mPc->monitor(PERF_COUNT_HW_BRANCH_MISSES, LinuxPerformanceCounters::Target(&mVal.branchMisses, true, false)); + // mHas.branchMisses = false; + + // SW events + mHas.pageFaults = mPc->monitor(PERF_COUNT_SW_PAGE_FAULTS, LinuxPerformanceCounters::Target(&mVal.pageFaults, true, false)); + mHas.contextSwitches = + mPc->monitor(PERF_COUNT_SW_CONTEXT_SWITCHES, LinuxPerformanceCounters::Target(&mVal.contextSwitches, true, false)); + + mPc->start(); + mPc->calibrate([] { + auto before = ankerl::nanobench::Clock::now(); + auto after = ankerl::nanobench::Clock::now(); + (void)before; + (void)after; + }); + + if (mPc->hasError()) { + // something failed, don't monitor anything. + mHas = PerfCountSet{}; + } +} + +PerformanceCounters::~PerformanceCounters() { + // no need to check for nullptr, delete nullptr has no effect + delete mPc; +} + +void PerformanceCounters::beginMeasure() { + mPc->beginMeasure(); +} + +void PerformanceCounters::endMeasure() { + mPc->endMeasure(); +} + +void PerformanceCounters::updateResults(uint64_t numIters) { + mPc->updateResults(numIters); +} + +# else + +PerformanceCounters::PerformanceCounters() = default; +PerformanceCounters::~PerformanceCounters() = default; +void PerformanceCounters::beginMeasure() {} +void PerformanceCounters::endMeasure() {} +void PerformanceCounters::updateResults(uint64_t) {} + +# endif + +ANKERL_NANOBENCH(NODISCARD) PerfCountSet const& PerformanceCounters::val() const noexcept { + return mVal; +} +ANKERL_NANOBENCH(NODISCARD) PerfCountSet const& PerformanceCounters::has() const noexcept { + return mHas; +} + +// formatting utilities +namespace fmt { + +// adds thousands separator to numbers +NumSep::NumSep(char sep) + : mSep(sep) {} + +char NumSep::do_thousands_sep() const { + return mSep; +} + +std::string NumSep::do_grouping() const { + return "\003"; +} + +// RAII to save & restore a stream's state +StreamStateRestorer::StreamStateRestorer(std::ostream& s) + : mStream(s) + , mLocale(s.getloc()) + , mPrecision(s.precision()) + , mWidth(s.width()) + , mFill(s.fill()) + , mFmtFlags(s.flags()) {} + +StreamStateRestorer::~StreamStateRestorer() { + restore(); +} + +// sets back all stream info that we remembered at construction +void StreamStateRestorer::restore() { + mStream.imbue(mLocale); + mStream.precision(mPrecision); + mStream.width(mWidth); + mStream.fill(mFill); + mStream.flags(mFmtFlags); +} + +Number::Number(int width, int precision, int64_t value) + : mWidth(width) + , mPrecision(precision) + , 
mValue(d(value)) {} + +Number::Number(int width, int precision, double value) + : mWidth(width) + , mPrecision(precision) + , mValue(value) {} + +std::ostream& Number::write(std::ostream& os) const { + StreamStateRestorer const restorer(os); + os.imbue(std::locale(os.getloc(), new NumSep(','))); + os << std::setw(mWidth) << std::setprecision(mPrecision) << std::fixed << mValue; + return os; +} + +std::string Number::to_s() const { + std::stringstream ss; + write(ss); + return ss.str(); +} + +std::string to_s(uint64_t n) { + std::string str; + do { + str += static_cast('0' + static_cast(n % 10)); + n /= 10; + } while (n != 0); + std::reverse(str.begin(), str.end()); + return str; +} + +std::ostream& operator<<(std::ostream& os, Number const& n) { + return n.write(os); +} + +MarkDownColumn::MarkDownColumn(int w, int prec, std::string tit, std::string suff, double val) noexcept + : mWidth(w) + , mPrecision(prec) + , mTitle(std::move(tit)) + , mSuffix(std::move(suff)) + , mValue(val) {} + +std::string MarkDownColumn::title() const { + std::stringstream ss; + ss << '|' << std::setw(mWidth - 2) << std::right << mTitle << ' '; + return ss.str(); +} + +std::string MarkDownColumn::separator() const { + std::string sep(static_cast(mWidth), '-'); + sep.front() = '|'; + sep.back() = ':'; + return sep; +} + +std::string MarkDownColumn::invalid() const { + std::string sep(static_cast(mWidth), ' '); + sep.front() = '|'; + sep[sep.size() - 2] = '-'; + return sep; +} + +std::string MarkDownColumn::value() const { + std::stringstream ss; + auto width = mWidth - 2 - static_cast(mSuffix.size()); + ss << '|' << Number(width, mPrecision, mValue) << mSuffix << ' '; + return ss.str(); +} + +// Formats any text as markdown code, escaping backticks. +MarkDownCode::MarkDownCode(std::string const& what) { + mWhat.reserve(what.size() + 2); + mWhat.push_back('`'); + for (char const c : what) { + mWhat.push_back(c); + if ('`' == c) { + mWhat.push_back('`'); + } + } + mWhat.push_back('`'); +} + +std::ostream& MarkDownCode::write(std::ostream& os) const { + return os << mWhat; +} + +std::ostream& operator<<(std::ostream& os, MarkDownCode const& mdCode) { + return mdCode.write(os); +} +} // namespace fmt +} // namespace detail + +// provide implementation here so it's only generated once +Config::Config() = default; +Config::~Config() = default; +Config& Config::operator=(Config const&) = default; +Config& Config::operator=(Config&&) noexcept(ANKERL_NANOBENCH(NOEXCEPT_STRING_MOVE)) = default; +Config::Config(Config const&) = default; +Config::Config(Config&&) noexcept = default; + +// provide implementation here so it's only generated once +Result::~Result() = default; +Result& Result::operator=(Result const&) = default; +Result& Result::operator=(Result&&) noexcept(ANKERL_NANOBENCH(NOEXCEPT_STRING_MOVE)) = default; +Result::Result(Result const&) = default; +Result::Result(Result&&) noexcept = default; + +namespace detail { +template +inline constexpr typename std::underlying_type::type u(T val) noexcept { + return static_cast::type>(val); +} +} // namespace detail + +// Result returned after a benchmark has finished. Can be used as a baseline for relative(). 
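fmt::NumSep above hooks into the standard locale machinery: do_thousands_sep() supplies the separator, do_grouping() returns "\003" (groups of three digits), and Number::write() imbues the output stream with the facet. Below is a minimal self-contained sketch of the same std::numpunct technique; the ThousandsSep name and the sample value are illustrative only.

```cpp
#include <iostream>
#include <locale>
#include <string>

// Same technique as fmt::NumSep: override the two numpunct hooks.
struct ThousandsSep : std::numpunct<char> {
    char do_thousands_sep() const override { return ','; }
    std::string do_grouping() const override { return "\003"; } // groups of 3 digits
};

int main()
{
    // The locale takes ownership of the facet pointer.
    std::cout.imbue(std::locale(std::cout.getloc(), new ThousandsSep()));
    std::cout << 1234567 << '\n'; // prints 1,234,567
}
```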
+Result::Result(Config benchmarkConfig) + : mConfig(std::move(benchmarkConfig)) + , mNameToMeasurements{detail::u(Result::Measure::_size)} {} + +void Result::add(Clock::duration totalElapsed, uint64_t iters, detail::PerformanceCounters const& pc) { + using detail::d; + using detail::u; + + double const dIters = d(iters); + mNameToMeasurements[u(Result::Measure::iterations)].push_back(dIters); + + mNameToMeasurements[u(Result::Measure::elapsed)].push_back(d(totalElapsed) / dIters); + if (pc.has().pageFaults) { + mNameToMeasurements[u(Result::Measure::pagefaults)].push_back(d(pc.val().pageFaults) / dIters); + } + if (pc.has().cpuCycles) { + mNameToMeasurements[u(Result::Measure::cpucycles)].push_back(d(pc.val().cpuCycles) / dIters); + } + if (pc.has().contextSwitches) { + mNameToMeasurements[u(Result::Measure::contextswitches)].push_back(d(pc.val().contextSwitches) / dIters); + } + if (pc.has().instructions) { + mNameToMeasurements[u(Result::Measure::instructions)].push_back(d(pc.val().instructions) / dIters); + } + if (pc.has().branchInstructions) { + double branchInstructions = 0.0; + // correcting branches: remove branch introduced by the while (...) loop for each iteration. + if (pc.val().branchInstructions > iters + 1U) { + branchInstructions = d(pc.val().branchInstructions - (iters + 1U)); + } + mNameToMeasurements[u(Result::Measure::branchinstructions)].push_back(branchInstructions / dIters); + + if (pc.has().branchMisses) { + // correcting branch misses + double branchMisses = d(pc.val().branchMisses); + if (branchMisses > branchInstructions) { + // can't have branch misses when there were branches... + branchMisses = branchInstructions; + } + + // assuming at least one missed branch for the loop + branchMisses -= 1.0; + if (branchMisses < 1.0) { + branchMisses = 1.0; + } + mNameToMeasurements[u(Result::Measure::branchmisses)].push_back(branchMisses / dIters); + } + } +} + +Config const& Result::config() const noexcept { + return mConfig; +} + +inline double calcMedian(std::vector& data) { + if (data.empty()) { + return 0.0; + } + std::sort(data.begin(), data.end()); + + auto midIdx = data.size() / 2U; + if (1U == (data.size() & 1U)) { + return data[midIdx]; + } + return (data[midIdx - 1U] + data[midIdx]) / 2U; +} + +double Result::median(Measure m) const { + // create a copy so we can sort + auto data = mNameToMeasurements[detail::u(m)]; + return calcMedian(data); +} + +double Result::average(Measure m) const { + using detail::d; + auto const& data = mNameToMeasurements[detail::u(m)]; + if (data.empty()) { + return 0.0; + } + + // create a copy so we can sort + return sum(m) / d(data.size()); +} + +double Result::medianAbsolutePercentError(Measure m) const { + // create copy + auto data = mNameToMeasurements[detail::u(m)]; + + // calculates MdAPE which is the median of percentage error + // see https://support.numxl.com/hc/en-us/articles/115001223503-MdAPE-Median-Absolute-Percentage-Error + auto med = calcMedian(data); + + // transform the data to absolute error + for (auto& x : data) { + x = (x - med) / x; + if (x < 0) { + x = -x; + } + } + return calcMedian(data); +} + +double Result::sum(Measure m) const noexcept { + auto const& data = mNameToMeasurements[detail::u(m)]; + return std::accumulate(data.begin(), data.end(), 0.0); +} + +double Result::sumProduct(Measure m1, Measure m2) const noexcept { + auto const& data1 = mNameToMeasurements[detail::u(m1)]; + auto const& data2 = mNameToMeasurements[detail::u(m2)]; + + if (data1.size() != data2.size()) { + return 0.0; + } + + double 
result = 0.0; + for (size_t i = 0, s = data1.size(); i != s; ++i) { + result += data1[i] * data2[i]; + } + return result; +} + +bool Result::has(Measure m) const noexcept { + return !mNameToMeasurements[detail::u(m)].empty(); +} + +double Result::get(size_t idx, Measure m) const { + auto const& data = mNameToMeasurements[detail::u(m)]; + return data.at(idx); +} + +bool Result::empty() const noexcept { + return 0U == size(); +} + +size_t Result::size() const noexcept { + auto const& data = mNameToMeasurements[detail::u(Measure::elapsed)]; + return data.size(); +} + +double Result::minimum(Measure m) const noexcept { + auto const& data = mNameToMeasurements[detail::u(m)]; + if (data.empty()) { + return 0.0; + } + + // here it's safe to assume that at least one element is there + return *std::min_element(data.begin(), data.end()); +} + +double Result::maximum(Measure m) const noexcept { + auto const& data = mNameToMeasurements[detail::u(m)]; + if (data.empty()) { + return 0.0; + } + + // here it's safe to assume that at least one element is there + return *std::max_element(data.begin(), data.end()); +} + +std::string const& Result::context(char const* variableName) const { + return mConfig.mContext.at(variableName); +} + +std::string const& Result::context(std::string const& variableName) const { + return mConfig.mContext.at(variableName); +} + +Result::Measure Result::fromString(std::string const& str) { + if (str == "elapsed") { + return Measure::elapsed; + } + if (str == "iterations") { + return Measure::iterations; + } + if (str == "pagefaults") { + return Measure::pagefaults; + } + if (str == "cpucycles") { + return Measure::cpucycles; + } + if (str == "contextswitches") { + return Measure::contextswitches; + } + if (str == "instructions") { + return Measure::instructions; + } + if (str == "branchinstructions") { + return Measure::branchinstructions; + } + if (str == "branchmisses") { + return Measure::branchmisses; + } + // not found, return _size + return Measure::_size; +} + +// Configuration of a microbenchmark. +Bench::Bench() { + mConfig.mOut = &std::cout; +} + +Bench::Bench(Bench&&) noexcept = default; +Bench& Bench::operator=(Bench&&) noexcept(ANKERL_NANOBENCH(NOEXCEPT_STRING_MOVE)) = default; +Bench::Bench(Bench const&) = default; +Bench& Bench::operator=(Bench const&) = default; +Bench::~Bench() noexcept = default; + +double Bench::batch() const noexcept { + return mConfig.mBatch; +} + +double Bench::complexityN() const noexcept { + return mConfig.mComplexityN; +} + +// Set a baseline to compare it to. 100% means it is exactly as fast as the baseline, >100% means it is faster than the baseline, <100% +// means it is slower than the baseline. +Bench& Bench::relative(bool isRelativeEnabled) noexcept { + mConfig.mIsRelative = isRelativeEnabled; + return *this; +} +bool Bench::relative() const noexcept { + return mConfig.mIsRelative; +} + +Bench& Bench::performanceCounters(bool showPerformanceCounters) noexcept { + mConfig.mShowPerformanceCounters = showPerformanceCounters; + return *this; +} +bool Bench::performanceCounters() const noexcept { + return mConfig.mShowPerformanceCounters; +} + +// Operation unit. Defaults to "op", could be e.g. "byte" for string processing. +// If u differs from currently set unit, the stored results will be cleared. +// Use singular (byte, not bytes).
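The relative() flag documented above drives nanobench's relative column: the first completed run becomes the 100% baseline and later runs are reported as a percentage of it. The sketch below shows typical usage; the benchmark names, workload, and include path are assumptions for illustration (in this repository the header is vendored as src/bench/nanobench.h).

```cpp
#include <bench/nanobench.h>

#include <vector>

int main()
{
    ankerl::nanobench::Bench bench;
    bench.title("vector fill").unit("element").relative(true);

    // First run() is the baseline (reported as 100%).
    bench.run("push_back without reserve", [] {
        std::vector<int> v;
        for (int i = 0; i < 1000; ++i) v.push_back(i);
        ankerl::nanobench::doNotOptimizeAway(v.data());
    });

    // Subsequent runs are shown as a percentage of the baseline.
    bench.run("push_back with reserve", [] {
        std::vector<int> v;
        v.reserve(1000);
        for (int i = 0; i < 1000; ++i) v.push_back(i);
        ankerl::nanobench::doNotOptimizeAway(v.data());
    });
}
```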
+Bench& Bench::unit(char const* u) { + if (u != mConfig.mUnit) { + mResults.clear(); + } + mConfig.mUnit = u; + return *this; +} + +Bench& Bench::unit(std::string const& u) { + return unit(u.c_str()); +} + +std::string const& Bench::unit() const noexcept { + return mConfig.mUnit; +} + +Bench& Bench::timeUnit(std::chrono::duration const& tu, std::string const& tuName) { + mConfig.mTimeUnit = tu; + mConfig.mTimeUnitName = tuName; + return *this; +} + +std::string const& Bench::timeUnitName() const noexcept { + return mConfig.mTimeUnitName; +} + +std::chrono::duration const& Bench::timeUnit() const noexcept { + return mConfig.mTimeUnit; +} + +// If benchmarkTitle differs from currently set title, the stored results will be cleared. +Bench& Bench::title(const char* benchmarkTitle) { + if (benchmarkTitle != mConfig.mBenchmarkTitle) { + mResults.clear(); + } + mConfig.mBenchmarkTitle = benchmarkTitle; + return *this; +} +Bench& Bench::title(std::string const& benchmarkTitle) { + if (benchmarkTitle != mConfig.mBenchmarkTitle) { + mResults.clear(); + } + mConfig.mBenchmarkTitle = benchmarkTitle; + return *this; +} + +std::string const& Bench::title() const noexcept { + return mConfig.mBenchmarkTitle; +} + +Bench& Bench::name(const char* benchmarkName) { + mConfig.mBenchmarkName = benchmarkName; + return *this; +} + +Bench& Bench::name(std::string const& benchmarkName) { + mConfig.mBenchmarkName = benchmarkName; + return *this; +} + +std::string const& Bench::name() const noexcept { + return mConfig.mBenchmarkName; +} + +Bench& Bench::context(char const* variableName, char const* variableValue) { + mConfig.mContext[variableName] = variableValue; + return *this; +} + +Bench& Bench::context(std::string const& variableName, std::string const& variableValue) { + mConfig.mContext[variableName] = variableValue; + return *this; +} + +Bench& Bench::clearContext() { + mConfig.mContext.clear(); + return *this; +} + +// Number of epochs to evaluate. The reported result will be the median over all epochs. +Bench& Bench::epochs(size_t numEpochs) noexcept { + mConfig.mNumEpochs = numEpochs; + return *this; +} +size_t Bench::epochs() const noexcept { + return mConfig.mNumEpochs; +} + +// Desired evaluation time is a multiple of the clock resolution. The default is 1000 times the measured clock resolution. +Bench& Bench::clockResolutionMultiple(size_t multiple) noexcept { + mConfig.mClockResolutionMultiple = multiple; + return *this; +} +size_t Bench::clockResolutionMultiple() const noexcept { + return mConfig.mClockResolutionMultiple; +} + +// Sets the maximum time each epoch should take. Default is 100ms. +Bench& Bench::maxEpochTime(std::chrono::nanoseconds t) noexcept { + mConfig.mMaxEpochTime = t; + return *this; +} +std::chrono::nanoseconds Bench::maxEpochTime() const noexcept { + return mConfig.mMaxEpochTime; +} + +// Sets the minimum time each epoch should take. +Bench& Bench::minEpochTime(std::chrono::nanoseconds t) noexcept { + mConfig.mMinEpochTime = t; + return *this; +} +std::chrono::nanoseconds Bench::minEpochTime() const noexcept { + return mConfig.mMinEpochTime; +} + +Bench& Bench::minEpochIterations(uint64_t numIters) noexcept { + mConfig.mMinEpochIterations = (numIters == 0) ?
1 : numIters; + return *this; +} +uint64_t Bench::minEpochIterations() const noexcept { + return mConfig.mMinEpochIterations; +} + +Bench& Bench::epochIterations(uint64_t numIters) noexcept { + mConfig.mEpochIterations = numIters; + return *this; +} +uint64_t Bench::epochIterations() const noexcept { + return mConfig.mEpochIterations; +} + +Bench& Bench::warmup(uint64_t numWarmupIters) noexcept { + mConfig.mWarmup = numWarmupIters; + return *this; +} +uint64_t Bench::warmup() const noexcept { + return mConfig.mWarmup; +} + +Bench& Bench::config(Config const& benchmarkConfig) { + mConfig = benchmarkConfig; + return *this; +} +Config const& Bench::config() const noexcept { + return mConfig; +} + +Bench& Bench::output(std::ostream* outstream) noexcept { + mConfig.mOut = outstream; + return *this; +} + +ANKERL_NANOBENCH(NODISCARD) std::ostream* Bench::output() const noexcept { + return mConfig.mOut; +} + +std::vector const& Bench::results() const noexcept { + return mResults; +} + +Bench& Bench::render(char const* templateContent, std::ostream& os) { + ::ankerl::nanobench::render(templateContent, *this, os); + return *this; +} + +Bench& Bench::render(std::string const& templateContent, std::ostream& os) { + ::ankerl::nanobench::render(templateContent, *this, os); + return *this; +} + +std::vector Bench::complexityBigO() const { + std::vector bigOs; + auto rangeMeasure = BigO::collectRangeMeasure(mResults); + bigOs.emplace_back("O(1)", rangeMeasure, [](double) { + return 1.0; + }); + bigOs.emplace_back("O(n)", rangeMeasure, [](double n) { + return n; + }); + bigOs.emplace_back("O(log n)", rangeMeasure, [](double n) { + return std::log2(n); + }); + bigOs.emplace_back("O(n log n)", rangeMeasure, [](double n) { + return n * std::log2(n); + }); + bigOs.emplace_back("O(n^2)", rangeMeasure, [](double n) { + return n * n; + }); + bigOs.emplace_back("O(n^3)", rangeMeasure, [](double n) { + return n * n * n; + }); + std::sort(bigOs.begin(), bigOs.end()); + return bigOs; +} + +Rng::Rng() + : mX(0) + , mY(0) { + std::random_device rd; + std::uniform_int_distribution dist; + do { + mX = dist(rd); + mY = dist(rd); + } while (mX == 0 && mY == 0); +} + +ANKERL_NANOBENCH_NO_SANITIZE("integer", "undefined") +uint64_t splitMix64(uint64_t& state) noexcept { + uint64_t z = (state += UINT64_C(0x9e3779b97f4a7c15)); + z = (z ^ (z >> 30U)) * UINT64_C(0xbf58476d1ce4e5b9); + z = (z ^ (z >> 27U)) * UINT64_C(0x94d049bb133111eb); + return z ^ (z >> 31U); +} + +// Seeded as described in romu paper (update april 2020) +Rng::Rng(uint64_t seed) noexcept + : mX(splitMix64(seed)) + , mY(splitMix64(seed)) { + for (size_t i = 0; i < 10; ++i) { + operator()(); + } +} + +// only internally used to copy the RNG. 
+Rng::Rng(uint64_t x, uint64_t y) noexcept + : mX(x) + , mY(y) {} + +Rng Rng::copy() const noexcept { + return Rng{mX, mY}; +} + +Rng::Rng(std::vector const& data) + : mX(0) + , mY(0) { + if (data.size() != 2) { + throw std::runtime_error("ankerl::nanobench::Rng::Rng: needed exactly 2 entries in data, but got " + + detail::fmt::to_s(data.size())); + } + mX = data[0]; + mY = data[1]; +} + +std::vector Rng::state() const { + std::vector data(2); + data[0] = mX; + data[1] = mY; + return data; +} + +BigO::RangeMeasure BigO::collectRangeMeasure(std::vector const& results) { + BigO::RangeMeasure rangeMeasure; + for (auto const& result : results) { + if (result.config().mComplexityN > 0.0) { + rangeMeasure.emplace_back(result.config().mComplexityN, result.median(Result::Measure::elapsed)); + } + } + return rangeMeasure; +} + +BigO::BigO(std::string bigOName, RangeMeasure const& rangeMeasure) + : mName(std::move(bigOName)) { + + // estimate the constant factor + double sumRangeMeasure = 0.0; + double sumRangeRange = 0.0; + + for (const auto& rm : rangeMeasure) { + sumRangeMeasure += rm.first * rm.second; + sumRangeRange += rm.first * rm.first; + } + mConstant = sumRangeMeasure / sumRangeRange; + + // calculate root mean square + double err = 0.0; + double sumMeasure = 0.0; + for (const auto& rm : rangeMeasure) { + auto diff = mConstant * rm.first - rm.second; + err += diff * diff; + + sumMeasure += rm.second; + } + + auto n = detail::d(rangeMeasure.size()); + auto mean = sumMeasure / n; + mNormalizedRootMeanSquare = std::sqrt(err / n) / mean; +} + +BigO::BigO(const char* bigOName, RangeMeasure const& rangeMeasure) + : BigO(std::string(bigOName), rangeMeasure) {} + +std::string const& BigO::name() const noexcept { + return mName; +} + +double BigO::constant() const noexcept { + return mConstant; +} + +double BigO::normalizedRootMeanSquare() const noexcept { + return mNormalizedRootMeanSquare; +} + +bool BigO::operator<(BigO const& other) const noexcept { + return std::tie(mNormalizedRootMeanSquare, mName) < std::tie(other.mNormalizedRootMeanSquare, other.mName); +} + +std::ostream& operator<<(std::ostream& os, BigO const& bigO) { + return os << bigO.constant() << " * " << bigO.name() << ", rms=" << bigO.normalizedRootMeanSquare(); +} + +std::ostream& operator<<(std::ostream& os, std::vector const& bigOs) { + detail::fmt::StreamStateRestorer const restorer(os); + os << std::endl << "| coefficient | err% | complexity" << std::endl << "|--------------:|-------:|------------" << std::endl; + for (auto const& bigO : bigOs) { + os << "|" << std::setw(14) << std::setprecision(7) << std::scientific << bigO.constant() << " "; + os << "|" << detail::fmt::Number(6, 1, bigO.normalizedRootMeanSquare() * 100.0) << "% "; + os << "| " << bigO.name(); + os << std::endl; + } + return os; +} + +} // namespace nanobench +} // namespace ankerl + +#endif // ANKERL_NANOBENCH_IMPLEMENT +#endif // ANKERL_NANOBENCH_H_INCLUDED diff --git a/src/bench/parse_hex.cpp b/src/bench/parse_hex.cpp new file mode 100644 index 0000000000000..db3ead043c815 --- /dev/null +++ b/src/bench/parse_hex.cpp @@ -0,0 +1,36 @@ +// Copyright (c) 2024- The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. 
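A note on the Rng state accessors defined above: state() exposes the generator's two 64-bit words and the std::vector constructor restores them, which makes benchmark inputs reproducible across runs. The round-trip sketch below is illustrative only; the seed and the include path are assumptions.

```cpp
#include <bench/nanobench.h>

#include <cassert>
#include <cstdint>
#include <vector>

int main()
{
    ankerl::nanobench::Rng rng(12345);

    // Save the two 64-bit state words, draw a number, then replay it
    // from the saved state and check the sequences agree.
    std::vector<uint64_t> saved = rng.state();
    uint64_t a = rng();

    ankerl::nanobench::Rng replay(saved);
    uint64_t b = replay();

    assert(a == b);
    (void)a;
    (void)b;
}
```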
+ +#include +#include +#include +#include +#include +#include +#include + +std::string generateHexString(size_t length) { + const auto hex_digits = "0123456789ABCDEF"; + FastRandomContext rng(/*fDeterministic=*/true); + + std::string data; + while (data.size() < length) { + auto digit = hex_digits[rng.randbits(4)]; + data.push_back(digit); + } + return data; +} + +static void HexParse(benchmark::Bench& bench) +{ + auto data = generateHexString(130); // Generates 678B0EDA0A1FD30904D5A65E3568DB82DB2D918B0AD8DEA18A63FECCB877D07CAD1495C7157584D877420EF38B8DA473A6348B4F51811AC13C786B962BEE5668F9 by default + + bench.batch(data.size()).unit("base16").run([&] { + auto result = TryParseHex(data); + assert(result != std::nullopt); // make sure we're measuring the successful case + ankerl::nanobench::doNotOptimizeAway(result); + }); +} + +BENCHMARK(HexParse, benchmark::PriorityLevel::HIGH); diff --git a/src/bench/peer_eviction.cpp b/src/bench/peer_eviction.cpp new file mode 100644 index 0000000000000..5b08cc758bef1 --- /dev/null +++ b/src/bench/peer_eviction.cpp @@ -0,0 +1,154 @@ +// Copyright (c) 2021-2022 The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. + +#include +#include +#include +#include +#include + +#include +#include +#include + +static void EvictionProtectionCommon( + benchmark::Bench& bench, + int num_candidates, + std::function candidate_setup_fn) +{ + using Candidates = std::vector; + FastRandomContext random_context{true}; + + Candidates candidates{GetRandomNodeEvictionCandidates(num_candidates, random_context)}; + for (auto& c : candidates) { + candidate_setup_fn(c); + } + + + bench.run([&] { + // creating a copy has an overhead of about 3%, so it does not influence the benchmark results much. 
+ auto copy = candidates; + ProtectEvictionCandidatesByRatio(copy); + }); +} + +/* Benchmarks */ + +static void EvictionProtection0Networks250Candidates(benchmark::Bench& bench) +{ + EvictionProtectionCommon( + bench, + /*num_candidates=*/250, + [](NodeEvictionCandidate& c) { + c.m_connected = std::chrono::seconds{c.id}; + c.m_network = NET_IPV4; + }); +} + +static void EvictionProtection1Networks250Candidates(benchmark::Bench& bench) +{ + EvictionProtectionCommon( + bench, + /*num_candidates=*/250, + [](NodeEvictionCandidate& c) { + c.m_connected = std::chrono::seconds{c.id}; + c.m_is_local = false; + if (c.id >= 130 && c.id < 240) { // 110 Tor + c.m_network = NET_ONION; + } else { + c.m_network = NET_IPV4; + } + }); +} + +static void EvictionProtection2Networks250Candidates(benchmark::Bench& bench) +{ + EvictionProtectionCommon( + bench, + /*num_candidates=*/250, + [](NodeEvictionCandidate& c) { + c.m_connected = std::chrono::seconds{c.id}; + c.m_is_local = false; + if (c.id >= 90 && c.id < 160) { // 70 Tor + c.m_network = NET_ONION; + } else if (c.id >= 170 && c.id < 250) { // 80 I2P + c.m_network = NET_I2P; + } else { + c.m_network = NET_IPV4; + } + }); +} + +static void EvictionProtection3Networks050Candidates(benchmark::Bench& bench) +{ + EvictionProtectionCommon( + bench, + /*num_candidates=*/50, + [](NodeEvictionCandidate& c) { + c.m_connected = std::chrono::seconds{c.id}; + c.m_is_local = (c.id == 28 || c.id == 47); // 2 localhost + if (c.id >= 30 && c.id < 47) { // 17 I2P + c.m_network = NET_I2P; + } else if (c.id >= 24 && c.id < 28) { // 4 Tor + c.m_network = NET_ONION; + } else { + c.m_network = NET_IPV4; + } + }); +} + +static void EvictionProtection3Networks100Candidates(benchmark::Bench& bench) +{ + EvictionProtectionCommon( + bench, + /*num_candidates=*/100, + [](NodeEvictionCandidate& c) { + c.m_connected = std::chrono::seconds{c.id}; + c.m_is_local = (c.id >= 55 && c.id < 60); // 5 localhost + if (c.id >= 70 && c.id < 80) { // 10 I2P + c.m_network = NET_I2P; + } else if (c.id >= 80 && c.id < 96) { // 16 Tor + c.m_network = NET_ONION; + } else { + c.m_network = NET_IPV4; + } + }); +} + +static void EvictionProtection3Networks250Candidates(benchmark::Bench& bench) +{ + EvictionProtectionCommon( + bench, + /*num_candidates=*/250, + [](NodeEvictionCandidate& c) { + c.m_connected = std::chrono::seconds{c.id}; + c.m_is_local = (c.id >= 140 && c.id < 160); // 20 localhost + if (c.id >= 170 && c.id < 180) { // 10 I2P + c.m_network = NET_I2P; + } else if (c.id >= 190 && c.id < 240) { // 50 Tor + c.m_network = NET_ONION; + } else { + c.m_network = NET_IPV4; + } + }); +} + +// Candidate numbers used for the benchmarks: +// - 50 candidates simulates a possible use of -maxconnections +// - 100 candidates approximates an average node with default settings +// - 250 candidates is the number of peers reported by operators of busy nodes + +// No disadvantaged networks, with 250 eviction candidates. +BENCHMARK(EvictionProtection0Networks250Candidates, benchmark::PriorityLevel::HIGH); + +// 1 disadvantaged network (Tor) with 250 eviction candidates. +BENCHMARK(EvictionProtection1Networks250Candidates, benchmark::PriorityLevel::HIGH); + +// 2 disadvantaged networks (I2P, Tor) with 250 eviction candidates. +BENCHMARK(EvictionProtection2Networks250Candidates, benchmark::PriorityLevel::HIGH); + +// 3 disadvantaged networks (I2P/localhost/Tor) with 50/100/250 eviction candidates. 
+BENCHMARK(EvictionProtection3Networks050Candidates, benchmark::PriorityLevel::HIGH); +BENCHMARK(EvictionProtection3Networks100Candidates, benchmark::PriorityLevel::HIGH); +BENCHMARK(EvictionProtection3Networks250Candidates, benchmark::PriorityLevel::HIGH); diff --git a/src/bench/poly1305.cpp b/src/bench/poly1305.cpp index 02e5fecc0def4..f58afe984511c 100644 --- a/src/bench/poly1305.cpp +++ b/src/bench/poly1305.cpp @@ -1,40 +1,46 @@ -// Copyright (c) 2019 The Bitcoin Core developers +// Copyright (c) 2019-2022 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include #include +#include + +#include +#include +#include /* Number of bytes to process per iteration */ static constexpr uint64_t BUFFER_SIZE_TINY = 64; static constexpr uint64_t BUFFER_SIZE_SMALL = 256; static constexpr uint64_t BUFFER_SIZE_LARGE = 1024*1024; -static void POLY1305(benchmark::State& state, size_t buffersize) +static void POLY1305(benchmark::Bench& bench, size_t buffersize) { - std::vector tag(POLY1305_TAGLEN, 0); - std::vector key(POLY1305_KEYLEN, 0); - std::vector in(buffersize, 0); - while (state.KeepRunning()) - poly1305_auth(tag.data(), in.data(), in.size(), key.data()); + std::vector tag(Poly1305::TAGLEN, {}); + std::vector key(Poly1305::KEYLEN, {}); + std::vector in(buffersize, {}); + bench.batch(in.size()).unit("byte").run([&] { + Poly1305{key}.Update(in).Finalize(tag); + }); } -static void POLY1305_64BYTES(benchmark::State& state) +static void POLY1305_64BYTES(benchmark::Bench& bench) { - POLY1305(state, BUFFER_SIZE_TINY); + POLY1305(bench, BUFFER_SIZE_TINY); } -static void POLY1305_256BYTES(benchmark::State& state) +static void POLY1305_256BYTES(benchmark::Bench& bench) { - POLY1305(state, BUFFER_SIZE_SMALL); + POLY1305(bench, BUFFER_SIZE_SMALL); } -static void POLY1305_1MB(benchmark::State& state) +static void POLY1305_1MB(benchmark::Bench& bench) { - POLY1305(state, BUFFER_SIZE_LARGE); + POLY1305(bench, BUFFER_SIZE_LARGE); } -BENCHMARK(POLY1305_64BYTES, 500000); -BENCHMARK(POLY1305_256BYTES, 250000); -BENCHMARK(POLY1305_1MB, 340); +BENCHMARK(POLY1305_64BYTES, benchmark::PriorityLevel::HIGH); +BENCHMARK(POLY1305_256BYTES, benchmark::PriorityLevel::HIGH); +BENCHMARK(POLY1305_1MB, benchmark::PriorityLevel::HIGH); diff --git a/src/bench/pool.cpp b/src/bench/pool.cpp new file mode 100644 index 0000000000000..40b098f54c464 --- /dev/null +++ b/src/bench/pool.cpp @@ -0,0 +1,53 @@ +// Copyright (c) 2022 The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. + +#include +#include + +#include +#include +#include +#include +#include + +template +void BenchFillClearMap(benchmark::Bench& bench, Map& map) +{ + size_t batch_size = 5000; + + // make sure each iteration of the benchmark contains exactly 5000 inserts and one clear. 
+ // do this at least 10 times so we get reasonably accurate results + + bench.batch(batch_size).minEpochIterations(10).run([&] { + auto rng = ankerl::nanobench::Rng(1234); + for (size_t i = 0; i < batch_size; ++i) { + map[rng()]; + } + map.clear(); + }); +} + +static void PoolAllocator_StdUnorderedMap(benchmark::Bench& bench) +{ + auto map = std::unordered_map(); + BenchFillClearMap(bench, map); +} + +static void PoolAllocator_StdUnorderedMapWithPoolResource(benchmark::Bench& bench) +{ + using Map = std::unordered_map, + std::equal_to, + PoolAllocator, + sizeof(std::pair) + 4 * sizeof(void*)>>; + + // make sure the resource supports large enough pools to hold the node. We do this by adding the size of a few pointers to it. + auto pool_resource = Map::allocator_type::ResourceType(); + auto map = Map{0, std::hash{}, std::equal_to{}, &pool_resource}; + BenchFillClearMap(bench, map); +} + +BENCHMARK(PoolAllocator_StdUnorderedMap, benchmark::PriorityLevel::HIGH); +BENCHMARK(PoolAllocator_StdUnorderedMapWithPoolResource, benchmark::PriorityLevel::HIGH); diff --git a/src/bench/prevector.cpp b/src/bench/prevector.cpp index 00e5d7e7a0402..9b83c42693fd3 100644 --- a/src/bench/prevector.cpp +++ b/src/bench/prevector.cpp @@ -1,84 +1,69 @@ -// Copyright (c) 2015-2018 The Bitcoin Core developers +// Copyright (c) 2015-2022 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include -#include -#include -#include #include +#include +#include -// GCC 4.8 is missing some C++11 type_traits, -// https://www.gnu.org/software/gcc/gcc-5/changes.html -#if defined(__GNUC__) && !defined(__clang__) && __GNUC__ < 5 -#define IS_TRIVIALLY_CONSTRUCTIBLE std::has_trivial_default_constructor -#else -#define IS_TRIVIALLY_CONSTRUCTIBLE std::is_trivially_default_constructible -#endif +#include +#include struct nontrivial_t { - int x; - nontrivial_t() :x(-1) {} - ADD_SERIALIZE_METHODS - template - inline void SerializationOp(Stream& s, Operation ser_action) {READWRITE(x);} + int x{-1}; + nontrivial_t() = default; + SERIALIZE_METHODS(nontrivial_t, obj) { READWRITE(obj.x); } }; -static_assert(!IS_TRIVIALLY_CONSTRUCTIBLE::value, +static_assert(!std::is_trivially_default_constructible::value, "expected nontrivial_t to not be trivially constructible"); typedef unsigned char trivial_t; -static_assert(IS_TRIVIALLY_CONSTRUCTIBLE::value, +static_assert(std::is_trivially_default_constructible::value, "expected trivial_t to be trivially constructible"); template -static void PrevectorDestructor(benchmark::State& state) +static void PrevectorDestructor(benchmark::Bench& bench) { - while (state.KeepRunning()) { - for (auto x = 0; x < 1000; ++x) { - prevector<28, T> t0; - prevector<28, T> t1; - t0.resize(28); - t1.resize(29); - } - } + bench.batch(2).run([&] { + prevector<28, T> t0; + prevector<28, T> t1; + t0.resize(28); + t1.resize(29); + }); } template -static void PrevectorClear(benchmark::State& state) +static void PrevectorClear(benchmark::Bench& bench) { - - while (state.KeepRunning()) { - for (auto x = 0; x < 1000; ++x) { - prevector<28, T> t0; - prevector<28, T> t1; - t0.resize(28); - t0.clear(); - t1.resize(29); - t1.clear(); - } - } + prevector<28, T> t0; + prevector<28, T> t1; + bench.batch(2).run([&] { + t0.resize(28); + t0.clear(); + t1.resize(29); + t1.clear(); + }); } template -static void PrevectorResize(benchmark::State& state) +static void PrevectorResize(benchmark::Bench& bench) { - while 
(state.KeepRunning()) { - prevector<28, T> t0; - prevector<28, T> t1; - for (auto x = 0; x < 1000; ++x) { - t0.resize(28); - t0.resize(0); - t1.resize(29); - t1.resize(0); - } - } + prevector<28, T> t0; + prevector<28, T> t1; + bench.batch(4).run([&] { + t0.resize(28); + t0.resize(0); + t1.resize(29); + t1.resize(0); + }); } template -static void PrevectorDeserialize(benchmark::State& state) +static void PrevectorDeserialize(benchmark::Bench& bench) { - CDataStream s0(SER_NETWORK, 0); + DataStream s0{}; prevector<28, T> t0; t0.resize(28); for (auto x = 0; x < 900; ++x) { @@ -88,26 +73,54 @@ static void PrevectorDeserialize(benchmark::State& state) for (auto x = 0; x < 101; ++x) { s0 << t0; } - while (state.KeepRunning()) { + bench.batch(1000).run([&] { prevector<28, T> t1; for (auto x = 0; x < 1000; ++x) { s0 >> t1; } - s0.Init(SER_NETWORK, 0); - } + s0.Rewind(); + }); +} + +template +static void PrevectorFillVectorDirect(benchmark::Bench& bench) +{ + bench.run([&] { + std::vector> vec; + for (size_t i = 0; i < 260; ++i) { + vec.emplace_back(); + } + }); +} + + +template +static void PrevectorFillVectorIndirect(benchmark::Bench& bench) +{ + bench.run([&] { + std::vector> vec; + for (size_t i = 0; i < 260; ++i) { + // force allocation + vec.emplace_back(29, T{}); + } + }); } -#define PREVECTOR_TEST(name, nontrivops, trivops) \ - static void Prevector ## name ## Nontrivial(benchmark::State& state) { \ - Prevector ## name(state); \ - } \ - BENCHMARK(Prevector ## name ## Nontrivial, nontrivops); \ - static void Prevector ## name ## Trivial(benchmark::State& state) { \ - Prevector ## name(state); \ - } \ - BENCHMARK(Prevector ## name ## Trivial, trivops); +#define PREVECTOR_TEST(name) \ + static void Prevector##name##Nontrivial(benchmark::Bench& bench) \ + { \ + Prevector##name(bench); \ + } \ + BENCHMARK(Prevector##name##Nontrivial, benchmark::PriorityLevel::HIGH); \ + static void Prevector##name##Trivial(benchmark::Bench& bench) \ + { \ + Prevector##name(bench); \ + } \ + BENCHMARK(Prevector##name##Trivial, benchmark::PriorityLevel::HIGH); -PREVECTOR_TEST(Clear, 28300, 88600) -PREVECTOR_TEST(Destructor, 28800, 88900) -PREVECTOR_TEST(Resize, 28900, 90300) -PREVECTOR_TEST(Deserialize, 6800, 52000) +PREVECTOR_TEST(Clear) +PREVECTOR_TEST(Destructor) +PREVECTOR_TEST(Resize) +PREVECTOR_TEST(Deserialize) +PREVECTOR_TEST(FillVectorDirect) +PREVECTOR_TEST(FillVectorIndirect) diff --git a/src/bench/random.cpp b/src/bench/random.cpp new file mode 100644 index 0000000000000..c2aa66850ad29 --- /dev/null +++ b/src/bench/random.cpp @@ -0,0 +1,105 @@ +// Copyright (c) The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. 
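The prevector changes above follow the conversion pattern applied throughout this diff: the hand-rolled while (state.KeepRunning()) loop with a hard-coded iteration count in BENCHMARK() becomes a lambda passed to bench.run(), with batch()/unit() describing what one iteration covers and a priority level replacing the count. A hypothetical before/after sketch, assuming the Bitcoin Core bench harness headers (VectorGrow and its workload are illustrative only):

```cpp
#include <bench/bench.h>
#include <bench/nanobench.h>

#include <vector>

// Old style (removed in this diff): the benchmark spun its own loop and the
// iteration count was hard-coded in the macro, e.g.
//
//   static void VectorGrow(benchmark::State& state)
//   {
//       while (state.KeepRunning()) { /* work */ }
//   }
//   BENCHMARK(VectorGrow, 100000);
//
// New style: nanobench chooses the iteration count itself.
static void VectorGrow(benchmark::Bench& bench)
{
    bench.batch(100).unit("element").run([&] {
        std::vector<int> v;
        for (int i = 0; i < 100; ++i) v.push_back(i);
        ankerl::nanobench::doNotOptimizeAway(v.data());
    });
}
BENCHMARK(VectorGrow, benchmark::PriorityLevel::HIGH);
```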
+ +#include +#include + +#include +#include +#include +#include + +namespace { + +template +void BenchRandom_rand64(benchmark::Bench& bench, RNG&& rng) noexcept +{ + bench.batch(1).unit("number").run([&] { + rng.rand64(); + }); +} + +template +void BenchRandom_rand32(benchmark::Bench& bench, RNG&& rng) noexcept +{ + bench.batch(1).unit("number").run([&] { + rng.rand32(); + }); +} + +template +void BenchRandom_randbool(benchmark::Bench& bench, RNG&& rng) noexcept +{ + bench.batch(1).unit("number").run([&] { + rng.randbool(); + }); +} + +template +void BenchRandom_randbits(benchmark::Bench& bench, RNG&& rng) noexcept +{ + bench.batch(64).unit("number").run([&] { + for (int i = 1; i <= 64; ++i) { + rng.randbits(i); + } + }); +} + +template +void BenchRandom_randrange(benchmark::Bench& bench, RNG&& rng) noexcept +{ + bench.batch(RANGE).unit("number").run([&] { + for (int i = 1; i <= RANGE; ++i) { + rng.randrange(i); + } + }); +} + +template +void BenchRandom_stdshuffle(benchmark::Bench& bench, RNG&& rng) noexcept +{ + uint64_t data[RANGE]; + std::iota(std::begin(data), std::end(data), uint64_t(0)); + bench.batch(RANGE).unit("number").run([&] { + std::shuffle(std::begin(data), std::end(data), rng); + }); +} + +void FastRandom_rand64(benchmark::Bench& bench) { BenchRandom_rand64(bench, FastRandomContext(true)); } +void FastRandom_rand32(benchmark::Bench& bench) { BenchRandom_rand32(bench, FastRandomContext(true)); } +void FastRandom_randbool(benchmark::Bench& bench) { BenchRandom_randbool(bench, FastRandomContext(true)); } +void FastRandom_randbits(benchmark::Bench& bench) { BenchRandom_randbits(bench, FastRandomContext(true)); } +void FastRandom_randrange100(benchmark::Bench& bench) { BenchRandom_randrange<100>(bench, FastRandomContext(true)); } +void FastRandom_randrange1000(benchmark::Bench& bench) { BenchRandom_randrange<1000>(bench, FastRandomContext(true)); } +void FastRandom_randrange1000000(benchmark::Bench& bench) { BenchRandom_randrange<1000000>(bench, FastRandomContext(true)); } +void FastRandom_stdshuffle100(benchmark::Bench& bench) { BenchRandom_stdshuffle<100>(bench, FastRandomContext(true)); } + +void InsecureRandom_rand64(benchmark::Bench& bench) { BenchRandom_rand64(bench, InsecureRandomContext(251438)); } +void InsecureRandom_rand32(benchmark::Bench& bench) { BenchRandom_rand32(bench, InsecureRandomContext(251438)); } +void InsecureRandom_randbool(benchmark::Bench& bench) { BenchRandom_randbool(bench, InsecureRandomContext(251438)); } +void InsecureRandom_randbits(benchmark::Bench& bench) { BenchRandom_randbits(bench, InsecureRandomContext(251438)); } +void InsecureRandom_randrange100(benchmark::Bench& bench) { BenchRandom_randrange<100>(bench, InsecureRandomContext(251438)); } +void InsecureRandom_randrange1000(benchmark::Bench& bench) { BenchRandom_randrange<1000>(bench, InsecureRandomContext(251438)); } +void InsecureRandom_randrange1000000(benchmark::Bench& bench) { BenchRandom_randrange<1000000>(bench, InsecureRandomContext(251438)); } +void InsecureRandom_stdshuffle100(benchmark::Bench& bench) { BenchRandom_stdshuffle<100>(bench, InsecureRandomContext(251438)); } + +} // namespace + +BENCHMARK(FastRandom_rand64, benchmark::PriorityLevel::HIGH); +BENCHMARK(FastRandom_rand32, benchmark::PriorityLevel::HIGH); +BENCHMARK(FastRandom_randbool, benchmark::PriorityLevel::HIGH); +BENCHMARK(FastRandom_randbits, benchmark::PriorityLevel::HIGH); +BENCHMARK(FastRandom_randrange100, benchmark::PriorityLevel::HIGH); +BENCHMARK(FastRandom_randrange1000, benchmark::PriorityLevel::HIGH); 
+BENCHMARK(FastRandom_randrange1000000, benchmark::PriorityLevel::HIGH); +BENCHMARK(FastRandom_stdshuffle100, benchmark::PriorityLevel::HIGH); + +BENCHMARK(InsecureRandom_rand64, benchmark::PriorityLevel::HIGH); +BENCHMARK(InsecureRandom_rand32, benchmark::PriorityLevel::HIGH); +BENCHMARK(InsecureRandom_randbool, benchmark::PriorityLevel::HIGH); +BENCHMARK(InsecureRandom_randbits, benchmark::PriorityLevel::HIGH); +BENCHMARK(InsecureRandom_randrange100, benchmark::PriorityLevel::HIGH); +BENCHMARK(InsecureRandom_randrange1000, benchmark::PriorityLevel::HIGH); +BENCHMARK(InsecureRandom_randrange1000000, benchmark::PriorityLevel::HIGH); +BENCHMARK(InsecureRandom_stdshuffle100, benchmark::PriorityLevel::HIGH); diff --git a/src/bench/readblock.cpp b/src/bench/readblock.cpp new file mode 100644 index 0000000000000..058d953b4e96d --- /dev/null +++ b/src/bench/readblock.cpp @@ -0,0 +1,60 @@ +// Copyright (c) 2023 The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +static FlatFilePos WriteBlockToDisk(ChainstateManager& chainman) +{ + DataStream stream{benchmark::data::block413567}; + CBlock block; + stream >> TX_WITH_WITNESS(block); + + return chainman.m_blockman.SaveBlockToDisk(block, 0); +} + +static void ReadBlockFromDiskTest(benchmark::Bench& bench) +{ + const auto testing_setup{MakeNoLogFileContext(ChainType::MAIN)}; + ChainstateManager& chainman{*testing_setup->m_node.chainman}; + + CBlock block; + const auto pos{WriteBlockToDisk(chainman)}; + + bench.run([&] { + const auto success{chainman.m_blockman.ReadBlockFromDisk(block, pos)}; + assert(success); + }); +} + +static void ReadRawBlockFromDiskTest(benchmark::Bench& bench) +{ + const auto testing_setup{MakeNoLogFileContext(ChainType::MAIN)}; + ChainstateManager& chainman{*testing_setup->m_node.chainman}; + + std::vector block_data; + const auto pos{WriteBlockToDisk(chainman)}; + + bench.run([&] { + const auto success{chainman.m_blockman.ReadRawBlockFromDisk(block_data, pos)}; + assert(success); + }); +} + +BENCHMARK(ReadBlockFromDiskTest, benchmark::PriorityLevel::HIGH); +BENCHMARK(ReadRawBlockFromDiskTest, benchmark::PriorityLevel::HIGH); diff --git a/src/bench/rollingbloom.cpp b/src/bench/rollingbloom.cpp index 6cdb4ff0a700d..6b3e1c434e0fd 100644 --- a/src/bench/rollingbloom.cpp +++ b/src/bench/rollingbloom.cpp @@ -1,39 +1,38 @@ -// Copyright (c) 2016-2019 The Bitcoin Core developers +// Copyright (c) 2016-2022 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. 
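The readblock benchmarks above illustrate a recurring structure in these new files: expensive one-time setup stays outside bench.run(), and the measured lambda only performs the operation of interest and pins its result with doNotOptimizeAway so the compiler cannot elide the work. A hypothetical benchmark following that structure (names, workload, and include paths are illustrative assumptions):

```cpp
#include <bench/bench.h>
#include <bench/nanobench.h>

#include <string>
#include <vector>

static void ParseNumbers(benchmark::Bench& bench)
{
    // One-time setup, performed outside the measured lambda.
    std::vector<std::string> inputs;
    inputs.reserve(1000);
    for (int i = 0; i < 1000; ++i) inputs.push_back(std::to_string(i));

    bench.batch(inputs.size()).unit("number").run([&] {
        long sum = 0;
        for (const auto& s : inputs) {
            sum += std::stol(s); // throws on failure, so only the happy path is measured
        }
        ankerl::nanobench::doNotOptimizeAway(sum);
    });
}
BENCHMARK(ParseNumbers, benchmark::PriorityLevel::HIGH);
```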
#include -#include +#include +#include +#include -static void RollingBloom(benchmark::State& state) +#include +#include + +static void RollingBloom(benchmark::Bench& bench) { CRollingBloomFilter filter(120000, 0.000001); std::vector data(32); uint32_t count = 0; - while (state.KeepRunning()) { + bench.run([&] { count++; - data[0] = count; - data[1] = count >> 8; - data[2] = count >> 16; - data[3] = count >> 24; + WriteLE32(data.data(), count); filter.insert(data); - data[0] = count >> 24; - data[1] = count >> 16; - data[2] = count >> 8; - data[3] = count; + WriteBE32(data.data(), count); filter.contains(data); - } + }); } -static void RollingBloomReset(benchmark::State& state) +static void RollingBloomReset(benchmark::Bench& bench) { CRollingBloomFilter filter(120000, 0.000001); - while (state.KeepRunning()) { + bench.run([&] { filter.reset(); - } + }); } -BENCHMARK(RollingBloom, 1500 * 1000); -BENCHMARK(RollingBloomReset, 20000); +BENCHMARK(RollingBloom, benchmark::PriorityLevel::HIGH); +BENCHMARK(RollingBloomReset, benchmark::PriorityLevel::HIGH); diff --git a/src/bench/rpc_blockchain.cpp b/src/bench/rpc_blockchain.cpp index 511573abac5c2..7e3e2d8e48ab0 100644 --- a/src/bench/rpc_blockchain.cpp +++ b/src/bench/rpc_blockchain.cpp @@ -1,32 +1,69 @@ -// Copyright (c) 2016-2020 The Bitcoin Core developers +// Copyright (c) 2016-2022 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include -#include - +#include +#include +#include +#include +#include #include +#include +#include #include +#include +#include +#include #include -#include +#include +#include +#include -static void BlockToJsonVerbose(benchmark::State& state) { - CDataStream stream(benchmark::data::block413567, SER_NETWORK, PROTOCOL_VERSION); - char a = '\0'; - stream.write(&a, 1); // Prevent compaction +namespace { - CBlock block; - stream >> block; +struct TestBlockAndIndex { + const std::unique_ptr testing_setup{MakeNoLogFileContext(ChainType::MAIN)}; + CBlock block{}; + uint256 blockHash{}; + CBlockIndex blockindex{}; - CBlockIndex blockindex; - const uint256 blockHash = block.GetHash(); - blockindex.phashBlock = &blockHash; - blockindex.nBits = 403014710; + TestBlockAndIndex() + { + DataStream stream{benchmark::data::block413567}; + std::byte a{0}; + stream.write({&a, 1}); // Prevent compaction - while (state.KeepRunning()) { - (void)blockToJSON(block, &blockindex, &blockindex, /*verbose*/ true); + stream >> TX_WITH_WITNESS(block); + + blockHash = block.GetHash(); + blockindex.phashBlock = &blockHash; + blockindex.nBits = 403014710; } +}; + +} // namespace + +static void BlockToJsonVerbose(benchmark::Bench& bench) +{ + TestBlockAndIndex data; + bench.run([&] { + auto univalue = blockToJSON(data.testing_setup->m_node.chainman->m_blockman, data.block, data.blockindex, data.blockindex, TxVerbosity::SHOW_DETAILS_AND_PREVOUT); + ankerl::nanobench::doNotOptimizeAway(univalue); + }); +} + +BENCHMARK(BlockToJsonVerbose, benchmark::PriorityLevel::HIGH); + +static void BlockToJsonVerboseWrite(benchmark::Bench& bench) +{ + TestBlockAndIndex data; + auto univalue = blockToJSON(data.testing_setup->m_node.chainman->m_blockman, data.block, data.blockindex, data.blockindex, TxVerbosity::SHOW_DETAILS_AND_PREVOUT); + bench.run([&] { + auto str = univalue.write(); + ankerl::nanobench::doNotOptimizeAway(str); + }); } -BENCHMARK(BlockToJsonVerbose, 10); +BENCHMARK(BlockToJsonVerboseWrite, benchmark::PriorityLevel::HIGH); diff 
--git a/src/bench/rpc_mempool.cpp b/src/bench/rpc_mempool.cpp index bf63cccf096d4..6e8757bbd5f89 100644 --- a/src/bench/rpc_mempool.cpp +++ b/src/bench/rpc_mempool.cpp @@ -1,23 +1,33 @@ -// Copyright (c) 2011-2019 The Bitcoin Core developers +// Copyright (c) 2011-2022 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include -#include +#include +#include +#include +#include +#include