diff --git a/.githooks/pre-commit b/.githooks/pre-commit new file mode 100755 index 0000000000..6a4c26bea2 --- /dev/null +++ b/.githooks/pre-commit @@ -0,0 +1,35 @@ +#!/bin/bash + +set -eu -o pipefail + +STAGED_GO_FILES=$(git diff --cached --name-only -- '*.go') +STAGED_MD_FILES=$(git diff --cached --name-only -- '*.md') + +if [[ $STAGED_GO_FILES == "" ]] && [[ $STAGED_MD_FILES == "" ]]; then + echo "--> Found no go or markdown files, skipping linting" +elif [[ $STAGED_GO_FILES == "" ]]; then + echo "--> Found markdown files, linting" + if ! command -v markdownlint &> /dev/null ; then + echo "markdownlint is not installed or available in the PATH" >&2 + echo "please check https://github.com/igorshubovych/markdownlint-cli" >&2 + exit 1 + fi + markdownlint --config .markdownlint.yaml '**/*.md' +else + echo "--> Found go files, running make lint" + if ! command -v golangci-lint &> /dev/null ; then + echo "golangci-lint is not installed or available in the PATH" >&2 + echo "please check https://github.com/golangci/golangci-lint" >&2 + exit 1 + fi + make lint +fi + +if go mod tidy -v 2>&1 | grep -q 'updates to go.mod needed'; then + exit 1 +fi + +if ! git diff --exit-code go.* &> /dev/null; then + echo "go.mod or go.sum differs, please re-add it to your commit" + exit 1 +fi diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 6253ffe97c..f9ac165e7a 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -7,4 +7,6 @@ # global owners are only requested if there isn't a more specific # codeowner specified below. For this reason, the global codeowners # are often repeated in package-level definitions. -* @liamsi @renaynay @Wondertan @vgonkivs @distractedm1nd +* @renaynay @Wondertan @vgonkivs @distractedm1nd @walldiss @ramin + +docs/adr @adlerjohn @liamsi diff --git a/.github/ISSUE_TEMPLATE/bug-report.yml b/.github/ISSUE_TEMPLATE/bug-report.yml index 021e3fcef4..f6a1f79857 100644 --- a/.github/ISSUE_TEMPLATE/bug-report.yml +++ b/.github/ISSUE_TEMPLATE/bug-report.yml @@ -1,6 +1,6 @@ name: Bug Report description: File a bug report to inform the community on your awesome finding! -labels: ["bug",] +labels: ["bug"] body: - type: markdown attributes: @@ -8,13 +8,15 @@ body: Thank you for filling out this bug report! - type: input id: version - attributes: + attributes: label: Celestia Node version - description: use 'celestia version' or 'git rev-parse --verify HEAD' if installed from source code + description: > + use 'celestia version' or 'git rev-parse --verify HEAD' if installed + from source code validations: required: true - type: markdown - attributes: + attributes: value: | Environment - type: input @@ -33,52 +35,48 @@ body: id: others attributes: label: Others - description: e.g. flag options, celestia config file changes, resources limitation(like cpu, ram limit, swap etc.) + description: > + e.g. flag options, celestia config file changes, resource + limitations (like CPU or RAM limits, swap, etc.) - type: textarea id: steps attributes: label: Steps to reproduce it - description: What steps have you made to reproduce it? + description: What steps did you take to reproduce it? placeholder: Tell us what you see! - value: validations: required: true - type: textarea id: expectation attributes: label: Expected result - description: What do you expect to happen as a final result? + description: What do you expect to happen as a final result?
placeholder: Let us know what is expected - value: validations: required: true - type: textarea id: actual attributes: label: Actual result - description: What do you see happened instead as a final result? - placeholder: This is the crucial part in detecting the root cause of the issue - value: + description: What did you see happen instead as the final result? + placeholder: > + This is the crucial part in detecting the root cause of the issue validations: required: true - type: textarea id: logs attributes: label: Relevant log output - description: Please copy and paste any relevant log(max 20 lines) output. This will be automatically formatted into code, so no need for backticks. Or paste gists, pastebins links here - render: shell + description: > + Please copy and paste any relevant log output (max 20 lines). This will + be automatically formatted into code, so there is no need for backticks. + Alternatively, paste gist or pastebin links here + render: Shell - type: textarea id: misc attributes: label: Notes - description: Is there anything else we need to know? - placeholder: Maybe, you have other ways to repro or what side effects there are if changing steps - #75 - # - type: checkboxes - # id: terms - # attributes: - # label: Code of Conduct - # description: By submitting this issue, you agree to follow our [Code of Conduct](https://example.com) - # options: - # - label: I agree to follow this project's Code of Conduct - # required: true + description: Is there anything else we need to know? + placeholder: > + For example, other ways to reproduce the issue, or side effects that + appear if the steps change diff --git a/.github/ISSUE_TEMPLATE/feature-request.yml b/.github/ISSUE_TEMPLATE/feature-request.yml index ba7439cfbf..91c680c682 100644 --- a/.github/ISSUE_TEMPLATE/feature-request.yml +++ b/.github/ISSUE_TEMPLATE/feature-request.yml @@ -1,7 +1,9 @@ name: Feature Request -description: Request a new feature to inform the community on what will be benenificial for the project! +description: > + Request a new feature to inform the community about what would be beneficial + for the project!
title: "[Feature Request]: " -labels: ["enhancement",] +labels: ["enhancement"] body: - type: markdown attributes: diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 9010c315be..edcd86e11d 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -7,6 +7,8 @@ updates: day: monday time: "11:00" open-pull-requests-limit: 10 + labels: + - kind:deps - package-ecosystem: gomod directory: "/" schedule: @@ -18,4 +20,20 @@ updates: - Wondertan - renaynay labels: - - kind:dependencies + - kind:deps + ignore: + - dependency-name: "*otel*" + update-types: ["version-update:semver-patch"] + groups: + otel: + patterns: + - "go.opentelemetry.io/otel*" + - package-ecosystem: docker + directory: "/" + schedule: + interval: weekly + day: monday + time: "11:00" + open-pull-requests-limit: 10 + labels: + - kind:deps diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md new file mode 100644 index 0000000000..61c286f936 --- /dev/null +++ b/.github/pull_request_template.md @@ -0,0 +1,14 @@ + diff --git a/.github/workflows/ci_release.yml b/.github/workflows/ci_release.yml new file mode 100644 index 0000000000..73572e4cef --- /dev/null +++ b/.github/workflows/ci_release.yml @@ -0,0 +1,140 @@ +name: CI and Release +on: + merge_group: + push: + branches: + - main + # Trigger on version tags + tags: + - "v*" + pull_request: + workflow_dispatch: + inputs: + version: + # Friendly description to be shown in the UI instead of 'name' + description: "Semver type of new version (major / minor / patch)" + # Input has to be provided for the workflow to run + required: true + type: choice + options: + - patch + - minor + - major + +jobs: + # set up go version for use through pipelines, setting + # variable one time and setting outputs to access passing it + # to other jobs + setup: + runs-on: ubuntu-latest + env: + # upgrade go version throughout pipeline here + GO_VERSION: "1.21" + outputs: + go-version: ${{ steps.set-vars.outputs.go-version }} + branch: ${{ steps.trim_ref.outputs.branch }} + debug: ${{ steps.debug.outputs.debug }} + steps: + - name: Set go version + id: set-vars + run: echo "go-version=${{env.GO_VERSION}}" >> "$GITHUB_OUTPUT" + + - name: Trim branch name + id: trim_ref + run: | + echo "branch=$(${${{ github.ref }}:11})" >> $GITHUB_OUTPUT + + - name: Set debug output + id: debug + run: | + if [[ "${{ runner.debug }}" == "true" ]]; then + echo "debug=true" >> $GITHUB_ENV + else + echo "debug=false" >> $GITHUB_ENV + fi + + # Dockerfile Linting + hadolint: + uses: celestiaorg/.github/.github/workflows/reusable_dockerfile_lint.yml@v0.2.8 # yamllint disable-line rule:line-length + with: + dockerfile: Dockerfile + + yamllint: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: celestiaorg/.github/.github/actions/yamllint@v0.2.8 + + markdown-lint: + name: Markdown Lint + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-node@v4 + with: + node-version: 18 + - run: | + npm install -g markdownlint-cli@0.32.1 + markdownlint --config .markdownlint.yaml '**/*.md' + + go-ci: + needs: setup + uses: ./.github/workflows/go-ci.yml + with: + go-version: ${{ needs.setup.outputs.go-version }} + + # If this was a workflow dispatch event, we need to generate and push a tag + # for goreleaser to grab + version_bump: + needs: [hadolint, yamllint, markdown-lint, go-ci] + runs-on: ubuntu-latest + permissions: "write-all" + steps: + - uses: actions/checkout@v4 + + - name: Bump version and push tag + # Placing the if condition 
here is a workaround: we need to block + # on this step during workflow dispatch events, but the step does not + # need to run on tags. If we had the if condition on the full + # version_bump section, it would skip and not run, which would result + # in goreleaser not running either. + if: ${{ github.event_name == 'workflow_dispatch' }} + uses: mathieudutour/github-tag-action@v6.1 + with: + github_token: ${{ secrets.GITHUB_TOKEN }} + default_bump: ${{ inputs.version }} + release_branches: ${{ needs.setup.outputs.branch }} + + # Generate the release with goreleaser to include pre-built binaries + goreleaser: + needs: [setup, version_bump] + runs-on: ubuntu-latest + if: | + github.event_name == 'workflow_dispatch' || + (github.event_name == 'push' && contains(github.ref, 'refs/tags/')) + permissions: "write-all" + steps: + - uses: actions/checkout@v4 + + - run: git fetch --force --tags + + - uses: actions/setup-go@v5 + with: + go-version: ${{ needs.setup.outputs.go-version }} + + - name: Import GPG key + id: import_gpg + uses: crazy-max/ghaction-import-gpg@v6 + with: + gpg_private_key: ${{ secrets.GPG_SIGNING_KEY }} + passphrase: ${{ secrets.GPG_PASSPHRASE }} + + # Generate the binaries and release + - uses: goreleaser/goreleaser-action@v5 + with: + distribution: goreleaser + version: latest + args: release --clean + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + GPG_FINGERPRINT: ${{ steps.import_gpg.outputs.fingerprint }} diff --git a/.github/workflows/docker-build-publish.yml b/.github/workflows/docker-build-publish.yml new file mode 100644 index 0000000000..b8322d5e17 --- /dev/null +++ b/.github/workflows/docker-build-publish.yml @@ -0,0 +1,23 @@ +name: Docker Build & Publish + +# Trigger on all push events, new semantic version tags, and all PRs +on: + merge_group: + push: + branches: + - "main" + tags: + - "v[0-9]+.[0-9]+.[0-9]+" + - "v[0-9]+.[0-9]+.[0-9]+-alpha.[0-9]+" + - "v[0-9]+.[0-9]+.[0-9]+-beta.[0-9]+" + - "v[0-9]+.[0-9]+.[0-9]+-rc[0-9]+" + pull_request: + +jobs: + docker-security-build: + permissions: + contents: write + packages: write + uses: celestiaorg/.github/.github/workflows/reusable_dockerfile_pipeline.yml@v0.2.8 # yamllint disable-line rule:line-length + with: + dockerfile: Dockerfile diff --git a/.github/workflows/docker-build.yml b/.github/workflows/docker-build.yml deleted file mode 100644 index 13cad5a212..0000000000 --- a/.github/workflows/docker-build.yml +++ /dev/null @@ -1,53 +0,0 @@ -name: "docker-build" - -on: - push: - branches: - - "**" - workflow_dispatch: - -env: - GO_VERSION: 1.18 - IMAGE_NAME: ${{ github.repository }} - REGISTRY: ghcr.io - -jobs: - docker-build: - runs-on: "ubuntu-latest" - permissions: - contents: write - packages: write - - steps: - - uses: "actions/checkout@v3" - - - name: set up go - uses: actions/setup-go@v3 - with: - go-version: ${{ env.GO_VERSION }} - - - name: Docker meta - id: meta - uses: docker/metadata-action@v4 - with: - images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }} - tags: | - type=sha - - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v2 - - - name: Login to GHCR - uses: docker/login-action@v2 - with: - registry: ghcr.io - username: ${{ github.repository_owner }} - password: ${{ secrets.GITHUB_TOKEN }} - - - name: Build and push - uses: docker/build-push-action@v3 - with: - platforms: linux/amd64, linux/arm64 - push: true - tags: ${{ steps.meta.outputs.tags }} - file: docker/Dockerfile diff --git a/.github/workflows/github_pages.yml b/.github/workflows/github_pages.yml new file mode 100644 index 
0000000000..b212c2d885 --- /dev/null +++ b/.github/workflows/github_pages.yml @@ -0,0 +1,48 @@ +name: github-pages + +on: + push: + branches: + - main + paths: + - specs/** + pull_request: + paths: + - specs/** + workflow_dispatch: + +jobs: + deploy: + runs-on: ubuntu-latest + permissions: + contents: write + pull-requests: write + steps: + - uses: actions/checkout@v4 + + - name: Setup mdBook + uses: peaceiris/actions-mdbook@v1 + with: + mdbook-version: "latest" + + - name: Build book + run: mdbook build specs + + - name: Deploy main + if: github.event_name == 'push' || github.event_name == 'workflow_dispatch' + uses: peaceiris/actions-gh-pages@v3 + with: + github_token: ${{ secrets.GITHUB_TOKEN }} + publish_dir: ./specs/book + # keep_files is to prevent PR preview files from being overwritten. + # If we need to overwrite such files, trigger this workflow manually. + keep_files: ${{ github.event_name != 'workflow_dispatch' }} + + - name: Deploy PR preview + # Only run this job if the PR was created from a branch on celestiaorg/celestia-node + # because this job will fail for branches from forks. + # https://github.com/celestiaorg/celestia-app/issues/1506 + if: github.event_name == 'pull_request' && github.event.pull_request.head.repo.full_name == github.repository + uses: rossjrw/pr-preview-action@v1 + with: + source-dir: ./specs/book diff --git a/.github/workflows/go-ci.yml b/.github/workflows/go-ci.yml index 4d1d7e4ad2..81fec7dfcb 100644 --- a/.github/workflows/go-ci.yml +++ b/.github/workflows/go-ci.yml @@ -1,43 +1,64 @@ name: Go CI on: - push: - branches: - - main - pull_request: - release: - types: [published] + workflow_call: + inputs: + go-version: + description: 'Go version' + required: true + type: string -env: - GO_VERSION: 1.18 +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true jobs: + setup: + name: Setup + runs-on: ubuntu-latest + outputs: + debug: ${{ steps.debug.outputs.debug }} + steps: + - name: Set debug output + id: debug + run: | + if [[ "${{ runner.debug }}" == "true" ]]; then + echo "debug=true" >> $GITHUB_OUTPUT + else + echo "debug=false" >> $GITHUB_OUTPUT + fi + lint: + needs: [setup] name: Lint runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - - uses: actions/setup-go@v3 + - uses: actions/setup-go@v5 with: - go-version: ${{ env.GO_VERSION }} + go-version: ${{ inputs.go-version }} - name: golangci-lint - uses: golangci/golangci-lint-action@v3.2.0 + uses: golangci/golangci-lint-action@v4.0.0 with: - version: v1.45 + args: --timeout 10m + version: v1.55 + skip-pkg-cache: true + skip-build-cache: true go_mod_tidy_check: + needs: [setup] name: Go Mod Tidy Check runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - - uses: actions/setup-go@v3 + - uses: actions/setup-go@v5 with: - go-version: ${{ env.GO_VERSION }} + go-version: ${{ inputs.go-version }} - run: go mod tidy @@ -45,54 +66,70 @@ jobs: run: git diff --exit-code test_coverage: + needs: [setup, lint, go_mod_tidy_check] name: Unit Tests Coverage - runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + os: [ubuntu-latest, macos-14] + runs-on: ${{ matrix.os }} + env: + OS: ${{ matrix.os }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: set up go - uses: actions/setup-go@v3 - with: - go-version: ${{ env.GO_VERSION }} - - - name: Test & Coverage - run: | - go install github.com/ory/go-acc@v0.2.6 - go-acc -o coverage.txt `go list ./... 
| grep -v node/tests` -- -v - - uses: codecov/codecov-action@v3.1.0 + uses: actions/setup-go@v5 with: - file: ./coverage.txt + go-version: ${{ inputs.go-version }} - unit_race_test: - name: Run Unit Tests with Race Detector - runs-on: ubuntu-latest - - steps: - - uses: actions/checkout@v3 + - name: run unit tests + run: make test-unit ENABLE_VERBOSE=${{ needs.setup.outputs.debug }} - - name: set up go - uses: actions/setup-go@v3 + - name: Upload unit test output + uses: actions/upload-artifact@v4 + if: always() && needs.setup.outputs.debug == 'true' with: - go-version: ${{ env.GO_VERSION }} - - - name: execute test run - run: make test-unit-race - - integration_test: - name: Run Integration Tests - runs-on: ubuntu-latest - - steps: - - uses: actions/checkout@v3 - - - name: set up go - uses: actions/setup-go@v3 + name: unit-test-output-${{ matrix.os }} + path: | + debug.log + coverage.txt + retention-days: 5 + + - name: upload coverage + uses: codecov/codecov-action@v4.0.1 with: - go-version: ${{ env.GO_VERSION }} - - - name: Swamp Tests - run: make test-swamp + env_vars: OS + token: ${{ secrets.CODECOV_TOKEN }} + file: ./coverage.txt + name: coverage-${{ matrix.os }} + + # @ramin - Temporarily removed while we figure out getting + # these unit tests consistently running on ubuntu-latest + # and then enabling them for macos-latest. We aren't requiring + # unit_race_test to pass for PRs so let's remove and reintroduce + # once green + # + # unit_test_race: + # needs: [setup, lint, go_mod_tidy_check] + # name: Unit Tests with Race Detector (ubuntu-latest) + # runs-on: ubuntu-latest + + # steps: + # - uses: actions/checkout@v4 + + # - name: set up go + # uses: actions/setup-go@v5 + # with: + # go-version: ${{ inputs.go-version }} + + # - name: execute test run + # run: make test-unit-race ENABLE_VERBOSE=${{ needs.setup.outputs.debug }} - - name: Swamp Tests with Race Detector - run: make test-swamp-race + integration_test: + name: Integration Tests + needs: [lint, go_mod_tidy_check] + uses: ./.github/workflows/integration-tests.yml + with: + go-version: ${{ inputs.go-version }} diff --git a/.github/workflows/integration-tests.yml b/.github/workflows/integration-tests.yml new file mode 100644 index 0000000000..cc1196fccf --- /dev/null +++ b/.github/workflows/integration-tests.yml @@ -0,0 +1,130 @@ +name: Integration Tests + +on: + workflow_call: + inputs: + go-version: + description: 'Go version' + required: true + type: string + +jobs: + api_tests: + name: Integration Tests API + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v4 + + - name: set up go + uses: actions/setup-go@v5 + with: + go-version: ${{ inputs.go-version }} + + - name: run API tests + run: make test-integration TAGS=api + + blob_tests: + name: Integration Tests Blob + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v4 + + - name: set up go + uses: actions/setup-go@v5 + with: + go-version: ${{ inputs.go-version }} + + - name: run blob tests + run: make test-integration TAGS=blob + + da_tests: + name: Integration Tests DA + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v4 + + - name: set up go + uses: actions/setup-go@v5 + with: + go-version: ${{ inputs.go-version }} + + - name: run da tests + run: make test-integration SHORT=true TAGS=da + + fraud_tests: + name: Integration Tests Fraud + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v4 + + - name: set up go + uses: actions/setup-go@v5 + with: + go-version: ${{ inputs.go-version }} + + - name: run fraud tests + run: make test-integration 
TAGS=fraud + + nd_tests: + name: Integration Tests ND + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v4 + + - name: set up go + uses: actions/setup-go@v5 + with: + go-version: ${{ inputs.go-version }} + + - name: run nd tests + run: make test-integration TAGS=nd + + p2p_tests: + name: Integration Tests p2p + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v4 + + - name: set up go + uses: actions/setup-go@v5 + with: + go-version: ${{ inputs.go-version }} + + - name: run p2p tests + run: make test-integration TAGS=p2p + + reconstruction_tests: + name: Integration Tests Reconstruction + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v4 + + - name: set up go + uses: actions/setup-go@v5 + with: + go-version: ${{ inputs.go-version }} + + - name: run reconstruction tests + run: make test-integration SHORT=true TAGS=reconstruction + + sync_tests: + name: Integration Tests Sync + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v4 + + - name: set up go + uses: actions/setup-go@v5 + with: + go-version: ${{ inputs.go-version }} + + - name: run sync tests + run: make test-integration SHORT=true TAGS=sync diff --git a/.github/workflows/issue-label-automation.yml b/.github/workflows/issue-label-automation.yml new file mode 100644 index 0000000000..0d2d37ca49 --- /dev/null +++ b/.github/workflows/issue-label-automation.yml @@ -0,0 +1,42 @@ +name: Label Automation +on: + # Using pull_request_target for forks since labels are not a security issue + pull_request_target: + types: [opened] + issues: + types: [opened] + +jobs: + automate-labels: + runs-on: ubuntu-latest + if: ${{ github.actor != 'dependabot[bot]' }} + permissions: + issues: write + pull-requests: write + + steps: + - name: Check for External Contributor + uses: tspascoal/get-user-teams-membership@v3 + id: teamCheck + with: + username: ${{ github.actor }} + team: "celestia-node" + GITHUB_TOKEN: ${{ secrets.PAT_TEAM_CHECK }} + + # For issues we want to add a `needs:triage` label if it is unlabeled + - name: Triage labeling + if: ${{ github.event_name == 'issues' }} + uses: andymckay/labeler@master + with: + add-labels: "needs:triage" + ignore-if-labeled: true + repo-token: ${{ secrets.GITHUB_TOKEN }} + + # For both issues and PRs we want to add the `external` label if the + # author is not a member of the node team + - name: External labeling + if: ${{ steps.teamCheck.outputs.isTeamMember == 'false' }} + uses: andymckay/labeler@master + with: + add-labels: "external" + repo-token: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/labels.yml b/.github/workflows/labels.yml index b967061b8e..b9d4351bbd 100644 --- a/.github/workflows/labels.yml +++ b/.github/workflows/labels.yml @@ -3,12 +3,17 @@ name: Required Labels on: pull_request: types: [opened, labeled, unlabeled, synchronize] + +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + jobs: label: runs-on: ubuntu-latest steps: - - uses: mheap/github-action-required-labels@v2 + - uses: mheap/github-action-required-labels@v5 with: mode: minimum count: 1 - labels: "kind:bug-fix, kind:miscellaneous, kind:breaking, kind:improvement, kind:feature, kind:dependencies" + labels: "kind:fix, kind:misc, kind:break!, kind:refactor, kind:feat, kind:deps, kind:docs, kind:ci, kind:chore, kind:testing" # yamllint disable-line rule:line-length diff --git a/.github/workflows/markdown-lint.yml b/.github/workflows/markdown-lint.yml deleted file mode 100644 index f694e6d122..0000000000 --- 
a/.github/workflows/markdown-lint.yml +++ /dev/null @@ -1,22 +0,0 @@ -name: Markdown Lint - -on: - push: - branches: - - main - pull_request: - release: - types: [published] - -jobs: - markdown-lint: - name: Markdown Lint - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - - uses: actions/setup-node@v3 - with: - node-version: 18 - - run: | - npm install -g markdownlint-cli@0.32.1 - markdownlint --config .markdownlint.yaml **/*.md diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml new file mode 100644 index 0000000000..595effed1f --- /dev/null +++ b/.github/workflows/stale.yml @@ -0,0 +1,18 @@ +name: 'Close stale issues and PRs' +on: + schedule: + - cron: '30 8 * * *' + +jobs: + stale: + runs-on: ubuntu-latest + steps: + - uses: actions/stale@v9 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} + days-before-stale: 60 + days-before-close: 14 + days-before-issue-stale: 90 + days-before-issue-close: 21 + exempt-issue-labels: 'keep-open' + start-date: '2023-09-01T00:00:00Z' diff --git a/.gitignore b/.gitignore index f96f7a7abc..24c3851f21 100644 --- a/.gitignore +++ b/.gitignore @@ -1,4 +1,4 @@ -build/ +/build/ *.bak *.iml *.log @@ -10,14 +10,13 @@ build/ *.coverprofile *.test *.orig -*/vendor vendor .DS_Store .bak .idea/ .vscode/ -celestia -cel-shed -cel-key +/celestia +/cel-shed +/cel-key coverage.txt go.work diff --git a/.golangci.yml b/.golangci.yml index d43d74c587..a0f2754a9b 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -4,8 +4,7 @@ run: linters: enable: - bodyclose - - deadcode - - depguard + # - depguard as of v1.54.2, the default config throws errors on our repo - dogsled - dupl - errcheck @@ -33,13 +32,11 @@ linters: # - scopelint - deprecated since v1.39. exportloopref will be used instead - exportloopref - staticcheck - - structcheck - stylecheck - typecheck - unconvert # - unparam - unused - - varcheck # - whitespace # - wsl # - gocognit @@ -66,6 +63,6 @@ linters-settings: misspell: locale: US goimports: - local-prefixes: github.com/celestiaorg + local-prefixes: github.com/celestiaorg/celestia-node dupl: threshold: 200 diff --git a/.goreleaser.yaml b/.goreleaser.yaml new file mode 100644 index 0000000000..9a3991ecae --- /dev/null +++ b/.goreleaser.yaml @@ -0,0 +1,69 @@ +# This is an example .goreleaser.yml file with some sensible defaults. +# Make sure to check the documentation at https://goreleaser.com +before: + hooks: + - go mod tidy +builds: + - main: ./cmd/celestia + binary: celestia + env: + # NOTE: goreleaser doesn't fully support CGO natively. If CGO is needed + # for any node features, this should be removed and a workaround might + # need to be created. + # REF: https://goreleaser.com/limitations/cgo/ + - CGO_ENABLED=0 + - VersioningPath={{ "github.com/celestiaorg/celestia-node/nodebuilder/node" }} + goos: + - linux + - darwin + goarch: + - amd64 + - arm64 + ldflags: + # Ref: https://goreleaser.com/customization/templates/#common-fields + # + # .CommitDate is used to help with reproducible builds, ensuring that the + # same date is always used + # + # .FullCommit is git commit hash goreleaser is using for the release + # + # .Version is the version being released + - -X "{{ .Env.VersioningPath }}.buildTime={{ .CommitDate }}" + - -X "{{ .Env.VersioningPath }}.lastCommit={{ .FullCommit }}" + - -X "{{ .Env.VersioningPath }}.semanticVersion={{ .Version }}" +dist: ./build/goreleaser +archives: + - format: tar.gz + # this name template makes the OS and Arch compatible with the results of + # uname. 
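+    # For illustration (an assumption, not output from this file): with this
+    # template a linux/amd64 build is archived as something like
+    # <ProjectName>_Linux_x86_64.tar.gz, since `title .Os` renders "Linux" and
+    # amd64 is mapped to x86_64 below; the exact name depends on the project
+    # name and target.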
+ name_template: >- + {{ .ProjectName }}_ + {{- title .Os }}_ + {{- if eq .Arch "amd64" }}x86_64 + {{- else if eq .Arch "386" }}i386 + {{- else }}{{ .Arch }}{{ end }} + {{- if .Arm }}v{{ .Arm }}{{ end }} +checksum: + name_template: "checksums.txt" +signs: + - artifacts: checksum + args: + [ + "--batch", + "-u", + "{{ .Env.GPG_FINGERPRINT }}", + "--output", + "${signature}", + "--detach-sign", + "${artifact}", + ] +snapshot: + name_template: "{{ incpatch .Version }}-next" +changelog: + sort: asc + filters: + exclude: + - "^docs:" + - "^test:" +git: + prerelease_suffix: "-" diff --git a/.markdownlint.yaml b/.markdownlint.yaml index 21909e1ab4..b4b33c34d7 100644 --- a/.markdownlint.yaml +++ b/.markdownlint.yaml @@ -1,3 +1,4 @@ "default": true # Default state for all rules "MD013": false # Disable rule for line length "MD033": false # Disable rule banning inline HTML +"MD024": false # Disable "Multiple headings with the same content" rule diff --git a/.yamllint.yml b/.yamllint.yml new file mode 100644 index 0000000000..cd2a9e8293 --- /dev/null +++ b/.yamllint.yml @@ -0,0 +1,9 @@ +--- +# Built from docs https://yamllint.readthedocs.io/en/stable/configuration.html +extends: default + +rules: + # 120 chars should be enough, but don't fail if a line is longer + line-length: + max: 120 + level: warning diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 57bdd95dfb..af167ede5c 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -46,21 +46,6 @@ pkg: Concise title of PR service/header: Remove race in core_listener ``` -## Changelog - -Every *notable* fix, improvement, feature, or breaking change should be made in a -pull-request that includes an update to the `CHANGELOG_PENDING.md` file. - -Changelog entries should be formatted as follows: - -```md -- [module/pkg: Some description about the change #xxx](link to PR) [@contributor](link to contributer github) -``` - -Here, `module` is the part of the code that changed (typically a -top-level Go package), `xxx` is the pull-request number, and `contributor` -is the author/s of the change. - ## Branching Model and Release The main development branch is `main`. @@ -123,3 +108,7 @@ package](https://golang.org/pkg/testing/). If you're adding or removing a function, please check there's a `TestType_Method` test for it. Run: `make test` + +## Protobuf + +If your PR modifies `*.proto` files, you will need to regenerate protobuf files with `make pb-gen`. Note this command assumes you have installed [protoc](https://grpc.io/docs/protoc-installation/). diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000000..01fccafe2e --- /dev/null +++ b/Dockerfile @@ -0,0 +1,65 @@ +FROM --platform=$BUILDPLATFORM docker.io/golang:1.21-alpine3.18 as builder + +ARG TARGETPLATFORM +ARG BUILDPLATFORM +ARG TARGETOS +ARG TARGETARCH + +ENV CGO_ENABLED=0 +ENV GO111MODULE=on + +# hadolint ignore=DL3018 +RUN uname -a && apk update && apk add --no-cache \ + bash \ + gcc \ + git \ + make \ + musl-dev + +WORKDIR /src +COPY go.mod go.sum ./ +RUN go mod download +COPY . . 
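+# Note: go.mod and go.sum are copied and `go mod download` is run before the
+# rest of the source is copied above, so the module-download layer stays
+# cached across source-only changes.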
+ +RUN uname -a &&\ + CGO_ENABLED=${CGO_ENABLED} GOOS=${TARGETOS} GOARCH=${TARGETARCH} \ + make build && make cel-key + +FROM docker.io/alpine:3.19.1 + +# Read here why UID 10001: https://github.com/hexops/dockerfile/blob/main/README.md#do-not-use-a-uid-below-10000 +ARG UID=10001 +ARG USER_NAME=celestia + +ENV CELESTIA_HOME=/home/${USER_NAME} + +# Default node type can be overwritten in deployment manifest +ENV NODE_TYPE bridge +ENV P2P_NETWORK mocha + +# hadolint ignore=DL3018 +RUN uname -a &&\ + apk update && apk add --no-cache \ + bash \ + curl \ + jq \ + # Creates a user with $UID and $GID=$UID + && adduser ${USER_NAME} \ + -D \ + -g ${USER_NAME} \ + -h ${CELESTIA_HOME} \ + -s /sbin/nologin \ + -u ${UID} + +# Copy in the binary +COPY --from=builder /src/build/celestia /bin/celestia +COPY --from=builder /src/./cel-key /bin/cel-key + +COPY --chown=${USER_NAME}:${USER_NAME} docker/entrypoint.sh /opt/entrypoint.sh + +USER ${USER_NAME} + +EXPOSE 2121 + +ENTRYPOINT [ "/bin/bash", "/opt/entrypoint.sh" ] +CMD [ "celestia" ] diff --git a/Makefile b/Makefile index 5deab12d50..a43c917345 100644 --- a/Makefile +++ b/Makefile @@ -1,29 +1,70 @@ SHELL=/usr/bin/env bash PROJECTNAME=$(shell basename "$(PWD)") -LDFLAGS="-X 'main.buildTime=$(shell date)' -X 'main.lastCommit=$(shell git rev-parse HEAD)' -X 'main.semanticVersion=$(shell git describe --tags --dirty=-dev)'" +DIR_FULLPATH=$(shell pwd) +versioningPath := "github.com/celestiaorg/celestia-node/nodebuilder/node" +LDFLAGS=-ldflags="-X '$(versioningPath).buildTime=$(shell date)' -X '$(versioningPath).lastCommit=$(shell git rev-parse HEAD)' -X '$(versioningPath).semanticVersion=$(shell git describe --tags --dirty=-dev 2>/dev/null || git rev-parse --abbrev-ref HEAD)'" +TAGS=integration +SHORT= ifeq (${PREFIX},) PREFIX := /usr/local endif - +ifeq ($(ENABLE_VERBOSE),true) + LOG_AND_FILTER = | tee debug.log + VERBOSE = -v +else + VERBOSE = + LOG_AND_FILTER = +endif +ifeq ($(SHORT),true) + INTEGRATION_RUN_LENGTH = -short +else + INTEGRATION_RUN_LENGTH = +endif ## help: Get more info on make commands. help: Makefile @echo " Choose a command run in "$(PROJECTNAME)":" @sed -n 's/^##//p' $< | column -t -s ':' | sed -e 's/^/ /' .PHONY: help +## install-hooks: Install git-hooks from .githooks directory. +install-hooks: + @echo "--> Installing git hooks" + @git config core.hooksPath .githooks +.PHONY: install-hooks + ## build: Build celestia-node binary. build: + @echo "--> Building Celestia" - @go build -o build/ -ldflags ${LDFLAGS} ./cmd/celestia + @go build -o build/ ${LDFLAGS} ./cmd/celestia .PHONY: build +## build-jemalloc: Build celestia-node binary with jemalloc allocator for BadgerDB instead of Go's native one +build-jemalloc: jemalloc + @echo "--> Building Celestia with jemalloc" + @go build -o build/ ${LDFLAGS} -tags jemalloc ./cmd/celestia +.PHONY: build-jemalloc + ## clean: Clean up celestia-node binary. clean: + @echo "--> Cleaning up ./build" + @rm -rf build/* - -## install: Build and install the celestia-node binary into the $PREFIX (/usr/local/ by default) directory. -install: build +.PHONY: clean + +## cover: Generate a code coverage report. +cover: + @echo "--> Generating Code Coverage" + @go install github.com/ory/go-acc@latest + @go-acc -o coverage.txt `go list ./... | grep -v nodebuilder/tests` -- -v +.PHONY: cover + +## deps: Install dependencies. +deps: + @echo "--> Installing Dependencies" + @go mod download +.PHONY: deps + +## install: Install all build binaries into the $PREFIX (/usr/local/ by default) directory. 
+install: @echo "--> Installing Celestia" @install -v ./build/* -t ${PREFIX}/bin/ .PHONY: install @@ -31,10 +72,10 @@ install: build ## go-install: Build and install the celestia-node binary into the GOBIN directory. go-install: @echo "--> Installing Celestia" - @go install -ldflags ${LDFLAGS} ./cmd/celestia + @go install ${LDFLAGS} ./cmd/celestia .PHONY: go-install -## shed: Build cel-shed binary. +## cel-shed: Build cel-shed binary. cel-shed: @echo "--> Building cel-shed" @go build ./cmd/cel-shed @@ -46,7 +87,7 @@ install-shed: @go install ./cmd/cel-shed .PHONY: install-shed -## key: Build cel-key binary. +## cel-key: Build cel-key binary. cel-key: @echo "--> Building cel-key" @go build ./cmd/cel-key @@ -59,51 +100,45 @@ install-key: .PHONY: install-key ## fmt: Formats only *.go (excluding *.pb.go *pb_test.go). Runs `gofmt & goimports` internally. -fmt: +fmt: sort-imports @find . -name '*.go' -type f -not -path "*.git*" -not -name '*.pb.go' -not -name '*pb_test.go' | xargs gofmt -w -s @find . -name '*.go' -type f -not -path "*.git*" -not -name '*.pb.go' -not -name '*pb_test.go' | xargs goimports -w -local github.com/celestiaorg - @go mod tidy -compat=1.17 + @go mod tidy -compat=1.20 + @cfmt -w -m=100 ./... @markdownlint --fix --quiet --config .markdownlint.yaml . .PHONY: fmt -## lint: Linting *.go files using golangci-lint. Look for .golangci.yml for the list of linters. -lint: +## lint: Linting *.go files using golangci-lint. Look for .golangci.yml for the list of linters. Also lint *.md files using markdownlint. +lint: lint-imports @echo "--> Running linter" @golangci-lint run @markdownlint --config .markdownlint.yaml '**/*.md' + @cfmt -m=100 ./... .PHONY: lint ## test-unit: Running unit tests test-unit: @echo "--> Running unit tests" - @go test -v `go list ./... | grep -v node/tests` -covermode=atomic -coverprofile=coverage.out + @go test $(VERBOSE) -covermode=atomic -coverprofile=coverage.txt `go list ./... | grep -v nodebuilder/tests` $(LOG_AND_FILTER) .PHONY: test-unit ## test-unit-race: Running unit tests with data race detector test-unit-race: @echo "--> Running unit tests with data race detector" - @go test -v -race `go list ./... | grep -v node/tests` + @go test $(VERBOSE) -race -covermode=atomic -coverprofile=coverage.txt `go list ./... | grep -v nodebuilder/tests` $(LOG_AND_FILTER) .PHONY: test-unit-race -## test-swamp: Running swamp tests located in node/tests -test-swamp: - @echo "--> Running swamp tests" - @go test -v ./node/tests -.PHONY: test-swamp - -## test-swamp: Running swamp tests with data race detector located in node/tests -test-swamp-race: - @echo "--> Running swamp tests with data race detector" - @go test -v -race ./node/tests -.PHONY: test-swamp-race - -## test-all: Running both unit and swamp tests -test: - @echo "--> Running all tests without data race detector" - @go test ./... - @echo "--> Running all tests with data race detector" - @go test -race ./... 
-.PHONY: test +## test-integration: Running integration tests located in nodebuilder/tests +test-integration: + @echo "--> Running integration tests $(VERBOSE) -tags=$(TAGS) $(INTEGRATION_RUN_LENGTH)" + @go test $(VERBOSE) -tags=$(TAGS) $(INTEGRATION_RUN_LENGTH) ./nodebuilder/tests +.PHONY: test-integration + +## test-integration-race: Running integration tests with data race detector located in nodebuilder/tests +test-integration-race: + @echo "--> Running integration tests with data race detector -tags=$(TAGS)" + @go test -race -tags=$(TAGS) ./nodebuilder/tests +.PHONY: test-integration-race ## benchmark: Running all benchmarks benchmark: @@ -114,14 +149,97 @@ benchmark: PB_PKGS=$(shell find . -name 'pb' -type d) PB_CORE=$(shell go list -f {{.Dir}} -m github.com/tendermint/tendermint) PB_GOGO=$(shell go list -f {{.Dir}} -m github.com/gogo/protobuf) +PB_CELESTIA_APP=$(shell go list -f {{.Dir}} -m github.com/celestiaorg/celestia-app) +PB_NMT=$(shell go list -f {{.Dir}} -m github.com/celestiaorg/nmt) ## pb-gen: Generate protobuf code for all /pb/*.proto files in the project. pb-gen: + @echo '--> Generating protobuf' + @for dir in $(PB_PKGS); \ + do for file in `find $$dir -type f -name "*.proto"`; \ - do protoc -I=. -I=${PB_CORE}/proto/ -I=${PB_GOGO} --gogofaster_out=paths=source_relative:. $$file; \ + do protoc -I=. -I=${PB_CORE}/proto/ -I=${PB_GOGO} -I=${PB_CELESTIA_APP}/proto -I=${PB_NMT} --gogofaster_out=paths=source_relative:. $$file; \ + echo '-->' $$file; \ + done; \ + done; .PHONY: pb-gen + +## openrpc-gen: Generate OpenRPC spec for Celestia-Node's RPC API +openrpc-gen: + @echo "--> Generating OpenRPC spec" + @go run ./cmd/docgen fraud header state share das p2p node blob da +.PHONY: openrpc-gen + +## lint-imports: Lint only Go imports. +## flag -set-exit-status doesn't exit with code 1 as it should, so we use find until it is fixed by goimports-reviser +lint-imports: + @echo "--> Running imports linter" + @for file in `find . -type f -name '*.go'`; \ + do goimports-reviser -list-diff -set-exit-status -company-prefixes "github.com/celestiaorg" -project-name "github.com/celestiaorg/celestia-node" -output stdout $$file \ + || exit 1; \ + done; +.PHONY: lint-imports + +## sort-imports: Sort Go imports. +sort-imports: + @goimports-reviser -company-prefixes "github.com/celestiaorg" -project-name "github.com/celestiaorg/celestia-node" -output stdout ./... +.PHONY: sort-imports + +## adr-gen: Generate ADR from template. Must set NUM and TITLE parameters. +adr-gen: + @echo "--> Generating ADR" + @curl -sSL https://raw.githubusercontent.com/celestiaorg/.github/main/adr-template.md > docs/architecture/adr-$(NUM)-$(TITLE).md +.PHONY: adr-gen + +## telemetry-infra-up: Launches local telemetry infrastructure. This includes grafana, jaeger, loki, pyroscope, and an otel-collector. +## you can access the grafana instance at localhost:3000 and login with admin:admin. +telemetry-infra-up: + PWD="${DIR_FULLPATH}/docker/telemetry" docker-compose -f ./docker/telemetry/docker-compose.yml up +.PHONY: telemetry-infra-up + +## telemetry-infra-down: Tears the telemetry infrastructure down. The stores for grafana, prometheus, and loki will persist. +telemetry-infra-down: + PWD="${DIR_FULLPATH}/docker/telemetry" docker-compose -f ./docker/telemetry/docker-compose.yml down +.PHONY: telemetry-infra-down + +## goreleaser: List Goreleaser commands and checks if GoReleaser is installed. 
+goreleaser: Makefile + @echo " Choose a goreleaser command to run:" + @sed -n 's/^## goreleaser/goreleaser/p' $< | column -t -s ':' | sed -e 's/^/ /' + @goreleaser --version +.PHONY: goreleaser + +## goreleaser-build: Builds the celestia binary using GoReleaser for your local OS. +goreleaser-build: + goreleaser build --snapshot --clean --single-target +.PHONY: goreleaser-build + +## goreleaser-release: Builds the release celestia binaries as defined in .goreleaser.yaml. This requires there be a git tag for the release in the local git history. +goreleaser-release: + goreleaser release --clean --fail-fast --skip-publish +.PHONY: goreleaser-release + +# Copied from https://github.com/dgraph-io/badger/blob/main/Makefile +USER_ID = $(shell id -u) +HAS_JEMALLOC = $(shell test -f /usr/local/lib/libjemalloc.a && echo "jemalloc") +JEMALLOC_URL = "https://github.com/jemalloc/jemalloc/releases/download/5.2.1/jemalloc-5.2.1.tar.bz2" + +## jemalloc installs jemalloc allocator +jemalloc: + @if [ -z "$(HAS_JEMALLOC)" ] ; then \ + mkdir -p /tmp/jemalloc-temp && cd /tmp/jemalloc-temp ; \ + echo "Downloading jemalloc..." ; \ + curl -s -L ${JEMALLOC_URL} -o jemalloc.tar.bz2 ; \ + tar xjf ./jemalloc.tar.bz2 ; \ + cd jemalloc-5.2.1 ; \ + ./configure --with-jemalloc-prefix='je_' --with-malloc-conf='background_thread:true,metadata_thp:auto'; \ + make ; \ + if [ "$(USER_ID)" -eq "0" ]; then \ + make install ; \ + else \ + echo "==== Need sudo access to install jemalloc" ; \ + sudo make install ; \ + fi ; \ + cd /tmp ; \ + rm -rf /tmp/jemalloc-temp ; \ + fi +.PHONY: jemalloc diff --git a/NOTICE b/NOTICE new file mode 100644 index 0000000000..faf11e3461 --- /dev/null +++ b/NOTICE @@ -0,0 +1,2 @@ +Celestia Node +Copyright 2021 and onwards Strange Loop Labs AG diff --git a/README.md b/README.md index bfb5ca2ae3..3a552495ad 100644 --- a/README.md +++ b/README.md @@ -14,24 +14,38 @@ The DA network wraps the celestia-core consensus network by listening for blocks Continue reading [here](https://blog.celestia.org/celestia-mvp-release-data-availability-sampling-light-clients) if you want to learn more about DAS and how it enables secure and scalable access to Celestia chain data. 
+## Table of Contents + +- [Celestia Node](#celestia-node) + - [Table of Contents](#table-of-contents) + - [Minimum requirements](#minimum-requirements) + - [System Requirements](#system-requirements) + - [Installation](#installation) + - [API docs](#api-docs) + - [Node types](#node-types) + - [Run a node](#run-a-node) + - [Environment variables](#environment-variables) + - [Package-specific documentation](#package-specific-documentation) + - [Code of Conduct](#code-of-conduct) + ## Minimum requirements | Requirement | Notes | -|-------------|----------------| -| Go version | 1.18 or higher | +| ----------- | -------------- | +| Go version | 1.21 or higher | ## System Requirements See the official docs page for system requirements per node type: -* [Bridge](https://docs.celestia.org/nodes/bridge-node#hardware-requirements) -* [Light](https://docs.celestia.org/nodes/light-node#hardware-requirements) -* [Full](https://docs.celestia.org/nodes/full-storage-node#hardware-requirements) +- [Bridge](https://docs.celestia.org/nodes/bridge-node#hardware-requirements) +- [Light](https://docs.celestia.org/nodes/light-node#hardware-requirements) +- [Full](https://docs.celestia.org/nodes/full-storage-node#hardware-requirements) ## Installation ```sh -git clone https://github.com/celestiaorg/celestia-node.git +git clone https://github.com/celestiaorg/celestia-node.git cd celestia-node make build sudo make install ``` @@ -41,33 +55,41 @@ For more information on setting up a node and the hardware requirements needed, ## API docs -Celestia-node public API is documented [here](https://docs.celestia.org/developers/node-api/). +The celestia-node public API is documented [here](https://node-rpc-docs.celestia.org/). ## Node types -* **Bridge** nodes - relay blocks from the celestia consensus network to the celestia data availability (DA) network -* **Full** nodes - fully reconstruct and store blocks by sampling the DA network for shares -* **Light** nodes - verify the availability of block data by sampling the DA network for shares +- **Bridge** nodes - relay blocks from the celestia consensus network to the celestia data availability (DA) network +- **Full** nodes - fully reconstruct and store blocks by sampling the DA network for shares +- **Light** nodes - verify the availability of block data by sampling the DA network for shares More information can be found [here](https://github.com/celestiaorg/celestia-node/blob/main/docs/adr/adr-003-march2022-testnet.md#legend). ## Run a node -`<node_type>` can be `bridge`, `full` or `light`. +`<node_type>` can be: `bridge`, `full`, or `light`. ```sh -celestia <node_type> init +celestia <node_type> init ``` ```sh celestia <node_type> start ``` +Please refer to [this guide](https://docs.celestia.org/nodes/celestia-node/) for more information on running a node. 
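+ +For example, to initialize and start a light node (a sketch of the generic commands above, assuming the `light` node type and default configuration): + +```sh +celestia light init +celestia light start +```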
+ +## Environment variables + +| Variable | Explanation | Default value | Required | +| ----------------------- | ----------------------------------- | ------------- | -------- | +| `CELESTIA_BOOTSTRAPPER` | Start the node in bootstrapper mode | `false` | Optional | + ## Package-specific documentation -* [Header](./service/header/doc.go) -* [Share](./service/share/doc.go) -* [DAS](./das/doc.go) +- [Header](./header/doc.go) +- [Share](./share/doc.go) +- [DAS](./das/doc.go) ## Code of Conduct diff --git a/api/docgen/exampledata/extendedHeader.json b/api/docgen/exampledata/extendedHeader.json new file mode 100644 index 0000000000..5da16246c1 --- /dev/null +++ b/api/docgen/exampledata/extendedHeader.json @@ -0,0 +1,77 @@ +{ + "header": { + "version": { + "block": "11" + }, + "chain_id": "arabica-6", + "height": "67374", + "time": "2023-02-25T12:10:28.067566292Z", + "last_block_id": { + "hash": "47A2C7758760988500B2F043D3903BBBF1C8B383CA33CF7056AA45E22055663E", + "parts": { + "total": 1, + "hash": "33B012F244E27672169DD3D62CDBC92DA9486E410A5530F41FE6A890D8E2EE42" + } + }, + "last_commit_hash": "888D47F5E9473501C99F2B6136B6B9FFBC9D1CD2F54002BCD5DF002FFEF0A83D", + "data_hash": "257760461993F8F197B421EC7435F3C36C3734923E3DA9A42DC73B05F07B3D08", + "validators_hash": "883A0C92B8D976312B249C1397E73CF2981A9EB715717CBEE3800B8380C22C1D", + "next_validators_hash": "883A0C92B8D976312B249C1397E73CF2981A9EB715717CBEE3800B8380C22C1D", + "consensus_hash": "048091BC7DDC283F77BFBF91D73C44DA58C3DF8A9CBC867405D8B7F3DAADA22F", + "app_hash": "1FC70854A185737C7FD720FCCE9167876EE4B9ABE23DB1EBB8C552D3E3978435", + "last_results_hash": "E3B0C44298FC1C149AFBF4C8996FB92427AE41E4649B934CA495991B7852B855", + "evidence_hash": "E3B0C44298FC1C149AFBF4C8996FB92427AE41E4649B934CA495991B7852B855", + "proposer_address": "57DC09D28388DBF977CFC30EF50BE8B644CCC1FA" + }, + "validator_set": { + "validators": [ + { + "address": "57DC09D28388DBF977CFC30EF50BE8B644CCC1FA", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "aoB4xU9//HAqOP9ciyp0+PTdZxt/UGKgZOabU6JxW8o=" + }, + "voting_power": "5000000000", + "proposer_priority": "0" + } + ], + "proposer": { + "address": "57DC09D28388DBF977CFC30EF50BE8B644CCC1FA", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "aoB4xU9//HAqOP9ciyp0+PTdZxt/UGKgZOabU6JxW8o=" + }, + "voting_power": "5000000000", + "proposer_priority": "0" + } + }, + "commit": { + "height": 67374, + "round": 0, + "block_id": { + "hash": "A7F6B1CF33313121539206754A73FDC22ADA48C4AA8C4BB4F707ED2E089E59D3", + "parts": { + "total": 1, + "hash": "6634FE1E1DDDCB9914ACE81F146013986F5FDA03A8F1C16DC5ECA0D9B0E08FBC" + } + }, + "signatures": [ + { + "block_id_flag": 2, + "validator_address": "57DC09D28388DBF977CFC30EF50BE8B644CCC1FA", + "timestamp": "2023-02-25T12:10:38.130121476Z", + "signature": "HyR/uRIUNc5GNqQteZyrVjJM47SI9sRAgrLsNqJDls3AzbvHUfN4zzWyw0afyEvNm98Bm2GIoJoZC5D8oQvdBA==" + } + ] + }, + "dah": { + "row_roots": [ + "//////////7//////////ql+/VFmJ8PWE9BcjrTDLrY/hzVeGdzFCpfEhiXDXZmt", + "/////////////////////zHeGnUtPJn8QyPpePSYl4qRVrcUvG2fwptyoA85Myik" + ], + "column_roots": [ + "//////////7//////////ql+/VFmJ8PWE9BcjrTDLrY/hzVeGdzFCpfEhiXDXZmt", + "/////////////////////zHeGnUtPJn8QyPpePSYl4qRVrcUvG2fwptyoA85Myik" + ] + } +} \ No newline at end of file diff --git a/api/docgen/exampledata/resourceManagerStats.json b/api/docgen/exampledata/resourceManagerStats.json new file mode 100644 index 0000000000..2b85465871 --- /dev/null +++ b/api/docgen/exampledata/resourceManagerStats.json @@ -0,0 +1,88 @@ +{ 
+ "System": { + "NumStreamsInbound": 4, + "NumStreamsOutbound": 13, + "NumConnsInbound": 0, + "NumConnsOutbound": 13, + "NumFD": 7, + "Memory": 4456448 + }, + "Transient": { + "NumStreamsInbound": 0, + "NumStreamsOutbound": 0, + "NumConnsInbound": 0, + "NumConnsOutbound": 0, + "NumFD": 0, + "Memory": 0 + }, + "Services": { + "libp2p.autonat": { + "NumStreamsInbound": 0, + "NumStreamsOutbound": 0, + "NumConnsInbound": 0, + "NumConnsOutbound": 0, + "NumFD": 0, + "Memory": 0 + }, + "libp2p.identify": { + "NumStreamsInbound": 0, + "NumStreamsOutbound": 0, + "NumConnsInbound": 0, + "NumConnsOutbound": 0, + "NumFD": 0, + "Memory": 0 + } + }, + "Protocols": { + "/celestia/arabica-3/ipfs/bitswap/1.2.0": { + "NumStreamsInbound": 0, + "NumStreamsOutbound": 4, + "NumConnsInbound": 0, + "NumConnsOutbound": 0, + "NumFD": 0, + "Memory": 0 + }, + "/celestia/arabica-3/kad/1.0.0": { + "NumStreamsInbound": 0, + "NumStreamsOutbound": 4, + "NumConnsInbound": 0, + "NumConnsOutbound": 0, + "NumFD": 0, + "Memory": 0 + }, + "/floodsub/1.0.0": { + "NumStreamsInbound": 2, + "NumStreamsOutbound": 0, + "NumConnsInbound": 0, + "NumConnsOutbound": 0, + "NumFD": 0, + "Memory": 0 + }, + "/ipfs/id/1.0.0": { + "NumStreamsInbound": 0, + "NumStreamsOutbound": 1, + "NumConnsInbound": 0, + "NumConnsOutbound": 0, + "NumFD": 0, + "Memory": 0 + }, + "/meshsub/1.1.0": { + "NumStreamsInbound": 2, + "NumStreamsOutbound": 4, + "NumConnsInbound": 0, + "NumConnsOutbound": 0, + "NumFD": 0, + "Memory": 0 + } + }, + "Peers": { + "12D3KooWPRb5h3g9MH7sx9qfbSQZG5cXv1a2Qs3o4aW5YmmzPq82": { + "NumStreamsInbound": 1, + "NumStreamsOutbound": 3, + "NumConnsInbound": 0, + "NumConnsOutbound": 3, + "NumFD": 3, + "Memory": 1048576 + } + } +} diff --git a/api/docgen/exampledata/samplingStats.json b/api/docgen/exampledata/samplingStats.json new file mode 100644 index 0000000000..f6efe7d6da --- /dev/null +++ b/api/docgen/exampledata/samplingStats.json @@ -0,0 +1,46 @@ +{ + "head_of_sampled_chain": 1092, + "head_of_catchup": 34101, + "network_head_height": 470292, + "workers": [ + { + "job_type": "catchup", + "current": 1093, + "from": 1002, + "to": 1101 + }, + { + "job_type": "catchup", + "current": 33343, + "from": 33302, + "to": 33401 + }, + { + "job_type": "catchup", + "current": 34047, + "from": 34002, + "to": 34101 + }, + { + "job_type": "catchup", + "current": 1327, + "from": 1302, + "to": 1401 + }, + { + "job_type": "catchup", + "current": 1197, + "from": 1102, + "to": 1201 + }, + { + "job_type": "catchup", + "current": 1408, + "from": 1402, + "to": 1501 + } + ], + "concurrency": 6, + "catch_up_done": false, + "is_running": true +} diff --git a/api/docgen/exampledata/txResponse.json b/api/docgen/exampledata/txResponse.json new file mode 100644 index 0000000000..c03731097f --- /dev/null +++ b/api/docgen/exampledata/txResponse.json @@ -0,0 +1,187 @@ +{ + "height": 30497, + "txhash": "05D9016060072AA71B007A6CFB1B895623192D6616D513017964C3BFCD047282", + "data": "12260A242F636F736D6F732E62616E6B2E763162657461312E4D736753656E64526573706F6E7365", + "raw_log": 
"[{\"msg_index\":0,\"events\":[{\"type\":\"coin_received\",\"attributes\":[{\"key\":\"receiver\",\"value\":\"celestia12les8l8gzsacjjxwum9wdy7me8x9xajqch4gyw\"},{\"key\":\"amount\",\"value\":\"30utia\"}]},{\"type\":\"coin_spent\",\"attributes\":[{\"key\":\"spender\",\"value\":\"celestia1377k5an3f94v6wyaceu0cf4nq6gk2jtpc46g7h\"},{\"key\":\"amount\",\"value\":\"30utia\"}]},{\"type\":\"message\",\"attributes\":[{\"key\":\"action\",\"value\":\"/cosmos.bank.v1beta1.MsgSend\"},{\"key\":\"sender\",\"value\":\"celestia1377k5an3f94v6wyaceu0cf4nq6gk2jtpc46g7h\"},{\"key\":\"module\",\"value\":\"bank\"}]},{\"type\":\"transfer\",\"attributes\":[{\"key\":\"recipient\",\"value\":\"celestia12les8l8gzsacjjxwum9wdy7me8x9xajqch4gyw\"},{\"key\":\"sender\",\"value\":\"celestia1377k5an3f94v6wyaceu0cf4nq6gk2jtpc46g7h\"},{\"key\":\"amount\",\"value\":\"30utia\"}]}]}]", + "logs": [ + { + "msg_index": 0, + "events": [ + { + "type": "coin_received", + "attributes": [ + { + "key": "receiver", + "value": "celestia12les8l8gzsacjjxwum9wdy7me8x9xajqch4gyw" + }, + { + "key": "amount", + "value": "30utia" + } + ] + }, + { + "type": "coin_spent", + "attributes": [ + { + "key": "spender", + "value": "celestia1377k5an3f94v6wyaceu0cf4nq6gk2jtpc46g7h" + }, + { + "key": "amount", + "value": "30utia" + } + ] + }, + { + "type": "message", + "attributes": [ + { + "key": "action", + "value": "/cosmos.bank.v1beta1.MsgSend" + }, + { + "key": "sender", + "value": "celestia1377k5an3f94v6wyaceu0cf4nq6gk2jtpc46g7h" + }, + { + "key": "module", + "value": "bank" + } + ] + }, + { + "type": "transfer", + "attributes": [ + { + "key": "recipient", + "value": "celestia12les8l8gzsacjjxwum9wdy7me8x9xajqch4gyw" + }, + { + "key": "sender", + "value": "celestia1377k5an3f94v6wyaceu0cf4nq6gk2jtpc46g7h" + }, + { + "key": "amount", + "value": "30utia" + } + ] + } + ] + } + ], + "gas_wanted": 10000000, + "gas_used": 69085, + "events": [ + { + "type": "tx", + "attributes": [ + { + "key": "ZmVl", + "value": null, + "index": true + } + ] + }, + { + "type": "tx", + "attributes": [ + { + "key": "YWNjX3NlcQ==", + "value": "Y2VsZXN0aWExMzc3azVhbjNmOTR2Nnd5YWNldTBjZjRucTZnazJqdHBjNDZnN2gvMA==", + "index": true + } + ] + }, + { + "type": "tx", + "attributes": [ + { + "key": "c2lnbmF0dXJl", + "value": "R3NlVjhGNThFNGphR05LU0NicDBvNmRILytKK3BNQjNvUmtoNVpKNE8rVjdvNVVYQkJNNXpmNkdiYnN6OW9Takc1OUZkSHJRYzFvVVVBbnRBZW1wV0E9PQ==", + "index": true + } + ] + }, + { + "type": "message", + "attributes": [ + { + "key": "YWN0aW9u", + "value": "L2Nvc21vcy5iYW5rLnYxYmV0YTEuTXNnU2VuZA==", + "index": true + } + ] + }, + { + "type": "coin_spent", + "attributes": [ + { + "key": "c3BlbmRlcg==", + "value": "Y2VsZXN0aWExMzc3azVhbjNmOTR2Nnd5YWNldTBjZjRucTZnazJqdHBjNDZnN2g=", + "index": true + }, + { + "key": "YW1vdW50", + "value": "MzB1dGlh", + "index": true + } + ] + }, + { + "type": "coin_received", + "attributes": [ + { + "key": "cmVjZWl2ZXI=", + "value": "Y2VsZXN0aWExMmxlczhsOGd6c2Fjamp4d3VtOXdkeTdtZTh4OXhhanFjaDRneXc=", + "index": true + }, + { + "key": "YW1vdW50", + "value": "MzB1dGlh", + "index": true + } + ] + }, + { + "type": "transfer", + "attributes": [ + { + "key": "cmVjaXBpZW50", + "value": "Y2VsZXN0aWExMmxlczhsOGd6c2Fjamp4d3VtOXdkeTdtZTh4OXhhanFjaDRneXc=", + "index": true + }, + { + "key": "c2VuZGVy", + "value": "Y2VsZXN0aWExMzc3azVhbjNmOTR2Nnd5YWNldTBjZjRucTZnazJqdHBjNDZnN2g=", + "index": true + }, + { + "key": "YW1vdW50", + "value": "MzB1dGlh", + "index": true + } + ] + }, + { + "type": "message", + "attributes": [ + { + "key": "c2VuZGVy", + "value": 
"Y2VsZXN0aWExMzc3azVhbjNmOTR2Nnd5YWNldTBjZjRucTZnazJqdHBjNDZnN2g=", + "index": true + } + ] + }, + { + "type": "message", + "attributes": [ + { + "key": "bW9kdWxl", + "value": "YmFuaw==", + "index": true + } + ] + } + ] +} \ No newline at end of file diff --git a/api/docgen/examples.go b/api/docgen/examples.go new file mode 100644 index 0000000000..83d25da6df --- /dev/null +++ b/api/docgen/examples.go @@ -0,0 +1,227 @@ +package docgen + +import ( + _ "embed" + "encoding/json" + "errors" + "fmt" + "reflect" + + "cosmossdk.io/math" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/filecoin-project/go-jsonrpc/auth" + "github.com/libp2p/go-libp2p/core/network" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/core/protocol" + rcmgr "github.com/libp2p/go-libp2p/p2p/host/resource-manager" + "github.com/multiformats/go-multiaddr" + "golang.org/x/text/cases" + "golang.org/x/text/language" + + "github.com/celestiaorg/go-fraud" + "github.com/celestiaorg/nmt" + "github.com/celestiaorg/rsmt2d" + + "github.com/celestiaorg/celestia-node/blob" + "github.com/celestiaorg/celestia-node/das" + "github.com/celestiaorg/celestia-node/header" + "github.com/celestiaorg/celestia-node/nodebuilder/node" + "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/share/eds/byzantine" + "github.com/celestiaorg/celestia-node/state" +) + +//go:embed "exampledata/extendedHeader.json" +var exampleExtendedHeader string + +//go:embed "exampledata/samplingStats.json" +var exampleSamplingStats string + +//go:embed "exampledata/txResponse.json" +var exampleTxResponse string + +//go:embed "exampledata/resourceManagerStats.json" +var exampleResourceMngrStats string + +var ExampleValues = map[reflect.Type]interface{}{ + reflect.TypeOf(""): "string value", + reflect.TypeOf(uint64(42)): uint64(42), + reflect.TypeOf(uint32(42)): uint32(42), + reflect.TypeOf(int32(42)): int32(42), + reflect.TypeOf(int64(42)): int64(42), + reflect.TypeOf(42): 42, + reflect.TypeOf(byte(7)): byte(7), + reflect.TypeOf(float64(42)): float64(42), + reflect.TypeOf(blob.GasPrice(0)): blob.GasPrice(0.002), + reflect.TypeOf(true): true, + reflect.TypeOf([]byte{}): []byte("byte array"), + reflect.TypeOf(node.Full): node.Full, + reflect.TypeOf(auth.Permission("admin")): auth.Permission("admin"), + reflect.TypeOf(byzantine.BadEncoding): byzantine.BadEncoding, + reflect.TypeOf((*fraud.Proof[*header.ExtendedHeader])(nil)).Elem(): byzantine.CreateBadEncodingProof( + []byte("bad encoding proof"), + 42, + &byzantine.ErrByzantine{ + Index: 0, + Axis: rsmt2d.Axis(0), + Shares: []*byzantine.ShareWithProof{}, + }, + ), + reflect.TypeOf((*error)(nil)).Elem(): errors.New("error"), +} + +func init() { + addToExampleValues(share.EmptyExtendedDataSquare()) + addr, err := sdk.AccAddressFromBech32("celestia1377k5an3f94v6wyaceu0cf4nq6gk2jtpc46g7h") + if err != nil { + panic(err) + } + addToExampleValues(addr) + ExampleValues[reflect.TypeOf((*sdk.Address)(nil)).Elem()] = addr + + valAddr, err := sdk.ValAddressFromBech32("celestiavaloper1q3v5cugc8cdpud87u4zwy0a74uxkk6u4q4gx4p") + if err != nil { + panic(err) + } + addToExampleValues(valAddr) + + addToExampleValues(state.Address{Address: addr}) + + var txResponse *state.TxResponse + err = json.Unmarshal([]byte(exampleTxResponse), &txResponse) + if err != nil { + panic(err) + } + + var samplingStats das.SamplingStats + err = json.Unmarshal([]byte(exampleSamplingStats), &samplingStats) + if err != nil { + panic(err) + } + + var extendedHeader *header.ExtendedHeader + err = 
json.Unmarshal([]byte(exampleExtendedHeader), &extendedHeader) + if err != nil { + panic(err) + } + + var resourceMngrStats rcmgr.ResourceManagerStat + err = json.Unmarshal([]byte(exampleResourceMngrStats), &resourceMngrStats) + if err != nil { + panic(err) + } + + addToExampleValues(txResponse) + addToExampleValues(samplingStats) + addToExampleValues(extendedHeader) + addToExampleValues(resourceMngrStats) + + mathInt, _ := math.NewIntFromString("42") + addToExampleValues(mathInt) + + addToExampleValues(network.Connected) + addToExampleValues(network.ReachabilityPrivate) + + pID := protocol.ID("/celestia/mocha/ipfs/bitswap") + addToExampleValues(pID) + + peerID := peer.ID("12D3KooWPRb5h3g9MH7sx9qfbSQZG5cXv1a2Qs3o4aW5YmmzPq82") + addToExampleValues(peerID) + + ma, _ := multiaddr.NewMultiaddr("/ip6/::1/udp/2121/quic-v1") + addrInfo := peer.AddrInfo{ + ID: peerID, + Addrs: []multiaddr.Multiaddr{ma}, + } + addToExampleValues(addrInfo) + + namespace, err := share.NewBlobNamespaceV0([]byte{0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x10}) + if err != nil { + panic(err) + } + addToExampleValues(namespace) + + generatedBlob, err := blob.NewBlobV0(namespace, []byte("This is an example of some blob data")) + if err != nil { + panic(err) + } + addToExampleValues(generatedBlob) + + proof := nmt.NewInclusionProof(0, 4, [][]byte{[]byte("test")}, true) + blobProof := &blob.Proof{&proof} + addToExampleValues(blobProof) +} + +func addToExampleValues(v interface{}) { + ExampleValues[reflect.TypeOf(v)] = v +} + +func ExampleValue(t, parent reflect.Type) (interface{}, error) { + v, ok := ExampleValues[t] + if ok { + return v, nil + } + + switch t.Kind() { + case reflect.Slice: + out := reflect.New(t).Elem() + val, err := ExampleValue(t.Elem(), t) + if err != nil { + return nil, err + } + out = reflect.Append(out, reflect.ValueOf(val)) + return out.Interface(), nil + case reflect.Chan: + return ExampleValue(t.Elem(), nil) + case reflect.Struct: + es, err := exampleStruct(t, parent) + if err != nil { + return nil, err + } + v := reflect.ValueOf(es).Elem().Interface() + ExampleValues[t] = v + return v, nil + case reflect.Array: + out := reflect.New(t).Elem() + for i := 0; i < t.Len(); i++ { + val, err := ExampleValue(t.Elem(), t) + if err != nil { + return nil, err + } + out.Index(i).Set(reflect.ValueOf(val)) + } + return out.Interface(), nil + + case reflect.Ptr: + if t.Elem().Kind() == reflect.Struct { + es, err := exampleStruct(t.Elem(), t) + if err != nil { + return nil, err + } + return es, err + } + case reflect.Interface: + return struct{}{}, nil + } + + return nil, fmt.Errorf("failed to retrieve example value for type: %s on parent '%s'", t, parent) +} + +func exampleStruct(t, parent reflect.Type) (interface{}, error) { + ns := reflect.New(t) + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + if f.Type == parent { + continue + } + if cases.Title(language.Und, cases.NoLower).String(f.Name) == f.Name { + val, err := ExampleValue(f.Type, t) + if err != nil { + return nil, err + } + ns.Elem().Field(i).Set(reflect.ValueOf(val)) + } + } + + return ns.Interface(), nil +} diff --git a/api/docgen/openrpc.go b/api/docgen/openrpc.go new file mode 100644 index 0000000000..737b491a89 --- /dev/null +++ b/api/docgen/openrpc.go @@ -0,0 +1,246 @@ +// Package docgen generates an OpenRPC spec for the Celestia Node. It has been inspired by and +// adapted from Filecoin's Lotus API implementation.
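An editorial aside before the generator source below: when ExampleValue above finds no registered value, it recurses over composite kinds until it reaches a registered leaf type. A minimal, self-contained sketch of that registry-plus-recursion pattern (all names here are illustrative, not part of the package):

```go
package main

import (
	"fmt"
	"reflect"
)

// examples maps concrete types to canned values, mirroring the
// ExampleValues registry above (this standalone sketch is illustrative).
var examples = map[reflect.Type]interface{}{
	reflect.TypeOf(""):        "string value",
	reflect.TypeOf(uint64(0)): uint64(42),
}

// exampleFor resolves an example for t, recursing into slice element
// types the way ExampleValue does for composite kinds.
func exampleFor(t reflect.Type) (interface{}, error) {
	if v, ok := examples[t]; ok {
		return v, nil
	}
	if t.Kind() == reflect.Slice {
		elem, err := exampleFor(t.Elem())
		if err != nil {
			return nil, err
		}
		out := reflect.Append(reflect.New(t).Elem(), reflect.ValueOf(elem))
		return out.Interface(), nil
	}
	return nil, fmt.Errorf("no example registered for %s", t)
}

func main() {
	v, err := exampleFor(reflect.TypeOf([]uint64(nil)))
	if err != nil {
		panic(err)
	}
	fmt.Println(v) // [42]
}
```

The real code additionally memoizes resolved struct examples back into the registry (the `ExampleValues[t] = v` line in the Struct case) so each type is only reflected once.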
+package docgen + +import ( + "encoding/json" + "fmt" + "go/ast" + "go/parser" + "go/token" + "net" + "reflect" + "strings" + + "github.com/alecthomas/jsonschema" + go_openrpc_reflect "github.com/etclabscore/go-openrpc-reflect" + meta_schema "github.com/open-rpc/meta-schema" + + "github.com/celestiaorg/celestia-node/api/rpc/client" + "github.com/celestiaorg/celestia-node/nodebuilder/node" +) + +const ( + APIDescription = "The Celestia Node API is the collection of RPC methods that " + + "can be used to interact with the services provided by Celestia Data Availability Nodes." + APIName = "Celestia Node API" + DocsURL = "https://github.com/celestiaorg/celestia-node" + DocsName = "Celestia Node GitHub" +) + +type Visitor struct { + Methods map[string]ast.Node +} + +func (v *Visitor) Visit(node ast.Node) ast.Visitor { + st, ok := node.(*ast.TypeSpec) + if !ok { + return v + } + + if st.Name.Name != "Module" { + return nil + } + + iface := st.Type.(*ast.InterfaceType) + for _, m := range iface.Methods.List { + if len(m.Names) > 0 { + v.Methods[m.Names[0].Name] = m + } + } + + return v +} + +type Comments = map[string]string + +func ParseCommentsFromNodebuilderModules(moduleNames ...string) (Comments, Comments) { + fset := token.NewFileSet() + nodeComments := make(Comments) + permComments := make(Comments) + for _, moduleName := range moduleNames { + fileName := fmt.Sprintf("nodebuilder/%s/%s.go", moduleName, moduleName) + f, err := parser.ParseFile(fset, fileName, nil, parser.AllErrors|parser.ParseComments) + if err != nil { + panic(err) + } + + cmap := ast.NewCommentMap(fset, f, f.Comments) + + v := &Visitor{make(map[string]ast.Node)} + ast.Walk(v, f) + + for mn, node := range v.Methods { + filteredComments := cmap.Filter(node).Comments() + if len(filteredComments) == 0 { + nodeComments[moduleName+mn] = "No comment exists yet for this method." 
+ } else { + nodeComments[moduleName+mn] = filteredComments[0].Text() + } + } + + module := reflect.TypeOf(client.Modules[moduleName]).Elem() + var meth reflect.StructField + for i := 0; i < module.NumField(); i++ { + meth = module.Field(i) + perms := meth.Tag.Get("perm") + permComments[meth.Name] = perms + } + } + return nodeComments, permComments +} + +func NewOpenRPCDocument(comments Comments, permissions Comments) *go_openrpc_reflect.Document { + d := &go_openrpc_reflect.Document{} + + d.WithMeta(&go_openrpc_reflect.MetaT{ + GetServersFn: func() func(listeners []net.Listener) (*meta_schema.Servers, error) { + return func(listeners []net.Listener) (*meta_schema.Servers, error) { + return nil, nil + } + }, + GetInfoFn: func() (info *meta_schema.InfoObject) { + info = &meta_schema.InfoObject{} + title := APIName + info.Title = (*meta_schema.InfoObjectProperties)(&title) + + version := node.APIVersion + info.Version = (*meta_schema.InfoObjectVersion)(&version) + + description := APIDescription + info.Description = (*meta_schema.InfoObjectDescription)(&description) + + return info + }, + GetExternalDocsFn: func() (exdocs *meta_schema.ExternalDocumentationObject) { + url, description := DocsURL, DocsName + + return &meta_schema.ExternalDocumentationObject{ + Url: (*meta_schema.ExternalDocumentationObjectUrl)(&url), + Description: (*meta_schema.ExternalDocumentationObjectDescription)(&description), + } + }, + }) + + appReflector := &go_openrpc_reflect.EthereumReflectorT{} + + appReflector.FnGetMethodExternalDocs = func( + r reflect.Value, + m reflect.Method, + funcDecl *ast.FuncDecl, + ) (*meta_schema.ExternalDocumentationObject, error) { + extDocs, err := go_openrpc_reflect.EthereumReflector.GetMethodExternalDocs(r, m, funcDecl) + if err != nil { + return nil, err + } + + desc := "Source of the default service's implementation of this method." + extDocs.Description = (*meta_schema.ExternalDocumentationObjectDescription)(&desc) + + url := strings.Replace(string(*extDocs.Url), "/master/", "/main/", 1) + extDocs.Url = (*meta_schema.ExternalDocumentationObjectUrl)(&url) + // + return extDocs, nil + } + + appReflector.FnIsMethodEligible = func(m reflect.Method) bool { + // methods are only eligible if they were found in the Module interface + _, ok := comments[extractPackageNameFromAPIMethod(m)+m.Name] + if !ok { + return false + } + + /* TODO(@distractedm1nd): find out why chans are excluded in lotus. is this a must? + for i := 0; i < m.Func.Type().NumOut(); i++ { + if m.Func.Type().Out(i).Kind() == reflect.Chan { + return false + } + } + */ + return go_openrpc_reflect.EthereumReflector.IsMethodEligible(m) + } + + // remove the default implementation from the method descriptions + appReflector.FnGetMethodDescription = func(r reflect.Value, m reflect.Method, funcDecl *ast.FuncDecl) (string, error) { + if v, ok := permissions[m.Name]; ok { + return "Auth level: " + v, nil + } + return "", nil // noComment + } + + appReflector.FnGetMethodName = func( + moduleName string, + r reflect.Value, + m reflect.Method, + funcDecl *ast.FuncDecl, + ) (string, error) { + return moduleName + "." 
+ m.Name, nil + } + + appReflector.FnGetMethodSummary = func(r reflect.Value, m reflect.Method, funcDecl *ast.FuncDecl) (string, error) { + if v, ok := comments[extractPackageNameFromAPIMethod(m)+m.Name]; ok { + return v, nil + } + return "", nil // noComment + } + + appReflector.FnSchemaExamples = func(ty reflect.Type) (examples *meta_schema.Examples, err error) { + v, err := ExampleValue(ty, ty) // This isn't ideal, but seems to work well enough. + if err != nil { + fmt.Println(err) + } + return &meta_schema.Examples{ + meta_schema.AlwaysTrue(v), + }, nil + } + + d.WithReflector(appReflector) + return d +} + +const integerD = `{ "title": "number", "type": "number", "description": "Number is a number" }` + +func OpenRPCSchemaTypeMapper(ty reflect.Type) *jsonschema.Type { + unmarshalJSONToJSONSchemaType := func(input string) *jsonschema.Type { + var js jsonschema.Type + err := json.Unmarshal([]byte(input), &js) + if err != nil { + panic(err) + } + return &js + } + + if ty.Kind() == reflect.Ptr { + ty = ty.Elem() + } + + if ty == reflect.TypeOf((*interface{})(nil)).Elem() { + return &jsonschema.Type{Type: "object", AdditionalProperties: []byte("true")} + } + + // Handle primitive types in case there are generic cases + // specific to our services. + switch ty.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + // Return all integer types as the hex representation integer schema. + ret := unmarshalJSONToJSONSchemaType(integerD) + return ret + case reflect.Uintptr: + return &jsonschema.Type{Type: "number", Title: "uintptr-title"} + case reflect.Struct: + case reflect.Map: + case reflect.Slice, reflect.Array: + case reflect.Float32, reflect.Float64: + case reflect.Bool: + case reflect.String: + case reflect.Ptr, reflect.Interface: + default: + } + + return nil +} + +func extractPackageNameFromAPIMethod(m reflect.Method) string { + return strings.TrimSuffix(m.Type.In(0).String()[1:], ".API") +} diff --git a/service/rpc/availability.go b/api/gateway/availability.go similarity index 73% rename from service/rpc/availability.go rename to api/gateway/availability.go index 116a61755e..70341b34f2 100644 --- a/service/rpc/availability.go +++ b/api/gateway/availability.go @@ -1,4 +1,4 @@ -package rpc +package gateway import ( "encoding/json" @@ -7,7 +7,7 @@ import ( "github.com/gorilla/mux" - "github.com/celestiaorg/celestia-node/service/share" + "github.com/celestiaorg/celestia-node/share" ) const heightAvailabilityEndpoint = "/data_available" @@ -15,8 +15,7 @@ const heightAvailabilityEndpoint = "/data_available" // AvailabilityResponse represents the response to a // `/data_available` request.
type AvailabilityResponse struct { - Available bool `json:"available"` - Probability string `json:"probability_of_availability"` + Available bool `json:"available"` } func (h *Handler) handleHeightAvailabilityRequest(w http.ResponseWriter, r *http.Request) { @@ -33,16 +32,10 @@ func (h *Handler) handleHeightAvailabilityRequest(w http.ResponseWriter, r *http return } - availResp := &AvailabilityResponse{ - Probability: strconv.FormatFloat( - h.share.ProbabilityOfAvailability(), 'g', -1, 64), - } - - err = h.share.Availability.SharesAvailable(r.Context(), header.DAH) + err = h.share.SharesAvailable(r.Context(), header) switch err { case nil: - availResp.Available = true - resp, err := json.Marshal(availResp) + resp, err := json.Marshal(&AvailabilityResponse{Available: true}) if err != nil { writeError(w, http.StatusInternalServerError, heightAvailabilityEndpoint, err) return @@ -52,8 +45,7 @@ func (h *Handler) handleHeightAvailabilityRequest(w http.ResponseWriter, r *http log.Errorw("serving request", "endpoint", heightAvailabilityEndpoint, "err", err) } case share.ErrNotAvailable: - availResp.Available = false - resp, err := json.Marshal(availResp) + resp, err := json.Marshal(&AvailabilityResponse{Available: false}) if err != nil { writeError(w, http.StatusInternalServerError, heightAvailabilityEndpoint, err) return diff --git a/api/gateway/bindings.go b/api/gateway/bindings.go new file mode 100644 index 0000000000..c01bd2da47 --- /dev/null +++ b/api/gateway/bindings.go @@ -0,0 +1,73 @@ +package gateway + +import ( + "fmt" + "net/http" +) + +func (h *Handler) RegisterEndpoints(rpc *Server) { + // state endpoints + rpc.RegisterHandlerFunc( + fmt.Sprintf("%s/{%s}", balanceEndpoint, addrKey), + h.handleBalanceRequest, + http.MethodGet, + ) + + rpc.RegisterHandlerFunc( + submitTxEndpoint, + h.handleSubmitTx, + http.MethodPost, + ) + + rpc.RegisterHandlerFunc( + healthEndpoint, + h.handleHealthRequest, + http.MethodGet, + ) + + // share endpoints + rpc.RegisterHandlerFunc( + fmt.Sprintf( + "%s/{%s}/height/{%s}", + namespacedSharesEndpoint, + namespaceKey, + heightKey, + ), + h.handleSharesByNamespaceRequest, + http.MethodGet, + ) + + rpc.RegisterHandlerFunc( + fmt.Sprintf("%s/{%s}", namespacedSharesEndpoint, namespaceKey), + h.handleSharesByNamespaceRequest, + http.MethodGet, + ) + + rpc.RegisterHandlerFunc( + fmt.Sprintf("%s/{%s}/height/{%s}", namespacedDataEndpoint, namespaceKey, heightKey), + h.handleDataByNamespaceRequest, + http.MethodGet, + ) + + rpc.RegisterHandlerFunc( + fmt.Sprintf("%s/{%s}", namespacedDataEndpoint, namespaceKey), + h.handleDataByNamespaceRequest, + http.MethodGet, + ) + + // DAS endpoints + rpc.RegisterHandlerFunc( + fmt.Sprintf("%s/{%s}", heightAvailabilityEndpoint, heightKey), + h.handleHeightAvailabilityRequest, + http.MethodGet, + ) + + // header endpoints + rpc.RegisterHandlerFunc( + fmt.Sprintf("%s/{%s}", headerByHeightEndpoint, heightKey), + h.handleHeaderRequest, + http.MethodGet, + ) + + rpc.RegisterHandlerFunc(headEndpoint, h.handleHeadRequest, http.MethodGet) +} diff --git a/api/gateway/bindings_test.go b/api/gateway/bindings_test.go new file mode 100644 index 0000000000..5d27d5e4c7 --- /dev/null +++ b/api/gateway/bindings_test.go @@ -0,0 +1,119 @@ +package gateway + +import ( + "fmt" + "net/http" + "testing" + + "github.com/gorilla/mux" + "github.com/stretchr/testify/require" +) + +func TestRegisterEndpoints(t *testing.T) { + handler := &Handler{} + rpc := NewServer("localhost", "6969") + + handler.RegisterEndpoints(rpc) + + testCases := []struct { + name 
string + path string + method string + expected bool + }{ + { + name: "Get balance endpoint", + path: fmt.Sprintf("%s/{%s}", balanceEndpoint, addrKey), + method: http.MethodGet, + expected: true, + }, + { + name: "Submit transaction endpoint", + path: submitTxEndpoint, + method: http.MethodPost, + expected: true, + }, + { + name: "Get namespaced shares by height endpoint", + path: fmt.Sprintf("%s/{%s}/height/{%s}", namespacedSharesEndpoint, namespaceKey, heightKey), + method: http.MethodGet, + expected: true, + }, + { + name: "Get namespaced shares endpoint", + path: fmt.Sprintf("%s/{%s}", namespacedSharesEndpoint, namespaceKey), + method: http.MethodGet, + expected: true, + }, + { + name: "Get namespaced data by height endpoint", + path: fmt.Sprintf("%s/{%s}/height/{%s}", namespacedDataEndpoint, namespaceKey, heightKey), + method: http.MethodGet, + expected: true, + }, + { + name: "Get namespaced data endpoint", + path: fmt.Sprintf("%s/{%s}", namespacedDataEndpoint, namespaceKey), + method: http.MethodGet, + expected: true, + }, + { + name: "Get health endpoint", + path: "/status/health", + method: http.MethodGet, + expected: true, + }, + + // Going forward, we can add previously deprecated and since + // removed endpoints here to ensure we don't accidentally re-enable + // them in the future and accidentally expand surface area + { + name: "example totally bogus endpoint", + path: fmt.Sprintf("/wutang/{%s}/%s", "chambers", "36"), + method: http.MethodGet, + expected: false, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + require.Equal( + t, + tc.expected, + hasEndpointRegistered(rpc.Router(), tc.path, tc.method), + "Endpoint registration mismatch for: %s %s %s", tc.name, tc.method, tc.path) + }) + } +} + +func hasEndpointRegistered(router *mux.Router, path string, method string) bool { + var registered bool + err := router.Walk(func(route *mux.Route, router *mux.Router, ancestors []*mux.Route) error { + template, err := route.GetPathTemplate() + if err != nil { + return err + } + + if template == path { + methods, err := route.GetMethods() + if err != nil { + return err + } + + for _, m := range methods { + if m == method { + registered = true + return nil + } + } + } + return nil + }) + + if err != nil { + fmt.Println("Error walking through routes:", err) + return false + } + + return registered +} diff --git a/api/gateway/config.go b/api/gateway/config.go new file mode 100644 index 0000000000..0485da486e --- /dev/null +++ b/api/gateway/config.go @@ -0,0 +1,23 @@ +package gateway + +import ( + "fmt" + "net" + "strconv" +) + +type Config struct { + Address string + Port string +} + +func (cfg *Config) Validate() error { + if ip := net.ParseIP(cfg.Address); ip == nil { + return fmt.Errorf("service/gateway: invalid listen address format: %s", cfg.Address) + } + _, err := strconv.Atoi(cfg.Port) + if err != nil { + return fmt.Errorf("service/gateway: invalid port: %s", err.Error()) + } + return nil +} diff --git a/api/gateway/handler.go b/api/gateway/handler.go new file mode 100644 index 0000000000..2602528170 --- /dev/null +++ b/api/gateway/handler.go @@ -0,0 +1,33 @@ +package gateway + +import ( + logging "github.com/ipfs/go-log/v2" + + "github.com/celestiaorg/celestia-node/das" + "github.com/celestiaorg/celestia-node/nodebuilder/header" + "github.com/celestiaorg/celestia-node/nodebuilder/share" + "github.com/celestiaorg/celestia-node/nodebuilder/state" +) + +var log = logging.Logger("gateway") + +type Handler struct { + state state.Module + share 
share.Module + header header.Module + das *das.DASer +} + +func NewHandler( + state state.Module, + share share.Module, + header header.Module, + das *das.DASer, +) *Handler { + return &Handler{ + state: state, + share: share, + header: header, + das: das, + } +} diff --git a/service/rpc/header.go b/api/gateway/header.go similarity index 96% rename from service/rpc/header.go rename to api/gateway/header.go index e1d44dc55a..5b8a82351c 100644 --- a/service/rpc/header.go +++ b/api/gateway/header.go @@ -1,4 +1,4 @@ -package rpc +package gateway import ( "encoding/json" @@ -20,7 +20,7 @@ var ( ) func (h *Handler) handleHeadRequest(w http.ResponseWriter, r *http.Request) { - head, err := h.header.Head(r.Context()) + head, err := h.header.LocalHead(r.Context()) if err != nil { writeError(w, http.StatusInternalServerError, headEndpoint, err) return @@ -69,11 +69,12 @@ func (h *Handler) performGetHeaderRequest( writeError(w, http.StatusBadRequest, endpoint, err) return nil, err } - // perform request + header, err := h.header.GetByHeight(r.Context(), uint64(height)) if err != nil { writeError(w, http.StatusInternalServerError, endpoint, err) return nil, err } + return header, nil } diff --git a/api/gateway/health.go b/api/gateway/health.go new file mode 100644 index 0000000000..2a96e0200e --- /dev/null +++ b/api/gateway/health.go @@ -0,0 +1,18 @@ +package gateway + +import ( + "net/http" +) + +const ( + healthEndpoint = "/status/health" +) + +func (h *Handler) handleHealthRequest(w http.ResponseWriter, _ *http.Request) { + _, err := w.Write([]byte("ok")) + if err != nil { + log.Errorw("serving request", "endpoint", healthEndpoint, "err", err) + writeError(w, http.StatusBadGateway, healthEndpoint, err) + return + } +} diff --git a/api/gateway/middleware.go b/api/gateway/middleware.go new file mode 100644 index 0000000000..4b669113dd --- /dev/null +++ b/api/gateway/middleware.go @@ -0,0 +1,41 @@ +package gateway + +import ( + "context" + "net/http" + "time" +) + +const timeout = time.Minute + +func (h *Handler) RegisterMiddleware(srv *Server) { + srv.RegisterMiddleware( + setContentType, + wrapRequestContext, + enableCors, + ) +} + +func enableCors(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Access-Control-Allow-Origin", "*") + next.ServeHTTP(w, r) + }) +} + +func setContentType(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Add("Content-Type", "application/json") + next.ServeHTTP(w, r) + }) +} + +// wrapRequestContext ensures we implement a deadline on serving requests +// via the gateway server-side to prevent context leaks. +func wrapRequestContext(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ctx, cancel := context.WithTimeout(r.Context(), timeout) + defer cancel() + next.ServeHTTP(w, r.WithContext(ctx)) + }) +} diff --git a/api/gateway/server.go b/api/gateway/server.go new file mode 100644 index 0000000000..7eab7c7bf9 --- /dev/null +++ b/api/gateway/server.go @@ -0,0 +1,103 @@ +package gateway + +import ( + "context" + "net" + "net/http" + "sync/atomic" + "time" + + "github.com/gorilla/mux" +) + +// Server represents a gateway server on the Node. +type Server struct { + srv *http.Server + srvMux *mux.Router // http request multiplexer + listener net.Listener + + started atomic.Bool +} + +// NewServer returns a new gateway Server. 
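A wiring note before the server constructor below: the Handler above is pure dependency plumbing, so standing up a gateway takes only a few calls. A hypothetical sketch using the constructors from this diff (the listen port is a placeholder, not a documented default):

```go
package gatewaydemo

import (
	"context"

	"github.com/celestiaorg/celestia-node/api/gateway"
	"github.com/celestiaorg/celestia-node/das"
	"github.com/celestiaorg/celestia-node/nodebuilder/header"
	"github.com/celestiaorg/celestia-node/nodebuilder/share"
	"github.com/celestiaorg/celestia-node/nodebuilder/state"
)

// startGateway wires together the types introduced in this diff.
func startGateway(
	ctx context.Context,
	st state.Module,
	sh share.Module,
	hd header.Module,
	daser *das.DASer,
) (*gateway.Server, error) {
	handler := gateway.NewHandler(st, sh, hd, daser)
	srv := gateway.NewServer("localhost", "26659") // illustrative port
	handler.RegisterMiddleware(srv)                // content type, request deadline, CORS
	handler.RegisterEndpoints(srv)                 // state, share, DAS and header routes
	return srv, srv.Start(ctx)
}
```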
+func NewServer(address, port string) *Server { + srvMux := mux.NewRouter() + srvMux.Use(setContentType) + + server := &Server{ + srvMux: srvMux, + } + server.srv = &http.Server{ + Addr: address + ":" + port, + Handler: server, + // the amount of time allowed to read request headers. set to the default 2 seconds + ReadHeaderTimeout: 2 * time.Second, + } + return server +} + +func (s *Server) Router() *mux.Router { + return s.srvMux +} + +// Start starts the gateway Server, listening on the given address. +func (s *Server) Start(context.Context) error { + couldStart := s.started.CompareAndSwap(false, true) + if !couldStart { + log.Warn("cannot start server: already started") + return nil + } + listener, err := net.Listen("tcp", s.srv.Addr) + if err != nil { + return err + } + s.listener = listener + log.Infow("server started", "listening on", s.srv.Addr) + //nolint:errcheck + go s.srv.Serve(listener) + return nil +} + +// Stop stops the gateway Server. +func (s *Server) Stop(ctx context.Context) error { + couldStop := s.started.CompareAndSwap(true, false) + if !couldStop { + log.Warn("cannot stop server: already stopped") + return nil + } + err := s.srv.Shutdown(ctx) + if err != nil { + return err + } + s.listener = nil + log.Info("server stopped") + return nil +} + +// RegisterMiddleware allows to register a custom middleware that will be called before +// http.Request will reach handler. +func (s *Server) RegisterMiddleware(middlewareFuncs ...mux.MiddlewareFunc) { + for _, m := range middlewareFuncs { + // `router.Use` appends new middleware to existing + s.srvMux.Use(m) + } +} + +// RegisterHandlerFunc registers the given http.HandlerFunc on the Server's multiplexer +// on the given pattern. +func (s *Server) RegisterHandlerFunc(pattern string, handlerFunc http.HandlerFunc, method string) { + s.srvMux.HandleFunc(pattern, handlerFunc).Methods(method) +} + +// ServeHTTP serves inbound requests on the Server. +func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { + s.srvMux.ServeHTTP(w, r) +} + +// ListenAddr returns the listen address of the server. 
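Both this gateway server and the RPC server later in this diff guard Start/Stop with an atomic.Bool, making repeated calls harmless no-ops. A tiny runnable sketch of that idempotency pattern in isolation:

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// lifecycle mirrors the started-flag pattern above: CompareAndSwap makes
// Start/Stop idempotent without a mutex.
type lifecycle struct{ started atomic.Bool }

func (l *lifecycle) Start() bool { return l.started.CompareAndSwap(false, true) }
func (l *lifecycle) Stop() bool  { return l.started.CompareAndSwap(true, false) }

func main() {
	var l lifecycle
	fmt.Println(l.Start()) // true: first start wins
	fmt.Println(l.Start()) // false: already started, no-op
	fmt.Println(l.Stop())  // true
	fmt.Println(l.Stop())  // false: already stopped
}
```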
+func (s *Server) ListenAddr() string { + if s.listener == nil { + return "" + } + return s.listener.Addr().String() +} diff --git a/api/gateway/server_test.go b/api/gateway/server_test.go new file mode 100644 index 0000000000..cb8e3d17ae --- /dev/null +++ b/api/gateway/server_test.go @@ -0,0 +1,129 @@ +package gateway + +import ( + "context" + "fmt" + "io" + "net/http" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +const ( + address = "localhost" + port = "0" +) + +func TestServer(t *testing.T) { + server := NewServer(address, port) + + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + err := server.Start(ctx) + require.NoError(t, err) + + // register ping handler + ping := new(ping) + server.RegisterHandlerFunc("/ping", ping.ServeHTTP, http.MethodGet) + + url := fmt.Sprintf("http://%s/ping", server.ListenAddr()) + + resp, err := http.Get(url) + require.NoError(t, err) + + buf, err := io.ReadAll(resp.Body) + require.NoError(t, err) + t.Cleanup(func() { + resp.Body.Close() + }) + assert.Equal(t, "pong", string(buf)) + + err = server.Stop(ctx) + require.NoError(t, err) +} + +func TestCorsEnabled(t *testing.T) { + server := NewServer(address, port) + server.RegisterMiddleware(enableCors) + + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + err := server.Start(ctx) + require.NoError(t, err) + + // register ping handler + ping := new(ping) + server.RegisterHandlerFunc("/ping", ping.ServeHTTP, http.MethodGet) + + url := fmt.Sprintf("http://%s/ping", server.ListenAddr()) + + resp, err := http.Get(url) + require.NoError(t, err) + defer resp.Body.Close() + + require.NoError(t, err) + require.Equal(t, resp.Header.Get("Access-Control-Allow-Origin"), "*") +} + +// TestServer_contextLeakProtection tests to ensure a context +// deadline was added by the context wrapper middleware server-side. 
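The wrapRequestContext middleware above is what the test that follows exercises. A self-contained sketch of the same pattern, assuming only gorilla/mux, showing the server-side deadline becoming visible inside a handler:

```go
package main

import (
	"context"
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
	"time"

	"github.com/gorilla/mux"
)

// withTimeout has the same shape as wrapRequestContext above: it imposes
// a server-side deadline on every request context.
func withTimeout(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		ctx, cancel := context.WithTimeout(r.Context(), time.Minute)
		defer cancel()
		next.ServeHTTP(w, r.WithContext(ctx))
	})
}

func main() {
	router := mux.NewRouter()
	router.Use(withTimeout) // mux middleware runs in registration order
	router.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		deadline, ok := r.Context().Deadline()
		fmt.Fprintf(w, "deadline set: %v, in %s", ok, time.Until(deadline).Round(time.Second))
	})

	srv := httptest.NewServer(router)
	defer srv.Close()

	resp, err := http.Get(srv.URL)
	if err != nil {
		panic(err)
	}
	body, _ := io.ReadAll(resp.Body)
	resp.Body.Close()
	fmt.Println(string(body)) // deadline set: true, in 1m0s
}
```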
+func TestServer_contextLeakProtection(t *testing.T) { + server := NewServer(address, port) + server.RegisterMiddleware(wrapRequestContext) + + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + err := server.Start(ctx) + require.NoError(t, err) + + // register ping handler + ch := new(contextHandler) + server.RegisterHandlerFunc("/ch", ch.ServeHTTP, http.MethodGet) + + url := fmt.Sprintf("http://%s/ch", server.ListenAddr()) + req, err := http.NewRequest(http.MethodGet, url, nil) + require.NoError(t, err) + + cli := new(http.Client) + + originalCtx, originalCancel := context.WithDeadline(context.Background(), time.Now().Add(time.Minute)) + t.Cleanup(originalCancel) + resp, err := cli.Do(req.WithContext(originalCtx)) + require.NoError(t, err) + buf, err := io.ReadAll(resp.Body) + require.NoError(t, err) + require.NoError(t, resp.Body.Close()) + + dur := new(time.Time) + err = dur.UnmarshalJSON(buf) + require.NoError(t, err) + assert.True(t, dur.After(time.Now())) +} + +type ping struct{} + +func (p ping) ServeHTTP(w http.ResponseWriter, _ *http.Request) { + //nolint:errcheck + w.Write([]byte("pong")) +} + +type contextHandler struct{} + +func (ch contextHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + deadline, ok := r.Context().Deadline() + if !ok { + w.Write([]byte("no deadline")) //nolint:errcheck + return + } + bin, err := deadline.MarshalJSON() + if err != nil { + panic(err) + } + w.Write(bin) //nolint:errcheck +} diff --git a/service/rpc/share.go b/api/gateway/share.go similarity index 64% rename from service/rpc/share.go rename to api/gateway/share.go index 51bf2684c9..36c1f94a0a 100644 --- a/service/rpc/share.go +++ b/api/gateway/share.go @@ -1,4 +1,4 @@ -package rpc +package gateway import ( "context" @@ -8,11 +8,10 @@ import ( "strconv" "github.com/gorilla/mux" - "github.com/tendermint/tendermint/types" - "github.com/celestiaorg/celestia-node/header" - "github.com/celestiaorg/celestia-node/service/share" - "github.com/celestiaorg/nmt/namespace" + "github.com/celestiaorg/celestia-app/pkg/shares" + + "github.com/celestiaorg/celestia-node/share" ) const ( @@ -20,7 +19,7 @@ const ( namespacedDataEndpoint = "/namespaced_data" ) -var nIDKey = "nid" +var namespaceKey = "nid" // NamespacedSharesResponse represents the response to a // SharesByNamespace request. @@ -37,19 +36,19 @@ type NamespacedDataResponse struct { } func (h *Handler) handleSharesByNamespaceRequest(w http.ResponseWriter, r *http.Request) { - height, nID, err := parseGetByNamespaceArgs(r) + height, namespace, err := parseGetByNamespaceArgs(r) if err != nil { writeError(w, http.StatusBadRequest, namespacedSharesEndpoint, err) return } - shares, headerHeight, err := h.getShares(r.Context(), height, nID) + shares, err := h.getShares(r.Context(), height, namespace) if err != nil { writeError(w, http.StatusInternalServerError, namespacedSharesEndpoint, err) return } resp, err := json.Marshal(&NamespacedSharesResponse{ Shares: shares, - Height: uint64(headerHeight), + Height: height, }) if err != nil { writeError(w, http.StatusInternalServerError, namespacedSharesEndpoint, err) @@ -62,12 +61,12 @@ func (h *Handler) handleSharesByNamespaceRequest(w http.ResponseWriter, r *http. 
} func (h *Handler) handleDataByNamespaceRequest(w http.ResponseWriter, r *http.Request) { - height, nID, err := parseGetByNamespaceArgs(r) + height, namespace, err := parseGetByNamespaceArgs(r) if err != nil { writeError(w, http.StatusBadRequest, namespacedDataEndpoint, err) return } - shares, headerHeight, err := h.getShares(r.Context(), height, nID) + shares, err := h.getShares(r.Context(), height, namespace) if err != nil { writeError(w, http.StatusInternalServerError, namespacedDataEndpoint, err) return @@ -79,7 +78,7 @@ func (h *Handler) handleDataByNamespaceRequest(w http.ResponseWriter, r *http.Re } resp, err := json.Marshal(&NamespacedDataResponse{ Data: data, - Height: uint64(headerHeight), + Height: height, }) if err != nil { writeError(w, http.StatusInternalServerError, namespacedDataEndpoint, err) @@ -91,39 +90,40 @@ func (h *Handler) handleDataByNamespaceRequest(w http.ResponseWriter, r *http.Re } } -func (h *Handler) getShares(ctx context.Context, height uint64, nID namespace.ID) ([]share.Share, int64, error) { - // get header - var ( - err error - header *header.ExtendedHeader - ) - switch height { - case 0: - header, err = h.header.Head(ctx) - default: - header, err = h.header.GetByHeight(ctx, height) +func (h *Handler) getShares(ctx context.Context, height uint64, namespace share.Namespace) ([]share.Share, error) { + header, err := h.header.GetByHeight(ctx, height) + if err != nil { + return nil, err } + + shares, err := h.share.GetSharesByNamespace(ctx, header, namespace) if err != nil { - return nil, 0, err + return nil, err } - // perform request - shares, err := h.share.GetSharesByNamespace(ctx, header.DAH, nID) - return shares, header.Height, err + + return shares.Flatten(), nil } -func dataFromShares(shares []share.Share) ([][]byte, error) { - messages, err := types.ParseMsgs(shares) +func dataFromShares(input []share.Share) (data [][]byte, err error) { + appShares, err := shares.FromBytes(input) if err != nil { return nil, err } - data := make([][]byte, len(messages.MessagesList)) - for i := range messages.MessagesList { - data[i] = messages.MessagesList[i].Data + sequences, err := shares.ParseShares(appShares, false) + if err != nil { + return nil, err + } + for _, sequence := range sequences { + raw, err := sequence.RawData() + if err != nil { + return nil, err + } + data = append(data, raw) } return data, nil } -func parseGetByNamespaceArgs(r *http.Request) (height uint64, nID namespace.ID, err error) { +func parseGetByNamespaceArgs(r *http.Request) (height uint64, namespace share.Namespace, err error) { vars := mux.Vars(r) // if a height was given, parse it, otherwise get namespaced shares/data from the latest header if strHeight, ok := vars[heightKey]; ok { @@ -132,11 +132,10 @@ func parseGetByNamespaceArgs(r *http.Request) (height uint64, nID namespace.ID, return 0, nil, err } } - hexNID := vars[nIDKey] - nID, err = hex.DecodeString(hexNID) + hexNamespace := vars[namespaceKey] + namespace, err = hex.DecodeString(hexNamespace) if err != nil { return 0, nil, err } - - return height, nID, nil + return height, namespace, namespace.ValidateForData() } diff --git a/api/gateway/share_test.go b/api/gateway/share_test.go new file mode 100644 index 0000000000..9b12240f62 --- /dev/null +++ b/api/gateway/share_test.go @@ -0,0 +1,48 @@ +package gateway + +import ( + _ "embed" + "testing" + + "github.com/stretchr/testify/require" + coretypes "github.com/tendermint/tendermint/types" + + "github.com/celestiaorg/celestia-app/pkg/appconsts" + 
"github.com/celestiaorg/celestia-app/pkg/shares" + + "github.com/celestiaorg/celestia-node/share/sharetest" +) + +func Test_dataFromShares(t *testing.T) { + testData := [][]byte{ + []byte("beep"), + []byte("beeap"), + []byte("BEEEEAHP"), + } + + ns := sharetest.RandV0Namespace() + sss := shares.NewSparseShareSplitter() + for _, data := range testData { + b := coretypes.Blob{ + Data: data, + NamespaceID: ns.ID(), + NamespaceVersion: ns.Version(), + ShareVersion: appconsts.ShareVersionZero, + } + err := sss.Write(b) + require.NoError(t, err) + } + + sssShares := sss.Export() + + rawSSSShares := make([][]byte, len(sssShares)) + for i := 0; i < len(sssShares); i++ { + d := sssShares[i].ToBytes() + rawSSSShares[i] = d + } + + parsedSSSShares, err := dataFromShares(rawSSSShares) + require.NoError(t, err) + + require.Equal(t, testData, parsedSSSShares) +} diff --git a/api/gateway/state.go b/api/gateway/state.go new file mode 100644 index 0000000000..13cf729cc6 --- /dev/null +++ b/api/gateway/state.go @@ -0,0 +1,102 @@ +package gateway + +import ( + "encoding/hex" + "encoding/json" + "errors" + "net/http" + + "github.com/cosmos/cosmos-sdk/types" + "github.com/gorilla/mux" + + "github.com/celestiaorg/celestia-node/state" +) + +const ( + balanceEndpoint = "/balance" + submitTxEndpoint = "/submit_tx" +) + +const addrKey = "address" + +var ( + ErrInvalidAddressFormat = errors.New("address must be a valid account or validator address") + ErrMissingAddress = errors.New("address not specified") +) + +// submitTxRequest represents a request to submit a raw transaction +type submitTxRequest struct { + Tx string `json:"tx"` +} + +func (h *Handler) handleBalanceRequest(w http.ResponseWriter, r *http.Request) { + var ( + bal *state.Balance + err error + ) + // read and parse request + vars := mux.Vars(r) + addrStr, exists := vars[addrKey] + if !exists { + writeError(w, http.StatusBadRequest, balanceEndpoint, errors.New("balance endpoint requires address")) + return + } + + // convert address to Address type + var addr state.AccAddress + addr, err = types.AccAddressFromBech32(addrStr) + if err != nil { + // first check if it is a validator address and can be converted + valAddr, err := types.ValAddressFromBech32(addrStr) + if err != nil { + writeError(w, http.StatusBadRequest, balanceEndpoint, ErrInvalidAddressFormat) + return + } + addr = valAddr.Bytes() + } + + bal, err = h.state.BalanceForAddress(r.Context(), state.Address{Address: addr}) + if err != nil { + writeError(w, http.StatusInternalServerError, balanceEndpoint, err) + return + } + resp, err := json.Marshal(bal) + if err != nil { + writeError(w, http.StatusInternalServerError, balanceEndpoint, err) + return + } + _, err = w.Write(resp) + if err != nil { + log.Errorw("writing response", "endpoint", balanceEndpoint, "err", err) + } +} + +func (h *Handler) handleSubmitTx(w http.ResponseWriter, r *http.Request) { + // decode request + var req submitTxRequest + err := json.NewDecoder(r.Body).Decode(&req) + if err != nil { + writeError(w, http.StatusBadRequest, submitTxEndpoint, err) + return + } + rawTx, err := hex.DecodeString(req.Tx) + if err != nil { + writeError(w, http.StatusBadRequest, submitTxEndpoint, err) + return + } + // perform request + txResp, err := h.state.SubmitTx(r.Context(), rawTx) + if err != nil { + writeError(w, http.StatusInternalServerError, submitTxEndpoint, err) + return + } + resp, err := json.Marshal(txResp) + if err != nil { + writeError(w, http.StatusInternalServerError, submitTxEndpoint, err) + return + } + _, err = 
w.Write(resp) + if err != nil { + log.Errorw("writing response", "endpoint", submitTxEndpoint, "err", err) + } +} diff --git a/api/gateway/util.go b/api/gateway/util.go new file mode 100644 index 0000000000..d3739f9e9c --- /dev/null +++ b/api/gateway/util.go @@ -0,0 +1,19 @@ +package gateway + +import ( + "net/http" +) + +func writeError(w http.ResponseWriter, statusCode int, endpoint string, err error) { + log.Debugw("serving request", "endpoint", endpoint, "err", err) + + w.WriteHeader(statusCode) + + errorMessage := err.Error() // Get the error message as a string + errorBytes := []byte(errorMessage) + + _, err = w.Write(errorBytes) + if err != nil { + log.Errorw("writing error response", "endpoint", endpoint, "err", err) + } +} diff --git a/api/gateway/util_test.go b/api/gateway/util_test.go new file mode 100644 index 0000000000..d41b0918d2 --- /dev/null +++ b/api/gateway/util_test.go @@ -0,0 +1,24 @@ +package gateway + +import ( + "errors" + "net/http" + "net/http/httptest" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestWriteError(t *testing.T) { + t.Run("writeError", func(t *testing.T) { + // Create a mock HTTP response writer + w := httptest.NewRecorder() + + testErr := errors.New("test error") + + writeError(w, http.StatusInternalServerError, "/api/endpoint", testErr) + assert.Equal(t, http.StatusInternalServerError, w.Code) + responseBody := w.Body.Bytes() + assert.Equal(t, testErr.Error(), string(responseBody)) + }) +} diff --git a/api/rpc/client/client.go b/api/rpc/client/client.go new file mode 100644 index 0000000000..1d8142048b --- /dev/null +++ b/api/rpc/client/client.go @@ -0,0 +1,98 @@ +package client + +import ( + "context" + "fmt" + "net/http" + + "github.com/filecoin-project/go-jsonrpc" + + "github.com/celestiaorg/celestia-node/api/rpc/perms" + "github.com/celestiaorg/celestia-node/nodebuilder/blob" + "github.com/celestiaorg/celestia-node/nodebuilder/da" + "github.com/celestiaorg/celestia-node/nodebuilder/das" + "github.com/celestiaorg/celestia-node/nodebuilder/fraud" + "github.com/celestiaorg/celestia-node/nodebuilder/header" + "github.com/celestiaorg/celestia-node/nodebuilder/node" + "github.com/celestiaorg/celestia-node/nodebuilder/p2p" + "github.com/celestiaorg/celestia-node/nodebuilder/share" + "github.com/celestiaorg/celestia-node/nodebuilder/state" +) + +var ( + // staticClient is used for generating the OpenRPC spec. + staticClient Client + Modules = moduleMap(&staticClient) +) + +type Client struct { + Fraud fraud.API + Header header.API + State state.API + Share share.API + DAS das.API + P2P p2p.API + Node node.API + Blob blob.API + DA da.API + + closer multiClientCloser +} + +// multiClientCloser is a wrapper struct to close clients across multiple namespaces. +type multiClientCloser struct { + closers []jsonrpc.ClientCloser +} + +// register adds a new closer to the multiClientCloser +func (m *multiClientCloser) register(closer jsonrpc.ClientCloser) { + m.closers = append(m.closers, closer) +} + +// closeAll closes all saved clients. +func (m *multiClientCloser) closeAll() { + for _, closer := range m.closers { + closer() + } +} + +// Close closes the connections to all namespaces registered on the staticClient. +func (c *Client) Close() { + c.closer.closeAll() +} + +// NewClient creates a new Client with one connection per namespace with the +// given token as the authorization token. 
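A usage aside before the constructor below: a hypothetical sketch of dialing the RPC server and calling a module method through the generated client. The address and token are placeholders; the port and the Height accessor on ExtendedHeader are assumptions here, not asserted by this diff:

```go
package main

import (
	"context"
	"fmt"

	"github.com/celestiaorg/celestia-node/api/rpc/client"
)

func main() {
	ctx := context.Background()

	// placeholder address and token
	cli, err := client.NewClient(ctx, "http://localhost:26658", "<auth token>")
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	// every module hangs off a client field; LocalHead mirrors the
	// gateway's handleHeadRequest earlier in this diff
	head, err := cli.Header.LocalHead(ctx)
	if err != nil {
		panic(err)
	}
	fmt.Println("local head height:", head.Height())
}
```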
+func NewClient(ctx context.Context, addr string, token string) (*Client, error) { + authHeader := http.Header{perms.AuthKey: []string{fmt.Sprintf("Bearer %s", token)}} + return newClient(ctx, addr, authHeader) +} + +func newClient(ctx context.Context, addr string, authHeader http.Header) (*Client, error) { + var multiCloser multiClientCloser + var client Client + for name, module := range moduleMap(&client) { + closer, err := jsonrpc.NewClient(ctx, addr, name, module, authHeader) + if err != nil { + return nil, err + } + multiCloser.register(closer) + } + + return &client, nil +} + +func moduleMap(client *Client) map[string]interface{} { + // TODO: this duplication of strings many times across the codebase can be avoided with issue #1176 + return map[string]interface{}{ + "share": &client.Share.Internal, + "state": &client.State.Internal, + "header": &client.Header.Internal, + "fraud": &client.Fraud.Internal, + "das": &client.DAS.Internal, + "p2p": &client.P2P.Internal, + "node": &client.Node.Internal, + "blob": &client.Blob.Internal, + "da": &client.DA.Internal, + } +} diff --git a/api/rpc/perms/permissions.go b/api/rpc/perms/permissions.go new file mode 100644 index 0000000000..00cb056ca9 --- /dev/null +++ b/api/rpc/perms/permissions.go @@ -0,0 +1,36 @@ +package perms + +import ( + "encoding/json" + + "github.com/cristalhq/jwt" + "github.com/filecoin-project/go-jsonrpc/auth" +) + +var ( + DefaultPerms = []auth.Permission{"public"} + ReadPerms = []auth.Permission{"public", "read"} + ReadWritePerms = []auth.Permission{"public", "read", "write"} + AllPerms = []auth.Permission{"public", "read", "write", "admin"} +) + +var AuthKey = "Authorization" + +// JWTPayload is a utility struct for marshaling/unmarshalling +// permissions into for token signing/verifying. +type JWTPayload struct { + Allow []auth.Permission +} + +func (j *JWTPayload) MarshalBinary() (data []byte, err error) { + return json.Marshal(j) +} + +// NewTokenWithPerms generates and signs a new JWT token with the given secret +// and given permissions. +func NewTokenWithPerms(secret jwt.Signer, perms []auth.Permission) ([]byte, error) { + p := &JWTPayload{ + Allow: perms, + } + return jwt.NewTokenBuilder(secret).BuildBytes(p) +} diff --git a/api/rpc/server.go b/api/rpc/server.go new file mode 100644 index 0000000000..f247682083 --- /dev/null +++ b/api/rpc/server.go @@ -0,0 +1,118 @@ +package rpc + +import ( + "context" + "net" + "net/http" + "reflect" + "sync/atomic" + "time" + + "github.com/cristalhq/jwt" + "github.com/filecoin-project/go-jsonrpc" + "github.com/filecoin-project/go-jsonrpc/auth" + logging "github.com/ipfs/go-log/v2" + + "github.com/celestiaorg/celestia-node/api/rpc/perms" + "github.com/celestiaorg/celestia-node/libs/authtoken" +) + +var log = logging.Logger("rpc") + +type Server struct { + srv *http.Server + rpc *jsonrpc.RPCServer + listener net.Listener + authDisabled bool + + started atomic.Bool + + auth jwt.Signer +} + +func NewServer(address, port string, authDisabled bool, secret jwt.Signer) *Server { + rpc := jsonrpc.NewServer() + srv := &Server{ + rpc: rpc, + srv: &http.Server{ + Addr: address + ":" + port, + // the amount of time allowed to read request headers. set to the default 2 seconds + ReadHeaderTimeout: 2 * time.Second, + }, + auth: secret, + authDisabled: authDisabled, + } + srv.srv.Handler = &auth.Handler{ + Verify: srv.verifyAuth, + Next: rpc.ServeHTTP, + } + return srv +} + +// verifyAuth is the RPC server's auth middleware. 
This middleware is only +// reached if a token is provided in the header of the request, otherwise only +// methods with `read` permissions are accessible. +func (s *Server) verifyAuth(_ context.Context, token string) ([]auth.Permission, error) { + if s.authDisabled { + return perms.AllPerms, nil + } + return authtoken.ExtractSignedPermissions(s.auth, token) +} + +// RegisterService registers a service onto the RPC server. All methods on the service will then be +// exposed over the RPC. +func (s *Server) RegisterService(namespace string, service interface{}, out interface{}) { + if s.authDisabled { + s.rpc.Register(namespace, service) + return + } + + auth.PermissionedProxy(perms.AllPerms, perms.DefaultPerms, service, getInternalStruct(out)) + s.rpc.Register(namespace, out) +} + +func getInternalStruct(api interface{}) interface{} { + return reflect.ValueOf(api).Elem().FieldByName("Internal").Addr().Interface() +} + +// Start starts the RPC Server. +func (s *Server) Start(context.Context) error { + couldStart := s.started.CompareAndSwap(false, true) + if !couldStart { + log.Warn("cannot start server: already started") + return nil + } + listener, err := net.Listen("tcp", s.srv.Addr) + if err != nil { + return err + } + s.listener = listener + log.Infow("server started", "listening on", s.srv.Addr) + //nolint:errcheck + go s.srv.Serve(listener) + return nil +} + +// Stop stops the RPC Server. +func (s *Server) Stop(ctx context.Context) error { + couldStop := s.started.CompareAndSwap(true, false) + if !couldStop { + log.Warn("cannot stop server: already stopped") + return nil + } + err := s.srv.Shutdown(ctx) + if err != nil { + return err + } + s.listener = nil + log.Info("server stopped") + return nil +} + +// ListenAddr returns the listen address of the server. 
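Putting the pieces above together, a hypothetical sketch of running an authenticated RPC server and minting a token its verifyAuth will accept. The all-zero HS256 key matches the throwaway signer used in this diff's tests and is not suitable for real deployments; the port is a placeholder:

```go
package main

import (
	"context"
	"fmt"

	"github.com/cristalhq/jwt"

	"github.com/celestiaorg/celestia-node/api/rpc"
	"github.com/celestiaorg/celestia-node/api/rpc/perms"
)

func main() {
	// throwaway HS256 signer, as in this diff's tests
	signer, err := jwt.NewHS256(make([]byte, 32))
	if err != nil {
		panic(err)
	}

	srv := rpc.NewServer("localhost", "26658", false, signer)
	if err := srv.Start(context.Background()); err != nil {
		panic(err)
	}
	defer srv.Stop(context.Background()) //nolint:errcheck

	// mint a read-only token that the server's verifyAuth will accept
	token, err := perms.NewTokenWithPerms(signer, perms.ReadPerms)
	if err != nil {
		panic(err)
	}
	fmt.Println("listening on", srv.ListenAddr(), "read token:", string(token))
}
```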
+func (s *Server) ListenAddr() string { + if s.listener == nil { + return "" + } + return s.listener.Addr().String() +} diff --git a/api/rpc_test.go b/api/rpc_test.go new file mode 100644 index 0000000000..ff38a42045 --- /dev/null +++ b/api/rpc_test.go @@ -0,0 +1,343 @@ +package api + +import ( + "context" + "encoding/json" + "reflect" + "strconv" + "testing" + "time" + + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cristalhq/jwt" + "github.com/golang/mock/gomock" + "github.com/libp2p/go-libp2p/core/network" + "github.com/stretchr/testify/require" + "go.uber.org/fx" + + "github.com/celestiaorg/celestia-node/api/rpc" + "github.com/celestiaorg/celestia-node/api/rpc/client" + "github.com/celestiaorg/celestia-node/api/rpc/perms" + daspkg "github.com/celestiaorg/celestia-node/das" + "github.com/celestiaorg/celestia-node/nodebuilder" + "github.com/celestiaorg/celestia-node/nodebuilder/blob" + blobMock "github.com/celestiaorg/celestia-node/nodebuilder/blob/mocks" + "github.com/celestiaorg/celestia-node/nodebuilder/da" + daMock "github.com/celestiaorg/celestia-node/nodebuilder/da/mocks" + "github.com/celestiaorg/celestia-node/nodebuilder/das" + dasMock "github.com/celestiaorg/celestia-node/nodebuilder/das/mocks" + "github.com/celestiaorg/celestia-node/nodebuilder/fraud" + fraudMock "github.com/celestiaorg/celestia-node/nodebuilder/fraud/mocks" + "github.com/celestiaorg/celestia-node/nodebuilder/header" + headerMock "github.com/celestiaorg/celestia-node/nodebuilder/header/mocks" + "github.com/celestiaorg/celestia-node/nodebuilder/node" + nodeMock "github.com/celestiaorg/celestia-node/nodebuilder/node/mocks" + "github.com/celestiaorg/celestia-node/nodebuilder/p2p" + p2pMock "github.com/celestiaorg/celestia-node/nodebuilder/p2p/mocks" + "github.com/celestiaorg/celestia-node/nodebuilder/share" + shareMock "github.com/celestiaorg/celestia-node/nodebuilder/share/mocks" + statemod "github.com/celestiaorg/celestia-node/nodebuilder/state" + stateMock "github.com/celestiaorg/celestia-node/nodebuilder/state/mocks" + "github.com/celestiaorg/celestia-node/state" +) + +func TestRPCCallsUnderlyingNode(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // generate dummy signer and sign admin perms token with it + signer, err := jwt.NewHS256(make([]byte, 32)) + require.NoError(t, err) + + nd, server := setupNodeWithAuthedRPC(t, signer) + url := nd.RPCServer.ListenAddr() + + adminToken, err := perms.NewTokenWithPerms(signer, perms.AllPerms) + require.NoError(t, err) + + // we need to run this a few times to prevent the race where the server is not yet started + var ( + rpcClient *client.Client + ) + for i := 0; i < 3; i++ { + time.Sleep(time.Second * 1) + rpcClient, err = client.NewClient(ctx, "http://"+url, string(adminToken)) + if err == nil { + t.Cleanup(rpcClient.Close) + break + } + } + require.NotNil(t, rpcClient) + require.NoError(t, err) + + expectedBalance := &state.Balance{ + Amount: sdk.NewInt(100), + Denom: "utia", + } + + server.State.EXPECT().Balance(gomock.Any()).Return(expectedBalance, nil) + + balance, err := rpcClient.State.Balance(ctx) + require.NoError(t, err) + require.Equal(t, expectedBalance, balance) +} + +// api contains all modules that are made available as the node's +// public API surface +type api struct { + Fraud fraud.Module + Header header.Module + State statemod.Module + Share share.Module + DAS das.Module + Node node.Module + P2P p2p.Module + Blob blob.Module + DA da.Module +} + +func TestModulesImplementFullAPI(t 
*testing.T) { + api := reflect.TypeOf(new(api)).Elem() + client := reflect.TypeOf(new(client.Client)).Elem() + for i := 0; i < client.NumField(); i++ { + module := client.Field(i) + switch module.Name { + case "closer": + // the "closers" field is not an actual module + continue + default: + internal, ok := module.Type.FieldByName("Internal") + require.True(t, ok, "module %s's API does not have an Internal field", module.Name) + for j := 0; j < internal.Type.NumField(); j++ { + impl := internal.Type.Field(j) + field, _ := api.FieldByName(module.Name) + method, _ := field.Type.MethodByName(impl.Name) + require.Equal(t, method.Type, impl.Type, "method %s does not match", impl.Name) + } + } + } +} + +func TestAuthedRPC(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // generate dummy signer and sign admin perms token with it + signer, err := jwt.NewHS256(make([]byte, 32)) + require.NoError(t, err) + + nd, server := setupNodeWithAuthedRPC(t, signer) + url := nd.RPCServer.ListenAddr() + + // create permissioned tokens + publicToken, err := perms.NewTokenWithPerms(signer, perms.DefaultPerms) + require.NoError(t, err) + readToken, err := perms.NewTokenWithPerms(signer, perms.ReadPerms) + require.NoError(t, err) + rwToken, err := perms.NewTokenWithPerms(signer, perms.ReadWritePerms) + require.NoError(t, err) + adminToken, err := perms.NewTokenWithPerms(signer, perms.AllPerms) + require.NoError(t, err) + + var tests = []struct { + perm int + token string + }{ + {perm: 1, token: string(publicToken)}, // public + {perm: 2, token: string(readToken)}, // read + {perm: 3, token: string(rwToken)}, // RW + {perm: 4, token: string(adminToken)}, // admin + } + + for i, tt := range tests { + t.Run(strconv.Itoa(i), func(t *testing.T) { + // we need to run this a few times to prevent the race where the server is not yet started + var rpcClient *client.Client + require.NoError(t, err) + for i := 0; i < 3; i++ { + time.Sleep(time.Second * 1) + rpcClient, err = client.NewClient(ctx, "http://"+url, tt.token) + if err == nil { + break + } + } + require.NotNil(t, rpcClient) + require.NoError(t, err) + + // 1. Test method with read-level permissions + expected := daspkg.SamplingStats{ + SampledChainHead: 100, + CatchupHead: 100, + NetworkHead: 1000, + Failed: nil, + Workers: nil, + Concurrency: 0, + CatchUpDone: true, + IsRunning: false, + } + if tt.perm > 1 { + server.Das.EXPECT().SamplingStats(gomock.Any()).Return(expected, nil) + stats, err := rpcClient.DAS.SamplingStats(ctx) + require.NoError(t, err) + require.Equal(t, expected, stats) + } else { + _, err := rpcClient.DAS.SamplingStats(ctx) + require.Error(t, err) + require.ErrorContains(t, err, "missing permission") + } + + // 2. Test method with write-level permissions + expectedResp := &state.TxResponse{} + if tt.perm > 2 { + server.State.EXPECT().Delegate(gomock.Any(), gomock.Any(), + gomock.Any(), gomock.Any(), gomock.Any()).Return(expectedResp, nil) + txResp, err := rpcClient.State.Delegate(ctx, + state.ValAddress{}, state.Int{}, state.Int{}, 0) + require.NoError(t, err) + require.Equal(t, expectedResp, txResp) + } else { + _, err := rpcClient.State.Delegate(ctx, + state.ValAddress{}, state.Int{}, state.Int{}, 0) + require.Error(t, err) + require.ErrorContains(t, err, "missing permission") + } + + // 3. 
Test method with admin-level permissions + expectedReachability := network.Reachability(3) + if tt.perm > 3 { + server.P2P.EXPECT().NATStatus(gomock.Any()).Return(expectedReachability, nil) + natstatus, err := rpcClient.P2P.NATStatus(ctx) + require.NoError(t, err) + require.Equal(t, expectedReachability, natstatus) + } else { + _, err := rpcClient.P2P.NATStatus(ctx) + require.Error(t, err) + require.ErrorContains(t, err, "missing permission") + } + + rpcClient.Close() + }) + } +} + +func TestAllReturnValuesAreMarshalable(t *testing.T) { + ra := reflect.TypeOf(new(api)).Elem() + for i := 0; i < ra.NumMethod(); i++ { + m := ra.Method(i) + for j := 0; j < m.Type.NumOut(); j++ { + implementsMarshaler(t, m.Type.Out(j)) + } + } + // NOTE: see comment above api interface definition. + na := reflect.TypeOf(new(node.Module)).Elem() + for i := 0; i < na.NumMethod(); i++ { + m := na.Method(i) + for j := 0; j < m.Type.NumOut(); j++ { + implementsMarshaler(t, m.Type.Out(j)) + } + } +} + +func implementsMarshaler(t *testing.T, typ reflect.Type) { + // the passed type may already implement json.Marshaler and we don't need to go deeper + if typ.Implements(reflect.TypeOf(new(json.Marshaler)).Elem()) { + return + } + + switch typ.Kind() { + case reflect.Struct: + // a user defined struct could implement json.Marshaler on the pointer receiver, so check there + // first. note that the "non-pointer" receiver is checked before the switch. + pointerType := reflect.TypeOf(reflect.New(typ).Elem().Addr().Interface()) + if pointerType.Implements(reflect.TypeOf(new(json.Marshaler)).Elem()) { + return + } + // struct doesn't implement the interface itself, check all individual fields + reflect.New(typ).Pointer() + for i := 0; i < typ.NumField(); i++ { + implementsMarshaler(t, typ.Field(i).Type) + } + return + case reflect.Map: + implementsMarshaler(t, typ.Elem()) + implementsMarshaler(t, typ.Key()) + case reflect.Ptr: + fallthrough + case reflect.Array: + fallthrough + case reflect.Slice: + fallthrough + case reflect.Chan: + implementsMarshaler(t, typ.Elem()) + case reflect.Interface: + if typ != reflect.TypeOf(new(interface{})).Elem() && typ != reflect.TypeOf(new(error)).Elem() { + require.True( + t, + typ.Implements(reflect.TypeOf(new(json.Marshaler)).Elem()), + "type %s does not implement json.Marshaler", typ.String(), + ) + } + default: + return + } + +} + +// setupNodeWithAuthedRPC sets up a node and overrides its JWT +// signer with the given signer. +func setupNodeWithAuthedRPC(t *testing.T, auth jwt.Signer) (*nodebuilder.Node, *mockAPI) { + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + ctrl := gomock.NewController(t) + + mockAPI := &mockAPI{ + stateMock.NewMockModule(ctrl), + shareMock.NewMockModule(ctrl), + fraudMock.NewMockModule(ctrl), + headerMock.NewMockModule(ctrl), + dasMock.NewMockModule(ctrl), + p2pMock.NewMockModule(ctrl), + nodeMock.NewMockModule(ctrl), + blobMock.NewMockModule(ctrl), + daMock.NewMockModule(ctrl), + } + + // given the behavior of fx.Invoke, this invoke will be called last as it is added at the root + // level module. For further information, check the documentation on fx.Invoke. 
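The fx.Replace/fx.Decorate remark below is worth a small illustration: fx.Decorate rewrites an already-provided dependency for all downstream consumers, which is how the test swaps in its JWT signer without touching the node's module graph. A minimal runnable sketch (the string dependency stands in for the signer):

```go
package main

import (
	"fmt"

	"go.uber.org/fx"
)

func main() {
	fx.New(
		fx.NopLogger,
		fx.Provide(func() string { return "default-signer" }),
		// Decorate replaces the provided value for everything downstream,
		// the same trick setupNodeWithAuthedRPC uses below
		fx.Decorate(func(string) string { return "test-signer" }),
		fx.Invoke(func(s string) { fmt.Println("using:", s) }),
	)
	// prints "using: test-signer": the decorated value wins
}
```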
+ invokeRPC := fx.Invoke(func(srv *rpc.Server) { + srv.RegisterService("fraud", mockAPI.Fraud, &fraud.API{}) + srv.RegisterService("das", mockAPI.Das, &das.API{}) + srv.RegisterService("header", mockAPI.Header, &header.API{}) + srv.RegisterService("state", mockAPI.State, &statemod.API{}) + srv.RegisterService("share", mockAPI.Share, &share.API{}) + srv.RegisterService("p2p", mockAPI.P2P, &p2p.API{}) + srv.RegisterService("node", mockAPI.Node, &node.API{}) + srv.RegisterService("blob", mockAPI.Blob, &blob.API{}) + srv.RegisterService("da", mockAPI.DA, &da.API{}) + }) + // fx.Replace does not work here, but fx.Decorate does + nd := nodebuilder.TestNode(t, node.Full, invokeRPC, fx.Decorate(func() (jwt.Signer, error) { + return auth, nil + })) + // start node + err := nd.Start(ctx) + require.NoError(t, err) + t.Cleanup(func() { + err = nd.Stop(ctx) + require.NoError(t, err) + }) + return nd, mockAPI +} + +type mockAPI struct { + State *stateMock.MockModule + Share *shareMock.MockModule + Fraud *fraudMock.MockModule + Header *headerMock.MockModule + Das *dasMock.MockModule + P2P *p2pMock.MockModule + Node *nodeMock.MockModule + Blob *blobMock.MockModule + DA *daMock.MockModule +} diff --git a/blob/blob.go b/blob/blob.go new file mode 100644 index 0000000000..9843441dd2 --- /dev/null +++ b/blob/blob.go @@ -0,0 +1,203 @@ +package blob + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" + + "github.com/celestiaorg/celestia-app/pkg/appconsts" + "github.com/celestiaorg/celestia-app/pkg/shares" + "github.com/celestiaorg/celestia-app/x/blob/types" + "github.com/celestiaorg/nmt" + + "github.com/celestiaorg/celestia-node/share" +) + +// Commitment is a Merkle Root of the subtree built from shares of the Blob. +// It is computed by splitting the blob into shares and building the Merkle subtree to be included +// after Submit. +type Commitment []byte + +func (com Commitment) String() string { + return string(com) +} + +// Equal ensures that commitments are the same +func (com Commitment) Equal(c Commitment) bool { + return bytes.Equal(com, c) +} + +// Proof is a collection of nmt.Proofs that verifies the inclusion of the data. +type Proof []*nmt.Proof + +func (p Proof) Len() int { return len(p) } + +func (p Proof) MarshalJSON() ([]byte, error) { + proofs := make([]string, 0, len(p)) + for _, proof := range p { + proofBytes, err := proof.MarshalJSON() + if err != nil { + return nil, err + } + proofs = append(proofs, string(proofBytes)) + } + return json.Marshal(proofs) +} + +func (p *Proof) UnmarshalJSON(b []byte) error { + var proofs []string + if err := json.Unmarshal(b, &proofs); err != nil { + return err + } + for _, proof := range proofs { + var nmtProof nmt.Proof + if err := nmtProof.UnmarshalJSON([]byte(proof)); err != nil { + return err + } + *p = append(*p, &nmtProof) + } + return nil +} + +// equal is a temporary method that compares two proofs. +// should be removed in BlobService V1. 
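Since a Commitment is derived deterministically from the namespace, data and share version, two identically constructed blobs must produce equal commitments. A small sketch using only constructors that appear in this diff:

```go
package main

import (
	"fmt"

	"github.com/celestiaorg/celestia-node/blob"
	"github.com/celestiaorg/celestia-node/share"
)

func main() {
	// a syntactically valid, illustrative v0 namespace (10 bytes)
	ns, err := share.NewBlobNamespaceV0([]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10})
	if err != nil {
		panic(err)
	}

	a, err := blob.NewBlobV0(ns, []byte("identical payload"))
	if err != nil {
		panic(err)
	}
	b, err := blob.NewBlobV0(ns, []byte("identical payload"))
	if err != nil {
		panic(err)
	}

	// commitments depend only on namespace, data and share version,
	// so two identical blobs must agree
	fmt.Println(a.Commitment.Equal(b.Commitment)) // true
}
```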
+func (p Proof) equal(input Proof) error {
+	if p.Len() != input.Len() {
+		return ErrInvalidProof
+	}
+
+	for i, proof := range p {
+		pNodes := proof.Nodes()
+		inputNodes := input[i].Nodes()
+		for j, node := range pNodes {
+			if !bytes.Equal(node, inputNodes[j]) {
+				return ErrInvalidProof
+			}
+		}
+
+		if proof.Start() != input[i].Start() || proof.End() != input[i].End() {
+			return ErrInvalidProof
+		}
+
+		if !bytes.Equal(proof.LeafHash(), input[i].LeafHash()) {
+			return ErrInvalidProof
+		}
+	}
+	return nil
+}
+
+// Blob represents any application-specific binary data that anyone can submit to Celestia.
+type Blob struct {
+	types.Blob `json:"blob"`
+
+	Commitment Commitment `json:"commitment"`
+
+	// the celestia-node's namespace type;
+	// stored to avoid converting to and from the app's type
+	namespace share.Namespace
+}
+
+// NewBlobV0 constructs a new blob from the provided Namespace and data.
+// The blob will be formatted as v0 shares.
+func NewBlobV0(namespace share.Namespace, data []byte) (*Blob, error) {
+	return NewBlob(appconsts.ShareVersionZero, namespace, data)
+}
+
+// NewBlob constructs a new blob from the provided Namespace, data and share version.
+func NewBlob(shareVersion uint8, namespace share.Namespace, data []byte) (*Blob, error) {
+	if len(data) == 0 || len(data) > appconsts.DefaultMaxBytes {
+		return nil, fmt.Errorf("blob data must be > 0 && <= %d, but it was %d bytes", appconsts.DefaultMaxBytes, len(data))
+	}
+	if err := namespace.ValidateForBlob(); err != nil {
+		return nil, err
+	}
+
+	blob := tmproto.Blob{
+		NamespaceId:      namespace.ID(),
+		Data:             data,
+		ShareVersion:     uint32(shareVersion),
+		NamespaceVersion: uint32(namespace.Version()),
+	}
+
+	com, err := types.CreateCommitment(&blob)
+	if err != nil {
+		return nil, err
+	}
+	return &Blob{Blob: blob, Commitment: com, namespace: namespace}, nil
+}
+
+// Namespace returns blob's namespace.
+func (b *Blob) Namespace() share.Namespace {
+	return b.namespace
+}
+
+type jsonBlob struct {
+	Namespace    share.Namespace `json:"namespace"`
+	Data         []byte          `json:"data"`
+	ShareVersion uint32          `json:"share_version"`
+	Commitment   Commitment      `json:"commitment"`
+}
+
+func (b *Blob) MarshalJSON() ([]byte, error) {
+	blob := &jsonBlob{
+		Namespace:    b.Namespace(),
+		Data:         b.Data,
+		ShareVersion: b.ShareVersion,
+		Commitment:   b.Commitment,
+	}
+	return json.Marshal(blob)
+}
+
+func (b *Blob) UnmarshalJSON(data []byte) error {
+	var blob jsonBlob
+	err := json.Unmarshal(data, &blob)
+	if err != nil {
+		return err
+	}
+
+	b.Blob.NamespaceVersion = uint32(blob.Namespace.Version())
+	b.Blob.NamespaceId = blob.Namespace.ID()
+	b.Blob.Data = blob.Data
+	b.Blob.ShareVersion = blob.ShareVersion
+	b.Commitment = blob.Commitment
+	b.namespace = blob.Namespace
+	return nil
+}
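For orientation, a minimal editor-added sketch (not part of the patch) of the construction and JSON round-trip defined above; the namespace bytes and payload are illustrative, and share.NewBlobNamespaceV0 is the same helper this patch uses in blobtest:

// Sketch only: same-package code, assuming blob.go as defined above.
func exampleBlobRoundTrip() error {
	ns, err := share.NewBlobNamespaceV0([]byte("example")) // 7 arbitrary bytes, as in blobtest
	if err != nil {
		return err
	}
	b, err := NewBlobV0(ns, []byte("arbitrary payload"))
	if err != nil {
		return err
	}
	raw, err := b.MarshalJSON()
	if err != nil {
		return err
	}
	decoded := new(Blob)
	if err := decoded.UnmarshalJSON(raw); err != nil {
		return err
	}
	// decoded.Commitment.Equal(b.Commitment) now holds.
	return nil
}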
+// buildBlobsIfExist takes shares and tries to build Blobs from them.
+// It builds blobs until either appShares is empty or the first incomplete blob
+// appears, in which case it returns all fully built blobs plus the remaining shares.
+func buildBlobsIfExist(appShares []shares.Share) ([]*Blob, []shares.Share, error) {
+	if len(appShares) == 0 {
+		return nil, nil, errors.New("empty shares received")
+	}
+	blobs := make([]*Blob, 0, len(appShares))
+	for {
+		length, err := appShares[0].SequenceLen()
+		if err != nil {
+			return nil, nil, err
+		}
+
+		amount := shares.SparseSharesNeeded(length)
+		if amount > len(appShares) {
+			return blobs, appShares, nil
+		}
+
+		b, err := parseShares(appShares[:amount])
+		if err != nil {
+			return nil, nil, err
+		}
+
+		// only one blob will be created because we passed the exact number of shares
+		blobs = append(blobs, b[0])
+
+		if amount == len(appShares) {
+			return blobs, nil, nil
+		}
+		appShares = appShares[amount:]
+	}
+}
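The loop above relies on the first share of a sequence encoding the blob's total byte length, from which the number of sparse shares can be derived. An editor-added sketch of that accounting (not part of the patch; the byte counts are approximate v0 figures, shown for illustration only):

// SparseSharesNeeded is the same celestia-app helper used above.
func exampleSharesNeeded() {
	sequenceLen := uint32(1000) // total blob bytes, as read via SequenceLen()
	needed := shares.SparseSharesNeeded(sequenceLen)
	// Under v0 share sizes (roughly 478 payload bytes in the first share and
	// 482 in each continuation share), 1000 bytes need 3 shares.
	fmt.Println(needed)
}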
diff --git a/blob/blob_test.go b/blob/blob_test.go
new file mode 100644
index 0000000000..85486ad125
--- /dev/null
+++ b/blob/blob_test.go
@@ -0,0 +1,90 @@
+package blob
+
+import (
+	"reflect"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+	"github.com/tendermint/tendermint/types"
+
+	apptypes "github.com/celestiaorg/celestia-app/x/blob/types"
+
+	"github.com/celestiaorg/celestia-node/blob/blobtest"
+)
+
+func TestBlob(t *testing.T) {
+	appBlobs, err := blobtest.GenerateV0Blobs([]int{1}, false)
+	require.NoError(t, err)
+	blob, err := convertBlobs(appBlobs...)
+	require.NoError(t, err)
+
+	var test = []struct {
+		name        string
+		expectedRes func(t *testing.T)
+	}{
+		{
+			name: "new blob",
+			expectedRes: func(t *testing.T) {
+				require.NotEmpty(t, blob)
+				require.NotEmpty(t, blob[0].Namespace())
+				require.NotEmpty(t, blob[0].Data)
+				require.NotEmpty(t, blob[0].Commitment)
+			},
+		},
+		{
+			name: "compare commitments",
+			expectedRes: func(t *testing.T) {
+				comm, err := apptypes.CreateCommitment(&blob[0].Blob)
+				require.NoError(t, err)
+				assert.Equal(t, blob[0].Commitment, Commitment(comm))
+			},
+		},
+		{
+			name: "verify namespace",
+			expectedRes: func(t *testing.T) {
+				ns := blob[0].Namespace().ToAppNamespace()
+				require.NoError(t, apptypes.ValidateBlobNamespace(ns))
+			},
+		},
+		{
+			name: "shares to blobs",
+			expectedRes: func(t *testing.T) {
+				sh, err := BlobsToShares(blob...)
+				require.NoError(t, err)
+				b, err := SharesToBlobs(sh)
+				require.NoError(t, err)
+				assert.Equal(t, len(b), 1)
+				assert.Equal(t, blob[0].Commitment, b[0].Commitment)
+			},
+		},
+		{
+			name: "blob marshaling",
+			expectedRes: func(t *testing.T) {
+				data, err := blob[0].MarshalJSON()
+				require.NoError(t, err)
+
+				newBlob := &Blob{}
+				require.NoError(t, newBlob.UnmarshalJSON(data))
+				require.True(t, reflect.DeepEqual(blob[0], newBlob))
+			},
+		},
+	}
+
+	for _, tt := range test {
+		t.Run(tt.name, tt.expectedRes)
+	}
+}
+
+func convertBlobs(appBlobs ...types.Blob) ([]*Blob, error) {
+	blobs := make([]*Blob, 0, len(appBlobs))
+	for _, b := range appBlobs {
+		blob, err := NewBlob(b.ShareVersion, append([]byte{b.NamespaceVersion}, b.NamespaceID...), b.Data)
+		if err != nil {
+			return nil, err
+		}
+		blobs = append(blobs, blob)
+	}
+	return blobs, nil
+}
diff --git a/blob/blobtest/testing.go b/blob/blobtest/testing.go
new file mode 100644
index 0000000000..a22f22f790
--- /dev/null
+++ b/blob/blobtest/testing.go
@@ -0,0 +1,38 @@
+package blobtest
+
+import (
+	tmrand "github.com/tendermint/tendermint/libs/rand"
+	"github.com/tendermint/tendermint/types"
+
+	"github.com/celestiaorg/celestia-app/pkg/appconsts"
+	"github.com/celestiaorg/celestia-app/pkg/shares"
+	"github.com/celestiaorg/celestia-app/test/util/testfactory"
+
+	"github.com/celestiaorg/celestia-node/share"
+)
+
+// GenerateV0Blobs is a test utility producing v0 share formatted blobs with the
+// requested sizes and random namespaces.
+func GenerateV0Blobs(sizes []int, sameNamespace bool) ([]types.Blob, error) {
+	blobs := make([]types.Blob, 0, len(sizes))
+
+	for _, size := range sizes {
+		size := rawBlobSize(appconsts.FirstSparseShareContentSize * size)
+		appBlob := testfactory.GenerateRandomBlob(size)
+		if !sameNamespace {
+			nid, err := share.NewBlobNamespaceV0(tmrand.Bytes(7))
+			if err != nil {
+				return nil, err
+			}
+			appBlob.NamespaceVersion = nid[0]
+			appBlob.NamespaceID = nid[1:]
+		}
+
+		blobs = append(blobs, appBlob)
+	}
+	return blobs, nil
+}
+
+func rawBlobSize(totalSize int) int {
+	return totalSize - shares.DelimLen(uint64(totalSize))
+}
diff --git a/blob/helper.go b/blob/helper.go
new file mode 100644
index 0000000000..72a56c7889
--- /dev/null
+++ b/blob/helper.go
@@ -0,0 +1,86 @@
+package blob
+
+import (
+	"bytes"
+	"sort"
+
+	"github.com/tendermint/tendermint/types"
+
+	"github.com/celestiaorg/celestia-app/pkg/shares"
+
+	"github.com/celestiaorg/celestia-node/share"
+)
+
+// SharesToBlobs takes raw shares and converts them to Blobs.
+func SharesToBlobs(rawShares []share.Share) ([]*Blob, error) {
+	if len(rawShares) == 0 {
+		return nil, ErrBlobNotFound
+	}
+
+	appShares, err := toAppShares(rawShares...)
+	if err != nil {
+		return nil, err
+	}
+	return parseShares(appShares)
+}
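Taken together, the two helpers form a round-trip, which the "shares to blobs" test case above exercises; an editor-added sketch (not part of the patch), with parseShares and BlobsToShares defined next in this file:

func exampleHelperRoundTrip(blobs []*Blob) error {
	rawShares, err := BlobsToShares(blobs...) // defined below
	if err != nil {
		return err
	}
	recovered, err := SharesToBlobs(rawShares)
	if err != nil {
		return err
	}
	// recovered carries the same commitments as blobs, in namespace order.
	_ = recovered
	return nil
}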
+// parseShares takes shares and converts them to []*Blob.
+func parseShares(appShrs []shares.Share) ([]*Blob, error) {
+	shareSequences, err := shares.ParseShares(appShrs, true)
+	if err != nil {
+		return nil, err
+	}
+
+	// ensure that at least one share sequence was parsed
+	if len(shareSequences) == 0 {
+		return nil, ErrBlobNotFound
+	}
+
+	blobs := make([]*Blob, len(shareSequences))
+	for i, sequence := range shareSequences {
+		data, err := sequence.RawData()
+		if err != nil {
+			return nil, err
+		}
+		if len(data) == 0 {
+			continue
+		}
+
+		shareVersion, err := sequence.Shares[0].Version()
+		if err != nil {
+			return nil, err
+		}
+
+		blob, err := NewBlob(shareVersion, sequence.Namespace.Bytes(), data)
+		if err != nil {
+			return nil, err
+		}
+		blobs[i] = blob
+	}
+	return blobs, nil
+}
+
+// BlobsToShares accepts blobs and converts them to Shares.
+func BlobsToShares(blobs ...*Blob) ([]share.Share, error) {
+	b := make([]types.Blob, len(blobs))
+	for i, blob := range blobs {
+		namespace := blob.Namespace()
+		b[i] = types.Blob{
+			NamespaceVersion: namespace[0],
+			NamespaceID:      namespace[1:],
+			Data:             blob.Data,
+			ShareVersion:     uint8(blob.ShareVersion),
+		}
+	}
+
+	sort.Slice(b, func(i, j int) bool {
+		return bytes.Compare(b[i].NamespaceID, b[j].NamespaceID) < 0
+	})
+
+	rawShares, err := shares.SplitBlobs(b...)
+	if err != nil {
+		return nil, err
+	}
+	return shares.ToBytes(rawShares), nil
+}
diff --git a/blob/service.go b/blob/service.go
new file mode 100644
index 0000000000..fc1d630e62
--- /dev/null
+++ b/blob/service.go
@@ -0,0 +1,365 @@
+package blob
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"math"
+	"sync"
+
+	sdkmath "cosmossdk.io/math"
+	"github.com/cosmos/cosmos-sdk/types"
+	auth "github.com/cosmos/cosmos-sdk/x/auth/types"
+	logging "github.com/ipfs/go-log/v2"
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/codes"
+	"go.opentelemetry.io/otel/trace"
+
+	"github.com/celestiaorg/celestia-app/pkg/appconsts"
+	"github.com/celestiaorg/celestia-app/pkg/shares"
+	blobtypes "github.com/celestiaorg/celestia-app/x/blob/types"
+
+	"github.com/celestiaorg/celestia-node/header"
+	"github.com/celestiaorg/celestia-node/libs/utils"
+	"github.com/celestiaorg/celestia-node/share"
+)
+
+var (
+	ErrBlobNotFound = errors.New("blob: not found")
+	ErrInvalidProof = errors.New("blob: invalid proof")
+
+	log    = logging.Logger("blob")
+	tracer = otel.Tracer("blob/service")
+)
+
+// GasPrice represents the amount to be paid per gas unit. Fee is set by
+// multiplying GasPrice by GasLimit, which is determined by the blob sizes.
+type GasPrice float64
+
+// DefaultGasPrice returns the default gas price, letting the node automatically
+// determine the Fee based on the passed blob sizes.
+func DefaultGasPrice() GasPrice {
+	return -1.0
+}
+
+// Submitter is an interface that allows submitting blobs to the celestia-core. It is used to
+// avoid a circular dependency between the blob and the state package, since the state package needs
+// the blob.Blob type for this signature.
+type Submitter interface {
+	SubmitPayForBlob(ctx context.Context, fee sdkmath.Int, gasLim uint64, blobs []*Blob) (*types.TxResponse, error)
+}
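An editor-added sketch (not part of the patch) of the fee arithmetic Submit performs further below when a non-negative gas price is supplied; the estimator and constants are the same ones Submit uses, while the sizes are illustrative:

func exampleFee(gasPrice GasPrice) int64 {
	blobSizes := []uint32{1000, 2000} // per-blob payload sizes in bytes
	gasLimit := blobtypes.EstimateGas(blobSizes, appconsts.DefaultGasPerBlobByte, auth.DefaultTxSizeCostPerByte)
	// Fee is the gas limit scaled by the price, rounded up.
	return int64(math.Ceil(float64(gasPrice) * float64(gasLimit)))
}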
+type Service struct {
+	// blobSubmitter dials the given celestia-core endpoint to submit blobs.
+	blobSubmitter Submitter
+	// shareGetter retrieves the EDS to fetch all shares from the requested header.
+	shareGetter share.Getter
+	// headerGetter fetches the header at the provided height
+	headerGetter func(context.Context, uint64) (*header.ExtendedHeader, error)
+}
+
+func NewService(
+	submitter Submitter,
+	getter share.Getter,
+	headerGetter func(context.Context, uint64) (*header.ExtendedHeader, error),
+) *Service {
+	return &Service{
+		blobSubmitter: submitter,
+		shareGetter:   getter,
+		headerGetter:  headerGetter,
+	}
+}
+
+// SubmitOptions contains the fee and gas limit information used to configure the
+// Submit request.
+type SubmitOptions struct {
+	Fee      int64
+	GasLimit uint64
+}
+
+// DefaultSubmitOptions returns the default fee and gas limit values.
+func DefaultSubmitOptions() *SubmitOptions {
+	return &SubmitOptions{
+		Fee:      -1,
+		GasLimit: 0,
+	}
+}
+
+// Submit sends a PFB transaction and reports the height at which it was included.
+// It allows submitting multiple Blobs atomically and synchronously,
+// uses the default wallet registered on the Node,
+// and handles gas estimation and fee calculation.
+func (s *Service) Submit(ctx context.Context, blobs []*Blob, gasPrice GasPrice) (uint64, error) {
+	log.Debugw("submitting blobs", "amount", len(blobs))
+
+	options := DefaultSubmitOptions()
+	if gasPrice >= 0 {
+		blobSizes := make([]uint32, len(blobs))
+		for i, blob := range blobs {
+			blobSizes[i] = uint32(len(blob.Data))
+		}
+		options.GasLimit = blobtypes.EstimateGas(blobSizes, appconsts.DefaultGasPerBlobByte, auth.DefaultTxSizeCostPerByte)
+		options.Fee = types.NewInt(int64(math.Ceil(float64(gasPrice) * float64(options.GasLimit)))).Int64()
+	}
+
+	resp, err := s.blobSubmitter.SubmitPayForBlob(ctx, types.NewInt(options.Fee), options.GasLimit, blobs)
+	if err != nil {
+		return 0, err
+	}
+	return uint64(resp.Height), nil
+}
+
+// Get retrieves the blob with the given commitment under the given namespace at the given height.
+func (s *Service) Get(ctx context.Context, height uint64, ns share.Namespace, commitment Commitment) (*Blob, error) {
+	blob, _, err := s.getByCommitment(ctx, height, ns, commitment)
+	if err != nil {
+		return nil, err
+	}
+	return blob, nil
+}
+
+// GetProof retrieves the blob with the given commitment under the given namespace at the given
+// height and returns its Proof.
+func (s *Service) GetProof(
+	ctx context.Context,
+	height uint64,
+	namespace share.Namespace,
+	commitment Commitment,
+) (*Proof, error) {
+	_, proof, err := s.getByCommitment(ctx, height, namespace, commitment)
+	if err != nil {
+		return nil, err
+	}
+	return proof, nil
+}
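A typical submit-then-fetch flow against this Service, as an editor-added sketch (not part of the patch); svc and b are assumed to exist:

func exampleSubmitAndGet(ctx context.Context, svc *Service, b *Blob) (*Blob, error) {
	// DefaultGasPrice() lets the service estimate gas and fee automatically.
	height, err := svc.Submit(ctx, []*Blob{b}, DefaultGasPrice())
	if err != nil {
		return nil, err
	}
	return svc.Get(ctx, height, b.Namespace(), b.Commitment)
}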
+// GetAll returns all blobs under the given namespaces at the given height.
+// GetAll may return both blobs and an error if some of the requests failed.
+func (s *Service) GetAll(ctx context.Context, height uint64, namespaces []share.Namespace) ([]*Blob, error) {
+	header, err := s.headerGetter(ctx, height)
+	if err != nil {
+		return nil, err
+	}
+
+	var (
+		resultBlobs = make([][]*Blob, len(namespaces))
+		resultErr   = make([]error, len(namespaces))
+	)
+
+	for _, ns := range namespaces {
+		log.Debugw("performing GetAll request", "namespace", ns.String(), "height", height)
+	}
+
+	wg := sync.WaitGroup{}
+	for i, namespace := range namespaces {
+		wg.Add(1)
+		go func(i int, namespace share.Namespace) {
+			defer wg.Done()
+			blobs, err := s.getBlobs(ctx, namespace, header)
+			if err != nil {
+				resultErr[i] = fmt.Errorf("getting blobs for namespace(%s): %w", namespace.String(), err)
+				return
+			}
+
+			log.Debugw("receiving blobs", "height", height, "total", len(blobs))
+			resultBlobs[i] = blobs
+		}(i, namespace)
+	}
+	wg.Wait()
+
+	blobs := make([]*Blob, 0)
+	for _, resBlobs := range resultBlobs {
+		if len(resBlobs) > 0 {
+			blobs = append(blobs, resBlobs...)
+		}
+	}
+
+	if len(blobs) == 0 {
+		resultErr = append(resultErr, ErrBlobNotFound)
+	}
+	return blobs, errors.Join(resultErr...)
+}
+
+// Included verifies that the blob was included at a specific height.
+// To ensure that the blob was included at a specific height, we need to:
+// 1. verify the provided commitment by recomputing it;
+// 2. verify the provided Proof against the subtree roots that were used in 1.
+func (s *Service) Included(
+	ctx context.Context,
+	height uint64,
+	namespace share.Namespace,
+	proof *Proof,
+	com Commitment,
+) (_ bool, err error) {
+	ctx, span := tracer.Start(ctx, "included")
+	defer func() {
+		utils.SetStatusAndEnd(span, err)
+	}()
+	// In the current implementation, LNs will have to download all shares to recompute the commitment.
+	// To achieve 1. we need to modify the Proof structure to store all subtree roots that were
+	// involved in commitment creation and then call `merkle.HashFromByteSlices` (tendermint package).
+	// nmt.Proof verifies share inclusion by recomputing row roots, so, theoretically, we can do
+	// the same but using subtree roots. For this case, we need an extra method in nmt.Proof
+	// that will perform all reconstructions,
+	// but we have to guarantee that all our stored subtree roots will be on the same height (e.g. one
+	// level above shares).
+	// TODO(@vgonkivs): rework the implementation to perform all verification without network requests.
+	_, resProof, err := s.getByCommitment(ctx, height, namespace, com)
+	switch err {
+	case nil:
+	case ErrBlobNotFound:
+		return false, nil
+	default:
+		return false, err
+	}
+	return true, resProof.equal(*proof)
+}
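The pair GetProof/Included enables a prove-and-verify round-trip; an editor-added sketch (not part of the patch), with all inputs assumed:

func exampleVerifyInclusion(
	ctx context.Context,
	svc *Service,
	height uint64,
	ns share.Namespace,
	com Commitment,
) (bool, error) {
	proof, err := svc.GetProof(ctx, height, ns, com)
	if err != nil {
		return false, err
	}
	// Included returns ErrInvalidProof when the supplied proof does not match
	// the one recomputed from the network.
	return svc.Included(ctx, height, ns, proof, com)
}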
+// getByCommitment retrieves the DAH row by row, fetching shares and constructing blobs in order to
+// compare Commitments. Retrieval stops once the requested blob/proof is found.
+func (s *Service) getByCommitment(
+	ctx context.Context,
+	height uint64,
+	namespace share.Namespace,
+	commitment Commitment,
+) (_ *Blob, _ *Proof, err error) {
+	log.Infow("requesting blob",
+		"height", height,
+		"namespace", namespace.String())
+
+	ctx, span := tracer.Start(ctx, "get-by-commitment")
+	defer func() {
+		utils.SetStatusAndEnd(span, err)
+	}()
+	span.SetAttributes(
+		attribute.Int64("height", int64(height)),
+		attribute.String("commitment", string(commitment)),
+	)
+
+	getCtx, headerGetterSpan := tracer.Start(ctx, "header-getter")
+
+	header, err := s.headerGetter(getCtx, height)
+	if err != nil {
+		headerGetterSpan.SetStatus(codes.Error, err.Error())
+		return nil, nil, err
+	}
+
+	headerGetterSpan.SetStatus(codes.Ok, "")
+	headerGetterSpan.AddEvent("received eds", trace.WithAttributes(
+		attribute.Int64("eds-size", int64(len(header.DAH.RowRoots)))))
+
+	getCtx, getSharesSpan := tracer.Start(ctx, "get-shares-by-namespace")
+
+	namespacedShares, err := s.shareGetter.GetSharesByNamespace(getCtx, header, namespace)
+	if err != nil {
+		if errors.Is(err, share.ErrNotFound) {
+			err = ErrBlobNotFound
+		}
+		getSharesSpan.SetStatus(codes.Error, err.Error())
+		return nil, nil, err
+	}
+
+	getSharesSpan.SetStatus(codes.Ok, "")
+	getSharesSpan.AddEvent("received shares", trace.WithAttributes(
+		attribute.Int64("eds-size", int64(len(header.DAH.RowRoots)))))
+
+	var (
+		rawShares = make([]shares.Share, 0)
+		proofs    = make(Proof, 0)
+		// spansMultipleRows specifies whether the blob spans multiple rows
+		spansMultipleRows bool
+	)
+
+	for _, row := range namespacedShares {
+		if len(row.Shares) == 0 {
+			// the above condition means that we've encountered an Absence Proof.
+			// This Proof proves that the namespace was not found in the DAH, so
+			// we can return `ErrBlobNotFound`.
+			return nil, nil, ErrBlobNotFound
+		}
+
+		appShares, err := toAppShares(row.Shares...)
+		if err != nil {
+			return nil, nil, err
+		}
+		rawShares = append(rawShares, appShares...)
+		proofs = append(proofs, row.Proof)
+
+		var blobs []*Blob
+		blobs, rawShares, err = buildBlobsIfExist(rawShares)
+		if err != nil {
+			return nil, nil, err
+		}
+		for _, b := range blobs {
+			if b.Commitment.Equal(commitment) {
+				span.AddEvent("blob reconstructed")
+				return b, &proofs, nil
+			}
+			// Entering this branch means that the data from the last row
+			// was insufficient to create a complete blob. As a result,
+			// the first blob received spans two rows and includes proofs
+			// for both of these rows. All other blobs in the result will relate
+			// to the current row and have a single proof.
+			if spansMultipleRows {
+				spansMultipleRows = false
+				// leave the proof only for the current row
+				proofs = proofs[len(proofs)-1:]
+			}
+		}
+
+		if len(rawShares) > 0 {
+			spansMultipleRows = true
+			continue
+		}
+		proofs = nil
+	}
+
+	err = ErrBlobNotFound
+	if len(rawShares) > 0 {
+		err = fmt.Errorf("incomplete blob with the "+
+			"namespace: %s detected at %d: %w", namespace.String(), height, err)
+		log.Error(err)
+	}
+	return nil, nil, err
+}
+
+// getBlobs retrieves the DAH and fetches all shares from the requested Namespace and converts
+// them to Blobs.
+func (s *Service) getBlobs( + ctx context.Context, + namespace share.Namespace, + header *header.ExtendedHeader, +) (_ []*Blob, err error) { + ctx, span := tracer.Start(ctx, "get-blobs") + defer func() { + utils.SetStatusAndEnd(span, err) + }() + namespacedShares, err := s.shareGetter.GetSharesByNamespace(ctx, header, namespace) + if err != nil { + return nil, err + } + return SharesToBlobs(namespacedShares.Flatten()) +} + +// toAppShares converts node's raw shares to the app shares, skipping padding +func toAppShares(shrs ...share.Share) ([]shares.Share, error) { + appShrs := make([]shares.Share, 0, len(shrs)) + for _, shr := range shrs { + bShare, err := shares.NewShare(shr) + if err != nil { + return nil, err + } + + ok, err := bShare.IsPadding() + if err != nil { + return nil, err + } + if ok { + continue + } + + appShrs = append(appShrs, *bShare) + } + return appShrs, nil +} diff --git a/blob/service_test.go b/blob/service_test.go new file mode 100644 index 0000000000..3e22f887af --- /dev/null +++ b/blob/service_test.go @@ -0,0 +1,456 @@ +package blob + +import ( + "bytes" + "context" + "crypto/sha256" + "encoding/json" + "testing" + "time" + + ds "github.com/ipfs/go-datastore" + ds_sync "github.com/ipfs/go-datastore/sync" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + tmrand "github.com/tendermint/tendermint/libs/rand" + + "github.com/celestiaorg/celestia-app/pkg/appconsts" + "github.com/celestiaorg/celestia-app/pkg/shares" + "github.com/celestiaorg/go-header/store" + + "github.com/celestiaorg/celestia-node/blob/blobtest" + "github.com/celestiaorg/celestia-node/header" + "github.com/celestiaorg/celestia-node/header/headertest" + "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/share/getters" + "github.com/celestiaorg/celestia-node/share/ipld" +) + +func TestBlobService_Get(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) + t.Cleanup(cancel) + var ( + blobSize0 = 18 + blobSize1 = 14 + blobSize2 = 20 + blobSize3 = 12 + ) + + appBlobs, err := blobtest.GenerateV0Blobs([]int{blobSize0, blobSize1}, false) + require.NoError(t, err) + blobs0, err := convertBlobs(appBlobs...) + require.NoError(t, err) + + appBlobs, err = blobtest.GenerateV0Blobs([]int{blobSize2, blobSize3}, true) + require.NoError(t, err) + blobs1, err := convertBlobs(appBlobs...) 
+	require.NoError(t, err)
+
+	service := createService(ctx, t, append(blobs0, blobs1...))
+	var test = []struct {
+		name           string
+		doFn           func() (interface{}, error)
+		expectedResult func(interface{}, error)
+	}{
+		{
+			name: "get single blob",
+			doFn: func() (interface{}, error) {
+				b, err := service.Get(ctx, 1, blobs0[0].Namespace(), blobs0[0].Commitment)
+				return []*Blob{b}, err
+			},
+			expectedResult: func(res interface{}, err error) {
+				require.NoError(t, err)
+				assert.NotEmpty(t, res)
+
+				blobs, ok := res.([]*Blob)
+				assert.True(t, ok)
+				assert.Len(t, blobs, 1)
+
+				assert.Equal(t, blobs0[0].Commitment, blobs[0].Commitment)
+			},
+		},
+		{
+			name: "get all with the same namespace",
+			doFn: func() (interface{}, error) {
+				b, err := service.GetAll(ctx, 1, []share.Namespace{blobs1[0].Namespace()})
+				return b, err
+			},
+			expectedResult: func(res interface{}, err error) {
+				require.NoError(t, err)
+
+				blobs, ok := res.([]*Blob)
+				assert.True(t, ok)
+				assert.NotEmpty(t, blobs)
+
+				assert.Len(t, blobs, 2)
+
+				for i := range blobs1 {
+					assert.True(t, bytes.Equal(blobs1[i].Commitment, blobs[i].Commitment))
+				}
+			},
+		},
+		{
+			name: "get all with different namespaces",
+			doFn: func() (interface{}, error) {
+				b, err := service.GetAll(ctx, 1, []share.Namespace{blobs0[0].Namespace(), blobs0[1].Namespace()})
+				return b, err
+			},
+			expectedResult: func(res interface{}, err error) {
+				require.NoError(t, err)
+
+				blobs, ok := res.([]*Blob)
+				assert.True(t, ok)
+				assert.NotEmpty(t, blobs)
+
+				assert.Len(t, blobs, 2)
+				// check the order
+				require.True(t, bytes.Equal(blobs[0].Namespace(), blobs0[0].Namespace()))
+				require.True(t, bytes.Equal(blobs[1].Namespace(), blobs0[1].Namespace()))
+			},
+		},
+		{
+			name: "get blob with incorrect commitment",
+			doFn: func() (interface{}, error) {
+				b, err := service.Get(ctx, 1, blobs0[0].Namespace(), blobs0[1].Commitment)
+				return []*Blob{b}, err
+			},
+			expectedResult: func(res interface{}, err error) {
+				require.Error(t, err)
+
+				blobs, ok := res.([]*Blob)
+				assert.True(t, ok)
+				assert.Empty(t, blobs[0])
+			},
+		},
+		{
+			name: "get invalid blob",
+			doFn: func() (interface{}, error) {
+				appBlob, err := blobtest.GenerateV0Blobs([]int{10}, false)
+				require.NoError(t, err)
+				blob, err := convertBlobs(appBlob...)
+ require.NoError(t, err) + + b, err := service.Get(ctx, 1, blob[0].Namespace(), blob[0].Commitment) + return []*Blob{b}, err + }, + expectedResult: func(res interface{}, err error) { + require.Error(t, err) + + blobs, ok := res.([]*Blob) + assert.True(t, ok) + assert.Empty(t, blobs[0]) + }, + }, + { + name: "get proof", + doFn: func() (interface{}, error) { + proof, err := service.GetProof(ctx, 1, blobs0[1].Namespace(), blobs0[1].Commitment) + return proof, err + }, + expectedResult: func(res interface{}, err error) { + require.NoError(t, err) + + header, err := service.headerGetter(ctx, 1) + require.NoError(t, err) + + proof, ok := res.(*Proof) + assert.True(t, ok) + + verifyFn := func(t *testing.T, rawShares [][]byte, proof *Proof, namespace share.Namespace) { + for _, row := range header.DAH.RowRoots { + to := 0 + for _, p := range *proof { + from := to + to = p.End() - p.Start() + from + eq := p.VerifyInclusion(sha256.New(), namespace.ToNMT(), rawShares[from:to], row) + if eq == true { + return + } + } + } + t.Fatal("could not prove the shares") + } + + rawShares, err := BlobsToShares(blobs0[1]) + require.NoError(t, err) + verifyFn(t, rawShares, proof, blobs0[1].Namespace()) + }, + }, + { + name: "verify inclusion", + doFn: func() (interface{}, error) { + proof, err := service.GetProof(ctx, 1, blobs0[0].Namespace(), blobs0[0].Commitment) + require.NoError(t, err) + return service.Included(ctx, 1, blobs0[0].Namespace(), proof, blobs0[0].Commitment) + }, + expectedResult: func(res interface{}, err error) { + require.NoError(t, err) + included, ok := res.(bool) + require.True(t, ok) + require.True(t, included) + }, + }, + { + name: "verify inclusion fails with different proof", + doFn: func() (interface{}, error) { + proof, err := service.GetProof(ctx, 1, blobs0[1].Namespace(), blobs0[1].Commitment) + require.NoError(t, err) + return service.Included(ctx, 1, blobs0[0].Namespace(), proof, blobs0[0].Commitment) + }, + expectedResult: func(res interface{}, err error) { + require.Error(t, err) + require.ErrorIs(t, err, ErrInvalidProof) + included, ok := res.(bool) + require.True(t, ok) + require.True(t, included) + }, + }, + { + name: "not included", + doFn: func() (interface{}, error) { + appBlob, err := blobtest.GenerateV0Blobs([]int{10}, false) + require.NoError(t, err) + blob, err := convertBlobs(appBlob...) 
+ require.NoError(t, err) + + proof, err := service.GetProof(ctx, 1, blobs0[1].Namespace(), blobs0[1].Commitment) + require.NoError(t, err) + return service.Included(ctx, 1, blob[0].Namespace(), proof, blob[0].Commitment) + }, + expectedResult: func(res interface{}, err error) { + require.NoError(t, err) + included, ok := res.(bool) + require.True(t, ok) + require.False(t, included) + }, + }, + { + name: "count proofs for the blob", + doFn: func() (interface{}, error) { + proof0, err := service.GetProof(ctx, 1, blobs0[0].Namespace(), blobs0[0].Commitment) + if err != nil { + return nil, err + } + proof1, err := service.GetProof(ctx, 1, blobs0[1].Namespace(), blobs0[1].Commitment) + if err != nil { + return nil, err + } + return []*Proof{proof0, proof1}, nil + }, + expectedResult: func(i interface{}, err error) { + require.NoError(t, err) + proofs, ok := i.([]*Proof) + require.True(t, ok) + + h, err := service.headerGetter(ctx, 1) + require.NoError(t, err) + + originalDataWidth := len(h.DAH.RowRoots) / 2 + sizes := []int{blobSize0, blobSize1} + for i, proof := range proofs { + require.True(t, sizes[i]/originalDataWidth+1 == proof.Len()) + } + }, + }, + { + name: "get all not found", + doFn: func() (interface{}, error) { + namespace := share.Namespace(tmrand.Bytes(share.NamespaceSize)) + return service.GetAll(ctx, 1, []share.Namespace{namespace}) + }, + expectedResult: func(i interface{}, err error) { + blobs, ok := i.([]*Blob) + require.True(t, ok) + assert.Empty(t, blobs) + require.Error(t, err) + require.ErrorIs(t, err, ErrBlobNotFound) + + }, + }, + { + name: "marshal proof", + doFn: func() (interface{}, error) { + proof, err := service.GetProof(ctx, 1, blobs0[1].Namespace(), blobs0[1].Commitment) + require.NoError(t, err) + return json.Marshal(proof) + }, + expectedResult: func(i interface{}, err error) { + require.NoError(t, err) + jsonData, ok := i.([]byte) + require.True(t, ok) + var proof Proof + require.NoError(t, json.Unmarshal(jsonData, &proof)) + + newProof, err := service.GetProof(ctx, 1, blobs0[1].Namespace(), blobs0[1].Commitment) + require.NoError(t, err) + require.NoError(t, proof.equal(*newProof)) + }, + }, + } + + for _, tt := range test { + t.Run(tt.name, func(t *testing.T) { + blobs, err := tt.doFn() + tt.expectedResult(blobs, err) + }) + } +} + +// TestService_GetSingleBlobWithoutPadding creates two blobs with the same namespace +// But to satisfy the rule of eds creating, padding namespace share is placed between +// blobs. Test ensures that blob service will skip padding share and return the correct blob. +func TestService_GetSingleBlobWithoutPadding(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) + t.Cleanup(cancel) + + appBlob, err := blobtest.GenerateV0Blobs([]int{9, 5}, true) + require.NoError(t, err) + blobs, err := convertBlobs(appBlob...) + require.NoError(t, err) + + ns1, ns2 := blobs[0].Namespace().ToAppNamespace(), blobs[1].Namespace().ToAppNamespace() + + padding0, err := shares.NamespacePaddingShare(ns1, appconsts.ShareVersionZero) + require.NoError(t, err) + padding1, err := shares.NamespacePaddingShare(ns2, appconsts.ShareVersionZero) + require.NoError(t, err) + rawShares0, err := BlobsToShares(blobs[0]) + require.NoError(t, err) + rawShares1, err := BlobsToShares(blobs[1]) + require.NoError(t, err) + + rawShares := make([][]byte, 0) + rawShares = append(rawShares, append(rawShares0, padding0.ToBytes())...) + rawShares = append(rawShares, append(rawShares1, padding1.ToBytes())...) 
+ + bs := ipld.NewMemBlockservice() + batching := ds_sync.MutexWrap(ds.NewMapDatastore()) + headerStore, err := store.NewStore[*header.ExtendedHeader](batching) + require.NoError(t, err) + eds, err := ipld.AddShares(ctx, rawShares, bs) + require.NoError(t, err) + + h := headertest.ExtendedHeaderFromEDS(t, 1, eds) + err = headerStore.Init(ctx, h) + require.NoError(t, err) + + fn := func(ctx context.Context, height uint64) (*header.ExtendedHeader, error) { + return headerStore.GetByHeight(ctx, height) + } + service := NewService(nil, getters.NewIPLDGetter(bs), fn) + + newBlob, err := service.Get(ctx, 1, blobs[1].Namespace(), blobs[1].Commitment) + require.NoError(t, err) + assert.Equal(t, newBlob.Commitment, blobs[1].Commitment) +} + +func TestService_Get(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) + t.Cleanup(cancel) + + sizes := []int{1, 6, 3, 2, 4, 6, 8, 2, 15, 17} + + appBlobs, err := blobtest.GenerateV0Blobs(sizes, true) + require.NoError(t, err) + blobs, err := convertBlobs(appBlobs...) + require.NoError(t, err) + + service := createService(ctx, t, blobs) + for _, blob := range blobs { + b, err := service.Get(ctx, 1, blob.Namespace(), blob.Commitment) + require.NoError(t, err) + assert.Equal(t, b.Commitment, blob.Commitment) + } +} + +func TestService_GetAllWithoutPadding(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) + t.Cleanup(cancel) + + appBlob, err := blobtest.GenerateV0Blobs([]int{9, 5}, true) + require.NoError(t, err) + blobs, err := convertBlobs(appBlob...) + require.NoError(t, err) + + ns1, ns2 := blobs[0].Namespace().ToAppNamespace(), blobs[1].Namespace().ToAppNamespace() + + padding0, err := shares.NamespacePaddingShare(ns1, appconsts.ShareVersionZero) + require.NoError(t, err) + padding1, err := shares.NamespacePaddingShare(ns2, appconsts.ShareVersionZero) + require.NoError(t, err) + rawShares0, err := BlobsToShares(blobs[0]) + require.NoError(t, err) + rawShares1, err := BlobsToShares(blobs[1]) + require.NoError(t, err) + rawShares := make([][]byte, 0) + + // create shares in correct order with padding shares + if bytes.Compare(blobs[0].Namespace(), blobs[1].Namespace()) <= 0 { + rawShares = append(rawShares, append(rawShares0, padding0.ToBytes())...) + rawShares = append(rawShares, append(rawShares1, padding1.ToBytes())...) + } else { + rawShares = append(rawShares, append(rawShares1, padding1.ToBytes())...) + rawShares = append(rawShares, append(rawShares0, padding0.ToBytes())...) 
+ } + + bs := ipld.NewMemBlockservice() + batching := ds_sync.MutexWrap(ds.NewMapDatastore()) + headerStore, err := store.NewStore[*header.ExtendedHeader](batching) + require.NoError(t, err) + eds, err := ipld.AddShares(ctx, rawShares, bs) + require.NoError(t, err) + + h := headertest.ExtendedHeaderFromEDS(t, 1, eds) + err = headerStore.Init(ctx, h) + require.NoError(t, err) + + fn := func(ctx context.Context, height uint64) (*header.ExtendedHeader, error) { + return headerStore.GetByHeight(ctx, height) + } + + service := NewService(nil, getters.NewIPLDGetter(bs), fn) + + _, err = service.GetAll(ctx, 1, []share.Namespace{blobs[0].Namespace(), blobs[1].Namespace()}) + require.NoError(t, err) +} + +// BenchmarkGetByCommitment-12 3139 380827 ns/op 701647 B/op 4990 allocs/op +func BenchmarkGetByCommitment(b *testing.B) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) + b.Cleanup(cancel) + appBlobs, err := blobtest.GenerateV0Blobs([]int{32, 32}, true) + require.NoError(b, err) + + blobs, err := convertBlobs(appBlobs...) + require.NoError(b, err) + + service := createService(ctx, b, blobs) + b.ResetTimer() + for i := 0; i < b.N; i++ { + b.ReportAllocs() + _, _, err = service.getByCommitment( + ctx, 1, blobs[1].Namespace(), blobs[1].Commitment, + ) + require.NoError(b, err) + } +} + +func createService(ctx context.Context, t testing.TB, blobs []*Blob) *Service { + bs := ipld.NewMemBlockservice() + batching := ds_sync.MutexWrap(ds.NewMapDatastore()) + headerStore, err := store.NewStore[*header.ExtendedHeader](batching) + require.NoError(t, err) + rawShares, err := BlobsToShares(blobs...) + require.NoError(t, err) + eds, err := ipld.AddShares(ctx, rawShares, bs) + require.NoError(t, err) + + h := headertest.ExtendedHeaderFromEDS(t, 1, eds) + err = headerStore.Init(ctx, h) + require.NoError(t, err) + + fn := func(ctx context.Context, height uint64) (*header.ExtendedHeader, error) { + return headerStore.GetByHeight(ctx, height) + } + return NewService(nil, getters.NewIPLDGetter(bs), fn) +} diff --git a/cmd/auth.go b/cmd/auth.go new file mode 100644 index 0000000000..6ffdab656e --- /dev/null +++ b/cmd/auth.go @@ -0,0 +1,112 @@ +package cmd + +import ( + "crypto/rand" + "errors" + "fmt" + "io" + "path/filepath" + + "github.com/cristalhq/jwt" + "github.com/filecoin-project/go-jsonrpc/auth" + "github.com/mitchellh/go-homedir" + "github.com/spf13/cobra" + flag "github.com/spf13/pflag" + + "github.com/celestiaorg/celestia-node/api/rpc/perms" + "github.com/celestiaorg/celestia-node/libs/authtoken" + "github.com/celestiaorg/celestia-node/libs/keystore" + nodemod "github.com/celestiaorg/celestia-node/nodebuilder/node" +) + +func AuthCmd(fsets ...*flag.FlagSet) *cobra.Command { + var cmd = &cobra.Command{ + Use: "auth [permission-level (e.g. read || write || admin)]", + Short: "Signs and outputs a hex-encoded JWT token with the given permissions.", + Long: "Signs and outputs a hex-encoded JWT token with the given permissions. 
NOTE: only use this command when " + + "the node has already been initialized and started.", + RunE: func(cmd *cobra.Command, args []string) error { + if len(args) != 1 { + return errors.New("must specify permissions") + } + permissions, err := convertToPerms(args[0]) + if err != nil { + return err + } + + ks, err := newKeystore(StorePath(cmd.Context())) + if err != nil { + return err + + } + + key, err := ks.Get(nodemod.SecretName) + if err != nil { + if !errors.Is(err, keystore.ErrNotFound) { + return err + } + key, err = generateNewKey(ks) + if err != nil { + return err + } + } + + token, err := buildJWTToken(key.Body, permissions) + if err != nil { + return err + } + fmt.Printf("%s\n", token) + return nil + }, + } + + for _, set := range fsets { + cmd.Flags().AddFlagSet(set) + } + return cmd +} + +func newKeystore(path string) (keystore.Keystore, error) { + expanded, err := homedir.Expand(filepath.Clean(path)) + if err != nil { + return nil, err + } + return keystore.NewFSKeystore(filepath.Join(expanded, "keys"), nil) +} + +func buildJWTToken(body []byte, permissions []auth.Permission) (string, error) { + signer, err := jwt.NewHS256(body) + if err != nil { + return "", err + } + return authtoken.NewSignedJWT(signer, permissions) +} + +func generateNewKey(ks keystore.Keystore) (keystore.PrivKey, error) { + sk, err := io.ReadAll(io.LimitReader(rand.Reader, 32)) + if err != nil { + return keystore.PrivKey{}, err + } + // save key + key := keystore.PrivKey{Body: sk} + err = ks.Put(nodemod.SecretName, key) + if err != nil { + return keystore.PrivKey{}, err + } + return key, nil +} + +func convertToPerms(perm string) ([]auth.Permission, error) { + perms, ok := stringsToPerms[perm] + if !ok { + return nil, fmt.Errorf("invalid permission specified: %s", perm) + } + return perms, nil +} + +var stringsToPerms = map[string][]auth.Permission{ + "public": perms.DefaultPerms, + "read": perms.ReadPerms, + "write": perms.ReadWritePerms, + "admin": perms.AllPerms, +} diff --git a/cmd/cel-key/main.go b/cmd/cel-key/main.go index d22e0f3696..736ea9d7f5 100644 --- a/cmd/cel-key/main.go +++ b/cmd/cel-key/main.go @@ -8,16 +8,15 @@ import ( "github.com/cosmos/cosmos-sdk/client/config" "github.com/cosmos/cosmos-sdk/client/flags" "github.com/cosmos/cosmos-sdk/client/keys" - sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/crypto/keyring" "github.com/cosmos/cosmos-sdk/x/auth/types" "github.com/spf13/cobra" "github.com/celestiaorg/celestia-app/app" "github.com/celestiaorg/celestia-app/app/encoding" - "github.com/celestiaorg/celestia-node/cmd" ) -var encodingConfig = encoding.MakeEncodingConfig(app.ModuleEncodingRegisters...) +var encodingConfig = encoding.MakeConfig(app.ModuleEncodingRegisters...) var initClientCtx = client.Context{}. WithCodec(encodingConfig.Codec). 
@@ -48,6 +47,14 @@ func init() { return err } + if !cmd.Flag(flags.FlagKeyringBackend).Changed { + err = cmd.Flag(flags.FlagKeyringBackend).Value.Set(keyring.BackendTest) + if err != nil { + return err + } + cmd.Flag(flags.FlagKeyringBackend).Changed = true + } + return ParseDirectoryFlags(cmd) } } @@ -60,10 +67,6 @@ func main() { } func run() error { - cfg := sdk.GetConfig() - cfg.SetBech32PrefixForAccount(app.Bech32PrefixAccAddr, app.Bech32PrefixAccPub) - cfg.Seal() - ctx := context.WithValue(context.Background(), client.ClientContextKey, &initClientCtx) - return rootCmd.ExecuteContext(cmd.WithEnv(ctx)) + return rootCmd.ExecuteContext(ctx) } diff --git a/cmd/cel-key/node_types.go b/cmd/cel-key/node_types.go index 5fb3dea225..76cc690af7 100644 --- a/cmd/cel-key/node_types.go +++ b/cmd/cel-key/node_types.go @@ -1,47 +1,68 @@ package main import ( + "errors" "fmt" + "strings" + sdkflags "github.com/cosmos/cosmos-sdk/client/flags" "github.com/spf13/cobra" flag "github.com/spf13/pflag" - sdkflags "github.com/cosmos/cosmos-sdk/client/flags" + nodecmd "github.com/celestiaorg/celestia-node/cmd" + "github.com/celestiaorg/celestia-node/nodebuilder/p2p" ) var ( nodeDirKey = "node.type" - - bridgeDir = "~/.celestia-bridge/keys" - fullDir = "~/.celestia-full/keys" - lightDir = "~/.celestia-light/keys" + networkKey = "p2p.network" ) func DirectoryFlags() *flag.FlagSet { flags := &flag.FlagSet{} - flags.String(nodeDirKey, "", "Sets key utility to use the node type's directory (e.g. "+ - "~/.celestia-light if --node.type light is passed.") + defaultNetwork := string(p2p.DefaultNetwork) + + flags.String( + nodeDirKey, + "", + "Sets key utility to use the node type's directory (e.g. "+ + "~/.celestia-light-"+strings.ToLower(defaultNetwork)+" if --node.type light is passed).") + flags.String( + networkKey, + defaultNetwork, + "Sets key utility to use the node network's directory (e.g. 
"+ + "~/.celestia-light-mynetwork if --p2p.network MyNetwork is passed).") return flags } func ParseDirectoryFlags(cmd *cobra.Command) error { + // if keyring-dir is explicitly set, use it + if cmd.Flags().Changed(sdkflags.FlagKeyringDir) { + return nil + } + nodeType := cmd.Flag(nodeDirKey).Value.String() if nodeType == "" { - return nil + return errors.New("no node type provided") } + network := cmd.Flag(networkKey).Value.String() + if net, err := p2p.Network(network).Validate(); err == nil { + network = string(net) + } else { + fmt.Println("WARNING: unknown network specified: ", network) + } switch nodeType { - case "bridge": - if err := cmd.Flags().Set(sdkflags.FlagKeyringDir, bridgeDir); err != nil { + case "bridge", "full", "light": + path, err := nodecmd.DefaultNodeStorePath(nodeType, network) + if err != nil { return err } - case "full": - if err := cmd.Flags().Set(sdkflags.FlagKeyringDir, fullDir); err != nil { - return err - } - case "light": - if err := cmd.Flags().Set(sdkflags.FlagKeyringDir, lightDir); err != nil { + + keyPath := fmt.Sprintf("%s/keys", path) + fmt.Println("using directory: ", keyPath) + if err := cmd.Flags().Set(sdkflags.FlagKeyringDir, keyPath); err != nil { return err } default: diff --git a/cmd/cel-shed/eds_store_stress.go b/cmd/cel-shed/eds_store_stress.go new file mode 100644 index 0000000000..62ea5cb772 --- /dev/null +++ b/cmd/cel-shed/eds_store_stress.go @@ -0,0 +1,165 @@ +package main + +import ( + "context" + "errors" + _ "expvar" + "fmt" + "math" + "net/http" + "os" + "time" + + logging "github.com/ipfs/go-log/v2" + "github.com/mitchellh/go-homedir" + "github.com/pyroscope-io/client/pyroscope" + "github.com/spf13/cobra" + + "github.com/celestiaorg/celestia-node/libs/edssser" + "github.com/celestiaorg/celestia-node/nodebuilder" + "github.com/celestiaorg/celestia-node/nodebuilder/node" +) + +const ( + edsStorePathFlag = "path" + edsWritesFlag = "writes" + edsSizeFlag = "size" + edsDisableLogFlag = "disable-log" + edsLogStatFreqFlag = "log-stat-freq" + edsCleanupFlag = "cleanup" + edsFreshStartFlag = "fresh" + + pyroscopeEndpointFlag = "pyroscope" + putTimeoutFlag = "timeout" + badgerLogLevelFlag = "badger-log-level" +) + +func init() { + edsStoreCmd.AddCommand(edsStoreStress) + + defaultPath := "~/.edssser" + path, err := homedir.Expand(defaultPath) + if err != nil { + panic(err) + } + + pathFlagUsage := fmt.Sprintf("Directory path to use for stress test. Uses %s by default.", defaultPath) + edsStoreStress.Flags().String(edsStorePathFlag, path, pathFlagUsage) + edsStoreStress.Flags().String(pyroscopeEndpointFlag, "", + "Pyroscope address. If no address provided, pyroscope will be disabled") + edsStoreStress.Flags().Int(edsWritesFlag, math.MaxInt, "Total EDS writes to make. MaxInt by default.") + edsStoreStress.Flags().Int(edsSizeFlag, 128, "Chooses EDS size. 128 by default.") + edsStoreStress.Flags().Bool(edsDisableLogFlag, false, "Disables logging. Enabled by default.") + edsStoreStress.Flags().Int(edsLogStatFreqFlag, 10, "Write statistic logging frequency. 10 by default.") + edsStoreStress.Flags().Bool(edsCleanupFlag, false, "Cleans up the store on stop. Disabled by default.") + edsStoreStress.Flags().Bool(edsFreshStartFlag, false, "Cleanup previous state on start. Disabled by default.") + edsStoreStress.Flags().Int(putTimeoutFlag, 30, "Sets put timeout in seconds. 
30 sec by default.") + edsStoreStress.Flags().String(badgerLogLevelFlag, "INFO", "Badger log level, Defaults to INFO") + + // kill redundant print + nodebuilder.PrintKeyringInfo = false +} + +var edsStoreCmd = &cobra.Command{ + Use: "eds-store [subcommand]", + Short: "Collection of eds-store related utilities", +} + +var edsStoreStress = &cobra.Command{ + Use: "stress", + Short: `Runs eds.Store stress test over default node.Store Datastore backend (e.g. Badger).`, + SilenceUsage: true, + RunE: func(cmd *cobra.Command, args []string) (err error) { + // expose expvar vars over http + go http.ListenAndServe(":9999", http.DefaultServeMux) //nolint:errcheck,gosec + + endpoint, _ := cmd.Flags().GetString(pyroscopeEndpointFlag) + if endpoint != "" { + _, err = pyroscope.Start(pyroscope.Config{ + ApplicationName: "cel-shred.stresser", + ServerAddress: endpoint, + ProfileTypes: []pyroscope.ProfileType{ + pyroscope.ProfileCPU, + pyroscope.ProfileAllocObjects, + pyroscope.ProfileAllocSpace, + pyroscope.ProfileInuseObjects, + pyroscope.ProfileInuseSpace, + }, + }) + if err != nil { + fmt.Printf("failed to launch pyroscope with addr: %s err: %s\n", endpoint, err.Error()) + } else { + fmt.Println("connected pyroscope to:", endpoint) + } + } + + path, _ := cmd.Flags().GetString(edsStorePathFlag) + fmt.Printf("using %s\n", path) + + freshStart, _ := cmd.Flags().GetBool(edsFreshStartFlag) + if freshStart { + err = os.RemoveAll(path) + if err != nil { + return err + } + } + + cleanup, _ := cmd.Flags().GetBool(edsCleanupFlag) + if cleanup { + defer func() { + err = errors.Join(err, os.RemoveAll(path)) + }() + } + + loglevel, _ := cmd.Flags().GetString(badgerLogLevelFlag) + if err = logging.SetLogLevel("badger", loglevel); err != nil { + return err + } + + disableLog, _ := cmd.Flags().GetBool(edsDisableLogFlag) + logFreq, _ := cmd.Flags().GetInt(edsLogStatFreqFlag) + edsWrites, _ := cmd.Flags().GetInt(edsWritesFlag) + edsSize, _ := cmd.Flags().GetInt(edsSizeFlag) + putTimeout, _ := cmd.Flags().GetInt(putTimeoutFlag) + + cfg := edssser.Config{ + EDSSize: edsSize, + EDSWrites: edsWrites, + EnableLog: !disableLog, + LogFilePath: path, + StatLogFreq: logFreq, + OpTimeout: time.Duration(putTimeout) * time.Second, + } + + err = nodebuilder.Init(*nodebuilder.DefaultConfig(node.Full), path, node.Full) + if err != nil { + return err + } + + nodestore, err := nodebuilder.OpenStore(path, nil) + if err != nil { + return err + } + defer func() { + err = errors.Join(err, nodestore.Close()) + }() + + datastore, err := nodestore.Datastore() + if err != nil { + return err + } + + stresser, err := edssser.NewEDSsser(path, datastore, cfg) + if err != nil { + return err + } + + stats, err := stresser.Run(cmd.Context()) + if !errors.Is(err, context.Canceled) { + return err + } + + fmt.Printf("%s", stats.Finalize()) + return nil + }, +} diff --git a/cmd/cel-shed/header.go b/cmd/cel-shed/header.go index c1c9e7ec99..379e8aac85 100644 --- a/cmd/cel-shed/header.go +++ b/cmd/cel-shed/header.go @@ -1,14 +1,18 @@ package main import ( + "errors" "fmt" "strconv" "strings" "github.com/spf13/cobra" - "github.com/celestiaorg/celestia-node/header/store" - "github.com/celestiaorg/celestia-node/node" + "github.com/celestiaorg/go-header/store" + + "github.com/celestiaorg/celestia-node/header" + "github.com/celestiaorg/celestia-node/nodebuilder" + "github.com/celestiaorg/celestia-node/nodebuilder/node" ) func init() { @@ -21,26 +25,29 @@ var headerCmd = &cobra.Command{ } var headerStoreInit = &cobra.Command{ - Use: "store-init [node-type] 
[height]", + Use: "store-init [node-type] [network] [height]", Short: `Forcefully initialize header store head to be of the given height. Requires the node being stopped. Custom store path is not supported yet.`, SilenceUsage: true, RunE: func(cmd *cobra.Command, args []string) error { - if len(args) != 2 { - return fmt.Errorf("not enough arguments") + if len(args) != 3 { + return errors.New("not enough arguments") } tp := node.ParseType(args[0]) if !tp.IsValid() { - return fmt.Errorf("invalid node-type") + return errors.New("invalid node-type") } - height, err := strconv.Atoi(args[1]) + network := args[1] + + height, err := strconv.Atoi(args[2]) if err != nil { return fmt.Errorf("invalid height: %w", err) } - s, err := node.OpenStore(fmt.Sprintf("~/.celestia-%s", strings.ToLower(tp.String()))) + s, err := nodebuilder.OpenStore(fmt.Sprintf("~/.celestia-%s-%s", strings.ToLower(tp.String()), + strings.ToLower(network)), nil) if err != nil { return err } @@ -50,7 +57,7 @@ Custom store path is not supported yet.`, return err } - hstore, err := store.NewStore(ds) + hstore, err := store.NewStore[*header.ExtendedHeader](ds) if err != nil { return err } diff --git a/cmd/cel-shed/main.go b/cmd/cel-shed/main.go index 75c47c4044..872bbb48a9 100644 --- a/cmd/cel-shed/main.go +++ b/cmd/cel-shed/main.go @@ -3,14 +3,14 @@ package main import ( "context" "os" + "os/signal" + "syscall" "github.com/spf13/cobra" - - "github.com/celestiaorg/celestia-node/cmd" ) func init() { - rootCmd.AddCommand(p2pCmd, headerCmd) + rootCmd.AddCommand(p2pCmd, headerCmd, edsStoreCmd) } var rootCmd = &cobra.Command{ @@ -28,5 +28,8 @@ func main() { } func run() error { - return rootCmd.ExecuteContext(cmd.WithEnv(context.Background())) + ctx, cancel := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM) + defer cancel() + + return rootCmd.ExecuteContext(ctx) } diff --git a/cmd/cel-shed/p2p.go b/cmd/cel-shed/p2p.go index 0dd2c36e67..a313841fa9 100644 --- a/cmd/cel-shed/p2p.go +++ b/cmd/cel-shed/p2p.go @@ -5,8 +5,8 @@ import ( "encoding/hex" "fmt" - "github.com/libp2p/go-libp2p-core/crypto" - "github.com/libp2p/go-libp2p-core/peer" + "github.com/libp2p/go-libp2p/core/crypto" + "github.com/libp2p/go-libp2p/core/peer" "github.com/spf13/cobra" ) diff --git a/cmd/celestia/bridge.go b/cmd/celestia/bridge.go deleted file mode 100644 index a5937e982c..0000000000 --- a/cmd/celestia/bridge.go +++ /dev/null @@ -1,75 +0,0 @@ -package main - -import ( - "github.com/spf13/cobra" - - cmdnode "github.com/celestiaorg/celestia-node/cmd" - "github.com/celestiaorg/celestia-node/node" -) - -// NOTE: We should always ensure that the added Flags below are parsed somewhere, like in the PersistentPreRun func on -// parent command. 
- -func init() { - bridgeCmd.AddCommand( - cmdnode.Init( - cmdnode.NodeFlags(node.Bridge), - cmdnode.P2PFlags(), - cmdnode.CoreFlags(), - cmdnode.MiscFlags(), - cmdnode.RPCFlags(), - cmdnode.KeyFlags(), - ), - cmdnode.Start( - cmdnode.NodeFlags(node.Bridge), - cmdnode.P2PFlags(), - cmdnode.CoreFlags(), - cmdnode.MiscFlags(), - cmdnode.RPCFlags(), - cmdnode.KeyFlags(), - ), - ) -} - -var bridgeCmd = &cobra.Command{ - Use: "bridge [subcommand]", - Args: cobra.NoArgs, - Short: "Manage your Bridge node", - PersistentPreRunE: func(cmd *cobra.Command, args []string) error { - env, err := cmdnode.GetEnv(cmd.Context()) - if err != nil { - return err - } - - env.SetNodeType(node.Bridge) - - err = cmdnode.ParseNodeFlags(cmd, env) - if err != nil { - return err - } - - err = cmdnode.ParseP2PFlags(cmd, env) - if err != nil { - return err - } - - err = cmdnode.ParseCoreFlags(cmd, env) - if err != nil { - return err - } - - err = cmdnode.ParseMiscFlags(cmd, env) - if err != nil { - return err - } - - err = cmdnode.ParseRPCFlags(cmd, env) - if err != nil { - return err - } - - cmdnode.ParseKeyFlags(cmd, env) - - return nil - }, -} diff --git a/cmd/celestia/cmd_test.go b/cmd/celestia/cmd_test.go index 0fee7ebf17..94dd3625b8 100644 --- a/cmd/celestia/cmd_test.go +++ b/cmd/celestia/cmd_test.go @@ -3,19 +3,43 @@ package main import ( "bytes" "context" - "io/ioutil" "os" + "reflect" "testing" "github.com/stretchr/testify/require" - cmdnode "github.com/celestiaorg/celestia-node/cmd" + "github.com/celestiaorg/celestia-node/header" ) +func TestCompletionHelpString(t *testing.T) { + type TestFields struct { + NoInputOneOutput func(context.Context) (*header.ExtendedHeader, error) + TwoInputsOneOutputArray func( + context.Context, + *header.ExtendedHeader, + uint64, + ) ([]*header.ExtendedHeader, error) + OneInputOneOutput func(context.Context, uint64) (*header.ExtendedHeader, error) + NoInputsNoOutputs func(ctx context.Context) error + NoInputsChanOutput func(ctx context.Context) (<-chan *header.ExtendedHeader, error) + } + testOutputs := []string{ + "() -> (*header.ExtendedHeader)", + "(*header.ExtendedHeader, uint64) -> ([]*header.ExtendedHeader)", + "(uint64) -> (*header.ExtendedHeader)", + "() -> ()", + "() -> (<-chan *header.ExtendedHeader)", + } + methods := reflect.VisibleFields(reflect.TypeOf(TestFields{})) + for i, method := range methods { + require.Equal(t, testOutputs[i], parseSignatureForHelpString(method)) + } +} + func TestLight(t *testing.T) { // Run the tests in a temporary directory - tmpDir, err := ioutil.TempDir("", "light") - require.NoError(t, err, "error creating a temporary test directory") + tmpDir := t.TempDir() testDir, err := os.Getwd() require.NoError(t, err, "error getting the current working directory") err = os.Chdir(tmpDir) @@ -29,7 +53,7 @@ func TestLight(t *testing.T) { "--node.store", ".celestia-light", "init", }) - err := rootCmd.ExecuteContext(cmdnode.WithEnv(context.Background())) + err := rootCmd.ExecuteContext(context.Background()) require.NoError(t, err) }) @@ -61,8 +85,7 @@ func TestLight(t *testing.T) { func TestBridge(t *testing.T) { // Run the tests in a temporary directory - tmpDir, err := ioutil.TempDir("", "bridge") - require.NoError(t, err, "error creating a temporary test directory") + tmpDir := t.TempDir() testDir, err := os.Getwd() require.NoError(t, err, "error getting the current working directory") err = os.Chdir(tmpDir) @@ -76,7 +99,7 @@ func TestBridge(t *testing.T) { "--node.store", ".celestia-bridge", "init", }) - err := 
rootCmd.ExecuteContext(cmdnode.WithEnv(context.Background())) + err := rootCmd.ExecuteContext(context.Background()) require.NoError(t, err) }) @@ -105,3 +128,23 @@ func TestBridge(t *testing.T) { }) */ } + +func parseSignatureForHelpString(methodSig reflect.StructField) string { + simplifiedSignature := "(" + in, out := methodSig.Type.NumIn(), methodSig.Type.NumOut() + for i := 1; i < in; i++ { + simplifiedSignature += methodSig.Type.In(i).String() + if i != in-1 { + simplifiedSignature += ", " + } + } + simplifiedSignature += ") -> (" + for i := 0; i < out-1; i++ { + simplifiedSignature += methodSig.Type.Out(i).String() + if i != out-2 { + simplifiedSignature += ", " + } + } + simplifiedSignature += ")" + return simplifiedSignature +} diff --git a/cmd/celestia/full.go b/cmd/celestia/full.go deleted file mode 100644 index 1c61f1f1e3..0000000000 --- a/cmd/celestia/full.go +++ /dev/null @@ -1,87 +0,0 @@ -//nolint:dupl -package main - -import ( - "github.com/spf13/cobra" - - cmdnode "github.com/celestiaorg/celestia-node/cmd" - "github.com/celestiaorg/celestia-node/node" -) - -// NOTE: We should always ensure that the added Flags below are parsed somewhere, like in the PersistentPreRun func on -// parent command. - -func init() { - fullCmd.AddCommand( - cmdnode.Init( - cmdnode.NodeFlags(node.Full), - cmdnode.P2PFlags(), - cmdnode.HeadersFlags(), - cmdnode.MiscFlags(), - // NOTE: for now, state-related queries can only be accessed - // over an RPC connection with a celestia-core node. - cmdnode.CoreFlags(), - cmdnode.RPCFlags(), - cmdnode.KeyFlags(), - ), - cmdnode.Start( - cmdnode.NodeFlags(node.Full), - cmdnode.P2PFlags(), - cmdnode.HeadersFlags(), - cmdnode.MiscFlags(), - // NOTE: for now, state-related queries can only be accessed - // over an RPC connection with a celestia-core node. - cmdnode.CoreFlags(), - cmdnode.RPCFlags(), - cmdnode.KeyFlags(), - ), - ) -} - -var fullCmd = &cobra.Command{ - Use: "full [subcommand]", - Args: cobra.NoArgs, - Short: "Manage your Full node", - PersistentPreRunE: func(cmd *cobra.Command, args []string) error { - env, err := cmdnode.GetEnv(cmd.Context()) - if err != nil { - return err - } - - env.SetNodeType(node.Full) - - err = cmdnode.ParseNodeFlags(cmd, env) - if err != nil { - return err - } - - err = cmdnode.ParseP2PFlags(cmd, env) - if err != nil { - return err - } - - err = cmdnode.ParseCoreFlags(cmd, env) - if err != nil { - return err - } - - err = cmdnode.ParseHeadersFlags(cmd, env) - if err != nil { - return err - } - - err = cmdnode.ParseMiscFlags(cmd, env) - if err != nil { - return err - } - - err = cmdnode.ParseRPCFlags(cmd, env) - if err != nil { - return err - } - - cmdnode.ParseKeyFlags(cmd, env) - - return nil - }, -} diff --git a/cmd/celestia/light.go b/cmd/celestia/light.go deleted file mode 100644 index 452c54b6f5..0000000000 --- a/cmd/celestia/light.go +++ /dev/null @@ -1,86 +0,0 @@ -//nolint:dupl -package main - -import ( - "github.com/spf13/cobra" - - cmdnode "github.com/celestiaorg/celestia-node/cmd" - "github.com/celestiaorg/celestia-node/node" -) - -// NOTE: We should always ensure that the added Flags below are parsed somewhere, like in the PersistentPreRun func on -// parent command. - -func init() { - lightCmd.AddCommand( - cmdnode.Init( - cmdnode.NodeFlags(node.Light), - cmdnode.P2PFlags(), - cmdnode.HeadersFlags(), - cmdnode.MiscFlags(), - // NOTE: for now, state-related queries can only be accessed - // over an RPC connection with a celestia-core node. 
- cmdnode.CoreFlags(), - cmdnode.RPCFlags(), - cmdnode.KeyFlags(), - ), - cmdnode.Start( - cmdnode.NodeFlags(node.Light), - cmdnode.P2PFlags(), - cmdnode.HeadersFlags(), - cmdnode.MiscFlags(), - // NOTE: for now, state-related queries can only be accessed - // over an RPC connection with a celestia-core node. - cmdnode.CoreFlags(), - cmdnode.RPCFlags(), - cmdnode.KeyFlags(), - ), - ) -} - -var lightCmd = &cobra.Command{ - Use: "light [subcommand]", - Args: cobra.NoArgs, - Short: "Manage your Light node", - PersistentPreRunE: func(cmd *cobra.Command, args []string) error { - env, err := cmdnode.GetEnv(cmd.Context()) - if err != nil { - return err - } - env.SetNodeType(node.Light) - - err = cmdnode.ParseNodeFlags(cmd, env) - if err != nil { - return err - } - - err = cmdnode.ParseP2PFlags(cmd, env) - if err != nil { - return err - } - - err = cmdnode.ParseCoreFlags(cmd, env) - if err != nil { - return err - } - - err = cmdnode.ParseHeadersFlags(cmd, env) - if err != nil { - return err - } - - err = cmdnode.ParseMiscFlags(cmd, env) - if err != nil { - return err - } - - err = cmdnode.ParseRPCFlags(cmd, env) - if err != nil { - return err - } - - cmdnode.ParseKeyFlags(cmd, env) - - return nil - }, -} diff --git a/cmd/celestia/main.go b/cmd/celestia/main.go index 74c5bf4062..76287d998f 100644 --- a/cmd/celestia/main.go +++ b/cmd/celestia/main.go @@ -2,24 +2,31 @@ package main import ( "context" - "math/rand" "os" - "time" - sdk "github.com/cosmos/cosmos-sdk/types" "github.com/spf13/cobra" + "github.com/spf13/pflag" - "github.com/celestiaorg/celestia-app/app" - "github.com/celestiaorg/celestia-node/cmd" + cmdnode "github.com/celestiaorg/celestia-node/cmd" ) -func init() { - // This is necessary to ensure that the account addresses are correctly prefixed - // as in the celestia application. 
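The per-node-type command files deleted above (bridge.go, full.go, light.go) are replaced by constructors in cmd/node.go later in this diff; main.go, continuing below, wires them up through WithSubcommands. A sketch of extending that wiring with a hypothetical extra subcommand, to show the option shape:

	func WithVersionSubcommand() func(*cobra.Command, []*pflag.FlagSet) {
		return func(c *cobra.Command, flags []*pflag.FlagSet) {
			c.AddCommand(versionCmd) // versionCmd as defined in version.go
		}
	}

	lightCmd := cmdnode.NewLight(WithSubcommands(), WithVersionSubcommand())

Each option receives the command plus the flag sets declared for that node type, so subcommands such as Init can attach exactly those flags.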
- cfg := sdk.GetConfig() - cfg.SetBech32PrefixForAccount(app.Bech32PrefixAccAddr, app.Bech32PrefixAccPub) - cfg.Seal() +func WithSubcommands() func(*cobra.Command, []*pflag.FlagSet) { + return func(c *cobra.Command, flags []*pflag.FlagSet) { + c.AddCommand( + cmdnode.Init(flags...), + cmdnode.Start(cmdnode.WithFlagSet(flags)), + cmdnode.AuthCmd(flags...), + cmdnode.ResetStore(flags...), + cmdnode.RemoveConfigCmd(flags...), + cmdnode.UpdateConfigCmd(flags...), + ) + } +} +func init() { + bridgeCmd := cmdnode.NewBridge(WithSubcommands()) + lightCmd := cmdnode.NewLight(WithSubcommands()) + fullCmd := cmdnode.NewFull(WithSubcommands()) rootCmd.AddCommand( bridgeCmd, lightCmd, @@ -37,15 +44,13 @@ func main() { } func run() error { - rand.Seed(time.Now().Unix()) - - return rootCmd.ExecuteContext(cmd.WithEnv(context.Background())) + return rootCmd.ExecuteContext(context.Background()) } var rootCmd = &cobra.Command{ Use: "celestia [ bridge || full || light ] [subcommand]", Short: ` - ____ __ __ _ + ____ __ __ _ / ____/__ / /__ _____/ /_(_)___ _ / / / _ \/ / _ \/ ___/ __/ / __ / / /___/ __/ / __(__ ) /_/ / /_/ / @@ -53,6 +58,6 @@ var rootCmd = &cobra.Command{ `, Args: cobra.NoArgs, CompletionOptions: cobra.CompletionOptions{ - DisableDefaultCmd: true, + DisableDefaultCmd: false, }, } diff --git a/cmd/celestia/rpc.go b/cmd/celestia/rpc.go new file mode 100644 index 0000000000..11e96c2e46 --- /dev/null +++ b/cmd/celestia/rpc.go @@ -0,0 +1,32 @@ +package main + +import ( + "github.com/celestiaorg/celestia-node/cmd" + blob "github.com/celestiaorg/celestia-node/nodebuilder/blob/cmd" + das "github.com/celestiaorg/celestia-node/nodebuilder/das/cmd" + header "github.com/celestiaorg/celestia-node/nodebuilder/header/cmd" + node "github.com/celestiaorg/celestia-node/nodebuilder/node/cmd" + p2p "github.com/celestiaorg/celestia-node/nodebuilder/p2p/cmd" + share "github.com/celestiaorg/celestia-node/nodebuilder/share/cmd" + state "github.com/celestiaorg/celestia-node/nodebuilder/state/cmd" +) + +func init() { + blob.Cmd.PersistentFlags().AddFlagSet(cmd.RPCFlags()) + das.Cmd.PersistentFlags().AddFlagSet(cmd.RPCFlags()) + header.Cmd.PersistentFlags().AddFlagSet(cmd.RPCFlags()) + p2p.Cmd.PersistentFlags().AddFlagSet(cmd.RPCFlags()) + share.Cmd.PersistentFlags().AddFlagSet(cmd.RPCFlags()) + state.Cmd.PersistentFlags().AddFlagSet(cmd.RPCFlags()) + node.Cmd.PersistentFlags().AddFlagSet(cmd.RPCFlags()) + + rootCmd.AddCommand( + blob.Cmd, + das.Cmd, + header.Cmd, + p2p.Cmd, + share.Cmd, + state.Cmd, + node.Cmd, + ) +} diff --git a/cmd/celestia/version.go b/cmd/celestia/version.go index f6fba4a007..f0d379e7a7 100644 --- a/cmd/celestia/version.go +++ b/cmd/celestia/version.go @@ -2,15 +2,10 @@ package main import ( "fmt" - "runtime" "github.com/spf13/cobra" -) -var ( - buildTime string - lastCommit string - semanticVersion string + "github.com/celestiaorg/celestia-node/nodebuilder/node" ) var versionCmd = &cobra.Command{ @@ -21,9 +16,10 @@ var versionCmd = &cobra.Command{ } func printBuildInfo(_ *cobra.Command, _ []string) { - fmt.Printf("Semantic version: %s\n", semanticVersion) - fmt.Printf("Commit: %s\n", lastCommit) - fmt.Printf("Build Date: %s\n", buildTime) - fmt.Printf("System version: %s/%s\n", runtime.GOARCH, runtime.GOOS) - fmt.Printf("Golang version: %s\n", runtime.Version()) + buildInfo := node.GetBuildInfo() + fmt.Printf("Semantic version: %s\n", buildInfo.SemanticVersion) + fmt.Printf("Commit: %s\n", buildInfo.LastCommit) + fmt.Printf("Build Date: %s\n", buildInfo.BuildTime) + fmt.Printf("System version: 
%s\n", buildInfo.SystemVersion) + fmt.Printf("Golang version: %s\n", buildInfo.GolangVersion) } diff --git a/cmd/config.go b/cmd/config.go new file mode 100644 index 0000000000..4a2d322adf --- /dev/null +++ b/cmd/config.go @@ -0,0 +1,44 @@ +package cmd + +import ( + "github.com/spf13/cobra" + flag "github.com/spf13/pflag" + + "github.com/celestiaorg/celestia-node/nodebuilder" +) + +func RemoveConfigCmd(fsets ...*flag.FlagSet) *cobra.Command { + cmd := &cobra.Command{ + Use: "config-remove", + Short: "Deletes the node's config", + Args: cobra.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + return nodebuilder.RemoveConfig(StorePath(ctx)) + }, + } + + for _, set := range fsets { + cmd.Flags().AddFlagSet(set) + } + return cmd +} + +func UpdateConfigCmd(fsets ...*flag.FlagSet) *cobra.Command { + cmd := &cobra.Command{ + Use: "config-update", + Short: "Updates the node's outdated config", + Long: "Updates the node's outdated config with default values from newly-added fields. Check the config " + + " afterwards to ensure all old custom values were preserved.", + Args: cobra.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + return nodebuilder.UpdateConfig(NodeType(ctx), StorePath(ctx)) + }, + } + + for _, set := range fsets { + cmd.Flags().AddFlagSet(set) + } + return cmd +} diff --git a/cmd/docgen/openrpc.go b/cmd/docgen/openrpc.go new file mode 100644 index 0000000000..cd5b57a82c --- /dev/null +++ b/cmd/docgen/openrpc.go @@ -0,0 +1,55 @@ +package main + +import ( + "context" + "encoding/json" + "os" + + "github.com/spf13/cobra" + + "github.com/celestiaorg/celestia-node/api/docgen" + "github.com/celestiaorg/celestia-node/nodebuilder" +) + +var rootCmd = &cobra.Command{ + Use: "docgen [packages]", + Short: "docgen generates the openrpc documentation for Celestia Node packages", + RunE: func(cmd *cobra.Command, moduleNames []string) error { + // 1. Open the respective nodebuilder/X/service.go files for AST parsing + nodeComments, permComments := docgen.ParseCommentsFromNodebuilderModules(moduleNames...) + + // 2. Create an OpenRPC document from the map of comments + hardcoded metadata + doc := docgen.NewOpenRPCDocument(nodeComments, permComments) + + // 3. Register the client wrapper interface on the document + for moduleName, module := range nodebuilder.PackageToAPI { + doc.RegisterReceiverName(moduleName, module) + } + + // 4. Call doc.Discover() + d, err := doc.Discover() + if err != nil { + return err + } + + // 5. Print to Stdout + jsonOut, err := json.MarshalIndent(d, "", " ") + if err != nil { + return err + } + + _, err = os.Stdout.Write(jsonOut) + return err + }, +} + +func main() { + err := run() + if err != nil { + os.Exit(1) + } +} + +func run() error { + return rootCmd.ExecuteContext(context.Background()) +} diff --git a/cmd/env.go b/cmd/env.go index 8c4c0e155b..f9860a2de8 100644 --- a/cmd/env.go +++ b/cmd/env.go @@ -2,60 +2,81 @@ package cmd import ( "context" - "fmt" - "github.com/celestiaorg/celestia-node/node" + logging "github.com/ipfs/go-log/v2" + "go.uber.org/fx" + + "github.com/celestiaorg/celestia-node/nodebuilder" + "github.com/celestiaorg/celestia-node/nodebuilder/node" + "github.com/celestiaorg/celestia-node/nodebuilder/p2p" ) -// Env is an environment for CLI commands. -// It can be used to: -// 1. Propagate values from parent to child commands. -// 2. To group common logic that multiple commands rely on. -// Usage can be extended. 
-// TODO(@Wondertan): We should move to using context only instead.
-// Env, in fact, only keeps some additional fields which should be
-// kept in the context directly using WithValue (#965)
-type Env struct {
-	NodeType  node.Type
-	StorePath string
-
-	opts []node.Option
-}
-
-// WithEnv wraps given ctx with Env.
-func WithEnv(ctx context.Context) context.Context {
-	_, err := GetEnv(ctx)
-	if err == nil {
-		panic("cmd: only one Env is allowed to be set in a ctx")
-	}
+var log = logging.Logger("cmd")
+
+// NodeType reads the node type from the context.
+func NodeType(ctx context.Context) node.Type {
+	return ctx.Value(nodeTypeKey{}).(node.Type)
+}

-	return context.WithValue(ctx, envCtxKey{}, &Env{})
+// Network reads the network from the context.
+func Network(ctx context.Context) p2p.Network {
+	return ctx.Value(networkKey{}).(p2p.Network)
 }

-// GetEnv takes Env from the given ctx, if any.
-func GetEnv(ctx context.Context) (*Env, error) {
-	env, ok := ctx.Value(envCtxKey{}).(*Env)
+// StorePath reads the store path from the context.
+func StorePath(ctx context.Context) string {
+	return ctx.Value(storePathKey{}).(string)
+}
+
+// NodeConfig reads the node config from the context.
+func NodeConfig(ctx context.Context) nodebuilder.Config {
+	cfg, ok := ctx.Value(configKey{}).(nodebuilder.Config)
 	if !ok {
-		return nil, fmt.Errorf("cmd: Env is not set in ctx.Context")
+		nodeType := NodeType(ctx)
+		cfg = *nodebuilder.DefaultConfig(nodeType)
 	}
+	return cfg
+}

-	return env, nil
+// WithNodeType sets the node type in the given context.
+func WithNodeType(ctx context.Context, tp node.Type) context.Context {
+	return context.WithValue(ctx, nodeTypeKey{}, tp)
 }

-// SetNodeType sets Node Type to the Env.
-func (env *Env) SetNodeType(tp node.Type) {
-	env.NodeType = tp
+// WithNetwork sets the network in the given context.
+func WithNetwork(ctx context.Context, network p2p.Network) context.Context {
+	return context.WithValue(ctx, networkKey{}, network)
 }

-// Options returns Node Options parsed from Environment(Flags, ENV vars, etc)
-func (env *Env) Options() []node.Option {
-	return env.opts
+// WithStorePath sets the store path in the given context.
+func WithStorePath(ctx context.Context, storePath string) context.Context {
+	return context.WithValue(ctx, storePathKey{}, storePath)
 }

-// AddOptions add new options to Env.
-func (env *Env) AddOptions(opts ...node.Option) {
-	env.opts = append(env.opts, opts...)
+// NodeOptions returns config options parsed from the environment (flags, ENV vars, etc.)
+func NodeOptions(ctx context.Context) []fx.Option {
+	options, ok := ctx.Value(optionsKey{}).([]fx.Option)
+	if !ok {
+		return []fx.Option{}
+	}
+	return options
+}
+
+// WithNodeOptions adds new options to the given context.
+func WithNodeOptions(ctx context.Context, opts ...fx.Option) context.Context {
+	options := NodeOptions(ctx)
+	return context.WithValue(ctx, optionsKey{}, append(options, opts...))
 }

-// envCtxKey is a key used to identify Env on a ctx.Context.
-type envCtxKey struct{}
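With the Env struct gone, every value a command needs now rides on the context.Context under an unexported struct{} key, with one typed getter/setter pair per value. A minimal sketch of the pattern using a hypothetical value (the key and helpers below are illustrative, not part of this diff):

	type debugKey struct{}

	// WithDebug stores the flag on the context.
	func WithDebug(ctx context.Context, on bool) context.Context {
		return context.WithValue(ctx, debugKey{}, on)
	}

	// Debug reads it back; as with NodeType and Network above, the type
	// assertion panics if the value was never set.
	func Debug(ctx context.Context) bool {
		return ctx.Value(debugKey{}).(bool)
	}

Unexported struct{} keys are zero-sized and cannot collide with keys defined in other packages, which is why each value gets its own key type below.

+// WithNodeConfig sets the node config in the given context.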
+func WithNodeConfig(ctx context.Context, config *nodebuilder.Config) context.Context { + return context.WithValue(ctx, configKey{}, *config) +} + +type ( + optionsKey struct{} + configKey struct{} + storePathKey struct{} + nodeTypeKey struct{} + networkKey struct{} +) diff --git a/cmd/flags_core.go b/cmd/flags_core.go deleted file mode 100644 index 3c05e87ed0..0000000000 --- a/cmd/flags_core.go +++ /dev/null @@ -1,90 +0,0 @@ -package cmd - -import ( - "fmt" - "strconv" - "strings" - - "github.com/spf13/cobra" - flag "github.com/spf13/pflag" - - "github.com/celestiaorg/celestia-node/node" -) - -var ( - coreFlag = "core.ip" - coreRPCFlag = "core.rpc.port" - coreGRPCFlag = "core.grpc.port" -) - -// CoreFlags gives a set of hardcoded Core flags. -func CoreFlags() *flag.FlagSet { - flags := &flag.FlagSet{} - - flags.String( - coreFlag, - "", - "Indicates node to connect to the given core node. "+ - "Example: , 127.0.0.1. Assumes RPC port 26657 and gRPC port 9009 as default unless otherwise specified.", - ) - flags.String( - coreRPCFlag, - "26657", - "Set a custom RPC port for the core node connection. The --core.ip flag must also be provided.", - ) - flags.String( - coreGRPCFlag, - "9090", - "Set a custom gRPC port for the core node connection. The --core.ip flag must also be provided.", - ) - - return flags -} - -// ParseCoreFlags parses Core flags from the given cmd and applies values to Env. -func ParseCoreFlags(cmd *cobra.Command, env *Env) error { - coreIP := cmd.Flag(coreFlag).Value.String() - if coreIP == "" { - if cmd.Flag(coreGRPCFlag).Changed || cmd.Flag(coreRPCFlag).Changed { - return fmt.Errorf("cannot specify RPC/gRPC ports without specifying an IP address for --core.ip") - } - return nil - } - // sanity check given core ip addr and strip leading protocol - ip, err := sanityCheckIP(coreIP) - if err != nil { - return err - } - - rpc := cmd.Flag(coreRPCFlag).Value.String() - // sanity check rpc endpoint - _, err = strconv.Atoi(rpc) - if err != nil { - return err - } - env.AddOptions(node.WithRemoteCoreIP(ip), node.WithRemoteCorePort(rpc)) - - grpc := cmd.Flag(coreGRPCFlag).Value.String() - // sanity check gRPC endpoint - _, err = strconv.Atoi(grpc) - if err != nil { - return err - } - env.AddOptions(node.WithGRPCPort(grpc)) - return nil -} - -// sanityCheckIP trims leading protocol scheme and port from the given -// IP address if present. 
-func sanityCheckIP(ip string) (string, error) { - original := ip - ip = strings.TrimPrefix(ip, "http://") - ip = strings.TrimPrefix(ip, "https://") - ip = strings.TrimPrefix(ip, "tcp://") - ip = strings.TrimSuffix(ip, "/") - ip = strings.Split(ip, ":")[0] - if ip == "" { - return "", fmt.Errorf("invalid IP addr given: %s", original) - } - return ip, nil -} diff --git a/cmd/flags_key.go b/cmd/flags_key.go deleted file mode 100644 index 58c6a0e753..0000000000 --- a/cmd/flags_key.go +++ /dev/null @@ -1,25 +0,0 @@ -package cmd - -import ( - "github.com/spf13/cobra" - flag "github.com/spf13/pflag" - - "github.com/celestiaorg/celestia-node/node" -) - -var keyringAccNameFlag = "keyring.accname" - -func KeyFlags() *flag.FlagSet { - flags := &flag.FlagSet{} - - flags.String(keyringAccNameFlag, "", "Directs node's keyring signer to use the key prefixed with the "+ - "given string.") - return flags -} - -func ParseKeyFlags(cmd *cobra.Command, env *Env) { - keyringAccName := cmd.Flag(keyringAccNameFlag).Value.String() - if keyringAccName != "" { - env.AddOptions(node.WithKeyringAccName(keyringAccName)) - } -} diff --git a/cmd/flags_misc.go b/cmd/flags_misc.go index 60363cb79c..cd539bde4c 100644 --- a/cmd/flags_misc.go +++ b/cmd/flags_misc.go @@ -1,34 +1,27 @@ package cmd import ( + "context" "fmt" - "log" "net/http" "net/http/pprof" "strings" - "time" logging "github.com/ipfs/go-log/v2" + otelpyroscope "github.com/pyroscope-io/otel-profiling-go" "github.com/spf13/cobra" flag "github.com/spf13/pflag" - "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp" "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp" - "go.opentelemetry.io/otel/metric/global" - controller "go.opentelemetry.io/otel/sdk/metric/controller/basic" - processor "go.opentelemetry.io/otel/sdk/metric/processor/basic" - selector "go.opentelemetry.io/otel/sdk/metric/selector/simple" - "go.opentelemetry.io/otel/sdk/resource" - tracesdk "go.opentelemetry.io/otel/sdk/trace" - semconv "go.opentelemetry.io/otel/semconv/v1.10.0" "github.com/celestiaorg/celestia-node/logs" - "github.com/celestiaorg/celestia-node/node" + "github.com/celestiaorg/celestia-node/nodebuilder" + modp2p "github.com/celestiaorg/celestia-node/nodebuilder/p2p" ) var ( - logLevelFlag = "log.level" - logLevelModuleFlag = "log.level.module" + LogLevelFlag = "log.level" + LogLevelModuleFlag = "log.level.module" pprofFlag = "pprof" tracingFlag = "tracing" tracingEndpointFlag = "tracing.endpoint" @@ -36,6 +29,10 @@ var ( metricsFlag = "metrics" metricsEndpointFlag = "metrics.endpoint" metricsTlS = "metrics.tls" + p2pMetrics = "p2p.metrics" + pyroscopeFlag = "pyroscope" + pyroscopeTracing = "pyroscope.tracing" + pyroscopeEndpoint = "pyroscope.endpoint" ) // MiscFlags gives a set of hardcoded miscellaneous flags. @@ -43,14 +40,14 @@ func MiscFlags() *flag.FlagSet { flags := &flag.FlagSet{} flags.String( - logLevelFlag, + LogLevelFlag, "INFO", `DEBUG, INFO, WARN, ERROR, DPANIC, PANIC, FATAL and their lower-case forms`, ) flags.StringSlice( - logLevelModuleFlag, + LogLevelModuleFlag, nil, ":, e.g. pubsub:debug", ) @@ -97,34 +94,58 @@ and their lower-case forms`, "Enable TLS connection to OTLP metric backend", ) + flags.Bool( + p2pMetrics, + false, + "Enable libp2p metrics", + ) + + flags.Bool( + pyroscopeFlag, + false, + "Enables Pyroscope profiling", + ) + + flags.Bool( + pyroscopeTracing, + false, + "Enables Pyroscope tracing integration. 
Depends on --tracing", + ) + + flags.String( + pyroscopeEndpoint, + "http://localhost:4040", + "Sets HTTP endpoint for Pyroscope profiles to be exported to. Depends on '--pyroscope'", + ) + return flags } // ParseMiscFlags parses miscellaneous flags from the given cmd and applies values to Env. -func ParseMiscFlags(cmd *cobra.Command, env *Env) error { - logLevel := cmd.Flag(logLevelFlag).Value.String() +func ParseMiscFlags(ctx context.Context, cmd *cobra.Command) (context.Context, error) { + logLevel := cmd.Flag(LogLevelFlag).Value.String() if logLevel != "" { level, err := logging.LevelFromString(logLevel) if err != nil { - return fmt.Errorf("cmd: while parsing '%s': %w", logLevelFlag, err) + return ctx, fmt.Errorf("cmd: while parsing '%s': %w", LogLevelFlag, err) } logs.SetAllLoggers(level) } - logModules, err := cmd.Flags().GetStringSlice(logLevelModuleFlag) + logModules, err := cmd.Flags().GetStringSlice(LogLevelModuleFlag) if err != nil { panic(err) } for _, ll := range logModules { params := strings.Split(ll, ":") if len(params) != 2 { - return fmt.Errorf("cmd: %s arg must be in form :, e.g. pubsub:debug", logLevelModuleFlag) + return ctx, fmt.Errorf("cmd: %s arg must be in form :, e.g. pubsub:debug", LogLevelModuleFlag) } err := logging.SetLogLevel(params[0], params[1]) if err != nil { - return err + return ctx, err } } @@ -144,10 +165,29 @@ func ParseMiscFlags(cmd *cobra.Command, env *Env) error { mux.HandleFunc("/debug/pprof/profile", pprof.Profile) mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol) mux.HandleFunc("/debug/pprof/trace", pprof.Trace) - log.Println(http.ListenAndServe("0.0.0.0:6000", mux)) + err := http.ListenAndServe("0.0.0.0:6000", mux) //nolint:gosec + if err != nil { + log.Fatalw("failed to start pprof server", "err", err) + } else { + log.Info("started pprof server on port 6000") + } }() } + ok, err = cmd.Flags().GetBool(pyroscopeFlag) + if err != nil { + panic(err) + } + + if ok { + ctx = WithNodeOptions(ctx, + nodebuilder.WithPyroscope( + cmd.Flag(pyroscopeEndpoint).Value.String(), + NodeType(ctx), + ), + ) + } + ok, err = cmd.Flags().GetBool(tracingFlag) if err != nil { panic(err) @@ -164,22 +204,22 @@ func ParseMiscFlags(cmd *cobra.Command, env *Env) error { opts = append(opts, otlptracehttp.WithInsecure()) } - exp, err := otlptracehttp.New(cmd.Context(), opts...) + pyroOpts := make([]otelpyroscope.Option, 0) + ok, err = cmd.Flags().GetBool(pyroscopeTracing) if err != nil { - return err + panic(err) } - - tp := tracesdk.NewTracerProvider( - // Always be sure to batch in production. - tracesdk.WithBatcher(exp), - // Record information about this application in a Resource. - tracesdk.WithResource(resource.NewWithAttributes( - semconv.SchemaURL, - semconv.ServiceNameKey.String(fmt.Sprintf("Celestia-%s", env.NodeType.String())), - // TODO(@Wondertan): Versioning: semconv.ServiceVersionKey - )), - ) - otel.SetTracerProvider(tp) + if ok { + pyroOpts = append(pyroOpts, + otelpyroscope.WithAppName("celestia.da-node"), + otelpyroscope.WithPyroscopeURL(cmd.Flag(pyroscopeEndpoint).Value.String()), + otelpyroscope.WithRootSpanOnly(true), + otelpyroscope.WithAddSpanName(true), + otelpyroscope.WithProfileURL(true), + otelpyroscope.WithProfileBaselineURL(true), + ) + } + ctx = WithNodeOptions(ctx, nodebuilder.WithTraces(opts, pyroOpts)) } ok, err = cmd.Flags().GetBool(metricsFlag) @@ -198,33 +238,21 @@ func ParseMiscFlags(cmd *cobra.Command, env *Env) error { opts = append(opts, otlpmetrichttp.WithInsecure()) } - exp, err := otlpmetrichttp.New(cmd.Context(), opts...) 
- if err != nil { - return err - } + ctx = WithNodeOptions(ctx, nodebuilder.WithMetrics(opts, NodeType(ctx))) + } - pusher := controller.New( - processor.NewFactory( - selector.NewWithHistogramDistribution(), - exp, - ), - controller.WithExporter(exp), - controller.WithCollectPeriod(2*time.Second), - controller.WithResource(resource.NewWithAttributes( - semconv.SchemaURL, - semconv.ServiceNameKey.String(fmt.Sprintf("Celestia-%s", env.NodeType.String())), - // TODO(@Wondertan): Versioning: semconv.ServiceVersionKey - )), - ) + ok, err = cmd.Flags().GetBool(p2pMetrics) + if err != nil { + panic(err) + } - err = pusher.Start(cmd.Context()) - if err != nil { - return err + if ok { + if metricsEnabled, _ := cmd.Flags().GetBool(metricsFlag); !metricsEnabled { + log.Error("--p2p.metrics used without --metrics being enabled") + } else { + ctx = WithNodeOptions(ctx, modp2p.WithMetrics()) } - global.SetMeterProvider(pusher) - - env.AddOptions(node.WithMetrics(true)) } - return err + return ctx, err } diff --git a/cmd/flags_node.go b/cmd/flags_node.go index 776d06ce03..ef5af26580 100644 --- a/cmd/flags_node.go +++ b/cmd/flags_node.go @@ -1,27 +1,32 @@ package cmd import ( + "context" "fmt" + "os" + "path/filepath" "strings" + "github.com/mitchellh/go-homedir" "github.com/spf13/cobra" flag "github.com/spf13/pflag" - "github.com/celestiaorg/celestia-node/node" + "github.com/celestiaorg/celestia-node/nodebuilder" + "github.com/celestiaorg/celestia-node/nodebuilder/p2p" ) -var ( +const ( nodeStoreFlag = "node.store" nodeConfigFlag = "node.config" ) // NodeFlags gives a set of hardcoded Node package flags. -func NodeFlags(tp node.Type) *flag.FlagSet { +func NodeFlags() *flag.FlagSet { flags := &flag.FlagSet{} flags.String( nodeStoreFlag, - fmt.Sprintf("~/.celestia-%s", strings.ToLower(tp.String())), + "", "The path to root/home directory of your Celestia Node Store", ) flags.String( @@ -34,18 +39,62 @@ func NodeFlags(tp node.Type) *flag.FlagSet { } // ParseNodeFlags parses Node flags from the given cmd and applies values to Env. -func ParseNodeFlags(cmd *cobra.Command, env *Env) error { - env.StorePath = cmd.Flag(nodeStoreFlag).Value.String() +func ParseNodeFlags(ctx context.Context, cmd *cobra.Command, network p2p.Network) (context.Context, error) { + store := cmd.Flag(nodeStoreFlag).Value.String() + if store == "" { + tp := NodeType(ctx) + var err error + store, err = DefaultNodeStorePath(tp.String(), network.String()) + if err != nil { + return ctx, err + } + } + ctx = WithStorePath(ctx, store) nodeConfig := cmd.Flag(nodeConfigFlag).Value.String() if nodeConfig != "" { - cfg, err := node.LoadConfig(nodeConfig) + // try to load config from given path + cfg, err := nodebuilder.LoadConfig(nodeConfig) if err != nil { - return fmt.Errorf("cmd: while parsing '%s': %w", nodeConfigFlag, err) + return ctx, fmt.Errorf("cmd: while parsing '%s': %w", nodeConfigFlag, err) } - env.AddOptions(node.WithConfig(cfg)) + ctx = WithNodeConfig(ctx, cfg) + } else { + // check if config already exists at the store path and load it + path := StorePath(ctx) + expanded, err := homedir.Expand(filepath.Clean(path)) + if err != nil { + return ctx, err + } + cfg, err := nodebuilder.LoadConfig(filepath.Join(expanded, "config.toml")) + if err == nil { + ctx = WithNodeConfig(ctx, cfg) + } } + return ctx, nil +} + +// DefaultNodeStorePath constructs the default node store path using the given +// node type and network. 
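A quick illustration of the resulting layout (assuming $HOME=/home/user, CELESTIA_HOME unset, and a testnet named "mocha" purely for the example):

	path, _ := DefaultNodeStorePath("Light", "mocha")
	// path == "/home/user/.celestia-light-mocha"; on mainnet the network
	// suffix is dropped, giving "/home/user/.celestia-light"

Keeping mainnet paths suffix-free preserves the pre-existing default store location, which the deleted NodeFlags default ("~/.celestia-<type>") used for every network.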
+func DefaultNodeStorePath(tp string, network string) (string, error) { + home := os.Getenv("CELESTIA_HOME") - return nil + if home == "" { + var err error + home, err = os.UserHomeDir() + if err != nil { + return "", err + } + } + if network == p2p.Mainnet.String() { + return fmt.Sprintf("%s/.celestia-%s", home, strings.ToLower(tp)), nil + } + // only include network name in path for testnets and custom networks + return fmt.Sprintf( + "%s/.celestia-%s-%s", + home, + strings.ToLower(tp), + strings.ToLower(network), + ), nil } diff --git a/cmd/flags_p2p.go b/cmd/flags_p2p.go deleted file mode 100644 index db7733f01f..0000000000 --- a/cmd/flags_p2p.go +++ /dev/null @@ -1,51 +0,0 @@ -package cmd - -import ( - "fmt" - - "github.com/multiformats/go-multiaddr" - "github.com/spf13/cobra" - flag "github.com/spf13/pflag" - - "github.com/celestiaorg/celestia-node/node" -) - -var ( - p2pMutualFlag = "p2p.mutual" -) - -// P2PFlags gives a set of p2p flags. -func P2PFlags() *flag.FlagSet { - flags := &flag.FlagSet{} - - flags.StringSlice( - p2pMutualFlag, - nil, - `Comma-separated multiaddresses of mutual peers to keep a prioritized connection with. -Such connection is immune to peer scoring slashing and connection manager trimming. -Peers must bidirectionally point to each other. (Format: multiformats.io/multiaddr) -`, - ) - - return flags -} - -// ParseP2PFlags parses P2P flags from the given cmd and applies values to Env. -func ParseP2PFlags(cmd *cobra.Command, env *Env) error { - mutualPeers, err := cmd.Flags().GetStringSlice(p2pMutualFlag) - if err != nil { - return err - } - - for _, peer := range mutualPeers { - _, err := multiaddr.NewMultiaddr(peer) - if err != nil { - return fmt.Errorf("cmd: while parsing '%s': %w", p2pMutualFlag, err) - } - } - - if len(mutualPeers) != 0 { - env.AddOptions(node.WithMutualPeers(mutualPeers)) - } - return nil -} diff --git a/cmd/flags_rpc.go b/cmd/flags_rpc.go deleted file mode 100644 index d52a0cfc8d..0000000000 --- a/cmd/flags_rpc.go +++ /dev/null @@ -1,44 +0,0 @@ -package cmd - -import ( - "github.com/spf13/cobra" - flag "github.com/spf13/pflag" - - "github.com/celestiaorg/celestia-node/node" -) - -var ( - addrFlag = "rpc.addr" - portFlag = "rpc.port" -) - -// RPCFlags gives a set of hardcoded node/rpc package flags. -func RPCFlags() *flag.FlagSet { - flags := &flag.FlagSet{} - - flags.String( - addrFlag, - "", - "Set a custom RPC listen address (default: localhost)", - ) - flags.String( - portFlag, - "", - "Set a custom RPC port (default: 26658)", - ) - - return flags -} - -// ParseRPCFlags parses RPC flags from the given cmd and applies values to Env. -func ParseRPCFlags(cmd *cobra.Command, env *Env) error { - addr := cmd.Flag(addrFlag).Value.String() - if addr != "" { - env.AddOptions(node.WithRPCAddress(addr)) - } - port := cmd.Flag(portFlag).Value.String() - if port != "" { - env.AddOptions(node.WithRPCPort(port)) - } - return nil -} diff --git a/cmd/init.go b/cmd/init.go index f48c16ac4a..5eaa465701 100644 --- a/cmd/init.go +++ b/cmd/init.go @@ -4,7 +4,7 @@ import ( "github.com/spf13/cobra" flag "github.com/spf13/pflag" - "github.com/celestiaorg/celestia-node/node" + "github.com/celestiaorg/celestia-node/nodebuilder" ) // Init constructs a CLI command to initialize Celestia Node of any type with the given flags. @@ -14,12 +14,9 @@ func Init(fsets ...*flag.FlagSet) *cobra.Command { Short: "Initialization for Celestia Node. 
Passed flags have persisted effect.", Args: cobra.NoArgs, RunE: func(cmd *cobra.Command, args []string) error { - env, err := GetEnv(cmd.Context()) - if err != nil { - return err - } + ctx := cmd.Context() - return node.Init(env.StorePath, env.NodeType, env.Options()...) + return nodebuilder.Init(NodeConfig(ctx), StorePath(ctx), NodeType(ctx)) }, } for _, set := range fsets { diff --git a/cmd/node.go b/cmd/node.go new file mode 100644 index 0000000000..51ac4a6d2e --- /dev/null +++ b/cmd/node.go @@ -0,0 +1,88 @@ +package cmd + +import ( + "github.com/spf13/cobra" + "github.com/spf13/pflag" + + "github.com/celestiaorg/celestia-node/nodebuilder/core" + "github.com/celestiaorg/celestia-node/nodebuilder/gateway" + "github.com/celestiaorg/celestia-node/nodebuilder/header" + "github.com/celestiaorg/celestia-node/nodebuilder/node" + "github.com/celestiaorg/celestia-node/nodebuilder/p2p" + "github.com/celestiaorg/celestia-node/nodebuilder/rpc" + "github.com/celestiaorg/celestia-node/nodebuilder/state" +) + +func NewBridge(options ...func(*cobra.Command, []*pflag.FlagSet)) *cobra.Command { + flags := []*pflag.FlagSet{ + NodeFlags(), + p2p.Flags(), + MiscFlags(), + core.Flags(), + rpc.Flags(), + gateway.Flags(), + state.Flags(), + } + cmd := &cobra.Command{ + Use: "bridge [subcommand]", + Args: cobra.NoArgs, + Short: "Manage your Bridge node", + PersistentPreRunE: func(cmd *cobra.Command, args []string) error { + return PersistentPreRunEnv(cmd, node.Bridge, args) + }, + } + for _, option := range options { + option(cmd, flags) + } + return cmd +} + +func NewLight(options ...func(*cobra.Command, []*pflag.FlagSet)) *cobra.Command { + flags := []*pflag.FlagSet{ + NodeFlags(), + p2p.Flags(), + header.Flags(), + MiscFlags(), + core.Flags(), + rpc.Flags(), + gateway.Flags(), + state.Flags(), + } + cmd := &cobra.Command{ + Use: "light [subcommand]", + Args: cobra.NoArgs, + Short: "Manage your Light node", + PersistentPreRunE: func(cmd *cobra.Command, args []string) error { + return PersistentPreRunEnv(cmd, node.Light, args) + }, + } + for _, option := range options { + option(cmd, flags) + } + return cmd +} + +func NewFull(options ...func(*cobra.Command, []*pflag.FlagSet)) *cobra.Command { + flags := []*pflag.FlagSet{ + NodeFlags(), + p2p.Flags(), + header.Flags(), + MiscFlags(), + core.Flags(), + rpc.Flags(), + gateway.Flags(), + state.Flags(), + } + cmd := &cobra.Command{ + Use: "full [subcommand]", + Args: cobra.NoArgs, + Short: "Manage your Full node", + PersistentPreRunE: func(cmd *cobra.Command, args []string) error { + return PersistentPreRunEnv(cmd, node.Full, args) + }, + } + for _, option := range options { + option(cmd, flags) + } + return cmd +} diff --git a/cmd/reset_store.go b/cmd/reset_store.go new file mode 100644 index 0000000000..d386549efa --- /dev/null +++ b/cmd/reset_store.go @@ -0,0 +1,26 @@ +package cmd + +import ( + "github.com/spf13/cobra" + flag "github.com/spf13/pflag" + + "github.com/celestiaorg/celestia-node/nodebuilder" +) + +// ResetStore constructs a CLI command to reset the store of Celestia Node. +func ResetStore(fsets ...*flag.FlagSet) *cobra.Command { + cmd := &cobra.Command{ + Use: "unsafe-reset-store", + Short: "Resets the node's store to a new state. 
Leaves the keystore and config intact.",
+		Args:  cobra.NoArgs,
+		RunE: func(cmd *cobra.Command, args []string) error {
+			ctx := cmd.Context()
+
+			return nodebuilder.Reset(StorePath(ctx), NodeType(ctx))
+		},
+	}
+	for _, set := range fsets {
+		cmd.Flags().AddFlagSet(set)
+	}
+	return cmd
+}
diff --git a/cmd/rpc.go b/cmd/rpc.go
new file mode 100644
index 0000000000..1935069229
--- /dev/null
+++ b/cmd/rpc.go
@@ -0,0 +1,98 @@
+package cmd
+
+import (
+	"context"
+	"errors"
+	"fmt"
+
+	"github.com/spf13/cobra"
+	flag "github.com/spf13/pflag"
+
+	rpc "github.com/celestiaorg/celestia-node/api/rpc/client"
+	"github.com/celestiaorg/celestia-node/api/rpc/perms"
+	nodemod "github.com/celestiaorg/celestia-node/nodebuilder/node"
+)
+
+const (
+	// defaultRPCAddress is the default address to dial to
+	defaultRPCAddress = "http://localhost:26658"
+)
+
+var (
+	requestURL    string
+	authTokenFlag string
+)
+
+func RPCFlags() *flag.FlagSet {
+	fset := &flag.FlagSet{}
+
+	fset.StringVar(
+		&requestURL,
+		"url",
+		defaultRPCAddress,
+		"Request URL",
+	)
+
+	fset.StringVar(
+		&authTokenFlag,
+		"token",
+		"",
+		"Authorization token",
+	)
+
+	storeFlag := NodeFlags().Lookup(nodeStoreFlag)
+	fset.AddFlag(storeFlag)
+	return fset
+}
+
+func InitClient(cmd *cobra.Command, _ []string) error {
+	if authTokenFlag == "" {
+		storePath := ""
+		if !cmd.Flag(nodeStoreFlag).Changed {
+			return errors.New("cannot access the auth token: neither the token nor the node.store flag was specified")
+		}
+		storePath = cmd.Flag(nodeStoreFlag).Value.String()
+		token, err := getToken(storePath)
+		if err != nil {
+			return fmt.Errorf("cannot access the auth token: %w", err)
+		}
+		authTokenFlag = token
+	}
+
+	client, err := rpc.NewClient(cmd.Context(), requestURL, authTokenFlag)
+	if err != nil {
+		return err
+	}
+
+	ctx := context.WithValue(cmd.Context(), rpcClientKey{}, client)
+	cmd.SetContext(ctx)
+	return nil
+}
+
+func getToken(path string) (string, error) {
+	if path == "" {
+		return "", errors.New("root directory was not specified")
+	}
+
+	ks, err := newKeystore(path)
+	if err != nil {
+		return "", err
+	}
+
+	key, err := ks.Get(nodemod.SecretName)
+	if err != nil {
+		fmt.Printf("error getting the JWT secret: %v", err)
+		return "", err
+	}
+	return buildJWTToken(key.Body, perms.AllPerms)
+}
+
+type rpcClientKey struct{}
+
+func ParseClientFromCtx(ctx context.Context) (*rpc.Client, error) {
+	client, ok := ctx.Value(rpcClientKey{}).(*rpc.Client)
+	if !ok {
+		return nil, errors.New("rpc client was not set")
+	}
+	return client, nil
+}
diff --git a/cmd/start.go b/cmd/start.go
index 25b5779c42..281dfcc0e4 100644
--- a/cmd/start.go
+++ b/cmd/start.go
@@ -1,17 +1,23 @@
 package cmd

 import (
+	"errors"
+	"os"
 	"os/signal"
+	"path/filepath"
 	"syscall"

+	"github.com/cosmos/cosmos-sdk/crypto/keyring"
 	"github.com/spf13/cobra"
-	flag "github.com/spf13/pflag"

-	"github.com/celestiaorg/celestia-node/node"
+	"github.com/celestiaorg/celestia-app/app"
+	"github.com/celestiaorg/celestia-app/app/encoding"
+
+	"github.com/celestiaorg/celestia-node/nodebuilder"
 )

 // Start constructs a CLI command to start Celestia Node daemon of any type with the given flags.
-func Start(fsets ...*flag.FlagSet) *cobra.Command {
+func Start(options ...func(*cobra.Command)) *cobra.Command {
 	cmd := &cobra.Command{
 		Use: "start",
 		Short: `Starts Node daemon. First stopping signal gracefully stops the Node and second terminates it.
@@ -19,18 +25,33 @@ Options passed on start override configuration options only on start and are not Aliases: []string{"run", "daemon"}, Args: cobra.NoArgs, SilenceUsage: true, - RunE: func(cmd *cobra.Command, args []string) error { - env, err := GetEnv(cmd.Context()) + RunE: func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + + // override config with all modifiers passed on start + cfg := NodeConfig(ctx) + + storePath := StorePath(ctx) + keysPath := filepath.Join(storePath, "keys") + + // construct ring + // TODO @renaynay: Include option for setting custom `userInput` parameter with + // implementation of https://github.com/celestiaorg/celestia-node/issues/415. + encConf := encoding.MakeConfig(app.ModuleEncodingRegisters...) + ring, err := keyring.New(app.Name, cfg.State.KeyringBackend, keysPath, os.Stdin, encConf.Codec) if err != nil { return err } - store, err := node.OpenStore(env.StorePath) + store, err := nodebuilder.OpenStore(storePath, ring) if err != nil { return err } + defer func() { + err = errors.Join(err, store.Close()) + }() - nd, err := node.New(env.NodeType, store, env.Options()...) + nd, err := nodebuilder.NewWithConfig(NodeType(ctx), Network(ctx), store, &cfg, NodeOptions(ctx)...) if err != nil { return err } @@ -47,16 +68,12 @@ Options passed on start override configuration options only on start and are not ctx, cancel = signal.NotifyContext(cmd.Context(), syscall.SIGINT, syscall.SIGTERM) defer cancel() - err = nd.Stop(ctx) - if err != nil { - return err - } - - return store.Close() + return nd.Stop(ctx) }, } - for _, set := range fsets { - cmd.Flags().AddFlagSet(set) + // Apply each passed option to the command + for _, option := range options { + option(cmd) } return cmd } diff --git a/cmd/util.go b/cmd/util.go new file mode 100644 index 0000000000..08fa02155b --- /dev/null +++ b/cmd/util.go @@ -0,0 +1,137 @@ +package cmd + +import ( + "encoding/base64" + "encoding/hex" + "encoding/json" + "fmt" + "os" + "strings" + + "github.com/spf13/cobra" + flag "github.com/spf13/pflag" + + "github.com/celestiaorg/celestia-node/nodebuilder/core" + "github.com/celestiaorg/celestia-node/nodebuilder/gateway" + "github.com/celestiaorg/celestia-node/nodebuilder/header" + "github.com/celestiaorg/celestia-node/nodebuilder/node" + "github.com/celestiaorg/celestia-node/nodebuilder/p2p" + rpc_cfg "github.com/celestiaorg/celestia-node/nodebuilder/rpc" + "github.com/celestiaorg/celestia-node/nodebuilder/state" + "github.com/celestiaorg/celestia-node/share" +) + +func PrintOutput(data interface{}, err error, formatData func(interface{}) interface{}) error { + switch { + case err != nil: + data = err.Error() + case formatData != nil: + data = formatData(data) + } + + resp := struct { + Result interface{} `json:"result"` + }{ + Result: data, + } + + bytes, err := json.MarshalIndent(resp, "", " ") + if err != nil { + return err + } + fmt.Fprintln(os.Stdout, string(bytes)) + return nil +} + +// ParseV0Namespace parses a namespace from a base64 or hex string. The param +// is expected to be the user-specified portion of a v0 namespace ID (i.e. the +// last 10 bytes). +func ParseV0Namespace(param string) (share.Namespace, error) { + userBytes, err := DecodeToBytes(param) + if err != nil { + return nil, err + } + + // if the namespace ID is <= 10 bytes, left pad it with 0s + return share.NewBlobNamespaceV0(userBytes) +} + +// DecodeToBytes decodes a Base64 or hex input string into a byte slice. 
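Both input encodings round-trip to the same namespace, as the table-driven test later in this diff exercises. A compact sketch of the intended use of ParseV0Namespace above, with values taken from that test:

	ns, err := ParseV0Namespace("0x42690c204d39600fddd3") // 10-byte hex ID
	// equivalent: ParseV0Namespace("QmkMIE05YA/d0w==")   // same bytes, base64
	if err != nil {
		return err
	}
	_ = ns // 29-byte v0 namespace: version byte + 18 zero bytes + the 10-byte ID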
+func DecodeToBytes(param string) ([]byte, error) { + if strings.HasPrefix(param, "0x") { + decoded, err := hex.DecodeString(param[2:]) + if err != nil { + return nil, fmt.Errorf("error decoding namespace ID: %w", err) + } + return decoded, nil + } + // otherwise, it's just a base64 string + decoded, err := base64.StdEncoding.DecodeString(param) + if err != nil { + return nil, fmt.Errorf("error decoding namespace ID: %w", err) + } + return decoded, nil +} + +func PersistentPreRunEnv(cmd *cobra.Command, nodeType node.Type, _ []string) error { + var ( + ctx = cmd.Context() + err error + ) + + ctx = WithNodeType(ctx, nodeType) + + parsedNetwork, err := p2p.ParseNetwork(cmd) + if err != nil { + return err + } + ctx = WithNetwork(ctx, parsedNetwork) + + // loads existing config into the environment + ctx, err = ParseNodeFlags(ctx, cmd, Network(ctx)) + if err != nil { + return err + } + + cfg := NodeConfig(ctx) + + err = p2p.ParseFlags(cmd, &cfg.P2P) + if err != nil { + return err + } + + err = core.ParseFlags(cmd, &cfg.Core) + if err != nil { + return err + } + + if nodeType != node.Bridge { + err = header.ParseFlags(cmd, &cfg.Header) + if err != nil { + return err + } + } + + ctx, err = ParseMiscFlags(ctx, cmd) + if err != nil { + return err + } + + rpc_cfg.ParseFlags(cmd, &cfg.RPC) + gateway.ParseFlags(cmd, &cfg.Gateway) + state.ParseFlags(cmd, &cfg.State) + + // set config + ctx = WithNodeConfig(ctx, &cfg) + cmd.SetContext(ctx) + return nil +} + +// WithFlagSet adds the given flagset to the command. +func WithFlagSet(fset []*flag.FlagSet) func(*cobra.Command) { + return func(c *cobra.Command) { + for _, set := range fset { + c.Flags().AddFlagSet(set) + } + } +} diff --git a/cmd/util_test.go b/cmd/util_test.go new file mode 100644 index 0000000000..b6e245f3e2 --- /dev/null +++ b/cmd/util_test.go @@ -0,0 +1,81 @@ +package cmd + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/celestiaorg/celestia-node/share" +) + +func Test_parseNamespaceID(t *testing.T) { + type testCase struct { + name string + param string + want share.Namespace + wantErr bool + } + testCases := []testCase{ + { + param: "0x0c204d39600fddd3", + name: "8 byte hex encoded namespace ID gets left padded", + want: share.Namespace{ + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x20, 0x4d, 0x39, 0x60, 0xf, 0xdd, 0xd3, + }, + wantErr: false, + }, + { + name: "10 byte hex encoded namespace ID", + param: "0x42690c204d39600fddd3", + want: share.Namespace{ + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x42, 0x69, 0xc, 0x20, 0x4d, 0x39, 0x60, 0xf, 0xdd, 0xd3, + }, + wantErr: false, + }, + { + name: "29 byte hex encoded namespace ID", + param: "0x0000000000000000000000000000000000000001010101010101010101", + want: share.Namespace{ + 0x0, // namespace version + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // v0 ID prefix + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, // namespace ID + }, + wantErr: true, + }, + { + name: "11 byte hex encoded namespace ID returns error", + param: "0x42690c204d39600fddd3a3", + want: share.Namespace{}, + wantErr: true, + }, + { + name: "10 byte base64 encoded namespace ID", + param: "QmkMIE05YA/d0w==", + want: share.Namespace{ + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x42, 0x69, 0xc, 0x20, 0x4d, 0x39, 0x60, 0xf, 0xdd, 0xd3, + }, + wantErr: 
false, + }, + { + name: "not base64 or hex encoded namespace ID returns error", + param: "5748493939429", + want: share.Namespace{}, + wantErr: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + got, err := ParseV0Namespace(tc.param) + if tc.wantErr { + assert.Error(t, err) + return + } + assert.NoError(t, err) + assert.Equal(t, tc.want, got) + }) + } +} diff --git a/core/client.go b/core/client.go index 0f14957165..9636619b02 100644 --- a/core/client.go +++ b/core/client.go @@ -4,7 +4,6 @@ import ( "fmt" retryhttp "github.com/hashicorp/go-retryablehttp" - "github.com/tendermint/tendermint/rpc/client" "github.com/tendermint/tendermint/rpc/client/http" ) @@ -21,6 +20,7 @@ func NewRemote(ip, port string) (Client, error) { return http.NewWithClient( fmt.Sprintf("tcp://%s:%s", ip, port), + "/websocket", httpClient.StandardClient(), ) } diff --git a/core/client_test.go b/core/client_test.go index 69bc67a527..8ad9060555 100644 --- a/core/client_test.go +++ b/core/client_test.go @@ -10,27 +10,27 @@ import ( ) func TestRemoteClient_Status(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), time.Second) + ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) t.Cleanup(cancel) - _, client := StartTestClient(ctx, t) + client := StartTestNode(t).Client status, err := client.Status(ctx) require.NoError(t, err) require.NotNil(t, status) } func TestRemoteClient_StartBlockSubscription_And_GetBlock(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), time.Second) + ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) t.Cleanup(cancel) - _, client := StartTestClient(ctx, t) - eventChan, err := client.Subscribe(ctx, newBlockSubscriber, newBlockEventQuery) + client := StartTestNode(t).Client + eventChan, err := client.Subscribe(ctx, newBlockSubscriber, newDataSignedBlockQuery) require.NoError(t, err) for i := 1; i <= 3; i++ { select { case evt := <-eventChan: - h := evt.Data.(types.EventDataNewBlock).Block.Height + h := evt.Data.(types.EventDataSignedBlock).Header.Height block, err := client.Block(ctx, &h) require.NoError(t, err) require.GreaterOrEqual(t, block.Block.Height, int64(i)) @@ -39,5 +39,5 @@ func TestRemoteClient_StartBlockSubscription_And_GetBlock(t *testing.T) { } } // unsubscribe to event channel - require.NoError(t, client.Unsubscribe(ctx, newBlockSubscriber, newBlockEventQuery)) + require.NoError(t, client.Unsubscribe(ctx, newBlockSubscriber, newDataSignedBlockQuery)) } diff --git a/core/eds.go b/core/eds.go new file mode 100644 index 0000000000..eb93c249ba --- /dev/null +++ b/core/eds.go @@ -0,0 +1,64 @@ +package core + +import ( + "context" + "errors" + "fmt" + + "github.com/filecoin-project/dagstore" + "github.com/tendermint/tendermint/types" + + "github.com/celestiaorg/celestia-app/app" + "github.com/celestiaorg/celestia-app/pkg/appconsts" + "github.com/celestiaorg/celestia-app/pkg/shares" + "github.com/celestiaorg/celestia-app/pkg/square" + "github.com/celestiaorg/celestia-app/pkg/wrapper" + "github.com/celestiaorg/nmt" + "github.com/celestiaorg/rsmt2d" + + "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/share/eds" +) + +// extendBlock extends the given block data, returning the resulting +// ExtendedDataSquare (EDS). If there are no transactions in the block, +// nil is returned in place of the eds. 
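For orientation: rsmt2d erasure-codes a k×k square of original shares into a 2k×2k ExtendedDataSquare, whose rows and columns are committed to by namespaced merkle trees so that light nodes can sample all four quadrants. A toy invocation of the package-private helper below (fourShares is a placeholder for four valid, equal-length shares, i.e. k = 2):

	eds, err := extendShares(fourShares)
	if err != nil {
		return err
	}
	fmt.Println(eds.Width()) // prints 4: the square doubled in both dimensions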
+func extendBlock(data types.Data, appVersion uint64, options ...nmt.Option) (*rsmt2d.ExtendedDataSquare, error) {
+	if app.IsEmptyBlock(data, appVersion) {
+		return nil, nil
+	}
+
+	// Construct the data square from the block's transactions
+	dataSquare, err := square.Construct(data.Txs.ToSliceOfBytes(), appVersion, appconsts.SquareSizeUpperBound(appVersion))
+	if err != nil {
+		return nil, err
+	}
+	return extendShares(shares.ToBytes(dataSquare), options...)
+}
+
+func extendShares(s [][]byte, options ...nmt.Option) (*rsmt2d.ExtendedDataSquare, error) {
+	// Check that the length of the square is a power of 2.
+	if !shares.IsPowerOfTwo(len(s)) {
+		return nil, fmt.Errorf("number of shares is not a power of 2: got %d", len(s))
+	}
+	// Compute the 2x extension of the square.
+	// Note: uses the nmt wrapper to construct the tree.
+	squareSize := square.Size(len(s))
+	return rsmt2d.ComputeExtendedDataSquare(s,
+		appconsts.DefaultCodec(),
+		wrapper.NewConstructor(uint64(squareSize),
+			options...))
+}
+
+// storeEDS will only store the extended block if it is non-empty and does not already exist.
+func storeEDS(ctx context.Context, hash share.DataHash, eds *rsmt2d.ExtendedDataSquare, store *eds.Store) error {
+	if eds == nil {
+		return nil
+	}
+	err := store.Put(ctx, hash, eds)
+	if errors.Is(err, dagstore.ErrShardExists) {
+		// block with given root already exists, return nil
+		return nil
+	}
+	return err
+}
diff --git a/core/eds_test.go b/core/eds_test.go
new file mode 100644
index 0000000000..723cb77ad1
--- /dev/null
+++ b/core/eds_test.go
@@ -0,0 +1,52 @@
+package core
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+	"github.com/tendermint/tendermint/types"
+
+	"github.com/celestiaorg/celestia-app/app"
+	"github.com/celestiaorg/celestia-app/pkg/appconsts"
+
+	"github.com/celestiaorg/celestia-node/share"
+)
+
+// TestTrulyEmptySquare ensures that a truly empty square (square size 1 and no
+// txs) will be recognized as empty and return nil from `extendBlock` so that
+// we do not redundantly store empty EDSes.
+func TestTrulyEmptySquare(t *testing.T) {
+	data := types.Data{
+		Txs:        []types.Tx{},
+		SquareSize: 1,
+	}
+
+	eds, err := extendBlock(data, appconsts.LatestVersion)
+	require.NoError(t, err)
+	assert.Nil(t, eds)
+}
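Note the empty-block convention running through this file: extendBlock returns (nil, nil) for an empty block, and storeEDS treats a nil EDS as a no-op, so empty blocks are never written to the EDS store and their data root falls back to the precomputed empty root, as the next test verifies.

+// TestEmptySquareWithZeroTxs tests that the DAH hash of a block with no
+// transactions is equal to the DAH hash of an empty root, even when the
+// square size is left unset. Technically, this block data is invalid
+// because the construction of the square is deterministic, and the rules
+// which dictate the square size do not allow for empty block data.
+// However, should that ever occur, we need to ensure that the correct
+// data root is generated.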
+func TestEmptySquareWithZeroTxs(t *testing.T) { + data := types.Data{ + Txs: []types.Tx{}, + } + + eds, err := extendBlock(data, appconsts.LatestVersion) + require.Nil(t, eds) + require.NoError(t, err) + + // force extend the square using an empty block and compare with the min DAH + eds, err = app.ExtendBlock(data, appconsts.LatestVersion) + require.NoError(t, err) + + dah, err := share.NewRoot(eds) + require.NoError(t, err) + assert.Equal(t, share.EmptyRoot().Hash(), dah.Hash()) +} diff --git a/core/exchange.go b/core/exchange.go new file mode 100644 index 0000000000..cf889a38bb --- /dev/null +++ b/core/exchange.go @@ -0,0 +1,199 @@ +package core + +import ( + "bytes" + "context" + "fmt" + "time" + + "golang.org/x/sync/errgroup" + + libhead "github.com/celestiaorg/go-header" + "github.com/celestiaorg/nmt" + + "github.com/celestiaorg/celestia-node/header" + "github.com/celestiaorg/celestia-node/share/eds" + "github.com/celestiaorg/celestia-node/share/ipld" +) + +const concurrencyLimit = 4 + +type Exchange struct { + fetcher *BlockFetcher + store *eds.Store + construct header.ConstructFn + + metrics *exchangeMetrics +} + +func NewExchange( + fetcher *BlockFetcher, + store *eds.Store, + construct header.ConstructFn, + opts ...Option, +) (*Exchange, error) { + p := new(params) + for _, opt := range opts { + opt(p) + } + + var ( + metrics *exchangeMetrics + err error + ) + if p.metrics { + metrics, err = newExchangeMetrics() + if err != nil { + return nil, err + } + } + + return &Exchange{ + fetcher: fetcher, + store: store, + construct: construct, + metrics: metrics, + }, nil +} + +func (ce *Exchange) GetByHeight(ctx context.Context, height uint64) (*header.ExtendedHeader, error) { + log.Debugw("requesting header", "height", height) + intHeight := int64(height) + return ce.getExtendedHeaderByHeight(ctx, &intHeight) +} + +func (ce *Exchange) GetRangeByHeight( + ctx context.Context, + from *header.ExtendedHeader, + to uint64, +) ([]*header.ExtendedHeader, error) { + start := time.Now() + + amount := to - (from.Height() + 1) + headers, err := ce.getRangeByHeight(ctx, from.Height()+1, amount) + if err != nil { + return nil, err + } + + ce.metrics.requestDurationPerHeader(ctx, time.Since(start), amount) + + for _, h := range headers { + err := libhead.Verify[*header.ExtendedHeader](from, h, libhead.DefaultHeightThreshold) + if err != nil { + return nil, fmt.Errorf("verifying next header against last verified height: %d: %w", + from.Height(), err) + } + from = h + } + return headers, nil +} + +func (ce *Exchange) getRangeByHeight(ctx context.Context, from, amount uint64) ([]*header.ExtendedHeader, error) { + if amount == 0 { + return nil, nil + } + + log.Debugw("requesting headers", "from", from, "to", from+amount) + headers := make([]*header.ExtendedHeader, amount) + + start := time.Now() + errGroup, ctx := errgroup.WithContext(ctx) + errGroup.SetLimit(concurrencyLimit) + for i := range headers { + i := i + errGroup.Go(func() error { + extHeader, err := ce.GetByHeight(ctx, from+uint64(i)) + if err != nil { + return err + } + + headers[i] = extHeader + return nil + }) + } + + if err := errGroup.Wait(); err != nil { + return nil, err + } + log.Debugw("received headers", "from", from, "to", from+amount, "after", time.Since(start)) + return headers, nil +} + +func (ce *Exchange) Get(ctx context.Context, hash libhead.Hash) (*header.ExtendedHeader, error) { + log.Debugw("requesting header", "hash", hash.String()) + block, err := ce.fetcher.GetBlockByHash(ctx, hash) + if err != nil { + return nil, 
fmt.Errorf("fetching block by hash %s: %w", hash.String(), err) + } + + comm, vals, err := ce.fetcher.GetBlockInfo(ctx, &block.Height) + if err != nil { + return nil, fmt.Errorf("fetching block info for height %d: %w", &block.Height, err) + } + + // extend block data + adder := ipld.NewProofsAdder(int(block.Data.SquareSize)) + defer adder.Purge() + + eds, err := extendBlock(block.Data, block.Header.Version.App, nmt.NodeVisitor(adder.VisitFn())) + if err != nil { + return nil, fmt.Errorf("extending block data for height %d: %w", &block.Height, err) + } + // construct extended header + eh, err := ce.construct(&block.Header, comm, vals, eds) + if err != nil { + panic(fmt.Errorf("constructing extended header for height %d: %w", &block.Height, err)) + } + // verify hashes match + if !bytes.Equal(hash, eh.Hash()) { + return nil, fmt.Errorf("incorrect hash in header at height %d: expected %x, got %x", + &block.Height, hash, eh.Hash()) + } + + ctx = ipld.CtxWithProofsAdder(ctx, adder) + err = storeEDS(ctx, eh.DAH.Hash(), eds, ce.store) + if err != nil { + return nil, fmt.Errorf("storing EDS to eds.Store for height %d: %w", &block.Height, err) + } + return eh, nil +} + +func (ce *Exchange) Head( + ctx context.Context, + _ ...libhead.HeadOption[*header.ExtendedHeader], +) (*header.ExtendedHeader, error) { + log.Debug("requesting head") + return ce.getExtendedHeaderByHeight(ctx, nil) +} + +func (ce *Exchange) getExtendedHeaderByHeight(ctx context.Context, height *int64) (*header.ExtendedHeader, error) { + b, err := ce.fetcher.GetSignedBlock(ctx, height) + if err != nil { + if height == nil { + return nil, fmt.Errorf("fetching signed block for head from core: %w", err) + } + return nil, fmt.Errorf("fetching signed block at height %d from core: %w", *height, err) + } + log.Debugw("fetched signed block from core", "height", b.Header.Height) + + // extend block data + adder := ipld.NewProofsAdder(int(b.Data.SquareSize)) + defer adder.Purge() + + eds, err := extendBlock(b.Data, b.Header.Version.App, nmt.NodeVisitor(adder.VisitFn())) + if err != nil { + return nil, fmt.Errorf("extending block data for height %d: %w", b.Header.Height, err) + } + // create extended header + eh, err := ce.construct(&b.Header, &b.Commit, &b.ValidatorSet, eds) + if err != nil { + panic(fmt.Errorf("constructing extended header for height %d: %w", b.Header.Height, err)) + } + + ctx = ipld.CtxWithProofsAdder(ctx, adder) + err = storeEDS(ctx, eh.DAH.Hash(), eds, ce.store) + if err != nil { + return nil, fmt.Errorf("storing EDS to eds.Store for block height %d: %w", b.Header.Height, err) + } + return eh, nil +} diff --git a/core/exchange_metrics.go b/core/exchange_metrics.go new file mode 100644 index 0000000000..4e5bf5956c --- /dev/null +++ b/core/exchange_metrics.go @@ -0,0 +1,49 @@ +package core + +import ( + "context" + "time" + + "go.opentelemetry.io/otel/metric" + + "github.com/celestiaorg/celestia-node/libs/utils" +) + +type exchangeMetrics struct { + getByHeightDuration metric.Float64Histogram +} + +func newExchangeMetrics() (*exchangeMetrics, error) { + m := new(exchangeMetrics) + + var err error + m.getByHeightDuration, err = meter.Float64Histogram( + "core_ex_get_by_height_request_time", + metric.WithDescription("core exchange client getByHeight request time in seconds (per single height)"), + ) + if err != nil { + return nil, err + } + + return m, nil +} + +func (m *exchangeMetrics) observe(ctx context.Context, observeFn func(ctx context.Context)) { + if m == nil { + return + } + + ctx = utils.ResetContextOnError(ctx) + 
+ observeFn(ctx) +} + +func (m *exchangeMetrics) requestDurationPerHeader(ctx context.Context, duration time.Duration, amount uint64) { + m.observe(ctx, func(ctx context.Context) { + if amount == 0 { + return + } + durationPerHeader := duration.Seconds() / float64(amount) + m.getByHeightDuration.Record(ctx, durationPerHeader) + }) +} diff --git a/core/exchange_test.go b/core/exchange_test.go new file mode 100644 index 0000000000..95c7f83385 --- /dev/null +++ b/core/exchange_test.go @@ -0,0 +1,81 @@ +package core + +import ( + "context" + "testing" + "time" + + ds "github.com/ipfs/go-datastore" + ds_sync "github.com/ipfs/go-datastore/sync" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/celestiaorg/celestia-app/test/util/testnode" + + "github.com/celestiaorg/celestia-node/header" + "github.com/celestiaorg/celestia-node/share/eds" +) + +func TestCoreExchange_RequestHeaders(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + cfg := DefaultTestConfig() + cfg.ChainID = networkID + fetcher, _ := createCoreFetcher(t, cfg) + + // generate 10 blocks + generateBlocks(t, fetcher) + + store := createStore(t) + + ce, err := NewExchange(fetcher, store, header.MakeExtendedHeader) + require.NoError(t, err) + + // initialize store with genesis block + genHeight := int64(1) + genBlock, err := fetcher.GetBlock(ctx, &genHeight) + require.NoError(t, err) + genHeader, err := ce.Get(ctx, genBlock.Header.Hash().Bytes()) + require.NoError(t, err) + + to := uint64(10) + expectedFirstHeightInRange := genHeader.Height() + 1 + expectedLastHeightInRange := to - 1 + expectedLenHeaders := to - expectedFirstHeightInRange + + // request headers from height 1 to 10 [2:10) + headers, err := ce.GetRangeByHeight(context.Background(), genHeader, to) + require.NoError(t, err) + + assert.Len(t, headers, int(expectedLenHeaders)) + assert.Equal(t, expectedFirstHeightInRange, headers[0].Height()) + assert.Equal(t, expectedLastHeightInRange, headers[len(headers)-1].Height()) +} + +func createCoreFetcher(t *testing.T, cfg *testnode.Config) (*BlockFetcher, testnode.Context) { + cctx := StartTestNodeWithConfig(t, cfg) + // wait for height 2 in order to be able to start submitting txs (this prevents + // flakiness with accessing account state) + _, err := cctx.WaitForHeightWithTimeout(2, time.Second*2) // TODO @renaynay: configure? 
+ require.NoError(t, err) + return NewBlockFetcher(cctx.Client), cctx +} + +func createStore(t *testing.T) *eds.Store { + t.Helper() + + storeCfg := eds.DefaultParameters() + store, err := eds.NewStore(storeCfg, t.TempDir(), ds_sync.MutexWrap(ds.NewMapDatastore())) + require.NoError(t, err) + return store +} + +func generateBlocks(t *testing.T, fetcher *BlockFetcher) { + sub, err := fetcher.SubscribeNewBlockEvent(context.Background()) + require.NoError(t, err) + + for i := 0; i < 10; i++ { + <-sub + } +} diff --git a/core/fetcher.go b/core/fetcher.go index c75cb2a6b2..35c9a83dc9 100644 --- a/core/fetcher.go +++ b/core/fetcher.go @@ -2,25 +2,28 @@ package core import ( "context" + "errors" "fmt" logging "github.com/ipfs/go-log/v2" - tmbytes "github.com/tendermint/tendermint/libs/bytes" + coretypes "github.com/tendermint/tendermint/rpc/core/types" "github.com/tendermint/tendermint/types" + + libhead "github.com/celestiaorg/go-header" ) const newBlockSubscriber = "NewBlock/Events" var ( - log = logging.Logger("core/fetcher") - newBlockEventQuery = types.QueryForEvent(types.EventNewBlockValue).String() + log = logging.Logger("core") + newDataSignedBlockQuery = types.QueryForEvent(types.EventSignedBlock).String() ) type BlockFetcher struct { client Client - newBlockCh chan *types.Block - doneCh chan struct{} + doneCh chan struct{} + cancel context.CancelFunc } // NewBlockFetcher returns a new `BlockFetcher`. @@ -34,7 +37,7 @@ func NewBlockFetcher(client Client) *BlockFetcher { func (f *BlockFetcher) GetBlockInfo(ctx context.Context, height *int64) (*types.Commit, *types.ValidatorSet, error) { commit, err := f.Commit(ctx, height) if err != nil { - return nil, nil, fmt.Errorf("core/fetcher: getting commit: %w", err) + return nil, nil, fmt.Errorf("core/fetcher: getting commit at height %d: %w", height, err) } // If a nil `height` is given as a parameter, there is a chance @@ -44,7 +47,7 @@ func (f *BlockFetcher) GetBlockInfo(ctx context.Context, height *int64) (*types. // prevent this potential inconsistency. valSet, err := f.ValidatorSet(ctx, &commit.Height) if err != nil { - return nil, nil, fmt.Errorf("core/fetcher: getting validator set: %w", err) + return nil, nil, fmt.Errorf("core/fetcher: getting validator set at height %d: %w", height, err) } return commit, valSet, nil @@ -58,25 +61,30 @@ func (f *BlockFetcher) GetBlock(ctx context.Context, height *int64) (*types.Bloc } if res != nil && res.Block == nil { - return nil, fmt.Errorf("core/fetcher: block not found") + return nil, fmt.Errorf("core/fetcher: block not found, height: %d", height) } return res.Block, nil } -func (f *BlockFetcher) GetBlockByHash(ctx context.Context, hash tmbytes.HexBytes) (*types.Block, error) { +func (f *BlockFetcher) GetBlockByHash(ctx context.Context, hash libhead.Hash) (*types.Block, error) { res, err := f.client.BlockByHash(ctx, hash) if err != nil { return nil, err } if res != nil && res.Block == nil { - return nil, fmt.Errorf("core/fetcher: block not found") + return nil, fmt.Errorf("core/fetcher: block not found, hash: %s", hash.String()) } return res.Block, nil } +// GetSignedBlock queries Core for a `Block` at the given height. +func (f *BlockFetcher) GetSignedBlock(ctx context.Context, height *int64) (*coretypes.ResultSignedBlock, error) { + return f.client.SignedBlock(ctx, height) +} + // Commit queries Core for a `Commit` from the block at // the given height. 
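// A nil height is interpreted by the underlying RPC client as a request
// for the latest available commit.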
func (f *BlockFetcher) Commit(ctx context.Context, height *int64) (*types.Commit, error) { @@ -86,7 +94,7 @@ func (f *BlockFetcher) Commit(ctx context.Context, height *int64) (*types.Commit } if res != nil && res.Commit == nil { - return nil, fmt.Errorf("core/fetcher: commit not found") + return nil, fmt.Errorf("core/fetcher: commit not found at height %d", height) } return res.Commit, nil @@ -105,7 +113,7 @@ func (f *BlockFetcher) ValidatorSet(ctx context.Context, height *int64) (*types. } if res != nil && len(res.Validators) == 0 { - return nil, fmt.Errorf("core/fetcher: validators not found") + return nil, fmt.Errorf("core/fetcher: validator set not found at height %d", height) } total = res.Total @@ -117,69 +125,56 @@ func (f *BlockFetcher) ValidatorSet(ctx context.Context, height *int64) (*types. // SubscribeNewBlockEvent subscribes to new block events from Core, returning // a new block event channel on success. -func (f *BlockFetcher) SubscribeNewBlockEvent(ctx context.Context) (<-chan *types.Block, error) { +func (f *BlockFetcher) SubscribeNewBlockEvent(ctx context.Context) (<-chan types.EventDataSignedBlock, error) { // start the client if not started yet if !f.client.IsRunning() { - return nil, fmt.Errorf("client not running") + return nil, errors.New("client not running") } - eventChan, err := f.client.Subscribe(ctx, newBlockSubscriber, newBlockEventQuery) + + ctx, cancel := context.WithCancel(ctx) + f.cancel = cancel + f.doneCh = make(chan struct{}) + + eventChan, err := f.client.Subscribe(ctx, newBlockSubscriber, newDataSignedBlockQuery) if err != nil { return nil, err } - // create a wrapper channel for translating ResultEvent to "raw" block - if f.newBlockCh != nil { - return nil, fmt.Errorf("new block event channel exists") - } - - f.newBlockCh = make(chan *types.Block) - f.doneCh = make(chan struct{}) - + signedBlockCh := make(chan types.EventDataSignedBlock) go func() { + defer close(f.doneCh) + defer close(signedBlockCh) for { select { - case <-f.doneCh: + case <-ctx.Done(): return case newEvent, ok := <-eventChan: if !ok { + log.Errorw("fetcher: new blocks subscription channel closed unexpectedly") return } - newBlock, ok := newEvent.Data.(types.EventDataNewBlock) - if !ok { - log.Warnf("unexpected event: %v", newEvent) - continue - } + signedBlock := newEvent.Data.(types.EventDataSignedBlock) select { - case f.newBlockCh <- newBlock.Block: - case <-f.doneCh: + case signedBlockCh <- signedBlock: + case <-ctx.Done(): return } } } }() - return f.newBlockCh, nil + return signedBlockCh, nil } // UnsubscribeNewBlockEvent stops the subscription to new block events from Core. 
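// It cancels the subscription context and waits for the forwarding goroutine
// to exit (or for the given ctx to expire) before unsubscribing the client
// from the query.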
func (f *BlockFetcher) UnsubscribeNewBlockEvent(ctx context.Context) error { - if f.newBlockCh == nil { - return fmt.Errorf("no new block event channel found") + f.cancel() + select { + case <-f.doneCh: + case <-ctx.Done(): + return fmt.Errorf("fetcher: unsubscribe from new block events: %w", ctx.Err()) } - if f.doneCh == nil { - return fmt.Errorf("no stop signal chan found in fetcher") - } - defer func() { - // send stop signal - f.doneCh <- struct{}{} - // close out fetcher channels - close(f.newBlockCh) - close(f.doneCh) - f.newBlockCh = nil - f.doneCh = nil - }() - - return f.client.Unsubscribe(ctx, newBlockSubscriber, newBlockEventQuery) + return f.client.Unsubscribe(ctx, newBlockSubscriber, newDataSignedBlockQuery) } // IsSyncing returns the sync status of the Core connection: true for diff --git a/core/fetcher_no_race_test.go b/core/fetcher_no_race_test.go new file mode 100644 index 0000000000..890b7c35c1 --- /dev/null +++ b/core/fetcher_no_race_test.go @@ -0,0 +1,55 @@ +//go:build !race + +package core + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/types" +) + +// TestBlockFetcherHeaderValues tests that both the Commit and ValidatorSet +// endpoints are working as intended. +func TestBlockFetcherHeaderValues(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) + t.Cleanup(cancel) + + client := StartTestNode(t).Client + fetcher := NewBlockFetcher(client) + + // generate some blocks + newBlockChan, err := fetcher.SubscribeNewBlockEvent(ctx) + require.NoError(t, err) + // read once from channel to generate next block + var h int64 + select { + case evt := <-newBlockChan: + h = evt.Header.Height + case <-ctx.Done(): + require.NoError(t, ctx.Err()) + } + // get Commit from current height + commit, err := fetcher.Commit(ctx, &h) + require.NoError(t, err) + // get ValidatorSet from current height + valSet, err := fetcher.ValidatorSet(ctx, &h) + require.NoError(t, err) + // get next block + var nextBlock types.EventDataSignedBlock + select { + case nextBlock = <-newBlockChan: + case <-ctx.Done(): + require.NoError(t, ctx.Err()) + } + // compare LastCommit from next block to Commit from first block height + assert.Equal(t, nextBlock.Header.LastCommitHash, commit.Hash()) + assert.Equal(t, nextBlock.Header.Height, commit.Height+1) + // compare ValidatorSet hash to the ValidatorsHash from first block height + hexBytes := valSet.Hash() + assert.Equal(t, nextBlock.ValidatorSet.Hash(), hexBytes) + require.NoError(t, fetcher.UnsubscribeNewBlockEvent(ctx)) +} diff --git a/core/fetcher_test.go b/core/fetcher_test.go index 49d02fd4a9..261b84d78c 100644 --- a/core/fetcher_test.go +++ b/core/fetcher_test.go @@ -7,16 +7,13 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/tendermint/tendermint/types" - - "github.com/tendermint/tendermint/libs/bytes" ) func TestBlockFetcher_GetBlock_and_SubscribeNewBlockEvent(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), time.Second*3) + ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) t.Cleanup(cancel) - _, client := StartTestClient(ctx, t) + client := StartTestNode(t).Client fetcher := NewBlockFetcher(client) // generate some blocks @@ -26,59 +23,17 @@ func TestBlockFetcher_GetBlock_and_SubscribeNewBlockEvent(t *testing.T) { for i := 1; i < 3; i++ { select { case newBlockFromChan := <-newBlockChan: - h := 
newBlockFromChan.Height - block, err := fetcher.GetBlock(ctx, &h) + h := newBlockFromChan.Header.Height + block, err := fetcher.GetSignedBlock(ctx, &h) require.NoError(t, err) - assert.Equal(t, newBlockFromChan, block) - require.GreaterOrEqual(t, block.Height, int64(i)) + assert.Equal(t, newBlockFromChan.Data, block.Data) + assert.Equal(t, newBlockFromChan.Header, block.Header) + assert.Equal(t, newBlockFromChan.Commit, block.Commit) + assert.Equal(t, newBlockFromChan.ValidatorSet, block.ValidatorSet) + require.GreaterOrEqual(t, newBlockFromChan.Header.Height, int64(i)) case <-ctx.Done(): require.NoError(t, ctx.Err()) } } require.NoError(t, fetcher.UnsubscribeNewBlockEvent(ctx)) } - -// TestBlockFetcherHeaderValues tests that both the Commit and ValidatorSet -// endpoints are working as intended. -func TestBlockFetcherHeaderValues(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), time.Second*3) - t.Cleanup(cancel) - - _, client := StartTestClient(ctx, t) - fetcher := NewBlockFetcher(client) - - // generate some blocks - newBlockChan, err := fetcher.SubscribeNewBlockEvent(ctx) - require.NoError(t, err) - // read once from channel to generate next block - var h int64 - select { - case evt := <-newBlockChan: - h = evt.Header.Height - case <-ctx.Done(): - require.NoError(t, ctx.Err()) - } - // get Commit from current height - commit, err := fetcher.Commit(ctx, &h) - require.NoError(t, err) - // get ValidatorSet from current height - valSet, err := fetcher.ValidatorSet(ctx, &h) - require.NoError(t, err) - // get next block - var nextBlock *types.Block - select { - case nextBlock = <-newBlockChan: - case <-ctx.Done(): - require.NoError(t, ctx.Err()) - } - // compare LastCommit from next block to Commit from first block height - assert.Equal(t, nextBlock.LastCommit.Hash(), commit.Hash()) - assert.Equal(t, nextBlock.LastCommit.Height, commit.Height) - assert.Equal(t, nextBlock.LastCommit.Signatures, commit.Signatures) - // compare ValidatorSet hash to the ValidatorsHash from first block height - hexBytes := bytes.HexBytes{} - err = hexBytes.Unmarshal(valSet.Hash()) - require.NoError(t, err) - assert.Equal(t, nextBlock.ValidatorsHash, hexBytes) - require.NoError(t, fetcher.UnsubscribeNewBlockEvent(ctx)) -} diff --git a/core/header_test.go b/core/header_test.go new file mode 100644 index 0000000000..ee5d10170e --- /dev/null +++ b/core/header_test.go @@ -0,0 +1,50 @@ +package core + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/libs/rand" + + "github.com/celestiaorg/celestia-node/header" + "github.com/celestiaorg/celestia-node/header/headertest" + "github.com/celestiaorg/celestia-node/share" +) + +func TestMakeExtendedHeaderForEmptyBlock(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + client := StartTestNode(t).Client + fetcher := NewBlockFetcher(client) + + sub, err := fetcher.SubscribeNewBlockEvent(ctx) + require.NoError(t, err) + <-sub + + height := int64(1) + b, err := fetcher.GetBlock(ctx, &height) + require.NoError(t, err) + + comm, val, err := fetcher.GetBlockInfo(ctx, &height) + require.NoError(t, err) + + eds, err := extendBlock(b.Data, b.Header.Version.App) + require.NoError(t, err) + + headerExt, err := header.MakeExtendedHeader(&b.Header, comm, val, eds) + require.NoError(t, err) + + assert.Equal(t, share.EmptyRoot(), headerExt.DAH) +} + +func TestMismatchedDataHash_ComputedRoot(t *testing.T) { + header := 
headertest.RandExtendedHeader(t) + header.DataHash = rand.Bytes(32) + + err := header.Validate() + assert.Contains(t, err.Error(), "mismatch between data hash commitment from"+ + " core header and computed data root") +} diff --git a/core/listener.go b/core/listener.go new file mode 100644 index 0000000000..367aa34181 --- /dev/null +++ b/core/listener.go @@ -0,0 +1,257 @@ +package core + +import ( + "context" + "errors" + "fmt" + "time" + + pubsub "github.com/libp2p/go-libp2p-pubsub" + "github.com/tendermint/tendermint/types" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + + libhead "github.com/celestiaorg/go-header" + "github.com/celestiaorg/nmt" + + "github.com/celestiaorg/celestia-node/header" + "github.com/celestiaorg/celestia-node/share/eds" + "github.com/celestiaorg/celestia-node/share/ipld" + "github.com/celestiaorg/celestia-node/share/p2p/shrexsub" +) + +var ( + tracer = otel.Tracer("core/listener") + retrySubscriptionDelay = 5 * time.Second + + errInvalidSubscription = errors.New("invalid subscription") +) + +// Listener is responsible for listening to Core for +// new block events and converting new Core blocks into +// the main data structure used in the Celestia DA network: +// `ExtendedHeader`. After digesting the Core block, extending +// it, and generating the `ExtendedHeader`, the Listener +// broadcasts the new `ExtendedHeader` to the header-sub gossipsub +// network. +type Listener struct { + fetcher *BlockFetcher + + construct header.ConstructFn + store *eds.Store + + headerBroadcaster libhead.Broadcaster[*header.ExtendedHeader] + hashBroadcaster shrexsub.BroadcastFn + + metrics *listenerMetrics + + chainID string + + listenerTimeout time.Duration + cancel context.CancelFunc +} + +func NewListener( + bcast libhead.Broadcaster[*header.ExtendedHeader], + fetcher *BlockFetcher, + hashBroadcaster shrexsub.BroadcastFn, + construct header.ConstructFn, + store *eds.Store, + blocktime time.Duration, + opts ...Option, +) (*Listener, error) { + p := new(params) + for _, opt := range opts { + opt(p) + } + + var ( + metrics *listenerMetrics + err error + ) + if p.metrics { + metrics, err = newListenerMetrics() + if err != nil { + return nil, err + } + } + + return &Listener{ + fetcher: fetcher, + headerBroadcaster: bcast, + hashBroadcaster: hashBroadcaster, + construct: construct, + store: store, + listenerTimeout: 5 * blocktime, + metrics: metrics, + chainID: p.chainID, + }, nil +} + +// Start kicks off the Listener listener loop. +func (cl *Listener) Start(context.Context) error { + if cl.cancel != nil { + return errors.New("listener: already started") + } + + ctx, cancel := context.WithCancel(context.Background()) + cl.cancel = cancel + + sub, err := cl.fetcher.SubscribeNewBlockEvent(ctx) + if err != nil { + return err + } + go cl.runSubscriber(ctx, sub) + return nil +} + +// Stop stops the listener loop. +func (cl *Listener) Stop(context.Context) error { + cl.cancel() + cl.cancel = nil + return cl.metrics.Close() +} + +// runSubscriber runs a subscriber to receive event data of new signed blocks. 
It will attempt to +// resubscribe in case error happens during listening of subscription +func (cl *Listener) runSubscriber(ctx context.Context, sub <-chan types.EventDataSignedBlock) { + for { + err := cl.listen(ctx, sub) + if ctx.Err() != nil { + // listener stopped because external context was canceled + return + } + if errors.Is(err, errInvalidSubscription) { + // stop node if there is a critical issue with the block subscription + log.Fatalf("listener: %v", err) + } + + log.Warnw("listener: subscriber error, resubscribing...", "err", err) + sub = cl.resubscribe(ctx) + if sub == nil { + return + } + } +} + +func (cl *Listener) resubscribe(ctx context.Context) <-chan types.EventDataSignedBlock { + err := cl.fetcher.UnsubscribeNewBlockEvent(ctx) + if err != nil { + log.Warnw("listener: unsubscribe", "err", err) + } + + ticker := time.NewTicker(retrySubscriptionDelay) + defer ticker.Stop() + for { + sub, err := cl.fetcher.SubscribeNewBlockEvent(ctx) + if err == nil { + return sub + } + log.Errorw("listener: resubscribe", "err", err) + + select { + case <-ctx.Done(): + return nil + case <-ticker.C: + } + } +} + +// listen kicks off a loop, listening for new block events from Core, +// generating ExtendedHeaders and broadcasting them to the header-sub +// gossipsub network. +func (cl *Listener) listen(ctx context.Context, sub <-chan types.EventDataSignedBlock) error { + defer log.Info("listener: listening stopped") + timeout := time.NewTimer(cl.listenerTimeout) + defer timeout.Stop() + for { + select { + case b, ok := <-sub: + if !ok { + return errors.New("underlying subscription was closed") + } + + if cl.chainID != "" && b.Header.ChainID != cl.chainID { + log.Errorf("listener: received block with unexpected chain ID: expected %s,"+ + " received %s", cl.chainID, b.Header.ChainID) + return errInvalidSubscription + } + + log.Debugw("listener: new block from core", "height", b.Header.Height) + + err := cl.handleNewSignedBlock(ctx, b) + if err != nil { + log.Errorw("listener: handling new block msg", + "height", b.Header.Height, + "hash", b.Header.Hash().String(), + "err", err) + } + + if !timeout.Stop() { + <-timeout.C + } + timeout.Reset(cl.listenerTimeout) + case <-timeout.C: + cl.metrics.subscriptionStuck(ctx) + return errors.New("underlying subscription is stuck") + case <-ctx.Done(): + return ctx.Err() + } + } +} + +func (cl *Listener) handleNewSignedBlock(ctx context.Context, b types.EventDataSignedBlock) error { + ctx, span := tracer.Start(ctx, "handle-new-signed-block") + defer span.End() + span.SetAttributes( + attribute.Int64("height", b.Header.Height), + ) + // extend block data + adder := ipld.NewProofsAdder(int(b.Data.SquareSize)) + defer adder.Purge() + + eds, err := extendBlock(b.Data, b.Header.Version.App, nmt.NodeVisitor(adder.VisitFn())) + if err != nil { + return fmt.Errorf("extending block data: %w", err) + } + + // generate extended header + eh, err := cl.construct(&b.Header, &b.Commit, &b.ValidatorSet, eds) + if err != nil { + panic(fmt.Errorf("making extended header: %w", err)) + } + + // attempt to store block data if not empty + ctx = ipld.CtxWithProofsAdder(ctx, adder) + err = storeEDS(ctx, b.Header.DataHash.Bytes(), eds, cl.store) + if err != nil { + return fmt.Errorf("storing EDS: %w", err) + } + + syncing, err := cl.fetcher.IsSyncing(ctx) + if err != nil { + return fmt.Errorf("getting sync state: %w", err) + } + + // notify network of new EDS hash only if core is already synced + if !syncing { + err = cl.hashBroadcaster(ctx, shrexsub.Notification{ + DataHash: 
eh.DataHash.Bytes(), + Height: eh.Height(), + }) + if err != nil && !errors.Is(err, context.Canceled) { + log.Errorw("listener: broadcasting data hash", + "height", b.Header.Height, + "hash", b.Header.Hash(), "err", err) //TODO: hash or datahash? + } + } + + // broadcast new ExtendedHeader, but if core is still syncing, notify only local subscribers + err = cl.headerBroadcaster.Broadcast(ctx, eh, pubsub.WithLocalPublication(syncing)) + if err != nil && !errors.Is(err, context.Canceled) { + log.Errorw("listener: broadcasting next header", + "height", b.Header.Height, + "err", err) + } + return nil +} diff --git a/core/listener_metrics.go b/core/listener_metrics.go new file mode 100644 index 0000000000..f17903a91a --- /dev/null +++ b/core/listener_metrics.go @@ -0,0 +1,81 @@ +package core + +import ( + "context" + "time" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/metric" + + "github.com/celestiaorg/celestia-node/libs/utils" +) + +var meter = otel.Meter("core") + +type listenerMetrics struct { + lastTimeSubscriptionStuck time.Time + lastTimeSubscriptionStuckInst metric.Int64ObservableGauge + lastTimeSubscriptionStuckReg metric.Registration + + subscriptionStuckInst metric.Int64Counter +} + +func newListenerMetrics() (*listenerMetrics, error) { + m := new(listenerMetrics) + + var err error + m.subscriptionStuckInst, err = meter.Int64Counter( + "core_listener_subscription_stuck_count", + metric.WithDescription("number of times core listener block subscription has been stuck/retried"), + ) + if err != nil { + return nil, err + } + + m.lastTimeSubscriptionStuckInst, err = meter.Int64ObservableGauge( + "core_listener_last_time_subscription_stuck_timestamp", + metric.WithDescription("last time the listener subscription was stuck"), + ) + if err != nil { + return nil, err + } + m.lastTimeSubscriptionStuckReg, err = meter.RegisterCallback( + m.observeLastTimeStuckCallback, + m.lastTimeSubscriptionStuckInst, + ) + if err != nil { + return nil, err + } + + return m, nil +} + +func (m *listenerMetrics) observe(ctx context.Context, observeFn func(ctx context.Context)) { + if m == nil { + return + } + + ctx = utils.ResetContextOnError(ctx) + + observeFn(ctx) +} + +func (m *listenerMetrics) subscriptionStuck(ctx context.Context) { + m.observe(ctx, func(ctx context.Context) { + m.subscriptionStuckInst.Add(ctx, 1) + m.lastTimeSubscriptionStuck = time.Now() + }) +} + +func (m *listenerMetrics) observeLastTimeStuckCallback(_ context.Context, obs metric.Observer) error { + obs.ObserveInt64(m.lastTimeSubscriptionStuckInst, m.lastTimeSubscriptionStuck.Unix()) + return nil +} + +func (m *listenerMetrics) Close() error { + if m == nil { + return nil + } + + return m.lastTimeSubscriptionStuckReg.Unregister() +} diff --git a/core/listener_no_race_test.go b/core/listener_no_race_test.go new file mode 100644 index 0000000000..eac12785ee --- /dev/null +++ b/core/listener_no_race_test.go @@ -0,0 +1,71 @@ +//go:build !race + +package core + +import ( + "bytes" + "context" + "testing" + "time" + + "github.com/cosmos/cosmos-sdk/client/flags" + "github.com/stretchr/testify/require" + + "github.com/celestiaorg/celestia-node/share" +) + +// TestListenerWithNonEmptyBlocks ensures that non-empty blocks are actually +// stored to eds.Store. 
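+// It fills each generated block with transactions, listens for the data
+// hashes broadcast over shrex-sub, and verifies that every non-empty hash
+// has already been persisted to the store.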
+func TestListenerWithNonEmptyBlocks(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + t.Cleanup(cancel) + + // create mocknet with two pubsub endpoints + ps0, _ := createMocknetWithTwoPubsubEndpoints(ctx, t) + + // create one block to store as Head in local store and then unsubscribe from block events + cfg := DefaultTestConfig() + cfg.ChainID = networkID + fetcher, cctx := createCoreFetcher(t, cfg) + eds := createEdsPubSub(ctx, t) + + store := createStore(t) + err := store.Start(ctx) + require.NoError(t, err) + t.Cleanup(func() { + err = store.Stop(ctx) + require.NoError(t, err) + }) + + // create Listener and start listening + cl := createListener(ctx, t, fetcher, ps0, eds, store, networkID) + err = cl.Start(ctx) + require.NoError(t, err) + + // listen for eds hashes broadcasted through eds-sub and ensure store has + // already stored them + sub, err := eds.Subscribe() + require.NoError(t, err) + t.Cleanup(sub.Cancel) + + empty := share.EmptyRoot() + // TODO extract 16 + for i := 0; i < 16; i++ { + _, err := cctx.FillBlock(16, cfg.Accounts, flags.BroadcastBlock) + require.NoError(t, err) + msg, err := sub.Next(ctx) + require.NoError(t, err) + + if bytes.Equal(empty.Hash(), msg.DataHash) { + continue + } + + has, err := store.Has(ctx, msg.DataHash) + require.NoError(t, err) + require.True(t, has) + } + + err = cl.Stop(ctx) + require.NoError(t, err) + require.Nil(t, cl.cancel) +} diff --git a/core/listener_test.go b/core/listener_test.go new file mode 100644 index 0000000000..b3ed11e571 --- /dev/null +++ b/core/listener_test.go @@ -0,0 +1,176 @@ +package core + +import ( + "context" + "testing" + "time" + + pubsub "github.com/libp2p/go-libp2p-pubsub" + "github.com/libp2p/go-libp2p/core/event" + mocknet "github.com/libp2p/go-libp2p/p2p/net/mock" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/celestiaorg/go-header/p2p" + + "github.com/celestiaorg/celestia-node/header" + nodep2p "github.com/celestiaorg/celestia-node/nodebuilder/p2p" + "github.com/celestiaorg/celestia-node/share/eds" + "github.com/celestiaorg/celestia-node/share/p2p/shrexsub" +) + +const networkID = "private" + +// TestListener tests the lifecycle of the core listener. 
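+// It starts a Listener against a test core node, verifies that extended
+// headers are broadcast to the header-sub topic, and checks that it shuts
+// down cleanly.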
+func TestListener(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + t.Cleanup(cancel) + + // create mocknet with two pubsub endpoints + ps0, ps1 := createMocknetWithTwoPubsubEndpoints(ctx, t) + subscriber, err := p2p.NewSubscriber[*header.ExtendedHeader]( + ps1, + header.MsgID, + p2p.WithSubscriberNetworkID(networkID), + ) + require.NoError(t, err) + err = subscriber.SetVerifier(func(context.Context, *header.ExtendedHeader) error { + return nil + }) + require.NoError(t, err) + require.NoError(t, subscriber.Start(ctx)) + subs, err := subscriber.Subscribe() + require.NoError(t, err) + t.Cleanup(subs.Cancel) + + // create one block to store as Head in local store and then unsubscribe from block events + cfg := DefaultTestConfig() + cfg.ChainID = networkID + fetcher, _ := createCoreFetcher(t, cfg) + + eds := createEdsPubSub(ctx, t) + + // create Listener and start listening + cl := createListener(ctx, t, fetcher, ps0, eds, createStore(t), networkID) + err = cl.Start(ctx) + require.NoError(t, err) + + edsSubs, err := eds.Subscribe() + require.NoError(t, err) + t.Cleanup(edsSubs.Cancel) + + // ensure headers and dataHash are getting broadcasted to the relevant topics + for i := 0; i < 5; i++ { + _, err := subs.NextHeader(ctx) + require.NoError(t, err) + } + + err = cl.Stop(ctx) + require.NoError(t, err) + require.Nil(t, cl.cancel) +} + +func TestListenerWithWrongChainRPC(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + t.Cleanup(cancel) + + // create mocknet with two pubsub endpoints + ps0, _ := createMocknetWithTwoPubsubEndpoints(ctx, t) + + // create one block to store as Head in local store and then unsubscribe from block events + cfg := DefaultTestConfig() + cfg.ChainID = networkID + fetcher, _ := createCoreFetcher(t, cfg) + eds := createEdsPubSub(ctx, t) + + store := createStore(t) + err := store.Start(ctx) + require.NoError(t, err) + t.Cleanup(func() { + err = store.Stop(ctx) + require.NoError(t, err) + }) + + // create Listener and start listening + cl := createListener(ctx, t, fetcher, ps0, eds, store, "wrong-chain-rpc") + sub, err := cl.fetcher.SubscribeNewBlockEvent(ctx) + require.NoError(t, err) + + err = cl.listen(ctx, sub) + assert.ErrorIs(t, err, errInvalidSubscription) +} + +func createMocknetWithTwoPubsubEndpoints(ctx context.Context, t *testing.T) (*pubsub.PubSub, *pubsub.PubSub) { + net, err := mocknet.FullMeshLinked(2) + require.NoError(t, err) + host0, host1 := net.Hosts()[0], net.Hosts()[1] + + // create pubsub for host + ps0, err := pubsub.NewGossipSub(context.Background(), host0, + pubsub.WithMessageSignaturePolicy(pubsub.StrictNoSign)) + require.NoError(t, err) + // create pubsub for peer-side (to test broadcast comes through network) + ps1, err := pubsub.NewGossipSub(context.Background(), host1, + pubsub.WithMessageSignaturePolicy(pubsub.StrictNoSign)) + require.NoError(t, err) + + sub0, err := host0.EventBus().Subscribe(&event.EvtPeerIdentificationCompleted{}) + require.NoError(t, err) + sub1, err := host1.EventBus().Subscribe(&event.EvtPeerIdentificationCompleted{}) + require.NoError(t, err) + + err = net.ConnectAllButSelf() + require.NoError(t, err) + + // wait on both peer identification events + for i := 0; i < 2; i++ { + select { + case <-sub0.Out(): + case <-sub1.Out(): + case <-ctx.Done(): + assert.FailNow(t, "timeout waiting for peers to connect") + } + } + + return ps0, ps1 +} + +func createListener( + ctx context.Context, + t *testing.T, + fetcher *BlockFetcher, + ps 
*pubsub.PubSub, + edsSub *shrexsub.PubSub, + store *eds.Store, + chainID string, +) *Listener { + p2pSub, err := p2p.NewSubscriber[*header.ExtendedHeader](ps, header.MsgID, p2p.WithSubscriberNetworkID(networkID)) + require.NoError(t, err) + + err = p2pSub.Start(ctx) + require.NoError(t, err) + err = p2pSub.SetVerifier(func(ctx context.Context, msg *header.ExtendedHeader) error { + return nil + }) + require.NoError(t, err) + t.Cleanup(func() { + require.NoError(t, p2pSub.Stop(ctx)) + }) + + listener, err := NewListener(p2pSub, fetcher, edsSub.Broadcast, header.MakeExtendedHeader, + store, nodep2p.BlockTime, WithChainID(nodep2p.Network(chainID))) + require.NoError(t, err) + return listener +} + +func createEdsPubSub(ctx context.Context, t *testing.T) *shrexsub.PubSub { + net, err := mocknet.FullMeshLinked(1) + require.NoError(t, err) + edsSub, err := shrexsub.NewPubSub(ctx, net.Hosts()[0], "eds-test") + require.NoError(t, err) + require.NoError(t, edsSub.Start(ctx)) + t.Cleanup(func() { + require.NoError(t, edsSub.Stop(ctx)) + }) + return edsSub +} diff --git a/core/option.go b/core/option.go new file mode 100644 index 0000000000..6916ced4d8 --- /dev/null +++ b/core/option.go @@ -0,0 +1,25 @@ +package core + +import "github.com/celestiaorg/celestia-node/nodebuilder/p2p" + +type Option func(*params) + +type params struct { + metrics bool + + chainID string +} + +// WithMetrics is a functional option that enables metrics +// inside the core package. +func WithMetrics() Option { + return func(p *params) { + p.metrics = true + } +} + +func WithChainID(id p2p.Network) Option { + return func(p *params) { + p.chainID = id.String() + } +} diff --git a/core/testing.go b/core/testing.go index 8b45ecc25f..8d29ce9bbc 100644 --- a/core/testing.go +++ b/core/testing.go @@ -1,152 +1,81 @@ package core import ( - "context" - "fmt" - "math/rand" "net" "net/url" - "sort" "testing" "time" "github.com/stretchr/testify/require" - "github.com/tendermint/tendermint/abci/example/kvstore" - "github.com/tendermint/tendermint/abci/types" - "github.com/tendermint/tendermint/config" - tmservice "github.com/tendermint/tendermint/libs/service" - tmproto "github.com/tendermint/tendermint/proto/tendermint/types" - rpctest "github.com/tendermint/tendermint/rpc/test" - tmtypes "github.com/tendermint/tendermint/types" + tmconfig "github.com/tendermint/tendermint/config" + tmrand "github.com/tendermint/tendermint/libs/rand" + + "github.com/celestiaorg/celestia-app/test/util/testnode" ) -// so that we never hit an issue where we request blocks that are removed -const defaultRetainBlocks int64 = 10000 +// DefaultTestConfig returns the default testing configuration for Tendermint + Celestia App tandem. +// +// It fetches free ports from OS and sets them into configs, s.t. +// user can make use of them(unlike 0 port) and allowing to run +// multiple tests nodes in parallel. +// +// Additionally, it instructs Tendermint + Celestia App tandem to setup 10 funded accounts. +func DefaultTestConfig() *testnode.Config { + cfg := testnode.DefaultConfig() + + // instructs creating funded accounts + // 10 usually is enough for testing + accounts := make([]string, 10) + for i := range accounts { + accounts[i] = tmrand.Str(9) + } -// StartTestNode starts a mock Core node background process and returns it. 
-func StartTestNode(ctx context.Context, t *testing.T, app types.Application, cfg *config.Config) tmservice.Service { - nd, closer, err := rpctest.StartTendermint(ctx, cfg, app, rpctest.SuppressStdout) - require.NoError(t, err) - t.Cleanup(func() { - require.NoError(t, closer(ctx)) - }) - return nd -} + cfg.TmConfig.Consensus.TimeoutCommit = time.Millisecond * 200 -// StartTestKVApp starts Tendermint KVApp. -func StartTestKVApp(ctx context.Context, t *testing.T) (tmservice.Service, types.Application, *config.Config) { - cfg, err := rpctest.CreateConfig("Dummy_TmNode") - require.NoError(t, err) - app := CreateKVStore(defaultRetainBlocks) - return StartTestNode(ctx, t, app, cfg), app, cfg + cfg = cfg. + WithAccounts(accounts). + WithSupressLogs(true) + + return cfg } -// CreateKVStore creates a simple kv store app and gives the user -// ability to set desired amount of blocks to be retained. -func CreateKVStore(retainBlocks int64) *kvstore.Application { - app := kvstore.NewApplication() - app.RetainBlocks = retainBlocks - return app +// StartTestNode simply starts Tendermint and Celestia App tandem with default testing +// configuration. +func StartTestNode(t *testing.T) testnode.Context { + return StartTestNodeWithConfig(t, DefaultTestConfig()) } -// StartTestClient returns a started remote Core node process, as well its -// mock Core Client. -func StartTestClient(ctx context.Context, t *testing.T) (tmservice.Service, Client) { - nd, _, cfg := StartTestKVApp(ctx, t) - endpoint, err := GetEndpoint(cfg) - require.NoError(t, err) - ip, port, err := net.SplitHostPort(endpoint) +// StartTestNodeWithConfig starts Tendermint and Celestia App tandem with custom configuration. +func StartTestNodeWithConfig(t *testing.T, cfg *testnode.Config) testnode.Context { + cctx, _, _ := testnode.NewNetwork(t, cfg) + // we want to test over remote http client, + // so we are as close to the real environment as possible + // however, it might be useful to use local tendermint client + // if you need to debug something inside of it + ip, port, err := getEndpoint(cfg.TmConfig) require.NoError(t, err) client, err := NewRemote(ip, port) require.NoError(t, err) + + err = client.Start() + require.NoError(t, err) t.Cleanup(func() { err := client.Stop() require.NoError(t, err) }) - err = client.Start() - require.NoError(t, err) - return nd, client + + cctx.WithClient(client) + return cctx } -// GetEndpoint returns the remote node's RPC endpoint. 
-func GetEndpoint(cfg *config.Config) (string, error) { +func getEndpoint(cfg *tmconfig.Config) (string, string, error) { url, err := url.Parse(cfg.RPC.ListenAddress) if err != nil { - return "", err + return "", "", err } host, _, err := net.SplitHostPort(url.Host) if err != nil { - return "", err - } - return fmt.Sprintf("%s:%s", host, url.Port()), nil -} - -func RandValidator(randPower bool, minPower int64) (*tmtypes.Validator, tmtypes.PrivValidator) { - privVal := tmtypes.NewMockPV() - votePower := minPower - if randPower { - // nolint:gosec // G404: Use of weak random number generator - votePower += int64(rand.Uint32()) - } - pubKey, err := privVal.GetPubKey(context.Background()) - if err != nil { - panic(fmt.Errorf("could not retrieve pubkey %w", err)) - } - val := tmtypes.NewValidator(pubKey, votePower) - return val, privVal -} - -func RandValidatorSet(numValidators int, votingPower int64) (*tmtypes.ValidatorSet, []tmtypes.PrivValidator) { - var ( - valz = make([]*tmtypes.Validator, numValidators) - privValidators = make([]tmtypes.PrivValidator, numValidators) - ) - - for i := 0; i < numValidators; i++ { - val, privValidator := RandValidator(false, votingPower) - valz[i] = val - privValidators[i] = privValidator - } - - sort.Sort(tmtypes.PrivValidatorsByAddress(privValidators)) - - return tmtypes.NewValidatorSet(valz), privValidators -} - -func MakeCommit(blockID tmtypes.BlockID, height int64, round int32, - voteSet *tmtypes.VoteSet, validators []tmtypes.PrivValidator, now time.Time) (*tmtypes.Commit, error) { - - // all sign - for i := 0; i < len(validators); i++ { - pubKey, err := validators[i].GetPubKey(context.Background()) - if err != nil { - return nil, fmt.Errorf("can't get pubkey: %w", err) - } - vote := &tmtypes.Vote{ - ValidatorAddress: pubKey.Address(), - ValidatorIndex: int32(i), - Height: height, - Round: round, - Type: tmproto.PrecommitType, - BlockID: blockID, - Timestamp: now, - } - - _, err = signAddVote(validators[i], vote, voteSet) - if err != nil { - return nil, err - } - } - - return voteSet.MakeCommit(), nil -} - -func signAddVote(privVal tmtypes.PrivValidator, vote *tmtypes.Vote, voteSet *tmtypes.VoteSet) (signed bool, err error) { - v := vote.ToProto() - err = privVal.SignVote(context.Background(), voteSet.ChainID(), v) - if err != nil { - return false, err + return "", "", err } - vote.Signature = v.Signature - return voteSet.AddVote(vote) + return host, url.Port(), nil } diff --git a/core/testing_grpc.go b/core/testing_grpc.go new file mode 100644 index 0000000000..d831bc0724 --- /dev/null +++ b/core/testing_grpc.go @@ -0,0 +1,114 @@ +package core + +import ( + "net" + "strings" + + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/codec" + srvconfig "github.com/cosmos/cosmos-sdk/server/config" + "github.com/cosmos/cosmos-sdk/server/grpc/gogoreflection" + reflection "github.com/cosmos/cosmos-sdk/server/grpc/reflection/v2alpha1" + srvtypes "github.com/cosmos/cosmos-sdk/server/types" + sdk "github.com/cosmos/cosmos-sdk/types" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" + + "github.com/celestiaorg/celestia-app/test/util/testnode" +) + +/* +StartGRPCServer is a copy of https://github.com/celestiaorg/celestia-app/blob/e5a679d11b464d583b616d4d686de9dd44bdab2e/testutil/testnode/rpc_client.go#L46 +// It's copied as internal Cosmos SDK logic take 5 seconds to run: https://github.com/cosmos/cosmos-sdk/blob/6dfa0c98062d5d8b38d85ca1d2807937f47da4a3/server/grpc/server.go#L80 +// FIXME once the fix for 
https://github.com/cosmos/cosmos-sdk/issues/14429 lands in our fork +*/ +func StartGRPCServer( + app srvtypes.Application, + appCfg *srvconfig.Config, + cctx testnode.Context, +) (testnode.Context, func() error, error) { + emptycleanup := func() error { return nil } + // Add the tx service in the gRPC router. + app.RegisterTxService(cctx.Context) + + // Add the tendermint queries service in the gRPC router. + app.RegisterTendermintService(cctx.Context) + + grpcSrv, err := startGRPCServer(cctx.Context, app, appCfg.GRPC) + if err != nil { + return testnode.Context{}, emptycleanup, err + } + + nodeGRPCAddr := strings.Replace(appCfg.GRPC.Address, "0.0.0.0", "localhost", 1) + conn, err := grpc.Dial(nodeGRPCAddr, grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + return testnode.Context{}, emptycleanup, err + } + + cctx.Context = cctx.WithGRPCClient(conn) + return cctx, func() error { + grpcSrv.Stop() + return nil + }, nil +} + +func startGRPCServer( + clientCtx client.Context, + app srvtypes.Application, + cfg srvconfig.GRPCConfig, +) (*grpc.Server, error) { + maxSendMsgSize := cfg.MaxSendMsgSize + if maxSendMsgSize == 0 { + maxSendMsgSize = srvconfig.DefaultGRPCMaxSendMsgSize + } + + maxRecvMsgSize := cfg.MaxRecvMsgSize + if maxRecvMsgSize == 0 { + maxRecvMsgSize = srvconfig.DefaultGRPCMaxRecvMsgSize + } + + grpcSrv := grpc.NewServer( + grpc.ForceServerCodec(codec.NewProtoCodec(clientCtx.InterfaceRegistry).GRPCCodec()), + grpc.MaxSendMsgSize(maxSendMsgSize), + grpc.MaxRecvMsgSize(maxRecvMsgSize), + ) + + app.RegisterGRPCServer(grpcSrv) + + // Reflection allows consumers to build dynamic clients that can write to any + // Cosmos SDK application without relying on application packages at compile + // time. + err := reflection.Register(grpcSrv, reflection.Config{ + SigningModes: func() map[string]int32 { + modes := make(map[string]int32, len(clientCtx.TxConfig.SignModeHandler().Modes())) + for _, m := range clientCtx.TxConfig.SignModeHandler().Modes() { + modes[m.String()] = (int32)(m) + } + return modes + }(), + ChainID: clientCtx.ChainID, + SdkConfig: sdk.GetConfig(), + InterfaceRegistry: clientCtx.InterfaceRegistry, + }) + if err != nil { + return nil, err + } + + // Reflection allows external clients to see what services and methods + // the gRPC server exposes. + gogoreflection.Register(grpcSrv) + + listener, err := net.Listen("tcp", cfg.Address) + if err != nil { + return nil, err + } + + go func() { + err = grpcSrv.Serve(listener) + if err != nil { + log.Error("serving GRPC: ", err) + } + }() + + return grpcSrv, nil +} diff --git a/das/backoff.go b/das/backoff.go new file mode 100644 index 0000000000..f92c04d1ef --- /dev/null +++ b/das/backoff.go @@ -0,0 +1,59 @@ +package das + +import ( + "time" +) + +var ( + // first retry attempt should happen after defaultBackoffInitialInterval + defaultBackoffInitialInterval = time.Minute + // next retry attempt will happen with delay of previous one multiplied by defaultBackoffMultiplier + defaultBackoffMultiplier = 4 + // after defaultBackoffMaxRetryCount amount of attempts retry backoff interval will stop growing + // and each retry attempt will produce WARN log + defaultBackoffMaxRetryCount = 4 +) + +// retryStrategy defines a backoff for retries. +type retryStrategy struct { + // attempts delays will follow durations stored in retryIntervals + retryIntervals []time.Duration +} + +// newRetryStrategy creates and initializes a new retry backoff. 
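+// The given retryIntervals define the delay before each consecutive attempt;
+// once the list is exhausted, the last interval is reused for every further
+// retry. With the package defaults above (one-minute base, multiplier 4,
+// count 4) the delays are 1m, 4m, 16m and then 64m for all later attempts.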
+func newRetryStrategy(retryIntervals []time.Duration) retryStrategy {
+	return retryStrategy{retryIntervals: retryIntervals}
+}
+
+// nextRetry creates a retry attempt with a backoff delay based on the retry strategy.
+// It takes the previous retry attempt and the time of the last attempt as inputs, and returns
+// an updated retry attempt and a boolean indicating whether the retry limit has been exceeded.
+func (s retryStrategy) nextRetry(lastRetry retryAttempt, lastAttempt time.Time,
+) (retry retryAttempt, retriesExceeded bool) {
+	lastRetry.count++
+
+	if len(s.retryIntervals) == 0 {
+		return lastRetry, false
+	}
+
+	if lastRetry.count > len(s.retryIntervals) {
+		// retry count exceeded the backoff limit, reuse the last interval
+		lastRetry.after = lastAttempt.Add(s.retryIntervals[len(s.retryIntervals)-1])
+		return lastRetry, true
+	}
+
+	lastRetry.after = lastAttempt.Add(s.retryIntervals[lastRetry.count-1])
+	return lastRetry, false
+}
+
+// exponentialBackoff generates a slice of time.Duration values using an exponential growth
+// multiplier.
+func exponentialBackoff(baseInterval time.Duration, multiplier, amount int) []time.Duration {
+	backoff := make([]time.Duration, 0, amount)
+	next := baseInterval
+	for i := 0; i < amount; i++ {
+		backoff = append(backoff, next)
+		next *= time.Duration(multiplier)
+	}
+	return backoff
+}
diff --git a/das/backoff_test.go b/das/backoff_test.go
new file mode 100644
index 0000000000..e032ec175a
--- /dev/null
+++ b/das/backoff_test.go
@@ -0,0 +1,108 @@
+package das
+
+import (
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func Test_exponentialBackoff(t *testing.T) {
+	type args struct {
+		baseInterval time.Duration
+		factor       int
+		amount       int
+	}
+	tests := []struct {
+		name string
+		args args
+		want []time.Duration
+	}{
+		{
+			name: "defaults",
+			args: args{
+				baseInterval: time.Minute,
+				factor:       4,
+				amount:       4,
+			},
+			want: []time.Duration{
+				time.Minute,
+				4 * time.Minute,
+				16 * time.Minute,
+				64 * time.Minute,
+			}},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			assert.Equalf(t,
+				tt.want, exponentialBackoff(tt.args.baseInterval, tt.args.factor, tt.args.amount),
+				"exponentialBackoff(%v, %v, %v)", tt.args.baseInterval, tt.args.factor, tt.args.amount)
+		})
+	}
+}
+
+func Test_retryStrategy_nextRetry(t *testing.T) {
+	tNow := time.Now()
+	type args struct {
+		retry       retryAttempt
+		lastAttempt time.Time
+	}
+	tests := []struct {
+		name                string
+		backoff             retryStrategy
+		args                args
+		wantRetry           retryAttempt
+		wantRetriesExceeded bool
+	}{
+		{
+			name:    "empty_strategy",
+			backoff: newRetryStrategy(nil),
+			args: args{
+				retry:       retryAttempt{count: 1},
+				lastAttempt: tNow,
+			},
+			wantRetry: retryAttempt{
+				count: 2,
+			},
+			wantRetriesExceeded: false,
+		},
+		{
+			name:    "before_limit",
+			backoff: newRetryStrategy([]time.Duration{time.Second, time.Minute}),
+			args: args{
+				retry:       retryAttempt{count: 1},
+				lastAttempt: tNow,
+			},
+			wantRetry: retryAttempt{
+				count: 2,
+				after: tNow.Add(time.Minute),
+			},
+			wantRetriesExceeded: false,
+		},
+		{
+			name:    "after_limit",
+			backoff: newRetryStrategy([]time.Duration{time.Second, time.Minute}),
+			args: args{
+				retry:       retryAttempt{count: 2},
+				lastAttempt: tNow,
+			},
+			wantRetry: retryAttempt{
+				count: 3,
+				after: tNow.Add(time.Minute),
+			},
+			wantRetriesExceeded: true,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			s := retryStrategy{
+				retryIntervals: tt.backoff.retryIntervals,
+			}
+			gotRetry, gotRetriesExceeded := s.nextRetry(tt.args.retry, tt.args.lastAttempt)
+			
assert.Equalf(t, tt.wantRetry, gotRetry, + "nextRetry(%v, %v)", tt.args.retry, tt.args.lastAttempt) + assert.Equalf(t, tt.wantRetriesExceeded, gotRetriesExceeded, + "nextRetry(%v, %v)", tt.args.retry, tt.args.lastAttempt) + }) + } +} diff --git a/das/checkpoint.go b/das/checkpoint.go new file mode 100644 index 0000000000..bb023a19da --- /dev/null +++ b/das/checkpoint.go @@ -0,0 +1,56 @@ +package das + +import ( + "fmt" +) + +type checkpoint struct { + SampleFrom uint64 `json:"sample_from"` + NetworkHead uint64 `json:"network_head"` + // Failed heights will be retried + Failed map[uint64]int `json:"failed,omitempty"` + // Workers will resume on restart from previous state + Workers []workerCheckpoint `json:"workers,omitempty"` +} + +// workerCheckpoint will be used to resume worker on restart +type workerCheckpoint struct { + From uint64 `json:"from"` + To uint64 `json:"to"` + JobType jobType `json:"job_type"` +} + +func newCheckpoint(stats SamplingStats) checkpoint { + workers := make([]workerCheckpoint, 0, len(stats.Workers)) + for _, w := range stats.Workers { + // no need to resume recent jobs after restart. On the other hand, retry jobs will resume from + // failed heights map. it leaves only catchup jobs to be stored and resumed + if w.JobType == catchupJob { + workers = append(workers, workerCheckpoint{ + From: w.Curr, + To: w.To, + JobType: w.JobType, + }) + } + } + return checkpoint{ + SampleFrom: stats.CatchupHead + 1, + NetworkHead: stats.NetworkHead, + Failed: stats.Failed, + Workers: workers, + } +} + +func (c checkpoint) String() string { + str := fmt.Sprintf("SampleFrom: %v, NetworkHead: %v", c.SampleFrom, c.NetworkHead) + + if len(c.Workers) > 0 { + str += fmt.Sprintf(", Workers: %v", len(c.Workers)) + } + + if len(c.Failed) > 0 { + str += fmt.Sprintf("\nFailed: %v", c.Failed) + } + + return str +} diff --git a/das/checkpoint_store.go b/das/checkpoint_store.go deleted file mode 100644 index 7552c20bb2..0000000000 --- a/das/checkpoint_store.go +++ /dev/null @@ -1,47 +0,0 @@ -package das - -import ( - "context" - "encoding/binary" - - "github.com/ipfs/go-datastore" - "github.com/ipfs/go-datastore/namespace" -) - -var ( - storePrefix = datastore.NewKey("das") - checkpointKey = datastore.NewKey("checkpoint") -) - -// wrapCheckpointStore wraps the given datastore.Datastore with the `das` -// prefix. The checkpoint store stores/loads the DASer's checkpoint to/from -// disk using the checkpointKey. The checkpoint is stored as a uint64 -// representation of the height of the latest successfully DASed header. -func wrapCheckpointStore(ds datastore.Datastore) datastore.Datastore { - return namespace.Wrap(ds, storePrefix) -} - -// loadCheckpoint loads the DAS checkpoint height from disk and returns it. -// If there is no known checkpoint, it returns height 0. -func loadCheckpoint(ctx context.Context, ds datastore.Datastore) (int64, error) { - checkpoint, err := ds.Get(ctx, checkpointKey) - if err != nil { - // if no checkpoint was found, return checkpoint as - // 0 since DASer begins sampling on checkpoint+1 - if err == datastore.ErrNotFound { - log.Debug("checkpoint not found, starting sampling at block height 1") - return 0, nil - } - - return 0, err - } - return int64(binary.BigEndian.Uint64(checkpoint)), err -} - -// storeCheckpoint stores the given DAS checkpoint to disk. 
-func storeCheckpoint(ctx context.Context, ds datastore.Datastore, checkpoint int64) error { - buf := make([]byte, 8) - binary.BigEndian.PutUint64(buf, uint64(checkpoint)) - - return ds.Put(ctx, checkpointKey, buf) -} diff --git a/das/checkpoint_store_test.go b/das/checkpoint_store_test.go deleted file mode 100644 index 243436e1b7..0000000000 --- a/das/checkpoint_store_test.go +++ /dev/null @@ -1,25 +0,0 @@ -package das - -import ( - "context" - "testing" - "time" - - "github.com/ipfs/go-datastore" - "github.com/ipfs/go-datastore/sync" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestCheckpointStore(t *testing.T) { - ds := wrapCheckpointStore(sync.MutexWrap(datastore.NewMapDatastore())) - checkpoint := int64(5) - ctx, cancel := context.WithTimeout(context.Background(), time.Minute) - defer t.Cleanup(cancel) - err := storeCheckpoint(ctx, ds, checkpoint) - require.NoError(t, err) - got, err := loadCheckpoint(ctx, ds) - require.NoError(t, err) - - assert.Equal(t, checkpoint, got) -} diff --git a/das/checkpoint_test.go b/das/checkpoint_test.go new file mode 100644 index 0000000000..4ad2a952e6 --- /dev/null +++ b/das/checkpoint_test.go @@ -0,0 +1,42 @@ +package das + +import ( + "context" + "testing" + "time" + + "github.com/ipfs/go-datastore" + "github.com/ipfs/go-datastore/sync" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestCheckpointStore(t *testing.T) { + ds := newCheckpointStore(sync.MutexWrap(datastore.NewMapDatastore())) + failed := make(map[uint64]int) + failed[2] = 1 + failed[3] = 2 + cp := checkpoint{ + SampleFrom: 1, + NetworkHead: 6, + Failed: failed, + Workers: []workerCheckpoint{ + { + From: 1, + To: 2, + JobType: retryJob, + }, + { + From: 5, + To: 10, + JobType: recentJob, + }, + }, + } + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + defer t.Cleanup(cancel) + assert.NoError(t, ds.store(ctx, cp)) + got, err := ds.load(ctx) + require.NoError(t, err) + assert.Equal(t, cp, got) +} diff --git a/das/coordinator.go b/das/coordinator.go new file mode 100644 index 0000000000..852a40d24d --- /dev/null +++ b/das/coordinator.go @@ -0,0 +1,155 @@ +package das + +import ( + "context" + "sync" + "time" + + libhead "github.com/celestiaorg/go-header" + + "github.com/celestiaorg/celestia-node/header" + "github.com/celestiaorg/celestia-node/share/p2p/shrexsub" +) + +// samplingCoordinator runs and coordinates sampling workers and updates current sampling state +type samplingCoordinator struct { + concurrencyLimit int + samplingTimeout time.Duration + + getter libhead.Getter[*header.ExtendedHeader] + sampleFn sampleFn + broadcastFn shrexsub.BroadcastFn + + state coordinatorState + + // resultCh fans-in sampling results from worker to coordinator + resultCh chan result + // updHeadCh signals to update network head header height + updHeadCh chan *header.ExtendedHeader + // waitCh signals to block coordinator for external access to state + waitCh chan *sync.WaitGroup + + workersWg sync.WaitGroup + metrics *metrics + done +} + +// result will carry errors to coordinator after worker finishes the job +type result struct { + job + failed map[uint64]int + err error +} + +func newSamplingCoordinator( + params Parameters, + getter libhead.Getter[*header.ExtendedHeader], + sample sampleFn, + broadcast shrexsub.BroadcastFn, +) *samplingCoordinator { + return &samplingCoordinator{ + concurrencyLimit: params.ConcurrencyLimit, + samplingTimeout: params.SampleTimeout, + getter: getter, + 
sampleFn: sample, + broadcastFn: broadcast, + state: newCoordinatorState(params), + resultCh: make(chan result), + updHeadCh: make(chan *header.ExtendedHeader), + waitCh: make(chan *sync.WaitGroup), + done: newDone("sampling coordinator"), + } +} + +func (sc *samplingCoordinator) run(ctx context.Context, cp checkpoint) { + sc.state.resumeFromCheckpoint(cp) + + // resume workers + for _, wk := range cp.Workers { + sc.runWorker(ctx, sc.state.newJob(wk.JobType, wk.From, wk.To)) + } + + for { + for !sc.concurrencyLimitReached() { + next, found := sc.state.nextJob() + if !found { + break + } + sc.runWorker(ctx, next) + } + + select { + case head := <-sc.updHeadCh: + if sc.state.isNewHead(head.Height()) { + if !sc.recentJobsLimitReached() { + sc.runWorker(ctx, sc.state.recentJob(head)) + } + sc.state.updateHead(head.Height()) + // run worker without concurrency limit restrictions to reduced delay + sc.metrics.observeNewHead(ctx) + } + case res := <-sc.resultCh: + sc.state.handleResult(res) + case wg := <-sc.waitCh: + wg.Wait() + case <-ctx.Done(): + sc.workersWg.Wait() + sc.indicateDone() + return + } + } +} + +// runWorker runs job in separate worker go-routine +func (sc *samplingCoordinator) runWorker(ctx context.Context, j job) { + w := newWorker(j, sc.getter, sc.sampleFn, sc.broadcastFn, sc.metrics) + sc.state.putInProgress(j.id, w.getState) + + // launch worker go-routine + sc.workersWg.Add(1) + go func() { + defer sc.workersWg.Done() + w.run(ctx, sc.samplingTimeout, sc.resultCh) + }() +} + +// listen notifies the coordinator about a new network head received via subscription. +func (sc *samplingCoordinator) listen(ctx context.Context, h *header.ExtendedHeader) { + select { + case sc.updHeadCh <- h: + case <-ctx.Done(): + } +} + +// stats pauses the coordinator to get stats in a concurrently safe manner +func (sc *samplingCoordinator) stats(ctx context.Context) (SamplingStats, error) { + var wg sync.WaitGroup + wg.Add(1) + defer wg.Done() + + select { + case sc.waitCh <- &wg: + case <-ctx.Done(): + return SamplingStats{}, ctx.Err() + } + + return sc.state.unsafeStats(), nil +} + +func (sc *samplingCoordinator) getCheckpoint(ctx context.Context) (checkpoint, error) { + stats, err := sc.stats(ctx) + if err != nil { + return checkpoint{}, err + } + return newCheckpoint(stats), nil +} + +// concurrencyLimitReached indicates whether concurrencyLimit has been reached +func (sc *samplingCoordinator) concurrencyLimitReached() bool { + return len(sc.state.inProgress) >= sc.concurrencyLimit +} + +// recentJobsLimitReached indicates whether concurrency limit for recent jobs has been reached +func (sc *samplingCoordinator) recentJobsLimitReached() bool { + return len(sc.state.inProgress) >= 2*sc.concurrencyLimit +} diff --git a/das/coordinator_test.go b/das/coordinator_test.go new file mode 100644 index 0000000000..1ae54c470f --- /dev/null +++ b/das/coordinator_test.go @@ -0,0 +1,635 @@ +package das + +import ( + "context" + "errors" + "fmt" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/types" + + "github.com/celestiaorg/celestia-node/header" + "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/share/p2p/shrexsub" +) + +func TestCoordinator(t *testing.T) { + t.Run("test run", func(t *testing.T) { + testParams := defaultTestParams() + + ctx, cancel := context.WithTimeout(context.Background(), testParams.timeoutDelay) + sampler := newMockSampler(testParams.sampleFrom, 
testParams.networkHead)
+		coordinator := newSamplingCoordinator(testParams.dasParams, getterStub{}, onceMiddleWare(sampler.sample), nil)
+
+		go coordinator.run(ctx, sampler.checkpoint)
+
+		// check if all jobs were sampled successfully
+		assert.NoError(t, sampler.finished(ctx), "not all headers were sampled")
+
+		// wait for the coordinator to finish catch-up
+		assert.NoError(t, coordinator.state.waitCatchUp(ctx))
+		assert.Emptyf(t, coordinator.state.failed, "failed list should be empty")
+
+		cancel()
+		stopCtx, cancel := context.WithTimeout(context.Background(), testParams.timeoutDelay)
+		defer cancel()
+		assert.NoError(t, coordinator.wait(stopCtx))
+		assert.Equal(t, sampler.finalState(), newCheckpoint(coordinator.state.unsafeStats()))
+	})
+
+	t.Run("discovered new headers", func(t *testing.T) {
+		testParams := defaultTestParams()
+
+		ctx, cancel := context.WithTimeout(context.Background(), testParams.timeoutDelay)
+
+		sampler := newMockSampler(testParams.sampleFrom, testParams.networkHead)
+
+		newhead := testParams.networkHead + 200
+		coordinator := newSamplingCoordinator(testParams.dasParams, getterStub{}, sampler.sample, newBroadcastMock(1))
+		go coordinator.run(ctx, sampler.checkpoint)
+
+		// discover new height
+		sampler.discover(ctx, newhead, coordinator.listen)
+
+		// check if all jobs were sampled successfully
+		assert.NoError(t, sampler.finished(ctx), "not all headers were sampled")
+
+		// wait for the coordinator to finish catch-up
+		assert.NoError(t, coordinator.state.waitCatchUp(ctx))
+		assert.Emptyf(t, coordinator.state.failed, "failed list should be empty")
+
+		cancel()
+		stopCtx, cancel := context.WithTimeout(context.Background(), testParams.timeoutDelay)
+		defer cancel()
+		assert.NoError(t, coordinator.wait(stopCtx))
+		assert.Equal(t, sampler.finalState(), newCheckpoint(coordinator.state.unsafeStats()))
+	})
+
+	t.Run("prioritize newly discovered over known", func(t *testing.T) {
+		testParams := defaultTestParams()
+
+		testParams.dasParams.ConcurrencyLimit = 1
+		testParams.dasParams.SamplingRange = 4
+
+		testParams.sampleFrom = 1
+		testParams.networkHead = 10
+		toBeDiscovered := uint64(20)
+
+		sampler := newMockSampler(testParams.sampleFrom, testParams.networkHead)
+
+		ctx, cancel := context.WithTimeout(context.Background(), testParams.timeoutDelay)
+
+		// lock the worker before start, so it cannot finish before the discovery happens
+		lk := newLock(testParams.sampleFrom, testParams.sampleFrom)
+
+		order := newCheckOrder().addInterval(toBeDiscovered, toBeDiscovered)
+
+		// expect the worker to prioritize the newly discovered headers
+		order.addInterval(
+			testParams.sampleFrom,
+			toBeDiscovered,
+		)
+
+		// start the coordinator
+		coordinator := newSamplingCoordinator(testParams.dasParams, getterStub{},
+			lk.middleWare(
+				order.middleWare(sampler.sample),
+			),
+			newBroadcastMock(1),
+		)
+		go coordinator.run(ctx, sampler.checkpoint)
+
+		// discover new height
+		sampler.discover(ctx, toBeDiscovered, coordinator.listen)
+
+		// wait until the only header sampled so far is the newly discovered one
+		for sampler.sampledAmount() != 1 {
+			time.Sleep(time.Millisecond)
+			select {
+			case <-ctx.Done():
+				assert.NoError(t, ctx.Err())
+			default:
+			}
+		}
+
+		// unblock the worker
+		lk.release(testParams.sampleFrom)
+
+		// check if all jobs were sampled successfully
+		assert.NoError(t, sampler.finished(ctx), "not all headers were sampled")
+
+		// wait for the coordinator to finish catch-up
+		assert.NoError(t, coordinator.state.waitCatchUp(ctx))
+		assert.Emptyf(t, coordinator.state.failed, "failed list should be empty")
+
+		cancel()
+		stopCtx, cancel := context.WithTimeout(context.Background(), testParams.timeoutDelay)
+		defer cancel()
+		assert.NoError(t, coordinator.wait(stopCtx))
+		assert.Equal(t, sampler.finalState(), newCheckpoint(coordinator.state.unsafeStats()))
+	})
+
+	t.Run("recent headers sampling routine should not lock other workers", func(t *testing.T) {
+		testParams := defaultTestParams()
+
+		testParams.networkHead = uint64(20)
+		ctx, cancel := context.WithTimeout(context.Background(), testParams.timeoutDelay)
+
+		sampler := newMockSampler(testParams.sampleFrom, testParams.networkHead)
+
+		lk := newLock(testParams.sampleFrom, testParams.networkHead) // lock all workers before start
+		coordinator := newSamplingCoordinator(testParams.dasParams, getterStub{},
+			lk.middleWare(sampler.sample), newBroadcastMock(1))
+		go coordinator.run(ctx, sampler.checkpoint)
+
+		// discover new height and lock it
+		discovered := testParams.networkHead + 1
+		lk.add(discovered)
+		sampler.discover(ctx, discovered, coordinator.listen)
+
+		// check that no headers were sampled yet
+		assert.Equal(t, 0, sampler.sampledAmount())
+
+		// unblock workers to resume sampling
+		lk.releaseAll(discovered)
+
+		// wait for the coordinator to run sample on all headers except the discovered one
+		time.Sleep(100 * time.Millisecond)
+
+		// check that only the last header is pending
+		assert.EqualValues(t, int(discovered-testParams.sampleFrom), sampler.doneAmount())
+		assert.False(t, sampler.heightIsDone(discovered))
+
+		// release all headers for the coordinator
+		lk.releaseAll()
+
+		// check if all jobs were sampled successfully
+		assert.NoError(t, sampler.finished(ctx), "not all headers were sampled")
+
+		// wait for the coordinator to finish catch-up
+		assert.NoError(t, coordinator.state.waitCatchUp(ctx))
+		assert.Emptyf(t, coordinator.state.failed, "failed list is not empty")
+
+		cancel()
+		stopCtx, cancel := context.WithTimeout(context.Background(), testParams.timeoutDelay)
+		defer cancel()
+		assert.NoError(t, coordinator.wait(stopCtx))
+		assert.Equal(t, sampler.finalState(), newCheckpoint(coordinator.state.unsafeStats()))
+	})
+
+	t.Run("failed should be stored", func(t *testing.T) {
+		testParams := defaultTestParams()
+		testParams.sampleFrom = 1
+		ctx, cancel := context.WithTimeout(context.Background(), testParams.timeoutDelay)
+
+		bornToFail := []uint64{4, 8, 15, 16, 23, 42}
+		sampler := newMockSampler(testParams.sampleFrom, testParams.networkHead, bornToFail...)
+
+		coordinator := newSamplingCoordinator(
+			testParams.dasParams,
+			getterStub{},
+			onceMiddleWare(sampler.sample),
+			newBroadcastMock(1),
+		)
+		go coordinator.run(ctx, sampler.checkpoint)
+
+		// wait for the coordinator to go over all headers
+		assert.NoError(t, sampler.finished(ctx))
+
+		cancel()
+		stopCtx, cancel := context.WithTimeout(context.Background(), testParams.timeoutDelay)
+		defer cancel()
+		assert.NoError(t, coordinator.wait(stopCtx))
+
+		// a failed item should either be in the failed map or be processed by a retry worker
+		cp := newCheckpoint(coordinator.state.unsafeStats())
+		for _, failedHeight := range bornToFail {
+			if _, ok := cp.Failed[failedHeight]; ok {
+				continue
+			}
+			retrying := false
+			for _, w := range cp.Workers {
+				if w.JobType == retryJob && w.From == failedHeight {
+					retrying = true
+					break
+				}
+			}
+			if !retrying {
+				t.Error("header is found neither in failed nor in workers")
+			}
+		}
+	})
+
+	t.Run("failed should retry on restart", func(t *testing.T) {
+		testParams := defaultTestParams()
+
+		testParams.sampleFrom = uint64(50)
+		ctx, cancel := context.WithTimeout(context.Background(), testParams.timeoutDelay)
+
+		failedLastRun := map[uint64]int{4: 1, 8: 2, 15: 1, 16: 1, 23: 1, 42: 1, testParams.sampleFrom - 1: 1}
+
+		sampler := newMockSampler(testParams.sampleFrom, testParams.networkHead)
+		sampler.checkpoint.Failed = failedLastRun
+
+		coordinator := newSamplingCoordinator(
+			testParams.dasParams,
+			getterStub{},
+			onceMiddleWare(sampler.sample),
+			newBroadcastMock(1),
+		)
+		go coordinator.run(ctx, sampler.checkpoint)
+
+		// check if all jobs were sampled successfully
+		assert.NoError(t, sampler.finished(ctx), "not all headers were sampled")
+
+		// wait for the coordinator to finish catch-up
+		assert.NoError(t, coordinator.state.waitCatchUp(ctx))
+
+		cancel()
+		stopCtx, cancel := context.WithTimeout(context.Background(), testParams.timeoutDelay)
+		defer cancel()
+		assert.NoError(t, coordinator.wait(stopCtx))
+
+		expectedState := sampler.finalState()
+		expectedState.Failed = make(map[uint64]int)
+		assert.Equal(t, expectedState, newCheckpoint(coordinator.state.unsafeStats()))
+	})
+
+	t.Run("persist retry count after restart", func(t *testing.T) {
+		testParams := defaultTestParams()
+		testParams.dasParams.ConcurrencyLimit = 5
+		ctx, cancel := context.WithTimeout(context.Background(), testParams.timeoutDelay)
+
+		ch := checkpoint{
+			SampleFrom:  testParams.sampleFrom,
+			NetworkHead: testParams.networkHead,
+			Failed:      map[uint64]int{1: 1, 2: 2, 3: 3, 4: 4, 5: 5},
+			Workers:     []workerCheckpoint{},
+		}
+
+		waitCh := make(chan struct{})
+		var wg sync.WaitGroup
+		wg.Add(testParams.dasParams.ConcurrencyLimit)
+		sampleFn := func(ctx context.Context, h *header.ExtendedHeader) error {
+			wg.Done()
+			select {
+			case <-ctx.Done():
+				return ctx.Err()
+			case <-waitCh:
+				return nil
+			}
+		}
+
+		coordinator := newSamplingCoordinator(
+			testParams.dasParams,
+			getterStub{},
+			sampleFn,
+			newBroadcastMock(1),
+		)
+
+		go coordinator.run(ctx, ch)
+		cancel()
+		wg.Wait()
+		close(waitCh)
+
+		stopCtx, cancel := context.WithTimeout(context.Background(), testParams.timeoutDelay)
+		defer cancel()
+		assert.NoError(t, coordinator.wait(stopCtx))
+
+		st := coordinator.state.unsafeStats()
+		require.Equal(t, ch, newCheckpoint(st))
+	})
+}
+
+func BenchmarkCoordinator(b *testing.B) {
+	timeoutDelay := 5 * time.Second
+
+	params := DefaultParameters()
+	params.SamplingRange = 10
+	params.ConcurrencyLimit = 100
+
+	b.Run("bench run", func(b *testing.B) {
+		ctx, cancel := context.WithTimeout(context.Background(), timeoutDelay)
+		coordinator := newSamplingCoordinator(
+			params,
+			newBenchGetter(),
+			func(ctx context.Context, h *header.ExtendedHeader) error { return nil },
+			newBroadcastMock(1),
+		)
+		go coordinator.run(ctx, checkpoint{
+			SampleFrom:  1,
+			NetworkHead: uint64(b.N),
+		})
+
+		// wait for the coordinator to finish catch-up
+		if err := coordinator.state.waitCatchUp(ctx); err != nil {
+			b.Error(err)
+		}
+		cancel()
+	})
+}
+
+// mockSampler ensures that all headers in the range get sampled,
+// except the ones that are born to fail
+type mockSampler struct {
+	lock sync.Mutex
+
+	checkpoint
+	bornToFail map[uint64]bool
+	done       map[uint64]int
+
+	isFinished bool
+	finishedCh chan struct{}
+}
+
+func newMockSampler(sampledBefore, sampleTo uint64, bornToFail ...uint64) mockSampler {
+	failMap := make(map[uint64]bool)
+	for _, h := range bornToFail {
+		failMap[h] = true
+	}
+	return mockSampler{
+		checkpoint: checkpoint{
+			SampleFrom:  sampledBefore,
+			NetworkHead: sampleTo,
+			Failed:      make(map[uint64]int),
+			Workers:     make([]workerCheckpoint, 0),
+		},
+		bornToFail: failMap,
+		done:       make(map[uint64]int),
+		finishedCh: make(chan struct{}),
+	}
+}
+
+func (m *mockSampler) sample(ctx context.Context, h *header.ExtendedHeader) error {
+	if err := ctx.Err(); err != nil {
+		return err
+	}
+
+	m.lock.Lock()
+	defer m.lock.Unlock()
+
+	height := h.Height()
+	m.done[height]++
+
+	if len(m.done) > int(m.NetworkHead-m.SampleFrom) && !m.isFinished {
+		m.isFinished = true
+		close(m.finishedCh)
+	}
+
+	if m.bornToFail[height] {
+		return errors.New("born to fail, sad life")
+	}
+
+	if height > m.NetworkHead || height < m.SampleFrom {
+		if _, ok := m.checkpoint.Failed[height]; !ok {
+			return fmt.Errorf("header: %v out of range: %v-%v", h, m.SampleFrom, m.NetworkHead)
+		}
+	}
+	return nil
+}
+
+// finished returns once all jobs have been sampled successfully
+func (m *mockSampler) finished(ctx context.Context) error {
+	select {
+	case <-ctx.Done():
+		return ctx.Err()
+	case <-m.finishedCh:
+	}
+	return nil
+}
+
+func (m *mockSampler) heightIsDone(h uint64) bool {
+	m.lock.Lock()
+	defer m.lock.Unlock()
+	return m.done[h] != 0
+}
+
+func (m *mockSampler) doneAmount() int {
+	m.lock.Lock()
+	defer m.lock.Unlock()
+	return len(m.done)
+}
+
+func (m *mockSampler) finalState() checkpoint {
+	m.lock.Lock()
+	defer m.lock.Unlock()
+
+	finalState := m.checkpoint
+	finalState.SampleFrom = finalState.NetworkHead + 1
+	return finalState
+}
+
+func (m *mockSampler) discover(ctx context.Context, newHeight uint64, emit listenFn) {
+	m.lock.Lock()
+
+	if newHeight > m.checkpoint.NetworkHead {
+		m.checkpoint.NetworkHead = newHeight
+		if m.isFinished {
+			m.finishedCh = make(chan struct{})
+			m.isFinished = false
+		}
+	}
+	m.lock.Unlock()
+	emit(ctx, &header.ExtendedHeader{
+		Commit:    &types.Commit{},
+		RawHeader: header.RawHeader{Height: int64(newHeight)},
+		DAH:       &share.Root{RowRoots: make([][]byte, 0)},
+	})
+}
+
+func (m *mockSampler) sampledAmount() int {
+	m.lock.Lock()
+	defer m.lock.Unlock()
+	return len(m.done)
+}
+
+// checkOrder ensures the correct order of operations
+type checkOrder struct {
+	lock  sync.Mutex
+	queue []uint64
+}
+
+func newCheckOrder() *checkOrder {
+	return &checkOrder{}
+}
+
+func (o *checkOrder) addInterval(start, end uint64) *checkOrder {
+	o.lock.Lock()
+	defer o.lock.Unlock()
+
+	if end > start {
+		for end >= start {
+			o.queue = append(o.queue, start)
+			start++
+		}
+		return o
+	}
+
+	for start >= end {
+		o.queue = append(o.queue, start)
+		if start == 0 {
+			return o
+		}
+		start--
+	}
+	return o
+}
+
+// addStacks splits the interval into ranges of stackSize length
+// and enqueues them in reverse order
+func (o *checkOrder) addStacks(start, end, stackSize uint64) uint64 {
+	if start+stackSize-1 < end {
+		end = o.addStacks(start+stackSize, end, stackSize)
+	}
+	if start > end {
+		start = end
+	}
+	o.addInterval(start, end)
+	return start - 1
+}
+
+func TestOrder(t *testing.T) {
+	o := newCheckOrder().addInterval(0, 3).addInterval(3, 0)
+	assert.Equal(t, []uint64{0, 1, 2, 3, 3, 2, 1, 0}, o.queue)
+}
+
+func TestStack(t *testing.T) {
+	o := newCheckOrder()
+	o.addStacks(10, 20, 3)
+	assert.Equal(t, []uint64{19, 20, 16, 17, 18, 13, 14, 15, 10, 11, 12}, o.queue)
+}
+
+func (o *checkOrder) middleWare(out sampleFn) sampleFn {
+	return func(ctx context.Context, h *header.ExtendedHeader) error {
+		o.lock.Lock()
+
+		if len(o.queue) > 0 {
+			// check that the item at the front of the queue matches the input
+			if o.queue[0] != h.Height() {
+				defer o.lock.Unlock()
+				return fmt.Errorf("expected height: %v, got: %v", o.queue[0], h.Height())
+			}
+			o.queue = o.queue[1:]
+		}
+
+		o.lock.Unlock()
+		return out(ctx, h)
+	}
+}
+
+// lock blocks operations if the item is in the lock list
+type lock struct {
+	m         sync.Mutex
+	blockList map[uint64]chan struct{}
+}
+
+func newLock(from, to uint64) *lock {
+	list := make(map[uint64]chan struct{})
+	for from <= to {
+		list[from] = make(chan struct{})
+		from++
+	}
+	return &lock{
+		blockList: list,
+	}
+}
+
+func (l *lock) add(hs ...uint64) {
+	l.m.Lock()
+	defer l.m.Unlock()
+	for _, h := range hs {
+		l.blockList[h] = make(chan struct{})
+	}
+}
+
+func (l *lock) release(hs ...uint64) {
+	l.m.Lock()
+	defer l.m.Unlock()
+
+	for _, h := range hs {
+		if ch, ok := l.blockList[h]; ok {
+			close(ch)
+			delete(l.blockList, h)
+		}
+	}
+}
+
+func (l *lock) releaseAll(except ...uint64) {
+	m := make(map[uint64]bool)
+	for _, h := range except {
+		m[h] = true
+	}
+
+	l.m.Lock()
+	defer l.m.Unlock()
+
+	for h, ch := range l.blockList {
+		if m[h] {
+			continue
+		}
+		close(ch)
+		delete(l.blockList, h)
+	}
+}
+
+func (l *lock) middleWare(out sampleFn) sampleFn {
+	return func(ctx context.Context, h *header.ExtendedHeader) error {
+		l.m.Lock()
+		ch, blocked := l.blockList[h.Height()]
+		l.m.Unlock()
+		if !blocked {
+			return out(ctx, h)
+		}
+
+		select {
+		case <-ch:
+			return out(ctx, h)
+		case <-ctx.Done():
+			return ctx.Err()
+		}
+	}
+}
+
+func onceMiddleWare(out sampleFn) sampleFn {
+	db := make(map[uint64]int)
+	m := sync.Mutex{}
+	return func(ctx context.Context, h *header.ExtendedHeader) error {
+		m.Lock()
+		db[h.Height()]++
+		if db[h.Height()] > 1 {
+			m.Unlock()
+			return fmt.Errorf("header sampled more than once: %v", h.Height())
+		}
+		m.Unlock()
+		return out(ctx, h)
+	}
+}
+
+type testParams struct {
+	networkHead  uint64
+	sampleFrom   uint64
+	timeoutDelay time.Duration
+	dasParams    Parameters
+}
+
+func defaultTestParams() testParams {
+	dasParamsDefault := DefaultParameters()
+	return testParams{
+		networkHead:  uint64(500),
+		sampleFrom:   dasParamsDefault.SampleFrom,
+		timeoutDelay: 5 * time.Second,
+		dasParams:    dasParamsDefault,
+	}
+}
+
+func newBroadcastMock(callLimit int) shrexsub.BroadcastFn {
+	var m sync.Mutex
+	return func(ctx context.Context, hash shrexsub.Notification) error {
+		m.Lock()
+		defer m.Unlock()
+		if callLimit == 0 {
+			return errors.New("exceeded mock call limit")
+		}
+		callLimit--
+		return nil
+	}
+}
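The tests above build up sampling behavior by wrapping `sampleFn` in small decorators (`onceMiddleWare`, `lock.middleWare`, `checkOrder.middleWare`); the outermost wrapper runs first, so `lk.middleWare(order.middleWare(sampler.sample))` gates on the lock before checking ordering. A minimal sketch of the pattern, using a hypothetical `countingMiddleWare` that is not part of this changeset:

	// countingMiddleWare is illustrative only: it wraps a sampleFn and returns
	// a new sampleFn that counts invocations before delegating to the wrapped one.
	func countingMiddleWare(out sampleFn, counter *atomic.Int64) sampleFn {
		return func(ctx context.Context, h *header.ExtendedHeader) error {
			counter.Add(1) // extra behavior added around the wrapped call
			return out(ctx, h)
		}
	}
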
"github.com/celestiaorg/celestia-node/fraud" + "github.com/celestiaorg/go-fraud" + libhead "github.com/celestiaorg/go-header" + "github.com/celestiaorg/celestia-node/header" - "github.com/celestiaorg/celestia-node/ipld" - "github.com/celestiaorg/celestia-node/service/share" + "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/share/eds/byzantine" + "github.com/celestiaorg/celestia-node/share/p2p/shrexsub" ) var log = logging.Logger("das") // DASer continuously validates availability of data committed to headers. type DASer struct { - ctx context.Context - cancel context.CancelFunc + params Parameters da share.Availability - bcast fraud.Broadcaster - hsub header.Subscriber // listens for new headers in the network - getter header.Getter // retrieves past headers - - cstore datastore.Datastore // checkpoint store - state state + bcast fraud.Broadcaster[*header.ExtendedHeader] + hsub libhead.Subscriber[*header.ExtendedHeader] // listens for new headers in the network + getter libhead.Getter[*header.ExtendedHeader] // retrieves past headers - jobsCh chan *catchUpJob + sampler *samplingCoordinator + store checkpointStore + subscriber subscriber - sampleDn chan struct{} // done signal for sample loop - catchUpDn chan struct{} // done signal for catchUp loop + cancel context.CancelFunc + subscriberDone chan struct{} + running int32 } +type listenFn func(context.Context, *header.ExtendedHeader) +type sampleFn func(context.Context, *header.ExtendedHeader) error + // NewDASer creates a new DASer. func NewDASer( da share.Availability, - hsub header.Subscriber, - getter header.Getter, - cstore datastore.Datastore, - bcast fraud.Broadcaster, -) *DASer { - wrappedDS := wrapCheckpointStore(cstore) - return &DASer{ - da: da, - bcast: bcast, - hsub: hsub, - getter: getter, - cstore: wrappedDS, - jobsCh: make(chan *catchUpJob, 16), - sampleDn: make(chan struct{}), - catchUpDn: make(chan struct{}), + hsub libhead.Subscriber[*header.ExtendedHeader], + getter libhead.Getter[*header.ExtendedHeader], + dstore datastore.Datastore, + bcast fraud.Broadcaster[*header.ExtendedHeader], + shrexBroadcast shrexsub.BroadcastFn, + options ...Option, +) (*DASer, error) { + d := &DASer{ + params: DefaultParameters(), + da: da, + bcast: bcast, + hsub: hsub, + getter: getter, + store: newCheckpointStore(dstore), + subscriber: newSubscriber(), + subscriberDone: make(chan struct{}), + } + + for _, applyOpt := range options { + applyOpt(d) + } + + err := d.params.Validate() + if err != nil { + return nil, err } + + d.sampler = newSamplingCoordinator(d.params, getter, d.sample, shrexBroadcast) + return d, nil } // Start initiates subscription for new ExtendedHeaders and spawns a sampling routine. -func (d *DASer) Start(context.Context) error { - if d.cancel != nil { - return fmt.Errorf("da: DASer already started") +func (d *DASer) Start(ctx context.Context) error { + if !atomic.CompareAndSwapInt32(&d.running, 0, 1) { + return errors.New("da: DASer already started") } sub, err := d.hsub.Subscribe() @@ -68,268 +87,104 @@ func (d *DASer) Start(context.Context) error { return err } - d.ctx, d.cancel = context.WithCancel(context.Background()) - // load latest DASed checkpoint - checkpoint, err := loadCheckpoint(d.ctx, d.cstore) + cp, err := d.store.load(ctx) if err != nil { - return err + log.Warnw("checkpoint not found, initializing with height 1") + + cp = checkpoint{ + SampleFrom: d.params.SampleFrom, + NetworkHead: d.params.SampleFrom, + } + + // attempt to get head info. 
No need to handle error, later DASer + // will be able to find new head from subscriber after it is started + if h, err := d.getter.Head(ctx); err == nil { + cp.NetworkHead = h.Height() + } } - log.Infow("loaded checkpoint", "height", checkpoint) + log.Info("starting DASer from checkpoint: ", cp.String()) + + runCtx, cancel := context.WithCancel(context.Background()) + d.cancel = cancel + + go d.sampler.run(runCtx, cp) + go d.subscriber.run(runCtx, sub, d.sampler.listen) + go d.store.runBackgroundStore(runCtx, d.params.BackgroundStoreInterval, d.sampler.getCheckpoint) - // kick off catch-up routine manager - go d.catchUpManager(d.ctx, checkpoint) - // kick off sampling routine for recently received headers - go d.sample(d.ctx, sub, checkpoint) return nil } // Stop stops sampling. func (d *DASer) Stop(ctx context.Context) error { - // Stop func can now be invoked twice in one Lifecycle, when: - // * BEFP is received; - // * node is stopping; - // this statement helps avoiding panic on the second Stop. - // If ctx.Err is not nil then it means that Stop was called for the first time. - // NOTE: we are expecting *only* ContextCancelled error here. - if d.ctx.Err() == context.Canceled { + if !atomic.CompareAndSwapInt32(&d.running, 1, 0) { return nil } - d.cancel() - // wait for both sampling routines to exit - for i := 0; i < 2; i++ { - select { - case <-d.catchUpDn: - case <-d.sampleDn: - case <-ctx.Done(): - return ctx.Err() - } - } - d.cancel = nil - return nil -} - -// sample validates availability for each Header received from header subscription. -func (d *DASer) sample(ctx context.Context, sub header.Subscription, checkpoint int64) { - // indicate sampling routine is running - d.indicateRunning() - defer func() { - sub.Cancel() - // indicate sampling routine is stopped - d.indicateStopped() - // send done signal - d.sampleDn <- struct{}{} - }() - - // sampleHeight tracks the last successful height of this routine - sampleHeight := checkpoint - - for { - h, err := sub.NextHeader(ctx) - if err != nil { - if err == context.Canceled { - return - } - - log.Errorw("failed to get next header", "err", err) - continue - } - - // If the next header coming through gossipsub is not adjacent - // to our last DASed header, kick off routine to DAS all headers - // between last DASed header and h. This situation could occur - // either on start or due to network latency/disconnection. - if h.Height > sampleHeight+1 { - // DAS headers between last DASed height up to the current - // header - job := &catchUpJob{ - from: sampleHeight, - to: h.Height - 1, - } - select { - case <-ctx.Done(): - return - case d.jobsCh <- job: - } - } - err = d.sampleHeader(ctx, h) - if err != nil { - // record error - d.updateSampleState(h, err) - log.Warn("DASer SAMPLING ROUTINE WILL BE STOPPED. IN ORDER TO CONTINUE SAMPLING, " + - "RE-START THE NODE") - return - } - - d.updateSampleState(h, nil) - sampleHeight = h.Height + // try to store checkpoint without waiting for coordinator and workers to stop + cp, err := d.sampler.getCheckpoint(ctx) + if err != nil { + log.Error("DASer coordinator checkpoint is unavailable") } -} -// catchUpJob represents a catch-up job. (from:to] -type catchUpJob struct { - from, to int64 -} - -// catchUpManager manages catch-up jobs, performing them one at a time, exiting -// only once context is canceled and storing latest DASed checkpoint to disk. 
-func (d *DASer) catchUpManager(ctx context.Context, checkpoint int64) { - defer func() { - // store latest DASed checkpoint to disk here to ensure that if DASer is not yet - // fully caught up to network head, it will resume DASing from this checkpoint - // up to current network head - // TODO @renaynay: Implement Share Cache #180 to ensure no duplicate DASing over same - // header - if err := storeCheckpoint(ctx, d.cstore, checkpoint); err != nil { - log.Errorw("storing checkpoint to disk", "height", checkpoint, "err", err) - } - log.Infow("stored checkpoint to disk", "checkpoint", checkpoint) - // signal that catch-up routine finished - d.catchUpDn <- struct{}{} - }() - - for { - select { - case <-ctx.Done(): - return - case job := <-d.jobsCh: - // record details of incoming job - d.recordJobDetails(job) - // perform catchUp routine - height, err := d.catchUp(ctx, job) - // store the height of the last successfully sampled header - checkpoint = height - // exit routine if a catch-up job was unsuccessful - if err != nil { - // record error - d.state.catchUpLk.Lock() - d.state.catchUp.Error = err - d.state.catchUpLk.Unlock() - - log.Errorw("catch-up routine failed", "attempted range: from", job.from, "to", job.to) - log.Warn("DASer CATCH-UP SAMPLING ROUTINE WILL BE STOPPED. IN ORDER TO CONTINUE SAMPLING, " + - "RE-START THE NODE") - return - } - } + if err = d.store.store(ctx, cp); err != nil { + log.Errorw("storing checkpoint to disk", "err", err) } -} -// catchUp starts a sampling routine for headers starting at the next header -// after the `from` height and exits the loop once `to` is reached. (from:to] -func (d *DASer) catchUp(ctx context.Context, job *catchUpJob) (int64, error) { - log.Infow("sampling past headers", "from", job.from, "to", job.to) - - // start sampling from height at checkpoint+1 since the - // checkpoint height is DASed by broader sample routine - for height := job.from + 1; height <= job.to; height++ { - h, err := d.getter.GetByHeight(ctx, uint64(height)) - if err != nil { - if err == context.Canceled { - // report previous height as the last successfully sampled height and - // error as nil since the routine was ordered to stop - return height - 1, nil - } + d.cancel() + if err = d.sampler.wait(ctx); err != nil { + return fmt.Errorf("DASer force quit: %w", err) + } - log.Errorw("failed to get next header", "height", height, "err", err) - // report previous height as the last successfully sampled height - return height - 1, err - } + // save updated checkpoint after sampler and all workers are shut down + if err = d.store.store(ctx, newCheckpoint(d.sampler.state.unsafeStats())); err != nil { + log.Errorw("storing checkpoint to disk", "err", err) + } - err = d.sampleHeader(ctx, h) - if err != nil { - return h.Height - 1, err - } - d.state.catchUpLk.Lock() - d.state.catchUp.Height = uint64(h.Height) - d.state.catchUpLk.Unlock() + if err = d.store.wait(ctx); err != nil { + return fmt.Errorf("DASer force quit with err: %w", err) } - d.state.catchUpLk.Lock() - d.state.catchUp.End = time.Now() - d.state.catchUpLk.Unlock() - - jobDetails := d.CatchUpRoutineState() - log.Infow("successfully sampled past headers", "from", job.from, - "to", job.to, "finished (s)", jobDetails.Duration()) - // report successful result - return job.to, nil + return d.subscriber.wait(ctx) } -func (d *DASer) sampleHeader(ctx context.Context, h *header.ExtendedHeader) error { - startTime := time.Now() +func (d *DASer) sample(ctx context.Context, h *header.ExtendedHeader) error { + // 
short-circuit if pruning is enabled and the header is outside the + // availability window + if !d.isWithinSamplingWindow(h) { + log.Debugw("skipping header outside sampling window", "height", h.Height(), + "time", h.Time()) + return nil + } - err := d.da.SharesAvailable(ctx, h.DAH) + err := d.da.SharesAvailable(ctx, h) if err != nil { - if err == context.Canceled { - return nil - } - var byzantineErr *ipld.ErrByzantine + var byzantineErr *byzantine.ErrByzantine if errors.As(err, &byzantineErr) { log.Warn("Propagating proof...") - sendErr := d.bcast.Broadcast(ctx, fraud.CreateBadEncodingProof(h.Hash(), uint64(h.Height), byzantineErr)) + sendErr := d.bcast.Broadcast(ctx, byzantine.CreateBadEncodingProof(h.Hash(), h.Height(), byzantineErr)) if sendErr != nil { log.Errorw("fraud proof propagating failed", "err", sendErr) } } - log.Errorw("sampling failed", "height", h.Height, "hash", h.Hash(), - "square width", len(h.DAH.RowsRoots), "data root", h.DAH.Hash(), "err", err) - // report previous height as the last successfully sampled height return err } - - sampleTime := time.Since(startTime) - log.Infow("sampled header", "height", h.Height, "hash", h.Hash(), - "square width", len(h.DAH.RowsRoots), "finished (s)", sampleTime.Seconds()) - return nil } -// SampleRoutineState reports the current state of the -// DASer's main sampling routine. -func (d *DASer) SampleRoutineState() RoutineState { - d.state.sampleLk.RLock() - state := d.state.sample - d.state.sampleLk.RUnlock() - return state -} - -func (d *DASer) updateSampleState(h *header.ExtendedHeader, err error) { - height := uint64(h.Height) - - d.state.sampleLk.Lock() - defer d.state.sampleLk.Unlock() - d.state.sample.LatestSampledHeight = height - d.state.sample.LatestSampledSquareWidth = uint64(len(h.DAH.RowsRoots)) - d.state.sample.Error = err -} - -func (d *DASer) indicateRunning() { - d.state.sampleLk.Lock() - defer d.state.sampleLk.Unlock() - d.state.sample.IsRunning = true -} - -func (d *DASer) indicateStopped() { - d.state.sampleLk.Lock() - defer d.state.sampleLk.Unlock() - d.state.sample.IsRunning = false +func (d *DASer) isWithinSamplingWindow(eh *header.ExtendedHeader) bool { + // if sampling window is not set, then all headers are within the window + if d.params.SamplingWindow == 0 { + return true + } + return time.Since(eh.Time()) <= d.params.SamplingWindow } -// CatchUpRoutineState reports the current state of the -// DASer's `catchUp` routine. -func (d *DASer) CatchUpRoutineState() JobInfo { - d.state.catchUpLk.RLock() - state := d.state.catchUp - d.state.catchUpLk.RUnlock() - return state +// SamplingStats returns the current statistics over the DA sampling process. 
+func (d *DASer) SamplingStats(ctx context.Context) (SamplingStats, error) { + return d.sampler.stats(ctx) } -func (d *DASer) recordJobDetails(job *catchUpJob) { - d.state.catchUpLk.Lock() - defer d.state.catchUpLk.Unlock() - d.state.catchUp.ID++ - d.state.catchUp.From = uint64(job.from) - d.state.catchUp.To = uint64(job.to) - d.state.catchUp.Start = time.Now() +// WaitCatchUp waits for DASer to indicate catchup is done +func (d *DASer) WaitCatchUp(ctx context.Context) error { + return d.sampler.state.waitCatchUp(ctx) } diff --git a/das/daser_test.go b/das/daser_test.go index 14a49bba35..9eec6392cc 100644 --- a/das/daser_test.go +++ b/das/daser_test.go @@ -2,23 +2,36 @@ package das import ( "context" - "sync" + "strconv" "testing" "time" - "github.com/ipfs/go-blockservice" + "github.com/golang/mock/gomock" + "github.com/ipfs/boxo/blockservice" "github.com/ipfs/go-datastore" ds_sync "github.com/ipfs/go-datastore/sync" - mdutils "github.com/ipfs/go-merkledag/test" pubsub "github.com/libp2p/go-libp2p-pubsub" mocknet "github.com/libp2p/go-libp2p/p2p/net/mock" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - tmbytes "github.com/tendermint/tendermint/libs/bytes" + "github.com/tendermint/tendermint/types" + + "github.com/celestiaorg/go-fraud" + "github.com/celestiaorg/go-fraud/fraudserv" + "github.com/celestiaorg/go-fraud/fraudtest" + libhead "github.com/celestiaorg/go-header" - "github.com/celestiaorg/celestia-node/fraud" "github.com/celestiaorg/celestia-node/header" - "github.com/celestiaorg/celestia-node/service/share" + "github.com/celestiaorg/celestia-node/header/headertest" + headerfraud "github.com/celestiaorg/celestia-node/header/headertest/fraud" + "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/share/availability/full" + "github.com/celestiaorg/celestia-node/share/availability/light" + "github.com/celestiaorg/celestia-node/share/availability/mocks" + availability_test "github.com/celestiaorg/celestia-node/share/availability/test" + "github.com/celestiaorg/celestia-node/share/eds/byzantine" + "github.com/celestiaorg/celestia-node/share/getters" + "github.com/celestiaorg/celestia-node/share/ipld" ) var timeout = time.Second * 15 @@ -27,69 +40,70 @@ var timeout = time.Second * 15 // the DASer checkpoint is updated to network head. 
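For orientation, a rough sketch of how a caller drives the reworked lifecycle introduced above (the dependency values are placeholders assumed to be wired elsewhere in the node; error handling abbreviated):

	// illustrative only: construct, start, and eventually stop the DASer
	d, err := das.NewDASer(avail, hsub, getter, ds, bcast, shrexBroadcast,
		das.WithSamplingRange(50), // optional functional options
	)
	if err != nil {
		return err
	}
	if err := d.Start(ctx); err != nil {
		return err
	}
	// optionally block until sampling has caught up to the network head
	if err := d.WaitCatchUp(ctx); err != nil {
		return err
	}
	defer d.Stop(ctx) // persists the checkpoint and waits for workers
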
diff --git a/das/daser_test.go b/das/daser_test.go
index 14a49bba35..9eec6392cc 100644
--- a/das/daser_test.go
+++ b/das/daser_test.go
@@ -2,23 +2,36 @@ package das
 
 import (
 	"context"
-	"sync"
+	"strconv"
 	"testing"
 	"time"
 
-	"github.com/ipfs/go-blockservice"
+	"github.com/golang/mock/gomock"
+	"github.com/ipfs/boxo/blockservice"
 	"github.com/ipfs/go-datastore"
 	ds_sync "github.com/ipfs/go-datastore/sync"
-	mdutils "github.com/ipfs/go-merkledag/test"
 	pubsub "github.com/libp2p/go-libp2p-pubsub"
 	mocknet "github.com/libp2p/go-libp2p/p2p/net/mock"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
-	tmbytes "github.com/tendermint/tendermint/libs/bytes"
+	"github.com/tendermint/tendermint/types"
+
+	"github.com/celestiaorg/go-fraud"
+	"github.com/celestiaorg/go-fraud/fraudserv"
+	"github.com/celestiaorg/go-fraud/fraudtest"
+	libhead "github.com/celestiaorg/go-header"
 
-	"github.com/celestiaorg/celestia-node/fraud"
 	"github.com/celestiaorg/celestia-node/header"
-	"github.com/celestiaorg/celestia-node/service/share"
+	"github.com/celestiaorg/celestia-node/header/headertest"
+	headerfraud "github.com/celestiaorg/celestia-node/header/headertest/fraud"
+	"github.com/celestiaorg/celestia-node/share"
+	"github.com/celestiaorg/celestia-node/share/availability/full"
+	"github.com/celestiaorg/celestia-node/share/availability/light"
+	"github.com/celestiaorg/celestia-node/share/availability/mocks"
+	availability_test "github.com/celestiaorg/celestia-node/share/availability/test"
+	"github.com/celestiaorg/celestia-node/share/eds/byzantine"
+	"github.com/celestiaorg/celestia-node/share/getters"
+	"github.com/celestiaorg/celestia-node/share/ipld"
 )
 
 var timeout = time.Second * 15
 
@@ -27,69 +40,70 @@
 // the DASer checkpoint is updated to network head.
 func TestDASerLifecycle(t *testing.T) {
 	ds := ds_sync.MutexWrap(datastore.NewMapDatastore())
-	bServ := mdutils.Bserv()
-	avail := share.TestLightAvailability(bServ)
+	bServ := ipld.NewMemBlockservice()
+	avail := light.TestAvailability(getters.NewIPLDGetter(bServ))
 	// 15 headers from the past and 15 future headers
-	mockGet, shareServ, sub, mockService := createDASerSubcomponents(t, bServ, 15, 15, avail)
+	mockGet, sub, mockService := createDASerSubcomponents(t, bServ, 15, 15)
 
 	ctx, cancel := context.WithTimeout(context.Background(), timeout)
 	t.Cleanup(cancel)
 
-	daser := NewDASer(shareServ, sub, mockGet, ds, mockService)
+	daser, err := NewDASer(avail, sub, mockGet, ds, mockService, newBroadcastMock(1))
+	require.NoError(t, err)
 
-	err := daser.Start(ctx)
+	err = daser.Start(ctx)
 	require.NoError(t, err)
 
 	defer func() {
 		err = daser.Stop(ctx)
 		require.NoError(t, err)
 
 		// load checkpoint and ensure it's at network head
-		checkpoint, err := loadCheckpoint(ctx, daser.cstore)
+		checkpoint, err := daser.store.load(ctx)
 		require.NoError(t, err)
-		// ensure checkpoint is stored at 15
-		assert.Equal(t, int64(15), checkpoint)
+		// ensure checkpoint is stored at 30
+		assert.EqualValues(t, 30, checkpoint.SampleFrom-1)
 	}()
-	// wait for dasing catch-up routine to finish
+
+	// wait for the mock to indicate that catch-up is done
 	select {
 	case <-ctx.Done():
 		t.Fatal(ctx.Err())
 	case <-mockGet.doneCh:
 	}
+
+	// wait for the DASer to indicate done
+	assert.NoError(t, daser.WaitCatchUp(ctx))
+
 	// give the catch-up routine a second to finish up sampling the last header
-	for {
-		select {
-		case <-ctx.Done():
-			t.Fatal(ctx.Err())
-		default:
-			if daser.CatchUpRoutineState().Finished() {
-				return
-			}
-		}
-	}
+	assert.NoError(t, daser.sampler.state.waitCatchUp(ctx))
 }
 
 func TestDASer_Restart(t *testing.T) {
 	ds := ds_sync.MutexWrap(datastore.NewMapDatastore())
-	bServ := mdutils.Bserv()
-	avail := share.TestLightAvailability(bServ)
+	bServ := ipld.NewMemBlockservice()
+	avail := light.TestAvailability(getters.NewIPLDGetter(bServ))
 	// 15 headers from the past and 15 future headers
-	mockGet, shareServ, sub, mockService := createDASerSubcomponents(t, bServ, 15, 15, avail)
+	mockGet, sub, mockService := createDASerSubcomponents(t, bServ, 15, 15)
 
 	ctx, cancel := context.WithTimeout(context.Background(), timeout)
 	t.Cleanup(cancel)
 
-	daser := NewDASer(shareServ, sub, mockGet, ds, mockService)
+	daser, err := NewDASer(avail, sub, mockGet, ds, mockService, newBroadcastMock(1))
+	require.NoError(t, err)
 
-	err := daser.Start(ctx)
+	err = daser.Start(ctx)
 	require.NoError(t, err)
 
-	// wait for dasing catch-up routine to finish
+	// wait for the mock to indicate that catch-up is done
 	select {
 	case <-ctx.Done():
 		t.Fatal(ctx.Err())
 	case <-mockGet.doneCh:
 	}
 
+	// wait for the DASer to indicate done
+	assert.NoError(t, daser.WaitCatchUp(ctx))
+
 	err = daser.Stop(ctx)
 	require.NoError(t, err)
 
@@ -98,168 +112,43 @@ func TestDASer_Restart(t *testing.T) {
 	mockGet.doneCh = make(chan struct{})
 	// reset dummy subscriber
 	mockGet.fillSubWithHeaders(t, sub, bServ, 45, 60)
-	// manually set mockGet head to trigger stop at 45
+	// manually set mockGet head to trigger finished at 45
 	mockGet.head = int64(45)
 
 	// restart DASer with new context
 	restartCtx, restartCancel := context.WithTimeout(context.Background(), timeout)
 	t.Cleanup(restartCancel)
 
+	daser, err = NewDASer(avail, sub, mockGet, ds, mockService, newBroadcastMock(1))
+	require.NoError(t, err)
+
 	err = daser.Start(restartCtx)
 	require.NoError(t, err)
 
-	// wait for dasing catch-up routine to finish
+	// wait for the DASing catch-up routine to finish
 	select {
 	case <-restartCtx.Done():
 		t.Fatal(restartCtx.Err())
 	case <-mockGet.doneCh:
 	}
 
+	assert.NoError(t, daser.sampler.state.waitCatchUp(ctx))
+
 	err = daser.Stop(restartCtx)
 	require.NoError(t, err)
 
-	assert.True(t, daser.CatchUpRoutineState().Finished())
-
 	// load checkpoint and ensure it's at network head
-	checkpoint, err := loadCheckpoint(ctx, daser.cstore)
+	checkpoint, err := daser.store.load(ctx)
 	require.NoError(t, err)
-	// ensure checkpoint is stored at 45
-	assert.Equal(t, int64(45), checkpoint)
-}
-
-func TestDASer_catchUp(t *testing.T) {
-	ds := ds_sync.MutexWrap(datastore.NewMapDatastore())
-	bServ := mdutils.Bserv()
-	avail := share.TestLightAvailability(bServ)
-	mockGet, shareServ, _, mockService := createDASerSubcomponents(t, bServ, 5, 0, avail)
-
-	ctx, cancel := context.WithCancel(context.Background())
-	t.Cleanup(cancel)
-
-	daser := NewDASer(shareServ, nil, mockGet, ds, mockService)
-
-	type catchUpResult struct {
-		checkpoint int64
-		err        error
-	}
-	resultCh := make(chan *catchUpResult, 1)
-
-	wg := &sync.WaitGroup{}
-	wg.Add(1)
-	go func() {
-		defer wg.Done()
-		// catch up from height 2 to head
-		job := &catchUpJob{
-			from: 2,
-			to:   mockGet.head,
-		}
-		checkpt, err := daser.catchUp(ctx, job)
-		resultCh <- &catchUpResult{
-			checkpoint: checkpt,
-			err:        err,
-		}
-	}()
-	wg.Wait()
-
-	result := <-resultCh
-	assert.Equal(t, mockGet.head, result.checkpoint)
-	require.NoError(t, result.err)
-}
-
-// TestDASer_catchUp_oneHeader tests that catchUp works with a from-to
-// difference of 1
-func TestDASer_catchUp_oneHeader(t *testing.T) {
-	ds := ds_sync.MutexWrap(datastore.NewMapDatastore())
-	bServ := mdutils.Bserv()
-	avail := share.TestLightAvailability(bServ)
-	mockGet, shareServ, _, mockService := createDASerSubcomponents(t, bServ, 6, 0, avail)
-	daser := NewDASer(shareServ, nil, mockGet, ds, mockService)
-	ctx, cancel := context.WithCancel(context.Background())
-	t.Cleanup(cancel)
-
-	// store checkpoint
-	err := storeCheckpoint(ctx, daser.cstore, 5) // pick arbitrary height as last checkpoint
-	require.NoError(t, err)
-
-	checkpoint, err := loadCheckpoint(ctx, daser.cstore)
-	require.NoError(t, err)
-
-	type catchUpResult struct {
-		checkpoint int64
-		err        error
-	}
-	resultCh := make(chan *catchUpResult, 1)
-
-	wg := &sync.WaitGroup{}
-	wg.Add(1)
-	go func() {
-		defer wg.Done()
-		job := &catchUpJob{
-			from: checkpoint,
-			to:   mockGet.head,
-		}
-		checkpt, err := daser.catchUp(ctx, job)
-		resultCh <- &catchUpResult{
-			checkpoint: checkpt,
-			err:        err,
-		}
-	}()
-	wg.Wait()
-
-	result := <-resultCh
-	assert.Equal(t, mockGet.head, result.checkpoint)
-	require.NoError(t, result.err)
-}
-
-func TestDASer_catchUp_fails(t *testing.T) {
-	ds := ds_sync.MutexWrap(datastore.NewMapDatastore())
-	bServ := mdutils.Bserv()
-	avail := share.TestLightAvailability(bServ)
-	mockGet, _, _, mockService := createDASerSubcomponents(t, bServ, 6, 0, avail)
-	daser := NewDASer(share.NewTestBrokenAvailability(), nil, mockGet, ds, mockService)
-
-	ctx, cancel := context.WithCancel(context.Background())
-	t.Cleanup(cancel)
-
-	// store checkpoint
-	err := storeCheckpoint(ctx, daser.cstore, 5) // pick arbitrary height as last checkpoint
-	require.NoError(t, err)
-
-	checkpoint, err := loadCheckpoint(ctx, daser.cstore)
-	require.NoError(t, err)
-
-	type catchUpResult struct {
-		checkpoint int64
-		err        error
-	}
-	resultCh := make(chan *catchUpResult, 1)
-
-	wg := &sync.WaitGroup{}
-	wg.Add(1)
-	go func() {
-		defer wg.Done()
-		job := &catchUpJob{
-			from: checkpoint,
-			to:   mockGet.head,
-		}
-		checkpt, err := daser.catchUp(ctx, job)
-		resultCh <- &catchUpResult{
-			checkpoint: checkpt,
-			err:        err,
-		}
-	}()
-	wg.Wait()
-
-	result := <-resultCh
-	require.ErrorIs(t, result.err, share.ErrNotAvailable)
+	// ensure checkpoint is stored at 60
+	assert.EqualValues(t, 60, checkpoint.SampleFrom-1)
 }
 
 func TestDASer_stopsAfter_BEFP(t *testing.T) {
-	ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
+	ctx, cancel := context.WithTimeout(context.Background(), time.Second*20)
 	t.Cleanup(cancel)
 
 	ds := ds_sync.MutexWrap(datastore.NewMapDatastore())
-	bServ := mdutils.Bserv()
+	bServ := ipld.NewMemBlockservice()
 	// create mock network
 	net, err := mocknet.FullMeshLinked(1)
 	require.NoError(t, err)
@@ -267,21 +156,43 @@ func TestDASer_stopsAfter_BEFP(t *testing.T) {
 	ps, err := pubsub.NewGossipSub(ctx, net.Hosts()[0],
 		pubsub.WithMessageSignaturePolicy(pubsub.StrictNoSign))
 	require.NoError(t, err)
-	avail := share.TestFullAvailability(bServ)
+	avail := full.TestAvailability(t, getters.NewIPLDGetter(bServ))
 	// 15 headers from the past and 15 future headers
-	mockGet, shareServ, sub, _ := createDASerSubcomponents(t, bServ, 15, 15, avail)
+	mockGet, sub, _ := createDASerSubcomponents(t, bServ, 15, 15)
 
 	// create fraud service and break one header
-	f := fraud.NewService(ps, mockGet.GetByHeight, ds)
-	mockGet.headers[1] = header.CreateFraudExtHeader(t, mockGet.headers[1], bServ)
+	getter := func(ctx context.Context, height uint64) (*header.ExtendedHeader, error) {
+		return mockGet.GetByHeight(ctx, height)
+	}
+	unmarshaler := fraud.MultiUnmarshaler[*header.ExtendedHeader]{
+		Unmarshalers: map[fraud.ProofType]func([]byte) (fraud.Proof[*header.ExtendedHeader], error){
+			byzantine.BadEncoding: func(data []byte) (fraud.Proof[*header.ExtendedHeader], error) {
+				befp := &byzantine.BadEncodingProof{}
+				return befp, befp.UnmarshalBinary(data)
+			},
+		},
+	}
+
+	fserv := fraudserv.NewProofService[*header.ExtendedHeader](ps,
+		net.Hosts()[0],
+		getter,
+		unmarshaler,
+		ds,
+		false,
+		"private",
+	)
+	require.NoError(t, fserv.Start(ctx))
+	mockGet.headers[1] = headerfraud.CreateFraudExtHeader(t, mockGet.headers[1], bServ)
 	newCtx := context.Background()
 
 	// create and start DASer
-	daser := NewDASer(shareServ, sub, mockGet, ds, f)
+	daser, err := NewDASer(avail, sub, mockGet, ds, fserv, newBroadcastMock(1))
+	require.NoError(t, err)
+
 	resultCh := make(chan error)
-	go fraud.OnProof(newCtx, f, fraud.BadEncoding,
-		func(fraud.Proof) {
-			resultCh <- daser.Stop(ctx)
+	go fraud.OnProof[*header.ExtendedHeader](newCtx, fserv, byzantine.BadEncoding,
+		func(fraud.Proof[*header.ExtendedHeader]) {
+			resultCh <- daser.Stop(newCtx)
 		})
 
 	require.NoError(t, daser.Start(newCtx))
@@ -292,124 +203,97 @@ func TestDASer_stopsAfter_BEFP(t *testing.T) {
 	case res := <-resultCh:
 		require.NoError(t, res)
 	}
-	require.True(t, daser.ctx.Err() == context.Canceled)
+	// ensure the DASer shut down after the BEFP was received
+	require.True(t, daser.running == 0)
 }
 
-func TestDASerState(t *testing.T) {
-	ds := ds_sync.MutexWrap(datastore.NewMapDatastore())
-	bServ := mdutils.Bserv()
-
-	// 30 headers in the past and 30 headers in the future (so final sample height should be 60)
-	mockGet, shareServ, sub, fraud := createDASerSubcomponents(t, bServ, 30, 30, share.NewTestSuccessfulAvailability())
-	expectedFinalSampleHeight := uint64(60)
-	expectedFinalSampleWidth := uint64(len(sub.Headers[29].DAH.RowsRoots))
-
-	ctx, cancel := context.WithTimeout(context.Background(), timeout)
+func TestDASerSampleTimeout(t *testing.T) {
+	ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
 	t.Cleanup(cancel)
 
-	daser := NewDASer(shareServ, sub, mockGet, ds, fraud)
-
-	err := daser.Start(ctx)
-	require.NoError(t, err)
-	defer func() {
-		// wait for all "future" headers to be sampled
-		for {
+	getter := getterStub{}
+	avail := mocks.NewMockAvailability(gomock.NewController(t))
+	doneCh := make(chan struct{})
+	avail.EXPECT().SharesAvailable(gomock.Any(), gomock.Any()).DoAndReturn(
+		func(sampleCtx context.Context, h *header.ExtendedHeader) error {
 			select {
+			case <-sampleCtx.Done():
+				close(doneCh)
+				return nil
 			case <-ctx.Done():
-				t.Fatal(ctx.Err())
-			default:
-				if daser.SampleRoutineState().LatestSampledHeight == expectedFinalSampleHeight {
-					assert.Equal(t, expectedFinalSampleWidth, daser.SampleRoutineState().LatestSampledSquareWidth)
-
-					err := daser.Stop(ctx)
-					require.NoError(t, err)
-					assert.False(t, daser.SampleRoutineState().IsRunning)
-					return
-				}
+				t.Fatal("call context didn't time out in time")
+				return ctx.Err()
 			}
-		}
-	}()
+		})
+
+	ds := ds_sync.MutexWrap(datastore.NewMapDatastore())
+	sub := new(headertest.Subscriber)
+	fserv := &fraudtest.DummyService[*header.ExtendedHeader]{}
+
+	// create and start DASer
+	daser, err := NewDASer(avail, sub, getter, ds, fserv, newBroadcastMock(1),
+		WithSampleTimeout(1))
+	require.NoError(t, err)
+
+	require.NoError(t, daser.Start(ctx))
+	require.NoError(t, daser.sampler.state.waitCatchUp(ctx))
 
 	select {
+	case <-doneCh:
 	case <-ctx.Done():
-		t.Fatal(ctx.Err())
-	case <-mockGet.doneCh:
-	}
-	// give catchUp routine a second to exit
-	for {
-		select {
-		case <-ctx.Done():
-			t.Fatal(ctx.Err())
-		default:
-			if daser.CatchUpRoutineState().Finished() {
-				return
-			}
-		}
+		t.Fatal("call context didn't time out in time")
 	}
 }
 
-// TestDASerState_WithErrorInCatchUp tests for the case where an
-// error has occurred inside the catchUp routine, ensuring the catchUp
-// routine exits as expected and reports the error accurately.
-func TestDASerState_WithErrorInCatchUp(t *testing.T) {
+// TestDASer_SamplingWindow tests the sampling window determination
+// for headers.
+func TestDASer_SamplingWindow(t *testing.T) {
 	ds := ds_sync.MutexWrap(datastore.NewMapDatastore())
-	bServ := mdutils.Bserv()
+	sub := new(headertest.Subscriber)
+	fserv := &fraudtest.DummyService[*header.ExtendedHeader]{}
+	getter := getterStub{}
+	avail := mocks.NewMockAvailability(gomock.NewController(t))
 
-	// 30 headers in the past and 30 headers in the future
-	// catchUp routine error should occur at height 16
-	brokenHeight := 16
-	mockGet, shareServ, sub, fraud := createDASerWithFailingAvailability(t, bServ, 30, 30,
-		int64(brokenHeight))
+	// create and start DASer
+	daser, err := NewDASer(avail, sub, getter, ds, fserv, newBroadcastMock(1),
+		WithSamplingWindow(time.Second))
+	require.NoError(t, err)
 
-	ctx, cancel := context.WithTimeout(context.Background(), timeout)
-	t.Cleanup(cancel)
+	var tests = []struct {
+		timestamp    time.Time
+		withinWindow bool
+	}{
+		{timestamp: time.Now().Add(-(time.Second * 5)), withinWindow: false},
+		{timestamp: time.Now().Add(-(time.Millisecond * 800)), withinWindow: true},
+		{timestamp: time.Now().Add(-(time.Hour)), withinWindow: false},
+		{timestamp: time.Now().Add(-(time.Hour * 24 * 30)), withinWindow: false},
+		{timestamp: time.Now(), withinWindow: true},
+	}
 
-	daser := NewDASer(shareServ, sub, mockGet, ds, fraud)
-	// allow catchUpDn signal to be read twice
-	daser.catchUpDn = make(chan struct{}, 2)
+	for i, tt := range tests {
+		t.Run(strconv.Itoa(i), func(t *testing.T) {
+			eh := headertest.RandExtendedHeader(t)
+			eh.RawHeader.Time = tt.timestamp
 
-	err := daser.Start(ctx)
-	require.NoError(t, err)
-	defer func() {
-		err = daser.Stop(ctx)
-		require.NoError(t, err)
-	}()
-
-	// wait until catchUp routine has fetched the broken header
-	select {
-	case <-ctx.Done():
-		t.Fatal(ctx.Err())
-	case <-mockGet.brokenHeightCh:
-	}
-
-	// wait for daser to exit catchUp routine
-	select {
-	case <-ctx.Done():
-		t.Fatal(ctx.Err())
-	case <-daser.catchUpDn:
-		catchUp := daser.CatchUpRoutineState()
-		// make sure catchUp routine didn't successfully complete
-		assert.False(t, catchUp.Finished())
-		// ensure that the error has been reported
-		assert.NotNil(t, catchUp.Error)
-		assert.Equal(t, uint64(brokenHeight)-1, catchUp.Height)
-		// send another done signal so `Stop` can read it
-		daser.catchUpDn <- struct{}{}
-		return
+			assert.Equal(t, tt.withinWindow, daser.isWithinSamplingWindow(eh))
+		})
 	}
 }
 
 // createDASerSubcomponents takes numGetter (number of headers
 // to store in mockGetter) and numSub (number of headers to store
 // in the mock header.Subscriber), returning a newly instantiated
-// mockGetter, share.Service, and mock header.Subscriber.
+// mockGetter, share.Availability, and mock header.Subscriber.
 func createDASerSubcomponents(
 	t *testing.T,
 	bServ blockservice.BlockService,
 	numGetter,
 	numSub int,
-	availability share.Availability,
-) (*mockGetter, *share.Service, *header.DummySubscriber, *fraud.DummyService) {
-	shareServ := share.NewService(bServ, availability)
+) (*mockGetter, *headertest.Subscriber, *fraudtest.DummyService[*header.ExtendedHeader]) {
 	mockGet, sub := createMockGetterAndSub(t, bServ, numGetter, numSub)
-	fraud := new(fraud.DummyService)
-	return mockGet, shareServ, sub, fraud
+	fraud := &fraudtest.DummyService[*header.ExtendedHeader]{}
+	return mockGet, sub, fraud
 }
 
 func createMockGetterAndSub(
@@ -417,7 +301,7 @@ func createMockGetterAndSub(
 	bServ blockservice.BlockService,
 	numGetter,
 	numSub int,
-) (*mockGetter, *header.DummySubscriber) {
+) (*mockGetter, *headertest.Subscriber) {
 	mockGet := &mockGetter{
 		headers: make(map[int64]*header.ExtendedHeader),
 		doneCh:  make(chan struct{}),
@@ -426,33 +310,15 @@
 
 	mockGet.generateHeaders(t, bServ, 0, numGetter)
 
-	sub := new(header.DummySubscriber)
+	sub := new(headertest.Subscriber)
 	mockGet.fillSubWithHeaders(t, sub, bServ, numGetter, numGetter+numSub)
-
 	return mockGet, sub
 }
 
-func createDASerWithFailingAvailability(
-	t *testing.T,
-	bServ blockservice.BlockService,
-	numGetter,
-	numSub int,
-	brokenHeight int64,
-) (*mockGetter, *share.Service, *header.DummySubscriber, *fraud.DummyService) {
-	mockGet, sub := createMockGetterAndSub(t, bServ, numGetter, numSub)
-	mockGet.brokenHeight = brokenHeight
-
-	shareServ := share.NewService(bServ, &share.TestBrokenAvailability{
-		Root: mockGet.headers[brokenHeight].DAH,
-	})
-
-	return mockGet, shareServ, sub, new(fraud.DummyService)
-}
-
 // fillSubWithHeaders generates `num` headers from the future for p2pSub to pipe through to DASer.
 func (m *mockGetter) fillSubWithHeaders(
 	t *testing.T,
-	sub *header.DummySubscriber,
+	sub *headertest.Subscriber,
 	bServ blockservice.BlockService,
 	startHeight,
 	endHeight int,
@@ -461,15 +327,13 @@ func (m *mockGetter) fillSubWithHeaders(
 	index := 0
 	for i := startHeight; i < endHeight; i++ {
-		dah := share.RandFillBS(t, 16, bServ)
+		dah := availability_test.RandFillBS(t, 16, bServ)
 
-		randHeader := header.RandExtendedHeader(t)
-		randHeader.DataHash = dah.Hash()
-		randHeader.DAH = dah
-		randHeader.Height = int64(i + 1)
+		randHeader := headertest.RandExtendedHeaderWithRoot(t, dah)
+		randHeader.RawHeader.Height = int64(i + 1)
 
 		sub.Headers[index] = randHeader
-		// also store to mock getter for duplicate fetching
+		// also store to the mock getter for duplicate sampling
 		m.headers[int64(i+1)] = randHeader
 
 		index++
@@ -477,6 +341,7 @@
 	}
 }
 
 type mockGetter struct {
+	getterStub
 	doneCh chan struct{} // signals all stored headers have been retrieved
 
 	brokenHeight int64
@@ -488,12 +353,10 @@ type mockGetter struct {
 
 func (m *mockGetter) generateHeaders(t *testing.T, bServ blockservice.BlockService, startHeight, endHeight int) {
 	for i := startHeight; i < endHeight; i++ {
-		dah := share.RandFillBS(t, 16, bServ)
+		dah := availability_test.RandFillBS(t, 16, bServ)
 
-		randHeader := header.RandExtendedHeader(t)
-		randHeader.DataHash = dah.Hash()
-		randHeader.DAH = dah
-		randHeader.Height = int64(i + 1)
+		randHeader := headertest.RandExtendedHeaderWithRoot(t, dah)
+		randHeader.RawHeader.Height = int64(i + 1)
 
 		m.headers[int64(i+1)] = randHeader
 	}
@@ -501,7 +364,10 @@ func (m *mockGetter) generateHeaders(t *testing.T, bServ blockservice.BlockServi
 	m.head = int64(startHeight + endHeight)
 }
 
-func (m *mockGetter) Head(context.Context) (*header.ExtendedHeader, error) {
+func (m *mockGetter) Head(
+	context.Context,
+	...libhead.HeadOption[*header.ExtendedHeader],
+) (*header.ExtendedHeader, error) {
 	return m.headers[m.head], nil
 }
 
@@ -509,19 +375,61 @@ func (m *mockGetter) GetByHeight(_ context.Context, height uint64) (*header.Exte
 	defer func() {
 		switch int64(height) {
 		case m.brokenHeight:
-			close(m.brokenHeightCh)
+			select {
+			case <-m.brokenHeightCh:
+			default:
+				close(m.brokenHeightCh)
+			}
 		case m.head:
-			close(m.doneCh)
+			select {
+			case <-m.doneCh:
+			default:
+				close(m.doneCh)
+			}
 		}
 	}()
 
 	return m.headers[int64(height)], nil
 }
 
-func (m *mockGetter) GetRangeByHeight(ctx context.Context, from, to uint64) ([]*header.ExtendedHeader, error) {
+type benchGetterStub struct {
+	getterStub
+	header *header.ExtendedHeader
+}
+
+func newBenchGetter() benchGetterStub {
+	return benchGetterStub{header: &header.ExtendedHeader{
+		DAH: &share.Root{RowRoots: make([][]byte, 0)}}}
+}
+
+func (m benchGetterStub) GetByHeight(context.Context, uint64) (*header.ExtendedHeader, error) {
+	return m.header, nil
+}
+
+type getterStub struct{}
+
+func (m getterStub) Head(
+	context.Context,
+	...libhead.HeadOption[*header.ExtendedHeader],
+) (*header.ExtendedHeader, error) {
+	return &header.ExtendedHeader{RawHeader: header.RawHeader{Height: 1}}, nil
+}
+
+func (m getterStub) GetByHeight(_ context.Context, height uint64) (*header.ExtendedHeader, error) {
+	return &header.ExtendedHeader{
+		Commit:    &types.Commit{},
+		RawHeader: header.RawHeader{Height: int64(height)},
+		DAH:       &share.Root{RowRoots: make([][]byte, 0)}}, nil
+}
+
+func (m getterStub) GetRangeByHeight(
+	context.Context,
+	*header.ExtendedHeader,
+	uint64,
+) ([]*header.ExtendedHeader, error) {
 	return nil, nil
 }
 
-func (m *mockGetter) Get(context.Context, tmbytes.HexBytes) (*header.ExtendedHeader, error) {
+func (m getterStub) Get(context.Context, libhead.Hash) (*header.ExtendedHeader, error) {
 	return nil, nil
 }
diff --git a/das/doc.go b/das/doc.go
index 254ed65dfa..bc67fcc7f3 100644
--- a/das/doc.go
+++ b/das/doc.go
@@ -1,8 +1,8 @@
 /*
 Package das contains the most important functionality provided by celestia-node.
 It contains logic for running data availability sampling (DAS) routines on block
-headers in the network. DAS is the process of verifying the availability of block
-data by sampling chunks or shares of those blocks.
+headers in the network. DAS is the process of verifying the availability of
+block data by sampling chunks or shares of those blocks.
 
 Package das can confirm the availability of block data in the network via the
 Availability interface which is implemented both in `full` and `light` mode.
@@ -13,10 +13,11 @@
 sufficiently likely that all block data is available as it is assumed that there
 are enough `light` availability instances active on the network doing sampling over
 the same block to collectively verify its availability.
 
-The central component of this package is the `DASer`. It performs one basic function:
-a sampling loop that performs DAS on new ExtendedHeaders in the network. The DASer kicks
-off this loop by loading its last DASed header (`checkpoint`) and kicking off a `catchUp`
-loop to DAS all headers between the checkpoint and the current network head. It simultaneously
-continues to perform DAS over new ExtendedHeaders received via gossipsub.
+The central component of this package is the `samplingCoordinator`. It launches parallel
+workers that perform DAS on new ExtendedHeaders in the network. The DASer kicks off this
+loop by loading its last DASed headers snapshot (`checkpoint`) and kicking off a worker pool
+to DAS all headers between the checkpoint and the current network head. It subscribes
+to notifications about new ExtendedHeaders received via gossipsub. Newly found headers
+are put into workers directly, without applying concurrency-limiting restrictions.
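+
+In rough, illustrative pseudocode, the DASer's Start wires these pieces together
+(simplified; see daser.go for the exact flow):
+
+	cp, _ := store.load(ctx)             // load the last stored checkpoint
+	go coordinator.run(ctx, cp)          // worker pool catching up to the network head
+	go subscriber.run(ctx, sub, listen)  // recent headers arriving via gossipsub
+	go store.runBackgroundStore(ctx, interval, getCheckpoint)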
 */
 package das
diff --git a/das/done.go b/das/done.go
new file mode 100644
index 0000000000..b2a6073f0a
--- /dev/null
+++ b/das/done.go
@@ -0,0 +1,31 @@
+package das
+
+import (
+	"context"
+	"fmt"
+)
+
+type done struct {
+	name     string
+	finished chan struct{}
+}
+
+func newDone(name string) done {
+	return done{
+		name:     name,
+		finished: make(chan struct{}),
+	}
+}
+
+func (d *done) indicateDone() {
+	close(d.finished)
+}
+
+func (d *done) wait(ctx context.Context) error {
+	select {
+	case <-d.finished:
+	case <-ctx.Done():
+		return fmt.Errorf("%v stuck: %w", d.name, ctx.Err())
+	}
+	return nil
+}
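The `done` helper above is the shutdown primitive shared by the coordinator, subscriber, and background store. A minimal usage sketch; the `worker` type and `shutdown` function here are hypothetical, not part of this changeset:

	// hypothetical example: a long-running routine embedding done,
	// created as w := &worker{done: newDone("worker")}
	type worker struct {
		done
	}

	func (w *worker) run(ctx context.Context) {
		defer w.indicateDone() // unblocks all w.wait callers
		<-ctx.Done()           // ... do work until the context is canceled ...
	}

	// shutdown sequence: cancel run's context, then bound the wait.
	// wait returns an error like "worker stuck: context deadline exceeded"
	// if the routine does not exit before waitCtx expires.
	func shutdown(waitCtx context.Context, cancel context.CancelFunc, w *worker) error {
		cancel()
		return w.wait(waitCtx)
	}
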
diff --git a/das/metrics.go b/das/metrics.go
new file mode 100644
index 0000000000..6454e9d138
--- /dev/null
+++ b/das/metrics.go
@@ -0,0 +1,184 @@
+package das
+
+import (
+	"context"
+	"fmt"
+	"sync/atomic"
+	"time"
+
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/metric"
+
+	"github.com/celestiaorg/celestia-node/header"
+	"github.com/celestiaorg/celestia-node/libs/utils"
+)
+
+const (
+	jobTypeLabel     = "job_type"
+	headerWidthLabel = "header_width"
+	failedLabel      = "failed"
+)
+
+var meter = otel.Meter("das")
+
+type metrics struct {
+	sampled       metric.Int64Counter
+	sampleTime    metric.Float64Histogram
+	getHeaderTime metric.Float64Histogram
+	newHead       metric.Int64Counter
+
+	lastSampledTS uint64
+}
+
+func (d *DASer) InitMetrics() error {
+	sampled, err := meter.Int64Counter("das_sampled_headers_counter",
+		metric.WithDescription("sampled headers counter"))
+	if err != nil {
+		return err
+	}
+
+	sampleTime, err := meter.Float64Histogram("das_sample_time_hist",
+		metric.WithDescription("duration of sampling a single header"))
+	if err != nil {
+		return err
+	}
+
+	getHeaderTime, err := meter.Float64Histogram("das_get_header_time_hist",
+		metric.WithDescription("duration of getting header from header store"))
+	if err != nil {
+		return err
+	}
+
+	newHead, err := meter.Int64Counter("das_head_updated_counter",
+		metric.WithDescription("amount of times DAS'er advanced network head"))
+	if err != nil {
+		return err
+	}
+
+	lastSampledTS, err := meter.Int64ObservableGauge("das_latest_sampled_ts",
+		metric.WithDescription("latest sampled timestamp"))
+	if err != nil {
+		return err
+	}
+
+	busyWorkers, err := meter.Int64ObservableGauge("das_busy_workers_amount",
+		metric.WithDescription("number of active parallel workers in DAS'er"))
+	if err != nil {
+		return err
+	}
+
+	networkHead, err := meter.Int64ObservableGauge("das_network_head",
+		metric.WithDescription("most recent network head"))
+	if err != nil {
+		return err
+	}
+
+	sampledChainHead, err := meter.Int64ObservableGauge("das_sampled_chain_head",
+		metric.WithDescription("height of the sampled chain - all previous headers have been successfully sampled"))
+	if err != nil {
+		return err
+	}
+
+	totalSampled, err := meter.Int64ObservableGauge("das_total_sampled_headers",
+		metric.WithDescription("total sampled headers gauge"),
+	)
+	if err != nil {
+		return err
+	}
+
+	d.sampler.metrics = &metrics{
+		sampled:       sampled,
+		sampleTime:    sampleTime,
+		getHeaderTime: getHeaderTime,
+		newHead:       newHead,
+	}
+
+	callback := func(ctx context.Context, observer metric.Observer) error {
+		stats, err := d.sampler.stats(ctx)
+		if err != nil {
+			log.Errorf("observing stats: %s", err.Error())
+			return err
+		}
+
+		for jobType, amount := range stats.workersByJobType() {
+			observer.ObserveInt64(busyWorkers, amount,
+				metric.WithAttributes(
+					attribute.String(jobTypeLabel, string(jobType)),
+				))
+		}
+
+		observer.ObserveInt64(networkHead, int64(stats.NetworkHead))
+		observer.ObserveInt64(sampledChainHead, int64(stats.SampledChainHead))
+
+		if ts := atomic.LoadUint64(&d.sampler.metrics.lastSampledTS); ts != 0 {
+			observer.ObserveInt64(lastSampledTS, int64(ts))
+		}
+
+		observer.ObserveInt64(totalSampled, int64(stats.totalSampled()))
+		return nil
+	}
+
+	_, err = meter.RegisterCallback(callback,
+		lastSampledTS,
+		busyWorkers,
+		networkHead,
+		sampledChainHead,
+		totalSampled,
+	)
+	if err != nil {
+		return fmt.Errorf("registering metrics callback: %w", err)
+	}
+
+	return nil
+}
+
+// observeSample records the time it took to sample a header +
+// the amount of sampled contiguous headers
+func (m *metrics) observeSample(
+	ctx context.Context,
+	h *header.ExtendedHeader,
+	sampleTime time.Duration,
+	jobType jobType,
+	err error,
+) {
+	if m == nil {
+		return
+	}
+
+	ctx = utils.ResetContextOnError(ctx)
+
+	m.sampleTime.Record(ctx, sampleTime.Seconds(),
+		metric.WithAttributes(
+			attribute.Bool(failedLabel, err != nil),
+			attribute.Int(headerWidthLabel, len(h.DAH.RowRoots)),
+			attribute.String(jobTypeLabel, string(jobType)),
+		))
+
+	m.sampled.Add(ctx, 1,
+		metric.WithAttributes(
+			attribute.Bool(failedLabel, err != nil),
+			attribute.Int(headerWidthLabel, len(h.DAH.RowRoots)),
+			attribute.String(jobTypeLabel, string(jobType)),
+		))
+
+	atomic.StoreUint64(&m.lastSampledTS, uint64(time.Now().UTC().Unix()))
+}
+
+// observeGetHeader records the time it took to get a header from the header store.
+func (m *metrics) observeGetHeader(ctx context.Context, d time.Duration) {
+	if m == nil {
+		return
+	}
+	ctx = utils.ResetContextOnError(ctx)
+	m.getHeaderTime.Record(ctx, d.Seconds())
+}
+
+// observeNewHead records the network head.
+func (m *metrics) observeNewHead(ctx context.Context) {
+	if m == nil {
+		return
+	}
+	ctx = utils.ResetContextOnError(ctx)
+	m.newHead.Add(ctx, 1)
+}
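Metrics are opt-in: the observe helpers above are nil-guarded and record nothing until InitMetrics is called. A hedged sketch of enabling them, assuming an OpenTelemetry meter provider is already configured elsewhere in the node:

	// illustrative only: enable DAS metrics after constructing the DASer
	daser, err := das.NewDASer(avail, hsub, getter, ds, bcast, shrexBroadcast)
	if err != nil {
		return err
	}
	if err := daser.InitMetrics(); err != nil {
		return fmt.Errorf("initializing DAS metrics: %w", err)
	}
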
High ConcurrencyLimit value may increase sampling time due to node resources being + // divided between parallel workers. SampleTimeout should be adjusted proportionally to + // ConcurrencyLimit. + SampleTimeout time.Duration + + // SamplingWindow determines the time window that headers should fall into + // in order to be sampled. If set to 0, the sampling window will include + // all headers. + SamplingWindow time.Duration +} + +// DefaultParameters returns the default configuration values for the daser parameters +func DefaultParameters() Parameters { + // TODO(@derrandz): parameters needs performance testing on real network to define optimal values + // (#1261) + concurrencyLimit := 16 + return Parameters{ + SamplingRange: 100, + ConcurrencyLimit: concurrencyLimit, + BackgroundStoreInterval: 10 * time.Minute, + SampleFrom: 1, + // SampleTimeout = approximate block time (with a bit of wiggle room) * max amount of catchup + // workers + SampleTimeout: 15 * time.Second * time.Duration(concurrencyLimit), + } +} + +// Validate validates the values in Parameters +// +// All parameters must be positive and non-zero, except: +// BackgroundStoreInterval = 0 disables background storer, +// PriorityQueueSize = 0 disables prioritization of recently produced blocks for sampling +func (p *Parameters) Validate() error { + // SamplingRange = 0 will cause the jobs' queue to be empty + // Therefore no sampling jobs will be reserved and more importantly the DASer will break + if p.SamplingRange <= 0 { + return errInvalidOptionValue( + "SamplingRange", + "negative or 0", + ) + } + + // ConcurrencyLimit = 0 will cause the number of workers to be 0 and + // Thus no threads will be assigned to the waiting jobs therefore breaking the DASer + if p.ConcurrencyLimit <= 0 { + return errInvalidOptionValue( + "ConcurrencyLimit", + "negative or 0", + ) + } + + // SampleFrom = 0 would tell the DASer to start sampling from block height 0 + // which does not exist therefore breaking the DASer. 
+ if p.SampleFrom <= 0 { + return errInvalidOptionValue( + "SampleFrom", + "negative or 0", + ) + } + + // SampleTimeout = 0 would fail every sample operation with timeout error + if p.SampleTimeout <= 0 { + return errInvalidOptionValue( + "SampleTimeout", + "negative or 0", + ) + } + + return nil +} + +// WithSamplingRange is a functional option to configure the daser's `SamplingRange` parameter +// +// Usage: +// ``` +// WithSamplingRange(10)(daser) +// ``` +// +// or +// +// ``` +// option := WithSamplingRange(10) +// // shenanigans to create daser +// option(daser) +// +// ``` +func WithSamplingRange(samplingRange uint64) Option { + return func(d *DASer) { + d.params.SamplingRange = samplingRange + } +} + +// WithConcurrencyLimit is a functional option to configure the daser's `ConcurrencyLimit` parameter +// Refer to WithSamplingRange documentation to see an example of how to use this +func WithConcurrencyLimit(concurrencyLimit int) Option { + return func(d *DASer) { + d.params.ConcurrencyLimit = concurrencyLimit + } +} + +// WithBackgroundStoreInterval is a functional option to configure the daser's +// `backgroundStoreInterval` parameter Refer to WithSamplingRange documentation to see an example +// of how to use this +func WithBackgroundStoreInterval(backgroundStoreInterval time.Duration) Option { + return func(d *DASer) { + d.params.BackgroundStoreInterval = backgroundStoreInterval + } +} + +// WithSampleFrom is a functional option to configure the daser's `SampleFrom` parameter +// Refer to WithSamplingRange documentation to see an example of how to use this +func WithSampleFrom(sampleFrom uint64) Option { + return func(d *DASer) { + d.params.SampleFrom = sampleFrom + } +} + +// WithSampleTimeout is a functional option to configure the daser's `SampleTimeout` parameter +// Refer to WithSamplingRange documentation to see an example of how to use this +func WithSampleTimeout(sampleTimeout time.Duration) Option { + return func(d *DASer) { + d.params.SampleTimeout = sampleTimeout + } +} + +// WithSamplingWindow is a functional option to configure the DASer's +// `SamplingWindow` parameter. +func WithSamplingWindow(samplingWindow time.Duration) Option { + return func(d *DASer) { + d.params.SamplingWindow = samplingWindow + } +} diff --git a/das/state.go b/das/state.go index 87d728daa3..bd3a018a40 100644 --- a/das/state.go +++ b/das/state.go @@ -1,53 +1,316 @@ package das import ( - "sync" + "context" + "sync/atomic" "time" + + "github.com/celestiaorg/celestia-node/header" ) -// state collects information about the DASer process. Currently, there are -// only two sampling routines: the main sampling routine which performs sampling -// over current network headers, and the `catchUp` routine which performs sampling -// over past headers from the last sampled checkpoint. -type state struct { - sampleLk sync.RWMutex - sample RoutineState // tracks information related to the main sampling routine +// coordinatorState represents the current state of sampling process +type coordinatorState struct { + // sampleFrom is the height from which the DASer will start sampling + sampleFrom uint64 + // samplingRange is the maximum amount of headers processed in one job. 
+ samplingRange uint64 + + // keeps track of running workers + inProgress map[int]func() workerState + + // retryStrategy implements retry backoff + retryStrategy retryStrategy + // stores heights of failed headers with the retry attempt count as the value + failed map[uint64]retryAttempt + // inRetry stores (height -> attempt count) of failed headers that are currently being retried by + // workers + inRetry map[uint64]retryAttempt + + // nextJobID is a unique identifier that will be used for creation of next job + nextJobID int + // all headers before next were sent to workers + next uint64 + // networkHead is the height of the latest known network head + networkHead uint64 - catchUpLk sync.RWMutex - catchUp JobInfo // tracks information related to the `catchUp` routine + // catchUpDone indicates if all headers are sampled + catchUpDone atomic.Bool + // catchUpDoneCh blocks until all headers are sampled + catchUpDoneCh chan struct{} } -// RoutineState contains important information about the state of a -// current sampling routine. -type RoutineState struct { - // reports if an error has occurred during the routine's - // sampling process - Error error `json:"error"` - // tracks the latest successfully sampled height of the routine - LatestSampledHeight uint64 `json:"latest_sampled_height"` - // tracks the square width of the latest successfully sampled - // height of the routine - LatestSampledSquareWidth uint64 `json:"latest_sampled_square_width"` - // tracks whether routine is running - IsRunning bool `json:"is_running"` +// retryAttempt represents a retry attempt with a backoff delay. +type retryAttempt struct { + // count specifies the number of retry attempts made so far. + count int + // after specifies the time for the next retry attempt. + after time.Time +} + +// newCoordinatorState initializes state for the samplingCoordinator +func newCoordinatorState(params Parameters) coordinatorState { + return coordinatorState{ + sampleFrom: params.SampleFrom, + samplingRange: params.SamplingRange, + inProgress: make(map[int]func() workerState), + retryStrategy: newRetryStrategy(exponentialBackoff( + defaultBackoffInitialInterval, + defaultBackoffMultiplier, + defaultBackoffMaxRetryCount)), + failed: make(map[uint64]retryAttempt), + inRetry: make(map[uint64]retryAttempt), + nextJobID: 0, + next: params.SampleFrom, + networkHead: params.SampleFrom, + catchUpDoneCh: make(chan struct{}), + } +} + +func (s *coordinatorState) resumeFromCheckpoint(c checkpoint) { + s.next = c.SampleFrom + s.networkHead = c.NetworkHead + + for h, count := range c.Failed { + // resumed retries should start without backoff delay + s.failed[h] = retryAttempt{ + count: count, + after: time.Now(), + } + } +} + +func (s *coordinatorState) handleResult(res result) { + delete(s.inProgress, res.id) + + switch res.jobType { + case recentJob, catchupJob: + s.handleRecentOrCatchupResult(res) + case retryJob: + s.handleRetryResult(res) + } + + s.checkDone() +} + +func (s *coordinatorState) handleRecentOrCatchupResult(res result) { + // check if the worker retried any of the previously failed heights + for h := range s.failed { + if h < res.from || h > res.to { + continue + } + + if res.failed[h] == 0 { + delete(s.failed, h) + } + } + + // update failed heights + for h := range res.failed { + nextRetry, _ := s.retryStrategy.nextRetry(retryAttempt{}, time.Now()) + s.failed[h] = nextRetry + } +} + +func (s *coordinatorState) handleRetryResult(res result) { + // move heights that have failed again back into failed, keeping the retry count; they will be picked up by + // retry workers later + for h := range res.failed { + lastRetry := s.inRetry[h] + // height will be retried after backoff + nextRetry, retryExceeded := s.retryStrategy.nextRetry(lastRetry, time.Now()) + if retryExceeded { + log.Warnw("header exceeded maximum amount of sampling attempts", + "height", h, + "attempts", nextRetry.count) + } + s.failed[h] = nextRetry + } + + // processed heights are either already moved to the failed map or succeeded, so clean up inRetry + for h := res.from; h <= res.to; h++ { + delete(s.inRetry, h) + } +} + +func (s *coordinatorState) isNewHead(newHead uint64) bool { + // seen this header before + if newHead <= s.networkHead { + log.Warnf("received head height: %v, which is lower than or the same as previously known: %v", newHead, s.networkHead) + return false + } + return true +} + +func (s *coordinatorState) updateHead(newHead uint64) { + if s.networkHead == s.sampleFrom { + log.Infow("found first header, starting sampling") + } + + // log the previous head before overwriting it + log.Debugw("updated head", "from_height", s.networkHead, "to_height", newHead) + s.networkHead = newHead + s.checkDone() +} + +// recentJob creates a job to process a recent header. +func (s *coordinatorState) recentJob(header *header.ExtendedHeader) job { + // move next, to prevent catchup job from processing same height + if s.next == header.Height() { + s.next++ + } + s.nextJobID++ + return job{ + id: s.nextJobID, + jobType: recentJob, + header: header, + from: header.Height(), + to: header.Height(), + } +} + +// nextJob will return the next catchup or retry job according to priority (retry -> catchup) +func (s *coordinatorState) nextJob() (next job, found bool) { + // check if any retry jobs are available + if job, found := s.retryJob(); found { + return job, found + } + + // if no retry jobs, make a catchup job + return s.catchupJob() +} + +// catchupJob creates a catchup job if catchup is not finished +func (s *coordinatorState) catchupJob() (next job, found bool) { + if s.next > s.networkHead { + return job{}, false + } + + to := s.next + s.samplingRange - 1 + if to > s.networkHead { + to = s.networkHead + } + j := s.newJob(catchupJob, s.next, to) + s.next = to + 1 + return j, true +} + +// retryJob creates a job to retry a previously failed header +func (s *coordinatorState) retryJob() (next job, found bool) { + for h, attempt := range s.failed { + if !attempt.canRetry() { + // height will be retried later + continue + } + + // move header from failed into retry + delete(s.failed, h) + s.inRetry[h] = attempt + j := s.newJob(retryJob, h, h) + return j, true + } + + return job{}, false +} + +func (s *coordinatorState) putInProgress(jobID int, getState func() workerState) { + s.inProgress[jobID] = getState +} + +func (s *coordinatorState) newJob(jobType jobType, from, to uint64) job { + s.nextJobID++ + return job{ + id: s.nextJobID, + jobType: jobType, + from: from, + to: to, + } +} + +// unsafeStats collects coordinator stats without thread-safety +func (s *coordinatorState) unsafeStats() SamplingStats { + workers := make([]WorkerStats, 0, len(s.inProgress)) + lowestFailedOrInProgress := s.next + failed := make(map[uint64]int) + + // gather worker stats + for _, getStats := range s.inProgress { + wstats := getStats() + var errMsg string + if wstats.err != nil { + errMsg = wstats.err.Error() + } + workers = append(workers, WorkerStats{ + JobType: wstats.job.jobType, + Curr: wstats.curr, + From: wstats.from, + To: wstats.to, + ErrMsg: errMsg, + }) + + for h := range wstats.failed { + failed[h]++ + if h < 
lowestFailedOrInProgress { + lowestFailedOrInProgress = h + } + } + + if wstats.curr < lowestFailedOrInProgress { + lowestFailedOrInProgress = wstats.curr + } + } + + // set lowestFailedOrInProgress to minimum failed - 1 + for h, retry := range s.failed { + failed[h] += retry.count + if h < lowestFailedOrInProgress { + lowestFailedOrInProgress = h + } + } + + for h, retry := range s.inRetry { + failed[h] += retry.count + } + + return SamplingStats{ + SampledChainHead: lowestFailedOrInProgress - 1, + CatchupHead: s.next - 1, + NetworkHead: s.networkHead, + Failed: failed, + Workers: workers, + Concurrency: len(workers), + CatchUpDone: s.catchUpDone.Load(), + IsRunning: len(workers) > 0 || s.catchUpDone.Load(), + } } -// JobInfo contains information about a catchUp job. -type JobInfo struct { - Start time.Time `json:"start"` - End time.Time `json:"end"` - Error error `json:"error"` +func (s *coordinatorState) checkDone() { + if len(s.inProgress) == 0 && len(s.failed) == 0 && s.next > s.networkHead { + if s.catchUpDone.CompareAndSwap(false, true) { + close(s.catchUpDoneCh) + } + return + } - ID uint64 `json:"id"` - Height uint64 `json:"height"` - From uint64 `json:"from"` - To uint64 `json:"to"` + if s.catchUpDone.Load() { + // overwrite channel before storing done flag + s.catchUpDoneCh = make(chan struct{}) + s.catchUpDone.Store(false) + } } -func (ji JobInfo) Finished() bool { - return ji.To == ji.Height +// waitCatchUp waits for sampling process to indicate catchup is done +func (s *coordinatorState) waitCatchUp(ctx context.Context) error { + if s.catchUpDone.Load() { + return nil + } + select { + case <-s.catchUpDoneCh: + case <-ctx.Done(): + return ctx.Err() + } + return nil } -func (ji JobInfo) Duration() time.Duration { - return ji.End.Sub(ji.Start) +// canRetry returns true if the time stored in the "after" has passed. 
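+// A zero-value retryAttempt can be retried immediately: its zero "after" time is always in the past.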
+func (r retryAttempt) canRetry() bool { + return r.after.Before(time.Now()) } diff --git a/das/state_test.go b/das/state_test.go new file mode 100644 index 0000000000..57425082eb --- /dev/null +++ b/das/state_test.go @@ -0,0 +1,95 @@ +package das + +import ( + "errors" + "sort" + "testing" + + "github.com/stretchr/testify/assert" +) + +func Test_coordinatorStats(t *testing.T) { + tests := []struct { + name string + state *coordinatorState + want SamplingStats + }{ + { + "basic", + &coordinatorState{ + inProgress: map[int]func() workerState{ + 1: func() workerState { + return workerState{ + result: result{ + job: job{ + jobType: recentJob, + from: 21, + to: 30, + }, + failed: map[uint64]int{22: 1}, + err: errors.New("22: failed"), + }, + curr: 25, + } + }, + 2: func() workerState { + return workerState{ + result: result{ + job: job{ + jobType: catchupJob, + from: 11, + to: 20, + }, + failed: map[uint64]int{12: 1, 13: 1}, + err: errors.Join(errors.New("12: failed"), errors.New("13: failed")), + }, + curr: 15, + } + }, + }, + failed: map[uint64]retryAttempt{ + 22: {count: 1}, + 23: {count: 1}, + 24: {count: 2}, + }, + nextJobID: 0, + next: 31, + networkHead: 100, + }, + SamplingStats{ + SampledChainHead: 11, + CatchupHead: 30, + NetworkHead: 100, + Failed: map[uint64]int{22: 2, 23: 1, 24: 2, 12: 1, 13: 1}, + Workers: []WorkerStats{ + { + JobType: recentJob, + Curr: 25, + From: 21, + To: 30, + ErrMsg: "22: failed", + }, + { + JobType: catchupJob, + Curr: 15, + From: 11, + To: 20, + ErrMsg: "12: failed\n13: failed", + }, + }, + Concurrency: 2, + CatchUpDone: false, + IsRunning: true, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + stats := tt.state.unsafeStats() + sort.Slice(stats.Workers, func(i, j int) bool { + return stats.Workers[i].From > stats.Workers[j].Curr + }) + assert.Equal(t, tt.want, stats, "stats are not equal") + }) + } +} diff --git a/das/stats.go b/das/stats.go new file mode 100644 index 0000000000..dda6be6cc0 --- /dev/null +++ b/das/stats.go @@ -0,0 +1,52 @@ +package das + +// SamplingStats collects information about the DASer process. +type SamplingStats struct { + // all headers before SampledChainHead were successfully sampled + SampledChainHead uint64 `json:"head_of_sampled_chain"` + // all headers before CatchupHead were submitted to sampling workers. They could be either already + // sampled, failed or still in progress. For in progress items check Workers stat. 
+ CatchupHead uint64 `json:"head_of_catchup"` + // NetworkHead is the height of the most recent header in the network + NetworkHead uint64 `json:"network_head_height"` + // Failed contains all skipped headers heights with corresponding try count + Failed map[uint64]int `json:"failed,omitempty"` + // Workers has information about each currently running worker stats + Workers []WorkerStats `json:"workers,omitempty"` + // Concurrency amount of currently running parallel workers + Concurrency int `json:"concurrency"` + // CatchUpDone indicates whether all known headers are sampled + CatchUpDone bool `json:"catch_up_done"` + // IsRunning tracks whether the DASer service is running + IsRunning bool `json:"is_running"` +} + +type WorkerStats struct { + JobType jobType `json:"job_type"` + Curr uint64 `json:"current"` + From uint64 `json:"from"` + To uint64 `json:"to"` + + ErrMsg string `json:"error,omitempty"` +} + +// totalSampled returns the total amount of sampled headers +func (s SamplingStats) totalSampled() uint64 { + var inProgress uint64 + for _, w := range s.Workers { + // don't count recent jobs, since heights they are working on are after catchup head + if w.JobType != recentJob { + inProgress += w.To - w.Curr + 1 + } + } + return s.CatchupHead - inProgress - uint64(len(s.Failed)) +} + +// workersByJobType returns a map of job types to the number of workers assigned to those types. +func (s SamplingStats) workersByJobType() map[jobType]int64 { + workers := make(map[jobType]int64) + for _, w := range s.Workers { + workers[w.JobType]++ + } + return workers +} diff --git a/das/store.go b/das/store.go new file mode 100644 index 0000000000..1d63a5083b --- /dev/null +++ b/das/store.go @@ -0,0 +1,101 @@ +package das + +import ( + "context" + "encoding/json" + "fmt" + "time" + + "github.com/ipfs/go-datastore" + "github.com/ipfs/go-datastore/namespace" +) + +var ( + storePrefix = datastore.NewKey("das") + checkpointKey = datastore.NewKey("checkpoint") +) + +// The checkpointStore stores/loads the DASer's checkpoint to/from +// disk using the checkpointKey. The checkpoint is stored as a struct +// representation of the latest successfully DASed state. +type checkpointStore struct { + datastore.Datastore + done +} + +// newCheckpointStore wraps the given datastore.Datastore with the `das` prefix. +func newCheckpointStore(ds datastore.Datastore) checkpointStore { + return checkpointStore{ + namespace.Wrap(ds, storePrefix), + newDone("checkpoint store")} +} + +// load loads the DAS checkpoint from disk and returns it. +func (s *checkpointStore) load(ctx context.Context) (checkpoint, error) { + bs, err := s.Get(ctx, checkpointKey) + if err != nil { + return checkpoint{}, err + } + + cp := checkpoint{} + err = json.Unmarshal(bs, &cp) + return cp, err +} + +// checkpointStore stores the given DAS checkpoint to disk. +func (s *checkpointStore) store(ctx context.Context, cp checkpoint) error { + // checkpointStore latest DASed checkpoint to disk here to ensure that if DASer is not yet + // fully caught up to network head, it will resume DASing from this checkpoint + // up to current network head + bs, err := json.Marshal(cp) + if err != nil { + return fmt.Errorf("marshal checkpoint: %w", err) + } + + if err = s.Put(ctx, checkpointKey, bs); err != nil { + return err + } + + log.Info("stored checkpoint to disk: ", cp.String()) + return nil +} + +// runBackgroundStore periodically saves current sampling state in case of DASer force quit before +// being able to store state on exit. 
The routine can be disabled by passing storeInterval = 0. +func (s *checkpointStore) runBackgroundStore( + ctx context.Context, + storeInterval time.Duration, + getCheckpoint func(ctx context.Context) (checkpoint, error)) { + defer s.indicateDone() + + // runBackgroundStore could be disabled by setting storeInterval = 0 + if storeInterval == 0 { + log.Info("DASer background checkpointStore is disabled") + return + } + + ticker := time.NewTicker(storeInterval) + defer ticker.Stop() + + var prev uint64 + for { + // blocked by ticker to perform storing only once in a period + select { + case <-ticker.C: + case <-ctx.Done(): + return + } + + cp, err := getCheckpoint(ctx) + if err != nil { + log.Debug("DASer coordinator checkpoint is unavailable") + continue + } + if cp.SampleFrom > prev { + if err = s.store(ctx, cp); err != nil { + log.Errorw("storing checkpoint to disk", "err", err) + } + prev = cp.SampleFrom + } + } +} diff --git a/das/subscriber.go b/das/subscriber.go new file mode 100644 index 0000000000..6af894d2dc --- /dev/null +++ b/das/subscriber.go @@ -0,0 +1,39 @@ +package das + +import ( + "context" + + libhead "github.com/celestiaorg/go-header" + + "github.com/celestiaorg/celestia-node/header" +) + +// subscriber subscribes to notifications about new headers in the network to keep +// sampling process up-to-date with current network state. +type subscriber struct { + done +} + +func newSubscriber() subscriber { + return subscriber{newDone("subscriber")} +} + +func (s *subscriber) run(ctx context.Context, sub libhead.Subscription[*header.ExtendedHeader], emit listenFn) { + defer s.indicateDone() + defer sub.Cancel() + + for { + h, err := sub.NextHeader(ctx) + if err != nil { + if err == context.Canceled { + return + } + + log.Errorw("failed to get next header", "err", err) + continue + } + log.Debugw("new header received via subscription", "height", h.Height()) + + emit(ctx, h) + } +} diff --git a/das/worker.go b/das/worker.go new file mode 100644 index 0000000000..f2e8c4d821 --- /dev/null +++ b/das/worker.go @@ -0,0 +1,203 @@ +package das + +import ( + "context" + "errors" + "fmt" + "sync" + "time" + + libhead "github.com/celestiaorg/go-header" + + "github.com/celestiaorg/celestia-node/header" + "github.com/celestiaorg/celestia-node/share/p2p/shrexsub" +) + +const ( + catchupJob jobType = "catchup" + recentJob jobType = "recent" + retryJob jobType = "retry" +) + +type worker struct { + lock sync.Mutex + state workerState + + getter libhead.Getter[*header.ExtendedHeader] + sampleFn sampleFn + broadcast shrexsub.BroadcastFn + metrics *metrics +} + +// workerState contains important information about the state of a +// current sampling routine. 
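+// It embeds the worker's result so far and tracks the height currently being sampled.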
+type workerState struct { + result + + curr uint64 +} + +type jobType string + +// job represents headers interval to be processed by worker +type job struct { + id int + jobType jobType + from uint64 + to uint64 + + // header is set only for recentJobs, avoiding an unnecessary call to the header store + header *header.ExtendedHeader +} + +func newWorker(j job, + getter libhead.Getter[*header.ExtendedHeader], + sample sampleFn, + broadcast shrexsub.BroadcastFn, + metrics *metrics, +) worker { + return worker{ + getter: getter, + sampleFn: sample, + broadcast: broadcast, + metrics: metrics, + state: workerState{ + curr: j.from, + result: result{ + job: j, + failed: make(map[uint64]int), + }, + }, + } +} + +func (w *worker) run(ctx context.Context, timeout time.Duration, resultCh chan<- result) { + jobStart := time.Now() + log.Debugw("start sampling worker", "from", w.state.from, "to", w.state.to) + + for curr := w.state.from; curr <= w.state.to; curr++ { + err := w.sample(ctx, timeout, curr) + if errors.Is(err, context.Canceled) { + // sampling worker will resume upon restart + return + } + w.setResult(curr, err) + } + + if w.state.jobType != recentJob { + log.Infow( + "finished sampling headers", + "type", w.state.jobType, + "from", w.state.from, + "to", w.state.curr, + "errors", len(w.state.failed), + "finished (s)", time.Since(jobStart), + ) + } + + select { + case resultCh <- w.state.result: + case <-ctx.Done(): + } +} + +func (w *worker) sample(ctx context.Context, timeout time.Duration, height uint64) error { + h, err := w.getHeader(ctx, height) + if err != nil { + return err + } + + start := time.Now() + ctx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + + err = w.sampleFn(ctx, h) + w.metrics.observeSample(ctx, h, time.Since(start), w.state.jobType, err) + if err != nil { + if !errors.Is(err, context.Canceled) { + log.Debugw( + "failed to sample header", + "type", w.state.jobType, + "height", h.Height(), + "hash", h.Hash(), + "square width", len(h.DAH.RowRoots), + "data root", h.DAH.String(), + "err", err, + "finished (s)", time.Since(start), + ) + } + return err + } + + logout := log.Debugw + + // notify network about availability of new block data (note: only full nodes can notify) + if w.state.job.jobType == recentJob { + err = w.broadcast(ctx, shrexsub.Notification{ + DataHash: h.DataHash.Bytes(), + Height: h.Height(), + }) + if err != nil { + log.Warn("failed to broadcast availability message", + "height", h.Height(), "hash", h.Hash(), "err", err) + } + + logout = log.Infow + } + + logout( + "sampled header", + "type", w.state.jobType, + "height", h.Height(), + "hash", h.Hash(), + "square width", len(h.DAH.RowRoots), + "data root", h.DAH.String(), + "finished (s)", time.Since(start), + ) + return nil +} + +func (w *worker) getHeader(ctx context.Context, height uint64) (*header.ExtendedHeader, error) { + if w.state.header != nil { + return w.state.header, nil + } + + // TODO: get headers in batches + start := time.Now() + h, err := w.getter.GetByHeight(ctx, height) + if err != nil { + if !errors.Is(err, context.Canceled) { + log.Errorw("failed to get header from header store", "height", height, + "finished (s)", time.Since(start)) + } + return nil, err + } + + w.metrics.observeGetHeader(ctx, time.Since(start)) + + log.Debugw( + "got header from header store", + "height", h.Height(), + "hash", h.Hash(), + "square width", len(h.DAH.RowRoots), + "data root", h.DAH.String(), + "finished (s)", time.Since(start), + ) + return h, nil +} + +func (w *worker) 
setResult(curr uint64, err error) { + w.lock.Lock() + defer w.lock.Unlock() + if err != nil { + w.state.failed[curr]++ + w.state.err = errors.Join(w.state.err, fmt.Errorf("height: %d, err: %w", curr, err)) + } + w.state.curr = curr +} + +func (w *worker) getState() workerState { + w.lock.Lock() + defer w.lock.Unlock() + return w.state +} diff --git a/docker/Dockerfile b/docker/Dockerfile deleted file mode 100644 index a0637eb8ae..0000000000 --- a/docker/Dockerfile +++ /dev/null @@ -1,22 +0,0 @@ -FROM --platform=$BUILDPLATFORM golang:1.18 as builder -RUN apt-get install make -WORKDIR /src -COPY go.mod go.sum ./ -RUN go mod download -COPY . . -ARG TARGETOS TARGETARCH -RUN env GOOS=$TARGETOS GOARCH=$TARGETARCH make build - -FROM ubuntu -# Default node type can be overwritten in deployment manifest -ENV NODE_TYPE bridge - -COPY docker/entrypoint.sh / - -# Copy in the binary -COPY --from=builder /src/build/celestia / - -EXPOSE 2121 - -ENTRYPOINT ["/entrypoint.sh"] -CMD ["celestia"] \ No newline at end of file diff --git a/docker/entrypoint.sh b/docker/entrypoint.sh index 3548288d40..6f064be830 100755 --- a/docker/entrypoint.sh +++ b/docker/entrypoint.sh @@ -1,11 +1,24 @@ #!/bin/bash -set -e +set -e if [ "$1" = 'celestia' ]; then - ./celestia "${NODE_TYPE}" init + echo "Initializing Celestia Node with command:" - exec ./"$@" "--" + if [[ -n "$NODE_STORE" ]]; then + echo "celestia "${NODE_TYPE}" init --p2p.network "${P2P_NETWORK}" --node.store "${NODE_STORE}"" + celestia "${NODE_TYPE}" init --p2p.network "${P2P_NETWORK}" --node.store "${NODE_STORE}" + else + echo "celestia "${NODE_TYPE}" init --p2p.network "${P2P_NETWORK}"" + celestia "${NODE_TYPE}" init --p2p.network "${P2P_NETWORK}" + fi + + echo "" + echo "" fi +echo "Starting Celestia Node with command:" +echo "$@" +echo "" + exec "$@" diff --git a/docker/telemetry/docker-compose.yml b/docker/telemetry/docker-compose.yml new file mode 100644 index 0000000000..9a95551b0c --- /dev/null +++ b/docker/telemetry/docker-compose.yml @@ -0,0 +1,89 @@ +--- +version: '3.8' + +services: + loki: + container_name: loki + image: grafana/loki:2.6.1 + expose: + - 3100 + ports: + - "3100:3100" + restart: unless-stopped + volumes: + - loki-data:/loki + + promtail: + container_name: promtail + image: grafana/promtail:latest + volumes: + # custom config will read logs from the containers of + # this project + - ${PWD}/promtail:/etc/promtail + # to read container labels and logs + - /var/run/docker.sock:/var/run/docker.sock + - /var/lib/docker/containers:/var/lib/docker/containers:ro + depends_on: + - loki + + prometheus: + container_name: prometheus + image: prom/prometheus + ports: + - 9000:9090 + volumes: + - ${PWD}/prometheus:/etc/prometheus + - prometheus-data:/prometheus + # yamllint disable-line rule:line-length + command: --web.enable-lifecycle --config.file=/etc/prometheus/prometheus.yml + extra_hosts: + - "host.docker.internal:host-gateway" + + otel-collector: + container_name: otel-collector + image: otel/opentelemetry-collector + command: ["--config=/root/otel-collector/config.yml"] + volumes: + - ${PWD}/otel-collector:/root/otel-collector/ + ports: + - "8888:8888" # Prometheus metrics exposed by the collector + - "8889:8889" # Prometheus exporter metrics + - "55681:55681" + - "13133:13133" # health_check extension + - "4317:4317" # OTLP gRPC receiver + - "4318:4318" # OTLP http receiver + + jaeger: + container_name: jaeger + image: jaegertracing/all-in-one:latest + ports: + - "16686:16686" + - "14268:14268" + - "14250:14250" + environment: + - 
COLLECTOR_OTLP_ENABLED=true + - LOG_LEVEL=debug + + grafana: + container_name: grafana + image: grafana/grafana:latest + user: "0" + ports: + - 3001:3000 + restart: unless-stopped + volumes: + - ${PWD}/grafana/:/etc/grafana/provisioning/ + - ${PWD}/grafana/:/var/lib/grafana/dashboards/ + - grafana-data:/var/lib/grafana + + pyroscope: + image: "pyroscope/pyroscope:latest" + ports: + - "4040:4040" + command: + - "server" + +volumes: + prometheus-data: + loki-data: + grafana-data: diff --git a/docker/telemetry/grafana/datasources/config.yml b/docker/telemetry/grafana/datasources/config.yml new file mode 100644 index 0000000000..a9ad009548 --- /dev/null +++ b/docker/telemetry/grafana/datasources/config.yml @@ -0,0 +1,14 @@ +--- +apiVersion: 1 + +datasources: + - name: Prometheus + type: prometheus + access: proxy + url: http://prometheus:9090 + - name: Loki + type: loki + access: proxy + url: http://loki:3100 + jsonData: + maxLines: 1000 diff --git a/docker/telemetry/loki/config.yml b/docker/telemetry/loki/config.yml new file mode 100644 index 0000000000..467188f09b --- /dev/null +++ b/docker/telemetry/loki/config.yml @@ -0,0 +1,11 @@ +--- +auth_enabled: true + +http_prefix: + +server: + http_listen_address: 0.0.0.0 + grpc_listen_address: 0.0.0.0 + http_listen_port: 3100 + grpc_listen_port: 9095 + log_level: info diff --git a/docker/telemetry/otel-collector/config.yml b/docker/telemetry/otel-collector/config.yml new file mode 100644 index 0000000000..c74c8ca93c --- /dev/null +++ b/docker/telemetry/otel-collector/config.yml @@ -0,0 +1,32 @@ +--- +extensions: + health_check: + +receivers: + otlp: + protocols: + grpc: + # endpoint: "0.0.0.0:4317" + http: + # endpoint: "0.0.0.0:4318" + +exporters: + prometheus: + endpoint: "otel-collector:8889" + send_timestamps: true + metric_expiration: 1800m + jaeger: + endpoint: "jaeger:14250" + tls: + insecure: true + +service: + extensions: [health_check] + pipelines: + metrics: + receivers: [otlp] + exporters: [prometheus] + traces: + receivers: [otlp] + processors: [] + exporters: [jaeger] diff --git a/docker/telemetry/prometheus/prometheus.yml b/docker/telemetry/prometheus/prometheus.yml new file mode 100644 index 0000000000..7cc7395305 --- /dev/null +++ b/docker/telemetry/prometheus/prometheus.yml @@ -0,0 +1,25 @@ +--- +global: + scrape_interval: 15s + scrape_timeout: 10s + evaluation_interval: 15s + +scrape_configs: + - job_name: 'collector' + metrics_path: /metrics + honor_timestamps: true + scrape_interval: 15s + scrape_timeout: 10s + scheme: http + static_configs: + - targets: + - 'otel-collector:8889' + - job_name: 'p2p-metrics' + metrics_path: /metrics + honor_timestamps: true + scrape_interval: 15s + scrape_timeout: 10s + scheme: http + static_configs: + - targets: + - 'host.docker.internal:8890' diff --git a/docker/telemetry/promtail/config.yml b/docker/telemetry/promtail/config.yml new file mode 100644 index 0000000000..95267000a9 --- /dev/null +++ b/docker/telemetry/promtail/config.yml @@ -0,0 +1,29 @@ +# https://grafana.com/docs/loki/latest/clients/promtail/configuration/ +# https://docs.docker.com/engine/api/v1.41/#operation/ContainerList +--- +server: + http_listen_port: 9080 + grpc_listen_port: 0 + +positions: + filename: /tmp/positions.yaml + +clients: + - url: http://loki:3100/loki/api/v1/push + +scrape_configs: + - job_name: flog_scrape + docker_sd_configs: + - host: unix:///var/run/docker.sock + refresh_interval: 5s + filters: + - name: label + values: ["logging=promtail"] + relabel_configs: + - source_labels: 
['__meta_docker_container_name'] + regex: '/(.*)' + target_label: 'container' + - source_labels: ['__meta_docker_container_log_stream'] + target_label: 'logstream' + - source_labels: ['__meta_docker_container_label_logging_jobname'] + target_label: 'job' diff --git a/docs/adr/README.md b/docs/adr/README.md index 3f6266337a..c2ade99ea4 100644 --- a/docs/adr/README.md +++ b/docs/adr/README.md @@ -27,6 +27,10 @@ If recorded decisions turned out to be lacking, convene a discussion, record the Note the context/background should be written in the present tense. -To start a new ADR, you can use this template: [adr-template.md](./adr-template.md) +To start a new ADR, you can generate a new file with the following command: + +```bash +make adr-gen NUM=001 TITLE=my-adr-title +``` ## Table of Contents diff --git a/docs/adr/adr-001-predevnet-celestia-node.md b/docs/adr/adr-001-predevnet-celestia-node.md index 0a3cff0402..b5ac40f8bc 100644 --- a/docs/adr/adr-001-predevnet-celestia-node.md +++ b/docs/adr/adr-001-predevnet-celestia-node.md @@ -100,7 +100,7 @@ For devnet, it should be possible for Celestia `full` Nodes to receive informati For the Celestia Node to be able to propagate `StateFraudProof`s, we must modify Celestia Core to store blocks with invalid state and serve them to both the Celestia Node and the Celestia App, **and** the Celestia App must be able to generate and serve `StateFraudProof`s via RPC to Celestia nodes. -This feature is not necessarily required for devnet (so state exection functionality for Celestia Full Nodes can be stubbed out), but it would be nice to have for devnet as we will likely allow Celestia Full Nodes to speak with other Celestia Full Nodes instead of running a trusted Celestia Core node simultaenously and relying on it for information. +This feature is not necessarily required for devnet (so state execution functionality for Celestia Full Nodes can be stubbed out), but it would be nice to have for devnet as we will likely allow Celestia Full Nodes to speak with other Celestia Full Nodes instead of running a trusted Celestia Core node simultaneously and relying on it for information. 
A roadmap to implementation could look like the following: diff --git a/docs/adr/adr-005-plugins.md b/docs/adr/adr-005-plugins.md index b01c772b54..7d98b63718 100644 --- a/docs/adr/adr-005-plugins.md +++ b/docs/adr/adr-005-plugins.md @@ -259,7 +259,7 @@ Proposed - easier to create custom applications that run on top of celestia - allows for developers to create a better UX for their custom celestia-nodes - isolates the added functionality to its own service(s), which could potentially be combined with other plugins -- helps move us towards our goal of reducing any any duplicate functionality coded in optimint's dalc +- helps move us towards our goal of reducing any duplicate functionality coded in optimint's dalc ### Negative diff --git a/docs/adr/adr-006-fraud-service.md b/docs/adr/adr-006-fraud-service.md index 0abb1064d9..fa88f8c81d 100644 --- a/docs/adr/adr-006-fraud-service.md +++ b/docs/adr/adr-006-fraud-service.md @@ -10,10 +10,13 @@ - changed from NamespaceShareWithProof to ShareWithProof; - made ProofUnmarshaler public and extended return params; - fixed typo issues; -- 2022.06.15 - Extend Proof interface with HeaderHash method -- 2022.06.22 - Updated rsmt2d to change isRow to Axis -- 2022.07.03 - Add storage description -- 2022.07.23 - rework unmarshalers registration +- 2022.06.15 - Extend Proof interface with HeaderHash method; +- 2022.06.22 - Updated rsmt2d to change isRow to Axis; +- 2022.07.03 - Added storage description; +- 2022.07.23 - Reworked unmarshalers registration; +- 2022.08.25 - + - Added BinaryUnmarshaller to Proof interface; + - Changed ProofType type from int to string; ## Authors @@ -68,7 +71,7 @@ In addition, `das.Daser`: ```go // Currently, we support only one fraud proof. But this enum will be extended in the future with other const ( - BadEncoding ProofType = 0 + BadEncoding ProofType = "badencoding" ) type BadEncodingProof struct { @@ -126,7 +129,7 @@ In addition, `das.Daser`: ```go // ProofType is a enum type that represents a particular type of fraud proof. - type ProofType int + type ProofType string // Proof is a generic interface that will be used for all types of fraud proofs in the network. type Proof interface { @@ -136,6 +139,7 @@ In addition, `das.Daser`: Validate(*header.ExtendedHeader) error encoding.BinaryMarshaller + encoding.BinaryUnmarshaler } ``` @@ -193,7 +197,7 @@ Both full and light nodes should stop `DAS`, `Syncer` and `SubmitTx` services. 1. Valid BadEncodingFraudProofs should be stored on the disk using `FraudStore` interface: -### BEFP storage +### Fraud storage BEFP storage will be created on first subscription to Bad Encoding Fraud Proof. BEFP will be stored in datastore once it will be received, using `fraud/badEncodingProof` path and the corresponding block hash as the key: @@ -211,6 +215,17 @@ func getAll(ctx context.Context, ds datastore.Datastore) ([][]byte, error) In case if response error will be empty (and not ```datastore.ErrNotFound```), then a BEFP has been already added to storage and the node should be halted. +### Fraud sync + +The main purpose of FraudSync is to deliver fraud proofs to nodes that were started after a BEFP appears. Since full nodes create the BEFP during reconstruction, FraudSync is mainly implemented for light nodes: + +- Once a light node checks that its local fraud storage is empty, it starts waiting for new connections with the remote peers(full/bridge nodes) using `share/discovery`. +- The light node will send 5 requests to newly connected peers to get a fraud proof. 
+- If a fraud proof is received from a remote peer, then it should be validated and propagated across all local subscriptions in order to stop the respective services. + +NOTE: if a received fraud proof ends up being invalid, then the remote peer will be added to the blacklist. +Both full/light nodes register a stream handler for handling fraud proof requests. + ### Bridge node behaviour Bridge nodes will behave as light nodes do by subscribing to BEFP fraud sub and listening for BEFPs. If a BEFP is received, it will similarly shut down all dependent services, including broadcasting new `ExtendedHeader`s to the network. diff --git a/docs/adr/adr-009-public-api.md b/docs/adr/adr-009-public-api.md new file mode 100644 index 0000000000..be9e8827eb --- /dev/null +++ b/docs/adr/adr-009-public-api.md @@ -0,0 +1,453 @@ +# ADR 009: Public API + +## Authors + +@Wondertan @renaynay + +## Changelog + +- 2022-03-08: initial version +- 2022-08-03: additional details + +## Context + +Celestia Node has been built for almost half a year with a bottom-up approach to development. The core lower-level components were built first, and the public API around them has taken shape organically. Now that the project is maturing and its architecture is better defined, it's a good time to formally define a set of modules provided by the node and their respective APIs. + +## Alternative Approaches + +### Node Type centric Design + +Another discussed approach to defining an API could be an onion style, where each new layer is a feature set of a broader node type. However, this approach does not match the reality we have: each node type implements all the possible APIs, with implementations varying to match the resource constraints of the type. + +## Design + +### Goals + +- Ergonomic. Simple, idiomatic and self-explanatory. +- Module-centric (modular). The API is not monolithic and is segregated into different categorized and independent modules. +- Unified. All the node types implement the same set of APIs. The difference is defined by different implementations of some modules to meet the resource requirements of a type. Example: FullAvailability and LightAvailability. +- Embeddable. A simply constructable Node with a library-style API. Not an SDK/framework that dictates how users must build an app; users decide how they want to build the app using the API. +- Language agnostic. It should be simple to implement similar module interfaces/traits in other languages over RPC clients. + +### Implementation + +The tracking issue can be found [here](https://github.com/celestiaorg/celestia-node/issues/944). It provides a more detailed step-by-step process for how the below described design will be implemented. + +### High-level description + +All node types in `celestia-node` will be referred to as `data availability nodes (DA nodes)` whose sole purpose is to interact with the `data availability network layer (DA layer)` such that the node contains all functionality necessary to post and retrieve messages from the DA layer. + +This means that DA nodes will be able to query for / modify celestia state such that the DA nodes are able to pay for posting their messages on the network. The state-related API will be documented below in detail. + +Furthermore, interaction between the celestia consensus network and the celestia data availability network will be the responsibility of the **bridge** node type.
However, that interaction will not be exposed on a public level +(meaning a **bridge** node will not expose the same API as the +celestia-core node to which it is connected). A **bridge** node, for all intents +and purposes, will provide the same API as that of a **full** node (with a +stubbed-out DAS module as bridge nodes do not perform sampling). + +### Details + +#### Services Deprecation + +The initial step is to deprecate services in favor of modules. Ex. +`HeaderService` -> `HeaderModule`. + +- We're organically moving towards the direction of modularized libraries. That is, our `share`, `header` and `state` packages are getting shaped as independent modules which now lack their own API definition. +- Consistency. Semantically, modules are closer to Celestia's overarching project goals and slogans. +- Disassociate centralization. Services have always been associated with centralized infrastructure, from older monolithic services to newer distributed microservice architectures. + +#### Modules Definition + +##### Header + +```go +type HeaderModule interface { +// LocalHead returns the node's local head (tip of the chain of the header store). +LocalHead(ctx context.Context) (*header.ExtendedHeader, error) +// GetByHash returns the header of the given hash from the node's header store. +GetByHash(ctx context.Context, hash tmbytes.HexBytes) (*header.ExtendedHeader, error) +// GetByHeight returns the header of the given height if it is available. +GetByHeight(ctx context.Context, height uint64) (*header.ExtendedHeader, error) +// WaitForHeight blocks until the header at the given height has been processed +// by the node's header store or until context deadline is exceeded. +WaitForHeight(ctx context.Context, height uint64) (*header.ExtendedHeader, error) +// GetRangeByHeight returns the given range (from:to) of ExtendedHeaders +// from the node's header store and verifies that the returned headers are +// adjacent to each other. +GetRangeByHeight(ctx context.Context, from, to uint64) ([]*ExtendedHeader, error) +// Subscribe creates long-living Subscription for newly validated +// ExtendedHeaders. Multiple Subscriptions can be created. +Subscribe(context.Context) (<-chan *header.ExtendedHeader, error) +// SyncState returns the current state of the header Syncer. +SyncState(context.Context) (sync.State, error) +// SyncWait blocks until the header Syncer is synced to network head. +SyncWait(ctx context.Context) error +// NetworkHead provides the Syncer's view of the current network head. +NetworkHead(ctx context.Context) (*header.ExtendedHeader, error) +} + +``` + +##### Shares + +```go + type SharesModule interface { + // GetShare returns the Share from the given data Root at the given row/col + // coordinates. + GetShare(ctx context.Context, root *Root, row, col int) (Share, error) + // GetEDS gets the full EDS identified by the given root. + GetEDS(ctx context.Context, root *share.Root) (*rsmt2d.ExtendedDataSquare, error) + // GetSharesByNamespace gets all shares from an EDS within the given namespace. + // Shares are returned in a row-by-row order if the namespace spans multiple rows. + GetSharesByNamespace( + ctx context.Context, + root *Root, + nID namespace.ID, + ) (share.NamespacedShares, error) + // SharesAvailable subjectively validates if Shares committed to the given data + // Root are available on the network.
+ SharesAvailable(ctx context.Context, root *Root) error + // ProbabilityOfAvailability calculates the probability of the data square + // being available based on the number of samples collected. + ProbabilityOfAvailability() float64 + } +``` + +##### P2P + +```go + type P2PModule interface { + // Info returns address information about the host. + Info(context.Context) (peer.AddrInfo, error) + // Peers returns all peer IDs used across all inner stores. + Peers(context.Context) ([]peer.ID, error) + // PeerInfo returns a small slice of information Peerstore has on the + // given peer. + PeerInfo(context.Context, peer.ID) (peer.AddrInfo, error) + + // Connect ensures there is a connection between this host and the given peer. + Connect(ctx context.Context, pi peer.AddrInfo) error + // ClosePeer closes the connection to a given peer. + ClosePeer(ctx context.Context, id peer.ID) error + // Connectedness returns a state signaling connection capabilities. + Connectedness(ctx context.Context, id peer.ID) network.Connectedness + // NATStatus returns the current NAT status. + NATStatus(context.Context) network.Reachability + + // BlockPeer adds a peer to the set of blocked peers. + BlockPeer(ctx context.Context, p peer.ID) error + // UnblockPeer removes a peer from the set of blocked peers. + UnblockPeer(ctx context.Context, p peer.ID) error + // ListBlockedPeers returns a list of blocked peers. + ListBlockedPeers(context.Context) []peer.ID + + // MutualAdd adds a peer to the list of peers who have a bidirectional + // peering agreement that they are protected from being trimmed, dropped + // or negatively scored. + MutualAdd(ctx context.Context, id peer.ID, tag string) + // MutualRm removes a peer from the list of peers who have a bidirectional + // peering agreement that they are protected from being trimmed, dropped + // or negatively scored, returning a bool representing whether the given + // peer is protected or not. + MutualRm(ctx context.Context, id peer.ID, tag string) bool + // IsMutual returns whether the given peer is a mutual peer. + IsMutual(ctx context.Context, id peer.ID, tag string) bool + + // BandwidthStats returns a Stats struct with bandwidth metrics for all + // data sent/received by the local peer, regardless of protocol or remote + // peer IDs. + BandwidthStats(context.Context) Stats + // BandwidthForPeer returns a Stats struct with bandwidth metrics associated + // with the given peer.ID. The metrics returned include all traffic sent / + // received for the peer, regardless of protocol. + BandwidthForPeer(ctx context.Context, id peer.ID) Stats + // BandwidthForProtocol returns a Stats struct with bandwidth metrics + // associated with the given protocol.ID. + BandwidthForProtocol(ctx context.Context, proto protocol.ID) Stats + + // ResourceState returns the state of the resource manager. + ResourceState(context.Context) rcmgr.ResourceManagerStat + + // PubSubPeers returns the peer IDs of the peers joined on + // the given topic. + PubSubPeers(ctx context.Context, topic string) ([]peer.ID, error) + } +``` + +### NodeModule + +```go + + type NodeModule interface { + // Info returns administrative information about the node. + Info(context.Context) (Info, error) + + // LogLevelSet sets the given component log level to the given level. + LogLevelSet(ctx context.Context, name, level string) error + + // AuthVerify returns the permissions assigned to the given token.
+ AuthVerify(ctx context.Context, token string) ([]auth.Permission, error) + // AuthNew signs and returns a new token with the given permissions. + AuthNew(ctx context.Context, perms []auth.Permission) ([]byte, error) + } + +``` + +#### DAS + +```go + type DASModule interface { + // Stats returns current stats of the DASer. + Stats() (das.SamplingStats, error) + } +``` + +#### State + +```go + type StateModule interface { + // AccountAddress retrieves the address of the node's account/signer + AccountAddress(ctx context.Context) (state.Address, error) + // Balance retrieves the Celestia coin balance for the node's account/signer + // and verifies it against the corresponding block's AppHash. + Balance(ctx context.Context) (*state.Balance, error) + // BalanceForAddress retrieves the Celestia coin balance for the given + // address and verifies the returned balance against the corresponding + // block's AppHash. + BalanceForAddress(ctx context.Context, addr state.Address) (*state.Balance, error) + // SubmitTx submits the given transaction/message to the Celestia network + // and blocks until the tx is included in a block. + SubmitTx(ctx context.Context, tx state.Tx) (*state.TxResponse, error) + // SubmitPayForBlob builds, signs and submits a PayForBlob transaction. + SubmitPayForBlob( + ctx context.Context, + nID namespace.ID, + data []byte, + fee types.Int, + gasLimit uint64, + ) (*state.TxResponse, error) + // Transfer sends the given amount of coins from default wallet of the node + // to the given account address. + Transfer( + ctx context.Context, + to types.Address, + amount types.Int, + fee types.Int, + gasLimit uint64, + ) (*state.TxResponse, error) + + // StateModule also provides StakingModule + StakingModule + } +``` + +Ideally all the state modules below should be implemented on top of only +StateModule, but there is no way we can have an abstract state requesting method, +yet. + +##### Staking + +```go + type StakingModule interface { + // Delegate sends a user's liquid tokens to a validator for delegation. + Delegate( + ctx context.Context, + delAddr state.ValAddress, + amount state.Int, + fee types.Int, + gasLim uint64, + ) (*state.TxResponse, error) + // BeginRedelegate sends a user's delegated tokens to a new validator for redelegation. + BeginRedelegate( + ctx context.Context, + srcValAddr, + dstValAddr state.ValAddress, + amount state.Int, + fee types.Int, + gasLim uint64, + ) (*state.TxResponse, error) + // Undelegate undelegates a user's delegated tokens, unbonding them from the + // current validator. + Undelegate( + ctx context.Context, + delAddr state.ValAddress, + amount state.Int, + fee types.Int, + gasLim uint64, + ) (*state.TxResponse, error) + + // CancelUnbondingDelegation cancels a user's pending undelegation from a + // validator. + CancelUnbondingDelegation( + ctx context.Context, + valAddr state.ValAddress, + amount types.Int, + height types.Int, + fee types.Int, + gasLim uint64, + ) (*state.TxResponse, error) + + // QueryDelegation retrieves the delegation information between a delegator + // and a validator. + QueryDelegation( + ctx context.Context, + valAddr state.ValAddress, + ) (*types.QueryDelegationResponse, error) + // QueryRedelegations retrieves the status of the redelegations between a + // delegator and a validator. + QueryRedelegations( + ctx context.Context, + srcValAddr, + dstValAddr state.ValAddress, + ) (*types.QueryRedelegationsResponse, error) + // QueryUnbonding retrieves the unbonding status between a delegator and a validator. 
+ QueryUnbonding( + ctx context.Context, + valAddr state.ValAddress, + ) (*types.QueryUnbondingDelegationResponse, error) + + } +``` + +#### Fraud + +```go + type FraudModule interface { + // Subscribe subscribes to the given fraud proof type. + Subscribe(proof fraud.ProofType) error + // List lists all proof types to which the fraud module is currently + // subscribed. + List() []fraud.ProofType + + // Get returns any stored fraud proofs of the given type. + Get(proof fraud.ProofType) ([]Proof, error) + } +``` + +#### Metrics + +```go + type MetricsModule interface { + // List shows all the registered meters. + List(ctx context.Context) ([]string, error) + + // Enable turns on the specific meter. + Enable(string) error + Disable(string) error + + // ExportTo sets the endpoint the metrics should be exported to. + ExportTo(string) + } +``` + +### Nice to have (post-mainnet) + +#### State-related modules + +Eventually, it would be nice to break up `StateModule` into `StateModule`, +`BankModule` and `StakingModule`. + +##### State (general) + +```go +type StateModule interface { + // QueryABCI proxies a generic ABCI query to the core endpoint. + QueryABCI(ctx context.Context, request abci.RequestQuery) + (*coretypes.ResultABCIQuery, error) + // SubmitTx submits the given transaction/message to the Celestia network + // and blocks until the tx is included in a block. + SubmitTx(ctx context.Context, tx state.Tx) (*state.TxResponse, error) +} +``` + +##### Bank + +```go +type BankModule interface { + // Balance retrieves the Celestia coin balance for the node's account/signer + // and verifies it against the corresponding block's AppHash. + Balance(ctx context.Context) (*state.Balance, error) + // BalanceForAddress retrieves the Celestia coin balance for the given + // address and verifies the returned balance against the corresponding + // block's AppHash. + BalanceForAddress(ctx context.Context, addr state.Address) (*state.Balance, error) + // SubmitPayForBlob builds, signs and submits a PayForBlob transaction. + SubmitPayForBlob( + ctx context.Context, + nID namespace.ID, + data []byte, + gasLimit uint64, + ) (*state.TxResponse, error) + // Transfer sends the given amount of coins from default wallet of the node + // to the given account address. + Transfer( + ctx context.Context, + to types.Address, + amount types.Int, + gasLimit uint64, + ) (*state.TxResponse, error) +} +``` + +##### Staking (same as pre-mainnet staking module) + +```go + type StakingModule interface { + Delegate + Redelegate + Unbond + CancelUnbond + + QueryDelegation + QueryRedelegation + QueryUnbondingDelegation + } +``` + +##### Account + +```go + type AccountModule interface { + Add + Delete + Show + List + Sign + Export + Import + } +``` + +## Status + +Proposed diff --git a/docs/adr/adr-010-incentivized-testnet-monitoring.md b/docs/adr/adr-010-incentivized-testnet-monitoring.md new file mode 100644 index 0000000000..2d197bb920 --- /dev/null +++ b/docs/adr/adr-010-incentivized-testnet-monitoring.md @@ -0,0 +1,300 @@ +# ADR #010: Incentivized Testnet Monitoring + +## Changelog + +- 2022-7-19: Started +- 2022-7-22: Add section on "How to monitor celestia-node with Grafana Cloud" +- 2022-7-26: Add section on "How to monitor celestia-node with Uptrace" +- 2022-7-29: Add section on "How to send data over HTTPS" +- 2022-8-1: Revise architecture to minimize Celestia managed components +- 2022-8-4: Add section on "Why doesn't the Celestia team host OTEL Collectors for node operators?"
+- 2022-8-8: Rename section to "Which actor should run OTEL Collector(s) during the incentivized testnet?" +- 2022-8-9: Update diagrams and add "Scenario D" +- 2022-8-10: Add decision for "Which actor should run OTEL Collector(s) during the incentivized testnet?" + +## Context + +We're adding telemetry to celestia-node by instrumenting our codebase with metrics (see [ADR-009-telemetry](./adr-009-telemetry.md)). If the option to report metrics is enabled on celestia-node, then celestia-node will push metrics via [OTLP Exporter](https://opentelemetry.io/docs/reference/specification/protocol/exporter/) to an [OTEL Collector](https://opentelemetry.io/docs/collector/) instance. + +We would like to make the metrics exported by celestia-node actionable by making them queryable in internal Grafana dashboards. We additionally want a subset of metrics to be queryable by a public incentivized testnet leaderboard frontend. + +We would like to make it possible for node operators to monitor their own nodes with existing telemetry tools (e.g. Grafana and Uptrace). + +This document proposes a strategy for making data available for use in internal Grafana dashboards and a public leaderboard. Additionally it describes how a node operator can deploy and configure their own OTEL Collector instance. + +## Detailed Design + +### Where to export data to? + +Grafana can query data from [multiple data sources](https://grafana.com/docs/grafana/latest/datasources/#supported-data-sources). This document explores two of these data sources: + +1. [Prometheus](https://github.com/prometheus/prometheus) is an open-source time series database written in Go. Prometheus uses the [PromQL](https://prometheus.io/docs/prometheus/latest/querying/basics/) query language. We can deploy Prometheus ourselves or use a hosted Prometheus provider (ex. [Google](https://cloud.google.com/stackdriver/docs/managed-prometheus), [AWS](https://aws.amazon.com/prometheus/), [Grafana](https://grafana.com/go/hosted-prometheus-monitoring/), etc.). Prometheus is pull-based which means services that would like to expose Prometheus metrics must provide an HTTP endpoint (ex. `/metrics`) that a Prometheus instance can poll (see [instrumenting a Go application for Prometheus](https://prometheus.io/docs/guides/go-application/)). Prometheus is used by [Cosmos SDK telemetry](https://docs.cosmos.network/main/core/telemetry.html) and [Tendermint telemetry](https://docs.tendermint.com/v0.35/nodes/metrics.html) so one major benefit to using Prometheus is that metrics emitted by celestia-core, celestia-app, and celestia-node can share the same database. +2. [InfluxDB](https://github.com/influxdata/influxdb) is another open-source time series database written in Go. It is free to deploy InfluxDB but there is a commercial offering from [influxdata](https://www.influxdata.com/get-influxdb/) that provides clustering and on-prem deployments. InfluxDB uses the [InfluxQL](https://docs.influxdata.com/influxdb/v1.8/query_language/) query language which appears less capable at advanced queries than PromQL ([article](https://www.robustperception.io/translating-between-monitoring-languages/)). InfluxDB is push-based which means services can push metrics directly to an InfluxDB instance ([ref](https://logz.io/blog/prometheus-influxdb/#:~:text=InfluxDB%20is%20a%20push%2Dbased,and%20Prometheus%20fetches%20them%20periodically.)). See [Prometheus vs. InfluxDB](https://prometheus.io/docs/introduction/comparison/#prometheus-vs-influxdb) for a more detailed comparison. 
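To make the pull-based model in option 1 concrete, here is a minimal sketch (not part of this PR; the metric name and port are illustrative only) of a Go service exposing a `/metrics` endpoint for a Prometheus instance to poll. Port 8890 is chosen to match the `p2p-metrics` scrape target in the example Prometheus config above:

```go
package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

// sampledHeaders is a toy counter standing in for real node metrics.
var sampledHeaders = promauto.NewCounter(prometheus.CounterOpts{
	Name: "example_sampled_headers_total",
	Help: "Total number of sampled headers (illustrative only).",
})

func main() {
	sampledHeaders.Inc()
	// Prometheus pulls from this endpoint on every scrape_interval.
	http.Handle("/metrics", promhttp.Handler())
	log.Fatal(http.ListenAndServe(":8890", nil))
}
```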
+
+If there are alternative data sources we should evaluate, please share them with us.
+
+#### Decision
+
+We agreed on using Prometheus at this time.
+
+### How to export data out of OTEL Collector?
+
+[Exporters](https://opentelemetry.io/docs/collector/configuration/#exporters) provide a way to export data from an OTEL Collector to a supported destination.
+
+We configure OTEL Collector to export data to Prometheus like this:
+
+```yaml
+exporters:
+  # Data sources: metrics
+  prometheus:
+    endpoint: "prometheus:8889"
+    namespace: "default"
+```
+
+We must additionally enable this exporter via configuration like this:
+
+```yaml
+service:
+  pipelines:
+    metrics:
+      exporters: [prometheus]
+```
+
+OTEL Collector support for exporting to InfluxDB is still in [beta](https://github.com/open-telemetry/opentelemetry-collector#beta=). See [InfluxDB Exporter](https://pkg.go.dev/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/influxdbexporter#section-readme).
+
+### How to query data in Prometheus from Grafana?
+
+In order to query Prometheus data from Grafana, we must add a Prometheus data source. The steps are outlined [here](https://prometheus.io/docs/visualization/grafana/#creating-a-prometheus-data-source).
+
+### How to query data in Prometheus from incentivized testnet leaderboard?
+
+The Prometheus server exposes an HTTP API for querying metrics (see [docs](https://prometheus.io/docs/prometheus/latest/querying/api/#querying-exemplars)).
+
+Implementation details for the incentivized testnet leaderboard are not yet known (it will likely be built by an external vendor). Two possible implementations are:
+
+1. If the incentivized testnet has a dedicated backend, it can query the HTTP API above.
+2. If the incentivized testnet has **no** dedicated backend and the frontend queries Prometheus directly, then the TypeScript library [prometheus-query-js](https://github.com/samber/prometheus-query-js) may be helpful.
+
+### How can a node operator monitor their own node?
+
+Node operators have the option of adding an additional exporter to their OTEL Collector configuration in order to export to multiple backends. This may be useful for node operators who want to configure alerting on metrics emitted by their node. A minimal guide for node operators to collect telemetry from their nodes follows:
+
+#### How to monitor celestia-node with Grafana Cloud
+
+1. [Install celestia-node](https://docs.celestia.org/developers/celestia-node)
+2. Sign up for an account on [Grafana](https://grafana.com/)
+3. [Install OTEL Collector](https://opentelemetry.io/docs/collector/getting-started/) on the same machine as celestia-node. If on a Linux machine, follow [these steps](https://opentelemetry.io/docs/collector/getting-started/#linux-packaging=). OTEL Collector should start automatically immediately after installation.
+4. Configure OTEL Collector to receive metrics from celestia-node by confirming your `/etc/otelcol/config.yaml` has the default config:
+
+    ```yaml
+    receivers:
+      otlp:
+        protocols:
+          grpc:
+          http:
+    ```
+
+    This starts the [OTLP receiver](https://github.com/open-telemetry/opentelemetry-collector/blob/main/receiver/otlpreceiver/README.md) on port 4317 for gRPC and 4318 for HTTP. Celestia-node will by default emit HTTP metrics to `localhost:4318`, so if you deployed OTEL Collector on the same machine as celestia-node, you can preserve the default config.
+5. Configure OTEL Collector to send metrics to Prometheus. If you are using cloud-hosted Grafana, add something like the following to your `/etc/otelcol/config.yaml`:
+
+    ```yaml
+    exporters:
+      prometheusremotewrite:
+        endpoint: https://361398:eyJrIjoiYTNlZTFiOTc2NjA2ODJlOGY1ZGRlNGJkNWMwODRkMDY2M2U2MTE3NiIsIm4iOiJtZXRyaWNzLWtleSIsImlkIjo2MTU4ODJ9@prometheus-prod-01-eu-west-0.grafana.net/api/prom/push
+    ```
+
+6. Configure OTEL Collector to enable the `otlp` receiver and the `prometheusremotewrite` exporter. In `/etc/otelcol/config.yaml`:
+
+    ```yaml
+    service:
+      pipelines:
+        metrics:
+          receivers: [otlp]
+          exporters: [prometheusremotewrite]
+    ```
+
+    See [this article](https://grafana.com/blog/2022/05/10/how-to-collect-prometheus-metrics-with-the-opentelemetry-collector-and-grafana/) for more details. You may need to specify port 443 in the endpoint like this: `endpoint: "https://USER:PASSWORD@prometheus-blocks-prod-us-central1.grafana.net:443/api/prom/push"`
+
+7. Restart OTEL Collector with `sudo systemctl restart otelcol`
+8. Check that OTEL Collector started correctly with `systemctl status otelcol.service` and confirm there are no errors in `journalctl | grep otelcol | grep Error`
+9. Start celestia-node with metrics enabled: `celestia light start --core.ip https://rpc-mamaki.pops.one --metrics`
+10. Verify that metrics are being displayed in Grafana.
+11. [Optional] Import an [OpenTelemetry Collector Dashboard](https://grafana.com/grafana/dashboards/12553-opentelemetry-collector/) into Grafana to monitor your OTEL Collector.
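+
+For reference, the complete `/etc/otelcol/config.yaml` assembled from steps 4-6 above might look like this (the endpoint is a placeholder; substitute the remote write URL from your own Grafana Cloud account):
+
+```yaml
+receivers:
+  otlp:
+    protocols:
+      grpc:
+      http:
+
+exporters:
+  prometheusremotewrite:
+    endpoint: https://USER:PASSWORD@prometheus-prod-01-eu-west-0.grafana.net/api/prom/push
+
+service:
+  pipelines:
+    metrics:
+      receivers: [otlp]
+      exporters: [prometheusremotewrite]
+```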
+
+#### How to monitor celestia-node with Uptrace
+
+1. [Install celestia-node](https://docs.celestia.org/developers/celestia-node).
+2. Create an account on [Uptrace](https://app.uptrace.dev/).
+3. Create a project on Uptrace.
+4. Follow [these steps](https://uptrace.dev/opentelemetry/collector.html#when-to-use-opentelemetry-collector=) to install OTEL Collector Contrib on the same host as celestia-node.
+5. Configure OTEL Collector Contrib based on the [configuration](https://uptrace.dev/opentelemetry/collector.html#configuration=) section in the Uptrace docs. Ensure you selected your newly created project in the dropdown. If you'd like to collect traces and metrics, you need to add the `metrics` section under `service.pipelines`:
+
+    ```yaml
+    service:
+      pipelines:
+        metrics:
+          receivers: [otlp]
+          processors: [batch]
+          exporters: [otlp]
+    ```
+
+6. Restart OTEL Collector Contrib with `sudo systemctl restart otelcol-contrib`. Check that OTEL Collector Contrib is running with `sudo systemctl status otelcol-contrib` and confirm there are no errors in `sudo journalctl -u otelcol-contrib -f`. If you encounter `No journal files were found.` then reference this [StackOverflow post](https://stackoverflow.com/questions/30783134/systemd-user-journals-not-being-created/47930381#47930381).
+7. Start celestia-node with metrics and traces enabled: `celestia light start --core.ip https://rpc-mamaki.pops.one --tracing --metrics`.
+8. Navigate to Uptrace and create a dashboard. Confirm you can see a metric.
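+
+As noted above, a node operator can also export to multiple backends at once from a single OTEL Collector. A sketch of a config that forwards metrics to both Grafana Cloud and Uptrace follows; the endpoints, credentials, and DSN are placeholders, and the Uptrace exporter settings are assumed from the Uptrace docs rather than verified here:
+
+```yaml
+exporters:
+  # Grafana Cloud hosted Prometheus (placeholder credentials).
+  prometheusremotewrite:
+    endpoint: https://USER:PASSWORD@prometheus-prod-01-eu-west-0.grafana.net/api/prom/push
+  # Uptrace ingests OTLP directly; the DSN identifies your project.
+  otlp:
+    endpoint: otlp.uptrace.dev:4317
+    headers:
+      uptrace-dsn: 'YOUR_UPTRACE_DSN'
+
+service:
+  pipelines:
+    metrics:
+      receivers: [otlp]
+      exporters: [prometheusremotewrite, otlp]
+```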
+
+### Should we host a Prometheus instance ourselves or use a hosted provider?
+
+We currently host a Prometheus instance on DigitalOcean (host mamaki-prometheus) for development. However, cloud hosted Prometheus providers take on the responsibility of running, upgrading, and scaling a Prometheus instance for us (see [oss-vs-cloud](https://grafana.com/oss-vs-cloud/)). Although multiple hosted providers exist, we propose using Grafana Cloud's hosted Prometheus at this time.
+
+### Should we host a Grafana instance ourselves or use a hosted provider?
+
+We already host a Grafana instance on DigitalOcean (host mamaki-prometheus). We propose using Grafana Cloud's hosted Grafana at this time due to its tight integration with Grafana Cloud Prometheus.
+
+### Should we host separate Prometheus instances per use case? I.e. one for internal dashboards and one for public leaderboard?
+
+The Prometheus docs state the following with regard to [Denial of Service](https://prometheus.io/docs/operating/security/#denial-of-service):
+
+> There are some mitigations in place for excess load or expensive queries. However, if too many or too expensive queries/metrics are provided components will fall over. It is more likely that a component will be accidentally taken out by a trusted user than by malicious action.
+
+So if we are concerned about the public leaderboard crashing the Prometheus instance that we use for internal dashboards, we may want to host two separate instances. This seems feasible by configuring OTEL Collector to export to two different Prometheus instances. This is a one-way door; I suggest sticking with one instance because Grafana Cloud guarantees 99.5% uptime.
+
+### Which actor should run OTEL Collector(s) during the incentivized testnet?
+
+#### Scenario A: Node operators
+
+![scenario a](./img/incentivized-testnet-monitoring-scenario-a.png)
+
+Pros
+
+- This deployment architecture is more representative of mainnet, where full storage node operators will run their own telemetry stack to monitor their node. Exposing node operators to OTEL Collector during the incentivized testnet lets them practice this deployment architecture prior to mainnet.
+- Node operators will have an "incentive" to maintain high uptime for their OTEL Collector.
+
+Cons
+
+- Additional operational burden for incentivized testnet participants. We can mitigate this concern by providing easy install steps and scripts.
+
+#### Scenario B: Celestia team
+
+![scenario b](./img/incentivized-testnet-monitoring-scenario-b.png)
+
+Pros
+
+- It will be easier for node operators to participate if they only have to deploy one piece of software (celestia-node) and not two (celestia-node and OTEL Collector).
+
+Cons
+
+- At this time, there are no cloud managed offerings for OTEL Collector. There is minimal documentation on the scale of workload an individual OTEL Collector can handle. We'd have to design and operate a highly available OTEL Collector fleet to maintain high uptime for node operators. We'd also have to mitigate DDOS attacks against our OTEL Collector endpoint (cursory investigation below).
+
+#### Scenario C: Node operators by default and Celestia team as a fallback
+
+![scenario c](./img/incentivized-testnet-monitoring-scenario-c.png)
+
+Pros
+
+- Optionality for node operators who don't want to deploy an OTEL Collector to rely on a best-effort OTEL Collector provided by the Celestia team.
+
+Cons
+
+- This option increases the cognitive load on node operators, who now have an additional decision to make at deployment time.
+- Increased operational burden on the Celestia team during the incentivized testnet (and beyond).
+
+#### Scenario D: Celestia team by default and node operators if they want
+
+The diagram, pros, and cons are the same as scenario C.
+
+This scenario differs from Scenario C in the docs for node deployment. The docs specify a Celestia team managed OTEL Collector endpoint by default. For node operators who want self-managed telemetry, the docs contain steps on how to set up a node-operator-managed agent OTEL Collector and how to proxy metrics to the Celestia team managed gateway OTEL Collector.
+
+The docs may also contain steps on connecting the agent OTEL Collector to Prometheus and Grafana.
+
+#### Decision
+
+We agreed on **Scenario D** at this time. Our docs will describe how to connect to the Celestia team managed OTEL Collector (i.e. `--metrics.endpoint`). Our docs will also include an advanced guide for node operators if they wish to deploy a self-managed OTEL Collector.
+
+### Should node operators be able to configure celestia-node to export to multiple OTEL collectors?
+
+This is not supported (see [open-telemetry/opentelemetry-go#3055](https://github.com/open-telemetry/opentelemetry-go/issues/3055)). This means node operators can only configure one OTLP backend for their node. If they wish to export metrics to multiple OTEL Collectors, then they must route traffic through an agent OTEL Collector that they have deployed. Their agent OTEL Collector can forward metrics to any other OTEL Collector that they have access to.
+
+### How to mitigate DDOS attacks against OTEL Collector?
+
+- [https://medium.com/opentelemetry/securing-your-opentelemetry-collector-1a4f9fa5bd6f](https://medium.com/opentelemetry/securing-your-opentelemetry-collector-1a4f9fa5bd6f)
+  - Uses an authentication server (Keycloak) to handle OAuth2 between service and remote OTEL Collector.
+- [https://medium.com/@michaelericksen_12434/securing-your-opentelemetry-collector-updated-3f9884e37a09](https://medium.com/@michaelericksen_12434/securing-your-opentelemetry-collector-updated-3f9884e37a09)
+  - Also uses an authentication server (Keycloak). Runs services in Docker.
+
+### How to mitigate DDOS attacks against Prometheus?
+
+It’s possible to create an API key with the `MetricsPublisher` role on cloud hosted Prometheus. These API keys can be distributed to participants if they are expected to remote write to Prometheus.
+
+### How to send data over HTTPS
+
+#### OTEL Collector -> Prometheus
+
+Uses HTTPS by default. No additional configuration is needed besides copying the remote endpoint from Grafana Cloud.
+
+#### OTEL Collector -> Uptrace
+
+Uses HTTPS by default. No additional configuration is needed besides copying the data source name from Uptrace.
+
+#### celestia-node -> OTEL Collector with public certificate
+
+In the case where an OTEL Collector is running on a different host than celestia-node, the OTEL Collector must be configured with a public certificate so that celestia-node can send data to it over HTTPS.
+
+1. Ensure that celestia-node doesn't use [`WithInsecure`](https://github.com/open-telemetry/opentelemetry-go/blob/main/exporters/otlp/otlpmetric/otlpmetrichttp/options.go#L161) when constructing the otlptracehttp client
+1. Configure the OTEL Collector receiver to run with a TLS certificate and key. A TLS certificate can be generated with [LetsEncrypt](https://letsencrypt.org/). Example:
+
+```yaml
+receivers:
+  otlp:
+    protocols:
+      grpc:
+      http:
+        endpoint: otel.collector.celestia.observer:4318
+        tls:
+          cert_file: /home/fullchain.pem
+          key_file: /home/privkey.pem
+```
+
+#### celestia-node -> OTEL Collector without public certificate
+
+In the case where a node operator wants to send data from celestia-node to an OTEL Collector without a public certificate (e.g. a node-operator managed OTEL Collector), they can issue a self-signed certificate in order to send data over HTTPS. Alternatively, they can send data over HTTP.
+
+1. Follow the steps at [setting up certificates](https://opentelemetry.io/docs/collector/configuration/#setting-up-certificates)
+1. Configure the OTEL Collector receiver to run with this self-signed certificate. Example:
+
+    ```yaml
+    receivers:
+      otlp:
+        protocols:
+          grpc:
+          http:
+            tls:
+              cert_file: /home/cert.pem
+              key_file: /home/cert-key.pem
+    ```
+
+1. Ensure that celestia-node runs with a TLS config that contains the Root CA created in step 1. See [sample code](https://github.com/celestiaorg/celestia-node/blob/rp/tracing-with-tls/cmd/flags_misc.go#L173-L199)
+
+#### What are the resource requirements of OTEL Collector?
+
+Official resource requirements are not stated in the OTEL Collector docs. However, [performance benchmarks](https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/performance.md#results-without-tail-based-sampling) indicate that OTEL Collector is able to handle up to 10K traces ([units unclear](https://github.com/open-telemetry/opentelemetry-collector/issues/5780)) on 1 CPU and 2GB RAM. Given a [light node](https://docs.celestia.org/nodes/light-node#hardware-requirements) runs on 1 CPU and 2GB RAM, it seems feasible to run an OTEL Collector agent on the most resource constrained target hardware.
+
+## Status
+
+Proposed
diff --git a/docs/adr/adr-011-blocksync-overhaul-part-1.md b/docs/adr/adr-011-blocksync-overhaul-part-1.md
new file mode 100644
index 0000000000..2967f696ac
--- /dev/null
+++ b/docs/adr/adr-011-blocksync-overhaul-part-1.md
@@ -0,0 +1,420 @@
+# ADR #011: Block Data Sync Overhaul: Part I - Storage
+
+## Changelog
+
+- 23.08.22: Initial unfinished draft
+- 14.09.22: The first finished version
+- 02.12.22: Fixing missed gaps
+
+## Authors
+
+- @Wondertan
+
+> I start to like writing ADRs step-by-step. Also, there is a trick that helps: imagine you are talking to a dev
+> who just joined the team and needs onboarding.
+
+## Glossary
+
+- LN - Light Node
+- FN - Full Node
+- BN - Bridge Node
+- [EDS (Extended Data Square)][eds] - plain Block data omitting headers and other block metadata
+- ODS - Original Data Square or the first quadrant of the EDS. Contains real user data and padding
+- [NMT][nmt] - Namespaced Merkle Tree
+- [DataHash][dh] - Hash commitment over [DAHeader][dah]
+
+## Context
+
+### Status Quo
+
+Current block data synchronization is done over Bitswap, traversing NMT trees of rows and columns of data square quadrants.
+We know from empirical evidence that it takes more than 200 seconds (~65,000 network requests) to download a 4MB block of
+256-byte shares, which is unacceptable and must be much less than the block time (15-30s).
+
+The DASing, on the other hand, shows acceptable metrics for the block sizes we are aiming for initially. In the case of
+the same block, a DAS operation takes 50ms * 8 (technically 9) blocking requests, which is ~400ms in an ideal scenario
+(excluding disk IO).
+
+Getting data by namespace also needs to be improved.
+The time it takes currently lies between BlockSync and DASing,
+where more data equals more requests and more time to fulfill the requests.
+
+### Mini Node Offsite 2022 Berlin
+
+To facilitate and speed up the resolution of the problem, we decided to hold a team gathering in Berlin for four days.
+With the help of preliminary preparations by @Wondertan and guest @willscott, we were able to find a solution
+in two days that matches the following requirements:
+
+- Sync time less than block time (ideally sub-second)
+- Data by namespace less than block time (ideally sub-second)
+- Pragmatic timeframe
+  - We need this done before the incentivized testnet
+  - We don't have time to redesign the protocol from scratch
+- Keep Bitswap, as it suffices for DAS and solves the data withholding attack
+  - Existing Bitswap logic is kept as a fallback mechanism for the case of reconstruction from light nodes
+- Keep random hash-addressed access to shares for Bitswap to work
+
+## Decision
+
+This ADR intends to outline design decisions for block data storage. In a nutshell, the decision is to use the
+___[CAR format][car]___ and ___[DAGStore][dagstore]___
+for ___extended block storage___ and a ___custom p2p Req/Resp protocol for block data syncing___ (whole block and data by
+namespace id) in the happy path. The p2p portion of the design will come in the subsequent Part II document.
+
+### Key Design Decisions
+
+- __FNs/BNs store EDSes serialized as [CAR files][car].__ The CAR format provides an
+  efficient way to store Merkle DAG data, like EDS with NMT. It packs such DAG data into a single blob which can be read
+  sequentially in one read and transferred over the wire. Additionally, [CARv2][carv2]
+  introduces pluggable indexes over the blob, allowing efficient random access to shares and NMT Proofs in one read
+  (if the index is cached in memory).
+
+- __FNs/BNs manage a top-level index for _hash_ to _CAR block file_ mapping.__ Current DASing for LNs requires FNs/BNs
+  to serve simple hash to data requests. The top-level index maps any Share/NMT Proof hash to any block CARv1 file so
+  that FNs/BNs can quickly serve DA requests.
+
+- __FNs/BNs address EDSes by `DataHash`.__ The only alternative is by height; however, it does not allow block
+  data deduplication in case EDSes are equal and couples the Data layer/pkg with the Header layer/pkg.
+
+- __FNs/BNs run a single instance of [`DAGStore`][dagstore] to manage CAR block
+  files.__ DAGStore provides the top-level indexing and CARv2-based indexing per each CAR file. In essence, it's an
+  engine for managing any CAR files with indexing, convenient abstractions, tools, recovery mechanisms, etc.
+  - __EDSes as _CARv1_ files over _CARv2_.__ CARv2 encodes indexes into the file, while DAGStore maintains CARv2-based
+    indexing. Usage of CARv1 keeps only one copy of the index, stores/transfers less metadata per EDS, and simplifies
+    reading EDS from a file.
+
+- __LNs DASing remains untouched__. The networking protocol and storage for LNs remain intact as they fulfill the
+  requirements. Bitswap usage as the backbone protocol for requesting samples and the global Badger KVStore remain unaffected.
+
+### Detailed Design
+
+> All the comments on the API definitions should be preserved and potentially improved by implementations.
+
+The new block storage design is solely additive. All the existing storage-related components and functionality
+are kept, with additional components introduced.
+Altogether, existing and new components will be recomposed to serve as the
+foundation of our improved block storage subsystem.
+
+The central data structure representing Celestia block data is the EDS (`rsmt2d.ExtendedDataSquare`), and the new storage design
+is focused on storing entire EDSes as a whole rather than a set of individual chunks, s.t. the storage subsystem
+can handle storing and streaming/serving blocks of 4MB and more.
+
+#### EDS (De-)Serialization
+
+Storing EDS as a whole requires EDS (de)serialization. For this, the [CAR format][car] is chosen.
+
+##### `eds.WriteEDS`
+
+To write EDS into a stream/file, `WriteEDS` is introduced. Internally, it
+
+- [Re-imports](https://github.com/celestiaorg/rsmt2d/blob/80d231f733e9dd8ca166c3d670470ed9a1c165d9/extendeddatasquare.go#L44) EDS similarly to
+  [`ipld.ImportShares`](https://github.com/celestiaorg/celestia-node/blob/da4f54bca1bfef86f53880ced569d37ffb4b8b84/share/add.go#L48)
+  - Using [`Blockservice`][blockservice] with [offline
+    exchange][offexchange] and an in-memory [`Blockstore`][blockstore]
+  - With [`NodeVisitor`](https://github.com/celestiaorg/celestia-node/blob/da4f54bca1bfef86f53880ced569d37ffb4b8b84/share/add.go#L63), which saves to the
+    [`Blockstore`][blockstore] only NMT Merkle proofs (no shares) _NOTE: `len(node.Links()) == 2`_
+  - Actual shares are written in a particular way explained below
+- Creates and [writes](https://github.com/ipld/go-car/blob/dab0fd5bb19dead0da1377270f37be9acf858cf0/car.go#L86) the header [`CARv1Header`](https://github.com/ipld/go-car/blob/dab0fd5bb19dead0da1377270f37be9acf858cf0/car.go#L30)
+  - Fills up the `Roots` field with `EDS.RowRoots/EDS.ColRoots` roots converted into CIDs
+- Iterates over shares in quadrant-by-quadrant order via `EDS.GetCell`
+  - [Writes](https://github.com/ipld/go-car/blob/dab0fd5bb19dead0da1377270f37be9acf858cf0/car.go#L118) the shares in row-by-row order
+- Iterates over the in-memory Blockstore and [writes](https://github.com/ipld/go-car/blob/dab0fd5bb19dead0da1377270f37be9acf858cf0/car.go#L118) the NMT Merkle
+  proofs stored in it
+
+___NOTES:___
+
+- _CAR provides [a utility](https://github.com/ipld/go-car/blob/dab0fd5bb19dead0da1377270f37be9acf858cf0/car.go#L47) to serialize any DAG into the file,
+  and there is a way to serialize EDS into a DAG (`share/ipld.ImportShares`). This approach is the simplest and traverses
+  shares and Merkle Proofs in a depth-first manner, packing them in a CAR file. However, this is incompatible with the
+  requirement of being able to truncate the CAR file, reading out __only__ the first quadrant out of it without NMT proofs,
+  so serialization must be different from the utility to support that._
+- _Alternatively to `WriteEDS`, an `EDSReader` could be introduced to make EDS-to-stream handling more idiomatic
+  and efficient in some cases, at the cost of a more complex implementation._
+
+```go
+// WriteEDS writes the whole EDS into the given io.Writer as a CARv1 file,
+// with all its shares and recomputed NMT proofs.
+func WriteEDS(context.Context, *rsmt2d.ExtendedDataSquare, io.Writer) error
+```
+
+##### `eds.ReadEDS`
+
+To read an EDS out of a stream/file, `ReadEDS` is introduced. Internally, it
+
+- Imports EDS with an empty pre-allocated slice. _NOTE: Size can be taken from the CARHeader._
+- Wraps the given `io.Reader` with [`BlockReader`](https://github.com/ipld/go-car/blob/dab0fd5bb19dead0da1377270f37be9acf858cf0/v2/block_reader.go#L17)
+- Reads out blocks one by one and fills up the EDS quadrant via `EDS.SetCell`
+  - In total, there should be a quadrant's worth of share reads.
+- Recomputes and validates the EDS via `EDS.Repair`
+
+```go
+// ReadEDS reads an EDS quadrant (1/4) from an io.Reader CAR file.
+// It expects strictly the first EDS quadrant (top left).
+// The returned EDS is guaranteed to be full and valid against the DataHash, otherwise ReadEDS errors.
+func ReadEDS(context.Context, io.Reader, DataHash) (*rsmt2d.ExtendedDataSquare, error)
+```
+
+##### `eds.ODSReader`
+
+To read only a quadrant/ODS out of a full EDS, `ODSReader` is introduced.
+
+Its constructor wraps any `io.Reader` containing an EDS generated by `WriteEDS` and produces an `io.Reader` which reads
+exactly an ODS out of it, similar to `io.LimitReader`. The size of the EDS and ODS can be determined by the number
+of CIDs in the `CARHeader`.
+
+#### `eds.Store`
+
+FNs/BNs keep an `eds.Store` to manage every EDS on the disk. The `eds.Store` type is introduced in the `eds` pkg.
+Each EDS, together with its Merkle Proofs, is serialized into a CARv1 file. All the serialized CARv1 file blobs are mounted on
+DAGStore via [Local FS Mounts](https://github.com/filecoin-project/dagstore/blob/master/docs/design.md#mounts) and registered
+as [Shards](https://github.com/filecoin-project/dagstore/blob/master/docs/design.md#shards).
+
+The introduced `eds.Store` also maintains (via DAGStore) a top-level index enabling granular and efficient random access
+to every share and/or Merkle proof over every registered CARv1 file. The `eds.Store` provides a custom `Blockstore` interface
+implementation to achieve access. The main use-case is randomized sampling over the whole chain of EDS block data
+and getting data by namespace.
+
+```go
+type Store struct {
+    basepath string
+    dgstr    dagstore.DAGStore
+    topIdx   index.Inverted
+    carIdx   index.FullIndexRepo
+    mounts   *mount.Registry
+    ...
+}
+
+// NewStore constructs Store over an OS directory to store CARv1 files of EDSes and indices for them.
+// The Datastore is used to keep the inverted/top-level index.
+func NewStore(basepath string, ds datastore.Batching) *Store {
+    topIdx := index.NewInverted(ds)
+    carIdx := index.NewFSRepo(basepath + "/index")
+    mounts := mount.NewRegistry()
+    err := mounts.Register("fs", &mount.FSMount{FS: os.DirFS(basepath + "/eds/")}) // registration is a must
+    if err != nil {
+        panic(err)
+    }
+
+    return &Store{
+        basepath: basepath,
+        dgstr:    dagstore.New(dagstore.Config{...}),
+        topIdx:   topIdx,
+        carIdx:   carIdx,
+        mounts:   mounts,
+    }
+}
+```
+
+___NOTE:___ _EDSStore should have lifecycle methods (Start/Stop)._
+
+##### `eds.Store.Put`
+
+To write an entire EDS, a `Put` method is introduced. Internally, it
+
+- Opens a file under the `Store.Path/DataHash` path
+- Serializes the EDS into the file via `share.WriteEDS`
+- Wraps it with `DAGStore`'s [FileMount][filemount]
+- Converts `DataHash` to the [`shard.Key`][shardkey]
+- Registers the `Mount` as a `Shard` on the `DAGStore`
+
+___NOTE:___ _Registering on the DAGStore populates the top-level index with shares/proofs accessible from the stored EDS, which is
+out of the scope of the document._
+
+```go
+// Put stores the given data square with DataHash as a key.
+//
+// The square is verified on the Exchange level, and Put only stores the square, trusting it.
+// The resulting file stores all the shares and NMT Merkle Proofs of the EDS.
+// Additionally, the file gets indexed s.t. Store.Blockstore can access them.
+func (s *Store) Put(context.Context, DataHash, *rsmt2d.ExtendedDataSquare) error
+```
+
+##### `eds.Store.GetCAR`
+
+To read an EDS as a byte stream, a `GetCAR` method is introduced. Internally, it
+
+- Converts `DataHash` to the [`shard.Key`][shardkey]
+- Acquires a `ShardAccessor` and returns an `io.ReadCloser` from it
+
+___NOTES:___
+
+- _`DAGStore`'s `ShardAccessor` has to be extended to return an `io.ReadCloser`. Currently, it only returns
+  a `Blockstore` of the CAR_
+- _The returned `io.ReadCloser` represents the full EDS. To get a quadrant, an ODSReader should be used instead_
+
+```go
+// GetCAR takes a DataHash and returns a buffered reader to the respective EDS serialized as a CARv1 file.
+//
+// The Reader strictly reads out the full EDS, and its integrity is not verified.
+//
+// Caller must Close the returned reader after reading.
+func (s *Store) GetCAR(context.Context, DataHash) (io.ReadCloser, error)
+```
+
+##### `eds.Store.Blockstore`
+
+The `Blockstore` method returns a [`Blockstore`][blockstore] interface implementation instance, providing random access over
+shares and NMT Merkle proofs in every stored EDS. It is required for FNs/BNs to serve DAS requests over Bitswap.
+
+There is a `Blockstore` over [`DAGStore`][dagstore] and [`CARv2`][carv2] indexes.
+
+___NOTES:___
+
+- _We can either use DAGStore's one or implement a custom one optimized for our needs._
+- _The Blockstore does not store whole Celestia Blocks, but IPFS blocks. We represent Merkle proofs and shares in IPFS
+  blocks._
+- _EDIT: We went with a custom implementation._
+
+```go
+// Blockstore returns an IPFS Blockstore providing access to individual shares/nodes of all EDS
+// registered on the Store. NOTE: The Blockstore does not store whole Celestia Blocks but IPFS blocks.
+// We represent `shares` and NMT Merkle proofs as IPFS blocks and IPLD nodes so Bitswap can access those.
+func (s *Store) Blockstore() blockstore.Blockstore
+```
+
+##### `eds.Store.CARBlockstore`
+
+The `CARBlockstore` method returns a read-only [`Blockstore`][blockstore] interface implementation
+instance to provide random access over shares and NMT Merkle proofs in a specific EDS identified by
+DataHash, along with its corresponding DAH. It is required for FNs/BNs to enable [reading data by
+namespace](#reading-data-by-namespace).
+
+___NOTES:___
+
+- _The returned Blockstore does not store whole Celestia Blocks, but IPFS blocks. We represent Merkle proofs and shares in IPFS
+  blocks._
+
+```go
+// CARBlockstore returns an IPFS Blockstore providing access to individual shares/nodes of a specific EDS identified by
+// DataHash and registered on the Store. NOTE: The Blockstore does not store whole Celestia Blocks but IPFS blocks.
+// We represent `shares` and NMT Merkle proofs as IPFS blocks and IPLD nodes so Bitswap can access those.
+func (s *Store) CARBlockstore(context.Context, DataHash) (dagstore.ReadBlockstore, error)
+```
+
+##### `eds.Store.GetDAH`
+
+The `GetDAH` method returns the DAH (`share.Root`) of the EDS identified by `DataHash`. Internally, it:
+
+- Acquires a `ShardAccessor` for the corresponding shard
+- Reads the CAR Header from the accessor
+- Converts the header's root CIDs into a `share.Root`
+- Verifies the integrity of the `share.Root` by comparing it with the `DataHash`
+
+```go
+// GetDAH returns the DataAvailabilityHeader for the EDS identified by DataHash.
+func (s *Store) GetDAH(context.Context, share.DataHash) (*share.Root, error)
+```
+
+##### `eds.Store.Get`
+
+To read an entire EDS, a `Get` method is introduced. Internally, it:
+
+- Gets a serialized EDS `io.Reader` via `Store.GetCAR`
+- Deserializes the EDS and validates it via `share.ReadEDS`
+
+___NOTE:___ _It's unnecessary, but an API ergonomics/symmetry nice-to-have._
+
+```go
+// Get reads an EDS out of the Store by the given DataHash.
+//
+// It reads only one quadrant (1/4) of the EDS and verifies the integrity of the stored data by recomputing it.
+func (s *Store) Get(context.Context, DataHash) (*rsmt2d.ExtendedDataSquare, error)
+```
+
+##### `eds.Store.Has`
+
+To check if the EDSStore keeps an EDS, a `Has` method is introduced. Internally, it:
+
+- Converts `DataHash` to the [`shard.Key`][shardkey]
+- Checks that [`GetShardInfo`](https://github.com/filecoin-project/dagstore/blob/f9e7b7b4594221c8a4840a1e9f3f6e003c1b4c52/dagstore.go#L483) does not return
+  [ErrShardUnknown](https://github.com/filecoin-project/dagstore/blob/eac7733212fdd7c80be5078659f7450b3956d2a6/dagstore.go#L55)
+
+___NOTE:___ _It's unnecessary, but an API ergonomics/symmetry nice-to-have._
+
+```go
+// Has checks if an EDS exists by the given DataHash.
+func (s *Store) Has(context.Context, DataHash) (bool, error)
+```
+
+##### `eds.Store.Remove`
+
+To remove a stored EDS, a `Remove` method is introduced. Internally, it:
+
+- Converts `DataHash` to the [`shard.Key`][shardkey]
+- Destroys the `Shard` via `DAGStore`
+  - Internally removes its `Mount` as well
+- Removes the CARv1 file from disk under the `Store.Path/DataHash` path
+- Drops indices
+
+___NOTES:___
+
+- _It's unnecessary, but an API ergonomics/symmetry nice-to-have_
+- _GC logic on the DAGStore has to be investigated so that Removing is correctly implemented_
+
+```go
+// Remove removes the EDS from the Store by the given DataHash and cleans up all the indexing.
+func (s *Store) Remove(context.Context, DataHash) error
+```
+
+#### Reading Data By Namespace
+
+Generally stays unchanged, with minor edits:
+
+- `share/ipld.GetByNamespace` is kept to load data from disk only, and not from the network anymore
+  - Using [`Blockservice`][blockservice] with [offline exchange][offexchange]
+  - Using the [`Blockstore`][blockstore] provided by `eds.Store`
+- `share/ipld.GetByNamespace` is extended to return NMT Merkle proofs
+  - Similar to `share/ipld.GetProofsForShares`
+  - Ensure Merkle proofs are not duplicated!
+
+As an extension, `share/ipld.GetByNamespace` can be modified to `share.CARByNamespace`, returning a CARv1 Reader with
+encoded shares and NMT Merkle Proofs.
+
+#### EDS Deduplication
+
+Addressing EDS by DataHash allows us to deduplicate equal EDSes. EDS equality is very unlikely to happen in practice,
+besides the empty Block case, which always produces the same EDS.
+
+#### Empty Block/EDS
+
+The empty block is a valid and small EDS. It can happen in the early stage of the network. Its body is constant, and to avoid
+transferring it over the wire, the `eds.Store` should be pre-initialized with the empty EDS value.
+
+#### EDSStore Directory Path
+
+The EDSStore on construction expects a directory to store CAR files and indices. The path should be derived
+from `node.Store.Path`. A sketch of how the pieces above might be wired together follows.
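+
+The snippet below is a hypothetical usage sketch only, assuming the constructor and lifecycle methods outlined above; `nodeStorePath`, `ds`, `dataHash`, and `square` are placeholders for values the caller already has:
+
+```go
+// Construct the Store under the node's own storage directory.
+store := eds.NewStore(nodeStorePath+"/blocks", ds)
+
+// Lifecycle methods, per the NOTE above.
+if err := store.Start(ctx); err != nil {
+    return err
+}
+defer store.Stop(ctx)
+
+// Persist a freshly synced EDS under its DataHash...
+if err := store.Put(ctx, dataHash, square); err != nil {
+    return err
+}
+
+// ...and later read it back, verifying integrity against the DataHash.
+got, err := store.Get(ctx, dataHash)
+if err != nil {
+    return err
+}
+_ = got
+```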
+
+## Alternative Approaches
+
+- Extended blocks as a set of share blobs and Merkle proofs in a global Store (_current approach with KVStore_)
+- Extended block as a single blob only (computing Merkle proofs)
+- Extended block as a single blob and Merkle proofs
+- Extended block as a set of DAG/CAR blobs
+- Extended block as a single DAG/CAR blob
+
+## Considerations
+
+- ___EDS to/from CARv2 converting performance.___ The current sync design assumes two conversions, from CAR to EDS on the
+  protocol layer and back to CAR when storing the EDS. Rsmt2d allocates on most operations with individual shares, and for
+  larger blocks during sync, these allocations put significant pressure on GC. One way to substantially alleviate this
+  is to integrate a bytes buffer pool into rsmt2d.
+
+- ___Disk usage increases from the top-level index.___ This is a temporary solution. The index will have to be removed.
+  LNs know which block they sample and can provide the `DataHash` together with the sample request over Bitswap, removing
+  the need for hash-to-eds-file mapping. This requires us to either facilitate implementation of [Bitswap's auth extension
+  ](https://github.com/ipfs/specs/pull/270) or to propose a custom Bitswap message extension. Subsequently, the Blockstore
+  implementation provided via `eds.Store` would have to be changed to expect the DataHash to be passed through the
+  `context.Context`.
+
+[dah]: https://github.com/celestiaorg/celestia-app/blob/86c9bf6b981a8b25033357fddc89ef70abf80681/pkg/da/data_availability_header.go#L28
+[dh]: https://github.com/celestiaorg/celestia-core/blob/f76d026f3525d2d4fa309c62df29d42d33d0e9c6/types/block.go#L354
+[eds]: https://github.com/celestiaorg/rsmt2d/blob/76b270f80f0b9ac966c6f6b043e31514574f90f3/extendeddatasquare.go#L10
+[nmt]: https://github.com/celestiaorg/nmt
+[car]: https://ipld.io/specs/transport/car
+[carv2]: https://ipld.io/specs/transport/car/carv2/
+[dagstore]: https://github.com/filecoin-project/dagstore
+[blockstore]: https://github.com/ipfs/go-ipfs-blockstore/blob/master/blockstore.go#L33
+[blockservice]: https://github.com/ipfs/go-blockservice/blob/master/blockservice.go#L46
+[offexchange]: https://github.com/ipfs/go-ipfs-exchange-offline/blob/master/offline.go#L16
+[shardkey]: https://github.com/filecoin-project/dagstore/blob/f9e7b7b4594221c8a4840a1e9f3f6e003c1b4c52/shard/key.go#L12
+[filemount]: https://github.com/filecoin-project/dagstore/blob/f9e7b7b4594221c8a4840a1e9f3f6e003c1b4c52/mount/file.go#L10
diff --git a/docs/adr/adr-012-daser-parallelization.md b/docs/adr/adr-012-daser-parallelization.md
new file mode 100644
index 0000000000..a1f3af885b
--- /dev/null
+++ b/docs/adr/adr-012-daser-parallelization.md
@@ -0,0 +1,77 @@
+# ADR #012: DASer parallelization
+
+## Changelog
+
+- 2022-9-12: Started
+
+## Authors
+
+@walldiss
+
+## Context
+
+DAS is the process of verifying the availability of block data by sampling chunks or shares of those blocks. The `das` package implements an engine to ensure the availability of the chain's block data via the `Availability` interface.
+Verifying the availability of block data is a priority functionality for celestia-node. Its performance could benefit significantly from parallelization, making it able to fully utilise network bandwidth.
+
+## Previous approach
+
+Share sampling, by its nature, is a network-bound operation that implies multiple network round-trips.
+The previous implementation of the DASer used a single-thread synchronous approach,
+meaning there was only one process sequentially performing sampling operations over past headers, blocking while awaiting each response.
+
+## Decision
+
+Using multiple coordinated workers running in parallel drastically improves the DASer's performance through better network bandwidth utilization. On the downside, the proposed solution brings concurrency complexity.
+
+## Detailed Design
+
+To achieve parallelization, the DASer was split into the following core components:
+
+1. The `Coordinator` holds the current state of sampled headers and defines what headers should be sampled next.
+2. `Workers` perform sampling over a range of headers and communicate the results back to the coordinator. Workers are created on demand, when `Jobs` are available. The amount of concurrently running workers is limited by the const `concurrencyLimit`. The length of the sampling range is defined by the DASer configuration param `samplingRange`.
+3. The `Subscriber` subscribes to network head updates. When new headers are found, it will notify the `Coordinator`. Recent network head blocks will be prioritized for sampling to increase the availability of the most demanded blocks.
+4. The `CheckpointStore` stores/loads the `Coordinator` state as a checkpoint to allow for seamless resuming upon restart. The `Coordinator` stores the state as a checkpoint on exit and resumes sampling from the latest state.
+It also periodically stores checkpoints to storage to avoid the situation where no checkpoint is stored upon a hard shutdown of the node.
+
+![image](./img/daser-architecture-diagram.png)
+
+Sampling flow:
+
+1. `Coordinator` checks if the worker concurrency limit is reached.
+2. If the limit is not reached, `Coordinator` forms a new sampling `Job`:
+   1. Looks if there is a `Job` at the top of the priority stack.
+   2. If nothing is in the priority stack, picks the next not-yet-sampled range for the `Job`.
+3. Launches a new `Worker` with the formed `Job`.
+4. `Worker` gets headers for the given range and samples shares.
+5. After `Worker` is done, it communicates results back to `Coordinator`.
+6. `Coordinator` updates the sampling state according to the worker results.
+
+## Concurrency limit
+
+The maximum amount of concurrently running workers is defined by the const `concurrencyLimit` = 16. This value is an approximation that came from the first basic performance tests.
+During the test, the samples/sec rate was observed as a moving average over a 30 sec window for a period of 5 min. The metric was triggered only by sampled headers with width > 2.
+
+```text
+amount of workers: 8, speed: 8.66
+
+amount of workers: 16, speed: 11.13
+
+amount of workers: 32, speed: 11.33
+
+amount of workers: 64, speed: 11.83
+```
+
+Based on these basic experiment results, values higher than 16 don’t bring much benefit. At the same time, increased parallelization comes with a cost of higher memory consumption.
+Future improvements will be discussed later and are out of the scope of this ADR.
+
+## Status
+
+Implemented
+
+## Future plans
+
+Several param values that come hardcoded in the DASer (`samplingRange`, `concurrencyLimit`, `priorityQueueSize`, `genesisHeight`, `backgroundStoreInterval`) should become configurable, so the node runner can define them based on the specific node setup. Default values should be optimized by performance testing for most common setups, and could potentially vary for different node types. A possible shape for such a config is sketched below.
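+
+The following is a hypothetical sketch of what such a config could look like; the struct and field names mirror the hardcoded constants above and are assumptions, not the final API:
+
+```go
+// Config groups the DASer parameters that are currently hardcoded.
+type Config struct {
+    // SamplingRange is the length of the header range a single Job covers.
+    SamplingRange uint64
+    // ConcurrencyLimit caps the amount of concurrently running Workers.
+    ConcurrencyLimit int
+    // PriorityQueueSize bounds the stack of recent headers prioritized for sampling.
+    PriorityQueueSize int
+    // GenesisHeight is the height sampling starts from.
+    GenesisHeight uint64
+    // BackgroundStoreInterval is how often the Coordinator checkpoints its state.
+    BackgroundStoreInterval time.Duration
+}
+
+// DefaultConfig returns a config with the concurrency limit the experiments
+// above settled on; the remaining values are illustrative placeholders.
+func DefaultConfig() Config {
+    return Config{
+        SamplingRange:           100,
+        ConcurrencyLimit:        16,
+        PriorityQueueSize:       256,
+        GenesisHeight:           1,
+        BackgroundStoreInterval: 10 * time.Minute,
+    }
+}
+```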
+ +## References + +- [DASer PR](https://github.com/celestiaorg/celestia-node/pull/988/) diff --git a/docs/adr/adr-template.md b/docs/adr/adr-template.md deleted file mode 100644 index 5f8e09e49c..0000000000 --- a/docs/adr/adr-template.md +++ /dev/null @@ -1,72 +0,0 @@ -# ADR {ADR-NUMBER}: {TITLE} - -## Changelog - -- {date}: {changelog} - -## Context - -> This section contains all the context one needs to understand the current state, and why there is a problem. It should be as succinct as possible and introduce the high level idea behind the solution. - -## Alternative Approaches - -> This section contains information around alternative options that are considered before making a decision. It should contain a explanation on why the alternative approach(es) were not chosen. - -## Decision - -> This section records the decision that was made. -> It is best to record as much info as possible from the discussion that happened. This aids in not having to go back to the Pull Request to get the needed information. - -## Detailed Design - -> This section does not need to be filled in at the start of the ADR, but must be completed prior to the merging of the implementation. -> -> Here are some common questions that get answered as part of the detailed design: -> -> - What are the user requirements? -> -> - What systems will be affected? -> -> - What new data structures are needed, what data structures will be changed? -> -> - What new APIs will be needed, what APIs will be changed? -> -> - What are the efficiency considerations (time/space)? -> -> - What are the expected access patterns (load/throughput)? -> -> - Are there any logging, monitoring or observability needs? -> -> - Are there any security considerations? -> -> - Are there any privacy considerations? -> -> - How will the changes be tested? -> -> - If the change is large, how will the changes be broken up for ease of review? -> -> - Will these changes require a breaking (major) release? -> -> - Does this change require coordination with the Celestia fork of the SDK, celestia-app/-core, or any other celestiaorg repository? - -## Status - -> A decision may be "proposed" if it hasn't been agreed upon yet, or "accepted" once it is agreed upon. Once the ADR has been implemented mark the ADR as "implemented". If a later ADR changes or reverses a decision, it may be marked as "deprecated" or "superseded" with a reference to its replacement. - -{Deprecated|Proposed|Accepted|Declined} - -## Consequences - -> This section describes the consequences, after applying the decision. All consequences should be summarized here, not just the "positive" ones. - -### Positive - -### Negative - -### Neutral - -## References - -> Are there any relevant PR comments, issues that led up to this, or articles referenced for why we made the given design choice? If so link them here! 
- -- {reference link} diff --git a/docs/adr/img/daser-architecture-diagram.png b/docs/adr/img/daser-architecture-diagram.png new file mode 100644 index 0000000000..9effa2f566 Binary files /dev/null and b/docs/adr/img/daser-architecture-diagram.png differ diff --git a/docs/adr/img/incentivized-testnet-monitoring-scenario-a.png b/docs/adr/img/incentivized-testnet-monitoring-scenario-a.png new file mode 100644 index 0000000000..248a201ab0 Binary files /dev/null and b/docs/adr/img/incentivized-testnet-monitoring-scenario-a.png differ diff --git a/docs/adr/img/incentivized-testnet-monitoring-scenario-b.png b/docs/adr/img/incentivized-testnet-monitoring-scenario-b.png new file mode 100644 index 0000000000..6d59799c09 Binary files /dev/null and b/docs/adr/img/incentivized-testnet-monitoring-scenario-b.png differ diff --git a/docs/adr/img/incentivized-testnet-monitoring-scenario-c.png b/docs/adr/img/incentivized-testnet-monitoring-scenario-c.png new file mode 100644 index 0000000000..14c8dc5b4b Binary files /dev/null and b/docs/adr/img/incentivized-testnet-monitoring-scenario-c.png differ diff --git a/fraud/bad_encoding.go b/fraud/bad_encoding.go deleted file mode 100644 index 071d672920..0000000000 --- a/fraud/bad_encoding.go +++ /dev/null @@ -1,181 +0,0 @@ -package fraud - -import ( - "bytes" - "errors" - "fmt" - - "github.com/tendermint/tendermint/pkg/consts" - "github.com/tendermint/tendermint/pkg/wrapper" - - "github.com/celestiaorg/rsmt2d" - - pb "github.com/celestiaorg/celestia-node/fraud/pb" - "github.com/celestiaorg/celestia-node/header" - "github.com/celestiaorg/celestia-node/ipld" - ipld_pb "github.com/celestiaorg/celestia-node/ipld/pb" - "github.com/celestiaorg/celestia-node/ipld/plugin" -) - -func init() { - Register(BadEncoding, UnmarshalBEFP) -} - -type BadEncodingProof struct { - headerHash []byte - BlockHeight uint64 - // ShareWithProof contains all shares from row or col. - // Shares that did not pass verification in rsmt2d will be nil. - // For non-nil shares MerkleProofs are computed. - Shares []*ipld.ShareWithProof - // Index represents the row/col index where ErrByzantineRow/ErrByzantineColl occurred. - Index uint32 - // Axis represents the axis that verification failed on. - Axis rsmt2d.Axis -} - -// CreateBadEncodingProof creates a new Bad Encoding Fraud Proof that should be propagated through network. -// The fraud proof will contain shares that did not pass verification and their relevant Merkle proofs. -func CreateBadEncodingProof( - hash []byte, - height uint64, - errByzantine *ipld.ErrByzantine, -) Proof { - - return &BadEncodingProof{ - headerHash: hash, - BlockHeight: height, - Shares: errByzantine.Shares, - Index: errByzantine.Index, - Axis: errByzantine.Axis, - } -} - -// Type returns type of fraud proof. -func (p *BadEncodingProof) Type() ProofType { - return BadEncoding -} - -// HeaderHash returns block hash. -func (p *BadEncodingProof) HeaderHash() []byte { - return p.headerHash -} - -// Height returns block height. -func (p *BadEncodingProof) Height() uint64 { - return p.BlockHeight -} - -// MarshalBinary converts BadEncodingProof to binary. 
-func (p *BadEncodingProof) MarshalBinary() ([]byte, error) { - shares := make([]*ipld_pb.Share, 0, len(p.Shares)) - for _, share := range p.Shares { - shares = append(shares, share.ShareWithProofToProto()) - } - - badEncodingFraudProof := pb.BadEncoding{ - HeaderHash: p.headerHash, - Height: p.BlockHeight, - Shares: shares, - Index: p.Index, - Axis: pb.Axis(p.Axis), - } - return badEncodingFraudProof.Marshal() -} - -// UnmarshalBEFP converts given data to BadEncodingProof. -func UnmarshalBEFP(data []byte) (Proof, error) { - befp := &BadEncodingProof{} - if err := befp.UnmarshalBinary(data); err != nil { - return nil, err - } - return befp, nil -} - -// UnmarshalBinary converts binary to BadEncodingProof. -func (p *BadEncodingProof) UnmarshalBinary(data []byte) error { - in := pb.BadEncoding{} - if err := in.Unmarshal(data); err != nil { - return err - } - befp := &BadEncodingProof{ - headerHash: in.HeaderHash, - BlockHeight: in.Height, - Shares: ipld.ProtoToShare(in.Shares), - Index: in.Index, - Axis: rsmt2d.Axis(in.Axis), - } - - *p = *befp - - return nil -} - -// Validate ensures that fraud proof is correct. -// Validate checks that provided Merkle Proofs correspond to the shares, -// rebuilds bad row or col from received shares, computes Merkle Root -// and compares it with block's Merkle Root. -func (p *BadEncodingProof) Validate(header *header.ExtendedHeader) error { - if header.Height != int64(p.BlockHeight) { - return errors.New("fraud: incorrect block height") - } - merkleRowRoots := header.DAH.RowsRoots - merkleColRoots := header.DAH.ColumnRoots - if len(merkleRowRoots) != len(merkleColRoots) { - // NOTE: This should never happen as callers of this method should not feed it with a - // malformed extended header. - panic(fmt.Sprintf( - "fraud: invalid extended header: length of row and column roots do not match. (rowRoots=%d) (colRoots=%d)", - len(merkleRowRoots), - len(merkleColRoots)), - ) - } - if int(p.Index) >= len(merkleRowRoots) { - return fmt.Errorf("fraud: invalid proof: index out of bounds (%d >= %d)", int(p.Index), len(merkleRowRoots)) - } - if len(merkleRowRoots) != len(p.Shares) { - return fmt.Errorf("fraud: invalid proof: incorrect number of shares %d != %d", len(p.Shares), len(merkleRowRoots)) - } - - root := merkleRowRoots[p.Index] - if p.Axis == rsmt2d.Col { - root = merkleColRoots[p.Index] - } - - shares := make([][]byte, len(merkleRowRoots)) - - // verify that Merkle proofs correspond to particular shares. - for index, share := range p.Shares { - if share == nil { - continue - } - shares[index] = share.Share - if ok := share.Validate(plugin.MustCidFromNamespacedSha256(root)); !ok { - return fmt.Errorf("fraud: invalid proof: incorrect share received at index %d", index) - } - } - - codec := consts.DefaultCodec() - // rebuild a row or col. - rebuiltShares, err := codec.Decode(shares) - if err != nil { - return err - } - rebuiltExtendedShares, err := codec.Encode(rebuiltShares[0 : len(shares)/2]) - if err != nil { - return err - } - rebuiltShares = append(rebuiltShares, rebuiltExtendedShares...) - - tree := wrapper.NewErasuredNamespacedMerkleTree(uint64(len(shares) / 2)) - for i, share := range rebuiltShares { - tree.Push(share, rsmt2d.SquareIndex{Axis: uint(p.Index), Cell: uint(i)}) - } - - // comparing rebuilt Merkle Root of bad row/col with respective Merkle Root of row/col from block. 
- if bytes.Equal(tree.Root(), root) { - return errors.New("fraud: invalid proof: recomputed Merkle root matches the DAH's row/column root") - } - - return nil -} diff --git a/fraud/bad_encoding_test.go b/fraud/bad_encoding_test.go deleted file mode 100644 index 5af32c7680..0000000000 --- a/fraud/bad_encoding_test.go +++ /dev/null @@ -1,29 +0,0 @@ -package fraud - -import ( - "context" - "errors" - "testing" - "time" - - mdutils "github.com/ipfs/go-merkledag/test" - "github.com/stretchr/testify/require" - - "github.com/celestiaorg/celestia-node/ipld" -) - -func TestFraudProofValidation(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), time.Second*15) - defer t.Cleanup(cancel) - bServ := mdutils.Bserv() - _, store := createService(t) - h, err := store.GetByHeight(ctx, 1) - require.NoError(t, err) - - faultDAH, err := generateByzantineError(ctx, t, h, bServ) - var errByz *ipld.ErrByzantine - require.True(t, errors.As(err, &errByz)) - p := CreateBadEncodingProof([]byte("hash"), uint64(faultDAH.Height), errByz) - err = p.Validate(faultDAH) - require.NoError(t, err) -} diff --git a/fraud/helpers.go b/fraud/helpers.go deleted file mode 100644 index 9cc685bc1d..0000000000 --- a/fraud/helpers.go +++ /dev/null @@ -1,29 +0,0 @@ -package fraud - -import ( - "context" - - "github.com/libp2p/go-libp2p-core/peer" - pubsub "github.com/libp2p/go-libp2p-pubsub" -) - -const fraudSubSuffix = "-sub" - -func getSubTopic(p ProofType) string { - return p.String() + fraudSubSuffix -} - -func join(p *pubsub.PubSub, proofType ProofType, - validate func(context.Context, ProofType, peer.ID, *pubsub.Message) pubsub.ValidationResult) (*pubsub.Topic, error) { - t, err := p.Join(getSubTopic(proofType)) - if err != nil { - return nil, err - } - err = p.RegisterTopicValidator( - getSubTopic(proofType), - func(ctx context.Context, from peer.ID, msg *pubsub.Message) pubsub.ValidationResult { - return validate(ctx, proofType, from, msg) - }, - ) - return t, err -} diff --git a/fraud/interface.go b/fraud/interface.go deleted file mode 100644 index e592ffd7f2..0000000000 --- a/fraud/interface.go +++ /dev/null @@ -1,52 +0,0 @@ -package fraud - -import ( - "context" - - logging "github.com/ipfs/go-log/v2" - - "github.com/celestiaorg/celestia-node/header" -) - -var log = logging.Logger("fraud") - -// headerFetcher aliases a function that is used to fetch an ExtendedHeader from store by height. -type headerFetcher func(context.Context, uint64) (*header.ExtendedHeader, error) - -// ProofUnmarshaler aliases a function that parses data to `Proof`. -type ProofUnmarshaler func([]byte) (Proof, error) - -// Service encompasses the behavior necessary to subscribe and broadcast -// Fraud Proofs within the network. -type Service interface { - Subscriber - Broadcaster - Getter -} - -// Broadcaster is a generic interface that sends a `Proof` to all nodes subscribed on the Broadcaster's topic. -type Broadcaster interface { - // Broadcast takes a fraud `Proof` data structure that implements standard BinaryMarshal - // interface and broadcasts it to all subscribed peers. - Broadcast(context.Context, Proof) error -} - -// Subscriber encompasses the behavior necessary to -// subscribe/unsubscribe from new FraudProof events from the -// network. -type Subscriber interface { - // Subscribe allows to subscribe on a Proof pub sub topic by its type. - Subscribe(ProofType) (Subscription, error) -} - -// Getter encompasses the behavior to fetch stored FraudProofs. 
-type Getter interface { - Get(context.Context, ProofType) ([]Proof, error) -} - -// Subscription returns a valid proof if one is received on the topic. -type Subscription interface { - // Proof returns already verified valid proof. - Proof(context.Context) (Proof, error) - Cancel() -} diff --git a/fraud/pb/proof.pb.go b/fraud/pb/proof.pb.go deleted file mode 100644 index 389725e5ba..0000000000 --- a/fraud/pb/proof.pb.go +++ /dev/null @@ -1,516 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: fraud/pb/proof.proto - -package fraud_pb - -import ( - fmt "fmt" - pb "github.com/celestiaorg/celestia-node/ipld/pb" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -type Axis int32 - -const ( - Axis_ROW Axis = 0 - Axis_COL Axis = 1 -) - -var Axis_name = map[int32]string{ - 0: "ROW", - 1: "COL", -} - -var Axis_value = map[string]int32{ - "ROW": 0, - "COL": 1, -} - -func (x Axis) String() string { - return proto.EnumName(Axis_name, int32(x)) -} - -func (Axis) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_318cb87a8bb2d394, []int{0} -} - -type BadEncoding struct { - HeaderHash []byte `protobuf:"bytes,1,opt,name=HeaderHash,proto3" json:"HeaderHash,omitempty"` - Height uint64 `protobuf:"varint,2,opt,name=Height,proto3" json:"Height,omitempty"` - Shares []*pb.Share `protobuf:"bytes,3,rep,name=Shares,proto3" json:"Shares,omitempty"` - Index uint32 `protobuf:"varint,4,opt,name=Index,proto3" json:"Index,omitempty"` - Axis Axis `protobuf:"varint,5,opt,name=Axis,proto3,enum=fraud.pb.Axis" json:"Axis,omitempty"` -} - -func (m *BadEncoding) Reset() { *m = BadEncoding{} } -func (m *BadEncoding) String() string { return proto.CompactTextString(m) } -func (*BadEncoding) ProtoMessage() {} -func (*BadEncoding) Descriptor() ([]byte, []int) { - return fileDescriptor_318cb87a8bb2d394, []int{0} -} -func (m *BadEncoding) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *BadEncoding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_BadEncoding.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *BadEncoding) XXX_Merge(src proto.Message) { - xxx_messageInfo_BadEncoding.Merge(m, src) -} -func (m *BadEncoding) XXX_Size() int { - return m.Size() -} -func (m *BadEncoding) XXX_DiscardUnknown() { - xxx_messageInfo_BadEncoding.DiscardUnknown(m) -} - -var xxx_messageInfo_BadEncoding proto.InternalMessageInfo - -func (m *BadEncoding) GetHeaderHash() []byte { - if m != nil { - return m.HeaderHash - } - return nil -} - -func (m *BadEncoding) GetHeight() uint64 { - if m != nil { - return m.Height - } - return 0 -} - -func (m *BadEncoding) GetShares() []*pb.Share { - if m != nil { - return m.Shares - } - return nil -} - -func (m *BadEncoding) GetIndex() uint32 { - if m != nil { - return m.Index - } - return 0 -} - -func (m *BadEncoding) GetAxis() Axis { - if m != nil { - return m.Axis - } - return Axis_ROW -} 
- -func init() { - proto.RegisterEnum("fraud.pb.Axis", Axis_name, Axis_value) - proto.RegisterType((*BadEncoding)(nil), "fraud.pb.BadEncoding") -} - -func init() { proto.RegisterFile("fraud/pb/proof.proto", fileDescriptor_318cb87a8bb2d394) } - -var fileDescriptor_318cb87a8bb2d394 = []byte{ - // 252 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x49, 0x2b, 0x4a, 0x2c, - 0x4d, 0xd1, 0x2f, 0x48, 0xd2, 0x2f, 0x28, 0xca, 0xcf, 0x4f, 0xd3, 0x2b, 0x28, 0xca, 0x2f, 0xc9, - 0x17, 0xe2, 0x00, 0x8b, 0xea, 0x15, 0x24, 0x49, 0x09, 0x67, 0x16, 0xe4, 0x80, 0xa5, 0x8b, 0x33, - 0x12, 0x8b, 0x52, 0x21, 0xd2, 0x4a, 0xcb, 0x19, 0xb9, 0xb8, 0x9d, 0x12, 0x53, 0x5c, 0xf3, 0x92, - 0xf3, 0x53, 0x32, 0xf3, 0xd2, 0x85, 0xe4, 0xb8, 0xb8, 0x3c, 0x52, 0x13, 0x53, 0x52, 0x8b, 0x3c, - 0x12, 0x8b, 0x33, 0x24, 0x18, 0x15, 0x18, 0x35, 0x78, 0x82, 0x90, 0x44, 0x84, 0xc4, 0xb8, 0xd8, - 0x3c, 0x52, 0x33, 0xd3, 0x33, 0x4a, 0x24, 0x98, 0x14, 0x18, 0x35, 0x58, 0x82, 0xa0, 0x3c, 0x21, - 0x35, 0x2e, 0xb6, 0x60, 0x90, 0xb1, 0xc5, 0x12, 0xcc, 0x0a, 0xcc, 0x1a, 0xdc, 0x46, 0x7c, 0x7a, - 0x20, 0xdb, 0xf4, 0x0a, 0x92, 0xf4, 0xc0, 0xc2, 0x41, 0x50, 0x59, 0x21, 0x11, 0x2e, 0x56, 0xcf, - 0xbc, 0x94, 0xd4, 0x0a, 0x09, 0x16, 0x05, 0x46, 0x0d, 0xde, 0x20, 0x08, 0x47, 0x48, 0x89, 0x8b, - 0xc5, 0xb1, 0x22, 0xb3, 0x58, 0x82, 0x55, 0x81, 0x51, 0x83, 0xcf, 0x88, 0x4f, 0x0f, 0xe6, 0x66, - 0xbd, 0xc4, 0x8a, 0xcc, 0xe2, 0x20, 0xb0, 0x9c, 0x96, 0x04, 0x17, 0x0b, 0x88, 0x27, 0xc4, 0xce, - 0xc5, 0x1c, 0xe4, 0x1f, 0x2e, 0xc0, 0x00, 0x62, 0x38, 0xfb, 0xfb, 0x08, 0x30, 0x3a, 0x49, 0x9c, - 0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91, 0x1c, 0xe3, 0x83, 0x47, 0x72, 0x8c, 0x13, 0x1e, 0xcb, 0x31, - 0x5c, 0x78, 0x2c, 0xc7, 0x70, 0xe3, 0xb1, 0x1c, 0x43, 0x12, 0x1b, 0xd8, 0x93, 0xc6, 0x80, 0x00, - 0x00, 0x00, 0xff, 0xff, 0xe8, 0xc8, 0xdd, 0xab, 0x1b, 0x01, 0x00, 0x00, -} - -func (m *BadEncoding) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *BadEncoding) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *BadEncoding) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Axis != 0 { - i = encodeVarintProof(dAtA, i, uint64(m.Axis)) - i-- - dAtA[i] = 0x28 - } - if m.Index != 0 { - i = encodeVarintProof(dAtA, i, uint64(m.Index)) - i-- - dAtA[i] = 0x20 - } - if len(m.Shares) > 0 { - for iNdEx := len(m.Shares) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Shares[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintProof(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - } - if m.Height != 0 { - i = encodeVarintProof(dAtA, i, uint64(m.Height)) - i-- - dAtA[i] = 0x10 - } - if len(m.HeaderHash) > 0 { - i -= len(m.HeaderHash) - copy(dAtA[i:], m.HeaderHash) - i = encodeVarintProof(dAtA, i, uint64(len(m.HeaderHash))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintProof(dAtA []byte, offset int, v uint64) int { - offset -= sovProof(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *BadEncoding) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.HeaderHash) - if l > 0 { - n += 1 + l + sovProof(uint64(l)) - } - if m.Height != 0 { - n 
+= 1 + sovProof(uint64(m.Height)) - } - if len(m.Shares) > 0 { - for _, e := range m.Shares { - l = e.Size() - n += 1 + l + sovProof(uint64(l)) - } - } - if m.Index != 0 { - n += 1 + sovProof(uint64(m.Index)) - } - if m.Axis != 0 { - n += 1 + sovProof(uint64(m.Axis)) - } - return n -} - -func sovProof(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozProof(x uint64) (n int) { - return sovProof(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *BadEncoding) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProof - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: BadEncoding: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: BadEncoding: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field HeaderHash", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProof - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthProof - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthProof - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.HeaderHash = append(m.HeaderHash[:0], dAtA[iNdEx:postIndex]...) - if m.HeaderHash == nil { - m.HeaderHash = []byte{} - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) - } - m.Height = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProof - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Height |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Shares", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProof - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthProof - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthProof - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Shares = append(m.Shares, &pb.Share{}) - if err := m.Shares[len(m.Shares)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) - } - m.Index = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProof - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Index |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Axis", wireType) - } - m.Axis = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProof - } - if iNdEx >= l { - return 
io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Axis |= Axis(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipProof(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthProof - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipProof(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowProof - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowProof - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowProof - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthProof - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupProof - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthProof - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthProof = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowProof = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupProof = fmt.Errorf("proto: unexpected end of group") -) diff --git a/fraud/proof.go b/fraud/proof.go deleted file mode 100644 index 86e29c5e6b..0000000000 --- a/fraud/proof.go +++ /dev/null @@ -1,90 +0,0 @@ -package fraud - -import ( - "context" - "encoding" - "fmt" - - "github.com/celestiaorg/celestia-node/header" -) - -type ErrFraudExists struct { - Proof []Proof -} - -func (e *ErrFraudExists) Error() string { - return fmt.Sprintf("fraud: %s proof exists\n", e.Proof[0].Type()) -} - -type errNoUnmarshaler struct { - proofType ProofType -} - -func (e *errNoUnmarshaler) Error() string { - return fmt.Sprintf("fraud: unmarshaler for %s type is not registered", e.proofType) -} - -type ProofType int - -const ( - BadEncoding ProofType = iota -) - -func (p ProofType) String() string { - switch p { - case BadEncoding: - return "badencoding" - default: - panic(fmt.Sprintf("fraud: invalid proof type: %d", p)) - } -} - -// Proof is a generic interface that will be used for all types of fraud proofs in the network. -type Proof interface { - // Type returns the exact type of fraud proof. - Type() ProofType - // HeaderHash returns the block hash. - HeaderHash() []byte - // Height returns the block height corresponding to the Proof. - Height() uint64 - // Validate check the validity of fraud proof. - // Validate throws an error if some conditions don't pass and thus fraud proof is not valid. - // NOTE: header.ExtendedHeader should pass basic validation otherwise it will panic if it's malformed. 
- Validate(*header.ExtendedHeader) error - - encoding.BinaryMarshaler -} - -// OnProof subscribes to the given Fraud Proof topic via the given Subscriber. -// In case a Fraud Proof is received, then the given handle function will be invoked. -func OnProof(ctx context.Context, subscriber Subscriber, p ProofType, handle func(proof Proof)) { - subscription, err := subscriber.Subscribe(p) - if err != nil { - log.Error(err) - return - } - defer subscription.Cancel() - - // At this point we receive already verified fraud proof, - // so there is no need to call Validate. - proof, err := subscription.Proof(ctx) - if err != nil { - if err != context.Canceled { - log.Errorw("reading next proof failed", "err", err) - } - return - } - - handle(proof) -} - -// Unmarshal converts raw bytes into respective Proof type. -func Unmarshal(proofType ProofType, msg []byte) (Proof, error) { - unmarshalersLk.RLock() - defer unmarshalersLk.RUnlock() - unmarshaler, ok := defaultUnmarshalers[proofType] - if !ok { - return nil, &errNoUnmarshaler{proofType: proofType} - } - return unmarshaler(msg) -} diff --git a/fraud/registry.go b/fraud/registry.go deleted file mode 100644 index 388ce26cc0..0000000000 --- a/fraud/registry.go +++ /dev/null @@ -1,17 +0,0 @@ -package fraud - -import ( - "sync" -) - -var ( - unmarshalersLk = sync.RWMutex{} - defaultUnmarshalers = map[ProofType]ProofUnmarshaler{} -) - -// Register sets unmarshaler in map by provided ProofType. -func Register(proofType ProofType, unmarshaler ProofUnmarshaler) { - unmarshalersLk.Lock() - defer unmarshalersLk.Unlock() - defaultUnmarshalers[proofType] = unmarshaler -} diff --git a/fraud/service.go b/fraud/service.go deleted file mode 100644 index c1990987e4..0000000000 --- a/fraud/service.go +++ /dev/null @@ -1,144 +0,0 @@ -package fraud - -import ( - "context" - "encoding/hex" - "errors" - "fmt" - "sync" - "time" - - "github.com/ipfs/go-datastore" - "github.com/libp2p/go-libp2p-core/peer" - pubsub "github.com/libp2p/go-libp2p-pubsub" -) - -const ( - // fetchHeaderTimeout duration of GetByHeight request to fetch an ExtendedHeader. - fetchHeaderTimeout = time.Minute * 2 -) - -// service is responsible for validating and propagating Fraud Proofs. -// It implements the Service interface. 
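The deleted registry and the OnProof helper above compose as follows; a minimal sketch inside the fraud package, assuming ProofUnmarshaler is func([]byte) (Proof, error) (its definition sits outside this hunk) and using a hypothetical badEncodingFromBytes decoder in place of the real BEFP self-registration:

func ExampleOnProof(ctx context.Context, subscriber Subscriber) {
	// Register wires a decoder so Unmarshal can turn raw pubsub payloads
	// back into a typed Proof. badEncodingFromBytes is hypothetical here.
	Register(BadEncoding, func(data []byte) (Proof, error) {
		return badEncodingFromBytes(data)
	})

	// OnProof blocks until one already-verified proof arrives on the
	// BadEncoding topic, then invokes the handler exactly once.
	OnProof(ctx, subscriber, BadEncoding, func(p Proof) {
		log.Warnw("fraud detected", "type", p.Type(), "height", p.Height())
	})
}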
-type service struct { - topicsLk sync.RWMutex - topics map[ProofType]*pubsub.Topic - - storesLk sync.RWMutex - stores map[ProofType]datastore.Datastore - - pubsub *pubsub.PubSub - getter headerFetcher - ds datastore.Datastore -} - -func NewService(p *pubsub.PubSub, getter headerFetcher, ds datastore.Datastore) Service { - return &service{ - pubsub: p, - getter: getter, - topics: make(map[ProofType]*pubsub.Topic), - stores: make(map[ProofType]datastore.Datastore), - ds: ds, - } -} - -func (f *service) Subscribe(proofType ProofType) (_ Subscription, err error) { - f.topicsLk.Lock() - defer f.topicsLk.Unlock() - t, ok := f.topics[proofType] - if !ok { - t, err = join(f.pubsub, proofType, f.processIncoming) - if err != nil { - return nil, err - } - f.topics[proofType] = t - } - subs, err := t.Subscribe() - if err != nil { - return nil, err - } - return &subscription{subs}, nil -} - -func (f *service) Broadcast(ctx context.Context, p Proof) error { - bin, err := p.MarshalBinary() - if err != nil { - return err - } - f.topicsLk.RLock() - t, ok := f.topics[p.Type()] - f.topicsLk.RUnlock() - if !ok { - return fmt.Errorf("fraud: unmarshaler for %s proof is not registered", p.Type()) - } - return t.Publish(ctx, bin) -} - -func (f *service) processIncoming( - ctx context.Context, - proofType ProofType, - from peer.ID, - msg *pubsub.Message, -) pubsub.ValidationResult { - proof, err := Unmarshal(proofType, msg.Data) - if err != nil { - log.Error(err) - if !errors.Is(err, &errNoUnmarshaler{}) { - f.pubsub.BlacklistPeer(from) - } - return pubsub.ValidationReject - } - - newCtx, cancel := context.WithTimeout(ctx, fetchHeaderTimeout) - extHeader, err := f.getter(newCtx, proof.Height()) - defer cancel() - if err != nil { - // Timeout means there is a problem with the network. - // As we cannot prove or discard Fraud Proof, user must restart the node. - if errors.Is(err, context.DeadlineExceeded) { - log.Errorw("failed to fetch header. Timeout reached.") - // TODO(@vgonkivs): add handling for this case. As we are not able to verify fraud proof. 
- } - log.Errorw("failed to fetch header to verify a fraud proof", - "err", err, "proofType", proof.Type(), "height", proof.Height()) - return pubsub.ValidationIgnore - } - err = proof.Validate(extHeader) - if err != nil { - log.Errorw("proof validation err: ", - "err", err, "proofType", proof.Type(), "height", proof.Height()) - f.pubsub.BlacklistPeer(from) - return pubsub.ValidationReject - } - log.Warnw("received fraud proof", "proofType", proof.Type(), - "height", proof.Height(), - "hash", hex.EncodeToString(extHeader.DAH.Hash()), - "from", from.String(), - ) - msg.ValidatorData = proof - f.storesLk.Lock() - store, ok := f.stores[proofType] - if !ok { - store = initStore(proofType, f.ds) - f.stores[proofType] = store - } - f.storesLk.Unlock() - err = put(ctx, store, string(proof.HeaderHash()), msg.Data) - if err != nil { - log.Error(err) - } - log.Warn("Shutting down services...") - return pubsub.ValidationAccept -} - -func (f *service) Get(ctx context.Context, proofType ProofType) ([]Proof, error) { - f.storesLk.Lock() - store, ok := f.stores[proofType] - if !ok { - store = initStore(proofType, f.ds) - f.stores[proofType] = store - } - f.storesLk.Unlock() - - return getAll(ctx, store, proofType) -} diff --git a/fraud/service_test.go b/fraud/service_test.go deleted file mode 100644 index b9065c5823..0000000000 --- a/fraud/service_test.go +++ /dev/null @@ -1,390 +0,0 @@ -package fraud - -import ( - "context" - "errors" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/ipfs/go-datastore" - "github.com/ipfs/go-datastore/sync" - mdutils "github.com/ipfs/go-merkledag/test" - "github.com/libp2p/go-libp2p-core/event" - "github.com/libp2p/go-libp2p-core/host" - "github.com/libp2p/go-libp2p-core/peer" - pubsub "github.com/libp2p/go-libp2p-pubsub" - mocknet "github.com/libp2p/go-libp2p/p2p/net/mock" - - "github.com/celestiaorg/celestia-node/ipld" -) - -func TestService_Subscribe(t *testing.T) { - s, _ := createService(t) - - _, err := s.Subscribe(BadEncoding) - require.NoError(t, err) -} - -func TestService_BroadcastFails(t *testing.T) { - s, _ := createService(t) - p := CreateBadEncodingProof([]byte("hash"), 0, &ipld.ErrByzantine{ - Index: 0, - Shares: make([]*ipld.ShareWithProof, 0), - }, - ) - require.Error(t, s.Broadcast(context.TODO(), p)) -} - -func TestService_Broadcast(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), time.Second*15) - t.Cleanup(cancel) - - s, store := createService(t) - h, err := store.GetByHeight(ctx, 1) - require.NoError(t, err) - - faultHeader, err := generateByzantineError(ctx, t, h, mdutils.Bserv()) - require.Error(t, err) - var errByz *ipld.ErrByzantine - require.True(t, errors.As(err, &errByz)) - - subs, err := s.Subscribe(BadEncoding) - require.NoError(t, err) - require.NoError(t, s.Broadcast(ctx, CreateBadEncodingProof([]byte("hash"), uint64(h.Height), errByz))) - p, err := subs.Proof(ctx) - require.NoError(t, err) - require.NoError(t, p.Validate(faultHeader)) -} - -func TestService_BlackListPeer(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) - t.Cleanup(cancel) - // create mock network - net, err := mocknet.FullMeshLinked(3) - require.NoError(t, err) - - // create first fraud service that will broadcast incorrect Fraud Proof - serviceA, store1 := createServiceWithHost(ctx, t, net.Hosts()[0]) - - h, err := store1.GetByHeight(ctx, 1) - require.NoError(t, err) - - // create and break byzantine error - _, err = 
generateByzantineError(ctx, t, h, mdutils.Bserv()) - require.Error(t, err) - var errByz *ipld.ErrByzantine - require.True(t, errors.As(err, &errByz)) - errByz.Index = 2 - - fserviceA := serviceA.(*service) - require.NotNil(t, fserviceA) - - // create second service that will receive and validate Fraud Proof - serviceB, _ := createServiceWithHost(ctx, t, net.Hosts()[1]) - - fserviceB := serviceB.(*service) - require.NotNil(t, fserviceB) - - blackList, err := pubsub.NewTimeCachedBlacklist(time.Hour * 1) - require.NoError(t, err) - // create pub sub in order to listen for Fraud Proof - psC, err := pubsub.NewGossipSub(ctx, net.Hosts()[2], // -> C - pubsub.WithMessageSignaturePolicy(pubsub.StrictNoSign), pubsub.WithBlacklist(blackList)) - require.NoError(t, err) - - addrB := host.InfoFromHost(net.Hosts()[1]) // -> B - - serviceC := NewService(psC, store1.GetByHeight, sync.MutexWrap(datastore.NewMapDatastore())) - - sub0, err := net.Hosts()[0].EventBus().Subscribe(&event.EvtPeerIdentificationCompleted{}) - require.NoError(t, err) - sub2, err := net.Hosts()[2].EventBus().Subscribe(&event.EvtPeerIdentificationCompleted{}) - require.NoError(t, err) - - // connect peers: A -> B -> C, so A and C are not connected to each other - require.NoError(t, net.Hosts()[0].Connect(ctx, *addrB)) // host[0] is A - require.NoError(t, net.Hosts()[2].Connect(ctx, *addrB)) // host[2] is C - - // wait on both peer identification events - for i := 0; i < 2; i++ { - select { - case <-sub0.Out(): - case <-sub2.Out(): - case <-ctx.Done(): - assert.FailNow(t, "timeout waiting for peers to connect") - } - } - - // subscribe to BEFP - subsA, err := serviceA.Subscribe(BadEncoding) - require.NoError(t, err) - defer subsA.Cancel() - - subsB, err := serviceB.Subscribe(BadEncoding) - require.NoError(t, err) - defer subsB.Cancel() - - subsC, err := serviceC.Subscribe(BadEncoding) - require.NoError(t, err) - defer subsC.Cancel() - - befp := CreateBadEncodingProof([]byte("hash"), uint64(h.Height), errByz) - // deregister validator in order to send Fraud Proof - fserviceA.pubsub.UnregisterTopicValidator(getSubTopic(BadEncoding)) //nolint:errcheck - // create a new validator for serviceB - fserviceB.pubsub.UnregisterTopicValidator(getSubTopic(BadEncoding)) //nolint:errcheck - f := func(ctx context.Context, from peer.ID, msg *pubsub.Message) pubsub.ValidationResult { - msg.ValidatorData = befp - return pubsub.ValidationAccept - } - fserviceB.pubsub.RegisterTopicValidator(getSubTopic(BadEncoding), f) //nolint:errcheck - bin, err := befp.MarshalBinary() - require.NoError(t, err) - topic, ok := fserviceA.topics[BadEncoding] - require.True(t, ok) - // we cannot avoid sleep because it helps to avoid flakiness - time.Sleep(time.Millisecond * 100) - err = topic.Publish(ctx, bin, pubsub.WithReadiness(pubsub.MinTopicSize(1))) - require.NoError(t, err) - - _, err = subsB.Proof(ctx) - require.NoError(t, err) - - newCtx, cancel := context.WithTimeout(ctx, time.Second*1) - t.Cleanup(cancel) - - _, err = subsC.Proof(newCtx) - require.Error(t, err) - require.False(t, blackList.Contains(net.Hosts()[0].ID())) - require.True(t, blackList.Contains(net.Hosts()[1].ID())) - // we cannot avoid sleep because it helps to avoid flakiness - time.Sleep(time.Millisecond * 100) -} - -func TestService_GossipingOfFaultBEFP(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) - t.Cleanup(cancel) - // create mock network - net, err := mocknet.FullMeshLinked(3) - require.NoError(t, err) - - // create first fraud service that will 
broadcast incorrect Fraud Proof - serviceA, store1 := createServiceWithHost(ctx, t, net.Hosts()[0]) - - h, err := store1.GetByHeight(ctx, 1) - require.NoError(t, err) - - // create and break byzantine error - _, err = generateByzantineError(ctx, t, h, mdutils.Bserv()) - require.Error(t, err) - var errByz *ipld.ErrByzantine - require.True(t, errors.As(err, &errByz)) - errByz.Index = 2 - - fserviceA := serviceA.(*service) - require.NotNil(t, fserviceA) - - blackList, err := pubsub.NewTimeCachedBlacklist(time.Hour) - require.NoError(t, err) - // create pub sub in order to listen for Fraud Proof - psB, err := pubsub.NewGossipSub(ctx, net.Hosts()[1], // -> B - pubsub.WithMessageSignaturePolicy(pubsub.StrictNoSign), pubsub.WithBlacklist(blackList)) - require.NoError(t, err) - // create second service that will receive and validate Fraud Proof - serviceB := NewService(psB, store1.GetByHeight, sync.MutexWrap(datastore.NewMapDatastore())) - fserviceB := serviceB.(*service) - require.NotNil(t, fserviceB) - addrB := host.InfoFromHost(net.Hosts()[1]) // -> B - - // create pub sub in order to listen for Fraud Proof - psC, err := pubsub.NewGossipSub(ctx, net.Hosts()[2], // -> C - pubsub.WithMessageSignaturePolicy(pubsub.StrictNoSign)) - require.NoError(t, err) - serviceC := NewService(psC, store1.GetByHeight, sync.MutexWrap(datastore.NewMapDatastore())) - - // perform subscriptions - sub0, err := net.Hosts()[0].EventBus().Subscribe(&event.EvtPeerIdentificationCompleted{}) - require.NoError(t, err) - sub2, err := net.Hosts()[2].EventBus().Subscribe(&event.EvtPeerIdentificationCompleted{}) - require.NoError(t, err) - - // establish connections - // connect peers: A -> B -> C, so A and C are not connected to each other - require.NoError(t, net.Hosts()[0].Connect(ctx, *addrB)) // host[0] is A - require.NoError(t, net.Hosts()[2].Connect(ctx, *addrB)) // host[2] is C - - // wait on both peer identification events - for i := 0; i < 2; i++ { - select { - case <-sub0.Out(): - case <-sub2.Out(): - case <-ctx.Done(): - assert.FailNow(t, "timeout waiting for peers to connect") - } - } - - // subscribe to BEFP - subsA, err := serviceA.Subscribe(BadEncoding) - require.NoError(t, err) - defer subsA.Cancel() - - subsB, err := serviceB.Subscribe(BadEncoding) - require.NoError(t, err) - defer subsB.Cancel() - - subsC, err := serviceC.Subscribe(BadEncoding) - require.NoError(t, err) - defer subsC.Cancel() - - // deregister validator in order to send Fraud Proof - fserviceA.pubsub.UnregisterTopicValidator(getSubTopic(BadEncoding)) //nolint:errcheck - // Broadcast BEFP - befp := CreateBadEncodingProof([]byte("hash"), uint64(h.Height), errByz) - bin, err := befp.MarshalBinary() - require.NoError(t, err) - // we cannot avoid sleep because it helps to avoid flakiness - time.Sleep(time.Millisecond * 100) - err = fserviceA.topics[BadEncoding].Publish(ctx, bin, pubsub.WithReadiness(pubsub.MinTopicSize(1))) - require.NoError(t, err) - - newCtx, cancel := context.WithTimeout(ctx, time.Millisecond*100) - t.Cleanup(cancel) - - _, err = subsB.Proof(newCtx) - require.Error(t, err) - require.True(t, blackList.Contains(net.Hosts()[0].ID())) - - proofs, err := serviceC.Get(ctx, BadEncoding) - require.Error(t, err) - require.Nil(t, proofs) - // we cannot avoid sleep because it helps to avoid flakiness - time.Sleep(time.Millisecond * 100) -} - -func TestService_GossipingOfBEFP(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) - t.Cleanup(cancel) - // create mock network - net, err := 
mocknet.FullMeshLinked(3) - require.NoError(t, err) - - // create first fraud service that will broadcast incorrect Fraud Proof - serviceA, store1 := createServiceWithHost(ctx, t, net.Hosts()[0]) - - h, err := store1.GetByHeight(ctx, 1) - require.NoError(t, err) - - // create and break byzantine error - _, err = generateByzantineError(ctx, t, h, mdutils.Bserv()) - require.Error(t, err) - var errByz *ipld.ErrByzantine - require.True(t, errors.As(err, &errByz)) - - fserviceA := serviceA.(*service) - require.NotNil(t, fserviceA) - - // create pub sub in order to listen for Fraud Proof - psB, err := pubsub.NewGossipSub(ctx, net.Hosts()[1], // -> B - pubsub.WithMessageSignaturePolicy(pubsub.StrictNoSign)) - require.NoError(t, err) - // create second service that will receive and validate Fraud Proof - serviceB := NewService(psB, store1.GetByHeight, sync.MutexWrap(datastore.NewMapDatastore())) - fserviceB := serviceB.(*service) - require.NotNil(t, fserviceB) - addrB := host.InfoFromHost(net.Hosts()[1]) // -> B - - // create pub sub in order to listen for Fraud Proof - psC, err := pubsub.NewGossipSub(ctx, net.Hosts()[2], // -> C - pubsub.WithMessageSignaturePolicy(pubsub.StrictNoSign)) - require.NoError(t, err) - serviceC := NewService(psC, store1.GetByHeight, sync.MutexWrap(datastore.NewMapDatastore())) - - // perform subscriptions - sub0, err := net.Hosts()[0].EventBus().Subscribe(&event.EvtPeerIdentificationCompleted{}) - require.NoError(t, err) - sub2, err := net.Hosts()[2].EventBus().Subscribe(&event.EvtPeerIdentificationCompleted{}) - require.NoError(t, err) - - // establish connections - // connect peers: A -> B -> C, so A and C are not connected to each other - require.NoError(t, net.Hosts()[0].Connect(ctx, *addrB)) // host[0] is A - require.NoError(t, net.Hosts()[2].Connect(ctx, *addrB)) // host[2] is C - - // wait on both peer identification events - for i := 0; i < 2; i++ { - select { - case <-sub0.Out(): - case <-sub2.Out(): - case <-ctx.Done(): - assert.FailNow(t, "timeout waiting for peers to connect") - } - } - - // subscribe to BEFP - subsA, err := serviceA.Subscribe(BadEncoding) - require.NoError(t, err) - defer subsA.Cancel() - - subsB, err := serviceB.Subscribe(BadEncoding) - require.NoError(t, err) - defer subsB.Cancel() - - subsC, err := serviceC.Subscribe(BadEncoding) - require.NoError(t, err) - defer subsC.Cancel() - - // deregister validator in order to send Fraud Proof - fserviceA.pubsub.UnregisterTopicValidator(getSubTopic(BadEncoding)) //nolint:errcheck - // Broadcast BEFP - befp := CreateBadEncodingProof([]byte("hash"), uint64(h.Height), errByz) - bin, err := befp.MarshalBinary() - require.NoError(t, err) - // we cannot avoid sleep because it helps to avoid flakiness - time.Sleep(time.Millisecond * 100) - err = fserviceA.topics[BadEncoding].Publish(ctx, bin, pubsub.WithReadiness(pubsub.MinTopicSize(1))) - require.NoError(t, err) - - newCtx, cancel := context.WithTimeout(ctx, time.Millisecond*100) - t.Cleanup(cancel) - - p, err := subsB.Proof(newCtx) - require.NoError(t, err) - require.NoError(t, p.Validate(h)) - - p, err = subsC.Proof(ctx) - require.NoError(t, err) - require.NoError(t, p.Validate(h)) - - proofs, err := serviceC.Get(ctx, BadEncoding) - require.NoError(t, err) - require.NoError(t, proofs[0].Validate(h)) - // we cannot avoid sleep because it helps to avoid flakiness - time.Sleep(time.Millisecond * 100) -} - -func createService(t *testing.T) (Service, *mockStore) { - ctx, cancel := context.WithTimeout(context.Background(), time.Second*15) - 
t.Cleanup(cancel) - - // create mock network - net, err := mocknet.FullMeshLinked(1) - require.NoError(t, err) - // create pubsub for host - ps, err := pubsub.NewGossipSub(ctx, net.Hosts()[0], - pubsub.WithMessageSignaturePolicy(pubsub.StrictNoSign)) - require.NoError(t, err) - store := createStore(t, 10) - return NewService(ps, store.GetByHeight, sync.MutexWrap(datastore.NewMapDatastore())), store -} - -func createServiceWithHost(ctx context.Context, t *testing.T, host host.Host) (Service, *mockStore) { - // create pubsub for host - ps, err := pubsub.NewGossipSub(ctx, host, - pubsub.WithMessageSignaturePolicy(pubsub.StrictNoSign)) - require.NoError(t, err) - store := createStore(t, 10) - return NewService(ps, store.GetByHeight, sync.MutexWrap(datastore.NewMapDatastore())), store -} diff --git a/fraud/store.go b/fraud/store.go deleted file mode 100644 index 9d97419c77..0000000000 --- a/fraud/store.go +++ /dev/null @@ -1,66 +0,0 @@ -package fraud - -import ( - "context" - "errors" - "fmt" - "sort" - - "github.com/ipfs/go-datastore" - "github.com/ipfs/go-datastore/namespace" - q "github.com/ipfs/go-datastore/query" -) - -var ( - storePrefix = "fraud" -) - -// put adds a Fraud Proof to the datastore with the given hash as the key. -func put(ctx context.Context, ds datastore.Datastore, hash string, value []byte) error { - return ds.Put(ctx, datastore.NewKey(hash), value) -} - -// query performs a custom query on the given datastore. -func query(ctx context.Context, ds datastore.Datastore, q q.Query) ([]q.Entry, error) { - results, err := ds.Query(ctx, q) - if err != nil { - return nil, err - } - - return results.Rest() -} - -// getAll queries all Fraud Proofs by their type. -func getAll(ctx context.Context, ds datastore.Datastore, proofType ProofType) ([]Proof, error) { - entries, err := query(ctx, ds, q.Query{}) - if err != nil { - return nil, err - } - if len(entries) == 0 { - return nil, datastore.ErrNotFound - } - proofs := make([]Proof, 0) - for _, data := range entries { - proof, err := Unmarshal(proofType, data.Value) - if err != nil { - if errors.Is(err, &errNoUnmarshaler{}) { - return nil, err - } - log.Warn(err) - continue - } - proofs = append(proofs, proof) - } - sort.Slice(proofs, func(i, j int) bool { - return proofs[i].Height() < proofs[j].Height() - }) - return proofs, nil -} - -func initStore(proofType ProofType, ds datastore.Datastore) datastore.Datastore { - return namespace.Wrap(ds, makeKey(proofType)) -} - -func makeKey(p ProofType) datastore.Key { - return datastore.NewKey(fmt.Sprintf("%s/%s", storePrefix, p)) -} diff --git a/fraud/store_test.go b/fraud/store_test.go deleted file mode 100644 index 3510aab9f9..0000000000 --- a/fraud/store_test.go +++ /dev/null @@ -1,81 +0,0 @@ -package fraud - -import ( - "context" - "errors" - "testing" - "time" - - "github.com/ipfs/go-datastore" - "github.com/ipfs/go-datastore/namespace" - ds_sync "github.com/ipfs/go-datastore/sync" - mdutils "github.com/ipfs/go-merkledag/test" - "github.com/stretchr/testify/require" - - "github.com/celestiaorg/celestia-node/header" - "github.com/celestiaorg/celestia-node/ipld" -) - -func TestStore_Put(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), time.Second*15) - defer t.Cleanup(cancel) - bServ := mdutils.Bserv() - _, store := createService(t) - h, err := store.GetByHeight(ctx, 1) - require.NoError(t, err) - - faultDAH, err := generateByzantineError(ctx, t, h, bServ) - var errByz *ipld.ErrByzantine - require.True(t, errors.As(err, &errByz)) - p := 
CreateBadEncodingProof([]byte("hash"), uint64(faultDAH.Height), errByz) - bin, err := p.MarshalBinary() - require.NoError(t, err) - ds := ds_sync.MutexWrap(datastore.NewMapDatastore()) - badEncodingStore := namespace.Wrap(ds, makeKey(BadEncoding)) - err = put(ctx, badEncodingStore, string(p.HeaderHash()), bin) - require.NoError(t, err) -} - -func TestStore_GetAll(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), time.Second*15) - defer t.Cleanup(cancel) - bServ := mdutils.Bserv() - _, store := createService(t) - - ds := ds_sync.MutexWrap(datastore.NewMapDatastore()) - badEncodingStore := namespace.Wrap(ds, makeKey(BadEncoding)) - faultHeaders := make([]*header.ExtendedHeader, 0) - for i := 0; i < 3; i++ { - h, err := store.GetByHeight(ctx, uint64(i+1)) - require.NoError(t, err) - faultDAH, err := generateByzantineError(ctx, t, h, bServ) - var errByz *ipld.ErrByzantine - require.True(t, errors.As(err, &errByz)) - - p := CreateBadEncodingProof(h.Hash(), uint64(faultDAH.Height), errByz) - bin, err := p.MarshalBinary() - require.NoError(t, err) - err = put(ctx, badEncodingStore, string(p.HeaderHash()), bin) - require.NoError(t, err) - faultHeaders = append(faultHeaders, faultDAH) - } - befp, err := getAll(ctx, badEncodingStore, BadEncoding) - require.NoError(t, err) - require.NotEmpty(t, befp) - for i := 0; i < len(befp); i++ { - require.NoError(t, befp[i].Validate(faultHeaders[i])) - } -} - -func Test_GetAllFailed(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), time.Second*15) - defer t.Cleanup(cancel) - - ds := ds_sync.MutexWrap(datastore.NewMapDatastore()) - badEncodingStore := namespace.Wrap(ds, makeKey(BadEncoding)) - - proofs, err := getAll(ctx, badEncodingStore, BadEncoding) - require.Error(t, err) - require.ErrorIs(t, err, datastore.ErrNotFound) - require.Nil(t, proofs) -} diff --git a/fraud/subscription.go b/fraud/subscription.go deleted file mode 100644 index e9cef33974..0000000000 --- a/fraud/subscription.go +++ /dev/null @@ -1,33 +0,0 @@ -package fraud - -import ( - "context" - "fmt" - "reflect" - - pubsub "github.com/libp2p/go-libp2p-pubsub" -) - -// subscription wraps pubsub subscription and handles Fraud Proof from the pubsub topic. 
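For reference while reading the wrapper below, the consumer-facing flow of the deleted Service/Subscription pair; a minimal sketch, assuming only the interface shape implied by this hunk (Subscribe returning a Subscription with Proof and Cancel):

func awaitBadEncoding(ctx context.Context, svc Service) (Proof, error) {
	sub, err := svc.Subscribe(BadEncoding)
	if err != nil {
		return nil, err
	}
	// Cancel releases the underlying pubsub subscription.
	defer sub.Cancel()
	// Proof blocks until processIncoming has validated and accepted a
	// proof on the topic, or until ctx is done.
	return sub.Proof(ctx)
}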
-type subscription struct { - subscription *pubsub.Subscription -} - -func (s *subscription) Proof(ctx context.Context) (Proof, error) { - if s.subscription == nil { - panic("fraud: subscription is not created") - } - data, err := s.subscription.Next(ctx) - if err != nil { - return nil, err - } - proof, ok := data.ValidatorData.(Proof) - if !ok { - panic(fmt.Sprintf("fraud: unexpected type received %s", reflect.TypeOf(data.ValidatorData))) - } - return proof, nil -} - -func (s *subscription) Cancel() { - s.subscription.Cancel() -} diff --git a/fraud/testing.go b/fraud/testing.go deleted file mode 100644 index 386f2bceff..0000000000 --- a/fraud/testing.go +++ /dev/null @@ -1,68 +0,0 @@ -package fraud - -import ( - "context" - "testing" - - "github.com/ipfs/go-blockservice" - - "github.com/celestiaorg/celestia-node/header" - "github.com/celestiaorg/celestia-node/ipld" -) - -type DummyService struct { -} - -func (d *DummyService) Broadcast(context.Context, Proof) error { - return nil -} - -func (d *DummyService) Subscribe(ProofType) (Subscription, error) { - return &subscription{}, nil -} - -func (d *DummyService) Get(context.Context, ProofType) ([]Proof, error) { - return nil, nil -} - -type mockStore struct { - headers map[int64]*header.ExtendedHeader - headHeight int64 -} - -// createStore creates a mock store and adds several random -// headers. -func createStore(t *testing.T, numHeaders int) *mockStore { - store := &mockStore{ - headers: make(map[int64]*header.ExtendedHeader), - headHeight: 0, - } - - suite := header.NewTestSuite(t, numHeaders) - - for i := 0; i < numHeaders; i++ { - header := suite.GenExtendedHeader() - store.headers[header.Height] = header - - if header.Height > store.headHeight { - store.headHeight = header.Height - } - } - return store -} - -func (m *mockStore) GetByHeight(_ context.Context, height uint64) (*header.ExtendedHeader, error) { - return m.headers[int64(height)], nil -} - -func generateByzantineError( - ctx context.Context, - t *testing.T, - h *header.ExtendedHeader, - bServ blockservice.BlockService, -) (*header.ExtendedHeader, error) { - faultHeader := header.CreateFraudExtHeader(t, h, bServ) - rtrv := ipld.NewRetriever(bServ) - _, err := rtrv.Retrieve(ctx, faultHeader.DAH) - return faultHeader, err -} diff --git a/go.mod b/go.mod index 34381f9e1d..ca449edfe7 100644 --- a/go.mod +++ b/go.mod @@ -1,292 +1,350 @@ module github.com/celestiaorg/celestia-node -go 1.18 - -replace github.com/ipfs/go-verifcid => github.com/celestiaorg/go-verifcid v0.0.1-lazypatch +go 1.21.1 require ( - github.com/BurntSushi/toml v1.2.0 - github.com/celestiaorg/celestia-app v0.5.4 - github.com/celestiaorg/go-libp2p-messenger v0.1.0 - github.com/celestiaorg/nmt v0.10.0 - github.com/celestiaorg/rsmt2d v0.5.0 - github.com/cosmos/cosmos-sdk v0.46.0-beta2.0.20220418184507-c53157dd63f6 + cosmossdk.io/errors v1.0.1 + cosmossdk.io/math v1.2.0 + github.com/BurntSushi/toml v1.3.2 + github.com/alecthomas/jsonschema v0.0.0-20220216202328-9eeeec9d044b + github.com/benbjohnson/clock v1.3.5 + github.com/celestiaorg/celestia-app v1.4.0 + github.com/celestiaorg/go-fraud v0.2.0 + github.com/celestiaorg/go-header v0.5.3 + github.com/celestiaorg/go-libp2p-messenger v0.2.0 + github.com/celestiaorg/nmt v0.20.0 + github.com/celestiaorg/rsmt2d v0.11.0 + github.com/cosmos/cosmos-sdk v0.46.14 github.com/cosmos/cosmos-sdk/api v0.1.0 - github.com/cosmos/ibc-go/v4 v4.0.0-rc0 - github.com/dgraph-io/badger/v2 v2.2007.4 - github.com/gammazero/workerpool v1.1.2 + github.com/cristalhq/jwt v1.2.0 + 
github.com/dgraph-io/badger/v4 v4.2.1-0.20240106094458-1c417aa3799c + github.com/etclabscore/go-openrpc-reflect v0.0.37 + github.com/filecoin-project/dagstore v0.5.6 + github.com/filecoin-project/go-jsonrpc v0.3.1 + github.com/gammazero/workerpool v1.1.3 github.com/gogo/protobuf v1.3.3 - github.com/gorilla/mux v1.8.0 - github.com/hashicorp/go-retryablehttp v0.7.1-0.20211018174820-ff6d014e72d9 - github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d - github.com/ipfs/go-bitswap v0.7.0 - github.com/ipfs/go-block-format v0.0.3 - github.com/ipfs/go-blockservice v0.3.0 - github.com/ipfs/go-cid v0.2.0 - github.com/ipfs/go-datastore v0.5.1 - github.com/ipfs/go-ds-badger2 v0.1.3 - github.com/ipfs/go-ipfs-blockstore v1.2.0 - github.com/ipfs/go-ipfs-exchange-interface v0.1.0 - github.com/ipfs/go-ipfs-exchange-offline v0.2.0 - github.com/ipfs/go-ipfs-routing v0.2.1 - github.com/ipfs/go-ipld-format v0.4.0 + github.com/golang/mock v1.6.0 + github.com/gorilla/mux v1.8.1 + github.com/hashicorp/go-retryablehttp v0.7.5 + github.com/hashicorp/golang-lru/v2 v2.0.7 + github.com/imdario/mergo v0.3.16 + github.com/ipfs/boxo v0.16.0 + github.com/ipfs/go-block-format v0.2.0 + github.com/ipfs/go-blockservice v0.5.0 + github.com/ipfs/go-cid v0.4.1 + github.com/ipfs/go-datastore v0.6.0 + github.com/ipfs/go-ds-badger4 v0.1.5 + github.com/ipfs/go-ipld-cbor v0.1.0 + github.com/ipfs/go-ipld-format v0.6.0 github.com/ipfs/go-log/v2 v2.5.1 - github.com/ipfs/go-merkledag v0.6.0 - github.com/libp2p/go-libp2p v0.20.2 - github.com/libp2p/go-libp2p-core v0.17.0 - github.com/libp2p/go-libp2p-kad-dht v0.16.0 - github.com/libp2p/go-libp2p-peerstore v0.6.0 - github.com/libp2p/go-libp2p-pubsub v0.7.0 - github.com/libp2p/go-libp2p-record v0.1.3 - github.com/libp2p/go-libp2p-routing-helpers v0.2.3 + github.com/ipld/go-car v0.6.2 + github.com/libp2p/go-libp2p v0.32.2 + github.com/libp2p/go-libp2p-kad-dht v0.25.2 + github.com/libp2p/go-libp2p-pubsub v0.10.0 + github.com/libp2p/go-libp2p-record v0.2.0 + github.com/libp2p/go-libp2p-routing-helpers v0.7.3 github.com/mitchellh/go-homedir v1.1.0 - github.com/multiformats/go-base32 v0.0.4 - github.com/multiformats/go-multiaddr v0.6.0 - github.com/multiformats/go-multihash v0.2.0 - github.com/raulk/go-watchdog v1.3.0 - github.com/spf13/cobra v1.5.0 + github.com/multiformats/go-base32 v0.1.0 + github.com/multiformats/go-multiaddr v0.12.2 + github.com/multiformats/go-multiaddr-dns v0.3.1 + github.com/multiformats/go-multihash v0.2.3 + github.com/open-rpc/meta-schema v0.0.0-20201029221707-1b72ef2ea333 + github.com/prometheus/client_golang v1.18.0 + github.com/pyroscope-io/client v0.7.2 + github.com/pyroscope-io/otel-profiling-go v0.5.0 + github.com/rollkit/go-da v0.4.0 + github.com/spf13/cobra v1.8.0 github.com/spf13/pflag v1.0.5 - github.com/stretchr/testify v1.8.0 - github.com/tendermint/tendermint v0.35.4 - go.opentelemetry.io/otel v1.8.0 - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v0.31.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.8.0 - go.opentelemetry.io/otel/metric v0.31.0 - go.opentelemetry.io/otel/sdk v1.8.0 - go.opentelemetry.io/otel/sdk/metric v0.31.0 - go.opentelemetry.io/otel/trace v1.8.0 - go.uber.org/fx v1.17.1 - golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e - golang.org/x/sync v0.0.0-20220513210516-0976fa681c29 - google.golang.org/grpc v1.48.0 + github.com/stretchr/testify v1.8.4 + github.com/tendermint/tendermint v0.34.28 + go.opentelemetry.io/contrib/instrumentation/runtime v0.45.0 + 
go.opentelemetry.io/otel v1.22.0 + go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v0.45.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.22.0 + go.opentelemetry.io/otel/metric v1.22.0 + go.opentelemetry.io/otel/sdk v1.22.0 + go.opentelemetry.io/otel/sdk/metric v1.22.0 + go.opentelemetry.io/otel/trace v1.22.0 + go.opentelemetry.io/proto/otlp v1.0.0 + go.uber.org/fx v1.20.1 + go.uber.org/zap v1.26.0 + golang.org/x/crypto v0.19.0 + golang.org/x/exp v0.0.0-20240110193028-0dcbfd608b1e + golang.org/x/sync v0.6.0 + golang.org/x/text v0.14.0 + google.golang.org/grpc v1.60.1 + google.golang.org/protobuf v1.32.0 ) require ( - cloud.google.com/go v0.100.2 // indirect - cloud.google.com/go/compute v1.6.1 // indirect - cloud.google.com/go/iam v0.3.0 // indirect - cloud.google.com/go/storage v1.14.0 // indirect + cloud.google.com/go v0.110.10 // indirect + cloud.google.com/go/compute v1.23.3 // indirect + cloud.google.com/go/compute/metadata v0.2.3 // indirect + cloud.google.com/go/iam v1.1.5 // indirect + cloud.google.com/go/storage v1.30.1 // indirect filippo.io/edwards25519 v1.0.0-rc.1 // indirect - github.com/99designs/keyring v1.1.6 // indirect + github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 // indirect + github.com/99designs/keyring v1.2.1 // indirect + github.com/ChainSafe/go-schnorrkel v1.0.0 // indirect + github.com/Jorropo/jsync v1.0.1 // indirect + github.com/Microsoft/go-winio v0.6.1 // indirect + github.com/PuerkitoBio/purell v1.1.1 // indirect + github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect + github.com/StackExchange/wmi v1.2.1 // indirect github.com/Workiva/go-datastructures v1.0.53 // indirect - github.com/armon/go-metrics v0.4.0 // indirect - github.com/aws/aws-sdk-go v1.40.45 // indirect - github.com/benbjohnson/clock v1.3.0 // indirect + github.com/armon/go-metrics v0.4.1 // indirect + github.com/aws/aws-sdk-go v1.44.122 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d // indirect github.com/bgentry/speakeasy v0.1.0 // indirect - github.com/btcsuite/btcd v0.22.1 // indirect - github.com/btcsuite/btcd/btcec/v2 v2.1.3 // indirect - github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 // indirect - github.com/celestiaorg/go-leopard v0.1.0 // indirect + github.com/bits-and-blooms/bitset v1.7.0 // indirect + github.com/btcsuite/btcd/btcec/v2 v2.3.2 // indirect github.com/celestiaorg/merkletree v0.0.0-20210714075610-a84dc3ddbbe4 // indirect - github.com/cenkalti/backoff/v4 v4.1.3 // indirect + github.com/celestiaorg/quantum-gravity-bridge/v2 v2.1.2 // indirect + github.com/cenkalti/backoff/v4 v4.2.1 // indirect github.com/cespare/xxhash v1.1.0 // indirect - github.com/cespare/xxhash/v2 v2.1.2 // indirect - github.com/cheekybits/genny v1.0.0 // indirect - github.com/coinbase/rosetta-sdk-go v0.7.8 // indirect - github.com/confio/ics23/go v0.7.0 // indirect - github.com/containerd/cgroups v0.0.0-20201119153540-4cbc285b3327 // indirect - github.com/coreos/go-systemd/v22 v22.3.2 // indirect - github.com/cosmos/btcutil v1.0.4 // indirect - github.com/cosmos/cosmos-proto v1.0.0-alpha7 // indirect - github.com/cosmos/cosmos-sdk/errors v1.0.0-beta.3 // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/chzyer/readline v1.5.1 // indirect + github.com/cockroachdb/apd/v2 v2.0.2 // indirect + github.com/coinbase/rosetta-sdk-go v0.7.9 // indirect + 
github.com/cometbft/cometbft-db v0.7.0 // indirect + github.com/confio/ics23/go v0.9.1 // indirect + github.com/consensys/bavard v0.1.13 // indirect + github.com/consensys/gnark-crypto v0.12.1 // indirect + github.com/containerd/cgroups v1.1.0 // indirect + github.com/coreos/go-systemd/v22 v22.5.0 // indirect + github.com/cosmos/btcutil v1.0.5 // indirect + github.com/cosmos/cosmos-proto v1.0.0-alpha8 // indirect github.com/cosmos/go-bip39 v1.0.0 // indirect + github.com/cosmos/gogoproto v1.4.11 // indirect github.com/cosmos/gorocksdb v1.2.0 // indirect - github.com/cosmos/iavl v0.18.0 // indirect - github.com/cosmos/ledger-cosmos-go v0.11.1 // indirect - github.com/cosmos/ledger-go v0.9.2 // indirect + github.com/cosmos/iavl v0.19.6 // indirect + github.com/cosmos/ibc-go/v6 v6.2.0 // indirect + github.com/cosmos/ledger-cosmos-go v0.13.2 // indirect + github.com/crate-crypto/go-kzg-4844 v0.3.0 // indirect github.com/creachadair/taskgroup v0.3.2 // indirect github.com/cskr/pubsub v1.0.2 // indirect - github.com/danieljoos/wincred v1.0.2 // indirect + github.com/danieljoos/wincred v1.1.2 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect - github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 // indirect + github.com/deckarep/golang-set/v2 v2.1.0 // indirect + github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect + github.com/deepmap/oapi-codegen v1.8.2 // indirect github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f // indirect - github.com/dgraph-io/ristretto v0.1.0 // indirect + github.com/dgraph-io/badger/v2 v2.2007.4 // indirect + github.com/dgraph-io/ristretto v0.1.1 // indirect github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 // indirect - github.com/docker/go-units v0.4.0 // indirect - github.com/dustin/go-humanize v1.0.0 // indirect - github.com/dvsekhvalnov/jose2go v0.0.0-20200901110807-248326c1351b // indirect - github.com/elastic/gosigar v0.12.0 // indirect + github.com/docker/go-units v0.5.0 // indirect + github.com/dustin/go-humanize v1.0.1-0.20200219035652-afde56e7acac // indirect + github.com/dvsekhvalnov/jose2go v1.5.0 // indirect + github.com/elastic/gosigar v0.14.2 // indirect + github.com/etclabscore/go-jsonschema-walk v0.0.6 // indirect + github.com/ethereum/c-kzg-4844 v0.3.1 // indirect + github.com/ethereum/go-ethereum v1.13.2 // indirect github.com/felixge/httpsnoop v1.0.1 // indirect github.com/flynn/noise v1.0.0 // indirect github.com/francoispqt/gojay v1.2.13 // indirect - github.com/fsnotify/fsnotify v1.5.4 // indirect - github.com/gammazero/deque v0.1.0 // indirect + github.com/fsnotify/fsnotify v1.6.0 // indirect + github.com/gammazero/deque v0.2.0 // indirect github.com/go-kit/kit v0.12.0 // indirect - github.com/go-logr/logr v1.2.3 // indirect + github.com/go-kit/log v0.2.1 // indirect + github.com/go-logfmt/logfmt v0.5.1 // indirect + github.com/go-logr/logr v1.4.1 // indirect github.com/go-logr/stdr v1.2.2 // indirect - github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 // indirect + github.com/go-ole/go-ole v1.2.6 // indirect + github.com/go-openapi/jsonpointer v0.19.5 // indirect + github.com/go-openapi/jsonreference v0.19.4 // indirect + github.com/go-openapi/spec v0.19.11 // indirect + github.com/go-openapi/swag v0.19.11 // indirect + github.com/go-stack/stack v1.8.1 // indirect + github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 // indirect - 
github.com/godbus/dbus/v5 v5.0.4 // indirect + github.com/godbus/dbus/v5 v5.1.0 // indirect github.com/gogo/gateway v1.1.0 // indirect - github.com/golang/glog v1.0.0 // indirect + github.com/golang/glog v1.1.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/protobuf v1.5.2 // indirect - github.com/golang/snappy v0.0.4 // indirect - github.com/google/btree v1.0.1 // indirect + github.com/golang/protobuf v1.5.3 // indirect + github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect + github.com/google/btree v1.1.2 // indirect + github.com/google/flatbuffers v1.12.1 // indirect + github.com/google/go-cmp v0.6.0 // indirect github.com/google/gopacket v1.1.19 // indirect github.com/google/orderedcode v0.0.1 // indirect - github.com/google/uuid v1.3.0 // indirect - github.com/googleapis/gax-go/v2 v2.4.0 // indirect + github.com/google/pprof v0.0.0-20231023181126-ff6d637d2a7b // indirect + github.com/google/s2a-go v0.1.7 // indirect + github.com/google/uuid v1.5.0 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect + github.com/googleapis/gax-go/v2 v2.12.0 // indirect github.com/gorilla/handlers v1.5.1 // indirect github.com/gorilla/websocket v1.5.0 // indirect github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 // indirect - github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c // indirect + github.com/gtank/merlin v0.1.1 // indirect + github.com/gtank/ristretto255 v0.1.2 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect - github.com/hashicorp/go-getter v1.5.11 // indirect + github.com/hashicorp/go-getter v1.7.0 // indirect github.com/hashicorp/go-immutable-radix v1.3.1 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/hashicorp/go-safetemp v1.0.0 // indirect - github.com/hashicorp/go-version v1.4.0 // indirect + github.com/hashicorp/go-version v1.6.0 // indirect + github.com/hashicorp/golang-lru v1.0.2 // indirect + github.com/hashicorp/golang-lru/arc/v2 v2.0.5 // indirect github.com/hashicorp/hcl v1.0.0 // indirect github.com/hdevalence/ed25519consensus v0.0.0-20220222234857-c00d1f31bab3 // indirect - github.com/huin/goupnp v1.0.3-0.20220313090229-ca81a64b4204 // indirect + github.com/holiman/uint256 v1.2.3 // indirect + github.com/huin/goupnp v1.3.0 // indirect + github.com/iancoleman/orderedmap v0.1.0 // indirect github.com/improbable-eng/grpc-web v0.15.0 // indirect - github.com/inconshreveable/mousetrap v1.0.0 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/influxdata/influxdb-client-go/v2 v2.12.2 // indirect + github.com/influxdata/line-protocol v0.0.0-20210311194329-9aa0e372d097 // indirect github.com/ipfs/bbloom v0.0.4 // indirect + github.com/ipfs/go-ipfs-blockstore v1.3.1 // indirect github.com/ipfs/go-ipfs-delay v0.0.1 // indirect - github.com/ipfs/go-ipfs-ds-help v1.1.0 // indirect - github.com/ipfs/go-ipfs-pq v0.0.2 // indirect - github.com/ipfs/go-ipfs-util v0.0.2 // indirect - github.com/ipfs/go-ipld-cbor v0.0.5 // indirect - github.com/ipfs/go-ipld-legacy v0.1.0 // indirect - github.com/ipfs/go-ipns v0.1.2 // indirect + github.com/ipfs/go-ipfs-ds-help v1.1.1 // indirect + 
github.com/ipfs/go-ipfs-exchange-interface v0.2.1 // indirect + github.com/ipfs/go-ipfs-pq v0.0.3 // indirect + github.com/ipfs/go-ipfs-util v0.0.3 // indirect + github.com/ipfs/go-ipld-legacy v0.2.1 // indirect github.com/ipfs/go-log v1.0.5 // indirect + github.com/ipfs/go-merkledag v0.11.0 // indirect github.com/ipfs/go-metrics-interface v0.0.1 // indirect - github.com/ipfs/go-peertaskqueue v0.7.0 // indirect - github.com/ipfs/go-verifcid v0.0.1 // indirect - github.com/ipld/go-codec-dagpb v1.3.0 // indirect - github.com/ipld/go-ipld-prime v0.16.0 // indirect + github.com/ipfs/go-peertaskqueue v0.8.1 // indirect + github.com/ipfs/go-verifcid v0.0.2 // indirect + github.com/ipld/go-car/v2 v2.11.0 // indirect + github.com/ipld/go-codec-dagpb v1.6.0 // indirect + github.com/ipld/go-ipld-prime v0.21.0 // indirect github.com/jackpal/go-nat-pmp v1.0.2 // indirect github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect github.com/jbenet/goprocess v0.1.4 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/jmhodges/levigo v1.0.0 // indirect - github.com/keybase/go-keychain v0.0.0-20190712205309-48d3d31d256d // indirect - github.com/klauspost/compress v1.15.1 // indirect - github.com/klauspost/cpuid/v2 v2.0.12 // indirect - github.com/koron/go-ssdp v0.0.2 // indirect - github.com/lib/pq v1.10.6 // indirect - github.com/libp2p/go-buffer-pool v0.0.2 // indirect + github.com/josharian/intern v1.0.0 // indirect + github.com/klauspost/compress v1.17.2 // indirect + github.com/klauspost/cpuid/v2 v2.2.6 // indirect + github.com/klauspost/reedsolomon v1.11.8 // indirect + github.com/koron/go-ssdp v0.0.4 // indirect + github.com/lib/pq v1.10.7 // indirect + github.com/libp2p/go-buffer-pool v0.1.0 // indirect github.com/libp2p/go-cidranger v1.1.0 // indirect - github.com/libp2p/go-eventbus v0.2.1 // indirect - github.com/libp2p/go-flow-metrics v0.0.3 // indirect - github.com/libp2p/go-libp2p-asn-util v0.2.0 // indirect - github.com/libp2p/go-libp2p-discovery v0.6.0 // indirect - github.com/libp2p/go-libp2p-kbucket v0.4.7 // indirect - github.com/libp2p/go-libp2p-loggables v0.1.0 // indirect - github.com/libp2p/go-libp2p-resource-manager v0.3.0 // indirect - github.com/libp2p/go-msgio v0.2.0 // indirect - github.com/libp2p/go-nat v0.1.0 // indirect - github.com/libp2p/go-netroute v0.2.0 // indirect - github.com/libp2p/go-openssl v0.0.7 // indirect - github.com/libp2p/go-reuseport v0.2.0 // indirect - github.com/libp2p/go-yamux/v3 v3.1.2 // indirect - github.com/lucas-clemente/quic-go v0.27.1 // indirect + github.com/libp2p/go-flow-metrics v0.1.0 // indirect + github.com/libp2p/go-libp2p-asn-util v0.3.0 // indirect + github.com/libp2p/go-libp2p-kbucket v0.6.3 // indirect + github.com/libp2p/go-msgio v0.3.0 // indirect + github.com/libp2p/go-nat v0.2.0 // indirect + github.com/libp2p/go-netroute v0.2.1 // indirect + github.com/libp2p/go-reuseport v0.4.0 // indirect + github.com/libp2p/go-yamux/v4 v4.0.1 // indirect github.com/magiconair/properties v1.8.6 // indirect - github.com/marten-seemann/qtls-go1-16 v0.1.5 // indirect - github.com/marten-seemann/qtls-go1-17 v0.1.1 // indirect - github.com/marten-seemann/qtls-go1-18 v0.1.1 // indirect + github.com/mailru/easyjson v0.7.7 // indirect + github.com/manifoldco/promptui v0.9.0 // indirect github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect - github.com/mattn/go-isatty v0.0.14 // indirect - github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect - github.com/miekg/dns v1.1.43 // indirect + 
github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect + github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect + github.com/miekg/dns v1.1.56 // indirect github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect + github.com/mimoo/StrobeGo v0.0.0-20210601165009-122bf33a46e0 // indirect github.com/minio/highwayhash v1.0.2 // indirect - github.com/minio/sha256-simd v1.0.0 // indirect - github.com/mitchellh/go-testing-interface v1.0.0 // indirect + github.com/minio/sha256-simd v1.0.1 // indirect + github.com/mitchellh/go-testing-interface v1.14.1 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect + github.com/mmcloughlin/addchain v0.4.0 // indirect github.com/mr-tron/base58 v1.2.0 // indirect github.com/mtibben/percent v0.2.1 // indirect - github.com/multiformats/go-base36 v0.1.0 // indirect - github.com/multiformats/go-multiaddr-dns v0.3.1 // indirect + github.com/multiformats/go-base36 v0.2.0 // indirect github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect - github.com/multiformats/go-multibase v0.0.3 // indirect - github.com/multiformats/go-multicodec v0.4.1 // indirect - github.com/multiformats/go-multistream v0.3.2 // indirect - github.com/multiformats/go-varint v0.0.6 // indirect - github.com/nxadm/tail v1.4.8 // indirect - github.com/oasisprotocol/curve25519-voi v0.0.0-20210609091139-0a56a4bca00b // indirect - github.com/onsi/ginkgo v1.16.4 // indirect - github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417 // indirect + github.com/multiformats/go-multibase v0.2.0 // indirect + github.com/multiformats/go-multicodec v0.9.0 // indirect + github.com/multiformats/go-multistream v0.5.0 // indirect + github.com/multiformats/go-varint v0.0.7 // indirect + github.com/onsi/ginkgo/v2 v2.13.0 // indirect + github.com/opencontainers/runtime-spec v1.1.0 // indirect github.com/opentracing/opentracing-go v1.2.0 // indirect github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect github.com/pelletier/go-toml v1.9.5 // indirect - github.com/pelletier/go-toml/v2 v2.0.1 // indirect - github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 // indirect + github.com/pelletier/go-toml/v2 v2.0.7 // indirect + github.com/petar/GoLLRB v0.0.0-20210522233825-ae3b015fd3e9 // indirect + github.com/petermattis/goid v0.0.0-20230317030725-371a4b8eda08 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/polydawn/refmt v0.0.0-20201211092308-30ac6d18308e // indirect - github.com/prometheus/client_golang v1.12.2 // indirect - github.com/prometheus/client_model v0.2.0 // indirect - github.com/prometheus/common v0.33.0 // indirect - github.com/prometheus/procfs v0.7.3 // indirect + github.com/polydawn/refmt v0.89.0 // indirect + github.com/prometheus/client_model v0.5.0 // indirect + github.com/prometheus/common v0.45.0 // indirect + github.com/prometheus/procfs v0.12.0 // indirect + github.com/pyroscope-io/godeltaprof v0.1.2 // indirect + github.com/quic-go/qpack v0.4.0 // indirect + github.com/quic-go/qtls-go1-20 v0.3.4 // indirect + github.com/quic-go/quic-go v0.39.4 // indirect + github.com/quic-go/webtransport-go v0.6.0 // indirect github.com/rakyll/statik v0.1.7 // indirect - github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 // indirect + github.com/raulk/go-watchdog v1.3.0 // indirect + github.com/rcrowley/go-metrics 
v0.0.0-20201227073835-cf1acfcdf475 // indirect github.com/regen-network/cosmos-proto v0.3.1 // indirect + github.com/rivo/uniseg v0.4.4 // indirect github.com/rs/cors v1.8.2 // indirect - github.com/rs/zerolog v1.26.1 // indirect - github.com/sasha-s/go-deadlock v0.2.1-0.20190427202633-1595213edefa // indirect - github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572 // indirect + github.com/rs/zerolog v1.31.0 // indirect + github.com/sasha-s/go-deadlock v0.3.1 // indirect + github.com/shirou/gopsutil v3.21.6+incompatible // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect - github.com/spf13/afero v1.8.2 // indirect + github.com/spf13/afero v1.9.2 // indirect github.com/spf13/cast v1.5.0 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect - github.com/spf13/viper v1.12.0 // indirect - github.com/subosito/gotenv v1.3.0 // indirect - github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect - github.com/tendermint/btcd v0.1.1 // indirect - github.com/tendermint/crypto v0.0.0-20191022145703-50d29ede1e15 // indirect + github.com/spf13/viper v1.14.0 // indirect + github.com/subosito/gotenv v1.4.1 // indirect + github.com/supranational/blst v0.3.11 // indirect + github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect + github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c // indirect github.com/tendermint/go-amino v0.16.0 // indirect github.com/tendermint/tm-db v0.6.7 // indirect - github.com/ulikunitz/xz v0.5.8 // indirect - github.com/vivint/infectious v0.0.0-20200605153912-25a574ae18a3 // indirect - github.com/whyrusleeping/cbor-gen v0.0.0-20200123233031-1cdf64d27158 // indirect + github.com/tidwall/btree v1.5.0 // indirect + github.com/tklauser/go-sysconf v0.3.12 // indirect + github.com/tklauser/numcpus v0.6.1 // indirect + github.com/ulikunitz/xz v0.5.10 // indirect + github.com/whyrusleeping/cbor v0.0.0-20171005072247-63513f603b11 // indirect + github.com/whyrusleeping/cbor-gen v0.0.0-20230818171029-f91ae536ca25 // indirect github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 // indirect - github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7 // indirect - github.com/whyrusleeping/timecache v0.0.0-20160911033111-cfcb2f1abfee // indirect - github.com/zondax/hid v0.9.1-0.20220302062450-5552068d2266 // indirect + github.com/zondax/hid v0.9.2 // indirect + github.com/zondax/ledger-go v0.14.3 // indirect go.etcd.io/bbolt v1.3.6 // indirect - go.opencensus.io v0.23.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.8.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.31.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.8.0 // indirect - go.opentelemetry.io/proto/otlp v0.18.0 // indirect - go.uber.org/atomic v1.9.0 // indirect - go.uber.org/dig v1.14.0 // indirect - go.uber.org/multierr v1.8.0 // indirect - go.uber.org/zap v1.21.0 // indirect - golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 // indirect - golang.org/x/net v0.0.0-20220520000938-2e3eb7b945c2 // indirect - golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5 // indirect - golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a // indirect - golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect - golang.org/x/text v0.3.7 // indirect - golang.org/x/tools v0.1.11-0.20220316014157-77aa08bb151a // indirect - golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df // indirect - google.golang.org/api v0.81.0 // indirect - 
google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20220519153652-3a47de7e79bd // indirect - google.golang.org/protobuf v1.28.0 // indirect - gopkg.in/ini.v1 v1.66.4 // indirect - gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect + go.opencensus.io v0.24.0 // indirect + go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.15.1 // indirect + go.uber.org/atomic v1.11.0 // indirect + go.uber.org/dig v1.17.1 // indirect + go.uber.org/mock v0.3.0 // indirect + go.uber.org/multierr v1.11.0 // indirect + golang.org/x/mod v0.14.0 // indirect + golang.org/x/net v0.19.0 // indirect + golang.org/x/oauth2 v0.13.0 // indirect + golang.org/x/sys v0.17.0 // indirect + golang.org/x/term v0.17.0 // indirect + golang.org/x/tools v0.16.0 // indirect + golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect + gonum.org/v1/gonum v0.13.0 // indirect + google.golang.org/api v0.149.0 // indirect + google.golang.org/appengine v1.6.8 // indirect + google.golang.org/genproto v0.0.0-20231211222908-989df2bf70f3 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20231120223509-83a465c0220f // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20231212172506-995d672761c0 // indirect + gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - lukechampine.com/blake3 v1.1.7 // indirect - nhooyr.io/websocket v1.8.6 // indirect + lukechampine.com/blake3 v1.2.1 // indirect + nhooyr.io/websocket v1.8.7 // indirect + rsc.io/tmplfunc v0.0.3 // indirect sigs.k8s.io/yaml v1.3.0 // indirect ) replace ( - github.com/cosmos/cosmos-sdk => github.com/celestiaorg/cosmos-sdk v1.1.0-sdk-v0.46.0 + github.com/cosmos/cosmos-sdk => github.com/celestiaorg/cosmos-sdk v1.18.3-sdk-v0.46.14 + github.com/filecoin-project/dagstore => github.com/celestiaorg/dagstore v0.0.0-20230824094345-537c012aa403 github.com/gogo/protobuf => github.com/regen-network/protobuf v1.3.3-alpha.regen.1 - github.com/libp2p/go-libp2p-pubsub v0.7.0 => github.com/celestiaorg/go-libp2p-pubsub v0.6.2-0.20220628100036-657948473f1f - github.com/tendermint/tendermint => github.com/celestiaorg/celestia-core v1.2.4-tm-v0.35.6 + // the broken goleveldb needs to be replaced for cosmos-sdk and celestia-app + github.com/syndtr/goleveldb => github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 + github.com/tendermint/tendermint => github.com/celestiaorg/celestia-core v1.29.0-tm-v0.34.29 ) diff --git a/go.sum b/go.sum index ef04032eca..6fe259ff0c 100644 --- a/go.sum +++ b/go.sum @@ -3,12 +3,14 @@ cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.37.0/go.mod h1:TS1dMSSfndXH133OKGwekG838Om/cQT0BUHV3HcBgoo= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.43.0/go.mod h1:BOSR3VbTLkk6FDC/TcffxP4NF/FFBGA5ku+jvKOP7pg= cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.51.0/go.mod 
h1:hWtGJ6gnXH+KgDv+V0zFGDvpi07n3z8ZNj3T1RW0Gcw= cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= @@ -30,96 +32,281 @@ cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+Y cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= -cloud.google.com/go v0.100.2 h1:t9Iw5QH5v4XtlEQaCtUY7x6sCABps8sW0acw7e2WQ6Y= cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= +cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc= +cloud.google.com/go v0.102.1/go.mod h1:XZ77E9qnTEnrgEOvr4xzfdX5TRo7fB4T2F4O6+34hIU= +cloud.google.com/go v0.104.0/go.mod h1:OO6xxXdJyvuJPcEPBLN9BJPD+jep5G1+2U5B5gkRYtA= +cloud.google.com/go v0.110.10 h1:LXy9GEO+timppncPIAZoOj3l58LIU9k+kn48AN7IO3Y= +cloud.google.com/go v0.110.10/go.mod h1:v1OoFqYxiBkUrruItNM3eT4lLByNjxmJSV/xDKJNnic= +cloud.google.com/go/aiplatform v1.22.0/go.mod h1:ig5Nct50bZlzV6NvKaTwmplLLddFx0YReh9WfTO5jKw= +cloud.google.com/go/aiplatform v1.24.0/go.mod h1:67UUvRBKG6GTayHKV8DBv2RtR1t93YRu5B1P3x99mYY= +cloud.google.com/go/analytics v0.11.0/go.mod h1:DjEWCu41bVbYcKyvlws9Er60YE4a//bK6mnhWvQeFNI= +cloud.google.com/go/analytics v0.12.0/go.mod h1:gkfj9h6XRf9+TS4bmuhPEShsh3hH8PAZzm/41OOhQd4= +cloud.google.com/go/area120 v0.5.0/go.mod h1:DE/n4mp+iqVyvxHN41Vf1CR602GiHQjFPusMFW6bGR4= +cloud.google.com/go/area120 v0.6.0/go.mod h1:39yFJqWVgm0UZqWTOdqkLhjoC7uFfgXRC8g/ZegeAh0= +cloud.google.com/go/artifactregistry v1.6.0/go.mod h1:IYt0oBPSAGYj/kprzsBjZ/4LnG/zOcHyFHjWPCi6SAQ= +cloud.google.com/go/artifactregistry v1.7.0/go.mod h1:mqTOFOnGZx8EtSqK/ZWcsm/4U8B77rbcLP6ruDU2Ixk= +cloud.google.com/go/asset v1.5.0/go.mod h1:5mfs8UvcM5wHhqtSv8J1CtxxaQq3AdBxxQi2jGW/K4o= +cloud.google.com/go/asset v1.7.0/go.mod h1:YbENsRK4+xTiL+Ofoj5Ckf+O17kJtgp3Y3nn4uzZz5s= +cloud.google.com/go/asset v1.8.0/go.mod h1:mUNGKhiqIdbr8X7KNayoYvyc4HbbFO9URsjbytpUaW0= +cloud.google.com/go/assuredworkloads v1.5.0/go.mod h1:n8HOZ6pff6re5KYfBXcFvSViQjDwxFkAkmUFffJRbbY= +cloud.google.com/go/assuredworkloads v1.6.0/go.mod h1:yo2YOk37Yc89Rsd5QMVECvjaMKymF9OP+QXWlKXUkXw= +cloud.google.com/go/assuredworkloads v1.7.0/go.mod h1:z/736/oNmtGAyU47reJgGN+KVoYoxeLBoj4XkKYscNI= +cloud.google.com/go/automl v1.5.0/go.mod h1:34EjfoFGMZ5sgJ9EoLsRtdPSNZLcfflJR39VbVNS2M0= +cloud.google.com/go/automl v1.6.0/go.mod h1:ugf8a6Fx+zP0D59WLhqgTDsQI9w07o64uf/Is3Nh5p8= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/bigquery v1.42.0/go.mod h1:8dRTJxhtG+vwBKzE5OseQn/hiydoQN3EedCaOdYmxRA= +cloud.google.com/go/bigtable v1.2.0/go.mod h1:JcVAOl45lrTmQfLj7T6TxyMzIN/3FGGcFm+2xVAli2o= +cloud.google.com/go/billing v1.4.0/go.mod 
h1:g9IdKBEFlItS8bTtlrZdVLWSSdSyFUZKXNS02zKMOZY= +cloud.google.com/go/billing v1.5.0/go.mod h1:mztb1tBc3QekhjSgmpf/CV4LzWXLzCArwpLmP2Gm88s= +cloud.google.com/go/binaryauthorization v1.1.0/go.mod h1:xwnoWu3Y84jbuHa0zd526MJYmtnVXn0syOjaJgy4+dM= +cloud.google.com/go/binaryauthorization v1.2.0/go.mod h1:86WKkJHtRcv5ViNABtYMhhNWRrD1Vpi//uKEy7aYEfI= +cloud.google.com/go/cloudtasks v1.5.0/go.mod h1:fD92REy1x5woxkKEkLdvavGnPJGEn8Uic9nWuLzqCpY= +cloud.google.com/go/cloudtasks v1.6.0/go.mod h1:C6Io+sxuke9/KNRkbQpihnW93SWDU3uXt92nu85HkYI= cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s= -cloud.google.com/go/compute v1.6.1 h1:2sMmt8prCn7DPaG4Pmh0N3Inmc8cT8ae5k1M6VJ9Wqc= cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= +cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U= +cloud.google.com/go/compute v1.10.0/go.mod h1:ER5CLbMxl90o2jtNbGSbtfOpQKR0t15FOtRsugnLrlU= +cloud.google.com/go/compute v1.23.3 h1:6sVlXXBmbd7jNX0Ipq0trII3e4n1/MsADLK6a+aiVlk= +cloud.google.com/go/compute v1.23.3/go.mod h1:VCgBUoMnIVIR0CscqQiPJLAG25E3ZRZMzcFZeQ+h8CI= +cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= +cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= +cloud.google.com/go/containeranalysis v0.5.1/go.mod h1:1D92jd8gRR/c0fGMlymRgxWD3Qw9C1ff6/T7mLgVL8I= +cloud.google.com/go/containeranalysis v0.6.0/go.mod h1:HEJoiEIu+lEXM+k7+qLCci0h33lX3ZqoYFdmPcoO7s4= +cloud.google.com/go/datacatalog v1.3.0/go.mod h1:g9svFY6tuR+j+hrTw3J2dNcmI0dzmSiyOzm8kpLq0a0= +cloud.google.com/go/datacatalog v1.5.0/go.mod h1:M7GPLNQeLfWqeIm3iuiruhPzkt65+Bx8dAKvScX8jvs= +cloud.google.com/go/datacatalog v1.6.0/go.mod h1:+aEyF8JKg+uXcIdAmmaMUmZ3q1b/lKLtXCmXdnc0lbc= +cloud.google.com/go/dataflow v0.6.0/go.mod h1:9QwV89cGoxjjSR9/r7eFDqqjtvbKxAK2BaYU6PVk9UM= +cloud.google.com/go/dataflow v0.7.0/go.mod h1:PX526vb4ijFMesO1o202EaUmouZKBpjHsTlCtB4parQ= +cloud.google.com/go/dataform v0.3.0/go.mod h1:cj8uNliRlHpa6L3yVhDOBrUXH+BPAO1+KFMQQNSThKo= +cloud.google.com/go/dataform v0.4.0/go.mod h1:fwV6Y4Ty2yIFL89huYlEkwUPtS7YZinZbzzj5S9FzCE= +cloud.google.com/go/datalabeling v0.5.0/go.mod h1:TGcJ0G2NzcsXSE/97yWjIZO0bXj0KbVlINXMG9ud42I= +cloud.google.com/go/datalabeling v0.6.0/go.mod h1:WqdISuk/+WIGeMkpw/1q7bK/tFEZxsrFJOJdY2bXvTQ= +cloud.google.com/go/dataqna v0.5.0/go.mod h1:90Hyk596ft3zUQ8NkFfvICSIfHFh1Bc7C4cK3vbhkeo= +cloud.google.com/go/dataqna v0.6.0/go.mod h1:1lqNpM7rqNLVgWBJyk5NF6Uen2PHym0jtVJonplVsDA= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/iam v0.3.0 h1:exkAomrVUuzx9kWFI1wm3KI0uoDeUFPB4kKGzx6x+Gc= +cloud.google.com/go/datastream v1.2.0/go.mod h1:i/uTP8/fZwgATHS/XFu0TcNUhuA0twZxxQ3EyCUQMwo= +cloud.google.com/go/datastream v1.3.0/go.mod h1:cqlOX8xlyYF/uxhiKn6Hbv6WjwPPuI9W2M9SAXwaLLQ= +cloud.google.com/go/dialogflow v1.15.0/go.mod h1:HbHDWs33WOGJgn6rfzBW1Kv807BE3O1+xGbn59zZWI4= +cloud.google.com/go/dialogflow v1.16.1/go.mod h1:po6LlzGfK+smoSmTBnbkIZY2w8ffjz/RcGSS+sh1el0= +cloud.google.com/go/dialogflow v1.17.0/go.mod 
h1:YNP09C/kXA1aZdBgC/VtXX74G/TKn7XVCcVumTflA+8= +cloud.google.com/go/documentai v1.7.0/go.mod h1:lJvftZB5NRiFSX4moiye1SMxHx0Bc3x1+p9e/RfXYiU= +cloud.google.com/go/documentai v1.8.0/go.mod h1:xGHNEB7CtsnySCNrCFdCyyMz44RhFEEX2Q7UD0c5IhU= +cloud.google.com/go/domains v0.6.0/go.mod h1:T9Rz3GasrpYk6mEGHh4rymIhjlnIuB4ofT1wTxDeT4Y= +cloud.google.com/go/domains v0.7.0/go.mod h1:PtZeqS1xjnXuRPKE/88Iru/LdfoRyEHYA9nFQf4UKpg= +cloud.google.com/go/edgecontainer v0.1.0/go.mod h1:WgkZ9tp10bFxqO8BLPqv2LlfmQF1X8lZqwW4r1BTajk= +cloud.google.com/go/edgecontainer v0.2.0/go.mod h1:RTmLijy+lGpQ7BXuTDa4C4ssxyXT34NIuHIgKuP4s5w= +cloud.google.com/go/functions v1.6.0/go.mod h1:3H1UA3qiIPRWD7PeZKLvHZ9SaQhR26XIJcC0A5GbvAk= +cloud.google.com/go/functions v1.7.0/go.mod h1:+d+QBcWM+RsrgZfV9xo6KfA1GlzJfxcfZcRPEhDDfzg= +cloud.google.com/go/gaming v1.5.0/go.mod h1:ol7rGcxP/qHTRQE/RO4bxkXq+Fix0j6D4LFPzYTIrDM= +cloud.google.com/go/gaming v1.6.0/go.mod h1:YMU1GEvA39Qt3zWGyAVA9bpYz/yAhTvaQ1t2sK4KPUA= +cloud.google.com/go/gkeconnect v0.5.0/go.mod h1:c5lsNAg5EwAy7fkqX/+goqFsU1Da/jQFqArp+wGNr/o= +cloud.google.com/go/gkeconnect v0.6.0/go.mod h1:Mln67KyU/sHJEBY8kFZ0xTeyPtzbq9StAVvEULYK16A= +cloud.google.com/go/gkehub v0.9.0/go.mod h1:WYHN6WG8w9bXU0hqNxt8rm5uxnk8IH+lPY9J2TV7BK0= +cloud.google.com/go/gkehub v0.10.0/go.mod h1:UIPwxI0DsrpsVoWpLB0stwKCP+WFVG9+y977wO+hBH0= +cloud.google.com/go/grafeas v0.2.0/go.mod h1:KhxgtF2hb0P191HlY5besjYm6MqTSTj3LSI+M+ByZHc= cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= +cloud.google.com/go/iam v0.5.0/go.mod h1:wPU9Vt0P4UmCux7mqtRu6jcpPAb74cP1fh50J3QpkUc= +cloud.google.com/go/iam v1.1.5 h1:1jTsCu4bcsNsE4iiqNT5SHwrDRCfRmIaaaVFhRveTJI= +cloud.google.com/go/iam v1.1.5/go.mod h1:rB6P/Ic3mykPbFio+vo7403drjlgvoWfYpJhMXEbzv8= +cloud.google.com/go/language v1.4.0/go.mod h1:F9dRpNFQmJbkaop6g0JhSBXCNlO90e1KWx5iDdxbWic= +cloud.google.com/go/language v1.6.0/go.mod h1:6dJ8t3B+lUYfStgls25GusK04NLh3eDLQnWM3mdEbhI= +cloud.google.com/go/lifesciences v0.5.0/go.mod h1:3oIKy8ycWGPUyZDR/8RNnTOYevhaMLqh5vLUXs9zvT8= +cloud.google.com/go/lifesciences v0.6.0/go.mod h1:ddj6tSX/7BOnhxCSd3ZcETvtNr8NZ6t/iPhY2Tyfu08= +cloud.google.com/go/mediatranslation v0.5.0/go.mod h1:jGPUhGTybqsPQn91pNXw0xVHfuJ3leR1wj37oU3y1f4= +cloud.google.com/go/mediatranslation v0.6.0/go.mod h1:hHdBCTYNigsBxshbznuIMFNe5QXEowAuNmmC7h8pu5w= +cloud.google.com/go/memcache v1.4.0/go.mod h1:rTOfiGZtJX1AaFUrOgsMHX5kAzaTQ8azHiuDoTPzNsE= +cloud.google.com/go/memcache v1.5.0/go.mod h1:dk3fCK7dVo0cUU2c36jKb4VqKPS22BTkf81Xq617aWM= +cloud.google.com/go/metastore v1.5.0/go.mod h1:2ZNrDcQwghfdtCwJ33nM0+GrBGlVuh8rakL3vdPY3XY= +cloud.google.com/go/metastore v1.6.0/go.mod h1:6cyQTls8CWXzk45G55x57DVQ9gWg7RiH65+YgPsNh9s= +cloud.google.com/go/networkconnectivity v1.4.0/go.mod h1:nOl7YL8odKyAOtzNX73/M5/mGZgqqMeryi6UPZTk/rA= +cloud.google.com/go/networkconnectivity v1.5.0/go.mod h1:3GzqJx7uhtlM3kln0+x5wyFvuVH1pIBJjhCpjzSt75o= +cloud.google.com/go/networksecurity v0.5.0/go.mod h1:xS6fOCoqpVC5zx15Z/MqkfDwH4+m/61A3ODiDV1xmiQ= +cloud.google.com/go/networksecurity v0.6.0/go.mod h1:Q5fjhTr9WMI5mbpRYEbiexTzROf7ZbDzvzCrNl14nyU= +cloud.google.com/go/notebooks v1.2.0/go.mod h1:9+wtppMfVPUeJ8fIWPOq1UnATHISkGXGqTkxeieQ6UY= +cloud.google.com/go/notebooks v1.3.0/go.mod h1:bFR5lj07DtCPC7YAAJ//vHskFBxA5JzYlH68kXVdk34= +cloud.google.com/go/osconfig v1.7.0/go.mod h1:oVHeCeZELfJP7XLxcBGTMBvRO+1nQ5tFG9VQTmYS2Fs= +cloud.google.com/go/osconfig v1.8.0/go.mod h1:EQqZLu5w5XA7eKizepumcvWx+m8mJUhEwiPqWiZeEdg= +cloud.google.com/go/oslogin v1.4.0/go.mod 
h1:YdgMXWRaElXz/lDk1Na6Fh5orF7gvmJ0FGLIs9LId4E= +cloud.google.com/go/oslogin v1.5.0/go.mod h1:D260Qj11W2qx/HVF29zBg+0fd6YCSjSqLUkY/qEenQU= +cloud.google.com/go/phishingprotection v0.5.0/go.mod h1:Y3HZknsK9bc9dMi+oE8Bim0lczMU6hrX0UpADuMefr0= +cloud.google.com/go/phishingprotection v0.6.0/go.mod h1:9Y3LBLgy0kDTcYET8ZH3bq/7qni15yVUoAxiFxnlSUA= +cloud.google.com/go/privatecatalog v0.5.0/go.mod h1:XgosMUvvPyxDjAVNDYxJ7wBW8//hLDDYmnsNcMGq1K0= +cloud.google.com/go/privatecatalog v0.6.0/go.mod h1:i/fbkZR0hLN29eEWiiwue8Pb+GforiEIBnV9yrRUOKI= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/recaptchaenterprise v1.3.1/go.mod h1:OdD+q+y4XGeAlxRaMn1Y7/GveP6zmq76byL6tjPE7d4= +cloud.google.com/go/recaptchaenterprise/v2 v2.1.0/go.mod h1:w9yVqajwroDNTfGuhmOjPDN//rZGySaf6PtFVcSCa7o= +cloud.google.com/go/recaptchaenterprise/v2 v2.2.0/go.mod h1:/Zu5jisWGeERrd5HnlS3EUGb/D335f9k51B/FVil0jk= +cloud.google.com/go/recaptchaenterprise/v2 v2.3.0/go.mod h1:O9LwGCjrhGHBQET5CA7dd5NwwNQUErSgEDit1DLNTdo= +cloud.google.com/go/recommendationengine v0.5.0/go.mod h1:E5756pJcVFeVgaQv3WNpImkFP8a+RptV6dDLGPILjvg= +cloud.google.com/go/recommendationengine v0.6.0/go.mod h1:08mq2umu9oIqc7tDy8sx+MNJdLG0fUi3vaSVbztHgJ4= +cloud.google.com/go/recommender v1.5.0/go.mod h1:jdoeiBIVrJe9gQjwd759ecLJbxCDED4A6p+mqoqDvTg= +cloud.google.com/go/recommender v1.6.0/go.mod h1:+yETpm25mcoiECKh9DEScGzIRyDKpZ0cEhWGo+8bo+c= +cloud.google.com/go/redis v1.7.0/go.mod h1:V3x5Jq1jzUcg+UNsRvdmsfuFnit1cfe3Z/PGyq/lm4Y= +cloud.google.com/go/redis v1.8.0/go.mod h1:Fm2szCDavWzBk2cDKxrkmWBqoCiL1+Ctwq7EyqBCA/A= +cloud.google.com/go/retail v1.8.0/go.mod h1:QblKS8waDmNUhghY2TI9O3JLlFk8jybHeV4BF19FrE4= +cloud.google.com/go/retail v1.9.0/go.mod h1:g6jb6mKuCS1QKnH/dpu7isX253absFl6iE92nHwlBUY= +cloud.google.com/go/scheduler v1.4.0/go.mod h1:drcJBmxF3aqZJRhmkHQ9b3uSSpQoltBPGPxGAWROx6s= +cloud.google.com/go/scheduler v1.5.0/go.mod h1:ri073ym49NW3AfT6DZi21vLZrG07GXr5p3H1KxN5QlI= +cloud.google.com/go/secretmanager v1.6.0/go.mod h1:awVa/OXF6IiyaU1wQ34inzQNc4ISIDIrId8qE5QGgKA= +cloud.google.com/go/security v1.5.0/go.mod h1:lgxGdyOKKjHL4YG3/YwIL2zLqMFCKs0UbQwgyZmfJl4= +cloud.google.com/go/security v1.7.0/go.mod h1:mZklORHl6Bg7CNnnjLH//0UlAlaXqiG7Lb9PsPXLfD0= +cloud.google.com/go/security v1.8.0/go.mod h1:hAQOwgmaHhztFhiQ41CjDODdWP0+AE1B3sX4OFlq+GU= +cloud.google.com/go/securitycenter v1.13.0/go.mod h1:cv5qNAqjY84FCN6Y9z28WlkKXyWsgLO832YiWwkCWcU= +cloud.google.com/go/securitycenter v1.14.0/go.mod h1:gZLAhtyKv85n52XYWt6RmeBdydyxfPeTrpToDPw4Auc= +cloud.google.com/go/servicedirectory v1.4.0/go.mod h1:gH1MUaZCgtP7qQiI+F+A+OpeKF/HQWgtAddhTbhL2bs= +cloud.google.com/go/servicedirectory v1.5.0/go.mod h1:QMKFL0NUySbpZJ1UZs3oFAmdvVxhhxB6eJ/Vlp73dfg= +cloud.google.com/go/speech v1.6.0/go.mod h1:79tcr4FHCimOp56lwC01xnt/WPJZc4v3gzyT7FoBkCM= +cloud.google.com/go/speech v1.7.0/go.mod h1:KptqL+BAQIhMsj1kOP2la5DSEEerPDuOP/2mmkhHhZQ= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= cloud.google.com/go/storage v1.8.0/go.mod 
h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -cloud.google.com/go/storage v1.14.0 h1:6RRlFMv1omScs6iq2hfE3IvgE+l6RfJPampq8UZc5TU= cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= +cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y= +cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc= +cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s= +cloud.google.com/go/storage v1.30.1 h1:uOdMxAs8HExqBlnLtnQyP0YkvbiDpdGShGKtx6U/oNM= +cloud.google.com/go/storage v1.30.1/go.mod h1:NfxhC0UJE1aXSx7CIIbCf7y9HKT7BiccwkR7+P7gN8E= +cloud.google.com/go/talent v1.1.0/go.mod h1:Vl4pt9jiHKvOgF9KoZo6Kob9oV4lwd/ZD5Cto54zDRw= +cloud.google.com/go/talent v1.2.0/go.mod h1:MoNF9bhFQbiJ6eFD3uSsg0uBALw4n4gaCaEjBw9zo8g= +cloud.google.com/go/videointelligence v1.6.0/go.mod h1:w0DIDlVRKtwPCn/C4iwZIJdvC69yInhW0cfi+p546uU= +cloud.google.com/go/videointelligence v1.7.0/go.mod h1:k8pI/1wAhjznARtVT9U1llUaFNPh7muw8QyOUpavru4= +cloud.google.com/go/vision v1.2.0/go.mod h1:SmNwgObm5DpFBme2xpyOyasvBc1aPdjvMk2bBk0tKD0= +cloud.google.com/go/vision/v2 v2.2.0/go.mod h1:uCdV4PpN1S0jyCyq8sIM42v2Y6zOLkZs+4R9LrGYwFo= +cloud.google.com/go/vision/v2 v2.3.0/go.mod h1:UO61abBx9QRMFkNBbf1D8B1LXdS2cGiiCRx0vSpZoUo= +cloud.google.com/go/webrisk v1.4.0/go.mod h1:Hn8X6Zr+ziE2aNd8SliSDWpEnSS1u4R9+xXZmFiHmGE= +cloud.google.com/go/webrisk v1.5.0/go.mod h1:iPG6fr52Tv7sGk0H6qUFzmL3HHZev1htXuWDEEsqMTg= +cloud.google.com/go/workflows v1.6.0/go.mod h1:6t9F5h/unJz41YqfBmqSASJSXccBLtD1Vwf+KmJENM0= +cloud.google.com/go/workflows v1.7.0/go.mod h1:JhSrZuVZWuiDfKEFxU0/F1PQjmpnpcoISEXH2bcHC3M= +collectd.org v0.3.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE= +cosmossdk.io/errors v1.0.1 h1:bzu+Kcr0kS/1DuPBtUFdWjzLqyUuCiyHjyJB6srBV/0= +cosmossdk.io/errors v1.0.1/go.mod h1:MeelVSZThMi4bEakzhhhE/CKqVv3nOJDA25bIqRDu/U= +cosmossdk.io/math v1.2.0 h1:8gudhTkkD3NxOP2YyyJIYYmt6dQ55ZfJkDOaxXpy7Ig= +cosmossdk.io/math v1.2.0/go.mod h1:l2Gnda87F0su8a/7FEKJfFdJrM0JZRXQaohlgJeyQh0= dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20201218220906-28db891af037/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU= dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4= dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU= filippo.io/edwards25519 v1.0.0-rc.1 h1:m0VOOB23frXZvAOK44usCgLWvtsxIoMCTBGJZlpmGfU= filippo.io/edwards25519 v1.0.0-rc.1/go.mod h1:N1IkdkCkiLB6tki+MYJoSx2JTY9NUlxZE7eHn5EwJns= git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= -github.com/99designs/keyring v1.1.6 h1:kVDC2uCgVwecxCk+9zoCt2uEL6dt+dfVzMvGgnVcIuM= -github.com/99designs/keyring v1.1.6/go.mod h1:16e0ds7LGQQcT59QqkTg72Hh5ShM51Byv5PEmW6uoRU= +git.sr.ht/~sircmpwn/getopt v0.0.0-20191230200459-23622cc906b3/go.mod h1:wMEGFFFNuPos7vHmWXfszqImLppbc0wEhh6JBfJIUgw= +git.sr.ht/~sircmpwn/go-bare 
v0.0.0-20210406120253-ab86bc2846d9/go.mod h1:BVJwbDfVjCjoFiKrhkei6NdGcZYpkDkdyCdg1ukytRA= +github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 h1:/vQbFIOMbk2FiG/kXiLl8BRyzTWDw7gX/Hz7Dd5eDMs= +github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4/go.mod h1:hN7oaIRCjzsZ2dE+yG5k+rsdt3qcwykqK6HVGcKwsw4= +github.com/99designs/keyring v1.2.1 h1:tYLp1ULvO7i3fI5vE21ReQuj99QFSs7lGm0xWyJo87o= +github.com/99designs/keyring v1.2.1/go.mod h1:fc+wB5KTk9wQ9sDx0kFXB3A0MaeGHM9AwRStKOQ5vOA= github.com/AndreasBriese/bbloom v0.0.0-20180913140656-343706a395b7/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= +github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96 h1:cTp8I5+VIoKjsnZuH8vjyaysT/ses3EvZeaV/1UkF2M= github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= +github.com/Azure/azure-sdk-for-go/sdk/azcore v0.21.1/go.mod h1:fBF9PQNqB8scdgpZ3ufzaLntG0AG7C1WjPMsiFOmfHM= +github.com/Azure/azure-sdk-for-go/sdk/internal v0.8.3/go.mod h1:KLF4gFr6DcKFZwSuH8w8yEK6DpFl3LP5rhdvAb7Yz5I= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.3.0/go.mod h1:tPaiy8S5bQ+S5sOiDlINkp7+Ef339+Nz5L5XO+cnOHo= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= +github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/toml v1.2.0 h1:Rt8g24XnyGTyglgET/PRUNlrUeu9F5L+7FilkXfZgs0= -github.com/BurntSushi/toml v1.2.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= +github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8= +github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/ChainSafe/go-schnorrkel v1.0.0 h1:3aDA67lAykLaG1y3AOjs88dMxC88PgUuHRrLeDnvGIM= +github.com/ChainSafe/go-schnorrkel v1.0.0/go.mod h1:dpzHYVxLZcp8pjlV+O+UR8K0Hp/z7vcchBSbMBEhCw4= +github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= -github.com/DataDog/zstd v1.4.1/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= +github.com/DataDog/zstd v1.5.0 h1:+K/VEwIAaPcHiMtQvpLD4lqW7f0Gk3xdYZmI1hD+CXo= +github.com/DataDog/zstd v1.5.0/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= +github.com/Jorropo/jsync v1.0.1 h1:6HgRolFZnsdfzRUj+ImB9og1JYOxQoReSywkHOGSaUU= +github.com/Jorropo/jsync v1.0.1/go.mod h1:jCOZj3vrBCri3bSU3ErUYvevKlnbssrXeCivybS5ABQ= github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= github.com/Kubuxu/go-os-helper v0.0.1/go.mod h1:N8B+I7vPCT80IcP58r50u4+gEEcsZETFUpAzWW2ep1Y= -github.com/Microsoft/go-winio v0.5.1 h1:aPJp2QD7OOrhO5tQXqQoGSJc+DjDtWTGLOmNyAm6FgY= +github.com/Masterminds/glide v0.13.2/go.mod h1:STyF5vcenH/rUqTEv+/hBXlSTo7KYwg2oc2f4tzPWic= +github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= +github.com/Masterminds/vcs v1.13.0/go.mod h1:N09YCmOQr6RLxC6UNHzuVwAdodYbbnycGHSmwVJjcKA= +github.com/Microsoft/go-winio v0.6.1 
h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= +github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw= +github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk= github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= +github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= +github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= +github.com/StackExchange/wmi v1.2.1 h1:VIkavFPXSjcnS+O8yTq7NI32k0R5Aj+v39y29VYDOSA= +github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8= +github.com/Stebalien/go-bitfield v0.0.1/go.mod h1:GNjFpasyUVkHMsfEOk8EFLJ9syQ6SI+XWrX9Wf2XH0s= +github.com/VictoriaMetrics/fastcache v1.6.0 h1:C/3Oi3EiBCqufydp1neRZkqcwmEiuRT9c3fqvvgKm5o= +github.com/VictoriaMetrics/fastcache v1.6.0/go.mod h1:0qHz5QP0GMX4pfmMA/zt5RgfNuXJrTP0zS7DqpHGGTw= github.com/VividCortex/gohistogram v1.0.0 h1:6+hBz+qvs0JOrrNhhmR7lFxo5sINxBCGXrdtl/UvroE= github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= github.com/Workiva/go-datastructures v1.0.53 h1:J6Y/52yX10Xc5JjXmGtWoSSxs3mZnGSaq37xZZh7Yig= github.com/Workiva/go-datastructures v1.0.53/go.mod h1:1yZL+zfsztete+ePzZz/Zb1/t5BnDuE2Ya2MMGhzP6A= -github.com/adlio/schema v1.3.0 h1:eSVYLxYWbm/6ReZBCkLw4Fz7uqC+ZNoPvA39bOwi52A= +github.com/Zilliqa/gozilliqa-sdk v1.2.1-0.20201201074141-dd0ecada1be6/go.mod h1:eSYp2T6f0apnuW8TzhV3f6Aff2SE8Dwio++U4ha4yEM= +github.com/adlio/schema v1.3.3 h1:oBJn8I02PyTB466pZO1UZEn1TV5XLlifBSyMrmHl/1I= +github.com/adlio/schema v1.3.3/go.mod h1:1EsRssiv9/Ce2CMzq5DoL7RiMshhuigQxrR4DMV9fHg= github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= +github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= +github.com/alecthomas/jsonschema v0.0.0-20200530073317-71f438968921/go.mod h1:/n6+1/DWPltRLWL/VKyUxg6tzsl5kHUCcraimt4vr60= +github.com/alecthomas/jsonschema v0.0.0-20220216202328-9eeeec9d044b h1:doCpXjVwui6HUN+xgNsNS3SZ0/jUZ68Eb+mJRNOZfog= +github.com/alecthomas/jsonschema v0.0.0-20220216202328-9eeeec9d044b/go.mod h1:/n6+1/DWPltRLWL/VKyUxg6tzsl5kHUCcraimt4vr60= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units 
v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/alecthomas/units v0.0.0-20210927113745-59d0afb8317a/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= +github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM= +github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/apache/arrow/go/arrow v0.0.0-20191024131854-af6fa24be0db/go.mod h1:VTxUBvSJ3s3eHAg65PNgrsn5BtqCRPdmyXh6rAfdxN0= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= -github.com/armon/go-metrics v0.4.0 h1:yCQqn7dwca4ITXb+CbubHmedzaQYHhNhrEXLYUeEe8Q= -github.com/armon/go-metrics v0.4.0/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= +github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA= +github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= -github.com/aws/aws-sdk-go v1.15.78/go.mod h1:E3/ieXAlvM0XWO57iftYVDLLvQ824smPP3ATZkfNZeM= github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.40.45 h1:QN1nsY27ssD/JmW4s83qmSb+uL6DG4GmCDzjmJB4xUI= -github.com/aws/aws-sdk-go v1.40.45/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q= +github.com/aws/aws-sdk-go v1.44.122 h1:p6mw01WBaNpbdP2xrisz5tIkcNwzj/HysobNoaAHjgo= +github.com/aws/aws-sdk-go v1.44.122/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= -github.com/benbjohnson/clock v1.0.2/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= +github.com/aws/aws-sdk-go-v2 v1.2.0/go.mod h1:zEQs02YRBw1DjK0PoJv3ygDYOFTre1ejlJWl8FwAuQo= +github.com/aws/aws-sdk-go-v2/config v1.1.1/go.mod h1:0XsVy9lBI/BCXm+2Tuvt39YmdHwS5unDQmxZOYe8F5Y= +github.com/aws/aws-sdk-go-v2/credentials v1.1.1/go.mod h1:mM2iIjwl7LULWtS6JCACyInboHirisUUdkBPoTHMOUo= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.0.2/go.mod h1:3hGg3PpiEjHnrkrlasTfxFqUsZ2GCk/fMUn4CbKgSkM= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.0.2/go.mod h1:45MfaXZ0cNbeuT0KQ1XJylq8A6+OpVV2E5kvY/Kq+u8= +github.com/aws/aws-sdk-go-v2/service/route53 v1.1.1/go.mod h1:rLiOUrPLW/Er5kRcQ7NkwbjlijluLsrIbu/iyl35RO4= +github.com/aws/aws-sdk-go-v2/service/sso v1.1.1/go.mod h1:SuZJxklHxLAXgLTc1iFXbEWkXs7QRTQpCLGaKIprQW0= 
+github.com/aws/aws-sdk-go-v2/service/sts v1.1.1/go.mod h1:Wi0EBZwiz/K44YliU0EKxqTCJGUfYTWXrrBwkq736bM= +github.com/aws/smithy-go v1.1.0/go.mod h1:EzMw8dbp/YJL4A5/sbhGddag+NPT7q084agLbB9LgIw= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= -github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o= +github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= @@ -128,26 +315,35 @@ github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d h1:xDfNPAt8lFiC1U github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d/go.mod h1:6QX/PXZ00z/TKoufEY6K/a0k6AhaJrQKdFe6OfVXsa4= github.com/bgentry/speakeasy v0.1.0 h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bits-and-blooms/bitset v1.7.0 h1:YjAGVd3XmtK9ktAbX8Zg2g2PwLIMjGREZJHlV4j7NEo= +github.com/bits-and-blooms/bitset v1.7.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= +github.com/bmizerany/pat v0.0.0-20170815010413-6226ea591a40/go.mod h1:8rLXio+WjiTceGBHIoTvn60HIbs7Hm7bcHjyrSqYB9c= +github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= -github.com/btcsuite/btcd v0.0.0-20190115013929-ed77733ec07d/go.mod h1:d3C0AkH6BRcvO8T0UEPu53cnw4IbV63x1bEjildYhO0= github.com/btcsuite/btcd v0.0.0-20190213025234-306aecffea32/go.mod h1:DrZx5ec/dmnfpw9KyYoQyYo7d0KEvTkk/5M/vbZjAr8= +github.com/btcsuite/btcd v0.0.0-20190315201642-aa6e0f35703c/go.mod h1:DrZx5ec/dmnfpw9KyYoQyYo7d0KEvTkk/5M/vbZjAr8= github.com/btcsuite/btcd v0.0.0-20190523000118-16327141da8c/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI= +github.com/btcsuite/btcd v0.0.0-20190605094302-a0d1e3e36d50/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI= github.com/btcsuite/btcd v0.0.0-20190824003749-130ea5bddde3/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI= github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= github.com/btcsuite/btcd v0.21.0-beta/go.mod h1:ZSWyehm27aAuS9bvkATT+Xte3hjHZ+MRgMY/8NJ7K94= +github.com/btcsuite/btcd v0.21.0-beta.0.20201114000516-e9c7a5ac6401/go.mod h1:Sv4JPQ3/M+teHz9Bo5jBpkNcP0x6r7rdihlNL/7tTAs= github.com/btcsuite/btcd v0.22.0-beta/go.mod h1:9n5ntfhhHQBIhUvlhDvD3Qg6fRUj4jkN0VB8L8svzOA= github.com/btcsuite/btcd v0.22.1 h1:CnwP9LM/M9xuRrGSCGeMVs9iv09uMqwsVX7EeIpgV2c= github.com/btcsuite/btcd v0.22.1/go.mod h1:wqgTSL29+50LRkmOVknEdmt8ZojIzhuWvgu/iptuN7Y= -github.com/btcsuite/btcd/btcec/v2 v2.1.3 h1:xM/n3yIhHAhHy04z4i43C8p4ehixJZMsnrVJkgl+MTE= +github.com/btcsuite/btcd/btcec/v2 v2.1.2/go.mod h1:ctjw4H1kknNJmRN4iP1R7bTQ+v3GJkZBd6mui8ZsAZE= github.com/btcsuite/btcd/btcec/v2 v2.1.3/go.mod h1:ctjw4H1kknNJmRN4iP1R7bTQ+v3GJkZBd6mui8ZsAZE= +github.com/btcsuite/btcd/btcec/v2 v2.3.2 h1:5n0X6hX0Zk+6omWcihdYvdAlGf2DfasC0GMf7DClJ3U= +github.com/btcsuite/btcd/btcec/v2 v2.3.2/go.mod h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04= 
+github.com/btcsuite/btcd/btcutil v1.1.2 h1:XLMbX8JQEiwMcYft2EGi8zPUkoa0abKIU6/BJSRsjzQ= +github.com/btcsuite/btcd/btcutil v1.1.2/go.mod h1:UR7dsSJzJUfMmFiiLlIrMq1lS9jh9EdCV7FStZSnpi0= +github.com/btcsuite/btcd/chaincfg/chainhash v1.0.0/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 h1:q0rUy8C/TYNBQS1+CGKw68tLOFYSNEs0TFnxxnS9+4U= github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= -github.com/btcsuite/btcutil v0.0.0-20180706230648-ab6388e0c60a/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= github.com/btcsuite/btcutil v0.0.0-20190207003914-4c204d697803/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= github.com/btcsuite/btcutil v1.0.2/go.mod h1:j9HUFwoQRsZL3V4n+qG+CUnEGHOarIxfC3Le2Yhbcts= -github.com/btcsuite/btcutil v1.0.3-0.20201208143702-a53e38424cce h1:YtWJF7RHm2pYCvA5t0RPmAaLUhREsKuKd+SLhxFbFeQ= github.com/btcsuite/btcutil v1.0.3-0.20201208143702-a53e38424cce/go.mod h1:0DVlHczLPewLcPGEIeUEzfOJhqGPQ0mJJRDBtD307+o= github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg= github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY= @@ -156,50 +352,69 @@ github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku github.com/btcsuite/snappy-go v1.0.0/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= +github.com/bufbuild/protocompile v0.1.0 h1:HjgJBI85hY/qmW5tw/66sNDZ7z0UDdVSi/5r40WHw4s= +github.com/bufbuild/protocompile v0.1.0/go.mod h1:ix/MMMdsT3fzxfw91dvbfzKW3fRRnuPCP47kpAm5m/4= github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= +github.com/bwesterb/go-ristretto v1.2.0/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0= +github.com/c-bata/go-prompt v0.2.2/go.mod h1:VzqtzE2ksDBcdln8G7mk2RX9QyGjH+OVqOCSiVIqS34= github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= -github.com/celestiaorg/celestia-app v0.5.4 h1:X8FJxaxa8oEF/A4Bg11dGEp1BKgAGWatdghGXG1+24k= -github.com/celestiaorg/celestia-app v0.5.4/go.mod h1:z4GmdNZAcPBkAQ76tCeZwBxr5nlsbqzWKb0PMZEI9Dc= -github.com/celestiaorg/celestia-core v1.2.4-tm-v0.35.6 h1:vKYvdlHpmWDcDP+jKfU64oEp2H74jAWJxCDjTHlXYSs= -github.com/celestiaorg/celestia-core v1.2.4-tm-v0.35.6/go.mod h1:4J51zu8jaHB7vlgwfKC1eyGKpRP3C0REiipTwPPfvCE= -github.com/celestiaorg/cosmos-sdk v1.1.0-sdk-v0.46.0 h1:Avs0lxSYabPTGwCfS7nV0yCF1QKO0O1mOEE1L6C8tGo= -github.com/celestiaorg/cosmos-sdk v1.1.0-sdk-v0.46.0/go.mod h1:SqOn+Sol7ydGocB4Qqo24chrwG6YW4OhFh6NSpsFYbk= -github.com/celestiaorg/go-leopard v0.1.0 h1:28z2EkvKJIez5J9CEaiiUEC+OxalRLtTGJJ1oScfE1g= -github.com/celestiaorg/go-leopard v0.1.0/go.mod h1:NtO/rjlB8dw2aq7jr06vZFKGvryQcTDXaNHelmPNOAM= -github.com/celestiaorg/go-libp2p-messenger v0.1.0 h1:rFldTa3ZWcRRn8E2bRWS94Qp1GFYXO2a0uvqpIey1B8= -github.com/celestiaorg/go-libp2p-messenger v0.1.0/go.mod h1:XzNksXrH0VxuNRGOnjPL9Ck4UyQlbmMpCYg9YwSBerI= 
-github.com/celestiaorg/go-libp2p-pubsub v0.6.2-0.20220628100036-657948473f1f h1:jS/yGji/w6ddoSEd6cAwoV3jycdKIg9mJ7avwoiMCk0= -github.com/celestiaorg/go-libp2p-pubsub v0.6.2-0.20220628100036-657948473f1f/go.mod h1:EuyBJFtF8qF67IEA98biwK8Xnw5MNJpJ/Z+8iWCMFwc= -github.com/celestiaorg/go-verifcid v0.0.1-lazypatch h1:9TSe3w1cmJmbWlweCwCTIZkan7jV8M+KwglXpdD+UG8= -github.com/celestiaorg/go-verifcid v0.0.1-lazypatch/go.mod h1:kXPYu0XqTNUKWA1h3M95UHjUqBzDwXVVt/RXZDjKJmQ= +github.com/celestiaorg/celestia-app v1.4.0 h1:hTId3xL8GssN5sHSHPP7svHi/iWp+XVxphStiR7ADiY= +github.com/celestiaorg/celestia-app v1.4.0/go.mod h1:zhdQIFGFZRRxrDVtFE4OFIT7/12RE8DRyfvNZdW8ceM= +github.com/celestiaorg/celestia-core v1.29.0-tm-v0.34.29 h1:Fd7ymPUzExPGNl2gZw4i5S74arMw+iDHLE78M/cCxl4= +github.com/celestiaorg/celestia-core v1.29.0-tm-v0.34.29/go.mod h1:xrICN0PBhp3AdTaZ8q4wS5Jvi32V02HNjaC2EsWiEKk= +github.com/celestiaorg/cosmos-sdk v1.18.3-sdk-v0.46.14 h1:+Te28r5Zp4Vp69f82kcON9/BIF8f1BNXb0go2+achuc= +github.com/celestiaorg/cosmos-sdk v1.18.3-sdk-v0.46.14/go.mod h1:Og5KKgoBEiQlI6u56lDLG191pfknIdXctFn3COWLQP8= +github.com/celestiaorg/dagstore v0.0.0-20230824094345-537c012aa403 h1:Lj73O3S+KJx5/hgZ+IeOLEIoLsAveJN/7/ZtQQtPSVw= +github.com/celestiaorg/dagstore v0.0.0-20230824094345-537c012aa403/go.mod h1:cCGM1UoMvyTk8k62mkc+ReVu8iHBCtSBAAL4wYU7KEI= +github.com/celestiaorg/go-fraud v0.2.0 h1:aaq2JiW0gTnhEdac3l51UCqSyJ4+VjFGTTpN83V4q7I= +github.com/celestiaorg/go-fraud v0.2.0/go.mod h1:lNY1i4K6kUeeE60Z2VK8WXd+qXb8KRzfBhvwPkK6aUc= +github.com/celestiaorg/go-header v0.5.3 h1:8CcflT6aIlcQXKNWcMekoBNs3EU50mEmDp17gbn1pP4= +github.com/celestiaorg/go-header v0.5.3/go.mod h1:7BVR6myjRfACbqW1de6s8OjuK66XzHm8MpFNYr0G+nU= +github.com/celestiaorg/go-libp2p-messenger v0.2.0 h1:/0MuPDcFamQMbw9xTZ73yImqgTO3jHV7wKHvWD/Irao= +github.com/celestiaorg/go-libp2p-messenger v0.2.0/go.mod h1:s9PIhMi7ApOauIsfBcQwbr7m+HBzmVfDIS+QLdgzDSo= github.com/celestiaorg/merkletree v0.0.0-20210714075610-a84dc3ddbbe4 h1:CJdIpo8n5MFP2MwK0gSRcOVlDlFdQJO1p+FqdxYzmvc= github.com/celestiaorg/merkletree v0.0.0-20210714075610-a84dc3ddbbe4/go.mod h1:fzuHnhzj1pUygGz+1ZkB3uQbEUL4htqCGJ4Qs2LwMZA= -github.com/celestiaorg/nmt v0.10.0 h1:HLfVWvpagHz5+uiE0QSjzv350wLhhnybNmrxq9NHLKc= -github.com/celestiaorg/nmt v0.10.0/go.mod h1:3bqzTj8xKj0DgQUpOgZzoxvtNkC3MS/hTbQ6dn8SIa0= -github.com/celestiaorg/rsmt2d v0.5.0 h1:Wa0uNZUXl8lIMJnSunjoD65ktqBedXZD0z2ZU3xKYYw= -github.com/celestiaorg/rsmt2d v0.5.0/go.mod h1:EZ+O2KdCq8xI7WFwjATLdhtMdrdClmAs2w7zENDr010= +github.com/celestiaorg/nmt v0.20.0 h1:9i7ultZ8Wv5ytt8ZRaxKQ5KOOMo4A2K2T/aPGjIlSas= +github.com/celestiaorg/nmt v0.20.0/go.mod h1:Oz15Ub6YPez9uJV0heoU4WpFctxazuIhKyUtaYNio7E= +github.com/celestiaorg/quantum-gravity-bridge/v2 v2.1.2 h1:Q8nr5SAtDW5gocrBwqwDJcSS/JedqU58WwQA2SP+nXw= +github.com/celestiaorg/quantum-gravity-bridge/v2 v2.1.2/go.mod h1:s/LzLUw0WeYPJ6qdk4q46jKLOq7rc9Z5Mdrxtfpcigw= +github.com/celestiaorg/rsmt2d v0.11.0 h1:lcto/637WyTEZR3dLRoNvyuExfnUbxvdvKi3qz/2V4k= +github.com/celestiaorg/rsmt2d v0.11.0/go.mod h1:6Y580I3gVr0+OVFfW6m2JTwnCCmvW3WfbwSLfuT+HCA= github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= -github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= -github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= 
+github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= +github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/cp v0.1.0 h1:SE+dxFebS7Iik5LK0tsi1k9ZCxEaFX4AjQmoyA+1dJk= +github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cheekybits/genny v1.0.0 h1:uGGa4nei+j20rOSeDeP5Of12XVm7TGUd4dJA9RDitfE= +github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cheekybits/genny v1.0.0/go.mod h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wXkRAgjxjQ= github.com/cheggaaa/pb v1.0.27/go.mod h1:pQciLPpbU0oxA0h+VJYYLxO+XeDQb5pZijXscXHm81s= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/logex v1.2.0/go.mod h1:9+9sk7u7pGNWYMkh0hdiL++6OeibzJccyQU4p4MedaY= +github.com/chzyer/logex v1.2.1 h1:XHDu3E6q+gdHgsdTPH6ImJMIp436vR6MPtH8gP05QzM= +github.com/chzyer/logex v1.2.1/go.mod h1:JLbx6lG2kDbNRFnfkgvh4eRJRPX1QCoOIWomwysCBrQ= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/readline v1.5.0/go.mod h1:x22KAscuvRqlLoK9CsoYsmxoXZMMFVyOl86cAH8qUic= +github.com/chzyer/readline v1.5.1 h1:upd/6fQk4src78LMRzh5vItIt361/o4uq553V8B5sGI= +github.com/chzyer/readline v1.5.1/go.mod h1:Eh+b79XXUwfKfcPLepksvw2tcLE/Ct21YObkaSkeBlk= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/chzyer/test v0.0.0-20210722231415-061457976a23/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/chzyer/test v1.0.0 h1:p3BQDXSxOhOG0P9z6/hGnII4LGiEPOYBhs8asl/fC04= +github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38GC8= github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= +github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cloudflare/cloudflare-go v0.14.0/go.mod h1:EnwdgGMaFOruiPZRFSgn+TsQ3hQ7C/YWzIGLeu5c304= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= @@ -210,59 +425,97 @@ github.com/cncf/xds/go 
v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWH github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cockroachdb/apd/v2 v2.0.2 h1:weh8u7Cneje73dDh+2tEVLUvyBc89iwepWCD8b8034E= +github.com/cockroachdb/apd/v2 v2.0.2/go.mod h1:DDxRlzC2lo3/vSlmSoS7JkqbbrARPuFOGr0B9pvN3Gw= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= +github.com/cockroachdb/errors v1.8.1 h1:A5+txlVZfOqFBDa4mGz2bUWSp0aHElvHX2bKkdbQu+Y= +github.com/cockroachdb/errors v1.8.1/go.mod h1:qGwQn6JmZ+oMjuLwjWzUNqblqk0xl4CVV3SQbGwK7Ac= +github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f h1:o/kfcElHqOiXqcou5a3rIlMc7oJbMQkeLk0VQJ7zgqY= +github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI= +github.com/cockroachdb/pebble v0.0.0-20230906160148-46873a6a7a06 h1:T+Np/xtzIjYM/P5NAw0e2Rf1FGvzDau1h54MKvx8G7w= +github.com/cockroachdb/pebble v0.0.0-20230906160148-46873a6a7a06/go.mod h1:bynZ3gvVyhlvjLI7PT6dmZ7g76xzJ7HpxfjgkzCGz6s= +github.com/cockroachdb/redact v1.0.8 h1:8QG/764wK+vmEYoOlfobpe12EQcS81ukx/a4hdVMxNw= +github.com/cockroachdb/redact v1.0.8/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= +github.com/cockroachdb/sentry-go v0.6.1-cockroachdb.2 h1:IKgmqgMQlVJIZj19CdocBeSfSaiCbEBZGKODaixqtHM= +github.com/cockroachdb/sentry-go v0.6.1-cockroachdb.2/go.mod h1:8BT+cPK6xvFOcRlk0R8eg+OTkcqI6baNH4xAkpiYVvQ= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= -github.com/coinbase/rosetta-sdk-go v0.7.8 h1:op/O3/ZngTfcrZnp3p/TziRfKGdo7AUZGUmBu6+8qCc= -github.com/coinbase/rosetta-sdk-go v0.7.8/go.mod h1:vB6hZ0ZnZmln3ThA4x0mZvOAPDJ5BhfgnjH76hxoy10= -github.com/confio/ics23/go v0.7.0 h1:00d2kukk7sPoHWL4zZBZwzxnpA2pec1NPdwbSokJ5w8= -github.com/confio/ics23/go v0.7.0/go.mod h1:E45NqnlpxGnpfTWL/xauN7MRwEE28T4Dd4uraToOaKg= -github.com/containerd/cgroups v0.0.0-20201119153540-4cbc285b3327 h1:7grrpcfCtbZLsjtB0DgMuzs1umsJmpzaHMZ6cO6iAWw= +github.com/codegangsta/cli v1.20.0/go.mod h1:/qJNoX69yVSKu5o4jLyXAENLRyk1uhi7zkbQ3slBdOA= +github.com/coinbase/kryptology v1.8.0/go.mod h1:RYXOAPdzOGUe3qlSFkMGn58i3xUA8hmxYHksuq+8ciI= +github.com/coinbase/rosetta-sdk-go v0.7.9 h1:lqllBjMnazTjIqYrOGv8h8jxjg9+hJazIGZr9ZvoCcA= +github.com/coinbase/rosetta-sdk-go v0.7.9/go.mod h1:0/knutI7XGVqXmmH4OQD8OckFrbQ8yMsUZTG7FXCR2M= +github.com/cometbft/cometbft-db v0.7.0 h1:uBjbrBx4QzU0zOEnU8KxoDl18dMNgDh+zZRUE0ucsbo= +github.com/cometbft/cometbft-db v0.7.0/go.mod h1:yiKJIm2WKrt6x8Cyxtq9YTEcIMPcEe4XPxhgX59Fzf0= +github.com/confio/ics23/go v0.9.1 h1:3MV46eeWwO3xCauKyAtuAdJYMyPnnchW4iLr2bTw6/U= +github.com/confio/ics23/go v0.9.1/go.mod h1:4LPZ2NYqnYIVRklaozjNR1FScgDJ2s5Xrp+e/mYVRak= +github.com/consensys/bavard v0.1.8-0.20210406032232-f3452dc9b572/go.mod h1:Bpd0/3mZuaj6Sj+PqrmIquiOKy397AKGThQPaGzNXAQ= +github.com/consensys/bavard v0.1.8-0.20210915155054-088da2f7f54a/go.mod h1:9ItSMtA/dXMAiL7BG6bqW2m3NdSEObYWoH223nGHukI= +github.com/consensys/bavard v0.1.13 h1:oLhMLOFGTLdlda/kma4VOJazblc7IM5y5QPd2A/YjhQ= +github.com/consensys/bavard v0.1.13/go.mod h1:9ItSMtA/dXMAiL7BG6bqW2m3NdSEObYWoH223nGHukI= +github.com/consensys/gnark-crypto v0.4.1-0.20210426202927-39ac3d4b3f1f/go.mod h1:815PAHg3wvysy0SyIqanF8gZ0Y1wjk/hrDHD/iT88+Q= +github.com/consensys/gnark-crypto v0.5.3/go.mod 
h1:hOdPlWQV1gDLp7faZVeg8Y0iEPFaOUnCc4XeCCk96p0= +github.com/consensys/gnark-crypto v0.12.1 h1:lHH39WuuFgVHONRl3J0LRBtuYdQTumFSDtJF7HpyG8M= +github.com/consensys/gnark-crypto v0.12.1/go.mod h1:v2Gy7L/4ZRosZ7Ivs+9SfUDr0f5UlG+EM5t7MPHiLuY= github.com/containerd/cgroups v0.0.0-20201119153540-4cbc285b3327/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= -github.com/containerd/continuity v0.2.1 h1:/EeEo2EtN3umhbbgCveyjifoMYg0pS+nMMEemaYw634= +github.com/containerd/cgroups v1.0.3/go.mod h1:/ofk34relqNjSGyqPrmEULrO4Sc8LJhvJmWbUCUKqj8= +github.com/containerd/cgroups v1.0.4/go.mod h1:nLNQtsF7Sl2HxNebu77i1R0oDlhiTG+kO4JTrUzo6IA= +github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM= +github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw= +github.com/containerd/continuity v0.3.0 h1:nisirsYROK15TAMVukJOUyGJjz4BNQJBVsNvAXZJ/eg= +github.com/containerd/continuity v0.3.0/go.mod h1:wJEAIwKOm/pBZuBd0JmeTvnLquTB1Ag8espWhkykbPM= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= -github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI= github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/coreos/go-systemd/v22 v22.4.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= +github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/cosmos/btcutil v1.0.4 h1:n7C2ngKXo7UC9gNyMNLbzqz7Asuf+7Qv4gnX/rOdQ44= -github.com/cosmos/btcutil v1.0.4/go.mod h1:Ffqc8Hn6TJUdDgHBwIZLtrLQC1KdJ9jGJl/TvgUaxbU= -github.com/cosmos/cosmos-proto v1.0.0-alpha7 h1:yqYUOHF2jopwZh4dVQp3xgqwftE5/2hkrwIV6vkUbO0= -github.com/cosmos/cosmos-proto v1.0.0-alpha7/go.mod h1:dosO4pSAbJF8zWCzCoTWP7nNsjcvSUBQmniFxDg5daw= +github.com/corpix/uarand v0.1.1/go.mod h1:SFKZvkcRoLqVRFZ4u25xPmp6m9ktANfbpXZ7SJ0/FNU= +github.com/cosmos/btcutil v1.0.5 h1:t+ZFcX77LpKtDBhjucvnOH8C2l2ioGsBNEQ3jef8xFk= +github.com/cosmos/btcutil v1.0.5/go.mod h1:IyB7iuqZMJlthe2tkIFL33xPyzbFYP0XVdS8P5lUPis= +github.com/cosmos/cosmos-proto v1.0.0-alpha8 h1:d3pCRuMYYvGA5bM0ZbbjKn+AoQD4A7dyNG2wzwWalUw= +github.com/cosmos/cosmos-proto v1.0.0-alpha8/go.mod h1:6/p+Bc4O8JKeZqe0VqUGTX31eoYqemTT4C1hLCWsO7I= github.com/cosmos/cosmos-sdk/api v0.1.0 h1:xfSKM0e9p+EJTMQnf5PbWE6VT8ruxTABIJ64Rd064dE= github.com/cosmos/cosmos-sdk/api v0.1.0/go.mod h1:CupqQBskAOiTXO1XDZ/wrtWzN/wTxUvbQmOqdUhR8wI= -github.com/cosmos/cosmos-sdk/db v1.0.0-beta.1 
h1:6YvzjQtc+cDwCe9XwYPPa8zFCxNG79N7vmCjpK+vGOg= -github.com/cosmos/cosmos-sdk/errors v1.0.0-beta.3 h1:Ep7FHNViVwwGnwLFEPewZYsyN2CJNVMmMvFmtNQtbnw= -github.com/cosmos/cosmos-sdk/errors v1.0.0-beta.3/go.mod h1:HFea93YKmoMJ/mNKtkSeJZDtyJ4inxBsUK928KONcqo= +github.com/cosmos/go-bip39 v0.0.0-20180819234021-555e2067c45d/go.mod h1:tSxLoYXyBmiFeKpvmq4dzayMdCjCnu8uqmCysIGBT2Y= github.com/cosmos/go-bip39 v1.0.0 h1:pcomnQdrdH22njcAatO0yWojsUnCO3y2tNoV1cb6hHY= github.com/cosmos/go-bip39 v1.0.0/go.mod h1:RNJv0H/pOIVgxw6KS7QeX2a0Uo0aKUlfhZ4xuwvCdJw= +github.com/cosmos/gogoproto v1.4.11 h1:LZcMHrx4FjUgrqQSWeaGC1v/TeuVFqSLa43CC6aWR2g= +github.com/cosmos/gogoproto v1.4.11/go.mod h1:/g39Mh8m17X8Q/GDEs5zYTSNaNnInBSohtaxzQnYq1Y= github.com/cosmos/gorocksdb v1.2.0 h1:d0l3jJG8M4hBouIZq0mDUHZ+zjOx044J3nGRskwTb4Y= github.com/cosmos/gorocksdb v1.2.0/go.mod h1:aaKvKItm514hKfNJpUJXnnOWeBnk2GL4+Qw9NHizILw= -github.com/cosmos/iavl v0.18.0 h1:02ur4vnalMR2GuWCFNkuseUcl/BCVmg9tOeHOGiZOkE= -github.com/cosmos/iavl v0.18.0/go.mod h1:L0VZHfq0tqMNJvXlslGExaaiZM7eSm+90Vh9QUbp6j4= -github.com/cosmos/ibc-go/v4 v4.0.0-rc0 h1:zeMr6PNE7L300AcGkrMwRvtp62/RpGc7qU1LwhUcPKc= -github.com/cosmos/ibc-go/v4 v4.0.0-rc0/go.mod h1:4LK+uPycPhebJrJ8ebIqvsMEZ0lVRVNTiEyeI9zfB0U= -github.com/cosmos/ledger-cosmos-go v0.11.1 h1:9JIYsGnXP613pb2vPjFeMMjBI5lEDsEaF6oYorTy6J4= -github.com/cosmos/ledger-cosmos-go v0.11.1/go.mod h1:J8//BsAGTo3OC/vDLjMRFLW6q0WAaXvHnVc7ZmE8iUY= -github.com/cosmos/ledger-go v0.9.2 h1:Nnao/dLwaVTk1Q5U9THldpUMMXU94BOTWPddSmVB6pI= -github.com/cosmos/ledger-go v0.9.2/go.mod h1:oZJ2hHAZROdlHiwTg4t7kP+GKIIkBT+o6c9QWFanOyI= +github.com/cosmos/iavl v0.19.6 h1:XY78yEeNPrEYyNCKlqr9chrwoeSDJ0bV2VjocTk//OU= +github.com/cosmos/iavl v0.19.6/go.mod h1:X9PKD3J0iFxdmgNLa7b2LYWdsGd90ToV5cAONApkEPw= +github.com/cosmos/ibc-go/v6 v6.2.0 h1:HKS5WNxQrlmjowHb73J9LqlNJfvTnvkbhXZ9QzNTU7Q= +github.com/cosmos/ibc-go/v6 v6.2.0/go.mod h1:+S3sxcNwOhgraYDJAhIFDg5ipXHaUnJrg7tOQqGyWlc= +github.com/cosmos/ledger-cosmos-go v0.13.2 h1:aY0KZSmUwNKbBm9OvbIjvf7Ozz2YzzpAbgvN2C8x2T0= +github.com/cosmos/ledger-cosmos-go v0.13.2/go.mod h1:HENcEP+VtahZFw38HZ3+LS3Iv5XV6svsnkk9vdJtLr8= +github.com/cpuguy83/go-md2man v1.0.10 h1:BSKMNlYxDvnunlTymqtgONjNnaRV1sTpcovwwjF22jk= github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/cpuguy83/go-md2man/v2 v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM= +github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/crackcomm/go-gitignore v0.0.0-20170627025303-887ab5e44cc3 h1:HVTnpeuvF6Owjd5mniCL8DEXo7uYXdQEmOP4FJbV5tg= +github.com/crackcomm/go-gitignore v0.0.0-20170627025303-887ab5e44cc3/go.mod h1:p1d6YEZWvFzEh4KLyvBcVSnrfNDDvK2zfK/4x2v/4pE= +github.com/crate-crypto/go-kzg-4844 v0.3.0 h1:UBlWE0CgyFqqzTI+IFyCzA7A3Zw4iip6uzRv5NIXG0A= +github.com/crate-crypto/go-kzg-4844 v0.3.0/go.mod h1:SBP7ikXEgDnUPONgm33HtuDZEDtWa3L4QtN1ocJSEQ4= github.com/creachadair/taskgroup v0.3.2 h1:zlfutDS+5XG40AOxcHDSThxKzns8Tnr9jnr6VqkYlkM= github.com/creachadair/taskgroup v0.3.2/go.mod h1:wieWwecHVzsidg2CsUnFinW1faVN4+kq+TDlRJQ0Wbk= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty 
v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/cristalhq/jwt v1.2.0 h1:fHmMkFJvEbS4o04aQP8BmtJg7fqkYvd7r8er3sUdS4Q= +github.com/cristalhq/jwt v1.2.0/go.mod h1:QQFazsDzoqeucUEEV0h16uPTZXBAi2SVA8cQ9JEDuFw= github.com/cskr/pubsub v1.0.2 h1:vlOzMhl6PFn60gRlTQQsIfVwaPB/B/8MziK8FhEPt/0= github.com/cskr/pubsub v1.0.2/go.mod h1:/8MzYXk/NJAz782G8RPkFzXTZVu63VotefPnR9TIRis= -github.com/danieljoos/wincred v1.0.2 h1:zf4bhty2iLuwgjgpraD2E9UbvO+fe54XXGJbOwe23fU= -github.com/danieljoos/wincred v1.0.2/go.mod h1:SnuYRW9lp1oJrZX/dXJqr0cPK5gYXqx3EJbmjhLdK9U= +github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c/go.mod h1:GyV+0YP4qX0UQ7r2MoYZ+AvYDp12OF5yg4q8rGnyNh4= +github.com/danieljoos/wincred v1.1.2 h1:QLdCxFs1/Yl4zduvBdcHB8goaYk9RARS2SgLLRuAyr0= +github.com/danieljoos/wincred v1.1.2/go.mod h1:GijpziifJoIBfYh+S7BbkdUTU4LfM+QnGqR5Vl2tAx0= +github.com/dave/jennifer v1.2.0/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhrIygKg= github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= @@ -270,44 +523,69 @@ github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/davidlazar/go-crypto v0.0.0-20170701192655-dcfb0a7ac018/go.mod h1:rQYf4tfk5sSwFsnDg3qYaBxSjsD9S8+59vW0dKUgme4= github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c h1:pFUpOrbxDR6AkioZ1ySsx5yxlDQZ8stG2b88gTPxgJU= github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c/go.mod h1:6UhI8N9EjYm1c2odKpFpAYeR8dsBeM7PtzQhRgxRr9U= -github.com/decred/dcrd/crypto/blake256 v1.0.0 h1:/8DMNYp9SGi5f0w7uCm6d6M4OU2rGFK09Y2A4Xv7EE0= +github.com/deckarep/golang-set v1.8.0/go.mod h1:5nI87KwE7wgsBU1F4GKAw2Qod7p5kyS383rP6+o6qqo= +github.com/deckarep/golang-set/v2 v2.1.0 h1:g47V4Or+DUdzbs8FxCCmgb6VYd+ptPAngjM6dtGktsI= +github.com/deckarep/golang-set/v2 v2.1.0/go.mod h1:VAky9rY/yGXJOLEDv3OMci+7wtDpOF4IN+y82NBOac4= github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc= -github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 h1:YLtO71vCjJRCBcrPMtQ9nqBsqpA1m5sE92cU+pd5Mcc= +github.com/decred/dcrd/crypto/blake256 v1.0.1 h1:7PltbUIQB7u/FfZ39+DGa/ShuMyJ5ilcvdfma9wOH6Y= +github.com/decred/dcrd/crypto/blake256 v1.0.1/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1/go.mod h1:hyedUtir6IdtD/7lIxGeCxkaw7y45JueMRL4DIyJDKs= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0/go.mod h1:DZGJHZMqrU4JJqFAWUS2UO1+lbSKsdiOoYi9Zzey7Fc= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 h1:8UrgZ3GkP4i/CLijOJx79Yu+etlyjdBU4sfcs2WYQMs= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0= github.com/decred/dcrd/lru v1.0.0/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218= +github.com/deepmap/oapi-codegen v1.6.0/go.mod h1:ryDa9AgbELGeB+YEXE1dR53yAjHwFvE9iAUlWl9Al3M= +github.com/deepmap/oapi-codegen v1.8.2 h1:SegyeYGcdi0jLLrpbCMoJxnUUn8GBXHsvr4rbzjuhfU= +github.com/deepmap/oapi-codegen v1.8.2/go.mod h1:YLgSKSDv/bZQB7N4ws6luhozi3cEdRktEqrX88CvjIw= github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f h1:U5y3Y5UE0w7amNe7Z5G/twsBW0KEalRQXZzf8ufSh9I= github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f/go.mod h1:xH/i4TFMt8koVQZ6WFms69WAsDWr2XsYL3Hkl7jkoLE= github.com/dgraph-io/badger 
v1.5.5-0.20190226225317-8115aed38f8f/go.mod h1:VZxzAIRPHRVNRKRo6AXrX9BJegn6il06VMTZVJYCIjQ= github.com/dgraph-io/badger v1.6.0-rc1/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4= github.com/dgraph-io/badger v1.6.0/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4= github.com/dgraph-io/badger v1.6.1/go.mod h1:FRmFw3uxvcpa8zG3Rxs0th+hCLIuaQg8HlNV5bjgnuU= +github.com/dgraph-io/badger v1.6.2 h1:mNw0qs90GVgGGWylh0umH5iag1j6n/PeJtNvL6KY/x8= github.com/dgraph-io/badger v1.6.2/go.mod h1:JW2yswe3V058sS0kZ2h/AXeDSqFjxnZcRrVH//y2UQE= -github.com/dgraph-io/badger/v2 v2.2007.3/go.mod h1:26P/7fbL4kUZVEVKLAKXkBXKOydDmM2p1e+NhhnBCAE= github.com/dgraph-io/badger/v2 v2.2007.4 h1:TRWBQg8UrlUhaFdco01nO2uXwzKS7zd+HVdwV/GHc4o= github.com/dgraph-io/badger/v2 v2.2007.4/go.mod h1:vSw/ax2qojzbN6eXHIx6KPKtCSHJN/Uz0X0VPruTIhk= +github.com/dgraph-io/badger/v4 v4.2.1-0.20240106094458-1c417aa3799c h1:Z9rm0wkQBM+VF7vpyrbKnCcSbww0PKygLoptTpkX3d4= +github.com/dgraph-io/badger/v4 v4.2.1-0.20240106094458-1c417aa3799c/go.mod h1:T/uWAYxrXdaXw64ihI++9RMbKTCpKd/yE9+saARew7k= github.com/dgraph-io/ristretto v0.0.2/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= -github.com/dgraph-io/ristretto v0.1.0 h1:Jv3CGQHp9OjuMBSne1485aDpUkTKEcUqF+jm/LuerPI= -github.com/dgraph-io/ristretto v0.1.0/go.mod h1:fux0lOrBhrVCJd3lcTHsIJhq1T2rokOu6v9Vcb3Q9ug= +github.com/dgraph-io/ristretto v0.0.3/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= +github.com/dgraph-io/ristretto v0.1.1 h1:6CWw5tJNgpegArSHpNHJKldNeq03FQCwYvfMVWajOK8= +github.com/dgraph-io/ristretto v0.1.1/go.mod h1:S1GPSBCYCIhmVNfcth17y2zZtQT6wzkzgwUve0VDWWA= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8/go.mod h1:VMaSuZ+SZcx/wljOQKvp5srsbCiKDEb6K2wC4+PiBmQ= github.com/dgryski/go-farm v0.0.0-20190104051053-3adb47b1fb0f/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 h1:fAjc9m62+UWV/WAFKLNi6ZS0675eEUC9y3AlwSbQu1Y= github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/dlclark/regexp2 v1.4.1-0.20201116162257-a2a8dda75c91/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc= +github.com/dnaeon/go-vcr v1.1.0/go.mod h1:M7tiix8f0r6mKKJ3Yq/kqU1OYf3MnfmBWVbPx/yU9ko= +github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= +github.com/docker/docker v1.4.2-0.20180625184442-8e610b2b55bf/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= -github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= +github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= +github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/dop251/goja v0.0.0-20211011172007-d99e4b8cbf48/go.mod 
h1:R9ET47fwRVRPZnOGvHxxhuZcbrMCuiqOz3Rlrh4KSnk= +github.com/dop251/goja_nodejs v0.0.0-20210225215109-d91c329300e7/go.mod h1:hn7BA7c8pLvoGndExHudxTDKZ84Pyvv+90pbBjbTz0Y= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/dvsekhvalnov/jose2go v0.0.0-20200901110807-248326c1351b h1:HBah4D48ypg3J7Np4N+HY/ZR76fx3HEUGxDU6Uk39oQ= -github.com/dvsekhvalnov/jose2go v0.0.0-20200901110807-248326c1351b/go.mod h1:7BvyPhdbLxMXIYTFPLsyJRFMsKmOZnQmzh6Gb+uquuM= +github.com/dustin/go-humanize v1.0.1-0.20200219035652-afde56e7acac h1:opbrjaN/L8gg6Xh5D04Tem+8xVcz6ajZlGCs49mQgyg= +github.com/dustin/go-humanize v1.0.1-0.20200219035652-afde56e7acac/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dvsekhvalnov/jose2go v1.5.0 h1:3j8ya4Z4kMCwT5nXIKFSV84YS+HdqSSO0VsTQxaLAeM= +github.com/dvsekhvalnov/jose2go v1.5.0/go.mod h1:QsHjhyTlD/lAVqn/NSbVZmSCGeDehTB/mPZadG+mhXU= github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= +github.com/eclipse/paho.mqtt.golang v1.2.0/go.mod h1:H9keYFcgq3Qr5OUJm/JZI/i6U7joQ8SYLhZwfeOo6Ts= github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= -github.com/elastic/gosigar v0.12.0 h1:AsdhYCJlTudhfOYQyFNgx+fIVTfrDO0V1ST0vHgiapU= github.com/elastic/gosigar v0.12.0/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= +github.com/elastic/gosigar v0.14.2 h1:Dg80n8cr90OZ7x+bAax/QjoW/XqTI11RmA79ZwIm9/4= +github.com/elastic/gosigar v0.14.2/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= @@ -319,39 +597,74 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.m github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/etclabscore/go-jsonschema-walk v0.0.6 h1:DrNzoKWKd8f8XB5nFGBY00IcjakRE22OTI12k+2LkyY= +github.com/etclabscore/go-jsonschema-walk v0.0.6/go.mod h1:VdfDY72AFAiUhy0ZXEaWSpveGjMT5JcDIm903NGqFwQ= +github.com/etclabscore/go-openrpc-reflect v0.0.37 h1:IH0e7JqIvR9OhbbFWi/BHIkXrqbR3Zyia3RJ733eT6c= +github.com/etclabscore/go-openrpc-reflect v0.0.37/go.mod h1:0404Ky3igAasAOpyj1eESjstTyneBAIk5PgJFbK4s5E= +github.com/ethereum/c-kzg-4844 v0.3.1 h1:sR65+68+WdnMKxseNWxSJuAv2tsUrihTpVBTfM/U5Zg= +github.com/ethereum/c-kzg-4844 v0.3.1/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0= +github.com/ethereum/go-ethereum v1.10.17/go.mod h1:Lt5WzjM07XlXc95YzrhosmR4J9Ahd6X2wyEV2SvGhk0= +github.com/ethereum/go-ethereum v1.13.2 h1:g9mCpfPWqCA1OL4e6C98PeVttb0HadfBRuKTGvMnOvw= 
+github.com/ethereum/go-ethereum v1.13.2/go.mod h1:gkQ5Ygi64ZBh9M/4iXY1R8WqoNCx1Ey0CkYn2BD4/fw= github.com/facebookgo/ensure v0.0.0-20200202191622-63f1cf65ac4c h1:8ISkoahWXwZR41ois5lSJBSVw4D0OV19Ht/JSTzvSv0= +github.com/facebookgo/ensure v0.0.0-20200202191622-63f1cf65ac4c/go.mod h1:Yg+htXGokKKdzcwhuNDwVvN+uBxDGXJ7G/VN1d8fa64= github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 h1:JWuenKqqX8nojtoVVWjGfOF9635RETekkoH6Cc9SX0A= +github.com/facebookgo/stack v0.0.0-20160209184415-751773369052/go.mod h1:UbMTZqLaRiH3MsBH8va0n7s1pQYcu3uTb8G4tygF4Zg= github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4 h1:7HZCaLC5+BZpmbhCOZJ293Lz68O7PYrF2EzeiFMwCLk= +github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4/go.mod h1:5tD+neXqOorC30/tWg0LCSkrqj/AR6gu8yY8/fpw1q0= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= +github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= github.com/felixge/httpsnoop v1.0.1 h1:lvB5Jl89CsZtGIWuTcDM1E/vkVs49/Ml7JJe07l8SPQ= github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/filecoin-project/go-jsonrpc v0.3.1 h1:qwvAUc5VwAkooquKJmfz9R2+F8znhiqcNHYjEp/NM10= +github.com/filecoin-project/go-jsonrpc v0.3.1/go.mod h1:jBSvPTl8V1N7gSTuCR4bis8wnQnIjHbRPpROol6iQKM= +github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 h1:FtmdgXiUlNeRsoNMFlKLDt+S+6hbjVMEW6RGQ7aUf7c= +github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= +github.com/flynn/noise v0.0.0-20180327030543-2492fe189ae6/go.mod h1:1i71OnUq3iUe1ma7Lr6yG6/rjvM3emb6yoL7xLFzcVQ= github.com/flynn/noise v1.0.0 h1:DlTHqmzmvcEiKj+4RYo/imoswx/4r6iBlCMfVtrMXpQ= github.com/flynn/noise v1.0.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag= +github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= +github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk= github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY= github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= +github.com/frankban/quicktest v1.14.0/go.mod h1:NeW+ay9A/U67EYXNFA1nPE8e/tnQv/09mUdL/ijj8og= github.com/frankban/quicktest v1.14.2/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps= -github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE= +github.com/frankban/quicktest v1.14.3/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps= +github.com/frankban/quicktest v1.14.4/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= +github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= +github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= 
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI= +github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU= github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= -github.com/gammazero/deque v0.1.0 h1:f9LnNmq66VDeuAlSAapemq/U7hJ2jpIWa4c09q8Dlik= -github.com/gammazero/deque v0.1.0/go.mod h1:KQw7vFau1hHuM8xmI9RbgKFbAsQFWmBpqQ2KenFLk6M= -github.com/gammazero/workerpool v1.1.2 h1:vuioDQbgrz4HoaCi2q1HLlOXdpbap5AET7xu5/qj87g= -github.com/gammazero/workerpool v1.1.2/go.mod h1:UelbXcO0zCIGFcufcirHhq2/xtLXJdQ29qZNlXG9OjQ= +github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= +github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= +github.com/gabriel-vasile/mimetype v1.4.1/go.mod h1:05Vi0w3Y9c/lNvJOdmIwvrrAhX3rYhfQQCaf9VJcv7M= +github.com/gammazero/deque v0.2.0 h1:SkieyNB4bg2/uZZLxvya0Pq6diUlwx7m2TeT7GAIWaA= +github.com/gammazero/deque v0.2.0/go.mod h1:LFroj8x4cMYCukHJDbxFCkT+r9AndaJnFMuZDV34tuU= +github.com/gammazero/workerpool v1.1.3 h1:WixN4xzukFoN0XSeXF6puqEqFTl2mECI9S6W44HWy9Q= +github.com/gammazero/workerpool v1.1.3/go.mod h1:wPjyBLDbyKnUn2XwwyD3EEwo9dHutia9/fwNmSHWACc= +github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff h1:tY80oXqGNY4FhTFhk+o9oFHGINQ/+vhlm8HFzi6znCI= +github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww= +github.com/getkin/kin-openapi v0.53.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4= +github.com/getkin/kin-openapi v0.61.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE= github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= github.com/gin-gonic/gin v1.6.3/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M= github.com/gin-gonic/gin v1.7.0 h1:jGB9xAJQ12AIGNB4HguylppmDK1Am9ppF7XnGXXJuoU= +github.com/gin-gonic/gin v1.7.0/go.mod h1:jD2toBW3GZUr5UMcdrwQA10I7RuaFOl/SGeDjXkfUtY= github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= +github.com/glycerine/go-unsnap-stream v0.0.0-20180323001048-9f0cb55181dd/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE= +github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24= github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98= +github.com/go-chi/chi/v5 v5.0.0/go.mod h1:BBug9lr0cqtdAhsu6R4AAdvufI0/XBzAQSsUqJpoZOs= github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= @@ -363,15 +676,41 @@ github.com/go-kit/kit v0.12.0 h1:e4o3o3IsBfAKQh5Qbbiqyfu97Ku7jrO/JbohvztANh4= github.com/go-kit/kit v0.12.0/go.mod h1:lHd+EkCZPIwYItmGDDRdhinkzX2A1sj+M9biaEaizzs= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= +github.com/go-kit/log v0.2.1 
h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU= +github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA= github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= +github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= +github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8= +github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= +github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= +github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= +github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= +github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= +github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= +github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= +github.com/go-openapi/jsonreference v0.19.4 h1:3Vw+rh13uq2JFNxgnMTGE1rnoieU9FmyE1gvnyylsYg= +github.com/go-openapi/jsonreference v0.19.4/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg= +github.com/go-openapi/spec v0.19.7/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk= +github.com/go-openapi/spec v0.19.11 h1:ogU5q8dtp3MMPn59a9VRrPKVxvJHEs5P7yNMR5sNnis= +github.com/go-openapi/spec v0.19.11/go.mod h1:vqK/dIdLGCosfvYsQV3WfC7N3TiZSnGY2RZKoFK7X28= +github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.8/go.mod h1:ao+8BpOPyKdpQz3AOJfbeEVpLmWAvlT1IfTe5McPyhY= +github.com/go-openapi/swag v0.19.11 h1:RFTu/dlFySpyVvJDfp/7674JY4SDglYWKztbiIGFpmc= +github.com/go-openapi/swag v0.19.11/go.mod h1:Uc0gKkdR+ojzsEpjh39QChyu92vPgIr72POcgHMAgSY= github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= github.com/go-playground/locales v0.13.0 h1:HyWk6mgj5qFqCT5fjGBuRArbVDfE4hi8+e8ceBS/t7Q= github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= @@ -379,27 +718,45 @@ github.com/go-playground/universal-translator v0.17.0 
h1:icxd5fm+REJzpZx7ZfpaD87 github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= github.com/go-playground/validator/v10 v10.2.0/go.mod h1:uOYAAleCW8F/7oMFd6aG0GOhaH6EGOAJShg8Id5JGkI= github.com/go-playground/validator/v10 v10.4.1 h1:pH2c5ADXtd66mxoE0Zm9SUhxE20r7aM3F26W0hOn+GE= +github.com/go-playground/validator/v10 v10.4.1/go.mod h1:nlOn6nFhuKACm19sB/8EGNn9GlaMV7XkbRSipzJ0Ii4= +github.com/go-sourcemap/sourcemap v2.1.3+incompatible/go.mod h1:F8jJfvm2KbVjc5NqelyYJmf/v5J0dwNLS2mL4sNA1Jg= github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= +github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I= +github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw= +github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= -github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee h1:s+21KNqlpePfkah2I+gwHF8xmJWRjooY+5248k6m4A0= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= +github.com/go-yaml/yaml v2.1.0+incompatible/go.mod h1:w2MrLa16VYP0jy6N7M5kHaCkaLENm+P+Tv+MfurjSw0= github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo= -github.com/gobwas/pool v0.2.0 h1:QEmUOlnSjWtnpRGHF3SauEiOsy82Cup83Vf2LcMlnc8= +github.com/gobwas/httphead v0.1.0 h1:exrUm0f4YX0L7EBwZHuCF4GDp8aJfVeBrlLQrs6NqWU= +github.com/gobwas/httphead v0.1.0/go.mod h1:O/RXo79gxV8G+RqlR/otEwx4Q36zl9rqC5u12GKvMCM= github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= -github.com/gobwas/ws v1.0.2 h1:CoAavW/wd/kulfZmSIBt6p24n4j7tHgNVCjsfHVNUbo= +github.com/gobwas/pool v0.2.1 h1:xfeeEhW7pwmX8nuLVlqbzVc7udMDrwetjEv+TZIz1og= +github.com/gobwas/pool v0.2.1/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM= +github.com/gobwas/ws v1.2.1 h1:F2aeBZrm2NDsc7vbovKrWSogd4wvfAxg0FQ89/iqOTk= +github.com/gobwas/ws v1.2.1/go.mod h1:hRKAFb8wOxFROYNsT1bqfWnhX+b5MFeJM9r2ZSwg/KY= github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 h1:ZpnhV/YsD2/4cESfV5+Hoeu/iUR3ruzNvZ+yQfO03a0= github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/godbus/dbus/v5 v5.0.4 h1:9349emZab16e7zQvpmsbtjc18ykshndd8y2PG3sgJbA= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk= +github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw= +github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= +github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gogo/gateway v1.1.0 
h1:u0SuhL9+Il+UbjM9VIE3ntfRujKbvVpFvNB4HbjeVQ0= github.com/gogo/gateway v1.1.0/go.mod h1:S7rR8FRQyG3QFESeSv4l2WnsyzlCLG0CzBbUUo/mbic= github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= +github.com/golang-jwt/jwt/v4 v4.3.0 h1:kHL1vqdqWNfATmA0FNMdmZNMyZI1U6O31X4rlIPoBog= +github.com/golang-jwt/jwt/v4 v4.3.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= +github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= +github.com/golang/geo v0.0.0-20190916061304-5b978397cfec/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ= -github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= +github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo= +github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191027212112-611e8accdfc9/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -435,17 +792,22 @@ github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= -github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk= +github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golangci/lint-1 v0.0.0-20181222135242-d2cdd8c08219/go.mod h1:/X8TswGSh1pIozq4ZwCfxS0WA5JGXguxk94ar/4c87Y= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= -github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= +github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= +github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= +github.com/google/flatbuffers v1.11.0/go.mod 
h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= +github.com/google/flatbuffers v1.12.1 h1:MVlul7pQNoDzWRLTw5imwYsl+usrS1TXG2H4jg6ImGw= +github.com/google/flatbuffers v1.12.1/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -459,12 +821,15 @@ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= -github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg= github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.1.1-0.20200604201612-c04b05f3adfa/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gopacket v1.1.17/go.mod h1:UdDNZ1OO62aGYVnPhxT1U6aI7ukYtA/kB8vaU0diBUM= @@ -474,8 +839,9 @@ github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPg github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.2.1 h1:d8MncMlErDFTwQGBK1xhv026j9kqhvw1Qv9IbWT1VLQ= github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= +github.com/google/martian/v3 v3.3.2 h1:IqNFLAmvJOgVlpdEBiQbDc2EwKW77amAycfTuWKdfvw= +github.com/google/martian/v3 v3.3.2/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= github.com/google/orderedcode v0.0.1 h1:UzfcAexk9Vhv8+9pNOgRu41f16lHq725vPwnSeiG/Us= github.com/google/orderedcode v0.0.1/go.mod h1:iVyU4/qPKHY5h/wSd6rZZCDcLJNxiWO6dvsYES2Sb20= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= @@ -490,16 +856,29 @@ github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof 
v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20221203041831-ce31453925ec/go.mod h1:dDKJzRmX4S37WGHujM7tX//fmj1uioxKzKxz3lo4HJo= +github.com/google/pprof v0.0.0-20231023181126-ff6d637d2a7b h1:RMpPgZTSApbPf7xaVel+QkoGPRLFLrwFO89uDUHEGf0= +github.com/google/pprof v0.0.0-20231023181126-ff6d637d2a7b/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= +github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= +github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.5.0 h1:1p67kYwdtXjb0gL0BPiP1Av9wiZPo5A8z2cWkTZ+eyU= +github.com/google/uuid v1.5.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= +github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= +github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg= +github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= +github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= @@ -508,39 +887,51 @@ github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pf github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM= -github.com/googleapis/gax-go/v2 v2.4.0 h1:dS9eYAjhrE2RjmzYw2XAPvcXfmcQLtFEQWn0CR82awk= github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c= +github.com/googleapis/gax-go/v2 v2.5.1/go.mod h1:h6B0KMMFNtI2ddbGJn3T3ZbwkeT6yqEF02fYlzkUCyo= +github.com/googleapis/gax-go/v2 v2.6.0/go.mod h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMdcIDwU/6+DDoY= +github.com/googleapis/gax-go/v2 v2.12.0 h1:A+gCJKdRfqXkr+BIRGtZLibNXf0m1f9E4HG56etFpas= +github.com/googleapis/gax-go/v2 
v2.12.0/go.mod h1:y+aIqrI5eb1YGMVJfuV3185Ts/D7qKpsEkdD5+I6QGU= +github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gopherjs/gopherjs v0.0.0-20190430165422-3e4dfb77656c h1:7lF+Vz0LqiRidnzC1Oq86fpX1q/iEv2KJdrCtttYjT4= +github.com/gopherjs/gopherjs v0.0.0-20190430165422-3e4dfb77656c/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= github.com/gorilla/handlers v1.5.1 h1:9lRY6j8DEeeBT10CvO9hGW0gmky0BprnvDI5vfhUHH4= github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q= github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= +github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/graph-gophers/graphql-go v1.3.0/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.2.2/go.mod h1:EaizFBKfUKtMIF5iaDEhniwNedqGo9FuLFzppDr3uwI= github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw= github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= -github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= github.com/grpc-ecosystem/grpc-gateway v1.8.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 
h1:BZHcxBETFHIdVyhyEfOvn/RdU/QGdLI4y34qQGjGWO0= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg= github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c h1:6rhixN/i8ZofjG1Y75iExal34USq5p+wiN1tpie8IrU= github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c/go.mod h1:NMPJylDgVpX0MLRlPy15sqSwOFv/U1GZ2m21JhFfek0= +github.com/gtank/merlin v0.1.1-0.20191105220539-8318aed1a79f/go.mod h1:T86dnYJhcGOh5BjZFCJWTDeTK7XW8uE+E21Cy/bIQ+s= +github.com/gtank/merlin v0.1.1 h1:eQ90iG7K9pOhtereWsmyRJ6RAwcP4tHTDBHXNg+u5is= +github.com/gtank/merlin v0.1.1/go.mod h1:T86dnYJhcGOh5BjZFCJWTDeTK7XW8uE+E21Cy/bIQ+s= +github.com/gtank/ristretto255 v0.1.2 h1:JEqUCPA1NvLq5DwYtuzigd7ss8fwbYay9fi4/5uMzcc= +github.com/gtank/ristretto255 v0.1.2/go.mod h1:Ph5OpO6c7xKUGROZfWVLiJf9icMDwUeIvY4OmlYW69o= github.com/gxed/hashland/keccakpg v0.0.1/go.mod h1:kRzw3HkwxFU1mpmPP8v1WyQzwdGfmKFJ6tItnhQ67kU= github.com/gxed/hashland/murmur3 v0.0.1/go.mod h1:KjXop02n4/ckmZSnY2+HKcLud/tcmvhST0bie/0lS48= github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= @@ -548,25 +939,27 @@ github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyN github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-bexpr v0.1.10 h1:9kuI5PFotCboP3dkDYFr/wi0gg0QVbSNz5oFRpxn4uE= +github.com/hashicorp/go-bexpr v0.1.10/go.mod h1:oxlubA2vC/gFVfX1A6JGp7ls7uCDlfJn732ehYYg+g0= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= -github.com/hashicorp/go-getter v1.5.11 h1:wioTuNmaBU3IE9vdFtFMcmZWj0QzLc6DYaP6sNe5onY= -github.com/hashicorp/go-getter v1.5.11/go.mod h1:9i48BP6wpWweI/0/+FBjqLrp9S8XtwUGjiu0QkWHEaY= +github.com/hashicorp/go-getter v1.7.0 h1:bzrYP+qu/gMrL1au7/aDvkoOVGUJpeKBgbqRHACAFDY= +github.com/hashicorp/go-getter v1.7.0/go.mod h1:W7TalhMmbPmsSMdNjD0ZskARur/9GJ17cfHTRtXV744= github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= github.com/hashicorp/go-hclog v1.2.0 h1:La19f8d7WIlm4ogzNHB0JGqs5AUDAZ2UfCY4sJXcJdM= +github.com/hashicorp/go-hclog v1.2.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= -github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= github.com/hashicorp/go-multierror v1.1.1 
h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= -github.com/hashicorp/go-retryablehttp v0.7.1-0.20211018174820-ff6d014e72d9 h1:RSp277I12pTdb5wiZWy9qtE0IN4oC4BH5a/LIcNxw0w= -github.com/hashicorp/go-retryablehttp v0.7.1-0.20211018174820-ff6d014e72d9/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= +github.com/hashicorp/go-retryablehttp v0.7.5 h1:bJj+Pj19UZMIweq/iie+1u5YCdGrnxCT9yvm0e+Nd5M= +github.com/hashicorp/go-retryablehttp v0.7.5/go.mod h1:Jy/gPYAdjqffZ/yFGCFV2doI5wjtH1ewM9u8iYVjtX8= github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= github.com/hashicorp/go-safetemp v1.0.0 h1:2HR189eFNrjHQyENnQMMpCiBAsRxzbTMIgBhEyExpmo= github.com/hashicorp/go-safetemp v1.0.0/go.mod h1:oaerMy3BhqiTbVye6QuFhFtIceqFoDHxNAB65b+Rj1I= @@ -575,16 +968,21 @@ github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdv github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE= github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/go-version v1.4.0 h1:aAQzgqIrRKRa7w75CKpbBxYsmUoPjzVm1W59ca1L0J4= -github.com/hashicorp/go-version v1.4.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= +github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= -github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d h1:dg1dEPuWpEqDnvIw251EVy4zlP8gWbsGj4BsUKCRpYs= github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c= +github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/golang-lru/arc/v2 v2.0.5 h1:l2zaLDubNhW4XO3LnliVj0GXO3+/CGNJAg1dcN2Fpfw= +github.com/hashicorp/golang-lru/arc/v2 v2.0.5/go.mod h1:ny6zBSQZi2JxIeYcv7kt2sH2PXJtirBN7RDhRpxPkxU= +github.com/hashicorp/golang-lru/v2 v2.0.1/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= +github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= +github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= @@ -593,30 +991,79 @@ github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2p 
github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= github.com/hdevalence/ed25519consensus v0.0.0-20220222234857-c00d1f31bab3 h1:aSVUgRRRtOrZOC1fYmY9gV0e9z/Iu+xNVSASWjsuyGU= github.com/hdevalence/ed25519consensus v0.0.0-20220222234857-c00d1f31bab3/go.mod h1:5PC6ZNPde8bBqU/ewGZig35+UIZtw9Ytxez8/q5ZyFE= +github.com/holiman/billy v0.0.0-20230718173358-1c7e68d277a7 h1:3JQNjnMRil1yD0IfZKHF9GxxWKDJGj8I0IqOUol//sw= +github.com/holiman/billy v0.0.0-20230718173358-1c7e68d277a7/go.mod h1:5GuXa7vkL8u9FkFuWdVvfR5ix8hRB7DbOAaYULamFpc= +github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao= +github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA= +github.com/holiman/uint256 v1.2.0/go.mod h1:y4ga/t+u+Xwd7CpDgZESaRcWy0I7XMlTMA25ApIH5Jw= +github.com/holiman/uint256 v1.2.3 h1:K8UWO1HUJpRMXBxbmaY1Y8IAMZC/RsKB+ArEnnK4l5o= +github.com/holiman/uint256 v1.2.3/go.mod h1:SC8Ryt4n+UBbPbIBKaG9zbbDlp4jOru9xFZmPzLUTxw= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= github.com/huin/goupnp v1.0.0/go.mod h1:n9v9KO1tAxYH82qOn+UTIFQDmx5n1Zxd/ClZDMX7Bnc= -github.com/huin/goupnp v1.0.2/go.mod h1:0dxJBVBHqTMjIUMkESDTNgOOx/Mw5wYIfyFmdzSamkM= -github.com/huin/goupnp v1.0.3-0.20220313090229-ca81a64b4204 h1:+EYBkW+dbi3F/atB+LSQZSWh7+HNrV3A/N0y6DSoy9k= github.com/huin/goupnp v1.0.3-0.20220313090229-ca81a64b4204/go.mod h1:ZxNlw5WqJj6wSsRK5+YfflQGXYfccj5VgQsMNixHM7Y= +github.com/huin/goupnp v1.0.3/go.mod h1:ZxNlw5WqJj6wSsRK5+YfflQGXYfccj5VgQsMNixHM7Y= +github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc= +github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8= github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o= +github.com/iancoleman/orderedmap v0.0.0-20190318233801-ac98e3ecb4b0/go.mod h1:N0Wam8K1arqPXNWjMo21EXnBPOPp36vB07FNRdD2geA= +github.com/iancoleman/orderedmap v0.1.0 h1:2orAxZBJsvimgEBmMWfXaFlzSG2fbQil5qzP3F6cCkg= +github.com/iancoleman/orderedmap v0.1.0/go.mod h1:N0Wam8K1arqPXNWjMo21EXnBPOPp36vB07FNRdD2geA= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20220319035150-800ac71e25c2/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w= +github.com/icrowley/fake v0.0.0-20180203215853-4178557ae428/go.mod h1:uhpZMVGznybq1itEKXj6RYw9I71qK4kH+OGMjRC4KEo= +github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= +github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= github.com/improbable-eng/grpc-web v0.15.0 h1:BN+7z6uNXZ1tQGcNAuaU1YjsLTApzkjt2tzCixLaUPQ= github.com/improbable-eng/grpc-web v0.15.0/go.mod h1:1sy9HKV4Jt9aEs9JSnkWlRJPuPtwNr0l57L4f878wP8= -github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/influxdata/flux v0.65.1/go.mod 
h1:J754/zds0vvpfwuq7Gc2wRdVwEodfpCFM7mYlOw2LqY= +github.com/influxdata/influxdb v1.8.3/go.mod h1:JugdFhsvvI8gadxOI6noqNeeBHvWNTbfYGtiAn+2jhI= +github.com/influxdata/influxdb-client-go/v2 v2.4.0/go.mod h1:vLNHdxTJkIf2mSLvGrpj8TCcISApPoXkaxP8g9uRlW8= +github.com/influxdata/influxdb-client-go/v2 v2.12.2 h1:uYABKdrEKlYm+++qfKdbgaHKBPmoWR5wpbmj6MBB/2g= +github.com/influxdata/influxdb-client-go/v2 v2.12.2/go.mod h1:YteV91FiQxRdccyJ2cHvj2f/5sq4y4Njqu1fQzsQCOU= github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= +github.com/influxdata/influxql v1.1.1-0.20200828144457-65d3ef77d385/go.mod h1:gHp9y86a/pxhjJ+zMjNXiQAA197Xk9wLxaz+fGG+kWk= +github.com/influxdata/line-protocol v0.0.0-20180522152040-32c6aa80de5e/go.mod h1:4kt73NQhadE3daL3WhR5EJ/J2ocX0PZzwxQ0gXJ7oFE= +github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839/go.mod h1:xaLFMmpvUxqXtVkUJfg9QmT88cDaCJ3ZKgdZ78oO8Qo= +github.com/influxdata/line-protocol v0.0.0-20210311194329-9aa0e372d097 h1:vilfsDSy7TDxedi9gyBkMvAirat/oRcL0lFdJBf6tdM= +github.com/influxdata/line-protocol v0.0.0-20210311194329-9aa0e372d097/go.mod h1:xaLFMmpvUxqXtVkUJfg9QmT88cDaCJ3ZKgdZ78oO8Qo= +github.com/influxdata/promql/v2 v2.12.0/go.mod h1:fxOPu+DY0bqCTCECchSRtWfc+0X19ybifQhZoQNF5D8= +github.com/influxdata/roaring v0.4.13-0.20180809181101-fc520f41fab6/go.mod h1:bSgUQ7q5ZLSO+bKBGqJiCBGAl+9DxyW63zLTujjUlOE= +github.com/influxdata/tdigest v0.0.0-20181121200506-bf2b5ad3c0a9/go.mod h1:Js0mqiSBE6Ffsg94weZZ2c+v/ciT8QRHFOap7EKDrR0= +github.com/influxdata/usage-client v0.0.0-20160829180054-6d3895376368/go.mod h1:Wbbw6tYNvwa5dlB6304Sd+82Z3f7PmVZHVKU637d4po= +github.com/ipfs/bbloom v0.0.1/go.mod h1:oqo8CVWsJFMOZqTglBG4wydCE4IQA/G2/SEofB0rjUI= github.com/ipfs/bbloom v0.0.4 h1:Gi+8EGJ2y5qiD5FbsbpX/TMNcJw8gSqr7eyjHa4Fhvs= github.com/ipfs/bbloom v0.0.4/go.mod h1:cS9YprKXpoZ9lT0n/Mw/a6/aFV6DTjTLYHeA+gyqMG0= +github.com/ipfs/boxo v0.16.0 h1:A9dUmef5a+mEFki6kbyG7el5gl65CiUBzrDeZxzTWKY= +github.com/ipfs/boxo v0.16.0/go.mod h1:jAgpNQn7T7BnibUeReXcKU9Ha1xmYNyOlwVEl193ow0= +github.com/ipfs/go-bitfield v1.0.0/go.mod h1:N/UiujQy+K+ceU1EF5EkVd1TNqevLrCQMIcAEPrdtus= +github.com/ipfs/go-bitfield v1.1.0 h1:fh7FIo8bSwaJEh6DdTWbCeZ1eqOaOkKFI74SCnsWbGA= +github.com/ipfs/go-bitfield v1.1.0/go.mod h1:paqf1wjq/D2BBmzfTVFlJQ9IlFOZpg422HL0HqsGWHU= +github.com/ipfs/go-bitswap v0.1.0/go.mod h1:FFJEf18E9izuCqUtHxbWEvq+reg7o4CW5wSAE1wsxj0= +github.com/ipfs/go-bitswap v0.1.2/go.mod h1:qxSWS4NXGs7jQ6zQvoPY3+NmOfHHG47mhkiLzBpJQIs= +github.com/ipfs/go-bitswap v0.1.8/go.mod h1:TOWoxllhccevbWFUR2N7B1MTSVVge1s6XSMiCSA4MzM= +github.com/ipfs/go-bitswap v0.3.4/go.mod h1:4T7fvNv/LmOys+21tnLzGKncMeeXUYUd1nUiJ2teMvI= +github.com/ipfs/go-bitswap v0.5.1/go.mod h1:P+ckC87ri1xFLvk74NlXdP0Kj9RmWAh4+H78sC6Qopo= github.com/ipfs/go-bitswap v0.6.0/go.mod h1:Hj3ZXdOC5wBJvENtdqsixmzzRukqd8EHLxZLZc3mzRA= -github.com/ipfs/go-bitswap v0.7.0 h1:vSte4lll4Rob7cMQERUouxtFbuD7Vl4Hq+XEAp2ipKY= -github.com/ipfs/go-bitswap v0.7.0/go.mod h1:Hj3ZXdOC5wBJvENtdqsixmzzRukqd8EHLxZLZc3mzRA= +github.com/ipfs/go-bitswap v0.11.0 h1:j1WVvhDX1yhG32NTC9xfxnqycqYIlhzEzLXG/cU1HyQ= +github.com/ipfs/go-bitswap v0.11.0/go.mod h1:05aE8H3XOU+LXpTedeAS0OZpcO1WFsj5niYQH9a1Tmk= +github.com/ipfs/go-block-format v0.0.1/go.mod h1:DK/YYcsSUIVAFNwo/KZCdIIbpN0ROH/baNLgayt4pFc= github.com/ipfs/go-block-format v0.0.2/go.mod h1:AWR46JfpcObNfg3ok2JHDUfdiHRgWhJgCQF+KIgOPJY= -github.com/ipfs/go-block-format v0.0.3 h1:r8t66QstRp/pd/or4dpnbVfXT5Gt7lOqRvC+/dDTpMc= github.com/ipfs/go-block-format 
v0.0.3/go.mod h1:4LmD4ZUw0mhO+JSKdpWwrzATiEfM7WWgQ8H5l6P8MVk= -github.com/ipfs/go-blockservice v0.3.0 h1:cDgcZ+0P0Ih3sl8+qjFr2sVaMdysg/YZpLj5WJ8kiiw= +github.com/ipfs/go-block-format v0.1.1/go.mod h1:+McEIT+g52p+zz5xGAABGSOKrzmrdX97bc0USBdWPUs= +github.com/ipfs/go-block-format v0.1.2/go.mod h1:mACVcrxarQKstUU3Yf/RdwbC4DzPV6++rO2a3d+a/KE= +github.com/ipfs/go-block-format v0.2.0 h1:ZqrkxBA2ICbDRbK8KJs/u0O3dlp6gmAuuXUJNiW1Ycs= +github.com/ipfs/go-block-format v0.2.0/go.mod h1:+jpL11nFx5A/SPpsoBn6Bzkra/zaArfSmsknbPMYgzM= +github.com/ipfs/go-blockservice v0.1.0/go.mod h1:hzmMScl1kXHg3M2BjTymbVPjv627N7sYcvYaKbop39M= +github.com/ipfs/go-blockservice v0.1.4/go.mod h1:OTZhFpkgY48kNzbgyvcexW9cHrpjBYIjSR0KoDOFOLU= +github.com/ipfs/go-blockservice v0.2.1/go.mod h1:k6SiwmgyYgs4M/qt+ww6amPeUH9EISLRBnvUurKJhi8= github.com/ipfs/go-blockservice v0.3.0/go.mod h1:P5ppi8IHDC7O+pA0AlGTF09jruB2h+oP3wVVaZl8sfk= +github.com/ipfs/go-blockservice v0.5.0 h1:B2mwhhhVQl2ntW2EIpaWPwSCxSuqr5fFA93Ms4bYLEY= +github.com/ipfs/go-blockservice v0.5.0/go.mod h1:W6brZ5k20AehbmERplmERn8o2Ni3ZZubvAxaIUeaT6w= github.com/ipfs/go-cid v0.0.1/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= github.com/ipfs/go-cid v0.0.2/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= github.com/ipfs/go-cid v0.0.3/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= @@ -624,19 +1071,27 @@ github.com/ipfs/go-cid v0.0.4/go.mod h1:4LLaPOQwmk5z9LBgQnpkivrx8BJjUyGwTXCd5Xfj github.com/ipfs/go-cid v0.0.5/go.mod h1:plgt+Y5MnOey4vO4UlUazGqdbEXuFYitED67FexhXog= github.com/ipfs/go-cid v0.0.6/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I= github.com/ipfs/go-cid v0.0.7/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I= -github.com/ipfs/go-cid v0.2.0 h1:01JTiihFq9en9Vz0lc0VDWvZe/uBonGpzo4THP0vcQ0= +github.com/ipfs/go-cid v0.1.0/go.mod h1:rH5/Xv83Rfy8Rw6xG+id3DYAMUVmem1MowoKwdXmN2o= github.com/ipfs/go-cid v0.2.0/go.mod h1:P+HXFDF4CVhaVayiEb4wkAy7zBHxBwsJyt0Y5U6MLro= +github.com/ipfs/go-cid v0.3.0/go.mod h1:P+HXFDF4CVhaVayiEb4wkAy7zBHxBwsJyt0Y5U6MLro= +github.com/ipfs/go-cid v0.3.2/go.mod h1:gQ8pKqT/sUxGY+tIwy1RPpAojYu7jAyCp5Tz1svoupw= +github.com/ipfs/go-cid v0.4.1 h1:A/T3qGvxi4kpKWWcPC/PgbvDA2bjVLO7n4UeVwnbs/s= +github.com/ipfs/go-cid v0.4.1/go.mod h1:uQHwDeX4c6CtyrFwdqyhpNcxVewur1M7l7fNU7LKwZk= +github.com/ipfs/go-cidutil v0.1.0 h1:RW5hO7Vcf16dplUU60Hs0AKDkQAVPVplr7lk97CFL+Q= +github.com/ipfs/go-cidutil v0.1.0/go.mod h1:e7OEVBMIv9JaOxt9zaGEmAoSlXW9jdFZ5lP/0PwcfpA= github.com/ipfs/go-datastore v0.0.1/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE= +github.com/ipfs/go-datastore v0.0.5/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE= github.com/ipfs/go-datastore v0.1.0/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE= github.com/ipfs/go-datastore v0.1.1/go.mod h1:w38XXW9kVFNp57Zj5knbKWM2T+KOZCGDRVNdgPHtbHw= +github.com/ipfs/go-datastore v0.3.1/go.mod h1:w38XXW9kVFNp57Zj5knbKWM2T+KOZCGDRVNdgPHtbHw= github.com/ipfs/go-datastore v0.4.0/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= github.com/ipfs/go-datastore v0.4.1/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= github.com/ipfs/go-datastore v0.4.4/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= github.com/ipfs/go-datastore v0.4.5/go.mod h1:eXTcaaiN6uOlVCLS9GjJUJtlvJfM3xk23w3fyfrmmJs= -github.com/ipfs/go-datastore v0.4.6/go.mod h1:XSipLSc64rFKSFRFGo1ecQl+WhYce3K7frtpHkyPFUc= github.com/ipfs/go-datastore v0.5.0/go.mod h1:9zhEApYMTl17C8YDp7JmU7sQZi2/wqiYh73hakZ90Bk= -github.com/ipfs/go-datastore v0.5.1 h1:WkRhLuISI+XPD0uk3OskB0fYFSyqK8Ob5ZYew9Qa1nQ= 
github.com/ipfs/go-datastore v0.5.1/go.mod h1:9zhEApYMTl17C8YDp7JmU7sQZi2/wqiYh73hakZ90Bk= +github.com/ipfs/go-datastore v0.6.0 h1:JKyz+Gvz1QEZw0LsX1IBn+JFCJQH4SJVFtM4uWU0Myk= +github.com/ipfs/go-datastore v0.6.0/go.mod h1:rt5M3nNbSO/8q1t4LNkLyUwRs8HupMeN/8O4Vn9YAT8= github.com/ipfs/go-detect-race v0.0.1 h1:qX/xay2W3E4Q1U7d9lNs1sU9nvguX0a7319XbyQ6cOk= github.com/ipfs/go-detect-race v0.0.1/go.mod h1:8BNT7shDZPo99Q74BpGMK+4D8Mn4j46UU0LZ723meps= github.com/ipfs/go-ds-badger v0.0.2/go.mod h1:Y3QpeSFWQf6MopLTiZD+VT6IC1yZqaGmjvRcKeSGij8= @@ -644,47 +1099,96 @@ github.com/ipfs/go-ds-badger v0.0.5/go.mod h1:g5AuuCGmr7efyzQhLL8MzwqcauPojGPUaH github.com/ipfs/go-ds-badger v0.0.7/go.mod h1:qt0/fWzZDoPW6jpQeqUjR5kBfhDNB65jd9YlmAvpQBk= github.com/ipfs/go-ds-badger v0.2.1/go.mod h1:Tx7l3aTph3FMFrRS838dcSJh+jjA7cX9DrGVwx/NOwE= github.com/ipfs/go-ds-badger v0.2.3/go.mod h1:pEYw0rgg3FIrywKKnL+Snr+w/LjJZVMTBRn4FS6UHUk= -github.com/ipfs/go-ds-badger v0.2.7/go.mod h1:02rnztVKA4aZwDuaRPTf8mpqcKmXP7mLl6JPxd14JHA= +github.com/ipfs/go-ds-badger v0.3.0 h1:xREL3V0EH9S219kFFueOYJJTcjgNSZ2HY1iSvN7U1Ro= github.com/ipfs/go-ds-badger v0.3.0/go.mod h1:1ke6mXNqeV8K3y5Ak2bAA0osoTfmxUdupVCGm4QUIek= -github.com/ipfs/go-ds-badger2 v0.1.3 h1:Zo9JicXJ1DmXTN4KOw7oPXkspZ0AWHcAFCP1tQKnegg= -github.com/ipfs/go-ds-badger2 v0.1.3/go.mod h1:TPhhljfrgewjbtuL/tczP8dNrBYwwk+SdPYbms/NO9w= +github.com/ipfs/go-ds-badger4 v0.1.5 h1:MwrTsIUJIqH/ChuDdUOzxwxMxHx/Li1ECoSCKsCUxiA= +github.com/ipfs/go-ds-badger4 v0.1.5/go.mod h1:LUU2FbhNdmhAbJmMeoahVRbe4GsduAODSJHWJJh2Vo4= github.com/ipfs/go-ds-leveldb v0.0.1/go.mod h1:feO8V3kubwsEF22n0YRQCffeb79OOYIykR4L04tMOYc= github.com/ipfs/go-ds-leveldb v0.1.0/go.mod h1:hqAW8y4bwX5LWcCtku2rFNX3vjDZCy5LZCg+cSZvYb8= github.com/ipfs/go-ds-leveldb v0.4.1/go.mod h1:jpbku/YqBSsBc1qgME8BkWS4AxzF2cEu1Ii2r79Hh9s= github.com/ipfs/go-ds-leveldb v0.4.2/go.mod h1:jpbku/YqBSsBc1qgME8BkWS4AxzF2cEu1Ii2r79Hh9s= +github.com/ipfs/go-ds-leveldb v0.5.0 h1:s++MEBbD3ZKc9/8/njrn4flZLnCuY9I79v94gBUNumo= github.com/ipfs/go-ds-leveldb v0.5.0/go.mod h1:d3XG9RUDzQ6V4SHi8+Xgj9j1XuEk1z82lquxrVbml/Q= -github.com/ipfs/go-ipfs-blockstore v1.2.0 h1:n3WTeJ4LdICWs/0VSfjHrlqpPpl6MZ+ySd3j8qz0ykw= +github.com/ipfs/go-fetcher v1.5.0/go.mod h1:5pDZ0393oRF/fHiLmtFZtpMNBQfHOYNPtryWedVuSWE= +github.com/ipfs/go-fetcher v1.6.1/go.mod h1:27d/xMV8bodjVs9pugh/RCjjK2OZ68UgAMspMdingNo= +github.com/ipfs/go-ipfs-blockstore v0.0.1/go.mod h1:d3WClOmRQKFnJ0Jz/jj/zmksX0ma1gROTlovZKBmN08= +github.com/ipfs/go-ipfs-blockstore v0.1.0/go.mod h1:5aD0AvHPi7mZc6Ci1WCAhiBQu2IsfTduLl+422H6Rqw= +github.com/ipfs/go-ipfs-blockstore v0.1.4/go.mod h1:Jxm3XMVjh6R17WvxFEiyKBLUGr86HgIYJW/D/MwqeYQ= +github.com/ipfs/go-ipfs-blockstore v0.2.1/go.mod h1:jGesd8EtCM3/zPgx+qr0/feTXGUeRai6adgwC+Q+JvE= +github.com/ipfs/go-ipfs-blockstore v1.1.2/go.mod h1:w51tNR9y5+QXB0wkNcHt4O2aSZjTdqaEWaQdSxEyUOY= github.com/ipfs/go-ipfs-blockstore v1.2.0/go.mod h1:eh8eTFLiINYNSNawfZOC7HOxNTxpB1PFuA5E1m/7exE= +github.com/ipfs/go-ipfs-blockstore v1.3.0/go.mod h1:KgtZyc9fq+P2xJUiCAzbRdhhqJHvsw8u2Dlqy2MyRTE= +github.com/ipfs/go-ipfs-blockstore v1.3.1 h1:cEI9ci7V0sRNivqaOr0elDsamxXFxJMMMy7PTTDQNsQ= +github.com/ipfs/go-ipfs-blockstore v1.3.1/go.mod h1:KgtZyc9fq+P2xJUiCAzbRdhhqJHvsw8u2Dlqy2MyRTE= github.com/ipfs/go-ipfs-blocksutil v0.0.1 h1:Eh/H4pc1hsvhzsQoMEP3Bke/aW5P5rVM1IWFJMcGIPQ= github.com/ipfs/go-ipfs-blocksutil v0.0.1/go.mod h1:Yq4M86uIOmxmGPUHv/uI7uKqZNtLb449gwKqXjIsnRk= +github.com/ipfs/go-ipfs-chunker v0.0.1/go.mod h1:tWewYK0we3+rMbOh7pPFGDyypCtvGcBFymgY4rSDLAw= +github.com/ipfs/go-ipfs-chunker v0.0.5 
h1:ojCf7HV/m+uS2vhUGWcogIIxiO5ubl5O57Q7NapWLY8= +github.com/ipfs/go-ipfs-chunker v0.0.5/go.mod h1:jhgdF8vxRHycr00k13FM8Y0E+6BoalYeobXmUyTreP8= github.com/ipfs/go-ipfs-delay v0.0.0-20181109222059-70721b86a9a8/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw= github.com/ipfs/go-ipfs-delay v0.0.1 h1:r/UXYyRcddO6thwOnhiznIAiSvxMECGgtv35Xs1IeRQ= github.com/ipfs/go-ipfs-delay v0.0.1/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw= +github.com/ipfs/go-ipfs-ds-help v0.0.1/go.mod h1:gtP9xRaZXqIQRh1HRpp595KbBEdgqWFxefeVKOV8sxo= github.com/ipfs/go-ipfs-ds-help v0.1.1/go.mod h1:SbBafGJuGsPI/QL3j9Fc5YPLeAu+SzOkI0gFwAg+mOs= -github.com/ipfs/go-ipfs-ds-help v1.1.0 h1:yLE2w9RAsl31LtfMt91tRZcrx+e61O5mDxFRR994w4Q= github.com/ipfs/go-ipfs-ds-help v1.1.0/go.mod h1:YR5+6EaebOhfcqVCyqemItCLthrpVNot+rsOU/5IatU= -github.com/ipfs/go-ipfs-exchange-interface v0.1.0 h1:TiMekCrOGQuWYtZO3mf4YJXDIdNgnKWZ9IE3fGlnWfo= +github.com/ipfs/go-ipfs-ds-help v1.1.1 h1:B5UJOH52IbcfS56+Ul+sv8jnIV10lbjLF5eOO0C66Nw= +github.com/ipfs/go-ipfs-ds-help v1.1.1/go.mod h1:75vrVCkSdSFidJscs8n4W+77AtTpCIAdDGAwjitJMIo= +github.com/ipfs/go-ipfs-exchange-interface v0.0.1/go.mod h1:c8MwfHjtQjPoDyiy9cFquVtVHkO9b9Ob3FG91qJnWCM= github.com/ipfs/go-ipfs-exchange-interface v0.1.0/go.mod h1:ych7WPlyHqFvCi/uQI48zLZuAWVP5iTQPXEfVaw5WEI= -github.com/ipfs/go-ipfs-exchange-offline v0.2.0 h1:2PF4o4A7W656rC0RxuhUace997FTcDTcIQ6NoEtyjAI= +github.com/ipfs/go-ipfs-exchange-interface v0.2.0/go.mod h1:z6+RhJuDQbqKguVyslSOuVDhqF9JtTrO3eptSAiW2/Y= +github.com/ipfs/go-ipfs-exchange-interface v0.2.1 h1:jMzo2VhLKSHbVe+mHNzYgs95n0+t0Q69GQ5WhRDZV/s= +github.com/ipfs/go-ipfs-exchange-interface v0.2.1/go.mod h1:MUsYn6rKbG6CTtsDp+lKJPmVt3ZrCViNyH3rfPGsZ2E= +github.com/ipfs/go-ipfs-exchange-offline v0.0.1/go.mod h1:WhHSFCVYX36H/anEKQboAzpUws3x7UeEGkzQc3iNkM0= +github.com/ipfs/go-ipfs-exchange-offline v0.1.1/go.mod h1:vTiBRIbzSwDD0OWm+i3xeT0mO7jG2cbJYatp3HPk5XY= github.com/ipfs/go-ipfs-exchange-offline v0.2.0/go.mod h1:HjwBeW0dvZvfOMwDP0TSKXIHf2s+ksdP4E3MLDRtLKY= -github.com/ipfs/go-ipfs-pq v0.0.2 h1:e1vOOW6MuOwG2lqxcLA+wEn93i/9laCY8sXAw76jFOY= +github.com/ipfs/go-ipfs-exchange-offline v0.3.0 h1:c/Dg8GDPzixGd0MC8Jh6mjOwU57uYokgWRFidfvEkuA= +github.com/ipfs/go-ipfs-exchange-offline v0.3.0/go.mod h1:MOdJ9DChbb5u37M1IcbrRB02e++Z7521fMxqCNRrz9s= +github.com/ipfs/go-ipfs-files v0.0.3/go.mod h1:INEFm0LL2LWXBhNJ2PMIIb2w45hpXgPjNoE7yA8Y1d4= +github.com/ipfs/go-ipfs-files v0.0.8/go.mod h1:wiN/jSG8FKyk7N0WyctKSvq3ljIa2NNTiZB55kpTdOs= +github.com/ipfs/go-ipfs-files v0.3.0 h1:fallckyc5PYjuMEitPNrjRfpwl7YFt69heCOUhsbGxQ= +github.com/ipfs/go-ipfs-files v0.3.0/go.mod h1:xAUtYMwB+iu/dtf6+muHNSFQCJG2dSiStR2P6sn9tIM= +github.com/ipfs/go-ipfs-keystore v0.1.0/go.mod h1:LvLw7Qhnb0RlMOfCzK6OmyWxICip6lQ06CCmdbee75U= +github.com/ipfs/go-ipfs-posinfo v0.0.1 h1:Esoxj+1JgSjX0+ylc0hUmJCOv6V2vFoZiETLR6OtpRs= +github.com/ipfs/go-ipfs-posinfo v0.0.1/go.mod h1:SwyeVP+jCwiDu0C313l/8jg6ZxM0qqtlt2a0vILTc1A= +github.com/ipfs/go-ipfs-pq v0.0.1/go.mod h1:LWIqQpqfRG3fNc5XsnIhz/wQ2XXGyugQwls7BgUmUfY= github.com/ipfs/go-ipfs-pq v0.0.2/go.mod h1:LWIqQpqfRG3fNc5XsnIhz/wQ2XXGyugQwls7BgUmUfY= -github.com/ipfs/go-ipfs-routing v0.2.1 h1:E+whHWhJkdN9YeoHZNj5itzc+OR292AJ2uE9FFiW0BY= +github.com/ipfs/go-ipfs-pq v0.0.3 h1:YpoHVJB+jzK15mr/xsWC574tyDLkezVrDNeaalQBsTE= +github.com/ipfs/go-ipfs-pq v0.0.3/go.mod h1:btNw5hsHBpRcSSgZtiNm/SLj5gYIZ18AKtv3kERkRb4= +github.com/ipfs/go-ipfs-redirects-file v0.1.1/go.mod h1:tAwRjCV0RjLTjH8DR/AU7VYvfQECg+lpUy2Mdzv7gyk= +github.com/ipfs/go-ipfs-routing v0.1.0/go.mod 
h1:hYoUkJLyAUKhF58tysKpids8RNDPO42BVMgK5dNsoqY= github.com/ipfs/go-ipfs-routing v0.2.1/go.mod h1:xiNNiwgjmLqPS1cimvAw6EyB9rkVDbiocA4yY+wRNLM= +github.com/ipfs/go-ipfs-routing v0.3.0 h1:9W/W3N+g+y4ZDeffSgqhgo7BsBSJwPMcyssET9OWevc= +github.com/ipfs/go-ipfs-routing v0.3.0/go.mod h1:dKqtTFIql7e1zYsEuWLyuOU+E0WJWW8JjbTPLParDWo= github.com/ipfs/go-ipfs-util v0.0.1/go.mod h1:spsl5z8KUnrve+73pOhSVZND1SIxPW5RyBCNzQxlJBc= -github.com/ipfs/go-ipfs-util v0.0.2 h1:59Sswnk1MFaiq+VcaknX7aYEyGyGDAA73ilhEK2POp8= github.com/ipfs/go-ipfs-util v0.0.2/go.mod h1:CbPtkWJzjLdEcezDns2XYaehFVNXG9zrdrtMecczcsQ= -github.com/ipfs/go-ipld-cbor v0.0.5 h1:ovz4CHKogtG2KB/h1zUp5U0c/IzZrL435rCh5+K/5G8= +github.com/ipfs/go-ipfs-util v0.0.3 h1:2RFdGez6bu2ZlZdI+rWfIdbQb1KudQp3VGwPtdNCmE0= +github.com/ipfs/go-ipfs-util v0.0.3/go.mod h1:LHzG1a0Ig4G+iZ26UUOMjHd+lfM84LZCrn17xAKWBvs= +github.com/ipfs/go-ipld-cbor v0.0.2/go.mod h1:wTBtrQZA3SoFKMVkp6cn6HMRteIB1VsmHA0AQFOn7Nc= +github.com/ipfs/go-ipld-cbor v0.0.3/go.mod h1:wTBtrQZA3SoFKMVkp6cn6HMRteIB1VsmHA0AQFOn7Nc= github.com/ipfs/go-ipld-cbor v0.0.5/go.mod h1:BkCduEx3XBCO6t2Sfo5BaHzuok7hbhdMm9Oh8B2Ftq4= +github.com/ipfs/go-ipld-cbor v0.0.6/go.mod h1:ssdxxaLJPXH7OjF5V4NSjBbcfh+evoR4ukuru0oPXMA= +github.com/ipfs/go-ipld-cbor v0.1.0 h1:dx0nS0kILVivGhfWuB6dUpMa/LAwElHPw1yOGYopoYs= +github.com/ipfs/go-ipld-cbor v0.1.0/go.mod h1:U2aYlmVrJr2wsUBU67K4KgepApSZddGRDWBYR0H4sCk= github.com/ipfs/go-ipld-format v0.0.1/go.mod h1:kyJtbkDALmFHv3QR6et67i35QzO3S0dCDnkOJhcZkms= +github.com/ipfs/go-ipld-format v0.0.2/go.mod h1:4B6+FM2u9OJ9zCV+kSbgFAZlOrv1Hqbf0INGQgiKf9k= github.com/ipfs/go-ipld-format v0.2.0/go.mod h1:3l3C1uKoadTPbeNfrDi+xMInYKlx2Cvg1BuydPSdzQs= github.com/ipfs/go-ipld-format v0.3.0/go.mod h1:co/SdBE8h99968X0hViiw1MNlh6fvxxnHpvVLnH7jSM= -github.com/ipfs/go-ipld-format v0.4.0 h1:yqJSaJftjmjc9jEOFYlpkwOLVKv68OD27jFLlSghBlQ= +github.com/ipfs/go-ipld-format v0.3.1/go.mod h1:co/SdBE8h99968X0hViiw1MNlh6fvxxnHpvVLnH7jSM= github.com/ipfs/go-ipld-format v0.4.0/go.mod h1:co/SdBE8h99968X0hViiw1MNlh6fvxxnHpvVLnH7jSM= -github.com/ipfs/go-ipld-legacy v0.1.0 h1:wxkkc4k8cnvIGIjPO0waJCe7SHEyFgl+yQdafdjGrpA= +github.com/ipfs/go-ipld-format v0.5.0/go.mod h1:ImdZqJQaEouMjCvqCe0ORUS+uoBmf7Hf+EO/jh+nk3M= +github.com/ipfs/go-ipld-format v0.6.0 h1:VEJlA2kQ3LqFSIm5Vu6eIlSxD/Ze90xtc4Meten1F5U= +github.com/ipfs/go-ipld-format v0.6.0/go.mod h1:g4QVMTn3marU3qXchwjpKPKgJv+zF+OlaKMyhJ4LHPg= github.com/ipfs/go-ipld-legacy v0.1.0/go.mod h1:86f5P/srAmh9GcIcWQR9lfFLZPrIyyXQeVlOWeeWEuI= -github.com/ipfs/go-ipns v0.1.2 h1:O/s/0ht+4Jl9+VoxoUo0zaHjnZUS+aBQIKTuzdZ/ucI= -github.com/ipfs/go-ipns v0.1.2/go.mod h1:ioQ0j02o6jdIVW+bmi18f4k2gRf0AV3kZ9KeHYHICnQ= +github.com/ipfs/go-ipld-legacy v0.1.1/go.mod h1:8AyKFCjgRPsQFf15ZQgDB8Din4DML/fOmKZkkFkrIEg= +github.com/ipfs/go-ipld-legacy v0.2.1 h1:mDFtrBpmU7b//LzLSypVrXsD8QxkEWxu5qVxN99/+tk= +github.com/ipfs/go-ipld-legacy v0.2.1/go.mod h1:782MOUghNzMO2DER0FlBR94mllfdCJCkTtDtPM51otM= +github.com/ipfs/go-ipns v0.2.0/go.mod h1:3cLT2rbvgPZGkHJoPO1YMJeh6LtkxopCkKFcio/wE24= +github.com/ipfs/go-ipns v0.3.0/go.mod h1:3cLT2rbvgPZGkHJoPO1YMJeh6LtkxopCkKFcio/wE24= +github.com/ipfs/go-libipfs v0.1.0/go.mod h1:qX0d9h+wu53PFtCTXxdXVBakd6ZCvGDdkZUKmdLMLx0= +github.com/ipfs/go-libipfs v0.3.0/go.mod h1:pSUHZ5qPJTAidsxe9bAeHp3KIiw2ODEW2a2kM3v+iXI= +github.com/ipfs/go-libipfs v0.4.0/go.mod h1:XsU2cP9jBhDrXoJDe0WxikB8XcVmD3k2MEZvB3dbYu8= +github.com/ipfs/go-libipfs v0.6.0 h1:3FuckAJEm+zdHbHbf6lAyk0QUzc45LsFcGw102oBCZM= +github.com/ipfs/go-libipfs v0.6.0/go.mod 
h1:UjjDIuehp2GzlNP0HEr5I9GfFT7zWgst+YfpUEIThtw= github.com/ipfs/go-log v0.0.1/go.mod h1:kL1d2/hzSpI0thNYjiKfjanbVNU+IIGA/WnNESY9leM= github.com/ipfs/go-log v1.0.2/go.mod h1:1MNjMxe0u6xvJZgeqbJ8vdo2TKaGwZ1a0Bpza+sr2Sk= github.com/ipfs/go-log v1.0.3/go.mod h1:OsLySYkwIbiSUR/yBTdv1qPtcE4FW3WPWk/ewz9Ru+A= @@ -700,19 +1204,72 @@ github.com/ipfs/go-log/v2 v2.3.0/go.mod h1:QqGoj30OTpnKaG/LKTGTxoP2mmQtjVMEnK72g github.com/ipfs/go-log/v2 v2.5.0/go.mod h1:prSpmC1Gpllc9UYWxDiZDreBYw7zp4Iqp1kOLU9U5UI= github.com/ipfs/go-log/v2 v2.5.1 h1:1XdUzF7048prq4aBjDQQ4SL5RxftpRGdXhNRwKSAlcY= github.com/ipfs/go-log/v2 v2.5.1/go.mod h1:prSpmC1Gpllc9UYWxDiZDreBYw7zp4Iqp1kOLU9U5UI= -github.com/ipfs/go-merkledag v0.6.0 h1:oV5WT2321tS4YQVOPgIrWHvJ0lJobRTerU+i9nmUCuA= +github.com/ipfs/go-merkledag v0.2.3/go.mod h1:SQiXrtSts3KGNmgOzMICy5c0POOpUNQLvB3ClKnBAlk= +github.com/ipfs/go-merkledag v0.3.2/go.mod h1:fvkZNNZixVW6cKSZ/JfLlON5OlgTXNdRLz0p6QG/I2M= +github.com/ipfs/go-merkledag v0.5.1/go.mod h1:cLMZXx8J08idkp5+id62iVftUQV+HlYJ3PIhDfZsjA4= github.com/ipfs/go-merkledag v0.6.0/go.mod h1:9HSEwRd5sV+lbykiYP+2NC/3o6MZbKNaa4hfNcH5iH0= +github.com/ipfs/go-merkledag v0.9.0/go.mod h1:bPHqkHt5OZ0p1n3iqPeDiw2jIBkjAytRjS3WSBwjq90= +github.com/ipfs/go-merkledag v0.10.0/go.mod h1:zkVav8KiYlmbzUzNM6kENzkdP5+qR7+2mCwxkQ6GIj8= +github.com/ipfs/go-merkledag v0.11.0 h1:DgzwK5hprESOzS4O1t/wi6JDpyVQdvm9Bs59N/jqfBY= +github.com/ipfs/go-merkledag v0.11.0/go.mod h1:Q4f/1ezvBiJV0YCIXvt51W/9/kqJGH4I1LsA7+djsM4= github.com/ipfs/go-metrics-interface v0.0.1 h1:j+cpbjYvu4R8zbleSs36gvB7jR+wsL2fGD6n0jO4kdg= github.com/ipfs/go-metrics-interface v0.0.1/go.mod h1:6s6euYU4zowdslK0GKHmqaIZ3j/b/tL7HTWtJ4VPgWY= -github.com/ipfs/go-peertaskqueue v0.7.0 h1:VyO6G4sbzX80K58N60cCaHsSsypbUNs1GjO5seGNsQ0= +github.com/ipfs/go-namesys v0.7.0/go.mod h1:KYSZBVZG3VJC34EfqqJPG7T48aWgxseoMPAPA5gLyyQ= +github.com/ipfs/go-path v0.1.1/go.mod h1:vC8q4AKOtrjJz2NnllIrmr2ZbGlF5fW2OKKyhV9ggb0= +github.com/ipfs/go-path v0.3.0/go.mod h1:NOScsVgxfC/eIw4nz6OiGwK42PjaSJ4Y/ZFPn1Xe07I= +github.com/ipfs/go-peertaskqueue v0.1.0/go.mod h1:Jmk3IyCcfl1W3jTW3YpghSwSEC6IJ3Vzz/jUmWw8Z0U= +github.com/ipfs/go-peertaskqueue v0.1.1/go.mod h1:Jmk3IyCcfl1W3jTW3YpghSwSEC6IJ3Vzz/jUmWw8Z0U= +github.com/ipfs/go-peertaskqueue v0.2.0/go.mod h1:5/eNrBEbtSKWCG+kQK8K8fGNixoYUnr+P7jivavs9lY= github.com/ipfs/go-peertaskqueue v0.7.0/go.mod h1:M/akTIE/z1jGNXMU7kFB4TeSEFvj68ow0Rrb04donIU= -github.com/ipld/go-codec-dagpb v1.3.0 h1:czTcaoAuNNyIYWs6Qe01DJ+sEX7B+1Z0LcXjSatMGe8= +github.com/ipfs/go-peertaskqueue v0.8.0/go.mod h1:cz8hEnnARq4Du5TGqiWKgMr/BOSQ5XOgMOh1K5YYKKM= +github.com/ipfs/go-peertaskqueue v0.8.1 h1:YhxAs1+wxb5jk7RvS0LHdyiILpNmRIRnZVztekOF0pg= +github.com/ipfs/go-peertaskqueue v0.8.1/go.mod h1:Oxxd3eaK279FxeydSPPVGHzbwVeHjatZ2GA8XD+KbPU= +github.com/ipfs/go-unixfs v0.2.4/go.mod h1:SUdisfUjNoSDzzhGVxvCL9QO/nKdwXdr+gbMUdqcbYw= +github.com/ipfs/go-unixfs v0.3.1/go.mod h1:h4qfQYzghiIc8ZNFKiLMFWOTzrWIAtzYQ59W/pCFf1o= +github.com/ipfs/go-unixfs v0.4.3/go.mod h1:TSG7G1UuT+l4pNj91raXAPkX0BhJi3jST1FDTfQ5QyM= +github.com/ipfs/go-unixfs v0.4.4/go.mod h1:TSG7G1UuT+l4pNj91raXAPkX0BhJi3jST1FDTfQ5QyM= +github.com/ipfs/go-unixfs v0.4.5 h1:wj8JhxvV1G6CD7swACwSKYa+NgtdWC1RUit+gFnymDU= +github.com/ipfs/go-unixfs v0.4.5/go.mod h1:BIznJNvt/gEx/ooRMI4Us9K8+qeGO7vx1ohnbk8gjFg= +github.com/ipfs/go-unixfsnode v1.1.2/go.mod h1:5dcE2x03pyjHk4JjamXmunTMzz+VUtqvPwZjIEkfV6s= +github.com/ipfs/go-unixfsnode v1.4.0/go.mod h1:qc7YFFZ8tABc58p62HnIYbUMwj9chhUuFWmxSokfePo= +github.com/ipfs/go-unixfsnode v1.5.1/go.mod 
h1:ed79DaG9IEuZITJVQn4U6MZDftv6I3ygUBLPfhEbHvk= +github.com/ipfs/go-unixfsnode v1.5.2/go.mod h1:NlOebRwYx8lMCNMdhAhEspYPBD3obp7TE0LvBqHY+ks= +github.com/ipfs/go-unixfsnode v1.7.1/go.mod h1:PVfoyZkX1B34qzT3vJO4nsLUpRCyhnMuHBznRcXirlk= +github.com/ipfs/go-unixfsnode v1.7.4 h1:iLvKyAVKUYOIAW2t4kDYqsT7VLGj31eXJE2aeqGfbwA= +github.com/ipfs/go-unixfsnode v1.7.4/go.mod h1:PVfoyZkX1B34qzT3vJO4nsLUpRCyhnMuHBznRcXirlk= +github.com/ipfs/go-verifcid v0.0.1/go.mod h1:5Hrva5KBeIog4A+UpqlaIU+DEstipcJYQQZc0g37pY0= +github.com/ipfs/go-verifcid v0.0.2 h1:XPnUv0XmdH+ZIhLGKg6U2vaPaRDXb9urMyNVCE7uvTs= +github.com/ipfs/go-verifcid v0.0.2/go.mod h1:40cD9x1y4OWnFXbLNJYRe7MpNvWlMn3LZAG5Wb4xnPU= +github.com/ipfs/interface-go-ipfs-core v0.9.0/go.mod h1:F3EcmDy53GFkF0H3iEJpfJC320fZ/4G60eftnItrrJ0= +github.com/ipfs/interface-go-ipfs-core v0.10.0/go.mod h1:F3EcmDy53GFkF0H3iEJpfJC320fZ/4G60eftnItrrJ0= +github.com/ipld/go-car v0.5.0/go.mod h1:ppiN5GWpjOZU9PgpAZ9HbZd9ZgSpwPMr48fGRJOWmvE= +github.com/ipld/go-car v0.6.2 h1:Hlnl3Awgnq8icK+ze3iRghk805lu8YNq3wlREDTF2qc= +github.com/ipld/go-car v0.6.2/go.mod h1:oEGXdwp6bmxJCZ+rARSkDliTeYnVzv3++eXajZ+Bmr8= +github.com/ipld/go-car/v2 v2.1.1/go.mod h1:+2Yvf0Z3wzkv7NeI69i8tuZ+ft7jyjPYIWZzeVNeFcI= +github.com/ipld/go-car/v2 v2.5.1/go.mod h1:jKjGOqoCj5zn6KjnabD6JbnCsMntqU2hLiU6baZVO3E= +github.com/ipld/go-car/v2 v2.8.0/go.mod h1:a+BnAxUqgr7wcWxW/lI6ctyEQ2v9gjBChPytwFMp2f4= +github.com/ipld/go-car/v2 v2.10.1/go.mod h1:sQEkXVM3csejlb1kCCb+vQ/pWBKX9QtvsrysMQjOgOg= +github.com/ipld/go-car/v2 v2.11.0 h1:lkAPwbbTFqbdfawgm+bfmFc8PjGC7D12VcaLXPCLNfM= +github.com/ipld/go-car/v2 v2.11.0/go.mod h1:aDszqev0zjtU8l96g4lwXHaU9bzArj56Y7eEN0q/xqA= github.com/ipld/go-codec-dagpb v1.3.0/go.mod h1:ga4JTU3abYApDC3pZ00BC2RSvC3qfBb9MSJkMLSwnhA= +github.com/ipld/go-codec-dagpb v1.3.1/go.mod h1:ErNNglIi5KMur/MfFE/svtgQthzVvf+43MrzLbpcIZY= +github.com/ipld/go-codec-dagpb v1.4.1/go.mod h1:XdXTO/TUD/ra9RcK/NfmwBfr1JpFxM2uRKaB9oe4LxE= +github.com/ipld/go-codec-dagpb v1.5.0/go.mod h1:0yRIutEFD8o1DGVqw4RSHh+BUTlJA9XWldxaaWR/o4g= +github.com/ipld/go-codec-dagpb v1.6.0 h1:9nYazfyu9B1p3NAgfVdpRco3Fs2nFC72DqVsMj6rOcc= +github.com/ipld/go-codec-dagpb v1.6.0/go.mod h1:ANzFhfP2uMJxRBr8CE+WQWs5UsNa0pYtmKZ+agnUw9s= github.com/ipld/go-ipld-prime v0.9.0/go.mod h1:KvBLMr4PX1gWptgkzRjVZCrLmSGcZCb/jioOQwCqZN8= github.com/ipld/go-ipld-prime v0.9.1-0.20210324083106-dc342a9917db/go.mod h1:KvBLMr4PX1gWptgkzRjVZCrLmSGcZCb/jioOQwCqZN8= github.com/ipld/go-ipld-prime v0.11.0/go.mod h1:+WIAkokurHmZ/KwzDOMUuoeJgaRQktHtEaLglS3ZeV8= -github.com/ipld/go-ipld-prime v0.16.0 h1:RS5hhjB/mcpeEPJvfyj0qbOj/QL+/j05heZ0qa97dVo= +github.com/ipld/go-ipld-prime v0.14.0/go.mod h1:9ASQLwUFLptCov6lIYc70GRB4V7UTyLD0IJtrDJe6ZM= github.com/ipld/go-ipld-prime v0.16.0/go.mod h1:axSCuOCBPqrH+gvXr2w9uAOulJqBPhHPT2PjoiiU1qA= +github.com/ipld/go-ipld-prime v0.18.0/go.mod h1:735yXW548CKrLwVCYXzqx90p5deRJMVVxM9eJ4Qe+qE= +github.com/ipld/go-ipld-prime v0.19.0/go.mod h1:Q9j3BaVXwaA3o5JUDNvptDDr/x8+F7FG6XJ8WI3ILg4= +github.com/ipld/go-ipld-prime v0.20.0/go.mod h1:PzqZ/ZR981eKbgdr3y2DJYeD/8bgMawdGVlJDE8kK+M= +github.com/ipld/go-ipld-prime v0.21.0 h1:n4JmcpOlPDIxBcY037SVfpd1G+Sj1nKZah0m6QH9C2E= +github.com/ipld/go-ipld-prime v0.21.0/go.mod h1:3RLqy//ERg/y5oShXXdx5YIp50cFGOanyMctpPjsvxQ= +github.com/ipld/go-ipld-prime/storage/bsadapter v0.0.0-20211210234204-ce2a1c70cd73/go.mod h1:2PJ0JgxyB08t0b2WKrcuqI3di0V+5n6RS/LTUJhkoxY= +github.com/ipld/go-ipld-prime/storage/bsadapter v0.0.0-20230102063945-1a409dc236dd h1:gMlw/MhNr2Wtp5RwGdsW23cs+yCuj9k2ON7i9MiJlRo= 
+github.com/ipld/go-ipld-prime/storage/bsadapter v0.0.0-20230102063945-1a409dc236dd/go.mod h1:wZ8hH8UxeryOs4kJEJaiui/s00hDSbE37OKsL47g+Sw= github.com/jackpal/gateway v1.0.5/go.mod h1:lTpwd4ACLXmpyiCTRtfiNyVnUmqT9RivzCDQetPfnjA= github.com/jackpal/go-nat-pmp v1.0.1/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus= @@ -726,11 +1283,13 @@ github.com/jbenet/goprocess v0.0.0-20160826012719-b497e2f366b8/go.mod h1:Ly/wlsj github.com/jbenet/goprocess v0.1.3/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZlqdZVfqY4= github.com/jbenet/goprocess v0.1.4 h1:DRGOFReOMqqDNXwW70QkacFW0YN9QnwLV0Vqk+3oU0o= github.com/jbenet/goprocess v0.1.4/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZlqdZVfqY4= +github.com/jedisct1/go-minisign v0.0.0-20190909160543-45766022959e/go.mod h1:G1CVv03EnqU1wYL2dFwXxW2An0az9JTl/ZsqXQeBlkU= github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU= +github.com/jellydator/ttlcache/v2 v2.11.1/go.mod h1:RtE5Snf0/57e+2cLWFYWCCsLas2Hy3c5Z4n14XmSvTI= github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= -github.com/jhump/protoreflect v1.12.0 h1:1NQ4FpWMgn3by/n1X0fbeKEUxP1wBt7+Oitpv01HR10= -github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jhump/protoreflect v1.15.1 h1:HUMERORf3I3ZdX05WaQ6MIpd/NJ434hTp5YiKgfCL6c= +github.com/jhump/protoreflect v1.15.1/go.mod h1:jD/2GMKKE6OqX8qTjhADU1e6DShO+gavG9e0Q693nKo= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= @@ -739,6 +1298,8 @@ github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfC github.com/jmhodges/levigo v1.0.0 h1:q5EC36kV79HWeTBWsod3mG11EgStG3qArTKcvlksN1U= github.com/jmhodges/levigo v1.0.0/go.mod h1:Q6Qx+uH3RAqyK4rFQroq9RL7mdkABMcfhEI+nNuzMJQ= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= @@ -751,145 +1312,174 @@ github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnr github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/jsternberg/zap-logfmt v1.0.0/go.mod h1:uvPs/4X51zdkcm5jXl5SYoN+4RK21K8mysFmDaM/h+o= github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= 
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= +github.com/jwilder/encoding v0.0.0-20170811194829-b4e1701a28ef/go.mod h1:Ct9fl0F6iIOGgxJ5npU/IUOhOhqlVrGjyIZc8/MagT0= github.com/kami-zh/go-capturer v0.0.0-20171211120116-e492ea43421d/go.mod h1:P2viExyCEfeWGU259JnaQ34Inuec4R38JCyBx2edgD0= -github.com/keybase/go-keychain v0.0.0-20190712205309-48d3d31d256d h1:Z+RDyXzjKE0i2sTjZ/b1uxiGtPhFy34Ou/Tk0qwN0kM= -github.com/keybase/go-keychain v0.0.0-20190712205309-48d3d31d256d/go.mod h1:JJNrCn9otv/2QP4D7SMJBgaleKpOf66PnW6F5WGNRIc= +github.com/karalabe/usb v0.0.2/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= +github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/compress v1.10.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= -github.com/klauspost/compress v1.11.2/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.11.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= +github.com/klauspost/compress v1.15.10/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM= +github.com/klauspost/compress v1.15.11/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM= +github.com/klauspost/compress v1.15.12/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM= +github.com/klauspost/compress v1.17.2 h1:RlWWUY/Dr4fL8qk9YG7DTZ7PDgME2V4csBXA8L/ixi4= +github.com/klauspost/compress v1.17.2/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= +github.com/klauspost/cpuid/v2 v2.0.6/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= -github.com/klauspost/cpuid/v2 v2.0.12 h1:p9dKCg8i4gmOxtv35DvrYoWqYzQrvEVdjQ762Y0OqZE= github.com/klauspost/cpuid/v2 v2.0.12/go.mod h1:g2LTdtYhdyuGPqyWyv7qRAmj1WBqxuObKfj5c0PQa7c= +github.com/klauspost/cpuid/v2 v2.1.0/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= +github.com/klauspost/cpuid/v2 v2.1.1/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= +github.com/klauspost/cpuid/v2 v2.1.2/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= +github.com/klauspost/cpuid/v2 v2.2.1/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= +github.com/klauspost/cpuid/v2 v2.2.3/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= +github.com/klauspost/cpuid/v2 v2.2.6 
h1:ndNyv040zDGIDh8thGkXYjnFtiN02M1PVVF+JE/48xc= +github.com/klauspost/cpuid/v2 v2.2.6/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= +github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg= +github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= +github.com/klauspost/reedsolomon v1.11.8 h1:s8RpUW5TK4hjr+djiOpbZJB4ksx+TdYbRH7vHQpwPOY= +github.com/klauspost/reedsolomon v1.11.8/go.mod h1:4bXRN+cVzMdml6ti7qLouuYi32KHJ5MGv0Qd8a47h6A= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/koron/go-ssdp v0.0.0-20180514024734-4a0ed625a78b/go.mod h1:5Ky9EC2xfoUKUor0Hjgi2BJhCSXJfMOFlmyYrVKGQMk= github.com/koron/go-ssdp v0.0.0-20191105050749-2e1c40ed0b5d/go.mod h1:5Ky9EC2xfoUKUor0Hjgi2BJhCSXJfMOFlmyYrVKGQMk= -github.com/koron/go-ssdp v0.0.2 h1:fL3wAoyT6hXHQlORyXUW4Q23kkQpJRgEAYcZB5BR71o= github.com/koron/go-ssdp v0.0.2/go.mod h1:XoLfkAiA2KeZsYh4DbHxD7h3nR2AZNqVQOa+LJuqPYs= +github.com/koron/go-ssdp v0.0.3/go.mod h1:b2MxI6yh02pKrsyNoQUsk4+YNikaGhe4894J+Q5lDvA= +github.com/koron/go-ssdp v0.0.4 h1:1IDwrghSKYM7yLf7XCzbByg2sJ/JcNOZRXS2jczTwz0= +github.com/koron/go-ssdp v0.0.4/go.mod h1:oDXq+E5IL5q0U8uSBcoAXzTzInwy5lEgC91HoKtbmZk= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/labstack/echo/v4 v4.2.1/go.mod h1:AA49e0DZ8kk5jTOOCKNuPR6oTnBS0dYiM4FW1e6jwpg= +github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k= +github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= +github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= github.com/leodido/go-urn v1.2.0 h1:hpXL4XnriNwQ/ABnpepYM/1vCLWNDfUNts8dX3xTG6Y= github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= -github.com/lib/pq v1.10.6 h1:jbk+ZieJ0D7EVGJYpL9QTz7/YW6UHbmdnZWYyK5cdBs= -github.com/lib/pq v1.10.6/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= 
+github.com/lib/pq v1.10.7 h1:p7ZhMD+KsSRozJr34udlUrhboJwWAgCg34+/ZZNvZZw= +github.com/lib/pq v1.10.7/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/libp2p/go-addr-util v0.0.1/go.mod h1:4ac6O7n9rIAKB1dnd+s8IbbMXkt+oBpzX4/+RACcnlQ= github.com/libp2p/go-addr-util v0.0.2/go.mod h1:Ecd6Fb3yIuLzq4bD7VcywcVSBtefcAwnUISBM3WG15E= github.com/libp2p/go-addr-util v0.1.0/go.mod h1:6I3ZYuFr2O/9D+SoyM0zEw0EF3YkldtTX406BpdQMqw= github.com/libp2p/go-buffer-pool v0.0.1/go.mod h1:xtyIz9PMobb13WaxR6Zo1Pd1zXJKYg0a8KiIvDp3TzQ= -github.com/libp2p/go-buffer-pool v0.0.2 h1:QNK2iAFa8gjAe1SPz6mHSMuCcjs+X1wlHzeOSqcmlfs= github.com/libp2p/go-buffer-pool v0.0.2/go.mod h1:MvaB6xw5vOrDl8rYZGLFdKAuk/hRoRZd1Vi32+RXyFM= +github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8= +github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg= github.com/libp2p/go-cidranger v1.1.0 h1:ewPN8EZ0dd1LSnrtuwd4709PXVcITVeuwbag38yPW7c= github.com/libp2p/go-cidranger v1.1.0/go.mod h1:KWZTfSr+r9qEo9OkI9/SIEeAtw+NNoU0dXIXt15Okic= github.com/libp2p/go-conn-security-multistream v0.1.0/go.mod h1:aw6eD7LOsHEX7+2hJkDxw1MteijaVcI+/eP2/x3J1xc= github.com/libp2p/go-conn-security-multistream v0.2.0/go.mod h1:hZN4MjlNetKD3Rq5Jb/P5ohUnFLNzEAR4DLSzpn2QLU= github.com/libp2p/go-conn-security-multistream v0.2.1/go.mod h1:cR1d8gA0Hr59Fj6NhaTpFhJZrjSYuNmhpT2r25zYR70= -github.com/libp2p/go-conn-security-multistream v0.3.0 h1:9UCIKlBL1hC9u7nkMXpD1nkc/T53PKMAn3/k9ivBAVc= github.com/libp2p/go-conn-security-multistream v0.3.0/go.mod h1:EEP47t4fw/bTelVmEzIDqSe69hO/ip52xBEhZMLWAHM= github.com/libp2p/go-eventbus v0.1.0/go.mod h1:vROgu5cs5T7cv7POWlWxBaVLxfSegC5UGQf8A2eEmx4= -github.com/libp2p/go-eventbus v0.2.1 h1:VanAdErQnpTioN2TowqNcOijf6YwhuODe4pPKSDpxGc= github.com/libp2p/go-eventbus v0.2.1/go.mod h1:jc2S4SoEVPP48H9Wpzm5aiGwUCBMfGhVhhBjyhhCJs8= github.com/libp2p/go-flow-metrics v0.0.1/go.mod h1:Iv1GH0sG8DtYN3SVJ2eG221wMiNpZxBdp967ls1g+k8= -github.com/libp2p/go-flow-metrics v0.0.2/go.mod h1:HeoSNUrOJVK1jEpDqVEiUOIXqhbnS27omG0uWU5slZs= -github.com/libp2p/go-flow-metrics v0.0.3 h1:8tAs/hSdNvUiLgtlSy3mxwxWP4I9y/jlkPFT7epKdeM= github.com/libp2p/go-flow-metrics v0.0.3/go.mod h1:HeoSNUrOJVK1jEpDqVEiUOIXqhbnS27omG0uWU5slZs= +github.com/libp2p/go-flow-metrics v0.1.0 h1:0iPhMI8PskQwzh57jB9WxIuIOQ0r+15PChFGkx3Q3WM= +github.com/libp2p/go-flow-metrics v0.1.0/go.mod h1:4Xi8MX8wj5aWNDAZttg6UPmc0ZrnFNsMtpsYUClFtro= +github.com/libp2p/go-libp2p v0.1.0/go.mod h1:6D/2OBauqLUoqcADOJpn9WbKqvaM07tDw68qHM0BxUM= +github.com/libp2p/go-libp2p v0.1.1/go.mod h1:I00BRo1UuUSdpuc8Q2mN7yDF/oTUTRAX6JWpTiK9Rp8= github.com/libp2p/go-libp2p v0.6.1/go.mod h1:CTFnWXogryAHjXAKEbOf1OWY+VeAP3lDMZkfEI5sT54= github.com/libp2p/go-libp2p v0.7.0/go.mod h1:hZJf8txWeCduQRDC/WSqBGMxaTHCOYHt2xSU1ivxn0k= github.com/libp2p/go-libp2p v0.7.4/go.mod h1:oXsBlTLF1q7pxr+9w6lqzS1ILpyHsaBPniVO7zIHGMw= github.com/libp2p/go-libp2p v0.8.1/go.mod h1:QRNH9pwdbEBpx5DTJYg+qxcVaDMAz3Ee/qDKwXujH5o= +github.com/libp2p/go-libp2p v0.13.0/go.mod h1:pM0beYdACRfHO1WcJlp65WXyG2A6NqYM+t2DTVAJxMo= github.com/libp2p/go-libp2p v0.14.3/go.mod h1:d12V4PdKbpL0T1/gsUNN8DfgMuRPDX8bS2QxCZlwRH0= -github.com/libp2p/go-libp2p v0.15.0/go.mod h1:8Ljmwon0cZZYKrOCjFeLwQEK8bqR42dOheUZ1kSKhP0= -github.com/libp2p/go-libp2p v0.18.0/go.mod h1:+veaZ9z1SZQhmc5PW78jvnnxZ89Mgvmh4cggO11ETmw= -github.com/libp2p/go-libp2p v0.20.2 h1:uPCbLjx1VIGt4noOoGsSQKsoUqd+WwOq0IeFbrAThXM= -github.com/libp2p/go-libp2p v0.20.2/go.mod h1:heAEqZPMOagd26sado6/P4ifArxkUe9uV8PGrTn9K2k= 
-github.com/libp2p/go-libp2p-asn-util v0.0.0-20200825225859-85005c6cf052/go.mod h1:nRMRTab+kZuk0LnKZpxhOVH/ndsdr2Nr//Zltc/vwgo= +github.com/libp2p/go-libp2p v0.19.0/go.mod h1:Ki9jJXLO2YqrTIFxofV7Twyd3INWPT97+r8hGt7XPjI= +github.com/libp2p/go-libp2p v0.22.0/go.mod h1:UDolmweypBSjQb2f7xutPnwZ/fxioLbMBxSjRksxxU4= +github.com/libp2p/go-libp2p v0.23.4/go.mod h1:s9DEa5NLR4g+LZS+md5uGU4emjMWFiqkZr6hBTY8UxI= +github.com/libp2p/go-libp2p v0.25.0/go.mod h1:vXHmFpcfl+xIGN4qW58Bw3a0/SKGAesr5/T4IuJHE3o= +github.com/libp2p/go-libp2p v0.25.1/go.mod h1:xnK9/1d9+jeQCVvi/f1g12KqtVi/jP/SijtKV1hML3g= +github.com/libp2p/go-libp2p v0.32.2 h1:s8GYN4YJzgUoyeYNPdW7JZeZ5Ee31iNaIBfGYMAY4FQ= +github.com/libp2p/go-libp2p v0.32.2/go.mod h1:E0LKe+diV/ZVJVnOJby8VC5xzHF0660osg71skcxJvk= github.com/libp2p/go-libp2p-asn-util v0.1.0/go.mod h1:wu+AnM9Ii2KgO5jMmS1rz9dvzTdj8BXqsPR9HR0XB7I= -github.com/libp2p/go-libp2p-asn-util v0.2.0 h1:rg3+Os8jbnO5DxkC7K/Utdi+DkY3q/d1/1q+8WeNAsw= github.com/libp2p/go-libp2p-asn-util v0.2.0/go.mod h1:WoaWxbHKBymSN41hWSq/lGKJEca7TNm58+gGJi2WsLI= +github.com/libp2p/go-libp2p-asn-util v0.3.0 h1:gMDcMyYiZKkocGXDQ5nsUQyquC9+H+iLEQHwOCZ7s8s= +github.com/libp2p/go-libp2p-asn-util v0.3.0/go.mod h1:B1mcOrKUE35Xq/ASTmQ4tN3LNzVVaMNmq2NACuqyB9w= +github.com/libp2p/go-libp2p-autonat v0.1.0/go.mod h1:1tLf2yXxiE/oKGtDwPYWTSYG3PtvYlJmg7NeVtPRqH8= github.com/libp2p/go-libp2p-autonat v0.1.1/go.mod h1:OXqkeGOY2xJVWKAGV2inNF5aKN/djNA3fdpCWloIudE= github.com/libp2p/go-libp2p-autonat v0.2.0/go.mod h1:DX+9teU4pEEoZUqR1PiMlqliONQdNbfzE1C718tcViI= github.com/libp2p/go-libp2p-autonat v0.2.1/go.mod h1:MWtAhV5Ko1l6QBsHQNSuM6b1sRkXrpk0/LqCr+vCVxI= github.com/libp2p/go-libp2p-autonat v0.2.2/go.mod h1:HsM62HkqZmHR2k1xgX34WuWDzk/nBwNHoeyyT4IWV6A= +github.com/libp2p/go-libp2p-autonat v0.4.0/go.mod h1:YxaJlpr81FhdOv3W3BTconZPfhaYivRdf53g+S2wobk= github.com/libp2p/go-libp2p-autonat v0.4.2/go.mod h1:YxaJlpr81FhdOv3W3BTconZPfhaYivRdf53g+S2wobk= github.com/libp2p/go-libp2p-blankhost v0.1.1/go.mod h1:pf2fvdLJPsC1FsVrNP3DUUvMzUts2dsLLBEpo1vW1ro= github.com/libp2p/go-libp2p-blankhost v0.1.4/go.mod h1:oJF0saYsAXQCSfDq254GMNmLNz6ZTHTOvtF4ZydUvwU= github.com/libp2p/go-libp2p-blankhost v0.2.0/go.mod h1:eduNKXGTioTuQAUcZ5epXi9vMl+t4d8ugUBRQ4SqaNQ= -github.com/libp2p/go-libp2p-blankhost v0.3.0 h1:kTnLArltMabZlzY63pgGDA4kkUcLkBFSM98zBssn/IY= github.com/libp2p/go-libp2p-blankhost v0.3.0/go.mod h1:urPC+7U01nCGgJ3ZsV8jdwTp6Ji9ID0dMTvq+aJ+nZU= +github.com/libp2p/go-libp2p-circuit v0.1.0/go.mod h1:Ahq4cY3V9VJcHcn1SBXjr78AbFkZeIRmfunbA7pmFh8= github.com/libp2p/go-libp2p-circuit v0.1.4/go.mod h1:CY67BrEjKNDhdTk8UgBX1Y/H5c3xkAcs3gnksxY7osU= github.com/libp2p/go-libp2p-circuit v0.2.1/go.mod h1:BXPwYDN5A8z4OEY9sOfr2DUQMLQvKt/6oku45YUmjIo= github.com/libp2p/go-libp2p-circuit v0.4.0/go.mod h1:t/ktoFIUzM6uLQ+o1G6NuBl2ANhBKN9Bc8jRIk31MoA= -github.com/libp2p/go-libp2p-circuit v0.6.0 h1:rw/HlhmUB3OktS/Ygz6+2XABOmHKzZpPUuMNUMosj8w= github.com/libp2p/go-libp2p-circuit v0.6.0/go.mod h1:kB8hY+zCpMeScyvFrKrGicRdid6vNXbunKE4rXATZ0M= -github.com/libp2p/go-libp2p-connmgr v0.2.4 h1:TMS0vc0TCBomtQJyWr7fYxcVYYhx+q/2gF++G5Jkl/w= -github.com/libp2p/go-libp2p-connmgr v0.2.4/go.mod h1:YV0b/RIm8NGPnnNWM7hG9Q38OeQiQfKhHCCs1++ufn0= github.com/libp2p/go-libp2p-core v0.0.1/go.mod h1:g/VxnTZ/1ygHxH3dKok7Vno1VfpvGcGip57wjTU4fco= github.com/libp2p/go-libp2p-core v0.0.2/go.mod h1:9dAcntw/n46XycV4RnlBq3BpgrmyUi9LuoTNdPrbUco= +github.com/libp2p/go-libp2p-core v0.0.3/go.mod h1:j+YQMNz9WNSkNezXOsahp9kwZBKBvxLpKD316QWSJXE= github.com/libp2p/go-libp2p-core v0.0.4/go.mod 
h1:jyuCQP356gzfCFtRKyvAbNkyeuxb7OlyhWZ3nls5d2I= github.com/libp2p/go-libp2p-core v0.2.0/go.mod h1:X0eyB0Gy93v0DZtSYbEM7RnMChm9Uv3j7yRXjO77xSI= github.com/libp2p/go-libp2p-core v0.2.2/go.mod h1:8fcwTbsG2B+lTgRJ1ICZtiM5GWCWZVoVrLaDRvIRng0= github.com/libp2p/go-libp2p-core v0.2.4/go.mod h1:STh4fdfa5vDYr0/SzYYeqnt+E6KfEV5VxfIrm0bcI0g= -github.com/libp2p/go-libp2p-core v0.2.5/go.mod h1:6+5zJmKhsf7yHn1RbmYDu08qDUpIUxGdqHuEZckmZOA= github.com/libp2p/go-libp2p-core v0.3.0/go.mod h1:ACp3DmS3/N64c2jDzcV429ukDpicbL6+TrrxANBjPGw= github.com/libp2p/go-libp2p-core v0.3.1/go.mod h1:thvWy0hvaSBhnVBaW37BvzgVV68OUhgJJLAa6almrII= github.com/libp2p/go-libp2p-core v0.4.0/go.mod h1:49XGI+kc38oGVwqSBhDEwytaAxgZasHhFfQKibzTls0= github.com/libp2p/go-libp2p-core v0.5.0/go.mod h1:49XGI+kc38oGVwqSBhDEwytaAxgZasHhFfQKibzTls0= github.com/libp2p/go-libp2p-core v0.5.1/go.mod h1:uN7L2D4EvPCvzSH5SrhR72UWbnSGpt5/a35Sm4upn4Y= -github.com/libp2p/go-libp2p-core v0.5.3/go.mod h1:uN7L2D4EvPCvzSH5SrhR72UWbnSGpt5/a35Sm4upn4Y= github.com/libp2p/go-libp2p-core v0.5.4/go.mod h1:uN7L2D4EvPCvzSH5SrhR72UWbnSGpt5/a35Sm4upn4Y= github.com/libp2p/go-libp2p-core v0.5.5/go.mod h1:vj3awlOr9+GMZJFH9s4mpt9RHHgGqeHCopzbYKZdRjM= github.com/libp2p/go-libp2p-core v0.5.6/go.mod h1:txwbVEhHEXikXn9gfC7/UDDw7rkxuX0bJvM49Ykaswo= github.com/libp2p/go-libp2p-core v0.5.7/go.mod h1:txwbVEhHEXikXn9gfC7/UDDw7rkxuX0bJvM49Ykaswo= github.com/libp2p/go-libp2p-core v0.6.0/go.mod h1:txwbVEhHEXikXn9gfC7/UDDw7rkxuX0bJvM49Ykaswo= -github.com/libp2p/go-libp2p-core v0.6.1/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= github.com/libp2p/go-libp2p-core v0.7.0/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= github.com/libp2p/go-libp2p-core v0.8.0/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= github.com/libp2p/go-libp2p-core v0.8.1/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= github.com/libp2p/go-libp2p-core v0.8.2/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= github.com/libp2p/go-libp2p-core v0.8.5/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= github.com/libp2p/go-libp2p-core v0.8.6/go.mod h1:dgHr0l0hIKfWpGpqAMbpo19pen9wJfdCGv51mTmdpmM= -github.com/libp2p/go-libp2p-core v0.9.0/go.mod h1:ESsbz31oC3C1AvMJoGx26RTuCkNhmkSRCqZ0kQtJ2/8= github.com/libp2p/go-libp2p-core v0.10.0/go.mod h1:ECdxehoYosLYHgDDFa2N4yE8Y7aQRAMf0sX9mf2sbGg= github.com/libp2p/go-libp2p-core v0.11.0/go.mod h1:ECdxehoYosLYHgDDFa2N4yE8Y7aQRAMf0sX9mf2sbGg= github.com/libp2p/go-libp2p-core v0.12.0/go.mod h1:ECdxehoYosLYHgDDFa2N4yE8Y7aQRAMf0sX9mf2sbGg= github.com/libp2p/go-libp2p-core v0.14.0/go.mod h1:tLasfcVdTXnixsLB0QYaT1syJOhsbrhG7q6pGrHtBg8= github.com/libp2p/go-libp2p-core v0.15.1/go.mod h1:agSaboYM4hzB1cWekgVReqV5M4g5M+2eNNejV+1EEhs= -github.com/libp2p/go-libp2p-core v0.17.0 h1:QGU8mlxHytwTc4pq/aVQX9VDoAPiCHxfe/oOSwF+YDg= -github.com/libp2p/go-libp2p-core v0.17.0/go.mod h1:h/iAbFij28ASmI+tvXfjoipg1g2N33O4UN6LIb6QfoU= +github.com/libp2p/go-libp2p-core v0.19.0/go.mod h1:AkA+FUKQfYt1FLNef5fOPlo/naAWjKy/RCjkcPjqzYg= github.com/libp2p/go-libp2p-crypto v0.1.0/go.mod h1:sPUokVISZiy+nNuTTH/TY+leRSxnFj/2GLjtOTW90hI= +github.com/libp2p/go-libp2p-discovery v0.1.0/go.mod h1:4F/x+aldVHjHDHuX85x1zWoFTGElt8HnoDzwkFZm29g= github.com/libp2p/go-libp2p-discovery v0.2.0/go.mod h1:s4VGaxYMbw4+4+tsoQTqh7wfxg97AEdo4GYBt6BadWg= github.com/libp2p/go-libp2p-discovery v0.3.0/go.mod h1:o03drFnz9BVAZdzC/QUQ+NeQOu38Fu7LJGEOK2gQltw= github.com/libp2p/go-libp2p-discovery v0.5.0/go.mod h1:+srtPIU9gDaBNu//UHvcdliKBIcr4SfDcm0/PfPJLug= -github.com/libp2p/go-libp2p-discovery v0.5.1/go.mod 
h1:+srtPIU9gDaBNu//UHvcdliKBIcr4SfDcm0/PfPJLug= -github.com/libp2p/go-libp2p-discovery v0.6.0 h1:1XdPmhMJr8Tmj/yUfkJMIi8mgwWrLUsCB3bMxdT+DSo= -github.com/libp2p/go-libp2p-discovery v0.6.0/go.mod h1:/u1voHt0tKIe5oIA1RHBKQLVCWPna2dXmPNHc2zR9S8= -github.com/libp2p/go-libp2p-kad-dht v0.16.0 h1:epVRYl3O8dn47uV3wVD2+IobEvBPapEMVj4sWlvwQHU= -github.com/libp2p/go-libp2p-kad-dht v0.16.0/go.mod h1:YYLlG8AbpWVGbI/zFeSbiGT0n0lluH7IG0sHeounyWA= +github.com/libp2p/go-libp2p-kad-dht v0.19.0/go.mod h1:qPIXdiZsLczhV4/+4EO1jE8ae0YCW4ZOogc4WVIyTEU= +github.com/libp2p/go-libp2p-kad-dht v0.21.0/go.mod h1:Bhm9diAFmc6qcWAr084bHNL159srVZRKADdp96Qqd1I= +github.com/libp2p/go-libp2p-kad-dht v0.25.2 h1:FOIk9gHoe4YRWXTu8SY9Z1d0RILol0TrtApsMDPjAVQ= +github.com/libp2p/go-libp2p-kad-dht v0.25.2/go.mod h1:6za56ncRHYXX4Nc2vn8z7CZK0P4QiMcrn77acKLM2Oo= github.com/libp2p/go-libp2p-kbucket v0.3.1/go.mod h1:oyjT5O7tS9CQurok++ERgc46YLwEpuGoFq9ubvoUOio= -github.com/libp2p/go-libp2p-kbucket v0.4.7 h1:spZAcgxifvFZHBD8tErvppbnNiKA5uokDu3CV7axu70= -github.com/libp2p/go-libp2p-kbucket v0.4.7/go.mod h1:XyVo99AfQH0foSf176k4jY1xUJ2+jUJIZCSDm7r2YKk= -github.com/libp2p/go-libp2p-loggables v0.1.0 h1:h3w8QFfCt2UJl/0/NW4K829HX/0S4KD31PQ7m8UXXO8= +github.com/libp2p/go-libp2p-kbucket v0.5.0/go.mod h1:zGzGCpQd78b5BNTDGHNDLaTt9aDK/A02xeZp9QeFC4U= +github.com/libp2p/go-libp2p-kbucket v0.6.3 h1:p507271wWzpy2f1XxPzCQG9NiN6R6lHL9GiSErbQQo0= +github.com/libp2p/go-libp2p-kbucket v0.6.3/go.mod h1:RCseT7AH6eJWxxk2ol03xtP9pEHetYSPXOaJnOiD8i0= github.com/libp2p/go-libp2p-loggables v0.1.0/go.mod h1:EyumB2Y6PrYjr55Q3/tiJ/o3xoDasoRYM7nOzEpoa90= github.com/libp2p/go-libp2p-mplex v0.2.0/go.mod h1:Ejl9IyjvXJ0T9iqUTE1jpYATQ9NM3g+OtR+EMMODbKo= github.com/libp2p/go-libp2p-mplex v0.2.1/go.mod h1:SC99Rxs8Vuzrf/6WhmH41kNn13TiYdAWNYHrwImKLnE= @@ -898,15 +1488,14 @@ github.com/libp2p/go-libp2p-mplex v0.2.3/go.mod h1:CK3p2+9qH9x+7ER/gWWDYJ3QW5ZxW github.com/libp2p/go-libp2p-mplex v0.4.0/go.mod h1:yCyWJE2sc6TBTnFpjvLuEJgTSw/u+MamvzILKdX7asw= github.com/libp2p/go-libp2p-mplex v0.4.1/go.mod h1:cmy+3GfqfM1PceHTLL7zQzAAYaryDu6iPSC+CIb094g= github.com/libp2p/go-libp2p-mplex v0.5.0/go.mod h1:eLImPJLkj3iG5t5lq68w3Vm5NAQ5BcKwrrb2VmOYb3M= -github.com/libp2p/go-libp2p-mplex v0.6.0/go.mod h1:i3usuPrBbh9FD2fLZjGpotyNkwr42KStYZQY7BeTiu4= +github.com/libp2p/go-libp2p-nat v0.0.4/go.mod h1:N9Js/zVtAXqaeT99cXgTV9e75KpnWCvVOiGzlcHmBbY= github.com/libp2p/go-libp2p-nat v0.0.5/go.mod h1:1qubaE5bTZMJE+E/uu2URroMbzdubFz1ChgiN79yKPE= github.com/libp2p/go-libp2p-nat v0.0.6/go.mod h1:iV59LVhB3IkFvS6S6sauVTSOrNEANnINbI/fkaLimiw= github.com/libp2p/go-libp2p-nat v0.1.0/go.mod h1:DQzAG+QbDYjN1/C3B6vXucLtz3u9rEonLVPtZVzQqks= -github.com/libp2p/go-libp2p-netutil v0.1.0 h1:zscYDNVEcGxyUpMd0JReUZTrpMfia8PmLKcKF72EAMQ= github.com/libp2p/go-libp2p-netutil v0.1.0/go.mod h1:3Qv/aDqtMLTUyQeundkKsA+YCThNdbQD54k3TqjpbFU= +github.com/libp2p/go-libp2p-noise v0.1.1/go.mod h1:QDFLdKX7nluB7DEnlVPbz7xlLHdwHFA9HiohJRr3vwM= github.com/libp2p/go-libp2p-noise v0.2.0/go.mod h1:IEbYhBBzGyvdLBoxxULL/SGbJARhUeqlO8lVSREYu2Q= -github.com/libp2p/go-libp2p-noise v0.2.2/go.mod h1:IEbYhBBzGyvdLBoxxULL/SGbJARhUeqlO8lVSREYu2Q= -github.com/libp2p/go-libp2p-noise v0.3.0/go.mod h1:JNjHbociDJKHD64KTkzGnzqJ0FEV5gHJa6AB00kbCNQ= +github.com/libp2p/go-libp2p-noise v0.4.0/go.mod h1:BzzY5pyzCYSyJbQy9oD8z5oP2idsafjt4/X42h9DjZU= github.com/libp2p/go-libp2p-peer v0.2.0/go.mod h1:RCffaCvUyW2CJmG2gAWVqwePwW7JMgxjsHm7+J5kjWY= github.com/libp2p/go-libp2p-peerstore v0.1.0/go.mod h1:2CeHkQsr8svp4fZ+Oi9ykN1HBb6u0MOvdJ7YIsmcwtY= 
github.com/libp2p/go-libp2p-peerstore v0.1.3/go.mod h1:BJ9sHlm59/80oSkpWgr1MyY1ciXAXV397W6h1GH/uKI= @@ -916,28 +1505,22 @@ github.com/libp2p/go-libp2p-peerstore v0.2.1/go.mod h1:NQxhNjWxf1d4w6PihR8btWIRj github.com/libp2p/go-libp2p-peerstore v0.2.2/go.mod h1:NQxhNjWxf1d4w6PihR8btWIRjwRLBr4TYKfNgrUkOPA= github.com/libp2p/go-libp2p-peerstore v0.2.6/go.mod h1:ss/TWTgHZTMpsU/oKVVPQCGuDHItOpf2W8RxAi50P2s= github.com/libp2p/go-libp2p-peerstore v0.2.7/go.mod h1:ss/TWTgHZTMpsU/oKVVPQCGuDHItOpf2W8RxAi50P2s= -github.com/libp2p/go-libp2p-peerstore v0.2.8/go.mod h1:gGiPlXdz7mIHd2vfAsHzBNAMqSDkt2UBFwgcITgw1lA= github.com/libp2p/go-libp2p-peerstore v0.4.0/go.mod h1:rDJUFyzEWPpXpEwywkcTYYzDHlwza8riYMaUzaN6hX0= -github.com/libp2p/go-libp2p-peerstore v0.6.0 h1:HJminhQSGISBIRb93N6WK3t6Fa8OOTnHd/VBjL4mY5A= github.com/libp2p/go-libp2p-peerstore v0.6.0/go.mod h1:DGEmKdXrcYpK9Jha3sS7MhqYdInxJy84bIPtSu65bKc= -github.com/libp2p/go-libp2p-pnet v0.2.0 h1:J6htxttBipJujEjz1y0a5+eYoiPcFHhSYHH6na5f0/k= github.com/libp2p/go-libp2p-pnet v0.2.0/go.mod h1:Qqvq6JH/oMZGwqs3N1Fqhv8NVhrdYcO0BW4wssv21LA= +github.com/libp2p/go-libp2p-pubsub v0.10.0 h1:wS0S5FlISavMaAbxyQn3dxMOe2eegMfswM471RuHJwA= +github.com/libp2p/go-libp2p-pubsub v0.10.0/go.mod h1:1OxbaT/pFRO5h+Dpze8hdHQ63R0ke55XTs6b6NwLLkw= github.com/libp2p/go-libp2p-quic-transport v0.10.0/go.mod h1:RfJbZ8IqXIhxBRm5hqUEJqjiiY8xmEuq3HUDS993MkA= -github.com/libp2p/go-libp2p-quic-transport v0.11.2/go.mod h1:wlanzKtIh6pHrq+0U3p3DY9PJfGqxMgPaGKaK5LifwQ= github.com/libp2p/go-libp2p-quic-transport v0.13.0/go.mod h1:39/ZWJ1TW/jx1iFkKzzUg00W6tDJh73FC0xYudjr7Hc= github.com/libp2p/go-libp2p-quic-transport v0.16.0/go.mod h1:1BXjVMzr+w7EkPfiHkKnwsWjPjtfaNT0q8RS3tGDvEQ= -github.com/libp2p/go-libp2p-quic-transport v0.16.1/go.mod h1:1BXjVMzr+w7EkPfiHkKnwsWjPjtfaNT0q8RS3tGDvEQ= -github.com/libp2p/go-libp2p-quic-transport v0.17.0 h1:yFh4Gf5MlToAYLuw/dRvuzYd1EnE2pX3Lq1N6KDiWRQ= github.com/libp2p/go-libp2p-quic-transport v0.17.0/go.mod h1:x4pw61P3/GRCcSLypcQJE/Q2+E9f4X+5aRcZLXf20LM= github.com/libp2p/go-libp2p-record v0.1.0/go.mod h1:ujNc8iuE5dlKWVy6wuL6dd58t0n7xI4hAIl8pE6wu5Q= -github.com/libp2p/go-libp2p-record v0.1.2/go.mod h1:pal0eNcT5nqZaTV7UGhqeGqxFgGdsU/9W//C8dqjQDk= -github.com/libp2p/go-libp2p-record v0.1.3 h1:R27hoScIhQf/A8XJZ8lYpnqh9LatJ5YbHs28kCIfql0= -github.com/libp2p/go-libp2p-record v0.1.3/go.mod h1:yNUff/adKIfPnYQXgp6FQmNu3gLJ6EMg7+/vv2+9pY4= -github.com/libp2p/go-libp2p-resource-manager v0.1.5/go.mod h1:wJPNjeE4XQlxeidwqVY5G6DLOKqFK33u2n8blpl0I6Y= -github.com/libp2p/go-libp2p-resource-manager v0.3.0 h1:2+cYxUNi33tcydsVLt6K5Fv2E3OTiVeafltecAj15E0= -github.com/libp2p/go-libp2p-resource-manager v0.3.0/go.mod h1:K+eCkiapf+ey/LADO4TaMpMTP9/Qde/uLlrnRqV4PLQ= -github.com/libp2p/go-libp2p-routing-helpers v0.2.3 h1:xY61alxJ6PurSi+MXbywZpelvuU4U4p/gPTxjqCqTzY= -github.com/libp2p/go-libp2p-routing-helpers v0.2.3/go.mod h1:795bh+9YeoFl99rMASoiVgHdi5bjack0N1+AFAdbvBw= +github.com/libp2p/go-libp2p-record v0.2.0 h1:oiNUOCWno2BFuxt3my4i1frNrt7PerzB3queqa1NkQ0= +github.com/libp2p/go-libp2p-record v0.2.0/go.mod h1:I+3zMkvvg5m2OcSdoL0KPljyJyvNDFGKX7QdlpYUcwk= +github.com/libp2p/go-libp2p-resource-manager v0.2.1/go.mod h1:K+eCkiapf+ey/LADO4TaMpMTP9/Qde/uLlrnRqV4PLQ= +github.com/libp2p/go-libp2p-routing-helpers v0.4.0/go.mod h1:dYEAgkVhqho3/YKxfOEGdFMIcWfAFNlZX8iAIihYA2E= +github.com/libp2p/go-libp2p-routing-helpers v0.7.3 h1:u1LGzAMVRK9Nqq5aYDVOiq/HaB93U9WWczBzGyAC5ZY= +github.com/libp2p/go-libp2p-routing-helpers v0.7.3/go.mod h1:cN4mJAD/7zfPKXBcs9ze31JGYAZgzdABEm+q/hkswb8= 
github.com/libp2p/go-libp2p-secio v0.1.0/go.mod h1:tMJo2w7h3+wN4pgU2LSYeiKPrfqBgkOsdiKK77hE7c8= github.com/libp2p/go-libp2p-secio v0.2.0/go.mod h1:2JdZepB8J5V9mBp79BmwsaPQhRPNN2NrnB2lKQcdy6g= github.com/libp2p/go-libp2p-secio v0.2.1/go.mod h1:cWtZpILJqkqrSkiYcDBh5lA3wbT2Q+hz3rJQq3iftD8= @@ -947,11 +1530,10 @@ github.com/libp2p/go-libp2p-swarm v0.2.2/go.mod h1:fvmtQ0T1nErXym1/aa1uJEyN7JzaT github.com/libp2p/go-libp2p-swarm v0.2.3/go.mod h1:P2VO/EpxRyDxtChXz/VPVXyTnszHvokHKRhfkEgFKNM= github.com/libp2p/go-libp2p-swarm v0.2.8/go.mod h1:JQKMGSth4SMqonruY0a8yjlPVIkb0mdNSwckW7OYziM= github.com/libp2p/go-libp2p-swarm v0.3.0/go.mod h1:hdv95GWCTmzkgeJpP+GK/9D9puJegb7H57B5hWQR5Kk= +github.com/libp2p/go-libp2p-swarm v0.4.0/go.mod h1:XVFcO52VoLoo0eitSxNQWYq4D6sydGOweTOAjJNraCw= github.com/libp2p/go-libp2p-swarm v0.5.0/go.mod h1:sU9i6BoHE0Ve5SKz3y9WfKrh8dUat6JknzUehFx8xW4= -github.com/libp2p/go-libp2p-swarm v0.5.3/go.mod h1:NBn7eNW2lu568L7Ns9wdFrOhgRlkRnIDg0FLKbuu3i8= github.com/libp2p/go-libp2p-swarm v0.8.0/go.mod h1:sOMp6dPuqco0r0GHTzfVheVBh6UEL0L1lXUZ5ot2Fvc= github.com/libp2p/go-libp2p-swarm v0.10.0/go.mod h1:71ceMcV6Rg/0rIQ97rsZWMzto1l9LnNquef+efcRbmA= -github.com/libp2p/go-libp2p-swarm v0.10.2 h1:UaXf+CTq6Ns1N2V1EgqJ9Q3xaRsiN7ImVlDMpirMAWw= github.com/libp2p/go-libp2p-swarm v0.10.2/go.mod h1:Pdkq0QU5a+qu+oyqIV3bknMsnzk9lnNyKvB9acJ5aZs= github.com/libp2p/go-libp2p-testing v0.0.2/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= github.com/libp2p/go-libp2p-testing v0.0.3/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= @@ -961,40 +1543,38 @@ github.com/libp2p/go-libp2p-testing v0.1.1/go.mod h1:xaZWMJrPUM5GlDBxCeGUi7kI4eq github.com/libp2p/go-libp2p-testing v0.1.2-0.20200422005655-8775583591d8/go.mod h1:Qy8sAncLKpwXtS2dSnDOP8ktexIAHKu+J+pnZOFZLTc= github.com/libp2p/go-libp2p-testing v0.3.0/go.mod h1:efZkql4UZ7OVsEfaxNHZPzIehtsBXMrXnCfJIgDti5g= github.com/libp2p/go-libp2p-testing v0.4.0/go.mod h1:Q+PFXYoiYFN5CAEG2w3gLPEzotlKsNSbKQ/lImlOWF0= -github.com/libp2p/go-libp2p-testing v0.4.2/go.mod h1:Q+PFXYoiYFN5CAEG2w3gLPEzotlKsNSbKQ/lImlOWF0= github.com/libp2p/go-libp2p-testing v0.5.0/go.mod h1:QBk8fqIL1XNcno/l3/hhaIEn4aLRijpYOR+zVjjlh+A= github.com/libp2p/go-libp2p-testing v0.7.0/go.mod h1:OLbdn9DbgdMwv00v+tlp1l3oe2Cl+FAjoWIA2pa0X6E= -github.com/libp2p/go-libp2p-testing v0.8.0/go.mod h1:gRdsNxQSxAZowTgcLY7CC33xPmleZzoBpqSYbWenqPc= github.com/libp2p/go-libp2p-testing v0.9.0/go.mod h1:Td7kbdkWqYTJYQGTwzlgXwaqldraIanyjuRiAbK/XQU= -github.com/libp2p/go-libp2p-testing v0.9.2 h1:dCpODRtRaDZKF8HXT9qqqgON+OMEB423Knrgeod8j84= +github.com/libp2p/go-libp2p-testing v0.9.2/go.mod h1:Td7kbdkWqYTJYQGTwzlgXwaqldraIanyjuRiAbK/XQU= +github.com/libp2p/go-libp2p-testing v0.11.0/go.mod h1:qG4sF27dfKFoK9KlVzK2y52LQKhp0VEmLjV5aDqr1Hg= +github.com/libp2p/go-libp2p-testing v0.12.0 h1:EPvBb4kKMWO29qP4mZGyhVzUyR25dvfUIK5WDu6iPUA= +github.com/libp2p/go-libp2p-testing v0.12.0/go.mod h1:KcGDRXyN7sQCllucn1cOOS+Dmm7ujhfEyXQL5lvkcPg= github.com/libp2p/go-libp2p-tls v0.1.3/go.mod h1:wZfuewxOndz5RTnCAxFliGjvYSDA40sKitV4c50uI1M= -github.com/libp2p/go-libp2p-tls v0.2.0/go.mod h1:twrp2Ci4lE2GYspA1AnlYm+boYjqVruxDKJJj7s6xrc= github.com/libp2p/go-libp2p-tls v0.3.0/go.mod h1:fwF5X6PWGxm6IDRwF3V8AVCCj/hOd5oFlg+wo2FxJDY= -github.com/libp2p/go-libp2p-tls v0.3.1 h1:lsE2zYte+rZCEOHF72J1Fg3XK3dGQyKvI6i5ehJfEp0= -github.com/libp2p/go-libp2p-tls v0.3.1/go.mod h1:fwF5X6PWGxm6IDRwF3V8AVCCj/hOd5oFlg+wo2FxJDY= +github.com/libp2p/go-libp2p-tls v0.4.1/go.mod h1:EKCixHEysLNDlLUoKxv+3f/Lp90O2EXNjTr0UQDnrIw= 
github.com/libp2p/go-libp2p-transport-upgrader v0.1.1/go.mod h1:IEtA6or8JUbsV07qPW4r01GnTenLW4oi3lOPbUMGJJA= github.com/libp2p/go-libp2p-transport-upgrader v0.2.0/go.mod h1:mQcrHj4asu6ArfSoMuyojOdjx73Q47cYD7s5+gZOlns= github.com/libp2p/go-libp2p-transport-upgrader v0.3.0/go.mod h1:i+SKzbRnvXdVbU3D1dwydnTmKRPXiAR/fyvi1dXuL4o= +github.com/libp2p/go-libp2p-transport-upgrader v0.4.0/go.mod h1:J4ko0ObtZSmgn5BX5AmegP+dK3CSnU2lMCKsSq/EY0s= github.com/libp2p/go-libp2p-transport-upgrader v0.4.2/go.mod h1:NR8ne1VwfreD5VIWIU62Agt/J18ekORFU/j1i2y8zvk= -github.com/libp2p/go-libp2p-transport-upgrader v0.4.3/go.mod h1:bpkldbOWXMrXhpZbSV1mQxTrefOg2Fi+k1ClDSA4ppw= -github.com/libp2p/go-libp2p-transport-upgrader v0.4.6/go.mod h1:JE0WQuQdy+uLZ5zOaI3Nw9dWGYJIA7mywEtP2lMvnyk= github.com/libp2p/go-libp2p-transport-upgrader v0.5.0/go.mod h1:Rc+XODlB3yce7dvFV4q/RmyJGsFcCZRkeZMu/Zdg0mo= github.com/libp2p/go-libp2p-transport-upgrader v0.7.0/go.mod h1:GIR2aTRp1J5yjVlkUoFqMkdobfob6RnAwYg/RZPhrzg= -github.com/libp2p/go-libp2p-transport-upgrader v0.7.1 h1:MSMe+tUfxpC9GArTz7a4G5zQKQgGh00Vio87d3j3xIg= github.com/libp2p/go-libp2p-transport-upgrader v0.7.1/go.mod h1:GIR2aTRp1J5yjVlkUoFqMkdobfob6RnAwYg/RZPhrzg= github.com/libp2p/go-libp2p-xor v0.1.0/go.mod h1:LSTM5yRnjGZbWNTA/hRwq2gGFrvRIbQJscoIL/u6InY= github.com/libp2p/go-libp2p-yamux v0.2.0/go.mod h1:Db2gU+XfLpm6E4rG5uGCFX6uXA8MEXOxFcRoXUODaK8= +github.com/libp2p/go-libp2p-yamux v0.2.1/go.mod h1:1FBXiHDk1VyRM1C0aez2bCfHQ4vMZKkAQzZbkSQt5fI= github.com/libp2p/go-libp2p-yamux v0.2.2/go.mod h1:lIohaR0pT6mOt0AZ0L2dFze9hds9Req3OfS+B+dv4qw= github.com/libp2p/go-libp2p-yamux v0.2.5/go.mod h1:Zpgj6arbyQrmZ3wxSZxfBmbdnWtbZ48OpsfmQVTErwA= github.com/libp2p/go-libp2p-yamux v0.2.7/go.mod h1:X28ENrBMU/nm4I3Nx4sZ4dgjZ6VhLEn0XhIoZ5viCwU= github.com/libp2p/go-libp2p-yamux v0.2.8/go.mod h1:/t6tDqeuZf0INZMTgd0WxIRbtK2EzI2h7HbFm9eAKI4= github.com/libp2p/go-libp2p-yamux v0.4.0/go.mod h1:+DWDjtFMzoAwYLVkNZftoucn7PelNoy5nm3tZ3/Zw30= github.com/libp2p/go-libp2p-yamux v0.5.0/go.mod h1:AyR8k5EzyM2QN9Bbdg6X1SkVVuqLwTGf0L4DFq9g6po= +github.com/libp2p/go-libp2p-yamux v0.5.1/go.mod h1:dowuvDu8CRWmr0iqySMiSxK+W0iL5cMVO9S94Y6gkv4= github.com/libp2p/go-libp2p-yamux v0.5.4/go.mod h1:tfrXbyaTqqSU654GTvK3ocnSZL3BuHoeTSqhcel1wsE= github.com/libp2p/go-libp2p-yamux v0.8.0/go.mod h1:yTkPgN2ib8FHyU1ZcVD7aelzyAqXXwEPbyx+aSKm9h8= github.com/libp2p/go-libp2p-yamux v0.8.1/go.mod h1:rUozF8Jah2dL9LLGyBaBeTQeARdwhefMCTQVQt6QobE= -github.com/libp2p/go-libp2p-yamux v0.8.2 h1:6GKWntresp0TFxMP/oSoH96nV8XKJRdynXsdp43dn0Y= -github.com/libp2p/go-libp2p-yamux v0.8.2/go.mod h1:rUozF8Jah2dL9LLGyBaBeTQeARdwhefMCTQVQt6QobE= +github.com/libp2p/go-libp2p-yamux v0.9.1/go.mod h1:wRc6wvyxQINFcKe7daL4BeQ02Iyp+wxyC8WCNfngBrA= github.com/libp2p/go-maddr-filter v0.0.4/go.mod h1:6eT12kSQMA9x2pvFQa+xesMKUBlj9VImZbj3B9FBH/Q= github.com/libp2p/go-maddr-filter v0.0.5/go.mod h1:Jk+36PMfIqCJhAnaASRH83bdAvfDRp/w6ENFaC9bG+M= github.com/libp2p/go-maddr-filter v0.1.0/go.mod h1:VzZhTXkMucEGGEOSKddrwGiOv0tUhgnKqNEmIAz/bPU= @@ -1005,39 +1585,42 @@ github.com/libp2p/go-mplex v0.1.2/go.mod h1:Xgz2RDCi3co0LeZfgjm4OgUF15+sVR8SRcu3 github.com/libp2p/go-mplex v0.2.0/go.mod h1:0Oy/A9PQlwBytDRp4wSkFnzHYDKcpLot35JQ6msjvYQ= github.com/libp2p/go-mplex v0.3.0/go.mod h1:0Oy/A9PQlwBytDRp4wSkFnzHYDKcpLot35JQ6msjvYQ= github.com/libp2p/go-mplex v0.4.0/go.mod h1:y26Lx+wNVtMYMaPu300Cbot5LkEZ4tJaNYeHeT9dh6E= -github.com/libp2p/go-mplex v0.6.0/go.mod h1:y26Lx+wNVtMYMaPu300Cbot5LkEZ4tJaNYeHeT9dh6E= +github.com/libp2p/go-mplex v0.7.0/go.mod 
h1:rW8ThnRcYWft/Jb2jeORBmPd6xuG3dGxWN/W168L9EU= github.com/libp2p/go-msgio v0.0.2/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= +github.com/libp2p/go-msgio v0.0.3/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= github.com/libp2p/go-msgio v0.0.4/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= github.com/libp2p/go-msgio v0.0.6/go.mod h1:4ecVB6d9f4BDSL5fqvPiC4A3KivjWn+Venn/1ALLMWA= -github.com/libp2p/go-msgio v0.1.0/go.mod h1:eNlv2vy9V2X/kNldcZ+SShFE++o2Yjxwx6RAYsmgJnE= -github.com/libp2p/go-msgio v0.2.0 h1:W6shmB+FeynDrUVl2dgFQvzfBZcXiyqY4VmpQLu9FqU= github.com/libp2p/go-msgio v0.2.0/go.mod h1:dBVM1gW3Jk9XqHkU4eKdGvVHdLa51hoGfll6jMJMSlY= +github.com/libp2p/go-msgio v0.3.0 h1:mf3Z8B1xcFN314sWX+2vOTShIE0Mmn2TXn3YCUQGNj0= +github.com/libp2p/go-msgio v0.3.0/go.mod h1:nyRM819GmVaF9LX3l03RMh10QdOroF++NBbxAb0mmDM= +github.com/libp2p/go-nat v0.0.3/go.mod h1:88nUEt0k0JD45Bk93NIwDqjlhiOwOoV36GchpcVc1yI= github.com/libp2p/go-nat v0.0.4/go.mod h1:Nmw50VAvKuk38jUBcmNh6p9lUJLoODbJRvYAa/+KSDo= github.com/libp2p/go-nat v0.0.5/go.mod h1:B7NxsVNPZmRLvMOwiEO1scOSyjA56zxYAGv1yQgRkEU= -github.com/libp2p/go-nat v0.1.0 h1:MfVsH6DLcpa04Xr+p8hmVRG4juse0s3J8HyNWYHffXg= github.com/libp2p/go-nat v0.1.0/go.mod h1:X7teVkwRHNInVNWQiO/tAiAVRwSr5zoRz4YSTC3uRBM= +github.com/libp2p/go-nat v0.2.0 h1:Tyz+bUFAYqGyJ/ppPPymMGbIgNRH+WqC5QrT5fKrrGk= +github.com/libp2p/go-nat v0.2.0/go.mod h1:3MJr+GRpRkyT65EpVPBstXLvOlAPzUVlG6Pwg9ohLJk= github.com/libp2p/go-netroute v0.1.2/go.mod h1:jZLDV+1PE8y5XxBySEBgbuVAXbhtuHSdmLPL2n9MKbk= github.com/libp2p/go-netroute v0.1.3/go.mod h1:jZLDV+1PE8y5XxBySEBgbuVAXbhtuHSdmLPL2n9MKbk= github.com/libp2p/go-netroute v0.1.5/go.mod h1:V1SR3AaECRkEQCoFFzYwVYWvYIEtlxx89+O3qcpCl4A= github.com/libp2p/go-netroute v0.1.6/go.mod h1:AqhkMh0VuWmfgtxKPp3Oc1LdU5QSWS7wl0QLhSZqXxQ= -github.com/libp2p/go-netroute v0.2.0 h1:0FpsbsvuSnAhXFnCY0VLFbJOzaK0VnP0r1QT/o4nWRE= github.com/libp2p/go-netroute v0.2.0/go.mod h1:Vio7LTzZ+6hoT4CMZi5/6CpY3Snzh2vgZhWgxMNwlQI= +github.com/libp2p/go-netroute v0.2.1 h1:V8kVrpD8GK0Riv15/7VN6RbUQ3URNZVosw7H2v9tksU= +github.com/libp2p/go-netroute v0.2.1/go.mod h1:hraioZr0fhBjG0ZRXJJ6Zj2IVEVNx6tDTFQfSmcq7mQ= github.com/libp2p/go-openssl v0.0.2/go.mod h1:v8Zw2ijCSWBQi8Pq5GAixw6DbFfa9u6VIYDXnvOXkc0= github.com/libp2p/go-openssl v0.0.3/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= github.com/libp2p/go-openssl v0.0.4/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= github.com/libp2p/go-openssl v0.0.5/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= -github.com/libp2p/go-openssl v0.0.7 h1:eCAzdLejcNVBzP/iZM9vqHnQm+XyCEbSSIheIPRGNsw= github.com/libp2p/go-openssl v0.0.7/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= +github.com/libp2p/go-openssl v0.1.0/go.mod h1:OiOxwPpL3n4xlenjx2h7AwSGaFSC/KZvf6gNdOBQMtc= github.com/libp2p/go-reuseport v0.0.1/go.mod h1:jn6RmB1ufnQwl0Q1f+YxAj8isJgDCQzaaxIFYDhcYEA= github.com/libp2p/go-reuseport v0.0.2/go.mod h1:SPD+5RwGC7rcnzngoYC86GjPzjSywuQyMVAheVBD9nQ= github.com/libp2p/go-reuseport v0.1.0/go.mod h1:bQVn9hmfcTaoo0c9v5pBhOarsU1eNOBZdaAd2hzXRKU= -github.com/libp2p/go-reuseport v0.2.0 h1:18PRvIMlpY6ZK85nIAicSBuXXvrYoSw3dsBAR7zc560= github.com/libp2p/go-reuseport v0.2.0/go.mod h1:bvVho6eLMm6Bz5hmU0LYN3ixd3nPPvtIlaURZZgOY4k= +github.com/libp2p/go-reuseport v0.4.0 h1:nR5KU7hD0WxXCJbmw7r2rhRYruNRl2koHw8fQscQm2s= +github.com/libp2p/go-reuseport v0.4.0/go.mod h1:ZtI03j/wO5hZVDFo2jKywN6bYKWLOy8Se6DrI2E1cLU= github.com/libp2p/go-reuseport-transport v0.0.2/go.mod h1:YkbSDrvjUVDL6b8XqriyA20obEtsW9BLkuOUyQAOCbs= 
github.com/libp2p/go-reuseport-transport v0.0.3/go.mod h1:Spv+MPft1exxARzP2Sruj2Wb5JSyHNncjf1Oi2dEbzM= github.com/libp2p/go-reuseport-transport v0.0.4/go.mod h1:trPa7r/7TJK/d+0hdBLOCGvpQQVOU74OXbNCIMkufGw= -github.com/libp2p/go-reuseport-transport v0.0.5/go.mod h1:TC62hhPc8qs5c/RoXDZG6YmjK+/YWUPC0yYmeUecbjc= -github.com/libp2p/go-reuseport-transport v0.1.0 h1:C3PHeHjmnz8m6f0uydObj02tMEoi7CyD1zuN7xQT8gc= github.com/libp2p/go-reuseport-transport v0.1.0/go.mod h1:vev0C0uMkzriDY59yFHD9v+ujJvYmDQVLowvAjEOmfw= github.com/libp2p/go-sockaddr v0.0.2/go.mod h1:syPvOmNs24S3dFVGJA1/mrqdeijPxLV2Le3BRLKd68k= github.com/libp2p/go-sockaddr v0.1.0/go.mod h1:syPvOmNs24S3dFVGJA1/mrqdeijPxLV2Le3BRLKd68k= @@ -1045,46 +1628,49 @@ github.com/libp2p/go-sockaddr v0.1.1/go.mod h1:syPvOmNs24S3dFVGJA1/mrqdeijPxLV2L github.com/libp2p/go-stream-muxer v0.0.1/go.mod h1:bAo8x7YkSpadMTbtTaxGVHWUQsR/l5MEaHbKaliuT14= github.com/libp2p/go-stream-muxer-multistream v0.2.0/go.mod h1:j9eyPol/LLRqT+GPLSxvimPhNph4sfYfMoDPd7HkzIc= github.com/libp2p/go-stream-muxer-multistream v0.3.0/go.mod h1:yDh8abSIzmZtqtOt64gFJUXEryejzNb0lisTt+fAMJA= -github.com/libp2p/go-stream-muxer-multistream v0.4.0 h1:HsM/9OdtqnIzjVXcxTXjmqKrj3gJ8kacaOJwJS1ipaY= github.com/libp2p/go-stream-muxer-multistream v0.4.0/go.mod h1:nb+dGViZleRP4XcyHuZSVrJCBl55nRBOMmiSL/dyziw= github.com/libp2p/go-tcp-transport v0.1.0/go.mod h1:oJ8I5VXryj493DEJ7OsBieu8fcg2nHGctwtInJVpipc= github.com/libp2p/go-tcp-transport v0.1.1/go.mod h1:3HzGvLbx6etZjnFlERyakbaYPdfjg2pWP97dFZworkY= github.com/libp2p/go-tcp-transport v0.2.0/go.mod h1:vX2U0CnWimU4h0SGSEsg++AzvBcroCGYw28kh94oLe0= +github.com/libp2p/go-tcp-transport v0.2.1/go.mod h1:zskiJ70MEfWz2MKxvFB/Pv+tPIB1PpPUrHIWQ8aFw7M= github.com/libp2p/go-tcp-transport v0.2.3/go.mod h1:9dvr03yqrPyYGIEN6Dy5UvdJZjyPFvl1S/igQ5QD1SU= -github.com/libp2p/go-tcp-transport v0.2.7/go.mod h1:lue9p1b3VmZj1MhhEGB/etmvF/nBQ0X9CW2DutBT3MM= -github.com/libp2p/go-tcp-transport v0.2.8/go.mod h1:64rSfVidkYPLqbzpcN2IwHY4pmgirp67h++hZ/rcndQ= github.com/libp2p/go-tcp-transport v0.4.0/go.mod h1:0y52Rwrn4076xdJYu/51/qJIdxz+EWDAOG2S45sV3VI= github.com/libp2p/go-tcp-transport v0.5.0/go.mod h1:UPPL0DIjQqiWRwVAb+CEQlaAG0rp/mCqJfIhFcLHc4Y= -github.com/libp2p/go-tcp-transport v0.5.1 h1:edOOs688VLZAozWC7Kj5/6HHXKNwi9M6wgRmmLa8M6Q= github.com/libp2p/go-tcp-transport v0.5.1/go.mod h1:UPPL0DIjQqiWRwVAb+CEQlaAG0rp/mCqJfIhFcLHc4Y= +github.com/libp2p/go-testutil v0.1.0/go.mod h1:81b2n5HypcVyrCg/MJx4Wgfp/VHojytjVe/gLzZ2Ehc= +github.com/libp2p/go-ws-transport v0.1.0/go.mod h1:rjw1MG1LU9YDC6gzmwObkPd/Sqwhw7yT74kj3raBFuo= github.com/libp2p/go-ws-transport v0.2.0/go.mod h1:9BHJz/4Q5A9ludYWKoGCFC5gUElzlHoKzu0yY9p/klM= github.com/libp2p/go-ws-transport v0.3.0/go.mod h1:bpgTJmRZAvVHrgHybCVyqoBmyLQ1fiZuEaBYusP5zsk= github.com/libp2p/go-ws-transport v0.4.0/go.mod h1:EcIEKqf/7GDjth6ksuS/6p7R49V4CBY6/E7R/iyhYUA= -github.com/libp2p/go-ws-transport v0.5.0/go.mod h1:I2juo1dNTbl8BKSBYo98XY85kU2xds1iamArLvl8kNg= github.com/libp2p/go-ws-transport v0.6.0/go.mod h1:dXqtI9e2JV9FtF1NOtWVZSKXh5zXvnuwPXfj8GPBbYU= github.com/libp2p/go-yamux v1.2.2/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= +github.com/libp2p/go-yamux v1.2.3/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= github.com/libp2p/go-yamux v1.3.0/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= github.com/libp2p/go-yamux v1.3.3/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= github.com/libp2p/go-yamux v1.3.5/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= github.com/libp2p/go-yamux v1.3.7/go.mod 
h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/hkmpooHE= github.com/libp2p/go-yamux v1.4.0/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/hkmpooHE= github.com/libp2p/go-yamux v1.4.1/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/hkmpooHE= +github.com/libp2p/go-yamux/v2 v2.0.0/go.mod h1:NVWira5+sVUIU6tu1JWvaRn1dRnG+cawOJiflsAM+7U= github.com/libp2p/go-yamux/v2 v2.2.0/go.mod h1:3So6P6TV6r75R9jiBpiIKgU/66lOarCZjqROGxzPpPQ= github.com/libp2p/go-yamux/v3 v3.0.1/go.mod h1:s2LsDhHbh+RfCsQoICSYt58U2f8ijtPANFD8BmE74Bo= github.com/libp2p/go-yamux/v3 v3.0.2/go.mod h1:s2LsDhHbh+RfCsQoICSYt58U2f8ijtPANFD8BmE74Bo= -github.com/libp2p/go-yamux/v3 v3.1.2 h1:lNEy28MBk1HavUAlzKgShp+F6mn/ea1nDYWftZhFW9Q= +github.com/libp2p/go-yamux/v3 v3.1.1/go.mod h1:jeLEQgLXqE2YqX1ilAClIfCMDY+0uXQUKmmb/qp0gT4= github.com/libp2p/go-yamux/v3 v3.1.2/go.mod h1:jeLEQgLXqE2YqX1ilAClIfCMDY+0uXQUKmmb/qp0gT4= -github.com/libp2p/zeroconf/v2 v2.0.0/go.mod h1:J85R/d9joD8u8F9aHM8pBXygtG9W02enEwS+wWeL6yo= +github.com/libp2p/go-yamux/v4 v4.0.0/go.mod h1:NWjl8ZTLOGlozrXSOZ/HlfG++39iKNnM5wwmtQP1YB4= +github.com/libp2p/go-yamux/v4 v4.0.1 h1:FfDR4S1wj6Bw2Pqbc8Uz7pCxeRBPbwsBbEdfwiCypkQ= +github.com/libp2p/go-yamux/v4 v4.0.1/go.mod h1:NWjl8ZTLOGlozrXSOZ/HlfG++39iKNnM5wwmtQP1YB4= github.com/libp2p/zeroconf/v2 v2.1.1/go.mod h1:fuJqLnUwZTshS3U/bMRJ3+ow/v9oid1n0DmyYyNO1Xs= +github.com/libp2p/zeroconf/v2 v2.2.0/go.mod h1:fuJqLnUwZTshS3U/bMRJ3+ow/v9oid1n0DmyYyNO1Xs= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= github.com/lucas-clemente/quic-go v0.19.3/go.mod h1:ADXpNbTQjq1hIzCpB+y/k5iz4n4z4IwqoLb94Kh5Hu8= -github.com/lucas-clemente/quic-go v0.21.2/go.mod h1:vF5M1XqhBAHgbjKcJOXY3JZz3GP0T3FQhz/uyOUS38Q= github.com/lucas-clemente/quic-go v0.23.0/go.mod h1:paZuzjXCE5mj6sikVLMvqXk8lJV2AsqtJ6bDhjEfxx0= github.com/lucas-clemente/quic-go v0.25.0/go.mod h1:YtzP8bxRVCBlO77yRanE264+fY/T2U9ZlW1AaHOsMOg= github.com/lucas-clemente/quic-go v0.27.0/go.mod h1:AzgQoPda7N+3IqMMMkywBKggIFo2KT6pfnlrQ2QieeI= -github.com/lucas-clemente/quic-go v0.27.1 h1:sOw+4kFSVrdWOYmUjufQ9GBVPqZ+tu+jMtXxXNmRJyk= -github.com/lucas-clemente/quic-go v0.27.1/go.mod h1:AzgQoPda7N+3IqMMMkywBKggIFo2KT6pfnlrQ2QieeI= +github.com/lucas-clemente/quic-go v0.28.1/go.mod h1:oGz5DKK41cJt5+773+BSO9BXDsREY4HLf7+0odGAPO0= +github.com/lucas-clemente/quic-go v0.29.1/go.mod h1:CTcNfLYJS2UuRNB+zcNlgvkjBhxX6Hm3WUxxAQx2mgE= +github.com/lucasjones/reggen v0.0.0-20180717132126-cdb49ff09d77/go.mod h1:5ELEyG+X8f+meRWHuqUOewBOhvHkl7M76pdGEansxW4= github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI= github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= @@ -1092,51 +1678,87 @@ github.com/magiconair/properties v1.8.6 h1:5ibWZ6iY0NctNGWo87LalDlEZ6R41TqbbDamh github.com/magiconair/properties v1.8.6/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson 
v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.7.1/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= +github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/manifoldco/promptui v0.9.0 h1:3V4HzJk1TtXW1MTZMP7mdlwbBpIinw3HztaIlYthEiA= +github.com/manifoldco/promptui v0.9.0/go.mod h1:ka04sppxSGFAtxX0qhlYQjISsg9mR4GWtQEhdbn6Pgg= github.com/marten-seemann/qpack v0.2.1/go.mod h1:F7Gl5L1jIgN1D11ucXefiuJS9UMVP2opoCp2jDKb7wc= github.com/marten-seemann/qtls v0.10.0/go.mod h1:UvMd1oaYDACI99/oZUYLzMCkBXQVT0aGm99sJhbT8hs= github.com/marten-seemann/qtls-go1-15 v0.1.1/go.mod h1:GyFwywLKkRt+6mfU99csTEY1joMZz5vmB1WNZH3P81I= github.com/marten-seemann/qtls-go1-15 v0.1.4/go.mod h1:GyFwywLKkRt+6mfU99csTEY1joMZz5vmB1WNZH3P81I= -github.com/marten-seemann/qtls-go1-15 v0.1.5/go.mod h1:GyFwywLKkRt+6mfU99csTEY1joMZz5vmB1WNZH3P81I= github.com/marten-seemann/qtls-go1-16 v0.1.4/go.mod h1:gNpI2Ol+lRS3WwSOtIUUtRwZEQMXjYK+dQSBFbethAk= -github.com/marten-seemann/qtls-go1-16 v0.1.5 h1:o9JrYPPco/Nukd/HpOHMHZoBDXQqoNtUCmny98/1uqQ= github.com/marten-seemann/qtls-go1-16 v0.1.5/go.mod h1:gNpI2Ol+lRS3WwSOtIUUtRwZEQMXjYK+dQSBFbethAk= -github.com/marten-seemann/qtls-go1-17 v0.1.0-rc.1/go.mod h1:fz4HIxByo+LlWcreM4CZOYNuz3taBQ8rN2X6FqvaWo8= github.com/marten-seemann/qtls-go1-17 v0.1.0/go.mod h1:fz4HIxByo+LlWcreM4CZOYNuz3taBQ8rN2X6FqvaWo8= -github.com/marten-seemann/qtls-go1-17 v0.1.1 h1:DQjHPq+aOzUeh9/lixAGunn6rIOQyWChPSI4+hgW7jc= github.com/marten-seemann/qtls-go1-17 v0.1.1/go.mod h1:C2ekUKcDdz9SDWxec1N/MvcXBpaX9l3Nx67XaR84L5s= +github.com/marten-seemann/qtls-go1-17 v0.1.2/go.mod h1:C2ekUKcDdz9SDWxec1N/MvcXBpaX9l3Nx67XaR84L5s= github.com/marten-seemann/qtls-go1-18 v0.1.0-beta.1/go.mod h1:PUhIQk19LoFt2174H4+an8TYvWOGjb/hHwphBeaDHwI= -github.com/marten-seemann/qtls-go1-18 v0.1.1 h1:qp7p7XXUFL7fpBvSS1sWD+uSqPvzNQK43DH+/qEkj0Y= github.com/marten-seemann/qtls-go1-18 v0.1.1/go.mod h1:mJttiymBAByA49mhlNZZGrH5u1uXYZJ+RW28Py7f4m4= +github.com/marten-seemann/qtls-go1-18 v0.1.2/go.mod h1:mJttiymBAByA49mhlNZZGrH5u1uXYZJ+RW28Py7f4m4= +github.com/marten-seemann/qtls-go1-19 v0.1.0-beta.1/go.mod h1:5HTDWtVudo/WFsHKRNuOhWlbdjrfs5JHrYb0wIJqGpI= +github.com/marten-seemann/qtls-go1-19 v0.1.0/go.mod h1:5HTDWtVudo/WFsHKRNuOhWlbdjrfs5JHrYb0wIJqGpI= github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8u83wA0rVZ8ttrq5CpaPZdvrK0LP2lOk= github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod h1:QuCEs1Nt24+FYQEqAAncTDPJIuGs+LxK1MCiFL25pMU= +github.com/marten-seemann/webtransport-go v0.1.1/go.mod h1:kBEh5+RSvOA4troP1vyOVBWK4MIMzDICXVrvCPrYcrM= +github.com/matryer/moq v0.0.0-20190312154309-6cfb0558e1bd/go.mod h1:9ELz6aaclSIGnZBoaSLZ3NAl1VTufbOrXBPvtcy6WiQ= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= -github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40= +github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable 
v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.13/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= -github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-pointer v0.0.1/go.mod h1:2zXcozF6qYGgmsG+SeTZz3oAbFLdD3OWqnUbNvJZAlc= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= -github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= +github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mattn/go-runewidth v0.0.13 h1:lTGmDsbAYt5DmK6OnoV7EuIF1wEIFAcxld6ypU4OSgU= +github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= +github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= +github.com/mattn/go-tty v0.0.0-20180907095812-13ff1204f104/go.mod h1:XPvLUNfbS4fJH25nqRHfWLMa1ONC8Amw+mIA639KxkE= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg= +github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k= github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.12/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.28/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= +github.com/miekg/dns v1.1.31/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= -github.com/miekg/dns v1.1.43 
h1:JKfpVSCB84vrAmHzyrsxB5NAr5kLoMXZArPSw7Qlgyg= github.com/miekg/dns v1.1.43/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4= +github.com/miekg/dns v1.1.48/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME= +github.com/miekg/dns v1.1.50/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME= +github.com/miekg/dns v1.1.56 h1:5imZaSeoRNvpM9SzWNhEcP9QliKiz20/dA2QabIGVnE= +github.com/miekg/dns v1.1.56/go.mod h1:cRm6Oo2C8TY9ZS/TqsSrseAcncm74lfK5G+ikN2SWWY= github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c h1:bzE/A84HN25pxAuk9Eej1Kz9OUelF97nAc82bDquQI8= github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c/go.mod h1:0SQS9kMwD2VsyFEB++InYyBJroV/FRmBgcydeSUcJms= github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b h1:z78hV3sbSMAUoyUMM0I83AUIT6Hu17AWfgjzIbtrYFc= github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b/go.mod h1:lxPUiZwKoFL8DUUmalo2yJJUCxbPKtm8OKfqr2/FTNU= github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc h1:PTfri+PuQmWDqERdnNMiD9ZejrlswWrCpBEZgWOiTrc= github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc/go.mod h1:cGKTAVKx4SxOuR/czcZ/E2RSJ3sfHs8FpHhQ5CWMf9s= +github.com/mimoo/StrobeGo v0.0.0-20181016162300-f8f6d4d2b643/go.mod h1:43+3pMjjKimDBf5Kr4ZFNGbLql1zKkbImw+fZbw3geM= +github.com/mimoo/StrobeGo v0.0.0-20210601165009-122bf33a46e0 h1:QRUSJEgZn2Snx0EmT/QLXibWjSUDjKWvXIT19NBVp94= +github.com/mimoo/StrobeGo v0.0.0-20210601165009-122bf33a46e0/go.mod h1:43+3pMjjKimDBf5Kr4ZFNGbLql1zKkbImw+fZbw3geM= github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ= github.com/minio/highwayhash v1.0.2 h1:Aak5U0nElisjDCfPSG79Tgzkn2gl66NxOMspRrKnA/g= github.com/minio/highwayhash v1.0.2/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY= @@ -1145,20 +1767,29 @@ github.com/minio/sha256-simd v0.0.0-20190328051042-05b4dd3047e5/go.mod h1:2FMWW+ github.com/minio/sha256-simd v0.1.0/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= -github.com/minio/sha256-simd v1.0.0 h1:v1ta+49hkWZyvaKwrQB8elexRqm6Y0aMLjCNsrYxo6g= github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM= +github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM= +github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0= github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU= +github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8= github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod 
h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/pointerstructure v1.2.0 h1:O+i9nHnXS3l/9Wu7r4NrEdwA2VFTicjUEN1uBnDo34A= +github.com/mitchellh/pointerstructure v1.2.0/go.mod h1:BRAsLI5zgXmw97Lf6s25bs8ohIXc3tViBH44KcwB2g4= +github.com/mmcloughlin/addchain v0.4.0 h1:SobOdjm2xLj1KkXN5/n0xTIWyZA2+s99UCY1iPfkHRY= +github.com/mmcloughlin/addchain v0.4.0/go.mod h1:A86O+tHqZLMNO4w6ZZ4FlVQEadcoqkyU72HC5wJ4RlU= +github.com/mmcloughlin/profile v0.1.1/go.mod h1:IhHD7q1ooxgwTgjxQYkACGA77oFTDdFVejUS1/tS/qU= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -1166,19 +1797,23 @@ github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lN github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5/go.mod h1:caMODM3PzxT8aQXRPkAt8xlV/e7d7w8GM5g0fa5F0D8= github.com/mr-tron/base58 v1.1.0/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= github.com/mr-tron/base58 v1.1.1/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= github.com/mr-tron/base58 v1.1.3/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o= github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= +github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE+eKJXWVjKXM4ck2QobLqTDytGJbLLhJg= github.com/mtibben/percent v0.2.1 h1:5gssi8Nqo8QU/r2pynCm+hBQHpkB/uNK7BJCFogWdzs= github.com/mtibben/percent v0.2.1/go.mod h1:KG9uO+SZkUp+VkRHsCdYQV3XSZrrSpR3O9ibNBTZrns= github.com/multiformats/go-base32 v0.0.3/go.mod h1:pLiuGC8y0QR3Ue4Zug5UzK9LjgbkL8NSQj0zQ5Nz/AA= -github.com/multiformats/go-base32 v0.0.4 h1:+qMh4a2f37b4xTNs6mqitDinryCI+tfO2dRVMN9mjSE= github.com/multiformats/go-base32 v0.0.4/go.mod h1:jNLFzjPZtp3aIARHbJRZIaPuspdH0J6q39uUM5pnABM= -github.com/multiformats/go-base36 v0.1.0 h1:JR6TyF7JjGd3m6FbLU2cOxhC0Li8z8dLNGQ89tUg4F4= +github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aGkbLYxPE= +github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI= github.com/multiformats/go-base36 v0.1.0/go.mod h1:kFGE83c6s80PklsHO9sRn2NCoffoRdUUOENyW/Vv6sM= +github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0= +github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4= github.com/multiformats/go-multiaddr v0.0.1/go.mod 
h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= github.com/multiformats/go-multiaddr v0.0.2/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= github.com/multiformats/go-multiaddr v0.0.4/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= @@ -1193,8 +1828,11 @@ github.com/multiformats/go-multiaddr v0.3.3/go.mod h1:lCKNGP1EQ1eZ35Za2wlqnabm9x github.com/multiformats/go-multiaddr v0.4.0/go.mod h1:YcpyLH8ZPudLxQlemYBPhSm0/oCXAT8Z4mzFpyoPyRc= github.com/multiformats/go-multiaddr v0.4.1/go.mod h1:3afI9HfVW8csiF8UZqtpYRiDyew8pRX7qLIGHu9FLuM= github.com/multiformats/go-multiaddr v0.5.0/go.mod h1:3KAxNkUqLTJ20AAwN4XVX4kZar+bR+gh4zgbfr3SNug= -github.com/multiformats/go-multiaddr v0.6.0 h1:qMnoOPj2s8xxPU5kZ57Cqdr0hHhARz7mFsPMIiYNqzg= github.com/multiformats/go-multiaddr v0.6.0/go.mod h1:F4IpaKZuPP360tOMn2Tpyu0At8w23aRyVqeK0DbFeGM= +github.com/multiformats/go-multiaddr v0.7.0/go.mod h1:Fs50eBDWvZu+l3/9S6xAE7ZYj6yhxlvaVZjakWN7xRs= +github.com/multiformats/go-multiaddr v0.8.0/go.mod h1:Fs50eBDWvZu+l3/9S6xAE7ZYj6yhxlvaVZjakWN7xRs= +github.com/multiformats/go-multiaddr v0.12.2 h1:9G9sTY/wCYajKa9lyfWPmpZAwe6oV+Wb1zcmMS1HG24= +github.com/multiformats/go-multiaddr v0.12.2/go.mod h1:GKyaTYjZRdcUhyOetrxTk9z0cW+jA/YrnqTOvKgi44M= github.com/multiformats/go-multiaddr-dns v0.0.1/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q= github.com/multiformats/go-multiaddr-dns v0.0.2/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q= github.com/multiformats/go-multiaddr-dns v0.2.0/go.mod h1:TJ5pr5bBO7Y1B18djPuRsVkduhQH2YqYSbxWJzYGdK0= @@ -1212,38 +1850,57 @@ github.com/multiformats/go-multiaddr-net v0.1.4/go.mod h1:ilNnaM9HbmVFqsb/qcNysj github.com/multiformats/go-multiaddr-net v0.1.5/go.mod h1:ilNnaM9HbmVFqsb/qcNysjCu4PVONlrBZpHIrw/qQuA= github.com/multiformats/go-multiaddr-net v0.2.0/go.mod h1:gGdH3UXny6U3cKKYCvpXI5rnK7YaOIEOPVDI9tsJbEA= github.com/multiformats/go-multibase v0.0.1/go.mod h1:bja2MqRZ3ggyXtZSEDKpl0uO/gviWFaSteVbWT51qgs= -github.com/multiformats/go-multibase v0.0.3 h1:l/B6bJDQjvQ5G52jw4QGSYeOTZoAwIO77RblWplfIqk= github.com/multiformats/go-multibase v0.0.3/go.mod h1:5+1R4eQrT3PkYZ24C3W2Ue2tPwIdYQD509ZjSb5y9Oc= -github.com/multiformats/go-multicodec v0.2.0/go.mod h1:/y4YVwkfMyry5kFbMTbLJKErhycTIftytRV+llXdyS4= +github.com/multiformats/go-multibase v0.1.1/go.mod h1:ZEjHE+IsUrgp5mhlEAYjMtZwK1k4haNkcaPg9aoe1a8= +github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g= +github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk= github.com/multiformats/go-multicodec v0.3.0/go.mod h1:qGGaQmioCDh+TeFOnxrbU0DaIPw8yFgAZgFG0V7p1qQ= -github.com/multiformats/go-multicodec v0.4.1 h1:BSJbf+zpghcZMZrwTYBGwy0CPcVZGWiC72Cp8bBd4R4= +github.com/multiformats/go-multicodec v0.3.1-0.20210902112759-1539a079fd61/go.mod h1:1Hj/eHRaVWSXiSNNfcEPcwZleTmdNP81xlxDLnWU9GQ= +github.com/multiformats/go-multicodec v0.3.1-0.20211210143421-a526f306ed2c/go.mod h1:1Hj/eHRaVWSXiSNNfcEPcwZleTmdNP81xlxDLnWU9GQ= github.com/multiformats/go-multicodec v0.4.1/go.mod h1:1Hj/eHRaVWSXiSNNfcEPcwZleTmdNP81xlxDLnWU9GQ= +github.com/multiformats/go-multicodec v0.5.0/go.mod h1:DiY2HFaEp5EhEXb/iYzVAunmyX/aSFMxq2KMKfWEues= +github.com/multiformats/go-multicodec v0.6.0/go.mod h1:GUC8upxSBE4oG+q3kWZRw/+6yC1BqO550bjhWsJbZlw= +github.com/multiformats/go-multicodec v0.7.0/go.mod h1:GUC8upxSBE4oG+q3kWZRw/+6yC1BqO550bjhWsJbZlw= +github.com/multiformats/go-multicodec v0.8.0/go.mod h1:GUC8upxSBE4oG+q3kWZRw/+6yC1BqO550bjhWsJbZlw= +github.com/multiformats/go-multicodec v0.8.1/go.mod 
h1:L3QTQvMIaVBkXOXXtVmYE+LI16i14xuaojr/H7Ai54k= +github.com/multiformats/go-multicodec v0.9.0 h1:pb/dlPnzee/Sxv/j4PmkDRxCOi3hXTz3IbPKOXWJkmg= +github.com/multiformats/go-multicodec v0.9.0/go.mod h1:L3QTQvMIaVBkXOXXtVmYE+LI16i14xuaojr/H7Ai54k= github.com/multiformats/go-multihash v0.0.1/go.mod h1:w/5tugSrLEbWqlcgJabL3oHFKTwfvkofsjW2Qa1ct4U= github.com/multiformats/go-multihash v0.0.5/go.mod h1:lt/HCbqlQwlPBz7lv0sQCdtfcMtlJvakRUn/0Ual8po= github.com/multiformats/go-multihash v0.0.8/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= -github.com/multiformats/go-multihash v0.0.9/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= github.com/multiformats/go-multihash v0.0.10/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= github.com/multiformats/go-multihash v0.0.13/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc= github.com/multiformats/go-multihash v0.0.14/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc= github.com/multiformats/go-multihash v0.0.15/go.mod h1:D6aZrWNLFTV/ynMpKsNtB40mJzmCl4jb1alC0OvHiHg= github.com/multiformats/go-multihash v0.1.0/go.mod h1:RJlXsxt6vHGaia+S8We0ErjhojtKzPP2AH4+kYM7k84= -github.com/multiformats/go-multihash v0.2.0 h1:oytJb9ZA1OUW0r0f9ea18GiaPOo4SXyc7p2movyUuo4= github.com/multiformats/go-multihash v0.2.0/go.mod h1:WxoMcYG85AZVQUyRyo9s4wULvW5qrI9vb2Lt6evduFc= +github.com/multiformats/go-multihash v0.2.1/go.mod h1:WxoMcYG85AZVQUyRyo9s4wULvW5qrI9vb2Lt6evduFc= +github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7BFvVU9RSh+U= +github.com/multiformats/go-multihash v0.2.3/go.mod h1:dXgKXCXjBzdscBLk9JkjINiEsCKRVch90MdaGiKsvSM= github.com/multiformats/go-multistream v0.1.0/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg= github.com/multiformats/go-multistream v0.1.1/go.mod h1:KmHZ40hzVxiaiwlj3MEbYgK9JFk2/9UktWZAF54Du38= +github.com/multiformats/go-multistream v0.2.0/go.mod h1:5GZPQZbkWOLOn3J2y4Y99vVW7vOfsAflxARk3x14o6k= github.com/multiformats/go-multistream v0.2.1/go.mod h1:5GZPQZbkWOLOn3J2y4Y99vVW7vOfsAflxARk3x14o6k= github.com/multiformats/go-multistream v0.2.2/go.mod h1:UIcnm7Zuo8HKG+HkWgfQsGL+/MIEhyTqbODbIUwSXKs= -github.com/multiformats/go-multistream v0.3.2 h1:YRJzBzM8BdZuOn3FjIns1ceKEyEQrT+8JJ581PNyGyI= -github.com/multiformats/go-multistream v0.3.2/go.mod h1:ODRoqamLUsETKS9BNcII4gcRsJBU5VAwRIv7O39cEXg= +github.com/multiformats/go-multistream v0.3.0/go.mod h1:ODRoqamLUsETKS9BNcII4gcRsJBU5VAwRIv7O39cEXg= +github.com/multiformats/go-multistream v0.3.1/go.mod h1:ODRoqamLUsETKS9BNcII4gcRsJBU5VAwRIv7O39cEXg= +github.com/multiformats/go-multistream v0.3.3/go.mod h1:ODRoqamLUsETKS9BNcII4gcRsJBU5VAwRIv7O39cEXg= +github.com/multiformats/go-multistream v0.4.0/go.mod h1:BS6ZSYcA4NwYEaIMeCtpJydp2Dc+fNRA6uJMSu/m8+4= +github.com/multiformats/go-multistream v0.4.1/go.mod h1:Mz5eykRVAjJWckE2U78c6xqdtyNUEhKSM0Lwar2p77Q= +github.com/multiformats/go-multistream v0.5.0 h1:5htLSLl7lvJk3xx3qT/8Zm9J4K8vEOf/QGkvOGQAyiE= +github.com/multiformats/go-multistream v0.5.0/go.mod h1:n6tMZiwiP2wUsR8DgfDWw1dydlEqV3l6N3/GBsX6ILA= github.com/multiformats/go-varint v0.0.1/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= github.com/multiformats/go-varint v0.0.2/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= github.com/multiformats/go-varint v0.0.5/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= -github.com/multiformats/go-varint v0.0.6 h1:gk85QWKxh3TazbLxED/NlDVv8+q+ReFJk7Y2W/KhfNY= github.com/multiformats/go-varint v0.0.6/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= +github.com/multiformats/go-varint v0.0.7 
h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8= +github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/grpc-proxy v0.0.0-20181017164139-0f1106ef9c76/go.mod h1:x5OoJHDHqxHS801UIuhqGl6QdSAEJvtausosHSdazIo= +github.com/naoina/go-stringutil v0.1.0/go.mod h1:XJ2SJL9jCtBh+P9q5btrd/Ylo8XwT/h1USek5+NqSA0= +github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416/go.mod h1:NBIhNtsFMo3G2szEBne+bO4gS192HuIYRqfvOWb4i1E= github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU= github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k= @@ -1253,14 +1910,18 @@ github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxzi github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo= github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM= +github.com/neilotoole/errgroup v0.1.6/go.mod h1:Q2nLGf+594h0CLBs/Mbg6qOr7GtqDK7C2S41udRnToE= +github.com/ngdinhtoan/glide-cleanup v0.2.0/go.mod h1:UQzsmiDOb8YV3nOsCxK/c9zPpCZVNoHScRE3EO9pVMM= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= -github.com/oasisprotocol/curve25519-voi v0.0.0-20210609091139-0a56a4bca00b h1:MKwruh+HeCSKWphkxuzvRzU4QzDkg7yiPkDVV0cDFgI= -github.com/oasisprotocol/curve25519-voi v0.0.0-20210609091139-0a56a4bca00b/go.mod h1:TLJifjWF6eotcfzDjKZsDqWJ+73Uvj/N85MvVyrvynM= github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= +github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= @@ -1268,8 +1929,18 @@ github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0 github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= github.com/onsi/ginkgo v1.16.2/go.mod h1:CObGmKUOKaSC0RjmoAK7tKyn4Azo5P2IWuoMnvwxz1E= -github.com/onsi/ginkgo 
v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc= github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= +github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= +github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= +github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= +github.com/onsi/ginkgo/v2 v2.1.4/go.mod h1:um6tUpWM/cxCK3/FK8BXqEiUMUwRgSM4JXG47RKZmLU= +github.com/onsi/ginkgo/v2 v2.1.6/go.mod h1:MEH45j8TBi6u9BMogfbp0stKC5cdGjumZj5Y7AG4VIk= +github.com/onsi/ginkgo/v2 v2.2.0/go.mod h1:MEH45j8TBi6u9BMogfbp0stKC5cdGjumZj5Y7AG4VIk= +github.com/onsi/ginkgo/v2 v2.3.0/go.mod h1:Eew0uilEqZmIEZr8JrvYlvOM7Rr6xzTmMV8AyFNU9d0= +github.com/onsi/ginkgo/v2 v2.4.0/go.mod h1:iHkDK1fKGcBoEHT5W7YBq4RFWaQulw+caOMkAt4OrFo= +github.com/onsi/ginkgo/v2 v2.5.1/go.mod h1:63DOGlLAH8+REH8jUGdL3YpCpu7JODesutUjdENfUAc= +github.com/onsi/ginkgo/v2 v2.13.0 h1:0jY9lJquiL8fcf3M4LAXN5aMlS/b2BV86HFFPCPMgE4= +github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o= github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= @@ -1277,17 +1948,30 @@ github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7J github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.13.0/go.mod h1:lRk9szgn8TxENtWd0Tp4c3wjlRfMTMH27I+3Je41yGY= -github.com/onsi/gomega v1.18.1 h1:M1GfJqGRrBrrGGsbxzV5dqM2U2ApXefZCQpkukxYRLE= +github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= +github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= +github.com/onsi/gomega v1.20.1/go.mod h1:DtrZpjmvpn2mPm4YWQa0/ALMDj9v4YxLgojwPeREyVo= +github.com/onsi/gomega v1.21.1/go.mod h1:iYAIXgPSaDHak0LCMA+AWBpIKBr8WZicMxnE8luStNc= +github.com/onsi/gomega v1.22.1/go.mod h1:x6n7VNe4hw0vkyYUM4mjIXx3JbLiPaBPNgB7PRQ1tuM= +github.com/onsi/gomega v1.24.0/go.mod h1:Z/NWtiqwBrwUt4/2loMmHL63EDLnYHmVbuBpDr2vQAg= +github.com/onsi/gomega v1.27.10 h1:naR28SdDFlqrG6kScpT8VWpu1xWY5nJRCF3XaYyBjhI= +github.com/onsi/gomega v1.27.10/go.mod h1:RsS8tutOdbdgzbPtzzATp12yT7kM5I5aElG3evPbQ0M= github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= +github.com/open-rpc/meta-schema v0.0.0-20201029221707-1b72ef2ea333 h1:CznVS40zms0Dj5he4ERo+fRPtO0qxUk8lA8Xu3ddet0= +github.com/open-rpc/meta-schema v0.0.0-20201029221707-1b72ef2ea333/go.mod h1:Ag6rSXkHIckQmjFBCweJEEt1mrTPBv8b9W4aU/NQWfI= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= -github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM= -github.com/opencontainers/runc v1.0.3 h1:1hbqejyQWCJBvtKAfdO0b1FmaEf2z/bxnjqbARass5k= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.1.0-rc2 h1:2zx/Stx4Wc5pIPDvIxHXvXtQFW/7XWJGmnM7r3wg034= +github.com/opencontainers/image-spec v1.1.0-rc2/go.mod h1:3OVijpioIKYWTqjiG0zfF6wvoJ4fAXGbjdZuI2NgsRQ= +github.com/opencontainers/runc v1.1.3 h1:vIXrkId+0/J2Ymu2m7VjGvbSlAId9XNRPhn2p4b+d8w= +github.com/opencontainers/runc v1.1.3/go.mod 
h1:1J5XiS+vdZ3wCyZybsuxXZWGrgSr8fFJHLXuG2PsnNg= github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417 h1:3snG66yBm59tKhhSPQrQ/0bCrv1LQbKt40LnUPiUxdc= -github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.1.0 h1:HHUyrt9mwHUjtasSbXSMvs4cyFxh+Bll4AjJ9odEGpg= +github.com/opencontainers/runtime-spec v1.1.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/opentracing/opentracing-go v1.0.3-0.20180606204148-bd9c31933947/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= @@ -1297,36 +1981,48 @@ github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJ github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= github.com/ory/dockertest v3.3.5+incompatible h1:iLLK6SQwIhcbrG783Dghaaa3WPzGc+4Emza6EbVUUGA= +github.com/ory/dockertest v3.3.5+incompatible/go.mod h1:1vX4m9wsvi00u5bseYwXaSnhNrne+V0E6LAcBILJdPs= github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/paulbellamy/ratecounter v0.2.0/go.mod h1:Hfx1hDpSGoqxkVVpBi/IlYD7kChlfo5C6hzIHwPqfFE= github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0= github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= -github.com/pelletier/go-toml/v2 v2.0.1 h1:8e3L2cCQzLFi2CR4g7vGFuFxX7Jl1kKX8gW+iV0GUKU= -github.com/pelletier/go-toml/v2 v2.0.1/go.mod h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZOjgMj2KwnJFUo= +github.com/pelletier/go-toml/v2 v2.0.7 h1:muncTPStnKRos5dpVKULv2FVd4bMOhNePj9CjgDb8Us= +github.com/pelletier/go-toml/v2 v2.0.7/go.mod h1:eumQOmlWiOPt5WriQQqoM5y18pDHwha2N+QD+EUNTek= github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= -github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 h1:q2e307iGHPdTGp0hoxKjt1H5pDo6utceo3dQVK3I5XQ= +github.com/petar/GoLLRB 
v0.0.0-20210522233825-ae3b015fd3e9 h1:1/WtZae0yGtPq+TI6+Tv1WTxkukpXeMlviSxvL7SRgk= +github.com/petar/GoLLRB v0.0.0-20210522233825-ae3b015fd3e9/go.mod h1:x3N5drFsm2uilKKuuYo6LdyD8vZAW55sH/9w+pbo1sw= +github.com/peterh/liner v1.0.1-0.20180619022028-8c1271fcf47f/go.mod h1:xIteQHvHuaLYG9IFj6mSxM0fCKrs34IrEQUhOYuGPHc= +github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7/go.mod h1:CRroGNssyjTd/qIG2FyxByd2S8JEAZXBl4qUrZf8GS0= github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5/go.mod h1:jvVRKCrJTQWu0XVbaOlby/2lO20uSCHEMzzplHXte1o= +github.com/petermattis/goid v0.0.0-20230317030725-371a4b8eda08 h1:hDSdbBuw3Lefr6R18ax0tZ2BJeNB3NehB3trOwYBsdU= +github.com/petermattis/goid v0.0.0-20230317030725-371a4b8eda08/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4= +github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= github.com/philhofer/fwd v1.1.1/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= +github.com/pkg/term v0.0.0-20180730021639-bffc007b7fd5/go.mod h1:eCbImbZ95eXtAUIbLAuAVnBnwf83mjf6QIVH8SHYwqQ= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/polydawn/refmt v0.0.0-20190221155625-df39d6c2d992/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= +github.com/polydawn/refmt v0.0.0-20190408063855-01bf1e26dd14/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= github.com/polydawn/refmt v0.0.0-20190807091052-3d65705ee9f1/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= -github.com/polydawn/refmt v0.0.0-20201211092308-30ac6d18308e h1:ZOcivgkkFRnjfoTcGsDq3UQYiBmekwLA+qg0OjyB/ls= github.com/polydawn/refmt v0.0.0-20201211092308-30ac6d18308e/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= +github.com/polydawn/refmt v0.89.0 h1:ADJTApkvkeBZsN0tBTx8QjpD9JkmxbKp0cxfr9qszm4= +github.com/polydawn/refmt v0.89.0/go.mod h1:/zvteZs/GwLtCgZ4BL6CBsk9IKIlexP43ObX9AxTqTw= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= @@ -1339,28 +2035,35 @@ github.com/prometheus/client_golang v1.9.0/go.mod h1:FqZLKOZnGdFAhOK4nqGHa7D66Id github.com/prometheus/client_golang v1.10.0/go.mod h1:WJM3cc3yu7XKBKa/I8WeZm+V3eltZnBwfENSU7mdogU= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= -github.com/prometheus/client_golang v1.12.2 
h1:51L9cDoUHVrXx4zWYlcLQIZ+d+VXHgqnYKkIuq4g/34= -github.com/prometheus/client_golang v1.12.2/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= +github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ= +github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y= +github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk= +github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= +github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= +github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.15.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= github.com/prometheus/common v0.18.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= -github.com/prometheus/common v0.30.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.33.0 h1:rHgav/0a6+uYgGdNt3jwz8FNSesO/Hsang3O0T9A5SE= github.com/prometheus/common v0.33.0/go.mod h1:gB3sOl7P0TvJabZpLY5uQMpUqRCPPCyRLCZYc7JZTNE= +github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= +github.com/prometheus/common v0.45.0 h1:2BGz0eBc2hdMDLnO/8n0jeB3oPrt2D08CekT0lneoxM= +github.com/prometheus/common v0.45.0/go.mod h1:YJmSTw9BoKxJplESWWxlbyttQR4uaEcGyv9MZjVOJsY= github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= 
github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= @@ -1370,8 +2073,30 @@ github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4O github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.3.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU= github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= +github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= +github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= +github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/pyroscope-io/client v0.7.2 h1:OX2qdUQsS8RSkn/3C8isD7f/P0YiZQlRbAlecAaj/R8= +github.com/pyroscope-io/client v0.7.2/go.mod h1:FEocnjn+Ngzxy6EtU9ZxXWRvQ0+pffkrBxHLnPpxwi8= +github.com/pyroscope-io/godeltaprof v0.1.2 h1:MdlEmYELd5w+lvIzmZvXGNMVzW2Qc9jDMuJaPOR75g4= +github.com/pyroscope-io/godeltaprof v0.1.2/go.mod h1:psMITXp90+8pFenXkKIpNhrfmI9saQnPbba27VIaiQE= +github.com/pyroscope-io/otel-profiling-go v0.5.0 h1:LsTP9VuQ5TgeSiyY2gPHy1de/q3jbFyGWE1v3LtHzMk= +github.com/pyroscope-io/otel-profiling-go v0.5.0/go.mod h1:jUUUXTTgntvGJKS8p5uzypXwTyuGnQP31VnWauH/lUg= +github.com/quic-go/qpack v0.4.0 h1:Cr9BXA1sQS2SmDUWjSofMPNKmvF6IiIfDRmgU0w1ZCo= +github.com/quic-go/qpack v0.4.0/go.mod h1:UZVnYIfi5GRk+zI9UMaCPsmZ2xKJP7XBUvVyT1Knj9A= +github.com/quic-go/qtls-go1-18 v0.2.0/go.mod h1:moGulGHK7o6O8lSPSZNoOwcLvJKJ85vVNc7oJFD65bc= +github.com/quic-go/qtls-go1-19 v0.2.0/go.mod h1:ySOI96ew8lnoKPtSqx2BlI5wCpUVPT05RMAlajtnyOI= +github.com/quic-go/qtls-go1-20 v0.1.0/go.mod h1:JKtK6mjbAVcUTN/9jZpvLbGxvdWIKS8uT7EiStoU1SM= +github.com/quic-go/qtls-go1-20 v0.3.4 h1:MfFAPULvst4yoMgY9QmtpYmfij/em7O8UUi+bNVm7Cg= +github.com/quic-go/qtls-go1-20 v0.3.4/go.mod h1:X9Nh97ZL80Z+bX/gUXMbipO6OxdiDi58b/fMC9mAL+k= +github.com/quic-go/quic-go v0.32.0/go.mod h1:/fCsKANhQIeD5l76c2JFU+07gVE3KaA0FP+0zMWwfwo= +github.com/quic-go/quic-go v0.39.4 h1:PelfiuG7wXEffUT2yceiqz5V6Pc0TA5ruOd1LcmFc1s= +github.com/quic-go/quic-go v0.39.4/go.mod h1:T09QsDQWjLiQ74ZmacDfqZmhY/NLnw5BC40MANNNZ1Q= +github.com/quic-go/webtransport-go v0.5.1/go.mod h1:OhmmgJIzTTqXK5xvtuX0oBpLV2GkLWNDA+UeTGJXErU= +github.com/quic-go/webtransport-go v0.6.0 h1:CvNsKqc4W2HljHJnoT+rMmbRJybShZ0YPFDD3NxaZLY= +github.com/quic-go/webtransport-go v0.6.0/go.mod h1:9KjU4AEBqEQidGHNDkZrb8CAa1abRaosM2yGOyiikEc= github.com/rakyll/statik v0.1.7 h1:OF3QCZUuyPxuGEP7B4ypUa7sB/iHtqOTDYZXGM8KOdQ= github.com/rakyll/statik v0.1.7/go.mod h1:AlZONWzMtEnMs7W4e/1LURLiI49pIMmp6V9Unghqrcc= github.com/raulk/clock v1.1.0/go.mod h1:3MpVxdZ/ODBQDxbN+kzshf5OSZwPjtMDx6BBXBmOeY0= @@ -1379,32 +2104,51 @@ github.com/raulk/go-watchdog v1.2.0/go.mod h1:lzSbAl5sh4rtI8tYHU01BWIDzgzqaQLj6R github.com/raulk/go-watchdog v1.3.0 h1:oUmdlHxdkXRJlwfG0O9omj8ukerm8MEQavSiDTEtBsk= github.com/raulk/go-watchdog v1.3.0/go.mod h1:fIvOnLbF0b0ZwkB9YU4mOW9Did//4vPZtDqv66NfsMU= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= -github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 
h1:MkV+77GLUNo5oJ0jf870itWm3D0Sjh7+Za9gazKc5LQ= -github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM= +github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/regen-network/cosmos-proto v0.3.1 h1:rV7iM4SSFAagvy8RiyhiACbWEGotmqzywPxOvwMdxcg= github.com/regen-network/cosmos-proto v0.3.1/go.mod h1:jO0sVX6a1B36nmE8C9xBFXpNwWejXC7QqCOnH3O0+YM= github.com/regen-network/protobuf v1.3.3-alpha.regen.1 h1:OHEc+q5iIAXpqiqFKeLpu5NwTIkVXUs48vFMwzqpqY4= github.com/regen-network/protobuf v1.3.3-alpha.regen.1/go.mod h1:2DjTFR1HhMQhiWC5sZ4OhQ3+NtdbZ6oBDKQwq5Ou+FI= +github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52/go.mod h1:RDpi1RftBQPUCDRw6SmxeaREsAaRKnOclghuzp/WRzc= +github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis= +github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= +github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRrjvIXnJho= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= -github.com/rogpeppe/go-internal v1.8.1 h1:geMPLpDpQOgVyCg5z5GoRwLHepNdb71NXb67XFkP+Eg= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= +github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= +github.com/rollkit/go-da v0.4.0 h1:/s7ZrVq7DC2aK8UXIvB7rsXrZ2mVGRw7zrexcxRvhlw= +github.com/rollkit/go-da v0.4.0/go.mod h1:Kef0XI5ecEKd3TXzI8S+9knAUJnZg0svh2DuXoCsPlM= github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U= github.com/rs/cors v1.8.2/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= -github.com/rs/xid v1.3.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= -github.com/rs/zerolog v1.26.1 h1:/ihwxqH+4z8UxyI70wM1z9yCvkWcfz/a3mj48k/Zngc= -github.com/rs/zerolog v1.26.1/go.mod h1:/wSSJWX7lVrsOwlbyTRSOJvqRlc+WjWlfes+CiJ+tmc= +github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= +github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= +github.com/rs/zerolog v1.21.0/go.mod h1:ZPhntP/xmq1nnND05hhpAh2QMhSsA4UN3MGZ6O2J3hM= +github.com/rs/zerolog v1.31.0 h1:FcTR3NnLWW+NnTwwhFWiJSZr4ECLpqCm6QsEnyvbV4A= +github.com/rs/zerolog v1.31.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss= +github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize 
v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/samber/lo v1.36.0/go.mod h1:HLeWcJRRyLKp3+/XBJvOrerCQn9mhdKMHyd7IRlgeQ8= github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= -github.com/sasha-s/go-deadlock v0.2.1-0.20190427202633-1595213edefa h1:0U2s5loxrTy6/VgfVoLuVLFJcURKLH49ie0zSch7gh4= -github.com/sasha-s/go-deadlock v0.2.1-0.20190427202633-1595213edefa/go.mod h1:F73l+cr82YSh10GxyRI6qZiCgK64VaZjwesgfQ1/iLM= +github.com/sasha-s/go-deadlock v0.3.1 h1:sqv7fDNShgjcaxkO0JNcOAlr8B9+cV5Ey/OB71efZx0= +github.com/sasha-s/go-deadlock v0.3.1/go.mod h1:F73l+cr82YSh10GxyRI6qZiCgK64VaZjwesgfQ1/iLM= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/segmentio/fasthash v1.0.3/go.mod h1:waKX8l2N8yckOgmSsXJi7x1ZfdKZ4x7KRMzBtS3oedY= +github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo= +github.com/segmentio/kafka-go v0.2.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= +github.com/shirou/gopsutil v3.21.6+incompatible h1:mmZtAlWSd8U2HeRTjswbnDLPxqsEoK01NK+GZ1P+nEM= +github.com/shirou/gopsutil v3.21.6+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY= github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM= github.com/shurcooL/github_flavored_markdown v0.0.0-20181002035957-2122de532470/go.mod h1:2dOwnU2uBioM+SGy2aZoq1f/Sd1l9OkAeAUvjSyvgU0= @@ -1432,33 +2176,38 @@ github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPx github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= +github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/assertions v1.0.0/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM= +github.com/smartystreets/assertions v1.2.0 h1:42S6lae5dvLc7BrLu/0ugRtcFVjoJNMC/N3yZFZkDFs= +github.com/smartystreets/assertions v1.2.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo= github.com/smartystreets/goconvey v0.0.0-20190222223459-a17d461953aa/go.mod h1:2RVY1rIf+2J2o/IM9+vPq9RzmHDSseB7FoXiSNIUsoU= -github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= +github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/smartystreets/goconvey v1.6.4/go.mod 
h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/smartystreets/goconvey v1.7.2 h1:9RBaZCeXEQ3UselpuwUQHltGVXvdwm6cv1hgR6gDIPg= +github.com/smartystreets/goconvey v1.7.2/go.mod h1:Vw0tHAZW6lzCRk3xgdin6fKYcG+G3Pg9vgXWeJpQFMM= github.com/smola/gocompat v0.2.0/go.mod h1:1B0MlxbmoZNo3h8guHp8HztB3BSYR5itql9qtVc0ypY= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE= github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA= github.com/spacemonkeygo/openssl v0.0.0-20181017203307-c2dcc5cca94a/go.mod h1:7AyxJNCJ7SBZ1MfVQCWD6Uqo2oubI2Eq2y2eqf+A5r0= -github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572 h1:RC6RW7j+1+HkWaX/Yh71Ee5ZHaHYt7ZP4sQgUrm6cDU= github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572/go.mod h1:w0SWMsp6j9O/dk4/ZpIhL+3CkG8ofA2vuv7k+ltqUMc= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/afero v1.8.2 h1:xehSyVa0YnHWsJ49JFljMpg1HX19V6NDZ1fkm1Xznbo= -github.com/spf13/afero v1.8.2/go.mod h1:CtAatgMJh6bJEIs48Ay/FOnkljP3WeGUG0MC1RfAqwo= +github.com/spf13/afero v1.9.2 h1:j49Hj62F0n+DaZ1dDCvhABaPNSGNkt32oRFxI33IEMw= +github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= -github.com/spf13/cobra v1.5.0 h1:X+jTBEBqF0bHN+9cSMgmfuvv2VHJ9ezmFNf9Y/XstYU= -github.com/spf13/cobra v1.5.0/go.mod h1:dWXEIy2H428czQCjInthrTRUg7yKbok+2Qi/yBIJoUM= +github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= +github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= @@ -1467,90 +2216,154 @@ github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnIn github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= -github.com/spf13/viper v1.12.0 h1:CZ7eSOd3kZoaYDLbXnmzgQI5RlciuXBMA+18HwHRfZQ= -github.com/spf13/viper v1.12.0/go.mod h1:b6COn30jlNxbm/V2IqWiNWkJ+vZNiMNksliPCiuKtSI= +github.com/spf13/viper v1.14.0 h1:Rg7d3Lo706X9tHsJMUjdiwMpHB7W8WnSVOssIY+JElU= +github.com/spf13/viper v1.14.0/go.mod h1:WT//axPky3FdvXHzGw33dNdXXXfFQqmEalje+egj8As= 
github.com/src-d/envconfig v1.0.0/go.mod h1:Q9YQZ7BKITldTBnoxsE5gOeB5y66RyPXeue/R4aaNBc= +github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4/go.mod h1:RZLeN1LMWmRsyYjvAu+I6Dm9QmlDaIIt+Y+4Kd7Tp+Q= +github.com/status-im/keycard-go v0.2.0 h1:QDLFswOQu1r5jsycloeQh3bVU8n/NatHHaZobtDnDzA= +github.com/status-im/keycard-go v0.2.0/go.mod h1:wlp8ZLbsmrF6g6WjugPAx+IzoLrkdf9+mHxBEeo3Hbg= github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= -github.com/stretchr/objx v0.4.0 h1:M2gUjqZET1qApGOWNSnZ49BAIMX4F/1plDv3+l31EJ4= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v1.2.0/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.3.1-0.20190311161405-34c6fa2dc709/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/subosito/gotenv v1.3.0 h1:mjC+YW8QpAdXibNi+vNWgzmgBH4+5l5dCXv8cNysBLI= -github.com/subosito/gotenv v1.3.0/go.mod h1:YzJjq/33h7nrwdY+iHMhEOEEbW0ovIz0tB6t6PwAXzs= -github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/subosito/gotenv v1.4.1 h1:jyEFiXpy21Wm81FBN71l9VoMMV8H8jG+qIK3GCpY6Qs= +github.com/subosito/gotenv v1.4.1/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0= +github.com/supranational/blst v0.3.11 h1:LyU6FolezeWAhvQk0k6O/d49jqgO52MSDDfYgbeoEm4= +github.com/supranational/blst v0.3.11/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod 
h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc= github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= -github.com/tendermint/btcd v0.1.1 h1:0VcxPfflS2zZ3RiOAHkBiFUcPvbtRj5O7zHmcJWHV7s= -github.com/tendermint/btcd v0.1.1/go.mod h1:DC6/m53jtQzr/NFmMNEu0rxf18/ktVoVtMrnDD5pN+U= -github.com/tendermint/crypto v0.0.0-20191022145703-50d29ede1e15 h1:hqAk8riJvK4RMWx1aInLzndwxKalgi5rTqgfXxOxbEI= -github.com/tendermint/crypto v0.0.0-20191022145703-50d29ede1e15/go.mod h1:z4YtwM70uOnk8h0pjJYlj3zdYwi9l03By6iAIF5j/Pk= +github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c h1:g+WoO5jjkqGAzHWCjJB1zZfXPIAaDpzXIEJ0eS6B5Ok= +github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c/go.mod h1:ahpPrc7HpcfEWDQRZEmnXMzHY03mLDYMCxeDzy46i+8= github.com/tendermint/go-amino v0.16.0 h1:GyhmgQKvqF82e2oZeuMSp9JTN0N09emoSZlb2lyGa2E= github.com/tendermint/go-amino v0.16.0/go.mod h1:TQU0M1i/ImAo+tYpZi73AU3V/dKeCoMC9Sphe2ZwGME= github.com/tendermint/tm-db v0.6.7 h1:fE00Cbl0jayAoqlExN6oyQJ7fR/ZtoVOmvPJ//+shu8= github.com/tendermint/tm-db v0.6.7/go.mod h1:byQDzFkZV1syXr/ReXS808NxA2xvyuuVgXOJ/088L6I= +github.com/thoas/go-funk v0.9.1/go.mod h1:+IWnUfUmFO1+WVYQWQtIJHeRRdaIyyYglZN7xzUPe4Q= +github.com/tidwall/btree v1.5.0 h1:iV0yVY/frd7r6qGBXfEYs7DH0gTDgrKTrDjS7xt/IyQ= +github.com/tidwall/btree v1.5.0/go.mod h1:LGm8L/DZjPLmeWGjv5kFrY8dL4uVhMmzmmLYmsObdKE= +github.com/tidwall/gjson v1.6.0/go.mod h1:P256ACg0Mn+j1RXIDXoss50DeIABTYK1PULOJHhxOls= +github.com/tidwall/gjson v1.12.1/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/gjson v1.14.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/gjson v1.14.4 h1:uo0p8EbA09J7RQaflQ1aBRffTR7xedD2bcIVSYxLnkM= +github.com/tidwall/gjson v1.14.4/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/match v1.0.1/go.mod h1:LujAq0jyVjBy028G1WhWfIzbpQfMO8bBZ6Tyb0+pL9E= +github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= +github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= +github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= +github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= +github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4= +github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= +github.com/tidwall/sjson v1.2.4/go.mod h1:098SZ494YoMWPmMO6ct4dcFnqxwj9r/gF0Etp19pSNM= +github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= github.com/tinylib/msgp v1.1.5/go.mod h1:eQsjooMTnV42mHu917E26IogZ2930nFyBQdofk10Udg= +github.com/tj/assert v0.0.3/go.mod h1:Ne6X72Q+TB1AteidzQncjw9PabbMp4PBMZ1k+vd1Pvk= +github.com/tklauser/go-sysconf v0.3.5/go.mod h1:MkWzOF4RMCshBAMXuhXJs64Rte09mITnppBXY/rYEFI= +github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU= +github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= +github.com/tklauser/numcpus v0.2.2/go.mod h1:x3qojaO3uyYt0i56EW/VUYs7uBvdl2fkfZFu0T9wgjM= +github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk= +github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/ttacon/chalk v0.0.0-20160626202418-22c06c80ed31/go.mod 
h1:onvgF043R+lC5RZ8IT9rBXDaEDnpnw/Cl+HFiw+v/7Q= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= +github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef/go.mod h1:sJ5fKU0s6JVwZjjcUEX2zFOnvq0ASQ2K9Zr6cf67kNs= +github.com/tyler-smith/go-bip39 v1.0.2/go.mod h1:sJ5fKU0s6JVwZjjcUEX2zFOnvq0ASQ2K9Zr6cf67kNs= +github.com/tyler-smith/go-bip39 v1.1.0 h1:5eUemwrMargf3BSLRRCalXT93Ns6pQJIjYQN2nyfOP8= +github.com/tyler-smith/go-bip39 v1.1.0/go.mod h1:gUYDtqQw1JS3ZJ8UWVcGTGqqr6YIN3CWg+kkNaLt55U= +github.com/ucarion/urlpath v0.0.0-20200424170820-7ccc79b76bbb/go.mod h1:ikPs9bRWicNw3S7XpJ8sK/smGwU9WcSVU3dy9qahYBM= github.com/ugorji/go v1.1.7 h1:/68gy2h+1mWMrwZFeD1kQialdSzAb432dtpeJ42ovdo= github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= github.com/ugorji/go/codec v1.1.7 h1:2SvQaVZ1ouYrrKKwoSk2pzd4A9evlKJb9oTL+OaLUSs= github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= -github.com/ulikunitz/xz v0.5.8 h1:ERv8V6GKqVi23rgu5cj9pVfVzJbOqAY2Ntl88O6c2nQ= -github.com/ulikunitz/xz v0.5.8/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= +github.com/ulikunitz/xz v0.5.10 h1:t92gobL9l3HE202wg3rlk19F6X+JOxl9BBrCCMYEYd8= +github.com/ulikunitz/xz v0.5.10/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/urfave/cli v1.22.10 h1:p8Fspmz3iTctJstry1PYS3HVdllxnEzTEsgIgtxTrCk= +github.com/urfave/cli v1.22.10/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/urfave/cli/v2 v2.3.0/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/XcUArI= +github.com/urfave/cli/v2 v2.25.7 h1:VAzn5oq403l5pHjc4OhD54+XGO9cdKVL/7lDjF+iKUs= +github.com/urfave/cli/v2 v2.25.7/go.mod h1:8qnjx1vcq5s2/wpsqoZFndg2CE5tNFyrTvS6SinrnYQ= +github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= +github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= +github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ= github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU= github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM= -github.com/vivint/infectious v0.0.0-20200605153912-25a574ae18a3 h1:zMsHhfK9+Wdl1F7sIKLyx3wrOFofpb3rWFbA4HgcK5k= -github.com/vivint/infectious v0.0.0-20200605153912-25a574ae18a3/go.mod h1:R0Gbuw7ElaGSLOZUSwBm/GgVwMd30jWxBDdAyMOeTuc= +github.com/vmihailenco/msgpack/v5 v5.3.5/go.mod h1:7xyJ9e+0+9SaZT0Wt1RGleJXzli6Q/V5KbhBonMG9jc= +github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds= github.com/wangjia184/sortedset v0.0.0-20160527075905-f5d03557ba30/go.mod h1:YkocrP2K2tcw938x9gCOmT5G5eCD6jsTz0SZuyAqwIE= -github.com/warpfork/go-testmark v0.3.0 h1:Q81c4u7hT+BR5kNfNQhEF0VT2pmL7+Kk0wD+ORYl7iA= github.com/warpfork/go-testmark v0.3.0/go.mod h1:jhEf8FVxd+F17juRubpmut64NEG6I2rgkUhlcqqXwE0= +github.com/warpfork/go-testmark v0.10.0/go.mod h1:jhEf8FVxd+F17juRubpmut64NEG6I2rgkUhlcqqXwE0= +github.com/warpfork/go-testmark v0.11.0/go.mod h1:jhEf8FVxd+F17juRubpmut64NEG6I2rgkUhlcqqXwE0= 
+github.com/warpfork/go-testmark v0.12.1 h1:rMgCpJfwy1sJ50x0M0NgyphxYYPMOODIJHhsXyEHU0s= +github.com/warpfork/go-testmark v0.12.1/go.mod h1:kHwy7wfvGSPh1rQJYKayD4AbtNaeyZdcGi9tNJTaa5Y= github.com/warpfork/go-wish v0.0.0-20180510122957-5ad1f5abf436/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= -github.com/warpfork/go-wish v0.0.0-20200122115046-b9ea61034e4a h1:G++j5e0OC488te356JvdhaM8YS6nMsjLAYF7JxCv07w= +github.com/warpfork/go-wish v0.0.0-20190328234359-8b3e70f8e830/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= github.com/warpfork/go-wish v0.0.0-20200122115046-b9ea61034e4a/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= -github.com/whyrusleeping/cbor-gen v0.0.0-20200123233031-1cdf64d27158 h1:WXhVOwj2USAXB5oMDwRl3piOux2XMV9TANaYxXHdkoE= +github.com/warpfork/go-wish v0.0.0-20220906213052-39a1cc7a02d0 h1:GDDkbFiaK8jsSDJfjId/PEGEShv6ugrt4kYsC5UIDaQ= +github.com/warpfork/go-wish v0.0.0-20220906213052-39a1cc7a02d0/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= +github.com/whyrusleeping/base32 v0.0.0-20170828182744-c30ac30633cc/go.mod h1:r45hJU7yEoA81k6MWNhpMj/kms0n14dkzkxYHoB96UM= +github.com/whyrusleeping/cbor v0.0.0-20171005072247-63513f603b11 h1:5HZfQkwe0mIfyDmc1Em5GqlNRzcdtlv4HTNmdpt7XH0= +github.com/whyrusleeping/cbor v0.0.0-20171005072247-63513f603b11/go.mod h1:Wlo/SzPmxVp6vXpGt/zaXhHH0fn4IxgqZc82aKg6bpQ= github.com/whyrusleeping/cbor-gen v0.0.0-20200123233031-1cdf64d27158/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI= +github.com/whyrusleeping/cbor-gen v0.0.0-20221220214510-0333c149dec0/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= +github.com/whyrusleeping/cbor-gen v0.0.0-20230126041949-52956bd4c9aa/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= +github.com/whyrusleeping/cbor-gen v0.0.0-20230818171029-f91ae536ca25 h1:yVYDLoN2gmB3OdBXFW8e1UwgVbmCvNlnAKhvHPaNARI= +github.com/whyrusleeping/cbor-gen v0.0.0-20230818171029-f91ae536ca25/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= +github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f h1:jQa4QT2UP9WYv2nzyawpKMOCl+Z/jW7djv2/J50lj9E= +github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f/go.mod h1:p9UJB6dDgdPgMJZs7UjUOdulKyRr9fqkS+6JKAInPy8= github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 h1:EKhdznlJHPMoKr0XTrX+IlJs1LH3lyx2nfr1dOlZ79k= github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1/go.mod h1:8UvriyWtv5Q5EOgjHaSseUEdkQfvwFv1I/In/O2M9gc= github.com/whyrusleeping/go-logging v0.0.0-20170515211332-0457bb6b88fc/go.mod h1:bopw91TMyo8J3tvftk8xmU2kPmlrt4nScJQZU2hE5EM= github.com/whyrusleeping/go-logging v0.0.1/go.mod h1:lDPYj54zutzG1XYfHAhcc7oNXEburHQBn+Iqd4yS4vE= +github.com/whyrusleeping/go-notifier v0.0.0-20170827234753-097c5d47330f/go.mod h1:cZNvX9cFybI01GriPRMXDtczuvUhgbcYr9iCGaNlRv8= github.com/whyrusleeping/mafmt v1.2.8/go.mod h1:faQJFPbLSxzD9xpA02ttW/tS9vZykNvXwGvqIpk20FA= +github.com/whyrusleeping/mdns v0.0.0-20180901202407-ef14215e6b30/go.mod h1:j4l84WPFclQPj320J9gp0XwNKBb3U0zt5CBqjPp22G4= github.com/whyrusleeping/mdns v0.0.0-20190826153040-b9b60ed33aa9/go.mod h1:j4l84WPFclQPj320J9gp0XwNKBb3U0zt5CBqjPp22G4= -github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7 h1:E9S12nwJwEOXe2d6gT6qxdvqMnNq+VnSsKPgm2ZZNds= github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7/go.mod h1:X2c0RVCI1eSUFI8eLcY3c0423ykwiUdxLJtkDvruhjI= -github.com/whyrusleeping/timecache v0.0.0-20160911033111-cfcb2f1abfee h1:lYbXeSvJi5zk5GLKVuid9TVjS9a0OmLIDKTfoZBL6Ow= 
-github.com/whyrusleeping/timecache v0.0.0-20160911033111-cfcb2f1abfee/go.mod h1:m2aV4LZI4Aez7dP5PMyVKEHhUyEJ/RjmPEDOpDvudHg= +github.com/willf/bitset v1.1.3/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= github.com/x-cray/logrus-prefixed-formatter v0.5.2/go.mod h1:2duySbKsL6M18s5GU7VPsoEPHyzalCE06qoARUCeBBE= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xlab/treeprint v0.0.0-20180616005107-d6fb6747feb6/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU= +github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8= +github.com/ybbus/jsonrpc v2.1.2+incompatible/go.mod h1:XJrh1eMSzdIYFbM08flv0wp5G35eRniyeGut1z+LSiE= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/zondax/hid v0.9.0/go.mod h1:l5wttcP0jwtdLjqjMMWFVEE7d1zO0jvSPA9OPZxWpEM= -github.com/zondax/hid v0.9.1-0.20220302062450-5552068d2266 h1:O9XLFXGkVswDFmH9LaYpqu+r/AAFWqr0DL6V00KEVFg= -github.com/zondax/hid v0.9.1-0.20220302062450-5552068d2266/go.mod h1:l5wttcP0jwtdLjqjMMWFVEE7d1zO0jvSPA9OPZxWpEM= +github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +github.com/zondax/hid v0.9.2 h1:WCJFnEDMiqGF64nlZz28E9qLVZ0KSJ7xpc5DLEyma2U= +github.com/zondax/hid v0.9.2/go.mod h1:l5wttcP0jwtdLjqjMMWFVEE7d1zO0jvSPA9OPZxWpEM= +github.com/zondax/ledger-go v0.14.3 h1:wEpJt2CEcBJ428md/5MgSLsXLBos98sBOyxNmCjfUCw= +github.com/zondax/ledger-go v0.14.3/go.mod h1:IKKaoxupuB43g4NxeQmbLXv7T9AlQyie1UpHb342ycI= gitlab.com/NebulousLabs/errors v0.0.0-20171229012116-7ead97ef90b8/go.mod h1:ZkMZ0dpQyWwlENaeZVBiQRjhMEZvk6VTXquzl3FOFP8= gitlab.com/NebulousLabs/errors v0.0.0-20200929122200-06c536cf6975 h1:L/ENs/Ar1bFzUeKx6m3XjlmBgIUlykX9dzvp5k9NGxc= +gitlab.com/NebulousLabs/errors v0.0.0-20200929122200-06c536cf6975/go.mod h1:ZkMZ0dpQyWwlENaeZVBiQRjhMEZvk6VTXquzl3FOFP8= gitlab.com/NebulousLabs/fastrand v0.0.0-20181126182046-603482d69e40 h1:dizWJqTWjwyD8KGcMOwgrkqu1JIkofYgKkmDeNE7oAs= gitlab.com/NebulousLabs/fastrand v0.0.0-20181126182046-603482d69e40/go.mod h1:rOnSnoRyxMI3fe/7KIbVcsHRGxe30OONv8dEgo+vCfA= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= @@ -1567,66 +2380,91 @@ go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= -go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.opencensus.io 
v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/contrib/instrumentation/runtime v0.45.0 h1:2JydY5UiDpqvj2p7sO9bgHuhTy4hgTZ0ymehdq/Ob0Q= +go.opentelemetry.io/contrib/instrumentation/runtime v0.45.0/go.mod h1:ch3a5QxOqVWxas4CzjCFFOOQe+7HgAXC/N1oVxS9DK4= +go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo= go.opentelemetry.io/otel v1.7.0/go.mod h1:5BdUoMIz5WEs0vt0CUEMtSSaTSHBBVwrhnz7+nrD5xk= -go.opentelemetry.io/otel v1.8.0 h1:zcvBFizPbpa1q7FehvFiHbQwGzmPILebO0tyqIR5Djg= -go.opentelemetry.io/otel v1.8.0/go.mod h1:2pkj+iMj0o03Y+cW6/m8Y4WkRdYN3AvCXCnzRMp9yvM= -go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.8.0 h1:ao8CJIShCaIbaMsGxy+jp2YHSudketpDgDRcbirov78= -go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.8.0/go.mod h1:78XhIg8Ht9vR4tbLNUhXsiOnE2HOuSeKAiAcoVQEpOY= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.31.0 h1:H0+xwv4shKw0gfj/ZqR13qO2N/dBQogB1OcRjJjV39Y= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.31.0/go.mod h1:nkenGD8vcvs0uN6WhR90ZVHQlgDsRmXicnNadMnk+XQ= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v0.31.0 h1:MuEG0gG27QZQrqhNl0f7vQ5Nl03OQfFeDAqWkGt+1zM= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v0.31.0/go.mod h1:52qtPFDDaa0FaSyyzPnxWMehx2SZv0xuobTlNEZA2JA= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.8.0 h1:LrHL1A3KqIgAgi6mK7Q0aczmzU414AONAGT5xtnp+uo= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.8.0/go.mod h1:w8aZL87GMOvOBa2lU/JlVXE1q4chk/0FX+8ai4513bw= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.8.0 h1:SMO1HopgdAqNRit+WA3w3dcJSGANuH/ihKXDekEHfuY= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.8.0/go.mod h1:tsw+QO2+pGo7xOrPXrS27HxW8uqGQkw5AzJwdsoyvgw= -go.opentelemetry.io/otel/metric v0.31.0 h1:6SiklT+gfWAwWUR0meEMxQBtihpiEs4c+vL9spDTqUs= -go.opentelemetry.io/otel/metric v0.31.0/go.mod h1:ohmwj9KTSIeBnDBm/ZwH2PSZxZzoOaG2xZeekTRzL5A= -go.opentelemetry.io/otel/sdk v1.8.0 h1:xwu69/fNuwbSHWe/0PGS888RmjWY181OmcXDQKu7ZQk= -go.opentelemetry.io/otel/sdk v1.8.0/go.mod h1:uPSfc+yfDH2StDM/Rm35WE8gXSNdvCg023J6HeGNO0c= -go.opentelemetry.io/otel/sdk/metric v0.31.0 h1:2sZx4R43ZMhJdteKAlKoHvRgrMp53V1aRxvEf5lCq8Q= -go.opentelemetry.io/otel/sdk/metric v0.31.0/go.mod h1:fl0SmNnX9mN9xgU6OLYLMBMrNAsaZQi7qBwprwO3abk= +go.opentelemetry.io/otel v1.13.0/go.mod h1:FH3RtdZCzRkJYFTCsAKDy9l/XYjMdNv6QrkFFB8DvVg= +go.opentelemetry.io/otel v1.20.0/go.mod h1:oUIGj3D77RwJdM6PPZImDpSZGDvkD9fhesHny69JFrs= +go.opentelemetry.io/otel v1.22.0 h1:xS7Ku+7yTFvDfDraDIJVpw7XPyuHlB9MCiqqX5mcJ6Y= +go.opentelemetry.io/otel v1.22.0/go.mod h1:eoV4iAi3Ea8LkAEI9+GFT44O6T/D0GWAVFyZVCC6pMI= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v0.45.0 h1:+RbSCde0ERway5FwKvXR3aRJIFeDu9rtwC6E7BC6uoM= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v0.45.0/go.mod h1:zcI8u2EJxbLPyoZ3SkVAAcQPgYb1TDRzW93xLFnsggU= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0 h1:9M3+rhx7kZCIQQhQRYaZCdNu1V73tm4TvXs2ntl98C4= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0/go.mod h1:noq80iT8rrHP1SfybmPiRGc9dc5M8RPmGvtwo7Oo7tc= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.22.0 h1:FyjCyI9jVEfqhUh2MoSkmolPjfh5fp2hnV0b0irxH4Q= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.22.0/go.mod h1:hYwym2nDEeZfG/motx0p7L7J1N1vyzIThemQsb4g2qY= 
+go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.15.1 h1:2PunuO5SbkN5MhCbuHCd3tC6qrcaj+uDAkX/qBU5BAs= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.15.1/go.mod h1:q8+Tha+5LThjeSU8BW93uUC5w5/+DnYHMKBMpRCsui0= +go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU= +go.opentelemetry.io/otel/metric v1.20.0/go.mod h1:90DRw3nfK4D7Sm/75yQ00gTJxtkBxX+wu6YaNymbpVM= +go.opentelemetry.io/otel/metric v1.22.0 h1:lypMQnGyJYeuYPhOM/bgjbFM6WE44W1/T45er4d8Hhg= +go.opentelemetry.io/otel/metric v1.22.0/go.mod h1:evJGjVpZv0mQ5QBRJoBF64yMuOf4xCWdXjK8pzFvliY= +go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw= +go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc= +go.opentelemetry.io/otel/sdk v1.22.0 h1:6coWHw9xw7EfClIC/+O31R8IY3/+EiRFHevmHafB2Gw= +go.opentelemetry.io/otel/sdk v1.22.0/go.mod h1:iu7luyVGYovrRpe2fmj3CVKouQNdTOkxtLzPvPz1DOc= +go.opentelemetry.io/otel/sdk/metric v1.22.0 h1:ARrRetm1HCVxq0cbnaZQlfwODYJHo3gFL8Z3tSmHBcI= +go.opentelemetry.io/otel/sdk/metric v1.22.0/go.mod h1:KjQGeMIDlBNEOo6HvjhxIec1p/69/kULDcp4gr0oLQQ= +go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw= go.opentelemetry.io/otel/trace v1.7.0/go.mod h1:fzLSB9nqR2eXzxPXb2JW9IKE+ScyXA48yyE4TNvoHqU= -go.opentelemetry.io/otel/trace v1.8.0 h1:cSy0DF9eGI5WIfNwZ1q2iUyGj00tGzP24dE1lOlHrfY= -go.opentelemetry.io/otel/trace v1.8.0/go.mod h1:0Bt3PXY8w+3pheS3hQUt+wow8b1ojPaTBoTCh2zIFI4= +go.opentelemetry.io/otel/trace v1.13.0/go.mod h1:muCvmmO9KKpvuXSf3KKAXXB2ygNYHQ+ZfI5X08d3tds= +go.opentelemetry.io/otel/trace v1.20.0/go.mod h1:HJSK7F/hA5RlzpZ0zKDCHCDHm556LCDtKaAo6JmBFUU= +go.opentelemetry.io/otel/trace v1.22.0 h1:Hg6pPujv0XG9QaVbGOBVHunyuLcCC3jN7WEhPx83XD0= +go.opentelemetry.io/otel/trace v1.22.0/go.mod h1:RbbHXVqKES9QhzZq/fE5UnOSILqRt40a21sPw2He1xo= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.opentelemetry.io/proto/otlp v0.18.0 h1:W5hyXNComRa23tGpKwG+FRAc4rfF6ZUg1JReK+QHS80= -go.opentelemetry.io/proto/otlp v0.18.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= +go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= +go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/dig v1.14.0 h1:VmGvIH45/aapXPQkaOrK5u4B5B7jxZB98HM/utx0eME= -go.uber.org/dig v1.14.0/go.mod h1:jHAn/z1Ld1luVVyGKOAIFYz/uBFqKjjEEdIqVAqfQ2o= -go.uber.org/fx v1.17.1 h1:S42dZ6Pok8hQ3jxKwo6ZMYcCgHQA/wAS/gnpRa1Pksg= -go.uber.org/fx v1.17.1/go.mod h1:yO7KN5rhlARljyo4LR047AjaV6J+KFzd/Z7rnTbEn0A= +go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= +go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= +go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= +go.uber.org/dig v1.15.0/go.mod h1:pKHs0wMynzL6brANhB2hLMro+zalv1osARTviTcqHLM= 
+go.uber.org/dig v1.17.1 h1:Tga8Lz8PcYNsWsyHMZ1Vm0OQOUaJNDyvPImgbAu9YSc= +go.uber.org/dig v1.17.1/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE= +go.uber.org/fx v1.18.2/go.mod h1:g0V1KMQ66zIRk8bLu3Ea5Jt2w/cHlOIp4wdRsgh0JaY= +go.uber.org/fx v1.20.1 h1:zVwVQGS8zYvhh9Xxcu4w1M6ESyeMzebzj2NbSayZ4Mk= +go.uber.org/fx v1.20.1/go.mod h1:iSYNbHf2y55acNCwCXKx7LbWb5WG1Bnue5RDXz1OREg= go.uber.org/goleak v1.0.0/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= -go.uber.org/goleak v1.1.11 h1:wy28qYRKZgnJTxGxvye5/wgWr1EKjmUDGYox5mGlRlI= go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= +go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= +go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk= +go.uber.org/goleak v1.2.0/go.mod h1:XJYK+MuIchqpmGmUSAzotztawfKvYLUIgg7guXrwVUo= +go.uber.org/mock v0.3.0 h1:3mUxI1No2/60yUYax92Pt8eNOEecx2D3lcXZh2NEZJo= +go.uber.org/mock v0.3.0/go.mod h1:a6FSlNadKUHUa9IP5Vyt1zh4fC7uAwxMutEAscFbkZc= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/multierr v1.7.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= -go.uber.org/multierr v1.8.0 h1:dg6GjLku4EH+249NNmoIciG9N/jURbDG+pFlTkhzIC8= go.uber.org/multierr v1.8.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= +go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= go.uber.org/zap v1.14.1/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ= -go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI= -go.uber.org/zap v1.21.0 h1:WefMeulhovoZ2sYXz7st6K0sLj7bBhpiFaud4r4zST8= go.uber.org/zap v1.21.0/go.mod h1:wjWOCqI0f2ZZrJF/UufIOkiC8ii6tm1iqIsLo76RfJw= +go.uber.org/zap v1.22.0/go.mod h1:H4siCOZOrAolnUPJEkfaSjDqyP+BDS0DdDWzwcgt3+U= +go.uber.org/zap v1.23.0/go.mod h1:D+nX8jyLsMHMYrln8A0rJjFt/T/9/bGgIhAqxv5URuY= +go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= +go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo= +go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so= go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw= golang.org/x/crypto 
v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -1641,14 +2479,18 @@ golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACk golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190530122614-20be4c3c3ed5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190618222545-ea8f1a30c443/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190909091759-094676da4a83/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200109152110-61a87790db17/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200115085410-6d4e4cb37c7d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200423211502-4bdfaf469ed5/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200602180216-279210d13fed/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -1659,15 +2501,27 @@ golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWP golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210506145944-38f3c27a63bf/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= -golang.org/x/crypto v0.0.0-20210813211128-0a44fdfbc16e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= +golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20211215165025-cf75a172585e/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= -golang.org/x/crypto v0.0.0-20220518034528-6f7dac969898/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e h1:T8NU3HyQ8ClP4SEE+KbFlg6n0NhuTsN4MyznaarGsZM= +golang.org/x/crypto 
v0.0.0-20220331220935-ae2d96664a29/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220427172511-eb4f295cb31f/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= +golang.org/x/crypto v0.4.0/go.mod h1:3quD/ATkf6oY+rnes5c3ExXTbLc8mueNue5/DoinL80= +golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU= +golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= +golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo= +golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= +golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190731235908-ec7cb31e5a56/go.mod h1:JhuoJpWY28nO4Vef9tZUw9qufEGTyX1+7lmHxV5q5G4= golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= @@ -1676,6 +2530,16 @@ golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= golang.org/x/exp v0.0.0-20200331195152-e8c3332aa8e5/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw= +golang.org/x/exp v0.0.0-20210615023648-acb5c1269671/go.mod h1:DVyR6MI7P4kEQgvZJSj1fQGrWIi2RzIrfYWycwheUAc= +golang.org/x/exp v0.0.0-20220303212507-bbda1eaf7a17/go.mod h1:lgLbSvA5ygNOMpwM/9anMpWVlVJ7Z+cHWq/eFuinpGE= +golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e/go.mod h1:Kr81I6Kryrl9sr8s2FK3vxD90NdsKWRuOIl2O4CvYbA= +golang.org/x/exp v0.0.0-20220916125017-b168a2c6b86b/go.mod h1:cyybsKvd6eL0RnXn6p/Grxp8F5bW7iYuBgsNCOHpMYE= +golang.org/x/exp v0.0.0-20221205204356-47842c84f3db/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= +golang.org/x/exp v0.0.0-20230129154200-a960b3787bd2/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= +golang.org/x/exp v0.0.0-20230213192124-5e25df0256eb/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= +golang.org/x/exp v0.0.0-20240110193028-0dcbfd608b1e h1:723BNChdd0c2Wk6WOE320qGBiPtYx0F0Bbm1kriShfE= +golang.org/x/exp v0.0.0-20240110193028-0dcbfd608b1e/go.mod h1:iRJReGqOEeBhDZGkGbynYwcHlctCvnjTYIamk7uXpHI= +golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod 
h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -1693,17 +2557,25 @@ golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPI golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mobile v0.0.0-20201217150744-e6ae53a27f4f/go.mod h1:skQtrUTUwhdJvXM/2KKJzY8pDgNr9I/FOMqDVRPBUS4= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191209134235-331c550502dd/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 h1:kQgndtyPBW/JIYERgdxfwMYh3AVStj88WQTlNDi2a+o= +golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= +golang.org/x/mod v0.6.0-dev.0.20211013180041-c96bc1413d57/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI= +golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0= +golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1724,12 +2596,15 @@ golang.org/x/net v0.0.0-20190313220215-9f648a60d977/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net 
v0.0.0-20190611141213-3f473d35a33a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -1749,12 +2624,15 @@ golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200904194848-62affa334b73/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201010224723-4f7140c49acb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201022231255-08b38378de70/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210220033124-5f55cee0dc0d/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= @@ -1762,18 +2640,35 @@ golang.org/x/net v0.0.0-20210423184538-5f58ad60dda6/go.mod h1:OJAsFXCWl8Ukc7SiCT golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net 
v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220418201149-a630d4f3e7a2/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220517181318-183a9ca12b87/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220520000938-2e3eb7b945c2 h1:NWy5+hlRbC7HK+PmcXVUmW1IMyFce7to56IUvhUFm7Y= -golang.org/x/net v0.0.0-20220520000938-2e3eb7b945c2/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220812174116-3211cb980234/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20220920183852-bf014ff85ad5/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20220923203811-8be639271d50/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= +golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= +golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= +golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= +golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c= +golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -1794,8 +2689,15 @@ golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 
v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5 h1:OSnWWcOd/CtWQC2cYSBgbTSJv3ciqd8r54ySIW2y3RE= golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= +golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= +golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.1.0/go.mod h1:G9FE4dLTsbXUu90h/Pf85g4w1D+SSAgR+q46nJZ8M4A= +golang.org/x/oauth2 v0.13.0 h1:jDDenyj+WgFtmV3zYVoi8aE2BwtXFLWOA67ZfNWftiY= +golang.org/x/oauth2 v0.13.0/go.mod h1:/JMhi4ZRXAf4HG9LiNmxvk+45+96RUlVThiH8FzNBn0= golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1808,8 +2710,12 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220513210516-0976fa681c29 h1:w8s32wxx3sY+OjLlv9qltkLU5yvJzxjjgiHWLjdIcw4= -golang.org/x/sync v0.0.0-20220513210516-0976fa681c29/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= +golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1826,6 +2732,7 @@ golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20190219092855-153ac476189d/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190228124157-a34e9553db1e/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190302025703-b6889370fb10/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190316082340-a2f829d7f35f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190405154228-4b34438f7a67/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1833,14 +2740,17 @@ golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190524122548-abf6ff778158/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190526052359-791d8a0f4d09/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190610200419-93c9922d18ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190712062909-fae7ac547cb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1851,6 +2761,7 @@ golang.org/x/sys v0.0.0-20191206220618-eeba5f6aabab/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200107162124-548cf772de50/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1872,6 +2783,7 @@ golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200826173525-f9321e4c35a6/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1888,9 +2800,12 @@ golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210309074719-68d13333faf2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210316164454-77fc1eacc6aa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210317225723-c4fcb01b228e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210420205809-ac73e9fd8988/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210426080607-c94f62235c83/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1905,8 +2820,11 @@ golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210819135213-f52c844e1c1c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210903071746-97244b99971b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1914,16 +2832,47 @@ golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220405210540-1e041c57c461/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220429233432-b5fbb4746d32/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220517195934-5e4e11fc645e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a h1:dGzPydgVsqGcTRVwiLJ1jVbufYwmzD3LfVPLKsKg+0k= +golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220919091848-fb04ddd9f9c8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20221010170243-090e33056c14/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= +golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term 
v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= +golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= +golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.17.0 h1:mkTF7LCd6WGJNL3K1Ad7kwxNfYAW6a8a8QqtMblp/4U= +golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1932,17 +2881,29 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= +golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod 
h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181130052023-1c3d964395ce/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= @@ -1969,6 +2930,8 @@ golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapK golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200108203644-89082a384178/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117012304-6edc0a871e69/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= @@ -1997,22 +2960,38 @@ golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210112230658-8b4aab62c064/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo= -golang.org/x/tools v0.1.11-0.20220316014157-77aa08bb151a h1:ofrrl6c6NG5/IOSx/R1cyiQxxjqlur0h/TvbUhkH0II= -golang.org/x/tools v0.1.11-0.20220316014157-77aa08bb151a/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= +golang.org/x/tools v0.1.6-0.20210726203631-07bc1bf47fb2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.8-0.20211029000441-d6a9af8af023/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= +golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA= +golang.org/x/tools v0.3.0/go.mod 
h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= +golang.org/x/tools v0.16.0 h1:GO788SKMRunPIBCXiQyo2AaexLstOrVhuAL5YwsckQM= +golang.org/x/tools v0.16.0/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df h1:5Pf6pFKu98ODmgnpvkJ3kFUOQGGLIzLIkbzUHp47618= golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk= +golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= +gonum.org/v1/gonum v0.0.0-20181121035319-3f7ecaa7e8ca/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= +gonum.org/v1/gonum v0.6.0/go.mod h1:9mxDZsDKxgMAuccQkewq682L+0eCu4dCN2yonUJTCLU= +gonum.org/v1/gonum v0.13.0 h1:a0T3bh+7fhRyqeNbiC3qVHYmkiQgit3wnNan/2c0HMM= +gonum.org/v1/gonum v0.13.0/go.mod h1:/WPYRckkfWrhWefxyYTfrTtQR0KH4iyHNuzxqXAKyAU= +gonum.org/v1/netlib v0.0.0-20181029234149-ec6d1f5cefe6/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= +gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= +gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y= @@ -2053,9 +3032,20 @@ google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/S google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8= google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs= google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= +google.golang.org/api v0.77.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= -google.golang.org/api v0.81.0 h1:o8WF5AvfidafWbFjsRyupxyEQJNUWxLZJCK5NXrxZZ8= -google.golang.org/api v0.81.0/go.mod h1:FA6Mb/bZxj706H2j+j2d6mHEEaHBmbbWnkfvmorOCko= +google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg= +google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o= +google.golang.org/api v0.85.0/go.mod h1:AqZf8Ep9uZ2pyTvgL+x0D3Zt0eoT9b5E8fmzfu6FO2g= +google.golang.org/api v0.90.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= +google.golang.org/api v0.93.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= +google.golang.org/api 
v0.95.0/go.mod h1:eADj+UBuxkh5zlrSntJghuNeg8HwQ1w5lTKkuqaETEI= +google.golang.org/api v0.96.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.97.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.98.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.100.0/go.mod h1:ZE3Z2+ZOr87Rx7dqFsdRQkRBk36kDtp/h+QpHbB7a70= +google.golang.org/api v0.149.0 h1:b2CqT6kG+zqJIVKRQ3ELJVLN1PwHZ6DJ3dW8yl82rgY= +google.golang.org/api v0.149.0/go.mod h1:Mwn1B7JTXrzXtnvmzQE2BD6bYZQ8DShKZDZbeN9I7qI= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -2064,8 +3054,9 @@ google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7 google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= +google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= @@ -2076,6 +3067,7 @@ google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRn google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= +google.golang.org/genproto v0.0.0-20190716160619-c506a9f90610/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= @@ -2083,6 +3075,7 @@ google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvx google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200108215221-bd8f9a0ef82f/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= 
google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= @@ -2115,6 +3108,7 @@ google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210329143202-679c6ae281ee/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= @@ -2148,9 +3142,40 @@ google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= -google.golang.org/genproto v0.0.0-20220519153652-3a47de7e79bd h1:e0TwkXOdbnH/1x5rc5MZ/VYyiZ4v+RdVfrGMqEwT68I= -google.golang.org/genproto v0.0.0-20220519153652-3a47de7e79bd/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220628213854-d9e0b6570c03/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220722212130-b98a9ff5e252/go.mod h1:GkXuJDJ6aQ7lnJcRF+SJVgFdQhypqgl3LB1C9vabdRE= +google.golang.org/genproto v0.0.0-20220801145646-83ce21fca29f/go.mod h1:iHe1svFLAZg9VWz891+QbRMwUv9O/1Ww+/mngYeThbc= +google.golang.org/genproto v0.0.0-20220815135757-37a418bb8959/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220817144833-d7fd3f11b9b1/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220822174746-9e6da59bd2fc/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto 
v0.0.0-20220829144015-23454907ede3/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220829175752-36a9c930ecbf/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220913154956-18f8339a66a5/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220914142337-ca0e39ece12f/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220915135415-7fd63a7952de/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220916172020-2692e8806bfa/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220919141832-68c03719ef51/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220920201722-2b89144ce006/go.mod h1:ht8XFiar2npT/g4vkk7O0WYS1sHOHbdujxbEp7CJWbw= +google.golang.org/genproto v0.0.0-20220926165614-551eb538f295/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= +google.golang.org/genproto v0.0.0-20220926220553-6981cbe3cfce/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= +google.golang.org/genproto v0.0.0-20221010155953-15ba04fc1c0e/go.mod h1:3526vdqwhZAwq4wsRUaVG555sVgsNmIjRtO7t/JH29U= +google.golang.org/genproto v0.0.0-20221014173430-6e2ab493f96b/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= +google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= +google.golang.org/genproto v0.0.0-20221025140454-527a21cfbd71/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= +google.golang.org/genproto v0.0.0-20231211222908-989df2bf70f3 h1:1hfbdAfFbkmpg41000wDVqr7jUpK/Yo+LPnIxxGzmkg= +google.golang.org/genproto v0.0.0-20231211222908-989df2bf70f3/go.mod h1:5RBcpGRxr25RbDzY5w+dmaqpSEvl8Gwl1x2CICf60ic= +google.golang.org/genproto/googleapis/api v0.0.0-20231120223509-83a465c0220f h1:2yNACc1O40tTnrsbk9Cv6oxiW8pxI/pXj0wRtdlYmgY= +google.golang.org/genproto/googleapis/api v0.0.0-20231120223509-83a465c0220f/go.mod h1:Uy9bTZJqmfrw2rIBxgGLnamc78euZULUBrLZ9XTITKI= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231212172506-995d672761c0 h1:/jFB8jK5R3Sq3i/lmeZO0cATSzFfZaJq1J2Euan3XKU= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231212172506-995d672761c0/go.mod h1:FUoWkonphQm3RhTS+kOEhF8h0iDpm4tdXolVCeZ9KKA= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= @@ -2187,13 +3212,17 @@ google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnD google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.48.0 h1:rQOsyJ/8+ufEDJd/Gdsz7HG220Mh9HAhFHRGnIjda0w= +google.golang.org/grpc v1.47.0/go.mod 
h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/grpc v1.50.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/grpc v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/grpc v1.60.1 h1:26+wFr+cNqSGFcOXcabYC0lUVJVRa2Sb2ortSK7VrEU= +google.golang.org/grpc v1.60.1/go.mod h1:OlCHIeLYqSSsLi6i49B5QGdzaMZK9+M7LXN2FKz4eGM= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= @@ -2208,12 +3237,16 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I= +google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= @@ -2222,13 +3255,18 @@ gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/ini.v1 v1.66.4 h1:SsAcf+mM7mRZo2nJNGt8mZCjG8ZRaNGMURJw7BsIST4= -gopkg.in/ini.v1 v1.66.4/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= +gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8= +gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= +gopkg.in/natefinch/npipe.v2 
v2.0.0-20160621034901-c1b8fa8bdcce/go.mod h1:5AcXVHNjg+BDxry382+8OKon8SEWiKktQR07RKPsv1c= +gopkg.in/olebedev/go-duktape.v3 v3.0.0-20200619000410-60c24ae608a6/go.mod h1:uAJfkITjFhyEEuUfm7bsmCZRbW5WRq8s9EY8HZ6hCns= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/src-d/go-cli.v0 v0.0.0-20181105080154-d492247bbc0d/go.mod h1:z+K8VcOYVYcSwSjGebuDL6176A1XskgbtNl64NSg+n8= gopkg.in/src-d/go-log.v1 v1.0.1/go.mod h1:GN34hKP0g305ysm2/hctJ0Y8nWP3zxXXJ8GFabTyABE= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/urfave/cli.v1 v1.20.0/go.mod h1:vuBzUtMdQeixQj8LVd+/98pzhxNGQoyuPBlsXHOQNO0= gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -2241,9 +3279,11 @@ gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -2253,16 +3293,22 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= lukechampine.com/blake3 v1.1.6/go.mod h1:tkKEOtDkNtklkXtLNEOGNq5tcV90tJiA1vAA12R78LA= -lukechampine.com/blake3 v1.1.7 h1:GgRMhmdsuK8+ii6UZFDL8Nb+VyMwadAgcJyfYHxG6n0= lukechampine.com/blake3 v1.1.7/go.mod h1:tkKEOtDkNtklkXtLNEOGNq5tcV90tJiA1vAA12R78LA= -nhooyr.io/websocket v1.8.6 h1:s+C3xAMLwGmlI31Nyn/eAehUlZPwfYZu2JXM621Q5/k= +lukechampine.com/blake3 v1.2.1 h1:YuqqRuaqsGV71BV/nm9xlI0MKUv4QC54jQnBChWbGnI= +lukechampine.com/blake3 v1.2.1/go.mod h1:0OFRp7fBtAylGVCO40o87sbupkyIGgbpv1+M1k1LM6k= nhooyr.io/websocket v1.8.6/go.mod h1:B70DZP8IakI65RVQ51MsWP/8jndNma26DVA/nFSCgW0= -pgregory.net/rapid v0.4.7 h1:MTNRktPuv5FNqOO151TM9mDTa+XHcX6ypYeISDVD14g= -pgregory.net/rapid v0.4.7/go.mod h1:UYpPVyjFHzYBGHIxLFoupi8vwk6rXNzRY9OMvVxFIOU= +nhooyr.io/websocket v1.8.7 h1:usjR2uOr/zjjkVMy0lW+PPohFok7PCow5sDjLgX4P4g= +nhooyr.io/websocket v1.8.7/go.mod h1:B70DZP8IakI65RVQ51MsWP/8jndNma26DVA/nFSCgW0= 
+pgregory.net/rapid v0.5.3 h1:163N50IHFqr1phZens4FQOdPgfJscR7a562mjQqeo4M= +pgregory.net/rapid v0.5.3/go.mod h1:PY5XlDGj0+V1FCq0o192FdRhpKHGTRIWBgqjDBTrq04= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +rsc.io/tmplfunc v0.0.3 h1:53XFQh69AfOa8Tw0Jm7t+GV7KZhOi6jzsCzTtKbMvzU= +rsc.io/tmplfunc v0.0.3/go.mod h1:AG3sTPzElb1Io3Yg4voV9AGZJuleGAwaVRxL9M49PhA= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= diff --git a/header/core/exchange.go b/header/core/exchange.go deleted file mode 100644 index b3c6d917d0..0000000000 --- a/header/core/exchange.go +++ /dev/null @@ -1,100 +0,0 @@ -package core - -import ( - "bytes" - "context" - "fmt" - - "github.com/ipfs/go-blockservice" - logging "github.com/ipfs/go-log/v2" - - tmbytes "github.com/tendermint/tendermint/libs/bytes" - - "github.com/celestiaorg/celestia-node/core" - "github.com/celestiaorg/celestia-node/header" -) - -var log = logging.Logger("header/core") - -type Exchange struct { - fetcher *core.BlockFetcher - shareStore blockservice.BlockService - construct header.ConstructFn -} - -func NewExchange(fetcher *core.BlockFetcher, bServ blockservice.BlockService, construct header.ConstructFn) *Exchange { - return &Exchange{ - fetcher: fetcher, - shareStore: bServ, - construct: construct, - } -} - -func (ce *Exchange) GetByHeight(ctx context.Context, height uint64) (*header.ExtendedHeader, error) { - log.Debugw("requesting header", "height", height) - intHeight := int64(height) - return ce.getExtendedHeaderByHeight(ctx, &intHeight) -} - -func (ce *Exchange) GetRangeByHeight(ctx context.Context, from, amount uint64) ([]*header.ExtendedHeader, error) { - if amount == 0 { - return nil, nil - } - - log.Debugw("requesting headers", "from", from, "to", from+amount) - headers := make([]*header.ExtendedHeader, amount) - for i := range headers { - extHeader, err := ce.GetByHeight(ctx, from+uint64(i)) - if err != nil { - return nil, err - } - - headers[i] = extHeader - } - - return headers, nil -} - -func (ce *Exchange) Get(ctx context.Context, hash tmbytes.HexBytes) (*header.ExtendedHeader, error) { - log.Debugw("requesting header", "hash", hash.String()) - block, err := ce.fetcher.GetBlockByHash(ctx, hash) - if err != nil { - return nil, err - } - - comm, vals, err := ce.fetcher.GetBlockInfo(ctx, &block.Height) - if err != nil { - return nil, err - } - - eh, err := ce.construct(ctx, block, comm, vals, ce.shareStore) - if err != nil { - return nil, err - } - - // verify hashes match - if !bytes.Equal(hash, eh.Hash()) { - return nil, fmt.Errorf("incorrect hash in header: expected %x, got %x", hash, eh.Hash()) - } - - return eh, nil -} - -func (ce *Exchange) Head(ctx context.Context) (*header.ExtendedHeader, error) { - log.Debug("requesting head") - return ce.getExtendedHeaderByHeight(ctx, nil) -} - -func (ce *Exchange) getExtendedHeaderByHeight(ctx context.Context, height *int64) (*header.ExtendedHeader, error) { - b, err := ce.fetcher.GetBlock(ctx, height) - if err != nil { - return nil, err - } - - comm, vals, err := ce.fetcher.GetBlockInfo(ctx, &b.Height) - if err != nil { - return nil, err - } - - return 
ce.construct(ctx, b, comm, vals, ce.shareStore) -} diff --git a/header/core/exchange_test.go b/header/core/exchange_test.go deleted file mode 100644 index 4808771427..0000000000 --- a/header/core/exchange_test.go +++ /dev/null @@ -1,52 +0,0 @@ -package core - -import ( - "bytes" - "context" - "testing" - - mdutils "github.com/ipfs/go-merkledag/test" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/celestiaorg/celestia-node/core" - "github.com/celestiaorg/celestia-node/header" -) - -func TestCoreExchange_RequestHeaders(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - fetcher := createCoreFetcher(ctx, t) - store := mdutils.Bserv() - - // generate 10 blocks - generateBlocks(t, fetcher) - - ce := NewExchange(fetcher, store, header.MakeExtendedHeader) - headers, err := ce.GetRangeByHeight(context.Background(), 1, 10) - require.NoError(t, err) - - assert.Equal(t, 10, len(headers)) -} - -func Test_hashMatch(t *testing.T) { - expected := []byte("AE0F153556A4FA5C0B7C3BFE0BAF0EC780C031933B281A8D759BB34C1DA31C56") - mismatch := []byte("57A0D7FE69FE88B3D277C824B3ACB9B60E5E65837A802485DE5CBB278C43576A") - - assert.False(t, bytes.Equal(expected, mismatch)) -} - -func createCoreFetcher(ctx context.Context, t *testing.T) *core.BlockFetcher { - _, client := core.StartTestClient(ctx, t) - return core.NewBlockFetcher(client) -} - -func generateBlocks(t *testing.T, fetcher *core.BlockFetcher) { - sub, err := fetcher.SubscribeNewBlockEvent(context.Background()) - require.NoError(t, err) - - for i := 0; i < 10; i++ { - <-sub - } -} diff --git a/header/core/listener.go b/header/core/listener.go deleted file mode 100644 index ee11021580..0000000000 --- a/header/core/listener.go +++ /dev/null @@ -1,108 +0,0 @@ -package core - -import ( - "context" - "fmt" - - "github.com/ipfs/go-blockservice" - pubsub "github.com/libp2p/go-libp2p-pubsub" - "github.com/tendermint/tendermint/types" - - "github.com/celestiaorg/celestia-node/core" - "github.com/celestiaorg/celestia-node/header" -) - -// Listener is responsible for listening to Core for -// new block events and converting new Core blocks into -// the main data structure used in the Celestia DA network: -// `ExtendedHeader`. After digesting the Core block, extending -// it, and generating the `ExtendedHeader`, the Listener -// broadcasts the new `ExtendedHeader` to the header-sub gossipsub -// network. -type Listener struct { - bcast header.Broadcaster - fetcher *core.BlockFetcher - bServ blockservice.BlockService - construct header.ConstructFn - cancel context.CancelFunc -} - -func NewListener( - bcast header.Broadcaster, - fetcher *core.BlockFetcher, - bServ blockservice.BlockService, - construct header.ConstructFn, -) *Listener { - return &Listener{ - bcast: bcast, - fetcher: fetcher, - bServ: bServ, - construct: construct, - } -} - -// Start kicks off the Listener listener loop. -func (cl *Listener) Start(ctx context.Context) error { - if cl.cancel != nil { - return fmt.Errorf("listener: already started") - } - - sub, err := cl.fetcher.SubscribeNewBlockEvent(ctx) - if err != nil { - return err - } - - ctx, cancel := context.WithCancel(context.Background()) - go cl.listen(ctx, sub) - cl.cancel = cancel - return nil -} - -// Stop stops the Listener listener loop. 
-func (cl *Listener) Stop(ctx context.Context) error { - cl.cancel() - cl.cancel = nil - return cl.fetcher.UnsubscribeNewBlockEvent(ctx) -} - -// listen kicks off a loop, listening for new block events from Core, -// generating ExtendedHeaders and broadcasting them to the header-sub -// gossipsub network. -func (cl *Listener) listen(ctx context.Context, sub <-chan *types.Block) { - defer log.Info("listener: listening stopped") - for { - select { - case b, ok := <-sub: - if !ok { - return - } - - syncing, err := cl.fetcher.IsSyncing(ctx) - if err != nil { - log.Errorw("listener: getting sync state", "err", err) - return - } - - comm, vals, err := cl.fetcher.GetBlockInfo(ctx, &b.Height) - if err != nil { - log.Errorw("listener: getting block info", "err", err) - return - } - - eh, err := cl.construct(ctx, b, comm, vals, cl.bServ) - if err != nil { - log.Errorw("listener: making extended header", "err", err) - return - } - - // broadcast new ExtendedHeader, but if core is still syncing, notify only local subscribers - err = cl.bcast.Broadcast(ctx, eh, pubsub.WithLocalPublication(syncing)) - if err != nil { - log.Errorw("listener: broadcasting next header", "height", eh.Height, - "err", err) - } - case <-ctx.Done(): - return - } - } -} diff --git a/header/core/listener_test.go b/header/core/listener_test.go deleted file mode 100644 index b66ed2f4d4..0000000000 --- a/header/core/listener_test.go +++ /dev/null @@ -1,106 +0,0 @@ -package core - -import ( - "context" - "testing" - "time" - - mdutils "github.com/ipfs/go-merkledag/test" - "github.com/libp2p/go-libp2p-core/event" - pubsub "github.com/libp2p/go-libp2p-pubsub" - mocknet "github.com/libp2p/go-libp2p/p2p/net/mock" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/celestiaorg/celestia-node/core" - "github.com/celestiaorg/celestia-node/header" - "github.com/celestiaorg/celestia-node/header/p2p" -) - -// TestListener tests the lifecycle of the core listener. 
-func TestListener(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), time.Minute) - t.Cleanup(cancel) - - // create mocknet with two pubsub endpoints - ps0, ps1 := createMocknetWithTwoPubsubEndpoints(ctx, t) - // create second subscription endpoint to listen for Listener's pubsub messages - topic, err := ps1.Join(p2p.PubSubTopic) - require.NoError(t, err) - sub, err := topic.Subscribe() - require.NoError(t, err) - - // create one block to store as Head in local store and then unsubscribe from block events - fetcher := createCoreFetcher(ctx, t) - - // create Listener and start listening - cl := createListener(ctx, t, fetcher, ps0) - err = cl.Start(ctx) - require.NoError(t, err) - - // ensure headers are getting broadcasted to the gossipsub topic - for i := 1; i < 6; i++ { - msg, err := sub.Next(ctx) - require.NoError(t, err) - - var resp header.ExtendedHeader - err = resp.UnmarshalBinary(msg.Data) - require.NoError(t, err) - } - - err = cl.Stop(ctx) - require.NoError(t, err) - require.Nil(t, cl.cancel) -} - -func createMocknetWithTwoPubsubEndpoints(ctx context.Context, t *testing.T) (*pubsub.PubSub, *pubsub.PubSub) { - net, err := mocknet.FullMeshLinked(2) - require.NoError(t, err) - host0, host1 := net.Hosts()[0], net.Hosts()[1] - - // create pubsub for host - ps0, err := pubsub.NewGossipSub(context.Background(), host0, - pubsub.WithMessageSignaturePolicy(pubsub.StrictNoSign)) - require.NoError(t, err) - // create pubsub for peer-side (to test broadcast comes through network) - ps1, err := pubsub.NewGossipSub(context.Background(), host1, - pubsub.WithMessageSignaturePolicy(pubsub.StrictNoSign)) - require.NoError(t, err) - - sub0, err := host0.EventBus().Subscribe(&event.EvtPeerIdentificationCompleted{}) - require.NoError(t, err) - sub1, err := host1.EventBus().Subscribe(&event.EvtPeerIdentificationCompleted{}) - require.NoError(t, err) - - err = net.ConnectAllButSelf() - require.NoError(t, err) - - // wait on both peer identification events - for i := 0; i < 2; i++ { - select { - case <-sub0.Out(): - case <-sub1.Out(): - case <-ctx.Done(): - assert.FailNow(t, "timeout waiting for peers to connect") - } - } - - return ps0, ps1 -} - -func createListener( - ctx context.Context, - t *testing.T, - fetcher *core.BlockFetcher, - ps *pubsub.PubSub, -) *Listener { - p2pSub := p2p.NewSubscriber(ps) - err := p2pSub.Start(ctx) - require.NoError(t, err) - t.Cleanup(func() { - err := p2pSub.Stop(ctx) - require.NoError(t, err) - }) - - return NewListener(p2pSub, fetcher, mdutils.Bserv(), header.MakeExtendedHeader) -} diff --git a/header/doc.go b/header/doc.go index f0696e46da..277027d4f9 100644 --- a/header/doc.go +++ b/header/doc.go @@ -14,34 +14,34 @@ to HeaderSub will receive and validate the ExtendedHeader, and store it, making it available for other dependent services (such as the DataAvailabilitySampler, or DASer) to access. There are 5 main components in the header package: - 1. core.Listener listens for new blocks from the celestia-core network (run by bridge nodes only), - extends them, generates a new ExtendedHeader, and publishes it to the HeaderSub. - 2. p2p.Subscriber listens for new ExtendedHeaders from the Celestia Data Availability (DA) network (via - the HeaderSub) - 3. p2p.Exchange or core.Exchange request ExtendedHeaders from other celestia DA nodes (default for - full and light nodes) or from a celestia-core node connection (bridge nodes only) - 4. 
Syncer manages syncing of past and recent ExtendedHeaders from either the DA network or a celestia-core - connection (bridge nodes only). - 5. Store manages storing ExtendedHeaders and making them available for access by other dependent services. + 1. core.Listener listens for new blocks from the celestia-core network (run by bridge nodes only), + extends them, generates a new ExtendedHeader, and publishes it to the HeaderSub. + 2. p2p.Subscriber listens for new ExtendedHeaders from the Celestia Data Availability (DA) network (via + the HeaderSub) + 3. p2p.Exchange or core.Exchange request ExtendedHeaders from other celestia DA nodes (default for + full and light nodes) or from a celestia-core node connection (bridge nodes only) + 4. Syncer manages syncing of past and recent ExtendedHeaders from either the DA network or a celestia-core + connection (bridge nodes only). + 5. Store manages storing ExtendedHeaders and making them available for access by other dependent services. For bridge nodes, the general flow of the header Service is as follows: - 1. core.Listener listens for new blocks from the celestia-core connection - 2. core.Listener validates the block and generates the ExtendedHeader, simultaneously storing the - extended block shares to disk - 3. core.Listener publishes the new ExtendedHeader to HeaderSub, notifying all subscribed peers of the new - ExtendedHeader - 4. Syncer is already subscribed to the HeaderSub, so it receives new ExtendedHeaders locally from - the core.Listener and stores them to disk via Store. - 4a. If the celestia-core connection is started simultaneously with the bridge node, then the celestia-core - connection will handle the syncing component, piping every synced block to the core.Listener - 4b. If the celestia-core connection is already synced to the network, the Syncer handles requesting past - headers up to the network head from the celestia-core connection (using core.Exchange rather than p2p.Exchange). + 1. core.Listener listens for new blocks from the celestia-core connection + 2. core.Listener validates the block and generates the ExtendedHeader, simultaneously storing the + extended block shares to disk + 3. core.Listener publishes the new ExtendedHeader to HeaderSub, notifying all subscribed peers of the new + ExtendedHeader + 4. Syncer is already subscribed to the HeaderSub, so it receives new ExtendedHeaders locally from + the core.Listener and stores them to disk via Store. + - If the celestia-core connection is started simultaneously with the bridge node, then the celestia-core + connection will handle the syncing component, piping every synced block to the core.Listener + - If the celestia-core connection is already synced to the network, the Syncer handles requesting past + headers up to the network head from the celestia-core connection (using core.Exchange rather than p2p.Exchange). For full and light nodes, the general flow of the header Service is as follows: - 1. Syncer listens for new ExtendedHeaders from HeaderSub - 2. If there is a gap between the local head of chain and the new, validated network head, the Syncer - kicks off a sync routine to request all ExtendedHeaders between local head and network head. - 3. While the Syncer requests headers between the local head and network head in batches, it appends them to the - subjective chain via Store with the last batched header as the new local head. + 1. Syncer listens for new ExtendedHeaders from HeaderSub + 2. 
If there is a gap between the local head of chain and the new, validated network head, the Syncer + kicks off a sync routine to request all ExtendedHeaders between local head and network head. + 3. While the Syncer requests headers between the local head and network head in batches, it appends them to the + subjective chain via Store with the last batched header as the new local head. */ package header diff --git a/header/header.go b/header/header.go index c0240caf29..01a84a2c0a 100644 --- a/header/header.go +++ b/header/header.go @@ -2,25 +2,28 @@ package header import ( "bytes" - "context" + "encoding/json" + "errors" "fmt" + "time" - "github.com/ipfs/go-blockservice" - logging "github.com/ipfs/go-log/v2" - - bts "github.com/tendermint/tendermint/libs/bytes" - "github.com/tendermint/tendermint/pkg/da" + tmjson "github.com/tendermint/tendermint/libs/json" + "github.com/tendermint/tendermint/light" core "github.com/tendermint/tendermint/types" - "github.com/celestiaorg/celestia-node/ipld" + "github.com/celestiaorg/celestia-app/pkg/appconsts" + "github.com/celestiaorg/celestia-app/pkg/da" + libhead "github.com/celestiaorg/go-header" + "github.com/celestiaorg/rsmt2d" ) -var log = logging.Logger("header") - -type DataAvailabilityHeader = da.DataAvailabilityHeader - -// EmptyDAH provides DAH of the empty block. -var EmptyDAH = da.MinDataAvailabilityHeader +// ConstructFn aliases a function that creates an ExtendedHeader. +type ConstructFn = func( + *core.Header, + *core.Commit, + *core.ValidatorSet, + *rsmt2d.ExtendedDataSquare, +) (*ExtendedHeader, error) // RawHeader is an alias to core.Header. It is // "raw" because it is not yet wrapped to include @@ -32,85 +35,175 @@ type RawHeader = core.Header // block headers and perform Data Availability Sampling. type ExtendedHeader struct { RawHeader `json:"header"` - Commit *core.Commit `json:"commit"` - ValidatorSet *core.ValidatorSet `json:"validator_set"` - DAH *DataAvailabilityHeader `json:"dah"` + Commit *core.Commit `json:"commit"` + ValidatorSet *core.ValidatorSet `json:"validator_set"` + DAH *da.DataAvailabilityHeader `json:"dah"` } // MakeExtendedHeader assembles new ExtendedHeader. 
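// A minimal usage sketch for the reworked constructor below, assuming h,
// comm, vals and eds come from the caller's environment (e.g. a core
// fetcher); a nil eds yields the minimum DAH of an empty block:
//
//	eh, err := header.MakeExtendedHeader(h, comm, vals, eds)
//	if err != nil {
//		return nil, err
//	}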
func MakeExtendedHeader( - ctx context.Context, - b *core.Block, + h *core.Header, comm *core.Commit, vals *core.ValidatorSet, - bServ blockservice.BlockService, + eds *rsmt2d.ExtendedDataSquare, ) (*ExtendedHeader, error) { - var dah DataAvailabilityHeader - if len(b.Txs) > 0 { - namespacedShares, _, err := b.Data.ComputeShares(b.OriginalSquareSize) + var ( + dah da.DataAvailabilityHeader + err error + ) + switch eds { + case nil: + dah = da.MinDataAvailabilityHeader() + default: + dah, err = da.NewDataAvailabilityHeader(eds) if err != nil { return nil, err } - extended, err := ipld.AddShares(ctx, namespacedShares.RawShares(), bServ) - if err != nil { - return nil, err - } - dah = da.NewDataAvailabilityHeader(extended) - } else { - // use MinDataAvailabilityHeader for empty block - dah = EmptyDAH() - log.Debugw("empty block received", "height", "blockID", "time", b.Height, b.Time.String(), comm.BlockID) } eh := &ExtendedHeader{ - RawHeader: b.Header, + RawHeader: *h, DAH: &dah, Commit: comm, ValidatorSet: vals, } - return eh, eh.ValidateBasic() + return eh, nil +} + +func (eh *ExtendedHeader) New() *ExtendedHeader { + return new(ExtendedHeader) +} + +func (eh *ExtendedHeader) IsZero() bool { + return eh == nil +} + +func (eh *ExtendedHeader) ChainID() string { + return eh.RawHeader.ChainID +} + +func (eh *ExtendedHeader) Height() uint64 { + return uint64(eh.RawHeader.Height) +} + +func (eh *ExtendedHeader) Time() time.Time { + return eh.RawHeader.Time } // Hash returns Hash of the wrapped RawHeader. -// NOTE: It purposely overrides Hash method of RawHeader to get it directly from Commit without recomputing. -func (eh *ExtendedHeader) Hash() bts.HexBytes { - return eh.Commit.BlockID.Hash +// NOTE: It purposely overrides Hash method of RawHeader to get it directly from Commit without +// recomputing. +func (eh *ExtendedHeader) Hash() libhead.Hash { + return eh.Commit.BlockID.Hash.Bytes() } // LastHeader returns the Hash of the last wrapped RawHeader. -func (eh *ExtendedHeader) LastHeader() bts.HexBytes { - return eh.RawHeader.LastBlockID.Hash +func (eh *ExtendedHeader) LastHeader() libhead.Hash { + return libhead.Hash(eh.RawHeader.LastBlockID.Hash) } -// ValidateBasic performs *basic* validation to check for missed/incorrect fields. -func (eh *ExtendedHeader) ValidateBasic() error { +// Equals returns whether the hash and height of the given header match. +func (eh *ExtendedHeader) Equals(header *ExtendedHeader) bool { + return eh.Height() == header.Height() && bytes.Equal(eh.Hash(), header.Hash()) +} + +// Validate performs *basic* validation to check for missed/incorrect fields. 
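// A hedged sketch of a typical call site for Validate, assuming raw holds
// wire bytes received from a peer:
//
//	eh := new(header.ExtendedHeader)
//	if err := eh.UnmarshalBinary(raw); err != nil {
//		return err
//	}
//	if err := eh.Validate(); err != nil {
//		return fmt.Errorf("invalid header at height %d: %w", eh.Height(), err)
//	}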
+func (eh *ExtendedHeader) Validate() error { err := eh.RawHeader.ValidateBasic() if err != nil { - return err + return fmt.Errorf("ValidateBasic error on RawHeader at height %d: %w", eh.Height(), err) + } + + if eh.RawHeader.Version.App != appconsts.LatestVersion { + return fmt.Errorf("app version mismatch, expected: %d, got %d", appconsts.LatestVersion, + eh.RawHeader.Version.App) } err = eh.Commit.ValidateBasic() if err != nil { - return err + return fmt.Errorf("ValidateBasic error on Commit at height %d: %w", eh.Height(), err) } err = eh.ValidatorSet.ValidateBasic() if err != nil { - return err + return fmt.Errorf("ValidateBasic error on ValidatorSet at height %d: %w", eh.Height(), err) } // make sure the validator set is consistent with the header if valSetHash := eh.ValidatorSet.Hash(); !bytes.Equal(eh.ValidatorsHash, valSetHash) { - return fmt.Errorf("expected validator hash of header to match validator set hash (%X != %X)", - eh.ValidatorsHash, valSetHash, + return fmt.Errorf("expected validator hash of header to match validator set hash (%X != %X) at height %d", + eh.ValidatorsHash, valSetHash, eh.Height(), ) } - if err := eh.ValidatorSet.VerifyCommitLight(eh.ChainID, eh.Commit.BlockID, eh.Height, eh.Commit); err != nil { - return err + // ensure data root from raw header matches computed root + if !bytes.Equal(eh.DAH.Hash(), eh.DataHash) { + return fmt.Errorf("mismatch between data hash commitment from core header and computed data root "+ + "at height %d: data hash: %X, computed root: %X", eh.Height(), eh.DataHash, eh.DAH.Hash()) + } + + // Make sure the header is consistent with the commit. + if eh.Commit.Height != eh.RawHeader.Height { + return fmt.Errorf("header and commit height mismatch: %d vs %d", eh.RawHeader.Height, eh.Commit.Height) + } + if hhash, chash := eh.RawHeader.Hash(), eh.Commit.BlockID.Hash; !bytes.Equal(hhash, chash) { + return fmt.Errorf("commit signs block %X, header is block %X", chash, hhash) + } + + err = eh.ValidatorSet.VerifyCommitLight(eh.ChainID(), eh.Commit.BlockID, int64(eh.Height()), eh.Commit) + if err != nil { + return fmt.Errorf("VerifyCommitLight error at height %d: %w", eh.Height(), err) } - return eh.DAH.ValidateBasic() + err = eh.DAH.ValidateBasic() + if err != nil { + return fmt.Errorf("ValidateBasic error on DAH at height %d: %w", eh.RawHeader.Height, err) + } + return nil +} + +var ( + ErrValidatorHashMismatch = errors.New("validator hash mismatch") + ErrLastHeaderHashMismatch = errors.New("last header hash mismatch") + ErrVerifyCommitLightTrustingFailed = errors.New("commit light trusting verification failed") +) + +// Verify validates given untrusted Header against trusted ExtendedHeader. 
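// Adjacent headers are verified cheaply via hash chaining and validator-hash
// equality; non-adjacent ones fall back to VerifyCommitLightTrusting, whose
// failure is reported as soft. A hedged usage sketch, assuming trusted and
// untrusted are ExtendedHeaders obtained elsewhere:
//
//	if err := trusted.Verify(untrusted); err != nil {
//		var vErr *libhead.VerifyError
//		if errors.As(err, &vErr) && vErr.SoftFailure {
//			// soft failure: may retry against a more recent trusted header
//		}
//		return err
//	}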
+func (eh *ExtendedHeader) Verify(untrst *ExtendedHeader) error { + isAdjacent := eh.Height()+1 == untrst.Height() + if isAdjacent { + // Optimized verification for adjacent headers + // Check the validator hashes are the same + if !bytes.Equal(untrst.ValidatorsHash, eh.NextValidatorsHash) { + return &libhead.VerifyError{ + Reason: fmt.Errorf("%w: expected (%X), but got (%X)", + ErrValidatorHashMismatch, + eh.NextValidatorsHash, + untrst.ValidatorsHash, + ), + } + } + + if !bytes.Equal(untrst.LastHeader(), eh.Hash()) { + return &libhead.VerifyError{ + Reason: fmt.Errorf("%w: expected (%X), but got (%X)", + ErrLastHeaderHashMismatch, + eh.Hash(), + untrst.LastHeader(), + ), + } + } + + return nil + } + + if err := eh.ValidatorSet.VerifyCommitLightTrusting(eh.ChainID(), untrst.Commit, light.DefaultTrustLevel); err != nil { + return &libhead.VerifyError{ + Reason: fmt.Errorf("%w: %w", ErrVerifyCommitLightTrustingFailed, err), + SoftFailure: true, + } + } + return nil } // MarshalBinary marshals ExtendedHeader to binary. @@ -132,3 +225,57 @@ func (eh *ExtendedHeader) UnmarshalBinary(data []byte) error { *eh = *out return nil } + +// MarshalJSON marshals an ExtendedHeader to JSON. The ValidatorSet is wrapped with amino encoding, +// to be able to unmarshal the crypto.PubKey type back from JSON. +func (eh *ExtendedHeader) MarshalJSON() ([]byte, error) { + type Alias ExtendedHeader + validatorSet, err := tmjson.Marshal(eh.ValidatorSet) + if err != nil { + return nil, err + } + rawHeader, err := tmjson.Marshal(eh.RawHeader) + if err != nil { + return nil, err + } + return json.Marshal(&struct { + RawHeader json.RawMessage `json:"header"` + ValidatorSet json.RawMessage `json:"validator_set"` + *Alias + }{ + ValidatorSet: validatorSet, + RawHeader: rawHeader, + Alias: (*Alias)(eh), + }) +} + +// UnmarshalJSON unmarshals an ExtendedHeader from JSON. The ValidatorSet is wrapped with amino +// encoding, to be able to unmarshal the crypto.PubKey type back from JSON. 
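// A hedged round-trip sketch, assuming eh is a valid ExtendedHeader; plain
// encoding/json cannot unmarshal into the interface-typed crypto.PubKey
// inside the ValidatorSet, hence the tmjson wrapping in both directions:
//
//	data, err := eh.MarshalJSON()
//	if err != nil {
//		return err
//	}
//	out := new(header.ExtendedHeader)
//	if err := out.UnmarshalJSON(data); err != nil {
//		return err
//	}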
+func (eh *ExtendedHeader) UnmarshalJSON(data []byte) error { + type Alias ExtendedHeader + aux := &struct { + RawHeader json.RawMessage `json:"header"` + ValidatorSet json.RawMessage `json:"validator_set"` + *Alias + }{ + Alias: (*Alias)(eh), + } + if err := json.Unmarshal(data, &aux); err != nil { + return err + } + + valSet := new(core.ValidatorSet) + if err := tmjson.Unmarshal(aux.ValidatorSet, valSet); err != nil { + return err + } + rawHeader := new(RawHeader) + if err := tmjson.Unmarshal(aux.RawHeader, rawHeader); err != nil { + return err + } + + eh.ValidatorSet = valSet + eh.RawHeader = *rawHeader + return nil +} + +var _ libhead.Header[*ExtendedHeader] = &ExtendedHeader{} diff --git a/header/header_test.go b/header/header_test.go deleted file mode 100644 index 419f6e93f9..0000000000 --- a/header/header_test.go +++ /dev/null @@ -1,38 +0,0 @@ -package header - -import ( - "context" - "testing" - - mdutils "github.com/ipfs/go-merkledag/test" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/celestiaorg/celestia-node/core" -) - -func TestMakeExtendedHeaderForEmptyBlock(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - _, client := core.StartTestClient(ctx, t) - fetcher := core.NewBlockFetcher(client) - - store := mdutils.Bserv() - - sub, err := fetcher.SubscribeNewBlockEvent(ctx) - require.NoError(t, err) - <-sub - - height := int64(1) - b, err := fetcher.GetBlock(ctx, &height) - require.NoError(t, err) - - comm, val, err := fetcher.GetBlockInfo(ctx, &height) - require.NoError(t, err) - - headerExt, err := MakeExtendedHeader(ctx, b, comm, val, store) - require.NoError(t, err) - - assert.Equal(t, EmptyDAH(), *headerExt.DAH) -} diff --git a/header/headertest/fraud/testing.go b/header/headertest/fraud/testing.go new file mode 100644 index 0000000000..e2ff13a4e0 --- /dev/null +++ b/header/headertest/fraud/testing.go @@ -0,0 +1,101 @@ +package headerfraud + +import ( + "context" + "testing" + "time" + + "github.com/ipfs/boxo/blockservice" + "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/libs/bytes" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" + "github.com/tendermint/tendermint/types" + + "github.com/celestiaorg/celestia-app/pkg/da" + "github.com/celestiaorg/nmt" + "github.com/celestiaorg/rsmt2d" + + "github.com/celestiaorg/celestia-node/header" + "github.com/celestiaorg/celestia-node/header/headertest" + "github.com/celestiaorg/celestia-node/share/eds" + "github.com/celestiaorg/celestia-node/share/eds/edstest" + "github.com/celestiaorg/celestia-node/share/ipld" +) + +// FraudMaker allows producing an invalid header at the specified height in order to produce the +// BEFP (Bad Encoding Fraud Proof). 
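// A hedged sketch of wiring a FraudMaker into a test, where vals, valSet and
// edsStore are assumed test fixtures and 16 is an arbitrary ODS size:
//
//	fm := headerfraud.NewFraudMaker(t, 10, vals, valSet)
//	construct := fm.MakeExtendedHeader(16, edsStore)
//	// construct satisfies header.ConstructFn and corrupts the header
//	// produced at height 10.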
+type FraudMaker struct { + t *testing.T + + vals []types.PrivValidator + valSet *types.ValidatorSet + + // height of the invalid header + height int64 + + prevHash bytes.HexBytes +} + +func NewFraudMaker(t *testing.T, height int64, vals []types.PrivValidator, valSet *types.ValidatorSet) *FraudMaker { + return &FraudMaker{ + t: t, + vals: vals, + valSet: valSet, + height: height, + } +} + +func (f *FraudMaker) MakeExtendedHeader(odsSize int, edsStore *eds.Store) header.ConstructFn { + return func( + h *types.Header, + comm *types.Commit, + vals *types.ValidatorSet, + eds *rsmt2d.ExtendedDataSquare, + ) (*header.ExtendedHeader, error) { + if h.Height < f.height { + return header.MakeExtendedHeader(h, comm, vals, eds) + } + + hdr := *h + if h.Height == f.height { + adder := ipld.NewProofsAdder(odsSize) + square := edstest.RandByzantineEDS(f.t, odsSize, nmt.NodeVisitor(adder.VisitFn())) + dah, err := da.NewDataAvailabilityHeader(square) + require.NoError(f.t, err) + hdr.DataHash = dah.Hash() + + ctx := ipld.CtxWithProofsAdder(context.Background(), adder) + require.NoError(f.t, edsStore.Put(ctx, h.DataHash.Bytes(), square)) + + *eds = *square + } + if h.Height > f.height { + hdr.LastBlockID.Hash = f.prevHash + } + + blockID := comm.BlockID + blockID.Hash = hdr.Hash() + voteSet := types.NewVoteSet(hdr.ChainID, hdr.Height, 0, tmproto.PrecommitType, f.valSet) + commit, err := headertest.MakeCommit(blockID, hdr.Height, 0, voteSet, f.vals, time.Now()) + require.NoError(f.t, err) + + *h = hdr + *comm = *commit + f.prevHash = h.Hash() + return header.MakeExtendedHeader(h, comm, vals, eds) + } +} +func CreateFraudExtHeader( + t *testing.T, + eh *header.ExtendedHeader, + serv blockservice.BlockService, +) *header.ExtendedHeader { + square := edstest.RandByzantineEDS(t, len(eh.DAH.RowRoots)) + err := ipld.ImportEDS(context.Background(), square, serv) + require.NoError(t, err) + dah, err := da.NewDataAvailabilityHeader(square) + require.NoError(t, err) + eh.DAH = &dah + eh.RawHeader.DataHash = dah.Hash() + return eh +} diff --git a/header/headertest/serde_test.go b/header/headertest/serde_test.go new file mode 100644 index 0000000000..d06f7dfd41 --- /dev/null +++ b/header/headertest/serde_test.go @@ -0,0 +1,41 @@ +package headertest + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/celestiaorg/celestia-node/header" +) + +func TestMarshalUnmarshalExtendedHeader(t *testing.T) { + in := RandExtendedHeader(t) + binaryData, err := in.MarshalBinary() + require.NoError(t, err) + + out := &header.ExtendedHeader{} + err = out.UnmarshalBinary(binaryData) + require.NoError(t, err) + equalExtendedHeader(t, in, out) + + // A custom JSON marshal/unmarshal is necessary which wraps the ValidatorSet with amino + // encoding, to be able to unmarshal the crypto.PubKey type back from JSON. 
+ jsonData, err := in.MarshalJSON() + require.NoError(t, err) + + out = &header.ExtendedHeader{} + err = out.UnmarshalJSON(jsonData) + require.NoError(t, err) + equalExtendedHeader(t, in, out) +} + +func equalExtendedHeader(t *testing.T, in, out *header.ExtendedHeader) { + // ValidatorSet.totalVotingPower is not set (is a cached value that can be recomputed client side) + assert.Equal(t, in.ValidatorSet.Validators, out.ValidatorSet.Validators) + assert.Equal(t, in.ValidatorSet.Proposer, out.ValidatorSet.Proposer) + assert.True(t, in.DAH.Equals(out.DAH)) + // note: no equality check on the RawHeader, as time.Time is not serialized exactly 1:1 + assert.NotZero(t, out.RawHeader) + assert.NotNil(t, out.Commit) +} diff --git a/header/headertest/testing.go b/header/headertest/testing.go new file mode 100644 index 0000000000..e97f7f7825 --- /dev/null +++ b/header/headertest/testing.go @@ -0,0 +1,330 @@ +package headertest + +import ( + "crypto/rand" + "fmt" + mrand "math/rand" + "sort" + "testing" + "time" + + "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/crypto/tmhash" + "github.com/tendermint/tendermint/libs/bytes" + tmrand "github.com/tendermint/tendermint/libs/rand" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" + "github.com/tendermint/tendermint/proto/tendermint/version" + "github.com/tendermint/tendermint/types" + tmtime "github.com/tendermint/tendermint/types/time" + + "github.com/celestiaorg/celestia-app/pkg/da" + libhead "github.com/celestiaorg/go-header" + "github.com/celestiaorg/go-header/headertest" + "github.com/celestiaorg/rsmt2d" + + "github.com/celestiaorg/celestia-node/header" + "github.com/celestiaorg/celestia-node/share" +) + +// TestSuite provides everything you need to test a chain of Headers. +// If it doesn't, please don't hesitate to extend it for your case. +type TestSuite struct { + t *testing.T + + vals []types.PrivValidator + valSet *types.ValidatorSet + valPntr int + + head *header.ExtendedHeader +} + +func NewStore(t *testing.T) libhead.Store[*header.ExtendedHeader] { + return headertest.NewStore[*header.ExtendedHeader](t, NewTestSuite(t, 3), 10) +} + +// NewTestSuite sets up a new test suite with a given number of validators. 
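// A hedged sketch of the intended use: generate a deterministic chain of
// headers signed by the suite's validators and verify adjacency, mirroring
// what verify_test.go below exercises.
//
//	suite := headertest.NewTestSuite(t, 3)
//	headers := suite.GenExtendedHeaders(10)
//	require.NoError(t, headers[0].Verify(headers[1]))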
+func NewTestSuite(t *testing.T, num int) *TestSuite { + valSet, vals := RandValidatorSet(num, 10) + return &TestSuite{ + t: t, + vals: vals, + valSet: valSet, + } +} + +func (s *TestSuite) genesis() *header.ExtendedHeader { + dah := share.EmptyRoot() + + gen := RandRawHeader(s.t) + + gen.DataHash = dah.Hash() + gen.ValidatorsHash = s.valSet.Hash() + gen.NextValidatorsHash = s.valSet.Hash() + gen.Height = 1 + voteSet := types.NewVoteSet(gen.ChainID, gen.Height, 0, tmproto.PrecommitType, s.valSet) + blockID := RandBlockID(s.t) + blockID.Hash = gen.Hash() + commit, err := MakeCommit(blockID, gen.Height, 0, voteSet, s.vals, time.Now()) + require.NoError(s.t, err) + + eh := &header.ExtendedHeader{ + RawHeader: *gen, + Commit: commit, + ValidatorSet: s.valSet, + DAH: dah, + } + require.NoError(s.t, eh.Validate()) + return eh +} + +func MakeCommit(blockID types.BlockID, height int64, round int32, + voteSet *types.VoteSet, validators []types.PrivValidator, now time.Time) (*types.Commit, error) { + + // all sign + for i := 0; i < len(validators); i++ { + pubKey, err := validators[i].GetPubKey() + if err != nil { + return nil, fmt.Errorf("can't get pubkey: %w", err) + } + vote := &types.Vote{ + ValidatorAddress: pubKey.Address(), + ValidatorIndex: int32(i), + Height: height, + Round: round, + Type: tmproto.PrecommitType, + BlockID: blockID, + Timestamp: now, + } + + _, err = signAddVote(validators[i], vote, voteSet) + if err != nil { + return nil, err + } + } + + return voteSet.MakeCommit(), nil +} + +func signAddVote(privVal types.PrivValidator, vote *types.Vote, voteSet *types.VoteSet) (signed bool, err error) { + v := vote.ToProto() + err = privVal.SignVote(voteSet.ChainID(), v) + if err != nil { + return false, err + } + vote.Signature = v.Signature + return voteSet.AddVote(vote) +} + +func (s *TestSuite) Head() *header.ExtendedHeader { + if s.head == nil { + s.head = s.genesis() + } + return s.head +} + +func (s *TestSuite) GenExtendedHeaders(num int) []*header.ExtendedHeader { + headers := make([]*header.ExtendedHeader, num) + for i := range headers { + headers[i] = s.NextHeader() + } + return headers +} + +var _ headertest.Generator[*header.ExtendedHeader] = &TestSuite{} + +func (s *TestSuite) NextHeader() *header.ExtendedHeader { + if s.head == nil { + s.head = s.genesis() + return s.head + } + + dah := share.EmptyRoot() + height := s.Head().Height() + 1 + rh := s.GenRawHeader(height, s.Head().Hash(), libhead.Hash(s.Head().Commit.Hash()), dah.Hash()) + s.head = &header.ExtendedHeader{ + RawHeader: *rh, + Commit: s.Commit(rh), + ValidatorSet: s.valSet, + DAH: dah, + } + require.NoError(s.t, s.head.Validate()) + return s.head +} + +func (s *TestSuite) GenRawHeader( + height uint64, lastHeader, lastCommit, dataHash libhead.Hash) *header.RawHeader { + rh := RandRawHeader(s.t) + rh.Height = int64(height) + rh.Time = time.Now() + rh.LastBlockID = types.BlockID{Hash: bytes.HexBytes(lastHeader)} + rh.LastCommitHash = bytes.HexBytes(lastCommit) + rh.DataHash = bytes.HexBytes(dataHash) + rh.ValidatorsHash = s.valSet.Hash() + rh.NextValidatorsHash = s.valSet.Hash() + rh.ProposerAddress = s.nextProposer().Address + return rh +} + +func (s *TestSuite) Commit(h *header.RawHeader) *types.Commit { + bid := types.BlockID{ + Hash: h.Hash(), + // Unfortunately, we still have to commit PartSetHeader even though we don't need it in Celestia + PartSetHeader: types.PartSetHeader{Total: 1, Hash: tmrand.Bytes(32)}, + } + round := int32(0) + comms := make([]types.CommitSig, len(s.vals)) + for i, val := range s.vals { + v 
+		v := &types.Vote{
+			ValidatorAddress: s.valSet.Validators[i].Address,
+			ValidatorIndex:   int32(i),
+			Height:           h.Height,
+			Round:            round,
+			Timestamp:        tmtime.Now(),
+			Type:             tmproto.PrecommitType,
+			BlockID:          bid,
+		}
+		sgntr, err := val.(types.MockPV).PrivKey.Sign(types.VoteSignBytes(h.ChainID, v.ToProto()))
+		require.Nil(s.t, err)
+		v.Signature = sgntr
+		comms[i] = v.CommitSig()
+	}
+
+	return types.NewCommit(h.Height, round, bid, comms)
+}
+
+func (s *TestSuite) nextProposer() *types.Validator {
+	if s.valPntr == len(s.valSet.Validators)-1 {
+		s.valPntr = 0
+	} else {
+		s.valPntr++
+	}
+	val := s.valSet.Validators[s.valPntr]
+	return val
+}
+
+// RandExtendedHeader provides an ExtendedHeader fixture.
+func RandExtendedHeader(t testing.TB) *header.ExtendedHeader {
+	dah := share.EmptyRoot()
+
+	rh := RandRawHeader(t)
+	rh.DataHash = dah.Hash()
+
+	valSet, vals := RandValidatorSet(3, 1)
+	rh.ValidatorsHash = valSet.Hash()
+	voteSet := types.NewVoteSet(rh.ChainID, rh.Height, 0, tmproto.PrecommitType, valSet)
+	blockID := RandBlockID(t)
+	blockID.Hash = rh.Hash()
+	commit, err := MakeCommit(blockID, rh.Height, 0, voteSet, vals, time.Now())
+	require.NoError(t, err)
+
+	return &header.ExtendedHeader{
+		RawHeader:    *rh,
+		Commit:       commit,
+		ValidatorSet: valSet,
+		DAH:          dah,
+	}
+}
+
+func RandExtendedHeaderWithRoot(t testing.TB, dah *da.DataAvailabilityHeader) *header.ExtendedHeader {
+	h := RandExtendedHeader(t)
+	h.DataHash = dah.Hash()
+	h.DAH = dah
+	return h
+}
+
+func RandValidatorSet(numValidators int, votingPower int64) (*types.ValidatorSet, []types.PrivValidator) {
+	var (
+		valz           = make([]*types.Validator, numValidators)
+		privValidators = make([]types.PrivValidator, numValidators)
+	)
+
+	for i := 0; i < numValidators; i++ {
+		val, privValidator := RandValidator(false, votingPower)
+		valz[i] = val
+		privValidators[i] = privValidator
+	}
+
+	sort.Sort(types.PrivValidatorsByAddress(privValidators))
+
+	return types.NewValidatorSet(valz), privValidators
}
+
+func RandValidator(randPower bool, minPower int64) (*types.Validator, types.PrivValidator) {
+	privVal := types.NewMockPV()
+	votePower := minPower
+	if randPower {
+		votePower += int64(mrand.Uint32()) //nolint:gosec
+	}
+	pubKey, err := privVal.GetPubKey()
+	if err != nil {
+		panic(fmt.Errorf("could not retrieve pubkey: %w", err))
+	}
+	val := types.NewValidator(pubKey, votePower)
+	return val, privVal
+}
+
+// RandRawHeader provides a RawHeader fixture.
+func RandRawHeader(t testing.TB) *header.RawHeader {
+	return &header.RawHeader{
+		Version:            version.Consensus{Block: 11, App: 1},
+		ChainID:            "test",
+		Height:             mrand.Int63(), //nolint:gosec
+		Time:               time.Now(),
+		LastBlockID:        RandBlockID(t),
+		LastCommitHash:     tmrand.Bytes(32),
+		DataHash:           tmrand.Bytes(32),
+		ValidatorsHash:     tmrand.Bytes(32),
+		NextValidatorsHash: tmrand.Bytes(32),
+		ConsensusHash:      tmrand.Bytes(32),
+		AppHash:            tmrand.Bytes(32),
+		LastResultsHash:    tmrand.Bytes(32),
+		EvidenceHash:       tmhash.Sum([]byte{}),
+		ProposerAddress:    tmrand.Bytes(20),
+	}
+}
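+
+// Usage sketch (illustrative only): the fixtures above compose into a
+// self-consistent header, e.g.
+//
+//	eh := RandExtendedHeader(t)                             // commit and validator set match the raw header
+//	ehr := RandExtendedHeaderWithRoot(t, share.EmptyRoot()) // the same, over a caller-chosen DAH
+
+// RandBlockID provides a BlockID fixture.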
+func RandBlockID(testing.TB) types.BlockID { + bid := types.BlockID{ + Hash: make([]byte, 32), + PartSetHeader: types.PartSetHeader{ + Total: 123, + Hash: make([]byte, 32), + }, + } + _, _ = rand.Read(bid.Hash) + _, _ = rand.Read(bid.PartSetHeader.Hash) + return bid +} + +func ExtendedHeaderFromEDS(t testing.TB, height uint64, eds *rsmt2d.ExtendedDataSquare) *header.ExtendedHeader { + valSet, vals := RandValidatorSet(10, 10) + gen := RandRawHeader(t) + dah, err := share.NewRoot(eds) + require.NoError(t, err) + + gen.DataHash = dah.Hash() + gen.ValidatorsHash = valSet.Hash() + gen.NextValidatorsHash = valSet.Hash() + gen.Height = int64(height) + blockID := RandBlockID(t) + blockID.Hash = gen.Hash() + voteSet := types.NewVoteSet(gen.ChainID, gen.Height, 0, tmproto.PrecommitType, valSet) + commit, err := MakeCommit(blockID, gen.Height, 0, voteSet, vals, time.Now()) + require.NoError(t, err) + + eh := &header.ExtendedHeader{ + RawHeader: *gen, + Commit: commit, + ValidatorSet: valSet, + DAH: dah, + } + require.NoError(t, eh.Validate()) + return eh +} + +type Subscriber struct { + headertest.Subscriber[*header.ExtendedHeader] +} + +var _ libhead.Subscriber[*header.ExtendedHeader] = &Subscriber{} diff --git a/header/headertest/verify_test.go b/header/headertest/verify_test.go new file mode 100644 index 0000000000..82795ca5ff --- /dev/null +++ b/header/headertest/verify_test.go @@ -0,0 +1,63 @@ +package headertest + +import ( + "errors" + "strconv" + "testing" + + "github.com/stretchr/testify/assert" + tmrand "github.com/tendermint/tendermint/libs/rand" + + "github.com/celestiaorg/celestia-node/header" +) + +func TestVerify(t *testing.T) { + h := NewTestSuite(t, 2).GenExtendedHeaders(3) + trusted, untrustedAdj, untrustedNonAdj := h[0], h[1], h[2] + tests := []struct { + prepare func() *header.ExtendedHeader + err error + }{ + { + prepare: func() *header.ExtendedHeader { return untrustedAdj }, + err: nil, + }, + { + prepare: func() *header.ExtendedHeader { + return untrustedNonAdj + }, + err: nil, + }, + { + prepare: func() *header.ExtendedHeader { + untrusted := *untrustedAdj + untrusted.ValidatorsHash = tmrand.Bytes(32) + return &untrusted + }, + err: header.ErrValidatorHashMismatch, + }, + { + prepare: func() *header.ExtendedHeader { + untrusted := *untrustedAdj + untrusted.RawHeader.LastBlockID.Hash = tmrand.Bytes(32) + return &untrusted + }, + err: header.ErrLastHeaderHashMismatch, + }, + { + prepare: func() *header.ExtendedHeader { + untrusted := *untrustedNonAdj + untrusted.Commit = NewTestSuite(t, 2).Commit(RandRawHeader(t)) + return &untrusted + }, + err: header.ErrVerifyCommitLightTrustingFailed, + }, + } + + for i, test := range tests { + t.Run(strconv.Itoa(i), func(t *testing.T) { + err := trusted.Verify(test.prepare()) + assert.ErrorIs(t, errors.Unwrap(err), test.err) + }) + } +} diff --git a/header/interface.go b/header/interface.go deleted file mode 100644 index b8debed8e0..0000000000 --- a/header/interface.go +++ /dev/null @@ -1,118 +0,0 @@ -package header - -import ( - "context" - "errors" - "fmt" - - "github.com/ipfs/go-blockservice" - pubsub "github.com/libp2p/go-libp2p-pubsub" - tmbytes "github.com/tendermint/tendermint/libs/bytes" - core "github.com/tendermint/tendermint/types" -) - -// ConstructFn aliases a function that creates an ExtendedHeader. -type ConstructFn = func( - context.Context, - *core.Block, - *core.Commit, - *core.ValidatorSet, - blockservice.BlockService, -) (*ExtendedHeader, error) - -// Validator aliases a func that validates ExtendedHeader. 
-type Validator = func(context.Context, *ExtendedHeader) pubsub.ValidationResult - -// Subscriber encompasses the behavior necessary to -// subscribe/unsubscribe from new ExtendedHeader events from the -// network. -type Subscriber interface { - // Subscribe creates long-living Subscription for validated ExtendedHeaders. - // Multiple Subscriptions can be created. - Subscribe() (Subscription, error) - // AddValidator registers a Validator for all Subscriptions. - // Registered Validators screen ExtendedHeaders for their validity - // before they are sent through Subscriptions. - // Multiple validators can be registered. - AddValidator(Validator) error - // Stop removes header-sub validator and closes the topic. - Stop(context.Context) error -} - -// Subscription can retrieve the next ExtendedHeader from the -// network. -type Subscription interface { - // NextHeader returns the newest verified and valid ExtendedHeader - // in the network. - NextHeader(ctx context.Context) (*ExtendedHeader, error) - // Cancel cancels the subscription. - Cancel() -} - -// Broadcaster broadcasts an ExtendedHeader to the network. -type Broadcaster interface { - Broadcast(ctx context.Context, header *ExtendedHeader, opts ...pubsub.PubOpt) error -} - -// Exchange encompasses the behavior necessary to request ExtendedHeaders -// from the network. -type Exchange interface { - Getter -} - -var ( - // ErrNotFound is returned when there is no requested header. - ErrNotFound = errors.New("header: not found") - - // ErrNoHead is returned when Store is empty (does not contain any known header). - ErrNoHead = fmt.Errorf("header/store: no chain head") - - // ErrNonAdjacent is returned when Store is appended with a header not adjacent to the stored head. - ErrNonAdjacent = fmt.Errorf("header/store: non-adjacent") -) - -// Store encompasses the behavior necessary to store and retrieve ExtendedHeaders -// from a node's local storage. -type Store interface { - // Start starts the store. - Start(context.Context) error - - // Stop stops the store by preventing further writes - // and waiting till the ongoing ones are done. - Stop(context.Context) error - - // Getter encompasses all getter methods for headers. - Getter - - // Init initializes Store with the given head, meaning it is initialized with the genesis header. - Init(context.Context, *ExtendedHeader) error - - // Height reports current height of the chain head. - Height() uint64 - - // Has checks whether ExtendedHeader is already stored. - Has(context.Context, tmbytes.HexBytes) (bool, error) - - // Append stores and verifies the given ExtendedHeader(s). - // It requires them to be adjacent and in ascending order, - // as it applies them contiguously on top of the current head height. - // It returns the amount of successfully applied headers, - // so caller can understand what given header was invalid, if any. - Append(context.Context, ...*ExtendedHeader) (int, error) -} - -// Getter contains the behavior necessary for a component to retrieve -// headers that have been processed during header sync. -type Getter interface { - // Head returns the ExtendedHeader of the chain head. - Head(context.Context) (*ExtendedHeader, error) - - // Get returns the ExtendedHeader corresponding to the given hash. - Get(context.Context, tmbytes.HexBytes) (*ExtendedHeader, error) - - // GetByHeight returns the ExtendedHeader corresponding to the given block height. 
- GetByHeight(context.Context, uint64) (*ExtendedHeader, error) - - // GetRangeByHeight returns the given range [from:to) of ExtendedHeaders. - GetRangeByHeight(ctx context.Context, from, to uint64) ([]*ExtendedHeader, error) -} diff --git a/header/local/exchange.go b/header/local/exchange.go deleted file mode 100644 index 9b284930a4..0000000000 --- a/header/local/exchange.go +++ /dev/null @@ -1,48 +0,0 @@ -package local - -import ( - "context" - - "github.com/tendermint/tendermint/libs/bytes" - - "github.com/celestiaorg/celestia-node/header" -) - -// NewExchange is a simple Exchange that reads Headers from Store without any networking. -type Exchange struct { - store header.Store -} - -// NewExchange creates a new local Exchange. -func NewExchange(store header.Store) header.Exchange { - return &Exchange{ - store: store, - } -} - -func (l *Exchange) Start(context.Context) error { - return nil -} - -func (l *Exchange) Stop(context.Context) error { - return nil -} - -func (l *Exchange) Head(ctx context.Context) (*header.ExtendedHeader, error) { - return l.store.Head(ctx) -} - -func (l *Exchange) GetByHeight(ctx context.Context, height uint64) (*header.ExtendedHeader, error) { - return l.store.GetByHeight(ctx, height) -} - -func (l *Exchange) GetRangeByHeight(ctx context.Context, origin, amount uint64) ([]*header.ExtendedHeader, error) { - if amount == 0 { - return nil, nil - } - return l.store.GetRangeByHeight(ctx, origin, origin+amount) -} - -func (l *Exchange) Get(ctx context.Context, hash bytes.HexBytes) (*header.ExtendedHeader, error) { - return l.store.Get(ctx, hash) -} diff --git a/header/metrics.go b/header/metrics.go deleted file mode 100644 index 93407b1147..0000000000 --- a/header/metrics.go +++ /dev/null @@ -1,43 +0,0 @@ -package header - -import ( - "context" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/metric/global" - "go.opentelemetry.io/otel/metric/instrument" - "go.opentelemetry.io/otel/metric/unit" -) - -var meter = global.MeterProvider().Meter("header") - -// MonitorHead enables Otel metrics to monitor head. -func MonitorHead(store Store) { - headC, _ := meter.AsyncInt64().Counter( - "head", - instrument.WithUnit(unit.Dimensionless), - instrument.WithDescription("Subjective head of the node"), - ) - - err := meter.RegisterCallback( - []instrument.Asynchronous{ - headC, - }, - func(ctx context.Context) { - head, err := store.Head(ctx) - if err != nil { - headC.Observe(ctx, 0, attribute.String("err", err.Error())) - return - } - - headC.Observe( - ctx, - head.Height, - attribute.Int("square_size", len(head.DAH.RowsRoots)), - ) - }, - ) - if err != nil { - panic(err) - } -} diff --git a/header/p2p/exchange.go b/header/p2p/exchange.go deleted file mode 100644 index 26da41f831..0000000000 --- a/header/p2p/exchange.go +++ /dev/null @@ -1,162 +0,0 @@ -package p2p - -import ( - "bytes" - "context" - "fmt" - "math/rand" - - logging "github.com/ipfs/go-log/v2" - - "github.com/libp2p/go-libp2p-core/host" - "github.com/libp2p/go-libp2p-core/peer" - "github.com/libp2p/go-libp2p-core/protocol" - tmbytes "github.com/tendermint/tendermint/libs/bytes" - - "github.com/celestiaorg/celestia-node/header" - p2p_pb "github.com/celestiaorg/celestia-node/header/p2p/pb" - header_pb "github.com/celestiaorg/celestia-node/header/pb" - "github.com/celestiaorg/celestia-node/params" - "github.com/celestiaorg/go-libp2p-messenger/serde" -) - -var log = logging.Logger("header/p2p") - -// PubSubTopic hardcodes the name of the ExtendedHeader -// gossipsub topic. 
-const PubSubTopic = "header-sub" - -var exchangeProtocolID = protocol.ID(fmt.Sprintf("/header-ex/v0.0.2/%s", params.DefaultNetwork())) - -// Exchange enables sending outbound ExtendedHeaderRequests to the network as well as -// handling inbound ExtendedHeaderRequests from the network. -type Exchange struct { - host host.Host - - trustedPeers peer.IDSlice -} - -func NewExchange(host host.Host, peers peer.IDSlice) *Exchange { - return &Exchange{ - host: host, - trustedPeers: peers, - } -} - -// Head requests the latest ExtendedHeader. Note that the ExtendedHeader -// must be verified thereafter. -func (ex *Exchange) Head(ctx context.Context) (*header.ExtendedHeader, error) { - log.Debug("requesting head") - // create request - req := &p2p_pb.ExtendedHeaderRequest{ - Origin: uint64(0), - Amount: 1, - } - headers, err := ex.performRequest(ctx, req) - if err != nil { - return nil, err - } - return headers[0], nil -} - -// GetByHeight performs a request for the ExtendedHeader at the given -// height to the network. Note that the ExtendedHeader must be verified -// thereafter. -func (ex *Exchange) GetByHeight(ctx context.Context, height uint64) (*header.ExtendedHeader, error) { - log.Debugw("requesting header", "height", height) - // sanity check height - if height == 0 { - return nil, fmt.Errorf("specified request height must be greater than 0") - } - // create request - req := &p2p_pb.ExtendedHeaderRequest{ - Origin: height, - Amount: 1, - } - headers, err := ex.performRequest(ctx, req) - if err != nil { - return nil, err - } - return headers[0], nil -} - -// GetRangeByHeight performs a request for the given range of ExtendedHeaders -// to the network. Note that the ExtendedHeaders must be verified thereafter. -func (ex *Exchange) GetRangeByHeight(ctx context.Context, from, amount uint64) ([]*header.ExtendedHeader, error) { - log.Debugw("requesting headers", "from", from, "to", from+amount) - // create request - req := &p2p_pb.ExtendedHeaderRequest{ - Origin: from, - Amount: amount, - } - return ex.performRequest(ctx, req) -} - -// Get performs a request for the ExtendedHeader by the given hash corresponding -// to the RawHeader. Note that the ExtendedHeader must be verified thereafter. 
-func (ex *Exchange) Get(ctx context.Context, hash tmbytes.HexBytes) (*header.ExtendedHeader, error) { - log.Debugw("requesting header", "hash", hash.String()) - // create request - req := &p2p_pb.ExtendedHeaderRequest{ - Hash: hash.Bytes(), - Amount: 1, - } - headers, err := ex.performRequest(ctx, req) - if err != nil { - return nil, err - } - - if !bytes.Equal(headers[0].Hash().Bytes(), hash) { - return nil, fmt.Errorf("incorrect hash in header: expected %x, got %x", hash, headers[0].Hash().Bytes()) - } - return headers[0], nil -} - -func (ex *Exchange) performRequest( - ctx context.Context, - req *p2p_pb.ExtendedHeaderRequest, -) ([]*header.ExtendedHeader, error) { - if req.Amount == 0 { - return make([]*header.ExtendedHeader, 0), nil - } - - if len(ex.trustedPeers) == 0 { - return nil, fmt.Errorf("no trusted peers") - } - - // nolint:gosec // G404: Use of weak random number generator - index := rand.Intn(len(ex.trustedPeers)) - stream, err := ex.host.NewStream(ctx, ex.trustedPeers[index], exchangeProtocolID) - if err != nil { - return nil, err - } - // send request - _, err = serde.Write(stream, req) - if err != nil { - stream.Reset() //nolint:errcheck - return nil, err - } - // read responses - headers := make([]*header.ExtendedHeader, req.Amount) - for i := 0; i < int(req.Amount); i++ { - resp := new(header_pb.ExtendedHeader) - _, err := serde.Read(stream, resp) - if err != nil { - stream.Reset() //nolint:errcheck - return nil, err - } - - header, err := header.ProtoToExtendedHeader(resp) - if err != nil { - stream.Reset() //nolint:errcheck - return nil, err - } - - headers[i] = header - } - // ensure at least one header was retrieved - if len(headers) == 0 { - return nil, header.ErrNotFound - } - return headers, stream.Close() -} diff --git a/header/p2p/exchange_test.go b/header/p2p/exchange_test.go deleted file mode 100644 index c97cf5f4f2..0000000000 --- a/header/p2p/exchange_test.go +++ /dev/null @@ -1,192 +0,0 @@ -package p2p - -import ( - "bytes" - "context" - "testing" - - libhost "github.com/libp2p/go-libp2p-core/host" - "github.com/libp2p/go-libp2p-core/peer" - mocknet "github.com/libp2p/go-libp2p/p2p/net/mock" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - tmbytes "github.com/tendermint/tendermint/libs/bytes" - - "github.com/celestiaorg/celestia-node/header" - p2p_pb "github.com/celestiaorg/celestia-node/header/p2p/pb" - header_pb "github.com/celestiaorg/celestia-node/header/pb" - "github.com/celestiaorg/go-libp2p-messenger/serde" -) - -func TestExchange_RequestHead(t *testing.T) { - host, peer := createMocknet(t) - exchg, store := createP2PExAndServer(t, host, peer) - // perform header request - header, err := exchg.Head(context.Background()) - require.NoError(t, err) - - assert.Equal(t, store.headers[store.headHeight].Height, header.Height) - assert.Equal(t, store.headers[store.headHeight].Hash(), header.Hash()) -} - -func TestExchange_RequestHeader(t *testing.T) { - host, peer := createMocknet(t) - exchg, store := createP2PExAndServer(t, host, peer) - // perform expected request - header, err := exchg.GetByHeight(context.Background(), 5) - require.NoError(t, err) - assert.Equal(t, store.headers[5].Height, header.Height) - assert.Equal(t, store.headers[5].Hash(), header.Hash()) -} - -func TestExchange_RequestHeaders(t *testing.T) { - host, peer := createMocknet(t) - exchg, store := createP2PExAndServer(t, host, peer) - // perform expected request - gotHeaders, err := exchg.GetRangeByHeight(context.Background(), 1, 5) - 
require.NoError(t, err) - for _, got := range gotHeaders { - assert.Equal(t, store.headers[got.Height].Height, got.Height) - assert.Equal(t, store.headers[got.Height].Hash(), got.Hash()) - } -} - -// TestExchange_RequestByHash tests that the Exchange instance can -// respond to an ExtendedHeaderRequest for a hash instead of a height. -func TestExchange_RequestByHash(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - net, err := mocknet.FullMeshConnected(2) - require.NoError(t, err) - // get host and peer - host, peer := net.Hosts()[0], net.Hosts()[1] - // create and start the ExchangeServer - store := createStore(t, 5) - serv := NewExchangeServer(host, store) - err = serv.Start(ctx) - require.NoError(t, err) - t.Cleanup(func() { - serv.Stop(context.Background()) //nolint:errcheck - }) - - // start a new stream via Peer to see if Host can handle inbound requests - stream, err := peer.NewStream(context.Background(), libhost.InfoFromHost(host).ID, exchangeProtocolID) - require.NoError(t, err) - // create request for a header at a random height - reqHeight := store.headHeight - 2 - req := &p2p_pb.ExtendedHeaderRequest{ - Hash: store.headers[reqHeight].Hash(), - Amount: 1, - } - // send request - _, err = serde.Write(stream, req) - require.NoError(t, err) - // read resp - resp := new(header_pb.ExtendedHeader) - _, err = serde.Read(stream, resp) - require.NoError(t, err) - // compare - eh, err := header.ProtoToExtendedHeader(resp) - require.NoError(t, err) - - assert.Equal(t, store.headers[reqHeight].Height, eh.Height) - assert.Equal(t, store.headers[reqHeight].Hash(), eh.Hash()) -} - -func createMocknet(t *testing.T) (libhost.Host, libhost.Host) { - net, err := mocknet.FullMeshConnected(2) - require.NoError(t, err) - // get host and peer - return net.Hosts()[0], net.Hosts()[1] -} - -// createP2PExAndServer creates a Exchange with 5 headers already in its store. 
-func createP2PExAndServer(t *testing.T, host, tpeer libhost.Host) (header.Exchange, *mockStore) { - store := createStore(t, 5) - serverSideEx := NewExchangeServer(tpeer, store) - err := serverSideEx.Start(context.Background()) - require.NoError(t, err) - - t.Cleanup(func() { - serverSideEx.Stop(context.Background()) //nolint:errcheck - }) - - return NewExchange(host, []peer.ID{tpeer.ID()}), store -} - -type mockStore struct { - headers map[int64]*header.ExtendedHeader - headHeight int64 -} - -// createStore creates a mock store and adds several random -// headers -func createStore(t *testing.T, numHeaders int) *mockStore { - store := &mockStore{ - headers: make(map[int64]*header.ExtendedHeader), - headHeight: 0, - } - - suite := header.NewTestSuite(t, numHeaders) - - for i := 0; i < numHeaders; i++ { - header := suite.GenExtendedHeader() - store.headers[header.Height] = header - - if header.Height > store.headHeight { - store.headHeight = header.Height - } - } - return store -} - -func (m *mockStore) Init(context.Context, *header.ExtendedHeader) error { return nil } -func (m *mockStore) Start(context.Context) error { return nil } -func (m *mockStore) Stop(context.Context) error { return nil } - -func (m *mockStore) Height() uint64 { - return uint64(m.headHeight) -} - -func (m *mockStore) Head(context.Context) (*header.ExtendedHeader, error) { - return m.headers[m.headHeight], nil -} - -func (m *mockStore) Get(ctx context.Context, hash tmbytes.HexBytes) (*header.ExtendedHeader, error) { - for _, header := range m.headers { - if bytes.Equal(header.Hash(), hash) { - return header, nil - } - } - return nil, nil -} - -func (m *mockStore) GetByHeight(ctx context.Context, height uint64) (*header.ExtendedHeader, error) { - return m.headers[int64(height)], nil -} - -func (m *mockStore) GetRangeByHeight(ctx context.Context, from, to uint64) ([]*header.ExtendedHeader, error) { - headers := make([]*header.ExtendedHeader, to-from) - for i := range headers { - headers[i] = m.headers[int64(from)] - from++ - } - return headers, nil -} - -func (m *mockStore) Has(context.Context, tmbytes.HexBytes) (bool, error) { - return false, nil -} - -func (m *mockStore) Append(ctx context.Context, headers ...*header.ExtendedHeader) (int, error) { - for _, header := range headers { - m.headers[header.Height] = header - // set head - if header.Height > m.headHeight { - m.headHeight = header.Height - } - } - return len(headers), nil -} diff --git a/header/p2p/pb/extended_header_request.pb.go b/header/p2p/pb/extended_header_request.pb.go deleted file mode 100644 index 0a427b2eb3..0000000000 --- a/header/p2p/pb/extended_header_request.pb.go +++ /dev/null @@ -1,389 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: header/p2p/pb/extended_header_request.proto - -package p2p_pb - -import ( - fmt "fmt" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -type ExtendedHeaderRequest struct { - Origin uint64 `protobuf:"varint,1,opt,name=origin,proto3" json:"origin,omitempty"` - Hash []byte `protobuf:"bytes,2,opt,name=hash,proto3" json:"hash,omitempty"` - Amount uint64 `protobuf:"varint,3,opt,name=amount,proto3" json:"amount,omitempty"` -} - -func (m *ExtendedHeaderRequest) Reset() { *m = ExtendedHeaderRequest{} } -func (m *ExtendedHeaderRequest) String() string { return proto.CompactTextString(m) } -func (*ExtendedHeaderRequest) ProtoMessage() {} -func (*ExtendedHeaderRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_ea2a1467b965216e, []int{0} -} -func (m *ExtendedHeaderRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ExtendedHeaderRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ExtendedHeaderRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ExtendedHeaderRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ExtendedHeaderRequest.Merge(m, src) -} -func (m *ExtendedHeaderRequest) XXX_Size() int { - return m.Size() -} -func (m *ExtendedHeaderRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ExtendedHeaderRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ExtendedHeaderRequest proto.InternalMessageInfo - -func (m *ExtendedHeaderRequest) GetOrigin() uint64 { - if m != nil { - return m.Origin - } - return 0 -} - -func (m *ExtendedHeaderRequest) GetHash() []byte { - if m != nil { - return m.Hash - } - return nil -} - -func (m *ExtendedHeaderRequest) GetAmount() uint64 { - if m != nil { - return m.Amount - } - return 0 -} - -func init() { - proto.RegisterType((*ExtendedHeaderRequest)(nil), "p2p.pb.ExtendedHeaderRequest") -} - -func init() { - proto.RegisterFile("header/p2p/pb/extended_header_request.proto", fileDescriptor_ea2a1467b965216e) -} - -var fileDescriptor_ea2a1467b965216e = []byte{ - // 168 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xd2, 0xce, 0x48, 0x4d, 0x4c, - 0x49, 0x2d, 0xd2, 0x2f, 0x30, 0x2a, 0xd0, 0x2f, 0x48, 0xd2, 0x4f, 0xad, 0x28, 0x49, 0xcd, 0x4b, - 0x49, 0x4d, 0x89, 0x87, 0x08, 0xc7, 0x17, 0xa5, 0x16, 0x96, 0xa6, 0x16, 0x97, 0xe8, 0x15, 0x14, - 0xe5, 0x97, 0xe4, 0x0b, 0xb1, 0x15, 0x18, 0x15, 0xe8, 0x15, 0x24, 0x29, 0x45, 0x73, 0x89, 0xba, - 0x42, 0x15, 0x7a, 0x80, 0xd5, 0x05, 0x41, 0x94, 0x09, 0x89, 0x71, 0xb1, 0xe5, 0x17, 0x65, 0xa6, - 0x67, 0xe6, 0x49, 0x30, 0x2a, 0x30, 0x6a, 0xb0, 0x04, 0x41, 0x79, 0x42, 0x42, 0x5c, 0x2c, 0x19, - 0x89, 0xc5, 0x19, 0x12, 0x4c, 0x0a, 0x8c, 0x1a, 0x3c, 0x41, 0x60, 0x36, 0x48, 0x6d, 0x62, 0x6e, - 0x7e, 0x69, 0x5e, 0x89, 0x04, 0x33, 0x44, 0x2d, 0x84, 0xe7, 0x24, 0x71, 0xe2, 0x91, 0x1c, 0xe3, - 0x85, 0x47, 0x72, 0x8c, 0x0f, 0x1e, 0xc9, 0x31, 0x4e, 0x78, 0x2c, 0xc7, 0x70, 0xe1, 0xb1, 0x1c, - 0xc3, 0x8d, 0xc7, 0x72, 0x0c, 0x49, 0x6c, 0x60, 0x57, 0x18, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, - 0x58, 0x4e, 0x0f, 0x06, 0xb4, 0x00, 0x00, 0x00, -} - -func (m *ExtendedHeaderRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ExtendedHeaderRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m 
*ExtendedHeaderRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Amount != 0 { - i = encodeVarintExtendedHeaderRequest(dAtA, i, uint64(m.Amount)) - i-- - dAtA[i] = 0x18 - } - if len(m.Hash) > 0 { - i -= len(m.Hash) - copy(dAtA[i:], m.Hash) - i = encodeVarintExtendedHeaderRequest(dAtA, i, uint64(len(m.Hash))) - i-- - dAtA[i] = 0x12 - } - if m.Origin != 0 { - i = encodeVarintExtendedHeaderRequest(dAtA, i, uint64(m.Origin)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func encodeVarintExtendedHeaderRequest(dAtA []byte, offset int, v uint64) int { - offset -= sovExtendedHeaderRequest(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *ExtendedHeaderRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Origin != 0 { - n += 1 + sovExtendedHeaderRequest(uint64(m.Origin)) - } - l = len(m.Hash) - if l > 0 { - n += 1 + l + sovExtendedHeaderRequest(uint64(l)) - } - if m.Amount != 0 { - n += 1 + sovExtendedHeaderRequest(uint64(m.Amount)) - } - return n -} - -func sovExtendedHeaderRequest(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozExtendedHeaderRequest(x uint64) (n int) { - return sovExtendedHeaderRequest(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *ExtendedHeaderRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowExtendedHeaderRequest - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ExtendedHeaderRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ExtendedHeaderRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Origin", wireType) - } - m.Origin = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowExtendedHeaderRequest - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Origin |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowExtendedHeaderRequest - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthExtendedHeaderRequest - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthExtendedHeaderRequest - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Hash = append(m.Hash[:0], dAtA[iNdEx:postIndex]...) 
- if m.Hash == nil { - m.Hash = []byte{} - } - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Amount", wireType) - } - m.Amount = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowExtendedHeaderRequest - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Amount |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipExtendedHeaderRequest(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthExtendedHeaderRequest - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipExtendedHeaderRequest(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowExtendedHeaderRequest - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowExtendedHeaderRequest - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowExtendedHeaderRequest - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthExtendedHeaderRequest - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupExtendedHeaderRequest - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthExtendedHeaderRequest - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthExtendedHeaderRequest = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowExtendedHeaderRequest = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupExtendedHeaderRequest = fmt.Errorf("proto: unexpected end of group") -) diff --git a/header/p2p/pb/extended_header_request.proto b/header/p2p/pb/extended_header_request.proto deleted file mode 100644 index 8ae8800c58..0000000000 --- a/header/p2p/pb/extended_header_request.proto +++ /dev/null @@ -1,9 +0,0 @@ -syntax = "proto3"; - -package p2p.pb; - -message ExtendedHeaderRequest { - uint64 origin = 1; - bytes hash = 2; - uint64 amount = 3; -} diff --git a/header/p2p/request.go b/header/p2p/request.go deleted file mode 100644 index 74082a36b1..0000000000 --- a/header/p2p/request.go +++ /dev/null @@ -1,40 +0,0 @@ -package p2p - -import ( - "fmt" - - p2p_pb "github.com/celestiaorg/celestia-node/header/p2p/pb" -) - -// ExtendedHeaderRequest is the packet format for nodes to request ExtendedHeaders -// from the network. 
-type ExtendedHeaderRequest struct { - Origin uint64 // block height from which to request ExtendedHeaders - Amount uint64 // amount of desired ExtendedHeaders starting from Origin, syncing in ascending order -} - -// MarshalBinary marshals ExtendedHeaderRequest to binary. -func (ehr *ExtendedHeaderRequest) MarshalBinary() ([]byte, error) { - return MarshalExtendedHeaderRequest(ehr) -} - -func (ehr *ExtendedHeaderRequest) UnmarshalBinary(data []byte) error { - if ehr == nil { - return fmt.Errorf("header: cannot UnmarshalBinary - nil ExtendedHeader") - } - - out, err := UnmarshalExtendedHeaderRequest(data) - if err != nil { - return err - } - - *ehr = *out - return nil -} - -func (ehr *ExtendedHeaderRequest) ToProto() *p2p_pb.ExtendedHeaderRequest { - return &p2p_pb.ExtendedHeaderRequest{ - Origin: ehr.Origin, - Amount: ehr.Amount, - } -} diff --git a/header/p2p/serde.go b/header/p2p/serde.go deleted file mode 100644 index 4658867bbb..0000000000 --- a/header/p2p/serde.go +++ /dev/null @@ -1,27 +0,0 @@ -package p2p - -import p2p_pb "github.com/celestiaorg/celestia-node/header/p2p/pb" - -// MarshalExtendedHeaderRequest serializes the given ExtendedHeaderRequest to bytes using protobuf. -// Paired with UnmarshalExtendedHeaderRequest. -func MarshalExtendedHeaderRequest(in *ExtendedHeaderRequest) ([]byte, error) { - out := &p2p_pb.ExtendedHeaderRequest{ - Origin: in.Origin, - Amount: in.Amount, - } - return out.Marshal() -} - -// UnmarshalExtendedHeaderRequest deserializes given data into a new ExtendedHeader using protobuf. -// Paired with MarshalExtendedHeaderRequest. -func UnmarshalExtendedHeaderRequest(data []byte) (*ExtendedHeaderRequest, error) { - in := &p2p_pb.ExtendedHeaderRequest{} - err := in.Unmarshal(data) - if err != nil { - return nil, err - } - return &ExtendedHeaderRequest{ - Origin: in.Origin, - Amount: in.Amount, - }, nil -} diff --git a/header/p2p/server.go b/header/p2p/server.go deleted file mode 100644 index 98747fd228..0000000000 --- a/header/p2p/server.go +++ /dev/null @@ -1,143 +0,0 @@ -package p2p - -import ( - "context" - - "github.com/libp2p/go-libp2p-core/host" - "github.com/libp2p/go-libp2p-core/network" - - tmbytes "github.com/tendermint/tendermint/libs/bytes" - - "github.com/celestiaorg/celestia-node/header" - p2p_pb "github.com/celestiaorg/celestia-node/header/p2p/pb" - "github.com/celestiaorg/go-libp2p-messenger/serde" -) - -// ExchangeServer represents the server-side component for -// responding to inbound header-related requests. -type ExchangeServer struct { - host host.Host - store header.Store - - ctx context.Context - cancel context.CancelFunc -} - -// NewExchangeServer returns a new P2P server that handles inbound -// header-related requests. -func NewExchangeServer(host host.Host, store header.Store) *ExchangeServer { - return &ExchangeServer{ - host: host, - store: store, - } -} - -// Start sets the stream handler for inbound header-related requests. -func (serv *ExchangeServer) Start(context.Context) error { - serv.ctx, serv.cancel = context.WithCancel(context.Background()) - log.Info("server: listening for inbound header requests") - - serv.host.SetStreamHandler(exchangeProtocolID, serv.requestHandler) - - return nil -} - -// Stop removes the stream handler for serving header-related requests. -func (serv *ExchangeServer) Stop(context.Context) error { - log.Info("server: stopping server") - serv.cancel() - serv.host.RemoveStreamHandler(exchangeProtocolID) - return nil -} - -// requestHandler handles inbound ExtendedHeaderRequests. 
-func (serv *ExchangeServer) requestHandler(stream network.Stream) { - // unmarshal request - pbreq := new(p2p_pb.ExtendedHeaderRequest) - _, err := serde.Read(stream, pbreq) - if err != nil { - log.Errorw("server: reading header request from stream", "err", err) - stream.Reset() //nolint:errcheck - return - } - // retrieve and write ExtendedHeaders - if pbreq.Hash != nil { - serv.handleRequestByHash(pbreq.Hash, stream) - } else { - serv.handleRequest(pbreq.Origin, pbreq.Origin+pbreq.Amount, stream) - } - - err = stream.Close() - if err != nil { - log.Errorw("while closing inbound stream", "err", err) - } -} - -// handleRequestByHash returns the ExtendedHeader at the given hash -// if it exists. -func (serv *ExchangeServer) handleRequestByHash(hash []byte, stream network.Stream) { - log.Debugw("server: handling header request", "hash", tmbytes.HexBytes(hash).String()) - - h, err := serv.store.Get(serv.ctx, hash) - if err != nil { - log.Errorw("server: getting header by hash", "hash", tmbytes.HexBytes(hash).String(), "err", err) - stream.Reset() //nolint:errcheck - return - } - resp, err := header.ExtendedHeaderToProto(h) - if err != nil { - log.Errorw("server: marshaling header to proto", "hash", tmbytes.HexBytes(hash).String(), "err", err) - stream.Reset() //nolint:errcheck - return - } - _, err = serde.Write(stream, resp) - if err != nil { - log.Errorw("server: writing header to stream", "hash", tmbytes.HexBytes(hash).String(), "err", err) - stream.Reset() //nolint:errcheck - return - } -} - -// handleRequest fetches the ExtendedHeader at the given origin and -// writes it to the stream. -func (serv *ExchangeServer) handleRequest(from, to uint64, stream network.Stream) { - var headers []*header.ExtendedHeader - if from == uint64(0) { - log.Debug("server: handling head request") - - head, err := serv.store.Head(serv.ctx) - if err != nil { - log.Errorw("server: getting head", "err", err) - stream.Reset() //nolint:errcheck - return - } - headers = make([]*header.ExtendedHeader, 1) - headers[0] = head - } else { - log.Debugw("server: handling headers request", "from", from, "to", to) - - headersByRange, err := serv.store.GetRangeByHeight(serv.ctx, from, to) - if err != nil { - log.Errorw("server: getting headers", "from", from, "to", to, "err", err) - stream.Reset() //nolint:errcheck - return - } - headers = headersByRange - } - // write all headers to stream - for _, h := range headers { - resp, err := header.ExtendedHeaderToProto(h) - if err != nil { - log.Errorw("server: marshaling header to proto", "height", h.Height, "err", err) - stream.Reset() //nolint:errcheck - return - } - - _, err = serde.Write(stream, resp) - if err != nil { - log.Errorw("server: writing header to stream", "height", h.Height, "err", err) - stream.Reset() //nolint:errcheck - return - } - } -} diff --git a/header/p2p/subscriber.go b/header/p2p/subscriber.go deleted file mode 100644 index 3200e11bcf..0000000000 --- a/header/p2p/subscriber.go +++ /dev/null @@ -1,115 +0,0 @@ -package p2p - -import ( - "context" - "fmt" - - "github.com/libp2p/go-libp2p-core/peer" - pubsub "github.com/libp2p/go-libp2p-pubsub" - pb "github.com/libp2p/go-libp2p-pubsub/pb" - "golang.org/x/crypto/blake2b" - - "github.com/celestiaorg/celestia-node/header" -) - -// Subscriber manages the lifecycle and relationship of header Service -// with the "header-sub" gossipsub topic. 
-type Subscriber struct { - pubsub *pubsub.PubSub - topic *pubsub.Topic -} - -// NewSubscriber returns a Subscriber that manages the header Service's -// relationship with the "header-sub" gossipsub topic. -func NewSubscriber(ps *pubsub.PubSub) *Subscriber { - return &Subscriber{ - pubsub: ps, - } -} - -// Start starts the Subscriber, registering a topic validator for the "header-sub" -// topic and joining it. -func (p *Subscriber) Start(context.Context) (err error) { - p.topic, err = p.pubsub.Join(PubSubTopic, pubsub.WithTopicMessageIdFn(msgID)) - return err -} - -// Stop closes the topic and unregisters its validator. -func (p *Subscriber) Stop(context.Context) error { - err := p.pubsub.UnregisterTopicValidator(PubSubTopic) - if err != nil { - log.Warnf("unregistering validator: %s", err) - } - - return p.topic.Close() -} - -// AddValidator applies basic pubsub validator for the topic. -func (p *Subscriber) AddValidator(val header.Validator) error { - pval := func(ctx context.Context, p peer.ID, msg *pubsub.Message) pubsub.ValidationResult { - maybeHead, err := header.UnmarshalExtendedHeader(msg.Data) - if err != nil { - log.Errorw("unmarshalling header", - "from", p.ShortString(), - "err", err) - return pubsub.ValidationReject - } - msg.ValidatorData = maybeHead - return val(ctx, maybeHead) - } - return p.pubsub.RegisterTopicValidator(PubSubTopic, pval) -} - -// Subscribe returns a new subscription to the Subscriber's -// topic. -func (p *Subscriber) Subscribe() (header.Subscription, error) { - if p.topic == nil { - return nil, fmt.Errorf("header topic is not instantiated, service must be started before subscribing") - } - - return newSubscription(p.topic) -} - -// Broadcast broadcasts the given ExtendedHeader to the topic. -func (p *Subscriber) Broadcast(ctx context.Context, header *header.ExtendedHeader, opts ...pubsub.PubOpt) error { - bin, err := header.MarshalBinary() - if err != nil { - return err - } - return p.topic.Publish(ctx, bin, opts...) -} - -// msgID computes an id for a pubsub message -// TODO(@Wondertan): This cause additional allocations per each recvd message in the topic. Find a way to avoid those. -func msgID(pmsg *pb.Message) string { - mID := func(data []byte) string { - hash := blake2b.Sum256(data) - return string(hash[:]) - } - - h, err := header.UnmarshalExtendedHeader(pmsg.Data) - if err != nil { - // There is nothing we can do about the error, and it will be anyway caught during validation. - // We also *have* to return some ID for the msg, so give the hash of even faulty msg - return mID(pmsg.Data) - } - - // IMPORTANT NOTE: - // Due to the nature of the Tendermint consensus, validators don't necessarily collect commit signatures from the - // entire validator set, but only the minimum required amount of them (>2/3 of voting power). In addition, - // signatures are collected asynchronously. Therefore, each validator may have a different set of signatures that - // pass the minimum required voting power threshold, causing nondeterminism in the header message gossiped over the - // network. Subsequently, this causes message duplicates as each Bridge Node, connected to a personal validator, - // sends the validator's own view of commits of effectively the same header. 
- // - // To solve the problem above, we exclude nondeterministic value from message id calculation - h.Commit.Signatures = nil - - data, err := header.MarshalExtendedHeader(h) - if err != nil { - // See the note under unmarshalling step - return mID(pmsg.Data) - } - - return mID(data) -} diff --git a/header/p2p/subscription.go b/header/p2p/subscription.go deleted file mode 100644 index c419acc661..0000000000 --- a/header/p2p/subscription.go +++ /dev/null @@ -1,53 +0,0 @@ -package p2p - -import ( - "context" - "fmt" - "reflect" - - pubsub "github.com/libp2p/go-libp2p-pubsub" - - "github.com/celestiaorg/celestia-node/header" -) - -// subscription handles retrieving ExtendedHeaders from the header pubsub topic. -type subscription struct { - topic *pubsub.Topic - subscription *pubsub.Subscription -} - -// newSubscription creates a new ExtendedHeader event subscription -// on the given host. -func newSubscription(topic *pubsub.Topic) (*subscription, error) { - sub, err := topic.Subscribe() - if err != nil { - return nil, err - } - - return &subscription{ - topic: topic, - subscription: sub, - }, nil -} - -// NextHeader returns the next (latest) verified ExtendedHeader from the network. -func (s *subscription) NextHeader(ctx context.Context) (*header.ExtendedHeader, error) { - msg, err := s.subscription.Next(ctx) - if err != nil { - return nil, err - } - log.Debugw("received message", "topic", msg.Message.GetTopic(), "sender", msg.ReceivedFrom) - - header, ok := msg.ValidatorData.(*header.ExtendedHeader) - if !ok { - panic(fmt.Sprintf("invalid type received %s", reflect.TypeOf(msg.ValidatorData))) - } - - log.Debugw("received new ExtendedHeader", "height", header.Height, "hash", header.Hash()) - return header, nil -} - -// Cancel cancels the subscription to new ExtendedHeaders from the network. -func (s *subscription) Cancel() { - s.subscription.Cancel() -} diff --git a/header/p2p/subscription_test.go b/header/p2p/subscription_test.go deleted file mode 100644 index a21d80283b..0000000000 --- a/header/p2p/subscription_test.go +++ /dev/null @@ -1,89 +0,0 @@ -package p2p - -import ( - "context" - "testing" - "time" - - "github.com/libp2p/go-libp2p-core/event" - pubsub "github.com/libp2p/go-libp2p-pubsub" - mocknet "github.com/libp2p/go-libp2p/p2p/net/mock" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/celestiaorg/celestia-node/header" -) - -// TestSubscriber tests the header Service's implementation of Subscriber. 
-func TestSubscriber(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), time.Second*15) - defer cancel() - - // create mock network - net, err := mocknet.FullMeshLinked(2) - require.NoError(t, err) - - suite := header.NewTestSuite(t, 3) - - // get mock host and create new gossipsub on it - pubsub1, err := pubsub.NewGossipSub(ctx, net.Hosts()[0], pubsub.WithMessageSignaturePolicy(pubsub.StrictNoSign)) - require.NoError(t, err) - - // create sub-service lifecycles for header service 1 - p2pSub1 := NewSubscriber(pubsub1) - err = p2pSub1.Start(context.Background()) - require.NoError(t, err) - - // get mock host and create new gossipsub on it - pubsub2, err := pubsub.NewGossipSub(ctx, net.Hosts()[1], - pubsub.WithMessageSignaturePolicy(pubsub.StrictNoSign)) - require.NoError(t, err) - - // create sub-service lifecycles for header service 2 - p2pSub2 := NewSubscriber(pubsub2) - err = p2pSub2.Start(context.Background()) - require.NoError(t, err) - - sub0, err := net.Hosts()[0].EventBus().Subscribe(&event.EvtPeerIdentificationCompleted{}) - require.NoError(t, err) - sub1, err := net.Hosts()[1].EventBus().Subscribe(&event.EvtPeerIdentificationCompleted{}) - require.NoError(t, err) - - err = net.ConnectAllButSelf() - require.NoError(t, err) - - // wait on both peer identification events - for i := 0; i < 2; i++ { - select { - case <-sub0.Out(): - case <-sub1.Out(): - case <-ctx.Done(): - assert.FailNow(t, "timeout waiting for peers to connect") - } - } - - // subscribe - _, err = p2pSub2.Subscribe() - require.NoError(t, err) - - p2pSub1.AddValidator(func(context.Context, *header.ExtendedHeader) pubsub.ValidationResult { //nolint:errcheck - return pubsub.ValidationAccept - }) - subscription, err := p2pSub1.Subscribe() - require.NoError(t, err) - - expectedHeader := suite.GenExtendedHeaders(1)[0] - bin, err := expectedHeader.MarshalBinary() - require.NoError(t, err) - - err = p2pSub2.topic.Publish(ctx, bin, pubsub.WithReadiness(pubsub.MinTopicSize(1))) - require.NoError(t, err) - - // get next ExtendedHeader from network - header, err := subscription.NextHeader(ctx) - require.NoError(t, err) - - assert.Equal(t, expectedHeader.Height, header.Height) - assert.Equal(t, expectedHeader.Hash(), header.Hash()) - assert.Equal(t, expectedHeader.DAH.Hash(), header.DAH.Hash()) -} diff --git a/header/pb/extended_header.pb.go b/header/pb/extended_header.pb.go index 5ad2ab89a8..59fdee0d72 100644 --- a/header/pb/extended_header.pb.go +++ b/header/pb/extended_header.pb.go @@ -5,8 +5,8 @@ package header_pb import ( fmt "fmt" + da "github.com/celestiaorg/celestia-app/proto/celestia/da" proto "github.com/gogo/protobuf/proto" - da "github.com/tendermint/tendermint/proto/tendermint/da" types "github.com/tendermint/tendermint/proto/tendermint/types" io "io" math "math" @@ -99,24 +99,24 @@ func init() { func init() { proto.RegisterFile("header/pb/extended_header.proto", fileDescriptor_370294a9fc09133f) } var fileDescriptor_370294a9fc09133f = []byte{ - // 261 bytes of a gzipped FileDescriptorProto + // 268 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0xcf, 0x48, 0x4d, 0x4c, 0x49, 0x2d, 0xd2, 0x2f, 0x48, 0xd2, 0x4f, 0xad, 0x28, 0x49, 0xcd, 0x4b, 0x49, 0x4d, 0x89, 0x87, 0x08, 0xe9, 0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0x71, 0xc2, 0x78, 0x49, 0x52, 0x32, 0x60, 0xf9, 0xa2, 0xdc, 0xcc, 0xbc, 0x12, 0xfd, 0x92, 0xca, 0x82, 0xd4, 0x62, 0x08, 0x09, 0x51, 0x28, 0xa5, - 0x80, 0x21, 0x5b, 0x96, 0x98, 0x93, 0x99, 0x92, 0x58, 0x92, 0x0f, 0x35, 0x4a, 
0x4a, 0x07, 0x49, - 0x45, 0x4a, 0xa2, 0x7e, 0x4a, 0x62, 0x49, 0x62, 0x7c, 0x62, 0x59, 0x62, 0x66, 0x4e, 0x62, 0x52, - 0x66, 0x4e, 0x66, 0x49, 0x25, 0x8a, 0xc5, 0x4a, 0x9f, 0x18, 0xb9, 0xf8, 0x5c, 0xa1, 0x4e, 0xf2, - 0x00, 0x4b, 0x08, 0x19, 0x70, 0xb1, 0x41, 0x94, 0x48, 0x30, 0x2a, 0x30, 0x6a, 0x70, 0x1b, 0x49, - 0xe8, 0x21, 0x4c, 0xd4, 0x83, 0xb8, 0x05, 0xa2, 0x32, 0x08, 0xaa, 0x0e, 0xa4, 0x23, 0x39, 0x3f, - 0x37, 0x37, 0xb3, 0x44, 0x82, 0x09, 0x97, 0x0e, 0x67, 0xb0, 0x7c, 0x10, 0x54, 0x9d, 0x90, 0x33, - 0x17, 0x2f, 0xdc, 0xdd, 0xf1, 0xc5, 0xa9, 0x25, 0x12, 0xcc, 0x60, 0x8d, 0x72, 0x98, 0x1a, 0xc3, - 0x60, 0xca, 0x82, 0x53, 0x4b, 0x82, 0x78, 0xca, 0x90, 0x78, 0x42, 0xe6, 0x5c, 0xcc, 0x29, 0x89, - 0x19, 0x12, 0x2c, 0x60, 0xad, 0xaa, 0xc8, 0x5a, 0x53, 0x12, 0xf5, 0x5c, 0x12, 0x4b, 0x12, 0x1d, - 0x91, 0xbc, 0x0d, 0x75, 0x32, 0x48, 0x87, 0x93, 0xc4, 0x89, 0x47, 0x72, 0x8c, 0x17, 0x1e, 0xc9, - 0x31, 0x3e, 0x78, 0x24, 0xc7, 0x38, 0xe1, 0xb1, 0x1c, 0xc3, 0x85, 0xc7, 0x72, 0x0c, 0x37, 0x1e, - 0xcb, 0x31, 0x24, 0xb1, 0x81, 0x43, 0xc5, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0x3d, 0xfa, 0x6c, - 0xbc, 0xb1, 0x01, 0x00, 0x00, + 0x80, 0x21, 0x5b, 0x96, 0x98, 0x93, 0x99, 0x92, 0x58, 0x92, 0x0f, 0x35, 0x4a, 0x4a, 0x2b, 0x39, + 0x35, 0x27, 0xb5, 0xb8, 0x24, 0x33, 0x51, 0x3f, 0x05, 0x84, 0x4a, 0x12, 0xe3, 0x13, 0xcb, 0x12, + 0x33, 0x73, 0x12, 0x93, 0x32, 0x73, 0x32, 0x4b, 0x2a, 0x51, 0xac, 0x55, 0xfa, 0xc0, 0xc8, 0xc5, + 0xe7, 0x0a, 0x75, 0x90, 0x07, 0x58, 0x42, 0xc8, 0x80, 0x8b, 0x0d, 0xa2, 0x44, 0x82, 0x51, 0x81, + 0x51, 0x83, 0xdb, 0x48, 0x42, 0x0f, 0x61, 0xa3, 0x1e, 0xc4, 0x25, 0x10, 0x95, 0x41, 0x50, 0x75, + 0x20, 0x1d, 0xc9, 0xf9, 0xb9, 0xb9, 0x99, 0x25, 0x12, 0x4c, 0xb8, 0x74, 0x38, 0x83, 0xe5, 0x83, + 0xa0, 0xea, 0x84, 0x9c, 0xb9, 0x78, 0xe1, 0xae, 0x8e, 0x2f, 0x4e, 0x2d, 0x91, 0x60, 0x06, 0x6b, + 0x94, 0xc3, 0xd4, 0x18, 0x06, 0x53, 0x16, 0x9c, 0x5a, 0x12, 0xc4, 0x53, 0x86, 0xc4, 0x13, 0x32, + 0xe5, 0x62, 0x4e, 0x49, 0xcc, 0x90, 0x60, 0x01, 0x6b, 0x55, 0xd6, 0x83, 0xf9, 0x5a, 0x2f, 0x25, + 0x51, 0xcf, 0x25, 0xb1, 0x24, 0xd1, 0x11, 0xc9, 0xd3, 0x50, 0x07, 0x83, 0xd4, 0x3b, 0x49, 0x9c, + 0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91, 0x1c, 0xe3, 0x83, 0x47, 0x72, 0x8c, 0x13, 0x1e, 0xcb, 0x31, + 0x5c, 0x78, 0x2c, 0xc7, 0x70, 0xe3, 0xb1, 0x1c, 0x43, 0x12, 0x1b, 0x38, 0x4c, 0x8c, 0x01, 0x01, + 0x00, 0x00, 0xff, 0xff, 0x33, 0x74, 0x5f, 0xa8, 0xad, 0x01, 0x00, 0x00, } func (m *ExtendedHeader) Marshal() (dAtA []byte, err error) { diff --git a/header/pb/extended_header.proto b/header/pb/extended_header.proto index b6155ca0cd..aafb575327 100644 --- a/header/pb/extended_header.proto +++ b/header/pb/extended_header.proto @@ -4,13 +4,13 @@ package header.pb; import "tendermint/types/types.proto"; import "tendermint/types/validator.proto"; -import "tendermint/da/data_availability_header.proto"; +import "celestia/da/data_availability_header.proto"; message ExtendedHeader { tendermint.types.Header header = 1; tendermint.types.Commit commit = 2; tendermint.types.ValidatorSet validator_set = 3; - tendermint.da.DataAvailabilityHeader dah = 4; + celestia.da.DataAvailabilityHeader dah = 4; } // Generated with: diff --git a/header/serde.go b/header/serde.go index 298c120448..a511a1352b 100644 --- a/header/serde.go +++ b/header/serde.go @@ -1,8 +1,11 @@ package header import ( - "github.com/tendermint/tendermint/pkg/da" + pb "github.com/libp2p/go-libp2p-pubsub/pb" core "github.com/tendermint/tendermint/types" + "golang.org/x/crypto/blake2b" + + "github.com/celestiaorg/celestia-app/pkg/da" header_pb 
"github.com/celestiaorg/celestia-node/header/pb" ) @@ -58,7 +61,7 @@ func UnmarshalExtendedHeader(data []byte) (*ExtendedHeader, error) { return nil, err } - return out, out.ValidateBasic() + return out, nil } func ExtendedHeaderToProto(eh *ExtendedHeader) (*header_pb.ExtendedHeader, error) { @@ -91,3 +94,34 @@ func ProtoToExtendedHeader(pb *header_pb.ExtendedHeader) (*ExtendedHeader, error } return header, nil } + +// msgID computes an id for a pubsub message +// TODO(@Wondertan): This cause additional allocations per each recvd message in the topic +// +// Find a way to avoid those. +func MsgID(pmsg *pb.Message) string { + mID := func(data []byte) string { + hash := blake2b.Sum256(data) + return string(hash[:]) + } + + h, _ := UnmarshalExtendedHeader(pmsg.Data) + if h == nil || h.RawHeader.ValidateBasic() != nil { + // There is nothing we can do about the error, and it will be anyway caught during validation. + // We also *have* to return some ID for the msg, so give the hash of even faulty msg + return mID(pmsg.Data) + } + + // IMPORTANT NOTE: + // Due to the nature of the Tendermint consensus, validators don't necessarily collect commit + // signatures from the entire validator set, but only the minimum required amount of them (>2/3 of + // voting power). In addition, signatures are collected asynchronously. Therefore, each validator + // may have a different set of signatures that pass the minimum required voting power threshold, + // causing nondeterminism in the header message gossiped over the network. Subsequently, this + // causes message duplicates as each Bridge Node, connected to a personal validator, sends the + // validator's own view of commits of effectively the same header. + // + // To solve the nondeterminism problem above, we don't compute msg id on message body and take + // the actual header hash as an id. + return h.Commit.BlockID.String() +} diff --git a/header/serde_test.go b/header/serde_test.go deleted file mode 100644 index 36c0301eb9..0000000000 --- a/header/serde_test.go +++ /dev/null @@ -1,23 +0,0 @@ -package header - -import ( - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestMarshalUnmarshalExtendedHeader(t *testing.T) { - in := RandExtendedHeader(t) - data, err := in.MarshalBinary() - require.NoError(t, err) - - out := &ExtendedHeader{} - err = out.UnmarshalBinary(data) - require.NoError(t, err) - assert.Equal(t, in.ValidatorSet, out.ValidatorSet) - assert.True(t, in.DAH.Equals(out.DAH)) - // not the check for equality as time.Time is not serialized exactly 1:1 - assert.NotZero(t, out.RawHeader) - assert.NotNil(t, out.Commit) -} diff --git a/header/store/batch.go b/header/store/batch.go deleted file mode 100644 index f1f33dc38e..0000000000 --- a/header/store/batch.go +++ /dev/null @@ -1,106 +0,0 @@ -package store - -import ( - "sync" - - tmbytes "github.com/tendermint/tendermint/libs/bytes" - - "github.com/celestiaorg/celestia-node/header" -) - -// batch keeps an adjacent range of headers and loosely mimics the Store -// interface. NOTE: Can fully implement Store for a use case. -// -// It keeps a mapping 'height -> header' and 'hash -> height' -// unlike the Store which keeps 'hash -> header' and 'height -> hash'. -// The approach simplifies implementation for the batch and -// makes it better optimized for the GetByHeight case which is what we need. 
-type batch struct { - lk sync.RWMutex - heights map[string]uint64 - headers []*header.ExtendedHeader -} - -// newBatch creates the batch with the given pre-allocated size. -func newBatch(size int) *batch { - return &batch{ - heights: make(map[string]uint64, size), - headers: make([]*header.ExtendedHeader, 0, size), - } -} - -// Len gives current length of the batch. -func (b *batch) Len() int { - b.lk.RLock() - defer b.lk.RUnlock() - return len(b.headers) -} - -// GetAll returns a slice of all the headers in the batch. -func (b *batch) GetAll() []*header.ExtendedHeader { - b.lk.RLock() - defer b.lk.RUnlock() - return b.headers -} - -// Get returns a header by its hash. -func (b *batch) Get(hash tmbytes.HexBytes) *header.ExtendedHeader { - b.lk.RLock() - defer b.lk.RUnlock() - height, ok := b.heights[hash.String()] - if !ok { - return nil - } - - return b.getByHeight(height) -} - -// GetByHeight returns a header by its height. -func (b *batch) GetByHeight(height uint64) *header.ExtendedHeader { - b.lk.RLock() - defer b.lk.RUnlock() - return b.getByHeight(height) -} - -func (b *batch) getByHeight(height uint64) *header.ExtendedHeader { - ln := uint64(len(b.headers)) - if ln == 0 { - return nil - } - - head := uint64(b.headers[ln-1].Height) - base := head - ln - if height > head || height <= base { - return nil - } - - return b.headers[height-base-1] -} - -// Append appends new headers to the batch. -func (b *batch) Append(headers ...*header.ExtendedHeader) { - b.lk.Lock() - defer b.lk.Unlock() - for _, h := range headers { - b.headers = append(b.headers, h) - b.heights[h.Hash().String()] = uint64(h.Height) - } -} - -// Has checks whether header by the hash is present in the batch. -func (b *batch) Has(hash tmbytes.HexBytes) bool { - b.lk.RLock() - defer b.lk.RUnlock() - _, ok := b.heights[hash.String()] - return ok -} - -// Reset cleans references to batched headers. -func (b *batch) Reset() { - b.lk.Lock() - defer b.lk.Unlock() - b.headers = b.headers[:0] - for k := range b.heights { - delete(b.heights, k) - } -} diff --git a/header/store/height_indexer.go b/header/store/height_indexer.go deleted file mode 100644 index 2fd70671fb..0000000000 --- a/header/store/height_indexer.go +++ /dev/null @@ -1,58 +0,0 @@ -package store - -import ( - "context" - - lru "github.com/hashicorp/golang-lru" - "github.com/ipfs/go-datastore" - tmbytes "github.com/tendermint/tendermint/libs/bytes" - - "github.com/celestiaorg/celestia-node/header" -) - -// TODO(@Wondertan): There should be a more clever way to index heights, than just storing HeightToHash pair... -// heightIndexer simply stores and cashes mappings between header Height and Hash. -type heightIndexer struct { - ds datastore.Batching - cache *lru.ARCCache -} - -// newHeightIndexer creates new heightIndexer. -func newHeightIndexer(ds datastore.Batching) (*heightIndexer, error) { - cache, err := lru.NewARC(DefaultIndexCacheSize) - if err != nil { - return nil, err - } - - return &heightIndexer{ - ds: ds, - cache: cache, - }, nil -} - -// HashByHeight loads a header hash corresponding to the given height. -func (hi *heightIndexer) HashByHeight(ctx context.Context, h uint64) (tmbytes.HexBytes, error) { - if v, ok := hi.cache.Get(h); ok { - return v.(tmbytes.HexBytes), nil - } - - val, err := hi.ds.Get(ctx, heightKey(h)) - if err != nil { - return nil, err - } - - hi.cache.Add(h, tmbytes.HexBytes(val)) - return val, nil -} - -// IndexTo saves mapping between header Height and Hash to the given batch. 
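With the index above, looking up a header by height reduces to two steps once the height is known to be on disk. An illustrative fragment, assuming a heightIndexer `idx` and the store's Get from store.go further below (this is essentially what the store's GetByHeight does; the IndexTo implementation continues below):

    // height -> hash through the (ARC-cached) index, then hash -> header.
    hash, err := idx.HashByHeight(ctx, height)
    if err != nil {
        return nil, err
    }
    return s.Get(ctx, hash)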
-func (hi *heightIndexer) IndexTo(ctx context.Context, batch datastore.Batch, headers ...*header.ExtendedHeader) error { - for _, h := range headers { - err := batch.Put(ctx, heightKey(uint64(h.Height)), h.Hash()) - if err != nil { - return err - } - } - - return nil -} diff --git a/header/store/heightsub.go b/header/store/heightsub.go deleted file mode 100644 index ddae9e4f55..0000000000 --- a/header/store/heightsub.go +++ /dev/null @@ -1,117 +0,0 @@ -package store - -import ( - "context" - "errors" - "sync" - "sync/atomic" - - "github.com/celestiaorg/celestia-node/header" -) - -// errElapsedHeight is thrown when a requested height was already provided to heightSub. -var errElapsedHeight = errors.New("elapsed height") - -// heightSub provides a minimalistic mechanism to wait till header for a height becomes available. -type heightSub struct { - // height refers to the latest locally available header height - // that has been fully verified and inserted into the subjective chain - height uint64 // atomic - heightReqsLk sync.Mutex - heightReqs map[uint64][]chan *header.ExtendedHeader -} - -// newHeightSub instantiates new heightSub. -func newHeightSub() *heightSub { - return &heightSub{ - heightReqs: make(map[uint64][]chan *header.ExtendedHeader), - } -} - -// Height reports current height. -func (hs *heightSub) Height() uint64 { - return atomic.LoadUint64(&hs.height) -} - -// SetHeight sets the new head height for heightSub. -func (hs *heightSub) SetHeight(height uint64) { - atomic.StoreUint64(&hs.height, height) -} - -// Sub subscribes for a header of a given height. -// It can return errElapsedHeight, which means a requested header was already provided -// and caller should get it elsewhere. -func (hs *heightSub) Sub(ctx context.Context, height uint64) (*header.ExtendedHeader, error) { - if hs.Height() >= height { - return nil, errElapsedHeight - } - - hs.heightReqsLk.Lock() - if hs.Height() >= height { - // This is a rare case we have to account for. - // The lock above can park a goroutine long enough for hs.height to change for a requested height, - // leaving the request never fulfilled and the goroutine deadlocked. - hs.heightReqsLk.Unlock() - return nil, errElapsedHeight - } - resp := make(chan *header.ExtendedHeader, 1) - hs.heightReqs[height] = append(hs.heightReqs[height], resp) - hs.heightReqsLk.Unlock() - - select { - case resp := <-resp: - return resp, nil - case <-ctx.Done(): - return nil, ctx.Err() - } -} - -// Pub processes all the outstanding subscriptions matching the given headers. -// Pub is only safe when called from one goroutine. -// For Pub to work correctly, heightSub has to be initialized with SetHeight -// so that given headers are contiguous to the height on heightSub. 
-func (hs *heightSub) Pub(headers ...*header.ExtendedHeader) { - ln := len(headers) - if ln == 0 { - return - } - - height := hs.Height() - from, to := uint64(headers[0].Height), uint64(headers[ln-1].Height) - if height+1 != from { - log.Fatal("PLEASE FILE A BUG REPORT: headers given to the heightSub are in the wrong order") - return - } - hs.SetHeight(to) - - hs.heightReqsLk.Lock() - defer hs.heightReqsLk.Unlock() - - // there is a common case where we Pub only header - // in this case, we shouldn't loop over each heightReqs - // and instead read from the map directly - if ln == 1 { - reqs, ok := hs.heightReqs[from] - if ok { - for _, req := range reqs { - req <- headers[0] // reqs must always be buffered, so this won't block - } - delete(hs.heightReqs, from) - } - return - } - - // instead of looping over each header in 'headers', we can loop over each request - // which will drastically decrease idle iterations, as there will be less requests than headers - for height, reqs := range hs.heightReqs { - // then we look if any of the requests match the given range of headers - if height >= from && height <= to { - // and if so, calculate its position and fulfill requests - h := headers[height-from] - for _, req := range reqs { - req <- h // reqs must always be buffered, so this won't block - } - delete(hs.heightReqs, height) - } - } -} diff --git a/header/store/heightsub_test.go b/header/store/heightsub_test.go deleted file mode 100644 index 5be45d17ef..0000000000 --- a/header/store/heightsub_test.go +++ /dev/null @@ -1,48 +0,0 @@ -package store - -import ( - "context" - "testing" - "time" - - "github.com/stretchr/testify/assert" - - "github.com/celestiaorg/celestia-node/header" -) - -func TestHeightSub(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) - defer cancel() - - hs := newHeightSub() - - // assert subscription returns nil for past heights - { - h := header.RandExtendedHeader(t) - h.Height = 100 - hs.SetHeight(99) - hs.Pub(h) - - h, err := hs.Sub(ctx, 10) - assert.ErrorIs(t, err, errElapsedHeight) - assert.Nil(t, h) - } - - // assert actual subscription works - { - go func() { - // fixes flakiness on CI - time.Sleep(time.Millisecond) - - h1 := header.RandExtendedHeader(t) - h1.Height = 101 - h2 := header.RandExtendedHeader(t) - h2.Height = 102 - hs.Pub(h1, h2) - }() - - h, err := hs.Sub(ctx, 101) - assert.NoError(t, err) - assert.NotNil(t, h) - } -} diff --git a/header/store/init.go b/header/store/init.go deleted file mode 100644 index 8017e4922c..0000000000 --- a/header/store/init.go +++ /dev/null @@ -1,26 +0,0 @@ -package store - -import ( - "context" - - tmbytes "github.com/tendermint/tendermint/libs/bytes" - - "github.com/celestiaorg/celestia-node/header" -) - -// Init ensures a Store is initialized. If it is not already initialized, -// it initializes the Store by requesting the header with the given hash. 
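A sketch of how a caller might use Init on node startup, with hypothetical names (cfg.TrustedHash for wherever the trusted hash comes from, `s` for the header.Store, `ex` for the Exchange); the Init implementation continues below:

    // If the store has no head yet, Init fetches the trusted header by hash from
    // the Exchange and initializes the store with it; an already-initialized
    // store is left untouched.
    trusted, err := hex.DecodeString(cfg.TrustedHash) // hypothetical config field
    if err != nil {
        return err
    }
    return store.Init(ctx, s, ex, tmbytes.HexBytes(trusted))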
-func Init(ctx context.Context, store header.Store, ex header.Exchange, hash tmbytes.HexBytes) error { - _, err := store.Head(ctx) - switch err { - default: - return err - case header.ErrNoHead: - initial, err := ex.Get(ctx, hash) - if err != nil { - return err - } - - return store.Init(ctx, initial) - } -} diff --git a/header/store/init_test.go b/header/store/init_test.go deleted file mode 100644 index df24553bbc..0000000000 --- a/header/store/init_test.go +++ /dev/null @@ -1,59 +0,0 @@ -package store - -import ( - "context" - "testing" - "time" - - "github.com/ipfs/go-datastore" - "github.com/ipfs/go-datastore/sync" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/celestiaorg/celestia-node/header" - "github.com/celestiaorg/celestia-node/header/local" -) - -func TestInitStore_NoReinit(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) - t.Cleanup(cancel) - - suite := header.NewTestSuite(t, 3) - head := suite.Head() - exchange := local.NewExchange(NewTestStore(ctx, t, head)) - - ds := sync.MutexWrap(datastore.NewMapDatastore()) - store, err := NewStore(ds) - require.NoError(t, err) - - err = Init(ctx, store, exchange, head.Hash()) - assert.NoError(t, err) - - err = store.Start(ctx) - require.NoError(t, err) - - _, err = store.Append(ctx, suite.GenExtendedHeaders(10)...) - require.NoError(t, err) - - err = store.Stop(ctx) - require.NoError(t, err) - - reopenedStore, err := NewStore(ds) - require.NoError(t, err) - - err = Init(ctx, reopenedStore, exchange, head.Hash()) - assert.NoError(t, err) - - err = reopenedStore.Start(ctx) - require.NoError(t, err) - - reopenedHead, err := reopenedStore.Head(ctx) - require.NoError(t, err) - - // check that reopened head changed and the store wasn't reinitialized - assert.Equal(t, suite.Head().Height, reopenedHead.Height) - assert.NotEqual(t, head.Height, reopenedHead.Height) - - err = reopenedStore.Stop(ctx) - require.NoError(t, err) -} diff --git a/header/store/keys.go b/header/store/keys.go deleted file mode 100644 index 6f22326015..0000000000 --- a/header/store/keys.go +++ /dev/null @@ -1,22 +0,0 @@ -package store - -import ( - "strconv" - - "github.com/ipfs/go-datastore" - - "github.com/celestiaorg/celestia-node/header" -) - -var ( - storePrefix = datastore.NewKey("headers") - headKey = datastore.NewKey("head") -) - -func heightKey(h uint64) datastore.Key { - return datastore.NewKey(strconv.Itoa(int(h))) -} - -func headerKey(h *header.ExtendedHeader) datastore.Key { - return datastore.NewKey(h.Hash().String()) -} diff --git a/header/store/store.go b/header/store/store.go deleted file mode 100644 index 0aab86257c..0000000000 --- a/header/store/store.go +++ /dev/null @@ -1,413 +0,0 @@ -package store - -import ( - "context" - "errors" - "fmt" - "sync" - - logging "github.com/ipfs/go-log/v2" - - lru "github.com/hashicorp/golang-lru" - "github.com/ipfs/go-datastore" - "github.com/ipfs/go-datastore/namespace" - tmbytes "github.com/tendermint/tendermint/libs/bytes" - - "github.com/celestiaorg/celestia-node/header" -) - -var log = logging.Logger("header/store") - -// TODO(@Wondertan): Those values must be configurable and proper defaults should be set for specific node type. (#709) -var ( - // DefaultStoreCacheSize defines the amount of max entries allowed in the Header Store cache. - DefaultStoreCacheSize = 4096 - // DefaultIndexCacheSize defines the amount of max entries allowed in the Height to Hash index cache. 
- DefaultIndexCacheSize = 16384 - // DefaultWriteBatchSize defines the size of the batched header write. - // Headers are written in batches not to thrash the underlying Datastore with writes. - DefaultWriteBatchSize = 2048 -) - -var ( - // errStoppedStore is returned for attempted operations on a stopped store - errStoppedStore = errors.New("stopped store") -) - -// store implements the Store interface for ExtendedHeaders over Datastore. -type store struct { - // header storing - // - // underlying KV store - ds datastore.Batching - // adaptive replacement cache of headers - cache *lru.ARCCache - - // header heights management - // - // maps heights to hashes - heightIndex *heightIndexer - // manages current store read head height (1) and - // allows callers to wait until header for a height is stored (2) - heightSub *heightSub - - // writing to datastore - // - writeLk sync.Mutex - // queue of headers to be written - writes chan []*header.ExtendedHeader - // signals when writes are finished - writesDn chan struct{} - // writeHead maintains the current write head - writeHead *header.ExtendedHeader - // pending keeps headers pending to be written in one batch - pending *batch -} - -// NewStore constructs a Store over datastore. -// The datastore must have a head there otherwise Start will error. -// For first initialization of Store use NewStoreWithHead. -func NewStore(ds datastore.Batching) (header.Store, error) { - return newStore(ds) -} - -// NewStoreWithHead initiates a new Store and forcefully sets a given trusted header as head. -func NewStoreWithHead(ctx context.Context, ds datastore.Batching, head *header.ExtendedHeader) (header.Store, error) { - store, err := newStore(ds) - if err != nil { - return nil, err - } - - return store, store.Init(ctx, head) -} - -func newStore(ds datastore.Batching) (*store, error) { - ds = namespace.Wrap(ds, storePrefix) - cache, err := lru.NewARC(DefaultStoreCacheSize) - if err != nil { - return nil, err - } - - index, err := newHeightIndexer(ds) - if err != nil { - return nil, err - } - - return &store{ - ds: ds, - cache: cache, - heightIndex: index, - heightSub: newHeightSub(), - writes: make(chan []*header.ExtendedHeader, 16), - writesDn: make(chan struct{}), - pending: newBatch(DefaultWriteBatchSize), - }, nil -} - -func (s *store) Init(ctx context.Context, initial *header.ExtendedHeader) error { - // trust the given header as the initial head - err := s.flush(ctx, initial) - if err != nil { - return err - } - - log.Infow("initialized head", "height", initial.Height, "hash", initial.Hash()) - return nil -} - -func (s *store) Start(context.Context) error { - go s.flushLoop() - return nil -} - -func (s *store) Stop(ctx context.Context) error { - select { - case <-s.writesDn: - return errStoppedStore - default: - } - // signal to prevent further writes to Store - s.writes <- nil - select { - case <-s.writesDn: // wait till it is done writing - case <-ctx.Done(): - return ctx.Err() - } - - // cleanup caches - s.cache.Purge() - s.heightIndex.cache.Purge() - return nil -} - -func (s *store) Height() uint64 { - return s.heightSub.Height() -} - -func (s *store) Head(ctx context.Context) (*header.ExtendedHeader, error) { - head, err := s.GetByHeight(ctx, s.heightSub.Height()) - if err == nil { - return head, nil - } - - head, err = s.readHead(ctx) - switch err { - default: - return nil, err - case datastore.ErrNotFound, header.ErrNotFound: - return nil, header.ErrNoHead - case nil: - s.heightSub.SetHeight(uint64(head.Height)) - log.Infow("loaded head", 
"height", head.Height, "hash", head.Hash()) - return head, nil - } -} - -func (s *store) Get(ctx context.Context, hash tmbytes.HexBytes) (*header.ExtendedHeader, error) { - if v, ok := s.cache.Get(hash.String()); ok { - return v.(*header.ExtendedHeader), nil - } - // check if the requested header is not yet written on disk - if h := s.pending.Get(hash); h != nil { - return h, nil - } - - b, err := s.ds.Get(ctx, datastore.NewKey(hash.String())) - if err != nil { - if err == datastore.ErrNotFound { - return nil, header.ErrNotFound - } - - return nil, err - } - - h, err := header.UnmarshalExtendedHeader(b) - if err != nil { - return nil, err - } - - s.cache.Add(h.Hash().String(), h) - return h, nil -} - -func (s *store) GetByHeight(ctx context.Context, height uint64) (*header.ExtendedHeader, error) { - if height == 0 { - return nil, fmt.Errorf("header/store: height must be bigger than zero") - } - // if the requested 'height' was not yet published - // we subscribe to it - h, err := s.heightSub.Sub(ctx, height) - if err != errElapsedHeight { - return h, err - } - // otherwise, the errElapsedHeight is thrown, - // which means the requested 'height' should be present - // - // check if the requested header is not yet written on disk - if h := s.pending.GetByHeight(height); h != nil { - return h, nil - } - - hash, err := s.heightIndex.HashByHeight(ctx, height) - if err != nil { - if err == datastore.ErrNotFound { - return nil, header.ErrNotFound - } - - return nil, err - } - - return s.Get(ctx, hash) -} - -func (s *store) GetRangeByHeight(ctx context.Context, from, to uint64) ([]*header.ExtendedHeader, error) { - h, err := s.GetByHeight(ctx, to-1) - if err != nil { - return nil, err - } - - ln := to - from - headers := make([]*header.ExtendedHeader, ln) - for i := ln - 1; i > 0; i-- { - headers[i] = h - h, err = s.Get(ctx, h.LastHeader()) - if err != nil { - return nil, err - } - } - headers[0] = h - - return headers, nil -} - -func (s *store) Has(ctx context.Context, hash tmbytes.HexBytes) (bool, error) { - if ok := s.cache.Contains(hash.String()); ok { - return ok, nil - } - // check if the requested header is not yet written on disk - if ok := s.pending.Has(hash); ok { - return ok, nil - } - - return s.ds.Has(ctx, datastore.NewKey(hash.String())) -} - -func (s *store) Append(ctx context.Context, headers ...*header.ExtendedHeader) (int, error) { - lh := len(headers) - if lh == 0 { - return 0, nil - } - // taking the ownership of append - // mainly, this is need to avoid race conditions for s.writeHead - s.writeLk.Lock() - defer s.writeLk.Unlock() - - // take current write head to verify headers against - var err error - head := s.writeHead - if head == nil { - head, err = s.Head(ctx) - if err != nil { - return 0, err - } - } - - // collect valid headers - verified := make([]*header.ExtendedHeader, 0, lh) - for i, h := range headers { - err = head.VerifyAdjacent(h) - if err != nil { - var verErr *header.VerifyError - if errors.As(err, &verErr) { - log.Errorw("invalid header", - "height_of_head", head.Height, - "hash_of_head", head.Hash(), - "height_of_invalid", h.Height, - "hash_of_invalid", h.Hash(), - "reason", verErr.Reason) - } - // if the first header is invalid, no need to go further - if i == 0 { - // and simply return - return 0, err - } - // otherwise, stop the loop and apply headers appeared to be valid - break - } - verified, head = append(verified, h), h - } - - // queue headers to be written on disk - select { - case s.writes <- verified: - ln := len(verified) - s.writeHead = 
verified[ln-1] - log.Infow("new head", "height", s.writeHead.Height, "hash", s.writeHead.Hash()) - // we return an error here after writing, - // as there might be an invalid header in between of a given range - return ln, err - case <-s.writesDn: - return 0, errStoppedStore - case <-ctx.Done(): - return 0, ctx.Err() - } -} - -// flushLoop performs writing task to the underlying datastore in a separate routine -// This way writes are controlled and manageable from one place allowing -// (1) Appends not to be blocked on long disk IO writes and underlying DB compactions -// (2) Batching header writes -func (s *store) flushLoop() { - defer close(s.writesDn) - ctx := context.Background() - for headers := range s.writes { - // add headers to the pending and ensure they are accessible - s.pending.Append(headers...) - // and notify waiters if any + increase current read head height - // it is important to do Pub after updating pending - // so pending is consistent with atomic Height counter on the heightSub - s.heightSub.Pub(headers...) - // don't flush and continue if pending batch is not grown enough, - // and Store is not stopping(headers == nil) - if s.pending.Len() < DefaultWriteBatchSize && headers != nil { - continue - } - - err := s.flush(ctx, s.pending.GetAll()...) - if err != nil { - // TODO(@Wondertan): Should this be a fatal error case with os.Exit? - from, to := uint64(headers[0].Height), uint64(headers[len(headers)-1].Height) - log.Errorw("writing header batch", "from", from, "to", to) - continue - } - // reset pending - s.pending.Reset() - - if headers == nil { - // a signal to stop - return - } - } -} - -// flush writes the given batch to datastore. -func (s *store) flush(ctx context.Context, headers ...*header.ExtendedHeader) error { - ln := len(headers) - if ln == 0 { - return nil - } - - batch, err := s.ds.Batch(ctx) - if err != nil { - return err - } - - // collect all the headers in the batch to be written - for _, h := range headers { - b, err := h.MarshalBinary() - if err != nil { - return err - } - - err = batch.Put(ctx, headerKey(h), b) - if err != nil { - return err - } - } - - // marshal and add to batch reference to the new head - b, err := headers[ln-1].Hash().MarshalJSON() - if err != nil { - return err - } - - err = batch.Put(ctx, headKey, b) - if err != nil { - return err - } - - // write height indexes for headers as well - err = s.heightIndex.IndexTo(ctx, batch, headers...) - if err != nil { - return err - } - - // finally, commit the batch on disk - return batch.Commit(ctx) -} - -// readHead loads the head from the datastore. 
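The flush pipeline above gives the store write-behind semantics: Append only verifies and queues headers, and reads are served from the pending batch until a flush lands on disk. A test-style fragment showing the property this buys, mirroring TestStorePendingCacheMiss further below (the readHead implementation continues after this note):

    _, err := s.Append(ctx, headers...) // returns once headers are verified and queued
    require.NoError(t, err)

    // Immediately readable, even though fewer than DefaultWriteBatchSize headers
    // may have been flushed to disk: served from the pending batch via heightSub.
    h, err := s.GetByHeight(ctx, uint64(headers[len(headers)-1].Height))
    require.NoError(t, err)
    require.NotNil(t, h)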
-func (s *store) readHead(ctx context.Context) (*header.ExtendedHeader, error) { - b, err := s.ds.Get(ctx, headKey) - if err != nil { - return nil, err - } - - var head tmbytes.HexBytes - err = head.UnmarshalJSON(b) - if err != nil { - return nil, err - } - - return s.Get(ctx, head) -} diff --git a/header/store/store_test.go b/header/store/store_test.go deleted file mode 100644 index e6bc3ad50a..0000000000 --- a/header/store/store_test.go +++ /dev/null @@ -1,117 +0,0 @@ -package store - -import ( - "context" - "testing" - "time" - - "github.com/ipfs/go-datastore" - "github.com/ipfs/go-datastore/sync" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - tmrand "github.com/tendermint/tendermint/libs/rand" - - "github.com/celestiaorg/celestia-node/header" -) - -func TestStore(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) - t.Cleanup(cancel) - - suite := header.NewTestSuite(t, 3) - - ds := sync.MutexWrap(datastore.NewMapDatastore()) - store, err := NewStoreWithHead(ctx, ds, suite.Head()) - require.NoError(t, err) - - err = store.Start(ctx) - require.NoError(t, err) - - head, err := store.Head(ctx) - require.NoError(t, err) - assert.EqualValues(t, suite.Head().Hash(), head.Hash()) - - in := suite.GenExtendedHeaders(10) - ln, err := store.Append(ctx, in...) - require.NoError(t, err) - assert.Equal(t, 10, ln) - - out, err := store.GetRangeByHeight(ctx, 2, 12) - require.NoError(t, err) - for i, h := range in { - assert.Equal(t, h.Hash(), out[i].Hash()) - } - - head, err = store.Head(ctx) - require.NoError(t, err) - assert.Equal(t, out[len(out)-1].Hash(), head.Hash()) - - ok, err := store.Has(ctx, in[5].Hash()) - require.NoError(t, err) - assert.True(t, ok) - - ok, err = store.Has(ctx, tmrand.Bytes(32)) - require.NoError(t, err) - assert.False(t, ok) - - go func() { - ln, err := store.Append(ctx, suite.GenExtendedHeaders(1)...) - require.NoError(t, err) - assert.Equal(t, 1, ln) - }() - - h, err := store.GetByHeight(ctx, 12) - require.NoError(t, err) - assert.NotNil(t, h) - - err = store.Stop(ctx) - require.NoError(t, err) - - // check that the store can be successfully started after previous stop - // with all data being flushed. - store, err = NewStore(ds) - require.NoError(t, err) - - err = store.Start(ctx) - require.NoError(t, err) - - head, err = store.Head(ctx) - require.NoError(t, err) - assert.Equal(t, suite.Head().Hash(), head.Hash()) - - out, err = store.GetRangeByHeight(ctx, 1, 13) - require.NoError(t, err) - assert.Len(t, out, 12) - - err = store.Stop(ctx) - require.NoError(t, err) -} - -func TestStorePendingCacheMiss(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) - t.Cleanup(cancel) - - suite := header.NewTestSuite(t, 3) - - ds := sync.MutexWrap(datastore.NewMapDatastore()) - - DefaultWriteBatchSize = 100 - DefaultStoreCacheSize = 100 - store, err := NewStoreWithHead(ctx, ds, suite.Head()) - require.NoError(t, err) - - err = store.Start(ctx) - require.NoError(t, err) - _, err = store.Append(ctx, suite.GenExtendedHeaders(100)...) - require.NoError(t, err) - - _, err = store.Append(ctx, suite.GenExtendedHeaders(50)...) 
- require.NoError(t, err) - - _, err = store.GetRangeByHeight(ctx, 1, 101) - require.NoError(t, err) - - _, err = store.GetRangeByHeight(ctx, 101, 151) - require.NoError(t, err) -} diff --git a/header/store/testing.go b/header/store/testing.go deleted file mode 100644 index 2a6a3acb0c..0000000000 --- a/header/store/testing.go +++ /dev/null @@ -1,27 +0,0 @@ -package store - -import ( - "context" - "testing" - - "github.com/ipfs/go-datastore" - "github.com/ipfs/go-datastore/sync" - "github.com/stretchr/testify/require" - - "github.com/celestiaorg/celestia-node/header" -) - -// NewTestStore creates initialized and started in memory header Store which is useful for testing. -func NewTestStore(ctx context.Context, t *testing.T, head *header.ExtendedHeader) header.Store { - store, err := NewStoreWithHead(ctx, sync.MutexWrap(datastore.NewMapDatastore()), head) - require.NoError(t, err) - - err = store.Start(ctx) - require.NoError(t, err) - - t.Cleanup(func() { - err := store.Stop(ctx) - require.NoError(t, err) - }) - return store -} diff --git a/header/sync/ranges.go b/header/sync/ranges.go deleted file mode 100644 index 7b5db2444a..0000000000 --- a/header/sync/ranges.go +++ /dev/null @@ -1,153 +0,0 @@ -package sync - -import ( - "sync" - - "github.com/celestiaorg/celestia-node/header" -) - -// ranges keeps non-overlapping and non-adjacent header ranges which are used to cache headers (in ascending order). -// This prevents unnecessary / duplicate network requests for additional headers during sync. -type ranges struct { - ranges []*headerRange - lk sync.Mutex // no need for RWMutex as there is only one reader -} - -// Head returns the highest ExtendedHeader in all ranges if any. -func (rs *ranges) Head() *header.ExtendedHeader { - rs.lk.Lock() - defer rs.lk.Unlock() - - ln := len(rs.ranges) - if ln == 0 { - return nil - } - - head := rs.ranges[ln-1] - return head.Head() -} - -// Add appends the new ExtendedHeader to existing range or starts a new one. -// It starts a new one if the new ExtendedHeader is not adjacent to any of existing ranges. -func (rs *ranges) Add(h *header.ExtendedHeader) { - head := rs.Head() - - // short-circuit if header is from the past - if head != nil && head.Height >= h.Height { - // TODO(@Wondertan): Technically, we can still apply the header: - // * Headers here are verified, so we can trust them - // * PubSub does not guarantee the ordering of msgs - // * So there might be a case where ordering is broken - // * Even considering the delay(block time) with which new headers are generated - // * But rarely - // Would be still nice to implement - log.Warnf("rcvd headers in wrong order") - return - } - - rs.lk.Lock() - defer rs.lk.Unlock() - - // if the new header is adjacent to head - if head != nil && h.Height == head.Height+1 { - // append it to the last known range - rs.ranges[len(rs.ranges)-1].Append(h) - } else { - // otherwise, start a new range - rs.ranges = append(rs.ranges, newRange(h)) - - // it is possible to miss a header or few from PubSub, due to quick disconnects or sleep - // once we start rcving them again we save those in new range - // so 'Syncer.findHeaders' can fetch what was missed - } -} - -// FirstRangeWithin checks if the first range is within a given height span [start:end] -// and returns it. 
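The ranges cache above only ever holds non-overlapping, non-adjacent runs, so a gap in gossip simply opens a new range. Illustrative only, with h100..h105 standing for verified headers at those heights (FirstRangeWithin's implementation continues below):

    var rs ranges
    rs.Add(h100) // ranges: [100]
    rs.Add(h101) // adjacent to the head, appended: [100..101]
    rs.Add(h105) // heights 102-104 were missed, so a new range starts: [100..101] [105]

    // Syncer.findHeaders consults the cache before hitting the network:
    r, ok := rs.FirstRangeWithin(100, 105) // true: the first range starts at 100
    _ = r                                  // 100..101 come from cache; 102..104 must be
                                           // fetched from the Exchange; 105 is cached again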
-func (rs *ranges) FirstRangeWithin(start, end uint64) (*headerRange, bool) { - r, ok := rs.First() - if !ok { - return nil, false - } - - if r.start >= start && r.start <= end { - return r, true - } - - return nil, false -} - -// First provides a first non-empty range, while cleaning up empty ones. -func (rs *ranges) First() (*headerRange, bool) { - rs.lk.Lock() - defer rs.lk.Unlock() - - for { - if len(rs.ranges) == 0 { - return nil, false - } - - out := rs.ranges[0] - if !out.Empty() { - return out, true - } - - rs.ranges = rs.ranges[1:] - } -} - -type headerRange struct { - start uint64 - lk sync.Mutex // no need for RWMutex as there is only one reader - headers []*header.ExtendedHeader -} - -func newRange(h *header.ExtendedHeader) *headerRange { - return &headerRange{ - start: uint64(h.Height), - headers: []*header.ExtendedHeader{h}, - } -} - -// Append appends new headers. -func (r *headerRange) Append(h ...*header.ExtendedHeader) { - r.lk.Lock() - r.headers = append(r.headers, h...) - r.lk.Unlock() -} - -// Empty reports if range is empty. -func (r *headerRange) Empty() bool { - r.lk.Lock() - defer r.lk.Unlock() - return len(r.headers) == 0 -} - -// Head reports the head of range if any. -func (r *headerRange) Head() *header.ExtendedHeader { - r.lk.Lock() - defer r.lk.Unlock() - ln := len(r.headers) - if ln == 0 { - return nil - } - return r.headers[ln-1] -} - -// Before truncates all the headers before height 'end' - [r.Start:end] -func (r *headerRange) Before(end uint64) ([]*header.ExtendedHeader, uint64) { - r.lk.Lock() - defer r.lk.Unlock() - - amnt := uint64(len(r.headers)) - if r.start+amnt >= end { - amnt = end - r.start + 1 // + 1 to include 'end' as well - } - - out := r.headers[:amnt] - r.headers = r.headers[amnt:] - if len(r.headers) != 0 { - r.start = uint64(r.headers[0].Height) - } - return out, amnt -} diff --git a/header/sync/sync.go b/header/sync/sync.go deleted file mode 100644 index 20a404ecae..0000000000 --- a/header/sync/sync.go +++ /dev/null @@ -1,352 +0,0 @@ -package sync - -import ( - "context" - "errors" - "sync" - "time" - - logging "github.com/ipfs/go-log/v2" - - pubsub "github.com/libp2p/go-libp2p-pubsub" - tmbytes "github.com/tendermint/tendermint/libs/bytes" - - "github.com/celestiaorg/celestia-node/header" -) - -var log = logging.Logger("header/sync") - -// Syncer implements efficient synchronization for headers. -// -// There are two main processes running in Syncer: -// 1. Main syncing loop(s.syncLoop) -// * Performs syncing from the subjective(local chain view) header up to the latest known trusted header -// * Syncs by requesting missing headers from Exchange or -// * By accessing cache of pending and verified headers -// 2. Receives new headers from PubSub subnetwork (s.processIncoming) -// * Usually, a new header is adjacent to the trusted head and if so, it is simply appended to the local store, -// incrementing the subjective height and making it the new latest known trusted header. -// * Or, if it receives a header further in the future, -// * verifies against the latest known trusted header -// * adds the header to pending cache(making it the latest known trusted header) -// * and triggers syncing loop to catch up to that point. 
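Wiring the two processes described above is small. A sketch, assuming an Exchange `ex`, a started Store `store`, and a Subscriber `sub` are already constructed (the Syncer type follows below):

    syncer := sync.NewSyncer(ex, store, sub)
    if err := syncer.Start(ctx); err != nil {
        return err
    }
    defer syncer.Stop(ctx) //nolint:errcheck

    // Block until the local store catches up to the latest known trusted header.
    if err := syncer.WaitSync(ctx); err != nil {
        return err
    }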
-type Syncer struct { - sub header.Subscriber - exchange header.Exchange - store header.Store - - // stateLk protects state which represents the current or latest sync - stateLk sync.RWMutex - state State - // signals to start syncing - triggerSync chan struct{} - // pending keeps ranges of valid headers received from the network awaiting to be appended to store - pending ranges - // cancel cancels syncLoop's context - cancel context.CancelFunc -} - -// NewSyncer creates a new instance of Syncer. -func NewSyncer(exchange header.Exchange, store header.Store, sub header.Subscriber) *Syncer { - return &Syncer{ - sub: sub, - exchange: exchange, - store: store, - triggerSync: make(chan struct{}, 1), // should be buffered - } -} - -// Start starts the syncing routine. -func (s *Syncer) Start(context.Context) error { - err := s.sub.AddValidator(s.processIncoming) - if err != nil { - return err - } - - ctx, cancel := context.WithCancel(context.Background()) - go s.syncLoop(ctx) - s.wantSync() - s.cancel = cancel - return nil -} - -// Stop stops Syncer. -func (s *Syncer) Stop(ctx context.Context) error { - s.cancel() - return s.sub.Stop(ctx) -} - -// WaitSync blocks until ongoing sync is done. -func (s *Syncer) WaitSync(ctx context.Context) error { - state := s.State() - if state.Finished() { - return nil - } - - // this store method blocks until header is available - _, err := s.store.GetByHeight(ctx, state.ToHeight) - return err -} - -// State collects all the information about a sync. -type State struct { - ID uint64 // incrementing ID of a sync - Height uint64 // height at the moment when State is requested for a sync - FromHeight, ToHeight uint64 // the starting and the ending point of a sync - FromHash, ToHash tmbytes.HexBytes - Start, End time.Time - Error error // the error that might happen within a sync -} - -// Finished returns true if sync is done, false otherwise. -func (s State) Finished() bool { - return s.ToHeight <= s.Height -} - -// Duration returns the duration of the sync. -func (s State) Duration() time.Duration { - return s.End.Sub(s.Start) -} - -// State reports state of the current (if in progress), or last sync (if finished). -// Note that throughout the whole Syncer lifetime there might an initial sync and multiple catch-ups. -// All of them are treated as different syncs with different state IDs and other information. -func (s *Syncer) State() State { - s.stateLk.RLock() - state := s.state - s.stateLk.RUnlock() - state.Height = s.store.Height() - return state -} - -// trustedHead returns the latest known trusted header that is within the trusting period. -func (s *Syncer) trustedHead(ctx context.Context) (*header.ExtendedHeader, error) { - // check pending for trusted header and return it if applicable - // NOTE: Pending cannot be expired, guaranteed - pendHead := s.pending.Head() - if pendHead != nil { - return pendHead, nil - } - - sbj, err := s.store.Head(ctx) - if err != nil { - return nil, err - } - - // check if our subjective header is not expired and use it - if !sbj.IsExpired() { - return sbj, nil - } - - // otherwise, request head from a trustedPeer or, in other words, do automatic subjective initialization - objHead, err := s.exchange.Head(ctx) - if err != nil { - return nil, err - } - - s.pending.Add(objHead) - return objHead, nil -} - -// wantSync will trigger the syncing loop (non-blocking). -func (s *Syncer) wantSync() { - select { - case s.triggerSync <- struct{}{}: - default: - } -} - -// syncLoop controls syncing process. 
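State above carries enough to report progress from outside the package. A hedged sketch, guarding the degenerate zero-range case (the syncLoop implementation continues below):

    st := syncer.State()
    if total := st.ToHeight - st.FromHeight; total > 0 && st.Height >= st.FromHeight {
        fmt.Printf("sync #%d: %d%% (height %d of %d, finished=%v)\n",
            st.ID, 100*(st.Height-st.FromHeight)/total, st.Height, st.ToHeight, st.Finished())
    }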
-func (s *Syncer) syncLoop(ctx context.Context) { - for { - select { - case <-s.triggerSync: - s.sync(ctx) - case <-ctx.Done(): - return - } - } -} - -// sync ensures we are synced up to any trusted header. -func (s *Syncer) sync(ctx context.Context) { - trstHead, err := s.trustedHead(ctx) - if err != nil { - log.Errorw("getting trusted head", "err", err) - return - } - - s.syncTo(ctx, trstHead) -} - -// processIncoming processes new processIncoming Headers, validates them and stores/caches if applicable. -func (s *Syncer) processIncoming(ctx context.Context, maybeHead *header.ExtendedHeader) pubsub.ValidationResult { - // 1. Try to append. If header is not adjacent/from future - try it for pending cache below - _, err := s.store.Append(ctx, maybeHead) - switch err { - case nil: - // a happy case where we append adjacent header correctly - return pubsub.ValidationAccept - case header.ErrNonAdjacent: - // not adjacent, so try to cache it after verifying - default: - var verErr *header.VerifyError - if errors.As(err, &verErr) { - return pubsub.ValidationReject - } - - log.Errorw("appending header", - "height", maybeHead.Height, - "hash", maybeHead.Hash().String(), - "err", err) - // might be a storage error or something else, but we can still try to continue processing 'maybeHead' - } - - // 2. Get known trusted head, so we can verify maybeHead - trstHead, err := s.trustedHead(ctx) - if err != nil { - log.Errorw("getting trusted head", "err", err) - return pubsub.ValidationIgnore // we don't know if header is invalid so ignore - } - - // 3. Filter out maybeHead if behind trusted - if maybeHead.Height <= trstHead.Height { - log.Warnw("received known header", - "height", maybeHead.Height, - "hash", maybeHead.Hash()) - return pubsub.ValidationIgnore // we don't know if header is invalid so ignore - } - - // 4. Verify maybeHead against trusted - err = trstHead.VerifyNonAdjacent(maybeHead) - var verErr *header.VerifyError - if errors.As(err, &verErr) { - log.Errorw("invalid header", - "height_of_invalid", maybeHead.Height, - "hash_of_invalid", maybeHead.Hash(), - "height_of_trusted", trstHead.Height, - "hash_of_trusted", trstHead.Hash(), - "reason", verErr.Reason) - return pubsub.ValidationReject - } - - // 5. Save verified header to pending cache - // NOTE: Pending cache can't be DOSed as we verify above each header against a trusted one. - s.pending.Add(maybeHead) - // and trigger sync to catch-up - s.wantSync() - log.Infow("pending head", - "height", maybeHead.Height, - "hash", maybeHead.Hash()) - return pubsub.ValidationAccept -} - -// syncTo requests headers from locally stored head up to the new head. 
-func (s *Syncer) syncTo(ctx context.Context, newHead *header.ExtendedHeader) { - head, err := s.store.Head(ctx) - if err != nil { - log.Errorw("getting head during sync", "err", err) - return - } - - if head.Height == newHead.Height { - return - } - - log.Infow("syncing headers", - "from", head.Height, - "to", newHead.Height) - err = s.doSync(ctx, head, newHead) - if err != nil { - if errors.Is(err, context.Canceled) { - // don't log this error as it is normal case of Syncer being stopped - return - } - - log.Errorw("syncing headers", - "from", head.Height, - "to", newHead.Height, - "err", err) - return - } - - log.Infow("finished syncing", - "from", head.Height, - "to", newHead.Height, - "elapsed time", s.state.End.Sub(s.state.Start)) -} - -// doSync performs actual syncing updating the internal State -func (s *Syncer) doSync(ctx context.Context, fromHead, toHead *header.ExtendedHeader) (err error) { - from, to := uint64(fromHead.Height)+1, uint64(toHead.Height) - - s.stateLk.Lock() - s.state.ID++ - s.state.FromHeight = from - s.state.ToHeight = to - s.state.FromHash = fromHead.Hash() - s.state.ToHash = toHead.Hash() - s.state.Start = time.Now() - s.stateLk.Unlock() - - for processed := 0; from < to; from += uint64(processed) { - processed, err = s.processHeaders(ctx, from, to) - if err != nil && processed == 0 { - break - } - } - - s.stateLk.Lock() - s.state.End = time.Now() - s.state.Error = err - s.stateLk.Unlock() - return err -} - -// processHeaders gets and stores headers starting at the given 'from' height up to 'to' height - [from:to] -func (s *Syncer) processHeaders(ctx context.Context, from, to uint64) (int, error) { - headers, err := s.findHeaders(ctx, from, to) - if err != nil { - return 0, err - } - - return s.store.Append(ctx, headers...) -} - -// TODO(@Wondertan): Number of headers that can be requested at once. Either make this configurable or, -// find a proper rationale for constant. -// TODO(@Wondertan): Make configurable -var requestSize uint64 = 512 - -// findHeaders gets headers from either remote peers or from local cache of headers received by PubSub - [from:to] -func (s *Syncer) findHeaders(ctx context.Context, from, to uint64) ([]*header.ExtendedHeader, error) { - amount := to - from + 1 // + 1 to include 'to' height as well - if amount > requestSize { - to, amount = from+requestSize, requestSize - } - - out := make([]*header.ExtendedHeader, 0, amount) - for from < to { - // if we have some range cached - use it - r, ok := s.pending.FirstRangeWithin(from, to) - if !ok { - hs, err := s.exchange.GetRangeByHeight(ctx, from, amount) - return append(out, hs...), err - } - - // first, request everything between from and start of the found range - hs, err := s.exchange.GetRangeByHeight(ctx, from, r.start-from) - if err != nil { - return nil, err - } - out = append(out, hs...) - from += uint64(len(hs)) - - // then, apply cached range if any - cached, ln := r.Before(to) - out = append(out, cached...) 
- from += ln - } - - return out, nil -} diff --git a/header/sync/sync_test.go b/header/sync/sync_test.go deleted file mode 100644 index 12e160ef6d..0000000000 --- a/header/sync/sync_test.go +++ /dev/null @@ -1,155 +0,0 @@ -package sync - -import ( - "context" - "testing" - "time" - - pubsub "github.com/libp2p/go-libp2p-pubsub" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/celestiaorg/celestia-node/header" - "github.com/celestiaorg/celestia-node/header/local" - "github.com/celestiaorg/celestia-node/header/store" -) - -func TestSyncSimpleRequestingHead(t *testing.T) { - // this way we force local head of Syncer to expire, so it requests a new one from trusted peer - header.TrustingPeriod = time.Microsecond - requestSize = 13 // just some random number - - ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) - t.Cleanup(cancel) - - suite := header.NewTestSuite(t, 3) - head := suite.Head() - - remoteStore := store.NewTestStore(ctx, t, head) - _, err := remoteStore.Append(ctx, suite.GenExtendedHeaders(100)...) - require.NoError(t, err) - - _, err = remoteStore.GetByHeight(ctx, 100) - require.NoError(t, err) - - localStore := store.NewTestStore(ctx, t, head) - syncer := NewSyncer(local.NewExchange(remoteStore), localStore, &header.DummySubscriber{}) - err = syncer.Start(ctx) - require.NoError(t, err) - - _, err = localStore.GetByHeight(ctx, 100) - require.NoError(t, err) - - exp, err := remoteStore.Head(ctx) - require.NoError(t, err) - - have, err := localStore.Head(ctx) - require.NoError(t, err) - assert.Equal(t, exp.Height, have.Height) - assert.Empty(t, syncer.pending.Head()) - - state := syncer.State() - assert.Equal(t, uint64(exp.Height), state.Height) - assert.Equal(t, uint64(2), state.FromHeight) - assert.Equal(t, uint64(exp.Height), state.ToHeight) - assert.True(t, state.Finished(), state) -} - -func TestSyncCatchUp(t *testing.T) { - // just set a big enough value, so we trust local header and don't request anything - header.TrustingPeriod = time.Minute - - ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) - t.Cleanup(cancel) - - suite := header.NewTestSuite(t, 3) - head := suite.Head() - - remoteStore := store.NewTestStore(ctx, t, head) - localStore := store.NewTestStore(ctx, t, head) - syncer := NewSyncer(local.NewExchange(remoteStore), localStore, &header.DummySubscriber{}) - // 1. Initial sync - err := syncer.Start(ctx) - require.NoError(t, err) - - // 2. chain grows and syncer misses that - _, err = remoteStore.Append(ctx, suite.GenExtendedHeaders(100)...) - require.NoError(t, err) - - // 3. syncer rcvs header from the future and starts catching-up - res := syncer.processIncoming(ctx, suite.GenExtendedHeaders(1)[0]) - assert.Equal(t, pubsub.ValidationAccept, res) - - _, err = localStore.GetByHeight(ctx, 102) - require.NoError(t, err) - - exp, err := remoteStore.Head(ctx) - require.NoError(t, err) - - // 4. 
assert syncer caught-up - have, err := localStore.Head(ctx) - require.NoError(t, err) - assert.Equal(t, exp.Height+1, have.Height) // plus one as we didn't add last header to remoteStore - assert.Empty(t, syncer.pending.Head()) - - state := syncer.State() - assert.Equal(t, uint64(exp.Height+1), state.Height) - assert.Equal(t, uint64(2), state.FromHeight) - assert.Equal(t, uint64(exp.Height+1), state.ToHeight) - assert.True(t, state.Finished(), state) -} - -func TestSyncPendingRangesWithMisses(t *testing.T) { - // just set a big enough value, so we trust local header and don't request anything - header.TrustingPeriod = time.Minute - - ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) - t.Cleanup(cancel) - - suite := header.NewTestSuite(t, 3) - head := suite.Head() - - remoteStore := store.NewTestStore(ctx, t, head) - localStore := store.NewTestStore(ctx, t, head) - syncer := NewSyncer(local.NewExchange(remoteStore), localStore, &header.DummySubscriber{}) - err := syncer.Start(ctx) - require.NoError(t, err) - - // miss 1 (helps to test that Syncer properly requests missed Headers from Exchange) - _, err = remoteStore.Append(ctx, suite.GenExtendedHeaders(1)...) - require.NoError(t, err) - - range1 := suite.GenExtendedHeaders(15) - _, err = remoteStore.Append(ctx, range1...) - require.NoError(t, err) - - // miss 2 - _, err = remoteStore.Append(ctx, suite.GenExtendedHeaders(3)...) - require.NoError(t, err) - - range2 := suite.GenExtendedHeaders(23) - _, err = remoteStore.Append(ctx, range2...) - require.NoError(t, err) - - // manually add to pending - for _, h := range append(range1, range2...) { - syncer.pending.Add(h) - } - - // and fire up a sync - syncer.sync(ctx) - - _, err = remoteStore.GetByHeight(ctx, 43) - require.NoError(t, err) - _, err = localStore.GetByHeight(ctx, 43) - require.NoError(t, err) - - exp, err := remoteStore.Head(ctx) - require.NoError(t, err) - - have, err := localStore.Head(ctx) - require.NoError(t, err) - - assert.Equal(t, exp.Height, have.Height) - assert.Empty(t, syncer.pending.Head()) // assert all cache from pending is used -} diff --git a/header/testing.go b/header/testing.go deleted file mode 100644 index 113d767802..0000000000 --- a/header/testing.go +++ /dev/null @@ -1,267 +0,0 @@ -// TODO(@Wondertan): Ideally, we should move that into subpackage, so this does not get included into binary of -// production code, but that does not matter at the moment. -package header - -import ( - "context" - - mrand "math/rand" - "testing" - "time" - - "github.com/ipfs/go-blockservice" - "github.com/stretchr/testify/require" - "github.com/tendermint/tendermint/crypto/tmhash" - "github.com/tendermint/tendermint/libs/bytes" - tmrand "github.com/tendermint/tendermint/libs/rand" - tmtime "github.com/tendermint/tendermint/libs/time" - "github.com/tendermint/tendermint/pkg/da" - tmproto "github.com/tendermint/tendermint/proto/tendermint/types" - "github.com/tendermint/tendermint/types" - "github.com/tendermint/tendermint/version" - - "github.com/celestiaorg/celestia-node/core" - - "github.com/celestiaorg/celestia-node/ipld" -) - -// TestSuite provides everything you need to test chain of Headers. -// If not, please don't hesitate to extend it for your case. -type TestSuite struct { - t *testing.T - - vals []types.PrivValidator - valSet *types.ValidatorSet - valPntr int - - head *ExtendedHeader -} - -// NewTestSuite setups a new test suite with a given number of validators. 
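The sync and store tests above all drive the suite the same way; condensed for orientation (the NewTestSuite implementation follows):

    suite := header.NewTestSuite(t, 3)      // 3 validators sign every generated commit
    head := suite.Head()                    // lazily builds the genesis header (height 1)
    headers := suite.GenExtendedHeaders(10) // heights 2..11, each adjacent and verifiable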
-func NewTestSuite(t *testing.T, num int) *TestSuite { - valSet, vals := core.RandValidatorSet(num, 10) - return &TestSuite{ - t: t, - vals: vals, - valSet: valSet, - } -} - -func (s *TestSuite) genesis() *ExtendedHeader { - gen := RandRawHeader(s.t) - gen.ValidatorsHash = s.valSet.Hash() - gen.NextValidatorsHash = s.valSet.Hash() - gen.Height = 1 - voteSet := types.NewVoteSet(gen.ChainID, gen.Height, 0, tmproto.PrecommitType, s.valSet) - commit, err := core.MakeCommit(RandBlockID(s.t), gen.Height, 0, voteSet, s.vals, time.Now()) - require.NoError(s.t, err) - dah := EmptyDAH() - eh := &ExtendedHeader{ - RawHeader: *gen, - Commit: commit, - ValidatorSet: s.valSet, - DAH: &dah, - } - require.NoError(s.t, eh.ValidateBasic()) - return eh -} - -func (s *TestSuite) Head() *ExtendedHeader { - if s.head == nil { - s.head = s.genesis() - } - return s.head -} - -func (s *TestSuite) GenExtendedHeaders(num int) []*ExtendedHeader { - headers := make([]*ExtendedHeader, num) - for i := range headers { - headers[i] = s.GenExtendedHeader() - } - return headers -} - -func (s *TestSuite) GenExtendedHeader() *ExtendedHeader { - if s.head == nil { - s.head = s.genesis() - return s.head - } - - dah := da.MinDataAvailabilityHeader() - height := s.Head().Height + 1 - rh := s.GenRawHeader(height, s.Head().Hash(), s.Head().Commit.Hash(), dah.Hash()) - s.head = &ExtendedHeader{ - RawHeader: *rh, - Commit: s.Commit(rh), - ValidatorSet: s.valSet, - DAH: &dah, - } - require.NoError(s.t, s.head.ValidateBasic()) - return s.head -} - -func (s *TestSuite) GenRawHeader( - height int64, lastHeader, lastCommit, dataHash bytes.HexBytes) *RawHeader { - rh := RandRawHeader(s.t) - rh.Height = height - rh.Time = time.Now() - rh.LastBlockID = types.BlockID{Hash: lastHeader} - rh.LastCommitHash = lastCommit - rh.DataHash = dataHash - rh.ValidatorsHash = s.valSet.Hash() - rh.NextValidatorsHash = s.valSet.Hash() - rh.ProposerAddress = s.nextProposer().Address - return rh -} - -func (s *TestSuite) Commit(h *RawHeader) *types.Commit { - bid := types.BlockID{ - Hash: h.Hash(), - // Unfortunately, we still have to commit PartSetHeader even we don't need it in Celestia - PartSetHeader: types.PartSetHeader{Total: 1, Hash: tmrand.Bytes(32)}, - } - round := int32(0) - comms := make([]types.CommitSig, len(s.vals)) - for i, val := range s.vals { - v := &types.Vote{ - ValidatorAddress: s.valSet.Validators[i].Address, - ValidatorIndex: int32(i), - Height: h.Height, - Round: round, - Timestamp: tmtime.Now(), - Type: tmproto.PrecommitType, - BlockID: bid, - } - sgntr, err := val.(types.MockPV).PrivKey.Sign(types.VoteSignBytes(h.ChainID, v.ToProto())) - require.Nil(s.t, err) - v.Signature = sgntr - comms[i] = v.CommitSig() - } - - return types.NewCommit(h.Height, round, bid, comms) -} - -func (s *TestSuite) nextProposer() *types.Validator { - if s.valPntr == len(s.valSet.Validators)-1 { - s.valPntr = 0 - } else { - s.valPntr++ - } - val := s.valSet.Validators[s.valPntr] - return val -} - -// RandExtendedHeader provides an ExtendedHeader fixture. 
-func RandExtendedHeader(t *testing.T) *ExtendedHeader { - rh := RandRawHeader(t) - valSet, vals := core.RandValidatorSet(3, 1) - rh.ValidatorsHash = valSet.Hash() - voteSet := types.NewVoteSet(rh.ChainID, rh.Height, 0, tmproto.PrecommitType, valSet) - commit, err := core.MakeCommit(RandBlockID(t), rh.Height, 0, voteSet, vals, time.Now()) - require.NoError(t, err) - dah := EmptyDAH() - return &ExtendedHeader{ - RawHeader: *rh, - Commit: commit, - ValidatorSet: valSet, - DAH: &dah, - } -} - -// RandRawHeader provides a RawHeader fixture. -func RandRawHeader(t *testing.T) *RawHeader { - return &RawHeader{ - Version: version.Consensus{Block: uint64(11), App: uint64(1)}, - ChainID: "test", - Height: mrand.Int63(), //nolint:gosec - Time: time.Now(), - LastBlockID: RandBlockID(t), - LastCommitHash: tmrand.Bytes(32), - DataHash: tmrand.Bytes(32), - ValidatorsHash: tmrand.Bytes(32), - NextValidatorsHash: tmrand.Bytes(32), - ConsensusHash: tmrand.Bytes(32), - AppHash: tmrand.Bytes(32), - LastResultsHash: tmrand.Bytes(32), - EvidenceHash: tmhash.Sum([]byte{}), - ProposerAddress: tmrand.Bytes(20), - } -} - -// RandBlockID provides a BlockID fixture. -func RandBlockID(t *testing.T) types.BlockID { - bid := types.BlockID{ - Hash: make([]byte, 32), - PartSetHeader: types.PartSetHeader{ - Total: 123, - Hash: make([]byte, 32), - }, - } - mrand.Read(bid.Hash) //nolint:gosec - mrand.Read(bid.PartSetHeader.Hash) //nolint:gosec - return bid -} - -// FraudMaker creates a custom ConstructFn that breaks the block at the given height. -func FraudMaker(t *testing.T, faultHeight int64) ConstructFn { - log.Warn("Corrupting block...", "height", faultHeight) - return func(ctx context.Context, - b *types.Block, - comm *types.Commit, - vals *types.ValidatorSet, - bServ blockservice.BlockService) (*ExtendedHeader, error) { - eh := &ExtendedHeader{ - RawHeader: b.Header, - Commit: comm, - ValidatorSet: vals, - } - - if b.Height == faultHeight { - eh = CreateFraudExtHeader(t, eh, bServ) - return eh, nil - } - return MakeExtendedHeader(ctx, b, comm, vals, bServ) - } -} - -func CreateFraudExtHeader(t *testing.T, eh *ExtendedHeader, dag blockservice.BlockService) *ExtendedHeader { - extended := ipld.RandEDS(t, 2) - shares := ipld.ExtractEDS(extended) - copy(shares[0][ipld.NamespaceSize:], shares[1][ipld.NamespaceSize:]) - extended, err := ipld.ImportShares(context.Background(), shares, dag) - require.NoError(t, err) - dah := da.NewDataAvailabilityHeader(extended) - eh.DAH = &dah - return eh -} - -type DummySubscriber struct { - Headers []*ExtendedHeader -} - -func (mhs *DummySubscriber) AddValidator(Validator) error { - return nil -} - -func (mhs *DummySubscriber) Subscribe() (Subscription, error) { - return mhs, nil -} - -func (mhs *DummySubscriber) NextHeader(ctx context.Context) (*ExtendedHeader, error) { - defer func() { - if len(mhs.Headers) > 1 { - // pop the already-returned header - cp := mhs.Headers - mhs.Headers = cp[1:] - } else { - mhs.Headers = make([]*ExtendedHeader, 0) - } - }() - if len(mhs.Headers) == 0 { - return nil, context.Canceled - } - return mhs.Headers[0], nil -} - -func (mhs *DummySubscriber) Stop(context.Context) error { return nil } -func (mhs *DummySubscriber) Cancel() {} diff --git a/header/verify.go b/header/verify.go deleted file mode 100644 index deb497645a..0000000000 --- a/header/verify.go +++ /dev/null @@ -1,97 +0,0 @@ -package header - -import ( - "bytes" - "fmt" - "time" - - "github.com/tendermint/tendermint/light" -) - -// TrustingPeriod is period through which we can trust a header's 
validators set. -// -// Should be significantly less than the unbonding period (e.g. unbonding -// period = 3 weeks, trusting period = 2 weeks). -// -// More specifically, trusting period + time needed to check headers + time -// needed to report and punish misbehavior should be less than the unbonding -// period. -// TODO(@Wondertan): We should request it from the network's state params -// or listen for network params changes to always have a topical value. -var TrustingPeriod = 168 * time.Hour - -// IsExpired checks if header is expired against trusting period. -func (eh *ExtendedHeader) IsExpired() bool { - expirationTime := eh.Time.Add(TrustingPeriod) - return !expirationTime.After(time.Now()) -} - -// VerifyNonAdjacent validates non-adjacent untrusted header against trusted 'eh'. -func (eh *ExtendedHeader) VerifyNonAdjacent(untrst *ExtendedHeader) error { - if err := eh.verify(untrst); err != nil { - return &VerifyError{Reason: err} - } - - // Ensure that untrusted commit has enough of trusted commit's power. - err := eh.ValidatorSet.VerifyCommitLightTrusting(eh.ChainID, untrst.Commit, light.DefaultTrustLevel) - if err != nil { - return &VerifyError{err} - } - - return nil -} - -// VerifyAdjacent validates adjacent untrusted header against trusted 'eh'. -func (eh *ExtendedHeader) VerifyAdjacent(untrst *ExtendedHeader) error { - if untrst.Height != eh.Height+1 { - return ErrNonAdjacent - } - - if err := eh.verify(untrst); err != nil { - return &VerifyError{Reason: err} - } - - // Check the validator hashes are the same - if !bytes.Equal(untrst.ValidatorsHash, eh.NextValidatorsHash) { - return &VerifyError{ - fmt.Errorf("expected old header next validators (%X) to match those from new header (%X)", - eh.NextValidatorsHash, - untrst.ValidatorsHash, - ), - } - } - - return nil -} - -// clockDrift defines how much new header's time can drift into -// the future relative to the now time during verification. -var clockDrift = 10 * time.Second - -// verify performs basic verification of untrusted header. -func (eh *ExtendedHeader) verify(untrst *ExtendedHeader) error { - if untrst.ChainID != eh.ChainID { - return fmt.Errorf("new untrusted header has different chain %s, not %s", untrst.ChainID, eh.ChainID) - } - - if !untrst.Time.After(eh.Time) { - return fmt.Errorf("expected new untrusted header time %v to be after old header time %v", untrst.Time, eh.Time) - } - - now := time.Now() - if !untrst.Time.Before(now.Add(clockDrift)) { - return fmt.Errorf( - "new untrusted header has a time from the future %v (now: %v, clockDrift: %v)", untrst.Time, now, clockDrift) - } - - return nil -} - -// VerifyError is thrown on during VerifyAdjacent and VerifyNonAdjacent if verification fails. 
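A worked example of the expiry rule in IsExpired above, given an ExtendedHeader `eh` and illustrative timestamps (the VerifyError type follows below):

    eh.Time = time.Now().Add(-167 * time.Hour)
    fmt.Println(eh.IsExpired()) // false: still inside the 168h TrustingPeriod

    eh.Time = time.Now().Add(-169 * time.Hour)
    fmt.Println(eh.IsExpired()) // true: eh.Time + TrustingPeriod is already in the past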
-type VerifyError struct { - Reason error -} - -func (vr *VerifyError) Error() string { - return fmt.Sprintf("header: verify: %s", vr.Reason.Error()) -} diff --git a/header/verify_test.go b/header/verify_test.go deleted file mode 100644 index 499624bb1d..0000000000 --- a/header/verify_test.go +++ /dev/null @@ -1,67 +0,0 @@ -package header - -import ( - "strconv" - "testing" - "time" - - "github.com/stretchr/testify/assert" - - tmrand "github.com/tendermint/tendermint/libs/rand" -) - -func TestVerifyAdjacent(t *testing.T) { - h := NewTestSuite(t, 2).GenExtendedHeaders(2) - trusted, untrusted := h[0], h[1] - tests := []struct { - prepare func() - err bool - }{ - { - prepare: func() {}, - err: false, - }, - { - prepare: func() { - untrusted.ValidatorsHash = tmrand.Bytes(32) - }, - err: true, - }, - { - prepare: func() { - untrusted.Time = untrusted.Time.Add(time.Minute) - }, - err: true, - }, - { - prepare: func() { - untrusted.Time = untrusted.Time.Truncate(time.Hour) - }, - err: true, - }, - { - prepare: func() { - untrusted.ChainID = "toaster" - }, - err: true, - }, - { - prepare: func() { - untrusted.Height++ - }, - err: true, - }, - } - - for i, test := range tests { - t.Run(strconv.Itoa(i), func(t *testing.T) { - test.prepare() - err := trusted.VerifyAdjacent(untrusted) - if test.err { - assert.Error(t, err) - } else { - assert.NoError(t, err) - } - }) - } -} diff --git a/ipld/add.go b/ipld/add.go deleted file mode 100644 index 6b768cb3a0..0000000000 --- a/ipld/add.go +++ /dev/null @@ -1,112 +0,0 @@ -package ipld - -import ( - "context" - "fmt" - "math" - - "github.com/ipfs/go-blockservice" - ipld "github.com/ipfs/go-ipld-format" - - "github.com/celestiaorg/nmt" - "github.com/celestiaorg/rsmt2d" - - "github.com/tendermint/tendermint/pkg/wrapper" -) - -// AddShares erasures and extends shares to blockservice.BlockService using the provided ipld.NodeAdder. 
-func AddShares( - ctx context.Context, - shares []Share, - adder blockservice.BlockService, -) (*rsmt2d.ExtendedDataSquare, error) { - if len(shares) == 0 { - return nil, fmt.Errorf("empty data") // empty block is not an empty Data - } - squareSize := int(math.Sqrt(float64(len(shares)))) - // create nmt adder wrapping batch adder with calculated size - bs := batchSize(squareSize * 2) - batchAdder := NewNmtNodeAdder(ctx, adder, ipld.MaxSizeBatchOption(bs)) - // create the nmt wrapper to generate row and col commitments - tree := wrapper.NewErasuredNamespacedMerkleTree(uint64(squareSize), nmt.NodeVisitor(batchAdder.Visit)) - // recompute the eds - eds, err := rsmt2d.ComputeExtendedDataSquare(shares, DefaultRSMT2DCodec(), tree.Constructor) - if err != nil { - return nil, fmt.Errorf("failure to recompute the extended data square: %w", err) - } - // compute roots - eds.RowRoots() - // commit the batch to ipfs - return eds, batchAdder.Commit() -} - -// ImportShares imports flattend chunks of data into Extended Data square and saves it in blockservice.BlockService -func ImportShares( - ctx context.Context, - shares [][]byte, - adder blockservice.BlockService) (*rsmt2d.ExtendedDataSquare, error) { - if len(shares) == 0 { - return nil, fmt.Errorf("ipld: importing empty data") - } - squareSize := int(math.Sqrt(float64(len(shares)))) - // create nmt adder wrapping batch adder with calculated size - bs := batchSize(squareSize * 2) - batchAdder := NewNmtNodeAdder(ctx, adder, ipld.MaxSizeBatchOption(bs)) - // create the nmt wrapper to generate row and col commitments - tree := wrapper.NewErasuredNamespacedMerkleTree(uint64(squareSize/2), nmt.NodeVisitor(batchAdder.Visit)) - // recompute the eds - eds, err := rsmt2d.ImportExtendedDataSquare(shares, DefaultRSMT2DCodec(), tree.Constructor) - if err != nil { - return nil, fmt.Errorf("failure to recompute the extended data square: %w", err) - } - // compute roots - eds.RowRoots() - // commit the batch to DAG - return eds, batchAdder.Commit() -} - -// ExtractODS returns the original shares of the given ExtendedDataSquare. This -// is a helper function for circumstances where AddShares must be used after the EDS has already -// been generated. -func ExtractODS(eds *rsmt2d.ExtendedDataSquare) []Share { - origWidth := eds.Width() / 2 - origShares := make([][]byte, origWidth*origWidth) - for i := uint(0); i < origWidth; i++ { - row := eds.Row(i) - for j := uint(0); j < origWidth; j++ { - origShares[(i*origWidth)+j] = row[j] - } - } - return origShares -} - -// ExtractEDS takes an EDS and extracts all shares from it in a flattened slice(row by row). -func ExtractEDS(eds *rsmt2d.ExtendedDataSquare) []Share { - flattenedEDSSize := eds.Width() * eds.Width() - out := make([][]byte, flattenedEDSSize) - count := 0 - for i := uint(0); i < eds.Width(); i++ { - for _, share := range eds.Row(i) { - out[count] = share - count++ - } - } - return out -} - -// batchSize calculates the amount of nodes that are generated from block of 'squareSizes' -// to be batched in one write. 
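The batchSize helper defined just below is pure node counting, and the callers above always hand it the extended width (squareSize * 2). A worked instance of the formula, as a standalone sketch:

```go
package main

import "fmt"

// nodes counts the IPLD nodes an EDS of the given extended width w produces,
// mirroring the formula in the batchSize helper below:
// 2w trees (w rows + w cols), each with 2w-1 nodes in a binary tree,
// minus the w*w leaves that rows and columns share (identical CIDs).
func nodes(w int) int {
	return (w*2-1)*w*2 - w*w
}

func main() {
	// a 2x2 original square extends to width 4:
	// (4*2-1)*4*2 - 4*4 = 56 - 16 = 40 nodes per batch
	for _, w := range []int{4, 8, 16} {
		fmt.Printf("extended width %3d -> %6d nodes\n", w, nodes(w))
	}
}
```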
-func batchSize(squareSize int) int { - // (squareSize*2-1) - amount of nodes in a generated binary tree - // squareSize*2 - the total number of trees, both for rows and cols - // (squareSize*squareSize) - all the shares - // - // Note that while our IPLD tree looks like this: - // ---X - // -X---X - // X-X-X-X - // X-X-X-X - // here we count leaves only once: the CIDs are the same for columns and rows - // and for the last two layers as well: - return (squareSize*2-1)*squareSize*2 - (squareSize * squareSize) -} diff --git a/ipld/get.go b/ipld/get.go deleted file mode 100644 index 6e00ca5f32..0000000000 --- a/ipld/get.go +++ /dev/null @@ -1,194 +0,0 @@ -package ipld - -import ( - "context" - - "github.com/ipfs/go-blockservice" - "github.com/ipfs/go-cid" - ipld "github.com/ipfs/go-ipld-format" - - "github.com/celestiaorg/celestia-node/ipld/plugin" - "github.com/celestiaorg/nmt" - - "github.com/celestiaorg/nmt/namespace" -) - -// GetShare fetches and returns the data for leaf `leafIndex` of root `rootCid`. -func GetShare( - ctx context.Context, - bGetter blockservice.BlockGetter, - rootCid cid.Cid, - leafIndex int, - totalLeafs int, // this corresponds to the extended square width -) (Share, error) { - nd, err := GetLeaf(ctx, bGetter, rootCid, leafIndex, totalLeafs) - if err != nil { - return nil, err - } - - return leafToShare(nd), nil -} - -// GetLeaf fetches and returns the raw leaf. -// It walks down the IPLD NMT tree until it finds the requested one. -func GetLeaf(ctx context.Context, bGetter blockservice.BlockGetter, root cid.Cid, leaf, total int) (ipld.Node, error) { - // request the node - nd, err := plugin.GetNode(ctx, bGetter, root) - if err != nil { - return nil, err - } - - // look for links - lnks := nd.Links() - if len(lnks) == 1 { - // in case there is only one we reached tree's bottom, so finally request the leaf. - return plugin.GetNode(ctx, bGetter, lnks[0].Cid) - } - - // route walk to appropriate children - total /= 2 // as we are using binary tree, every step decreases total leaves in a half - if leaf < total { - root = lnks[0].Cid // if target leave on the left, go with walk down the first children - } else { - root, leaf = lnks[1].Cid, leaf-total // otherwise go down the second - } - - // recursively walk down through selected children - return GetLeaf(ctx, bGetter, root, leaf, total) -} - -// GetProofsForShares fetches Merkle proofs for the given shares -// and returns the result as an array of ShareWithProof. -func GetProofsForShares( - ctx context.Context, - bGetter blockservice.BlockGetter, - root cid.Cid, - shares [][]byte, -) ([]*ShareWithProof, error) { - proofs := make([]*ShareWithProof, len(shares)) - for index, share := range shares { - if share != nil { - proof := make([]cid.Cid, 0) - // TODO(@vgonkivs): Combine GetLeafData and GetProof in one function as the are traversing the same tree. - // Add options that will control what data will be fetched. - s, err := GetLeaf(ctx, bGetter, root, index, len(shares)) - if err != nil { - return nil, err - } - proof, err = GetProof(ctx, bGetter, root, proof, index, len(shares)) - if err != nil { - return nil, err - } - proofs[index] = NewShareWithProof(index, s.RawData()[1:], proof) - } - } - - return proofs, nil -} - -// GetProof fetches and returns the leaf's Merkle Proof. 
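GetProof, whose doc comment continues below, descends the tree exactly like GetLeaf above: the leaf count halves at every level, and the target index is reduced by the new total whenever the walk goes right. The index arithmetic can be sketched without any IPFS machinery:

```go
package main

import "fmt"

// path returns the left/right turns taken from the root to reach leaf
// `leaf` of `total` leaves, following the same arithmetic as the deleted
// GetLeaf/GetProof: halve total at each level, and subtract the halved
// total from the index when descending into the right subtree.
func path(leaf, total int) []string {
	var turns []string
	for total > 1 {
		total /= 2
		if leaf < total {
			turns = append(turns, "L")
		} else {
			turns = append(turns, "R")
			leaf -= total
		}
	}
	return turns
}

func main() {
	// leaf 5 of 8: 5 >= 4 -> R (5-4=1), 1 < 2 -> L, 1 >= 1 -> R
	fmt.Println(path(5, 8)) // [R L R]
}
```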
-// It walks down the IPLD NMT tree until it reaches the leaf and returns collected proof -func GetProof( - ctx context.Context, - bGetter blockservice.BlockGetter, - root cid.Cid, - proof []cid.Cid, - leaf, total int, -) ([]cid.Cid, error) { - // request the node - nd, err := plugin.GetNode(ctx, bGetter, root) - if err != nil { - return nil, err - } - // look for links - lnks := nd.Links() - if len(lnks) == 1 { - p := make([]cid.Cid, len(proof)) - copy(p, proof) - return p, nil - } - - // route walk to appropriate children - total /= 2 // as we are using binary tree, every step decreases total leaves in a half - if leaf < total { - root = lnks[0].Cid // if target leave on the left, go with walk down the first children - proof = append(proof, lnks[1].Cid) - } else { - root, leaf = lnks[1].Cid, leaf-total // otherwise go down the second - proof, err = GetProof(ctx, bGetter, root, proof, leaf, total) - if err != nil { - return nil, err - } - return append(proof, lnks[0].Cid), nil - } - - // recursively walk down through selected children - return GetProof(ctx, bGetter, root, proof, leaf, total) -} - -// GetSharesByNamespace returns all the shares from the given root -// with the given namespace.ID. -func GetSharesByNamespace( - ctx context.Context, - bGetter blockservice.BlockGetter, - root cid.Cid, - nID namespace.ID, -) ([]Share, error) { - leaves, err := GetLeavesByNamespace(ctx, bGetter, root, nID) - if err != nil { - return nil, err - } - - shares := make([]Share, len(leaves)) - for i, leaf := range leaves { - shares[i] = leafToShare(leaf) - } - - return shares, nil -} - -// GetLeavesByNamespace returns all the leaves from the given root with the given namespace.ID. -// If nothing is found it returns both data and err as nil. -func GetLeavesByNamespace( - ctx context.Context, - bGetter blockservice.BlockGetter, - root cid.Cid, - nID namespace.ID, -) ([]ipld.Node, error) { - err := SanityCheckNID(nID) - if err != nil { - return nil, err - } - rootH := plugin.NamespacedSha256FromCID(root) - if nID.Less(nmt.MinNamespace(rootH, nID.Size())) || !nID.LessOrEqual(nmt.MaxNamespace(rootH, nID.Size())) { - return nil, nil - } - // request the node - nd, err := plugin.GetNode(ctx, bGetter, root) - if err != nil { - return nil, err - } - // check links - lnks := nd.Links() - if len(lnks) == 1 { - // if there is one link, then this is a leaf node, so just return it - return []ipld.Node{nd}, nil - } - // if there are some links, then traverse them - var out []ipld.Node - for _, lnk := range nd.Links() { - nds, err := GetLeavesByNamespace(ctx, bGetter, lnk.Cid, nID) - if err != nil { - return out, err - } - out = append(out, nds...) - } - return out, nil -} - -// leafToShare converts an NMT leaf into a Share. -func leafToShare(nd ipld.Node) Share { - // * First byte represents the type of the node, and is unrelated to the actual share data - // * Additional namespace is prepended so that parity data can be identified with a parity namespace, which we cut off - return nd.RawData()[1+NamespaceSize:] // TODO(@Wondertan): Rework NMT/IPLD plugin to avoid the type byte -} diff --git a/ipld/get_shares.go b/ipld/get_shares.go deleted file mode 100644 index 5253a1f2eb..0000000000 --- a/ipld/get_shares.go +++ /dev/null @@ -1,119 +0,0 @@ -package ipld - -import ( - "context" - "sync" - - "github.com/gammazero/workerpool" - "github.com/ipfs/go-blockservice" - "github.com/ipfs/go-cid" - - "github.com/celestiaorg/celestia-node/ipld/plugin" -) - -// NumWorkersLimit sets global limit for workers spawned by GetShares. 
-// GetShares could be called MaxSquareSize(128) times per data square each -// spawning up to 128/2 goroutines and altogether this is 8192. Considering -// there can be N blocks fetched at the same time, e.g. during catching up data -// from the past, we multiply this number by the amount of allowed concurrent -// data square fetches(NumConcurrentSquares). -// -// NOTE: This value only limits amount of simultaneously running workers that -// are spawned as the load increases and are killed, once the load declines. -// TODO(@Wondertan): This assumes we have parallelized DASer implemented. -// Sync the values once it is shipped. -// TODO(@Wondertan): Allow configuration of values without global state. -var NumWorkersLimit = MaxSquareSize * MaxSquareSize / 2 * NumConcurrentSquares - -// NumConcurrentSquares limits the amount of squares that are fetched -// concurrently/simultaneously. -var NumConcurrentSquares = 8 - -// Global worker pool that globally controls and limits goroutines spawned by -// GetShares. -// TODO(@Wondertan): Idle timeout for workers needs to be configured to around block time, -// so that workers spawned between each reconstruction for every new block are reused. -var pool = workerpool.New(NumWorkersLimit) - -// GetShares gets shares from either local storage, or, if not found, requests -// them from immediate/connected peers. It puts them into the given 'put' func, -// does not return any error, and returns/unblocks only on success -// (got all shares) or on context cancellation. -// -// It works concurrently by spawning workers in the pool which do one basic -// thing - block until data is fetched, s. t. share processing is never -// sequential, and thus we request *all* the shares available without waiting -// for others to finish. It is the required property to maximize data -// availability. As a side effect, we get concurrent tree traversal reducing -// time to data time. -// -// GetShares relies on the fact that the underlying data structure is a binary -// tree, so it's not suitable for anything else besides that. Parts on the -// implementation that rely on this property are explicitly tagged with -// (bin-tree-feat). -func GetShares(ctx context.Context, bGetter blockservice.BlockGetter, root cid.Cid, shares int, put func(int, Share)) { - // job is not used anywhere else, so can be kept here - type job struct { - id cid.Cid - pos int - } - // this buffer ensures writes to 'jobs' are never blocking (bin-tree-feat) - jobs := make(chan *job, (shares+1)/2) // +1 for the case where 'shares' is 1 - jobs <- &job{id: root} - // total is an amount of routines spawned and total amount of nodes we process (bin-tree-feat) - // so we can specify exact amount of loops we do, and wait for this amount - // of routines to finish processing - total := shares*2 - 1 - wg := sync.WaitGroup{} - wg.Add(total) - // all preparations are done, so begin processing jobs - for i := 0; i < total; i++ { - select { - case j := <-jobs: - // work over each job concurrently, s.t. 
shares do not block - // processing of each other - pool.Submit(func() { - defer wg.Done() - nd, err := plugin.GetNode(ctx, bGetter, j.id) - if err != nil { - // we don't really care about errors here - // just fetch as much as possible - return - } - // check links to know what we should do with the node - lnks := nd.Links() - if len(lnks) == 1 { // so we are almost there - // the reason why the comment on 'total' is lying, as each - // leaf has its own additional leaf(hack) so get it - nd, err := plugin.GetNode(ctx, bGetter, lnks[0].Cid) - if err != nil { - // again, we don't care - return - } - // successfully fetched a share/leaf - // ladies and gentlemen, we got em! - put(j.pos, leafToShare(nd)) - return - } - // ok, we found more links - for i, lnk := range lnks { - // send those to be processed - select { - case jobs <- &job{ - id: lnk.Cid, - // calc position for children nodes (bin-tree-feat), - // s.t. 'if' above knows where to put a share - pos: j.pos*2 + i, - }: - case <-ctx.Done(): - return - } - } - }) - case <-ctx.Done(): - return - } - } - // "tick-tack, how much more should I wait before you get those shares?" - the goroutine - wg.Wait() -} diff --git a/ipld/get_test.go b/ipld/get_test.go deleted file mode 100644 index dade592cfa..0000000000 --- a/ipld/get_test.go +++ /dev/null @@ -1,389 +0,0 @@ -package ipld - -import ( - "context" - "math" - "math/rand" - "strconv" - "testing" - "time" - - "github.com/ipfs/go-blockservice" - "github.com/ipfs/go-cid" - ds "github.com/ipfs/go-datastore" - dssync "github.com/ipfs/go-datastore/sync" - blockstore "github.com/ipfs/go-ipfs-blockstore" - offline "github.com/ipfs/go-ipfs-exchange-offline" - mdutils "github.com/ipfs/go-merkledag/test" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "github.com/tendermint/tendermint/pkg/da" - "github.com/tendermint/tendermint/pkg/wrapper" - - "github.com/celestiaorg/celestia-node/ipld/plugin" - "github.com/celestiaorg/nmt/namespace" - "github.com/celestiaorg/rsmt2d" -) - -func TestGetShare(t *testing.T) { - const size = 8 - - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - defer cancel() - bServ := mdutils.Bserv() - - // generate random shares for the nmt - shares := RandShares(t, size*size) - eds, err := AddShares(ctx, shares, bServ) - require.NoError(t, err) - - for i, leaf := range shares { - row := i / size - pos := i - (size * row) - share, err := GetShare(ctx, bServ, plugin.MustCidFromNamespacedSha256(eds.RowRoots()[row]), pos, size*2) - require.NoError(t, err) - assert.Equal(t, leaf, share) - } -} - -func TestBlockRecovery(t *testing.T) { - originalSquareWidth := 8 - shareCount := originalSquareWidth * originalSquareWidth - extendedSquareWidth := 2 * originalSquareWidth - extendedShareCount := extendedSquareWidth * extendedSquareWidth - - // generate test data - quarterShares := RandShares(t, shareCount) - allShares := RandShares(t, shareCount) - - testCases := []struct { - name string - shares []Share - expectErr bool - errString string - d int // number of shares to delete - }{ - {"missing 1/2 shares", quarterShares, false, "", extendedShareCount / 2}, - {"missing 1/4 shares", quarterShares, false, "", extendedShareCount / 4}, - {"max missing data", quarterShares, false, "", (originalSquareWidth + 1) * (originalSquareWidth + 1)}, - {"missing all but one shares", allShares, true, "failed to solve data square", extendedShareCount - 1}, - } - for _, tc := range testCases { - tc := tc - - t.Run(tc.name, func(t *testing.T) { - squareSize 
:= uint64(math.Sqrt(float64(len(tc.shares)))) - - // create trees for creating roots - tree := wrapper.NewErasuredNamespacedMerkleTree(squareSize) - recoverTree := wrapper.NewErasuredNamespacedMerkleTree(squareSize) - - eds, err := rsmt2d.ComputeExtendedDataSquare(tc.shares, rsmt2d.NewRSGF8Codec(), tree.Constructor) - require.NoError(t, err) - - // calculate roots using the first complete square - rowRoots := eds.RowRoots() - colRoots := eds.ColRoots() - - flat := ExtractEDS(eds) - - // recover a partially complete square - rdata := removeRandShares(flat, tc.d) - eds, err = rsmt2d.ImportExtendedDataSquare( - rdata, - rsmt2d.NewRSGF8Codec(), - recoverTree.Constructor, - ) - require.NoError(t, err) - - err = eds.Repair(rowRoots, colRoots, rsmt2d.NewRSGF8Codec(), recoverTree.Constructor) - if tc.expectErr { - require.Error(t, err) - require.Contains(t, err.Error(), tc.errString) - return - } - assert.NoError(t, err) - - reds, err := rsmt2d.ImportExtendedDataSquare(rdata, rsmt2d.NewRSGF8Codec(), tree.Constructor) - require.NoError(t, err) - // check that the squares are equal - assert.Equal(t, ExtractEDS(eds), ExtractEDS(reds)) - }) - } -} - -func Test_ConvertEDStoShares(t *testing.T) { - squareWidth := 16 - shares := RandShares(t, squareWidth*squareWidth) - - // create the nmt wrapper to generate row and col commitments - tree := wrapper.NewErasuredNamespacedMerkleTree(uint64(squareWidth)) - - // compute extended square - eds, err := rsmt2d.ComputeExtendedDataSquare(shares, rsmt2d.NewRSGF8Codec(), tree.Constructor) - require.NoError(t, err) - - resshares := ExtractODS(eds) - require.Equal(t, shares, resshares) -} - -// removes d shares from data -func removeRandShares(data [][]byte, d int) [][]byte { - count := len(data) - // remove shares randomly - for i := 0; i < d; { - ind := rand.Intn(count) - if len(data[ind]) == 0 { - continue - } - data[ind] = nil - i++ - } - return data -} - -func TestGetSharesByNamespace(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - bServ := mdutils.Bserv() - - var tests = []struct { - rawData []Share - }{ - {rawData: RandShares(t, 4)}, - {rawData: RandShares(t, 16)}, - } - - for i, tt := range tests { - t.Run(strconv.Itoa(i), func(t *testing.T) { - // choose random nID from rand shares - expected := tt.rawData[len(tt.rawData)/2] - nID := expected[:NamespaceSize] - - // change rawData to contain several shares with same nID - tt.rawData[(len(tt.rawData)/2)+1] = expected - - // put raw data in BlockService - eds, err := AddShares(ctx, tt.rawData, bServ) - require.NoError(t, err) - - for _, row := range eds.RowRoots() { - rcid := plugin.MustCidFromNamespacedSha256(row) - shares, err := GetSharesByNamespace(ctx, bServ, rcid, nID) - require.NoError(t, err) - - for _, share := range shares { - assert.Equal(t, expected, share) - } - } - }) - } -} - -func TestGetLeavesByNamespace_AbsentNamespaceId(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - bServ := mdutils.Bserv() - - shares := RandShares(t, 16) - - minNid := make([]byte, NamespaceSize) - midNid := make([]byte, NamespaceSize) - maxNid := make([]byte, NamespaceSize) - - numberOfShares := len(shares) - - copy(minNid, shares[0][:NamespaceSize]) - copy(maxNid, shares[numberOfShares-1][:NamespaceSize]) - copy(midNid, shares[numberOfShares/2][:NamespaceSize]) - - // create min nid missing data by replacing first namespace id with second - minNidMissingData := make([]Share, len(shares)) - copy(minNidMissingData, shares) - 
copy(minNidMissingData[0][:NamespaceSize], shares[1][:NamespaceSize]) - - // create max nid missing data by replacing last namespace id with second last - maxNidMissingData := make([]Share, len(shares)) - copy(maxNidMissingData, shares) - copy(maxNidMissingData[numberOfShares-1][:NamespaceSize], shares[numberOfShares-2][:NamespaceSize]) - - // create mid nid missing data by replacing middle namespace id with the one after - midNidMissingData := make([]Share, len(shares)) - copy(midNidMissingData, shares) - copy(midNidMissingData[numberOfShares/2][:NamespaceSize], shares[(numberOfShares/2)+1][:NamespaceSize]) - - var tests = []struct { - name string - data []Share - missingNid []byte - }{ - {name: "Namespace id less than the minimum namespace in data", data: minNidMissingData, missingNid: minNid}, - {name: "Namespace id greater than the maximum namespace in data", data: maxNidMissingData, missingNid: maxNid}, - {name: "Namespace id in range but still missing", data: midNidMissingData, missingNid: midNid}, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - eds, err := AddShares(ctx, shares, bServ) - require.NoError(t, err) - assertNoRowContainsNID(t, bServ, eds, tt.missingNid) - }) - } -} - -func TestGetLeavesByNamespace_MultipleRowsContainingSameNamespaceId(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - bServ := mdutils.Bserv() - - shares := RandShares(t, 16) - - // set all shares to the same namespace and data but the last one - nid := shares[0][:NamespaceSize] - commonNamespaceData := shares[0] - - for i, nspace := range shares { - if i == len(shares)-1 { - break - } - - copy(nspace, commonNamespaceData) - } - - eds, err := AddShares(ctx, shares, bServ) - require.NoError(t, err) - - for _, row := range eds.RowRoots() { - rcid := plugin.MustCidFromNamespacedSha256(row) - nodes, err := GetLeavesByNamespace(ctx, bServ, rcid, nid) - require.NoError(t, err) - - for _, node := range nodes { - // test that the data returned by GetLeavesByNamespace for nid - // matches the commonNamespaceData that was copied across almost all data - share := node.RawData()[1:] - assert.Equal(t, commonNamespaceData, share[NamespaceSize:]) - } - } -} - -func TestBatchSize(t *testing.T) { - tests := []struct { - name string - origWidth int - }{ - {"2", 2}, - {"4", 4}, - {"8", 8}, - {"16", 16}, - {"32", 32}, - // {"64", 64}, // test case too large for CI with race detector - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), time.Second*time.Duration(tt.origWidth)) - defer cancel() - - bs := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())) - - eds := RandEDS(t, tt.origWidth) - _, err := AddShares(ctx, ExtractODS(eds), blockservice.New(bs, offline.Exchange(bs))) - require.NoError(t, err) - - out, err := bs.AllKeysChan(ctx) - require.NoError(t, err) - - var count int - for range out { - count++ - } - extendedWidth := tt.origWidth * 2 - assert.Equalf(t, count, batchSize(extendedWidth), "batchSize(%v)", extendedWidth) - }) - } -} - -func assertNoRowContainsNID( - t *testing.T, - bServ blockservice.BlockService, - eds *rsmt2d.ExtendedDataSquare, - nID namespace.ID, -) { - // get all row root cids - rowRootCIDs := make([]cid.Cid, len(eds.RowRoots())) - for i, rowRoot := range eds.RowRoots() { - rowRootCIDs[i] = plugin.MustCidFromNamespacedSha256(rowRoot) - } - - // for each row root cid check if the minNID exists - for _, rowCID := range rowRootCIDs { - data, err 
:= GetLeavesByNamespace(context.Background(), bServ, rowCID, nID) - assert.Nil(t, data) - assert.Nil(t, err) - } -} - -func TestGetProof(t *testing.T) { - const width = 4 - - ctx, cancel := context.WithTimeout(context.Background(), time.Second*2) - defer cancel() - bServ := mdutils.Bserv() - - shares := RandShares(t, width*width) - in, err := AddShares(ctx, shares, bServ) - require.NoError(t, err) - - dah := da.NewDataAvailabilityHeader(in) - var tests = []struct { - roots [][]byte - }{ - {dah.RowsRoots}, - {dah.ColumnRoots}, - } - - for i, tt := range tests { - t.Run(strconv.Itoa(i), func(t *testing.T) { - for _, root := range tt.roots { - rootCid := plugin.MustCidFromNamespacedSha256(root) - for index := 0; uint(index) < in.Width(); index++ { - proof := make([]cid.Cid, 0) - proof, err = GetProof(ctx, bServ, rootCid, proof, index, int(in.Width())) - require.NoError(t, err) - node, err := GetLeaf(ctx, bServ, rootCid, index, int(in.Width())) - require.NoError(t, err) - inclusion := NewShareWithProof(index, node.RawData()[1:], proof) - require.True(t, inclusion.Validate(rootCid)) - } - } - }) - } -} - -func TestGetProofs(t *testing.T) { - const width = 4 - ctx, cancel := context.WithTimeout(context.Background(), time.Second*2) - defer cancel() - bServ := mdutils.Bserv() - - shares := RandShares(t, width*width) - in, err := AddShares(ctx, shares, bServ) - require.NoError(t, err) - - dah := da.NewDataAvailabilityHeader(in) - for _, root := range dah.ColumnRoots { - rootCid := plugin.MustCidFromNamespacedSha256(root) - data := make([][]byte, 0, in.Width()) - for index := 0; uint(index) < in.Width(); index++ { - node, err := GetLeaf(ctx, bServ, rootCid, index, int(in.Width())) - require.NoError(t, err) - data = append(data, node.RawData()[9:]) - } - - proves, err := GetProofsForShares(ctx, bServ, rootCid, data) - require.NoError(t, err) - for _, proof := range proves { - require.True(t, proof.Validate(rootCid)) - } - } -} diff --git a/ipld/helpers.go b/ipld/helpers.go deleted file mode 100644 index 791f7ee239..0000000000 --- a/ipld/helpers.go +++ /dev/null @@ -1,10 +0,0 @@ -package ipld - -import "fmt" - -func SanityCheckNID(nID []byte) error { - if len(nID) != NamespaceSize { - return fmt.Errorf("expected namespace ID of size %d, got %d", NamespaceSize, len(nID)) - } - return nil -} diff --git a/ipld/nmt_adder.go b/ipld/nmt_adder.go deleted file mode 100644 index 4b49eedfce..0000000000 --- a/ipld/nmt_adder.go +++ /dev/null @@ -1,62 +0,0 @@ -package ipld - -import ( - "context" - - "github.com/ipfs/go-blockservice" - "github.com/ipfs/go-merkledag" - - "github.com/ipfs/go-cid" - ipld "github.com/ipfs/go-ipld-format" - - "github.com/celestiaorg/celestia-node/ipld/plugin" -) - -// NmtNodeAdder adds ipld.Nodes to the underlying ipld.Batch if it is inserted -// into a nmt tree. -type NmtNodeAdder struct { - ctx context.Context - add *ipld.Batch - leaves *cid.Set - err error -} - -// NewNmtNodeAdder returns a new NmtNodeAdder with the provided context and -// batch. Note that the context provided should have a timeout -// It is not thread-safe. -func NewNmtNodeAdder(ctx context.Context, bs blockservice.BlockService, opts ...ipld.BatchOption) *NmtNodeAdder { - return &NmtNodeAdder{ - add: ipld.NewBatch(ctx, merkledag.NewDAGService(bs), opts...), - ctx: ctx, - leaves: cid.NewSet(), - } -} - -// Visit is a NodeVisitor that can be used during the creation of a new NMT to -// create and add ipld.Nodes to the Batch while computing the root of the NMT. 
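Visit, implemented just below, is passed to the NMT as an nmt.NodeVisitor so that every hash computed while building the tree is simultaneously persisted as an IPLD node; the deleted AddShares above shows the actual wiring through wrapper.NewErasuredNamespacedMerkleTree. A dependency-free toy of that persist-while-hashing flow (plain sha256 over a binary tree, not a real NMT):

```go
package main

import (
	"crypto/sha256"
	"fmt"
)

// visitor is called for every node the tree builder hashes, mirroring
// how NmtNodeAdder.Visit is handed to the NMT via nmt.NodeVisitor.
type visitor func(hash []byte, children ...[]byte)

// buildRoot hashes a toy binary tree over leaves, invoking v per node,
// to show the "compute root while persisting nodes" flow.
func buildRoot(leaves [][]byte, v visitor) []byte {
	level := make([][]byte, len(leaves))
	for i, l := range leaves {
		h := sha256.Sum256(l)
		level[i] = h[:]
		v(level[i], l) // leaf: one child (its data), like the `case 1` branch in Visit
	}
	for len(level) > 1 {
		next := make([][]byte, 0, len(level)/2)
		for i := 0; i < len(level); i += 2 {
			h := sha256.Sum256(append(level[i], level[i+1]...))
			v(h[:], level[i], level[i+1]) // inner: two children, like `case 2`
			next = append(next, h[:])
		}
		level = next
	}
	return level[0]
}

func main() {
	count := 0
	root := buildRoot([][]byte{{1}, {2}, {3}, {4}}, func([]byte, ...[]byte) { count++ })
	fmt.Printf("root %x..., visited %d nodes\n", root[:4], count) // 4 leaves + 3 inner = 7
}
```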
-func (n *NmtNodeAdder) Visit(hash []byte, children ...[]byte) { - if n.err != nil { - return // protect from further visits if there is an error - } - - id := plugin.MustCidFromNamespacedSha256(hash) - switch len(children) { - case 1: - if n.leaves.Visit(id) { - n.err = n.add.Add(n.ctx, plugin.NewNMTLeafNode(id, children[0])) - } - case 2: - n.err = n.add.Add(n.ctx, plugin.NewNMTNode(id, children[0], children[1])) - default: - panic("expected a binary tree") - } -} - -// Commit checks for errors happened during Visit and if absent commits data to inner Batch. -func (n *NmtNodeAdder) Commit() error { - if n.err != nil { - return n.err - } - - return n.add.Commit() -} diff --git a/ipld/pb/share.proto b/ipld/pb/share.proto deleted file mode 100644 index 7838d069f4..0000000000 --- a/ipld/pb/share.proto +++ /dev/null @@ -1,17 +0,0 @@ -syntax = "proto3"; - -package ipld.pb; - -option go_package="github.com/celestiaorg/celestia-node/ipld/pb"; - -message MerkleProof { - int64 start = 1; - int64 end = 2; - repeated bytes nodes = 3; - bytes leaf_hash = 4; -} - -message Share { - bytes Data = 1; - MerkleProof Proof = 2; -} diff --git a/ipld/plugin/nmt.go b/ipld/plugin/nmt.go deleted file mode 100644 index c0700390a8..0000000000 --- a/ipld/plugin/nmt.go +++ /dev/null @@ -1,337 +0,0 @@ -package plugin - -import ( - "bytes" - "context" - "crypto/sha256" - "errors" - "fmt" - "hash" - - blocks "github.com/ipfs/go-block-format" - "github.com/ipfs/go-blockservice" - "github.com/ipfs/go-cid" - ipld "github.com/ipfs/go-ipld-format" - mh "github.com/multiformats/go-multihash" - mhcore "github.com/multiformats/go-multihash/core" - "github.com/tendermint/tendermint/pkg/consts" - - "github.com/celestiaorg/nmt" -) - -const ( - // Below used multiformats (one codec, one multihash) seem free: - // https://github.com/multiformats/multicodec/blob/master/table.csv - - // NmtCodec is the codec used for leaf and inner nodes of a Namespaced Merkle Tree. - NmtCodec = 0x7700 - - // Sha256Namespace8Flagged is the multihash code used to hash blocks - // that contain an NMT node (inner and leaf nodes). - Sha256Namespace8Flagged = 0x7701 - - // nmtHashSize is the size of a digest created by an NMT in bytes. 
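With the 8-byte namespaces in use at this commit, the constant defined just below works out to 2*8 + 32 = 48 bytes, and the size switch in namespaceHasher.Write is derived from the same numbers. A sketch of the arithmetic; namespaceSize and shareSize are assumptions mirroring tendermint's consts package at this point in history:

```go
package main

import (
	"crypto/sha256"
	"fmt"
)

// Assumed values at this commit; see tendermint/pkg/consts.
const (
	namespaceSize = 8
	shareSize     = 256
)

func main() {
	// digest = minNID || maxNID || sha256 hash
	nmtHashSize := 2*namespaceSize + sha256.Size
	fmt.Println("nmt hash size:", nmtHashSize) // 48

	// raw inner node block: 1 prefix byte + left digest + right digest
	fmt.Println("inner node write size:", 1+2*nmtHashSize) // 97

	// leaf input as sized by namespaceHasher.Write: nid + share (+ prefix byte)
	fmt.Println("leaf write size:", namespaceSize+shareSize) // 264
}
```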
- nmtHashSize = 2*consts.NamespaceSize + sha256.Size -) - -func init() { - mhcore.Register(Sha256Namespace8Flagged, func() hash.Hash { - return NewNamespaceHasher(nmt.NewNmtHasher(sha256.New(), nmt.DefaultNamespaceIDLen, true)) - }) -} - -type namespaceHasher struct { - *nmt.Hasher - tp byte - data []byte -} - -func NewNamespaceHasher(hasher *nmt.Hasher) hash.Hash { - return &namespaceHasher{ - Hasher: hasher, - } -} - -func (n *namespaceHasher) Write(data []byte) (int, error) { - ln, nln, hln := len(data), int(n.NamespaceLen), n.Hash.Size() - innerNodeSize, leafNodeSize := (nln*2+hln)*2, nln+consts.ShareSize - switch ln { - default: - return 0, fmt.Errorf("wrong data size") - case innerNodeSize, innerNodeSize + 1: // w/ and w/o additional type byte - n.tp = nmt.NodePrefix - case leafNodeSize, leafNodeSize + 1: // w/ and w/o additional type byte - n.tp = nmt.LeafPrefix - } - - n.data = data[1:] - return ln, nil -} - -func (n *namespaceHasher) Sum([]byte) []byte { - isLeafData := n.tp == nmt.LeafPrefix - if isLeafData { - return n.Hasher.HashLeaf(n.data) - } - - flagLen := int(n.NamespaceLen * 2) - sha256Len := n.Hasher.Size() - return n.Hasher.HashNode(n.data[:flagLen+sha256Len], n.data[flagLen+sha256Len:]) -} - -func GetNode(ctx context.Context, bGetter blockservice.BlockGetter, root cid.Cid) (ipld.Node, error) { - block, err := bGetter.GetBlock(ctx, root) - if err != nil { - var errNotFound *ipld.ErrNotFound - if errors.As(err, &errNotFound) { - return nil, errNotFound - } - return nil, err - } - - return decodeBlock(block) -} - -func decodeBlock(block blocks.Block) (ipld.Node, error) { - // length of the domain separator for leaf and inner nodes: - const prefixOffset = 1 - var ( - leafPrefix = []byte{nmt.LeafPrefix} - innerPrefix = []byte{nmt.NodePrefix} - ) - data := block.RawData() - if len(data) == 0 { - return &nmtLeafNode{ - cid: cid.Undef, - Data: nil, - }, nil - } - domainSeparator := data[:prefixOffset] - if bytes.Equal(domainSeparator, leafPrefix) { - return &nmtLeafNode{ - cid: block.Cid(), - Data: data[prefixOffset:], - }, nil - } - if bytes.Equal(domainSeparator, innerPrefix) { - return &nmtNode{ - cid: block.Cid(), - l: data[prefixOffset : prefixOffset+nmtHashSize], - r: data[prefixOffset+nmtHashSize:], - }, nil - } - return nil, fmt.Errorf( - "expected first byte of block to be either the leaf or inner node prefix: (%x, %x), got: %x)", - leafPrefix, - innerPrefix, - domainSeparator, - ) -} - -var _ ipld.Node = (*nmtNode)(nil) -var _ ipld.Node = (*nmtLeafNode)(nil) - -type nmtNode struct { - // TODO(ismail): we might want to export these later - cid cid.Cid - l, r []byte -} - -func NewNMTNode(id cid.Cid, l, r []byte) ipld.Node { - return nmtNode{id, l, r} -} - -func (n nmtNode) RawData() []byte { - return append([]byte{nmt.NodePrefix}, append(n.l, n.r...)...) 
-} - -func (n nmtNode) Cid() cid.Cid { - return n.cid -} - -func (n nmtNode) String() string { - return fmt.Sprintf(` -node { - hash: %x, - l: %x, - r: %x" -}`, n.cid.Hash(), n.l, n.r) -} - -func (n nmtNode) Loggable() map[string]interface{} { - return nil -} - -func (n nmtNode) Resolve(path []string) (interface{}, []string, error) { - switch path[0] { - case "0": - left, err := CidFromNamespacedSha256(n.l) - if err != nil { - return nil, nil, err - } - return &ipld.Link{Cid: left}, path[1:], nil - case "1": - right, err := CidFromNamespacedSha256(n.r) - if err != nil { - return nil, nil, err - } - return &ipld.Link{Cid: right}, path[1:], nil - default: - return nil, nil, errors.New("invalid path for inner node") - } -} - -func (n nmtNode) Tree(path string, depth int) []string { - if path != "" || depth != -1 { - panic("proper tree not yet implemented") - } - - return []string{ - "0", - "1", - } -} - -func (n nmtNode) ResolveLink(path []string) (*ipld.Link, []string, error) { - obj, rest, err := n.Resolve(path) - if err != nil { - return nil, nil, err - } - - lnk, ok := obj.(*ipld.Link) - if !ok { - return nil, nil, errors.New("was not a link") - } - - return lnk, rest, nil -} - -func (n nmtNode) Copy() ipld.Node { - l := make([]byte, len(n.l)) - copy(l, n.l) - r := make([]byte, len(n.r)) - copy(r, n.r) - - return &nmtNode{ - cid: n.cid, - l: l, - r: r, - } -} - -func (n nmtNode) Links() []*ipld.Link { - leftCid := MustCidFromNamespacedSha256(n.l) - rightCid := MustCidFromNamespacedSha256(n.r) - - return []*ipld.Link{{Cid: leftCid}, {Cid: rightCid}} -} - -func (n nmtNode) Stat() (*ipld.NodeStat, error) { - return &ipld.NodeStat{}, nil -} - -func (n nmtNode) Size() (uint64, error) { - return 0, nil -} - -type nmtLeafNode struct { - cid cid.Cid - Data []byte -} - -func NewNMTLeafNode(id cid.Cid, data []byte) ipld.Node { - return &nmtLeafNode{id, data} -} - -func (l nmtLeafNode) RawData() []byte { - return append([]byte{nmt.LeafPrefix}, l.Data...) 
-} - -func (l nmtLeafNode) Cid() cid.Cid { - return l.cid -} - -func (l nmtLeafNode) String() string { - return fmt.Sprintf(` -leaf { - hash: %x, - len(Data): %v -}`, l.cid.Hash(), len(l.Data)) -} - -func (l nmtLeafNode) Loggable() map[string]interface{} { - return nil -} - -func (l nmtLeafNode) Resolve(path []string) (interface{}, []string, error) { - return nil, nil, errors.New("invalid path for leaf node") -} - -func (l nmtLeafNode) Tree(_path string, _depth int) []string { - return nil -} - -func (l nmtLeafNode) ResolveLink(path []string) (*ipld.Link, []string, error) { - obj, rest, err := l.Resolve(path) - if err != nil { - return nil, nil, err - } - - lnk, ok := obj.(*ipld.Link) - if !ok { - return nil, nil, errors.New("was not a link") - } - return lnk, rest, nil -} - -func (l nmtLeafNode) Copy() ipld.Node { - panic("implement me") -} - -func (l nmtLeafNode) Links() []*ipld.Link { - return []*ipld.Link{{Cid: l.Cid()}} -} - -func (l nmtLeafNode) Stat() (*ipld.NodeStat, error) { - return &ipld.NodeStat{}, nil -} - -func (l nmtLeafNode) Size() (uint64, error) { - return 0, nil -} - -// CidFromNamespacedSha256 uses a hash from an nmt tree to create a CID -func CidFromNamespacedSha256(namespacedHash []byte) (cid.Cid, error) { - if got, want := len(namespacedHash), nmtHashSize; got != want { - return cid.Cid{}, fmt.Errorf("invalid namespaced hash length, got: %v, want: %v", got, want) - } - buf, err := mh.Encode(namespacedHash, Sha256Namespace8Flagged) - if err != nil { - return cid.Undef, err - } - return cid.NewCidV1(NmtCodec, buf), nil -} - -// MustCidFromNamespacedSha256 is a wrapper around cidFromNamespacedSha256 that panics -// in case of an error. Use with care and only in places where no error should occur. -func MustCidFromNamespacedSha256(hash []byte) cid.Cid { - cidFromHash, err := CidFromNamespacedSha256(hash) - if err != nil { - panic( - fmt.Sprintf("malformed hash: %s, codec: %v", - err, - mh.Codes[Sha256Namespace8Flagged]), - ) - } - return cidFromHash -} - -// cidPrefixSize is the size of the prepended buffer of the CID encoding -// for NamespacedSha256. For more information, see: -// https://multiformats.io/multihash/#the-multihash-format -const cidPrefixSize = 4 - -// NamespacedSha256FromCID derives the Namespaced hash from the given CID. -func NamespacedSha256FromCID(cid cid.Cid) []byte { - return cid.Hash()[cidPrefixSize:] -} diff --git a/ipld/plugin/nmt_test.go b/ipld/plugin/nmt_test.go deleted file mode 100644 index b175e18f18..0000000000 --- a/ipld/plugin/nmt_test.go +++ /dev/null @@ -1,71 +0,0 @@ -package plugin - -import ( - "bytes" - "math" - "math/rand" - "sort" - "strconv" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "github.com/tendermint/tendermint/pkg/consts" - - "github.com/tendermint/tendermint/pkg/da" -) - -// TestNamespaceFromCID checks that deriving the Namespaced hash from -// the given CID works correctly. 
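The test below exercises this derivation end to end. In isolation, the round trip through go-multihash and go-cid, using the codes registered at the top of nmt.go, looks like the following sketch:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/ipfs/go-cid"
	mh "github.com/multiformats/go-multihash"
)

const (
	nmtCodec                = 0x7700
	sha256Namespace8Flagged = 0x7701
	cidPrefixSize           = 4 // multihash header: varint code + varint length
)

func main() {
	digest := make([]byte, 48) // 2*NamespaceSize + sha256.Size
	for i := range digest {
		digest[i] = byte(i)
	}

	// what CidFromNamespacedSha256 does: wrap digest in a multihash, then a CIDv1
	buf, err := mh.Encode(digest, sha256Namespace8Flagged)
	if err != nil {
		panic(err)
	}
	id := cid.NewCidV1(nmtCodec, buf)

	// what NamespacedSha256FromCID does: drop the 4-byte multihash prefix
	back := id.Hash()[cidPrefixSize:]
	fmt.Println(bytes.Equal(digest, back)) // true
}
```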
-func TestNamespaceFromCID(t *testing.T) { - var tests = []struct { - randData [][]byte - }{ - {randData: generateRandNamespacedRawData(4, consts.NamespaceSize, consts.ShareSize)}, - {randData: generateRandNamespacedRawData(16, 16, consts.ShareSize)}, - {randData: generateRandNamespacedRawData(4, 4, consts.ShareSize)}, - {randData: generateRandNamespacedRawData(4, consts.NamespaceSize, consts.ShareSize/2)}, - } - - for i, tt := range tests { - t.Run(strconv.Itoa(i), func(t *testing.T) { - // create DAH from rand data - squareSize := uint64(math.Sqrt(float64(len(tt.randData)))) - eds, err := da.ExtendShares(squareSize, tt.randData) - require.NoError(t, err) - dah := da.NewDataAvailabilityHeader(eds) - // check to make sure NamespacedHash is correctly derived from CID - for _, row := range dah.RowsRoots { - c, err := CidFromNamespacedSha256(row) - require.NoError(t, err) - - got := NamespacedSha256FromCID(c) - assert.Equal(t, row, got) - } - }) - } -} - -// generateRandNamespacedRawData returns random namespaced raw data for testing purposes. -func generateRandNamespacedRawData(total, nidSize, leafSize uint32) [][]byte { - data := make([][]byte, total) - for i := uint32(0); i < total; i++ { - nid := make([]byte, nidSize) - - rand.Read(nid) - data[i] = nid - } - sortByteArrays(data) - for i := uint32(0); i < total; i++ { - d := make([]byte, leafSize) - - rand.Read(d) - data[i] = append(data[i], d...) - } - - return data -} - -func sortByteArrays(src [][]byte) { - sort.Slice(src, func(i, j int) bool { return bytes.Compare(src[i], src[j]) < 0 }) -} diff --git a/ipld/retriever_byzantine.go b/ipld/retriever_byzantine.go deleted file mode 100644 index e2d17977c0..0000000000 --- a/ipld/retriever_byzantine.go +++ /dev/null @@ -1,61 +0,0 @@ -package ipld - -import ( - "context" - "fmt" - - "github.com/ipfs/go-blockservice" - "github.com/tendermint/tendermint/pkg/da" - - "github.com/celestiaorg/celestia-node/ipld/plugin" - "github.com/celestiaorg/rsmt2d" -) - -// ErrByzantine is a thrown when recovered data square is not correct -// (merkle proofs do not match parity erasure-coding data). -// -// It is converted from rsmt2d.ByzantineRow/Col + -// Merkle Proof for each share. -type ErrByzantine struct { - Index uint32 - Shares []*ShareWithProof - Axis rsmt2d.Axis -} - -func (e *ErrByzantine) Error() string { - return fmt.Sprintf("byzantine error(Axis:%v, Index:%v)", e.Axis, e.Index) -} - -// NewErrByzantine creates new ErrByzantine from rsmt2d error. -// If error happens during proof collection, it terminates the process with os.Exit(1). -func NewErrByzantine( - ctx context.Context, - bGetter blockservice.BlockGetter, - dah *da.DataAvailabilityHeader, - errByz *rsmt2d.ErrByzantineData, -) *ErrByzantine { - root := [][][]byte{ - dah.RowsRoots, - dah.ColumnRoots, - }[errByz.Axis][errByz.Index] - sharesWithProof, err := GetProofsForShares( - ctx, - bGetter, - plugin.MustCidFromNamespacedSha256(root), - errByz.Shares, - ) - if err != nil { - // Fatal as rsmt2d proved that error is byzantine, - // but we cannot properly collect the proof, - // so verification will fail and thus services won't be stopped - // while we still have to stop them. 
- // TODO(@Wondertan): Find a better way to handle - log.Fatalw("getting proof for ErrByzantine", "err", err) - } - - return &ErrByzantine{ - Index: uint32(errByz.Index), - Shares: sharesWithProof, - Axis: errByz.Axis, - } -} diff --git a/ipld/retriever_test.go b/ipld/retriever_test.go deleted file mode 100644 index 18e2265d6b..0000000000 --- a/ipld/retriever_test.go +++ /dev/null @@ -1,126 +0,0 @@ -package ipld - -import ( - "context" - "math/rand" - "testing" - "time" - - "github.com/gammazero/workerpool" - format "github.com/ipfs/go-ipld-format" - mdutils "github.com/ipfs/go-merkledag/test" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "github.com/tendermint/tendermint/pkg/da" - "github.com/tendermint/tendermint/pkg/wrapper" - - "github.com/celestiaorg/nmt" - "github.com/celestiaorg/rsmt2d" -) - -func init() { - // randomize quadrant fetching, otherwise quadrant sampling is deterministic - rand.Seed(time.Now().UnixNano()) - // limit the amount of workers for tests - pool = workerpool.New(1000) -} - -func TestRetriever_Retrieve(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - bServ := mdutils.Bserv() - r := NewRetriever(bServ) - - type test struct { - name string - squareSize int - } - tests := []test{ - {"1x1(min)", 1}, - {"2x2(med)", 2}, - {"4x4(med)", 4}, - {"8x8(med)", 8}, - {"16x16(med)", 16}, - {"32x32(med)", 32}, - {"64x64(med)", 64}, - {"128x128(max)", MaxSquareSize}, - } - for _, tc := range tests { - tc := tc - t.Run(tc.name, func(t *testing.T) { - // generate EDS - shares := RandShares(t, tc.squareSize*tc.squareSize) - in, err := AddShares(ctx, shares, bServ) - require.NoError(t, err) - - // limit with timeout, specifically retrieval - ctx, cancel := context.WithTimeout(ctx, time.Minute*5) // the timeout is big for the max size which is long - defer cancel() - - dah := da.NewDataAvailabilityHeader(in) - out, err := r.Retrieve(ctx, &dah) - require.NoError(t, err) - assert.True(t, EqualEDS(in, out)) - }) - } -} - -func TestRetriever_ByzantineError(t *testing.T) { - const width = 8 - ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) - defer cancel() - - bserv := mdutils.Bserv() - shares := ExtractEDS(RandEDS(t, width)) - _, err := ImportShares(ctx, shares, bserv) - require.NoError(t, err) - - // corrupt shares so that eds erasure coding does not match - copy(shares[14][8:], shares[15][8:]) - - // import corrupted eds - batchAdder := NewNmtNodeAdder(ctx, bserv, format.MaxSizeBatchOption(batchSize(width*2))) - tree := wrapper.NewErasuredNamespacedMerkleTree(uint64(width), nmt.NodeVisitor(batchAdder.Visit)) - attackerEDS, err := rsmt2d.ImportExtendedDataSquare(shares, DefaultRSMT2DCodec(), tree.Constructor) - require.NoError(t, err) - err = batchAdder.Commit() - require.NoError(t, err) - - // ensure we rcv an error - da := da.NewDataAvailabilityHeader(attackerEDS) - r := NewRetriever(bserv) - _, err = r.Retrieve(ctx, &da) - var errByz *ErrByzantine - require.ErrorAs(t, err, &errByz) -} - -// TestRetriever_MultipleRandQuadrants asserts that reconstruction succeeds -// when any three random quadrants requested. 
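Stepping back to TestRetriever_ByzantineError above, the caller-side contract is: a *ErrByzantine returned from Retrieve means the square is provably mis-encoded, and the error already carries the Merkle proofs a bad-encoding fraud proof needs. A hedged sketch against this package's pre-removal API; onFraud is a hypothetical callback standing in for fraud-proof gossiping:

```go
package sample

import (
	"context"
	"errors"

	"github.com/tendermint/tendermint/pkg/da"

	"github.com/celestiaorg/celestia-node/ipld"
	"github.com/celestiaorg/rsmt2d"
)

// retrieveChecked shows the pattern the test above asserts: unwrap the
// error with errors.As and treat *ErrByzantine as proof of misbehavior
// rather than a plain retrieval failure.
func retrieveChecked(
	ctx context.Context,
	r *ipld.Retriever,
	dah *da.DataAvailabilityHeader,
	onFraud func(*ipld.ErrByzantine), // hypothetical fraud-proof hook
) (*rsmt2d.ExtendedDataSquare, error) {
	eds, err := r.Retrieve(ctx, dah)
	var errByz *ipld.ErrByzantine
	if errors.As(err, &errByz) {
		// errByz.Shares holds per-share Merkle proofs for the bad row/column
		onFraud(errByz)
	}
	return eds, err
}
```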
-func TestRetriever_MultipleRandQuadrants(t *testing.T) { - RetrieveQuadrantTimeout = time.Millisecond * 500 - const squareSize = 32 - ctx, cancel := context.WithTimeout(context.Background(), time.Minute) - defer cancel() - - bServ := mdutils.Bserv() - r := NewRetriever(bServ) - - // generate EDS - shares := RandShares(t, squareSize*squareSize) - in, err := AddShares(ctx, shares, bServ) - require.NoError(t, err) - - dah := da.NewDataAvailabilityHeader(in) - ses, err := r.newSession(ctx, &dah) - require.NoError(t, err) - - // wait until two additional quadrants requested - // this reliably allows us to reproduce the issue - time.Sleep(RetrieveQuadrantTimeout * 2) - // then ensure we have enough shares for reconstruction for slow machines e.g. CI - <-ses.Done() - - _, err = ses.Reconstruct(ctx) - assert.NoError(t, err) -} diff --git a/ipld/share.go b/ipld/share.go deleted file mode 100644 index 1da3908548..0000000000 --- a/ipld/share.go +++ /dev/null @@ -1,99 +0,0 @@ -package ipld - -import ( - "crypto/sha256" - - "github.com/ipfs/go-cid" - "github.com/tendermint/tendermint/pkg/consts" - - "github.com/celestiaorg/celestia-node/ipld/pb" - "github.com/celestiaorg/celestia-node/ipld/plugin" - "github.com/celestiaorg/nmt" - "github.com/celestiaorg/nmt/namespace" -) - -const ( - // MaxSquareSize is currently the maximum size supported for unerasured data in rsmt2d.ExtendedDataSquare. - MaxSquareSize = consts.MaxSquareSize - // NamespaceSize is a system-wide size for NMT namespaces. - NamespaceSize = consts.NamespaceSize - // ShareSize is a system-wide size of a share, including both data and namespace ID - ShareSize = consts.ShareSize -) - -// DefaultRSMT2DCodec sets the default rsmt2d.Codec for shares. -var DefaultRSMT2DCodec = consts.DefaultCodec - -// Share contains the raw share data without the corresponding namespace. -// NOTE: Alias for the byte is chosen to keep maximal compatibility, especially with rsmt2d. Ideally, we should define -// reusable type elsewhere and make everyone(Core, rsmt2d, ipld) to rely on it. -type Share = []byte - -// ShareID gets the namespace ID from the share. -func ShareID(s Share) namespace.ID { - return s[:NamespaceSize] -} - -// ShareData gets data from the share. -func ShareData(s Share) []byte { - return s[NamespaceSize:] -} - -// ShareWithProof contains data with corresponding Merkle Proof -type ShareWithProof struct { - // Share is a full data including namespace - Share - // Proof is a Merkle Proof of current share - Proof *nmt.Proof -} - -// NewShareWithProof takes the given leaf and its path, starting from the tree root, -// and computes the nmt.Proof for it. -func NewShareWithProof(index int, share Share, pathToLeaf []cid.Cid) *ShareWithProof { - rangeProofs := make([][]byte, 0, len(pathToLeaf)) - for i := len(pathToLeaf) - 1; i >= 0; i-- { - node := plugin.NamespacedSha256FromCID(pathToLeaf[i]) - rangeProofs = append(rangeProofs, node) - } - - proof := nmt.NewInclusionProof(index, index+1, rangeProofs, true) - return &ShareWithProof{ - share, - &proof, - } -} - -// Validate validates inclusion of the share under the given root CID. 
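Before Validate's body below, the layout this file pins down is worth seeing concretely: a Share is NamespaceSize bytes of namespace ID immediately followed by data, and ShareID/ShareData are plain re-slices. A self-contained illustration, with 8 and 256 as assumed values of consts.NamespaceSize and consts.ShareSize:

```go
package main

import (
	"bytes"
	"fmt"
)

const (
	namespaceSize = 8   // assumed consts.NamespaceSize
	shareSize     = 256 // assumed consts.ShareSize: namespace + data
)

func main() {
	share := make([]byte, shareSize)
	copy(share[:namespaceSize], []byte{0, 0, 0, 0, 0, 0, 0, 42})                 // namespace ID
	copy(share[namespaceSize:], bytes.Repeat([]byte{0xAB}, shareSize-namespaceSize)) // payload

	id := share[:namespaceSize]   // what ShareID returns
	data := share[namespaceSize:] // what ShareData returns
	fmt.Printf("nid=%x, data len=%d\n", id, len(data)) // nid=000000000000002a, data len=248
}
```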
-func (s *ShareWithProof) Validate(root cid.Cid) bool { - return s.Proof.VerifyInclusion( - sha256.New(), // TODO(@Wondertan): This should be defined somewhere globally - ShareID(s.Share), - [][]byte{ShareData(s.Share)}, - plugin.NamespacedSha256FromCID(root), - ) -} - -func (s *ShareWithProof) ShareWithProofToProto() *pb.Share { - return &pb.Share{ - Data: s.Share, - Proof: &pb.MerkleProof{ - Start: int64(s.Proof.Start()), - End: int64(s.Proof.End()), - Nodes: s.Proof.Nodes(), - LeafHash: s.Proof.LeafHash(), - }, - } -} - -func ProtoToShare(protoShares []*pb.Share) []*ShareWithProof { - shares := make([]*ShareWithProof, len(protoShares)) - for i, share := range protoShares { - proof := ProtoToProof(share.Proof) - shares[i] = &ShareWithProof{share.Data, &proof} - } - return shares -} - -func ProtoToProof(protoProof *pb.MerkleProof) nmt.Proof { - return nmt.NewInclusionProof(int(protoProof.Start), int(protoProof.End), protoProof.Nodes, true) -} diff --git a/ipld/test_helpers.go b/ipld/test_helpers.go deleted file mode 100644 index 1a8967e95b..0000000000 --- a/ipld/test_helpers.go +++ /dev/null @@ -1,68 +0,0 @@ -package ipld - -import ( - "bytes" - mrand "math/rand" - "sort" - "testing" - - "github.com/stretchr/testify/require" - - "github.com/celestiaorg/rsmt2d" - - "github.com/tendermint/tendermint/pkg/wrapper" -) - -// EqualEDS check whether two given EDSes are equal. -// TODO(Wondertan): Move to rsmt2d -// TODO(Wondertan): Propose use of int by default instead of uint for the sake convenience and Golang practices -func EqualEDS(a *rsmt2d.ExtendedDataSquare, b *rsmt2d.ExtendedDataSquare) bool { - if a.Width() != b.Width() { - return false - } - - for i := uint(0); i < a.Width(); i++ { - ar, br := a.Row(i), b.Row(i) - for j := 0; j < len(ar); j++ { - if !bytes.Equal(ar[j], br[j]) { - return false - } - } - } - - return true -} - -// RandEDS generates EDS filled with the random data with the given size for original square. -func RandEDS(t *testing.T, size int) *rsmt2d.ExtendedDataSquare { - shares := RandShares(t, size*size) - // create the nmt wrapper to generate row and col commitments - tree := wrapper.NewErasuredNamespacedMerkleTree(uint64(size)) - // recompute the eds - eds, err := rsmt2d.ComputeExtendedDataSquare(shares, DefaultRSMT2DCodec(), tree.Constructor) - require.NoError(t, err, "failure to recompute the extended data square") - return eds -} - -// RandShares generate 'total' amount of shares filled with random data. 
-func RandShares(t *testing.T, total int) []Share {
-	if total&(total-1) != 0 {
-		t.Fatal("Namespace total must be power of 2")
-	}
-
-	shares := make([]Share, total)
-	for i := range shares {
-		nid := make([]byte, ShareSize)
-		_, err := mrand.Read(nid[:NamespaceSize]) // nolint:gosec // G404: Use of weak random number generator
-		require.NoError(t, err)
-		shares[i] = nid
-	}
-	sort.Slice(shares, func(i, j int) bool { return bytes.Compare(shares[i], shares[j]) < 0 })
-
-	for i := range shares {
-		_, err := mrand.Read(shares[i][NamespaceSize:]) // nolint:gosec // G404: Use of weak random number generator
-		require.NoError(t, err)
-	}
-
-	return shares
-}
diff --git a/libs/authtoken/authtoken.go b/libs/authtoken/authtoken.go
new file mode 100644
index 0000000000..3d6645c972
--- /dev/null
+++ b/libs/authtoken/authtoken.go
@@ -0,0 +1,36 @@
+package authtoken
+
+import (
+	"encoding/json"
+
+	"github.com/cristalhq/jwt"
+	"github.com/filecoin-project/go-jsonrpc/auth"
+
+	"github.com/celestiaorg/celestia-node/api/rpc/perms"
+)
+
+// ExtractSignedPermissions returns the permissions granted to the token by the passed signer.
+// If the token isn't signed by the signer, it will not pass verification.
+func ExtractSignedPermissions(signer jwt.Signer, token string) ([]auth.Permission, error) {
+	tk, err := jwt.ParseAndVerifyString(token, signer)
+	if err != nil {
+		return nil, err
+	}
+	p := new(perms.JWTPayload)
+	err = json.Unmarshal(tk.RawClaims(), p)
+	if err != nil {
+		return nil, err
+	}
+	return p.Allow, nil
+}
+
+// NewSignedJWT returns a signed JWT token with the passed permissions and signer.
+func NewSignedJWT(signer jwt.Signer, permissions []auth.Permission) (string, error) {
+	token, err := jwt.NewTokenBuilder(signer).Build(&perms.JWTPayload{
+		Allow: permissions,
+	})
+	if err != nil {
+		return "", err
+	}
+	return token.InsecureString(), nil
+}
diff --git a/libs/edssser/edssser.go b/libs/edssser/edssser.go
new file mode 100644
index 0000000000..34712b785a
--- /dev/null
+++ b/libs/edssser/edssser.go
@@ -0,0 +1,173 @@
+package edssser
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"os"
+	"sync"
+	"testing"
+	"time"
+
+	"github.com/ipfs/go-datastore"
+
+	"github.com/celestiaorg/celestia-app/pkg/da"
+
+	"github.com/celestiaorg/celestia-node/share/eds"
+	"github.com/celestiaorg/celestia-node/share/eds/edstest"
+)
+
+type Config struct {
+	EDSSize     int
+	EDSWrites   int
+	EnableLog   bool
+	LogFilePath string
+	StatLogFreq int
+	OpTimeout   time.Duration
+}
+
+// EDSsser stands for EDS Store Stresser.
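The two authtoken helpers added above are symmetric: NewSignedJWT mints a token whose claims carry the allowed permissions, and ExtractSignedPermissions verifies the signature and reads them back. A round-trip sketch; the jwt.NewHS256 constructor and the permission names are assumptions (check the pinned cristalhq/jwt version for the exact API), while the two helper calls are exactly as added above:

```go
package main

import (
	"fmt"

	"github.com/cristalhq/jwt"
	"github.com/filecoin-project/go-jsonrpc/auth"

	"github.com/celestiaorg/celestia-node/libs/authtoken"
)

func main() {
	// assumption: an HS256 signer from the pinned cristalhq/jwt release;
	// the constructor name may differ between major versions.
	signer, err := jwt.NewHS256([]byte("secret-key"))
	if err != nil {
		panic(err)
	}

	perms := []auth.Permission{"public", "read"} // illustrative permission names
	token, err := authtoken.NewSignedJWT(signer, perms)
	if err != nil {
		panic(err)
	}

	got, err := authtoken.ExtractSignedPermissions(signer, token)
	if err != nil {
		panic(err) // fails if the token was signed with a different key
	}
	fmt.Println(got) // [public read]
}
```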
+type EDSsser struct { + config Config + datastore datastore.Batching + edsstoreMu sync.Mutex + edsstore *eds.Store + + statsFileMu sync.Mutex + statsFile *os.File +} + +func NewEDSsser(path string, datastore datastore.Batching, cfg Config) (*EDSsser, error) { + storeCfg := eds.DefaultParameters() + edsstore, err := eds.NewStore(storeCfg, path, datastore) + if err != nil { + return nil, err + } + + return &EDSsser{ + config: cfg, + datastore: datastore, + edsstore: edsstore, + }, nil +} + +func (ss *EDSsser) Run(ctx context.Context) (stats Stats, err error) { + ss.edsstoreMu.Lock() + defer ss.edsstoreMu.Unlock() + + err = ss.edsstore.Start(ctx) + if err != nil { + return stats, err + } + defer func() { + err = errors.Join(err, ss.edsstore.Stop(ctx)) + }() + + edsHashes, err := ss.edsstore.List() + if err != nil { + return stats, err + } + fmt.Printf("recovered %d EDSes\n\n", len(edsHashes)) + + t := &testing.T{} + for toWrite := ss.config.EDSWrites - len(edsHashes); ctx.Err() == nil && toWrite > 0; toWrite-- { + took, err := ss.put(ctx, t) + + stats.TotalWritten++ + stats.TotalTime += took + if took < stats.MinTime || stats.MinTime == 0 { + stats.MinTime = took + } else if took > stats.MaxTime { + stats.MaxTime = took + } + + if ss.config.EnableLog { + if stats.TotalWritten%ss.config.StatLogFreq == 0 { + stats := stats.Finalize() + fmt.Println(stats) + go func() { + err := ss.dumpStat(stats) + if err != nil { + fmt.Printf("error dumping stats: %s\n", err.Error()) + } + }() + } + if err != nil { + fmt.Printf("ERROR put: %s, took: %v, at: %v\n", err.Error(), took, time.Now()) + continue + } + if took > ss.config.OpTimeout/2 { + fmt.Println("long put", "size", ss.config.EDSSize, "took", took, "at", time.Now()) + continue + } + + fmt.Println("square written", "size", ss.config.EDSSize, "took", took, "at", time.Now()) + } + } + return stats, nil +} + +func (ss *EDSsser) dumpStat(stats Stats) (err error) { + ss.statsFileMu.Lock() + defer ss.statsFileMu.Unlock() + + ss.statsFile, err = os.Create(ss.config.LogFilePath + "/edssser_stats.txt") + if err != nil { + return err + } + + _, err = ss.statsFile.Write([]byte(stats.String())) + if err != nil { + return err + } + + return ss.statsFile.Close() +} + +type Stats struct { + TotalWritten int + TotalTime, MinTime, MaxTime, AvgTime time.Duration + // Deviation ? 
+}
+
+func (stats Stats) Finalize() Stats {
+	if stats.TotalTime != 0 {
+		stats.AvgTime = stats.TotalTime / time.Duration(stats.TotalWritten)
+	}
+	return stats
+}
+
+func (stats Stats) String() string {
+	return fmt.Sprintf(`
+TotalWritten %d
+TotalWritingTime %v
+MaxTime %s
+MinTime %s
+AvgTime %s
+`,
+		stats.TotalWritten,
+		stats.TotalTime,
+		stats.MaxTime,
+		stats.MinTime,
+		stats.AvgTime,
+	)
+}
+
+func (ss *EDSsser) put(ctx context.Context, t *testing.T) (time.Duration, error) {
+	ctx, cancel := context.WithTimeout(ctx, ss.config.OpTimeout)
+	if ss.config.OpTimeout == 0 {
+		ctx, cancel = context.WithCancel(ctx)
+	}
+	defer cancel()
+
+	// divide by 2 to get ODS size as expected by RandEDS
+	square := edstest.RandEDS(t, ss.config.EDSSize/2)
+	dah, err := da.NewDataAvailabilityHeader(square)
+	if err != nil {
+		return 0, err
+	}
+
+	now := time.Now()
+	err = ss.edsstore.Put(ctx, dah.Hash(), square)
+	return time.Since(now), err
+}
diff --git a/libs/fslock/locker.go b/libs/fslock/locker.go
index a858d0bf7d..f451c42cc1 100644
--- a/libs/fslock/locker.go
+++ b/libs/fslock/locker.go
@@ -21,8 +21,8 @@ func Lock(path string) (*Locker, error) {
 }
 
 // Locker is a simple utility meant to create lock files.
-// This is to prevent multiple processes from managing the same working directory by purpose or accident.
-// NOTE: Windows is not supported.
+// This is to prevent multiple processes from managing the same working directory on purpose or by
+// accident. NOTE: Windows is not supported.
 type Locker struct {
 	file *os.File
 	path string
diff --git a/libs/fxutil/fxutil.go b/libs/fxutil/fxutil.go
index a03ab0cb20..872f7c91a6 100644
--- a/libs/fxutil/fxutil.go
+++ b/libs/fxutil/fxutil.go
@@ -43,14 +43,14 @@ func InvokeIf(cond bool, function interface{}) fx.Option {
 	return fx.Options()
 }
 
-// ProvideAs creates an FX option that provides constructor 'cnstr' with the returned values types as 'cnstrs'
-// It is as simple utility that hides away FX annotation details.
+// ProvideAs creates an FX option that provides constructor 'cnstr' with the returned values' types
+// as 'cnstrs'. It is a simple utility that hides away FX annotation details.
 func ProvideAs(cnstr interface{}, cnstrs ...interface{}) fx.Option {
 	return fx.Provide(fx.Annotate(cnstr, fx.As(cnstrs...)))
 }
 
-// ReplaceAs creates an FX option that substitutes types defined by constructors 'cnstrs' with the value 'val'.
-// It is as simple utility that hides away FX annotation details.
+// ReplaceAs creates an FX option that substitutes types defined by constructors 'cnstrs' with the
+// value 'val'. It is a simple utility that hides away FX annotation details.
 func ReplaceAs(val interface{}, cnstrs ...interface{}) fx.Option {
 	return fx.Replace(fx.Annotate(val, fx.As(cnstrs...)))
 }
diff --git a/libs/keystore/fs_keystore.go b/libs/keystore/fs_keystore.go
index 79087c8397..02361f0084 100644
--- a/libs/keystore/fs_keystore.go
+++ b/libs/keystore/fs_keystore.go
@@ -7,6 +7,8 @@ import (
 	"io/fs"
 	"os"
 	"path/filepath"
+
+	"github.com/cosmos/cosmos-sdk/crypto/keyring"
 )
 
 // ErrNotFound is returned when the key does not exist.
@@ -15,16 +17,21 @@ var ErrNotFound = errors.New("keystore: key not found")
 // fsKeystore implements persistent Keystore over OS filesystem.
 type fsKeystore struct {
 	path string
+
+	ring keyring.Keyring
 }
 
 // NewFSKeystore creates a new Keystore over OS filesystem.
 // The path must point to a directory. It is created automatically if necessary.
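The NewFSKeystore change beginning below threads a cosmos-sdk keyring through the Keystore. A construction sketch that mirrors the in-memory ring this same diff adds to mapKeystore; the path is illustrative:

```go
package main

import (
	"fmt"

	"github.com/cosmos/cosmos-sdk/crypto/keyring"

	"github.com/celestiaorg/celestia-app/app"
	"github.com/celestiaorg/celestia-app/app/encoding"

	"github.com/celestiaorg/celestia-node/libs/keystore"
)

func main() {
	// same construction the new mapKeystore uses for its in-memory ring
	cdc := encoding.MakeConfig(app.ModuleEncodingRegisters...).Codec
	ring := keyring.NewInMemory(cdc)

	// nil is also accepted when no keyring is needed (see the updated test)
	kstore, err := keystore.NewFSKeystore("/tmp/keystore", ring)
	if err != nil {
		panic(err)
	}
	fmt.Println(kstore.Path(), kstore.Keyring() != nil)
}
```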
-func NewFSKeystore(path string) (Keystore, error) { +func NewFSKeystore(path string, ring keyring.Keyring) (Keystore, error) { err := os.Mkdir(path, 0755) if err != nil && !os.IsExist(err) { return nil, fmt.Errorf("keystore: failed to make a dir: %w", err) } - return &fsKeystore{path: path}, nil + return &fsKeystore{ + path: path, + ring: ring, + }, nil } func (f *fsKeystore) Put(n KeyName, pk PrivKey) error { @@ -122,6 +129,10 @@ func (f *fsKeystore) Path() string { return f.path } +func (f *fsKeystore) Keyring() keyring.Keyring { + return f.ring +} + func (f *fsKeystore) pathTo(file string) string { return filepath.Join(f.path, file) } diff --git a/libs/keystore/fs_keystore_test.go b/libs/keystore/fs_keystore_test.go index c20f00fb41..2baba2d8fc 100644 --- a/libs/keystore/fs_keystore_test.go +++ b/libs/keystore/fs_keystore_test.go @@ -8,7 +8,7 @@ import ( ) func TestFSKeystore(t *testing.T) { - kstore, err := NewFSKeystore(t.TempDir() + "/keystore") + kstore, err := NewFSKeystore(t.TempDir()+"/keystore", nil) require.NoError(t, err) err = kstore.Put("test", PrivKey{Body: []byte("test_private_key")}) diff --git a/libs/keystore/keystore.go b/libs/keystore/keystore.go index 522389cdfa..d9bc21a486 100644 --- a/libs/keystore/keystore.go +++ b/libs/keystore/keystore.go @@ -3,6 +3,7 @@ package keystore import ( "fmt" + "github.com/cosmos/cosmos-sdk/crypto/keyring" "github.com/multiformats/go-base32" ) @@ -34,6 +35,10 @@ type Keystore interface { // Path reports the path of the Keystore. Path() string + + // Keyring returns the keyring corresponding to the node's + // keystore. + Keyring() keyring.Keyring } // KeyNameFromBase32 decodes KeyName from Base32 format. diff --git a/libs/keystore/map_keystore.go b/libs/keystore/map_keystore.go index 8439e59ab9..84de91458e 100644 --- a/libs/keystore/map_keystore.go +++ b/libs/keystore/map_keystore.go @@ -3,17 +3,26 @@ package keystore import ( "fmt" "sync" + + "github.com/cosmos/cosmos-sdk/crypto/keyring" + + "github.com/celestiaorg/celestia-app/app" + "github.com/celestiaorg/celestia-app/app/encoding" ) // mapKeystore is a simple in-memory Keystore implementation. type mapKeystore struct { keys map[KeyName]PrivKey keysLk sync.Mutex + ring keyring.Keyring } // NewMapKeystore constructs in-memory Keystore. func NewMapKeystore() Keystore { - return &mapKeystore{keys: make(map[KeyName]PrivKey)} + return &mapKeystore{ + keys: make(map[KeyName]PrivKey), + ring: keyring.NewInMemory(encoding.MakeConfig(app.ModuleEncodingRegisters...).Codec), + } } func (m *mapKeystore) Put(n KeyName, k PrivKey) error { @@ -69,3 +78,7 @@ func (m *mapKeystore) List() ([]KeyName, error) { func (m *mapKeystore) Path() string { return "" } + +func (m *mapKeystore) Keyring() keyring.Keyring { + return m.ring +} diff --git a/libs/pidstore/pidstore.go b/libs/pidstore/pidstore.go new file mode 100644 index 0000000000..17241aa4a9 --- /dev/null +++ b/libs/pidstore/pidstore.go @@ -0,0 +1,97 @@ +package pidstore + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/ipfs/go-datastore" + "github.com/ipfs/go-datastore/namespace" + logging "github.com/ipfs/go-log/v2" + "github.com/libp2p/go-libp2p/core/peer" +) + +var ( + storePrefix = datastore.NewKey("pidstore") + peersKey = datastore.NewKey("peers") + + log = logging.Logger("pidstore") +) + +// PeerIDStore is used to store/load peers to/from disk. +type PeerIDStore struct { + ds datastore.Datastore +} + +// NewPeerIDStore creates a new peer ID store backed by the given datastore. 
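// A minimal sketch of the Keyring accessor the hunks above add to the
// Keystore interface, using the in-memory implementation; it only lists the
// freshly created (empty) ring, assuming nothing beyond the APIs shown in
// the diff.
package main

import (
	"fmt"

	"github.com/celestiaorg/celestia-node/libs/keystore"
)

func main() {
	// NewMapKeystore now wires an in-memory cosmos-sdk keyring internally
	ks := keystore.NewMapKeystore()

	// node code can reach the cosmos-sdk keyring through the Keystore itself
	ring := ks.Keyring()

	keys, err := ring.List()
	if err != nil {
		panic(err)
	}
	fmt.Println("keys in ring:", len(keys)) // 0 for a fresh in-memory ring
}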
+func NewPeerIDStore(ctx context.Context, ds datastore.Datastore) (*PeerIDStore, error) {
+	pidstore := &PeerIDStore{
+		ds: namespace.Wrap(ds, storePrefix),
+	}
+
+	// check if pidstore is already initialized, and if not,
+	// initialize the pidstore
+	exists, err := pidstore.ds.Has(ctx, peersKey)
+	if err != nil {
+		return nil, err
+	}
+	if !exists {
+		return pidstore, pidstore.Put(ctx, []peer.ID{})
+	}
+
+	// if pidstore exists, ensure its contents are uncorrupted
+	_, err = pidstore.Load(ctx)
+	if err != nil {
+		log.Warnw("pidstore: corrupted pidstore detected, resetting...", "err", err)
+		return pidstore, pidstore.reset(ctx)
+	}
+
+	return pidstore, nil
+}
+
+// Load loads the peers from datastore and returns them.
+func (p *PeerIDStore) Load(ctx context.Context) ([]peer.ID, error) {
+	log.Debug("Loading peers")
+
+	bin, err := p.ds.Get(ctx, peersKey)
+	if err != nil {
+		return nil, fmt.Errorf("pidstore: loading peers from datastore: %w", err)
+	}
+
+	var peers []peer.ID
+	err = json.Unmarshal(bin, &peers)
+	if err != nil {
+		return nil, fmt.Errorf("pidstore: unmarshalling peer IDs: %w", err)
+	}
+
+	log.Infow("Loaded peers from disk", "amount", len(peers))
+	return peers, nil
+}
+
+// Put persists the given peer IDs to the datastore.
+func (p *PeerIDStore) Put(ctx context.Context, peers []peer.ID) error {
+	log.Debugw("Persisting peers to disk", "amount", len(peers))
+
+	bin, err := json.Marshal(peers)
+	if err != nil {
+		return fmt.Errorf("pidstore: marshal peerlist: %w", err)
+	}
+
+	if err = p.ds.Put(ctx, peersKey, bin); err != nil {
+		return fmt.Errorf("pidstore: error writing to datastore: %w", err)
+	}
+
+	log.Infow("Persisted peers successfully", "amount", len(peers))
+	return nil
+}
+
+// reset resets the pidstore in case of corruption.
+func (p *PeerIDStore) reset(ctx context.Context) error {
+	log.Warn("pidstore: resetting the pidstore...")
+	err := p.ds.Delete(ctx, peersKey)
+	if err != nil {
+		return fmt.Errorf("pidstore: error resetting datastore: %w", err)
+	}
+
+	return p.Put(ctx, []peer.ID{})
+}
diff --git a/libs/pidstore/pidstore_test.go b/libs/pidstore/pidstore_test.go
new file mode 100644
index 0000000000..4a35783db3
--- /dev/null
+++ b/libs/pidstore/pidstore_test.go
@@ -0,0 +1,93 @@
+package pidstore
+
+import (
+	"context"
+	"crypto/rand"
+	"crypto/rsa"
+	"testing"
+	"time"
+
+	"github.com/ipfs/go-datastore"
+	"github.com/ipfs/go-datastore/namespace"
+	"github.com/ipfs/go-datastore/sync"
+	"github.com/libp2p/go-libp2p/core/crypto"
+	"github.com/libp2p/go-libp2p/core/peer"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func TestPutLoad(t *testing.T) {
+	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
+	defer t.Cleanup(cancel)
+
+	ds := sync.MutexWrap(datastore.NewMapDatastore())
+
+	t.Run("uninitialized-pidstore", func(t *testing.T) {
+		testPutLoad(ctx, ds, t)
+	})
+	t.Run("initialized-pidstore", func(t *testing.T) {
+		testPutLoad(ctx, ds, t)
+	})
+}
+
+// TestCorruptedPidstore tests whether a pidstore can detect
+// corruption and reset itself on construction.
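// A minimal round-trip through the PeerIDStore implemented above, backed by
// an in-memory datastore; a sketch, assuming only the APIs shown in the diff.
// The hard-coded peer ID string is an arbitrary example value taken from
// elsewhere in this patch.
package main

import (
	"context"
	"fmt"

	"github.com/ipfs/go-datastore"
	dssync "github.com/ipfs/go-datastore/sync"
	"github.com/libp2p/go-libp2p/core/peer"

	"github.com/celestiaorg/celestia-node/libs/pidstore"
)

func main() {
	ctx := context.Background()
	ds := dssync.MutexWrap(datastore.NewMapDatastore())

	// construction initializes (or validates) the underlying "peers" entry
	store, err := pidstore.NewPeerIDStore(ctx, ds)
	if err != nil {
		panic(err)
	}

	id, err := peer.Decode("12D3KooWSRqDfpLsQxpyUhLC9oXHD2WuZ2y5FWzDri7LT4Dw9fSi")
	if err != nil {
		panic(err)
	}

	if err := store.Put(ctx, []peer.ID{id}); err != nil {
		panic(err)
	}

	peers, err := store.Load(ctx)
	if err != nil {
		panic(err)
	}
	fmt.Println("persisted peers:", len(peers)) // 1
}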
+func TestCorruptedPidstore(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + defer t.Cleanup(cancel) + + ds := sync.MutexWrap(datastore.NewMapDatastore()) + + // intentionally corrupt the store + wrappedDS := namespace.Wrap(ds, storePrefix) + err := wrappedDS.Put(ctx, peersKey, []byte("corrupted")) + require.NoError(t, err) + + pidstore, err := NewPeerIDStore(ctx, ds) + require.NoError(t, err) + + got, err := pidstore.Load(ctx) + require.NoError(t, err) + assert.Equal(t, []peer.ID{}, got) +} + +func testPutLoad(ctx context.Context, ds datastore.Datastore, t *testing.T) { + peerstore, err := NewPeerIDStore(ctx, ds) + require.NoError(t, err) + + ids, err := generateRandomPeerList(10) + require.NoError(t, err) + + err = peerstore.Put(ctx, ids) + require.NoError(t, err) + + retrievedPeerlist, err := peerstore.Load(ctx) + require.NoError(t, err) + + assert.Equal(t, len(ids), len(retrievedPeerlist)) + assert.Equal(t, ids, retrievedPeerlist) +} + +func generateRandomPeerList(length int) ([]peer.ID, error) { + peerlist := make([]peer.ID, length) + for i := range peerlist { + key, err := rsa.GenerateKey(rand.Reader, 2096) + if err != nil { + return nil, err + } + + _, pubkey, err := crypto.KeyPairFromStdKey(key) + if err != nil { + return nil, err + } + + peerID, err := peer.IDFromPublicKey(pubkey) + if err != nil { + return nil, err + } + + peerlist[i] = peerID + } + + return peerlist, nil +} diff --git a/libs/utils/address.go b/libs/utils/address.go new file mode 100644 index 0000000000..c20d11ad06 --- /dev/null +++ b/libs/utils/address.go @@ -0,0 +1,45 @@ +package utils + +import ( + "errors" + "fmt" + "net" + "strings" +) + +var ErrInvalidIP = errors.New("invalid IP address or hostname given") + +// SanitizeAddr trims leading protocol scheme and port from the given +// IP address or hostname if present. +func SanitizeAddr(addr string) (string, error) { + original := addr + addr = strings.TrimPrefix(addr, "http://") + addr = strings.TrimPrefix(addr, "https://") + addr = strings.TrimPrefix(addr, "tcp://") + addr = strings.TrimSuffix(addr, "/") + addr = strings.Split(addr, ":")[0] + if addr == "" { + return "", fmt.Errorf("%w: %s", ErrInvalidIP, original) + } + return addr, nil +} + +// ValidateAddr sanitizes the given address and verifies that it is a valid IP or hostname. The +// sanitized address is returned. 
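// A minimal sketch of what SanitizeAddr above does to a few representative
// inputs, mirroring the test cases that follow; it assumes nothing beyond
// the function shown in the diff.
package main

import (
	"fmt"

	"github.com/celestiaorg/celestia-node/libs/utils"
)

func main() {
	for _, addr := range []string{
		"http://celestia.org",       // -> celestia.org
		"tcp://192.168.42.42:5050/", // -> 192.168.42.42
		"celestia.org",              // -> celestia.org (already sanitized)
	} {
		got, err := utils.SanitizeAddr(addr)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%s -> %s\n", addr, got)
	}
}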
+func ValidateAddr(addr string) (string, error) {
+	addr, err := SanitizeAddr(addr)
+	if err != nil {
+		return addr, err
+	}
+
+	ip := net.ParseIP(addr)
+	if ip != nil {
+		return addr, nil
+	}
+
+	resolved, err := net.ResolveIPAddr("ip4", addr)
+	if err != nil {
+		return addr, err
+	}
+	return resolved.String(), nil
+}
diff --git a/libs/utils/address_test.go b/libs/utils/address_test.go
new file mode 100644
index 0000000000..48a7747a4a
--- /dev/null
+++ b/libs/utils/address_test.go
@@ -0,0 +1,75 @@
+package utils
+
+import (
+	"net"
+	"testing"
+
+	"github.com/stretchr/testify/require"
+)
+
+func TestSanitizeAddr(t *testing.T) {
+	var tests = []struct {
+		addr string
+		want string
+		err  error
+	}{
+		// Testcase: trims protocol prefix
+		{addr: "http://celestia.org", want: "celestia.org"},
+		// Testcase: protocol prefix trimmed already
+		{addr: "celestia.org", want: "celestia.org"},
+		// Testcase: trims protocol prefix, and trims port and trailing slash suffix
+		{addr: "tcp://192.168.42.42:5050/", want: "192.168.42.42"},
+		// Testcase: already-sanitized ip passes through unchanged
+		{addr: "192.168.42.42", want: "192.168.42.42"},
+		// Testcase: empty addr
+		{addr: "", want: "", err: ErrInvalidIP},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.addr, func(t *testing.T) {
+			got, err := SanitizeAddr(tt.addr)
+			require.Equal(t, tt.want, got)
+			require.ErrorIs(t, err, tt.err)
+		})
+	}
+}
+
+func TestValidateAddr(t *testing.T) {
+	type want struct {
+		addr       string
+		unresolved bool
+	}
+	var tests = []struct {
+		addr string
+		want want
+	}{
+		// Testcase: ip is valid
+		{addr: "192.168.42.42:5050", want: want{addr: "192.168.42.42"}},
+		// Testcase: ip is valid, no port
+		{addr: "192.168.42.42", want: want{addr: "192.168.42.42"}},
+		// Testcase: resolves localhost
+		{addr: "http://localhost:8080/", want: want{unresolved: true}},
+		// Testcase: hostname is valid
+		{addr: "https://celestia.org", want: want{unresolved: true}},
+		// Testcase: hostname is valid, but no scheme
+		{addr: "celestia.org", want: want{unresolved: true}},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.addr, func(t *testing.T) {
+			got, err := ValidateAddr(tt.addr)
+			require.NoError(t, err)
+
+			// validate that the returned value is an IP
+			if ip := net.ParseIP(got); ip == nil {
+				t.Fatalf("returned value is not an IP: %s", got)
+			}
+
+			if tt.want.unresolved {
+				// unresolved addr has no addr to compare with
+				return
+			}
+			require.Equal(t, tt.want.addr, got)
+		})
+	}
+}
diff --git a/libs/utils/fs.go b/libs/utils/fs.go
index d67e9a1eaa..4ad8b6443e 100644
--- a/libs/utils/fs.go
+++ b/libs/utils/fs.go
@@ -1,6 +1,8 @@
 package utils
 
-import "os"
+import (
+	"os"
+)
 
 // Exists checks whether file or directory exists under the given 'path' on the system.
 func Exists(path string) bool {
diff --git a/libs/utils/resetctx.go b/libs/utils/resetctx.go
new file mode 100644
index 0000000000..a108cc27b4
--- /dev/null
+++ b/libs/utils/resetctx.go
@@ -0,0 +1,14 @@
+package utils
+
+import (
+	"context"
+)
+
+// ResetContextOnError returns a fresh context if the given context has an error.
+func ResetContextOnError(ctx context.Context) context.Context {
+	if ctx.Err() != nil {
+		ctx = context.Background()
+	}
+
+	return ctx
+}
diff --git a/libs/utils/square.go b/libs/utils/square.go
new file mode 100644
index 0000000000..68d7fc5ce7
--- /dev/null
+++ b/libs/utils/square.go
@@ -0,0 +1,10 @@
+package utils
+
+import (
+	"math"
+)
+
+// SquareSize returns the size of the square based on the given number of shares.
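// A minimal sketch of the typical use of ResetContextOnError above:
// detaching cleanup work from an already-canceled request context so that
// shutdown logic can still run. Assumes only the function shown in the diff.
package main

import (
	"context"
	"fmt"

	"github.com/celestiaorg/celestia-node/libs/utils"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	cancel() // simulate a caller whose context is already done

	// cleanup still needs a usable context, e.g. to persist state on shutdown
	cleanupCtx := utils.ResetContextOnError(ctx)
	fmt.Println(cleanupCtx.Err() == nil) // true: a fresh background context
}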
+func SquareSize(lenShares int) uint64 { + return uint64(math.Sqrt(float64(lenShares))) +} diff --git a/libs/utils/traces.go b/libs/utils/traces.go new file mode 100644 index 0000000000..c7bd6b72f5 --- /dev/null +++ b/libs/utils/traces.go @@ -0,0 +1,17 @@ +package utils + +import ( + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/trace" +) + +// SetStatusAndEnd sets the status of the span depending on the contents of the passed error and +// ends it. +func SetStatusAndEnd(span trace.Span, err error) { + defer span.End() + if err != nil { + span.SetStatus(codes.Error, err.Error()) + return + } + span.SetStatus(codes.Ok, "") +} diff --git a/logs/logs.go b/logs/logs.go index 5f8bd68f06..5cb9ed16c6 100644 --- a/logs/logs.go +++ b/logs/logs.go @@ -1,6 +1,8 @@ package logs -import logging "github.com/ipfs/go-log/v2" +import ( + logging "github.com/ipfs/go-log/v2" +) func SetAllLoggers(level logging.LogLevel) { logging.SetAllLoggers(level) @@ -11,10 +13,20 @@ func SetAllLoggers(level logging.LogLevel) { _ = logging.SetLogLevel("dht", "ERROR") _ = logging.SetLogLevel("swarm2", "WARN") _ = logging.SetLogLevel("bitswap", "WARN") + _ = logging.SetLogLevel("bitswap-client", "WARN") _ = logging.SetLogLevel("connmgr", "WARN") _ = logging.SetLogLevel("nat", "INFO") _ = logging.SetLogLevel("dht/RtRefreshManager", "FATAL") _ = logging.SetLogLevel("bitswap_network", "ERROR") + _ = logging.SetLogLevel("badger", "INFO") + _ = logging.SetLogLevel("basichost", "INFO") + _ = logging.SetLogLevel("pubsub", "WARN") + _ = logging.SetLogLevel("net/identify", "ERROR") + _ = logging.SetLogLevel("shrex/nd", "WARN") + _ = logging.SetLogLevel("shrex/eds", "WARN") + _ = logging.SetLogLevel("dagstore", "WARN") + _ = logging.SetLogLevel("dagstore/upgrader", "WARN") + _ = logging.SetLogLevel("fx", "FATAL") } func SetDebugLogging() { diff --git a/node/components.go b/node/components.go deleted file mode 100644 index 2150dbb72c..0000000000 --- a/node/components.go +++ /dev/null @@ -1,139 +0,0 @@ -package node - -import ( - "context" - "sync" - "time" - - logging "github.com/ipfs/go-log/v2" - "github.com/raulk/go-watchdog" - "go.uber.org/fx" - - "github.com/celestiaorg/celestia-node/fraud" - "github.com/celestiaorg/celestia-node/header" - "github.com/celestiaorg/celestia-node/libs/fxutil" - nodecore "github.com/celestiaorg/celestia-node/node/core" - "github.com/celestiaorg/celestia-node/node/p2p" - "github.com/celestiaorg/celestia-node/node/rpc" - "github.com/celestiaorg/celestia-node/node/services" - statecomponents "github.com/celestiaorg/celestia-node/node/state" - "github.com/celestiaorg/celestia-node/params" - headerServ "github.com/celestiaorg/celestia-node/service/header" - rpcServ "github.com/celestiaorg/celestia-node/service/rpc" - "github.com/celestiaorg/celestia-node/service/share" - "github.com/celestiaorg/celestia-node/service/state" -) - -// lightComponents keeps all the components as DI options required to build a Light Node. -func lightComponents(cfg *Config, store Store) fx.Option { - return fx.Options( - fx.Supply(Light), - baseComponents(cfg, store), - fx.Provide(services.DASer), - fx.Provide(services.HeaderExchangeP2P(cfg.Services)), - fx.Provide(services.LightAvailability(cfg.Services)), - fx.Provide(services.CacheAvailability[*share.LightAvailability]), - fx.Invoke(rpc.Handler), - ) -} - -// bridgeComponents keeps all the components as DI options required to build a Bridge Node. 
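// A minimal sketch of the intended call pattern for utils.SetStatusAndEnd
// above: defer it once with a named error return so the span records the
// outcome and is always ended. The tracer and operation names here are
// hypothetical.
package example

import (
	"context"

	"go.opentelemetry.io/otel"

	"github.com/celestiaorg/celestia-node/libs/utils"
)

func doWork(ctx context.Context) (err error) {
	ctx, span := otel.Tracer("example").Start(ctx, "doWork")
	defer func() {
		// sets codes.Error with the message on failure, codes.Ok otherwise
		utils.SetStatusAndEnd(span, err)
	}()

	// ... the traced work would go here, assigning its failure to err ...
	_ = ctx
	return nil
}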
-func bridgeComponents(cfg *Config, store Store) fx.Option { - return fx.Options( - fx.Supply(Bridge), - baseComponents(cfg, store), - nodecore.Components(cfg.Core), - fx.Supply(header.MakeExtendedHeader), - fx.Provide(services.FullAvailability(cfg.Services)), - fx.Provide(services.CacheAvailability[*share.FullAvailability]), - fx.Invoke(func( - state *state.Service, - share *share.Service, - header *headerServ.Service, - rpcSrv *rpcServ.Server, - ) { - rpc.Handler(state, share, header, rpcSrv, nil) - }), - ) -} - -// fullComponents keeps all the components as DI options required to build a Full Node. -func fullComponents(cfg *Config, store Store) fx.Option { - return fx.Options( - fx.Supply(Full), - baseComponents(cfg, store), - fx.Provide(services.DASer), - fx.Provide(services.HeaderExchangeP2P(cfg.Services)), - fx.Provide(services.FullAvailability(cfg.Services)), - fx.Provide(services.CacheAvailability[*share.FullAvailability]), - fx.Invoke(rpc.Handler), - ) -} - -// baseComponents keeps all the common components shared between different Node types. -func baseComponents(cfg *Config, store Store) fx.Option { - return fx.Options( - fx.Provide(params.DefaultNetwork), - fx.Provide(params.BootstrappersFor), - fx.Provide(context.Background), - fx.Supply(cfg), - fx.Supply(store.Config), - fx.Provide(store.Datastore), - fx.Provide(store.Keystore), - // share components - fx.Invoke(share.EnsureEmptySquareExists), - fx.Provide(services.ShareService), - // header components - fx.Provide(services.HeaderService), - fx.Provide(services.HeaderStore), - fx.Invoke(services.HeaderStoreInit(&cfg.Services)), - fxutil.ProvideAs(services.FraudService, new(fraud.Service), new(fraud.Subscriber)), - fx.Provide(services.HeaderSyncer), - fxutil.ProvideAs(services.P2PSubscriber, new(header.Broadcaster), new(header.Subscriber)), - fx.Provide(services.HeaderP2PExchangeServer), - // p2p components - fx.Invoke(invokeWatchdog(store.Path())), - p2p.Components(cfg.P2P), - // state components - statecomponents.Components(cfg.Core, cfg.Key), - // RPC components - fx.Provide(rpc.Server(cfg.RPC)), - ) -} - -// invokeWatchdog starts the memory watchdog that helps to prevent some of OOMs by forcing GCing -// It also collects heap profiles in the given directory when heap grows to more than 90% of memory usage -func invokeWatchdog(pprofdir string) func(lc fx.Lifecycle) error { - return func(lc fx.Lifecycle) (errOut error) { - onceWatchdog.Do(func() { - // to get watchdog information logged out - watchdog.Logger = logWatchdog - // these set up heap pprof auto capturing on disk when threshold hit 90% usage - watchdog.HeapProfileDir = pprofdir - watchdog.HeapProfileMaxCaptures = 10 - watchdog.HeapProfileThreshold = 0.9 - - policy := watchdog.NewWatermarkPolicy(0.50, 0.60, 0.70, 0.85, 0.90, 0.925, 0.95) - err, stop := watchdog.SystemDriven(0, time.Second*5, policy) - if err != nil { - errOut = err - return - } - - lc.Append(fx.Hook{ - OnStop: func(context.Context) error { - stop() - return nil - }, - }) - }) - return - } -} - -// TODO(@Wondetan): We must start watchdog only once. This is needed for tests where we run multiple instance -// of the Node. Ideally, the Node should have some testing options instead, so we can check for it and run without -// such utilities but it does not hurt to run one instance of watchdog per test. 
-var onceWatchdog = sync.Once{} - -var logWatchdog = logging.Logger("watchdog") diff --git a/node/config.go b/node/config.go deleted file mode 100644 index aa400f7eaa..0000000000 --- a/node/config.go +++ /dev/null @@ -1,95 +0,0 @@ -package node - -import ( - "io" - "os" - - "github.com/BurntSushi/toml" - - "github.com/celestiaorg/celestia-node/node/core" - "github.com/celestiaorg/celestia-node/node/key" - "github.com/celestiaorg/celestia-node/node/p2p" - "github.com/celestiaorg/celestia-node/node/services" - "github.com/celestiaorg/celestia-node/service/rpc" -) - -// ConfigLoader defines a function that loads a config from any source. -type ConfigLoader func() (*Config, error) - -// Config is main configuration structure for a Node. -// It combines configuration units for all Node subsystems. -type Config struct { - Core core.Config - Key key.Config - P2P p2p.Config - RPC rpc.Config - Services services.Config -} - -// DefaultConfig provides a default Config for a given Node Type 'tp'. -// NOTE: Currently, configs are identical, but this will change. -func DefaultConfig(tp Type) *Config { - switch tp { - case Bridge: - return &Config{ - Core: core.DefaultConfig(), - Key: key.DefaultConfig(), - P2P: p2p.DefaultConfig(), - RPC: rpc.DefaultConfig(), - Services: services.DefaultConfig(), - } - case Light: - return &Config{ - Key: key.DefaultConfig(), - RPC: rpc.DefaultConfig(), - P2P: p2p.DefaultConfig(), - Services: services.DefaultConfig(), - } - case Full: - return &Config{ - Key: key.DefaultConfig(), - RPC: rpc.DefaultConfig(), - P2P: p2p.DefaultConfig(), - Services: services.DefaultConfig(), - } - default: - panic("node: unknown Node Type") - } -} - -// SaveConfig saves Config 'cfg' under the given 'path'. -func SaveConfig(path string, cfg *Config) error { - f, err := os.Create(path) - if err != nil { - return err - } - defer f.Close() - - return cfg.Encode(f) -} - -// LoadConfig loads Config from the given 'path'. -func LoadConfig(path string) (*Config, error) { - f, err := os.Open(path) - if err != nil { - return nil, err - } - defer f.Close() - - var cfg Config - return &cfg, cfg.Decode(f) -} - -// Encode encodes a given Config into w. -// TODO(@Wondertan): We should have a description for each field written into w, -// so users can instantly understand purpose of each field. Ideally, we should have a utility program to parse comments -// from actual sources(*.go files) and generate docs from comments. Hint: use 'ast' package. -func (cfg *Config) Encode(w io.Writer) error { - return toml.NewEncoder(w).Encode(cfg) -} - -// Decode decodes a Config from a given reader r. -func (cfg *Config) Decode(r io.Reader) error { - _, err := toml.NewDecoder(r).Decode(cfg) - return err -} diff --git a/node/config_opts.go b/node/config_opts.go deleted file mode 100644 index 02eb33e43e..0000000000 --- a/node/config_opts.go +++ /dev/null @@ -1,103 +0,0 @@ -package node - -import "time" - -// WithRemoteCoreIP configures Node to connect to the given remote Core IP. -func WithRemoteCoreIP(ip string) Option { - return func(sets *settings) { - sets.cfg.Core.IP = ip - } -} - -// WithRemoteCorePort configures Node to connect to the given remote Core port. -func WithRemoteCorePort(port string) Option { - return func(sets *settings) { - sets.cfg.Core.RPCPort = port - } -} - -// WithGRPCPort configures Node to connect to given gRPC port -// for state-related queries. 
-func WithGRPCPort(port string) Option { - return func(sets *settings) { - sets.cfg.Core.GRPCPort = port - } -} - -// WithRPCPort configures Node to expose the given port for RPC -// queries. -func WithRPCPort(port string) Option { - return func(sets *settings) { - sets.cfg.RPC.Port = port - } -} - -// WithRPCAddress configures Node to listen on the given address for RPC -// queries. -func WithRPCAddress(addr string) Option { - return func(sets *settings) { - sets.cfg.RPC.Address = addr - } -} - -// WithTrustedHash sets TrustedHash to the Config. -func WithTrustedHash(hash string) Option { - return func(sets *settings) { - sets.cfg.Services.TrustedHash = hash - } -} - -// WithTrustedPeers appends new "trusted peers" to the Config. -func WithTrustedPeers(addr ...string) Option { - return func(sets *settings) { - sets.cfg.Services.TrustedPeers = append(sets.cfg.Services.TrustedPeers, addr...) - } -} - -// WithPeersLimit overrides default peer limit for peers found during discovery. -func WithPeersLimit(limit uint) Option { - return func(sets *settings) { - sets.cfg.Services.PeersLimit = limit - } -} - -// WithDiscoveryInterval sets interval between discovery sessions. -func WithDiscoveryInterval(interval time.Duration) Option { - return func(sets *settings) { - if interval <= 0 { - return - } - sets.cfg.Services.DiscoveryInterval = interval - } -} - -// WithAdvertiseInterval sets interval between advertises. -func WithAdvertiseInterval(interval time.Duration) Option { - return func(sets *settings) { - if interval <= 0 { - return - } - sets.cfg.Services.AdvertiseInterval = interval - } -} - -// WithConfig sets the entire custom config. -func WithConfig(custom *Config) Option { - return func(sets *settings) { - sets.cfg = custom - } -} - -// WithMutualPeers sets the `MutualPeers` field in the config. -func WithMutualPeers(addrs []string) Option { - return func(sets *settings) { - sets.cfg.P2P.MutualPeers = addrs - } -} - -// WithKeyringAccName sets the `KeyringAccName` field in the key config. -func WithKeyringAccName(name string) Option { - return func(sets *settings) { - sets.cfg.Key.KeyringAccName = name - } -} diff --git a/node/config_test.go b/node/config_test.go deleted file mode 100644 index 941e86fbf3..0000000000 --- a/node/config_test.go +++ /dev/null @@ -1,22 +0,0 @@ -package node - -import ( - "bytes" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestConfigWriteRead(t *testing.T) { - buf := bytes.NewBuffer(nil) - in := DefaultConfig(Bridge) - - err := in.Encode(buf) - require.NoError(t, err) - - var out Config - err = out.Decode(buf) - require.NoError(t, err) - assert.EqualValues(t, in, &out) -} diff --git a/node/core/core.go b/node/core/core.go deleted file mode 100644 index 3d2fcdf979..0000000000 --- a/node/core/core.go +++ /dev/null @@ -1,76 +0,0 @@ -package core - -import ( - "context" - "fmt" - - "github.com/ipfs/go-blockservice" - "go.uber.org/fx" - - "github.com/celestiaorg/celestia-node/libs/fxutil" - - "github.com/celestiaorg/celestia-node/core" - "github.com/celestiaorg/celestia-node/header" - headercore "github.com/celestiaorg/celestia-node/header/core" -) - -// Config combines all configuration fields for managing the relationship with a Core node. -type Config struct { - IP string - RPCPort string - GRPCPort string -} - -// DefaultConfig returns default configuration for managing the -// node's connection to a Celestia-Core endpoint. 
-func DefaultConfig() Config { - return Config{} -} - -// Components collects all the components and services related to managing the relationship with the Core node. -func Components(cfg Config) fx.Option { - return fx.Options( - fx.Provide(core.NewBlockFetcher), - fxutil.ProvideAs(headercore.NewExchange, new(header.Exchange)), - fx.Invoke(HeaderListener), - fx.Provide(func(lc fx.Lifecycle) (core.Client, error) { - if cfg.IP == "" { - return nil, fmt.Errorf("no celestia-core endpoint given") - } - client, err := RemoteClient(cfg) - if err != nil { - return nil, err - } - lc.Append(fx.Hook{ - OnStart: func(context.Context) error { - return client.Start() - }, - OnStop: func(context.Context) error { - return client.Stop() - }, - }) - - return client, err - }), - ) -} - -func HeaderListener( - lc fx.Lifecycle, - ex *core.BlockFetcher, - bcast header.Broadcaster, - bServ blockservice.BlockService, - construct header.ConstructFn, -) *headercore.Listener { - cl := headercore.NewListener(bcast, ex, bServ, construct) - lc.Append(fx.Hook{ - OnStart: cl.Start, - OnStop: cl.Stop, - }) - return cl -} - -// RemoteClient provides a constructor for core.Client over RPC. -func RemoteClient(cfg Config) (core.Client, error) { - return core.NewRemote(cfg.IP, cfg.RPCPort) -} diff --git a/node/init.go b/node/init.go deleted file mode 100644 index efedb0b57c..0000000000 --- a/node/init.go +++ /dev/null @@ -1,128 +0,0 @@ -package node - -import ( - "errors" - "fmt" - "os" - "path/filepath" - - "github.com/celestiaorg/celestia-node/libs/fslock" - "github.com/celestiaorg/celestia-node/libs/utils" - "github.com/celestiaorg/celestia-node/params" -) - -// Init initializes the Node FileSystem Store for the given Node Type 'tp' in the directory under 'path' with -// default Config. Options are applied over default Config and persisted on disk. -func Init(path string, tp Type, options ...Option) error { - sets := &settings{cfg: DefaultConfig(tp)} - for _, option := range options { - option(sets) - } - - path, err := storePath(path) - if err != nil { - return err - } - log.Infof("Initializing %s Node Store over '%s'", tp, path) - - err = initRoot(path) - if err != nil { - return err - } - - flock, err := fslock.Lock(lockPath(path)) - if err != nil { - if err == fslock.ErrLocked { - return ErrOpened - } - return err - } - defer flock.Unlock() // nolint: errcheck - - err = initDir(keysPath(path)) - if err != nil { - return err - } - - err = initDir(dataPath(path)) - if err != nil { - return err - } - - if sets.cfg == nil { - return errors.New("configuration is missing for the node's initialisation") - } - - cfgPath := configPath(path) - if !utils.Exists(cfgPath) { - err = SaveConfig(cfgPath, sets.cfg) - if err != nil { - return err - } - log.Infow("Saving config", "path", cfgPath) - } else { - log.Infow("Config already exists", "path", cfgPath) - } - - log.Info("Node Store initialized") - return nil -} - -// IsInit checks whether FileSystem Store was setup under given 'path'. -// If any required file/subdirectory does not exist, then false is reported. 
-func IsInit(path string) bool { - path, err := storePath(path) - if err != nil { - log.Errorw("parsing store path", "path", path, "err", err) - return false - } - - _, err = LoadConfig(configPath(path)) // load the Config and implicitly check for its existence - if err != nil { - log.Errorw("loading config", "path", path, "err", err) - return false - } - - if utils.Exists(keysPath(path)) && - utils.Exists(dataPath(path)) { - return true - } - - return false -} - -const perms = 0755 - -// initRoot initializes(creates) directory if not created and check if it is writable -func initRoot(path string) error { - err := initDir(path) - if err != nil { - return err - } - - // check for writing permissions - f, err := os.Create(filepath.Join(path, ".check")) - if err != nil { - return err - } - - err = f.Close() - if err != nil { - return err - } - - return os.Remove(f.Name()) -} - -// initDir creates a dir if not exist -func initDir(path string) error { - if utils.Exists(path) { - // if the dir already exists and `CELESTIA_CUSTOM` env var is set, - // fail out to prevent store corruption - if _, ok := os.LookupEnv(params.EnvCustomNetwork); ok { - return fmt.Errorf("cannot run a custom network over an already-existing node store") - } - return nil - } - return os.Mkdir(path, perms) -} diff --git a/node/init_test.go b/node/init_test.go deleted file mode 100644 index 3fd1064483..0000000000 --- a/node/init_test.go +++ /dev/null @@ -1,60 +0,0 @@ -package node - -import ( - "os" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/celestiaorg/celestia-node/libs/fslock" -) - -func TestInit(t *testing.T) { - dir := t.TempDir() - nodes := []Type{Light, Bridge} - - for _, node := range nodes { - require.NoError(t, Init(dir, node)) - assert.True(t, IsInit(dir)) - } -} - -func TestInitErrForInvalidPath(t *testing.T) { - path := "/invalid_path" - nodes := []Type{Light, Bridge} - - for _, node := range nodes { - require.Error(t, Init(path, node)) - } -} - -func TestIsInitWithBrokenConfig(t *testing.T) { - dir := t.TempDir() - f, err := os.Create(configPath(dir)) - require.NoError(t, err) - defer f.Close() - //nolint:errcheck - f.Write([]byte(` - [P2P] - ListenAddresses = [/ip4/0.0.0.0/tcp/2121] - `)) - assert.False(t, IsInit(dir)) -} - -func TestIsInitForNonExistDir(t *testing.T) { - path := "/invalid_path" - assert.False(t, IsInit(path)) -} - -func TestInitErrForLockedDir(t *testing.T) { - dir := t.TempDir() - flock, err := fslock.Lock(lockPath(dir)) - require.NoError(t, err) - defer flock.Unlock() //nolint:errcheck - nodes := []Type{Light, Bridge} - - for _, node := range nodes { - require.Error(t, Init(dir, node)) - } -} diff --git a/node/key/key.go b/node/key/key.go deleted file mode 100644 index 490897bf74..0000000000 --- a/node/key/key.go +++ /dev/null @@ -1,13 +0,0 @@ -package key - -// Config contains configuration parameters for constructing -// the node's keyring signer. 
-type Config struct { - KeyringAccName string -} - -func DefaultConfig() Config { - return Config{ - KeyringAccName: "", - } -} diff --git a/node/node.go b/node/node.go deleted file mode 100644 index ca7ed014c3..0000000000 --- a/node/node.go +++ /dev/null @@ -1,170 +0,0 @@ -package node - -import ( - "context" - "fmt" - "strings" - "time" - - "github.com/ipfs/go-blockservice" - exchange "github.com/ipfs/go-ipfs-exchange-interface" - logging "github.com/ipfs/go-log/v2" - "github.com/libp2p/go-libp2p-core/host" - "github.com/libp2p/go-libp2p-core/peer" - "github.com/libp2p/go-libp2p-core/routing" - pubsub "github.com/libp2p/go-libp2p-pubsub" - "github.com/libp2p/go-libp2p/p2p/net/conngater" - "go.uber.org/fx" - - "github.com/celestiaorg/celestia-node/core" - "github.com/celestiaorg/celestia-node/das" - "github.com/celestiaorg/celestia-node/fraud" - "github.com/celestiaorg/celestia-node/params" - "github.com/celestiaorg/celestia-node/service/header" - "github.com/celestiaorg/celestia-node/service/rpc" - "github.com/celestiaorg/celestia-node/service/share" - "github.com/celestiaorg/celestia-node/service/state" -) - -const Timeout = time.Second * 15 - -var log = logging.Logger("node") - -// Node represents the core structure of a Celestia node. It keeps references to all Celestia-specific -// components and services in one place and provides flexibility to run a Celestia node in different modes. -// Currently supported modes: -// * Bridge -// * Light -// * Full -type Node struct { - Type Type - Network params.Network - Bootstrappers params.Bootstrappers - Config *Config - - // CoreClient provides access to a Core node process. - CoreClient core.Client `optional:"true"` - - // rpc components - RPCServer *rpc.Server `optional:"true"` - // p2p components - Host host.Host - ConnGater *conngater.BasicConnectionGater - Routing routing.PeerRouting - DataExchange exchange.Interface - BlockService blockservice.BlockService - // p2p protocols - PubSub *pubsub.PubSub - // services - ShareServ *share.Service // not optional - HeaderServ *header.Service // not optional - StateServ *state.Service // not optional - FraudServ fraud.Service // not optional - DASer *das.DASer `optional:"true"` - - // start and stop control ref internal fx.App lifecycle funcs to be called from Start and Stop - start, stop lifecycleFunc -} - -// New assembles a new Node with the given type 'tp' over Store 'store'. -func New(tp Type, store Store, options ...Option) (*Node, error) { - cfg, err := store.Config() - if err != nil { - return nil, err - } - - s := &settings{cfg: cfg} - for _, option := range options { - option(s) - } - - switch tp { - case Bridge: - return newNode(bridgeComponents(s.cfg, store), fx.Options(s.opts...)) - case Light: - return newNode(lightComponents(s.cfg, store), fx.Options(s.opts...)) - case Full: - return newNode(fullComponents(s.cfg, store), fx.Options(s.opts...)) - default: - panic("node: unknown Node Type") - } -} - -// Start launches the Node and all its components and services. 
-func (n *Node) Start(ctx context.Context) error { - ctx, cancel := context.WithTimeout(ctx, Timeout) - defer cancel() - - err := n.start(ctx) - if err != nil { - log.Errorf("starting %s Node: %s", n.Type, err) - return fmt.Errorf("node: failed to start: %w", err) - } - - log.Infof("\n\n/_____/ /_____/ /_____/ /_____/ /_____/ \n\nStarted celestia DA node \nnode "+ - "type: %s\nnetwork: %s\n\n/_____/ /_____/ /_____/ /_____/ /_____/ \n", strings.ToLower(n.Type.String()), - n.Network) - - addrs, err := peer.AddrInfoToP2pAddrs(host.InfoFromHost(n.Host)) - if err != nil { - log.Errorw("Retrieving multiaddress information", "err", err) - return err - } - fmt.Println("The p2p host is listening on:") - for _, addr := range addrs { - fmt.Println("* ", addr.String()) - } - fmt.Println() - return nil -} - -// Run is a Start which blocks on the given context 'ctx' until it is canceled. -// If canceled, the Node is still in the running state and should be gracefully stopped via Stop. -func (n *Node) Run(ctx context.Context) error { - err := n.Start(ctx) - if err != nil { - return err - } - - <-ctx.Done() - return ctx.Err() -} - -// Stop shuts down the Node, all its running Components/Services and returns. -// Canceling the given context earlier 'ctx' unblocks the Stop and aborts graceful shutdown forcing remaining -// Components/Services to close immediately. -func (n *Node) Stop(ctx context.Context) error { - ctx, cancel := context.WithTimeout(ctx, Timeout) - defer cancel() - - err := n.stop(ctx) - if err != nil { - log.Errorf("Stopping %s Node: %s", n.Type, err) - return err - } - - log.Infof("stopped %s Node", n.Type) - return nil -} - -// newNode creates a new Node from given DI options. -// DI options allow initializing the Node with a customized set of components and services. -// NOTE: newNode is currently meant to be used privately to create various custom Node types e.g. Light, unless we -// decide to give package users the ability to create custom node types themselves. -func newNode(opts ...fx.Option) (*Node, error) { - node := new(Node) - app := fx.New( - fx.NopLogger, - fx.Extract(node), - fx.Options(opts...), - ) - if err := app.Err(); err != nil { - return nil, err - } - - node.start, node.stop = app.Start, app.Stop - return node, nil -} - -// lifecycleFunc defines a type for common lifecycle funcs. 
-type lifecycleFunc func(context.Context) error diff --git a/node/node_light_test.go b/node/node_light_test.go deleted file mode 100644 index c9b83d8041..0000000000 --- a/node/node_light_test.go +++ /dev/null @@ -1,42 +0,0 @@ -package node - -import ( - "crypto/rand" - "testing" - - "github.com/libp2p/go-libp2p-core/crypto" - mocknet "github.com/libp2p/go-libp2p/p2p/net/mock" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/celestiaorg/celestia-node/params" -) - -func TestNewLightWithP2PKey(t *testing.T) { - key, _, err := crypto.GenerateEd25519Key(rand.Reader) - require.NoError(t, err) - node := TestNode(t, Light, WithP2PKey(key)) - assert.True(t, node.Host.ID().MatchesPrivateKey(key)) -} - -func TestNewLightWithHost(t *testing.T) { - nw, _ := mocknet.WithNPeers(1) - node := TestNode(t, Light, WithHost(nw.Hosts()[0])) - assert.Equal(t, nw.Peers()[0], node.Host.ID()) -} - -func TestLight_WithMutualPeers(t *testing.T) { - peers := []string{ - "/ip6/100:0:114b:abc5:e13a:c32f:7a9e:f00a/tcp/2121/p2p/12D3KooWSRqDfpLsQxpyUhLC9oXHD2WuZ2y5FWzDri7LT4Dw9fSi", - "/ip4/192.168.1.10/tcp/2121/p2p/12D3KooWSRqDfpLsQxpyUhLC9oXHD2WuZ2y5FWzDri7LT4Dw9fSi", - } - node := TestNode(t, Light, WithMutualPeers(peers)) - require.NotNil(t, node) - assert.Equal(t, node.Config.P2P.MutualPeers, peers) -} - -func TestLight_WithNetwork(t *testing.T) { - node := TestNode(t, Light) - require.NotNil(t, node) - assert.Equal(t, params.Private, node.Network) -} diff --git a/node/node_test.go b/node/node_test.go deleted file mode 100644 index 2b3fd31fa6..0000000000 --- a/node/node_test.go +++ /dev/null @@ -1,45 +0,0 @@ -package node - -import ( - "context" - "strconv" - "testing" - - "github.com/stretchr/testify/require" -) - -func TestLifecycle(t *testing.T) { - var test = []struct { - tp Type - coreExpected bool - }{ - {tp: Bridge, coreExpected: true}, - {tp: Full}, - {tp: Light}, - } - - for i, tt := range test { - t.Run(strconv.Itoa(i), func(t *testing.T) { - node := TestNode(t, tt.tp) - require.NotNil(t, node) - require.NotNil(t, node.Config) - require.NotNil(t, node.Host) - require.NotNil(t, node.HeaderServ) - require.NotNil(t, node.StateServ) - require.Equal(t, tt.tp, node.Type) - - if tt.coreExpected { - require.NotNil(t, node.CoreClient) - } - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - err := node.Start(ctx) - require.NoError(t, err) - - err = node.Stop(ctx) - require.NoError(t, err) - }) - } -} diff --git a/node/p2p/bitswap.go b/node/p2p/bitswap.go deleted file mode 100644 index 3777f1dea1..0000000000 --- a/node/p2p/bitswap.go +++ /dev/null @@ -1,68 +0,0 @@ -package p2p - -import ( - "context" - "fmt" - - "github.com/ipfs/go-bitswap" - "github.com/ipfs/go-bitswap/network" - "github.com/ipfs/go-datastore" - blockstore "github.com/ipfs/go-ipfs-blockstore" - exchange "github.com/ipfs/go-ipfs-exchange-interface" - "github.com/libp2p/go-libp2p-core/host" - "github.com/libp2p/go-libp2p-core/protocol" - routinghelpers "github.com/libp2p/go-libp2p-routing-helpers" - "go.uber.org/fx" - - "github.com/celestiaorg/celestia-node/libs/fxutil" - nparams "github.com/celestiaorg/celestia-node/params" -) - -const ( - // default size of bloom filter in blockStore - defaultBloomFilterSize = 512 << 10 - // default amount of hash functions defined for bloom filter - defaultBloomFilterHashes = 7 - // default size of arc cache in blockStore - defaultARCCacheSize = 64 << 10 -) - -// DataExchange provides a constructor for IPFS block's DataExchange over BitSwap. 
-func DataExchange(cfg Config) func(bitSwapParams) (exchange.Interface, blockstore.Blockstore, error) { - return func(params bitSwapParams) (exchange.Interface, blockstore.Blockstore, error) { - ctx := fxutil.WithLifecycle(params.Ctx, params.Lc) - bs, err := blockstore.CachedBlockstore( - ctx, - blockstore.NewBlockstore(params.Ds), - blockstore.CacheOpts{ - HasBloomFilterSize: defaultBloomFilterSize, - HasBloomFilterHashes: defaultBloomFilterHashes, - HasARCCacheSize: defaultARCCacheSize, - }, - ) - if err != nil { - return nil, nil, err - } - prefix := protocol.ID(fmt.Sprintf("/celestia/%s", params.Net)) - return bitswap.New( - ctx, - network.NewFromIpfsHost(params.Host, &routinghelpers.Null{}, network.Prefix(prefix)), - bs, - bitswap.ProvideEnabled(false), - // NOTE: These below ar required for our protocol to work reliably. - // See https://github.com/celestiaorg/celestia-node/issues/732 - bitswap.SetSendDontHaves(false), - bitswap.SetSimulateDontHavesOnTimeout(false), - ), bs, nil - } -} - -type bitSwapParams struct { - fx.In - - Ctx context.Context - Net nparams.Network - Lc fx.Lifecycle - Host host.Host - Ds datastore.Batching -} diff --git a/node/p2p/host.go b/node/p2p/host.go deleted file mode 100644 index f121041a23..0000000000 --- a/node/p2p/host.go +++ /dev/null @@ -1,78 +0,0 @@ -package p2p - -import ( - "context" - "fmt" - - "github.com/libp2p/go-libp2p" - "github.com/libp2p/go-libp2p-core/connmgr" - "github.com/libp2p/go-libp2p-core/crypto" - "github.com/libp2p/go-libp2p-core/host" - "github.com/libp2p/go-libp2p-core/peer" - "github.com/libp2p/go-libp2p-core/peerstore" - "github.com/libp2p/go-libp2p-core/routing" - p2pconfig "github.com/libp2p/go-libp2p/config" - routedhost "github.com/libp2p/go-libp2p/p2p/host/routed" - "github.com/libp2p/go-libp2p/p2p/net/conngater" - "go.uber.org/fx" - - nparams "github.com/celestiaorg/celestia-node/params" -) - -// RoutedHost constructs a wrapped Host that may fallback to address discovery, -// if any top-level operation on the Host is provided with PeerID(Hash(PbK)) only. -func RoutedHost(base HostBase, r routing.PeerRouting) host.Host { - return routedhost.Wrap(base, r) -} - -// Host returns constructor for Host. -func Host(cfg Config) func(hostParams) (HostBase, error) { - return func(params hostParams) (HostBase, error) { - opts := []libp2p.Option{ - libp2p.NoListenAddrs, // do not listen automatically - libp2p.AddrsFactory(params.AddrF), - libp2p.Identity(params.Key), - libp2p.Peerstore(params.PStore), - libp2p.ConnectionManager(params.ConnMngr), - libp2p.ConnectionGater(params.ConnGater), - libp2p.UserAgent(fmt.Sprintf("celestia-%s", params.Net)), - libp2p.NATPortMap(), // enables upnp - libp2p.DisableRelay(), - // to clearly define what defaults we rely upon - libp2p.DefaultSecurity, - libp2p.DefaultTransports, - libp2p.DefaultMuxers, - } - - // TODO(@Wondertan): Other, non Celestia bootstrapper may also enable NATService to contribute the network. - if cfg.Bootstrapper { - opts = append(opts, libp2p.EnableNATService()) - } - - h, err := libp2p.NewWithoutDefaults(opts...) 
- if err != nil { - return nil, err - } - - params.Lc.Append(fx.Hook{OnStop: func(context.Context) error { - return h.Close() - }}) - - return h, nil - } -} - -type HostBase host.Host - -type hostParams struct { - fx.In - - Net nparams.Network - Lc fx.Lifecycle - ID peer.ID - Key crypto.PrivKey - AddrF p2pconfig.AddrsFactory - PStore peerstore.Peerstore - ConnMngr connmgr.ConnManager - ConnGater *conngater.BasicConnectionGater -} diff --git a/node/p2p/ipld.go b/node/p2p/ipld.go deleted file mode 100644 index 5835618a43..0000000000 --- a/node/p2p/ipld.go +++ /dev/null @@ -1,12 +0,0 @@ -package p2p - -import ( - "github.com/ipfs/go-blockservice" - blockstore "github.com/ipfs/go-ipfs-blockstore" - exchange "github.com/ipfs/go-ipfs-exchange-interface" -) - -// BlockService constructs IPFS's BlockService for fetching arbitrary Merkle structures. -func BlockService(bs blockstore.Blockstore, ex exchange.Interface) blockservice.BlockService { - return blockservice.New(bs, ex) -} diff --git a/node/p2p/misc.go b/node/p2p/misc.go deleted file mode 100644 index d77c5c9868..0000000000 --- a/node/p2p/misc.go +++ /dev/null @@ -1,67 +0,0 @@ -package p2p - -import ( - "time" - - "github.com/ipfs/go-datastore" - coreconnmgr "github.com/libp2p/go-libp2p-core/connmgr" - "github.com/libp2p/go-libp2p-core/peerstore" - "github.com/libp2p/go-libp2p-peerstore/pstoremem" - "github.com/libp2p/go-libp2p/p2p/net/conngater" - "github.com/libp2p/go-libp2p/p2p/net/connmgr" - - "github.com/celestiaorg/celestia-node/params" -) - -// ConnManagerConfig configures connection manager. -type ConnManagerConfig struct { - // Low and High are watermarks governing the number of connections that'll be maintained. - Low, High int - // GracePeriod is the amount of time a newly opened connection is given before it becomes subject to pruning. - GracePeriod time.Duration -} - -// DefaultConnManagerConfig returns defaults for ConnManagerConfig. -func DefaultConnManagerConfig() ConnManagerConfig { - return ConnManagerConfig{ - Low: 50, - High: 100, - GracePeriod: time.Minute, - } -} - -// ConnectionManager provides a constructor for ConnectionManager. -func ConnectionManager(cfg Config) func(params.Bootstrappers) (coreconnmgr.ConnManager, error) { - return func(bpeers params.Bootstrappers) (coreconnmgr.ConnManager, error) { - fpeers, err := cfg.mutualPeers() - if err != nil { - return nil, err - } - cm, err := connmgr.NewConnManager( - cfg.ConnManager.Low, - cfg.ConnManager.High, - connmgr.WithGracePeriod(cfg.ConnManager.GracePeriod), - ) - if err != nil { - return nil, err - } - for _, info := range fpeers { - cm.Protect(info.ID, "protected-mutual") - } - for _, info := range bpeers { - cm.Protect(info.ID, "protected-bootstrap") - } - - return cm, nil - } -} - -// ConnectionGater constructs a BasicConnectionGater. -func ConnectionGater(ds datastore.Batching) (*conngater.BasicConnectionGater, error) { - return conngater.NewBasicConnectionGater(ds) -} - -// PeerStore constructs a PeerStore. 
-func PeerStore() (peerstore.Peerstore, error) { - return pstoremem.NewPeerstore() -} diff --git a/node/p2p/pubsub.go b/node/p2p/pubsub.go deleted file mode 100644 index b87137fccc..0000000000 --- a/node/p2p/pubsub.go +++ /dev/null @@ -1,56 +0,0 @@ -package p2p - -import ( - "context" - - "github.com/libp2p/go-libp2p-core/host" - pubsub "github.com/libp2p/go-libp2p-pubsub" - pubsub_pb "github.com/libp2p/go-libp2p-pubsub/pb" - "go.uber.org/fx" - "golang.org/x/crypto/blake2b" - - "github.com/celestiaorg/celestia-node/libs/fxutil" -) - -// PubSub provides a constructor for PubSub protocol with GossipSub routing. -func PubSub(cfg Config) func(pubSubParams) (*pubsub.PubSub, error) { - return func(params pubSubParams) (*pubsub.PubSub, error) { - fpeers, err := cfg.mutualPeers() - if err != nil { - return nil, err - } - - // TODO(@Wondertan) for PubSub options: - // * Hash-based MsgId function. - // * Validate default peer scoring params for our use-case. - // * Strict subscription filter - // * For different network types(mainnet/testnet/devnet) we should have different network topic names. - // * Hardcode positive score for bootstrap peers - // * Bootstrappers should only gossip and PX - // * Peers should trust boostrappers, so peerscore for them should always be high. - opts := []pubsub.Option{ - pubsub.WithPeerExchange(cfg.PeerExchange || cfg.Bootstrapper), - pubsub.WithDirectPeers(fpeers), - pubsub.WithMessageIdFn(hashMsgID), - } - - return pubsub.NewGossipSub( - fxutil.WithLifecycle(params.Ctx, params.Lc), - params.Host, - opts..., - ) - } -} - -func hashMsgID(m *pubsub_pb.Message) string { - hash := blake2b.Sum256(m.Data) - return string(hash[:]) -} - -type pubSubParams struct { - fx.In - - Ctx context.Context - Lc fx.Lifecycle - Host host.Host -} diff --git a/node/p2p/routing.go b/node/p2p/routing.go deleted file mode 100644 index a077eb7d67..0000000000 --- a/node/p2p/routing.go +++ /dev/null @@ -1,75 +0,0 @@ -package p2p - -import ( - "context" - "fmt" - - "github.com/ipfs/go-datastore" - logging "github.com/ipfs/go-log/v2" - "github.com/libp2p/go-libp2p-core/protocol" - "github.com/libp2p/go-libp2p-core/routing" - dht "github.com/libp2p/go-libp2p-kad-dht" - "go.uber.org/fx" - - "github.com/celestiaorg/celestia-node/libs/fxutil" - nparams "github.com/celestiaorg/celestia-node/params" -) - -var log = logging.Logger("node/p2p") - -// ContentRouting constructs nil content routing, -// as for our use-case existing ContentRouting mechanisms, e.g DHT, are unsuitable -func ContentRouting(r routing.PeerRouting) routing.ContentRouting { - return r.(*dht.IpfsDHT) -} - -// PeerRouting provides constructor for PeerRouting over DHT. -// Basically, this provides a way to discover peer addresses by respecting public keys. -func PeerRouting(cfg Config) func(routingParams) (routing.PeerRouting, error) { - return func(params routingParams) (routing.PeerRouting, error) { - if cfg.RoutingTableRefreshPeriod <= 0 { - cfg.RoutingTableRefreshPeriod = defaultRoutingRefreshPeriod - log.Warnf("routingTableRefreshPeriod is not valid. 
restoring to default value: %d", cfg.RoutingTableRefreshPeriod) - } - opts := []dht.Option{ - dht.Mode(dht.ModeAuto), - dht.BootstrapPeers(params.Peers...), - dht.ProtocolPrefix(protocol.ID(fmt.Sprintf("/celestia/%s", params.Net))), - dht.Datastore(params.DataStore), - dht.RoutingTableRefreshPeriod(cfg.RoutingTableRefreshPeriod), - } - - if cfg.Bootstrapper { - // override options for bootstrapper - opts = append(opts, - dht.Mode(dht.ModeServer), // it must accept incoming connections - dht.BootstrapPeers(), // no bootstrappers for a bootstrapper ¯\_(ツ)_/¯ - ) - } - - d, err := dht.New(fxutil.WithLifecycle(params.Ctx, params.Lc), params.Host, opts...) - if err != nil { - return nil, err - } - params.Lc.Append(fx.Hook{ - OnStart: func(ctx context.Context) error { - return d.Bootstrap(ctx) - }, - OnStop: func(context.Context) error { - return d.Close() - }, - }) - return d, nil - } -} - -type routingParams struct { - fx.In - - Ctx context.Context - Net nparams.Network - Peers nparams.Bootstrappers - Lc fx.Lifecycle - Host HostBase - DataStore datastore.Batching -} diff --git a/node/rpc/component.go b/node/rpc/component.go deleted file mode 100644 index 9c8cc14769..0000000000 --- a/node/rpc/component.go +++ /dev/null @@ -1,38 +0,0 @@ -package rpc - -import ( - "go.uber.org/fx" - - "github.com/celestiaorg/celestia-node/das" - "github.com/celestiaorg/celestia-node/service/header" - "github.com/celestiaorg/celestia-node/service/rpc" - "github.com/celestiaorg/celestia-node/service/share" - "github.com/celestiaorg/celestia-node/service/state" -) - -// Server constructs a new RPC Server from the given Config. -// TODO @renaynay @Wondertan: this component is meant to be removed on implementation -// of https://github.com/celestiaorg/celestia-node/pull/506. -func Server(cfg rpc.Config) func(lc fx.Lifecycle) *rpc.Server { - return func(lc fx.Lifecycle) *rpc.Server { - serv := rpc.NewServer(cfg) - lc.Append(fx.Hook{ - OnStart: serv.Start, - OnStop: serv.Stop, - }) - return serv - } -} - -// Handler constructs a new RPC Handler from the given services. -func Handler( - state *state.Service, - share *share.Service, - header *header.Service, - serv *rpc.Server, - daser *das.DASer, -) { - handler := rpc.NewHandler(state, share, header, daser) - handler.RegisterEndpoints(serv) - handler.RegisterMiddleware(serv) -} diff --git a/node/rpc_test.go b/node/rpc_test.go deleted file mode 100644 index 7a8b78fad1..0000000000 --- a/node/rpc_test.go +++ /dev/null @@ -1,254 +0,0 @@ -package node - -import ( - "context" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "strconv" - "testing" - - "github.com/ipfs/go-datastore" - ds_sync "github.com/ipfs/go-datastore/sync" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "go.uber.org/fx" - - "github.com/celestiaorg/celestia-node/das" - "github.com/celestiaorg/celestia-node/header" - "github.com/celestiaorg/celestia-node/header/local" - "github.com/celestiaorg/celestia-node/header/store" - "github.com/celestiaorg/celestia-node/header/sync" - service "github.com/celestiaorg/celestia-node/service/header" - "github.com/celestiaorg/celestia-node/service/rpc" - "github.com/celestiaorg/celestia-node/service/share" -) - -// NOTE: The following tests are against common RPC endpoints provided by -// celestia-node. They will be removed upon refactoring of the RPC -// architecture and Public API. @renaynay @Wondertan. - -const testHeight = uint64(2) - -// TestNamespacedSharesRequest tests the `/namespaced_shares` endpoint. 
-func TestNamespacedSharesRequest(t *testing.T) { - testGetNamespacedRequest(t, "namespaced_shares", func(t *testing.T, resp *http.Response) { - t.Helper() - namespacedShares := new(rpc.NamespacedSharesResponse) - err := json.NewDecoder(resp.Body).Decode(namespacedShares) - assert.NoError(t, err) - assert.Equal(t, testHeight, namespacedShares.Height) - }) -} - -// TestNamespacedDataRequest tests the `/namespaced_shares` endpoint. -func TestNamespacedDataRequest(t *testing.T) { - testGetNamespacedRequest(t, "namespaced_data", func(t *testing.T, resp *http.Response) { - t.Helper() - namespacedData := new(rpc.NamespacedDataResponse) - err := json.NewDecoder(resp.Body).Decode(namespacedData) - assert.NoError(t, err) - assert.Equal(t, testHeight, namespacedData.Height) - }) -} - -func testGetNamespacedRequest(t *testing.T, endpointName string, assertResponseOK func(*testing.T, *http.Response)) { - t.Helper() - nd := setupNodeWithModifiedRPC(t) - // create several requests for header at height 2 - var tests = []struct { - nID string - expectedErr bool - errMsg string - }{ - { - nID: "0000000000000001", - expectedErr: false, - }, - { - nID: "00000000000001", - expectedErr: true, - errMsg: "expected namespace ID of size 8, got 7", - }, - { - nID: "000000000000000001", - expectedErr: true, - errMsg: "expected namespace ID of size 8, got 9", - }, - } - - for i, tt := range tests { - t.Run(strconv.Itoa(i), func(t *testing.T) { - endpoint := fmt.Sprintf("http://127.0.0.1:%s/%s/%s/height/%d", - nd.RPCServer.ListenAddr()[5:], endpointName, tt.nID, testHeight) - resp, err := http.Get(endpoint) - defer func() { - err = resp.Body.Close() - require.NoError(t, err) - }() - // check resp - if tt.expectedErr { - require.False(t, resp.StatusCode == http.StatusOK) - require.Equal(t, "application/json", resp.Header.Get("Content-Type")) - - var errorMessage string - err := json.NewDecoder(resp.Body).Decode(&errorMessage) - - require.NoError(t, err) - require.Equal(t, tt.errMsg, errorMessage) - - return - } - require.NoError(t, err) - require.True(t, resp.StatusCode == http.StatusOK) - - assertResponseOK(t, resp) - }) - } -} - -// TestHeadRequest rests the `/head` endpoint. -func TestHeadRequest(t *testing.T) { - nd := setupNodeWithModifiedRPC(t) - endpoint := fmt.Sprintf("http://127.0.0.1:%s/head", nd.RPCServer.ListenAddr()[5:]) - resp, err := http.Get(endpoint) - require.NoError(t, err) - defer func() { - err = resp.Body.Close() - require.NoError(t, err) - }() - require.True(t, resp.StatusCode == http.StatusOK) -} - -// TestHeaderRequest tests the `/header` endpoint. -func TestHeaderRequest(t *testing.T) { - nd := setupNodeWithModifiedRPC(t) - // create several requests for headers - var tests = []struct { - height uint64 - expectedErr bool - }{ - { - height: uint64(2), - expectedErr: false, - }, - { - height: uint64(0), - expectedErr: true, - }, - } - - for i, tt := range tests { - t.Run(strconv.Itoa(i), func(t *testing.T) { - endpoint := fmt.Sprintf("http://127.0.0.1:%s/header/%d", nd.RPCServer.ListenAddr()[5:], tt.height) - resp, err := http.Get(endpoint) - require.NoError(t, err) - defer func() { - err = resp.Body.Close() - require.NoError(t, err) - }() - - require.Equal(t, tt.expectedErr, resp.StatusCode != http.StatusOK) - }) - } -} - -// TestAvailabilityRequest tests the /data_available endpoint. 
-func TestAvailabilityRequest(t *testing.T) { - nd := setupNodeWithModifiedRPC(t) - - height := 5 - endpoint := fmt.Sprintf("http://127.0.0.1:%s/data_available/%d", nd.RPCServer.ListenAddr()[5:], height) - resp, err := http.Get(endpoint) - require.NoError(t, err) - defer func() { - err = resp.Body.Close() - require.NoError(t, err) - }() - - buf, err := ioutil.ReadAll(resp.Body) - require.NoError(t, err) - - availResp := new(rpc.AvailabilityResponse) - err = json.Unmarshal(buf, &availResp) - require.NoError(t, err) - - assert.True(t, availResp.Available) -} - -func TestDASStateRequest(t *testing.T) { - nd := setupNodeWithModifiedRPC(t) - - endpoint := fmt.Sprintf("http://127.0.0.1:%s/daser/state", nd.RPCServer.ListenAddr()[5:]) - resp, err := http.Get(endpoint) - require.NoError(t, err) - defer func() { - err = resp.Body.Close() - require.NoError(t, err) - }() - dasStateResp := new(rpc.DasStateResponse) - err = json.NewDecoder(resp.Body).Decode(dasStateResp) - require.NoError(t, err) - // ensure daser is running - assert.True(t, dasStateResp.SampleRoutine.IsRunning) -} - -func setupNodeWithModifiedRPC(t *testing.T) *Node { - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - // create test node with a dummy header service, manually add a dummy header - // service and register it with rpc handler/server - hServ := setupHeaderService(ctx, t) - daser := setupDASer() - // create overrides - overrideHeaderServ := func(sets *settings) { - sets.opts = append(sets.opts, fx.Replace(hServ)) - } - overrideDASer := func(sets *settings) { - sets.opts = append(sets.opts, fx.Replace(func() func(lc fx.Lifecycle) *das.DASer { - return func(lc fx.Lifecycle) *das.DASer { - lc.Append(fx.Hook{ - OnStart: daser.Start, - OnStop: daser.Stop, - }) - return daser - } - })) - } - overrideRPCHandler := func(sets *settings) { - sets.opts = append(sets.opts, fx.Invoke(func(srv *rpc.Server) { - handler := rpc.NewHandler(nil, nil, hServ, daser) - handler.RegisterEndpoints(srv) - })) - } - nd := TestNode(t, Full, overrideHeaderServ, overrideDASer, overrideRPCHandler) - // start node - err := nd.Start(ctx) - require.NoError(t, err) - t.Cleanup(func() { - err = nd.Stop(ctx) - require.NoError(t, err) - }) - return nd -} - -func setupHeaderService(ctx context.Context, t *testing.T) *service.Service { - suite := header.NewTestSuite(t, 1) - head := suite.Head() - // create header stores - remoteStore := store.NewTestStore(ctx, t, head) - localStore := store.NewTestStore(ctx, t, head) - _, err := localStore.Append(ctx, suite.GenExtendedHeaders(5)...) 
- require.NoError(t, err)
- // create syncer
- syncer := sync.NewSyncer(local.NewExchange(remoteStore), localStore, &header.DummySubscriber{})
-
- return service.NewHeaderService(syncer, nil, nil, nil, localStore)
-}
-
-func setupDASer() *das.DASer {
- ds := ds_sync.MutexWrap(datastore.NewMapDatastore())
- sub := &header.DummySubscriber{Headers: make([]*header.ExtendedHeader, 10)}
- return das.NewDASer(share.NewTestSuccessfulAvailability(), sub, nil, ds, nil)
-}
diff --git a/node/services/config.go b/node/services/config.go
deleted file mode 100644
index 501686d58f..0000000000
--- a/node/services/config.go
+++ /dev/null
@@ -1,76 +0,0 @@
-package services
-
-import (
- "encoding/hex"
- "time"
-
- logging "github.com/ipfs/go-log/v2"
- "github.com/libp2p/go-libp2p-core/peer"
- "github.com/multiformats/go-multiaddr"
- tmbytes "github.com/tendermint/tendermint/libs/bytes"
-
- "github.com/celestiaorg/celestia-node/params"
-)
-
-var log = logging.Logger("node/services")
-
-type Config struct {
- // TrustedHash is the Block/Header hash that Nodes use as starting point for header synchronization.
- // Only affects the node once on initial sync.
- TrustedHash string
- // TrustedPeers are the peers we trust to fetch headers from.
- // Note: trusted does *not* imply that Headers are unverified; these peers are simply trusted
- // to reliably serve headers at any moment.
- TrustedPeers []string
- // NOTE: All further fields related to share/discovery.
- // PeersLimit defines how many peers will be added during discovery.
- PeersLimit uint
- // DiscoveryInterval is an interval between discovery sessions.
- DiscoveryInterval time.Duration
- // AdvertiseInterval is an interval between advertising sessions.
- // NOTE: only full and bridge can advertise themselves.
- AdvertiseInterval time.Duration
-}
-
-func DefaultConfig() Config {
- return Config{
- TrustedHash: "",
- TrustedPeers: make([]string, 0),
- PeersLimit: 3,
- DiscoveryInterval: time.Second * 30,
- AdvertiseInterval: time.Second * 30,
- }
-}
-
-func (cfg *Config) trustedPeers(bpeers params.Bootstrappers) (infos []peer.AddrInfo, err error) {
- if len(cfg.TrustedPeers) == 0 {
- log.Infof("No trusted peers in config, initializing with default bootstrappers as trusted peers")
- return bpeers, nil
- }
-
- infos = make([]peer.AddrInfo, len(cfg.TrustedPeers))
- for i, tpeer := range cfg.TrustedPeers {
- ma, err := multiaddr.NewMultiaddr(tpeer)
- if err != nil {
- return nil, err
- }
- p, err := peer.AddrInfoFromP2pAddr(ma)
- if err != nil {
- return nil, err
- }
- infos[i] = *p
- }
-
- return
-}
-
-func (cfg *Config) trustedHash(net params.Network) (tmbytes.HexBytes, error) {
- if cfg.TrustedHash == "" {
- gen, err := params.GenesisFor(net)
- if err != nil {
- return nil, err
- }
- return hex.DecodeString(gen)
- }
- return hex.DecodeString(cfg.TrustedHash)
-}
diff --git a/node/services/service.go b/node/services/service.go
deleted file mode 100644
index 226d4842d0..0000000000
--- a/node/services/service.go
+++ /dev/null
@@ -1,276 +0,0 @@
-package services
-
-import (
- "context"
-
- "github.com/ipfs/go-blockservice"
- "github.com/ipfs/go-datastore"
- "github.com/libp2p/go-libp2p-core/host"
- "github.com/libp2p/go-libp2p-core/peer"
- "github.com/libp2p/go-libp2p-core/peerstore"
- "github.com/libp2p/go-libp2p-core/routing"
- pubsub "github.com/libp2p/go-libp2p-pubsub"
- routingdisc "github.com/libp2p/go-libp2p/p2p/discovery/routing"
- "go.uber.org/fx"
-
- "github.com/celestiaorg/celestia-node/das"
- "github.com/celestiaorg/celestia-node/fraud"
- 
"github.com/celestiaorg/celestia-node/header" - "github.com/celestiaorg/celestia-node/header/p2p" - "github.com/celestiaorg/celestia-node/header/store" - "github.com/celestiaorg/celestia-node/header/sync" - "github.com/celestiaorg/celestia-node/libs/fxutil" - "github.com/celestiaorg/celestia-node/params" - headerservice "github.com/celestiaorg/celestia-node/service/header" - "github.com/celestiaorg/celestia-node/service/share" -) - -// HeaderSyncer creates a new Syncer. -func HeaderSyncer( - ctx context.Context, - lc fx.Lifecycle, - ex header.Exchange, - store header.Store, - sub header.Subscriber, - fservice fraud.Service, -) (*sync.Syncer, error) { - syncer := sync.NewSyncer(ex, store, sub) - lifecycleCtx := fxutil.WithLifecycle(ctx, lc) - lc.Append(fx.Hook{ - OnStart: func(startCtx context.Context) error { - return FraudLifecycle(startCtx, lifecycleCtx, fraud.BadEncoding, fservice, syncer.Start, syncer.Stop) - }, - OnStop: syncer.Stop, - }) - - return syncer, nil -} - -// P2PSubscriber creates a new p2p.Subscriber. -func P2PSubscriber(lc fx.Lifecycle, sub *pubsub.PubSub) (*p2p.Subscriber, *p2p.Subscriber) { - p2pSub := p2p.NewSubscriber(sub) - lc.Append(fx.Hook{ - OnStart: p2pSub.Start, - OnStop: p2pSub.Stop, - }) - return p2pSub, p2pSub -} - -// HeaderService creates a new header.Service. -func HeaderService( - syncer *sync.Syncer, - sub header.Subscriber, - p2pServer *p2p.ExchangeServer, - ex header.Exchange, - store header.Store, -) *headerservice.Service { - return headerservice.NewHeaderService(syncer, sub, p2pServer, ex, store) -} - -// HeaderExchangeP2P constructs new Exchange for headers. -func HeaderExchangeP2P(cfg Config) func(params.Bootstrappers, host.Host) (header.Exchange, error) { - return func(bpeers params.Bootstrappers, host host.Host) (header.Exchange, error) { - peers, err := cfg.trustedPeers(bpeers) - if err != nil { - return nil, err - } - ids := make([]peer.ID, len(peers)) - for index, peer := range peers { - ids[index] = peer.ID - host.Peerstore().AddAddrs(peer.ID, peer.Addrs, peerstore.PermanentAddrTTL) - } - return p2p.NewExchange(host, ids), nil - } -} - -// HeaderP2PExchangeServer creates a new header/p2p.ExchangeServer. -func HeaderP2PExchangeServer(lc fx.Lifecycle, host host.Host, store header.Store) *p2p.ExchangeServer { - p2pServ := p2p.NewExchangeServer(host, store) - lc.Append(fx.Hook{ - OnStart: p2pServ.Start, - OnStop: p2pServ.Stop, - }) - - return p2pServ -} - -// HeaderStore creates and initializes new header.Store. -func HeaderStore(lc fx.Lifecycle, ds datastore.Batching) (header.Store, error) { - store, err := store.NewStore(ds) - if err != nil { - return nil, err - } - lc.Append(fx.Hook{ - OnStart: store.Start, - OnStop: store.Stop, - }) - return store, nil -} - -// HeaderStoreInit initializes the store. -func HeaderStoreInit(cfg *Config) func(context.Context, params.Network, header.Store, header.Exchange) error { - return func(ctx context.Context, net params.Network, s header.Store, ex header.Exchange) error { - trustedHash, err := cfg.trustedHash(net) - if err != nil { - return err - } - - err = store.Init(ctx, s, ex, trustedHash) - if err != nil { - // TODO(@Wondertan): Error is ignored, as otherwise unit tests for Node construction fail. - // This is due to requesting step of initialization, which fetches initial Header by trusted hash from - // the network. 
The step can't be done during unit tests and fixing it would require either - // * Having some test/dev/offline mode for Node that mocks out all the networking - // * Hardcoding full extended header in params pkg, instead of hashes, so we avoid requesting step - log.Errorf("initializing store failed: %s", err) - } - - return nil - } -} - -// ShareService constructs new share.Service. -func ShareService(lc fx.Lifecycle, bServ blockservice.BlockService, avail share.Availability) *share.Service { - service := share.NewService(bServ, avail) - lc.Append(fx.Hook{ - OnStart: service.Start, - OnStop: service.Stop, - }) - return service -} - -// DASer constructs a new Data Availability Sampler. -func DASer( - ctx context.Context, - lc fx.Lifecycle, - avail share.Availability, - sub header.Subscriber, - hstore header.Store, - ds datastore.Batching, - fservice fraud.Service, -) *das.DASer { - das := das.NewDASer(avail, sub, hstore, ds, fservice) - lifecycleCtx := fxutil.WithLifecycle(ctx, lc) - lc.Append(fx.Hook{ - OnStart: func(startContext context.Context) error { - return FraudLifecycle(startContext, lifecycleCtx, fraud.BadEncoding, fservice, das.Start, das.Stop) - }, - OnStop: das.Stop, - }) - - return das -} - -// FraudService constructs fraud proof service. -func FraudService( - sub *pubsub.PubSub, - hstore header.Store, - ds datastore.Batching, -) fraud.Service { - return fraud.NewService(sub, hstore.GetByHeight, ds) -} - -// LightAvailability constructs light share availability. -func LightAvailability(cfg Config) func( - lc fx.Lifecycle, - bServ blockservice.BlockService, - r routing.ContentRouting, - h host.Host, -) *share.LightAvailability { - return func( - lc fx.Lifecycle, - bServ blockservice.BlockService, - r routing.ContentRouting, - h host.Host, - ) *share.LightAvailability { - disc := share.NewDiscovery( - h, - routingdisc.NewRoutingDiscovery(r), - cfg.PeersLimit, - cfg.DiscoveryInterval, - cfg.AdvertiseInterval, - ) - la := share.NewLightAvailability(bServ, disc) - lc.Append(fx.Hook{ - OnStart: la.Start, - OnStop: la.Stop, - }) - return la - } -} - -// FullAvailability constructs full share availability. -func FullAvailability(cfg Config) func( - lc fx.Lifecycle, - bServ blockservice.BlockService, - r routing.ContentRouting, - h host.Host, -) *share.FullAvailability { - return func( - lc fx.Lifecycle, - bServ blockservice.BlockService, - r routing.ContentRouting, - h host.Host, - ) *share.FullAvailability { - disc := share.NewDiscovery( - h, - routingdisc.NewRoutingDiscovery(r), - cfg.PeersLimit, - cfg.DiscoveryInterval, - cfg.AdvertiseInterval, - ) - fa := share.NewFullAvailability(bServ, disc) - lc.Append(fx.Hook{ - OnStart: fa.Start, - OnStop: fa.Stop, - }) - return fa - } -} - -// CacheAvailability wraps either Full or Light availability with a cache for result sampling. -func CacheAvailability[A share.Availability](lc fx.Lifecycle, ds datastore.Batching, avail A) share.Availability { - ca := share.NewCacheAvailability(avail, ds) - lc.Append(fx.Hook{ - OnStop: ca.Close, - }) - return ca -} - -// FraudLifecycle controls the lifecycle of service depending on fraud proofs. -// It starts the service only if no fraud-proof exists and stops the service automatically -// if a proof arrives after the service was started. 
-func FraudLifecycle(
- startCtx, lifecycleCtx context.Context,
- p fraud.ProofType,
- fservice fraud.Service,
- start, stop func(context.Context) error,
-) error {
- proofs, err := fservice.Get(startCtx, p)
- switch err {
- default:
- return err
- case nil:
- return &fraud.ErrFraudExists{Proof: proofs}
- case datastore.ErrNotFound:
- }
- err = start(startCtx)
- if err != nil {
- return err
- }
- // handle incoming Fraud Proofs
- go fraud.OnProof(lifecycleCtx, fservice, p, func(fraud.Proof) {
- if err := stop(lifecycleCtx); err != nil {
- log.Error(err)
- }
- })
- return nil
-}
-
-// Metrics enables metrics for services.
-func Metrics() fx.Option {
- return fx.Options(
- fx.Invoke(header.MonitorHead),
- // add more monitoring here
- )
-}
diff --git a/node/settings.go b/node/settings.go
deleted file mode 100644
index 0a60329ecd..0000000000
--- a/node/settings.go
+++ /dev/null
@@ -1,117 +0,0 @@
-package node
-
-import (
- "encoding/hex"
- "time"
-
- "github.com/libp2p/go-libp2p-core/crypto"
- "github.com/libp2p/go-libp2p-core/host"
- "go.uber.org/fx"
-
- "github.com/celestiaorg/celestia-node/node/services"
-
- apptypes "github.com/celestiaorg/celestia-app/x/payment/types"
- "github.com/celestiaorg/celestia-node/core"
- "github.com/celestiaorg/celestia-node/header"
- "github.com/celestiaorg/celestia-node/libs/fxutil"
- "github.com/celestiaorg/celestia-node/node/p2p"
- "github.com/celestiaorg/celestia-node/params"
-)
-
-// settings store values that can be augmented or changed for Node with Options.
-type settings struct {
- cfg *Config
- opts []fx.Option
-}
-
-// Option for Node's Config.
-type Option func(*settings)
-
-// WithNetwork specifies the Network to which the Node should connect.
-// WARNING: Use this option with caution and never run the Node with different networks over the same persisted Store.
-func WithNetwork(net params.Network) Option {
- return func(sets *settings) {
- sets.opts = append(sets.opts, fx.Replace(net))
- }
-}
-
-// WithP2PKey sets custom Ed25519 private key for p2p networking.
-func WithP2PKey(key crypto.PrivKey) Option {
- return func(sets *settings) {
- sets.opts = append(sets.opts, fxutil.ReplaceAs(key, new(crypto.PrivKey)))
- }
-}
-
-// WithP2PKeyStr sets custom hex encoded Ed25519 private key for p2p networking.
-func WithP2PKeyStr(key string) Option {
- return func(sets *settings) {
- decKey, err := hex.DecodeString(key)
- if err != nil {
- sets.opts = append(sets.opts, fx.Error(err))
- return
- }
-
- key, err := crypto.UnmarshalEd25519PrivateKey(decKey)
- if err != nil {
- sets.opts = append(sets.opts, fx.Error(err))
- return
- }
-
- sets.opts = append(sets.opts, fxutil.ReplaceAs(key, new(crypto.PrivKey)))
- }
-
-}
-
-// WithHost sets a custom Host for p2p networking.
-func WithHost(hst host.Host) Option {
- return func(sets *settings) {
- sets.opts = append(sets.opts, fxutil.ReplaceAs(hst, new(p2p.HostBase)))
- }
-}
-
-// WithCoreClient sets a custom client for the core process.
-func WithCoreClient(client core.Client) Option {
- return func(sets *settings) {
- sets.opts = append(sets.opts, fxutil.ReplaceAs(client, new(core.Client)))
- }
-}
-
-// WithHeaderConstructFn sets a custom func that creates the extended header.
-func WithHeaderConstructFn(construct header.ConstructFn) Option {
- return func(sets *settings) {
- sets.opts = append(sets.opts, fx.Replace(construct))
- }
-}
-
-// WithKeyringSigner overrides the default keyring signer constructed
-// by the node.
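For reference, these functional options are consumed by `node.New`; each one either mutates the config or appends an `fx.Option` that is applied when the dependency graph is built. A hypothetical construction call composing a few of the options from this file (the `buildLightNode` wrapper is illustrative, and the `store` is assumed to come from `node.Init`/`node.OpenStore`):

```go
package main

import (
	"time"

	"github.com/celestiaorg/celestia-node/node"
	"github.com/celestiaorg/celestia-node/params"
)

// buildLightNode is a sketch only; it assumes `store` was produced by
// node.Init + node.OpenStore beforehand.
func buildLightNode(store node.Store) (*node.Node, error) {
	return node.New(node.Light, store,
		node.WithNetwork(params.Private),                   // pin the network explicitly
		node.WithMetrics(true),                             // enable metrics exporting
		node.WithRefreshRoutingTablePeriod(10*time.Second), // speed up DHT refresh in tests
	)
}
```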
-func WithKeyringSigner(signer *apptypes.KeyringSigner) Option { - return func(sets *settings) { - sets.opts = append(sets.opts, fx.Replace(signer)) - } -} - -// WithBootstrappers sets custom bootstrap peers. -func WithBootstrappers(peers params.Bootstrappers) Option { - return func(sets *settings) { - sets.opts = append(sets.opts, fx.Replace(peers)) - } -} - -// WithRefreshRoutingTablePeriod sets custom refresh period for dht. -// Currently, it is used to speed up tests. -func WithRefreshRoutingTablePeriod(interval time.Duration) Option { - return func(sets *settings) { - sets.cfg.P2P.RoutingTableRefreshPeriod = interval - } -} - -// WithMetrics enables metrics exporting for the node. -func WithMetrics(enable bool) Option { - return func(sets *settings) { - if !enable { - return - } - sets.opts = append(sets.opts, services.Metrics()) - } -} diff --git a/node/state/core.go b/node/state/core.go deleted file mode 100644 index 804c7f07d2..0000000000 --- a/node/state/core.go +++ /dev/null @@ -1,26 +0,0 @@ -package state - -import ( - "go.uber.org/fx" - - apptypes "github.com/celestiaorg/celestia-app/x/payment/types" - "github.com/celestiaorg/celestia-node/header" - "github.com/celestiaorg/celestia-node/service/state" -) - -// CoreAccessor constructs a new instance of state.Accessor over -// a celestia-core connection. -func CoreAccessor( - coreIP, - coreRPC, - coreGRPC string, -) func(fx.Lifecycle, *apptypes.KeyringSigner, header.Store) (state.Accessor, error) { - return func(lc fx.Lifecycle, signer *apptypes.KeyringSigner, getter header.Store) (state.Accessor, error) { - ca := state.NewCoreAccessor(signer, getter, coreIP, coreRPC, coreGRPC) - lc.Append(fx.Hook{ - OnStart: ca.Start, - OnStop: ca.Stop, - }) - return ca, nil - } -} diff --git a/node/state/keyring.go b/node/state/keyring.go deleted file mode 100644 index 4b5502eaa7..0000000000 --- a/node/state/keyring.go +++ /dev/null @@ -1,74 +0,0 @@ -package state - -import ( - "fmt" - "os" - - "github.com/cosmos/cosmos-sdk/crypto/hd" - "github.com/cosmos/cosmos-sdk/crypto/keyring" - - "github.com/celestiaorg/celestia-app/app" - "github.com/celestiaorg/celestia-app/app/encoding" - apptypes "github.com/celestiaorg/celestia-app/x/payment/types" - "github.com/celestiaorg/celestia-node/libs/keystore" - "github.com/celestiaorg/celestia-node/node/key" - "github.com/celestiaorg/celestia-node/params" -) - -func Keyring(cfg key.Config) func(keystore.Keystore, params.Network) (*apptypes.KeyringSigner, error) { - return func(ks keystore.Keystore, net params.Network) (*apptypes.KeyringSigner, error) { - // TODO @renaynay: Include option for setting custom `userInput` parameter with - // implementation of https://github.com/celestiaorg/celestia-node/issues/415. - // TODO @renaynay @Wondertan: ensure that keyring backend from config is passed - // here instead of hardcoded `BackendTest`: https://github.com/celestiaorg/celestia-node/issues/603. - encConf := encoding.MakeEncodingConfig(app.ModuleEncodingRegisters...) 
- ring, err := keyring.New(app.Name, keyring.BackendTest, ks.Path(), os.Stdin, encConf.Codec) - if err != nil { - return nil, err - } - - var info *keyring.Record - // if custom keyringAccName provided, find key for that name - if cfg.KeyringAccName != "" { - keyInfo, err := ring.Key(cfg.KeyringAccName) - if err != nil { - return nil, err - } - info = keyInfo - } else { - // check if key exists for signer - keys, err := ring.List() - if err != nil { - return nil, err - } - // if no key was found in keystore path, generate new key for node - if len(keys) == 0 { - log.Infow("NO KEY FOUND IN STORE, GENERATING NEW KEY...", "path", ks.Path()) - keyInfo, mn, err := ring.NewMnemonic("my_celes_key", keyring.English, "", - "", hd.Secp256k1) - if err != nil { - return nil, err - } - log.Info("NEW KEY GENERATED...") - addr, err := keyInfo.GetAddress() - if err != nil { - return nil, err - } - fmt.Printf("\nNAME: %s\nADDRESS: %s\nMNEMONIC (save this somewhere safe!!!): \n%s\n\n", - keyInfo.Name, addr.String(), mn) - - info = keyInfo - } else { - // if one or more keys are present and no keyringAccName was given, use the first key in list - info = keys[0] - } - } - // construct signer using the default key found / generated above - signer := apptypes.NewKeyringSigner(ring, info.Name, string(net)) - signerInfo := signer.GetSignerInfo() - log.Infow("constructed keyring signer", "backend", keyring.BackendTest, "path", ks.Path(), - "key name", signerInfo.Name, "chain-id", string(net)) - - return signer, nil - } -} diff --git a/node/state/state.go b/node/state/state.go deleted file mode 100644 index a3ac29f23b..0000000000 --- a/node/state/state.go +++ /dev/null @@ -1,48 +0,0 @@ -package state - -import ( - "context" - - logging "github.com/ipfs/go-log/v2" - "go.uber.org/fx" - - "github.com/celestiaorg/celestia-node/fraud" - "github.com/celestiaorg/celestia-node/header" - "github.com/celestiaorg/celestia-node/libs/fxutil" - "github.com/celestiaorg/celestia-node/node/core" - "github.com/celestiaorg/celestia-node/node/key" - "github.com/celestiaorg/celestia-node/node/services" - "github.com/celestiaorg/celestia-node/service/state" -) - -var log = logging.Logger("state-access-constructor") - -// Components provides all components necessary to construct the -// state service. -func Components(coreCfg core.Config, keyCfg key.Config) fx.Option { - return fx.Options( - fx.Provide(Keyring(keyCfg)), - fx.Provide(CoreAccessor(coreCfg.IP, coreCfg.RPCPort, coreCfg.GRPCPort)), - fx.Provide(Service), - ) -} - -// Service constructs a new state.Service. 
-func Service( - ctx context.Context, - lc fx.Lifecycle, - accessor state.Accessor, - store header.Store, - fservice fraud.Service, -) *state.Service { - serv := state.NewService(accessor, store) - lifecycleCtx := fxutil.WithLifecycle(ctx, lc) - lc.Append(fx.Hook{ - OnStart: func(startCtx context.Context) error { - return services.FraudLifecycle(startCtx, lifecycleCtx, fraud.BadEncoding, fservice, serv.Start, serv.Stop) - }, - OnStop: serv.Stop, - }) - - return serv -} diff --git a/node/store.go b/node/store.go deleted file mode 100644 index a548af6a4b..0000000000 --- a/node/store.go +++ /dev/null @@ -1,195 +0,0 @@ -package node - -import ( - "errors" - "fmt" - "path/filepath" - "sync" - - "github.com/dgraph-io/badger/v2/options" - "github.com/ipfs/go-datastore" - dsbadger "github.com/ipfs/go-ds-badger2" - "github.com/mitchellh/go-homedir" - - "github.com/celestiaorg/celestia-node/libs/fslock" - "github.com/celestiaorg/celestia-node/libs/keystore" -) - -var ( - // ErrOpened is thrown on attempt to open already open/in-use Store. - ErrOpened = errors.New("node: store is in use") - // ErrNotInited is thrown on attempt to open Store without initialization. - ErrNotInited = errors.New("node: store is not initialized") -) - -// Store encapsulates storage for the Node. Basically, it is the Store of all Stores. -// It provides access for the Node data stored in root directory e.g. '~/.celestia'. -type Store interface { - // Path reports the FileSystem path of Store. - Path() string - - // Keystore provides a Keystore to access keys. - Keystore() (keystore.Keystore, error) - - // Datastore provides a Datastore - a KV store for arbitrary data to be stored on disk. - Datastore() (datastore.Batching, error) - - // Config loads the stored Node config. - Config() (*Config, error) - - // PutConfig alters the stored Node config. - PutConfig(*Config) error - - // Close closes the Store freeing up acquired resources and locks. - Close() error -} - -// OpenStore creates new FS Store under the given 'path'. -// To be opened the Store must be initialized first, otherwise ErrNotInited is thrown. -// OpenStore takes a file Lock on directory, hence only one Store can be opened at a time under the given 'path', -// otherwise ErrOpened is thrown. 
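A short usage sketch of this interface under the locking rules just described: `OpenStore` fails with `ErrNotInited` before `Init` has run and with `ErrOpened` while another instance holds the directory lock. Error handling is condensed, and whether `IsInit` expands `~` itself is an assumption here:

```go
package main

import (
	"log"

	"github.com/celestiaorg/celestia-node/node"
)

func main() {
	path := "~/.celestia" // assumed to be expanded internally via homedir
	if !node.IsInit(path) {
		if err := node.Init(path, node.Light); err != nil {
			log.Fatal(err)
		}
	}
	store, err := node.OpenStore(path) // takes the directory file lock
	if err != nil {
		log.Fatal(err) // ErrOpened if another process holds the lock
	}
	defer store.Close()

	cfg, err := store.Config()
	if err != nil {
		log.Fatal(err)
	}
	_ = cfg // use the loaded node config
}
```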
-func OpenStore(path string) (Store, error) {
- path, err := storePath(path)
- if err != nil {
- return nil, err
- }
-
- flock, err := fslock.Lock(lockPath(path))
- if err != nil {
- if err == fslock.ErrLocked {
- return nil, ErrOpened
- }
- return nil, err
- }
-
- ok := IsInit(path)
- if !ok {
- flock.Unlock() // nolint: errcheck
- return nil, ErrNotInited
- }
-
- return &fsStore{
- path: path,
- dirLock: flock,
- }, nil
-}
-
-func (f *fsStore) Path() string {
- return f.path
-}
-
-func (f *fsStore) Config() (*Config, error) {
- cfg, err := LoadConfig(configPath(f.path))
- if err != nil {
- return nil, fmt.Errorf("node: can't load Config: %w", err)
- }
-
- return cfg, nil
-}
-
-func (f *fsStore) PutConfig(cfg *Config) error {
- err := SaveConfig(configPath(f.path), cfg)
- if err != nil {
- return fmt.Errorf("node: can't save Config: %w", err)
- }
-
- return nil
-}
-
-func (f *fsStore) Keystore() (_ keystore.Keystore, err error) {
- f.lock.RLock()
- if f.keys != nil {
- f.lock.RUnlock()
- return f.keys, nil
- }
- f.lock.RUnlock()
-
- f.lock.Lock()
- defer f.lock.Unlock()
-
- f.keys, err = keystore.NewFSKeystore(keysPath(f.path))
- if err != nil {
- return nil, fmt.Errorf("node: can't open Keystore: %w", err)
- }
-
- return f.keys, nil
-}
-
-func (f *fsStore) Datastore() (_ datastore.Batching, err error) {
- f.lock.RLock()
- if f.data != nil {
- f.lock.RUnlock()
- return f.data, nil
- }
- f.lock.RUnlock()
-
- f.lock.Lock()
- defer f.lock.Unlock()
-
- opts := dsbadger.DefaultOptions // this should be copied
-
- // Badger sets ValueThreshold to 1K by default, which makes shares end up in the LSM tree
- // instead of the value log. We change the value to be lower than the share size,
- // so shares are stored in the value log. For value log and LSM tree definitions,
- // see the Badger documentation.
- opts.ValueThreshold = 128
- // We always write unique values to a Badger transaction, so there is no need to detect conflicts.
- opts.DetectConflicts = false
- // Use MemoryMap for better performance
- opts.ValueLogLoadingMode = options.MemoryMap
- opts.TableLoadingMode = options.MemoryMap
- // Truncate set to true will truncate corrupted data on start if there is any.
- // If we don't truncate, the node will refuse to start and will ask for recovery, etc.
- // If we truncate, the node will start with any uncorrupted data and reliably sync again what was corrupted
- // in most cases.
- opts.Truncate = true
- // MaxTableSize defines the in-memory and on-disk size of the LSM tree.
- // Bigger values constantly take more RAM.
- // TODO(@Wondertan): Make configurable with more conservative defaults for Light Node
- opts.MaxTableSize = 64 << 20
- // Remove GC as long as we don't have pruning of data to be GCed.
- // Currently, we only append data on disk without removing.
- // TODO(@Wondertan): Find good enough default, once pruning is shipped.
- opts.GcInterval = 0 - - f.data, err = dsbadger.NewDatastore(dataPath(f.path), &opts) - if err != nil { - return nil, fmt.Errorf("node: can't open Badger Datastore: %w", err) - } - - return f.data, nil -} - -func (f *fsStore) Close() error { - defer f.dirLock.Unlock() // nolint: errcheck - return f.data.Close() -} - -type fsStore struct { - path string - - data datastore.Batching - keys keystore.Keystore - - lock sync.RWMutex // protects all the fields - dirLock *fslock.Locker // protects directory -} - -func storePath(path string) (string, error) { - return homedir.Expand(filepath.Clean(path)) -} - -func configPath(base string) string { - return filepath.Join(base, "config.toml") -} - -func lockPath(base string) string { - return filepath.Join(base, "lock") -} - -func keysPath(base string) string { - return filepath.Join(base, "keys") -} - -func dataPath(base string) string { - return filepath.Join(base, "data") -} diff --git a/node/store_test.go b/node/store_test.go deleted file mode 100644 index b22014e36c..0000000000 --- a/node/store_test.go +++ /dev/null @@ -1,50 +0,0 @@ -package node - -import ( - "strconv" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestRepo(t *testing.T) { - var tests = []struct { - tp Type - }{ - {tp: Bridge}, {tp: Light}, {tp: Full}, - } - - for i, tt := range tests { - t.Run(strconv.Itoa(i), func(t *testing.T) { - dir := t.TempDir() - - _, err := OpenStore(dir) - assert.ErrorIs(t, err, ErrNotInited) - - err = Init(dir, tt.tp) - require.NoError(t, err) - - store, err := OpenStore(dir) - require.NoError(t, err) - - _, err = OpenStore(dir) - assert.ErrorIs(t, err, ErrOpened) - - ks, err := store.Keystore() - assert.NoError(t, err) - assert.NotNil(t, ks) - - data, err := store.Datastore() - assert.NoError(t, err) - assert.NotNil(t, data) - - cfg, err := store.Config() - assert.NoError(t, err) - assert.NotNil(t, cfg) - - err = store.Close() - assert.NoError(t, err) - }) - } -} diff --git a/node/testing.go b/node/testing.go deleted file mode 100644 index 2961402c37..0000000000 --- a/node/testing.go +++ /dev/null @@ -1,59 +0,0 @@ -package node - -import ( - "context" - "net" - "testing" - "time" - - "github.com/cosmos/cosmos-sdk/crypto/hd" - "github.com/cosmos/cosmos-sdk/crypto/keyring" - "github.com/stretchr/testify/require" - - "github.com/celestiaorg/celestia-app/app" - "github.com/celestiaorg/celestia-app/app/encoding" - apptypes "github.com/celestiaorg/celestia-app/x/payment/types" - "github.com/celestiaorg/celestia-node/core" - "github.com/celestiaorg/celestia-node/params" -) - -// MockStore provides mock in memory Store for testing purposes. -func MockStore(t *testing.T, cfg *Config) Store { - t.Helper() - store := NewMemStore() - err := store.PutConfig(cfg) - require.NoError(t, err) - return store -} - -func TestNode(t *testing.T, tp Type, opts ...Option) *Node { - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - t.Cleanup(cancel) - - store := MockStore(t, DefaultConfig(tp)) - _, _, cfg := core.StartTestKVApp(ctx, t) - endpoint, err := core.GetEndpoint(cfg) - require.NoError(t, err) - ip, port, err := net.SplitHostPort(endpoint) - require.NoError(t, err) - opts = append(opts, - WithRemoteCoreIP(ip), - WithRemoteCorePort(port), - WithNetwork(params.Private), - WithRPCPort("0"), - WithKeyringSigner(TestKeyringSigner(t)), - ) - nd, err := New(tp, store, opts...) 
- require.NoError(t, err)
- return nd
-}
-
-func TestKeyringSigner(t *testing.T) *apptypes.KeyringSigner {
- encConf := encoding.MakeEncodingConfig(app.ModuleEncodingRegisters...)
- ring := keyring.NewInMemory(encConf.Codec)
- signer := apptypes.NewKeyringSigner(ring, "", string(params.Private))
- _, _, err := signer.NewMnemonic("test_celes", keyring.English, "",
- "", hd.Secp256k1)
- require.NoError(t, err)
- return signer
-}
diff --git a/node/tests/fraud_test.go b/node/tests/fraud_test.go
deleted file mode 100644
index e7d355bca1..0000000000
--- a/node/tests/fraud_test.go
+++ /dev/null
@@ -1,72 +0,0 @@
-package tests
-
-import (
- "context"
- "testing"
- "time"
-
- "github.com/libp2p/go-libp2p-core/host"
- "github.com/libp2p/go-libp2p-core/peer"
- "github.com/stretchr/testify/require"
-
- "github.com/celestiaorg/celestia-node/fraud"
- "github.com/celestiaorg/celestia-node/header"
- "github.com/celestiaorg/celestia-node/node"
- "github.com/celestiaorg/celestia-node/node/tests/swamp"
-)
-
-/*
- Test-Case: Full Node will propagate a BEFP to the network once a ByzantineError is received from sampling.
- Pre-Requisites:
- - CoreClient is started by swamp.
- Steps:
- 1. Create a Bridge Node(BN) with a broken extended header at height 10.
- 2. Start a BN.
- 3. Create a Full Node(FN) with a connection to BN as a trusted peer.
- 4. Start a FN.
- 5. Subscribe to BEFP and wait until it is received.
- 6. Check FN is not synced to 15.
- Note: 15 is not available because DASer will be stopped before reaching this height due to receiving BEFP.
-*/
-func TestFraudProofBroadcasting(t *testing.T) {
- sw := swamp.NewSwamp(t, swamp.WithBlockTime(time.Millisecond*100))
-
- bridge := sw.NewBridgeNode(node.WithHeaderConstructFn(header.FraudMaker(t, 10)))
-
- ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
- t.Cleanup(cancel)
-
- err := bridge.Start(ctx)
- require.NoError(t, err)
- addrs, err := peer.AddrInfoToP2pAddrs(host.InfoFromHost(bridge.Host))
- require.NoError(t, err)
-
- store := node.MockStore(t, node.DefaultConfig(node.Full))
- full := sw.NewNodeWithStore(node.Full, store, node.WithTrustedPeers(addrs[0].String()))
-
- err = full.Start(ctx)
- require.NoError(t, err)
-
- subscr, err := full.FraudServ.Subscribe(fraud.BadEncoding)
- require.NoError(t, err)
- _, err = subscr.Proof(ctx)
- require.NoError(t, err)
-
- // Since GetByHeight blocks for headers that have not been received, we
- // should set a timeout because the DASer/syncer are all stopped at this point
- newCtx, cancel := context.WithTimeout(context.Background(), time.Millisecond*500)
- // rework this after https://github.com/celestiaorg/celestia-node/issues/427
- t.Cleanup(cancel)
-
- _, err = full.HeaderServ.GetByHeight(newCtx, 15)
- require.ErrorIs(t, err, context.DeadlineExceeded)
-
- require.NoError(t, full.Stop(ctx))
- require.NoError(t, sw.RemoveNode(full, node.Full))
-
- full = sw.NewNodeWithStore(node.Full, store, node.WithTrustedPeers(addrs[0].String()))
- require.Error(t, full.Start(ctx))
- proofs, err := full.FraudServ.Get(ctx, fraud.BadEncoding)
- require.NoError(t, err)
- require.NotNil(t, proofs)
-}
diff --git a/node/tests/p2p_test.go b/node/tests/p2p_test.go
deleted file mode 100644
index ef95ed2dd2..0000000000
--- a/node/tests/p2p_test.go
+++ /dev/null
@@ -1,230 +0,0 @@
-package tests
-
-import (
- "context"
- "testing"
- "time"
-
- "github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/require"
-
- "github.com/libp2p/go-libp2p-core/event"
- "github.com/libp2p/go-libp2p-core/host"
- "github.com/libp2p/go-libp2p-core/network"
- "github.com/libp2p/go-libp2p-core/peer"
-
- "github.com/celestiaorg/celestia-node/node"
- "github.com/celestiaorg/celestia-node/node/tests/swamp"
-)
-
-/*
-Test-Case: Full/Light Nodes connection to Bridge as a Bootstrapper
-Steps:
-1. Create a Bridge Node(BN)
-2. Start a BN
-3. Create full/light nodes with the bridge node as a bootstrap peer
-4. Start full/light nodes
-5. Check that nodes are connected to bridge
-*/
-func TestUseBridgeNodeAsBootstraper(t *testing.T) {
- sw := swamp.NewSwamp(t)
-
- bridge := sw.NewBridgeNode()
-
- ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
- t.Cleanup(cancel)
-
- err := bridge.Start(ctx)
- require.NoError(t, err)
-
- addr := host.InfoFromHost(bridge.Host)
-
- full := sw.NewFullNode(node.WithBootstrappers([]peer.AddrInfo{*addr}))
- light := sw.NewLightNode(node.WithBootstrappers([]peer.AddrInfo{*addr}))
- nodes := []*node.Node{full, light}
- for index := range nodes {
- require.NoError(t, nodes[index].Start(ctx))
- assert.Equal(t, *addr, nodes[index].Bootstrappers[0])
- assert.True(t, nodes[index].Host.Network().Connectedness(addr.ID) == network.Connected)
- }
-}
-
-/*
- Test-Case: Add peer to blacklist
- Steps:
- 1. Create a Full Node(FN)
- 2. Start a FN
- 3. Create a Light Node(LN)
- 4. Start a LN
- 5. Explicitly block FN id by LN
- 6. Check FN is allowed to dial LN
- 7. Check LN is not allowed to dial FN
-*/
-func TestAddPeerToBlackList(t *testing.T) {
- sw := swamp.NewSwamp(t)
- full := sw.NewFullNode()
- ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
- t.Cleanup(cancel)
- require.NoError(t, full.Start(ctx))
-
- addr := host.InfoFromHost(full.Host)
- light := sw.NewLightNode()
- require.NoError(t, light.Start(ctx))
- require.NoError(t, light.ConnGater.BlockPeer(addr.ID))
-
- require.True(t, full.ConnGater.InterceptPeerDial(host.InfoFromHost(light.Host).ID))
- require.False(t, light.ConnGater.InterceptPeerDial(addr.ID))
-}
-
-/*
-Test-Case: Connect Full And Light using Bridge node as a bootstrapper
-Steps:
-1. Create a Bridge Node(BN)
-2. Start a BN
-3. Create full/light nodes with the bridge node as a bootstrap peer
-4. Start full/light nodes
-5. Ensure that nodes are connected to bridge
-6. Wait until the light node finds the full node
-7. Check that full and light nodes are connected to each other
-8. Stop FN and ensure that it's not connected to LN
-*/
-func TestBootstrapNodesFromBridgeNode(t *testing.T) {
- sw := swamp.NewSwamp(t)
- cfg := node.DefaultConfig(node.Bridge)
- cfg.P2P.Bootstrapper = true
- const defaultTimeInterval = time.Second * 10
- var defaultOptions = []node.Option{
- node.WithRefreshRoutingTablePeriod(defaultTimeInterval),
- node.WithDiscoveryInterval(defaultTimeInterval),
- node.WithAdvertiseInterval(defaultTimeInterval),
- }
-
- bridgeConfig := append([]node.Option{node.WithConfig(cfg)}, defaultOptions...)
- bridge := sw.NewBridgeNode(bridgeConfig...)
-
- ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
- t.Cleanup(cancel)
-
- err := bridge.Start(ctx)
- require.NoError(t, err)
- addr := host.InfoFromHost(bridge.Host)
-
- nodesConfig := append([]node.Option{node.WithBootstrappers([]peer.AddrInfo{*addr})}, defaultOptions...)
- full := sw.NewFullNode(nodesConfig...)
- light := sw.NewLightNode(nodesConfig...)
- nodes := []*node.Node{full, light} - ch := make(chan struct{}) - sub, err := light.Host.EventBus().Subscribe(&event.EvtPeerConnectednessChanged{}) - require.NoError(t, err) - defer sub.Close() - for index := range nodes { - require.NoError(t, nodes[index].Start(ctx)) - assert.Equal(t, *addr, nodes[index].Bootstrappers[0]) - assert.True(t, nodes[index].Host.Network().Connectedness(addr.ID) == network.Connected) - } - addrFull := host.InfoFromHost(full.Host) - go func() { - for e := range sub.Out() { - connStatus := e.(event.EvtPeerConnectednessChanged) - if connStatus.Peer == full.Host.ID() { - ch <- struct{}{} - } - } - }() - - select { - case <-ctx.Done(): - t.Fatal("peer was not found") - case <-ch: - assert.True(t, light.Host.Network().Connectedness(addrFull.ID) == network.Connected) - } - - sw.Disconnect(t, light.Host.ID(), full.Host.ID()) - require.NoError(t, full.Stop(ctx)) - select { - case <-ctx.Done(): - t.Fatal("peer was not disconnected") - case <-ch: - assert.True(t, light.Host.Network().Connectedness(addrFull.ID) == network.NotConnected) - } -} - -/* -Test-Case: Restart full node discovery after one node is disconnected -Steps: -1. Create a Bridge Node(BN) -2. Start a BN -3. Create 2 full nodes with bridge node as bootstrapper peer and start them -4. Check that nodes are connected to each other -5. Create one more node with disabled discovery -6. Disconnect FNs from each other -7. Check that the last FN is connected to one of the nodes -*NOTE*: this test will take some time because it relies on several cycles of peer discovery -*/ -func TestRestartNodeDiscovery(t *testing.T) { - sw := swamp.NewSwamp(t) - cfg := node.DefaultConfig(node.Bridge) - cfg.P2P.Bootstrapper = true - const defaultTimeInterval = time.Second * 2 - const fullNodes = 2 - var defaultOptions = []node.Option{ - node.WithPeersLimit(fullNodes), - node.WithRefreshRoutingTablePeriod(defaultTimeInterval), - node.WithDiscoveryInterval(defaultTimeInterval), - node.WithAdvertiseInterval(defaultTimeInterval), - } - bridgeConfig := append([]node.Option{node.WithConfig(cfg)}, defaultOptions...) - bridge := sw.NewBridgeNode(bridgeConfig...) - - ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second) - t.Cleanup(cancel) - - err := bridge.Start(ctx) - require.NoError(t, err) - addr := host.InfoFromHost(bridge.Host) - nodes := make([]*node.Node, fullNodes) - nodesConfig := append([]node.Option{node.WithBootstrappers([]peer.AddrInfo{*addr})}, defaultOptions...) - for index := 0; index < fullNodes; index++ { - nodes[index] = sw.NewFullNode(nodesConfig...) - } - - identitySub, err := nodes[0].Host.EventBus().Subscribe(&event.EvtPeerIdentificationCompleted{}) - require.NoError(t, err) - defer identitySub.Close() - - for index := 0; index < fullNodes; index++ { - require.NoError(t, nodes[index].Start(ctx)) - assert.True(t, nodes[index].Host.Network().Connectedness(addr.ID) == network.Connected) - } - - // wait until full nodes connect each other - e := <-identitySub.Out() - connStatus := e.(event.EvtPeerIdentificationCompleted) - id := connStatus.Peer - if id != nodes[1].Host.ID() { - t.Fatal("unexpected peer connected") - } - require.True(t, nodes[0].Host.Network().Connectedness(id) == network.Connected) - - // create one more node with disabled discovery - nodesConfig[1] = node.WithPeersLimit(0) - node := sw.NewFullNode(nodesConfig...) 
- connectSub, err := nodes[0].Host.EventBus().Subscribe(&event.EvtPeerConnectednessChanged{}) - require.NoError(t, err) - defer connectSub.Close() - sw.Disconnect(t, nodes[0].Host.ID(), nodes[1].Host.ID()) - require.NoError(t, node.Start(ctx)) - for { - select { - case <-ctx.Done(): - require.True(t, nodes[0].Host.Network().Connectedness(node.Host.ID()) == network.Connected) - case conn := <-connectSub.Out(): - status := conn.(event.EvtPeerConnectednessChanged) - if status.Peer != node.Host.ID() { - continue - } - require.True(t, status.Connectedness == network.Connected) - return - } - } -} diff --git a/node/tests/reconstruct_test.go b/node/tests/reconstruct_test.go deleted file mode 100644 index e127049515..0000000000 --- a/node/tests/reconstruct_test.go +++ /dev/null @@ -1,167 +0,0 @@ -// Test with light nodes spawns more goroutines than in the race detectors budget, -// and thus we're disabling the race detector. -// TODO(@Wondertan): Remove this once we move to go1.19 with unlimited race detector -//go:build !race - -package tests - -import ( - "context" - "testing" - "time" - - "github.com/libp2p/go-libp2p-core/event" - "github.com/libp2p/go-libp2p-core/host" - "github.com/libp2p/go-libp2p-core/peer" - "github.com/stretchr/testify/require" - "golang.org/x/sync/errgroup" - - "github.com/celestiaorg/celestia-node/ipld" - "github.com/celestiaorg/celestia-node/node" - "github.com/celestiaorg/celestia-node/node/tests/swamp" - "github.com/celestiaorg/celestia-node/service/share" -) - -/* -Test-Case: Full Node reconstructs blocks from a Bridge node -Pre-Reqs: -- First 20 blocks have a block size of 16 -- Blocktime is 100 ms -Steps: -1. Create a Bridge Node(BN) -2. Start a BN -3. Create a Full Node(FN) with BN as a trusted peer -4. Start a FN -5. Check that a FN can retrieve shares from 1 to 20 blocks -*/ -func TestFullReconstructFromBridge(t *testing.T) { - const ( - blocks = 20 - bsize = 16 - btime = time.Millisecond * 100 - ) - - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) - t.Cleanup(cancel) - sw := swamp.NewSwamp(t, swamp.WithBlockTime(btime)) - go sw.FillBlocks(ctx, t, bsize, blocks) - - bridge := sw.NewBridgeNode() - err := bridge.Start(ctx) - require.NoError(t, err) - - full := sw.NewFullNode(node.WithTrustedPeers(getMultiAddr(t, bridge.Host))) - err = full.Start(ctx) - require.NoError(t, err) - - errg, bctx := errgroup.WithContext(ctx) - for i := 1; i <= blocks+1; i++ { - i := i - errg.Go(func() error { - h, err := full.HeaderServ.GetByHeight(bctx, uint64(i)) - if err != nil { - return err - } - - return full.ShareServ.SharesAvailable(bctx, h.DAH) - }) - } - - err = errg.Wait() - require.NoError(t, err) -} - -/* -Test-Case: Full Node reconstructs blocks only from Light Nodes -Pre-Reqs: -- First 20 blocks have a block size of 16 -- Blocktime is 100 ms -Steps: -1. Create a Bridge Node(BN) -2. Start a BN -3. Create 69 Light Nodes(LNs) with BN as a trusted peer -4. Start 69 LNs -5. Create a Full Node(FN) with 69 LNs as trusted peers -6. Unlink FN connection to BN -7. Start a FN -8. 
Check that a FN can retrieve shares from 1 to 20 blocks
-*/
-func TestFullReconstructFromLights(t *testing.T) {
- ipld.RetrieveQuadrantTimeout = time.Millisecond * 100
- share.DefaultSampleAmount = 20
- const (
- blocks = 20
- btime = time.Millisecond * 100
- bsize = 16
- lnodes = 69
- )
-
- ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
- t.Cleanup(cancel)
- sw := swamp.NewSwamp(t, swamp.WithBlockTime(btime))
- go sw.FillBlocks(ctx, t, bsize, blocks)
-
- cfg := node.DefaultConfig(node.Bridge)
- cfg.P2P.Bootstrapper = true
- const defaultTimeInterval = time.Second * 10
- var defaultOptions = []node.Option{
- node.WithRefreshRoutingTablePeriod(defaultTimeInterval),
- node.WithDiscoveryInterval(defaultTimeInterval),
- node.WithAdvertiseInterval(defaultTimeInterval),
- }
-
- bridgeConfig := append([]node.Option{node.WithConfig(cfg)}, defaultOptions...)
- cfg.P2P.Bootstrapper = true
- bridge := sw.NewBridgeNode(bridgeConfig...)
- require.NoError(t, bridge.Start(ctx))
- addr := host.InfoFromHost(bridge.Host)
-
- nodesConfig := append([]node.Option{node.WithBootstrappers([]peer.AddrInfo{*addr})}, defaultOptions...)
- full := sw.NewFullNode(nodesConfig...)
- lights := make([]*node.Node, lnodes)
- subs := make([]event.Subscription, lnodes)
- errg, errCtx := errgroup.WithContext(ctx)
- for i := 0; i < lnodes; i++ {
- i := i
- errg.Go(func() error {
- light := sw.NewLightNode(nodesConfig...)
- sub, err := light.Host.EventBus().Subscribe(&event.EvtPeerConnectednessChanged{})
- if err != nil {
- return err
- }
- subs[i] = sub
- lights[i] = light
- return light.Start(errCtx)
- })
- }
- require.NoError(t, errg.Wait())
- require.NoError(t, full.Start(ctx))
- for i := 0; i < lnodes; i++ {
- select {
- case <-ctx.Done():
- t.Fatal("peer was not found")
- case <-subs[i].Out():
- continue
- }
- }
- errg, bctx := errgroup.WithContext(ctx)
- for i := 1; i <= blocks+1; i++ {
- i := i
- errg.Go(func() error {
- h, err := full.HeaderServ.GetByHeight(bctx, uint64(i))
- if err != nil {
- return err
- }
-
- return full.ShareServ.SharesAvailable(bctx, h.DAH)
- })
- }
-
- require.NoError(t, errg.Wait())
-}
-
-func getMultiAddr(t *testing.T, h host.Host) string {
- addrs, err := peer.AddrInfoToP2pAddrs(host.InfoFromHost(h))
- require.NoError(t, err)
- return addrs[0].String()
-}
diff --git a/node/tests/swamp/config.go b/node/tests/swamp/config.go
deleted file mode 100644
index 2545b90bbe..0000000000
--- a/node/tests/swamp/config.go
+++ /dev/null
@@ -1,46 +0,0 @@
-package swamp
-
-import (
- "time"
-
- "github.com/tendermint/tendermint/abci/types"
- tn "github.com/tendermint/tendermint/config"
- rpctest "github.com/tendermint/tendermint/rpc/test"
-
- "github.com/celestiaorg/celestia-node/core"
-)
-
-// Components struct represents a set of pre-requisite attributes from the test scenario
-type Components struct {
- App types.Application
- CoreCfg *tn.Config
-}
-
-// DefaultComponents creates a KVStore with a block retention of 2000 blocks.
-// In addition, the empty block interval is set to 100ms
-func DefaultComponents() *Components {
- app := core.CreateKVStore(2000)
- tnCfg, err := rpctest.CreateConfig("swamp_tm")
- if err != nil {
- panic(err)
- }
- tnCfg.Consensus.CreateEmptyBlocksInterval = 100 * time.Millisecond
- return &Components{
- App: app,
- CoreCfg: tnCfg,
- }
-}
-
-// Option for modifying Swamp's Config.
-type Option func(*Components)
-
-// WithBlockTime sets a custom interval for block creation.
-func WithBlockTime(t time.Duration) Option { - return func(c *Components) { - // for empty block - c.CoreCfg.Consensus.CreateEmptyBlocksInterval = t - // for filled block - c.CoreCfg.Consensus.TimeoutCommit = t - c.CoreCfg.Consensus.SkipTimeoutCommit = true - } -} diff --git a/node/tests/swamp/swamp.go b/node/tests/swamp/swamp.go deleted file mode 100644 index ade91645ca..0000000000 --- a/node/tests/swamp/swamp.go +++ /dev/null @@ -1,316 +0,0 @@ -package swamp - -import ( - "context" - "fmt" - "math/rand" - "net" - "testing" - - "github.com/libp2p/go-libp2p-core/host" - "github.com/libp2p/go-libp2p-core/peer" - mocknet "github.com/libp2p/go-libp2p/p2p/net/mock" - ma "github.com/multiformats/go-multiaddr" - "github.com/stretchr/testify/require" - "github.com/tendermint/tendermint/libs/bytes" - "github.com/tendermint/tendermint/types" - - "github.com/celestiaorg/celestia-node/core" - "github.com/celestiaorg/celestia-node/libs/keystore" - "github.com/celestiaorg/celestia-node/logs" - "github.com/celestiaorg/celestia-node/node" - "github.com/celestiaorg/celestia-node/node/p2p" - "github.com/celestiaorg/celestia-node/params" -) - -var blackholeIP6 = net.ParseIP("100::") - -const subscriberID string = "NewBlockSwamp/Events" - -var queryEvent string = types.QueryForEvent(types.EventNewBlockValue).String() - -// Swamp represents the main functionality that is needed for the test-case: -// - Network to link the nodes -// - CoreClient to share between Bridge nodes -// - Slices of created Bridge/Full/Light Nodes -// - trustedHash taken from the CoreClient and shared between nodes -type Swamp struct { - t *testing.T - Network mocknet.Mocknet - CoreClient core.Client - BridgeNodes []*node.Node - FullNodes []*node.Node - LightNodes []*node.Node - trustedHash string - comps *Components -} - -// NewSwamp creates a new instance of Swamp. 
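A condensed sketch of how the deleted integration tests drive this type end to end; it mirrors the flow of the sync tests later in this diff, and the test name here is illustrative only:

```go
package tests

import (
	"context"
	"testing"
	"time"

	"github.com/stretchr/testify/require"

	"github.com/celestiaorg/celestia-node/node/tests/swamp"
)

func TestSwampSketch(t *testing.T) {
	// spin up the mock network plus a shared core client
	sw := swamp.NewSwamp(t, swamp.WithBlockTime(100*time.Millisecond))

	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	t.Cleanup(cancel)

	sw.WaitTillHeight(ctx, 20) // let the core chain produce some blocks

	bridge := sw.NewBridgeNode()
	require.NoError(t, bridge.Start(ctx))

	// cross-check a synced header against the core chain
	h, err := bridge.HeaderServ.GetByHeight(ctx, 20)
	require.NoError(t, err)
	require.EqualValues(t, h.Commit.BlockID.Hash, sw.GetCoreBlockHashByHeight(ctx, 20))
}
```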
-func NewSwamp(t *testing.T, options ...Option) *Swamp {
- if testing.Verbose() {
- logs.SetDebugLogging()
- }
-
- ic := DefaultComponents()
- for _, option := range options {
- option(ic)
- }
-
- var err error
- ctx := context.Background()
-
- // TODO(@Bidon15): CoreClient(limitation)
- // We assume the consensus mechanism is already tested out,
- // so we are not creating bridge nodes each containing its own core client;
- // instead, we assign all created BNs to one Core from the swamp
- core.StartTestNode(ctx, t, ic.App, ic.CoreCfg)
- endpoint, err := core.GetEndpoint(ic.CoreCfg)
- require.NoError(t, err)
- ip, port, err := net.SplitHostPort(endpoint)
- require.NoError(t, err)
- remote, err := core.NewRemote(ip, port)
- require.NoError(t, err)
-
- err = remote.Start()
- require.NoError(t, err)
-
- swp := &Swamp{
- t: t,
- Network: mocknet.New(),
- CoreClient: remote,
- comps: ic,
- }
-
- swp.trustedHash, err = swp.getTrustedHash(ctx)
- require.NoError(t, err)
-
- swp.t.Cleanup(func() {
- swp.stopAllNodes(ctx, swp.BridgeNodes, swp.FullNodes, swp.LightNodes)
- })
-
- return swp
-}
-
-// stopAllNodes goes through all received slices of Nodes and stops them one-by-one.
-// This eliminates manual clean-up at the end of the test cases themselves.
-func (s *Swamp) stopAllNodes(ctx context.Context, allNodes ...[]*node.Node) {
- for _, nodes := range allNodes {
- for _, node := range nodes {
- require.NoError(s.t, node.Stop(ctx))
- }
- }
-}
-
-// GetCoreBlockHashByHeight returns a tendermint block's hash by provided height
-func (s *Swamp) GetCoreBlockHashByHeight(ctx context.Context, height int64) bytes.HexBytes {
- b, err := s.CoreClient.Block(ctx, &height)
- require.NoError(s.t, err)
- return b.BlockID.Hash
-}
-
-// WaitTillHeight holds the test execution until the given number of blocks
-// has been produced by the CoreClient.
-func (s *Swamp) WaitTillHeight(ctx context.Context, height int64) {
- require.Greater(s.t, height, int64(0))
-
- ctx, cancel := context.WithCancel(ctx)
- defer cancel()
- results, err := s.CoreClient.Subscribe(ctx, subscriberID, queryEvent)
- require.NoError(s.t, err)
-
- defer func() {
- // TODO(@Wondertan): For some reason, the Unsubscribe does not work and we have to do
- // an UnsubscribeAll as a hack. There is a bug somewhere in Tendermint which should be
- // investigated
- err = s.CoreClient.UnsubscribeAll(ctx, subscriberID)
- require.NoError(s.t, err)
- }()
-
- for {
- select {
- case <-ctx.Done():
- return
- case block := <-results:
- newBlock := block.Data.(types.EventDataNewBlock)
- if height <= newBlock.Block.Height {
- return
- }
- }
- }
-}
-
-// createPeer is a helper for celestia nodes to initialize
-// with a real key instead of using a bogus one.
-func (s *Swamp) createPeer(ks keystore.Keystore) host.Host {
- key, err := p2p.Key(ks)
- require.NoError(s.t, err)
-
- // IPv6 will be starting with 100:0
- token := make([]byte, 12)
- rand.Read(token) //nolint:gosec
- ip := append(net.IP{}, blackholeIP6...)
- copy(ip[net.IPv6len-len(token):], token)
-
- // reference to GenPeer func in libp2p/p2p/net/mock/mock_net.go
- // on how we generate new multiaddr for new peer
- a, err := ma.NewMultiaddr(fmt.Sprintf("/ip6/%s/tcp/4242", ip))
- require.NoError(s.t, err)
-
- host, err := s.Network.AddPeer(key, a)
- require.NoError(s.t, err)
-
- require.NoError(s.t, s.Network.LinkAll())
- return host
-}
-
-// getTrustedHash is needed for celestia nodes to get the trusted hash
-// from CoreClient.
This is required to initialize and start correctly. -func (s *Swamp) getTrustedHash(ctx context.Context) (string, error) { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - - results, err := s.CoreClient.Subscribe(ctx, subscriberID, queryEvent) - require.NoError(s.t, err) - - defer func() { - err := s.CoreClient.UnsubscribeAll(ctx, subscriberID) - require.NoError(s.t, err) - }() - - select { - case <-ctx.Done(): - return "", fmt.Errorf("can't get trusted hash as the channel is closed") - case block := <-results: - newBlock := block.Data.(types.EventDataNewBlock).Block - return newBlock.Hash().String(), nil - } -} - -// NewBridgeNode creates a new instance of a BridgeNode providing a default config -// and a mockstore to the NewNodeWithStore method -func (s *Swamp) NewBridgeNode(options ...node.Option) *node.Node { - cfg := node.DefaultConfig(node.Bridge) - store := node.MockStore(s.t, cfg) - - return s.NewNodeWithStore(node.Bridge, store, options...) -} - -// NewFullNode creates a new instance of a FullNode providing a default config -// and a mockstore to the NewNodeWithStore method -func (s *Swamp) NewFullNode(options ...node.Option) *node.Node { - cfg := node.DefaultConfig(node.Full) - store := node.MockStore(s.t, cfg) - - return s.NewNodeWithStore(node.Full, store, options...) -} - -// NewLightNode creates a new instance of a LightNode providing a default config -// and a mockstore to the NewNodeWithStore method -func (s *Swamp) NewLightNode(options ...node.Option) *node.Node { - cfg := node.DefaultConfig(node.Light) - store := node.MockStore(s.t, cfg) - - return s.NewNodeWithStore(node.Light, store, options...) -} - -// NewNodeWithStore creates a new instance of Node with predefined Store. -// Afterwards, the instance is stored in the swamp's Nodes' slice according to the -// node's type provided from the user. -func (s *Swamp) NewNodeWithStore(t node.Type, store node.Store, options ...node.Option) *node.Node { - var n *node.Node - - options = append(options, node.WithKeyringSigner(node.TestKeyringSigner(s.t))) - - switch t { - case node.Bridge: - options = append(options, - node.WithCoreClient(s.CoreClient), - ) - n = s.newNode(node.Bridge, store, options...) - s.BridgeNodes = append(s.BridgeNodes, n) - case node.Full: - n = s.newNode(node.Full, store, options...) - s.FullNodes = append(s.FullNodes, n) - case node.Light: - n = s.newNode(node.Light, store, options...) - s.LightNodes = append(s.LightNodes, n) - } - - return n -} - -func (s *Swamp) newNode(t node.Type, store node.Store, options ...node.Option) *node.Node { - ks, err := store.Keystore() - require.NoError(s.t, err) - - // TODO(@Bidon15): If for some reason, we receive one of existing options - // like from the test case, we need to check them and not use - // default that are set here - options = append(options, - node.WithHost(s.createPeer(ks)), - node.WithTrustedHash(s.trustedHash), - node.WithNetwork(params.Private), - node.WithRPCPort("0"), - ) - - node, err := node.New(t, store, options...) 
- require.NoError(s.t, err)
-
- return node
-}
-
-// RemoveNode removes a node from the swamp's node slice.
-// This allows reuse of the same variable in the test scenario
-// if the user needs to stop and start the same node
-func (s *Swamp) RemoveNode(n *node.Node, t node.Type) error {
- var err error
- switch t {
- case node.Light:
- s.LightNodes, err = s.remove(n, s.LightNodes)
- return err
- case node.Bridge:
- s.BridgeNodes, err = s.remove(n, s.BridgeNodes)
- return err
- case node.Full:
- s.FullNodes, err = s.remove(n, s.FullNodes)
- return err
- default:
- return fmt.Errorf("no such type or node")
- }
-}
-
-func (s *Swamp) remove(rn *node.Node, sn []*node.Node) ([]*node.Node, error) {
- if len(sn) == 1 {
- return nil, nil
- }
-
- initSize := len(sn)
- for i := 0; i < len(sn); i++ {
- if sn[i] == rn {
- sn = append(sn[:i], sn[i+1:]...)
- i--
- }
- }
-
- if initSize <= len(sn) {
- return sn, fmt.Errorf("cannot delete the node")
- }
- return sn, nil
-}
-
-// Connect allows connecting peers after a hard disconnection.
-func (s *Swamp) Connect(t *testing.T, peerA, peerB peer.ID) {
- _, err := s.Network.LinkPeers(peerA, peerB)
- require.NoError(t, err)
- _, err = s.Network.ConnectPeers(peerA, peerB)
- require.NoError(t, err)
-}
-
-// Disconnect breaks the connection between two peers with no possibility of re-establishing it.
-// Order is very important here. We have to unlink peers first, and only after that call disconnect.
-// This is a hard disconnect, and the peers will not be able to reconnect.
-// In order to reconnect the peers again, please use swamp.Connect.
-func (s *Swamp) Disconnect(t *testing.T, peerA, peerB peer.ID) {
- require.NoError(t, s.Network.UnlinkPeers(peerA, peerB))
- require.NoError(t, s.Network.DisconnectPeers(peerA, peerB))
-}
diff --git a/node/tests/swamp/swamp_tx.go b/node/tests/swamp/swamp_tx.go
deleted file mode 100644
index 5a7eed9a2a..0000000000
--- a/node/tests/swamp/swamp_tx.go
+++ /dev/null
@@ -1,40 +0,0 @@
-package swamp
-
-import (
- "context"
- "math/rand"
- "testing"
- "time"
-
- "github.com/stretchr/testify/require"
-
- "github.com/celestiaorg/celestia-node/ipld"
-)
-
-// SubmitData submits the given data in a block.
-// TODO(@Wondertan): This must be a real PFD using celestia-app, once we are able to run the App
-// in the Swamp.
-func (s *Swamp) SubmitData(ctx context.Context, t *testing.T, data []byte) { - result, err := s.CoreClient.BroadcastTxSync(ctx, append([]byte("key="), data...)) - require.NoError(t, err) - require.Zero(t, result.Code) -} - -func (s *Swamp) FillBlocks(ctx context.Context, t *testing.T, bsize, blocks int) { - btime := s.comps.CoreCfg.Consensus.CreateEmptyBlocksInterval - timer := time.NewTimer(btime) - defer timer.Stop() - - data := make([]byte, bsize*ipld.ShareSize) - for range make([]int, blocks) { - rand.Read(data) //nolint:gosec - s.SubmitData(ctx, t, data) - - timer.Reset(btime) - select { - case <-timer.C: - case <-ctx.Done(): - return - } - } -} diff --git a/node/tests/sync_test.go b/node/tests/sync_test.go deleted file mode 100644 index ad7515ffd1..0000000000 --- a/node/tests/sync_test.go +++ /dev/null @@ -1,283 +0,0 @@ -package tests - -import ( - "context" - "testing" - "time" - - "github.com/libp2p/go-libp2p-core/host" - "github.com/libp2p/go-libp2p-core/peer" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/celestiaorg/celestia-node/node" - "github.com/celestiaorg/celestia-node/node/tests/swamp" -) - -// a default timeout for the context that is used in tests -const defaultTimeout = 40 * time.Second - -/* -Test-Case: Sync a Light Node with a Bridge Node -Steps: -1. Create a Bridge Node(BN) -2. Start a BN -3. Check BN is synced to height 20 -4. Create a Light Node(LN) with a trusted peer -5. Start a LN with a defined connection to the BN -6. Check LN is synced to height 30 -*/ -func TestSyncLightWithBridge(t *testing.T) { - sw := swamp.NewSwamp(t) - - bridge := sw.NewBridgeNode() - - ctx, cancel := context.WithTimeout(context.Background(), defaultTimeout) - t.Cleanup(cancel) - - sw.WaitTillHeight(ctx, 20) - - err := bridge.Start(ctx) - require.NoError(t, err) - - h, err := bridge.HeaderServ.GetByHeight(ctx, 20) - require.NoError(t, err) - - require.EqualValues(t, h.Commit.BlockID.Hash, sw.GetCoreBlockHashByHeight(ctx, 20)) - - addrs, err := peer.AddrInfoToP2pAddrs(host.InfoFromHost(bridge.Host)) - require.NoError(t, err) - - light := sw.NewLightNode(node.WithTrustedPeers(addrs[0].String())) - - err = light.Start(ctx) - require.NoError(t, err) - - h, err = light.HeaderServ.GetByHeight(ctx, 30) - require.NoError(t, err) - - assert.EqualValues(t, h.Commit.BlockID.Hash, sw.GetCoreBlockHashByHeight(ctx, 30)) -} - -/* -Test-Case: Light Node continues sync after abrupt stop/start -Pre-Requisites: -- CoreClient is started by swamp -- CoreClient has generated 50 blocks -Steps: -1. Create a Bridge Node(BN) -2. Start a BN -3. Check BN is synced to height 20 -4. Create a Light Node(LN) with a trusted peer -5. Start a LN with a defined connection to the BN -6. Check LN is synced to height 30 -7. Stop LN -8. Start LN -9. 
Check LN is synced to height 40 -*/ -func TestSyncStartStopLightWithBridge(t *testing.T) { - sw := swamp.NewSwamp(t) - - bridge := sw.NewBridgeNode() - - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) - t.Cleanup(cancel) - - sw.WaitTillHeight(ctx, 50) - - err := bridge.Start(ctx) - require.NoError(t, err) - - h, err := bridge.HeaderServ.GetByHeight(ctx, 20) - require.NoError(t, err) - - require.EqualValues(t, h.Commit.BlockID.Hash, sw.GetCoreBlockHashByHeight(ctx, 20)) - - addrs, err := peer.AddrInfoToP2pAddrs(host.InfoFromHost(bridge.Host)) - require.NoError(t, err) - - store := node.MockStore(t, node.DefaultConfig(node.Light)) - light := sw.NewNodeWithStore(node.Light, store, node.WithTrustedPeers(addrs[0].String())) - require.NoError(t, light.Start(ctx)) - - h, err = light.HeaderServ.GetByHeight(ctx, 30) - require.NoError(t, err) - - require.EqualValues(t, h.Commit.BlockID.Hash, sw.GetCoreBlockHashByHeight(ctx, 30)) - - require.NoError(t, light.Stop(ctx)) - require.NoError(t, sw.RemoveNode(light, node.Light)) - - light = sw.NewNodeWithStore(node.Light, store, node.WithTrustedPeers(addrs[0].String())) - require.NoError(t, light.Start(ctx)) - - h, err = light.HeaderServ.GetByHeight(ctx, 40) - require.NoError(t, err) - - assert.EqualValues(t, h.Commit.BlockID.Hash, sw.GetCoreBlockHashByHeight(ctx, 40)) -} - -/* -Test-Case: Sync a Full Node with a Bridge Node -Steps: -1. Create a Bridge Node(BN) -2. Start a BN -3. Check BN is synced to height 20 -4. Create a Full Node(FN) with a connection to BN as a trusted peer -5. Start a FN -6. Check FN is synced to height 30 -*/ -func TestSyncFullWithBridge(t *testing.T) { - sw := swamp.NewSwamp(t) - - bridge := sw.NewBridgeNode() - - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) - t.Cleanup(cancel) - - sw.WaitTillHeight(ctx, 20) - - err := bridge.Start(ctx) - require.NoError(t, err) - - h, err := bridge.HeaderServ.GetByHeight(ctx, 20) - require.NoError(t, err) - - assert.EqualValues(t, h.Commit.BlockID.Hash, sw.GetCoreBlockHashByHeight(ctx, 20)) - - addrs, err := peer.AddrInfoToP2pAddrs(host.InfoFromHost(bridge.Host)) - require.NoError(t, err) - - full := sw.NewFullNode(node.WithTrustedPeers(addrs[0].String())) - require.NoError(t, full.Start(ctx)) - - h, err = full.HeaderServ.GetByHeight(ctx, 30) - require.NoError(t, err) - - assert.EqualValues(t, h.Commit.BlockID.Hash, sw.GetCoreBlockHashByHeight(ctx, 30)) -} - -/* -Test-Case: Sync a Light Node from a Full Node -Pre-Requisites: -- CoreClient is started by swamp -- CoreClient has generated 20 blocks -Steps: -1. Create a Bridge Node(BN) -2. Start a BN -3. Check BN is synced to height 20 -4. Create a Full Node(FN) with a connection to BN as a trusted peer -5. Start a FN -6. Check FN is synced to height 30 -7. Create a Light Node(LN) with a connection to FN as a trusted peer -8. Start LN -9. 
Check LN is synced to height 50 -*/ -func TestSyncLightWithFull(t *testing.T) { - sw := swamp.NewSwamp(t) - - bridge := sw.NewBridgeNode() - - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) - t.Cleanup(cancel) - - sw.WaitTillHeight(ctx, 20) - - err := bridge.Start(ctx) - require.NoError(t, err) - - h, err := bridge.HeaderServ.GetByHeight(ctx, 20) - require.NoError(t, err) - - assert.EqualValues(t, h.Commit.BlockID.Hash, sw.GetCoreBlockHashByHeight(ctx, 20)) - - addrs, err := peer.AddrInfoToP2pAddrs(host.InfoFromHost(bridge.Host)) - require.NoError(t, err) - - full := sw.NewFullNode(node.WithTrustedPeers(addrs[0].String())) - require.NoError(t, full.Start(ctx)) - - h, err = full.HeaderServ.GetByHeight(ctx, 30) - require.NoError(t, err) - - assert.EqualValues(t, h.Commit.BlockID.Hash, sw.GetCoreBlockHashByHeight(ctx, 30)) - - addrs, err = peer.AddrInfoToP2pAddrs(host.InfoFromHost(full.Host)) - require.NoError(t, err) - - light := sw.NewLightNode(node.WithTrustedPeers(addrs[0].String())) - - err = sw.Network.UnlinkPeers(bridge.Host.ID(), light.Host.ID()) - require.NoError(t, err) - - err = light.Start(ctx) - require.NoError(t, err) - - h, err = light.HeaderServ.GetByHeight(ctx, 50) - require.NoError(t, err) - - assert.EqualValues(t, h.Commit.BlockID.Hash, sw.GetCoreBlockHashByHeight(ctx, 50)) -} - -/* -Test-Case: Sync a Light Node with multiple trusted peers -Pre-Requisites: -- CoreClient is started by swamp -- CoreClient has generated 20 blocks -Steps: -1. Create a Bridge Node(BN) -2. Start a BN -3. Check BN is synced to height 20 -4. Create a Full Node(FN) with a connection to BN as a trusted peer -5. Start a FN -6. Check FN is synced to height 30 -7. Create a Light Node(LN) with a connection to BN, FN as trusted peers -8. Start LN -9. 
Check LN is synced to height 50 -*/ -func TestSyncLightWithTrustedPeers(t *testing.T) { - sw := swamp.NewSwamp(t) - - bridge := sw.NewBridgeNode() - - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) - t.Cleanup(cancel) - - sw.WaitTillHeight(ctx, 20) - - err := bridge.Start(ctx) - require.NoError(t, err) - - h, err := bridge.HeaderServ.GetByHeight(ctx, 20) - require.NoError(t, err) - - assert.EqualValues(t, h.Commit.BlockID.Hash, sw.GetCoreBlockHashByHeight(ctx, 20)) - - addrs, err := peer.AddrInfoToP2pAddrs(host.InfoFromHost(bridge.Host)) - require.NoError(t, err) - - trustedPeers := []string{addrs[0].String()} - - full := sw.NewFullNode(node.WithTrustedPeers(addrs[0].String())) - require.NoError(t, full.Start(ctx)) - - h, err = full.HeaderServ.GetByHeight(ctx, 30) - require.NoError(t, err) - - assert.EqualValues(t, h.Commit.BlockID.Hash, sw.GetCoreBlockHashByHeight(ctx, 30)) - - addrs, err = peer.AddrInfoToP2pAddrs(host.InfoFromHost(full.Host)) - require.NoError(t, err) - - trustedPeers = append(trustedPeers, addrs[0].String()) - - light := sw.NewLightNode(node.WithTrustedPeers(trustedPeers...)) - - err = light.Start(ctx) - require.NoError(t, err) - - h, err = light.HeaderServ.GetByHeight(ctx, 50) - require.NoError(t, err) - - assert.EqualValues(t, h.Commit.BlockID.Hash, sw.GetCoreBlockHashByHeight(ctx, 50)) -} diff --git a/nodebuilder/blob/blob.go b/nodebuilder/blob/blob.go new file mode 100644 index 0000000000..f87105541a --- /dev/null +++ b/nodebuilder/blob/blob.go @@ -0,0 +1,75 @@ +package blob + +import ( + "context" + + "github.com/celestiaorg/celestia-node/blob" + "github.com/celestiaorg/celestia-node/share" +) + +var _ Module = (*API)(nil) + +// Module defines the API related to interacting with the blobs +// +//go:generate mockgen -destination=mocks/api.go -package=mocks . Module +type Module interface { + // Submit sends Blobs and reports the height in which they were included. + // Allows sending multiple Blobs atomically synchronously. + // Uses default wallet registered on the Node. + Submit(_ context.Context, _ []*blob.Blob, _ blob.GasPrice) (height uint64, _ error) + // Get retrieves the blob by commitment under the given namespace and height. + Get(_ context.Context, height uint64, _ share.Namespace, _ blob.Commitment) (*blob.Blob, error) + // GetAll returns all blobs at the given height under the given namespaces. + GetAll(_ context.Context, height uint64, _ []share.Namespace) ([]*blob.Blob, error) + // GetProof retrieves proofs in the given namespaces at the given height by commitment. + GetProof(_ context.Context, height uint64, _ share.Namespace, _ blob.Commitment) (*blob.Proof, error) + // Included checks whether a blob's given commitment(Merkle subtree root) is included at + // given height and under the namespace. 
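+	//
+	// A hypothetical usage sketch (mod is a Module; the other values are assumed):
+	//
+	//	proof, _ := mod.GetProof(ctx, height, ns, commitment)
+	//	ok, err := mod.Included(ctx, height, ns, proof, commitment)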
+ Included(_ context.Context, height uint64, _ share.Namespace, _ *blob.Proof, _ blob.Commitment) (bool, error) +} + +type API struct { + Internal struct { + Submit func(context.Context, []*blob.Blob, blob.GasPrice) (uint64, error) `perm:"write"` + Get func(context.Context, uint64, share.Namespace, blob.Commitment) (*blob.Blob, error) `perm:"read"` + GetAll func(context.Context, uint64, []share.Namespace) ([]*blob.Blob, error) `perm:"read"` + GetProof func(context.Context, uint64, share.Namespace, blob.Commitment) (*blob.Proof, error) `perm:"read"` + Included func(context.Context, uint64, share.Namespace, *blob.Proof, blob.Commitment) (bool, error) `perm:"read"` + } +} + +func (api *API) Submit(ctx context.Context, blobs []*blob.Blob, gasPrice blob.GasPrice) (uint64, error) { + return api.Internal.Submit(ctx, blobs, gasPrice) +} + +func (api *API) Get( + ctx context.Context, + height uint64, + namespace share.Namespace, + commitment blob.Commitment, +) (*blob.Blob, error) { + return api.Internal.Get(ctx, height, namespace, commitment) +} + +func (api *API) GetAll(ctx context.Context, height uint64, namespaces []share.Namespace) ([]*blob.Blob, error) { + return api.Internal.GetAll(ctx, height, namespaces) +} + +func (api *API) GetProof( + ctx context.Context, + height uint64, + namespace share.Namespace, + commitment blob.Commitment, +) (*blob.Proof, error) { + return api.Internal.GetProof(ctx, height, namespace, commitment) +} + +func (api *API) Included( + ctx context.Context, + height uint64, + namespace share.Namespace, + proof *blob.Proof, + commitment blob.Commitment, +) (bool, error) { + return api.Internal.Included(ctx, height, namespace, proof, commitment) +} diff --git a/nodebuilder/blob/cmd/blob.go b/nodebuilder/blob/cmd/blob.go new file mode 100644 index 0000000000..25a102843b --- /dev/null +++ b/nodebuilder/blob/cmd/blob.go @@ -0,0 +1,298 @@ +package cmd + +import ( + "encoding/base64" + "errors" + "fmt" + "path/filepath" + "reflect" + "strconv" + + "github.com/spf13/cobra" + + "github.com/celestiaorg/celestia-node/blob" + cmdnode "github.com/celestiaorg/celestia-node/cmd" + "github.com/celestiaorg/celestia-node/share" +) + +var ( + base64Flag bool + + gasPrice float64 + + // flagFileInput allows the user to provide file path to the json file + // for submitting multiple blobs. 
+	flagFileInput = "input-file"
+)
+
+func init() {
+	Cmd.AddCommand(getCmd, getAllCmd, submitCmd, getProofCmd)
+
+	getCmd.PersistentFlags().BoolVar(
+		&base64Flag,
+		"base64",
+		false,
+		"printed blob's data as a base64 string",
+	)
+
+	getAllCmd.PersistentFlags().BoolVar(
+		&base64Flag,
+		"base64",
+		false,
+		"printed blob's data as a base64 string",
+	)
+
+	submitCmd.PersistentFlags().Float64Var(
+		&gasPrice,
+		"gas.price",
+		float64(blob.DefaultGasPrice()),
+		"specifies gas price (in utia) for blob submission.\n"+
+			"Gas price will be set to default (0.002) if no value is passed",
+	)
+
+	submitCmd.PersistentFlags().String(flagFileInput, "", "Specify the file input")
+}
+
+var Cmd = &cobra.Command{
+	Use:               "blob [command]",
+	Short:             "Allows interaction with the Blob Service via JSON-RPC",
+	Args:              cobra.NoArgs,
+	PersistentPreRunE: cmdnode.InitClient,
+}
+
+var getCmd = &cobra.Command{
+	Use:   "get [height] [namespace] [commitment]",
+	Args:  cobra.ExactArgs(3),
+	Short: "Returns the blob for the given namespace by commitment at a particular height.",
+	RunE: func(cmd *cobra.Command, args []string) error {
+		client, err := cmdnode.ParseClientFromCtx(cmd.Context())
+		if err != nil {
+			return err
+		}
+		defer client.Close()
+
+		height, err := strconv.ParseUint(args[0], 10, 64)
+		if err != nil {
+			return fmt.Errorf("error parsing a height: %v", err)
+		}
+
+		namespace, err := cmdnode.ParseV0Namespace(args[1])
+		if err != nil {
+			return fmt.Errorf("error parsing a namespace: %v", err)
+		}
+
+		commitment, err := base64.StdEncoding.DecodeString(args[2])
+		if err != nil {
+			return fmt.Errorf("error parsing a commitment: %v", err)
+		}
+
+		blob, err := client.Blob.Get(cmd.Context(), height, namespace, commitment)
+
+		formatter := formatData
+		if base64Flag || err != nil {
+			formatter = nil
+		}
+		return cmdnode.PrintOutput(blob, err, formatter)
+	},
+}
+
+var getAllCmd = &cobra.Command{
+	Use:   "get-all [height] [namespace]",
+	Args:  cobra.ExactArgs(2),
+	Short: "Returns all blobs for the given namespace at a particular height.",
+	RunE: func(cmd *cobra.Command, args []string) error {
+		client, err := cmdnode.ParseClientFromCtx(cmd.Context())
+		if err != nil {
+			return err
+		}
+		defer client.Close()
+
+		height, err := strconv.ParseUint(args[0], 10, 64)
+		if err != nil {
+			return fmt.Errorf("error parsing a height: %v", err)
+		}
+
+		namespace, err := cmdnode.ParseV0Namespace(args[1])
+		if err != nil {
+			return fmt.Errorf("error parsing a namespace: %v", err)
+		}
+
+		blobs, err := client.Blob.GetAll(cmd.Context(), height, []share.Namespace{namespace})
+		formatter := formatData
+		if base64Flag || err != nil {
+			formatter = nil
+		}
+		return cmdnode.PrintOutput(blobs, err, formatter)
+	},
+}
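+
+// A hypothetical sketch of the RPC call the get commands above wrap; the height
+// and namespace values are made up, and client comes from cmdnode.ParseClientFromCtx
+// exactly as in the RunE functions:
+//
+//	ns, _ := cmdnode.ParseV0Namespace("0x42690c204d39600fddd3")
+//	blobs, err := client.Blob.GetAll(cmd.Context(), 42, []share.Namespace{ns})
+//	_ = cmdnode.PrintOutput(blobs, err, formatData)
+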
+var submitCmd = &cobra.Command{
+	Use: "submit [namespace] [blobData]",
+	Args: func(cmd *cobra.Command, args []string) error {
+		path, err := cmd.Flags().GetString(flagFileInput)
+		if err != nil {
+			return err
+		}
+
+		// If there is a file path input we'll check for the file extension
+		if path != "" {
+			if filepath.Ext(path) != ".json" {
+				return fmt.Errorf("invalid file extension: expected .json, got %s", filepath.Ext(path))
+			}
+
+			return nil
+		}
+
+		if len(args) < 2 {
+			return errors.New("submit requires two arguments: namespace and blobData")
+		}
+
+		return nil
+	},
+	Short: "Submit the blob(s) at the given namespace(s).\n" +
+		"Pass a namespace and blobData as arguments for a single blob submission,\n" +
+		"or use the --input-file flag with the path to a json file for multiple blob submission,\n" +
		`where the json file contains:
+
+			{
+				"Blobs": [
+					{
+						"namespace": "0x00010203040506070809",
+						"blobData": "0x676d"
+					},
+					{
+						"namespace": "0x42690c204d39600fddd3",
+						"blobData": "0x676d"
+					}
+				]
+			}` +
+		"\nNote:\n" +
+		"* fee and gas limit params will be calculated automatically.\n",
+	RunE: func(cmd *cobra.Command, args []string) error {
+		client, err := cmdnode.ParseClientFromCtx(cmd.Context())
+		if err != nil {
+			return err
+		}
+		defer client.Close()
+
+		path, err := cmd.Flags().GetString(flagFileInput)
+		if err != nil {
+			return err
+		}
+
+		jsonBlobs := make([]blobJSON, 0)
+		// If a file input was provided, parse the blobs from the file;
+		// otherwise, build a single blob from the namespace and blobData arguments.
+		if path != "" {
+			parsedBlobs, err := parseSubmitBlobs(path)
+			if err != nil {
+				return err
+			}
+
+			jsonBlobs = append(jsonBlobs, parsedBlobs...)
+		} else {
+			jsonBlobs = append(jsonBlobs, blobJSON{Namespace: args[0], BlobData: args[1]})
+		}
+
+		var blobs []*blob.Blob
+		var commitments []blob.Commitment
+		for _, jsonBlob := range jsonBlobs {
+			blob, err := getBlobFromArguments(jsonBlob.Namespace, jsonBlob.BlobData)
+			if err != nil {
+				return err
+			}
+			blobs = append(blobs, blob)
+			commitments = append(commitments, blob.Commitment)
+		}
+
+		height, err := client.Blob.Submit(
+			cmd.Context(),
+			blobs,
+			blob.GasPrice(gasPrice),
+		)
+
+		response := struct {
+			Height      uint64            `json:"height"`
+			Commitments []blob.Commitment `json:"commitments"`
+		}{
+			Height:      height,
+			Commitments: commitments,
+		}
+		return cmdnode.PrintOutput(response, err, nil)
+	},
+}
+
+func getBlobFromArguments(namespaceArg, blobArg string) (*blob.Blob, error) {
+	namespace, err := cmdnode.ParseV0Namespace(namespaceArg)
+	if err != nil {
+		return nil, fmt.Errorf("error parsing a namespace: %v", err)
+	}
+
+	parsedBlob, err := blob.NewBlobV0(namespace, []byte(blobArg))
+	if err != nil {
+		return nil, fmt.Errorf("error creating a blob: %v", err)
+	}
+
+	return parsedBlob, nil
+}
+
+var getProofCmd = &cobra.Command{
+	Use:   "get-proof [height] [namespace] [commitment]",
+	Args:  cobra.ExactArgs(3),
+	Short: "Retrieves the blob in the given namespace at the given height by commitment and returns its Proof.",
+	RunE: func(cmd *cobra.Command, args []string) error {
+		client, err := cmdnode.ParseClientFromCtx(cmd.Context())
+		if err != nil {
+			return err
+		}
+		defer client.Close()
+
+		height, err := strconv.ParseUint(args[0], 10, 64)
+		if err != nil {
+			return fmt.Errorf("error parsing a height: %v", err)
+		}
+
+		namespace, err := cmdnode.ParseV0Namespace(args[1])
+		if err != nil {
+			return fmt.Errorf("error parsing a namespace: %v", err)
+		}
+
+		commitment, err := base64.StdEncoding.DecodeString(args[2])
+		if err != nil {
+			return fmt.Errorf("error parsing a commitment: %v", err)
+		}
+
+		proof, err := client.Blob.GetProof(cmd.Context(), height, namespace, commitment)
+		return cmdnode.PrintOutput(proof, err, nil)
+	},
+}
+
+func formatData(data interface{}) interface{} {
+	type tempBlob struct {
+		Namespace    []byte `json:"namespace"`
+		Data         string `json:"data"`
+		ShareVersion uint32 `json:"share_version"`
+		Commitment   []byte `json:"commitment"`
+	}
+
+	if reflect.TypeOf(data).Kind() == reflect.Slice {
+		blobs := data.([]*blob.Blob)
+		result := make([]tempBlob, len(blobs))
+		for i, b := range blobs {
+			result[i] = tempBlob{
+				Namespace:    b.Namespace(),
+				Data:         string(b.Data),
+				ShareVersion: b.ShareVersion,
+				Commitment:   b.Commitment,
+			}
+		}
+		return result
+	}
+
+	b := data.(*blob.Blob)
+	return tempBlob{
+		Namespace:    b.Namespace(),
+		Data:         string(b.Data),
+		ShareVersion: b.ShareVersion,
+
Commitment: b.Commitment, + } +} diff --git a/nodebuilder/blob/cmd/util.go b/nodebuilder/blob/cmd/util.go new file mode 100644 index 0000000000..a33b9c1a84 --- /dev/null +++ b/nodebuilder/blob/cmd/util.go @@ -0,0 +1,32 @@ +package cmd + +import ( + "encoding/json" + "os" +) + +// Define the raw content from the file input. +type blobs struct { + Blobs []blobJSON +} + +type blobJSON struct { + Namespace string + BlobData string +} + +func parseSubmitBlobs(path string) ([]blobJSON, error) { + var rawBlobs blobs + + content, err := os.ReadFile(path) + if err != nil { + return []blobJSON{}, err + } + + err = json.Unmarshal(content, &rawBlobs) + if err != nil { + return []blobJSON{}, err + } + + return rawBlobs.Blobs, err +} diff --git a/nodebuilder/blob/mocks/api.go b/nodebuilder/blob/mocks/api.go new file mode 100644 index 0000000000..0898e70459 --- /dev/null +++ b/nodebuilder/blob/mocks/api.go @@ -0,0 +1,112 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/celestiaorg/celestia-node/nodebuilder/blob (interfaces: Module) + +// Package mocks is a generated GoMock package. +package mocks + +import ( + context "context" + reflect "reflect" + + blob "github.com/celestiaorg/celestia-node/blob" + share "github.com/celestiaorg/celestia-node/share" + gomock "github.com/golang/mock/gomock" +) + +// MockModule is a mock of Module interface. +type MockModule struct { + ctrl *gomock.Controller + recorder *MockModuleMockRecorder +} + +// MockModuleMockRecorder is the mock recorder for MockModule. +type MockModuleMockRecorder struct { + mock *MockModule +} + +// NewMockModule creates a new mock instance. +func NewMockModule(ctrl *gomock.Controller) *MockModule { + mock := &MockModule{ctrl: ctrl} + mock.recorder = &MockModuleMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockModule) EXPECT() *MockModuleMockRecorder { + return m.recorder +} + +// Get mocks base method. +func (m *MockModule) Get(arg0 context.Context, arg1 uint64, arg2 share.Namespace, arg3 blob.Commitment) (*blob.Blob, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Get", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(*blob.Blob) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Get indicates an expected call of Get. +func (mr *MockModuleMockRecorder) Get(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockModule)(nil).Get), arg0, arg1, arg2, arg3) +} + +// GetAll mocks base method. +func (m *MockModule) GetAll(arg0 context.Context, arg1 uint64, arg2 []share.Namespace) ([]*blob.Blob, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetAll", arg0, arg1, arg2) + ret0, _ := ret[0].([]*blob.Blob) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetAll indicates an expected call of GetAll. +func (mr *MockModuleMockRecorder) GetAll(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAll", reflect.TypeOf((*MockModule)(nil).GetAll), arg0, arg1, arg2) +} + +// GetProof mocks base method. +func (m *MockModule) GetProof(arg0 context.Context, arg1 uint64, arg2 share.Namespace, arg3 blob.Commitment) (*blob.Proof, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetProof", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(*blob.Proof) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetProof indicates an expected call of GetProof. 
+func (mr *MockModuleMockRecorder) GetProof(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetProof", reflect.TypeOf((*MockModule)(nil).GetProof), arg0, arg1, arg2, arg3) +} + +// Included mocks base method. +func (m *MockModule) Included(arg0 context.Context, arg1 uint64, arg2 share.Namespace, arg3 *blob.Proof, arg4 blob.Commitment) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Included", arg0, arg1, arg2, arg3, arg4) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Included indicates an expected call of Included. +func (mr *MockModuleMockRecorder) Included(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Included", reflect.TypeOf((*MockModule)(nil).Included), arg0, arg1, arg2, arg3, arg4) +} + +// Submit mocks base method. +func (m *MockModule) Submit(arg0 context.Context, arg1 []*blob.Blob, arg2 blob.GasPrice) (uint64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Submit", arg0, arg1, arg2) + ret0, _ := ret[0].(uint64) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Submit indicates an expected call of Submit. +func (mr *MockModuleMockRecorder) Submit(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Submit", reflect.TypeOf((*MockModule)(nil).Submit), arg0, arg1, arg2) +} diff --git a/nodebuilder/blob/module.go b/nodebuilder/blob/module.go new file mode 100644 index 0000000000..76e7677725 --- /dev/null +++ b/nodebuilder/blob/module.go @@ -0,0 +1,28 @@ +package blob + +import ( + "context" + + "go.uber.org/fx" + + "github.com/celestiaorg/celestia-node/blob" + "github.com/celestiaorg/celestia-node/header" + headerService "github.com/celestiaorg/celestia-node/nodebuilder/header" + "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/state" +) + +func ConstructModule() fx.Option { + return fx.Module("blob", + fx.Provide( + func(service headerService.Module) func(context.Context, uint64) (*header.ExtendedHeader, error) { + return service.GetByHeight + }), + fx.Provide(func( + state *state.CoreAccessor, + sGetter share.Getter, + getByHeightFn func(context.Context, uint64) (*header.ExtendedHeader, error), + ) Module { + return blob.NewService(state, sGetter, getByHeightFn) + })) +} diff --git a/nodebuilder/config.go b/nodebuilder/config.go new file mode 100644 index 0000000000..41f24d6d3d --- /dev/null +++ b/nodebuilder/config.go @@ -0,0 +1,171 @@ +package nodebuilder + +import ( + "io" + "os" + + "github.com/BurntSushi/toml" + "github.com/imdario/mergo" + + "github.com/celestiaorg/celestia-node/libs/fslock" + "github.com/celestiaorg/celestia-node/nodebuilder/core" + "github.com/celestiaorg/celestia-node/nodebuilder/das" + "github.com/celestiaorg/celestia-node/nodebuilder/gateway" + "github.com/celestiaorg/celestia-node/nodebuilder/header" + "github.com/celestiaorg/celestia-node/nodebuilder/node" + "github.com/celestiaorg/celestia-node/nodebuilder/p2p" + "github.com/celestiaorg/celestia-node/nodebuilder/rpc" + "github.com/celestiaorg/celestia-node/nodebuilder/share" + "github.com/celestiaorg/celestia-node/nodebuilder/state" +) + +// ConfigLoader defines a function that loads a config from any source. +type ConfigLoader func() (*Config, error) + +// Config is main configuration structure for a Node. 
+// It combines configuration units for all Node subsystems. +type Config struct { + Node node.Config + Core core.Config + State state.Config + P2P p2p.Config + RPC rpc.Config + Gateway gateway.Config + Share share.Config + Header header.Config + DASer das.Config `toml:",omitempty"` +} + +// DefaultConfig provides a default Config for a given Node Type 'tp'. +// NOTE: Currently, configs are identical, but this will change. +func DefaultConfig(tp node.Type) *Config { + commonConfig := &Config{ + Node: node.DefaultConfig(tp), + Core: core.DefaultConfig(), + State: state.DefaultConfig(), + P2P: p2p.DefaultConfig(tp), + RPC: rpc.DefaultConfig(), + Gateway: gateway.DefaultConfig(), + Share: share.DefaultConfig(tp), + Header: header.DefaultConfig(tp), + } + + switch tp { + case node.Bridge: + return commonConfig + case node.Light, node.Full: + commonConfig.DASer = das.DefaultConfig(tp) + return commonConfig + default: + panic("node: invalid node type") + } +} + +// SaveConfig saves Config 'cfg' under the given 'path'. +func SaveConfig(path string, cfg *Config) error { + f, err := os.Create(path) + if err != nil { + return err + } + defer f.Close() + + return cfg.Encode(f) +} + +// LoadConfig loads Config from the given 'path'. +func LoadConfig(path string) (*Config, error) { + f, err := os.Open(path) + if err != nil { + return nil, err + } + defer f.Close() + + var cfg Config + return &cfg, cfg.Decode(f) +} + +// RemoveConfig removes the Config from the given store path. +func RemoveConfig(path string) (err error) { + path, err = storePath(path) + if err != nil { + return + } + + flock, err := fslock.Lock(lockPath(path)) + if err != nil { + if err == fslock.ErrLocked { + err = ErrOpened + } + return + } + defer flock.Unlock() //nolint: errcheck + + return removeConfig(configPath(path)) +} + +// removeConfig removes Config from the given 'path'. +func removeConfig(path string) error { + return os.Remove(path) +} + +// UpdateConfig loads the node's config and applies new values +// from the default config of the given node type, saving the +// newly updated config into the node's config path. +func UpdateConfig(tp node.Type, path string) (err error) { + path, err = storePath(path) + if err != nil { + return err + } + + flock, err := fslock.Lock(lockPath(path)) + if err != nil { + if err == fslock.ErrLocked { + err = ErrOpened + } + return err + } + defer flock.Unlock() //nolint: errcheck + + newCfg := DefaultConfig(tp) + + cfgPath := configPath(path) + cfg, err := LoadConfig(cfgPath) + if err != nil { + return err + } + + cfg, err = updateConfig(cfg, newCfg) + if err != nil { + return err + } + + // save the updated config + err = removeConfig(cfgPath) + if err != nil { + return err + } + return SaveConfig(cfgPath, cfg) +} + +// updateConfig merges new values from the new config into the old +// config, returning the updated old config. +func updateConfig(oldCfg *Config, newCfg *Config) (*Config, error) { + err := mergo.Merge(oldCfg, newCfg, mergo.WithOverrideEmptySlice) + return oldCfg, err +} + +// TODO(@Wondertan): We should have a description for each field written into w, +// so users can instantly understand purpose of each field. Ideally, we should have a utility +// program to parse comments from actual sources(*.go files) and generate docs from comments. + +// Hint: use 'ast' package. +// Encode encodes a given Config into w. +func (cfg *Config) Encode(w io.Writer) error { + return toml.NewEncoder(w).Encode(cfg) +} + +// Decode decodes a Config from a given reader r. 
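+//
+// A minimal round-trip sketch (assumes a bytes.Buffer; mirrors TestConfigWriteRead
+// in the test file below):
+//
+//	var buf bytes.Buffer
+//	_ = DefaultConfig(node.Light).Encode(&buf)
+//	var cfg Config
+//	err := cfg.Decode(&buf)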
+func (cfg *Config) Decode(r io.Reader) error { + _, err := toml.NewDecoder(r).Decode(cfg) + return err +} diff --git a/nodebuilder/config_test.go b/nodebuilder/config_test.go new file mode 100644 index 0000000000..e7b64b0aed --- /dev/null +++ b/nodebuilder/config_test.go @@ -0,0 +1,137 @@ +package nodebuilder + +import ( + "bytes" + "testing" + + "github.com/BurntSushi/toml" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/celestiaorg/celestia-node/nodebuilder/node" +) + +// TestConfigWriteRead tests that the configs for all node types can be encoded to and from TOML. +func TestConfigWriteRead(t *testing.T) { + tests := []node.Type{ + node.Full, + node.Light, + node.Bridge, + } + + for _, tp := range tests { + t.Run(tp.String(), func(t *testing.T) { + buf := bytes.NewBuffer(nil) + in := DefaultConfig(tp) + + err := in.Encode(buf) + require.NoError(t, err) + + var out Config + err = out.Decode(buf) + require.NoError(t, err) + assert.EqualValues(t, in, &out) + }) + } +} + +// TestUpdateConfig tests that updating an outdated config +// using a new default config applies the correct values and +// preserves old custom values. +func TestUpdateConfig(t *testing.T) { + cfg := new(Config) + _, err := toml.Decode(outdatedConfig, cfg) + require.NoError(t, err) + + newCfg := DefaultConfig(node.Light) + // ensure this config field is not filled in the outdated config + require.NotEqual(t, newCfg.Share.PeerManagerParams, cfg.Share.PeerManagerParams) + + cfg, err = updateConfig(cfg, newCfg) + require.NoError(t, err) + // ensure this config field is now set after updating the config + require.Equal(t, newCfg.Share.PeerManagerParams, cfg.Share.PeerManagerParams) + // ensure old custom values were not changed + require.Equal(t, "thisshouldnthavechanged", cfg.State.KeyringAccName) + require.Equal(t, "7979", cfg.RPC.Port) + require.True(t, cfg.Gateway.Enabled) +} + +// outdatedConfig is an outdated config from a light node +var outdatedConfig = ` +[Core] + IP = "0.0.0.0" + RPCPort = "0" + GRPCPort = "0" + +[State] + KeyringAccName = "thisshouldnthavechanged" + KeyringBackend = "test" + +[P2P] + ListenAddresses = ["/ip4/0.0.0.0/udp/2121/quic-v1", "/ip6/::/udp/2121/quic-v1", "/ip4/0.0.0.0/tcp/2121", +"/ip6/::/tcp/2121"] + AnnounceAddresses = [] + NoAnnounceAddresses = ["/ip4/0.0.0.0/udp/2121/quic-v1", "/ip4/127.0.0.1/udp/2121/quic-v1", "/ip6/::/udp/2121/quic-v1", +"/ip4/0.0.0.0/tcp/2121", "/ip4/127.0.0.1/tcp/2121", "/ip6/::/tcp/2121"] + MutualPeers = [] + PeerExchange = false + RoutingTableRefreshPeriod = "1m0s" + [P2P.ConnManager] + Low = 50 + High = 100 + GracePeriod = "1m0s" + [P2P.Metrics] + PrometheusAgentPort = "8890" + +[RPC] + Address = "0.0.0.0" + Port = "7979" + +[Gateway] + Address = "0.0.0.0" + Port = "26659" + Enabled = true + +[Share] + PeersLimit = 5 + DiscoveryInterval = "30s" + AdvertiseInterval = "30s" + UseShareExchange = true + [Share.ShrExEDSParams] + ServerReadTimeout = "5s" + ServerWriteTimeout = "1m0s" + HandleRequestTimeout = "1m0s" + ConcurrencyLimit = 10 + BufferSize = 32768 + [Share.ShrExNDParams] + ServerReadTimeout = "5s" + ServerWriteTimeout = "2m35s" + HandleRequestTimeout = "1m0s" + ConcurrencyLimit = 10 + +[Header] + TrustedHash = "" + TrustedPeers = [] + [Header.Store] + StoreCacheSize = 4096 + IndexCacheSize = 16384 + WriteBatchSize = 2048 + [Header.Syncer] + TrustingPeriod = "168h0m0s" + [Header.Server] + WriteDeadline = "8s" + ReadDeadline = "1m0s" + RangeRequestTimeout = "10s" + [Header.Client] + MaxHeadersPerRangeRequest = 
64
+  RangeRequestTimeout = "8s"
+  TrustedPeersRequestTimeout = "300ms"
+
+[DASer]
+  SamplingRange = 100
+  ConcurrencyLimit = 16
+  BackgroundStoreInterval = "10m0s"
+  SampleFrom = 1
+  SampleTimeout = "4m0s"
+`
diff --git a/nodebuilder/core/config.go b/nodebuilder/core/config.go
new file mode 100644
index 0000000000..bb5eea5b83
--- /dev/null
+++ b/nodebuilder/core/config.go
@@ -0,0 +1,55 @@
+package core
+
+import (
+	"fmt"
+	"strconv"
+
+	"github.com/celestiaorg/celestia-node/libs/utils"
+)
+
+var MetricsEnabled bool
+
+// Config combines all configuration fields for managing the relationship with a Core node.
+type Config struct {
+	IP       string
+	RPCPort  string
+	GRPCPort string
+}
+
+// DefaultConfig returns default configuration for managing the
+// node's connection to a Celestia-Core endpoint.
+func DefaultConfig() Config {
+	return Config{
+		IP:       "",
+		RPCPort:  "26657",
+		GRPCPort: "9090",
+	}
+}
+
+// Validate performs basic validation of the config.
+func (cfg *Config) Validate() error {
+	if !cfg.IsEndpointConfigured() {
+		return nil
+	}
+
+	ip, err := utils.ValidateAddr(cfg.IP)
+	if err != nil {
+		return err
+	}
+	cfg.IP = ip
+	_, err = strconv.Atoi(cfg.RPCPort)
+	if err != nil {
+		return fmt.Errorf("nodebuilder/core: invalid rpc port: %s", err.Error())
+	}
+	_, err = strconv.Atoi(cfg.GRPCPort)
+	if err != nil {
+		return fmt.Errorf("nodebuilder/core: invalid grpc port: %s", err.Error())
+	}
+	return nil
+}
+
+// IsEndpointConfigured returns whether a core endpoint has been set
+// on the config (true if set).
+func (cfg *Config) IsEndpointConfigured() bool {
+	return cfg.IP != ""
+}
diff --git a/nodebuilder/core/constructors.go b/nodebuilder/core/constructors.go
new file mode 100644
index 0000000000..53c914a041
--- /dev/null
+++ b/nodebuilder/core/constructors.go
@@ -0,0 +1,9 @@
+package core
+
+import (
+	"github.com/celestiaorg/celestia-node/core"
+)
+
+func remote(cfg Config) (core.Client, error) {
+	return core.NewRemote(cfg.IP, cfg.RPCPort)
+}
diff --git a/nodebuilder/core/flags.go b/nodebuilder/core/flags.go
new file mode 100644
index 0000000000..9cbed9b277
--- /dev/null
+++ b/nodebuilder/core/flags.go
@@ -0,0 +1,60 @@
+package core
+
+import (
+	"fmt"
+
+	"github.com/spf13/cobra"
+	flag "github.com/spf13/pflag"
+)
+
+var (
+	coreFlag     = "core.ip"
+	coreRPCFlag  = "core.rpc.port"
+	coreGRPCFlag = "core.grpc.port"
+)
+
+// Flags gives a set of hardcoded Core flags.
+func Flags() *flag.FlagSet {
+	flags := &flag.FlagSet{}
+
+	flags.String(
+		coreFlag,
+		"",
+		"Indicates node to connect to the given core node. "+
+			"Example: 127.0.0.1 or subdomain.domain.tld. "+
+			"Assumes RPC port 26657 and gRPC port 9090 as defaults unless otherwise specified.",
+	)
+	flags.String(
+		coreRPCFlag,
+		"26657",
+		"Set a custom RPC port for the core node connection. The --core.ip flag must also be provided.",
+	)
+	flags.String(
+		coreGRPCFlag,
+		"9090",
+		"Set a custom gRPC port for the core node connection. The --core.ip flag must also be provided.",
+	)
+	return flags
+}
+
+// ParseFlags parses Core flags from the given cmd and saves them to the passed config.
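+//
+// A hypothetical invocation from the command line, using the flag names defined
+// above (the ports shown are the defaults):
+//
+//	celestia light start --core.ip 127.0.0.1 --core.rpc.port 26657 --core.grpc.port 9090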
+func ParseFlags( + cmd *cobra.Command, + cfg *Config, +) error { + coreIP := cmd.Flag(coreFlag).Value.String() + if coreIP == "" { + if cmd.Flag(coreGRPCFlag).Changed || cmd.Flag(coreRPCFlag).Changed { + return fmt.Errorf("cannot specify RPC/gRPC ports without specifying an IP address for --core.ip") + } + return nil + } + + rpc := cmd.Flag(coreRPCFlag).Value.String() + grpc := cmd.Flag(coreGRPCFlag).Value.String() + + cfg.IP = coreIP + cfg.RPCPort = rpc + cfg.GRPCPort = grpc + return cfg.Validate() +} diff --git a/nodebuilder/core/module.go b/nodebuilder/core/module.go new file mode 100644 index 0000000000..7c5c9e6bfd --- /dev/null +++ b/nodebuilder/core/module.go @@ -0,0 +1,88 @@ +package core + +import ( + "context" + + "go.uber.org/fx" + + libhead "github.com/celestiaorg/go-header" + + "github.com/celestiaorg/celestia-node/core" + "github.com/celestiaorg/celestia-node/header" + "github.com/celestiaorg/celestia-node/libs/fxutil" + "github.com/celestiaorg/celestia-node/nodebuilder/node" + "github.com/celestiaorg/celestia-node/nodebuilder/p2p" + "github.com/celestiaorg/celestia-node/share/eds" + "github.com/celestiaorg/celestia-node/share/p2p/shrexsub" +) + +// ConstructModule collects all the components and services related to managing the relationship +// with the Core node. +func ConstructModule(tp node.Type, cfg *Config, options ...fx.Option) fx.Option { + // sanitize config values before constructing module + cfgErr := cfg.Validate() + + baseComponents := fx.Options( + fx.Supply(*cfg), + fx.Error(cfgErr), + fx.Options(options...), + ) + + switch tp { + case node.Light, node.Full: + return fx.Module("core", baseComponents) + case node.Bridge: + return fx.Module("core", + baseComponents, + fx.Provide(core.NewBlockFetcher), + fxutil.ProvideAs( + func( + fetcher *core.BlockFetcher, + store *eds.Store, + construct header.ConstructFn, + ) (*core.Exchange, error) { + var opts []core.Option + if MetricsEnabled { + opts = append(opts, core.WithMetrics()) + } + + return core.NewExchange(fetcher, store, construct, opts...) + }, + new(libhead.Exchange[*header.ExtendedHeader])), + fx.Invoke(fx.Annotate( + func( + bcast libhead.Broadcaster[*header.ExtendedHeader], + fetcher *core.BlockFetcher, + pubsub *shrexsub.PubSub, + construct header.ConstructFn, + store *eds.Store, + chainID p2p.Network, + ) (*core.Listener, error) { + opts := []core.Option{core.WithChainID(chainID)} + if MetricsEnabled { + opts = append(opts, core.WithMetrics()) + } + + return core.NewListener(bcast, fetcher, pubsub.Broadcast, construct, store, p2p.BlockTime, opts...) 
+ }, + fx.OnStart(func(ctx context.Context, listener *core.Listener) error { + return listener.Start(ctx) + }), + fx.OnStop(func(ctx context.Context, listener *core.Listener) error { + return listener.Stop(ctx) + }), + )), + fx.Provide(fx.Annotate( + remote, + fx.OnStart(func(ctx context.Context, client core.Client) error { + return client.Start() + }), + fx.OnStop(func(ctx context.Context, client core.Client) error { + return client.Stop() + }), + )), + ) + default: + panic("invalid node type") + } +} diff --git a/nodebuilder/core/opts.go b/nodebuilder/core/opts.go new file mode 100644 index 0000000000..56347a5cb6 --- /dev/null +++ b/nodebuilder/core/opts.go @@ -0,0 +1,19 @@ +package core + +import ( + "go.uber.org/fx" + + "github.com/celestiaorg/celestia-node/core" + "github.com/celestiaorg/celestia-node/header" + "github.com/celestiaorg/celestia-node/libs/fxutil" +) + +// WithClient sets custom client for core process +func WithClient(client core.Client) fx.Option { + return fxutil.ReplaceAs(client, new(core.Client)) +} + +// WithHeaderConstructFn sets custom func that creates extended header +func WithHeaderConstructFn(construct header.ConstructFn) fx.Option { + return fx.Replace(construct) +} diff --git a/nodebuilder/da/da.go b/nodebuilder/da/da.go new file mode 100644 index 0000000000..0d604d769f --- /dev/null +++ b/nodebuilder/da/da.go @@ -0,0 +1,54 @@ +package da + +import ( + "context" + + "github.com/rollkit/go-da" +) + +//go:generate mockgen -destination=mocks/api.go -package=mocks . Module +type Module interface { + da.DA +} + +// API is a wrapper around Module for the RPC. +// TODO(@distractedm1nd): These structs need to be autogenerated. +type API struct { + Internal struct { + MaxBlobSize func(ctx context.Context) (uint64, error) `perm:"read"` + Get func(ctx context.Context, ids []da.ID, ns da.Namespace) ([]da.Blob, error) `perm:"read"` + GetIDs func(ctx context.Context, height uint64, ns da.Namespace) ([]da.ID, error) `perm:"read"` + GetProofs func(ctx context.Context, ids []da.ID, ns da.Namespace) ([]da.Proof, error) `perm:"read"` + Commit func(ctx context.Context, blobs []da.Blob, ns da.Namespace) ([]da.Commitment, error) `perm:"read"` + Validate func(context.Context, []da.ID, []da.Proof, da.Namespace) ([]bool, error) `perm:"read"` + Submit func(context.Context, []da.Blob, float64, da.Namespace) ([]da.ID, error) `perm:"write"` + } +} + +func (api *API) MaxBlobSize(ctx context.Context) (uint64, error) { + return api.Internal.MaxBlobSize(ctx) +} + +func (api *API) Get(ctx context.Context, ids []da.ID, ns da.Namespace) ([]da.Blob, error) { + return api.Internal.Get(ctx, ids, ns) +} + +func (api *API) GetIDs(ctx context.Context, height uint64, ns da.Namespace) ([]da.ID, error) { + return api.Internal.GetIDs(ctx, height, ns) +} + +func (api *API) GetProofs(ctx context.Context, ids []da.ID, ns da.Namespace) ([]da.Proof, error) { + return api.Internal.GetProofs(ctx, ids, ns) +} + +func (api *API) Commit(ctx context.Context, blobs []da.Blob, ns da.Namespace) ([]da.Commitment, error) { + return api.Internal.Commit(ctx, blobs, ns) +} + +func (api *API) Validate(ctx context.Context, ids []da.ID, proofs []da.Proof, ns da.Namespace) ([]bool, error) { + return api.Internal.Validate(ctx, ids, proofs, ns) +} + +func (api *API) Submit(ctx context.Context, blobs []da.Blob, gasPrice float64, ns da.Namespace) ([]da.ID, error) { + return api.Internal.Submit(ctx, blobs, gasPrice, ns) +} diff --git a/nodebuilder/da/mocks/api.go b/nodebuilder/da/mocks/api.go new file mode 100644 index 
0000000000..5895240906 --- /dev/null +++ b/nodebuilder/da/mocks/api.go @@ -0,0 +1,140 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/celestiaorg/celestia-node/nodebuilder/da (interfaces: Module) + +// Package mocks is a generated GoMock package. +package mocks + +import ( + context "context" + reflect "reflect" + + gomock "github.com/golang/mock/gomock" +) + +// MockModule is a mock of Module interface. +type MockModule struct { + ctrl *gomock.Controller + recorder *MockModuleMockRecorder +} + +// MockModuleMockRecorder is the mock recorder for MockModule. +type MockModuleMockRecorder struct { + mock *MockModule +} + +// NewMockModule creates a new mock instance. +func NewMockModule(ctrl *gomock.Controller) *MockModule { + mock := &MockModule{ctrl: ctrl} + mock.recorder = &MockModuleMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockModule) EXPECT() *MockModuleMockRecorder { + return m.recorder +} + +// Commit mocks base method. +func (m *MockModule) Commit(arg0 context.Context, arg1 [][]byte, arg2 []byte) ([][]byte, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Commit", arg0, arg1, arg2) + ret0, _ := ret[0].([][]byte) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Commit indicates an expected call of Commit. +func (mr *MockModuleMockRecorder) Commit(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Commit", reflect.TypeOf((*MockModule)(nil).Commit), arg0, arg1, arg2) +} + +// Get mocks base method. +func (m *MockModule) Get(arg0 context.Context, arg1 [][]byte, arg2 []byte) ([][]byte, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Get", arg0, arg1, arg2) + ret0, _ := ret[0].([][]byte) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Get indicates an expected call of Get. +func (mr *MockModuleMockRecorder) Get(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockModule)(nil).Get), arg0, arg1, arg2) +} + +// GetIDs mocks base method. +func (m *MockModule) GetIDs(arg0 context.Context, arg1 uint64, arg2 []byte) ([][]byte, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetIDs", arg0, arg1, arg2) + ret0, _ := ret[0].([][]byte) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetIDs indicates an expected call of GetIDs. +func (mr *MockModuleMockRecorder) GetIDs(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetIDs", reflect.TypeOf((*MockModule)(nil).GetIDs), arg0, arg1, arg2) +} + +// GetProofs mocks base method. +func (m *MockModule) GetProofs(arg0 context.Context, arg1 [][]byte, arg2 []byte) ([][]byte, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetProofs", arg0, arg1, arg2) + ret0, _ := ret[0].([][]byte) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetProofs indicates an expected call of GetProofs. +func (mr *MockModuleMockRecorder) GetProofs(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetProofs", reflect.TypeOf((*MockModule)(nil).GetProofs), arg0, arg1, arg2) +} + +// MaxBlobSize mocks base method. 
+func (m *MockModule) MaxBlobSize(arg0 context.Context) (uint64, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "MaxBlobSize", arg0)
+	ret0, _ := ret[0].(uint64)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// MaxBlobSize indicates an expected call of MaxBlobSize.
+func (mr *MockModuleMockRecorder) MaxBlobSize(arg0 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MaxBlobSize", reflect.TypeOf((*MockModule)(nil).MaxBlobSize), arg0)
+}
+
+// Submit mocks base method.
+func (m *MockModule) Submit(arg0 context.Context, arg1 [][]byte, arg2 float64, arg3 []byte) ([][]byte, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "Submit", arg0, arg1, arg2, arg3)
+	ret0, _ := ret[0].([][]byte)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// Submit indicates an expected call of Submit.
+func (mr *MockModuleMockRecorder) Submit(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Submit", reflect.TypeOf((*MockModule)(nil).Submit), arg0, arg1, arg2, arg3)
+}
+
+// Validate mocks base method.
+func (m *MockModule) Validate(arg0 context.Context, arg1, arg2 [][]byte, arg3 []byte) ([]bool, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "Validate", arg0, arg1, arg2, arg3)
+	ret0, _ := ret[0].([]bool)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// Validate indicates an expected call of Validate.
+func (mr *MockModuleMockRecorder) Validate(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Validate", reflect.TypeOf((*MockModule)(nil).Validate), arg0, arg1, arg2, arg3)
+}
diff --git a/nodebuilder/da/module.go b/nodebuilder/da/module.go
new file mode 100644
index 0000000000..b119d11076
--- /dev/null
+++ b/nodebuilder/da/module.go
@@ -0,0 +1,14 @@
+package da
+
+import (
+	"go.uber.org/fx"
+)
+
+func ConstructModule() fx.Option {
+	return fx.Module("da",
+		fx.Provide(NewService),
+		fx.Provide(func(serv *Service) Module {
+			return serv
+		}),
+	)
+}
diff --git a/nodebuilder/da/service.go b/nodebuilder/da/service.go
new file mode 100644
index 0000000000..b775e10396
--- /dev/null
+++ b/nodebuilder/da/service.go
@@ -0,0 +1,188 @@
+package da
+
+import (
+	"context"
+	"encoding/binary"
+	"strings"
+
+	logging "github.com/ipfs/go-log/v2"
+	"github.com/rollkit/go-da"
+
+	"github.com/celestiaorg/celestia-app/pkg/appconsts"
+
+	"github.com/celestiaorg/celestia-node/blob"
+	nodeblob "github.com/celestiaorg/celestia-node/nodebuilder/blob"
+	"github.com/celestiaorg/celestia-node/share"
+)
+
+var _ da.DA = (*Service)(nil)
+
+var log = logging.Logger("go-da")
+
+// heightLen is the length (in bytes) of a serialized height.
+//
+// This is 8, as a uint64 consists of 8 bytes.
+const heightLen = 8
+
+type Service struct {
+	blobServ nodeblob.Module
+}
+
+func NewService(blobMod nodeblob.Module) *Service {
+	return &Service{
+		blobServ: blobMod,
+	}
+}
+
+// MaxBlobSize returns the max blob size
+func (s *Service) MaxBlobSize(context.Context) (uint64, error) {
+	return appconsts.DefaultMaxBytes, nil
+}
+
+// Get returns a Blob for each given ID, or an error.
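+//
+// A hypothetical usage sketch; IDs are built with MakeID (defined below) and all
+// values are assumed:
+//
+//	id := MakeID(height, commitment)
+//	blobs, err := serv.Get(ctx, []da.ID{id}, namespace)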
+func (s *Service) Get(ctx context.Context, ids []da.ID, ns da.Namespace) ([]da.Blob, error) {
+	blobs := make([]da.Blob, 0, len(ids))
+	for _, id := range ids {
+		height, commitment := SplitID(id)
+		log.Debugw("getting blob", "height", height, "commitment", commitment, "namespace", share.Namespace(ns))
+		currentBlob, err := s.blobServ.Get(ctx, height, ns, commitment)
+		log.Debugw("got blob", "height", height, "commitment", commitment, "namespace", share.Namespace(ns))
+		if err != nil {
+			return nil, err
+		}
+		blobs = append(blobs, currentBlob.Data)
+	}
+	return blobs, nil
+}
+
+// GetIDs returns IDs of all Blobs located in DA at given height.
+func (s *Service) GetIDs(ctx context.Context, height uint64, namespace da.Namespace) ([]da.ID, error) {
+	var ids []da.ID //nolint:prealloc
+	log.Debugw("getting ids", "height", height, "namespace", share.Namespace(namespace))
+	blobs, err := s.blobServ.GetAll(ctx, height, []share.Namespace{namespace})
+	log.Debugw("got ids", "height", height, "namespace", share.Namespace(namespace))
+	if err != nil {
+		if strings.Contains(err.Error(), blob.ErrBlobNotFound.Error()) {
+			return nil, nil
+		}
+		return nil, err
+	}
+	for _, b := range blobs {
+		ids = append(ids, MakeID(height, b.Commitment))
+	}
+	return ids, nil
+}
+
+// GetProofs returns inclusion Proofs for all Blobs located in DA at given height.
+func (s *Service) GetProofs(ctx context.Context, ids []da.ID, namespace da.Namespace) ([]da.Proof, error) {
+	proofs := make([]da.Proof, len(ids))
+	for i, id := range ids {
+		height, commitment := SplitID(id)
+		proof, err := s.blobServ.GetProof(ctx, height, namespace, commitment)
+		if err != nil {
+			return nil, err
+		}
+		proofs[i], err = proof.MarshalJSON()
+		if err != nil {
+			return nil, err
+		}
+	}
+	return proofs, nil
+}
+
+// Commit creates a Commitment for each given Blob.
+func (s *Service) Commit(_ context.Context, daBlobs []da.Blob, namespace da.Namespace) ([]da.Commitment, error) {
+	_, commitments, err := s.blobsAndCommitments(daBlobs, namespace)
+	return commitments, err
+}
+
+// Submit submits the Blobs to the Data Availability layer.
+func (s *Service) Submit(
+	ctx context.Context,
+	daBlobs []da.Blob,
+	gasPrice float64,
+	namespace da.Namespace,
+) ([]da.ID, error) {
+	blobs, _, err := s.blobsAndCommitments(daBlobs, namespace)
+	if err != nil {
+		return nil, err
+	}
+
+	height, err := s.blobServ.Submit(ctx, blobs, blob.GasPrice(gasPrice))
+	if err != nil {
+		log.Errorw("failed to submit blobs", "height", height, "gas price", gasPrice)
+		return nil, err
+	}
+	log.Infow("successfully submitted blobs", "height", height, "gas price", gasPrice)
+	ids := make([]da.ID, len(blobs))
+	for i, blob := range blobs {
+		ids[i] = MakeID(height, blob.Commitment)
+	}
+	return ids, nil
+}
+
+// blobsAndCommitments converts []da.Blob to []*blob.Blob and generates corresponding
+// []da.Commitment
+func (s *Service) blobsAndCommitments(
+	daBlobs []da.Blob, namespace da.Namespace,
+) ([]*blob.Blob, []da.Commitment, error) {
+	blobs := make([]*blob.Blob, 0, len(daBlobs))
+	commitments := make([]da.Commitment, 0, len(daBlobs))
+	for _, daBlob := range daBlobs {
+		b, err := blob.NewBlobV0(namespace, daBlob)
+		if err != nil {
+			return nil, nil, err
+		}
+		blobs = append(blobs, b)
+
+		commitments = append(commitments, b.Commitment)
+	}
+	return blobs, commitments, nil
+}
+
+// Validate validates Commitments against the corresponding Proofs. This should be possible without
+// retrieving the Blobs.
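+//
+// A hypothetical round-trip sketch (serv, ids, and namespace assumed):
+//
+//	proofs, _ := serv.GetProofs(ctx, ids, namespace)
+//	included, err := serv.Validate(ctx, ids, proofs, namespace)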
+func (s *Service) Validate(
+	ctx context.Context,
+	ids []da.ID,
+	daProofs []da.Proof,
+	namespace da.Namespace,
+) ([]bool, error) {
+	included := make([]bool, len(ids))
+	proofs := make([]*blob.Proof, len(ids))
+	for i, daProof := range daProofs {
+		blobProof := &blob.Proof{}
+		err := blobProof.UnmarshalJSON(daProof)
+		if err != nil {
+			return nil, err
+		}
+		proofs[i] = blobProof
+	}
+	for i, id := range ids {
+		height, commitment := SplitID(id)
+		// TODO(tzdybal): for some reason, if the proof doesn't match the commitment, the API returns
+		// (false, "blob: invalid proof"), but analysis of the code in celestia-node implies this should
+		// never happen - maybe it's caused by openrpc? There is no way of gently handling errors here,
+		// but the returned value is fine for us.
+		isIncluded, _ := s.blobServ.Included(ctx, height, namespace, proofs[i], commitment)
+		included[i] = isIncluded
+	}
+	return included, nil
+}
+
+func MakeID(height uint64, commitment da.Commitment) da.ID {
+	id := make([]byte, heightLen+len(commitment))
+	binary.LittleEndian.PutUint64(id, height)
+	copy(id[heightLen:], commitment)
+	return id
+}
+
+func SplitID(id da.ID) (uint64, da.Commitment) {
+	if len(id) <= heightLen {
+		return 0, nil
+	}
+	commitment := id[heightLen:]
+	return binary.LittleEndian.Uint64(id[:heightLen]), commitment
+}
diff --git a/nodebuilder/das/cmd/das.go b/nodebuilder/das/cmd/das.go
new file mode 100644
index 0000000000..7512861ac3
--- /dev/null
+++ b/nodebuilder/das/cmd/das.go
@@ -0,0 +1,34 @@
+package cmd
+
+import (
+	"github.com/spf13/cobra"
+
+	cmdnode "github.com/celestiaorg/celestia-node/cmd"
+)
+
+func init() {
+	Cmd.AddCommand(samplingStatsCmd)
+}
+
+var Cmd = &cobra.Command{
+	Use:               "das [command]",
+	Short:             "Allows interaction with the DASer via JSON-RPC",
+	Args:              cobra.NoArgs,
+	PersistentPreRunE: cmdnode.InitClient,
+}
+
+var samplingStatsCmd = &cobra.Command{
+	Use:   "sampling-stats",
+	Short: "Returns the current statistics over the DA sampling process",
+	Args:  cobra.NoArgs,
+	RunE: func(cmd *cobra.Command, args []string) error {
+		client, err := cmdnode.ParseClientFromCtx(cmd.Context())
+		if err != nil {
+			return err
+		}
+		defer client.Close()
+
+		stats, err := client.DAS.SamplingStats(cmd.Context())
+		return cmdnode.PrintOutput(stats, err, nil)
+	},
+}
diff --git a/nodebuilder/das/config.go b/nodebuilder/das/config.go
new file mode 100644
index 0000000000..eeaa382a41
--- /dev/null
+++ b/nodebuilder/das/config.go
@@ -0,0 +1,47 @@
+package das
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/celestiaorg/celestia-node/das"
+	"github.com/celestiaorg/celestia-node/nodebuilder/node"
+	modp2p "github.com/celestiaorg/celestia-node/nodebuilder/p2p"
+)
+
+// Config contains configuration parameters for the DASer (or DASing process)
+type Config das.Parameters
+
+// TODO(@derrandz): parameters need performance testing on a real network to define optimal values
+// DefaultConfig provides the optimal default configuration per node type.
+// For the moment, there is only one default configuration for all node types
+// but this function will provide more once #1261 is addressed.
+//
+// TODO(@derrandz): Address #1261
+func DefaultConfig(tp node.Type) Config {
+	cfg := das.DefaultParameters()
+	switch tp {
+	case node.Light:
+		cfg.SampleTimeout = modp2p.BlockTime * time.Duration(cfg.ConcurrencyLimit)
+	case node.Full:
+		// Default value for DASer concurrency limit is based on dasing using ipld getter.
+		// Full node will primarily use the shrex protocol for sampling, which is much more efficient
+		// and can fully utilize the node's bandwidth with a lower number of parallel sampling workers
+		cfg.ConcurrencyLimit = 6
+		// Full node uses shrex with a fallback to ipld for sampling, so it needs 2x the amount of time
+		// in the worst-case scenario
+		cfg.SampleTimeout = 2 * modp2p.BlockTime * time.Duration(cfg.ConcurrencyLimit)
+	}
+	return Config(cfg)
+}
+
+// Validate performs basic validation of the config.
+// Upon encountering an invalid value, Validate returns an error of type: ErrMisConfig
+func (cfg *Config) Validate() error {
+	err := (*das.Parameters)(cfg).Validate()
+	if err != nil {
+		return fmt.Errorf("moddas misconfiguration: %w", err)
+	}
+
+	return nil
+}
diff --git a/nodebuilder/das/constructors.go b/nodebuilder/das/constructors.go
new file mode 100644
index 0000000000..973aca5679
--- /dev/null
+++ b/nodebuilder/das/constructors.go
@@ -0,0 +1,64 @@
+package das
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"github.com/ipfs/go-datastore"
+
+	"github.com/celestiaorg/go-fraud"
+	libhead "github.com/celestiaorg/go-header"
+
+	"github.com/celestiaorg/celestia-node/das"
+	"github.com/celestiaorg/celestia-node/header"
+	modfraud "github.com/celestiaorg/celestia-node/nodebuilder/fraud"
+	"github.com/celestiaorg/celestia-node/pruner"
+	"github.com/celestiaorg/celestia-node/share"
+	"github.com/celestiaorg/celestia-node/share/eds/byzantine"
+	"github.com/celestiaorg/celestia-node/share/p2p/shrexsub"
+)
+
+var _ Module = (*daserStub)(nil)
+
+var errStub = fmt.Errorf("module/das: stubbed: dasing is not available on bridge nodes")
+
+// daserStub is a stub implementation of the DASer that is used on bridge nodes, so that we can
+// provide a friendlier error when users try to access the daser over the API.
+type daserStub struct{}
+
+func (d daserStub) SamplingStats(context.Context) (das.SamplingStats, error) {
+	return das.SamplingStats{}, errStub
+}
+
+func (d daserStub) WaitCatchUp(context.Context) error {
+	return errStub
+}
+
+func newDaserStub() Module {
+	return &daserStub{}
+}
+
+func newDASer(
+	da share.Availability,
+	hsub libhead.Subscriber[*header.ExtendedHeader],
+	store libhead.Store[*header.ExtendedHeader],
+	batching datastore.Batching,
+	fraudServ fraud.Service[*header.ExtendedHeader],
+	bFn shrexsub.BroadcastFn,
+	availWindow pruner.AvailabilityWindow,
+	options ...das.Option,
+) (*das.DASer, *modfraud.ServiceBreaker[*das.DASer, *header.ExtendedHeader], error) {
+	options = append(options, das.WithSamplingWindow(time.Duration(availWindow)))
+
+	ds, err := das.NewDASer(da, hsub, store, batching, fraudServ, bFn, options...)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	return ds, &modfraud.ServiceBreaker[*das.DASer, *header.ExtendedHeader]{
+		Service:   ds,
+		FraudServ: fraudServ,
+		FraudType: byzantine.BadEncoding,
+	}, nil
+}
diff --git a/nodebuilder/das/das.go b/nodebuilder/das/das.go
new file mode 100644
index 0000000000..f1dddbc4df
--- /dev/null
+++ b/nodebuilder/das/das.go
@@ -0,0 +1,34 @@
+package das
+
+import (
+	"context"
+
+	"github.com/celestiaorg/celestia-node/das"
+)
+
+var _ Module = (*API)(nil)
+
+//go:generate mockgen -destination=mocks/api.go -package=mocks . Module
+type Module interface {
+	// SamplingStats returns the current statistics over the DA sampling process.
+	SamplingStats(ctx context.Context) (das.SamplingStats, error)
+	// WaitCatchUp blocks until DASer finishes catching up to the network head.
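+	//
+	// A hypothetical usage sketch over the RPC client (client assumed):
+	//
+	//	if err := client.DAS.WaitCatchUp(ctx); err != nil {
+	//		return err
+	//	}
+	//	stats, err := client.DAS.SamplingStats(ctx)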
+ WaitCatchUp(ctx context.Context) error +} + +// API is a wrapper around Module for the RPC. +// TODO(@distractedm1nd): These structs need to be autogenerated. +type API struct { + Internal struct { + SamplingStats func(ctx context.Context) (das.SamplingStats, error) `perm:"read"` + WaitCatchUp func(ctx context.Context) error `perm:"read"` + } +} + +func (api *API) SamplingStats(ctx context.Context) (das.SamplingStats, error) { + return api.Internal.SamplingStats(ctx) +} + +func (api *API) WaitCatchUp(ctx context.Context) error { + return api.Internal.WaitCatchUp(ctx) +} diff --git a/nodebuilder/das/mocks/api.go b/nodebuilder/das/mocks/api.go new file mode 100644 index 0000000000..c4046e90e8 --- /dev/null +++ b/nodebuilder/das/mocks/api.go @@ -0,0 +1,65 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/celestiaorg/celestia-node/nodebuilder/das (interfaces: Module) + +// Package mocks is a generated GoMock package. +package mocks + +import ( + context "context" + reflect "reflect" + + das "github.com/celestiaorg/celestia-node/das" + gomock "github.com/golang/mock/gomock" +) + +// MockModule is a mock of Module interface. +type MockModule struct { + ctrl *gomock.Controller + recorder *MockModuleMockRecorder +} + +// MockModuleMockRecorder is the mock recorder for MockModule. +type MockModuleMockRecorder struct { + mock *MockModule +} + +// NewMockModule creates a new mock instance. +func NewMockModule(ctrl *gomock.Controller) *MockModule { + mock := &MockModule{ctrl: ctrl} + mock.recorder = &MockModuleMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockModule) EXPECT() *MockModuleMockRecorder { + return m.recorder +} + +// SamplingStats mocks base method. +func (m *MockModule) SamplingStats(arg0 context.Context) (das.SamplingStats, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SamplingStats", arg0) + ret0, _ := ret[0].(das.SamplingStats) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// SamplingStats indicates an expected call of SamplingStats. +func (mr *MockModuleMockRecorder) SamplingStats(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SamplingStats", reflect.TypeOf((*MockModule)(nil).SamplingStats), arg0) +} + +// WaitCatchUp mocks base method. +func (m *MockModule) WaitCatchUp(arg0 context.Context) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WaitCatchUp", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// WaitCatchUp indicates an expected call of WaitCatchUp. 
+func (mr *MockModuleMockRecorder) WaitCatchUp(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitCatchUp", reflect.TypeOf((*MockModule)(nil).WaitCatchUp), arg0) +} diff --git a/nodebuilder/das/module.go b/nodebuilder/das/module.go new file mode 100644 index 0000000000..0545fffc27 --- /dev/null +++ b/nodebuilder/das/module.go @@ -0,0 +1,66 @@ +package das + +import ( + "context" + + "go.uber.org/fx" + + "github.com/celestiaorg/celestia-node/das" + "github.com/celestiaorg/celestia-node/header" + modfraud "github.com/celestiaorg/celestia-node/nodebuilder/fraud" + "github.com/celestiaorg/celestia-node/nodebuilder/node" +) + +func ConstructModule(tp node.Type, cfg *Config) fx.Option { + var err error + // do not validate daser config for bridge node as it + // does not need it + if tp != node.Bridge { + err = cfg.Validate() + } + + baseComponents := fx.Options( + fx.Supply(*cfg), + fx.Error(err), + fx.Provide( + func(c Config) []das.Option { + return []das.Option{ + das.WithSamplingRange(c.SamplingRange), + das.WithConcurrencyLimit(c.ConcurrencyLimit), + das.WithBackgroundStoreInterval(c.BackgroundStoreInterval), + das.WithSampleFrom(c.SampleFrom), + das.WithSampleTimeout(c.SampleTimeout), + } + }, + ), + ) + + switch tp { + case node.Light, node.Full: + return fx.Module( + "das", + baseComponents, + fx.Provide(fx.Annotate( + newDASer, + fx.OnStart(func(ctx context.Context, breaker *modfraud.ServiceBreaker[*das.DASer, *header.ExtendedHeader]) error { + return breaker.Start(ctx) + }), + fx.OnStop(func(ctx context.Context, breaker *modfraud.ServiceBreaker[*das.DASer, *header.ExtendedHeader]) error { + return breaker.Stop(ctx) + }), + )), + // Module is needed for the RPC handler + fx.Provide(func(das *das.DASer) Module { + return das + }), + ) + case node.Bridge: + return fx.Module( + "das", + baseComponents, + fx.Provide(newDaserStub), + ) + default: + panic("invalid node type") + } +} diff --git a/nodebuilder/das/module_test.go b/nodebuilder/das/module_test.go new file mode 100644 index 0000000000..f1f7da6f65 --- /dev/null +++ b/nodebuilder/das/module_test.go @@ -0,0 +1,31 @@ +package das + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "go.uber.org/fx" + "go.uber.org/fx/fxtest" + + "github.com/celestiaorg/celestia-node/nodebuilder/node" +) + +// TestConstructModule_DASBridgeStub verifies that a bridge node implements a stub daser that +// returns an error and empty das.SamplingStats +func TestConstructModule_DASBridgeStub(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + var mod Module + + cfg := DefaultConfig(node.Bridge) + app := fxtest.New(t, + ConstructModule(node.Bridge, &cfg), + fx.Populate(&mod)). + RequireStart() + defer app.RequireStop() + + _, err := mod.SamplingStats(ctx) + assert.ErrorIs(t, err, errStub) +} diff --git a/nodebuilder/das/opts.go b/nodebuilder/das/opts.go new file mode 100644 index 0000000000..f9c76f1781 --- /dev/null +++ b/nodebuilder/das/opts.go @@ -0,0 +1,11 @@ +package das + +import ( + "github.com/celestiaorg/celestia-node/das" +) + +// WithMetrics is a utility function that is expected to be +// "invoked" by the fx lifecycle. 
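+// A sketch of how it could be wired (illustrative only; the metrics flag
+// handling lives elsewhere in nodebuilder):
+//
+//	fx.Invoke(WithMetrics)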
+func WithMetrics(d *das.DASer) error { + return d.InitMetrics() +} diff --git a/nodebuilder/default_services.go b/nodebuilder/default_services.go new file mode 100644 index 0000000000..430f6ba66b --- /dev/null +++ b/nodebuilder/default_services.go @@ -0,0 +1,25 @@ +package nodebuilder + +import ( + "github.com/celestiaorg/celestia-node/nodebuilder/blob" + "github.com/celestiaorg/celestia-node/nodebuilder/das" + "github.com/celestiaorg/celestia-node/nodebuilder/fraud" + "github.com/celestiaorg/celestia-node/nodebuilder/header" + "github.com/celestiaorg/celestia-node/nodebuilder/node" + "github.com/celestiaorg/celestia-node/nodebuilder/p2p" + "github.com/celestiaorg/celestia-node/nodebuilder/share" + "github.com/celestiaorg/celestia-node/nodebuilder/state" +) + +// PackageToAPI maps a package to its API struct. Currently only used for +// method discovery for openrpc spec generation +var PackageToAPI = map[string]interface{}{ + "fraud": &fraud.API{}, + "state": &state.API{}, + "share": &share.API{}, + "header": &header.API{}, + "das": &das.API{}, + "p2p": &p2p.API{}, + "blob": &blob.API{}, + "node": &node.API{}, +} diff --git a/nodebuilder/fraud/constructors.go b/nodebuilder/fraud/constructors.go new file mode 100644 index 0000000000..eee85d4139 --- /dev/null +++ b/nodebuilder/fraud/constructors.go @@ -0,0 +1,59 @@ +package fraud + +import ( + "github.com/ipfs/go-datastore" + pubsub "github.com/libp2p/go-libp2p-pubsub" + "github.com/libp2p/go-libp2p/core/host" + "go.uber.org/fx" + + "github.com/celestiaorg/go-fraud" + "github.com/celestiaorg/go-fraud/fraudserv" + libhead "github.com/celestiaorg/go-header" + + "github.com/celestiaorg/celestia-node/header" + "github.com/celestiaorg/celestia-node/nodebuilder/p2p" +) + +func fraudUnmarshaler() fraud.ProofUnmarshaler[*header.ExtendedHeader] { + return defaultProofUnmarshaler +} + +func newFraudServiceWithSync( + lc fx.Lifecycle, + sub *pubsub.PubSub, + host host.Host, + hstore libhead.Store[*header.ExtendedHeader], + registry fraud.ProofUnmarshaler[*header.ExtendedHeader], + ds datastore.Batching, + network p2p.Network, +) (Module, fraud.Service[*header.ExtendedHeader], error) { + syncerEnabled := true + pservice := fraudserv.NewProofService(sub, host, hstore.GetByHeight, registry, ds, syncerEnabled, network.String()) + lc.Append(fx.Hook{ + OnStart: pservice.Start, + OnStop: pservice.Stop, + }) + return &module{ + Service: pservice, + }, pservice, nil +} + +func newFraudServiceWithoutSync( + lc fx.Lifecycle, + sub *pubsub.PubSub, + host host.Host, + hstore libhead.Store[*header.ExtendedHeader], + registry fraud.ProofUnmarshaler[*header.ExtendedHeader], + ds datastore.Batching, + network p2p.Network, +) (Module, fraud.Service[*header.ExtendedHeader], error) { + syncerEnabled := false + pservice := fraudserv.NewProofService(sub, host, hstore.GetByHeight, registry, ds, syncerEnabled, network.String()) + lc.Append(fx.Hook{ + OnStart: pservice.Start, + OnStop: pservice.Stop, + }) + return &module{ + Service: pservice, + }, pservice, nil +} diff --git a/nodebuilder/fraud/fraud.go b/nodebuilder/fraud/fraud.go new file mode 100644 index 0000000000..178b0527a1 --- /dev/null +++ b/nodebuilder/fraud/fraud.go @@ -0,0 +1,121 @@ +package fraud + +import ( + "context" + "encoding/json" + "errors" + + "github.com/celestiaorg/go-fraud" + + "github.com/celestiaorg/celestia-node/header" +) + +var _ Module = (*API)(nil) + +// Module encompasses the behavior necessary to subscribe and broadcast fraud proofs within the +// network. 
Any method signature changed here needs to also be changed in the API struct.
+//
+//go:generate mockgen -destination=mocks/api.go -package=mocks . Module
+type Module interface {
+	// Subscribe subscribes to a Proof pub-sub topic of the given type.
+	Subscribe(context.Context, fraud.ProofType) (<-chan *Proof, error)
+	// Get fetches fraud proofs from disk by their type.
+	Get(context.Context, fraud.ProofType) ([]Proof, error)
+}
+
+// API is a wrapper around Module for the RPC.
+// TODO(@distractedm1nd): These structs need to be autogenerated.
+type API struct {
+	Internal struct {
+		Subscribe func(context.Context, fraud.ProofType) (<-chan *Proof, error) `perm:"read"`
+		Get       func(context.Context, fraud.ProofType) ([]Proof, error)      `perm:"read"`
+	}
+}
+
+func (api *API) Subscribe(ctx context.Context, proofType fraud.ProofType) (<-chan *Proof, error) {
+	return api.Internal.Subscribe(ctx, proofType)
+}
+
+func (api *API) Get(ctx context.Context, proofType fraud.ProofType) ([]Proof, error) {
+	return api.Internal.Get(ctx, proofType)
+}
+
+var _ Module = (*module)(nil)
+
+// module is an implementation of Module that uses fraud.Service as a backend. It is used to
+// provide fraud proofs as a non-interface type to the API, and to wrap fraud.Subscriber with a
+// channel of Proofs.
+type module struct {
+	fraud.Service[*header.ExtendedHeader]
+}
+
+func (s *module) Subscribe(ctx context.Context, proofType fraud.ProofType) (<-chan *Proof, error) {
+	subscription, err := s.Service.Subscribe(proofType)
+	if err != nil {
+		return nil, err
+	}
+	proofs := make(chan *Proof)
+	go func() {
+		defer close(proofs)
+		defer subscription.Cancel()
+		for {
+			proof, err := subscription.Proof(ctx)
+			if err != nil {
+				if !errors.Is(err, context.DeadlineExceeded) && !errors.Is(err, context.Canceled) {
+					log.Errorw("fetching proof from subscription", "err", err)
+				}
+				return
+			}
+			select {
+			case <-ctx.Done():
+				return
+			case proofs <- &Proof{Proof: proof}:
+			}
+		}
+	}()
+	return proofs, nil
+}
+
+func (s *module) Get(ctx context.Context, proofType fraud.ProofType) ([]Proof, error) {
+	originalProofs, err := s.Service.Get(ctx, proofType)
+	if err != nil {
+		return nil, err
+	}
+	proofs := make([]Proof, len(originalProofs))
+	for i, originalProof := range originalProofs {
+		proofs[i].Proof = originalProof
+	}
+	return proofs, nil
+}
+
+// Proof embeds the fraud.Proof interface type to provide a concrete type for JSON serialization.
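+// In serialized form it looks roughly like the sketch below ("data" carries the
+// base64 encoding of the binary proof, which is how encoding/json renders []byte;
+// the proof_type value shown is illustrative):
+//
+//	{"proof_type": "badencoding", "data": "<base64-encoded proof>"}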
+type Proof struct {
+	fraud.Proof[*header.ExtendedHeader]
+}
+
+type fraudProofJSON struct {
+	ProofType fraud.ProofType `json:"proof_type"`
+	Data      []byte          `json:"data"`
+}
+
+func (f *Proof) UnmarshalJSON(data []byte) error {
+	var fp fraudProofJSON
+	err := json.Unmarshal(data, &fp)
+	if err != nil {
+		return err
+	}
+	f.Proof, err = defaultProofUnmarshaler.Unmarshal(fp.ProofType, fp.Data)
+	return err
+}
+
+func (f *Proof) MarshalJSON() ([]byte, error) {
+	marshaledProof, err := f.MarshalBinary()
+	if err != nil {
+		return nil, err
+	}
+	fraudProof := &fraudProofJSON{
+		ProofType: f.Type(),
+		Data:      marshaledProof,
+	}
+	return json.Marshal(fraudProof)
+}
diff --git a/nodebuilder/fraud/lifecycle.go b/nodebuilder/fraud/lifecycle.go
new file mode 100644
index 0000000000..50f4e1035b
--- /dev/null
+++ b/nodebuilder/fraud/lifecycle.go
@@ -0,0 +1,89 @@
+package fraud
+
+import (
+	"context"
+	"errors"
+	"fmt"
+
+	"github.com/ipfs/go-datastore"
+
+	"github.com/celestiaorg/go-fraud"
+	libhead "github.com/celestiaorg/go-header"
+)
+
+// service defines the minimal interface with service lifecycle methods
+type service interface {
+	Start(context.Context) error
+	Stop(context.Context) error
+}
+
+// ServiceBreaker wraps any service with a fraud proof subscription of a specific type.
+// If such a proof arrives, the service is stopped automatically.
+// TODO(@Wondertan): Support multiple fraud types.
+type ServiceBreaker[S service, H libhead.Header[H]] struct {
+	Service   S
+	FraudType fraud.ProofType
+	FraudServ fraud.Service[H]
+
+	ctx    context.Context
+	cancel context.CancelFunc
+	sub    fraud.Subscription[H]
+}
+
+// Start starts the inner service if there are no fraud proofs stored.
+// It subscribes to fraud proofs and stops the service whenever necessary.
+func (breaker *ServiceBreaker[S, H]) Start(ctx context.Context) error {
+	if breaker == nil {
+		return nil
+	}
+
+	proofs, err := breaker.FraudServ.Get(ctx, breaker.FraudType)
+	switch {
+	default:
+		return fmt.Errorf("getting proof(%s): %w", breaker.FraudType, err)
+	case err == nil:
+		return &fraud.ErrFraudExists[H]{Proof: proofs}
+	case errors.Is(err, datastore.ErrNotFound):
+	}
+
+	err = breaker.Service.Start(ctx)
+	if err != nil {
+		return err
+	}
+
+	breaker.sub, err = breaker.FraudServ.Subscribe(breaker.FraudType)
+	if err != nil {
+		return fmt.Errorf("subscribing for proof(%s): %w", breaker.FraudType, err)
+	}
+
+	breaker.ctx, breaker.cancel = context.WithCancel(context.Background())
+	go breaker.awaitProof()
+	return nil
+}
+
+// Stop stops the service and cancels subscription.
+func (breaker *ServiceBreaker[S, H]) Stop(ctx context.Context) error {
+	if breaker == nil {
+		return nil
+	}
+
+	if breaker.ctx.Err() != nil {
+		// short circuit if the service was already stopped
+		return nil
+	}
+
+	breaker.sub.Cancel()
+	defer breaker.cancel()
+	return breaker.Service.Stop(ctx)
+}
+
+func (breaker *ServiceBreaker[S, H]) awaitProof() {
+	_, err := breaker.sub.Proof(breaker.ctx)
+	if err != nil {
+		return
+	}
+
+	if err := breaker.Stop(breaker.ctx); err != nil && !errors.Is(err, context.Canceled) {
+		log.Errorw("stopping service", "err", err)
+	}
+}
diff --git a/nodebuilder/fraud/mocks/api.go b/nodebuilder/fraud/mocks/api.go
new file mode 100644
index 0000000000..fcc7a58231
--- /dev/null
+++ b/nodebuilder/fraud/mocks/api.go
@@ -0,0 +1,67 @@
+// Code generated by MockGen. DO NOT EDIT.
+// Source: github.com/celestiaorg/celestia-node/nodebuilder/fraud (interfaces: Module)
+
+// Package mocks is a generated GoMock package.
+package mocks + +import ( + context "context" + reflect "reflect" + + fraud "github.com/celestiaorg/celestia-node/nodebuilder/fraud" + fraud0 "github.com/celestiaorg/go-fraud" + gomock "github.com/golang/mock/gomock" +) + +// MockModule is a mock of Module interface. +type MockModule struct { + ctrl *gomock.Controller + recorder *MockModuleMockRecorder +} + +// MockModuleMockRecorder is the mock recorder for MockModule. +type MockModuleMockRecorder struct { + mock *MockModule +} + +// NewMockModule creates a new mock instance. +func NewMockModule(ctrl *gomock.Controller) *MockModule { + mock := &MockModule{ctrl: ctrl} + mock.recorder = &MockModuleMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockModule) EXPECT() *MockModuleMockRecorder { + return m.recorder +} + +// Get mocks base method. +func (m *MockModule) Get(arg0 context.Context, arg1 fraud0.ProofType) ([]fraud.Proof, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Get", arg0, arg1) + ret0, _ := ret[0].([]fraud.Proof) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Get indicates an expected call of Get. +func (mr *MockModuleMockRecorder) Get(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockModule)(nil).Get), arg0, arg1) +} + +// Subscribe mocks base method. +func (m *MockModule) Subscribe(arg0 context.Context, arg1 fraud0.ProofType) (<-chan *fraud.Proof, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Subscribe", arg0, arg1) + ret0, _ := ret[0].(<-chan *fraud.Proof) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Subscribe indicates an expected call of Subscribe. +func (mr *MockModuleMockRecorder) Subscribe(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Subscribe", reflect.TypeOf((*MockModule)(nil).Subscribe), arg0, arg1) +} diff --git a/nodebuilder/fraud/module.go b/nodebuilder/fraud/module.go new file mode 100644 index 0000000000..bf353f63c6 --- /dev/null +++ b/nodebuilder/fraud/module.go @@ -0,0 +1,38 @@ +package fraud + +import ( + logging "github.com/ipfs/go-log/v2" + "go.uber.org/fx" + + "github.com/celestiaorg/go-fraud" + + "github.com/celestiaorg/celestia-node/header" + "github.com/celestiaorg/celestia-node/nodebuilder/node" +) + +var log = logging.Logger("module/fraud") + +func ConstructModule(tp node.Type) fx.Option { + baseComponent := fx.Options( + fx.Provide(fraudUnmarshaler), + fx.Provide(func(serv fraud.Service[*header.ExtendedHeader]) fraud.Getter[*header.ExtendedHeader] { + return serv + }), + ) + switch tp { + case node.Light: + return fx.Module( + "fraud", + baseComponent, + fx.Provide(newFraudServiceWithSync), + ) + case node.Full, node.Bridge: + return fx.Module( + "fraud", + baseComponent, + fx.Provide(newFraudServiceWithoutSync), + ) + default: + panic("invalid node type") + } +} diff --git a/nodebuilder/fraud/unmarshaler.go b/nodebuilder/fraud/unmarshaler.go new file mode 100644 index 0000000000..d5e0461f01 --- /dev/null +++ b/nodebuilder/fraud/unmarshaler.go @@ -0,0 +1,32 @@ +package fraud + +import ( + "github.com/celestiaorg/go-fraud" + + "github.com/celestiaorg/celestia-node/header" + "github.com/celestiaorg/celestia-node/share/eds/byzantine" +) + +var defaultProofUnmarshaler proofRegistry + +type proofRegistry struct{} + +func (pr proofRegistry) List() []fraud.ProofType { + return []fraud.ProofType{ + byzantine.BadEncoding, 
+ } +} + +func (pr proofRegistry) Unmarshal(proofType fraud.ProofType, data []byte) (fraud.Proof[*header.ExtendedHeader], error) { + switch proofType { + case byzantine.BadEncoding: + befp := &byzantine.BadEncodingProof{} + err := befp.UnmarshalBinary(data) + if err != nil { + return nil, err + } + return befp, nil + default: + return nil, &fraud.ErrNoUnmarshaler{ProofType: proofType} + } +} diff --git a/nodebuilder/gateway/config.go b/nodebuilder/gateway/config.go new file mode 100644 index 0000000000..c49a4749a3 --- /dev/null +++ b/nodebuilder/gateway/config.go @@ -0,0 +1,37 @@ +package gateway + +import ( + "fmt" + "strconv" + + "github.com/celestiaorg/celestia-node/libs/utils" +) + +type Config struct { + Address string + Port string + Enabled bool +} + +func DefaultConfig() Config { + return Config{ + Address: defaultBindAddress, + // do NOT expose the same port as celestia-core by default so that both can run on the same machine + Port: defaultPort, + Enabled: false, + } +} + +func (cfg *Config) Validate() error { + sanitizedAddress, err := utils.ValidateAddr(cfg.Address) + if err != nil { + return fmt.Errorf("gateway: invalid address: %w", err) + } + cfg.Address = sanitizedAddress + + _, err = strconv.Atoi(cfg.Port) + if err != nil { + return fmt.Errorf("gateway: invalid port: %s", err.Error()) + } + return nil +} diff --git a/nodebuilder/gateway/config_test.go b/nodebuilder/gateway/config_test.go new file mode 100644 index 0000000000..9ef3f1e310 --- /dev/null +++ b/nodebuilder/gateway/config_test.go @@ -0,0 +1,18 @@ +package gateway + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +// TestDefaultConfig tests that the default gateway config is correct. +func TestDefaultConfig(t *testing.T) { + expected := Config{ + Address: defaultBindAddress, + Port: defaultPort, + Enabled: false, + } + + assert.Equal(t, expected, DefaultConfig()) +} diff --git a/nodebuilder/gateway/constructors.go b/nodebuilder/gateway/constructors.go new file mode 100644 index 0000000000..c28153b0a5 --- /dev/null +++ b/nodebuilder/gateway/constructors.go @@ -0,0 +1,26 @@ +package gateway + +import ( + "github.com/celestiaorg/celestia-node/api/gateway" + "github.com/celestiaorg/celestia-node/das" + "github.com/celestiaorg/celestia-node/nodebuilder/header" + "github.com/celestiaorg/celestia-node/nodebuilder/share" + "github.com/celestiaorg/celestia-node/nodebuilder/state" +) + +// Handler constructs a new RPC Handler from the given services. 
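+// On bridge nodes the DASer is unavailable, so the module wiring passes a nil
+// daser here (see the bridge case in this package's module.go).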
+func Handler( + state state.Module, + share share.Module, + header header.Module, + daser *das.DASer, + serv *gateway.Server, +) { + handler := gateway.NewHandler(state, share, header, daser) + handler.RegisterEndpoints(serv) + handler.RegisterMiddleware(serv) +} + +func server(cfg *Config) *gateway.Server { + return gateway.NewServer(cfg.Address, cfg.Port) +} diff --git a/nodebuilder/gateway/defaults.go b/nodebuilder/gateway/defaults.go new file mode 100644 index 0000000000..e6c48d5d4e --- /dev/null +++ b/nodebuilder/gateway/defaults.go @@ -0,0 +1,6 @@ +package gateway + +const ( + defaultBindAddress = "localhost" + defaultPort = "26659" +) diff --git a/nodebuilder/gateway/defaults_test.go b/nodebuilder/gateway/defaults_test.go new file mode 100644 index 0000000000..c504f8cca4 --- /dev/null +++ b/nodebuilder/gateway/defaults_test.go @@ -0,0 +1,12 @@ +package gateway + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestServerDefaultConstants(t *testing.T) { + assert.Equal(t, "localhost", defaultBindAddress) + assert.Equal(t, "26659", defaultPort) +} diff --git a/nodebuilder/gateway/flags.go b/nodebuilder/gateway/flags.go new file mode 100644 index 0000000000..6da4a66f03 --- /dev/null +++ b/nodebuilder/gateway/flags.go @@ -0,0 +1,57 @@ +package gateway + +import ( + "fmt" + + "github.com/spf13/cobra" + flag "github.com/spf13/pflag" +) + +var ( + enabledFlag = "gateway" + addrFlag = "gateway.addr" + portFlag = "gateway.port" +) + +// Flags gives a set of hardcoded node/gateway package flags. +func Flags() *flag.FlagSet { + flags := &flag.FlagSet{} + + flags.Bool( + enabledFlag, + false, + "Enables the REST gateway", + ) + flags.String( + addrFlag, + "", + fmt.Sprintf("Set a custom gateway listen address (default: %s)", defaultBindAddress), + ) + flags.String( + portFlag, + "", + fmt.Sprintf("Set a custom gateway port (default: %s)", defaultPort), + ) + + return flags +} + +// ParseFlags parses gateway flags from the given cmd and saves them to the passed config. 
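+// For example, a hypothetical invocation enabling the gateway on a custom
+// address and port could look like:
+//
+//	celestia <node-type> start --gateway --gateway.addr 127.0.0.1 --gateway.port 26659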
+func ParseFlags(cmd *cobra.Command, cfg *Config) { + enabled, err := cmd.Flags().GetBool(enabledFlag) + if cmd.Flags().Changed(enabledFlag) && err == nil { + cfg.Enabled = enabled + } + addr, port := cmd.Flag(addrFlag), cmd.Flag(portFlag) + if !cfg.Enabled && (addr.Changed || port.Changed) { + log.Warn("custom address or port provided without enabling gateway, setting config values") + } + addrVal := addr.Value.String() + if addrVal != "" { + cfg.Address = addrVal + } + portVal := port.Value.String() + if portVal != "" { + cfg.Port = portVal + } +} diff --git a/nodebuilder/gateway/flags_test.go b/nodebuilder/gateway/flags_test.go new file mode 100644 index 0000000000..5f55ac77f2 --- /dev/null +++ b/nodebuilder/gateway/flags_test.go @@ -0,0 +1,95 @@ +package gateway + +import ( + "fmt" + "strconv" + "testing" + + "github.com/spf13/cobra" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestFlags(t *testing.T) { + flags := Flags() + + enabled := flags.Lookup(enabledFlag) + require.NotNil(t, enabled) + assert.Equal(t, "false", enabled.Value.String()) + assert.Equal(t, "Enables the REST gateway", enabled.Usage) + + addr := flags.Lookup(addrFlag) + require.NotNil(t, addr) + assert.Equal(t, "", addr.Value.String()) + assert.Equal(t, fmt.Sprintf("Set a custom gateway listen address (default: %s)", defaultBindAddress), addr.Usage) + + port := flags.Lookup(portFlag) + require.NotNil(t, port) + assert.Equal(t, "", port.Value.String()) + assert.Equal(t, fmt.Sprintf("Set a custom gateway port (default: %s)", defaultPort), port.Usage) +} + +func TestParseFlags(t *testing.T) { + tests := []struct { + name string + enabledFlag bool + addrFlag string + portFlag string + expectedCfg *Config + }{ + { + name: "Enabled flag is true", + enabledFlag: true, + addrFlag: "127.0.0.1", + portFlag: "8080", + expectedCfg: &Config{ + Enabled: true, + Address: "127.0.0.1", + Port: "8080", + }, + }, + { + name: "Enabled flag is false", + enabledFlag: false, + addrFlag: "127.0.0.1", + portFlag: "8080", + expectedCfg: &Config{ + Enabled: false, + Address: "127.0.0.1", + Port: "8080", + }, + }, + { + name: "Enabled flag is false and address/port flags are not changed", + enabledFlag: false, + addrFlag: "", + portFlag: "", + expectedCfg: &Config{ + Enabled: false, + Address: "", + Port: "", + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + cmd := &cobra.Command{} + cfg := &Config{} + + cmd.Flags().AddFlagSet(Flags()) + + err := cmd.Flags().Set(enabledFlag, strconv.FormatBool(test.enabledFlag)) + assert.NoError(t, err) + err = cmd.Flags().Set(addrFlag, test.addrFlag) + assert.NoError(t, err) + err = cmd.Flags().Set(portFlag, test.portFlag) + assert.NoError(t, err) + + ParseFlags(cmd, cfg) + assert.Equal(t, test.expectedCfg.Enabled, cfg.Enabled) + assert.Equal(t, test.expectedCfg.Address, cfg.Address) + assert.Equal(t, test.expectedCfg.Port, cfg.Port) + }) + } +} diff --git a/nodebuilder/gateway/module.go b/nodebuilder/gateway/module.go new file mode 100644 index 0000000000..4cdf325dc0 --- /dev/null +++ b/nodebuilder/gateway/module.go @@ -0,0 +1,62 @@ +package gateway + +import ( + "context" + + logging "github.com/ipfs/go-log/v2" + "go.uber.org/fx" + + "github.com/celestiaorg/celestia-node/api/gateway" + headerServ "github.com/celestiaorg/celestia-node/nodebuilder/header" + "github.com/celestiaorg/celestia-node/nodebuilder/node" + shareServ "github.com/celestiaorg/celestia-node/nodebuilder/share" + stateServ 
"github.com/celestiaorg/celestia-node/nodebuilder/state" +) + +var log = logging.Logger("module/gateway") + +func ConstructModule(tp node.Type, cfg *Config) fx.Option { + // sanitize config values before constructing module + cfgErr := cfg.Validate() + if !cfg.Enabled { + return fx.Options() + } + + baseComponents := fx.Options( + fx.Supply(cfg), + fx.Error(cfgErr), + fx.Provide(fx.Annotate( + server, + fx.OnStart(func(ctx context.Context, server *gateway.Server) error { + return server.Start(ctx) + }), + fx.OnStop(func(ctx context.Context, server *gateway.Server) error { + return server.Stop(ctx) + }), + )), + ) + + switch tp { + case node.Light, node.Full: + return fx.Module( + "gateway", + baseComponents, + fx.Invoke(Handler), + ) + case node.Bridge: + return fx.Module( + "gateway", + baseComponents, + fx.Invoke(func( + state stateServ.Module, + share shareServ.Module, + header headerServ.Module, + serv *gateway.Server, + ) { + Handler(state, share, header, nil, serv) + }), + ) + default: + panic("invalid node type") + } +} diff --git a/nodebuilder/header/cmd/header.go b/nodebuilder/header/cmd/header.go new file mode 100644 index 0000000000..b3bba1eb32 --- /dev/null +++ b/nodebuilder/header/cmd/header.go @@ -0,0 +1,117 @@ +package cmd + +import ( + "encoding/hex" + "fmt" + "strconv" + + "github.com/spf13/cobra" + + cmdnode "github.com/celestiaorg/celestia-node/cmd" +) + +func init() { + Cmd.AddCommand( + localHeadCmd, + networkHeadCmd, + getByHashCmd, + getByHeightCmd, + syncStateCmd, + ) +} + +var Cmd = &cobra.Command{ + Use: "header [command]", + Short: "Allows interaction with the Header Module via JSON-RPC", + Args: cobra.NoArgs, + PersistentPreRunE: cmdnode.InitClient, +} + +var localHeadCmd = &cobra.Command{ + Use: "local-head", + Short: "Returns the ExtendedHeader from the chain head.", + Args: cobra.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + client, err := cmdnode.ParseClientFromCtx(cmd.Context()) + if err != nil { + return err + } + defer client.Close() + + header, err := client.Header.LocalHead(cmd.Context()) + return cmdnode.PrintOutput(header, err, nil) + }, +} + +var networkHeadCmd = &cobra.Command{ + Use: "network-head", + Short: "Provides the Syncer's view of the current network head.", + Args: cobra.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + client, err := cmdnode.ParseClientFromCtx(cmd.Context()) + if err != nil { + return err + } + defer client.Close() + + header, err := client.Header.NetworkHead(cmd.Context()) + return cmdnode.PrintOutput(header, err, nil) + }, +} + +var getByHashCmd = &cobra.Command{ + Use: "get-by-hash", + Short: "Returns the header of the given hash from the node's header store.", + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + client, err := cmdnode.ParseClientFromCtx(cmd.Context()) + if err != nil { + return err + } + defer client.Close() + + hash, err := hex.DecodeString(args[0]) + if err != nil { + return fmt.Errorf("error decoding a hash: expected a hex encoded string:%v", err) + } + header, err := client.Header.GetByHash(cmd.Context(), hash) + return cmdnode.PrintOutput(header, err, nil) + }, +} + +var getByHeightCmd = &cobra.Command{ + Use: "get-by-height", + Short: "Returns the ExtendedHeader at the given height if it is currently available.", + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + client, err := cmdnode.ParseClientFromCtx(cmd.Context()) + if err != nil { + return err + } + defer client.Close() + + height, err 
:= strconv.ParseUint(args[0], 10, 64)
+		if err != nil {
+			return fmt.Errorf("error parsing a height: %v", err)
+		}
+
+		header, err := client.Header.GetByHeight(cmd.Context(), height)
+		return cmdnode.PrintOutput(header, err, nil)
+	},
+}
+
+var syncStateCmd = &cobra.Command{
+	Use:   "sync-state",
+	Short: "Returns the current state of the header Syncer.",
+	Args:  cobra.NoArgs,
+	RunE: func(cmd *cobra.Command, args []string) error {
+		client, err := cmdnode.ParseClientFromCtx(cmd.Context())
+		if err != nil {
+			return err
+		}
+		defer client.Close()
+
+		header, err := client.Header.SyncState(cmd.Context())
+		return cmdnode.PrintOutput(header, err, nil)
+	},
+}
diff --git a/nodebuilder/header/config.go b/nodebuilder/header/config.go
new file mode 100644
index 0000000000..3fb7162ae0
--- /dev/null
+++ b/nodebuilder/header/config.go
@@ -0,0 +1,124 @@
+package header
+
+import (
+	"encoding/hex"
+	"fmt"
+
+	"github.com/libp2p/go-libp2p/core/peer"
+	"github.com/multiformats/go-multiaddr"
+
+	libhead "github.com/celestiaorg/go-header"
+	p2p_exchange "github.com/celestiaorg/go-header/p2p"
+	"github.com/celestiaorg/go-header/store"
+	"github.com/celestiaorg/go-header/sync"
+
+	"github.com/celestiaorg/celestia-node/nodebuilder/node"
+	"github.com/celestiaorg/celestia-node/nodebuilder/p2p"
+)
+
+// MetricsEnabled will be set during runtime if metrics are enabled on the node.
+var MetricsEnabled = false
+
+// Config contains configuration parameters for header retrieval and management.
+type Config struct {
+	// TrustedHash is the Block/Header hash that Nodes use as the starting point for header
+	// synchronization. Only affects the node once on initial sync.
+	TrustedHash string
+	// TrustedPeers are the peers we trust to fetch headers from.
+	// Note: trusted does *not* mean that headers are not verified; it means these peers are
+	// relied upon to serve headers at any moment.
+	TrustedPeers []string
+
+	Store  store.Parameters
+	Syncer sync.Parameters
+
+	Server p2p_exchange.ServerParameters
+	Client p2p_exchange.ClientParameters `toml:",omitempty"`
+}
+
+func DefaultConfig(tp node.Type) Config {
+	cfg := Config{
+		TrustedHash:  "",
+		TrustedPeers: make([]string, 0),
+		Store:        store.DefaultParameters(),
+		Syncer:       sync.DefaultParameters(),
+		Server:       p2p_exchange.DefaultServerParameters(),
+		Client:       p2p_exchange.DefaultClientParameters(),
+	}
+
+	switch tp {
+	case node.Bridge:
+		return cfg
+	case node.Full:
+		return cfg
+	case node.Light:
+		cfg.Store.StoreCacheSize = 512
+		cfg.Store.IndexCacheSize = 2048
+		cfg.Store.WriteBatchSize = 512
+		return cfg
+	default:
+		panic("header: invalid node type")
+	}
+}
+
+func (cfg *Config) trustedPeers(bpeers p2p.Bootstrappers) (infos []peer.AddrInfo, err error) {
+	if len(cfg.TrustedPeers) == 0 {
+		log.Infof("No trusted peers in config, initializing with default bootstrappers as trusted peers")
+		return bpeers, nil
+	}
+
+	infos = make([]peer.AddrInfo, len(cfg.TrustedPeers))
+	for i, tpeer := range cfg.TrustedPeers {
+		ma, err := multiaddr.NewMultiaddr(tpeer)
+		if err != nil {
+			return nil, err
+		}
+		p, err := peer.AddrInfoFromP2pAddr(ma)
+		if err != nil {
+			return nil, err
+		}
+		infos[i] = *p
+	}
+	return
+}
+
+func (cfg *Config) trustedHash(net p2p.Network) (libhead.Hash, error) {
+	if cfg.TrustedHash == "" {
+		gen, err := p2p.GenesisFor(net)
+		if err != nil {
+			return nil, err
+		}
+		return hex.DecodeString(gen)
+	}
+	return hex.DecodeString(cfg.TrustedHash)
+}
+
+// Validate performs basic validation of the config.
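+// It checks the store, syncer, and p2p exchange server parameters and, for all
+// node types except bridge, the p2p exchange client parameters as well.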
+func (cfg *Config) Validate(tp node.Type) error { + err := cfg.Store.Validate() + if err != nil { + return fmt.Errorf("module/header: misconfiguration of store: %w", err) + } + + err = cfg.Syncer.Validate() + if err != nil { + return fmt.Errorf("module/header: misconfiguration of syncer: %w", err) + } + + err = cfg.Server.Validate() + if err != nil { + return fmt.Errorf("module/header: misconfiguration of p2p exchange server: %w", err) + } + + // we do not create a client for bridge nodes + if tp == node.Bridge { + return nil + } + + err = cfg.Client.Validate() + if err != nil { + return fmt.Errorf("module/header: misconfiguration of p2p exchange client: %w", err) + } + + return nil +} diff --git a/nodebuilder/header/constructors.go b/nodebuilder/header/constructors.go new file mode 100644 index 0000000000..a78d609d8e --- /dev/null +++ b/nodebuilder/header/constructors.go @@ -0,0 +1,131 @@ +package header + +import ( + "context" + + "github.com/ipfs/go-datastore" + "github.com/libp2p/go-libp2p/core/host" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/core/peerstore" + "github.com/libp2p/go-libp2p/p2p/net/conngater" + "go.uber.org/fx" + + libfraud "github.com/celestiaorg/go-fraud" + libhead "github.com/celestiaorg/go-header" + "github.com/celestiaorg/go-header/p2p" + "github.com/celestiaorg/go-header/store" + "github.com/celestiaorg/go-header/sync" + + modfraud "github.com/celestiaorg/celestia-node/nodebuilder/fraud" + modp2p "github.com/celestiaorg/celestia-node/nodebuilder/p2p" + "github.com/celestiaorg/celestia-node/share/eds/byzantine" +) + +// newP2PExchange constructs a new Exchange for headers. +func newP2PExchange[H libhead.Header[H]]( + lc fx.Lifecycle, + cfg Config, + bpeers modp2p.Bootstrappers, + network modp2p.Network, + host host.Host, + conngater *conngater.BasicConnectionGater, + pidstore p2p.PeerIDStore, +) (libhead.Exchange[H], error) { + peers, err := cfg.trustedPeers(bpeers) + if err != nil { + return nil, err + } + ids := make([]peer.ID, len(peers)) + for index, peer := range peers { + ids[index] = peer.ID + host.Peerstore().AddAddrs(peer.ID, peer.Addrs, peerstore.PermanentAddrTTL) + } + + opts := []p2p.Option[p2p.ClientParameters]{ + p2p.WithParams(cfg.Client), + p2p.WithNetworkID[p2p.ClientParameters](network.String()), + p2p.WithChainID(network.String()), + p2p.WithPeerIDStore[p2p.ClientParameters](pidstore), + } + if MetricsEnabled { + opts = append(opts, p2p.WithMetrics[p2p.ClientParameters]()) + } + + exchange, err := p2p.NewExchange[H](host, ids, conngater, opts...) + if err != nil { + return nil, err + } + lc.Append(fx.Hook{ + OnStart: func(ctx context.Context) error { + return exchange.Start(ctx) + }, + OnStop: func(ctx context.Context) error { + return exchange.Stop(ctx) + }, + }) + return exchange, nil +} + +// newSyncer constructs new Syncer for headers. +func newSyncer[H libhead.Header[H]]( + ex libhead.Exchange[H], + fservice libfraud.Service[H], + store libhead.Store[H], + sub libhead.Subscriber[H], + cfg Config, +) (*sync.Syncer[H], *modfraud.ServiceBreaker[*sync.Syncer[H], H], error) { + opts := []sync.Option{sync.WithParams(cfg.Syncer), sync.WithBlockTime(modp2p.BlockTime)} + if MetricsEnabled { + opts = append(opts, sync.WithMetrics()) + } + + syncer, err := sync.NewSyncer[H](ex, store, sub, opts...) 
+ if err != nil { + return nil, nil, err + } + + return syncer, &modfraud.ServiceBreaker[*sync.Syncer[H], H]{ + Service: syncer, + FraudType: byzantine.BadEncoding, + FraudServ: fservice, + }, nil +} + +// newInitStore constructs an initialized store +func newInitStore[H libhead.Header[H]]( + lc fx.Lifecycle, + cfg Config, + net modp2p.Network, + ds datastore.Batching, + ex libhead.Exchange[H], +) (libhead.Store[H], error) { + opts := []store.Option{store.WithParams(cfg.Store)} + if MetricsEnabled { + opts = append(opts, store.WithMetrics()) + } + + s, err := store.NewStore[H](ds, opts...) + if err != nil { + return nil, err + } + + trustedHash, err := cfg.trustedHash(net) + if err != nil { + return nil, err + } + + lc.Append(fx.Hook{ + OnStart: func(ctx context.Context) error { + err = store.Init[H](ctx, s, ex, trustedHash) + if err != nil { + return err + } + return s.Start(ctx) + }, + OnStop: func(ctx context.Context) error { + return s.Stop(ctx) + }, + }) + + return s, nil +} diff --git a/cmd/flags_header.go b/nodebuilder/header/flags.go similarity index 68% rename from cmd/flags_header.go rename to nodebuilder/header/flags.go index 669665dc51..aa4cb093a1 100644 --- a/cmd/flags_header.go +++ b/nodebuilder/header/flags.go @@ -1,4 +1,4 @@ -package cmd +package header import ( "encoding/hex" @@ -7,8 +7,6 @@ import ( "github.com/multiformats/go-multiaddr" "github.com/spf13/cobra" flag "github.com/spf13/pflag" - - "github.com/celestiaorg/celestia-node/node" ) var ( @@ -16,8 +14,8 @@ var ( headersTrustedPeersFlag = "headers.trusted-peers" ) -// HeadersFlags gives a set of hardcoded Header package flags. -func HeadersFlags() *flag.FlagSet { +// Flags gives a set of hardcoded Header package flags. +func Flags() *flag.FlagSet { flags := &flag.FlagSet{} flags.AddFlagSet(TrustedPeersFlags()) @@ -26,16 +24,12 @@ func HeadersFlags() *flag.FlagSet { return flags } -// ParseHeadersFlags parses Header package flags from the given cmd and applies values to Env. -func ParseHeadersFlags(cmd *cobra.Command, env *Env) error { - if err := ParseTrustedHashFlags(cmd, env); err != nil { - return err - } - if err := ParseTrustedPeerFlags(cmd, env); err != nil { +// ParseFlags parses Header package flags from the given cmd and applies them to the passed config. +func ParseFlags(cmd *cobra.Command, cfg *Config) error { + if err := ParseTrustedHashFlags(cmd, cfg); err != nil { return err } - - return nil + return ParseTrustedPeerFlags(cmd, cfg) } // TrustedPeersFlags returns a set of flags. @@ -46,12 +40,15 @@ func TrustedPeersFlags() *flag.FlagSet { nil, "Multiaddresses of a reliable peers to fetch headers from. (Format: multiformats.io/multiaddr)", ) - return flags } -// ParseTrustedPeerFlags parses Header package flags from the given cmd and applies values to Env. -func ParseTrustedPeerFlags(cmd *cobra.Command, env *Env) error { +// ParseTrustedPeerFlags parses Header package flags from the given cmd and applies them to the +// passed config. +func ParseTrustedPeerFlags( + cmd *cobra.Command, + cfg *Config, +) error { tpeers, err := cmd.Flags().GetStringSlice(headersTrustedPeersFlag) if err != nil { return err @@ -63,9 +60,7 @@ func ParseTrustedPeerFlags(cmd *cobra.Command, env *Env) error { return fmt.Errorf("cmd: while parsing '%s' with peer addr '%s': %w", headersTrustedPeersFlag, tpeer, err) } } - - env.AddOptions(node.WithTrustedPeers(tpeers...)) - + cfg.TrustedPeers = append(cfg.TrustedPeers, tpeers...) return nil } @@ -78,12 +73,15 @@ func TrustedHashFlags() *flag.FlagSet { "", "Hex encoded header hash. 
Used to subjectively initialize header synchronization", ) - return flags } -// ParseTrustedHashFlags parses Header package flags from the given cmd and applies values to Env. -func ParseTrustedHashFlags(cmd *cobra.Command, env *Env) error { +// ParseTrustedHashFlags parses Header package flags from the given cmd and saves them to the +// passed config. +func ParseTrustedHashFlags( + cmd *cobra.Command, + cfg *Config, +) error { hash := cmd.Flag(headersTrustedHashFlag).Value.String() if hash != "" { _, err := hex.DecodeString(hash) @@ -91,8 +89,7 @@ func ParseTrustedHashFlags(cmd *cobra.Command, env *Env) error { return fmt.Errorf("cmd: while parsing '%s': %w", headersTrustedHashFlag, err) } - env.AddOptions(node.WithTrustedHash(hash)) + cfg.TrustedHash = hash } - return nil } diff --git a/nodebuilder/header/header.go b/nodebuilder/header/header.go new file mode 100644 index 0000000000..f807796eb6 --- /dev/null +++ b/nodebuilder/header/header.go @@ -0,0 +1,109 @@ +package header + +import ( + "context" + + libhead "github.com/celestiaorg/go-header" + "github.com/celestiaorg/go-header/sync" + + "github.com/celestiaorg/celestia-node/header" +) + +// Module exposes the functionality needed for querying headers from the network. +// Any method signature changed here needs to also be changed in the API struct. +// +//go:generate mockgen -destination=mocks/api.go -package=mocks . Module +type Module interface { + // LocalHead returns the ExtendedHeader of the chain head. + LocalHead(context.Context) (*header.ExtendedHeader, error) + + // GetByHash returns the header of the given hash from the node's header store. + GetByHash(ctx context.Context, hash libhead.Hash) (*header.ExtendedHeader, error) + // GetRangeByHeight returns the given range (from:to) of ExtendedHeaders + // from the node's header store and verifies that the returned headers are + // adjacent to each other. + GetRangeByHeight( + ctx context.Context, + from *header.ExtendedHeader, + to uint64, + ) ([]*header.ExtendedHeader, error) + // GetByHeight returns the ExtendedHeader at the given height if it is + // currently available. + GetByHeight(context.Context, uint64) (*header.ExtendedHeader, error) + // WaitForHeight blocks until the header at the given height has been processed + // by the store or context deadline is exceeded. + WaitForHeight(context.Context, uint64) (*header.ExtendedHeader, error) + + // SyncState returns the current state of the header Syncer. + SyncState(context.Context) (sync.State, error) + // SyncWait blocks until the header Syncer is synced to network head. + SyncWait(ctx context.Context) error + // NetworkHead provides the Syncer's view of the current network head. + NetworkHead(ctx context.Context) (*header.ExtendedHeader, error) + + // Subscribe to recent ExtendedHeaders from the network. + Subscribe(ctx context.Context) (<-chan *header.ExtendedHeader, error) +} + +// API is a wrapper around Module for the RPC. +// TODO(@distractedm1nd): These structs need to be autogenerated. 
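+//
+// From a client's perspective, calls go through the Internal struct; a usage
+// sketch (mirroring the CLI handlers in this package's cmd):
+//
+//	eh, err := client.Header.GetByHeight(ctx, 42)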
+type API struct { + Internal struct { + LocalHead func(context.Context) (*header.ExtendedHeader, error) `perm:"read"` + GetByHash func( + ctx context.Context, + hash libhead.Hash, + ) (*header.ExtendedHeader, error) `perm:"read"` + GetRangeByHeight func( + context.Context, + *header.ExtendedHeader, + uint64, + ) ([]*header.ExtendedHeader, error) `perm:"read"` + GetByHeight func(context.Context, uint64) (*header.ExtendedHeader, error) `perm:"read"` + WaitForHeight func(context.Context, uint64) (*header.ExtendedHeader, error) `perm:"read"` + SyncState func(ctx context.Context) (sync.State, error) `perm:"read"` + SyncWait func(ctx context.Context) error `perm:"read"` + NetworkHead func(ctx context.Context) (*header.ExtendedHeader, error) `perm:"read"` + Subscribe func(ctx context.Context) (<-chan *header.ExtendedHeader, error) `perm:"read"` + } +} + +func (api *API) GetByHash(ctx context.Context, hash libhead.Hash) (*header.ExtendedHeader, error) { + return api.Internal.GetByHash(ctx, hash) +} + +func (api *API) GetRangeByHeight( + ctx context.Context, + from *header.ExtendedHeader, + to uint64, +) ([]*header.ExtendedHeader, error) { + return api.Internal.GetRangeByHeight(ctx, from, to) +} + +func (api *API) GetByHeight(ctx context.Context, u uint64) (*header.ExtendedHeader, error) { + return api.Internal.GetByHeight(ctx, u) +} + +func (api *API) WaitForHeight(ctx context.Context, u uint64) (*header.ExtendedHeader, error) { + return api.Internal.WaitForHeight(ctx, u) +} + +func (api *API) LocalHead(ctx context.Context) (*header.ExtendedHeader, error) { + return api.Internal.LocalHead(ctx) +} + +func (api *API) SyncState(ctx context.Context) (sync.State, error) { + return api.Internal.SyncState(ctx) +} + +func (api *API) SyncWait(ctx context.Context) error { + return api.Internal.SyncWait(ctx) +} + +func (api *API) NetworkHead(ctx context.Context) (*header.ExtendedHeader, error) { + return api.Internal.NetworkHead(ctx) +} + +func (api *API) Subscribe(ctx context.Context) (<-chan *header.ExtendedHeader, error) { + return api.Internal.Subscribe(ctx) +} diff --git a/nodebuilder/header/mocks/api.go b/nodebuilder/header/mocks/api.go new file mode 100644 index 0000000000..b0d2b961d9 --- /dev/null +++ b/nodebuilder/header/mocks/api.go @@ -0,0 +1,172 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/celestiaorg/celestia-node/nodebuilder/header (interfaces: Module) + +// Package mocks is a generated GoMock package. +package mocks + +import ( + context "context" + reflect "reflect" + + header "github.com/celestiaorg/celestia-node/header" + header0 "github.com/celestiaorg/go-header" + sync "github.com/celestiaorg/go-header/sync" + gomock "github.com/golang/mock/gomock" +) + +// MockModule is a mock of Module interface. +type MockModule struct { + ctrl *gomock.Controller + recorder *MockModuleMockRecorder +} + +// MockModuleMockRecorder is the mock recorder for MockModule. +type MockModuleMockRecorder struct { + mock *MockModule +} + +// NewMockModule creates a new mock instance. +func NewMockModule(ctrl *gomock.Controller) *MockModule { + mock := &MockModule{ctrl: ctrl} + mock.recorder = &MockModuleMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockModule) EXPECT() *MockModuleMockRecorder { + return m.recorder +} + +// GetByHash mocks base method. 
+func (m *MockModule) GetByHash(arg0 context.Context, arg1 header0.Hash) (*header.ExtendedHeader, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetByHash", arg0, arg1) + ret0, _ := ret[0].(*header.ExtendedHeader) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetByHash indicates an expected call of GetByHash. +func (mr *MockModuleMockRecorder) GetByHash(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetByHash", reflect.TypeOf((*MockModule)(nil).GetByHash), arg0, arg1) +} + +// GetByHeight mocks base method. +func (m *MockModule) GetByHeight(arg0 context.Context, arg1 uint64) (*header.ExtendedHeader, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetByHeight", arg0, arg1) + ret0, _ := ret[0].(*header.ExtendedHeader) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetByHeight indicates an expected call of GetByHeight. +func (mr *MockModuleMockRecorder) GetByHeight(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetByHeight", reflect.TypeOf((*MockModule)(nil).GetByHeight), arg0, arg1) +} + +// GetRangeByHeight mocks base method. +func (m *MockModule) GetRangeByHeight(arg0 context.Context, arg1 *header.ExtendedHeader, arg2 uint64) ([]*header.ExtendedHeader, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetRangeByHeight", arg0, arg1, arg2) + ret0, _ := ret[0].([]*header.ExtendedHeader) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetRangeByHeight indicates an expected call of GetRangeByHeight. +func (mr *MockModuleMockRecorder) GetRangeByHeight(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetRangeByHeight", reflect.TypeOf((*MockModule)(nil).GetRangeByHeight), arg0, arg1, arg2) +} + +// LocalHead mocks base method. +func (m *MockModule) LocalHead(arg0 context.Context) (*header.ExtendedHeader, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "LocalHead", arg0) + ret0, _ := ret[0].(*header.ExtendedHeader) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// LocalHead indicates an expected call of LocalHead. +func (mr *MockModuleMockRecorder) LocalHead(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LocalHead", reflect.TypeOf((*MockModule)(nil).LocalHead), arg0) +} + +// NetworkHead mocks base method. +func (m *MockModule) NetworkHead(arg0 context.Context) (*header.ExtendedHeader, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NetworkHead", arg0) + ret0, _ := ret[0].(*header.ExtendedHeader) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NetworkHead indicates an expected call of NetworkHead. +func (mr *MockModuleMockRecorder) NetworkHead(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetworkHead", reflect.TypeOf((*MockModule)(nil).NetworkHead), arg0) +} + +// Subscribe mocks base method. +func (m *MockModule) Subscribe(arg0 context.Context) (<-chan *header.ExtendedHeader, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Subscribe", arg0) + ret0, _ := ret[0].(<-chan *header.ExtendedHeader) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Subscribe indicates an expected call of Subscribe. 
+func (mr *MockModuleMockRecorder) Subscribe(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Subscribe", reflect.TypeOf((*MockModule)(nil).Subscribe), arg0) +} + +// SyncState mocks base method. +func (m *MockModule) SyncState(arg0 context.Context) (sync.State, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SyncState", arg0) + ret0, _ := ret[0].(sync.State) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// SyncState indicates an expected call of SyncState. +func (mr *MockModuleMockRecorder) SyncState(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncState", reflect.TypeOf((*MockModule)(nil).SyncState), arg0) +} + +// SyncWait mocks base method. +func (m *MockModule) SyncWait(arg0 context.Context) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SyncWait", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// SyncWait indicates an expected call of SyncWait. +func (mr *MockModuleMockRecorder) SyncWait(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncWait", reflect.TypeOf((*MockModule)(nil).SyncWait), arg0) +} + +// WaitForHeight mocks base method. +func (m *MockModule) WaitForHeight(arg0 context.Context, arg1 uint64) (*header.ExtendedHeader, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WaitForHeight", arg0, arg1) + ret0, _ := ret[0].(*header.ExtendedHeader) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// WaitForHeight indicates an expected call of WaitForHeight. +func (mr *MockModuleMockRecorder) WaitForHeight(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitForHeight", reflect.TypeOf((*MockModule)(nil).WaitForHeight), arg0, arg1) +} diff --git a/nodebuilder/header/module.go b/nodebuilder/header/module.go new file mode 100644 index 0000000000..4be25f7125 --- /dev/null +++ b/nodebuilder/header/module.go @@ -0,0 +1,115 @@ +package header + +import ( + "context" + + "github.com/ipfs/go-datastore" + logging "github.com/ipfs/go-log/v2" + pubsub "github.com/libp2p/go-libp2p-pubsub" + "github.com/libp2p/go-libp2p/core/host" + "go.uber.org/fx" + + libhead "github.com/celestiaorg/go-header" + "github.com/celestiaorg/go-header/p2p" + "github.com/celestiaorg/go-header/sync" + + "github.com/celestiaorg/celestia-node/header" + "github.com/celestiaorg/celestia-node/libs/pidstore" + modfraud "github.com/celestiaorg/celestia-node/nodebuilder/fraud" + "github.com/celestiaorg/celestia-node/nodebuilder/node" + modp2p "github.com/celestiaorg/celestia-node/nodebuilder/p2p" +) + +var log = logging.Logger("module/header") + +func ConstructModule[H libhead.Header[H]](tp node.Type, cfg *Config) fx.Option { + // sanitize config values before constructing module + cfgErr := cfg.Validate(tp) + + baseComponents := fx.Options( + fx.Supply(*cfg), + fx.Error(cfgErr), + fx.Provide(newHeaderService), + fx.Provide(newInitStore[H]), + fx.Provide(func(subscriber *p2p.Subscriber[H]) libhead.Subscriber[H] { + return subscriber + }), + fx.Provide(fx.Annotate( + newSyncer[H], + fx.OnStart(func( + ctx context.Context, + breaker *modfraud.ServiceBreaker[*sync.Syncer[H], H], + ) error { + return breaker.Start(ctx) + }), + fx.OnStop(func( + ctx context.Context, + breaker *modfraud.ServiceBreaker[*sync.Syncer[H], H], + ) error { + return breaker.Stop(ctx) + }), + )), + fx.Provide(fx.Annotate( + func(ps *pubsub.PubSub, 
network modp2p.Network) (*p2p.Subscriber[H], error) { + opts := []p2p.SubscriberOption{p2p.WithSubscriberNetworkID(network.String())} + if MetricsEnabled { + opts = append(opts, p2p.WithSubscriberMetrics()) + } + return p2p.NewSubscriber[H](ps, header.MsgID, opts...) + }, + fx.OnStart(func(ctx context.Context, sub *p2p.Subscriber[H]) error { + return sub.Start(ctx) + }), + fx.OnStop(func(ctx context.Context, sub *p2p.Subscriber[H]) error { + return sub.Stop(ctx) + }), + )), + fx.Provide(fx.Annotate( + func( + cfg Config, + host host.Host, + store libhead.Store[H], + network modp2p.Network, + ) (*p2p.ExchangeServer[H], error) { + opts := []p2p.Option[p2p.ServerParameters]{ + p2p.WithParams(cfg.Server), + p2p.WithNetworkID[p2p.ServerParameters](network.String()), + } + if MetricsEnabled { + opts = append(opts, p2p.WithMetrics[p2p.ServerParameters]()) + } + + return p2p.NewExchangeServer[H](host, store, opts...) + }, + fx.OnStart(func(ctx context.Context, server *p2p.ExchangeServer[H]) error { + return server.Start(ctx) + }), + fx.OnStop(func(ctx context.Context, server *p2p.ExchangeServer[H]) error { + return server.Stop(ctx) + }), + )), + ) + + switch tp { + case node.Light, node.Full: + return fx.Module( + "header", + baseComponents, + fx.Provide(newP2PExchange[H]), + fx.Provide(func(ctx context.Context, ds datastore.Batching) (p2p.PeerIDStore, error) { + return pidstore.NewPeerIDStore(ctx, ds) + }), + ) + case node.Bridge: + return fx.Module( + "header", + baseComponents, + fx.Provide(func(subscriber *p2p.Subscriber[H]) libhead.Broadcaster[H] { + return subscriber + }), + fx.Supply(header.MakeExtendedHeader), + ) + default: + panic("invalid node type") + } +} diff --git a/nodebuilder/header/module_test.go b/nodebuilder/header/module_test.go new file mode 100644 index 0000000000..23ca41050f --- /dev/null +++ b/nodebuilder/header/module_test.go @@ -0,0 +1,129 @@ +package header + +import ( + "context" + "testing" + "time" + + "github.com/ipfs/go-datastore" + "github.com/libp2p/go-libp2p" + pubsub "github.com/libp2p/go-libp2p-pubsub" + "github.com/libp2p/go-libp2p/p2p/net/conngater" + "github.com/stretchr/testify/require" + "go.uber.org/fx" + "go.uber.org/fx/fxtest" + + "github.com/celestiaorg/go-fraud" + libhead "github.com/celestiaorg/go-header" + "github.com/celestiaorg/go-header/p2p" + "github.com/celestiaorg/go-header/store" + "github.com/celestiaorg/go-header/sync" + + "github.com/celestiaorg/celestia-node/header" + "github.com/celestiaorg/celestia-node/libs/pidstore" + "github.com/celestiaorg/celestia-node/nodebuilder/node" + modp2p "github.com/celestiaorg/celestia-node/nodebuilder/p2p" +) + +// TestConstructModule_StoreParams ensures that all passed via functional options +// params are set in store correctly. 
+func TestConstructModule_StoreParams(t *testing.T) {
+	cfg := DefaultConfig(node.Light)
+	cfg.Store.StoreCacheSize = 15
+	cfg.Store.IndexCacheSize = 25
+	cfg.Store.WriteBatchSize = 35
+	var headerStore *store.Store[*header.ExtendedHeader]
+
+	app := fxtest.New(t,
+		fx.Supply(modp2p.Private),
+		fx.Supply(modp2p.Bootstrappers{}),
+		fx.Provide(context.Background),
+		fx.Provide(libp2p.New),
+		fx.Provide(conngater.NewBasicConnectionGater),
+		fx.Provide(func() (datastore.Batching, datastore.Datastore) {
+			ds := datastore.NewMapDatastore()
+			return ds, ds
+		}),
+		ConstructModule[*header.ExtendedHeader](node.Light, &cfg),
+		fx.Invoke(
+			func(s libhead.Store[*header.ExtendedHeader]) {
+				ss := s.(*store.Store[*header.ExtendedHeader])
+				headerStore = ss
+			}),
+	)
+	require.NoError(t, app.Err())
+	require.Equal(t, headerStore.Params.StoreCacheSize, cfg.Store.StoreCacheSize)
+	require.Equal(t, headerStore.Params.IndexCacheSize, cfg.Store.IndexCacheSize)
+	require.Equal(t, headerStore.Params.WriteBatchSize, cfg.Store.WriteBatchSize)
+}
+
+// TestConstructModule_SyncerParams ensures that all passed via functional options
+// params are set in syncer correctly.
+func TestConstructModule_SyncerParams(t *testing.T) {
+	cfg := DefaultConfig(node.Light)
+	cfg.Syncer.TrustingPeriod = time.Hour
+	cfg.TrustedPeers = []string{"/ip4/1.2.3.4/tcp/12345/p2p/12D3KooWNaJ1y1Yio3fFJEXCZyd1Cat3jmrPdgkYCrHfKD3Ce21p"}
+	var syncer *sync.Syncer[*header.ExtendedHeader]
+	app := fxtest.New(t,
+		fx.Supply(modp2p.Private),
+		fx.Supply(modp2p.Bootstrappers{}),
+		fx.Provide(context.Background),
+		fx.Provide(libp2p.New),
+		fx.Provide(func(b datastore.Batching) (*conngater.BasicConnectionGater, error) {
+			return conngater.NewBasicConnectionGater(b)
+		}),
+		fx.Provide(func() *pubsub.PubSub {
+			return nil
+		}),
+		fx.Provide(func() datastore.Batching {
+			return datastore.NewMapDatastore()
+		}),
+		fx.Provide(func() fraud.Service[*header.ExtendedHeader] {
+			return nil
+		}),
+		ConstructModule[*header.ExtendedHeader](node.Light, &cfg),
+		fx.Invoke(func(s *sync.Syncer[*header.ExtendedHeader]) {
+			syncer = s
+		}),
+	)
+	// check the app error before dereferencing syncer: if construction failed,
+	// syncer is nil and the params assertion would panic instead of reporting it
+	require.NoError(t, app.Err())
+	require.Equal(t, cfg.Syncer.TrustingPeriod, syncer.Params.TrustingPeriod)
+}
+
+// TestConstructModule_ExchangeParams ensures that all passed via functional options
+// params are set in exchange correctly.
+func TestConstructModule_ExchangeParams(t *testing.T) { + cfg := DefaultConfig(node.Light) + cfg.Client.MaxHeadersPerRangeRequest = 15 + cfg.TrustedPeers = []string{"/ip4/1.2.3.4/tcp/12345/p2p/12D3KooWNaJ1y1Yio3fFJEXCZyd1Cat3jmrPdgkYCrHfKD3Ce21p"} + var exchange *p2p.Exchange[*header.ExtendedHeader] + var exchangeServer *p2p.ExchangeServer[*header.ExtendedHeader] + + app := fxtest.New(t, + fx.Provide(pidstore.NewPeerIDStore), + fx.Provide(context.Background), + fx.Supply(modp2p.Private), + fx.Supply(modp2p.Bootstrappers{}), + fx.Provide(libp2p.New), + fx.Provide(func() datastore.Batching { + return datastore.NewMapDatastore() + }), + ConstructModule[*header.ExtendedHeader](node.Light, &cfg), + fx.Provide(func(b datastore.Batching) (*conngater.BasicConnectionGater, error) { + return conngater.NewBasicConnectionGater(b) + }), + fx.Invoke( + func(e libhead.Exchange[*header.ExtendedHeader], server *p2p.ExchangeServer[*header.ExtendedHeader]) { + ex := e.(*p2p.Exchange[*header.ExtendedHeader]) + exchange = ex + exchangeServer = server + }), + ) + require.NoError(t, app.Err()) + require.Equal(t, exchange.Params.MaxHeadersPerRangeRequest, cfg.Client.MaxHeadersPerRangeRequest) + require.Equal(t, exchange.Params.RangeRequestTimeout, cfg.Client.RangeRequestTimeout) + + require.Equal(t, exchangeServer.Params.WriteDeadline, cfg.Server.WriteDeadline) + require.Equal(t, exchangeServer.Params.ReadDeadline, cfg.Server.ReadDeadline) + require.Equal(t, exchangeServer.Params.RangeRequestTimeout, cfg.Server.RangeRequestTimeout) +} diff --git a/nodebuilder/header/service.go b/nodebuilder/header/service.go new file mode 100644 index 0000000000..072ef070c6 --- /dev/null +++ b/nodebuilder/header/service.go @@ -0,0 +1,140 @@ +package header + +import ( + "context" + "fmt" + + libhead "github.com/celestiaorg/go-header" + "github.com/celestiaorg/go-header/p2p" + "github.com/celestiaorg/go-header/sync" + + "github.com/celestiaorg/celestia-node/header" +) + +// Service represents the header Service that can be started / stopped on a node. +// Service's main function is to manage its sub-services. Service can contain several +// sub-services, such as Exchange, ExchangeServer, Syncer, and so forth. +type Service struct { + ex libhead.Exchange[*header.ExtendedHeader] + + syncer syncer + sub libhead.Subscriber[*header.ExtendedHeader] + p2pServer *p2p.ExchangeServer[*header.ExtendedHeader] + store libhead.Store[*header.ExtendedHeader] +} + +// syncer bare minimum Syncer interface for testing +type syncer interface { + libhead.Head[*header.ExtendedHeader] + + State() sync.State + SyncWait(ctx context.Context) error +} + +// newHeaderService creates a new instance of header Service. 
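+// The constructor is registered with fx.Provide in ConstructModule, so the DI
+// container resolves all five dependencies; called by hand it would look
+// roughly like this (sketch only, with the dependency values elided):
+//
+//	var mod Module = newHeaderService(syncer, sub, server, ex, store)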
+func newHeaderService( + syncer *sync.Syncer[*header.ExtendedHeader], + sub libhead.Subscriber[*header.ExtendedHeader], + p2pServer *p2p.ExchangeServer[*header.ExtendedHeader], + ex libhead.Exchange[*header.ExtendedHeader], + store libhead.Store[*header.ExtendedHeader], +) Module { + return &Service{ + syncer: syncer, + sub: sub, + p2pServer: p2pServer, + ex: ex, + store: store, + } +} + +func (s *Service) GetByHash(ctx context.Context, hash libhead.Hash) (*header.ExtendedHeader, error) { + return s.store.Get(ctx, hash) +} + +func (s *Service) GetRangeByHeight( + ctx context.Context, + from *header.ExtendedHeader, + to uint64, +) ([]*header.ExtendedHeader, error) { + return s.store.GetRangeByHeight(ctx, from, to) +} + +func (s *Service) GetByHeight(ctx context.Context, height uint64) (*header.ExtendedHeader, error) { + head, err := s.syncer.Head(ctx) + switch { + case err != nil: + return nil, err + case head.Height() == height: + return head, nil + case head.Height()+1 < height: + return nil, fmt.Errorf("header: given height is from the future: "+ + "networkHeight: %d, requestedHeight: %d", head.Height(), height) + } + + // TODO(vgonkivs): remove after https://github.com/celestiaorg/go-header/issues/32 is + // implemented and fetch header from HeaderEx if missing locally + head, err = s.store.Head(ctx) + switch { + case err != nil: + return nil, err + case head.Height() == height: + return head, nil + // `+1` allows for one header network lag, e.g. user request header that is milliseconds away + case head.Height()+1 < height: + return nil, fmt.Errorf("header: syncing in progress: "+ + "localHeadHeight: %d, requestedHeight: %d", head.Height(), height) + default: + return s.store.GetByHeight(ctx, height) + } +} + +func (s *Service) WaitForHeight(ctx context.Context, height uint64) (*header.ExtendedHeader, error) { + return s.store.GetByHeight(ctx, height) +} + +func (s *Service) LocalHead(ctx context.Context) (*header.ExtendedHeader, error) { + return s.store.Head(ctx) +} + +func (s *Service) SyncState(context.Context) (sync.State, error) { + return s.syncer.State(), nil +} + +func (s *Service) SyncWait(ctx context.Context) error { + return s.syncer.SyncWait(ctx) +} + +func (s *Service) NetworkHead(ctx context.Context) (*header.ExtendedHeader, error) { + return s.syncer.Head(ctx) +} + +func (s *Service) Subscribe(ctx context.Context) (<-chan *header.ExtendedHeader, error) { + subscription, err := s.sub.Subscribe() + if err != nil { + return nil, err + } + + headerCh := make(chan *header.ExtendedHeader) + go func() { + defer close(headerCh) + defer subscription.Cancel() + + for { + h, err := subscription.NextHeader(ctx) + if err != nil { + if err != context.DeadlineExceeded && err != context.Canceled { + log.Errorw("fetching header from subscription", "err", err) + } + return + } + + select { + case <-ctx.Done(): + return + case headerCh <- h: + } + } + }() + return headerCh, nil +} diff --git a/nodebuilder/header/service_test.go b/nodebuilder/header/service_test.go new file mode 100644 index 0000000000..14d5ada87d --- /dev/null +++ b/nodebuilder/header/service_test.go @@ -0,0 +1,41 @@ +package header + +import ( + "context" + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + + libhead "github.com/celestiaorg/go-header" + "github.com/celestiaorg/go-header/sync" + + "github.com/celestiaorg/celestia-node/header" +) + +func TestGetByHeightHandlesError(t *testing.T) { + serv := Service{ + syncer: &errorSyncer[*header.ExtendedHeader]{}, + } + + assert.NotPanics(t, func() { + h, err := 
serv.GetByHeight(context.Background(), 100) + assert.Error(t, err) + assert.Nil(t, h) + }) +} + +type errorSyncer[H libhead.Header[H]] struct{} + +func (d *errorSyncer[H]) Head(context.Context, ...libhead.HeadOption[H]) (H, error) { + var zero H + return zero, fmt.Errorf("dummy error") +} + +func (d *errorSyncer[H]) State() sync.State { + return sync.State{} +} + +func (d *errorSyncer[H]) SyncWait(context.Context) error { + return fmt.Errorf("dummy error") +} diff --git a/nodebuilder/init.go b/nodebuilder/init.go new file mode 100644 index 0000000000..0593d88560 --- /dev/null +++ b/nodebuilder/init.go @@ -0,0 +1,231 @@ +package nodebuilder + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/cosmos/cosmos-sdk/crypto/hd" + "github.com/cosmos/cosmos-sdk/crypto/keyring" + sdk "github.com/cosmos/cosmos-sdk/types" + + "github.com/celestiaorg/celestia-app/app" + "github.com/celestiaorg/celestia-app/app/encoding" + + "github.com/celestiaorg/celestia-node/libs/fslock" + "github.com/celestiaorg/celestia-node/libs/utils" + "github.com/celestiaorg/celestia-node/nodebuilder/node" + "github.com/celestiaorg/celestia-node/nodebuilder/state" +) + +// PrintKeyringInfo whether to print keyring information during init. +var PrintKeyringInfo = true + +// Init initializes the Node FileSystem Store for the given Node Type 'tp' in the directory under +// 'path'. +func Init(cfg Config, path string, tp node.Type) error { + path, err := storePath(path) + if err != nil { + return err + } + log.Infof("Initializing %s Node Store over '%s'", tp, path) + + err = initRoot(path) + if err != nil { + return err + } + + flock, err := fslock.Lock(lockPath(path)) + if err != nil { + if err == fslock.ErrLocked { + return ErrOpened + } + return err + } + defer flock.Unlock() //nolint: errcheck + + ksPath := keysPath(path) + err = initDir(ksPath) + if err != nil { + return err + } + + err = initDir(dataPath(path)) + if err != nil { + return err + } + + cfgPath := configPath(path) + err = SaveConfig(cfgPath, &cfg) + if err != nil { + return err + } + log.Infow("Saved config", "path", cfgPath) + + log.Infow("Accessing keyring...") + err = generateKeys(cfg, ksPath) + if err != nil { + log.Errorw("generating account keys", "err", err) + return err + } + + log.Info("Node Store initialized") + return nil +} + +// Reset removes all data from the datastore and dagstore directories. It leaves the keystore and +// config intact. +func Reset(path string, tp node.Type) error { + path, err := storePath(path) + if err != nil { + return err + } + log.Infof("Resetting %s Node Store over '%s'", tp, path) + + flock, err := fslock.Lock(lockPath(path)) + if err != nil { + if err == fslock.ErrLocked { + return ErrOpened + } + return err + } + defer flock.Unlock() //nolint: errcheck + + err = resetDir(dataPath(path)) + if err != nil { + return err + } + + // light nodes don't have dagstore paths + if tp == node.Light { + log.Info("Node Store reset") + return nil + } + + err = resetDir(blocksPath(path)) + if err != nil { + return err + } + + err = resetDir(transientsPath(path)) + if err != nil { + return err + } + + err = resetDir(indexPath(path)) + if err != nil { + return err + } + + log.Info("Node Store reset") + return nil +} + +// IsInit checks whether FileSystem Store was setup under given 'path'. +// If any required file/subdirectory does not exist, then false is reported. 
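+//
+// A minimal sketch of the expected flow (the path is hypothetical):
+//
+//	cfg := DefaultConfig(node.Light)
+//	if err := Init(*cfg, "~/.celestia-light", node.Light); err == nil {
+//		_ = IsInit("~/.celestia-light") // true: config, keystore and data dir now exist
+//	}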
+func IsInit(path string) bool {
+	path, err := storePath(path)
+	if err != nil {
+		log.Errorw("parsing store path", "path", path, "err", err)
+		return false
+	}
+
+	_, err = LoadConfig(configPath(path)) // load the Config and implicitly check for its existence
+	if err != nil {
+		log.Errorw("loading config", "path", path, "err", err)
+		return false
+	}
+
+	if utils.Exists(keysPath(path)) &&
+		utils.Exists(dataPath(path)) {
+		return true
+	}
+
+	return false
+}
+
+const perms = 0755
+
+// initRoot initializes (creates) the directory if it does not exist and checks that it is writable
+func initRoot(path string) error {
+	err := initDir(path)
+	if err != nil {
+		return err
+	}
+
+	// check for writing permissions
+	f, err := os.Create(filepath.Join(path, ".check"))
+	if err != nil {
+		return err
+	}
+
+	err = f.Close()
+	if err != nil {
+		return err
+	}
+
+	return os.Remove(f.Name())
+}
+
+// resetDir removes all files from the given directory and reinitializes it
+func resetDir(path string) error {
+	err := os.RemoveAll(path)
+	if err != nil {
+		return err
+	}
+	return initDir(path)
+}
+
+// initDir creates a dir if it does not exist
+func initDir(path string) error {
+	if utils.Exists(path) {
+		return nil
+	}
+	return os.Mkdir(path, perms)
+}
+
+// generateKeys will construct a keyring from the given keystore path and check
+// if account keys already exist. If not, it will generate a new account key and
+// store it.
+func generateKeys(cfg Config, ksPath string) error {
+	encConf := encoding.MakeConfig(app.ModuleEncodingRegisters...)
+
+	if cfg.State.KeyringBackend == keyring.BackendTest {
+		log.Warn("Detected plaintext keyring backend. For elevated security properties, consider using" +
+			" the `file` keyring backend.")
+	}
+	ring, err := keyring.New(app.Name, cfg.State.KeyringBackend, ksPath, os.Stdin, encConf.Codec)
+	if err != nil {
+		return err
+	}
+	keys, err := ring.List()
+	if err != nil {
+		return err
+	}
+	if len(keys) > 0 {
+		// at least one key is already present
+		return nil
+	}
+	log.Infow("NO KEY FOUND IN STORE, GENERATING NEW KEY...", "path", ksPath)
+	keyInfo, mn, err := generateNewKey(ring)
+	if err != nil {
+		return err
+	}
+	log.Info("NEW KEY GENERATED...")
+	addr, err := keyInfo.GetAddress()
+	if err != nil {
+		return err
+	}
+	if PrintKeyringInfo {
+		fmt.Printf("\nNAME: %s\nADDRESS: %s\nMNEMONIC (save this somewhere safe!!!): \n%s\n\n",
+			keyInfo.Name, addr.String(), mn)
+	}
+	return nil
+}
+
+// generateNewKey generates and returns a new key on the given keyring called
+// "my_celes_key".
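+// The returned mnemonic is the only way to recover the key: the same address
+// can be re-derived on a fresh keyring, as TestInit_generateNewKey exercises.
+// Sketch, where freshRing is a second keyring and "recovered" is an arbitrary
+// account name:
+//
+//	_, mn, _ := generateNewKey(ring)
+//	rec, _ := freshRing.NewAccount("recovered", mn, keyring.DefaultBIP39Passphrase,
+//		sdk.GetConfig().GetFullBIP44Path(), hd.Secp256k1)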
+func generateNewKey(ring keyring.Keyring) (*keyring.Record, string, error) { + return ring.NewMnemonic(state.DefaultAccountName, keyring.English, sdk.GetConfig().GetFullBIP44Path(), + keyring.DefaultBIP39Passphrase, hd.Secp256k1) +} diff --git a/nodebuilder/init_test.go b/nodebuilder/init_test.go new file mode 100644 index 0000000000..e438a191bc --- /dev/null +++ b/nodebuilder/init_test.go @@ -0,0 +1,103 @@ +package nodebuilder + +import ( + "os" + "testing" + + "github.com/cosmos/cosmos-sdk/crypto/hd" + "github.com/cosmos/cosmos-sdk/crypto/keyring" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/celestiaorg/celestia-app/app" + "github.com/celestiaorg/celestia-app/app/encoding" + + "github.com/celestiaorg/celestia-node/libs/fslock" + "github.com/celestiaorg/celestia-node/nodebuilder/node" +) + +func TestInit(t *testing.T) { + dir := t.TempDir() + nodes := []node.Type{node.Light, node.Bridge} + + for _, node := range nodes { + cfg := DefaultConfig(node) + require.NoError(t, Init(*cfg, dir, node)) + assert.True(t, IsInit(dir)) + } +} + +func TestInitErrForInvalidPath(t *testing.T) { + path := "/invalid_path" + nodes := []node.Type{node.Light, node.Bridge} + + for _, node := range nodes { + cfg := DefaultConfig(node) + require.Error(t, Init(*cfg, path, node)) + } +} + +func TestIsInitWithBrokenConfig(t *testing.T) { + dir := t.TempDir() + f, err := os.Create(configPath(dir)) + require.NoError(t, err) + defer f.Close() + //nolint:errcheck + f.Write([]byte(` + [P2P] + ListenAddresses = [/ip4/0.0.0.0/tcp/2121] + `)) + assert.False(t, IsInit(dir)) +} + +func TestIsInitForNonExistDir(t *testing.T) { + path := "/invalid_path" + assert.False(t, IsInit(path)) +} + +func TestInitErrForLockedDir(t *testing.T) { + dir := t.TempDir() + flock, err := fslock.Lock(lockPath(dir)) + require.NoError(t, err) + defer flock.Unlock() //nolint:errcheck + nodes := []node.Type{node.Light, node.Bridge} + + for _, node := range nodes { + cfg := DefaultConfig(node) + require.Error(t, Init(*cfg, dir, node)) + } +} + +// TestInit_generateNewKey tests to ensure new account is generated +// correctly. +func TestInit_generateNewKey(t *testing.T) { + cfg := DefaultConfig(node.Bridge) + + encConf := encoding.MakeConfig(app.ModuleEncodingRegisters...) 
+ ring, err := keyring.New(app.Name, cfg.State.KeyringBackend, t.TempDir(), os.Stdin, encConf.Codec) + require.NoError(t, err) + + originalKey, mn, err := generateNewKey(ring) + require.NoError(t, err) + + // check ring and make sure it generated + stored key + keys, err := ring.List() + require.NoError(t, err) + assert.Equal(t, originalKey, keys[0]) + + // ensure the generated account is actually a celestia account + addr, err := originalKey.GetAddress() + require.NoError(t, err) + assert.Contains(t, addr.String(), "celestia") + + // ensure account is recoverable from mnemonic + ring2, err := keyring.New(app.Name, cfg.State.KeyringBackend, t.TempDir(), os.Stdin, encConf.Codec) + require.NoError(t, err) + duplicateKey, err := ring2.NewAccount("test", mn, keyring.DefaultBIP39Passphrase, sdk.GetConfig().GetFullBIP44Path(), + hd.Secp256k1) + require.NoError(t, err) + got, err := duplicateKey.GetAddress() + require.NoError(t, err) + assert.Equal(t, addr.String(), got.String()) +} diff --git a/nodebuilder/module.go b/nodebuilder/module.go new file mode 100644 index 0000000000..ad287b1ac8 --- /dev/null +++ b/nodebuilder/module.go @@ -0,0 +1,69 @@ +package nodebuilder + +import ( + "context" + + "go.uber.org/fx" + + "github.com/celestiaorg/celestia-node/header" + "github.com/celestiaorg/celestia-node/libs/fxutil" + "github.com/celestiaorg/celestia-node/nodebuilder/blob" + "github.com/celestiaorg/celestia-node/nodebuilder/core" + "github.com/celestiaorg/celestia-node/nodebuilder/da" + "github.com/celestiaorg/celestia-node/nodebuilder/das" + "github.com/celestiaorg/celestia-node/nodebuilder/fraud" + "github.com/celestiaorg/celestia-node/nodebuilder/gateway" + modhead "github.com/celestiaorg/celestia-node/nodebuilder/header" + "github.com/celestiaorg/celestia-node/nodebuilder/node" + "github.com/celestiaorg/celestia-node/nodebuilder/p2p" + "github.com/celestiaorg/celestia-node/nodebuilder/prune" + "github.com/celestiaorg/celestia-node/nodebuilder/rpc" + "github.com/celestiaorg/celestia-node/nodebuilder/share" + "github.com/celestiaorg/celestia-node/nodebuilder/state" +) + +func ConstructModule(tp node.Type, network p2p.Network, cfg *Config, store Store) fx.Option { + log.Infow("Accessing keyring...") + ks, err := store.Keystore() + if err != nil { + fx.Error(err) + } + signer, err := state.KeyringSigner(cfg.State, ks, network) + if err != nil { + fx.Error(err) + } + + baseComponents := fx.Options( + fx.Supply(tp), + fx.Supply(network), + fx.Provide(p2p.BootstrappersFor), + fx.Provide(func(lc fx.Lifecycle) context.Context { + return fxutil.WithLifecycle(context.Background(), lc) + }), + fx.Supply(cfg), + fx.Supply(store.Config), + fx.Provide(store.Datastore), + fx.Provide(store.Keystore), + fx.Supply(node.StorePath(store.Path())), + fx.Supply(signer), + // modules provided by the node + p2p.ConstructModule(tp, &cfg.P2P), + state.ConstructModule(tp, &cfg.State, &cfg.Core), + modhead.ConstructModule[*header.ExtendedHeader](tp, &cfg.Header), + share.ConstructModule(tp, &cfg.Share), + gateway.ConstructModule(tp, &cfg.Gateway), + core.ConstructModule(tp, &cfg.Core), + das.ConstructModule(tp, &cfg.DASer), + fraud.ConstructModule(tp), + blob.ConstructModule(), + da.ConstructModule(), + node.ConstructModule(tp), + prune.ConstructModule(tp), + rpc.ConstructModule(tp, &cfg.RPC), + ) + + return fx.Module( + "node", + baseComponents, + ) +} diff --git a/nodebuilder/node.go b/nodebuilder/node.go new file mode 100644 index 0000000000..b16a376cc1 --- /dev/null +++ b/nodebuilder/node.go @@ -0,0 +1,191 @@ +package 
nodebuilder + +import ( + "context" + "errors" + "fmt" + "strings" + + "github.com/cristalhq/jwt" + "github.com/ipfs/boxo/blockservice" + "github.com/ipfs/boxo/exchange" + logging "github.com/ipfs/go-log/v2" + pubsub "github.com/libp2p/go-libp2p-pubsub" + "github.com/libp2p/go-libp2p/core/host" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/core/routing" + "github.com/libp2p/go-libp2p/p2p/net/conngater" + "go.uber.org/fx" + "go.uber.org/fx/fxevent" + "go.uber.org/zap/zapcore" + + "github.com/celestiaorg/celestia-node/api/gateway" + "github.com/celestiaorg/celestia-node/api/rpc" + "github.com/celestiaorg/celestia-node/nodebuilder/blob" + "github.com/celestiaorg/celestia-node/nodebuilder/da" + "github.com/celestiaorg/celestia-node/nodebuilder/das" + "github.com/celestiaorg/celestia-node/nodebuilder/fraud" + "github.com/celestiaorg/celestia-node/nodebuilder/header" + "github.com/celestiaorg/celestia-node/nodebuilder/node" + "github.com/celestiaorg/celestia-node/nodebuilder/p2p" + "github.com/celestiaorg/celestia-node/nodebuilder/share" + "github.com/celestiaorg/celestia-node/nodebuilder/state" +) + +var ( + log = logging.Logger("node") + fxLog = logging.Logger("fx") +) + +// Node represents the core structure of a Celestia node. It keeps references to all +// Celestia-specific components and services in one place and provides flexibility to run a +// Celestia node in different modes. Currently supported modes: +// * Bridge +// * Light +// * Full +type Node struct { + fx.In `ignore-unexported:"true"` + + Type node.Type + Network p2p.Network + Bootstrappers p2p.Bootstrappers + Config *Config + AdminSigner jwt.Signer + + // rpc components + RPCServer *rpc.Server // not optional + GatewayServer *gateway.Server `optional:"true"` + + // p2p components + Host host.Host + ConnGater *conngater.BasicConnectionGater + Routing routing.PeerRouting + DataExchange exchange.Interface + BlockService blockservice.BlockService + // p2p protocols + PubSub *pubsub.PubSub + // services + ShareServ share.Module // not optional + HeaderServ header.Module // not optional + StateServ state.Module // not optional + FraudServ fraud.Module // not optional + BlobServ blob.Module // not optional + DASer das.Module // not optional + AdminServ node.Module // not optional + DAMod da.Module // not optional + + // start and stop control ref internal fx.App lifecycle funcs to be called from Start and Stop + start, stop lifecycleFunc +} + +// New assembles a new Node with the given type 'tp' over Store 'store'. +func New(tp node.Type, network p2p.Network, store Store, options ...fx.Option) (*Node, error) { + cfg, err := store.Config() + if err != nil { + return nil, err + } + + return NewWithConfig(tp, network, store, cfg, options...) +} + +// NewWithConfig assembles a new Node with the given type 'tp' over Store 'store' and a custom +// config. +func NewWithConfig(tp node.Type, network p2p.Network, store Store, cfg *Config, options ...fx.Option) (*Node, error) { + opts := append([]fx.Option{ConstructModule(tp, network, cfg, store)}, options...) + return newNode(opts...) +} + +// Start launches the Node and all its components and services. 
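+// A typical lifecycle, with error handling elided (sketch; assumes an
+// initialized Store in 'store' and a cancelable 'ctx'):
+//
+//	nd, _ := New(node.Light, p2p.Mainnet, store)
+//	_ = nd.Start(ctx)
+//	defer nd.Stop(ctx)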
+func (n *Node) Start(ctx context.Context) error {
+	to := n.Config.Node.StartupTimeout
+	ctx, cancel := context.WithTimeout(ctx, to)
+	defer cancel()
+
+	err := n.start(ctx)
+	if err != nil {
+		log.Debugf("error starting %s Node: %s", n.Type, err)
+		if errors.Is(err, context.DeadlineExceeded) {
+			return fmt.Errorf("node: failed to start within timeout(%s): %w", to, err)
+		}
+		return fmt.Errorf("node: failed to start: %w", err)
+	}
+
+	log.Infof("\n\n/_____/ /_____/ /_____/ /_____/ /_____/ \n\n"+
+		"Started celestia DA node \n"+
+		"node version: %s\nnode type: %s\nnetwork: %s\n\n"+
+		"/_____/ /_____/ /_____/ /_____/ /_____/ \n",
+		node.GetBuildInfo().SemanticVersion,
+		strings.ToLower(n.Type.String()),
+		n.Network)
+
+	addrs, err := peer.AddrInfoToP2pAddrs(host.InfoFromHost(n.Host))
+	if err != nil {
+		log.Errorw("Retrieving multiaddress information", "err", err)
+		return err
+	}
+	fmt.Println("The p2p host is listening on:")
+	for _, addr := range addrs {
+		fmt.Println("* ", addr.String())
+	}
+	fmt.Println()
+	return nil
+}
+
+// Run starts the Node and blocks on the given context 'ctx' until it is canceled.
+// Once canceled, the Node is still in the running state and should be gracefully stopped via Stop.
+func (n *Node) Run(ctx context.Context) error {
+	err := n.Start(ctx)
+	if err != nil {
+		return err
+	}
+
+	<-ctx.Done()
+	return ctx.Err()
+}
+
+// Stop shuts down the Node and all its running Modules/Services, then returns.
+// Canceling the given context 'ctx' early unblocks Stop and aborts the graceful shutdown,
+// forcing the remaining Modules/Services to close immediately.
+func (n *Node) Stop(ctx context.Context) error {
+	to := n.Config.Node.ShutdownTimeout
+	ctx, cancel := context.WithTimeout(ctx, to)
+	defer cancel()
+
+	err := n.stop(ctx)
+	if err != nil {
+		log.Debugf("error stopping %s Node: %s", n.Type, err)
+		if errors.Is(err, context.DeadlineExceeded) {
+			return fmt.Errorf("node: failed to stop within timeout(%s): %w", to, err)
+		}
+		return fmt.Errorf("node: failed to stop: %w", err)
+	}
+
+	log.Debugf("stopped %s Node", n.Type)
+	return nil
+}
+
+// newNode creates a new Node from given DI options.
+// DI options allow initializing the Node with a customized set of components and services.
+// NOTE: newNode is currently meant to be used privately to create various custom Node types e.g.
+// Light, unless we decide to give package users the ability to create custom node types themselves.
+func newNode(opts ...fx.Option) (*Node, error) {
+	node := new(Node)
+	app := fx.New(
+		fx.WithLogger(func() fxevent.Logger {
+			zl := &fxevent.ZapLogger{Logger: fxLog.Desugar()}
+			zl.UseLogLevel(zapcore.DebugLevel)
+			return zl
+		}),
+		fx.Populate(node),
+		fx.Options(opts...),
+	)
+	if err := app.Err(); err != nil {
+		return nil, err
+	}
+
+	node.start, node.stop = app.Start, app.Stop
+	return node, nil
+}
+
+// lifecycleFunc defines a type for common lifecycle funcs.
+type lifecycleFunc func(context.Context) error diff --git a/nodebuilder/node/admin.go b/nodebuilder/node/admin.go new file mode 100644 index 0000000000..f71af66081 --- /dev/null +++ b/nodebuilder/node/admin.go @@ -0,0 +1,59 @@ +package node + +import ( + "context" + + "github.com/cristalhq/jwt" + "github.com/filecoin-project/go-jsonrpc/auth" + logging "github.com/ipfs/go-log/v2" + + "github.com/celestiaorg/celestia-node/libs/authtoken" +) + +const APIVersion = "v0.11.0" + +type module struct { + tp Type + signer jwt.Signer +} + +func newModule(tp Type, signer jwt.Signer) Module { + return &module{ + tp: tp, + signer: signer, + } +} + +// Info contains information related to the administrative +// node. +type Info struct { + Type Type `json:"type"` + APIVersion string `json:"api_version"` +} + +func (m *module) Info(context.Context) (Info, error) { + return Info{ + Type: m.tp, + APIVersion: APIVersion, + }, nil +} + +func (m *module) Ready(context.Context) (bool, error) { + // Because the node uses FX to provide the RPC last, all services' lifecycles have been started by + // the point this endpoint is available. It is not 100% guaranteed at this point that all services + // are fully ready, but it is very high probability and all endpoints are available at this point + // regardless. + return true, nil +} + +func (m *module) LogLevelSet(_ context.Context, name, level string) error { + return logging.SetLogLevel(name, level) +} + +func (m *module) AuthVerify(_ context.Context, token string) ([]auth.Permission, error) { + return authtoken.ExtractSignedPermissions(m.signer, token) +} + +func (m *module) AuthNew(_ context.Context, permissions []auth.Permission) (string, error) { + return authtoken.NewSignedJWT(m.signer, permissions) +} diff --git a/nodebuilder/node/auth.go b/nodebuilder/node/auth.go new file mode 100644 index 0000000000..9c16af92c3 --- /dev/null +++ b/nodebuilder/node/auth.go @@ -0,0 +1,41 @@ +package node + +import ( + "crypto/rand" + "io" + + "github.com/cristalhq/jwt" + + "github.com/celestiaorg/celestia-node/libs/keystore" +) + +var SecretName = keystore.KeyName("jwt-secret.jwt") + +// secret returns the node's JWT secret if it exists, or generates +// and saves a new one if it does not. +func secret(ks keystore.Keystore) (jwt.Signer, error) { + // if key already exists, use it + if pk, ok := existing(ks); ok { + return jwt.NewHS256(pk) + } + // otherwise, generate and save new priv key + sk, err := io.ReadAll(io.LimitReader(rand.Reader, 32)) + if err != nil { + return nil, err + } + // save key + err = ks.Put(SecretName, keystore.PrivKey{Body: sk}) + if err != nil { + return nil, err + } + + return jwt.NewHS256(sk) +} + +func existing(ks keystore.Keystore) ([]byte, bool) { + sk, err := ks.Get(SecretName) + if err != nil { + return nil, false + } + return sk.Body, true +} diff --git a/nodebuilder/node/buildInfo.go b/nodebuilder/node/buildInfo.go new file mode 100644 index 0000000000..53d8554d4d --- /dev/null +++ b/nodebuilder/node/buildInfo.go @@ -0,0 +1,35 @@ +package node + +import ( + "fmt" + "runtime" +) + +var ( + buildTime string + lastCommit string + semanticVersion string + + systemVersion = fmt.Sprintf("%s/%s", runtime.GOARCH, runtime.GOOS) + golangVersion = runtime.Version() +) + +// BuildInfo represents all necessary information about current build. +type BuildInfo struct { + BuildTime string + LastCommit string + SemanticVersion string + SystemVersion string + GolangVersion string +} + +// GetBuildInfo returns information about current build. 
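+// The build metadata above is zero-valued unless injected at link time; the
+// usual Go pattern (the flag value here is illustrative) is:
+//
+//	go build -ldflags="-X 'github.com/celestiaorg/celestia-node/nodebuilder/node.semanticVersion=v0.11.0'"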
+func GetBuildInfo() *BuildInfo {
+	return &BuildInfo{
+		buildTime,
+		lastCommit,
+		semanticVersion,
+		systemVersion,
+		golangVersion,
+	}
+}
diff --git a/nodebuilder/node/cmd/node.go b/nodebuilder/node/cmd/node.go
new file mode 100644
index 0000000000..a65727fb03
--- /dev/null
+++ b/nodebuilder/node/cmd/node.go
@@ -0,0 +1,105 @@
+package cmd
+
+import (
+	"errors"
+	"strings"
+
+	"github.com/filecoin-project/go-jsonrpc/auth"
+	"github.com/spf13/cobra"
+
+	cmdnode "github.com/celestiaorg/celestia-node/cmd"
+)
+
+func init() {
+	Cmd.AddCommand(nodeInfoCmd, logCmd, verifyCmd, authCmd)
+}
+
+var Cmd = &cobra.Command{
+	Use:               "node [command]",
+	Short:             "Allows administrating a running node.",
+	Args:              cobra.NoArgs,
+	PersistentPreRunE: cmdnode.InitClient,
+}
+
+var nodeInfoCmd = &cobra.Command{
+	Use:   "info",
+	Args:  cobra.NoArgs,
+	Short: "Returns administrative information about the node.",
+	RunE: func(c *cobra.Command, args []string) error {
+		client, err := cmdnode.ParseClientFromCtx(c.Context())
+		if err != nil {
+			return err
+		}
+		defer client.Close()
+
+		info, err := client.Node.Info(c.Context())
+		return cmdnode.PrintOutput(info, err, nil)
+	},
+}
+
+var logCmd = &cobra.Command{
+	Use:   "log-level",
+	Args:  cobra.MinimumNArgs(1),
+	Short: "Sets log level for a module.",
+	Long: "Sets the log level for a module in the format <module>:<level>, where <level> is one of\n" +
+		"`DEBUG, INFO, WARN, ERROR, DPANIC, PANIC, FATAL` or their lower-case forms.\n" +
+		"To set all modules to a particular level, `*:<level>` should be passed",
+	RunE: func(c *cobra.Command, args []string) error {
+		client, err := cmdnode.ParseClientFromCtx(c.Context())
+		if err != nil {
+			return err
+		}
+		defer client.Close()
+
+		for _, ll := range args {
+			params := strings.Split(ll, ":")
+			if len(params) != 2 {
+				return errors.New("cmd: log-level arg must be in form <module>:<level>, " +
					"e.g. pubsub:debug")
+			}
+
+			if err := client.Node.LogLevelSet(c.Context(), params[0], params[1]); err != nil {
+				return err
+			}
+		}
+		return nil
+	},
+}
+
+var verifyCmd = &cobra.Command{
+	Use:   "permissions",
+	Args:  cobra.ExactArgs(1),
+	Short: "Returns the permissions assigned to the given token.",
+
+	RunE: func(c *cobra.Command, args []string) error {
+		client, err := cmdnode.ParseClientFromCtx(c.Context())
+		if err != nil {
+			return err
+		}
+		defer client.Close()
+
+		perms, err := client.Node.AuthVerify(c.Context(), args[0])
+		return cmdnode.PrintOutput(perms, err, nil)
+	},
+}
+
+var authCmd = &cobra.Command{
+	Use:   "set-permissions",
+	Args:  cobra.MinimumNArgs(1),
+	Short: "Signs and returns a new token with the given permissions.",
+	RunE: func(c *cobra.Command, args []string) error {
+		client, err := cmdnode.ParseClientFromCtx(c.Context())
+		if err != nil {
+			return err
+		}
+		defer client.Close()
+
+		perms := make([]auth.Permission, len(args))
+		for i, p := range args {
+			perms[i] = (auth.Permission)(p)
+		}
+
+		result, err := client.Node.AuthNew(c.Context(), perms)
+		return cmdnode.PrintOutput(result, err, nil)
+	},
+}
diff --git a/nodebuilder/node/config.go b/nodebuilder/node/config.go
new file mode 100644
index 0000000000..e44fe2f014
--- /dev/null
+++ b/nodebuilder/node/config.go
@@ -0,0 +1,38 @@
+package node
+
+import (
+	"fmt"
+	"time"
+)
+
+var defaultLifecycleTimeout = time.Minute * 2
+
+type Config struct {
+	StartupTimeout  time.Duration
+	ShutdownTimeout time.Duration
+}
+
+// DefaultConfig returns the default node configuration for a given node type.
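+// For example, a Light node currently gets the shorter lifecycle timeouts:
+//
+//	cfg := DefaultConfig(Light)
+//	// cfg.StartupTimeout == 20s, cfg.ShutdownTimeout == 20s; other types get 2m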
+func DefaultConfig(tp Type) Config { + var timeout time.Duration + switch tp { + case Light: + timeout = time.Second * 20 + default: + timeout = defaultLifecycleTimeout + } + return Config{ + StartupTimeout: timeout, + ShutdownTimeout: timeout, + } +} + +func (c *Config) Validate() error { + if c.StartupTimeout == 0 { + return fmt.Errorf("invalid startup timeout: %v", c.StartupTimeout) + } + if c.ShutdownTimeout == 0 { + return fmt.Errorf("invalid shutdown timeout: %v", c.ShutdownTimeout) + } + return nil +} diff --git a/nodebuilder/node/metrics.go b/nodebuilder/node/metrics.go new file mode 100644 index 0000000000..07c9a5fc0f --- /dev/null +++ b/nodebuilder/node/metrics.go @@ -0,0 +1,72 @@ +package node + +import ( + "context" + "time" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" +) + +var meter = otel.Meter("node") + +var ( + timeStarted time.Time + nodeStarted bool +) + +// WithMetrics registers node metrics. +func WithMetrics() error { + nodeStartTS, err := meter.Int64ObservableGauge( + "node_start_ts", + metric.WithDescription("timestamp when the node was started"), + ) + if err != nil { + return err + } + + totalNodeRunTime, err := meter.Float64ObservableCounter( + "node_runtime_counter_in_seconds", + metric.WithDescription("total time the node has been running"), + ) + if err != nil { + return err + } + + buildInfoGauge, err := meter.Float64ObservableGauge( + "build_info", + metric.WithDescription("Celestia Node build information"), + ) + if err != nil { + return err + } + + callback := func(ctx context.Context, observer metric.Observer) error { + if !nodeStarted { + // Observe node start timestamp + timeStarted = time.Now() + observer.ObserveInt64(nodeStartTS, timeStarted.Unix()) + nodeStarted = true + } + + observer.ObserveFloat64(totalNodeRunTime, time.Since(timeStarted).Seconds()) + + // Observe build info with labels + labels := metric.WithAttributes( + attribute.String("build_time", buildTime), + attribute.String("last_commit", lastCommit), + attribute.String("semantic_version", semanticVersion), + attribute.String("system_version", systemVersion), + attribute.String("golang_version", golangVersion), + ) + + observer.ObserveFloat64(buildInfoGauge, 1, labels) + + return nil + } + + _, err = meter.RegisterCallback(callback, nodeStartTS, totalNodeRunTime, buildInfoGauge) + + return err +} diff --git a/nodebuilder/node/mocks/api.go b/nodebuilder/node/mocks/api.go new file mode 100644 index 0000000000..d8789a771c --- /dev/null +++ b/nodebuilder/node/mocks/api.go @@ -0,0 +1,96 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/celestiaorg/celestia-node/nodebuilder/node (interfaces: Module) + +// Package mocks is a generated GoMock package. +package mocks + +import ( + context "context" + reflect "reflect" + + node "github.com/celestiaorg/celestia-node/nodebuilder/node" + auth "github.com/filecoin-project/go-jsonrpc/auth" + gomock "github.com/golang/mock/gomock" +) + +// MockModule is a mock of Module interface. +type MockModule struct { + ctrl *gomock.Controller + recorder *MockModuleMockRecorder +} + +// MockModuleMockRecorder is the mock recorder for MockModule. +type MockModuleMockRecorder struct { + mock *MockModule +} + +// NewMockModule creates a new mock instance. 
+func NewMockModule(ctrl *gomock.Controller) *MockModule { + mock := &MockModule{ctrl: ctrl} + mock.recorder = &MockModuleMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockModule) EXPECT() *MockModuleMockRecorder { + return m.recorder +} + +// AuthNew mocks base method. +func (m *MockModule) AuthNew(arg0 context.Context, arg1 []auth.Permission) (string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AuthNew", arg0, arg1) + ret0, _ := ret[0].(string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// AuthNew indicates an expected call of AuthNew. +func (mr *MockModuleMockRecorder) AuthNew(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AuthNew", reflect.TypeOf((*MockModule)(nil).AuthNew), arg0, arg1) +} + +// AuthVerify mocks base method. +func (m *MockModule) AuthVerify(arg0 context.Context, arg1 string) ([]auth.Permission, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AuthVerify", arg0, arg1) + ret0, _ := ret[0].([]auth.Permission) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// AuthVerify indicates an expected call of AuthVerify. +func (mr *MockModuleMockRecorder) AuthVerify(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AuthVerify", reflect.TypeOf((*MockModule)(nil).AuthVerify), arg0, arg1) +} + +// Info mocks base method. +func (m *MockModule) Info(arg0 context.Context) (node.Info, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Info", arg0) + ret0, _ := ret[0].(node.Info) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Info indicates an expected call of Info. +func (mr *MockModuleMockRecorder) Info(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Info", reflect.TypeOf((*MockModule)(nil).Info), arg0) +} + +// LogLevelSet mocks base method. +func (m *MockModule) LogLevelSet(arg0 context.Context, arg1, arg2 string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "LogLevelSet", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// LogLevelSet indicates an expected call of LogLevelSet. +func (mr *MockModuleMockRecorder) LogLevelSet(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LogLevelSet", reflect.TypeOf((*MockModule)(nil).LogLevelSet), arg0, arg1, arg2) +} diff --git a/nodebuilder/node/module.go b/nodebuilder/node/module.go new file mode 100644 index 0000000000..5abfad8e5f --- /dev/null +++ b/nodebuilder/node/module.go @@ -0,0 +1,16 @@ +package node + +import ( + "github.com/cristalhq/jwt" + "go.uber.org/fx" +) + +func ConstructModule(tp Type) fx.Option { + return fx.Module( + "node", + fx.Provide(func(secret jwt.Signer) Module { + return newModule(tp, secret) + }), + fx.Provide(secret), + ) +} diff --git a/nodebuilder/node/node.go b/nodebuilder/node/node.go new file mode 100644 index 0000000000..b2bc7dac31 --- /dev/null +++ b/nodebuilder/node/node.go @@ -0,0 +1,59 @@ +package node + +import ( + "context" + + "github.com/filecoin-project/go-jsonrpc/auth" +) + +// Module defines the API related to interacting with the "administrative" +// node. +// +//go:generate mockgen -destination=mocks/api.go -package=mocks . Module +type Module interface { + // Info returns administrative information about the node. 
+ Info(context.Context) (Info, error) + + // Ready returns true once the node's RPC is ready to accept requests. + Ready(context.Context) (bool, error) + + // LogLevelSet sets the given component log level to the given level. + LogLevelSet(ctx context.Context, name, level string) error + + // AuthVerify returns the permissions assigned to the given token. + AuthVerify(ctx context.Context, token string) ([]auth.Permission, error) + // AuthNew signs and returns a new token with the given permissions. + AuthNew(ctx context.Context, perms []auth.Permission) (string, error) +} + +var _ Module = (*API)(nil) + +type API struct { + Internal struct { + Info func(context.Context) (Info, error) `perm:"admin"` + Ready func(context.Context) (bool, error) `perm:"read"` + LogLevelSet func(ctx context.Context, name, level string) error `perm:"admin"` + AuthVerify func(ctx context.Context, token string) ([]auth.Permission, error) `perm:"admin"` + AuthNew func(ctx context.Context, perms []auth.Permission) (string, error) `perm:"admin"` + } +} + +func (api *API) Info(ctx context.Context) (Info, error) { + return api.Internal.Info(ctx) +} + +func (api *API) Ready(ctx context.Context) (bool, error) { + return api.Internal.Ready(ctx) +} + +func (api *API) LogLevelSet(ctx context.Context, name, level string) error { + return api.Internal.LogLevelSet(ctx, name, level) +} + +func (api *API) AuthVerify(ctx context.Context, token string) ([]auth.Permission, error) { + return api.Internal.AuthVerify(ctx, token) +} + +func (api *API) AuthNew(ctx context.Context, perms []auth.Permission) (string, error) { + return api.Internal.AuthNew(ctx, perms) +} diff --git a/node/type.go b/nodebuilder/node/type.go similarity index 76% rename from node/type.go rename to nodebuilder/node/type.go index b6d8918edc..2f09b26503 100644 --- a/node/type.go +++ b/nodebuilder/node/type.go @@ -4,12 +4,17 @@ package node // The zero value for Type is invalid. type Type uint8 +// StorePath is an alias used in order to pass the base path of the node store to nodebuilder +// modules. +type StorePath string + const ( - // Bridge is a Celestia Node that bridges the Celestia consensus network and data availability network. - // It maintains a trusted channel/connection to a Celestia Core node via the core.Client API. + // Bridge is a Celestia Node that bridges the Celestia consensus network and data availability + // network. It maintains a trusted channel/connection to a Celestia Core node via the core.Client + // API. Bridge Type = iota + 1 - // Light is a stripped-down Celestia Node which aims to be lightweight while preserving the highest possible - // security guarantees. + // Light is a stripped-down Celestia Node which aims to be lightweight while preserving the highest + // possible security guarantees. Light // Full is a Celestia Node that stores blocks in their entirety. 
Full diff --git a/node/node_bridge_test.go b/nodebuilder/node_bridge_test.go similarity index 57% rename from node/node_bridge_test.go rename to nodebuilder/node_bridge_test.go index 8101ccd66d..d2b7ebaf4e 100644 --- a/node/node_bridge_test.go +++ b/nodebuilder/node_bridge_test.go @@ -1,29 +1,28 @@ -package node +package nodebuilder import ( "context" "testing" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/celestiaorg/celestia-node/core" - "github.com/celestiaorg/celestia-node/params" + coremodule "github.com/celestiaorg/celestia-node/nodebuilder/core" + "github.com/celestiaorg/celestia-node/nodebuilder/node" + "github.com/celestiaorg/celestia-node/nodebuilder/p2p" ) func TestBridge_WithMockedCoreClient(t *testing.T) { t.Skip("skipping") // consult https://github.com/celestiaorg/celestia-core/issues/667 for reasoning - repo := MockStore(t, DefaultConfig(Bridge)) + repo := MockStore(t, DefaultConfig(node.Bridge)) ctx, cancel := context.WithCancel(context.Background()) t.Cleanup(cancel) - _, client := core.StartTestClient(ctx, t) - node, err := New(Bridge, repo, WithCoreClient(client), WithNetwork(params.Private)) + client := core.StartTestNode(t).Client + node, err := New(node.Bridge, p2p.Private, repo, coremodule.WithClient(client)) require.NoError(t, err) require.NotNil(t, node) - assert.True(t, node.CoreClient.IsRunning()) - err = node.Start(ctx) require.NoError(t, err) diff --git a/nodebuilder/node_light_test.go b/nodebuilder/node_light_test.go new file mode 100644 index 0000000000..7138a23c9e --- /dev/null +++ b/nodebuilder/node_light_test.go @@ -0,0 +1,56 @@ +package nodebuilder + +import ( + "context" + "crypto/rand" + "testing" + + "github.com/libp2p/go-libp2p/core/crypto" + mocknet "github.com/libp2p/go-libp2p/p2p/net/mock" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + nodebuilder "github.com/celestiaorg/celestia-node/nodebuilder/node" + "github.com/celestiaorg/celestia-node/nodebuilder/p2p" + "github.com/celestiaorg/celestia-node/nodebuilder/state" +) + +func TestNewLightWithP2PKey(t *testing.T) { + key, _, err := crypto.GenerateEd25519Key(rand.Reader) + require.NoError(t, err) + node := TestNode(t, nodebuilder.Light, p2p.WithP2PKey(key)) + assert.True(t, node.Host.ID().MatchesPrivateKey(key)) +} + +func TestNewLightWithHost(t *testing.T) { + nw, _ := mocknet.WithNPeers(1) + node := TestNode(t, nodebuilder.Light, p2p.WithHost(nw.Hosts()[0])) + assert.Equal(t, nw.Peers()[0], node.Host.ID()) +} + +func TestLight_WithMutualPeers(t *testing.T) { + peers := []string{ + "/ip6/100:0:114b:abc5:e13a:c32f:7a9e:f00a/tcp/2121/p2p/12D3KooWSRqDfpLsQxpyUhLC9oXHD2WuZ2y5FWzDri7LT4Dw9fSi", + "/ip4/192.168.1.10/tcp/2121/p2p/12D3KooWSRqDfpLsQxpyUhLC9oXHD2WuZ2y5FWzDri7LT4Dw9fSi", + } + cfg := DefaultConfig(nodebuilder.Light) + cfg.P2P.MutualPeers = peers + node := TestNodeWithConfig(t, nodebuilder.Light, cfg) + + require.NotNil(t, node) + assert.Equal(t, node.Config.P2P.MutualPeers, peers) +} + +func TestLight_WithNetwork(t *testing.T) { + node := TestNode(t, nodebuilder.Light) + require.NotNil(t, node) + assert.Equal(t, p2p.Private, node.Network) +} + +// TestLight_WithStubbedCoreAccessor ensures that a node started without +// a core connection will return a stubbed StateModule. 
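+// The stub fails every state call with state.ErrNoStateAccess, which is what
+// the Balance query in the test body asserts.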
+func TestLight_WithStubbedCoreAccessor(t *testing.T) {
+	node := TestNode(t, nodebuilder.Light)
+	_, err := node.StateServ.Balance(context.Background())
+	assert.ErrorIs(t, err, state.ErrNoStateAccess)
+}
diff --git a/nodebuilder/node_test.go b/nodebuilder/node_test.go
new file mode 100644
index 0000000000..41eff32fab
--- /dev/null
+++ b/nodebuilder/node_test.go
@@ -0,0 +1,159 @@
+//go:build !race
+
+package nodebuilder
+
+import (
+	"context"
+	"net/http"
+	"net/http/httptest"
+	"strconv"
+	"strings"
+	"testing"
+
+	"github.com/stretchr/testify/require"
+	"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp"
+	collectormetricpb "go.opentelemetry.io/proto/otlp/collector/metrics/v1"
+	"google.golang.org/protobuf/proto"
+
+	"github.com/celestiaorg/celestia-node/header/headertest"
+	"github.com/celestiaorg/celestia-node/nodebuilder/node"
+	"github.com/celestiaorg/celestia-node/share"
+)
+
+func TestLifecycle(t *testing.T) {
+	var test = []struct {
+		tp node.Type
+	}{
+		{tp: node.Bridge},
+		{tp: node.Full},
+		{tp: node.Light},
+	}
+
+	for i, tt := range test {
+		t.Run(strconv.Itoa(i), func(t *testing.T) {
+			node := TestNode(t, tt.tp)
+			require.NotNil(t, node)
+			require.NotNil(t, node.Config)
+			require.NotNil(t, node.Host)
+			require.NotNil(t, node.HeaderServ)
+			require.NotNil(t, node.StateServ)
+			require.NotNil(t, node.AdminServ)
+			require.Equal(t, tt.tp, node.Type)
+
+			ctx, cancel := context.WithCancel(context.Background())
+			defer cancel()
+
+			err := node.Start(ctx)
+			require.NoError(t, err)
+
+			err = node.Stop(ctx)
+			require.NoError(t, err)
+		})
+	}
+}
+
+func TestLifecycle_WithMetrics(t *testing.T) {
+	url, stop := StartMockOtelCollectorHTTPServer(t)
+	defer stop()
+
+	otelCollectorURL := strings.ReplaceAll(url, "http://", "")
+
+	var test = []struct {
+		tp           node.Type
+		coreExpected bool
+	}{
+		{tp: node.Bridge},
+		{tp: node.Full},
+		{tp: node.Light},
+	}
+
+	for i, tt := range test {
+		t.Run(strconv.Itoa(i), func(t *testing.T) {
+			node := TestNode(
+				t,
+				tt.tp,
+				WithMetrics(
+					[]otlpmetrichttp.Option{
+						otlpmetrichttp.WithEndpoint(otelCollectorURL),
+						otlpmetrichttp.WithInsecure(),
+					},
+					tt.tp,
+				),
+			)
+			require.NotNil(t, node)
+			require.NotNil(t, node.Config)
+			require.NotNil(t, node.Host)
+			require.NotNil(t, node.HeaderServ)
+			require.NotNil(t, node.StateServ)
+			require.Equal(t, tt.tp, node.Type)
+
+			ctx, cancel := context.WithCancel(context.Background())
+			defer cancel()
+
+			err := node.Start(ctx)
+			require.NoError(t, err)
+
+			err = node.Stop(ctx)
+			require.NoError(t, err)
+		})
+	}
+}
+
+func StartMockOtelCollectorHTTPServer(t *testing.T) (string, func()) {
+	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		// fail on any request that is not a POST to the metrics endpoint
+		if r.URL.Path != "/v1/metrics" || r.Method != http.MethodPost {
+			t.Errorf("Expected to request [POST] '/v1/metrics', got: [%s] %s", r.Method, r.URL.Path)
+		}
+
+		if r.Header.Get("Content-Type") != "application/x-protobuf" {
+			t.Errorf("Expected Content-Type: application/x-protobuf header, got: %s", r.Header.Get("Content-Type"))
+		}
+
+		response := collectormetricpb.ExportMetricsServiceResponse{}
+		rawResponse, _ := proto.Marshal(&response)
+		contentType := "application/x-protobuf"
+		status := http.StatusOK
+
+		log.Debug("Responding to otlp POST request")
+		w.Header().Set("Content-Type", contentType)
+		w.WriteHeader(status)
+		_, _ = w.Write(rawResponse)
+
+		log.Debug("Responded to otlp POST request")
+	}))
+
+	server.EnableHTTP2 = true
+	return server.URL, server.Close
+}
+
+func TestEmptyBlockExists(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + var test = []struct { + tp node.Type + }{ + {tp: node.Bridge}, + {tp: node.Full}, + // technically doesn't need to be tested as a SharesAvailable call to + // light node short circuits on an empty Root + {tp: node.Light}, + } + for i, tt := range test { + t.Run(strconv.Itoa(i), func(t *testing.T) { + node := TestNode(t, tt.tp) + err := node.Start(ctx) + require.NoError(t, err) + + // ensure an empty block exists in store + + eh := headertest.RandExtendedHeaderWithRoot(t, share.EmptyRoot()) + err = node.ShareServ.SharesAvailable(ctx, eh) + require.NoError(t, err) + + err = node.Stop(ctx) + require.NoError(t, err) + }) + } + +} diff --git a/node/p2p/addrs.go b/nodebuilder/p2p/addrs.go similarity index 84% rename from node/p2p/addrs.go rename to nodebuilder/p2p/addrs.go index 70a9fa8fde..d8f50c8144 100644 --- a/node/p2p/addrs.go +++ b/nodebuilder/p2p/addrs.go @@ -3,14 +3,14 @@ package p2p import ( "fmt" - "github.com/libp2p/go-libp2p-core/host" p2pconfig "github.com/libp2p/go-libp2p/config" + hst "github.com/libp2p/go-libp2p/core/host" ma "github.com/multiformats/go-multiaddr" ) // Listen returns invoke function that starts listening for inbound connections with libp2p.Host. -func Listen(listen []string) func(host host.Host) (err error) { - return func(host host.Host) (err error) { +func Listen(listen []string) func(h hst.Host) (err error) { + return func(h hst.Host) (err error) { maListen := make([]ma.Multiaddr, len(listen)) for i, addr := range listen { maListen[i], err = ma.NewMultiaddr(addr) @@ -18,12 +18,12 @@ func Listen(listen []string) func(host host.Host) (err error) { return fmt.Errorf("failure to parse config.P2P.ListenAddresses: %s", err) } } - return host.Network().Listen(maListen...) + return h.Network().Listen(maListen...) } } -// AddrsFactory returns a constructor for AddrsFactory. -func AddrsFactory(announce []string, noAnnounce []string) func() (_ p2pconfig.AddrsFactory, err error) { +// addrsFactory returns a constructor for AddrsFactory. +func addrsFactory(announce []string, noAnnounce []string) func() (_ p2pconfig.AddrsFactory, err error) { return func() (_ p2pconfig.AddrsFactory, err error) { // Convert maAnnounce strings to Multiaddresses maAnnounce := make([]ma.Multiaddr, len(announce)) diff --git a/nodebuilder/p2p/bitswap.go b/nodebuilder/p2p/bitswap.go new file mode 100644 index 0000000000..014435071a --- /dev/null +++ b/nodebuilder/p2p/bitswap.go @@ -0,0 +1,101 @@ +package p2p + +import ( + "context" + "errors" + "fmt" + + "github.com/ipfs/boxo/bitswap/client" + "github.com/ipfs/boxo/bitswap/network" + "github.com/ipfs/boxo/bitswap/server" + "github.com/ipfs/boxo/blockstore" + "github.com/ipfs/boxo/exchange" + "github.com/ipfs/go-datastore" + routinghelpers "github.com/libp2p/go-libp2p-routing-helpers" + hst "github.com/libp2p/go-libp2p/core/host" + "github.com/libp2p/go-libp2p/core/protocol" + "go.uber.org/fx" + + "github.com/celestiaorg/celestia-node/share/eds" +) + +const ( + // default size of bloom filter in blockStore + defaultBloomFilterSize = 512 << 10 + // default amount of hash functions defined for bloom filter + defaultBloomFilterHashes = 7 + // default size of arc cache in blockStore + defaultARCCacheSize = 64 << 10 +) + +// dataExchange provides a constructor for IPFS block's DataExchange over BitSwap. 
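+// Bitswap traffic is namespaced per network via protocolID below, so nodes on
+// different networks cannot exchange blocks, e.g. (network name illustrative):
+//
+//	protocolID("private") // -> protocol.ID("/celestia/private")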
+func dataExchange(params bitSwapParams) exchange.Interface { + prefix := protocolID(params.Net) + net := network.NewFromIpfsHost(params.Host, &routinghelpers.Null{}, network.Prefix(prefix)) + srvr := server.New( + params.Ctx, + net, + params.Bs, + server.ProvideEnabled(false), // we don't provide blocks over DHT + // NOTE: These below are required for our protocol to work reliably. + // // See https://github.com/celestiaorg/celestia-node/issues/732 + server.SetSendDontHaves(false), + ) + + clnt := client.New( + params.Ctx, + net, + params.Bs, + client.WithBlockReceivedNotifier(srvr), + client.SetSimulateDontHavesOnTimeout(false), + client.WithoutDuplicatedBlockStats(), + ) + net.Start(srvr, clnt) // starting with hook does not work + + params.Lifecycle.Append(fx.Hook{ + OnStop: func(ctx context.Context) (err error) { + err = errors.Join(err, clnt.Close()) + err = errors.Join(err, srvr.Close()) + net.Stop() + return err + }, + }) + + return clnt +} + +func blockstoreFromDatastore(ctx context.Context, ds datastore.Batching) (blockstore.Blockstore, error) { + return blockstore.CachedBlockstore( + ctx, + blockstore.NewBlockstore(ds), + blockstore.CacheOpts{ + HasBloomFilterSize: defaultBloomFilterSize, + HasBloomFilterHashes: defaultBloomFilterHashes, + HasTwoQueueCacheSize: defaultARCCacheSize, + }, + ) +} + +func blockstoreFromEDSStore(ctx context.Context, store *eds.Store) (blockstore.Blockstore, error) { + return blockstore.CachedBlockstore( + ctx, + store.Blockstore(), + blockstore.CacheOpts{ + HasTwoQueueCacheSize: defaultARCCacheSize, + }, + ) +} + +type bitSwapParams struct { + fx.In + + Lifecycle fx.Lifecycle + Ctx context.Context + Net Network + Host hst.Host + Bs blockstore.Blockstore +} + +func protocolID(network Network) protocol.ID { + return protocol.ID(fmt.Sprintf("/celestia/%s", network)) +} diff --git a/nodebuilder/p2p/bootstrap.go b/nodebuilder/p2p/bootstrap.go new file mode 100644 index 0000000000..8e1856f6fb --- /dev/null +++ b/nodebuilder/p2p/bootstrap.go @@ -0,0 +1,85 @@ +package p2p + +import ( + "os" + "strconv" + + "github.com/libp2p/go-libp2p/core/peer" + ma "github.com/multiformats/go-multiaddr" +) + +const EnvKeyCelestiaBootstrapper = "CELESTIA_BOOTSTRAPPER" + +func isBootstrapper() bool { + return os.Getenv(EnvKeyCelestiaBootstrapper) == strconv.FormatBool(true) +} + +// BootstrappersFor returns address information of bootstrap peers for a given network. +func BootstrappersFor(net Network) (Bootstrappers, error) { + bs, err := bootstrappersFor(net) + if err != nil { + return nil, err + } + + return parseAddrInfos(bs) +} + +// bootstrappersFor reports multiaddresses of bootstrap peers for a given network. +func bootstrappersFor(net Network) ([]string, error) { + var err error + net, err = net.Validate() + if err != nil { + return nil, err + } + + return bootstrapList[net], nil +} + +// NOTE: Every time we add a new long-running network, its bootstrap peers have to be added here. 
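+// Each entry is a complete p2p multiaddress: a transport part
+// (/dns4/<host>/tcp/2121) followed by the bootstrapper's peer identity
+// (/p2p/<peer-ID>); parseAddrInfos below converts these into peer.AddrInfo.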
+var bootstrapList = map[Network][]string{ + Mainnet: { + "/dns4/da-bridge-1.celestia-bootstrap.net/tcp/2121/p2p/12D3KooWSqZaLcn5Guypo2mrHr297YPJnV8KMEMXNjs3qAS8msw8", + "/dns4/da-bridge-2.celestia-bootstrap.net/tcp/2121/p2p/12D3KooWQpuTFELgsUypqp9N4a1rKBccmrmQVY8Em9yhqppTJcXf", + "/dns4/da-bridge-3.celestia-bootstrap.net/tcp/2121/p2p/12D3KooWSGa4huD6ts816navn7KFYiStBiy5LrBQH1HuEahk4TzQ", + "/dns4/da-bridge-4.celestia-bootstrap.net/tcp/2121/p2p/12D3KooWHBXCmXaUNat6ooynXG837JXPsZpSTeSzZx6DpgNatMmR", + "/dns4/da-bridge-5.celestia-bootstrap.net/tcp/2121/p2p/12D3KooWDGTBK1a2Ru1qmnnRwP6Dmc44Zpsxi3xbgFk7ATEPfmEU", + "/dns4/da-bridge-6.celestia-bootstrap.net/tcp/2121/p2p/12D3KooWLTUFyf3QEGqYkHWQS2yCtuUcL78vnKBdXU5gABM1YDeH", + "/dns4/da-full-1.celestia-bootstrap.net/tcp/2121/p2p/12D3KooWKZCMcwGCYbL18iuw3YVpAZoyb1VBGbx9Kapsjw3soZgr", + "/dns4/da-full-2.celestia-bootstrap.net/tcp/2121/p2p/12D3KooWE3fmRtHgfk9DCuQFfY3H3JYEnTU3xZozv1Xmo8KWrWbK", + "/dns4/da-full-3.celestia-bootstrap.net/tcp/2121/p2p/12D3KooWK6Ftsd4XsWCsQZgZPNhTrE5urwmkoo5P61tGvnKmNVyv", + }, + Arabica: { + "/dns4/da-bridge-1.celestia-arabica-11.com/tcp/2121/p2p/12D3KooWGqwzdEqM54Dce6LXzfFr97Bnhvm6rN7KM7MFwdomfm4S", + "/dns4/da-bridge-2.celestia-arabica-11.com/tcp/2121/p2p/12D3KooWCMGM5eZWVfCN9ZLAViGfLUWAfXP5pCm78NFKb9jpBtua", + "/dns4/da-bridge-3.celestia-arabica-11.com/tcp/2121/p2p/12D3KooWEWuqrjULANpukDFGVoHW3RoeUU53Ec9t9v5cwW3MkVdQ", + "/dns4/da-bridge-4.celestia-arabica-11.com/tcp/2121/p2p/12D3KooWLT1ysSrD7XWSBjh7tU1HQanF5M64dHV6AuM6cYEJxMPk", + }, + Mocha: { + "/dns4/da-bridge-mocha-4.celestia-mocha.com/tcp/2121/p2p/12D3KooWCBAbQbJSpCpCGKzqz3rAN4ixYbc63K68zJg9aisuAajg", + "/dns4/da-bridge-mocha-4-2.celestia-mocha.com/tcp/2121/p2p/12D3KooWK6wJkScGQniymdWtBwBuU36n6BRXp9rCDDUD6P5gJr3G", + "/dns4/da-full-1-mocha-4.celestia-mocha.com/tcp/2121/p2p/12D3KooWCUHPLqQXZzpTx1x3TAsdn3vYmTNDhzg66yG8hqoxGGN8", + "/dns4/da-full-2-mocha-4.celestia-mocha.com/tcp/2121/p2p/12D3KooWR6SHsXPkkvhCRn6vp1RqSefgaT1X1nMNvrVjU2o3GoYy", + }, + Private: {}, +} + +// parseAddrInfos converts strings to AddrInfos +func parseAddrInfos(addrs []string) ([]peer.AddrInfo, error) { + infos := make([]peer.AddrInfo, 0, len(addrs)) + for _, addr := range addrs { + maddr, err := ma.NewMultiaddr(addr) + if err != nil { + log.Errorw("parsing and validating addr", "addr", addr, "err", err) + return nil, err + } + + info, err := peer.AddrInfoFromP2pAddr(maddr) + if err != nil { + log.Errorw("parsing info from multiaddr", "maddr", maddr, "err", err) + return nil, err + } + infos = append(infos, *info) + } + + return infos, nil +} diff --git a/nodebuilder/p2p/cmd/p2p.go b/nodebuilder/p2p/cmd/p2p.go new file mode 100644 index 0000000000..8b44802947 --- /dev/null +++ b/nodebuilder/p2p/cmd/p2p.go @@ -0,0 +1,576 @@ +package cmd + +import ( + "github.com/libp2p/go-libp2p/core/metrics" + "github.com/libp2p/go-libp2p/core/network" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/core/protocol" + ma2 "github.com/multiformats/go-multiaddr" + "github.com/spf13/cobra" + + cmdnode "github.com/celestiaorg/celestia-node/cmd" +) + +type peerInfo struct { + ID string `json:"id"` + PeerAddr []string `json:"peer_addr"` +} + +func init() { + Cmd.AddCommand(infoCmd, + peersCmd, + peerInfoCmd, + connectCmd, + closePeerCmd, + connectednessCmd, + natStatusCmd, + blockPeerCmd, + unblockPeerCmd, + blockedPeersCmd, + protectCmd, + unprotectCmd, + protectedCmd, + bandwidthStatsCmd, + peerBandwidthCmd, + bandwidthForProtocolCmd, + pubsubPeersCmd, + ) +} + +var Cmd = &cobra.Command{ 
+ Use: "p2p [command]", + Short: "Allows interaction with the P2P Module via JSON-RPC", + Args: cobra.NoArgs, + PersistentPreRunE: cmdnode.InitClient, +} + +var infoCmd = &cobra.Command{ + Use: "info", + Short: "Gets the node's peer info (peer id and multiaddresses)", + Args: cobra.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + client, err := cmdnode.ParseClientFromCtx(cmd.Context()) + if err != nil { + return err + } + defer client.Close() + + info, err := client.P2P.Info(cmd.Context()) + + formatter := func(data interface{}) interface{} { + peerAdd := data.(peer.AddrInfo) + ma := make([]string, len(info.Addrs)) + for i := range peerAdd.Addrs { + ma[i] = peerAdd.Addrs[i].String() + } + + return peerInfo{ + ID: peerAdd.ID.String(), + PeerAddr: ma, + } + } + return cmdnode.PrintOutput(info, err, formatter) + }, +} + +var peersCmd = &cobra.Command{ + Use: "peers", + Short: "Lists the peers we are connected to", + Args: cobra.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + client, err := cmdnode.ParseClientFromCtx(cmd.Context()) + if err != nil { + return err + } + defer client.Close() + + result, err := client.P2P.Peers(cmd.Context()) + peers := make([]string, len(result)) + for i, peer := range result { + peers[i] = peer.String() + } + + formatter := func(data interface{}) interface{} { + conPeers := data.([]string) + return struct { + Peers []string `json:"peers"` + }{ + Peers: conPeers, + } + } + return cmdnode.PrintOutput(peers, err, formatter) + }, +} + +var peerInfoCmd = &cobra.Command{ + Use: "peer-info [param]", + Short: "Gets PeerInfo for a given peer", + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + client, err := cmdnode.ParseClientFromCtx(cmd.Context()) + if err != nil { + return err + } + defer client.Close() + + pid, err := peer.Decode(args[0]) + if err != nil { + return err + } + info, err := client.P2P.PeerInfo(cmd.Context(), pid) + formatter := func(data interface{}) interface{} { + peerAdd := data.(peer.AddrInfo) + ma := make([]string, len(info.Addrs)) + for i := range peerAdd.Addrs { + ma[i] = peerAdd.Addrs[i].String() + } + + return peerInfo{ + ID: peerAdd.ID.String(), + PeerAddr: ma, + } + } + return cmdnode.PrintOutput(info, err, formatter) + }, +} + +var connectCmd = &cobra.Command{ + Use: "connect [peer.ID, address]", + Short: "Establishes a connection with the given peer", + Args: cobra.ExactArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + client, err := cmdnode.ParseClientFromCtx(cmd.Context()) + if err != nil { + return err + } + defer client.Close() + + pid, err := peer.Decode(args[0]) + if err != nil { + return err + } + + ma, err := ma2.NewMultiaddr(args[1]) + if err != nil { + return err + } + + peerInfo := peer.AddrInfo{ + ID: pid, + Addrs: []ma2.Multiaddr{ma}, + } + + err = client.P2P.Connect(cmd.Context(), peerInfo) + if err != nil { + return cmdnode.PrintOutput(nil, err, nil) + } + return connectednessCmd.RunE(cmd, args) + }, +} + +var closePeerCmd = &cobra.Command{ + Use: "close-peer [peer.ID]", + Short: "Closes the connection with the given peer", + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + client, err := cmdnode.ParseClientFromCtx(cmd.Context()) + if err != nil { + return err + } + defer client.Close() + + pid, err := peer.Decode(args[0]) + if err != nil { + return err + } + + err = client.P2P.ClosePeer(cmd.Context(), pid) + if err != nil { + return cmdnode.PrintOutput(nil, err, nil) + } + return 
connectednessCmd.RunE(cmd, args) + }, +} + +var connectednessCmd = &cobra.Command{ + Use: "connectedness [peer.ID]", + Short: "Checks the connection state between current and given peers", + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + client, err := cmdnode.ParseClientFromCtx(cmd.Context()) + if err != nil { + return err + } + defer client.Close() + + pid, err := peer.Decode(args[0]) + if err != nil { + return err + } + + con, err := client.P2P.Connectedness(cmd.Context(), pid) + + formatter := func(data interface{}) interface{} { + conn := data.(network.Connectedness) + return struct { + ConnectionState string `json:"connection_state"` + }{ + ConnectionState: conn.String(), + } + } + return cmdnode.PrintOutput(con, err, formatter) + }, +} + +var natStatusCmd = &cobra.Command{ + Use: "nat-status", + Short: "Gets the current NAT status", + Args: cobra.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + client, err := cmdnode.ParseClientFromCtx(cmd.Context()) + if err != nil { + return err + } + defer client.Close() + + r, err := client.P2P.NATStatus(cmd.Context()) + + formatter := func(data interface{}) interface{} { + rr := data.(network.Reachability) + return struct { + Reachability string `json:"reachability"` + }{ + Reachability: rr.String(), + } + } + return cmdnode.PrintOutput(r, err, formatter) + }, +} + +var blockPeerCmd = &cobra.Command{ + Use: "block-peer [peer.ID]", + Short: "Blocks the given peer", + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + client, err := cmdnode.ParseClientFromCtx(cmd.Context()) + if err != nil { + return err + } + defer client.Close() + + pid, err := peer.Decode(args[0]) + if err != nil { + return err + } + + err = client.P2P.BlockPeer(cmd.Context(), pid) + + formatter := func(data interface{}) interface{} { + err, ok := data.(error) + blocked := false + if !ok { + blocked = true + } + return struct { + Blocked bool `json:"blocked"` + Peer string `json:"peer"` + Reason error `json:"reason,omitempty"` + }{ + Blocked: blocked, + Peer: args[0], + Reason: err, + } + } + return cmdnode.PrintOutput(err, nil, formatter) + }, +} + +var unblockPeerCmd = &cobra.Command{ + Use: "unblock-peer [peer.ID]", + Short: "Unblocks the given peer", + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + client, err := cmdnode.ParseClientFromCtx(cmd.Context()) + if err != nil { + return err + } + defer client.Close() + + pid, err := peer.Decode(args[0]) + if err != nil { + return err + } + + err = client.P2P.UnblockPeer(cmd.Context(), pid) + + formatter := func(data interface{}) interface{} { + err, ok := data.(error) + unblocked := false + if !ok { + unblocked = true + } + + return struct { + Unblocked bool `json:"unblocked"` + Peer string `json:"peer"` + Reason error `json:"reason,omitempty"` + }{ + Unblocked: unblocked, + Peer: args[0], + Reason: err, + } + } + return cmdnode.PrintOutput(err, nil, formatter) + }, +} + +var blockedPeersCmd = &cobra.Command{ + Use: "blocked-peers", + Short: "Lists the node's blocked peers", + Args: cobra.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + client, err := cmdnode.ParseClientFromCtx(cmd.Context()) + if err != nil { + return err + } + defer client.Close() + + list, err := client.P2P.ListBlockedPeers(cmd.Context()) + + pids := make([]string, len(list)) + for i, peer := range list { + pids[i] = peer.String() + } + + formatter := func(data interface{}) interface{} { + peers := data.([]string) + 
return struct { + Peers []string `json:"peers"` + }{ + Peers: peers, + } + } + return cmdnode.PrintOutput(pids, err, formatter) + }, +} + +var protectCmd = &cobra.Command{ + Use: "protect [peer.ID, tag]", + Short: "Protects the given peer from being pruned by the given tag", + Args: cobra.ExactArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + client, err := cmdnode.ParseClientFromCtx(cmd.Context()) + if err != nil { + return err + } + defer client.Close() + + pid, err := peer.Decode(args[0]) + if err != nil { + return err + } + + err = client.P2P.Protect(cmd.Context(), pid, args[1]) + + formatter := func(data interface{}) interface{} { + err, ok := data.(error) + protected := false + if !ok { + protected = true + } + return struct { + Protected bool `json:"protected"` + Peer string `json:"peer"` + Reason error `json:"reason,omitempty"` + }{ + Protected: protected, + Peer: args[0], + Reason: err, + } + } + return cmdnode.PrintOutput(err, nil, formatter) + }, +} + +var unprotectCmd = &cobra.Command{ + Use: "unprotect [peer.ID, tag]", + Short: "Removes protection from the given peer.", + Long: "Removes a protection that may have been placed on a peer, under the specified tag." + + "The return value indicates whether the peer continues to be protected after this call, by way of a different tag", + Args: cobra.ExactArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + client, err := cmdnode.ParseClientFromCtx(cmd.Context()) + if err != nil { + return err + } + defer client.Close() + + pid, err := peer.Decode(args[0]) + if err != nil { + return err + } + + _, err = client.P2P.Unprotect(cmd.Context(), pid, args[1]) + + formatter := func(data interface{}) interface{} { + err, ok := data.(error) + unprotected := false + if !ok { + unprotected = true + } + return struct { + Unprotected bool `json:"unprotected"` + Peer string `json:"peer"` + Reason error `json:"reason,omitempty"` + }{ + Unprotected: unprotected, + Peer: args[0], + Reason: err, + } + } + return cmdnode.PrintOutput(err, nil, formatter) + }, +} + +var protectedCmd = &cobra.Command{ + Use: "protected [peer.ID, tag]", + Short: "Ensures that a given peer is protected under a specific tag", + Args: cobra.ExactArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + client, err := cmdnode.ParseClientFromCtx(cmd.Context()) + if err != nil { + return err + } + defer client.Close() + + pid, err := peer.Decode(args[0]) + if err != nil { + return err + } + + result, err := client.P2P.IsProtected(cmd.Context(), pid, args[1]) + return cmdnode.PrintOutput(result, err, nil) + }, +} + +type bandwidthStats struct { + TotalIn int64 `json:"total_in"` + TotalOut int64 `json:"total_out"` + RateIn float64 `json:"rate_in"` + RateOut float64 `json:"rate_out"` +} + +var bandwidthStatsCmd = &cobra.Command{ + Use: "bandwidth-stats", + Short: "Provides metrics for current peer.", + Long: "Get stats struct with bandwidth metrics for all data sent/" + + "received by the local peer, regardless of protocol or remote peer IDs", + Args: cobra.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + client, err := cmdnode.ParseClientFromCtx(cmd.Context()) + if err != nil { + return err + } + defer client.Close() + + result, err := client.P2P.BandwidthStats(cmd.Context()) + + formatter := func(data interface{}) interface{} { + stats := data.(metrics.Stats) + return bandwidthStats{ + TotalIn: stats.TotalIn, + TotalOut: stats.TotalOut, + RateIn: stats.RateIn, + RateOut: stats.RateOut, + } + } + return 
cmdnode.PrintOutput(result, err, formatter) + }, +} + +var peerBandwidthCmd = &cobra.Command{ + Use: "peer-bandwidth [peer.ID]", + Short: "Gets stats struct with bandwidth metrics associated with the given peer.ID", + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + client, err := cmdnode.ParseClientFromCtx(cmd.Context()) + if err != nil { + return err + } + defer client.Close() + + pid, err := peer.Decode(args[0]) + if err != nil { + return err + } + + result, err := client.P2P.BandwidthForPeer(cmd.Context(), pid) + + formatter := func(data interface{}) interface{} { + stats := data.(metrics.Stats) + return bandwidthStats{ + TotalIn: stats.TotalIn, + TotalOut: stats.TotalOut, + RateIn: stats.RateIn, + RateOut: stats.RateOut, + } + } + return cmdnode.PrintOutput(result, err, formatter) + }, +} + +var bandwidthForProtocolCmd = &cobra.Command{ + Use: "protocol-bandwidth [protocol.ID]", + Short: "Gets stats struct with bandwidth metrics associated with the given protocol.ID", + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + client, err := cmdnode.ParseClientFromCtx(cmd.Context()) + if err != nil { + return err + } + defer client.Close() + + result, err := client.P2P.BandwidthForProtocol(cmd.Context(), protocol.ID(args[0])) + + formatter := func(data interface{}) interface{} { + stats := data.(metrics.Stats) + return bandwidthStats{ + TotalIn: stats.TotalIn, + TotalOut: stats.TotalOut, + RateIn: stats.RateIn, + RateOut: stats.RateOut, + } + } + return cmdnode.PrintOutput(result, err, formatter) + }, +} + +var pubsubPeersCmd = &cobra.Command{ + Use: "pubsub-peers [topic]", + Short: "Lists the peers we are connected to in the given topic", + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + client, err := cmdnode.ParseClientFromCtx(cmd.Context()) + if err != nil { + return err + } + defer client.Close() + + result, err := client.P2P.PubSubPeers(cmd.Context(), args[0]) + peers := make([]string, len(result)) + + for i, peer := range result { + peers[i] = peer.String() + } + + formatter := func(data interface{}) interface{} { + conPeers := data.([]string) + return struct { + Peers []string `json:"peers"` + }{ + Peers: conPeers, + } + } + return cmdnode.PrintOutput(peers, err, formatter) + }, +} diff --git a/node/p2p/p2p.go b/nodebuilder/p2p/config.go similarity index 59% rename from node/p2p/p2p.go rename to nodebuilder/p2p/config.go index 346ec52c8c..99f3fe8879 100644 --- a/node/p2p/p2p.go +++ b/nodebuilder/p2p/config.go @@ -4,9 +4,10 @@ import ( "fmt" "time" - "github.com/libp2p/go-libp2p-core/peer" + "github.com/libp2p/go-libp2p/core/peer" ma "github.com/multiformats/go-multiaddr" - "go.uber.org/fx" + + "github.com/celestiaorg/celestia-node/nodebuilder/node" ) const defaultRoutingRefreshPeriod = time.Minute @@ -17,12 +18,9 @@ type Config struct { ListenAddresses []string // AnnounceAddresses - Addresses to be announced/advertised for peers to connect to AnnounceAddresses []string - // NoAnnounceAddresses - Addresses the P2P subsystem may know about, but that should not be announced/advertised, - // as undialable from WAN + // NoAnnounceAddresses - Addresses the P2P subsystem may know about, but that should not be + // announced/advertised, as undialable from WAN NoAnnounceAddresses []string - // TODO(@Wondertan): This should be a build-time parameter. See https://github.com/celestiaorg/celestia-node/issues/63 - // Bootstrapper is flag telling this node is a bootstrapper. 
- Bootstrapper bool // MutualPeers are peers which have a bidirectional peering agreement with the configured node. // Connections with those peers are protected from being trimmed, dropped or negatively scored. // NOTE: Any two peers must bidirectionally configure each other on their MutualPeers field. @@ -31,56 +29,43 @@ type Config struct { // This is enabled by default for Bootstrappers. PeerExchange bool // ConnManager is a configuration tuple for ConnectionManager. - ConnManager ConnManagerConfig + ConnManager connManagerConfig RoutingTableRefreshPeriod time.Duration + + // Allowlist for IPColocation PubSub parameter, a list of string CIDRs + IPColocationWhitelist []string } // DefaultConfig returns default configuration for P2P subsystem. -func DefaultConfig() Config { +func DefaultConfig(tp node.Type) Config { return Config{ ListenAddresses: []string{ - "/ip4/0.0.0.0/udp/2121/quic", - "/ip6/::/udp/2121/quic", + "/ip4/0.0.0.0/udp/2121/quic-v1/webtransport", + "/ip6/::/udp/2121/quic-v1/webtransport", + "/ip4/0.0.0.0/udp/2121/quic-v1", + "/ip6/::/udp/2121/quic-v1", "/ip4/0.0.0.0/tcp/2121", "/ip6/::/tcp/2121", }, AnnounceAddresses: []string{}, NoAnnounceAddresses: []string{ - "/ip4/0.0.0.0/udp/2121/quic", - "/ip4/127.0.0.1/tcp/2121/quic", - "/ip6/::/udp/2121/quic", + "/ip4/127.0.0.1/udp/2121/quic-v1/webtransport", + "/ip4/0.0.0.0/udp/2121/quic-v1/webtransport", + "/ip6/::/udp/2121/quic-v1/webtransport", + "/ip4/0.0.0.0/udp/2121/quic-v1", + "/ip4/127.0.0.1/udp/2121/quic-v1", + "/ip6/::/udp/2121/quic-v1", "/ip4/0.0.0.0/tcp/2121", "/ip4/127.0.0.1/tcp/2121", "/ip6/::/tcp/2121", }, MutualPeers: []string{}, - Bootstrapper: false, - PeerExchange: false, - ConnManager: DefaultConnManagerConfig(), + PeerExchange: tp == node.Bridge || tp == node.Full, + ConnManager: defaultConnManagerConfig(tp), RoutingTableRefreshPeriod: defaultRoutingRefreshPeriod, } } -// Components collects all the components and services related to p2p. -func Components(cfg Config) fx.Option { - return fx.Options( - fx.Provide(Key), - fx.Provide(ID), - fx.Provide(PeerStore), - fx.Provide(ConnectionManager(cfg)), - fx.Provide(ConnectionGater), - fx.Provide(Host(cfg)), - fx.Provide(RoutedHost), - fx.Provide(PubSub(cfg)), - fx.Provide(DataExchange(cfg)), - fx.Provide(BlockService), - fx.Provide(PeerRouting(cfg)), - fx.Provide(ContentRouting), - fx.Provide(AddrsFactory(cfg.AnnounceAddresses, cfg.NoAnnounceAddresses)), - fx.Invoke(Listen(cfg.ListenAddresses)), - ) -} - func (cfg *Config) mutualPeers() (_ []peer.AddrInfo, err error) { maddrs := make([]ma.Multiaddr, len(cfg.MutualPeers)) for i, addr := range cfg.MutualPeers { @@ -92,3 +77,12 @@ func (cfg *Config) mutualPeers() (_ []peer.AddrInfo, err error) { return peer.AddrInfosFromP2pAddrs(maddrs...) } + +// Validate performs basic validation of the config. +func (cfg *Config) Validate() error { + if cfg.RoutingTableRefreshPeriod <= 0 { + cfg.RoutingTableRefreshPeriod = defaultRoutingRefreshPeriod + log.Warnf("routingTableRefreshPeriod is not valid. restoring to default value: %d", cfg.RoutingTableRefreshPeriod) + } + return nil +} diff --git a/nodebuilder/p2p/flags.go b/nodebuilder/p2p/flags.go new file mode 100644 index 0000000000..8e7c0f8bc0 --- /dev/null +++ b/nodebuilder/p2p/flags.go @@ -0,0 +1,126 @@ +package p2p + +import ( + "fmt" + "os" + "strings" + + "github.com/multiformats/go-multiaddr" + "github.com/spf13/cobra" + flag "github.com/spf13/pflag" +) + +// EnvCustomNetwork is the environment variable name used for setting a custom network. 
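For illustration, this is the shape of the value the variable carries (the network ID, hash, and bootstrapper below are hypothetical placeholders; the exact format is parsed by parseNetworkFromEnv further down):

```go
// Hypothetical example only: "devnet-1" is a made-up network ID, and the hash
// and bootstrapper fields are optional placeholders, not real chain data.
os.Setenv("CELESTIA_CUSTOM", "devnet-1:<genesis_hash>:<bootstrapper_multiaddr>")
```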
+const EnvCustomNetwork = "CELESTIA_CUSTOM"
+
+const (
+	networkFlag = "p2p.network"
+	mutualFlag  = "p2p.mutual"
+)
+
+// Flags gives a set of p2p flags.
+func Flags() *flag.FlagSet {
+	flags := &flag.FlagSet{}
+
+	flags.StringSlice(
+		mutualFlag,
+		nil,
+		`Comma-separated multiaddresses of mutual peers to keep a prioritized connection with.
+Such a connection is immune to peer scoring slashing and connection module trimming.
+Peers must bidirectionally point to each other. (Format: multiformats.io/multiaddr)
+`,
+	)
+	flags.String(
+		networkFlag,
+		DefaultNetwork.String(),
+		fmt.Sprintf("The name of the network to connect to, e.g. %s. Must be passed on "+
+			"both init and start to take effect. Assumes mainnet (%s) unless otherwise specified.",
+			listProvidedNetworks(),
+			DefaultNetwork.String()),
+	)
+
+	return flags
+}
+
+// ParseFlags parses P2P flags from the given cmd and saves them to the passed config.
+func ParseFlags(
+	cmd *cobra.Command,
+	cfg *Config,
+) error {
+	mutualPeers, err := cmd.Flags().GetStringSlice(mutualFlag)
+	if err != nil {
+		return err
+	}
+
+	for _, peer := range mutualPeers {
+		_, err = multiaddr.NewMultiaddr(peer)
+		if err != nil {
+			return fmt.Errorf("cmd: while parsing '%s': %w", mutualFlag, err)
+		}
+	}
+
+	if len(mutualPeers) != 0 {
+		cfg.MutualPeers = mutualPeers
+	}
+	return nil
+}
+
+// ParseNetwork tries to parse the network from the flags and environment,
+// and returns either the parsed network or the build's default network.
+func ParseNetwork(cmd *cobra.Command) (Network, error) {
+	if envNetwork, err := parseNetworkFromEnv(); envNetwork != "" {
+		return envNetwork, err
+	}
+	parsed := cmd.Flag(networkFlag).Value.String()
+	switch parsed {
+	case "":
+		return "", fmt.Errorf("no network provided, allowed values: %s", listProvidedNetworks())
+
+	case DefaultNetwork.String():
+		return DefaultNetwork, nil
+
+	default:
+		if net, err := Network(parsed).Validate(); err == nil {
+			return net, nil
+		}
+		return "", fmt.Errorf("invalid network specified: %s, allowed values: %s", parsed, listProvidedNetworks())
+	}
+}
+
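For orientation, a minimal sketch (not part of the patch) of wiring these helpers into a cobra command; the mutual peer address is borrowed from the Mainnet bootstrap list above purely for illustration:

```go
package main

import (
	"fmt"

	"github.com/spf13/cobra"

	"github.com/celestiaorg/celestia-node/nodebuilder/node"
	"github.com/celestiaorg/celestia-node/nodebuilder/p2p"
)

func main() {
	cfg := p2p.DefaultConfig(node.Light)

	cmd := &cobra.Command{Use: "demo"}
	cmd.Flags().AddFlagSet(p2p.Flags())

	// Any valid multiaddr ending in /p2p/<peer.ID> passes ParseFlags validation;
	// this one is taken from the bootstrap list above, purely for illustration.
	addr := "/dns4/da-bridge-1.celestia-bootstrap.net/tcp/2121/p2p/" +
		"12D3KooWSqZaLcn5Guypo2mrHr297YPJnV8KMEMXNjs3qAS8msw8"
	if err := cmd.Flags().Set("p2p.mutual", addr); err != nil {
		panic(err)
	}

	if err := p2p.ParseFlags(cmd, &cfg); err != nil {
		panic(err)
	}

	net, err := p2p.ParseNetwork(cmd) // defaults to Mainnet ("celestia")
	fmt.Println(net, err, cfg.MutualPeers)
}
```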
+// parseNetworkFromEnv tries to parse the network from the environment.
+// If no network is set, it returns an empty string.
+func parseNetworkFromEnv() (Network, error) {
+	var network Network
+	// check if the custom network option is set
+	// format: CELESTIA_CUSTOM=<netID>:<genesisHash>:<bootstrapPeerList>
+	if custom, ok := os.LookupEnv(EnvCustomNetwork); ok {
+		fmt.Print("\n\nWARNING: Celestia custom network specified. Only use this option if the node is " +
+			"freshly created and initialized.\n**DO NOT** run a custom network over an already-existing node " +
+			"store!\n\n")
+		// ensure at least the custom network ID is set
+		params := strings.Split(custom, ":")
+		if len(params) == 0 {
+			return network, fmt.Errorf("params: must provide at least <network_ID> to use a custom network")
+		}
+		netID := params[0]
+		network = Network(netID)
+		networksList[network] = struct{}{}
+		// check if a genesis hash was provided and register it
+		if len(params) >= 2 {
+			genHash := params[1]
+			genesisList[network] = strings.ToUpper(genHash)
+		}
+		// check if bootstrappers were provided and register them
+		if len(params) == 3 {
+			bootstrappers := params[2]
+			// validate bootstrappers
+			bs := strings.Split(bootstrappers, ",")
+			_, err := parseAddrInfos(bs)
+			if err != nil {
+				return DefaultNetwork, fmt.Errorf("params: env %s: contains invalid multiaddress", EnvCustomNetwork)
+			}
+			bootstrapList[Network(netID)] = bs
+		}
+	}
+	return network, nil
+}
diff --git a/nodebuilder/p2p/flags_test.go b/nodebuilder/p2p/flags_test.go
new file mode 100644
index 0000000000..cfbb5fed5d
--- /dev/null
+++ b/nodebuilder/p2p/flags_test.go
@@ -0,0 +1,120 @@
+package p2p
+
+import (
+	"testing"
+
+	"github.com/spf13/cobra"
+	flag "github.com/spf13/pflag"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+// TestParseNetwork_matchesByAlias checks to ensure flag parsing
+// correctly matches the network's alias to the network name.
+func TestParseNetwork_matchesByAlias(t *testing.T) {
+	cmd := createCmdWithNetworkFlag()
+
+	err := cmd.Flags().Set(networkFlag, "arabica")
+	require.NoError(t, err)
+
+	net, err := ParseNetwork(cmd)
+	require.NoError(t, err)
+	assert.Equal(t, Arabica, net)
+}
+
+// TestParseNetwork_matchesByValue checks to ensure flag parsing
+// correctly matches the network's actual value to the network name.
+func TestParseNetwork_matchesByValue(t *testing.T) {
+	cmd := createCmdWithNetworkFlag()
+
+	err := cmd.Flags().Set(networkFlag, string(Arabica))
+	require.NoError(t, err)
+
+	net, err := ParseNetwork(cmd)
+	require.NoError(t, err)
+	assert.Equal(t, Arabica, net)
+}
+
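A hedged sketch of a test exercising the full CELESTIA_CUSTOM triple end to end; the network ID "devnet" and genesis hash are hypothetical, and the bootstrapper is again borrowed from the bootstrap list above:

```go
package p2p

import (
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// Sketch only: exercises parseNetworkFromEnv with all three fields set.
func TestParseNetwork_customTripleSketch(t *testing.T) {
	addr := "/dns4/da-bridge-1.celestia-bootstrap.net/tcp/2121/p2p/" +
		"12D3KooWSqZaLcn5Guypo2mrHr297YPJnV8KMEMXNjs3qAS8msw8"
	t.Setenv(EnvCustomNetwork, "devnet:ABCD1234:"+addr)

	net, err := parseNetworkFromEnv()
	require.NoError(t, err)
	assert.Equal(t, Network("devnet"), net)
	// the genesis hash is registered upper-cased, bootstrappers verbatim
	assert.Equal(t, "ABCD1234", genesisList[net])
	assert.Equal(t, []string{addr}, bootstrapList[net])
}
```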
+// TestParseNetwork_parsesFromEnv checks to ensure flag parsing
+// correctly fetches the value from the environment variable.
+func TestParseNetwork_parsesFromEnv(t *testing.T) {
+	cmd := createCmdWithNetworkFlag()
+
+	t.Setenv(EnvCustomNetwork, "testing")
+
+	net, err := ParseNetwork(cmd)
+	require.NoError(t, err)
+	assert.Equal(t, Network("testing"), net)
+}
+
+func TestParsedNetwork_invalidNetwork(t *testing.T) {
+	cmd := createCmdWithNetworkFlag()
+
+	err := cmd.Flags().Set(networkFlag, "invalid")
+	require.NoError(t, err)
+
+	net, err := ParseNetwork(cmd)
+	assert.Error(t, err)
+	assert.Equal(t, Network(""), net)
+}
+
+func createCmdWithNetworkFlag() *cobra.Command {
+	cmd := &cobra.Command{}
+	flags := &flag.FlagSet{}
+	flags.String(
+		networkFlag,
+		"",
+		"",
+	)
+	cmd.Flags().AddFlagSet(flags)
+	return cmd
+}
+
+// Set an empty network flag and ensure an error is returned
+func TestParseNetwork_emptyFlag(t *testing.T) {
+	cmd := createCmdWithNetworkFlag()
+
+	err := cmd.Flags().Set(networkFlag, "")
+	require.NoError(t, err)
+
+	_, err = ParseNetwork(cmd)
+	assert.Error(t, err)
+}
+
+// Set an empty environment variable and an empty network flag and ensure an error is returned
+func TestParseNetwork_emptyEnvEmptyFlag(t *testing.T) {
+	t.Setenv(EnvCustomNetwork, "")
+
+	cmd := createCmdWithNetworkFlag()
+	err := cmd.Flags().Set(networkFlag, "")
+	require.NoError(t, err)
+
+	_, err = ParseNetwork(cmd)
+	require.Error(t, err)
+}
+
+// The environment variable should take precedence over an empty flag
+func TestParseNetwork_envOverridesEmptyFlag(t *testing.T) {
+	t.Setenv(EnvCustomNetwork, "custom-network")
+
+	cmd := createCmdWithNetworkFlag()
+	err := cmd.Flags().Set(networkFlag, "")
+	require.NoError(t, err)
+
+	network, err := ParseNetwork(cmd)
+	require.NoError(t, err)
+	assert.Equal(t, Network("custom-network"), network)
+}
+
+// The environment variable should override an explicitly set flag
+func TestParseNetwork_envOverridesFlag(t *testing.T) {
+	t.Setenv(EnvCustomNetwork, "custom-network")
+
+	cmd := createCmdWithNetworkFlag()
+	err := cmd.Flags().Set(networkFlag, string(Mocha))
+	require.NoError(t, err)
+
+	network, err := ParseNetwork(cmd)
+	require.NoError(t, err)
+	assert.Equal(t, Network("custom-network"), network)
+}
diff --git a/params/genesis.go b/nodebuilder/p2p/genesis.go
similarity index 60%
rename from params/genesis.go
rename to nodebuilder/p2p/genesis.go
index f0255e50a3..e35ca9bf29 100644
--- a/params/genesis.go
+++ b/nodebuilder/p2p/genesis.go
@@ -1,12 +1,15 @@
-package params
+package p2p
 
-import "fmt"
+import (
+	"fmt"
+)
 
 // GenesisFor reports a hash of a genesis block for a given network.
 // Genesis is strictly defined and can't be modified.
-// To run a custom genesis private network use CELESTIA_PRIVATE_GENESIS env var.
 func GenesisFor(net Network) (string, error) {
-	if err := net.Validate(); err != nil {
+	var err error
+	net, err = net.Validate()
+	if err != nil {
 		return "", err
 	}
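A usage sketch for GenesisFor, assuming the alias resolution defined in network.go further below:

```go
package main

import (
	"fmt"

	"github.com/celestiaorg/celestia-node/nodebuilder/p2p"
)

func main() {
	// "mainnet" is an alias; Validate (called inside GenesisFor) resolves it
	// to the canonical "celestia" network before the lookup.
	hash, err := p2p.GenesisFor("mainnet")
	if err != nil {
		panic(err)
	}
	fmt.Println(hash) // the Mainnet hash from genesisList below
}
```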
var genesisList = map[Network]string{ - Mamaki: "41BBFD05779719E826C4D68C4CCBBC84B2B761EB52BC04CFDE0FF8603C9AA3CA", + Mainnet: "6BE39EFD10BA412A9DB5288488303F5DD32CF386707A5BEF33617F4C43301872", + Arabica: "27122593765E07329BC348E8D16E92DCB4C75B34CCCB35C640FD7A4484D4C711", + Mocha: "B93BBE20A0FBFDF955811B6420F8433904664D45DB4BF51022BE4200C1A1680D", Private: "", } diff --git a/nodebuilder/p2p/host.go b/nodebuilder/p2p/host.go new file mode 100644 index 0000000000..e55cb65d1f --- /dev/null +++ b/nodebuilder/p2p/host.go @@ -0,0 +1,92 @@ +package p2p + +import ( + "context" + "fmt" + + "github.com/libp2p/go-libp2p" + p2pconfig "github.com/libp2p/go-libp2p/config" + "github.com/libp2p/go-libp2p/core/connmgr" + "github.com/libp2p/go-libp2p/core/crypto" + hst "github.com/libp2p/go-libp2p/core/host" + "github.com/libp2p/go-libp2p/core/metrics" + "github.com/libp2p/go-libp2p/core/network" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/core/peerstore" + "github.com/libp2p/go-libp2p/core/routing" + routedhost "github.com/libp2p/go-libp2p/p2p/host/routed" + "github.com/libp2p/go-libp2p/p2p/net/conngater" + "github.com/prometheus/client_golang/prometheus" + "go.uber.org/fx" + + "github.com/celestiaorg/celestia-node/nodebuilder/node" +) + +// routedHost constructs a wrapped Host that may fallback to address discovery, +// if any top-level operation on the Host is provided with PeerID(Hash(PbK)) only. +func routedHost(base HostBase, r routing.PeerRouting) hst.Host { + return routedhost.Wrap(base, r) +} + +// host returns constructor for Host. +func host(params hostParams) (HostBase, error) { + opts := []libp2p.Option{ + libp2p.NoListenAddrs, // do not listen automatically + libp2p.AddrsFactory(params.AddrF), + libp2p.Identity(params.Key), + libp2p.Peerstore(params.PStore), + libp2p.ConnectionManager(params.ConnMngr), + libp2p.ConnectionGater(params.ConnGater), + libp2p.UserAgent(fmt.Sprintf("celestia-%s", params.Net)), + libp2p.NATPortMap(), // enables upnp + libp2p.DisableRelay(), + libp2p.BandwidthReporter(params.Bandwidth), + libp2p.ResourceManager(params.ResourceManager), + // to clearly define what defaults we rely upon + libp2p.DefaultSecurity, + libp2p.DefaultTransports, + libp2p.DefaultMuxers, + } + + if params.Registry != nil { + opts = append(opts, libp2p.PrometheusRegisterer(params.Registry)) + } else { + opts = append(opts, libp2p.DisableMetrics()) + } + + // All node types except light (bridge, full) will enable NATService + if params.Tp != node.Light { + opts = append(opts, libp2p.EnableNATService()) + } + + h, err := libp2p.NewWithoutDefaults(opts...) 
+	if err != nil {
+		return nil, err
+	}
+
+	params.Lc.Append(fx.Hook{OnStop: func(context.Context) error {
+		return h.Close()
+	}})
+
+	return h, nil
+}
+
+type HostBase hst.Host
+
+type hostParams struct {
+	fx.In
+
+	Net             Network
+	Lc              fx.Lifecycle
+	ID              peer.ID
+	Key             crypto.PrivKey
+	AddrF           p2pconfig.AddrsFactory
+	PStore          peerstore.Peerstore
+	ConnMngr        connmgr.ConnManager
+	ConnGater       *conngater.BasicConnectionGater
+	Bandwidth       *metrics.BandwidthCounter
+	ResourceManager network.ResourceManager
+	Registry        prometheus.Registerer `optional:"true"`
+
+	Tp node.Type
+}
diff --git a/node/p2p/identity.go b/nodebuilder/p2p/identity.go
similarity index 84%
rename from node/p2p/identity.go
rename to nodebuilder/p2p/identity.go
index 92e18b0591..a6f139834c 100644
--- a/node/p2p/identity.go
+++ b/nodebuilder/p2p/identity.go
@@ -4,9 +4,9 @@
 import (
 	"crypto/rand"
 	"errors"
 
-	"github.com/libp2p/go-libp2p-core/crypto"
-	"github.com/libp2p/go-libp2p-core/peer"
-	"github.com/libp2p/go-libp2p-core/peerstore"
+	"github.com/libp2p/go-libp2p/core/crypto"
+	"github.com/libp2p/go-libp2p/core/peer"
+	"github.com/libp2p/go-libp2p/core/peerstore"
 
 	"github.com/celestiaorg/celestia-node/libs/keystore"
 )
@@ -42,7 +42,7 @@
 	return crypto.UnmarshalPrivateKey(ksPriv.Body)
 }
 
-func ID(key crypto.PrivKey, pstore peerstore.Peerstore) (peer.ID, error) {
+func id(key crypto.PrivKey, pstore peerstore.Peerstore) (peer.ID, error) {
 	id, err := peer.IDFromPrivateKey(key)
 	if err != nil {
 		return "", err
diff --git a/nodebuilder/p2p/metrics.go b/nodebuilder/p2p/metrics.go
new file mode 100644
index 0000000000..095c30d9b7
--- /dev/null
+++ b/nodebuilder/p2p/metrics.go
@@ -0,0 +1,63 @@
+package p2p
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+	"time"
+
+	"github.com/prometheus/client_golang/prometheus"
+	"github.com/prometheus/client_golang/prometheus/promhttp"
+	"go.uber.org/fx"
+)
+
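A sketch of consuming WithMetrics, mirroring TestModuleBuild_WithMetrics in module_test.go further below; the endpoint address comes from the constants defined next:

```go
package p2p

import (
	"testing"

	"go.uber.org/fx/fxtest"

	"github.com/celestiaorg/celestia-node/nodebuilder/node"
)

// Assumes the testModule helper from module_test.go (shown further below).
func TestWithMetricsSketch(t *testing.T) {
	app := fxtest.New(t, testModule(node.Light), WithMetrics())
	app.RequireStart()
	// libp2p metrics are now served at http://localhost:8890/metrics
	app.RequireStop()
}
```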
+// WithMetrics sets up native libp2p metrics.
+func WithMetrics() fx.Option {
+	return fx.Options(
+		fx.Provide(resourceManagerOpt(traceReporter)),
+		fx.Provide(prometheusRegisterer),
+		fx.Invoke(prometheusMetrics),
+	)
+}
+
+const (
+	promAgentEndpoint = "/metrics"
+	promAgentPort     = "8890"
+)
+
+// prometheusMetrics serves native libp2p metrics over a Prometheus HTTP endpoint.
+func prometheusMetrics(lifecycle fx.Lifecycle, registerer prometheus.Registerer) error {
+	registry := registerer.(*prometheus.Registry)
+
+	mux := http.NewServeMux()
+	handler := promhttp.HandlerFor(registry, promhttp.HandlerOpts{Registry: registerer})
+	mux.Handle(promAgentEndpoint, handler)
+
+	// TODO(@Wondertan): Unify all the servers into one (See #2007)
+	promHTTPServer := &http.Server{
+		Addr:              fmt.Sprintf(":%s", promAgentPort),
+		Handler:           mux,
+		ReadHeaderTimeout: 10 * time.Second,
+	}
+
+	lifecycle.Append(fx.Hook{
+		OnStart: func(ctx context.Context) error {
+			go func() {
+				if err := promHTTPServer.ListenAndServe(); err != nil {
+					log.Errorf("Error starting Prometheus metrics exporter http server: %s", err)
+				}
+			}()
+
+			log.Infof("Prometheus agent started on :%s%s", promAgentPort, promAgentEndpoint)
+			return nil
+		},
+		OnStop: func(ctx context.Context) error {
+			return promHTTPServer.Shutdown(ctx)
+		},
+	})
+	return nil
+}
+
+func prometheusRegisterer() prometheus.Registerer {
+	return prometheus.NewRegistry()
+}
diff --git a/nodebuilder/p2p/misc.go b/nodebuilder/p2p/misc.go
new file mode 100644
index 0000000000..0d842e0601
--- /dev/null
+++ b/nodebuilder/p2p/misc.go
@@ -0,0 +1,78 @@
+package p2p
+
+import (
+	"context"
+	"time"
+
+	"github.com/ipfs/go-datastore"
+	connmgri "github.com/libp2p/go-libp2p/core/connmgr"
+	"github.com/libp2p/go-libp2p/core/peerstore"
+	"github.com/libp2p/go-libp2p/p2p/host/peerstore/pstoreds" //nolint:staticcheck
+	"github.com/libp2p/go-libp2p/p2p/net/conngater"
+	"github.com/libp2p/go-libp2p/p2p/net/connmgr"
+
+	"github.com/celestiaorg/celestia-node/nodebuilder/node"
+)
+
+// connManagerConfig configures connection manager.
+type connManagerConfig struct {
+	// Low and High are watermarks governing the number of connections that'll be maintained.
+	Low, High int
+	// GracePeriod is the amount of time a newly opened connection is given before it becomes subject
+	// to pruning.
+	GracePeriod time.Duration
+}
+
+// defaultConnManagerConfig returns defaults for ConnManagerConfig.
+func defaultConnManagerConfig(tp node.Type) connManagerConfig {
+	switch tp {
+	case node.Light:
+		return connManagerConfig{
+			Low:         50,
+			High:        100,
+			GracePeriod: time.Minute,
+		}
+	case node.Bridge, node.Full:
+		return connManagerConfig{
+			Low:         800,
+			High:        1000,
+			GracePeriod: time.Minute,
+		}
+	default:
+		panic("unknown node type")
+	}
+}
+
+// connectionManager constructs a ConnManager that protects mutual and bootstrap peers from pruning.
+func connectionManager(cfg Config, bpeers Bootstrappers) (connmgri.ConnManager, error) {
+	fpeers, err := cfg.mutualPeers()
+	if err != nil {
+		return nil, err
+	}
+	cm, err := connmgr.NewConnManager(
+		cfg.ConnManager.Low,
+		cfg.ConnManager.High,
+		connmgr.WithGracePeriod(cfg.ConnManager.GracePeriod),
+	)
+	if err != nil {
+		return nil, err
+	}
+	for _, info := range fpeers {
+		cm.Protect(info.ID, "protected-mutual")
+	}
+	for _, info := range bpeers {
+		cm.Protect(info.ID, "protected-bootstrap")
+	}
+
+	return cm, nil
+}
+
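As a hedged illustration of the protection tags applied by connectionManager above, the same libp2p connection manager can be driven directly:

```go
package main

import (
	"fmt"
	"time"

	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/libp2p/go-libp2p/p2p/net/connmgr"
)

func main() {
	// Light-node watermarks from defaultConnManagerConfig above.
	cm, err := connmgr.NewConnManager(50, 100, connmgr.WithGracePeriod(time.Minute))
	if err != nil {
		panic(err)
	}

	var pid peer.ID // placeholder ID; real ones come from connected peers
	cm.Protect(pid, "protected-mutual")
	fmt.Println(cm.IsProtected(pid, "protected-mutual")) // true
}
```

+// connectionGater constructs a BasicConnectionGater.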
+func connectionGater(ds datastore.Batching) (*conngater.BasicConnectionGater, error) { + return conngater.NewBasicConnectionGater(ds) +} + +// peerStore constructs an on-disk PeerStore. +func peerStore(ctx context.Context, ds datastore.Batching) (peerstore.Peerstore, error) { + return pstoreds.NewPeerstore(ctx, ds, pstoreds.DefaultOpts()) +} diff --git a/nodebuilder/p2p/mocks/api.go b/nodebuilder/p2p/mocks/api.go new file mode 100644 index 0000000000..aa5083199f --- /dev/null +++ b/nodebuilder/p2p/mocks/api.go @@ -0,0 +1,305 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/celestiaorg/celestia-node/nodebuilder/p2p (interfaces: Module) + +// Package mocks is a generated GoMock package. +package mocks + +import ( + context "context" + reflect "reflect" + + gomock "github.com/golang/mock/gomock" + metrics "github.com/libp2p/go-libp2p/core/metrics" + network "github.com/libp2p/go-libp2p/core/network" + peer "github.com/libp2p/go-libp2p/core/peer" + protocol "github.com/libp2p/go-libp2p/core/protocol" + rcmgr "github.com/libp2p/go-libp2p/p2p/host/resource-manager" +) + +// MockModule is a mock of Module interface. +type MockModule struct { + ctrl *gomock.Controller + recorder *MockModuleMockRecorder +} + +// MockModuleMockRecorder is the mock recorder for MockModule. +type MockModuleMockRecorder struct { + mock *MockModule +} + +// NewMockModule creates a new mock instance. +func NewMockModule(ctrl *gomock.Controller) *MockModule { + mock := &MockModule{ctrl: ctrl} + mock.recorder = &MockModuleMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockModule) EXPECT() *MockModuleMockRecorder { + return m.recorder +} + +// BandwidthForPeer mocks base method. +func (m *MockModule) BandwidthForPeer(arg0 context.Context, arg1 peer.ID) (metrics.Stats, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "BandwidthForPeer", arg0, arg1) + ret0, _ := ret[0].(metrics.Stats) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// BandwidthForPeer indicates an expected call of BandwidthForPeer. +func (mr *MockModuleMockRecorder) BandwidthForPeer(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BandwidthForPeer", reflect.TypeOf((*MockModule)(nil).BandwidthForPeer), arg0, arg1) +} + +// BandwidthForProtocol mocks base method. +func (m *MockModule) BandwidthForProtocol(arg0 context.Context, arg1 protocol.ID) (metrics.Stats, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "BandwidthForProtocol", arg0, arg1) + ret0, _ := ret[0].(metrics.Stats) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// BandwidthForProtocol indicates an expected call of BandwidthForProtocol. +func (mr *MockModuleMockRecorder) BandwidthForProtocol(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BandwidthForProtocol", reflect.TypeOf((*MockModule)(nil).BandwidthForProtocol), arg0, arg1) +} + +// BandwidthStats mocks base method. +func (m *MockModule) BandwidthStats(arg0 context.Context) (metrics.Stats, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "BandwidthStats", arg0) + ret0, _ := ret[0].(metrics.Stats) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// BandwidthStats indicates an expected call of BandwidthStats. 
+func (mr *MockModuleMockRecorder) BandwidthStats(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BandwidthStats", reflect.TypeOf((*MockModule)(nil).BandwidthStats), arg0) +} + +// BlockPeer mocks base method. +func (m *MockModule) BlockPeer(arg0 context.Context, arg1 peer.ID) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "BlockPeer", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// BlockPeer indicates an expected call of BlockPeer. +func (mr *MockModuleMockRecorder) BlockPeer(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BlockPeer", reflect.TypeOf((*MockModule)(nil).BlockPeer), arg0, arg1) +} + +// ClosePeer mocks base method. +func (m *MockModule) ClosePeer(arg0 context.Context, arg1 peer.ID) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ClosePeer", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// ClosePeer indicates an expected call of ClosePeer. +func (mr *MockModuleMockRecorder) ClosePeer(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClosePeer", reflect.TypeOf((*MockModule)(nil).ClosePeer), arg0, arg1) +} + +// Connect mocks base method. +func (m *MockModule) Connect(arg0 context.Context, arg1 peer.AddrInfo) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Connect", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// Connect indicates an expected call of Connect. +func (mr *MockModuleMockRecorder) Connect(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Connect", reflect.TypeOf((*MockModule)(nil).Connect), arg0, arg1) +} + +// Connectedness mocks base method. +func (m *MockModule) Connectedness(arg0 context.Context, arg1 peer.ID) (network.Connectedness, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Connectedness", arg0, arg1) + ret0, _ := ret[0].(network.Connectedness) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Connectedness indicates an expected call of Connectedness. +func (mr *MockModuleMockRecorder) Connectedness(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Connectedness", reflect.TypeOf((*MockModule)(nil).Connectedness), arg0, arg1) +} + +// Info mocks base method. +func (m *MockModule) Info(arg0 context.Context) (peer.AddrInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Info", arg0) + ret0, _ := ret[0].(peer.AddrInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Info indicates an expected call of Info. +func (mr *MockModuleMockRecorder) Info(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Info", reflect.TypeOf((*MockModule)(nil).Info), arg0) +} + +// IsProtected mocks base method. +func (m *MockModule) IsProtected(arg0 context.Context, arg1 peer.ID, arg2 string) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "IsProtected", arg0, arg1, arg2) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// IsProtected indicates an expected call of IsProtected. 
+func (mr *MockModuleMockRecorder) IsProtected(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsProtected", reflect.TypeOf((*MockModule)(nil).IsProtected), arg0, arg1, arg2) +} + +// ListBlockedPeers mocks base method. +func (m *MockModule) ListBlockedPeers(arg0 context.Context) ([]peer.ID, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListBlockedPeers", arg0) + ret0, _ := ret[0].([]peer.ID) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListBlockedPeers indicates an expected call of ListBlockedPeers. +func (mr *MockModuleMockRecorder) ListBlockedPeers(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListBlockedPeers", reflect.TypeOf((*MockModule)(nil).ListBlockedPeers), arg0) +} + +// NATStatus mocks base method. +func (m *MockModule) NATStatus(arg0 context.Context) (network.Reachability, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NATStatus", arg0) + ret0, _ := ret[0].(network.Reachability) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NATStatus indicates an expected call of NATStatus. +func (mr *MockModuleMockRecorder) NATStatus(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NATStatus", reflect.TypeOf((*MockModule)(nil).NATStatus), arg0) +} + +// PeerInfo mocks base method. +func (m *MockModule) PeerInfo(arg0 context.Context, arg1 peer.ID) (peer.AddrInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PeerInfo", arg0, arg1) + ret0, _ := ret[0].(peer.AddrInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PeerInfo indicates an expected call of PeerInfo. +func (mr *MockModuleMockRecorder) PeerInfo(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PeerInfo", reflect.TypeOf((*MockModule)(nil).PeerInfo), arg0, arg1) +} + +// Peers mocks base method. +func (m *MockModule) Peers(arg0 context.Context) ([]peer.ID, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Peers", arg0) + ret0, _ := ret[0].([]peer.ID) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Peers indicates an expected call of Peers. +func (mr *MockModuleMockRecorder) Peers(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Peers", reflect.TypeOf((*MockModule)(nil).Peers), arg0) +} + +// Protect mocks base method. +func (m *MockModule) Protect(arg0 context.Context, arg1 peer.ID, arg2 string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Protect", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// Protect indicates an expected call of Protect. +func (mr *MockModuleMockRecorder) Protect(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Protect", reflect.TypeOf((*MockModule)(nil).Protect), arg0, arg1, arg2) +} + +// PubSubPeers mocks base method. +func (m *MockModule) PubSubPeers(arg0 context.Context, arg1 string) ([]peer.ID, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PubSubPeers", arg0, arg1) + ret0, _ := ret[0].([]peer.ID) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PubSubPeers indicates an expected call of PubSubPeers. 
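The file above is generated from the Module interface (see the go:generate mockgen directive in nodebuilder/p2p/p2p.go below) and should not be edited by hand. A hedged sketch of consuming the mock in a test:

```go
package p2p_test

import (
	"context"
	"testing"

	"github.com/golang/mock/gomock"
	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/stretchr/testify/require"

	"github.com/celestiaorg/celestia-node/nodebuilder/p2p/mocks"
)

func TestModuleMockSketch(t *testing.T) {
	ctrl := gomock.NewController(t)
	mock := mocks.NewMockModule(ctrl)

	// Expect a single Peers call and stub its result.
	mock.EXPECT().Peers(gomock.Any()).Return([]peer.ID{}, nil)

	peers, err := mock.Peers(context.Background())
	require.NoError(t, err)
	require.Empty(t, peers)
}
```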
+func (mr *MockModuleMockRecorder) PubSubPeers(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PubSubPeers", reflect.TypeOf((*MockModule)(nil).PubSubPeers), arg0, arg1) +} + +// ResourceState mocks base method. +func (m *MockModule) ResourceState(arg0 context.Context) (rcmgr.ResourceManagerStat, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ResourceState", arg0) + ret0, _ := ret[0].(rcmgr.ResourceManagerStat) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ResourceState indicates an expected call of ResourceState. +func (mr *MockModuleMockRecorder) ResourceState(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ResourceState", reflect.TypeOf((*MockModule)(nil).ResourceState), arg0) +} + +// UnblockPeer mocks base method. +func (m *MockModule) UnblockPeer(arg0 context.Context, arg1 peer.ID) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UnblockPeer", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// UnblockPeer indicates an expected call of UnblockPeer. +func (mr *MockModuleMockRecorder) UnblockPeer(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UnblockPeer", reflect.TypeOf((*MockModule)(nil).UnblockPeer), arg0, arg1) +} + +// Unprotect mocks base method. +func (m *MockModule) Unprotect(arg0 context.Context, arg1 peer.ID, arg2 string) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Unprotect", arg0, arg1, arg2) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Unprotect indicates an expected call of Unprotect. +func (mr *MockModuleMockRecorder) Unprotect(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Unprotect", reflect.TypeOf((*MockModule)(nil).Unprotect), arg0, arg1, arg2) +} diff --git a/nodebuilder/p2p/module.go b/nodebuilder/p2p/module.go new file mode 100644 index 0000000000..1ddff02173 --- /dev/null +++ b/nodebuilder/p2p/module.go @@ -0,0 +1,60 @@ +package p2p + +import ( + logging "github.com/ipfs/go-log/v2" + "github.com/libp2p/go-libp2p/core/metrics" + "go.uber.org/fx" + + "github.com/celestiaorg/celestia-node/nodebuilder/node" + "github.com/celestiaorg/celestia-node/share/ipld" +) + +var log = logging.Logger("module/p2p") + +// ConstructModule collects all the components and services related to p2p. 
+func ConstructModule(tp node.Type, cfg *Config) fx.Option { + // sanitize config values before constructing module + cfgErr := cfg.Validate() + + baseComponents := fx.Options( + fx.Supply(*cfg), + fx.Error(cfgErr), + fx.Provide(Key), + fx.Provide(id), + fx.Provide(peerStore), + fx.Provide(connectionManager), + fx.Provide(connectionGater), + fx.Provide(host), + fx.Provide(routedHost), + fx.Provide(pubSub), + fx.Provide(dataExchange), + fx.Provide(ipld.NewBlockservice), + fx.Provide(peerRouting), + fx.Provide(contentRouting), + fx.Provide(addrsFactory(cfg.AnnounceAddresses, cfg.NoAnnounceAddresses)), + fx.Provide(metrics.NewBandwidthCounter), + fx.Provide(newModule), + fx.Invoke(Listen(cfg.ListenAddresses)), + fx.Provide(resourceManager), + fx.Provide(resourceManagerOpt(allowList)), + ) + + switch tp { + case node.Full, node.Bridge: + return fx.Module( + "p2p", + baseComponents, + fx.Provide(blockstoreFromEDSStore), + fx.Provide(infiniteResources), + ) + case node.Light: + return fx.Module( + "p2p", + baseComponents, + fx.Provide(blockstoreFromDatastore), + fx.Provide(autoscaleResources), + ) + default: + panic("invalid node type") + } +} diff --git a/nodebuilder/p2p/module_test.go b/nodebuilder/p2p/module_test.go new file mode 100644 index 0000000000..cb7c945547 --- /dev/null +++ b/nodebuilder/p2p/module_test.go @@ -0,0 +1,67 @@ +package p2p + +import ( + "context" + "testing" + + "github.com/ipfs/go-datastore" + ds_sync "github.com/ipfs/go-datastore/sync" + "go.uber.org/fx" + "go.uber.org/fx/fxtest" + + "github.com/celestiaorg/celestia-node/libs/keystore" + "github.com/celestiaorg/celestia-node/nodebuilder/node" +) + +func testModule(tp node.Type) fx.Option { + cfg := DefaultConfig(tp) + // TODO(@Wondertan): Most of these can be deduplicated + // by moving Store into the modnode and introducing there a TestModNode module + // that testers would import + return fx.Options( + fx.NopLogger, + ConstructModule(tp, &cfg), + fx.Provide(context.Background), + fx.Supply(Private), + fx.Supply(Bootstrappers{}), + fx.Supply(tp), + fx.Provide(keystore.NewMapKeystore), + fx.Supply(fx.Annotate(ds_sync.MutexWrap(datastore.NewMapDatastore()), fx.As(new(datastore.Batching)))), + ) +} + +func TestModuleBuild(t *testing.T) { + var test = []struct { + tp node.Type + }{ + {tp: node.Bridge}, + {tp: node.Full}, + {tp: node.Light}, + } + + for _, tt := range test { + t.Run(tt.tp.String(), func(t *testing.T) { + app := fxtest.New(t, testModule(tt.tp)) + app.RequireStart() + app.RequireStop() + }) + } +} + +func TestModuleBuild_WithMetrics(t *testing.T) { + var test = []struct { + tp node.Type + }{ + {tp: node.Full}, + {tp: node.Bridge}, + {tp: node.Light}, + } + + for _, tt := range test { + t.Run(tt.tp.String(), func(t *testing.T) { + app := fxtest.New(t, testModule(tt.tp), WithMetrics()) + app.RequireStart() + app.RequireStop() + }) + } +} diff --git a/nodebuilder/p2p/network.go b/nodebuilder/p2p/network.go new file mode 100644 index 0000000000..53893eff7c --- /dev/null +++ b/nodebuilder/p2p/network.go @@ -0,0 +1,83 @@ +package p2p + +import ( + "errors" + "time" + + "github.com/libp2p/go-libp2p/core/peer" +) + +// NOTE: Every time we add a new long-running network, it has to be added here. +const ( + // DefaultNetwork is the default network of the current build. + DefaultNetwork = Mainnet + // Arabica testnet. See: celestiaorg/networks. + Arabica Network = "arabica-11" + // Mocha testnet. See: celestiaorg/networks. 
+ Mocha Network = "mocha-4" + // Private can be used to set up any private network, including local testing setups. + Private Network = "private" + // Celestia mainnet. See: celestiaorg/networks. + Mainnet Network = "celestia" + // BlockTime is a network block time. + // TODO @renaynay @Wondertan (#790) + BlockTime = time.Second * 10 +) + +// Network is a type definition for DA network run by Celestia Node. +type Network string + +// Bootstrappers is a type definition for nodes that will be used as bootstrappers. +type Bootstrappers []peer.AddrInfo + +// ErrInvalidNetwork is thrown when unknown network is used. +var ErrInvalidNetwork = errors.New("params: invalid network") + +// Validate the network. +func (n Network) Validate() (Network, error) { + // return actual network if alias was provided + if net, ok := networkAliases[string(n)]; ok { + return net, nil + } + if _, ok := networksList[n]; !ok { + return "", ErrInvalidNetwork + } + return n, nil +} + +// String returns string representation of the Network. +func (n Network) String() string { + return string(n) +} + +// networksList is a strict list of all known long-standing networks. +var networksList = map[Network]struct{}{ + Mainnet: {}, + Arabica: {}, + Mocha: {}, + Private: {}, +} + +// networkAliases is a strict list of all known long-standing networks +// mapped from the string representation of their *alias* (rather than +// their actual value) to the Network. +var networkAliases = map[string]Network{ + "mainnet": Mainnet, + "arabica": Arabica, + "mocha": Mocha, + "private": Private, +} + +// listProvidedNetworks provides a string listing all known long-standing networks for things like +// command hints. +func listProvidedNetworks() string { + var networks string + for net := range networksList { + // "private" network isn't really a choosable option, so skip + if net != Private { + networks += string(net) + ", " + } + } + // chop off trailing ", " + return networks[:len(networks)-2] +} diff --git a/nodebuilder/p2p/opts.go b/nodebuilder/p2p/opts.go new file mode 100644 index 0000000000..8e5d714a64 --- /dev/null +++ b/nodebuilder/p2p/opts.go @@ -0,0 +1,42 @@ +package p2p + +import ( + "encoding/hex" + + "github.com/ipfs/go-blockservice" + "github.com/libp2p/go-libp2p/core/crypto" + hst "github.com/libp2p/go-libp2p/core/host" + "go.uber.org/fx" + + "github.com/celestiaorg/celestia-node/libs/fxutil" +) + +// WithP2PKey sets custom Ed25519 private key for p2p networking. +func WithP2PKey(key crypto.PrivKey) fx.Option { + return fxutil.ReplaceAs(key, new(crypto.PrivKey)) +} + +// WithP2PKeyStr sets custom hex encoded Ed25519 private key for p2p networking. +func WithP2PKeyStr(key string) fx.Option { + decKey, err := hex.DecodeString(key) + if err != nil { + return fx.Error(err) + } + + privKey, err := crypto.UnmarshalEd25519PrivateKey(decKey) + if err != nil { + return fx.Error(err) + } + + return fxutil.ReplaceAs(privKey, new(crypto.PrivKey)) +} + +// WithHost sets custom Host's data for p2p networking. +func WithHost(hst hst.Host) fx.Option { + return fxutil.ReplaceAs(hst, new(HostBase)) +} + +// WithBlockService allows to replace the default BlockService. 
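A small sketch of the alias resolution and validation defined above:

```go
package main

import (
	"fmt"

	"github.com/celestiaorg/celestia-node/nodebuilder/p2p"
)

func main() {
	// Aliases resolve to canonical network IDs: "arabica" becomes "arabica-11".
	net, err := p2p.Network("arabica").Validate()
	fmt.Println(net, err) // arabica-11 <nil>

	// Unknown networks are rejected with ErrInvalidNetwork.
	_, err = p2p.Network("unknown-net").Validate()
	fmt.Println(err) // params: invalid network
}
```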
+func WithBlockService(bServ blockservice.BlockService) fx.Option { + return fxutil.ReplaceAs(bServ, new(blockservice.BlockService)) +} diff --git a/nodebuilder/p2p/p2p.go b/nodebuilder/p2p/p2p.go new file mode 100644 index 0000000000..a418eaf4d8 --- /dev/null +++ b/nodebuilder/p2p/p2p.go @@ -0,0 +1,288 @@ +package p2p + +import ( + "context" + "fmt" + "reflect" + + pubsub "github.com/libp2p/go-libp2p-pubsub" + libhost "github.com/libp2p/go-libp2p/core/host" + "github.com/libp2p/go-libp2p/core/metrics" + "github.com/libp2p/go-libp2p/core/network" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/core/protocol" + basichost "github.com/libp2p/go-libp2p/p2p/host/basic" + rcmgr "github.com/libp2p/go-libp2p/p2p/host/resource-manager" + "github.com/libp2p/go-libp2p/p2p/net/conngater" +) + +var _ Module = (*API)(nil) + +// Module represents all accessible methods related to the node's p2p +// host / operations. +// +//nolint:dupl +//go:generate mockgen -destination=mocks/api.go -package=mocks . Module +type Module interface { + // Info returns address information about the host. + Info(context.Context) (peer.AddrInfo, error) + // Peers returns connected peers. + Peers(context.Context) ([]peer.ID, error) + // PeerInfo returns a small slice of information Peerstore has on the + // given peer. + PeerInfo(ctx context.Context, id peer.ID) (peer.AddrInfo, error) + + // Connect ensures there is a connection between this host and the peer with + // given peer. + Connect(ctx context.Context, pi peer.AddrInfo) error + // ClosePeer closes the connection to a given peer. + ClosePeer(ctx context.Context, id peer.ID) error + // Connectedness returns a state signaling connection capabilities. + Connectedness(ctx context.Context, id peer.ID) (network.Connectedness, error) + // NATStatus returns the current NAT status. + NATStatus(context.Context) (network.Reachability, error) + + // BlockPeer adds a peer to the set of blocked peers. + BlockPeer(ctx context.Context, p peer.ID) error + // UnblockPeer removes a peer from the set of blocked peers. + UnblockPeer(ctx context.Context, p peer.ID) error + // ListBlockedPeers returns a list of blocked peers. + ListBlockedPeers(context.Context) ([]peer.ID, error) + // Protect adds a peer to the list of peers who have a bidirectional + // peering agreement that they are protected from being trimmed, dropped + // or negatively scored. + Protect(ctx context.Context, id peer.ID, tag string) error + // Unprotect removes a peer from the list of peers who have a bidirectional + // peering agreement that they are protected from being trimmed, dropped + // or negatively scored, returning a bool representing whether the given + // peer is protected or not. + Unprotect(ctx context.Context, id peer.ID, tag string) (bool, error) + // IsProtected returns whether the given peer is protected. + IsProtected(ctx context.Context, id peer.ID, tag string) (bool, error) + + // BandwidthStats returns a Stats struct with bandwidth metrics for all + // data sent/received by the local peer, regardless of protocol or remote + // peer IDs. + BandwidthStats(context.Context) (metrics.Stats, error) + // BandwidthForPeer returns a Stats struct with bandwidth metrics associated with the given peer.ID. + // The metrics returned include all traffic sent / received for the peer, regardless of protocol. 
+ BandwidthForPeer(ctx context.Context, id peer.ID) (metrics.Stats, error) + // BandwidthForProtocol returns a Stats struct with bandwidth metrics associated with the given + // protocol.ID. + BandwidthForProtocol(ctx context.Context, proto protocol.ID) (metrics.Stats, error) + + // ResourceState returns the state of the resource manager. + ResourceState(context.Context) (rcmgr.ResourceManagerStat, error) + + // PubSubPeers returns the peer IDs of the peers joined on + // the given topic. + PubSubPeers(ctx context.Context, topic string) ([]peer.ID, error) +} + +// module contains all components necessary to access information and +// perform actions related to the node's p2p Host / operations. +type module struct { + host HostBase + ps *pubsub.PubSub + connGater *conngater.BasicConnectionGater + bw *metrics.BandwidthCounter + rm network.ResourceManager +} + +func newModule( + host HostBase, + ps *pubsub.PubSub, + cg *conngater.BasicConnectionGater, + bw *metrics.BandwidthCounter, + rm network.ResourceManager, +) Module { + return &module{ + host: host, + ps: ps, + connGater: cg, + bw: bw, + rm: rm, + } +} + +func (m *module) Info(context.Context) (peer.AddrInfo, error) { + return *libhost.InfoFromHost(m.host), nil +} + +func (m *module) Peers(context.Context) ([]peer.ID, error) { + return m.host.Network().Peers(), nil +} + +func (m *module) PeerInfo(_ context.Context, id peer.ID) (peer.AddrInfo, error) { + return m.host.Peerstore().PeerInfo(id), nil +} + +func (m *module) Connect(ctx context.Context, pi peer.AddrInfo) error { + return m.host.Connect(ctx, pi) +} + +func (m *module) ClosePeer(_ context.Context, id peer.ID) error { + return m.host.Network().ClosePeer(id) +} + +func (m *module) Connectedness(_ context.Context, id peer.ID) (network.Connectedness, error) { + return m.host.Network().Connectedness(id), nil +} + +func (m *module) NATStatus(context.Context) (network.Reachability, error) { + basic, ok := m.host.(*basichost.BasicHost) + if !ok { + return 0, fmt.Errorf("unexpected implementation of host.Host, expected %s, got %T", + reflect.TypeOf(&basichost.BasicHost{}).String(), m.host) + } + return basic.GetAutoNat().Status(), nil +} + +func (m *module) BlockPeer(_ context.Context, p peer.ID) error { + return m.connGater.BlockPeer(p) +} + +func (m *module) UnblockPeer(_ context.Context, p peer.ID) error { + return m.connGater.UnblockPeer(p) +} + +func (m *module) ListBlockedPeers(context.Context) ([]peer.ID, error) { + return m.connGater.ListBlockedPeers(), nil +} + +func (m *module) Protect(_ context.Context, id peer.ID, tag string) error { + m.host.ConnManager().Protect(id, tag) + return nil +} + +func (m *module) Unprotect(_ context.Context, id peer.ID, tag string) (bool, error) { + return m.host.ConnManager().Unprotect(id, tag), nil +} + +func (m *module) IsProtected(_ context.Context, id peer.ID, tag string) (bool, error) { + return m.host.ConnManager().IsProtected(id, tag), nil +} + +func (m *module) BandwidthStats(context.Context) (metrics.Stats, error) { + return m.bw.GetBandwidthTotals(), nil +} + +func (m *module) BandwidthForPeer(_ context.Context, id peer.ID) (metrics.Stats, error) { + return m.bw.GetBandwidthForPeer(id), nil +} + +func (m *module) BandwidthForProtocol(_ context.Context, proto protocol.ID) (metrics.Stats, error) { + return m.bw.GetBandwidthForProtocol(proto), nil +} + +func (m *module) ResourceState(context.Context) (rcmgr.ResourceManagerStat, error) { + rms, ok := m.rm.(rcmgr.ResourceManagerState) + if !ok { + return rcmgr.ResourceManagerStat{}, 
fmt.Errorf("network.resourceManager does not implement " + + "rcmgr.ResourceManagerState") + } + return rms.Stat(), nil +} + +func (m *module) PubSubPeers(_ context.Context, topic string) ([]peer.ID, error) { + return m.ps.ListPeers(topic), nil +} + +// API is a wrapper around Module for the RPC. +// TODO(@distractedm1nd): These structs need to be autogenerated. +// +//nolint:dupl +type API struct { + Internal struct { + Info func(context.Context) (peer.AddrInfo, error) `perm:"admin"` + Peers func(context.Context) ([]peer.ID, error) `perm:"admin"` + PeerInfo func(ctx context.Context, id peer.ID) (peer.AddrInfo, error) `perm:"admin"` + Connect func(ctx context.Context, pi peer.AddrInfo) error `perm:"admin"` + ClosePeer func(ctx context.Context, id peer.ID) error `perm:"admin"` + Connectedness func(ctx context.Context, id peer.ID) (network.Connectedness, error) `perm:"admin"` + NATStatus func(context.Context) (network.Reachability, error) `perm:"admin"` + BlockPeer func(ctx context.Context, p peer.ID) error `perm:"admin"` + UnblockPeer func(ctx context.Context, p peer.ID) error `perm:"admin"` + ListBlockedPeers func(context.Context) ([]peer.ID, error) `perm:"admin"` + Protect func(ctx context.Context, id peer.ID, tag string) error `perm:"admin"` + Unprotect func(ctx context.Context, id peer.ID, tag string) (bool, error) `perm:"admin"` + IsProtected func(ctx context.Context, id peer.ID, tag string) (bool, error) `perm:"admin"` + BandwidthStats func(context.Context) (metrics.Stats, error) `perm:"admin"` + BandwidthForPeer func(ctx context.Context, id peer.ID) (metrics.Stats, error) `perm:"admin"` + BandwidthForProtocol func(ctx context.Context, proto protocol.ID) (metrics.Stats, error) `perm:"admin"` + ResourceState func(context.Context) (rcmgr.ResourceManagerStat, error) `perm:"admin"` + PubSubPeers func(ctx context.Context, topic string) ([]peer.ID, error) `perm:"admin"` + } +} + +func (api *API) Info(ctx context.Context) (peer.AddrInfo, error) { + return api.Internal.Info(ctx) +} + +func (api *API) Peers(ctx context.Context) ([]peer.ID, error) { + return api.Internal.Peers(ctx) +} + +func (api *API) PeerInfo(ctx context.Context, id peer.ID) (peer.AddrInfo, error) { + return api.Internal.PeerInfo(ctx, id) +} + +func (api *API) Connect(ctx context.Context, pi peer.AddrInfo) error { + return api.Internal.Connect(ctx, pi) +} + +func (api *API) ClosePeer(ctx context.Context, id peer.ID) error { + return api.Internal.ClosePeer(ctx, id) +} + +func (api *API) Connectedness(ctx context.Context, id peer.ID) (network.Connectedness, error) { + return api.Internal.Connectedness(ctx, id) +} + +func (api *API) NATStatus(ctx context.Context) (network.Reachability, error) { + return api.Internal.NATStatus(ctx) +} + +func (api *API) BlockPeer(ctx context.Context, p peer.ID) error { + return api.Internal.BlockPeer(ctx, p) +} + +func (api *API) UnblockPeer(ctx context.Context, p peer.ID) error { + return api.Internal.UnblockPeer(ctx, p) +} + +func (api *API) ListBlockedPeers(ctx context.Context) ([]peer.ID, error) { + return api.Internal.ListBlockedPeers(ctx) +} + +func (api *API) Protect(ctx context.Context, id peer.ID, tag string) error { + return api.Internal.Protect(ctx, id, tag) +} + +func (api *API) Unprotect(ctx context.Context, id peer.ID, tag string) (bool, error) { + return api.Internal.Unprotect(ctx, id, tag) +} + +func (api *API) IsProtected(ctx context.Context, id peer.ID, tag string) (bool, error) { + return api.Internal.IsProtected(ctx, id, tag) +} + +func (api *API) BandwidthStats(ctx 
context.Context) (metrics.Stats, error) { + return api.Internal.BandwidthStats(ctx) +} + +func (api *API) BandwidthForPeer(ctx context.Context, id peer.ID) (metrics.Stats, error) { + return api.Internal.BandwidthForPeer(ctx, id) +} + +func (api *API) BandwidthForProtocol(ctx context.Context, proto protocol.ID) (metrics.Stats, error) { + return api.Internal.BandwidthForProtocol(ctx, proto) +} + +func (api *API) ResourceState(ctx context.Context) (rcmgr.ResourceManagerStat, error) { + return api.Internal.ResourceState(ctx) +} + +func (api *API) PubSubPeers(ctx context.Context, topic string) ([]peer.ID, error) { + return api.Internal.PubSubPeers(ctx, topic) +} diff --git a/nodebuilder/p2p/p2p_test.go b/nodebuilder/p2p/p2p_test.go new file mode 100644 index 0000000000..b883889859 --- /dev/null +++ b/nodebuilder/p2p/p2p_test.go @@ -0,0 +1,251 @@ +package p2p + +import ( + "context" + "crypto/rand" + "testing" + "time" + + "github.com/ipfs/go-datastore" + "github.com/libp2p/go-libp2p" + pubsub "github.com/libp2p/go-libp2p-pubsub" + libhost "github.com/libp2p/go-libp2p/core/host" + "github.com/libp2p/go-libp2p/core/metrics" + "github.com/libp2p/go-libp2p/core/network" + "github.com/libp2p/go-libp2p/core/protocol" + rcmgr "github.com/libp2p/go-libp2p/p2p/host/resource-manager" + mocknet "github.com/libp2p/go-libp2p/p2p/net/mock" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// TestP2PModule_Host tests P2P Module methods on +// the instance of Host. +func TestP2PModule_Host(t *testing.T) { + net, err := mocknet.FullMeshConnected(2) + require.NoError(t, err) + host, peer := net.Hosts()[0], net.Hosts()[1] + + mgr := newModule(host, nil, nil, nil, nil) + + ctx := context.Background() + + // test all methods on `manager.host` + peers, err := mgr.Peers(ctx) + require.NoError(t, err) + assert.Equal(t, host.Network().Peers(), peers) + + peerInfo, err := mgr.PeerInfo(ctx, peer.ID()) + require.NoError(t, err) + assert.Equal(t, libhost.InfoFromHost(peer).ID, peerInfo.ID) + + connectedness, err := mgr.Connectedness(ctx, peer.ID()) + require.NoError(t, err) + assert.Equal(t, host.Network().Connectedness(peer.ID()), connectedness) + // now disconnect using manager and check for connectedness match again + assert.NoError(t, mgr.ClosePeer(ctx, peer.ID())) + connectedness, err = mgr.Connectedness(ctx, peer.ID()) + require.NoError(t, err) + assert.Equal(t, host.Network().Connectedness(peer.ID()), connectedness) +} + +// TestP2PModule_ConnManager tests P2P Module methods on +// the Host's ConnManager. Note that this test is constructed differently +// than the one above because mocknet does not provide a ConnManager to its +// mock peers. 
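+//
+// For orientation, the tag-scoped protection semantics exercised below are
+// roughly (illustrative sketch only; assumes a connected peer id):
+//
+//	_ = mgr.Protect(ctx, id, "peering")
+//	protected, _ := mgr.IsProtected(ctx, id, "peering") // true
+//	still, _ := mgr.Unprotect(ctx, id, "peering")       // false once the last tag is gone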
+func TestP2PModule_ConnManager(t *testing.T) {
+	// make two full peers and connect them
+	host, err := libp2p.New()
+	require.NoError(t, err)
+
+	peer, err := libp2p.New()
+	require.NoError(t, err)
+
+	mgr := newModule(host, nil, nil, nil, nil)
+
+	ctx, cancel := context.WithCancel(context.Background())
+	t.Cleanup(cancel)
+
+	err = mgr.Connect(ctx, *libhost.InfoFromHost(peer))
+	require.NoError(t, err)
+
+	err = mgr.Protect(ctx, peer.ID(), "test")
+	require.NoError(t, err)
+	protected, err := mgr.IsProtected(ctx, peer.ID(), "test")
+	require.NoError(t, err)
+	assert.True(t, protected)
+
+	ok, err := mgr.Unprotect(ctx, peer.ID(), "test")
+	require.False(t, ok)
+	require.NoError(t, err)
+	protected, err = mgr.IsProtected(ctx, peer.ID(), "test")
+	require.NoError(t, err)
+	assert.False(t, protected)
+}
+
+// TestP2PModule_Autonat tests P2P Module methods on
+// the node's instance of AutoNAT.
+func TestP2PModule_Autonat(t *testing.T) {
+	host, err := libp2p.New(libp2p.EnableNATService())
+	require.NoError(t, err)
+
+	mgr := newModule(host, nil, nil, nil, nil)
+
+	status, err := mgr.NATStatus(context.Background())
+	assert.NoError(t, err)
+	assert.Equal(t, network.ReachabilityUnknown, status)
+}
+
+// TestP2PModule_Bandwidth tests P2P Module methods on
+// the Host's bandwidth reporter.
+func TestP2PModule_Bandwidth(t *testing.T) {
+	bw := metrics.NewBandwidthCounter()
+	host, err := libp2p.New(libp2p.BandwidthReporter(bw))
+	require.NoError(t, err)
+
+	protoID := protocol.ID("test")
+	// define a buf size, so we know how many bytes to read
+	bufSize := 1000
+
+	// create a peer to connect to
+	peer, err := libp2p.New(libp2p.BandwidthReporter(bw))
+	require.NoError(t, err)
+
+	// set stream handler on the host
+	host.SetStreamHandler(protoID, func(stream network.Stream) {
+		buf := make([]byte, bufSize)
+		_, err := stream.Read(buf)
+		require.NoError(t, err)
+
+		_, err = stream.Write(buf)
+		require.NoError(t, err)
+	})
+
+	mgr := newModule(host, nil, nil, bw, nil)
+
+	ctx, cancel := context.WithCancel(context.Background())
+	t.Cleanup(cancel)
+
+	// connect to the peer
+	err = mgr.Connect(ctx, *libhost.InfoFromHost(peer))
+	require.NoError(t, err)
+
+	// check to ensure they're actually connected
+	connectedness, err := mgr.Connectedness(ctx, peer.ID())
+	require.NoError(t, err)
+	require.Equal(t, network.Connected, connectedness)
+
+	// open stream with host
+	info, err := mgr.Info(ctx)
+	require.NoError(t, err)
+	stream, err := peer.NewStream(ctx, info.ID, protoID)
+	require.NoError(t, err)
+
+	// write to stream to increase bandwidth usage and get some substantive
+	// data to read from the bandwidth counter
+	buf := make([]byte, bufSize)
+	_, err = rand.Read(buf)
+	require.NoError(t, err)
+	_, err = stream.Write(buf)
+	require.NoError(t, err)
+
+	_, err = stream.Read(buf)
+	require.NoError(t, err)
+
+	// has to be ~2 seconds for the metrics reporter to collect the stats
+	// in the background process
+	time.Sleep(time.Second * 2)
+
+	stats, err := mgr.BandwidthStats(ctx)
+	require.NoError(t, err)
+	assert.NotNil(t, stats)
+
+	peerStat, err := mgr.BandwidthForPeer(ctx, peer.ID())
+	require.NoError(t, err)
+	assert.NotZero(t, peerStat.TotalIn)
+	assert.Greater(t, int(peerStat.TotalIn), bufSize) // should be slightly more than buf size due to negotiations, etc
+
+	protoStat, err := mgr.BandwidthForProtocol(ctx, protoID)
+	require.NoError(t, err)
+	assert.NotZero(t, protoStat.TotalIn)
+	assert.Greater(t, int(protoStat.TotalIn), bufSize) // should be slightly more than buf size due to negotiations, etc
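+
+	// Note: both hosts above were built with the same BandwidthCounter, so
+	// the per-peer and per-protocol stats cover traffic from both sides of
+	// the stream.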
+}
+
+// TestP2PModule_Pubsub tests P2P Module methods on
+// the instance of pubsub.
func TestP2PModule_Pubsub(t *testing.T) {
+	net, err := mocknet.FullMeshConnected(5)
+	require.NoError(t, err)
+
+	host := net.Hosts()[0]
+
+	ctx, cancel := context.WithCancel(context.Background())
+	t.Cleanup(cancel)
+
+	gs, err := pubsub.NewGossipSub(ctx, host)
+	require.NoError(t, err)
+
+	mgr := newModule(host, gs, nil, nil, nil)
+
+	topicStr := "test-topic"
+
+	topic, err := gs.Join(topicStr)
+	require.NoError(t, err)
+
+	// also join all peers on mocknet to the topic
+	for _, p := range net.Hosts()[1:] {
+		newGs, err := pubsub.NewGossipSub(ctx, p)
+		require.NoError(t, err)
+
+		tp, err := newGs.Join(topicStr)
+		require.NoError(t, err)
+		_, err = tp.Subscribe()
+		require.NoError(t, err)
+	}
+
+	err = topic.Publish(ctx, []byte("test"))
+	require.NoError(t, err)
+
+	// give some time for peers to properly join the topic (this is necessary
+	// anywhere gossipsub is used in tests)
+	time.Sleep(1 * time.Second)
+
+	psPeers, err := mgr.PubSubPeers(context.Background(), topicStr)
+	require.NoError(t, err)
+	assert.Equal(t, len(topic.ListPeers()), len(psPeers))
+}
+
+// TestP2PModule_ConnGater tests P2P Module methods on
+// the instance of ConnectionGater.
+func TestP2PModule_ConnGater(t *testing.T) {
+	gater, err := connectionGater(datastore.NewMapDatastore())
+	require.NoError(t, err)
+
+	mgr := newModule(nil, nil, gater, nil, nil)
+
+	ctx := context.Background()
+
+	assert.NoError(t, mgr.BlockPeer(ctx, "badpeer"))
+	blocked, err := mgr.ListBlockedPeers(ctx)
+	require.NoError(t, err)
+	assert.Len(t, blocked, 1)
+
+	assert.NoError(t, mgr.UnblockPeer(ctx, "badpeer"))
+	blocked, err = mgr.ListBlockedPeers(ctx)
+	require.NoError(t, err)
+	assert.Len(t, blocked, 0)
+}
+
+// TestP2PModule_ResourceManager tests P2P Module methods on
+// the resourceManager.
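+//
+// For reference, the returned rcmgr.ResourceManagerStat aggregates usage per
+// scope (System, Transient, per-service, per-protocol and per-peer), so a
+// caller could e.g. inspect state.System.NumConnsInbound (illustrative only).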
+func TestP2PModule_ResourceManager(t *testing.T) {
+	rm, err := rcmgr.NewResourceManager(rcmgr.NewFixedLimiter(rcmgr.DefaultLimits.AutoScale()))
+	require.NoError(t, err)
+
+	mgr := newModule(nil, nil, nil, nil, rm)
+
+	state, err := mgr.ResourceState(context.Background())
+	require.NoError(t, err)
+
+	assert.NotNil(t, state)
+}
diff --git a/nodebuilder/p2p/pubsub.go b/nodebuilder/p2p/pubsub.go
new file mode 100644
index 0000000000..13d812e3ce
--- /dev/null
+++ b/nodebuilder/p2p/pubsub.go
@@ -0,0 +1,187 @@
+package p2p
+
+import (
+	"context"
+	"fmt"
+	"net"
+	"time"
+
+	pubsub "github.com/libp2p/go-libp2p-pubsub"
+	pubsub_pb "github.com/libp2p/go-libp2p-pubsub/pb"
+	"github.com/libp2p/go-libp2p-pubsub/timecache"
+	hst "github.com/libp2p/go-libp2p/core/host"
+	"github.com/libp2p/go-libp2p/core/peer"
+	"github.com/libp2p/go-libp2p/core/protocol"
+	"go.uber.org/fx"
+	"golang.org/x/crypto/blake2b"
+
+	"github.com/celestiaorg/go-fraud"
+	"github.com/celestiaorg/go-fraud/fraudserv"
+	headp2p "github.com/celestiaorg/go-header/p2p"
+
+	"github.com/celestiaorg/celestia-node/header"
+)
+
+func init() {
+	// TODO(@Wondertan): Requires deeper analysis
+	// configure larger overlay parameters;
+	// the default ones are pretty conservative
+	pubsub.GossipSubD = 8
+	pubsub.GossipSubDscore = 6
+	pubsub.GossipSubDout = 3
+	pubsub.GossipSubDlo = 6
+	pubsub.GossipSubDhi = 12
+	pubsub.GossipSubDlazy = 12
+
+	pubsub.GossipSubIWantFollowupTime = 5 * time.Second
+	pubsub.GossipSubHistoryLength = 10 // cache msgs longer
+	// MutualPeers will wait for 30 secs before connecting
+	pubsub.GossipSubDirectConnectInitialDelay = 30 * time.Second
+}
+
+// pubSub provides a constructor for PubSub protocol with GossipSub routing.
+func pubSub(cfg Config, params pubSubParams) (*pubsub.PubSub, error) {
+	fpeers, err := cfg.mutualPeers()
+	if err != nil {
+		return nil, err
+	}
+
+	isBootstrapper := isBootstrapper()
+
+	if isBootstrapper {
+		// Turn off the mesh in bootstrappers as per
+		// the recommendations for network operators:
+		//
+		// https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/gossipsub-v1.1.md#recommendations-for-network-operators
+		pubsub.GossipSubD = 0
+		pubsub.GossipSubDscore = 0
+		pubsub.GossipSubDlo = 0
+		pubsub.GossipSubDhi = 0
+		pubsub.GossipSubDout = 0
+		pubsub.GossipSubDlazy = 64
+		pubsub.GossipSubGossipFactor = 0.25
+		pubsub.GossipSubPruneBackoff = 5 * time.Minute
+	}
+
+	// TODO(@Wondertan) Validate and improve default peer scoring params
+	// Current parameters are based on:
+	//  * https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/gossipsub-v1.1.md#peer-scoring
+	//  * lotus
+	//  * prysm
+	topicScores := topicScoreParams(params)
+	peerScores, err := peerScoreParams(params.Bootstrappers, cfg)
+	if err != nil {
+		return nil, err
+	}
+
+	peerScores.Topics = topicScores
+	scoreThresholds := peerScoreThresholds()
+
+	opts := []pubsub.Option{
+		pubsub.WithSeenMessagesStrategy(timecache.Strategy_LastSeen),
+		pubsub.WithPeerScore(peerScores, scoreThresholds),
+		pubsub.WithPeerExchange(cfg.PeerExchange || isBootstrapper),
+		pubsub.WithDirectPeers(fpeers),
+		pubsub.WithMessageIdFn(hashMsgID),
+		// specifying sub protocol helps to avoid conflicts with
+		// floodsub (because gossipsub supports the floodsub protocol by default).
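+		// (GossipSubDefaultFeatures reports which gossipsub features, e.g.
+		// mesh and PX, each of the listed protocol IDs supports.)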
+		pubsub.WithGossipSubProtocols([]protocol.ID{pubsub.GossipSubID_v11}, pubsub.GossipSubDefaultFeatures),
+	}
+
+	return pubsub.NewGossipSub(
+		params.Ctx,
+		params.Host,
+		opts...,
+	)
+}
+
+func hashMsgID(m *pubsub_pb.Message) string {
+	hash := blake2b.Sum256(m.Data)
+	return string(hash[:])
+}
+
+type pubSubParams struct {
+	fx.In
+
+	Ctx           context.Context
+	Host          hst.Host
+	Bootstrappers Bootstrappers
+	Network       Network
+	Unmarshaler   fraud.ProofUnmarshaler[*header.ExtendedHeader]
+}
+
+func topicScoreParams(params pubSubParams) map[string]*pubsub.TopicScoreParams {
+	mp := map[string]*pubsub.TopicScoreParams{
+		headp2p.PubsubTopicID(params.Network.String()): &headp2p.GossibSubScore,
+	}
+
+	for _, pt := range params.Unmarshaler.List() {
+		mp[fraudserv.PubsubTopicID(pt.String(), params.Network.String())] = &fraudserv.GossibSubScore
+	}
+
+	return mp
+}
+
+func peerScoreParams(bootstrappers Bootstrappers, cfg Config) (*pubsub.PeerScoreParams, error) {
+	bootstrapperSet := map[peer.ID]struct{}{}
+	for _, b := range bootstrappers {
+		bootstrapperSet[b.ID] = struct{}{}
+	}
+
+	ipColocFactWl := make([]*net.IPNet, 0, len(cfg.IPColocationWhitelist))
+	for _, strIP := range cfg.IPColocationWhitelist {
+		_, ipNet, err := net.ParseCIDR(strIP)
+		if err != nil {
+			return nil, fmt.Errorf("error while parsing whitelist colocation CIDR string: %w", err)
+		}
+		ipColocFactWl = append(ipColocFactWl, ipNet)
+	}
+
+	// See
+	// https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/gossipsub-v1.1.md#the-score-function
+	return &pubsub.PeerScoreParams{
+		AppSpecificScore: func(p peer.ID) float64 {
+			// return a heavy positive score for bootstrappers so that we don't unilaterally prune
+			// them and accept PX from them
+			_, ok := bootstrapperSet[p]
+			if ok {
+				return 2500
+			}

+			// TODO(@Wondertan):
+			// Plug the application specific score to the node itself in order
+			// to provide feedback to the pubsub system based on observed behavior
+			return 0
+		},
+		AppSpecificWeight: 1,
+
+		// This sets the IP colocation threshold to 10 peers before we apply penalties.
+		// The aim is to protect the PubSub from naive bots colocated on the same machine/datacenter
+		IPColocationFactorThreshold: 10,
+		IPColocationFactorWeight:    -100,
+		IPColocationFactorWhitelist: ipColocFactWl,
+
+		BehaviourPenaltyThreshold: 6,
+		BehaviourPenaltyWeight:    -10,
+		BehaviourPenaltyDecay:     pubsub.ScoreParameterDecay(time.Hour),
+
+		// Scores should not only grow, so this defines a decay function applied equally to each peer
+		DecayInterval: pubsub.DefaultDecayInterval,
+		DecayToZero:   pubsub.DefaultDecayToZero,
+
+		// this retains *non-positive* scores for 6 hours
+		RetainScore: 6 * time.Hour,
+	}, nil
+}
+
+func peerScoreThresholds() *pubsub.PeerScoreThresholds {
+	// See
+	// https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/gossipsub-v1.1.md#overview-of-new-parameters
+	return &pubsub.PeerScoreThresholds{
+		GossipThreshold:             -1000,
+		PublishThreshold:            -2000,
+		GraylistThreshold:           -8000,
+		AcceptPXThreshold:           1000,
+		OpportunisticGraftThreshold: 5,
+	}
+}
diff --git a/nodebuilder/p2p/resources.go b/nodebuilder/p2p/resources.go
new file mode 100644
index 0000000000..6e24e1e542
--- /dev/null
+++ b/nodebuilder/p2p/resources.go
@@ -0,0 +1,82 @@
+package p2p
+
+import (
+	"context"
+
+	"github.com/libp2p/go-libp2p"
+	"github.com/libp2p/go-libp2p/core/network"
+	rcmgr "github.com/libp2p/go-libp2p/p2p/host/resource-manager"
+	ma "github.com/multiformats/go-multiaddr"
+	madns "github.com/multiformats/go-multiaddr-dns"
+	"go.uber.org/fx"
+)
+
+func resourceManager(params resourceManagerParams) (network.ResourceManager, error) {
+	return rcmgr.NewResourceManager(rcmgr.NewFixedLimiter(params.Limits))
+}
+
+func infiniteResources() rcmgr.ConcreteLimitConfig {
+	return rcmgr.InfiniteLimits
+}
+
+func autoscaleResources() rcmgr.ConcreteLimitConfig {
+	limits := rcmgr.DefaultLimits
+	libp2p.SetDefaultServiceLimits(&limits)
+	return limits.AutoScale()
+}
+
+func allowList(ctx context.Context, cfg Config, bootstrappers Bootstrappers) (rcmgr.Option, error) {
+	mutual, err := cfg.mutualPeers()
+	if err != nil {
+		return nil, err
+	}
+
+	// TODO(@Wondertan): We should resolve their addresses only once, but currently
+	// we resolve it here and the libp2p stack does that as well internally
+	allowlist := make([]ma.Multiaddr, 0, len(bootstrappers)+len(mutual))
+	for _, b := range bootstrappers {
+		for _, baddr := range b.Addrs {
+			resolved, err := madns.DefaultResolver.Resolve(ctx, baddr)
+			if err != nil {
+				log.Warnw("error resolving bootstrapper DNS", "addr", baddr.String(), "err", err)
+				continue
+			}
+			allowlist = append(allowlist, resolved...)
+		}
+	}
+	for _, m := range mutual {
+		for _, maddr := range m.Addrs {
+			resolved, err := madns.DefaultResolver.Resolve(ctx, maddr)
+			if err != nil {
+				log.Warnw("error resolving mutual peer DNS", "addr", maddr.String(), "err", err)
+				continue
+			}
+			allowlist = append(allowlist, resolved...)
+		}
+	}
+
+	return rcmgr.WithAllowlistedMultiaddrs(allowlist), nil
+}
+
+func traceReporter() rcmgr.Option {
+	str, err := rcmgr.NewStatsTraceReporter()
+	if err != nil {
+		panic(err) // err is always nil as per sources
+	}
+
+	return rcmgr.WithTraceReporter(str)
+}
+
+type resourceManagerParams struct {
+	fx.In
+
+	Limits rcmgr.ConcreteLimitConfig
+	Opts   []rcmgr.Option `group:"rcmgr-opts"`
+}
+
+func resourceManagerOpt(opt any) fx.Annotated {
+	return fx.Annotated{
+		Group:  "rcmgr-opts",
+		Target: opt,
+	}
+}
diff --git a/nodebuilder/p2p/routing.go b/nodebuilder/p2p/routing.go
new file mode 100644
index 0000000000..e9eccf1d53
--- /dev/null
+++ b/nodebuilder/p2p/routing.go
@@ -0,0 +1,69 @@
+package p2p
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/ipfs/go-datastore"
+	dht "github.com/libp2p/go-libp2p-kad-dht"
+	"github.com/libp2p/go-libp2p/core/protocol"
+	"github.com/libp2p/go-libp2p/core/routing"
+	"go.uber.org/fx"
+
+	"github.com/celestiaorg/celestia-node/nodebuilder/node"
+)
+
+// contentRouting provides ContentRouting by reusing the DHT that already
+// backs PeerRouting, as a separate content routing mechanism is unneeded here.
+func contentRouting(r routing.PeerRouting) routing.ContentRouting {
+	return r.(*dht.IpfsDHT)
+}
+
+// peerRouting provides a constructor for PeerRouting over DHT.
+// Basically, this provides a way to discover peer addresses by their peer IDs.
+func peerRouting(cfg Config, tp node.Type, params routingParams) (routing.PeerRouting, error) {
+	opts := []dht.Option{
+		dht.Mode(dht.ModeAuto),
+		dht.BootstrapPeers(params.Peers...),
+		dht.ProtocolPrefix(protocol.ID(fmt.Sprintf("/celestia/%s", params.Net))),
+		dht.Datastore(params.DataStore),
+		dht.RoutingTableRefreshPeriod(cfg.RoutingTableRefreshPeriod),
+	}
+
+	if isBootstrapper() {
+		opts = append(opts,
+			dht.BootstrapPeers(), // no bootstrappers for a bootstrapper ¯\_(ツ)_/¯
+		)
+	}
+
+	if tp == node.Bridge || tp == node.Full {
+		opts = append(opts,
+			dht.Mode(dht.ModeServer),
+		)
+	}
+
+	d, err := dht.New(params.Ctx, params.Host, opts...)
+	if err != nil {
+		return nil, err
+	}
+	params.Lc.Append(fx.Hook{
+		OnStart: func(ctx context.Context) error {
+			return d.Bootstrap(ctx)
+		},
+		OnStop: func(context.Context) error {
+			return d.Close()
+		},
+	})
+	return d, nil
+}
+
+type routingParams struct {
+	fx.In
+
+	Ctx       context.Context
+	Net       Network
+	Peers     Bootstrappers
+	Lc        fx.Lifecycle
+	Host      HostBase
+	DataStore datastore.Batching
+}
diff --git a/nodebuilder/prune/module.go b/nodebuilder/prune/module.go
new file mode 100644
index 0000000000..2141b74bf1
--- /dev/null
+++ b/nodebuilder/prune/module.go
@@ -0,0 +1,47 @@
+package prune
+
+import (
+	"context"
+
+	"go.uber.org/fx"
+
+	"github.com/celestiaorg/celestia-node/nodebuilder/node"
+	"github.com/celestiaorg/celestia-node/pruner"
+	"github.com/celestiaorg/celestia-node/pruner/archival"
+	"github.com/celestiaorg/celestia-node/pruner/light"
+)
+
+func ConstructModule(tp node.Type) fx.Option {
+	baseComponents := fx.Options(
+		fx.Provide(fx.Annotate(
+			pruner.NewService,
+			fx.OnStart(func(ctx context.Context, p *pruner.Service) error {
+				return p.Start(ctx)
+			}),
+			fx.OnStop(func(ctx context.Context, p *pruner.Service) error {
+				return p.Stop(ctx)
+			}),
+		)),
+	)
+
+	switch tp {
+	case node.Full, node.Bridge:
+		return fx.Module("prune",
+			baseComponents,
+			fx.Provide(func() pruner.Pruner {
+				return archival.NewPruner()
+			}),
+			fx.Supply(archival.Window),
+		)
+	case node.Light:
+		return fx.Module("prune",
+			baseComponents,
+			fx.Provide(func() pruner.Pruner {
+				return light.NewPruner()
+			}),
+			fx.Supply(light.Window),
+		)
+	default:
+		panic("unknown node type")
+	}
+}
diff --git a/nodebuilder/rpc/config.go b/nodebuilder/rpc/config.go
new file mode 100644
index 0000000000..d6031082a8
--- /dev/null
+++ b/nodebuilder/rpc/config.go
@@ -0,0 +1,37 @@
+package rpc
+
+import (
+	"fmt"
+	"strconv"
+
+	"github.com/celestiaorg/celestia-node/libs/utils"
+)
+
+type Config struct {
+	Address  string
+	Port     string
+	SkipAuth bool
+}
+
+func DefaultConfig() Config {
+	return Config{
+		Address: defaultBindAddress,
+		// do NOT expose the same port as celestia-core by default so that both can run on the same machine
+		Port:     defaultPort,
+		SkipAuth: false,
+	}
+}
+
+func (cfg *Config) Validate() error {
+	sanitizedAddress, err := utils.ValidateAddr(cfg.Address)
+	if err != nil {
+		return fmt.Errorf("service/rpc: invalid address: %w", err)
+	}
+	cfg.Address = sanitizedAddress
+
+	_, err = strconv.Atoi(cfg.Port)
+	if err != nil {
+		return fmt.Errorf("service/rpc: invalid port: %s", err.Error())
+	}
+	return nil
+}
diff --git a/nodebuilder/rpc/config_test.go b/nodebuilder/rpc/config_test.go
new file mode 100644
index 0000000000..1c78a1a19f
--- /dev/null
+++ b/nodebuilder/rpc/config_test.go
@@ -0,0 +1,59 @@
+package rpc
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+// TestDefaultConfig tests that the default RPC config is correct.
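+// SkipAuth is deliberately omitted from the expected value: its zero value
+// (false) is exactly what DefaultConfig returns.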
+func TestDefaultConfig(t *testing.T) { + expected := Config{ + Address: defaultBindAddress, + Port: defaultPort, + } + + assert.Equal(t, expected, DefaultConfig()) +} + +func TestConfigValidate(t *testing.T) { + tests := []struct { + name string + cfg Config + err bool + }{ + { + name: "valid config", + cfg: Config{ + Address: "127.0.0.1", + Port: "8080", + }, + err: false, + }, + { + name: "invalid address", + cfg: Config{ + Address: "invalid", + Port: "8080", + }, + err: true, + }, + { + name: "invalid port", + cfg: Config{ + Address: "127.0.0.1", + Port: "invalid", + }, + err: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := tt.cfg.Validate() + if (err != nil) != tt.err { + t.Errorf("Config.Validate() error = %v, err %v", err, tt.err) + } + }) + } +} diff --git a/nodebuilder/rpc/constructors.go b/nodebuilder/rpc/constructors.go new file mode 100644 index 0000000000..43a8055207 --- /dev/null +++ b/nodebuilder/rpc/constructors.go @@ -0,0 +1,44 @@ +package rpc + +import ( + "github.com/cristalhq/jwt" + + "github.com/celestiaorg/celestia-node/api/rpc" + "github.com/celestiaorg/celestia-node/nodebuilder/blob" + "github.com/celestiaorg/celestia-node/nodebuilder/da" + "github.com/celestiaorg/celestia-node/nodebuilder/das" + "github.com/celestiaorg/celestia-node/nodebuilder/fraud" + "github.com/celestiaorg/celestia-node/nodebuilder/header" + "github.com/celestiaorg/celestia-node/nodebuilder/node" + "github.com/celestiaorg/celestia-node/nodebuilder/p2p" + "github.com/celestiaorg/celestia-node/nodebuilder/share" + "github.com/celestiaorg/celestia-node/nodebuilder/state" +) + +// registerEndpoints registers the given services on the rpc. +func registerEndpoints( + stateMod state.Module, + shareMod share.Module, + fraudMod fraud.Module, + headerMod header.Module, + daserMod das.Module, + p2pMod p2p.Module, + nodeMod node.Module, + blobMod blob.Module, + daMod da.Module, + serv *rpc.Server, +) { + serv.RegisterService("fraud", fraudMod, &fraud.API{}) + serv.RegisterService("das", daserMod, &das.API{}) + serv.RegisterService("header", headerMod, &header.API{}) + serv.RegisterService("state", stateMod, &state.API{}) + serv.RegisterService("share", shareMod, &share.API{}) + serv.RegisterService("p2p", p2pMod, &p2p.API{}) + serv.RegisterService("node", nodeMod, &node.API{}) + serv.RegisterService("blob", blobMod, &blob.API{}) + serv.RegisterService("da", daMod, &da.API{}) +} + +func server(cfg *Config, auth jwt.Signer) *rpc.Server { + return rpc.NewServer(cfg.Address, cfg.Port, cfg.SkipAuth, auth) +} diff --git a/nodebuilder/rpc/defaults.go b/nodebuilder/rpc/defaults.go new file mode 100644 index 0000000000..55e51a7c9b --- /dev/null +++ b/nodebuilder/rpc/defaults.go @@ -0,0 +1,6 @@ +package rpc + +const ( + defaultBindAddress = "localhost" + defaultPort = "26658" +) diff --git a/nodebuilder/rpc/defaults_test.go b/nodebuilder/rpc/defaults_test.go new file mode 100644 index 0000000000..74d9c98cfc --- /dev/null +++ b/nodebuilder/rpc/defaults_test.go @@ -0,0 +1,12 @@ +package rpc + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestServerDefaultConstants(t *testing.T) { + assert.Equal(t, "localhost", defaultBindAddress) + assert.Equal(t, "26658", defaultPort) +} diff --git a/nodebuilder/rpc/flags.go b/nodebuilder/rpc/flags.go new file mode 100644 index 0000000000..d37014004d --- /dev/null +++ b/nodebuilder/rpc/flags.go @@ -0,0 +1,59 @@ +package rpc + +import ( + "fmt" + + logging "github.com/ipfs/go-log/v2" + "github.com/spf13/cobra" + 
flag "github.com/spf13/pflag" +) + +var ( + log = logging.Logger("rpc") + addrFlag = "rpc.addr" + portFlag = "rpc.port" + authFlag = "rpc.skip-auth" +) + +// Flags gives a set of hardcoded node/rpc package flags. +func Flags() *flag.FlagSet { + flags := &flag.FlagSet{} + + flags.String( + addrFlag, + "", + fmt.Sprintf("Set a custom RPC listen address (default: %s)", defaultBindAddress), + ) + flags.String( + portFlag, + "", + fmt.Sprintf("Set a custom RPC port (default: %s)", defaultPort), + ) + flags.Bool( + authFlag, + false, + "Skips authentication for RPC requests", + ) + + return flags +} + +// ParseFlags parses RPC flags from the given cmd and saves them to the passed config. +func ParseFlags(cmd *cobra.Command, cfg *Config) { + addr := cmd.Flag(addrFlag).Value.String() + if addr != "" { + cfg.Address = addr + } + port := cmd.Flag(portFlag).Value.String() + if port != "" { + cfg.Port = port + } + ok, err := cmd.Flags().GetBool(authFlag) + if err != nil { + panic(err) + } + if ok { + log.Warn("RPC authentication is disabled") + cfg.SkipAuth = true + } +} diff --git a/nodebuilder/rpc/flags_test.go b/nodebuilder/rpc/flags_test.go new file mode 100644 index 0000000000..1370995833 --- /dev/null +++ b/nodebuilder/rpc/flags_test.go @@ -0,0 +1,95 @@ +package rpc + +import ( + "fmt" + "testing" + + "github.com/spf13/cobra" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestFlags(t *testing.T) { + flags := Flags() + + // Test addrFlag + addr := flags.Lookup(addrFlag) + require.NotNil(t, addr) + assert.Equal(t, "", addr.Value.String()) + assert.Equal(t, fmt.Sprintf("Set a custom RPC listen address (default: %s)", defaultBindAddress), addr.Usage) + + // Test portFlag + port := flags.Lookup(portFlag) + require.NotNil(t, port) + assert.Equal(t, "", port.Value.String()) + assert.Equal(t, fmt.Sprintf("Set a custom RPC port (default: %s)", defaultPort), port.Usage) +} + +// TestParseFlags tests the ParseFlags function in rpc/flags.go +func TestParseFlags(t *testing.T) { + tests := []struct { + name string + addrFlag string + portFlag string + expected *Config + }{ + { + name: "addrFlag is set", + addrFlag: "127.0.0.1:8080", + portFlag: "", + expected: &Config{ + Address: "127.0.0.1:8080", + Port: "", + }, + }, + { + name: "portFlag is set", + addrFlag: "", + portFlag: "9090", + expected: &Config{ + Address: "", + Port: "9090", + }, + }, + { + name: "both addrFlag and portFlag are set", + addrFlag: "192.168.0.1:1234", + portFlag: "5678", + expected: &Config{ + Address: "192.168.0.1:1234", + Port: "5678", + }, + }, + { + name: "neither addrFlag nor portFlag are set", + addrFlag: "", + portFlag: "", + expected: &Config{ + Address: "", + Port: "", + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + cmd := &cobra.Command{} + cfg := &Config{} + + cmd.Flags().AddFlagSet(Flags()) + + err := cmd.Flags().Set(addrFlag, test.addrFlag) + if err != nil { + t.Errorf(err.Error()) + } + err = cmd.Flags().Set(portFlag, test.portFlag) + if err != nil { + t.Errorf(err.Error()) + } + + ParseFlags(cmd, cfg) + assert.Equal(t, test.expected.Address, cfg.Address) + assert.Equal(t, test.expected.Port, cfg.Port) + }) + } +} diff --git a/nodebuilder/rpc/module.go b/nodebuilder/rpc/module.go new file mode 100644 index 0000000000..141018288c --- /dev/null +++ b/nodebuilder/rpc/module.go @@ -0,0 +1,40 @@ +package rpc + +import ( + "context" + + "go.uber.org/fx" + + "github.com/celestiaorg/celestia-node/api/rpc" + 
"github.com/celestiaorg/celestia-node/nodebuilder/node" +) + +func ConstructModule(tp node.Type, cfg *Config) fx.Option { + // sanitize config values before constructing module + cfgErr := cfg.Validate() + + baseComponents := fx.Options( + fx.Supply(cfg), + fx.Error(cfgErr), + fx.Provide(fx.Annotate( + server, + fx.OnStart(func(ctx context.Context, server *rpc.Server) error { + return server.Start(ctx) + }), + fx.OnStop(func(ctx context.Context, server *rpc.Server) error { + return server.Stop(ctx) + }), + )), + ) + + switch tp { + case node.Light, node.Full, node.Bridge: + return fx.Module( + "rpc", + baseComponents, + fx.Invoke(registerEndpoints), + ) + default: + panic("invalid node type") + } +} diff --git a/nodebuilder/settings.go b/nodebuilder/settings.go new file mode 100644 index 0000000000..298976fda4 --- /dev/null +++ b/nodebuilder/settings.go @@ -0,0 +1,226 @@ +package nodebuilder + +import ( + "context" + "fmt" + "time" + + logging "github.com/ipfs/go-log/v2" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/pyroscope-io/client/pyroscope" + otelpyroscope "github.com/pyroscope-io/otel-profiling-go" + "go.opentelemetry.io/contrib/instrumentation/runtime" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp" + "go.opentelemetry.io/otel/exporters/otlp/otlptrace" + "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp" + sdk "go.opentelemetry.io/otel/sdk/metric" + "go.opentelemetry.io/otel/sdk/resource" + tracesdk "go.opentelemetry.io/otel/sdk/trace" + semconv "go.opentelemetry.io/otel/semconv/v1.11.0" + "go.opentelemetry.io/otel/trace" + "go.uber.org/fx" + + "github.com/celestiaorg/go-fraud" + + "github.com/celestiaorg/celestia-node/header" + modcore "github.com/celestiaorg/celestia-node/nodebuilder/core" + "github.com/celestiaorg/celestia-node/nodebuilder/das" + modhead "github.com/celestiaorg/celestia-node/nodebuilder/header" + "github.com/celestiaorg/celestia-node/nodebuilder/node" + "github.com/celestiaorg/celestia-node/nodebuilder/p2p" + "github.com/celestiaorg/celestia-node/nodebuilder/share" + "github.com/celestiaorg/celestia-node/state" +) + +const defaultMetricsCollectInterval = 10 * time.Second + +// WithNetwork specifies the Network to which the Node should connect to. +// WARNING: Use this option with caution and never run the Node with different networks over the +// same persisted Store. +func WithNetwork(net p2p.Network) fx.Option { + return fx.Replace(net) +} + +// WithBootstrappers sets custom bootstrap peers. +func WithBootstrappers(peers p2p.Bootstrappers) fx.Option { + return fx.Replace(peers) +} + +// WithPyroscope enables pyroscope profiling for the node. +func WithPyroscope(endpoint string, nodeType node.Type) fx.Option { + return fx.Options( + fx.Invoke(func(peerID peer.ID) error { + _, err := pyroscope.Start(pyroscope.Config{ + UploadRate: 15 * time.Second, + ApplicationName: "celestia.da-node", + ServerAddress: endpoint, + Tags: map[string]string{ + "type": nodeType.String(), + "peerId": peerID.String(), + }, + Logger: nil, + ProfileTypes: []pyroscope.ProfileType{ + pyroscope.ProfileCPU, + pyroscope.ProfileAllocObjects, + pyroscope.ProfileAllocSpace, + pyroscope.ProfileInuseObjects, + pyroscope.ProfileInuseSpace, + pyroscope.ProfileGoroutines, + }, + }) + return err + }), + ) +} + +// WithMetrics enables metrics exporting for the node. 
+func WithMetrics(metricOpts []otlpmetrichttp.Option, nodeType node.Type) fx.Option { + // TODO @renaynay: this will be refactored when there is more granular + // control over which module to enable metrics for + modhead.MetricsEnabled = true + modcore.MetricsEnabled = true + + baseComponents := fx.Options( + fx.Supply(metricOpts), + fx.Invoke(initializeMetrics), + fx.Invoke(func(ca *state.CoreAccessor) { + if ca == nil { + return + } + state.WithMetrics(ca) + }), + fx.Invoke(fraud.WithMetrics[*header.ExtendedHeader]), + fx.Invoke(node.WithMetrics), + fx.Invoke(share.WithDiscoveryMetrics), + ) + + samplingMetrics := fx.Options( + fx.Invoke(das.WithMetrics), + fx.Invoke(share.WithPeerManagerMetrics), + fx.Invoke(share.WithShrexClientMetrics), + fx.Invoke(share.WithShrexGetterMetrics), + ) + + var opts fx.Option + switch nodeType { + case node.Full: + opts = fx.Options( + baseComponents, + fx.Invoke(share.WithStoreMetrics), + fx.Invoke(share.WithShrexServerMetrics), + samplingMetrics, + ) + case node.Light: + opts = fx.Options( + baseComponents, + samplingMetrics, + ) + case node.Bridge: + opts = fx.Options( + baseComponents, + fx.Invoke(share.WithStoreMetrics), + fx.Invoke(share.WithShrexServerMetrics), + ) + default: + panic("invalid node type") + } + return opts +} + +func WithTraces(opts []otlptracehttp.Option, pyroOpts []otelpyroscope.Option) fx.Option { + options := fx.Options( + fx.Supply(opts), + fx.Supply(pyroOpts), + fx.Invoke(initializeTraces), + ) + return options +} + +func initializeTraces( + ctx context.Context, + nodeType node.Type, + peerID peer.ID, + network p2p.Network, + opts []otlptracehttp.Option, + pyroOpts []otelpyroscope.Option, +) error { + client := otlptracehttp.NewClient(opts...) + exporter, err := otlptrace.New(ctx, client) + if err != nil { + return fmt.Errorf("creating OTLP trace exporter: %w", err) + } + + var tp trace.TracerProvider + tp = tracesdk.NewTracerProvider( + tracesdk.WithSampler(tracesdk.AlwaysSample()), + // Always be sure to batch in production. + tracesdk.WithBatcher(exporter), + // Record information about this application in a Resource. + tracesdk.WithResource(resource.NewWithAttributes( + semconv.SchemaURL, + semconv.ServiceNamespaceKey.String(nodeType.String()), + semconv.ServiceNameKey.String(fmt.Sprintf("%s/%s", network.String(), peerID.String()))), + )) + + if len(pyroOpts) > 0 { + tp = otelpyroscope.NewTracerProvider(tp, pyroOpts...) + } + otel.SetTracerProvider(tp) + return nil +} + +// initializeMetrics initializes the global meter provider. +func initializeMetrics( + ctx context.Context, + lc fx.Lifecycle, + peerID peer.ID, + nodeType node.Type, + network p2p.Network, + opts []otlpmetrichttp.Option, +) error { + exp, err := otlpmetrichttp.New(ctx, opts...) 
+ if err != nil { + return err + } + + provider := sdk.NewMeterProvider( + sdk.WithReader( + sdk.NewPeriodicReader(exp, + sdk.WithTimeout(defaultMetricsCollectInterval), + sdk.WithInterval(defaultMetricsCollectInterval))), + sdk.WithResource( + resource.NewWithAttributes( + semconv.SchemaURL, + // ServiceNamespaceKey and ServiceNameKey will be concatenated into single attribute with key: + // "job" and value: "%service.namespace%/%service.name%" + semconv.ServiceNamespaceKey.String(network.String()), + semconv.ServiceNameKey.String(nodeType.String()), + // ServiceInstanceIDKey will be exported with key: "instance" + semconv.ServiceInstanceIDKey.String(peerID.String()), + ))) + + err = runtime.Start( + runtime.WithMinimumReadMemStatsInterval(defaultMetricsCollectInterval), + runtime.WithMeterProvider(provider)) + if err != nil { + return fmt.Errorf("start runtime metrics: %w", err) + } + + lc.Append(fx.Hook{ + OnStop: func(ctx context.Context) error { + return provider.Shutdown(ctx) + }, + }) + otel.SetMeterProvider(provider) + otel.SetErrorHandler(&loggingErrorHandler{}) + return nil +} + +var metricsLogger = logging.Logger("otlp") + +type loggingErrorHandler struct{} + +func (loggingErrorHandler) Handle(err error) { + metricsLogger.Error(err) +} diff --git a/nodebuilder/share/cmd/share.go b/nodebuilder/share/cmd/share.go new file mode 100644 index 0000000000..b890f2d4c0 --- /dev/null +++ b/nodebuilder/share/cmd/share.go @@ -0,0 +1,179 @@ +package cmd + +import ( + "context" + "encoding/hex" + "fmt" + "strconv" + + "github.com/spf13/cobra" + + rpc "github.com/celestiaorg/celestia-node/api/rpc/client" + cmdnode "github.com/celestiaorg/celestia-node/cmd" + "github.com/celestiaorg/celestia-node/header" + "github.com/celestiaorg/celestia-node/share" +) + +func init() { + Cmd.AddCommand( + sharesAvailableCmd, + getSharesByNamespaceCmd, + getShare, + getEDS, + ) +} + +var Cmd = &cobra.Command{ + Use: "share [command]", + Short: "Allows interaction with the Share Module via JSON-RPC", + Args: cobra.NoArgs, + PersistentPreRunE: cmdnode.InitClient, +} + +var sharesAvailableCmd = &cobra.Command{ + Use: "available", + Short: "Subjectively validates if Shares committed to the given Root are available on the Network.", + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + client, err := cmdnode.ParseClientFromCtx(cmd.Context()) + if err != nil { + return err + } + defer client.Close() + + eh, err := getExtendedHeaderFromCmdArg(cmd.Context(), client, args[0]) + + if err != nil { + return err + } + + err = client.Share.SharesAvailable(cmd.Context(), eh) + formatter := func(data interface{}) interface{} { + err, ok := data.(error) + available := false + if !ok { + available = true + } + return struct { + Available bool `json:"available"` + Hash []byte `json:"dah_hash"` + Reason error `json:"reason,omitempty"` + }{ + Available: available, + Hash: []byte(args[0]), + Reason: err, + } + } + return cmdnode.PrintOutput(err, nil, formatter) + }, +} + +var getSharesByNamespaceCmd = &cobra.Command{ + Use: "get-by-namespace (height | hash) namespace", + Short: "Gets all shares from an EDS within the given namespace.", + Args: cobra.ExactArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + client, err := cmdnode.ParseClientFromCtx(cmd.Context()) + if err != nil { + return err + } + defer client.Close() + + eh, err := getExtendedHeaderFromCmdArg(cmd.Context(), client, args[0]) + + if err != nil { + return err + } + + ns, err := cmdnode.ParseV0Namespace(args[1]) + if err 
!= nil { + return err + } + + shares, err := client.Share.GetSharesByNamespace(cmd.Context(), eh, ns) + return cmdnode.PrintOutput(shares, err, nil) + }, +} + +var getShare = &cobra.Command{ + Use: "get-share (height | hash) row col", + Short: "Gets a Share by coordinates in EDS.", + Args: cobra.ExactArgs(3), + RunE: func(cmd *cobra.Command, args []string) error { + client, err := cmdnode.ParseClientFromCtx(cmd.Context()) + if err != nil { + return err + } + defer client.Close() + + eh, err := getExtendedHeaderFromCmdArg(cmd.Context(), client, args[0]) + + if err != nil { + return err + } + + row, err := strconv.ParseInt(args[1], 10, 64) + if err != nil { + return err + } + + col, err := strconv.ParseInt(args[2], 10, 64) + if err != nil { + return err + } + + s, err := client.Share.GetShare(cmd.Context(), eh, int(row), int(col)) + + formatter := func(data interface{}) interface{} { + sh, ok := data.(share.Share) + if !ok { + return data + } + + ns := hex.EncodeToString(share.GetNamespace(sh)) + + return struct { + Namespace string `json:"namespace"` + Data []byte `json:"data"` + }{ + Namespace: ns, + Data: share.GetData(sh), + } + } + return cmdnode.PrintOutput(s, err, formatter) + }, +} + +var getEDS = &cobra.Command{ + Use: "get-eds (height | hash)", + Short: "Gets the full EDS identified by the given block height", + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + client, err := cmdnode.ParseClientFromCtx(cmd.Context()) + if err != nil { + return err + } + defer client.Close() + + eh, err := getExtendedHeaderFromCmdArg(cmd.Context(), client, args[0]) + + if err != nil { + return err + } + + shares, err := client.Share.GetEDS(cmd.Context(), eh) + return cmdnode.PrintOutput(shares, err, nil) + }, +} + +func getExtendedHeaderFromCmdArg(ctx context.Context, client *rpc.Client, arg string) (*header.ExtendedHeader, error) { + hash, err := hex.DecodeString(arg) + if err == nil { + return client.Header.GetByHash(ctx, hash) + } + height, err := strconv.ParseUint(arg, 10, 64) + if err != nil { + return nil, fmt.Errorf("can't parse the height/hash argument: %w", err) + } + return client.Header.GetByHeight(ctx, height) +} diff --git a/nodebuilder/share/config.go b/nodebuilder/share/config.go new file mode 100644 index 0000000000..1d984b6dca --- /dev/null +++ b/nodebuilder/share/config.go @@ -0,0 +1,74 @@ +package share + +import ( + "fmt" + + "github.com/celestiaorg/celestia-node/nodebuilder/node" + "github.com/celestiaorg/celestia-node/share/availability/light" + "github.com/celestiaorg/celestia-node/share/eds" + "github.com/celestiaorg/celestia-node/share/p2p/discovery" + "github.com/celestiaorg/celestia-node/share/p2p/peers" + "github.com/celestiaorg/celestia-node/share/p2p/shrexeds" + "github.com/celestiaorg/celestia-node/share/p2p/shrexnd" +) + +// TODO: some params are pointers and other are not, Let's fix this. 
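+//
+// For orientation, this struct maps onto the `[Share]` section of the node's
+// TOML config file (section name assumed from the parent Config field); e.g.
+// the boolean below would render roughly as:
+//
+//	[Share]
+//	  UseShareExchange = true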
+type Config struct { + // EDSStoreParams sets eds store configuration parameters + EDSStoreParams *eds.Parameters + + UseShareExchange bool + // ShrExEDSParams sets shrexeds client and server configuration parameters + ShrExEDSParams *shrexeds.Parameters + // ShrExNDParams sets shrexnd client and server configuration parameters + ShrExNDParams *shrexnd.Parameters + // PeerManagerParams sets peer-manager configuration parameters + PeerManagerParams peers.Parameters + + LightAvailability light.Parameters `toml:",omitempty"` + Discovery *discovery.Parameters +} + +func DefaultConfig(tp node.Type) Config { + cfg := Config{ + EDSStoreParams: eds.DefaultParameters(), + Discovery: discovery.DefaultParameters(), + ShrExEDSParams: shrexeds.DefaultParameters(), + ShrExNDParams: shrexnd.DefaultParameters(), + UseShareExchange: true, + PeerManagerParams: peers.DefaultParameters(), + } + + if tp == node.Light { + cfg.LightAvailability = light.DefaultParameters() + } + + return cfg +} + +// Validate performs basic validation of the config. +func (cfg *Config) Validate(tp node.Type) error { + if tp == node.Light { + if err := cfg.LightAvailability.Validate(); err != nil { + return fmt.Errorf("nodebuilder/share: %w", err) + } + } + + if err := cfg.Discovery.Validate(); err != nil { + return fmt.Errorf("nodebuilder/share: %w", err) + } + + if err := cfg.ShrExNDParams.Validate(); err != nil { + return fmt.Errorf("nodebuilder/share: %w", err) + } + + if err := cfg.ShrExEDSParams.Validate(); err != nil { + return fmt.Errorf("nodebuilder/share: %w", err) + } + + if err := cfg.PeerManagerParams.Validate(); err != nil { + return fmt.Errorf("nodebuilder/share: %w", err) + } + + return nil +} diff --git a/nodebuilder/share/constructors.go b/nodebuilder/share/constructors.go new file mode 100644 index 0000000000..e13786a4d9 --- /dev/null +++ b/nodebuilder/share/constructors.go @@ -0,0 +1,116 @@ +package share + +import ( + "context" + "errors" + + "github.com/filecoin-project/dagstore" + "github.com/ipfs/boxo/blockservice" + "github.com/libp2p/go-libp2p/core/host" + "github.com/libp2p/go-libp2p/core/routing" + routingdisc "github.com/libp2p/go-libp2p/p2p/discovery/routing" + + "github.com/celestiaorg/celestia-app/pkg/da" + + "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/share/eds" + "github.com/celestiaorg/celestia-node/share/getters" + "github.com/celestiaorg/celestia-node/share/ipld" + disc "github.com/celestiaorg/celestia-node/share/p2p/discovery" + "github.com/celestiaorg/celestia-node/share/p2p/peers" +) + +const ( + // fullNodesTag is the tag used to identify full nodes in the discovery service. + fullNodesTag = "full" +) + +func newDiscovery(cfg *disc.Parameters, +) func(routing.ContentRouting, host.Host, *peers.Manager) (*disc.Discovery, error) { + return func( + r routing.ContentRouting, + h host.Host, + manager *peers.Manager, + ) (*disc.Discovery, error) { + return disc.NewDiscovery( + cfg, + h, + routingdisc.NewRoutingDiscovery(r), + fullNodesTag, + disc.WithOnPeersUpdate(manager.UpdateFullNodePool), + ) + } +} + +func newShareModule(getter share.Getter, avail share.Availability) Module { + return &module{getter, avail} +} + +// ensureEmptyCARExists adds an empty EDS to the provided EDS store. 
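+// It is idempotent: if the shard is already present, the resulting
+// dagstore.ErrShardExists is swallowed below.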
+func ensureEmptyCARExists(ctx context.Context, store *eds.Store) error { + emptyEDS := share.EmptyExtendedDataSquare() + emptyDAH, err := da.NewDataAvailabilityHeader(emptyEDS) + if err != nil { + return err + } + + err = store.Put(ctx, emptyDAH.Hash(), emptyEDS) + if errors.Is(err, dagstore.ErrShardExists) { + return nil + } + return err +} + +// ensureEmptyEDSInBS checks if the given DAG contains an empty block data square. +// If it does not, it stores an empty block. This optimization exists to prevent +// redundant storing of empty block data so that it is only stored once and returned +// upon request for a block with an empty data square. +func ensureEmptyEDSInBS(ctx context.Context, bServ blockservice.BlockService) error { + _, err := ipld.AddShares(ctx, share.EmptyBlockShares(), bServ) + return err +} + +func lightGetter( + shrexGetter *getters.ShrexGetter, + ipldGetter *getters.IPLDGetter, + cfg Config, +) share.Getter { + var cascade []share.Getter + if cfg.UseShareExchange { + cascade = append(cascade, shrexGetter) + } + cascade = append(cascade, ipldGetter) + return getters.NewCascadeGetter(cascade) +} + +// ShrexGetter is added to bridge nodes for the case that a shard is removed +// after detected shard corruption. This ensures the block is fetched and stored +// by shrex the next time the data is retrieved (meaning shard recovery is +// manual after corruption is detected). +func bridgeGetter( + storeGetter *getters.StoreGetter, + shrexGetter *getters.ShrexGetter, + cfg Config, +) share.Getter { + var cascade []share.Getter + cascade = append(cascade, storeGetter) + if cfg.UseShareExchange { + cascade = append(cascade, shrexGetter) + } + return getters.NewCascadeGetter(cascade) +} + +func fullGetter( + storeGetter *getters.StoreGetter, + shrexGetter *getters.ShrexGetter, + ipldGetter *getters.IPLDGetter, + cfg Config, +) share.Getter { + var cascade []share.Getter + cascade = append(cascade, storeGetter) + if cfg.UseShareExchange { + cascade = append(cascade, shrexGetter) + } + cascade = append(cascade, ipldGetter) + return getters.NewCascadeGetter(cascade) +} diff --git a/nodebuilder/share/mocks/api.go b/nodebuilder/share/mocks/api.go new file mode 100644 index 0000000000..4e21cecae0 --- /dev/null +++ b/nodebuilder/share/mocks/api.go @@ -0,0 +1,97 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/celestiaorg/celestia-node/nodebuilder/share (interfaces: Module) + +// Package mocks is a generated GoMock package. +package mocks + +import ( + context "context" + reflect "reflect" + + header "github.com/celestiaorg/celestia-node/header" + share "github.com/celestiaorg/celestia-node/share" + rsmt2d "github.com/celestiaorg/rsmt2d" + gomock "github.com/golang/mock/gomock" +) + +// MockModule is a mock of Module interface. +type MockModule struct { + ctrl *gomock.Controller + recorder *MockModuleMockRecorder +} + +// MockModuleMockRecorder is the mock recorder for MockModule. +type MockModuleMockRecorder struct { + mock *MockModule +} + +// NewMockModule creates a new mock instance. +func NewMockModule(ctrl *gomock.Controller) *MockModule { + mock := &MockModule{ctrl: ctrl} + mock.recorder = &MockModuleMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockModule) EXPECT() *MockModuleMockRecorder { + return m.recorder +} + +// GetEDS mocks base method. 
+func (m *MockModule) GetEDS(arg0 context.Context, arg1 *header.ExtendedHeader) (*rsmt2d.ExtendedDataSquare, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetEDS", arg0, arg1) + ret0, _ := ret[0].(*rsmt2d.ExtendedDataSquare) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetEDS indicates an expected call of GetEDS. +func (mr *MockModuleMockRecorder) GetEDS(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetEDS", reflect.TypeOf((*MockModule)(nil).GetEDS), arg0, arg1) +} + +// GetShare mocks base method. +func (m *MockModule) GetShare(arg0 context.Context, arg1 *header.ExtendedHeader, arg2, arg3 int) ([]byte, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetShare", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].([]byte) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetShare indicates an expected call of GetShare. +func (mr *MockModuleMockRecorder) GetShare(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetShare", reflect.TypeOf((*MockModule)(nil).GetShare), arg0, arg1, arg2, arg3) +} + +// GetSharesByNamespace mocks base method. +func (m *MockModule) GetSharesByNamespace(arg0 context.Context, arg1 *header.ExtendedHeader, arg2 share.Namespace) (share.NamespacedShares, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetSharesByNamespace", arg0, arg1, arg2) + ret0, _ := ret[0].(share.NamespacedShares) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetSharesByNamespace indicates an expected call of GetSharesByNamespace. +func (mr *MockModuleMockRecorder) GetSharesByNamespace(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSharesByNamespace", reflect.TypeOf((*MockModule)(nil).GetSharesByNamespace), arg0, arg1, arg2) +} + +// SharesAvailable mocks base method. +func (m *MockModule) SharesAvailable(arg0 context.Context, arg1 *header.ExtendedHeader) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SharesAvailable", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// SharesAvailable indicates an expected call of SharesAvailable. 
+func (mr *MockModuleMockRecorder) SharesAvailable(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SharesAvailable", reflect.TypeOf((*MockModule)(nil).SharesAvailable), arg0, arg1) +} diff --git a/nodebuilder/share/module.go b/nodebuilder/share/module.go new file mode 100644 index 0000000000..7caaf39a92 --- /dev/null +++ b/nodebuilder/share/module.go @@ -0,0 +1,278 @@ +package share + +import ( + "context" + + "github.com/ipfs/go-datastore" + "github.com/libp2p/go-libp2p/core/host" + "github.com/libp2p/go-libp2p/p2p/net/conngater" + "go.uber.org/fx" + + libhead "github.com/celestiaorg/go-header" + "github.com/celestiaorg/go-header/sync" + + "github.com/celestiaorg/celestia-node/header" + "github.com/celestiaorg/celestia-node/nodebuilder/node" + modp2p "github.com/celestiaorg/celestia-node/nodebuilder/p2p" + "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/share/availability/full" + "github.com/celestiaorg/celestia-node/share/availability/light" + "github.com/celestiaorg/celestia-node/share/eds" + "github.com/celestiaorg/celestia-node/share/getters" + disc "github.com/celestiaorg/celestia-node/share/p2p/discovery" + "github.com/celestiaorg/celestia-node/share/p2p/peers" + "github.com/celestiaorg/celestia-node/share/p2p/shrexeds" + "github.com/celestiaorg/celestia-node/share/p2p/shrexnd" + "github.com/celestiaorg/celestia-node/share/p2p/shrexsub" +) + +func ConstructModule(tp node.Type, cfg *Config, options ...fx.Option) fx.Option { + // sanitize config values before constructing module + cfgErr := cfg.Validate(tp) + + baseComponents := fx.Options( + fx.Supply(*cfg), + fx.Error(cfgErr), + fx.Options(options...), + fx.Provide(newShareModule), + peerManagerComponents(tp, cfg), + discoveryComponents(cfg), + shrexSubComponents(), + ) + + bridgeAndFullComponents := fx.Options( + fx.Provide(getters.NewStoreGetter), + shrexServerComponents(cfg), + edsStoreComponents(cfg), + fullAvailabilityComponents(), + shrexGetterComponents(cfg), + fx.Provide(func(shrexSub *shrexsub.PubSub) shrexsub.BroadcastFn { + return shrexSub.Broadcast + }), + ) + + switch tp { + case node.Bridge: + return fx.Module( + "share", + baseComponents, + bridgeAndFullComponents, + fx.Provide(func() peers.Parameters { + return cfg.PeerManagerParams + }), + fx.Provide(bridgeGetter), + fx.Invoke(func(lc fx.Lifecycle, sub *shrexsub.PubSub) error { + lc.Append(fx.Hook{ + OnStart: sub.Start, + OnStop: sub.Stop, + }) + return nil + }), + ) + case node.Full: + return fx.Module( + "share", + baseComponents, + bridgeAndFullComponents, + fx.Provide(getters.NewIPLDGetter), + fx.Provide(fullGetter), + ) + case node.Light: + return fx.Module( + "share", + baseComponents, + shrexGetterComponents(cfg), + lightAvailabilityComponents(cfg), + fx.Invoke(ensureEmptyEDSInBS), + fx.Provide(getters.NewIPLDGetter), + fx.Provide(lightGetter), + // shrexsub broadcaster stub for daser + fx.Provide(func() shrexsub.BroadcastFn { + return func(context.Context, shrexsub.Notification) error { + return nil + } + }), + ) + default: + panic("invalid node type") + } +} + +func discoveryComponents(cfg *Config) fx.Option { + return fx.Options( + fx.Invoke(func(disc *disc.Discovery) {}), + fx.Provide(fx.Annotate( + newDiscovery(cfg.Discovery), + fx.OnStart(func(ctx context.Context, d *disc.Discovery) error { + return d.Start(ctx) + }), + fx.OnStop(func(ctx context.Context, d *disc.Discovery) error { + return d.Stop(ctx) + }), + )), + ) +} + +func 
peerManagerComponents(tp node.Type, cfg *Config) fx.Option {
+	switch tp {
+	case node.Full, node.Light:
+		return fx.Options(
+			fx.Provide(func() peers.Parameters {
+				return cfg.PeerManagerParams
+			}),
+			fx.Provide(
+				func(
+					params peers.Parameters,
+					host host.Host,
+					connGater *conngater.BasicConnectionGater,
+					shrexSub *shrexsub.PubSub,
+					headerSub libhead.Subscriber[*header.ExtendedHeader],
+					// we must ensure Syncer is started before PeerManager
+					// so that Syncer registers header validator before PeerManager subscribes to headers
+					_ *sync.Syncer[*header.ExtendedHeader],
+				) (*peers.Manager, error) {
+					return peers.NewManager(
+						params,
+						host,
+						connGater,
+						peers.WithShrexSubPools(shrexSub, headerSub),
+					)
+				},
+			),
+		)
+	case node.Bridge:
+		return fx.Provide(peers.NewManager)
+	default:
+		panic("invalid node type")
+	}
+}
+
+func shrexSubComponents() fx.Option {
+	return fx.Provide(
+		func(ctx context.Context, h host.Host, network modp2p.Network) (*shrexsub.PubSub, error) {
+			return shrexsub.NewPubSub(ctx, h, network.String())
+		},
+	)
+}
+
+// shrexGetterComponents provides components for a shrex getter that
+// is capable of requesting EDSs and shares by namespace from peers
+// over the shrex protocols.
+func shrexGetterComponents(cfg *Config) fx.Option {
+	return fx.Options(
+		// shrex-nd client
+		fx.Provide(
+			func(host host.Host, network modp2p.Network) (*shrexnd.Client, error) {
+				cfg.ShrExNDParams.WithNetworkID(network.String())
+				return shrexnd.NewClient(cfg.ShrExNDParams, host)
+			},
+		),
+
+		// shrex-eds client
+		fx.Provide(
+			func(host host.Host, network modp2p.Network) (*shrexeds.Client, error) {
+				cfg.ShrExEDSParams.WithNetworkID(network.String())
+				return shrexeds.NewClient(cfg.ShrExEDSParams, host)
+			},
+		),
+
+		fx.Provide(fx.Annotate(
+			getters.NewShrexGetter,
+			fx.OnStart(func(ctx context.Context, getter *getters.ShrexGetter) error {
+				return getter.Start(ctx)
+			}),
+			fx.OnStop(func(ctx context.Context, getter *getters.ShrexGetter) error {
+				return getter.Stop(ctx)
+			}),
+		)),
+	)
+}
+
+func shrexServerComponents(cfg *Config) fx.Option {
+	return fx.Options(
+		fx.Invoke(func(edsSrv *shrexeds.Server, ndSrc *shrexnd.Server) {}),
+		fx.Provide(fx.Annotate(
+			func(host host.Host, store *eds.Store, network modp2p.Network) (*shrexeds.Server, error) {
+				cfg.ShrExEDSParams.WithNetworkID(network.String())
+				return shrexeds.NewServer(cfg.ShrExEDSParams, host, store)
+			},
+			fx.OnStart(func(ctx context.Context, server *shrexeds.Server) error {
+				return server.Start(ctx)
+			}),
+			fx.OnStop(func(ctx context.Context, server *shrexeds.Server) error {
+				return server.Stop(ctx)
+			}),
+		)),
+		fx.Provide(fx.Annotate(
+			func(
+				host host.Host,
+				store *eds.Store,
+				network modp2p.Network,
+			) (*shrexnd.Server, error) {
+				cfg.ShrExNDParams.WithNetworkID(network.String())
+				return shrexnd.NewServer(cfg.ShrExNDParams, host, store)
+			},
+			fx.OnStart(func(ctx context.Context, server *shrexnd.Server) error {
+				return server.Start(ctx)
+			}),
+			fx.OnStop(func(ctx context.Context, server *shrexnd.Server) error {
+				return server.Stop(ctx)
+			})),
+		),
+	)
+}
+
+func edsStoreComponents(cfg *Config) fx.Option {
+	return fx.Options(
+		fx.Provide(fx.Annotate(
+			func(path node.StorePath, ds datastore.Batching) (*eds.Store, error) {
+				return eds.NewStore(cfg.EDSStoreParams, string(path), ds)
+			},
+			fx.OnStart(func(ctx context.Context, store *eds.Store) error {
+				err := store.Start(ctx)
+				if err != nil {
+					return err
+				}
+				return ensureEmptyCARExists(ctx, store)
+			}),
+			fx.OnStop(func(ctx context.Context, store *eds.Store) error {
+				return store.Stop(ctx)
+			}),
+		)),
+	)
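+	// ensureEmptyCARExists (invoked in the OnStart hook above) pre-seeds the store
+	// with the empty EDS, so the empty data root is always resolvable locally;
+	// Test_EmptyCARExists later in this diff verifies both presence and contents.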
+}
+
+func fullAvailabilityComponents() fx.Option {
+	return fx.Options(
+		fx.Provide(fx.Annotate(
+			full.NewShareAvailability,
+			fx.OnStart(func(ctx context.Context, avail *full.ShareAvailability) error {
+				return avail.Start(ctx)
+			}),
+			fx.OnStop(func(ctx context.Context, avail *full.ShareAvailability) error {
+				return avail.Stop(ctx)
+			}),
+		)),
+		fx.Provide(func(avail *full.ShareAvailability) share.Availability {
+			return avail
+		}),
+	)
+}
+
+func lightAvailabilityComponents(cfg *Config) fx.Option {
+	return fx.Options(
+		fx.Provide(fx.Annotate(
+			light.NewShareAvailability,
+			fx.OnStop(func(ctx context.Context, la *light.ShareAvailability) error {
+				return la.Close(ctx)
+			}),
+		)),
+		fx.Provide(func() []light.Option {
+			return []light.Option{
+				light.WithSampleAmount(cfg.LightAvailability.SampleAmount),
+			}
+		}),
+		fx.Provide(func(avail *light.ShareAvailability) share.Availability {
+			return avail
+		}),
+	)
+}
diff --git a/nodebuilder/share/opts.go b/nodebuilder/share/opts.go
new file mode 100644
index 0000000000..e236847f41
--- /dev/null
+++ b/nodebuilder/share/opts.go
@@ -0,0 +1,48 @@
+package share
+
+import (
+	"github.com/celestiaorg/celestia-node/share/eds"
+	"github.com/celestiaorg/celestia-node/share/getters"
+	disc "github.com/celestiaorg/celestia-node/share/p2p/discovery"
+	"github.com/celestiaorg/celestia-node/share/p2p/peers"
+	"github.com/celestiaorg/celestia-node/share/p2p/shrexeds"
+	"github.com/celestiaorg/celestia-node/share/p2p/shrexnd"
+)
+
+// WithPeerManagerMetrics is a utility function that turns on peer manager metrics;
+// it is expected to be "invoked" by the fx lifecycle.
+func WithPeerManagerMetrics(m *peers.Manager) error {
+	return m.WithMetrics()
+}
+
+// WithDiscoveryMetrics is a utility function that turns on discovery metrics;
+// it is expected to be "invoked" by the fx lifecycle.
+func WithDiscoveryMetrics(d *disc.Discovery) error {
+	return d.WithMetrics()
+}
+
+func WithShrexClientMetrics(edsClient *shrexeds.Client, ndClient *shrexnd.Client) error {
+	err := edsClient.WithMetrics()
+	if err != nil {
+		return err
+	}
+
+	return ndClient.WithMetrics()
+}
+
+func WithShrexServerMetrics(edsServer *shrexeds.Server, ndServer *shrexnd.Server) error {
+	err := edsServer.WithMetrics()
+	if err != nil {
+		return err
+	}
+
+	return ndServer.WithMetrics()
+}
+
+func WithShrexGetterMetrics(sg *getters.ShrexGetter) error {
+	return sg.WithMetrics()
+}
+
+func WithStoreMetrics(s *eds.Store) error {
+	return s.WithMetrics()
+}
diff --git a/nodebuilder/share/share.go b/nodebuilder/share/share.go
new file mode 100644
index 0000000000..a8e1e1c895
--- /dev/null
+++ b/nodebuilder/share/share.go
@@ -0,0 +1,94 @@
+package share
+
+import (
+	"context"
+
+	"github.com/celestiaorg/rsmt2d"
+
+	"github.com/celestiaorg/celestia-node/header"
+	"github.com/celestiaorg/celestia-node/share"
+)
+
+var _ Module = (*API)(nil)
+
+// Module provides access to any data square or block share on the network.
+//
+// All Get methods provided on Module follow this flow:
+//  1. Check local storage for the requested Share.
+//  2. If exists
+//     * Load from disk
+//     * Return
+//  3. If not
+//     * Find provider on the network
+//     * Fetch the Share from the provider
+//     * Store the Share
+//     * Return
+//
+// Any method signature changed here needs to also be changed in the API struct.
+//
+//go:generate mockgen -destination=mocks/api.go -package=mocks . 
Module +type Module interface { + // SharesAvailable subjectively validates if Shares committed to the given + // ExtendedHeader are available on the Network. + SharesAvailable(context.Context, *header.ExtendedHeader) error + // GetShare gets a Share by coordinates in EDS. + GetShare(ctx context.Context, header *header.ExtendedHeader, row, col int) (share.Share, error) + // GetEDS gets the full EDS identified by the given extended header. + GetEDS(ctx context.Context, header *header.ExtendedHeader) (*rsmt2d.ExtendedDataSquare, error) + // GetSharesByNamespace gets all shares from an EDS within the given namespace. + // Shares are returned in a row-by-row order if the namespace spans multiple rows. + GetSharesByNamespace( + ctx context.Context, header *header.ExtendedHeader, namespace share.Namespace, + ) (share.NamespacedShares, error) +} + +// API is a wrapper around Module for the RPC. +// TODO(@distractedm1nd): These structs need to be autogenerated. +type API struct { + Internal struct { + SharesAvailable func(context.Context, *header.ExtendedHeader) error `perm:"read"` + GetShare func( + ctx context.Context, + header *header.ExtendedHeader, + row, col int, + ) (share.Share, error) `perm:"read"` + GetEDS func( + ctx context.Context, + header *header.ExtendedHeader, + ) (*rsmt2d.ExtendedDataSquare, error) `perm:"read"` + GetSharesByNamespace func( + ctx context.Context, + header *header.ExtendedHeader, + namespace share.Namespace, + ) (share.NamespacedShares, error) `perm:"read"` + } +} + +func (api *API) SharesAvailable(ctx context.Context, header *header.ExtendedHeader) error { + return api.Internal.SharesAvailable(ctx, header) +} + +func (api *API) GetShare(ctx context.Context, header *header.ExtendedHeader, row, col int) (share.Share, error) { + return api.Internal.GetShare(ctx, header, row, col) +} + +func (api *API) GetEDS(ctx context.Context, header *header.ExtendedHeader) (*rsmt2d.ExtendedDataSquare, error) { + return api.Internal.GetEDS(ctx, header) +} + +func (api *API) GetSharesByNamespace( + ctx context.Context, + header *header.ExtendedHeader, + namespace share.Namespace, +) (share.NamespacedShares, error) { + return api.Internal.GetSharesByNamespace(ctx, header, namespace) +} + +type module struct { + share.Getter + share.Availability +} + +func (m module) SharesAvailable(ctx context.Context, header *header.ExtendedHeader) error { + return m.Availability.SharesAvailable(ctx, header) +} diff --git a/nodebuilder/share/share_test.go b/nodebuilder/share/share_test.go new file mode 100644 index 0000000000..db170709db --- /dev/null +++ b/nodebuilder/share/share_test.go @@ -0,0 +1,43 @@ +package share + +import ( + "context" + "testing" + + "github.com/ipfs/go-datastore" + ds_sync "github.com/ipfs/go-datastore/sync" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/share/eds" +) + +func Test_EmptyCARExists(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + ds := ds_sync.MutexWrap(datastore.NewMapDatastore()) + edsStore, err := eds.NewStore(eds.DefaultParameters(), t.TempDir(), ds) + require.NoError(t, err) + err = edsStore.Start(ctx) + require.NoError(t, err) + + eds := share.EmptyExtendedDataSquare() + dah, err := share.NewRoot(eds) + require.NoError(t, err) + + // add empty EDS to store + err = ensureEmptyCARExists(ctx, edsStore) + assert.NoError(t, err) + + // assert that the empty car exists + has, err := 
edsStore.Has(ctx, dah.Hash()) + assert.True(t, has) + assert.NoError(t, err) + + // assert that the empty car is, in fact, empty + emptyEds, err := edsStore.Get(ctx, dah.Hash()) + assert.Equal(t, eds.Flattened(), emptyEds.Flattened()) + assert.NoError(t, err) +} diff --git a/nodebuilder/state/cmd/state.go b/nodebuilder/state/cmd/state.go new file mode 100644 index 0000000000..d35c4a1b4f --- /dev/null +++ b/nodebuilder/state/cmd/state.go @@ -0,0 +1,412 @@ +package cmd + +import ( + "encoding/hex" + "fmt" + "strconv" + + "cosmossdk.io/math" + "github.com/spf13/cobra" + + cmdnode "github.com/celestiaorg/celestia-node/cmd" + "github.com/celestiaorg/celestia-node/state" +) + +func init() { + Cmd.AddCommand( + accountAddressCmd, + balanceCmd, + balanceForAddressCmd, + transferCmd, + submitTxCmd, + cancelUnbondingDelegationCmd, + beginRedelegateCmd, + undelegateCmd, + delegateCmd, + queryDelegationCmd, + queryUnbondingCmd, + queryRedelegationCmd, + ) +} + +var Cmd = &cobra.Command{ + Use: "state [command]", + Short: "Allows interaction with the State Module via JSON-RPC", + Args: cobra.NoArgs, + PersistentPreRunE: cmdnode.InitClient, +} + +var accountAddressCmd = &cobra.Command{ + Use: "account-address", + Short: "Retrieves the address of the node's account/signer.", + Args: cobra.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + client, err := cmdnode.ParseClientFromCtx(cmd.Context()) + if err != nil { + return err + } + defer client.Close() + + address, err := client.State.AccountAddress(cmd.Context()) + return cmdnode.PrintOutput(address, err, nil) + }, +} + +var balanceCmd = &cobra.Command{ + Use: "balance", + Short: "Retrieves the Celestia coin balance for the node's account/signer and verifies it against " + + "the corresponding block's AppHash.", + Args: cobra.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + client, err := cmdnode.ParseClientFromCtx(cmd.Context()) + if err != nil { + return err + } + defer client.Close() + + balance, err := client.State.Balance(cmd.Context()) + return cmdnode.PrintOutput(balance, err, nil) + }, +} + +var balanceForAddressCmd = &cobra.Command{ + Use: "balance-for-address [address]", + Short: "Retrieves the Celestia coin balance for the given address and verifies the returned balance against " + + "the corresponding block's AppHash.", + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + client, err := cmdnode.ParseClientFromCtx(cmd.Context()) + if err != nil { + return err + } + defer client.Close() + + addr, err := parseAddressFromString(args[0]) + if err != nil { + return fmt.Errorf("error parsing an address:%v", err) + } + + balance, err := client.State.BalanceForAddress(cmd.Context(), addr) + return cmdnode.PrintOutput(balance, err, nil) + }, +} + +var transferCmd = &cobra.Command{ + Use: "transfer [address] [amount] [fee] [gasLimit]", + Short: "Sends the given amount of coins from default wallet of the node to the given account address.", + Args: cobra.ExactArgs(4), + RunE: func(cmd *cobra.Command, args []string) error { + client, err := cmdnode.ParseClientFromCtx(cmd.Context()) + if err != nil { + return err + } + defer client.Close() + + addr, err := parseAddressFromString(args[0]) + if err != nil { + return fmt.Errorf("error parsing an address:%v", err) + } + + amount, err := strconv.ParseInt(args[1], 10, 64) + if err != nil { + return fmt.Errorf("error parsing an amount:%v", err) + } + fee, err := strconv.ParseInt(args[2], 10, 64) + if err != nil { + return fmt.Errorf("error parsing a 
fee:%v", err) + } + gasLimit, err := strconv.ParseUint(args[3], 10, 64) + if err != nil { + return fmt.Errorf("error parsing a gas limit:%v", err) + } + + txResponse, err := client.State.Transfer( + cmd.Context(), + addr.Address.(state.AccAddress), + math.NewInt(amount), + math.NewInt(fee), gasLimit, + ) + return cmdnode.PrintOutput(txResponse, err, nil) + }, +} + +var submitTxCmd = &cobra.Command{ + Use: "submit-tx [tx]", + Short: "Submits the given transaction/message to the Celestia network and blocks until the tx is included in a block.", + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + client, err := cmdnode.ParseClientFromCtx(cmd.Context()) + if err != nil { + return err + } + defer client.Close() + + decoded, err := hex.DecodeString(args[0]) + if err != nil { + return fmt.Errorf("failed to decode tx: %v", err) + } + txResponse, err := client.State.SubmitTx( + cmd.Context(), + decoded, + ) + return cmdnode.PrintOutput(txResponse, err, nil) + }, +} + +var cancelUnbondingDelegationCmd = &cobra.Command{ + Use: "cancel-unbonding-delegation [address] [amount] [height] [fee] [gasLimit]", + Short: "Cancels a user's pending undelegation from a validator.", + Args: cobra.ExactArgs(5), + RunE: func(cmd *cobra.Command, args []string) error { + client, err := cmdnode.ParseClientFromCtx(cmd.Context()) + if err != nil { + return err + } + defer client.Close() + + addr, err := parseAddressFromString(args[0]) + if err != nil { + return fmt.Errorf("error parsing an address:%v", err) + } + + amount, err := strconv.ParseInt(args[1], 10, 64) + if err != nil { + return fmt.Errorf("error parsing an amount:%v", err) + } + + height, err := strconv.ParseInt(args[2], 10, 64) + if err != nil { + return fmt.Errorf("error parsing a fee:%v", err) + } + + fee, err := strconv.ParseInt(args[3], 10, 64) + if err != nil { + return fmt.Errorf("error parsing a fee:%v", err) + } + + gasLimit, err := strconv.ParseUint(args[4], 10, 64) + if err != nil { + return fmt.Errorf("error parsing a gas limit:%v", err) + } + + txResponse, err := client.State.CancelUnbondingDelegation( + cmd.Context(), + addr.Address.(state.ValAddress), + math.NewInt(amount), + math.NewInt(height), + math.NewInt(fee), + gasLimit, + ) + return cmdnode.PrintOutput(txResponse, err, nil) + }, +} + +var beginRedelegateCmd = &cobra.Command{ + Use: "begin-redelegate [srcAddress] [dstAddress] [amount] [fee] [gasLimit]", + Short: "Sends a user's delegated tokens to a new validator for redelegation", + Args: cobra.ExactArgs(5), + RunE: func(cmd *cobra.Command, args []string) error { + client, err := cmdnode.ParseClientFromCtx(cmd.Context()) + if err != nil { + return err + } + defer client.Close() + + srcAddr, err := parseAddressFromString(args[0]) + if err != nil { + return fmt.Errorf("error parsing an address:%v", err) + } + + dstAddr, err := parseAddressFromString(args[1]) + if err != nil { + return fmt.Errorf("error parsing an address:%v", err) + } + + amount, err := strconv.ParseInt(args[2], 10, 64) + if err != nil { + return fmt.Errorf("error parsing an amount:%v", err) + } + + fee, err := strconv.ParseInt(args[3], 10, 64) + if err != nil { + return fmt.Errorf("error parsing a fee:%v", err) + } + gasLimit, err := strconv.ParseUint(args[4], 10, 64) + if err != nil { + return fmt.Errorf("error parsing a gas limit:%v", err) + } + + txResponse, err := client.State.BeginRedelegate( + cmd.Context(), + srcAddr.Address.(state.ValAddress), + dstAddr.Address.(state.ValAddress), + math.NewInt(amount), + math.NewInt(fee), + 
gasLimit, + ) + return cmdnode.PrintOutput(txResponse, err, nil) + }, +} + +var undelegateCmd = &cobra.Command{ + Use: "undelegate [valAddress] [amount] [fee] [gasLimit]", + Short: "Undelegates a user's delegated tokens, unbonding them from the current validator.", + Args: cobra.ExactArgs(4), + RunE: func(cmd *cobra.Command, args []string) error { + client, err := cmdnode.ParseClientFromCtx(cmd.Context()) + if err != nil { + return err + } + defer client.Close() + + addr, err := parseAddressFromString(args[0]) + if err != nil { + return fmt.Errorf("error parsing an address:%v", err) + } + + amount, err := strconv.ParseInt(args[1], 10, 64) + if err != nil { + return fmt.Errorf("error parsing an amount:%v", err) + } + fee, err := strconv.ParseInt(args[2], 10, 64) + if err != nil { + return fmt.Errorf("error parsing a fee:%v", err) + } + gasLimit, err := strconv.ParseUint(args[3], 10, 64) + if err != nil { + return fmt.Errorf("error parsing a gas limit:%v", err) + } + + txResponse, err := client.State.Undelegate( + cmd.Context(), + addr.Address.(state.ValAddress), + math.NewInt(amount), + math.NewInt(fee), + gasLimit, + ) + return cmdnode.PrintOutput(txResponse, err, nil) + }, +} + +var delegateCmd = &cobra.Command{ + Use: "delegate [valAddress] [amount] [fee] [gasLimit]", + Short: "Sends a user's liquid tokens to a validator for delegation.", + Args: cobra.ExactArgs(4), + RunE: func(cmd *cobra.Command, args []string) error { + client, err := cmdnode.ParseClientFromCtx(cmd.Context()) + if err != nil { + return err + } + defer client.Close() + + addr, err := parseAddressFromString(args[0]) + if err != nil { + return fmt.Errorf("error parsing an address:%v", err) + } + + amount, err := strconv.ParseInt(args[1], 10, 64) + if err != nil { + return fmt.Errorf("error parsing an amount:%v", err) + } + + fee, err := strconv.ParseInt(args[2], 10, 64) + if err != nil { + return fmt.Errorf("error parsing a fee:%v", err) + } + + gasLimit, err := strconv.ParseUint(args[3], 10, 64) + if err != nil { + return fmt.Errorf("error parsing a gas limit:%v", err) + } + + txResponse, err := client.State.Delegate( + cmd.Context(), + addr.Address.(state.ValAddress), + math.NewInt(amount), + math.NewInt(fee), + gasLimit, + ) + return cmdnode.PrintOutput(txResponse, err, nil) + }, +} + +var queryDelegationCmd = &cobra.Command{ + Use: "get-delegation [valAddress]", + Short: "Retrieves the delegation information between a delegator and a validator.", + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + client, err := cmdnode.ParseClientFromCtx(cmd.Context()) + if err != nil { + return err + } + defer client.Close() + + addr, err := parseAddressFromString(args[0]) + if err != nil { + return fmt.Errorf("error parsing an address:%v", err) + } + + balance, err := client.State.QueryDelegation(cmd.Context(), addr.Address.(state.ValAddress)) + return cmdnode.PrintOutput(balance, err, nil) + }, +} + +var queryUnbondingCmd = &cobra.Command{ + Use: "get-unbonding [valAddress]", + Short: "Retrieves the unbonding status between a delegator and a validator.", + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + client, err := cmdnode.ParseClientFromCtx(cmd.Context()) + if err != nil { + return err + } + defer client.Close() + + addr, err := parseAddressFromString(args[0]) + if err != nil { + return fmt.Errorf("error parsing an address:%v", err) + } + + response, err := client.State.QueryUnbonding(cmd.Context(), addr.Address.(state.ValAddress)) + return 
cmdnode.PrintOutput(response, err, nil) + }, +} + +var queryRedelegationCmd = &cobra.Command{ + Use: "get-redelegations [srcAddress] [dstAddress]", + Short: "Retrieves the status of the redelegations between a delegator and a validator.", + Args: cobra.ExactArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + client, err := cmdnode.ParseClientFromCtx(cmd.Context()) + if err != nil { + return err + } + defer client.Close() + + srcAddr, err := parseAddressFromString(args[0]) + if err != nil { + return fmt.Errorf("error parsing a src address:%v", err) + } + + dstAddr, err := parseAddressFromString(args[1]) + if err != nil { + return fmt.Errorf("error parsing a dst address:%v", err) + } + + response, err := client.State.QueryRedelegations( + cmd.Context(), + srcAddr.Address.(state.ValAddress), + dstAddr.Address.(state.ValAddress), + ) + return cmdnode.PrintOutput(response, err, nil) + }, +} + +func parseAddressFromString(addrStr string) (state.Address, error) { + var address state.Address + err := address.UnmarshalJSON([]byte(addrStr)) + if err != nil { + return address, err + } + return address, nil +} diff --git a/nodebuilder/state/config.go b/nodebuilder/state/config.go new file mode 100644 index 0000000000..f42e646b76 --- /dev/null +++ b/nodebuilder/state/config.go @@ -0,0 +1,26 @@ +package state + +import ( + "github.com/cosmos/cosmos-sdk/crypto/keyring" +) + +var defaultKeyringBackend = keyring.BackendTest + +// Config contains configuration parameters for constructing +// the node's keyring signer. +type Config struct { + KeyringAccName string + KeyringBackend string +} + +func DefaultConfig() Config { + return Config{ + KeyringAccName: "", + KeyringBackend: defaultKeyringBackend, + } +} + +// Validate performs basic validation of the config. +func (cfg *Config) Validate() error { + return nil +} diff --git a/nodebuilder/state/core.go b/nodebuilder/state/core.go new file mode 100644 index 0000000000..f8f8508540 --- /dev/null +++ b/nodebuilder/state/core.go @@ -0,0 +1,30 @@ +package state + +import ( + apptypes "github.com/celestiaorg/celestia-app/x/blob/types" + libfraud "github.com/celestiaorg/go-fraud" + "github.com/celestiaorg/go-header/sync" + + "github.com/celestiaorg/celestia-node/header" + "github.com/celestiaorg/celestia-node/nodebuilder/core" + modfraud "github.com/celestiaorg/celestia-node/nodebuilder/fraud" + "github.com/celestiaorg/celestia-node/share/eds/byzantine" + "github.com/celestiaorg/celestia-node/state" +) + +// coreAccessor constructs a new instance of state.Module over +// a celestia-core connection. 
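fx registers constructors by return type, so the single coreAccessor constructor below satisfies three consumers at once: the raw *state.CoreAccessor, the Module interface, and the fraud ServiceBreaker that wraps it. A hand-wired sketch of the equivalent, with all inputs assumed to be constructed elsewhere:

	ca, mod, breaker := coreAccessor(coreCfg, signer, syncer, fraudServ)
	_ = mod // the *state.CoreAccessor itself backs the state Module
	// the breaker ties the accessor's lifecycle to BadEncoding fraud proofs
	if err := breaker.Start(ctx); err != nil {
		return err
	}
	defer breaker.Stop(ctx) //nolint:errcheck
	_ = ca
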
+func coreAccessor( + corecfg core.Config, + signer *apptypes.KeyringSigner, + sync *sync.Syncer[*header.ExtendedHeader], + fraudServ libfraud.Service[*header.ExtendedHeader], +) (*state.CoreAccessor, Module, *modfraud.ServiceBreaker[*state.CoreAccessor, *header.ExtendedHeader]) { + ca := state.NewCoreAccessor(signer, sync, corecfg.IP, corecfg.RPCPort, corecfg.GRPCPort) + + return ca, ca, &modfraud.ServiceBreaker[*state.CoreAccessor, *header.ExtendedHeader]{ + Service: ca, + FraudType: byzantine.BadEncoding, + FraudServ: fraudServ, + } +} diff --git a/nodebuilder/state/flags.go b/nodebuilder/state/flags.go new file mode 100644 index 0000000000..7e35bfa078 --- /dev/null +++ b/nodebuilder/state/flags.go @@ -0,0 +1,35 @@ +package state + +import ( + "fmt" + + "github.com/spf13/cobra" + flag "github.com/spf13/pflag" +) + +var ( + keyringAccNameFlag = "keyring.accname" + keyringBackendFlag = "keyring.backend" +) + +// Flags gives a set of hardcoded State flags. +func Flags() *flag.FlagSet { + flags := &flag.FlagSet{} + + flags.String(keyringAccNameFlag, "", "Directs node's keyring signer to use the key prefixed with the "+ + "given string.") + flags.String(keyringBackendFlag, defaultKeyringBackend, fmt.Sprintf("Directs node's keyring signer to use the given "+ + "backend. Default is %s.", defaultKeyringBackend)) + + return flags +} + +// ParseFlags parses State flags from the given cmd and saves them to the passed config. +func ParseFlags(cmd *cobra.Command, cfg *Config) { + keyringAccName := cmd.Flag(keyringAccNameFlag).Value.String() + if keyringAccName != "" { + cfg.KeyringAccName = keyringAccName + } + + cfg.KeyringBackend = cmd.Flag(keyringBackendFlag).Value.String() +} diff --git a/nodebuilder/state/keyring.go b/nodebuilder/state/keyring.go new file mode 100644 index 0000000000..5aeaff69e2 --- /dev/null +++ b/nodebuilder/state/keyring.go @@ -0,0 +1,44 @@ +package state + +import ( + kr "github.com/cosmos/cosmos-sdk/crypto/keyring" + + apptypes "github.com/celestiaorg/celestia-app/x/blob/types" + + "github.com/celestiaorg/celestia-node/libs/keystore" + "github.com/celestiaorg/celestia-node/nodebuilder/p2p" +) + +const DefaultAccountName = "my_celes_key" + +// KeyringSigner constructs a new keyring signer. +// NOTE: we construct keyring signer before constructing node for easier UX +// as having keyring-backend set to `file` prompts user for password. 
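A minimal construction sketch for the function below; the keystore ks and network net are assumed to already exist, while DefaultConfig and DefaultAccountName come from this diff:

	cfg := DefaultConfig() // empty KeyringAccName falls back to DefaultAccountName
	signer, err := KeyringSigner(cfg, ks, net)
	if err != nil {
		// e.g. no key named "my_celes_key" exists in the keyring yet
		return err
	}
	_ = signer
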
+func KeyringSigner(cfg Config, ks keystore.Keystore, net p2p.Network) (*apptypes.KeyringSigner, error) { + ring := ks.Keyring() + var info *kr.Record + // if custom keyringAccName provided, find key for that name + if cfg.KeyringAccName != "" { + keyInfo, err := ring.Key(cfg.KeyringAccName) + if err != nil { + log.Errorw("failed to find key by given name", "keyring.accname", cfg.KeyringAccName) + return nil, err + } + info = keyInfo + } else { + // use default key + keyInfo, err := ring.Key(DefaultAccountName) + if err != nil { + log.Errorw("could not access key in keyring", "name", DefaultAccountName) + return nil, err + } + info = keyInfo + } + // construct signer using the default key found / generated above + signer := apptypes.NewKeyringSigner(ring, info.Name, string(net)) + signerInfo := signer.GetSignerInfo() + log.Infow("constructed keyring signer", "backend", cfg.KeyringBackend, "path", ks.Path(), + "key name", signerInfo.Name, "chain-id", string(net)) + + return signer, nil +} diff --git a/nodebuilder/state/mocks/api.go b/nodebuilder/state/mocks/api.go new file mode 100644 index 0000000000..1861a86e66 --- /dev/null +++ b/nodebuilder/state/mocks/api.go @@ -0,0 +1,236 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/celestiaorg/celestia-node/nodebuilder/state (interfaces: Module) + +// Package mocks is a generated GoMock package. +package mocks + +import ( + context "context" + reflect "reflect" + + math "cosmossdk.io/math" + blob "github.com/celestiaorg/celestia-node/blob" + state "github.com/celestiaorg/celestia-node/state" + types "github.com/cosmos/cosmos-sdk/types" + types0 "github.com/cosmos/cosmos-sdk/x/staking/types" + gomock "github.com/golang/mock/gomock" + types1 "github.com/tendermint/tendermint/types" +) + +// MockModule is a mock of Module interface. +type MockModule struct { + ctrl *gomock.Controller + recorder *MockModuleMockRecorder +} + +// MockModuleMockRecorder is the mock recorder for MockModule. +type MockModuleMockRecorder struct { + mock *MockModule +} + +// NewMockModule creates a new mock instance. +func NewMockModule(ctrl *gomock.Controller) *MockModule { + mock := &MockModule{ctrl: ctrl} + mock.recorder = &MockModuleMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockModule) EXPECT() *MockModuleMockRecorder { + return m.recorder +} + +// AccountAddress mocks base method. +func (m *MockModule) AccountAddress(arg0 context.Context) (state.Address, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AccountAddress", arg0) + ret0, _ := ret[0].(state.Address) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// AccountAddress indicates an expected call of AccountAddress. +func (mr *MockModuleMockRecorder) AccountAddress(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AccountAddress", reflect.TypeOf((*MockModule)(nil).AccountAddress), arg0) +} + +// Balance mocks base method. +func (m *MockModule) Balance(arg0 context.Context) (*types.Coin, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Balance", arg0) + ret0, _ := ret[0].(*types.Coin) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Balance indicates an expected call of Balance. 
+func (mr *MockModuleMockRecorder) Balance(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Balance", reflect.TypeOf((*MockModule)(nil).Balance), arg0) +} + +// BalanceForAddress mocks base method. +func (m *MockModule) BalanceForAddress(arg0 context.Context, arg1 state.Address) (*types.Coin, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "BalanceForAddress", arg0, arg1) + ret0, _ := ret[0].(*types.Coin) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// BalanceForAddress indicates an expected call of BalanceForAddress. +func (mr *MockModuleMockRecorder) BalanceForAddress(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BalanceForAddress", reflect.TypeOf((*MockModule)(nil).BalanceForAddress), arg0, arg1) +} + +// BeginRedelegate mocks base method. +func (m *MockModule) BeginRedelegate(arg0 context.Context, arg1, arg2 types.ValAddress, arg3, arg4 math.Int, arg5 uint64) (*types.TxResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "BeginRedelegate", arg0, arg1, arg2, arg3, arg4, arg5) + ret0, _ := ret[0].(*types.TxResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// BeginRedelegate indicates an expected call of BeginRedelegate. +func (mr *MockModuleMockRecorder) BeginRedelegate(arg0, arg1, arg2, arg3, arg4, arg5 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BeginRedelegate", reflect.TypeOf((*MockModule)(nil).BeginRedelegate), arg0, arg1, arg2, arg3, arg4, arg5) +} + +// CancelUnbondingDelegation mocks base method. +func (m *MockModule) CancelUnbondingDelegation(arg0 context.Context, arg1 types.ValAddress, arg2, arg3, arg4 math.Int, arg5 uint64) (*types.TxResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CancelUnbondingDelegation", arg0, arg1, arg2, arg3, arg4, arg5) + ret0, _ := ret[0].(*types.TxResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CancelUnbondingDelegation indicates an expected call of CancelUnbondingDelegation. +func (mr *MockModuleMockRecorder) CancelUnbondingDelegation(arg0, arg1, arg2, arg3, arg4, arg5 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CancelUnbondingDelegation", reflect.TypeOf((*MockModule)(nil).CancelUnbondingDelegation), arg0, arg1, arg2, arg3, arg4, arg5) +} + +// Delegate mocks base method. +func (m *MockModule) Delegate(arg0 context.Context, arg1 types.ValAddress, arg2, arg3 math.Int, arg4 uint64) (*types.TxResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Delegate", arg0, arg1, arg2, arg3, arg4) + ret0, _ := ret[0].(*types.TxResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Delegate indicates an expected call of Delegate. +func (mr *MockModuleMockRecorder) Delegate(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delegate", reflect.TypeOf((*MockModule)(nil).Delegate), arg0, arg1, arg2, arg3, arg4) +} + +// QueryDelegation mocks base method. 
+func (m *MockModule) QueryDelegation(arg0 context.Context, arg1 types.ValAddress) (*types0.QueryDelegationResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "QueryDelegation", arg0, arg1) + ret0, _ := ret[0].(*types0.QueryDelegationResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// QueryDelegation indicates an expected call of QueryDelegation. +func (mr *MockModuleMockRecorder) QueryDelegation(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "QueryDelegation", reflect.TypeOf((*MockModule)(nil).QueryDelegation), arg0, arg1) +} + +// QueryRedelegations mocks base method. +func (m *MockModule) QueryRedelegations(arg0 context.Context, arg1, arg2 types.ValAddress) (*types0.QueryRedelegationsResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "QueryRedelegations", arg0, arg1, arg2) + ret0, _ := ret[0].(*types0.QueryRedelegationsResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// QueryRedelegations indicates an expected call of QueryRedelegations. +func (mr *MockModuleMockRecorder) QueryRedelegations(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "QueryRedelegations", reflect.TypeOf((*MockModule)(nil).QueryRedelegations), arg0, arg1, arg2) +} + +// QueryUnbonding mocks base method. +func (m *MockModule) QueryUnbonding(arg0 context.Context, arg1 types.ValAddress) (*types0.QueryUnbondingDelegationResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "QueryUnbonding", arg0, arg1) + ret0, _ := ret[0].(*types0.QueryUnbondingDelegationResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// QueryUnbonding indicates an expected call of QueryUnbonding. +func (mr *MockModuleMockRecorder) QueryUnbonding(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "QueryUnbonding", reflect.TypeOf((*MockModule)(nil).QueryUnbonding), arg0, arg1) +} + +// SubmitPayForBlob mocks base method. +func (m *MockModule) SubmitPayForBlob(arg0 context.Context, arg1 math.Int, arg2 uint64, arg3 []*blob.Blob) (*types.TxResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SubmitPayForBlob", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(*types.TxResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// SubmitPayForBlob indicates an expected call of SubmitPayForBlob. +func (mr *MockModuleMockRecorder) SubmitPayForBlob(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SubmitPayForBlob", reflect.TypeOf((*MockModule)(nil).SubmitPayForBlob), arg0, arg1, arg2, arg3) +} + +// SubmitTx mocks base method. +func (m *MockModule) SubmitTx(arg0 context.Context, arg1 types1.Tx) (*types.TxResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SubmitTx", arg0, arg1) + ret0, _ := ret[0].(*types.TxResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// SubmitTx indicates an expected call of SubmitTx. +func (mr *MockModuleMockRecorder) SubmitTx(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SubmitTx", reflect.TypeOf((*MockModule)(nil).SubmitTx), arg0, arg1) +} + +// Transfer mocks base method. 
+func (m *MockModule) Transfer(arg0 context.Context, arg1 types.AccAddress, arg2, arg3 math.Int, arg4 uint64) (*types.TxResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Transfer", arg0, arg1, arg2, arg3, arg4) + ret0, _ := ret[0].(*types.TxResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Transfer indicates an expected call of Transfer. +func (mr *MockModuleMockRecorder) Transfer(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Transfer", reflect.TypeOf((*MockModule)(nil).Transfer), arg0, arg1, arg2, arg3, arg4) +} + +// Undelegate mocks base method. +func (m *MockModule) Undelegate(arg0 context.Context, arg1 types.ValAddress, arg2, arg3 math.Int, arg4 uint64) (*types.TxResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Undelegate", arg0, arg1, arg2, arg3, arg4) + ret0, _ := ret[0].(*types.TxResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Undelegate indicates an expected call of Undelegate. +func (mr *MockModuleMockRecorder) Undelegate(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Undelegate", reflect.TypeOf((*MockModule)(nil).Undelegate), arg0, arg1, arg2, arg3, arg4) +} diff --git a/nodebuilder/state/module.go b/nodebuilder/state/module.go new file mode 100644 index 0000000000..733419a918 --- /dev/null +++ b/nodebuilder/state/module.go @@ -0,0 +1,53 @@ +package state + +import ( + "context" + + logging "github.com/ipfs/go-log/v2" + "go.uber.org/fx" + + "github.com/celestiaorg/celestia-node/header" + "github.com/celestiaorg/celestia-node/libs/fxutil" + "github.com/celestiaorg/celestia-node/nodebuilder/core" + modfraud "github.com/celestiaorg/celestia-node/nodebuilder/fraud" + "github.com/celestiaorg/celestia-node/nodebuilder/node" + "github.com/celestiaorg/celestia-node/state" +) + +var log = logging.Logger("module/state") + +// ConstructModule provides all components necessary to construct the +// state service. +func ConstructModule(tp node.Type, cfg *Config, coreCfg *core.Config) fx.Option { + // sanitize config values before constructing module + cfgErr := cfg.Validate() + + baseComponents := fx.Options( + fx.Supply(*cfg), + fx.Error(cfgErr), + fxutil.ProvideIf(coreCfg.IsEndpointConfigured(), fx.Annotate( + coreAccessor, + fx.OnStart(func(ctx context.Context, + breaker *modfraud.ServiceBreaker[*state.CoreAccessor, *header.ExtendedHeader]) error { + return breaker.Start(ctx) + }), + fx.OnStop(func(ctx context.Context, + breaker *modfraud.ServiceBreaker[*state.CoreAccessor, *header.ExtendedHeader]) error { + return breaker.Stop(ctx) + }), + )), + fxutil.ProvideIf(!coreCfg.IsEndpointConfigured(), func() (*state.CoreAccessor, Module) { + return nil, &stubbedStateModule{} + }), + ) + + switch tp { + case node.Light, node.Full, node.Bridge: + return fx.Module( + "state", + baseComponents, + ) + default: + panic("invalid node type") + } +} diff --git a/nodebuilder/state/opts.go b/nodebuilder/state/opts.go new file mode 100644 index 0000000000..0b357b8396 --- /dev/null +++ b/nodebuilder/state/opts.go @@ -0,0 +1,13 @@ +package state + +import ( + "go.uber.org/fx" + + "github.com/celestiaorg/celestia-app/x/blob/types" +) + +// WithKeyringSigner overrides the default keyring signer constructed +// by the node. 
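fx.Replace swaps out a value the dependency graph already provides, so the option below lets callers substitute their own signer. A hedged wiring sketch; the nodebuilder.New signature and the chainID value are assumptions, as neither is shown in this diff:

	ring := ks.Keyring() // a keystore-backed keyring, as in KeyringSigner above
	signer := apptypes.NewKeyringSigner(ring, "my_celes_key", chainID) // chainID assumed in scope
	nd, err := nodebuilder.New(node.Light, network, store, state.WithKeyringSigner(signer))
	if err != nil {
		return err
	}
	_ = nd
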
+func WithKeyringSigner(signer *types.KeyringSigner) fx.Option { + return fx.Replace(signer) +} diff --git a/nodebuilder/state/state.go b/nodebuilder/state/state.go new file mode 100644 index 0000000000..52a2317445 --- /dev/null +++ b/nodebuilder/state/state.go @@ -0,0 +1,258 @@ +package state + +import ( + "context" + + "github.com/cosmos/cosmos-sdk/x/staking/types" + + "github.com/celestiaorg/celestia-node/blob" + "github.com/celestiaorg/celestia-node/state" +) + +var _ Module = (*API)(nil) + +// Module represents the behaviors necessary for a user to +// query for state-related information and submit transactions/ +// messages to the Celestia network. +// +//go:generate mockgen -destination=mocks/api.go -package=mocks . Module +//nolint:dupl +type Module interface { + + // AccountAddress retrieves the address of the node's account/signer + AccountAddress(ctx context.Context) (state.Address, error) + // Balance retrieves the Celestia coin balance for the node's account/signer + // and verifies it against the corresponding block's AppHash. + Balance(ctx context.Context) (*state.Balance, error) + // BalanceForAddress retrieves the Celestia coin balance for the given address and verifies + // the returned balance against the corresponding block's AppHash. + // + // NOTE: the balance returned is the balance reported by the block right before + // the node's current head (head-1). This is due to the fact that for block N, the block's + // `AppHash` is the result of applying the previous block's transaction list. + BalanceForAddress(ctx context.Context, addr state.Address) (*state.Balance, error) + + // Transfer sends the given amount of coins from default wallet of the node to the given account + // address. + Transfer( + ctx context.Context, to state.AccAddress, amount, fee state.Int, gasLimit uint64, + ) (*state.TxResponse, error) + // SubmitTx submits the given transaction/message to the + // Celestia network and blocks until the tx is included in + // a block. + SubmitTx(ctx context.Context, tx state.Tx) (*state.TxResponse, error) + // SubmitPayForBlob builds, signs and submits a PayForBlob transaction. + SubmitPayForBlob( + ctx context.Context, + fee state.Int, + gasLim uint64, + blobs []*blob.Blob, + ) (*state.TxResponse, error) + + // CancelUnbondingDelegation cancels a user's pending undelegation from a validator. + CancelUnbondingDelegation( + ctx context.Context, + valAddr state.ValAddress, + amount, + height, + fee state.Int, + gasLim uint64, + ) (*state.TxResponse, error) + // BeginRedelegate sends a user's delegated tokens to a new validator for redelegation. + BeginRedelegate( + ctx context.Context, + srcValAddr, + dstValAddr state.ValAddress, + amount, + fee state.Int, + gasLim uint64, + ) (*state.TxResponse, error) + // Undelegate undelegates a user's delegated tokens, unbonding them from the current validator. + Undelegate( + ctx context.Context, + delAddr state.ValAddress, + amount, fee state.Int, + gasLim uint64, + ) (*state.TxResponse, error) + // Delegate sends a user's liquid tokens to a validator for delegation. + Delegate( + ctx context.Context, + delAddr state.ValAddress, + amount, fee state.Int, + gasLim uint64, + ) (*state.TxResponse, error) + + // QueryDelegation retrieves the delegation information between a delegator and a validator. + QueryDelegation(ctx context.Context, valAddr state.ValAddress) (*types.QueryDelegationResponse, error) + // QueryUnbonding retrieves the unbonding status between a delegator and a validator. 
+ QueryUnbonding(ctx context.Context, valAddr state.ValAddress) (*types.QueryUnbondingDelegationResponse, error) + // QueryRedelegations retrieves the status of the redelegations between a delegator and a validator. + QueryRedelegations( + ctx context.Context, + srcValAddr, + dstValAddr state.ValAddress, + ) (*types.QueryRedelegationsResponse, error) +} + +// API is a wrapper around Module for the RPC. +// TODO(@distractedm1nd): These structs need to be autogenerated. +// +//nolint:dupl +type API struct { + Internal struct { + AccountAddress func(ctx context.Context) (state.Address, error) `perm:"read"` + Balance func(ctx context.Context) (*state.Balance, error) `perm:"read"` + BalanceForAddress func(ctx context.Context, addr state.Address) (*state.Balance, error) `perm:"read"` + Transfer func( + ctx context.Context, + to state.AccAddress, + amount, + fee state.Int, + gasLimit uint64, + ) (*state.TxResponse, error) `perm:"write"` + SubmitTx func(ctx context.Context, tx state.Tx) (*state.TxResponse, error) `perm:"read"` + SubmitPayForBlob func( + ctx context.Context, + fee state.Int, + gasLim uint64, + blobs []*blob.Blob, + ) (*state.TxResponse, error) `perm:"write"` + CancelUnbondingDelegation func( + ctx context.Context, + valAddr state.ValAddress, + amount, + height, + fee state.Int, + gasLim uint64, + ) (*state.TxResponse, error) `perm:"write"` + BeginRedelegate func( + ctx context.Context, + srcValAddr, + dstValAddr state.ValAddress, + amount, + fee state.Int, + gasLim uint64, + ) (*state.TxResponse, error) `perm:"write"` + Undelegate func( + ctx context.Context, + delAddr state.ValAddress, + amount, + fee state.Int, + gasLim uint64, + ) (*state.TxResponse, error) `perm:"write"` + Delegate func( + ctx context.Context, + delAddr state.ValAddress, + amount, + fee state.Int, + gasLim uint64, + ) (*state.TxResponse, error) `perm:"write"` + QueryDelegation func( + ctx context.Context, + valAddr state.ValAddress, + ) (*types.QueryDelegationResponse, error) `perm:"read"` + QueryUnbonding func( + ctx context.Context, + valAddr state.ValAddress, + ) (*types.QueryUnbondingDelegationResponse, error) `perm:"read"` + QueryRedelegations func( + ctx context.Context, + srcValAddr, + dstValAddr state.ValAddress, + ) (*types.QueryRedelegationsResponse, error) `perm:"read"` + } +} + +func (api *API) AccountAddress(ctx context.Context) (state.Address, error) { + return api.Internal.AccountAddress(ctx) +} + +func (api *API) BalanceForAddress(ctx context.Context, addr state.Address) (*state.Balance, error) { + return api.Internal.BalanceForAddress(ctx, addr) +} + +func (api *API) Transfer( + ctx context.Context, + to state.AccAddress, + amount, + fee state.Int, + gasLimit uint64, +) (*state.TxResponse, error) { + return api.Internal.Transfer(ctx, to, amount, fee, gasLimit) +} + +func (api *API) SubmitTx(ctx context.Context, tx state.Tx) (*state.TxResponse, error) { + return api.Internal.SubmitTx(ctx, tx) +} + +func (api *API) SubmitPayForBlob( + ctx context.Context, + fee state.Int, + gasLim uint64, + blobs []*blob.Blob, +) (*state.TxResponse, error) { + return api.Internal.SubmitPayForBlob(ctx, fee, gasLim, blobs) +} + +func (api *API) CancelUnbondingDelegation( + ctx context.Context, + valAddr state.ValAddress, + amount, + height, + fee state.Int, + gasLim uint64, +) (*state.TxResponse, error) { + return api.Internal.CancelUnbondingDelegation(ctx, valAddr, amount, height, fee, gasLim) +} + +func (api *API) BeginRedelegate( + ctx context.Context, + srcValAddr, dstValAddr state.ValAddress, + amount, + fee 
state.Int, + gasLim uint64, +) (*state.TxResponse, error) { + return api.Internal.BeginRedelegate(ctx, srcValAddr, dstValAddr, amount, fee, gasLim) +} + +func (api *API) Undelegate( + ctx context.Context, + delAddr state.ValAddress, + amount, + fee state.Int, + gasLim uint64, +) (*state.TxResponse, error) { + return api.Internal.Undelegate(ctx, delAddr, amount, fee, gasLim) +} + +func (api *API) Delegate( + ctx context.Context, + delAddr state.ValAddress, + amount, + fee state.Int, + gasLim uint64, +) (*state.TxResponse, error) { + return api.Internal.Delegate(ctx, delAddr, amount, fee, gasLim) +} + +func (api *API) QueryDelegation(ctx context.Context, valAddr state.ValAddress) (*types.QueryDelegationResponse, error) { + return api.Internal.QueryDelegation(ctx, valAddr) +} + +func (api *API) QueryUnbonding( + ctx context.Context, + valAddr state.ValAddress, +) (*types.QueryUnbondingDelegationResponse, error) { + return api.Internal.QueryUnbonding(ctx, valAddr) +} + +func (api *API) QueryRedelegations( + ctx context.Context, + srcValAddr, dstValAddr state.ValAddress, +) (*types.QueryRedelegationsResponse, error) { + return api.Internal.QueryRedelegations(ctx, srcValAddr, dstValAddr) +} + +func (api *API) Balance(ctx context.Context) (*state.Balance, error) { + return api.Internal.Balance(ctx) +} diff --git a/nodebuilder/state/stub.go b/nodebuilder/state/stub.go new file mode 100644 index 0000000000..30a431aba5 --- /dev/null +++ b/nodebuilder/state/stub.go @@ -0,0 +1,112 @@ +package state + +import ( + "context" + "errors" + + "github.com/cosmos/cosmos-sdk/x/staking/types" + + "github.com/celestiaorg/celestia-node/blob" + "github.com/celestiaorg/celestia-node/state" +) + +var ErrNoStateAccess = errors.New("node is running without state access. run with --core.ip to resolve") + +// stubbedStateModule provides a stub for the state module to return +// errors when state endpoints are accessed without a running connection +// to a core endpoint. 
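The stub below turns a missing core endpoint into one explicit, uniform error instead of scattered nil checks; an in-process sketch of what callers observe (ctx assumed):

	var mod Module = stubbedStateModule{}
	if _, err := mod.AccountAddress(ctx); errors.Is(err, ErrNoStateAccess) {
		// every state method returns the same sentinel until --core.ip is provided
		log.Warn("state API unavailable; restart the node with --core.ip")
	}
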
+type stubbedStateModule struct{} + +func (s stubbedStateModule) AccountAddress(context.Context) (state.Address, error) { + return state.Address{}, ErrNoStateAccess +} + +func (s stubbedStateModule) Balance(context.Context) (*state.Balance, error) { + return nil, ErrNoStateAccess +} + +func (s stubbedStateModule) BalanceForAddress( + context.Context, + state.Address, +) (*state.Balance, error) { + return nil, ErrNoStateAccess +} + +func (s stubbedStateModule) Transfer( + _ context.Context, + _ state.AccAddress, + _, _ state.Int, + _ uint64, +) (*state.TxResponse, error) { + return nil, ErrNoStateAccess +} + +func (s stubbedStateModule) SubmitTx(context.Context, state.Tx) (*state.TxResponse, error) { + return nil, ErrNoStateAccess +} + +func (s stubbedStateModule) SubmitPayForBlob( + context.Context, + state.Int, + uint64, + []*blob.Blob, +) (*state.TxResponse, error) { + return nil, ErrNoStateAccess +} + +func (s stubbedStateModule) CancelUnbondingDelegation( + _ context.Context, + _ state.ValAddress, + _, _, _ state.Int, + _ uint64, +) (*state.TxResponse, error) { + return nil, ErrNoStateAccess +} + +func (s stubbedStateModule) BeginRedelegate( + _ context.Context, + _, _ state.ValAddress, + _, _ state.Int, + _ uint64, +) (*state.TxResponse, error) { + return nil, ErrNoStateAccess +} + +func (s stubbedStateModule) Undelegate( + _ context.Context, + _ state.ValAddress, + _, _ state.Int, + _ uint64, +) (*state.TxResponse, error) { + return nil, ErrNoStateAccess +} + +func (s stubbedStateModule) Delegate( + _ context.Context, + _ state.ValAddress, + _, _ state.Int, + _ uint64, +) (*state.TxResponse, error) { + return nil, ErrNoStateAccess +} + +func (s stubbedStateModule) QueryDelegation( + context.Context, + state.ValAddress, +) (*types.QueryDelegationResponse, error) { + return nil, ErrNoStateAccess +} + +func (s stubbedStateModule) QueryUnbonding( + context.Context, + state.ValAddress, +) (*types.QueryUnbondingDelegationResponse, error) { + return nil, ErrNoStateAccess +} + +func (s stubbedStateModule) QueryRedelegations( + _ context.Context, + _, _ state.ValAddress, +) (*types.QueryRedelegationsResponse, error) { + return nil, ErrNoStateAccess +} diff --git a/nodebuilder/store.go b/nodebuilder/store.go new file mode 100644 index 0000000000..7f67a9e782 --- /dev/null +++ b/nodebuilder/store.go @@ -0,0 +1,248 @@ +package nodebuilder + +import ( + "errors" + "fmt" + "path/filepath" + "runtime" + "sync" + "time" + + "github.com/cosmos/cosmos-sdk/crypto/keyring" + "github.com/dgraph-io/badger/v4/options" + "github.com/ipfs/go-datastore" + dsbadger "github.com/ipfs/go-ds-badger4" + "github.com/mitchellh/go-homedir" + + "github.com/celestiaorg/celestia-node/libs/fslock" + "github.com/celestiaorg/celestia-node/libs/keystore" + "github.com/celestiaorg/celestia-node/share" +) + +var ( + // ErrOpened is thrown on attempt to open already open/in-use Store. + ErrOpened = errors.New("node: store is in use") + // ErrNotInited is thrown on attempt to open Store without initialization. + ErrNotInited = errors.New("node: store is not initialized") +) + +// Store encapsulates storage for the Node. Basically, it is the Store of all Stores. +// It provides access for the Node data stored in root directory e.g. '~/.celestia'. +type Store interface { + // Path reports the FileSystem path of Store. + Path() string + + // Keystore provides a Keystore to access keys. + Keystore() (keystore.Keystore, error) + + // Datastore provides a Datastore - a KV store for arbitrary data to be stored on disk. 
+ Datastore() (datastore.Batching, error) + + // Config loads the stored Node config. + Config() (*Config, error) + + // PutConfig alters the stored Node config. + PutConfig(*Config) error + + // Close closes the Store freeing up acquired resources and locks. + Close() error +} + +// OpenStore creates new FS Store under the given 'path'. +// To be opened the Store must be initialized first, otherwise ErrNotInited is thrown. +// OpenStore takes a file Lock on directory, hence only one Store can be opened at a time under the +// given 'path', otherwise ErrOpened is thrown. +func OpenStore(path string, ring keyring.Keyring) (Store, error) { + path, err := storePath(path) + if err != nil { + return nil, err + } + + flock, err := fslock.Lock(lockPath(path)) + if err != nil { + if err == fslock.ErrLocked { + return nil, ErrOpened + } + return nil, err + } + + ok := IsInit(path) + if !ok { + flock.Unlock() //nolint:errcheck + return nil, ErrNotInited + } + + ks, err := keystore.NewFSKeystore(keysPath(path), ring) + if err != nil { + return nil, err + } + + return &fsStore{ + path: path, + dirLock: flock, + keys: ks, + }, nil +} + +func (f *fsStore) Path() string { + return f.path +} + +func (f *fsStore) Config() (*Config, error) { + cfg, err := LoadConfig(configPath(f.path)) + if err != nil { + return nil, fmt.Errorf("node: can't load Config: %w", err) + } + + return cfg, nil +} + +func (f *fsStore) PutConfig(cfg *Config) error { + err := SaveConfig(configPath(f.path), cfg) + if err != nil { + return fmt.Errorf("node: can't save Config: %w", err) + } + + return nil +} + +func (f *fsStore) Keystore() (_ keystore.Keystore, err error) { + if f.keys == nil { + return nil, fmt.Errorf("node: no Keystore found") + } + return f.keys, nil +} + +func (f *fsStore) Datastore() (datastore.Batching, error) { + f.dataMu.Lock() + defer f.dataMu.Unlock() + if f.data != nil { + return f.data, nil + } + + cfg := constraintBadgerConfig() + ds, err := dsbadger.NewDatastore(dataPath(f.path), cfg) + if err != nil { + return nil, fmt.Errorf("node: can't open Badger Datastore: %w", err) + } + + f.data = ds + return ds, nil +} + +func (f *fsStore) Close() (err error) { + err = errors.Join(err, f.dirLock.Unlock()) + f.dataMu.Lock() + if f.data != nil { + err = errors.Join(err, f.data.Close()) + } + f.dataMu.Unlock() + return +} + +type fsStore struct { + path string + + dataMu sync.Mutex + data datastore.Batching + keys keystore.Keystore + dirLock *fslock.Locker // protects directory +} + +func storePath(path string) (string, error) { + return homedir.Expand(filepath.Clean(path)) +} + +func configPath(base string) string { + return filepath.Join(base, "config.toml") +} + +func lockPath(base string) string { + return filepath.Join(base, "lock") +} + +func keysPath(base string) string { + return filepath.Join(base, "keys") +} + +func blocksPath(base string) string { + return filepath.Join(base, "blocks") +} + +func transientsPath(base string) string { + // we don't actually use the transients directory anymore, but it could be populated from previous + // versions. + return filepath.Join(base, "transients") +} + +func indexPath(base string) string { + return filepath.Join(base, "index") +} + +func dataPath(base string) string { + return filepath.Join(base, "data") +} + +// constraintBadgerConfig returns BadgerDB configuration optimized for low memory usage and more frequent +// compaction which prevents memory spikes. +// This is particularly important for LNs with restricted memory resources. 
+//
+// With the following configuration, a LN uses up to 300MiB of RAM during initial sync/sampling
+// and up to 200MiB during normal operation. (on a 4-core CPU, 8GiB RAM droplet)
+//
+// With the following configuration and "-tags=jemalloc", a LN uses no more than 180MiB during initial
+// sync/sampling and up to 100MiB during normal operation. (same hardware spec)
+// NOTE: To enable jemalloc, build celestia-node with the "-tags=jemalloc" flag, which configures Badger to
+// use jemalloc instead of Go's default allocator.
+//
+// TODO(@Wondertan): Consider an alternative, less constrained configuration for FN/BN
+// TODO(@Wondertan): Consider dynamic memory allocation based on available RAM
+func constraintBadgerConfig() *dsbadger.Options {
+	opts := dsbadger.DefaultOptions // this must be copied
+	// ValueLog:
+	// 2mib default => share.Size - makes sure headers and samples are stored in value log
+	// This *tremendously* reduces the amount of memory used by the node, up to 10 times less during
+	// compaction
+	opts.ValueThreshold = share.Size
+	// make sure we don't have any limits for stored headers
+	opts.ValueLogMaxEntries = 100000000
+	// run value log GC more often to spread the work over time
+	opts.GcInterval = time.Minute * 1
+	// default 0.5 => 0.125 - makes sure value log GC is more aggressive on reclaiming disk space
+	opts.GcDiscardRatio = 0.125
+
+	// badger stores a checksum for every value, but doesn't verify it by default
+	// enabling this option may allow us to detect corrupted data
+	opts.ChecksumVerificationMode = options.OnBlockRead
+	opts.VerifyValueChecksum = true
+	// default 64mib => 0 - disable block cache
+	// most of our components maintain their own caches, so this is not needed
+	opts.BlockCacheSize = 0
+	// not much gain, as it compresses only the LSM; compression also requires the block cache
+	opts.Compression = options.None
+
+	// MemTables:
+	// default 64mib => 16mib - decreases memory usage and makes compaction run more often
+	opts.MemTableSize = 16 << 20
+	// default 5 => 3
+	opts.NumMemtables = 3
+	// default 5 => 3
+	opts.NumLevelZeroTables = 3
+	// default 15 => 5 - this prevents memory growth on CPU-constrained systems by blocking all writers
+	opts.NumLevelZeroTablesStall = 5
+
+	// Compaction:
+	// Dynamic compactor allocation
+	compactors := runtime.NumCPU() / 2
+	if compactors < 2 {
+		compactors = 2 // can't be less than 2
+	}
+	if compactors > opts.MaxLevels { // ensure there are no more compactors than DB table levels
+		compactors = opts.MaxLevels
+	}
+	opts.NumCompactors = compactors
+	// makes sure badger is always compacted on shutdown
+	opts.CompactL0OnClose = true
+
+	return &opts
+}
diff --git a/node/store_mem.go b/nodebuilder/store_mem.go
similarity index 97%
rename from node/store_mem.go
rename to nodebuilder/store_mem.go
index 84759a882d..61d16c7c8d 100644
--- a/node/store_mem.go
+++ b/nodebuilder/store_mem.go
@@ -1,4 +1,4 @@
-package node
+package nodebuilder
 
 import (
 	"sync"
diff --git a/nodebuilder/store_test.go b/nodebuilder/store_test.go
new file mode 100644
index 0000000000..51bd89c5a7
--- /dev/null
+++ b/nodebuilder/store_test.go
@@ -0,0 +1,186 @@
+//go:build !race
+
+package nodebuilder
+
+import (
+	"context"
+	"strconv"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	"github.com/celestiaorg/celestia-app/pkg/da"
+	"github.com/celestiaorg/celestia-app/pkg/wrapper"
+	"github.com/celestiaorg/nmt"
+	"github.com/celestiaorg/rsmt2d"
+
+	
"github.com/celestiaorg/celestia-node/nodebuilder/node" + "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/share/eds" + "github.com/celestiaorg/celestia-node/share/eds/edstest" + "github.com/celestiaorg/celestia-node/share/ipld" + "github.com/celestiaorg/celestia-node/share/sharetest" +) + +func TestRepo(t *testing.T) { + var tests = []struct { + tp node.Type + }{ + {tp: node.Bridge}, {tp: node.Light}, {tp: node.Full}, + } + + for i, tt := range tests { + t.Run(strconv.Itoa(i), func(t *testing.T) { + dir := t.TempDir() + + _, err := OpenStore(dir, nil) + assert.ErrorIs(t, err, ErrNotInited) + + err = Init(*DefaultConfig(tt.tp), dir, tt.tp) + require.NoError(t, err) + + store, err := OpenStore(dir, nil) + require.NoError(t, err) + + _, err = OpenStore(dir, nil) + assert.ErrorIs(t, err, ErrOpened) + + ks, err := store.Keystore() + assert.NoError(t, err) + assert.NotNil(t, ks) + + data, err := store.Datastore() + assert.NoError(t, err) + assert.NotNil(t, data) + + cfg, err := store.Config() + assert.NoError(t, err) + assert.NotNil(t, cfg) + + err = store.Close() + assert.NoError(t, err) + }) + } +} + +func BenchmarkStore(b *testing.B) { + ctx, cancel := context.WithCancel(context.Background()) + b.Cleanup(cancel) + + // BenchmarkStore/bench_read_128-10 14 78970661 ns/op (~70ms) + b.Run("bench put 128", func(b *testing.B) { + dir := b.TempDir() + err := Init(*DefaultConfig(node.Full), dir, node.Full) + require.NoError(b, err) + + store := newStore(ctx, b, eds.DefaultParameters(), dir) + size := 128 + b.Run("enabled eds proof caching", func(b *testing.B) { + b.StopTimer() + b.ResetTimer() + for i := 0; i < b.N; i++ { + adder := ipld.NewProofsAdder(size * 2) + shares := sharetest.RandShares(b, size*size) + eds, err := rsmt2d.ComputeExtendedDataSquare( + shares, + share.DefaultRSMT2DCodec(), + wrapper.NewConstructor(uint64(size), + nmt.NodeVisitor(adder.VisitFn())), + ) + require.NoError(b, err) + dah, err := da.NewDataAvailabilityHeader(eds) + require.NoError(b, err) + ctx := ipld.CtxWithProofsAdder(ctx, adder) + + b.StartTimer() + err = store.edsStore.Put(ctx, dah.Hash(), eds) + b.StopTimer() + require.NoError(b, err) + } + }) + + b.Run("disabled eds proof caching", func(b *testing.B) { + b.ResetTimer() + b.StopTimer() + for i := 0; i < b.N; i++ { + eds := edstest.RandEDS(b, size) + dah, err := da.NewDataAvailabilityHeader(eds) + require.NoError(b, err) + + b.StartTimer() + err = store.edsStore.Put(ctx, dah.Hash(), eds) + b.StopTimer() + require.NoError(b, err) + } + }) + }) +} + +func TestStoreRestart(t *testing.T) { + const ( + blocks = 5 + size = 32 + ) + ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) + t.Cleanup(cancel) + + dir := t.TempDir() + err := Init(*DefaultConfig(node.Full), dir, node.Full) + require.NoError(t, err) + + store := newStore(ctx, t, eds.DefaultParameters(), dir) + + hashes := make([][]byte, blocks) + for i := range hashes { + edss := edstest.RandEDS(t, size) + require.NoError(t, err) + dah, err := da.NewDataAvailabilityHeader(edss) + require.NoError(t, err) + err = store.edsStore.Put(ctx, dah.Hash(), edss) + require.NoError(t, err) + + // store hashes for read loop later + hashes[i] = dah.Hash() + } + + // restart store + store.stop(ctx, t) + store = newStore(ctx, t, eds.DefaultParameters(), dir) + + for _, h := range hashes { + edsReader, err := store.edsStore.GetCAR(ctx, h) + require.NoError(t, err) + odsReader, err := eds.ODSReader(edsReader) + require.NoError(t, err) + _, err = eds.ReadEDS(ctx, odsReader, 
h)
+		require.NoError(t, err)
+		require.NoError(t, edsReader.Close())
+	}
+}
+
+type store struct {
+	s        Store
+	edsStore *eds.Store
+}
+
+func newStore(ctx context.Context, t require.TestingT, params *eds.Parameters, dir string) store {
+	s, err := OpenStore(dir, nil)
+	require.NoError(t, err)
+	ds, err := s.Datastore()
+	require.NoError(t, err)
+	edsStore, err := eds.NewStore(params, dir, ds)
+	require.NoError(t, err)
+	err = edsStore.Start(ctx)
+	require.NoError(t, err)
+	return store{
+		s:        s,
+		edsStore: edsStore,
+	}
+}
+
+func (s *store) stop(ctx context.Context, t *testing.T) {
+	require.NoError(t, s.edsStore.Stop(ctx))
+	require.NoError(t, s.s.Close())
+}
diff --git a/nodebuilder/testing.go b/nodebuilder/testing.go
new file mode 100644
index 0000000000..8d49772aef
--- /dev/null
+++ b/nodebuilder/testing.go
@@ -0,0 +1,74 @@
+package nodebuilder
+
+import (
+	"testing"
+
+	"github.com/cosmos/cosmos-sdk/crypto/hd"
+	"github.com/cosmos/cosmos-sdk/crypto/keyring"
+	"github.com/stretchr/testify/require"
+	"go.uber.org/fx"
+
+	apptypes "github.com/celestiaorg/celestia-app/x/blob/types"
+	libhead "github.com/celestiaorg/go-header"
+
+	"github.com/celestiaorg/celestia-node/core"
+	"github.com/celestiaorg/celestia-node/header"
+	"github.com/celestiaorg/celestia-node/header/headertest"
+	"github.com/celestiaorg/celestia-node/libs/fxutil"
+	"github.com/celestiaorg/celestia-node/nodebuilder/node"
+	"github.com/celestiaorg/celestia-node/nodebuilder/p2p"
+	"github.com/celestiaorg/celestia-node/nodebuilder/state"
+)
+
+// MockStore provides a mock in-memory Store for testing purposes.
+func MockStore(t *testing.T, cfg *Config) Store {
+	t.Helper()
+	store := NewMemStore()
+	err := store.PutConfig(cfg)
+	require.NoError(t, err)
+	return store
+}
+
+func TestNode(t *testing.T, tp node.Type, opts ...fx.Option) *Node {
+	return TestNodeWithConfig(t, tp, DefaultConfig(tp), opts...)
+}
+
+func TestNodeWithConfig(t *testing.T, tp node.Type, cfg *Config, opts ...fx.Option) *Node {
+	// avoids port conflicts
+	cfg.RPC.Port = "0"
+	cfg.Header.TrustedPeers = []string{"/ip4/1.2.3.4/tcp/12345/p2p/12D3KooWNaJ1y1Yio3fFJEXCZyd1Cat3jmrPdgkYCrHfKD3Ce21p"}
+
+	store := MockStore(t, cfg)
+	ks, err := store.Keystore()
+	require.NoError(t, err)
+
+	opts = append(opts,
+		// avoid writing keyring on disk
+		state.WithKeyringSigner(TestKeyringSigner(t, ks.Keyring())),
+		// temp dir for the eds store FIXME: Should be in mem
+		fx.Replace(node.StorePath(t.TempDir())),
+		// avoid requesting trustedPeer during initialization
+		fxutil.ReplaceAs(headertest.NewStore(t), new(libhead.Store[*header.ExtendedHeader])),
+	)
+
+	// in fact, we don't need core.Client in tests, but Bridge requires a valid one;
+	// otherwise it fails with a failed attempt to connect with a custom-built client
+	if tp == node.Bridge {
+		cctx := core.StartTestNode(t)
+		opts = append(opts,
+			fxutil.ReplaceAs(cctx.Client, new(core.Client)),
+		)
+	}
+
+	nd, err := New(tp, p2p.Private, store, opts...)
+ require.NoError(t, err) + return nd +} + +func TestKeyringSigner(t *testing.T, ring keyring.Keyring) *apptypes.KeyringSigner { + signer := apptypes.NewKeyringSigner(ring, "", string(p2p.Private)) + _, _, err := signer.NewMnemonic("test_celes", keyring.English, "", + "", hd.Secp256k1) + require.NoError(t, err) + return signer +} diff --git a/node/tests/README.md b/nodebuilder/tests/README.md similarity index 91% rename from node/tests/README.md rename to nodebuilder/tests/README.md index 176ee2ba21..dd2040ab42 100644 --- a/node/tests/README.md +++ b/nodebuilder/tests/README.md @@ -1,6 +1,6 @@ # Swamp: In-Memory Test Tool -Swamp is a testing tool that creates an environment for deploying `celestia-node` and testing instances against each other. +Swamp is our integration testing tool that creates an environment for deploying `celestia-node` and testing instances against each other. While the swamp takes care of setting up networking and initial configuration of node types, the user can focus on tailoring test scenarios. ## Usage @@ -38,7 +38,7 @@ require.NoError(t, err) light := sw.NewLightClient(node.WithTrustedPeer(addrs[0].String())) ``` -## Concenptual overview +## Conceptual overview Each of the test scenario requires flexibility in network topology. The user can define the necessary amount of each type of node and be able to control each of them. diff --git a/nodebuilder/tests/api_test.go b/nodebuilder/tests/api_test.go new file mode 100644 index 0000000000..a3b99a750b --- /dev/null +++ b/nodebuilder/tests/api_test.go @@ -0,0 +1,168 @@ +//go:build api || integration + +package tests + +import ( + "context" + "testing" + "time" + + "github.com/filecoin-project/go-jsonrpc/auth" + "github.com/libp2p/go-libp2p/core/host" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/stretchr/testify/require" + + "github.com/celestiaorg/celestia-node/api/rpc/client" + "github.com/celestiaorg/celestia-node/blob" + "github.com/celestiaorg/celestia-node/blob/blobtest" + "github.com/celestiaorg/celestia-node/nodebuilder" + "github.com/celestiaorg/celestia-node/nodebuilder/node" + "github.com/celestiaorg/celestia-node/nodebuilder/tests/swamp" +) + +const ( + btime = time.Millisecond * 300 +) + +func TestNodeModule(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), swamp.DefaultTestTimeout) + t.Cleanup(cancel) + + sw := swamp.NewSwamp(t, swamp.WithBlockTime(time.Second)) + // start a bridge node + bridge := sw.NewBridgeNode() + err := bridge.Start(ctx) + require.NoError(t, err) + + bridgeAddr := "http://" + bridge.RPCServer.ListenAddr() + + writePerms := []auth.Permission{"public", "read", "write"} + adminPerms := []auth.Permission{"public", "read", "write", "admin"} + jwt, err := bridge.AdminServ.AuthNew(ctx, adminPerms) + require.NoError(t, err) + + client, err := client.NewClient(ctx, bridgeAddr, jwt) + require.NoError(t, err) + + info, err := client.Node.Info(ctx) + require.NoError(t, err) + require.Equal(t, info.APIVersion, node.APIVersion) + + ready, err := client.Node.Ready(ctx) + require.NoError(t, err) + require.True(t, ready) + + perms, err := client.Node.AuthVerify(ctx, jwt) + require.NoError(t, err) + require.Equal(t, perms, adminPerms) + + writeJWT, err := client.Node.AuthNew(ctx, writePerms) + require.NoError(t, err) + + perms, err = client.Node.AuthVerify(ctx, writeJWT) + require.NoError(t, err) + require.Equal(t, perms, writePerms) + +} + +func TestGetByHeight(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), swamp.DefaultTestTimeout) + 
t.Cleanup(cancel) + + sw := swamp.NewSwamp(t, swamp.WithBlockTime(time.Second)) + + // start a bridge node + bridge := sw.NewBridgeNode() + err := bridge.Start(ctx) + require.NoError(t, err) + + rpcClient := getAdminClient(ctx, bridge, t) + + // let a few blocks be produced + _, err = rpcClient.Header.WaitForHeight(ctx, 3) + require.NoError(t, err) + + networkHead, err := rpcClient.Header.NetworkHead(ctx) + require.NoError(t, err) + _, err = rpcClient.Header.GetByHeight(ctx, networkHead.Height()+1) + require.Nil(t, err, "Requesting syncer.Head()+1 shouldn't return an error") + + networkHead, err = rpcClient.Header.NetworkHead(ctx) + require.NoError(t, err) + _, err = rpcClient.Header.GetByHeight(ctx, networkHead.Height()+2) + require.ErrorContains(t, err, "given height is from the future") +} + +// TestBlobRPC ensures that blobs can be submitted via rpc +func TestBlobRPC(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), swamp.DefaultTestTimeout) + t.Cleanup(cancel) + + sw := swamp.NewSwamp(t, swamp.WithBlockTime(btime)) + + // start a bridge node + bridge := sw.NewBridgeNode() + err := bridge.Start(ctx) + require.NoError(t, err) + + rpcClient := getAdminClient(ctx, bridge, t) + + appBlobs, err := blobtest.GenerateV0Blobs([]int{8}, false) + require.NoError(t, err) + + newBlob, err := blob.NewBlob( + appBlobs[0].ShareVersion, + append([]byte{appBlobs[0].NamespaceVersion}, appBlobs[0].NamespaceID...), + appBlobs[0].Data, + ) + require.NoError(t, err) + + height, err := rpcClient.Blob.Submit(ctx, []*blob.Blob{newBlob}, blob.DefaultGasPrice()) + require.NoError(t, err) + require.True(t, height != 0) +} + +// TestHeaderSubscription ensures that the header subscription over RPC works +// as intended and gets canceled successfully after rpc context cancellation. 
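Every test in this file reaches the node the same way: mint a JWT with the desired permissions against the node's admin endpoint, then dial the RPC listener with it. A compressed sketch of that pattern, using only names that appear in this diff (the dialAdmin helper itself is hypothetical):

```go
package tests

import (
	"context"

	"github.com/filecoin-project/go-jsonrpc/auth"

	"github.com/celestiaorg/celestia-node/api/rpc/client"
	"github.com/celestiaorg/celestia-node/nodebuilder"
)

// dialAdmin shows the token-then-dial pattern used across these tests;
// nd must be a started node. (Hypothetical helper, not part of the diff.)
func dialAdmin(ctx context.Context, nd *nodebuilder.Node) (*client.Client, error) {
	perms := []auth.Permission{"public", "read", "write", "admin"}
	// AuthNew mints a signed JWT carrying the requested permissions.
	jwt, err := nd.AdminServ.AuthNew(ctx, perms)
	if err != nil {
		return nil, err
	}
	// TestNodeModule above dials over HTTP; getAdminClient in helpers_test.go uses ws://.
	return client.NewClient(ctx, "http://"+nd.RPCServer.ListenAddr(), jwt)
}
```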
+func TestHeaderSubscription(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), swamp.DefaultTestTimeout) + t.Cleanup(cancel) + + sw := swamp.NewSwamp(t, swamp.WithBlockTime(btime)) + + // start a bridge node + bridge := sw.NewBridgeNode() + err := bridge.Start(ctx) + require.NoError(t, err) + + cfg := nodebuilder.DefaultConfig(node.Light) + addrs, err := peer.AddrInfoToP2pAddrs(host.InfoFromHost(bridge.Host)) + require.NoError(t, err) + cfg.Header.TrustedPeers = append(cfg.Header.TrustedPeers, addrs[0].String()) + + // start a light node that's connected to the bridge node + light := sw.NewNodeWithConfig(node.Light, cfg) + err = light.Start(ctx) + require.NoError(t, err) + + lightClient := getAdminClient(ctx, light, t) + + // subscribe to headers via the light node's RPC header subscription + subctx, subcancel := context.WithCancel(ctx) + sub, err := lightClient.Header.Subscribe(subctx) + require.NoError(t, err) + // listen for 5 headers + for i := 0; i < 5; i++ { + select { + case <-ctx.Done(): + t.Fatal(ctx.Err()) + case <-sub: + } + } + // cancel subscription via context + subcancel() + + // stop the light node and expect no outstanding subscription errors + err = light.Stop(ctx) + require.NoError(t, err) +} diff --git a/nodebuilder/tests/blob_test.go b/nodebuilder/tests/blob_test.go new file mode 100644 index 0000000000..d0aeefd568 --- /dev/null +++ b/nodebuilder/tests/blob_test.go @@ -0,0 +1,207 @@ +//go:build blob || integration + +package tests + +import ( + "bytes" + "context" + "testing" + "time" + + "github.com/libp2p/go-libp2p/core/host" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/celestiaorg/celestia-node/blob" + "github.com/celestiaorg/celestia-node/blob/blobtest" + "github.com/celestiaorg/celestia-node/nodebuilder/node" + "github.com/celestiaorg/celestia-node/nodebuilder/tests/swamp" + "github.com/celestiaorg/celestia-node/share" +) + +func TestBlobModule(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 25*time.Second) + t.Cleanup(cancel) + sw := swamp.NewSwamp(t, swamp.WithBlockTime(time.Second*1)) + + appBlobs0, err := blobtest.GenerateV0Blobs([]int{8, 4}, true) + require.NoError(t, err) + appBlobs1, err := blobtest.GenerateV0Blobs([]int{4}, false) + require.NoError(t, err) + blobs := make([]*blob.Blob, 0, len(appBlobs0)+len(appBlobs1)) + + for _, b := range append(appBlobs0, appBlobs1...) 
{ + blob, err := blob.NewBlob(b.ShareVersion, append([]byte{b.NamespaceVersion}, b.NamespaceID...), b.Data) + require.NoError(t, err) + blobs = append(blobs, blob) + } + + require.NoError(t, err) + bridge := sw.NewBridgeNode() + require.NoError(t, bridge.Start(ctx)) + + addrs, err := peer.AddrInfoToP2pAddrs(host.InfoFromHost(bridge.Host)) + require.NoError(t, err) + + fullCfg := sw.DefaultTestConfig(node.Full) + fullCfg.Header.TrustedPeers = append(fullCfg.Header.TrustedPeers, addrs[0].String()) + fullNode := sw.NewNodeWithConfig(node.Full, fullCfg) + require.NoError(t, fullNode.Start(ctx)) + + addrsFull, err := peer.AddrInfoToP2pAddrs(host.InfoFromHost(fullNode.Host)) + require.NoError(t, err) + + lightCfg := sw.DefaultTestConfig(node.Light) + lightCfg.Header.TrustedPeers = append(lightCfg.Header.TrustedPeers, addrsFull[0].String()) + lightNode := sw.NewNodeWithConfig(node.Light, lightCfg) + require.NoError(t, lightNode.Start(ctx)) + + fullClient := getAdminClient(ctx, fullNode, t) + lightClient := getAdminClient(ctx, lightNode, t) + + height, err := fullClient.Blob.Submit(ctx, blobs, blob.DefaultGasPrice()) + require.NoError(t, err) + + _, err = fullClient.Header.WaitForHeight(ctx, height) + require.NoError(t, err) + _, err = lightClient.Header.WaitForHeight(ctx, height) + require.NoError(t, err) + + var test = []struct { + name string + doFn func(t *testing.T) + }{ + { + name: "Get", + doFn: func(t *testing.T) { + // https://github.com/celestiaorg/celestia-node/issues/2915 + time.Sleep(time.Second) + blob1, err := fullClient.Blob.Get(ctx, height, blobs[0].Namespace(), blobs[0].Commitment) + require.NoError(t, err) + require.Equal(t, blobs[0], blob1) + }, + }, + { + name: "GetAll", + doFn: func(t *testing.T) { + // https://github.com/celestiaorg/celestia-node/issues/2915 + time.Sleep(time.Second) + newBlobs, err := fullClient.Blob.GetAll(ctx, height, []share.Namespace{blobs[0].Namespace()}) + require.NoError(t, err) + require.Len(t, newBlobs, len(appBlobs0)) + require.True(t, bytes.Equal(blobs[0].Commitment, newBlobs[0].Commitment)) + require.True(t, bytes.Equal(blobs[1].Commitment, newBlobs[1].Commitment)) + }, + }, + { + name: "Included", + doFn: func(t *testing.T) { + // https://github.com/celestiaorg/celestia-node/issues/2915 + time.Sleep(time.Second) + proof, err := fullClient.Blob.GetProof(ctx, height, blobs[0].Namespace(), blobs[0].Commitment) + require.NoError(t, err) + + included, err := lightClient.Blob.Included( + ctx, + height, + blobs[0].Namespace(), + proof, + blobs[0].Commitment, + ) + require.NoError(t, err) + require.True(t, included) + }, + }, + { + name: "Not Found", + doFn: func(t *testing.T) { + appBlob, err := blobtest.GenerateV0Blobs([]int{4}, false) + require.NoError(t, err) + newBlob, err := blob.NewBlob( + appBlob[0].ShareVersion, + append([]byte{appBlob[0].NamespaceVersion}, appBlob[0].NamespaceID...), + appBlob[0].Data, + ) + require.NoError(t, err) + + b, err := fullClient.Blob.Get(ctx, height, newBlob.Namespace(), newBlob.Commitment) + assert.Nil(t, b) + require.Error(t, err) + require.ErrorContains(t, err, blob.ErrBlobNotFound.Error()) + }, + }, + { + name: "Submit equal blobs", + doFn: func(t *testing.T) { + appBlob, err := blobtest.GenerateV0Blobs([]int{8, 4}, true) + require.NoError(t, err) + b, err := blob.NewBlob( + appBlob[0].ShareVersion, + append([]byte{appBlob[0].NamespaceVersion}, appBlob[0].NamespaceID...), + appBlob[0].Data, + ) + require.NoError(t, err) + + height, err := fullClient.Blob.Submit(ctx, []*blob.Blob{b, b}, blob.DefaultGasPrice()) + 
require.NoError(t, err)
+
+				_, err = fullClient.Header.WaitForHeight(ctx, height)
+				require.NoError(t, err)
+
+				b0, err := fullClient.Blob.Get(ctx, height, b.Namespace(), b.Commitment)
+				require.NoError(t, err)
+				require.Equal(t, b, b0)
+
+				// give some time to store the data,
+				// otherwise the test will hang on the IPLD level.
+				// https://github.com/celestiaorg/celestia-node/issues/2915
+				time.Sleep(time.Second)
+
+				proof, err := fullClient.Blob.GetProof(ctx, height, b.Namespace(), b.Commitment)
+				require.NoError(t, err)
+
+				included, err := fullClient.Blob.Included(ctx, height, b.Namespace(), proof, b.Commitment)
+				require.NoError(t, err)
+				require.True(t, included)
+			},
+		},
+		{
+			// This test checks that the blob is not deduplicated
+			// when it is sent multiple times in different PFBs.
+			name: "Submit the same blob in different pfb",
+			doFn: func(t *testing.T) {
+				h, err := fullClient.Blob.Submit(ctx, []*blob.Blob{blobs[0]}, blob.DefaultGasPrice())
+				require.NoError(t, err)
+
+				_, err = fullClient.Header.WaitForHeight(ctx, h)
+				require.NoError(t, err)
+
+				b0, err := fullClient.Blob.Get(ctx, h, blobs[0].Namespace(), blobs[0].Commitment)
+				require.NoError(t, err)
+				require.Equal(t, blobs[0], b0)
+
+				// give some time to store the data,
+				// otherwise the test will hang on the IPLD level.
+				// https://github.com/celestiaorg/celestia-node/issues/2915
+				time.Sleep(time.Second)
+
+				proof, err := fullClient.Blob.GetProof(ctx, h, blobs[0].Namespace(), blobs[0].Commitment)
+				require.NoError(t, err)
+
+				included, err := fullClient.Blob.Included(ctx, h, blobs[0].Namespace(), proof, blobs[0].Commitment)
+				require.NoError(t, err)
+				require.True(t, included)
+			},
+		},
+	}
+
+	for _, tt := range test {
+		tt := tt
+		t.Run(tt.name, func(t *testing.T) {
+			tt.doFn(t)
+		})
+	}
+}
diff --git a/nodebuilder/tests/da_test.go b/nodebuilder/tests/da_test.go
new file mode 100644
index 0000000000..bdcd4e638c
--- /dev/null
+++ b/nodebuilder/tests/da_test.go
@@ -0,0 +1,145 @@
+//go:build da || integration
+
+package tests
+
+import (
+	"bytes"
+	"context"
+	"testing"
+	"time"
+
+	"github.com/libp2p/go-libp2p/core/host"
+	"github.com/libp2p/go-libp2p/core/peer"
+	"github.com/stretchr/testify/require"
+
+	"github.com/celestiaorg/celestia-app/pkg/appconsts"
+
+	"github.com/celestiaorg/celestia-node/blob"
+	"github.com/celestiaorg/celestia-node/blob/blobtest"
+	"github.com/celestiaorg/celestia-node/nodebuilder/da"
+	"github.com/celestiaorg/celestia-node/nodebuilder/node"
+	"github.com/celestiaorg/celestia-node/nodebuilder/tests/swamp"
+	"github.com/celestiaorg/celestia-node/share"
+)
+
+func TestDaModule(t *testing.T) {
+	ctx, cancel := context.WithTimeout(context.Background(), 25*time.Second)
+	t.Cleanup(cancel)
+	sw := swamp.NewSwamp(t, swamp.WithBlockTime(time.Second))
+
+	namespace, err := share.NewBlobNamespaceV0([]byte("namespace"))
+	require.NoError(t, err)
+
+	appBlobs0, err := blobtest.GenerateV0Blobs([]int{8, 4}, true)
+	require.NoError(t, err)
+	appBlobs1, err := blobtest.GenerateV0Blobs([]int{4}, false)
+	require.NoError(t, err)
+	blobs := make([]*blob.Blob, 0, len(appBlobs0)+len(appBlobs1))
+	daBlobs := make([][]byte, 0, len(appBlobs0)+len(appBlobs1))
+
+	for _, b := range append(appBlobs0, appBlobs1...)
{ + blob, err := blob.NewBlob(b.ShareVersion, namespace, b.Data) + require.NoError(t, err) + blobs = append(blobs, blob) + daBlobs = append(daBlobs, blob.Data) + } + + require.NoError(t, err) + bridge := sw.NewBridgeNode() + require.NoError(t, bridge.Start(ctx)) + + addrs, err := peer.AddrInfoToP2pAddrs(host.InfoFromHost(bridge.Host)) + require.NoError(t, err) + + fullCfg := sw.DefaultTestConfig(node.Full) + fullCfg.Header.TrustedPeers = append(fullCfg.Header.TrustedPeers, addrs[0].String()) + fullNode := sw.NewNodeWithConfig(node.Full, fullCfg) + require.NoError(t, fullNode.Start(ctx)) + + addrsFull, err := peer.AddrInfoToP2pAddrs(host.InfoFromHost(fullNode.Host)) + require.NoError(t, err) + + lightCfg := sw.DefaultTestConfig(node.Light) + lightCfg.Header.TrustedPeers = append(lightCfg.Header.TrustedPeers, addrsFull[0].String()) + lightNode := sw.NewNodeWithConfig(node.Light, lightCfg) + require.NoError(t, lightNode.Start(ctx)) + + fullClient := getAdminClient(ctx, fullNode, t) + lightClient := getAdminClient(ctx, lightNode, t) + + ids, err := fullClient.DA.Submit(ctx, daBlobs, -1, namespace) + require.NoError(t, err) + + var test = []struct { + name string + doFn func(t *testing.T) + }{ + { + name: "MaxBlobSize", + doFn: func(t *testing.T) { + mbs, err := fullClient.DA.MaxBlobSize(ctx) + require.NoError(t, err) + require.Equal(t, mbs, uint64(appconsts.DefaultMaxBytes)) + }, + }, + { + name: "GetProofs + Validate", + doFn: func(t *testing.T) { + t.Skip() + h, _ := da.SplitID(ids[0]) + lightClient.Header.WaitForHeight(ctx, h) + proofs, err := lightClient.DA.GetProofs(ctx, ids, namespace) + require.NoError(t, err) + require.NotEmpty(t, proofs) + valid, err := fullClient.DA.Validate(ctx, ids, proofs, namespace) + require.NoError(t, err) + for _, v := range valid { + require.True(t, v) + } + }, + }, + { + name: "GetIDs", + doFn: func(t *testing.T) { + t.Skip() + height, _ := da.SplitID(ids[0]) + ids2, err := fullClient.DA.GetIDs(ctx, height, namespace) + require.NoError(t, err) + require.EqualValues(t, ids, ids2) + }, + }, + { + name: "Get", + doFn: func(t *testing.T) { + h, _ := da.SplitID(ids[0]) + lightClient.Header.WaitForHeight(ctx, h) + fetched, err := lightClient.DA.Get(ctx, ids, namespace) + require.NoError(t, err) + require.Len(t, fetched, len(ids)) + for i := range fetched { + require.True(t, bytes.Equal(fetched[i], daBlobs[i])) + } + }, + }, + { + name: "Commit", + doFn: func(t *testing.T) { + t.Skip() + fetched, err := fullClient.DA.Commit(ctx, ids, namespace) + require.NoError(t, err) + require.Len(t, fetched, len(ids)) + for i := range fetched { + _, commitment := da.SplitID(ids[i]) + require.EqualValues(t, fetched[i], commitment) + } + }, + }, + } + + for _, tt := range test { + tt := tt + t.Run(tt.name, func(t *testing.T) { + tt.doFn(t) + }) + } +} diff --git a/nodebuilder/tests/fraud_test.go b/nodebuilder/tests/fraud_test.go new file mode 100644 index 0000000000..6496cdbb53 --- /dev/null +++ b/nodebuilder/tests/fraud_test.go @@ -0,0 +1,177 @@ +//go:build fraud || integration + +package tests + +import ( + "context" + "testing" + "time" + + "github.com/ipfs/go-datastore" + ds_sync "github.com/ipfs/go-datastore/sync" + "github.com/libp2p/go-libp2p/core/host" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/types" + "go.uber.org/fx" + + "github.com/celestiaorg/go-fraud" + + "github.com/celestiaorg/celestia-node/header" + headerfraud "github.com/celestiaorg/celestia-node/header/headertest/fraud" + 
"github.com/celestiaorg/celestia-node/nodebuilder" + "github.com/celestiaorg/celestia-node/nodebuilder/core" + "github.com/celestiaorg/celestia-node/nodebuilder/node" + "github.com/celestiaorg/celestia-node/nodebuilder/tests/swamp" + "github.com/celestiaorg/celestia-node/share/eds" + "github.com/celestiaorg/celestia-node/share/eds/byzantine" +) + +/* +Test-Case: Full Node will propagate a fraud proof to the network, once ByzantineError will be received from sampling. +Pre-Requisites: +- CoreClient is started by swamp. +Steps: +1. Create a Bridge Node(BN) with broken extended header at height 10. +2. Start a BN. +3. Create a Full Node(FN) with a connection to BN as a trusted peer. +4. Start a FN. +5. Subscribe to a fraud proof and wait when it will be received. +6. Check FN is not synced to 15. +Note: 15 is not available because DASer/Syncer will be stopped +before reaching this height due to receiving a fraud proof. +Another note: this test disables share exchange to speed up test results. +7. Spawn a Light Node(LN) in order to sync a BEFP. +8. Ensure that the BEFP was received. +9. Try to start a Full Node(FN) that contains a BEFP in its store. +*/ +func TestFraudProofHandling(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), swamp.DefaultTestTimeout) + t.Cleanup(cancel) + + const ( + blocks = 15 + blockSize = 4 + blockTime = time.Second + ) + + sw := swamp.NewSwamp(t, swamp.WithBlockTime(blockTime)) + fillDn := swamp.FillBlocks(ctx, sw.ClientContext, sw.Accounts, blockSize, blocks) + set, val := sw.Validators(t) + fMaker := headerfraud.NewFraudMaker(t, 10, []types.PrivValidator{val}, set) + + storeCfg := eds.DefaultParameters() + ds := ds_sync.MutexWrap(datastore.NewMapDatastore()) + edsStore, err := eds.NewStore(storeCfg, t.TempDir(), ds) + require.NoError(t, err) + require.NoError(t, edsStore.Start(ctx)) + t.Cleanup(func() { + _ = edsStore.Stop(ctx) + }) + + cfg := nodebuilder.DefaultConfig(node.Bridge) + // 1. + bridge := sw.NewNodeWithConfig( + node.Bridge, + cfg, + core.WithHeaderConstructFn(fMaker.MakeExtendedHeader(16, edsStore)), + fx.Replace(edsStore), + ) + // 2. + err = bridge.Start(ctx) + require.NoError(t, err) + + // 3. + cfg = nodebuilder.DefaultConfig(node.Full) + addrs, err := peer.AddrInfoToP2pAddrs(host.InfoFromHost(bridge.Host)) + require.NoError(t, err) + cfg.Header.TrustedPeers = append(cfg.Header.TrustedPeers, addrs[0].String()) + cfg.Share.UseShareExchange = false + store := nodebuilder.MockStore(t, cfg) + full := sw.NewNodeWithStore(node.Full, store) + + // 4. + err = full.Start(ctx) + require.NoError(t, err) + + fullClient := getAdminClient(ctx, full, t) + + // 5. + subCtx, subCancel := context.WithCancel(ctx) + subscr, err := fullClient.Fraud.Subscribe(subCtx, byzantine.BadEncoding) + require.NoError(t, err) + select { + case p := <-subscr: + require.Equal(t, 10, int(p.Height())) + t.Log("Caught the proof....") + subCancel() + case <-ctx.Done(): + subCancel() + t.Fatal("full node did not receive a fraud proof in time") + } + + getCtx, getCancel := context.WithTimeout(ctx, time.Second) + proofs, err := fullClient.Fraud.Get(getCtx, byzantine.BadEncoding) + getCancel() + + require.NoError(t, err) + require.Len(t, proofs, 1) + require.True(t, proofs[0].Type() == byzantine.BadEncoding) + // This is an obscure way to check if the Syncer was stopped. 
+	// If we cannot get a header for the given height within the timeframe, it means the syncer was stopped.
+	// FIXME: Eventually, this should be a check on service registry managing and keeping
+	// lifecycles of each Module.
+	// 6.
+	// random height after befp.height
+	height := uint64(15)
+	// initial timeout is set to 5 sec, as we are targeting the height=15,
+	// blockTime=1 sec, expected befp.height=10
+	timeOut := blockTime * 5
+	// during befp validation the node can still receive headers; this mostly depends on
+	// the operating system or hardware (e.g. on macOS the test passes 100% of the time with a single
+	// height=15, while on a Linux VM the last height is sometimes 17-18). So, let's give our befp
+	// validator a chance to check the fraud proof and stop the syncer.
+	for height < 20 {
+		syncCtx, syncCancel := context.WithTimeout(context.Background(), timeOut)
+		_, err = full.HeaderServ.WaitForHeight(syncCtx, height)
+		syncCancel()
+		if err != nil {
+			break
+		}
+		timeOut = blockTime
+		height++
+	}
+	require.ErrorIs(t, err, context.DeadlineExceeded)
+
+	// 7.
+	cfg = nodebuilder.DefaultConfig(node.Light)
+	cfg.Header.TrustedPeers = append(cfg.Header.TrustedPeers, addrs[0].String())
+	lnStore := nodebuilder.MockStore(t, cfg)
+	light := sw.NewNodeWithStore(node.Light, lnStore)
+	require.NoError(t, light.Start(ctx))
+	lightClient := getAdminClient(ctx, light, t)
+
+	// 8.
+	subCtx, subCancel = context.WithCancel(ctx)
+	subscr, err = lightClient.Fraud.Subscribe(subCtx, byzantine.BadEncoding)
+	require.NoError(t, err)
+	select {
+	case p := <-subscr:
+		require.Equal(t, 10, int(p.Height()))
+		subCancel()
+	case <-ctx.Done():
+		subCancel()
+		t.Fatal("light node did not receive a fraud proof in time")
+	}
+
+	// 9.
+	fN := sw.NewNodeWithStore(node.Full, store)
+	err = fN.Start(ctx)
+	var fpExist *fraud.ErrFraudExists[*header.ExtendedHeader]
+	require.ErrorAs(t, err, &fpExist)
+
+	sw.StopNode(ctx, bridge)
+	sw.StopNode(ctx, full)
+	sw.StopNode(ctx, light)
+	require.NoError(t, <-fillDn)
+}
diff --git a/nodebuilder/tests/helpers_test.go b/nodebuilder/tests/helpers_test.go
new file mode 100644
index 0000000000..978b66553d
--- /dev/null
+++ b/nodebuilder/tests/helpers_test.go
@@ -0,0 +1,35 @@
+//nolint:unused
+package tests
+
+import (
+	"context"
+	"testing"
+	"time"
+
+	"github.com/filecoin-project/go-jsonrpc/auth"
+	"github.com/stretchr/testify/require"
+
+	"github.com/celestiaorg/celestia-node/api/rpc/client"
+	"github.com/celestiaorg/celestia-node/libs/authtoken"
+	"github.com/celestiaorg/celestia-node/nodebuilder"
+)
+
+func getAdminClient(ctx context.Context, nd *nodebuilder.Node, t *testing.T) *client.Client {
+	t.Helper()
+
+	signer := nd.AdminSigner
+	listenAddr := "ws://" + nd.RPCServer.ListenAddr()
+
+	jwt, err := authtoken.NewSignedJWT(signer, []auth.Permission{"public", "read", "write", "admin"})
+	require.NoError(t, err)
+
+	client, err := client.NewClient(ctx, listenAddr, jwt)
+	require.NoError(t, err)
+
+	return client
+}
+
+func setTimeInterval(cfg *nodebuilder.Config, interval time.Duration) {
+	cfg.P2P.RoutingTableRefreshPeriod = interval
+	cfg.Share.Discovery.AdvertiseInterval = interval
+}
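The two helpers above do most of the heavy lifting for the integration tests that follow: getAdminClient wraps the sign-then-dial dance over websockets, while setTimeInterval shortens discovery timers so peers find each other within test timeouts. A sketch of how they typically combine (the test name is hypothetical; every other name appears in this diff):

```go
package tests

import (
	"context"
	"testing"
	"time"

	"github.com/stretchr/testify/require"

	"github.com/celestiaorg/celestia-node/nodebuilder"
	"github.com/celestiaorg/celestia-node/nodebuilder/node"
	"github.com/celestiaorg/celestia-node/nodebuilder/tests/swamp"
)

// TestHelpersSketch is a hypothetical example of using the two helpers above.
func TestHelpersSketch(t *testing.T) {
	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	t.Cleanup(cancel)

	sw := swamp.NewSwamp(t)

	// shorten routing-table refresh and advertise intervals for the test
	cfg := nodebuilder.DefaultConfig(node.Full)
	setTimeInterval(cfg, time.Second*2)

	full := sw.NewNodeWithConfig(node.Full, cfg)
	require.NoError(t, full.Start(ctx))

	// admin JWT + websocket dial, exactly as implemented above
	fullClient := getAdminClient(ctx, full, t)
	_, err := fullClient.Header.NetworkHead(ctx)
	require.NoError(t, err)
}
```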
"github.com/stretchr/testify/require" + "go.uber.org/fx" + + "github.com/celestiaorg/celestia-node/nodebuilder" + "github.com/celestiaorg/celestia-node/nodebuilder/node" + "github.com/celestiaorg/celestia-node/nodebuilder/p2p" + "github.com/celestiaorg/celestia-node/nodebuilder/tests/swamp" + "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/share/eds" + "github.com/celestiaorg/celestia-node/share/getters" + "github.com/celestiaorg/celestia-node/share/p2p/shrexnd" +) + +func TestShrexNDFromLights(t *testing.T) { + const ( + blocks = 10 + btime = time.Millisecond * 300 + bsize = 16 + ) + + ctx, cancel := context.WithTimeout(context.Background(), swamp.DefaultTestTimeout) + t.Cleanup(cancel) + + sw := swamp.NewSwamp(t, swamp.WithBlockTime(btime)) + fillDn := swamp.FillBlocks(ctx, sw.ClientContext, sw.Accounts, bsize, blocks) + + bridge := sw.NewBridgeNode() + sw.SetBootstrapper(t, bridge) + + cfg := nodebuilder.DefaultConfig(node.Light) + cfg.Share.Discovery.PeersLimit = 1 + light := sw.NewNodeWithConfig(node.Light, cfg) + + err := bridge.Start(ctx) + require.NoError(t, err) + err = light.Start(ctx) + require.NoError(t, err) + + bridgeClient := getAdminClient(ctx, bridge, t) + lightClient := getAdminClient(ctx, light, t) + + // wait for chain to be filled + require.NoError(t, <-fillDn) + + // first 15 blocks are not filled with data + // + // TODO: we need to stop guessing + // the block that actually has transactions. We can get this data from the + // response returned by FillBlock. + for i := 16; i < blocks; i++ { + h, err := bridgeClient.Header.GetByHeight(ctx, uint64(i)) + require.NoError(t, err) + + reqCtx, cancel := context.WithTimeout(ctx, time.Second*5) + + // ensure to fetch random namespace (not the reserved namespace) + namespace := h.DAH.RowRoots[1][:share.NamespaceSize] + + expected, err := bridgeClient.Share.GetSharesByNamespace(reqCtx, h, namespace) + require.NoError(t, err) + got, err := lightClient.Share.GetSharesByNamespace(reqCtx, h, namespace) + require.NoError(t, err) + + require.True(t, len(got[0].Shares) > 0) + require.Equal(t, expected, got) + + cancel() + } +} + +func TestShrexNDFromLightsWithBadFulls(t *testing.T) { + const ( + blocks = 10 + btime = time.Millisecond * 300 + bsize = 16 + amountOfFulls = 5 + testTimeout = time.Second * 10 + ) + + ctx, cancel := context.WithTimeout(context.Background(), testTimeout) + t.Cleanup(cancel) + + sw := swamp.NewSwamp(t, swamp.WithBlockTime(btime)) + fillDn := swamp.FillBlocks(ctx, sw.ClientContext, sw.Accounts, bsize, blocks) + + bridge := sw.NewBridgeNode() + sw.SetBootstrapper(t, bridge) + + // create full nodes with basic stream.reset handler + ndHandler := func(stream network.Stream) { + _ = stream.Reset() + } + fulls := make([]*nodebuilder.Node, 0, amountOfFulls) + for i := 0; i < amountOfFulls; i++ { + cfg := nodebuilder.DefaultConfig(node.Full) + setTimeInterval(cfg, testTimeout) + full := sw.NewNodeWithConfig(node.Full, cfg, replaceNDServer(cfg, ndHandler), replaceShareGetter()) + fulls = append(fulls, full) + } + + lnConfig := nodebuilder.DefaultConfig(node.Light) + lnConfig.Share.Discovery.PeersLimit = uint(amountOfFulls) + light := sw.NewNodeWithConfig(node.Light, lnConfig) + + // start all nodes + require.NoError(t, bridge.Start(ctx)) + require.NoError(t, startFullNodes(ctx, fulls...)) + require.NoError(t, light.Start(ctx)) + + bridgeClient := getAdminClient(ctx, bridge, t) + lightClient := getAdminClient(ctx, light, t) + + // wait for chain to fill up + require.NoError(t, 
<-fillDn) + + // first 2 blocks are not filled with data + for i := 3; i < blocks; i++ { + h, err := bridgeClient.Header.GetByHeight(ctx, uint64(i)) + require.NoError(t, err) + + if len(h.DAH.RowRoots) != bsize*2 { + // fill blocks does not always fill every block to the given block + // size - this check prevents trying to fetch shares for the parity + // namespace. + continue + } + + reqCtx, cancel := context.WithTimeout(ctx, time.Second*5) + + // ensure to fetch random namespace (not the reserved namespace) + namespace := h.DAH.RowRoots[1][:share.NamespaceSize] + + expected, err := bridgeClient.Share.GetSharesByNamespace(reqCtx, h, namespace) + require.NoError(t, err) + require.True(t, len(expected[0].Shares) > 0) + + // choose a random full to test + fN := fulls[len(fulls)/2] + fnClient := getAdminClient(ctx, fN, t) + gotFull, err := fnClient.Share.GetSharesByNamespace(reqCtx, h, namespace) + require.NoError(t, err) + require.True(t, len(gotFull[0].Shares) > 0) + + gotLight, err := lightClient.Share.GetSharesByNamespace(reqCtx, h, namespace) + require.NoError(t, err) + require.True(t, len(gotLight[0].Shares) > 0) + + require.Equal(t, expected, gotFull) + require.Equal(t, expected, gotLight) + + cancel() + } +} + +func startFullNodes(ctx context.Context, fulls ...*nodebuilder.Node) error { + for _, full := range fulls { + err := full.Start(ctx) + if err != nil { + return err + } + } + return nil +} + +func replaceNDServer(cfg *nodebuilder.Config, handler network.StreamHandler) fx.Option { + return fx.Decorate(fx.Annotate( + func( + host host.Host, + store *eds.Store, + network p2p.Network, + ) (*shrexnd.Server, error) { + cfg.Share.ShrExNDParams.WithNetworkID(network.String()) + return shrexnd.NewServer(cfg.Share.ShrExNDParams, host, store) + }, + fx.OnStart(func(ctx context.Context, server *shrexnd.Server) error { + // replace handler for server + server.SetHandler(handler) + return server.Start(ctx) + }), + fx.OnStop(func(ctx context.Context, server *shrexnd.Server) error { + return server.Start(ctx) + }), + )) +} + +func replaceShareGetter() fx.Option { + return fx.Decorate(fx.Annotate( + func( + host host.Host, + store *eds.Store, + storeGetter *getters.StoreGetter, + shrexGetter *getters.ShrexGetter, + network p2p.Network, + ) share.Getter { + cascade := make([]share.Getter, 0, 2) + cascade = append(cascade, storeGetter) + cascade = append(cascade, shrexGetter) + return getters.NewCascadeGetter(cascade) + }, + )) +} diff --git a/nodebuilder/tests/p2p_test.go b/nodebuilder/tests/p2p_test.go new file mode 100644 index 0000000000..98e9fc15b4 --- /dev/null +++ b/nodebuilder/tests/p2p_test.go @@ -0,0 +1,203 @@ +//go:build p2p || integration + +package tests + +import ( + "context" + "testing" + "time" + + "github.com/libp2p/go-libp2p/core/host" + "github.com/libp2p/go-libp2p/core/network" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/celestiaorg/celestia-node/nodebuilder" + "github.com/celestiaorg/celestia-node/nodebuilder/node" + "github.com/celestiaorg/celestia-node/nodebuilder/tests/swamp" +) + +/* +Test-Case: Full/Light Nodes connection to Bridge as a Bootstrapper +Steps: +1. Create a Bridge Node(BN) +2. Start a BN +3. Create full/light nodes with bridge node as bootstrap peer +4. Start full/light nodes +5. 
diff --git a/nodebuilder/tests/p2p_test.go b/nodebuilder/tests/p2p_test.go
new file mode 100644
index 0000000000..98e9fc15b4
--- /dev/null
+++ b/nodebuilder/tests/p2p_test.go
@@ -0,0 +1,203 @@
+//go:build p2p || integration
+
+package tests
+
+import (
+	"context"
+	"testing"
+	"time"
+
+	"github.com/libp2p/go-libp2p/core/host"
+	"github.com/libp2p/go-libp2p/core/network"
+	"github.com/libp2p/go-libp2p/core/peer"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	"github.com/celestiaorg/celestia-node/nodebuilder"
+	"github.com/celestiaorg/celestia-node/nodebuilder/node"
+	"github.com/celestiaorg/celestia-node/nodebuilder/tests/swamp"
+)
+
+/*
+Test-Case: Full/Light Nodes connection to Bridge as a Bootstrapper
+Steps:
+1. Create a Bridge Node(BN)
+2. Start a BN
+3. Create full/light nodes with bridge node as bootstrap peer
+4. Start full/light nodes
+5. Check that nodes are connected to bridge
+*/
+func TestBridgeNodeAsBootstrapper(t *testing.T) {
+	ctx, cancel := context.WithTimeout(context.Background(), swamp.DefaultTestTimeout)
+	t.Cleanup(cancel)
+
+	sw := swamp.NewSwamp(t)
+
+	// create and start BN
+	bridge := sw.NewBridgeNode()
+	err := bridge.Start(ctx)
+	require.NoError(t, err)
+
+	addr := host.InfoFromHost(bridge.Host)
+
+	full := sw.NewFullNode(nodebuilder.WithBootstrappers([]peer.AddrInfo{*addr}))
+	light := sw.NewLightNode(nodebuilder.WithBootstrappers([]peer.AddrInfo{*addr}))
+
+	for _, nd := range []*nodebuilder.Node{full, light} {
+		// start node and ensure that BN is correctly set as bootstrapper
+		require.NoError(t, nd.Start(ctx))
+		assert.Equal(t, *addr, nd.Bootstrappers[0])
+		// ensure that node is actually connected to BN
+		client := getAdminClient(ctx, nd, t)
+		connectedness, err := client.P2P.Connectedness(ctx, addr.ID)
+		require.NoError(t, err)
+		assert.Equal(t, connectedness, network.Connected)
+	}
+}
+
+/*
+Test-Case: Connect Full And Light using Bridge node as a bootstrapper
+Steps:
+ 1. Create a Bridge Node(BN)
+ 2. Start a BN
+ 3. Create full/light nodes with bridge node as bootstrap peer
+ 4. Start full/light nodes
+ 5. Ensure that nodes are connected to bridge
+ 6. Wait until the light node finds the full node
+ 7. Check that full and light nodes are connected to each other
+*/
+func TestFullDiscoveryViaBootstrapper(t *testing.T) {
+	ctx, cancel := context.WithTimeout(context.Background(), swamp.DefaultTestTimeout)
+	t.Cleanup(cancel)
+
+	const defaultTimeInterval = time.Second * 2
+
+	sw := swamp.NewSwamp(t)
+
+	// create and start a BN
+	cfg := nodebuilder.DefaultConfig(node.Bridge)
+	setTimeInterval(cfg, defaultTimeInterval)
+	bridge := sw.NewNodeWithConfig(node.Bridge, cfg)
+	err := bridge.Start(ctx)
+	require.NoError(t, err)
+
+	// use BN as the bootstrapper
+	bootstrapper := host.InfoFromHost(bridge.Host)
+
+	// create FN with BN as bootstrapper
+	cfg = nodebuilder.DefaultConfig(node.Full)
+	setTimeInterval(cfg, defaultTimeInterval)
+	full := sw.NewNodeWithConfig(
+		node.Full,
+		cfg,
+		nodebuilder.WithBootstrappers([]peer.AddrInfo{*bootstrapper}),
+	)
+
+	// create LN with BN as bootstrapper
+	cfg = nodebuilder.DefaultConfig(node.Light)
+	setTimeInterval(cfg, defaultTimeInterval)
+	light := sw.NewNodeWithConfig(
+		node.Light,
+		cfg,
+		nodebuilder.WithBootstrappers([]peer.AddrInfo{*bootstrapper}),
+	)
+
+	// start FN and LN and ensure they are both connected to BN as a bootstrapper
+	nodes := []*nodebuilder.Node{full, light}
+	for index := range nodes {
+		require.NoError(t, nodes[index].Start(ctx))
+		assert.Equal(t, *bootstrapper, nodes[index].Bootstrappers[0])
+		// ensure that node is actually connected to BN
+		client := getAdminClient(ctx, nodes[index], t)
+		connectedness, err := client.P2P.Connectedness(ctx, bootstrapper.ID)
+		require.NoError(t, err)
+		assert.Equal(t, connectedness, network.Connected)
+	}
+
+	for {
+		if ctx.Err() != nil {
+			t.Fatal(ctx.Err())
+		}
+		// LN discovered FN successfully and is now connected
+		client := getAdminClient(ctx, light, t)
+		connectedness, err := client.P2P.Connectedness(ctx, host.InfoFromHost(full.Host).ID)
+		require.NoError(t, err)
+		if connectedness == network.Connected {
+			break
+		}
+	}
+}
+
+/*
+Test-Case: Full node discovery of disconnected full nodes
+Steps:
+1. Create a Bridge Node(BN)
+2. Start a BN
+3. Create 2 FNs with bridge node as bootstrap peer and start them
+4. Check that the FNs discover each other
+5. Disconnect the FNs
+6. 
Create one more node with discovery process disabled (however advertisement is still enabled) +7. Check that the FN with discovery disabled is still found by the other two FNs +*NOTE*: this test will take some time because it relies on several cycles of peer discovery +*/ +func TestRestartNodeDiscovery(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), swamp.DefaultTestTimeout) + t.Cleanup(cancel) + + const ( + defaultTimeInterval = time.Second * 2 + numFulls = 2 + ) + + sw := swamp.NewSwamp(t) + + // create and start a BN as a bootstrapper + fullCfg := nodebuilder.DefaultConfig(node.Bridge) + setTimeInterval(fullCfg, defaultTimeInterval) + bridge := sw.NewNodeWithConfig(node.Bridge, fullCfg) + err := bridge.Start(ctx) + require.NoError(t, err) + + bridgeAddr := host.InfoFromHost(bridge.Host) + + fullCfg = nodebuilder.DefaultConfig(node.Full) + setTimeInterval(fullCfg, defaultTimeInterval) + nodesConfig := nodebuilder.WithBootstrappers([]peer.AddrInfo{*bridgeAddr}) + + // create two FNs and start them, ensuring they are connected to BN as + // bootstrapper + nodes := make([]*nodebuilder.Node, numFulls) + for index := 0; index < numFulls; index++ { + nodes[index] = sw.NewNodeWithConfig(node.Full, fullCfg, nodesConfig) + require.NoError(t, nodes[index].Start(ctx)) + client := getAdminClient(ctx, nodes[index], t) + connectedness, err := client.P2P.Connectedness(ctx, bridgeAddr.ID) + require.NoError(t, err) + assert.Equal(t, connectedness, network.Connected) + } + + // ensure FNs are connected to each other + fullClient1 := getAdminClient(ctx, nodes[0], t) + fullClient2 := getAdminClient(ctx, nodes[1], t) + + connectedness, err := fullClient1.P2P.Connectedness(ctx, nodes[1].Host.ID()) + require.NoError(t, err) + assert.Equal(t, connectedness, network.Connected) + + // disconnect the FNs + sw.Disconnect(t, nodes[0], nodes[1]) + + // create and start one more FN with disabled discovery + disabledDiscoveryFN := sw.NewNodeWithConfig(node.Full, fullCfg, nodesConfig) + require.NoError(t, err) + + // ensure that the FN with disabled discovery is discovered by both of the + // running FNs that have discovery enabled + connectedness, err = fullClient1.P2P.Connectedness(ctx, disabledDiscoveryFN.Host.ID()) + require.NoError(t, err) + assert.Equal(t, connectedness, network.Connected) + + connectedness, err = fullClient2.P2P.Connectedness(ctx, disabledDiscoveryFN.Host.ID()) + require.NoError(t, err) + assert.Equal(t, connectedness, network.Connected) +} diff --git a/nodebuilder/tests/reconstruct_test.go b/nodebuilder/tests/reconstruct_test.go new file mode 100644 index 0000000000..d047182669 --- /dev/null +++ b/nodebuilder/tests/reconstruct_test.go @@ -0,0 +1,363 @@ +//go:build reconstruction || integration + +package tests + +import ( + "context" + "os" + "testing" + "time" + + "github.com/libp2p/go-libp2p/core/event" + "github.com/libp2p/go-libp2p/core/host" + "github.com/libp2p/go-libp2p/core/peer" + ma "github.com/multiformats/go-multiaddr" + "github.com/stretchr/testify/require" + "golang.org/x/sync/errgroup" + + "github.com/celestiaorg/celestia-node/nodebuilder" + "github.com/celestiaorg/celestia-node/nodebuilder/node" + "github.com/celestiaorg/celestia-node/nodebuilder/p2p" + "github.com/celestiaorg/celestia-node/nodebuilder/tests/swamp" + "github.com/celestiaorg/celestia-node/share/availability/light" + "github.com/celestiaorg/celestia-node/share/eds" +) + +/* +Test-Case: Full Node reconstructs blocks from a Bridge node +Pre-Reqs: +- First 20 blocks have a block size of 16 
+- Blocktime is 100 ms +Steps: +1. Create a Bridge Node(BN) +2. Start a BN +3. Create a Full Node(FN) with BN as a trusted peer +4. Start a FN +5. Check that a FN can retrieve shares from 1 to 20 blocks +*/ +func TestFullReconstructFromBridge(t *testing.T) { + const ( + blocks = 20 + bsize = 16 + btime = time.Millisecond * 300 + ) + + ctx, cancel := context.WithTimeout(context.Background(), swamp.DefaultTestTimeout) + t.Cleanup(cancel) + sw := swamp.NewSwamp(t, swamp.WithBlockTime(btime)) + fillDn := swamp.FillBlocks(ctx, sw.ClientContext, sw.Accounts, bsize, blocks) + + bridge := sw.NewBridgeNode() + err := bridge.Start(ctx) + require.NoError(t, err) + bridgeClient := getAdminClient(ctx, bridge, t) + + // TODO: This is required to avoid flakes coming from unfinished retry + // mechanism for the same peer in go-header + _, err = bridgeClient.Header.WaitForHeight(ctx, uint64(blocks)) + require.NoError(t, err) + + cfg := nodebuilder.DefaultConfig(node.Full) + cfg.Share.UseShareExchange = false + cfg.Header.TrustedPeers = append(cfg.Header.TrustedPeers, getMultiAddr(t, bridge.Host)) + full := sw.NewNodeWithConfig(node.Full, cfg) + err = full.Start(ctx) + require.NoError(t, err) + fullClient := getAdminClient(ctx, full, t) + + errg, bctx := errgroup.WithContext(ctx) + for i := 1; i <= blocks+1; i++ { + i := i + errg.Go(func() error { + h, err := fullClient.Header.WaitForHeight(bctx, uint64(i)) + if err != nil { + return err + } + + return fullClient.Share.SharesAvailable(bctx, h) + }) + } + require.NoError(t, <-fillDn) + require.NoError(t, errg.Wait()) +} + +/* +Test-Case: Full Node reconstructs blocks from each other, after unsuccessfully syncing the complete +block from LN subnetworks. Analog to TestShareAvailable_DisconnectedFullNodes. +*/ +func TestFullReconstructFromFulls(t *testing.T) { + if testing.Short() { + t.Skip() + } + + light.DefaultSampleAmount = 10 // s + const ( + blocks = 10 + btime = time.Millisecond * 300 + bsize = 8 // k + lnodes = 12 // c - total number of nodes on two subnetworks + ) + + ctx, cancel := context.WithTimeout(context.Background(), swamp.DefaultTestTimeout) + t.Cleanup(cancel) + + sw := swamp.NewSwamp(t, swamp.WithBlockTime(btime)) + fillDn := swamp.FillBlocks(ctx, sw.ClientContext, sw.Accounts, bsize, blocks) + + const defaultTimeInterval = time.Second * 5 + bridge := sw.NewBridgeNode() + + sw.SetBootstrapper(t, bridge) + require.NoError(t, bridge.Start(ctx)) + bridgeClient := getAdminClient(ctx, bridge, t) + + // TODO: This is required to avoid flakes coming from unfinished retry + // mechanism for the same peer in go-header + _, err := bridgeClient.Header.WaitForHeight(ctx, uint64(blocks)) + require.NoError(t, err) + + lights1 := make([]*nodebuilder.Node, lnodes/2) + lights2 := make([]*nodebuilder.Node, lnodes/2) + subs := make([]event.Subscription, lnodes) + errg, errCtx := errgroup.WithContext(ctx) + for i := 0; i < lnodes/2; i++ { + i := i + errg.Go(func() error { + lnConfig := nodebuilder.DefaultConfig(node.Light) + setTimeInterval(lnConfig, defaultTimeInterval) + light := sw.NewNodeWithConfig(node.Light, lnConfig) + sub, err := light.Host.EventBus().Subscribe(&event.EvtPeerIdentificationCompleted{}) + if err != nil { + return err + } + subs[i] = sub + lights1[i] = light + return light.Start(errCtx) + }) + errg.Go(func() error { + lnConfig := nodebuilder.DefaultConfig(node.Light) + setTimeInterval(lnConfig, defaultTimeInterval) + light := sw.NewNodeWithConfig(node.Light, lnConfig) + sub, err := 
light.Host.EventBus().Subscribe(&event.EvtPeerIdentificationCompleted{}) + if err != nil { + return err + } + subs[(lnodes/2)+i] = sub + lights2[i] = light + return light.Start(errCtx) + }) + } + + require.NoError(t, errg.Wait()) + + for i := 0; i < lnodes; i++ { + select { + case <-ctx.Done(): + t.Fatal("peer was not found") + case <-subs[i].Out(): + require.NoError(t, subs[i].Close()) + continue + } + } + + // Remove bootstrappers to prevent FNs from connecting to bridge + sw.Bootstrappers = []ma.Multiaddr{} + // Use light nodes from respective subnetworks as bootstrappers to prevent connection to bridge + lnBootstrapper1, err := peer.AddrInfoToP2pAddrs(host.InfoFromHost(lights1[0].Host)) + require.NoError(t, err) + lnBootstrapper2, err := peer.AddrInfoToP2pAddrs(host.InfoFromHost(lights2[0].Host)) + require.NoError(t, err) + + cfg := nodebuilder.DefaultConfig(node.Full) + setTimeInterval(cfg, defaultTimeInterval) + cfg.Share.UseShareExchange = false + cfg.Share.Discovery.PeersLimit = 0 + cfg.Header.TrustedPeers = []string{lnBootstrapper1[0].String()} + full1 := sw.NewNodeWithConfig(node.Full, cfg) + cfg.Header.TrustedPeers = []string{lnBootstrapper2[0].String()} + full2 := sw.NewNodeWithConfig(node.Full, cfg) + require.NoError(t, full1.Start(ctx)) + require.NoError(t, full2.Start(ctx)) + + fullClient1 := getAdminClient(ctx, full1, t) + fullClient2 := getAdminClient(ctx, full2, t) + + // Form topology + for i := 0; i < lnodes/2; i++ { + // Separate light nodes into two subnetworks + for j := 0; j < lnodes/2; j++ { + sw.Disconnect(t, lights1[i], lights2[j]) + if i != j { + sw.Connect(t, lights1[i], lights1[j]) + sw.Connect(t, lights2[i], lights2[j]) + } + } + + sw.Connect(t, full1, lights1[i]) + sw.Disconnect(t, full1, lights2[i]) + + sw.Connect(t, full2, lights2[i]) + sw.Disconnect(t, full2, lights1[i]) + } + + // Ensure the fulls are not connected to the bridge + sw.Disconnect(t, full1, full2) + sw.Disconnect(t, full1, bridge) + sw.Disconnect(t, full2, bridge) + + h, err := fullClient1.Header.WaitForHeight(ctx, uint64(10+blocks-1)) + require.NoError(t, err) + + // Ensure that the full nodes cannot reconstruct before being connected to each other + ctxErr, cancelErr := context.WithTimeout(ctx, time.Second*30) + errg, errCtx = errgroup.WithContext(ctxErr) + errg.Go(func() error { + return fullClient1.Share.SharesAvailable(errCtx, h) + }) + errg.Go(func() error { + return fullClient1.Share.SharesAvailable(errCtx, h) + }) + require.Error(t, errg.Wait()) + cancelErr() + + // Reconnect FNs + sw.Connect(t, full1, full2) + + errg, bctx := errgroup.WithContext(ctx) + for i := 10; i < blocks+11; i++ { + h, err := fullClient1.Header.WaitForHeight(bctx, uint64(i)) + require.NoError(t, err) + errg.Go(func() error { + return fullClient1.Share.SharesAvailable(bctx, h) + }) + errg.Go(func() error { + return fullClient2.Share.SharesAvailable(bctx, h) + }) + } + + require.NoError(t, <-fillDn) + require.NoError(t, errg.Wait()) +} + +/* +Test-Case: Full Node reconstructs blocks only from Light Nodes +Pre-Reqs: +- First 20 blocks have a block size of 16 +- Blocktime is 100 ms +Steps: +1. Create a Bridge Node(BN) +2. Start a BN +3. Create a Full Node(FN) that will act as a bootstrapper +4. Create 69 Light Nodes(LNs) with BN as a trusted peer and a bootstrapper +5. Start 69 LNs +6. Create a Full Node(FN) with a bootstrapper +7. Unlink FN connection to BN +8. Start a FN +9. 
Check that the FN can retrieve shares from 1 to 20 blocks +*/ +func TestFullReconstructFromLights(t *testing.T) { + if testing.Short() { + t.Skip() + } + + eds.RetrieveQuadrantTimeout = time.Millisecond * 100 + light.DefaultSampleAmount = 20 + const ( + blocks = 20 + btime = time.Millisecond * 300 + bsize = 16 + lnodes = 69 + ) + + ctx, cancel := context.WithTimeout(context.Background(), swamp.DefaultTestTimeout) + + t.Cleanup(cancel) + sw := swamp.NewSwamp(t, swamp.WithBlockTime(btime)) + fillDn := swamp.FillBlocks(ctx, sw.ClientContext, sw.Accounts, bsize, blocks) + + const defaultTimeInterval = time.Second * 5 + cfg := nodebuilder.DefaultConfig(node.Full) + setTimeInterval(cfg, defaultTimeInterval) + + bridge := sw.NewBridgeNode() + addrsBridge, err := peer.AddrInfoToP2pAddrs(host.InfoFromHost(bridge.Host)) + require.NoError(t, err) + + os.Setenv(p2p.EnvKeyCelestiaBootstrapper, "true") + cfg.Header.TrustedPeers = []string{ + "/ip4/1.2.3.4/tcp/12345/p2p/12D3KooWNaJ1y1Yio3fFJEXCZyd1Cat3jmrPdgkYCrHfKD3Ce21p", + } + bootstrapper := sw.NewNodeWithConfig(node.Full, cfg) + require.NoError(t, bootstrapper.Start(ctx)) + bootstrapperAddr := host.InfoFromHost(bootstrapper.Host) + + require.NoError(t, bridge.Start(ctx)) + bridgeClient := getAdminClient(ctx, bridge, t) + + // TODO: This is required to avoid flakes coming from unfinished retry + // mechanism for the same peer in go-header + _, err = bridgeClient.Header.WaitForHeight(ctx, uint64(blocks)) + require.NoError(t, err) + + cfg = nodebuilder.DefaultConfig(node.Full) + setTimeInterval(cfg, defaultTimeInterval) + cfg.Share.UseShareExchange = false + cfg.Header.TrustedPeers = append(cfg.Header.TrustedPeers, addrsBridge[0].String()) + nodesConfig := nodebuilder.WithBootstrappers([]peer.AddrInfo{*bootstrapperAddr}) + full := sw.NewNodeWithConfig(node.Full, cfg, nodesConfig) + os.Setenv(p2p.EnvKeyCelestiaBootstrapper, "false") + + lights := make([]*nodebuilder.Node, lnodes) + subs := make([]event.Subscription, lnodes) + errg, errCtx := errgroup.WithContext(ctx) + for i := 0; i < lnodes; i++ { + i := i + errg.Go(func() error { + lnConfig := nodebuilder.DefaultConfig(node.Light) + setTimeInterval(lnConfig, defaultTimeInterval) + lnConfig.Header.TrustedPeers = append(lnConfig.Header.TrustedPeers, addrsBridge[0].String()) + light := sw.NewNodeWithConfig(node.Light, lnConfig, nodesConfig) + sub, err := light.Host.EventBus().Subscribe(&event.EvtPeerIdentificationCompleted{}) + if err != nil { + return err + } + subs[i] = sub + lights[i] = light + return light.Start(errCtx) + }) + } + + require.NoError(t, errg.Wait()) + require.NoError(t, full.Start(ctx)) + fullClient := getAdminClient(ctx, full, t) + + for i := 0; i < lnodes; i++ { + select { + case <-ctx.Done(): + t.Fatal("peer was not found") + case <-subs[i].Out(): + require.NoError(t, subs[i].Close()) + continue + } + } + errg, bctx := errgroup.WithContext(ctx) + for i := 1; i <= blocks+1; i++ { + i := i + errg.Go(func() error { + h, err := fullClient.Header.WaitForHeight(bctx, uint64(i)) + if err != nil { + return err + } + + return fullClient.Share.SharesAvailable(bctx, h) + }) + } + require.NoError(t, <-fillDn) + require.NoError(t, errg.Wait()) +} + +func getMultiAddr(t *testing.T, h host.Host) string { + addrs, err := peer.AddrInfoToP2pAddrs(host.InfoFromHost(h)) + require.NoError(t, err) + return addrs[0].String() +} diff --git a/nodebuilder/tests/swamp/config.go b/nodebuilder/tests/swamp/config.go new file mode 100644 index 0000000000..047baa9f59 --- /dev/null +++ 
b/nodebuilder/tests/swamp/config.go
@@ -0,0 +1,32 @@
+package swamp
+
+import (
+	"time"
+
+	"github.com/celestiaorg/celestia-app/test/util/testnode"
+
+	"github.com/celestiaorg/celestia-node/core"
+)
+
+// DefaultConfig creates a celestia-app instance with a block time of around
+// 200ms
+func DefaultConfig() *testnode.Config {
+	cfg := core.DefaultTestConfig()
+	// timeout commit values lower than this tend to be flakier
+	cfg.TmConfig.Consensus.TimeoutCommit = 200 * time.Millisecond
+	return cfg
+}
+
+// Option for modifying Swamp's Config.
+type Option func(*testnode.Config)
+
+// WithBlockTime sets a custom interval for block creation.
+func WithBlockTime(t time.Duration) Option {
+	return func(c *testnode.Config) {
+		// for empty blocks
+		c.TmConfig.Consensus.CreateEmptyBlocksInterval = t
+		// for filled blocks
+		c.TmConfig.Consensus.TimeoutCommit = t
+		c.TmConfig.Consensus.SkipTimeoutCommit = false
+	}
+}
diff --git a/node/tests/swamp/img/test_swamp.svg b/nodebuilder/tests/swamp/img/test_swamp.svg
similarity index 100%
rename from node/tests/swamp/img/test_swamp.svg
rename to nodebuilder/tests/swamp/img/test_swamp.svg
diff --git a/nodebuilder/tests/swamp/swamp.go b/nodebuilder/tests/swamp/swamp.go
new file mode 100644
index 0000000000..9faf69744d
--- /dev/null
+++ b/nodebuilder/tests/swamp/swamp.go
@@ -0,0 +1,354 @@
+package swamp
+
+import (
+	"context"
+	"crypto/rand"
+	"fmt"
+	"net"
+	"sync"
+	"testing"
+	"time"
+
+	ds "github.com/ipfs/go-datastore"
+	ds_sync "github.com/ipfs/go-datastore/sync"
+	"github.com/libp2p/go-libp2p/core/host"
+	"github.com/libp2p/go-libp2p/core/peer"
+	mocknet "github.com/libp2p/go-libp2p/p2p/net/mock"
+	ma "github.com/multiformats/go-multiaddr"
+	"github.com/stretchr/testify/require"
+	"github.com/tendermint/tendermint/privval"
+	"github.com/tendermint/tendermint/types"
+	"go.uber.org/fx"
+	"golang.org/x/exp/maps"
+
+	"github.com/celestiaorg/celestia-app/test/util/testnode"
+	apptypes "github.com/celestiaorg/celestia-app/x/blob/types"
+	libhead "github.com/celestiaorg/go-header"
+
+	"github.com/celestiaorg/celestia-node/core"
+	"github.com/celestiaorg/celestia-node/header"
+	"github.com/celestiaorg/celestia-node/libs/keystore"
+	"github.com/celestiaorg/celestia-node/logs"
+	"github.com/celestiaorg/celestia-node/nodebuilder"
+	coremodule "github.com/celestiaorg/celestia-node/nodebuilder/core"
+	"github.com/celestiaorg/celestia-node/nodebuilder/node"
+	"github.com/celestiaorg/celestia-node/nodebuilder/p2p"
+	"github.com/celestiaorg/celestia-node/nodebuilder/state"
+	"github.com/celestiaorg/celestia-node/share/eds"
+)
+
+var blackholeIP6 = net.ParseIP("100::")
+
+// DefaultTestTimeout should be used as the default timeout on all the Swamp tests.
+// It's generously set to 5 minutes to give enough time for CI.
+const DefaultTestTimeout = time.Minute * 5
+
+// Swamp represents the main functionality that is needed for the test-case:
+// - Network to link the nodes
+// - CoreClient to share between Bridge nodes
+// - Slices of created Bridge/Full/Light Nodes
+// - trustedHash taken from the CoreClient and shared between nodes
+type Swamp struct {
+	t   *testing.T
+	cfg *testnode.Config
+
+	Network       mocknet.Mocknet
+	Bootstrappers []ma.Multiaddr
+
+	ClientContext testnode.Context
+	Accounts      []string
+
+	nodesMu sync.Mutex
+	nodes   map[*nodebuilder.Node]struct{}
+
+	genesis *header.ExtendedHeader
+}
+
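Before the constructor itself, a sketch of how a test typically drives the Swamp, echoing the README earlier in this diff (the test name is hypothetical; every other name appears in this diff):

```go
package tests

import (
	"context"
	"testing"
	"time"

	"github.com/libp2p/go-libp2p/core/host"
	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/stretchr/testify/require"

	"github.com/celestiaorg/celestia-node/nodebuilder"
	"github.com/celestiaorg/celestia-node/nodebuilder/node"
	"github.com/celestiaorg/celestia-node/nodebuilder/tests/swamp"
)

// TestSwampSketch is a hypothetical end-to-end example of driving the Swamp.
func TestSwampSketch(t *testing.T) {
	ctx, cancel := context.WithTimeout(context.Background(), swamp.DefaultTestTimeout)
	t.Cleanup(cancel)

	// one shared consensus node backs the whole swamp; ~300ms blocks
	sw := swamp.NewSwamp(t, swamp.WithBlockTime(time.Millisecond*300))

	bridge := sw.NewBridgeNode()
	require.NoError(t, bridge.Start(ctx))

	// a light node that trusts the bridge
	cfg := nodebuilder.DefaultConfig(node.Light)
	addrs, err := peer.AddrInfoToP2pAddrs(host.InfoFromHost(bridge.Host))
	require.NoError(t, err)
	cfg.Header.TrustedPeers = append(cfg.Header.TrustedPeers, addrs[0].String())
	light := sw.NewNodeWithConfig(node.Light, cfg)
	require.NoError(t, light.Start(ctx))
}
```

+// NewSwamp creates a new instance of Swamp.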
+func NewSwamp(t *testing.T, options ...Option) *Swamp {
+	if testing.Verbose() {
+		logs.SetDebugLogging()
+	}
+
+	ic := DefaultConfig()
+	for _, option := range options {
+		option(ic)
+	}
+
+	// We assume the consensus mechanism is already tested out, so instead of
+	// creating a bridge node with its own core client for each one, we assign
+	// all created BNs to the single Core node of the swamp.
+	ic.WithChainID("private")
+	cctx := core.StartTestNodeWithConfig(t, ic)
+	swp := &Swamp{
+		t:             t,
+		cfg:           ic,
+		Network:       mocknet.New(),
+		ClientContext: cctx,
+		Accounts:      ic.Accounts,
+		nodes:         map[*nodebuilder.Node]struct{}{},
+	}
+
+	swp.t.Cleanup(swp.cleanup)
+	swp.setupGenesis()
+	return swp
+}
+
+// cleanup frees up all the resources,
+// including stopping all created nodes.
+func (s *Swamp) cleanup() {
+	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
+	defer cancel()
+
+	require.NoError(s.t, s.Network.Close())
+
+	s.nodesMu.Lock()
+	defer s.nodesMu.Unlock()
+	maps.DeleteFunc(s.nodes, func(nd *nodebuilder.Node, _ struct{}) bool {
+		require.NoError(s.t, nd.Stop(ctx))
+		return true
+	})
+}
+
+// GetCoreBlockHashByHeight returns a tendermint block's hash at the provided height.
+func (s *Swamp) GetCoreBlockHashByHeight(ctx context.Context, height int64) libhead.Hash {
+	b, err := s.ClientContext.Client.Block(ctx, &height)
+	require.NoError(s.t, err)
+	return libhead.Hash(b.BlockID.Hash)
+}
+
+// WaitTillHeight holds the test execution until the given number of blocks
+// has been produced by the CoreClient.
+func (s *Swamp) WaitTillHeight(ctx context.Context, height int64) libhead.Hash {
+	require.Greater(s.t, height, int64(0))
+
+	t := time.NewTicker(time.Millisecond * 50)
+	defer t.Stop()
+	for {
+		select {
+		case <-ctx.Done():
+			require.NoError(s.t, ctx.Err())
+		case <-t.C:
+			latest, err := s.ClientContext.LatestHeight()
+			require.NoError(s.t, err)
+			if latest >= height {
+				res, err := s.ClientContext.Client.Block(ctx, &latest)
+				require.NoError(s.t, err)
+				return libhead.Hash(res.BlockID.Hash)
+			}
+		}
+	}
+}
+
+// createPeer is a helper for celestia nodes to initialize
+// with a real key instead of using a bogus one.
+func (s *Swamp) createPeer(ks keystore.Keystore) host.Host {
+	key, err := p2p.Key(ks)
+	require.NoError(s.t, err)
+
+	// the IPv6 address will start with 100::
+	token := make([]byte, 12)
+	_, _ = rand.Read(token)
+	ip := append(net.IP{}, blackholeIP6...)
+	copy(ip[net.IPv6len-len(token):], token)
+
+	// see the GenPeer func in libp2p/p2p/net/mock/mock_net.go
+	// for how we generate a new multiaddr for a new peer
+	a, err := ma.NewMultiaddr(fmt.Sprintf("/ip6/%s/tcp/4242", ip))
+	require.NoError(s.t, err)
+
+	host, err := s.Network.AddPeer(key, a)
+	require.NoError(s.t, err)
+
+	require.NoError(s.t, s.Network.LinkAll())
+	return host
+}
+
+// setupGenesis sets up the genesis Header.
+// This is required for nodes to initialize and start correctly.
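+// It waits for the core chain to reach height 2, then fetches the header at
+// height 1 through a core exchange and caches it as the swamp's genesis, so
+// that every node created afterwards is initialized from the same header.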
+func (s *Swamp) setupGenesis() {
+	ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
+	defer cancel()
+
+	// ensure the core chain has surpassed the genesis block
+	s.WaitTillHeight(ctx, 2)
+
+	ds := ds_sync.MutexWrap(ds.NewMapDatastore())
+	store, err := eds.NewStore(eds.DefaultParameters(), s.t.TempDir(), ds)
+	require.NoError(s.t, err)
+
+	ex, err := core.NewExchange(
+		core.NewBlockFetcher(s.ClientContext.Client),
+		store,
+		header.MakeExtendedHeader,
+	)
+	require.NoError(s.t, err)
+
+	h, err := ex.GetByHeight(ctx, 1)
+	require.NoError(s.t, err)
+	s.genesis = h
+}
+
+// DefaultTestConfig creates a test config with access to the core node for the
+// given node type.
+func (s *Swamp) DefaultTestConfig(tp node.Type) *nodebuilder.Config {
+	cfg := nodebuilder.DefaultConfig(tp)
+
+	ip, port, err := net.SplitHostPort(s.cfg.AppConfig.GRPC.Address)
+	require.NoError(s.t, err)
+
+	cfg.Core.IP = ip
+	cfg.Core.GRPCPort = port
+	return cfg
+}
+
+// NewBridgeNode creates a new instance of a BridgeNode providing a default config
+// and a mockstore to the NewNodeWithStore method
+func (s *Swamp) NewBridgeNode(options ...fx.Option) *nodebuilder.Node {
+	cfg := s.DefaultTestConfig(node.Bridge)
+	store := nodebuilder.MockStore(s.t, cfg)
+
+	return s.NewNodeWithStore(node.Bridge, store, options...)
+}
+
+// NewFullNode creates a new instance of a FullNode providing a default config
+// and a mockstore to the NewNodeWithStore method
+func (s *Swamp) NewFullNode(options ...fx.Option) *nodebuilder.Node {
+	cfg := s.DefaultTestConfig(node.Full)
+	// start from a non-routable placeholder trusted peer;
+	// the suite's real bootstrappers are appended below
+	cfg.Header.TrustedPeers = []string{
+		"/ip4/1.2.3.4/tcp/12345/p2p/12D3KooWNaJ1y1Yio3fFJEXCZyd1Cat3jmrPdgkYCrHfKD3Ce21p",
+	}
+	// add all bootstrappers in suite as trusted peers
+	for _, bootstrapper := range s.Bootstrappers {
+		cfg.Header.TrustedPeers = append(cfg.Header.TrustedPeers, bootstrapper.String())
+	}
+	store := nodebuilder.MockStore(s.t, cfg)
+
+	return s.NewNodeWithStore(node.Full, store, options...)
+}
+
+// NewLightNode creates a new instance of a LightNode providing a default config
+// and a mockstore to the NewNodeWithStore method
+func (s *Swamp) NewLightNode(options ...fx.Option) *nodebuilder.Node {
+	cfg := s.DefaultTestConfig(node.Light)
+	// start from a non-routable placeholder trusted peer;
+	// the suite's real bootstrappers are appended below
+	cfg.Header.TrustedPeers = []string{
+		"/ip4/1.2.3.4/tcp/12345/p2p/12D3KooWNaJ1y1Yio3fFJEXCZyd1Cat3jmrPdgkYCrHfKD3Ce21p",
+	}
+	// add all bootstrappers in suite as trusted peers
+	for _, bootstrapper := range s.Bootstrappers {
+		cfg.Header.TrustedPeers = append(cfg.Header.TrustedPeers, bootstrapper.String())
+	}
+
+	store := nodebuilder.MockStore(s.t, cfg)
+
+	return s.NewNodeWithStore(node.Light, store, options...)
+}
+
+// NewNodeWithConfig creates a new instance of a node of the given type
+// with the given config, providing a mockstore to the NewNodeWithStore method.
+func (s *Swamp) NewNodeWithConfig(nodeType node.Type, cfg *nodebuilder.Config, options ...fx.Option) *nodebuilder.Node {
+	store := nodebuilder.MockStore(s.t, cfg)
+	// add all bootstrappers in suite as trusted peers
+	for _, bootstrapper := range s.Bootstrappers {
+		cfg.Header.TrustedPeers = append(cfg.Header.TrustedPeers, bootstrapper.String())
+	}
+	return s.NewNodeWithStore(nodeType, store, options...)
+}
+
+// NewNodeWithStore creates a new instance of Node with a predefined Store.
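+// Every node is given a keyring signer for the swamp's first test account;
+// bridge nodes are additionally wired directly to the swamp's core client.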
+func (s *Swamp) NewNodeWithStore(
+	tp node.Type,
+	store nodebuilder.Store,
+	options ...fx.Option,
+) *nodebuilder.Node {
+	signer := apptypes.NewKeyringSigner(s.ClientContext.Keyring, s.Accounts[0], s.ClientContext.ChainID)
+	options = append(options,
+		state.WithKeyringSigner(signer),
+	)
+
+	switch tp {
+	case node.Bridge:
+		options = append(options,
+			coremodule.WithClient(s.ClientContext.Client),
+		)
+	default:
+	}
+
+	nd := s.newNode(tp, store, options...)
+	s.nodesMu.Lock()
+	s.nodes[nd] = struct{}{}
+	s.nodesMu.Unlock()
+	return nd
+}
+
+func (s *Swamp) newNode(t node.Type, store nodebuilder.Store, options ...fx.Option) *nodebuilder.Node {
+	ks, err := store.Keystore()
+	require.NoError(s.t, err)
+
+	// TODO(@Bidon15): If, for some reason, we receive one of the existing options
+	// (e.g. from the test case), we need to check it and not use the
+	// defaults that are set here
+	cfg, _ := store.Config()
+	cfg.RPC.Port = "0"
+
+	// tempDir is used for the eds.Store
+	tempDir := s.t.TempDir()
+	options = append(options,
+		p2p.WithHost(s.createPeer(ks)),
+		fx.Replace(node.StorePath(tempDir)),
+		fx.Invoke(func(ctx context.Context, store libhead.Store[*header.ExtendedHeader]) error {
+			return store.Init(ctx, s.genesis)
+		}),
+	)
+	node, err := nodebuilder.New(t, p2p.Private, store, options...)
+	require.NoError(s.t, err)
+	return node
+}
+
+// StopNode stops the node and removes it from the Swamp.
+// TODO(@Wondertan): For a clean and symmetrical API, we may want to add StartNode.
+func (s *Swamp) StopNode(ctx context.Context, nd *nodebuilder.Node) {
+	s.nodesMu.Lock()
+	delete(s.nodes, nd)
+	s.nodesMu.Unlock()
+	require.NoError(s.t, nd.Stop(ctx))
+}
+
+// Connect re-links and connects two peers after a hard disconnection.
+func (s *Swamp) Connect(t *testing.T, peerA, peerB *nodebuilder.Node) {
+	_, err := s.Network.LinkPeers(peerA.Host.ID(), peerB.Host.ID())
+	require.NoError(t, err)
+	_, err = s.Network.ConnectPeers(peerA.Host.ID(), peerB.Host.ID())
+	require.NoError(t, err)
+}
+
+// Disconnect breaks the connection between two peers without any possibility to
+// re-establish it. Order is very important here: we have to unlink the peers first,
+// and only after that call disconnect. This is a hard disconnect, and the peers will
+// not be able to reconnect. In order to reconnect the peers, use swamp.Connect.
+func (s *Swamp) Disconnect(t *testing.T, peerA, peerB *nodebuilder.Node) {
+	require.NoError(t, s.Network.UnlinkPeers(peerA.Host.ID(), peerB.Host.ID()))
+	require.NoError(t, s.Network.DisconnectPeers(peerA.Host.ID(), peerB.Host.ID()))
+}
+
+// SetBootstrapper sets the given bootstrappers as the "bootstrappers" for the
+// Swamp test suite. Every new full or light node created on the suite afterwards
+// will automatically add the suite's bootstrappers as trusted peers to their config.
+// NOTE: Bridge nodes do not automatically add the bootstrappers as trusted peers.
+// NOTE: Use `NewNodeWithStore` to avoid this automatic configuration.
+func (s *Swamp) SetBootstrapper(t *testing.T, bootstrappers ...*nodebuilder.Node) {
+	for _, trusted := range bootstrappers {
+		addrs, err := peer.AddrInfoToP2pAddrs(host.InfoFromHost(trusted.Host))
+		require.NoError(t, err)
+		s.Bootstrappers = append(s.Bootstrappers, addrs[0])
+	}
+}
+
+// Validators retrieves keys from the app node in order to build the validators.
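+// The swamp runs a single validator, so the returned set contains exactly one
+// validator, reconstructed from the test node's priv_validator key and state files.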
+func (s *Swamp) Validators(t *testing.T) (*types.ValidatorSet, types.PrivValidator) {
+	privPath := s.cfg.TmConfig.PrivValidatorKeyFile()
+	statePath := s.cfg.TmConfig.PrivValidatorStateFile()
+	priv := privval.LoadFilePV(privPath, statePath)
+	key, err := priv.GetPubKey()
+	require.NoError(t, err)
+	validator := types.NewValidator(key, 100)
+	set := types.NewValidatorSet([]*types.Validator{validator})
+	return set, priv
+}
diff --git a/nodebuilder/tests/swamp/swamp_tx.go b/nodebuilder/tests/swamp/swamp_tx.go
new file mode 100644
index 0000000000..656b2f341d
--- /dev/null
+++ b/nodebuilder/tests/swamp/swamp_tx.go
@@ -0,0 +1,34 @@
+package swamp
+
+import (
+	"context"
+	"time"
+
+	"github.com/cosmos/cosmos-sdk/client/flags"
+
+	"github.com/celestiaorg/celestia-app/test/util/testnode"
+)
+
+// FillBlocks produces the given number of contiguous blocks with a customizable size.
+// The returned channel reports when the process is finished.
+func FillBlocks(ctx context.Context, cctx testnode.Context, accounts []string, bsize, blocks int) chan error {
+	errCh := make(chan error)
+	go func() {
+		// TODO: FillBlock must respect the context.
+		// Filling blocks does not work correctly without this sleep right now.
+		time.Sleep(time.Millisecond * 50)
+		var err error
+		for i := 0; i < blocks; i++ {
+			_, err = cctx.FillBlock(bsize, accounts, flags.BroadcastBlock)
+			if err != nil {
+				break
+			}
+		}
+
+		select {
+		case errCh <- err:
+		case <-ctx.Done():
+		}
+	}()
+	return errCh
+}
diff --git a/nodebuilder/tests/sync_test.go b/nodebuilder/tests/sync_test.go
new file mode 100644
index 0000000000..ec8386ea43
--- /dev/null
+++ b/nodebuilder/tests/sync_test.go
@@ -0,0 +1,413 @@
+//go:build sync || integration
+
+package tests
+
+import (
+	"context"
+	"testing"
+	"time"
+
+	ma "github.com/multiformats/go-multiaddr"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	"github.com/celestiaorg/celestia-node/nodebuilder/tests/swamp"
+)
+
+// Common consts for tests producing filled blocks
+const (
+	numBlocks = 20
+	bsize     = 16
+	sbtime    = time.Millisecond * 300
+)
+
+/*
+Test-Case: Header and block/sample sync against a Bridge Node of non-empty blocks.
+
+Steps:
+1. Create a Bridge Node(BN)
+2. Start a BN
+3. Check BN is synced to height 20
+
+Light node:
+4. Create a Light Node (LN) with bridge as a trusted peer
+5. Start a LN with a defined connection to the BN
+6. Check LN is header-synced to height 20
+7. Wait until LN has sampled height 20
+8. Wait for LN DASer to catch up to network head
+
+Full node:
+4. Create a Full Node (FN) with bridge as a trusted peer
+5. Start a FN with a defined connection to the BN
+6. Check FN is header-synced to height 20
+7. Wait until FN has synced block at height 20
+8. 
Wait for FN DASer to catch up to network head +*/ +func TestSyncAgainstBridge_NonEmptyChain(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), swamp.DefaultTestTimeout) + t.Cleanup(cancel) + + sw := swamp.NewSwamp(t, swamp.WithBlockTime(sbtime)) + // wait for core network to fill 20 blocks + fillDn := swamp.FillBlocks(ctx, sw.ClientContext, sw.Accounts, bsize, numBlocks) + sw.WaitTillHeight(ctx, numBlocks) + + // create a bridge node and set it as the bootstrapper for the suite + bridge := sw.NewBridgeNode() + sw.SetBootstrapper(t, bridge) + // start bridge and wait for it to sync to 20 + err := bridge.Start(ctx) + require.NoError(t, err) + bridgeClient := getAdminClient(ctx, bridge, t) + + h, err := bridgeClient.Header.WaitForHeight(ctx, numBlocks) + require.NoError(t, err) + require.EqualValues(t, h.Commit.BlockID.Hash, sw.GetCoreBlockHashByHeight(ctx, numBlocks)) + + t.Run("light sync against bridge", func(t *testing.T) { + // create a light node that is connected to the bridge node as + // a bootstrapper + light := sw.NewLightNode() + // start light node and wait for it to sync 20 blocks + err = light.Start(ctx) + require.NoError(t, err) + lightClient := getAdminClient(ctx, light, t) + h, err = lightClient.Header.WaitForHeight(ctx, numBlocks) + require.NoError(t, err) + assert.EqualValues(t, h.Commit.BlockID.Hash, sw.GetCoreBlockHashByHeight(ctx, numBlocks)) + + // check that the light node has also sampled over the block at height 20 + err = lightClient.Share.SharesAvailable(ctx, h) + assert.NoError(t, err) + + // wait until the entire chain (up to network head) has been sampled + err = lightClient.DAS.WaitCatchUp(ctx) + require.NoError(t, err) + }) + + t.Run("full sync against bridge", func(t *testing.T) { + // create a full node with bridge node as its bootstrapper + full := sw.NewFullNode() + // let full node sync 20 blocks + err = full.Start(ctx) + require.NoError(t, err) + fullClient := getAdminClient(ctx, full, t) + h, err = fullClient.Header.WaitForHeight(ctx, numBlocks) + require.NoError(t, err) + assert.EqualValues(t, h.Commit.BlockID.Hash, sw.GetCoreBlockHashByHeight(ctx, numBlocks)) + + // check to ensure the full node can sync the 20th block's data + err = fullClient.Share.SharesAvailable(ctx, h) + assert.NoError(t, err) + + // wait for full node to sync up the blocks from genesis -> network head. + err = fullClient.DAS.WaitCatchUp(ctx) + require.NoError(t, err) + }) + + // wait for the core block filling process to exit + select { + case <-ctx.Done(): + t.Fatal(ctx.Err()) + case err := <-fillDn: + require.NoError(t, err) + } +} + +/* +Test-Case: Header and block/sample sync against a Bridge Node of empty blocks. + +Steps: +1. Create a Bridge Node(BN) +2. Start a BN +3. Check BN is synced to height 20 + +Light node: +4. Create a Light Node (LN) with bridge as a trusted peer +5. Start a LN with a defined connection to the BN +6. Check LN is header-synced to height 20 +7. Wait until LN has sampled height 20 +8. Wait for LN DASer to catch up to network head + +Full node: +4. Create a Full Node (FN) with bridge as a trusted peer +5. Start a FN with a defined connection to the BN +6. Check FN is header-synced to height 20 +7. Wait until FN has synced block at height 20 +8. 
Wait for FN DASer to catch up to network head
+*/
+func TestSyncAgainstBridge_EmptyChain(t *testing.T) {
+	ctx, cancel := context.WithTimeout(context.Background(), swamp.DefaultTestTimeout)
+	t.Cleanup(cancel)
+
+	sw := swamp.NewSwamp(t, swamp.WithBlockTime(sbtime))
+	sw.WaitTillHeight(ctx, numBlocks)
+
+	// create bridge node and set it as the bootstrapper for the suite
+	bridge := sw.NewBridgeNode()
+	sw.SetBootstrapper(t, bridge)
+	// start bridge and wait for it to sync to 20
+	err := bridge.Start(ctx)
+	require.NoError(t, err)
+	bridgeClient := getAdminClient(ctx, bridge, t)
+	h, err := bridgeClient.Header.WaitForHeight(ctx, numBlocks)
+	require.NoError(t, err)
+	require.EqualValues(t, h.Commit.BlockID.Hash, sw.GetCoreBlockHashByHeight(ctx, numBlocks))
+
+	t.Run("light sync against bridge", func(t *testing.T) {
+		// create a light node that is connected to the bridge node as
+		// a bootstrapper
+		light := sw.NewLightNode()
+		// start light node and wait for it to sync 20 blocks
+		err = light.Start(ctx)
+		require.NoError(t, err)
+		lightClient := getAdminClient(ctx, light, t)
+		h, err = lightClient.Header.WaitForHeight(ctx, numBlocks)
+		require.NoError(t, err)
+		assert.EqualValues(t, h.Commit.BlockID.Hash, sw.GetCoreBlockHashByHeight(ctx, numBlocks))
+
+		// check that the light node has also sampled over the block at height 20
+		err = lightClient.Share.SharesAvailable(ctx, h)
+		assert.NoError(t, err)
+
+		// wait until the entire chain (up to network head) has been sampled
+		err = lightClient.DAS.WaitCatchUp(ctx)
+		require.NoError(t, err)
+	})
+
+	t.Run("full sync against bridge", func(t *testing.T) {
+		// create a full node with bridge node as its bootstrapper
+		full := sw.NewFullNode()
+		// let full node sync 20 blocks
+		err = full.Start(ctx)
+		require.NoError(t, err)
+		fullClient := getAdminClient(ctx, full, t)
+		h, err = fullClient.Header.WaitForHeight(ctx, numBlocks)
+		require.NoError(t, err)
+		assert.EqualValues(t, h.Commit.BlockID.Hash, sw.GetCoreBlockHashByHeight(ctx, numBlocks))
+
+		// check to ensure the full node can sync the 20th block's data
+		err = fullClient.Share.SharesAvailable(ctx, h)
+		assert.NoError(t, err)
+
+		// wait for full node to sync up the blocks from genesis -> network head.
+		err = fullClient.DAS.WaitCatchUp(ctx)
+		require.NoError(t, err)
+	})
+}
+
+/*
+Test-Case: Light Node continues sync after abrupt stop/start
+Pre-Requisites:
+- CoreClient is started by swamp
+- CoreClient has generated 20+ blocks
+Steps:
+1. Create a Bridge Node(BN)
+2. Start a BN
+3. Check BN is synced to height 20
+4. Create a Light Node(LN) with a trusted peer
+5. Start a LN with a defined connection to the BN
+6. Check LN is synced to height 20
+7. Stop the LN while the BN continues receiving new blocks from core
+8. Start a new LN and let it sync up again
+9. 
Check LN is synced to height 40
+*/
+func TestSyncStartStopLightWithBridge(t *testing.T) {
+	if testing.Short() {
+		t.Skip("skipping TestSyncStartStopLightWithBridge test in short mode.")
+	}
+
+	ctx, cancel := context.WithTimeout(context.Background(), swamp.DefaultTestTimeout)
+	defer cancel()
+
+	sw := swamp.NewSwamp(t)
+	// wait for core network to fill 20 blocks
+	fillDn := swamp.FillBlocks(ctx, sw.ClientContext, sw.Accounts, bsize, numBlocks)
+	sw.WaitTillHeight(ctx, numBlocks)
+
+	// create bridge and set it as a bootstrapper
+	bridge := sw.NewBridgeNode()
+	sw.SetBootstrapper(t, bridge)
+	// and let bridge node sync up 20 blocks
+	err := bridge.Start(ctx)
+	require.NoError(t, err)
+	bridgeClient := getAdminClient(ctx, bridge, t)
+	h, err := bridgeClient.Header.WaitForHeight(ctx, numBlocks)
+	require.NoError(t, err)
+	require.EqualValues(t, h.Commit.BlockID.Hash, sw.GetCoreBlockHashByHeight(ctx, numBlocks))
+
+	// create a light node and connect it to the bridge node as a bootstrapper
+	light := sw.NewLightNode()
+	// start light node and let it sync to 20
+	err = light.Start(ctx)
+	require.NoError(t, err)
+	lightClient := getAdminClient(ctx, light, t)
+	h, err = lightClient.Header.WaitForHeight(ctx, numBlocks)
+	require.NoError(t, err)
+	require.EqualValues(t, h.Commit.BlockID.Hash, sw.GetCoreBlockHashByHeight(ctx, numBlocks))
+
+	sw.StopNode(ctx, light)
+
+	light = sw.NewLightNode()
+	require.NoError(t, light.Start(ctx))
+
+	// ensure that when the light node comes back up, it can sync the remainder
+	// of the chain it missed while it was offline
+	h, err = lightClient.Header.WaitForHeight(ctx, 40)
+	require.NoError(t, err)
+	assert.EqualValues(t, h.Commit.BlockID.Hash, sw.GetCoreBlockHashByHeight(ctx, 40))
+
+	// wait for the core block filling process to exit
+	select {
+	case <-ctx.Done():
+		t.Fatal(ctx.Err())
+	case err := <-fillDn:
+		require.NoError(t, err)
+	}
+}
+
+/*
+Test-Case: Sync a Light Node from a Full Node
+Pre-Requisites:
+- CoreClient is started by swamp
+- CoreClient has generated 20 blocks
+Steps:
+1. Create a Bridge Node(BN)
+2. Start a BN
+3. Check BN is synced to height 20
+4. Create a Full Node(FN) with a connection to BN as a trusted peer
+5. Start a FN
+6. Check FN is synced to network head
+7. Create a Light Node(LN) with a connection to FN as a trusted peer
+8. Ensure LN is NOT connected to BN and only connected to FN
+9. Start LN
+10. 
Check LN is synced to network head +*/ +func TestSyncLightAgainstFull(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), swamp.DefaultTestTimeout) + t.Cleanup(cancel) + + sw := swamp.NewSwamp(t) + // wait for the core network to fill up 20 blocks + fillDn := swamp.FillBlocks(ctx, sw.ClientContext, sw.Accounts, bsize, numBlocks) + sw.WaitTillHeight(ctx, numBlocks) + + // create bridge and set it as a bootstrapper + bridge := sw.NewBridgeNode() + sw.SetBootstrapper(t, bridge) + // start a bridge node and wait for it to sync up 20 blocks + err := bridge.Start(ctx) + require.NoError(t, err) + bridgeClient := getAdminClient(ctx, bridge, t) + h, err := bridgeClient.Header.WaitForHeight(ctx, numBlocks) + require.NoError(t, err) + assert.EqualValues(t, h.Commit.BlockID.Hash, sw.GetCoreBlockHashByHeight(ctx, numBlocks)) + + // create a FN with BN as a trusted peer + full := sw.NewFullNode() + // start FN and wait for it to sync up to head of BN + err = full.Start(ctx) + require.NoError(t, err) + fullClient := getAdminClient(ctx, full, t) + bridgeHead, err := bridgeClient.Header.LocalHead(ctx) + require.NoError(t, err) + _, err = fullClient.Header.WaitForHeight(ctx, bridgeHead.Height()) + require.NoError(t, err) + assert.EqualValues(t, h.Commit.BlockID.Hash, sw.GetCoreBlockHashByHeight(ctx, numBlocks)) + + // reset suite bootstrapper list and set full node as a bootstrapper for + // LN to connect to + sw.Bootstrappers = make([]ma.Multiaddr, 0) + sw.SetBootstrapper(t, full) + + // create an LN with FN as a trusted peer + light := sw.NewLightNode() + + // ensure there is no direct connection between LN and BN so that + // LN relies only on FN for syncing + err = sw.Network.UnlinkPeers(bridge.Host.ID(), light.Host.ID()) + require.NoError(t, err) + + // start LN and wait for it to sync up to network head against the head of the FN + err = light.Start(ctx) + require.NoError(t, err) + lightClient := getAdminClient(ctx, light, t) + fullHead, err := fullClient.Header.LocalHead(ctx) + require.NoError(t, err) + _, err = lightClient.Header.WaitForHeight(ctx, fullHead.Height()) + require.NoError(t, err) + + // wait for the core block filling process to exit + select { + case <-ctx.Done(): + t.Fatal(ctx.Err()) + case err := <-fillDn: + require.NoError(t, err) + } +} + +/* +Test-Case: Sync a Light Node with multiple trusted peers +Pre-Requisites: +- CoreClient is started by swamp +- CoreClient has generated 20 blocks +Steps: +1. Create a Bridge Node(BN) +2. Start a BN +3. Check BN is synced to height 20 +4. Create a Full Node(FN) with a connection to BN as a trusted peer +5. Start a FN +6. Check FN is synced to network head +7. Create a Light Node(LN) with a connection to BN and FN as trusted peers +8. Start LN +9. Check LN is synced to network head. 
+*/ +func TestSyncLightWithTrustedPeers(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), swamp.DefaultTestTimeout) + t.Cleanup(cancel) + + sw := swamp.NewSwamp(t) + fillDn := swamp.FillBlocks(ctx, sw.ClientContext, sw.Accounts, bsize, numBlocks) + sw.WaitTillHeight(ctx, numBlocks) + + // create a BN and set as a bootstrapper + bridge := sw.NewBridgeNode() + sw.SetBootstrapper(t, bridge) + // let it sync to network head + err := bridge.Start(ctx) + require.NoError(t, err) + bridgeClient := getAdminClient(ctx, bridge, t) + _, err = bridgeClient.Header.WaitForHeight(ctx, numBlocks) + require.NoError(t, err) + + // create a FN with BN as trusted peer + full := sw.NewFullNode() + + // let FN sync to network head + err = full.Start(ctx) + require.NoError(t, err) + fullClient := getAdminClient(ctx, full, t) + err = fullClient.Header.SyncWait(ctx) + require.NoError(t, err) + + // add full node as a bootstrapper for the suite + sw.SetBootstrapper(t, full) + + // create a LN with both FN and BN as trusted peers + light := sw.NewLightNode() + + // let LN sync to network head + err = light.Start(ctx) + require.NoError(t, err) + lightClient := getAdminClient(ctx, light, t) + err = lightClient.Header.SyncWait(ctx) + require.NoError(t, err) + + // wait for the core block filling process to exit + select { + case <-ctx.Done(): + t.Fatal(ctx.Err()) + case err := <-fillDn: + require.NoError(t, err) + } +} diff --git a/params/bootstrap.go b/params/bootstrap.go deleted file mode 100644 index 6789387534..0000000000 --- a/params/bootstrap.go +++ /dev/null @@ -1,59 +0,0 @@ -package params - -import ( - logging "github.com/ipfs/go-log/v2" - "github.com/libp2p/go-libp2p-core/peer" - ma "github.com/multiformats/go-multiaddr" -) - -var log = logging.Logger("params") - -// BootstrappersFor returns address information of bootstrap peers for a given network. -func BootstrappersFor(net Network) (Bootstrappers, error) { - bs, err := bootstrappersFor(net) - if err != nil { - return nil, err - } - - return parseAddrInfos(bs) -} - -// bootstrappersFor reports multiaddresses of bootstrap peers for a given network. -func bootstrappersFor(net Network) ([]string, error) { - if err := net.Validate(); err != nil { - return nil, err - } - - return bootstrapList[net], nil -} - -// NOTE: Every time we add a new long-running network, its bootstrap peers have to be added here. 
-var bootstrapList = map[Network][]string{ - Mamaki: { - "/dns4/andromeda.celestia-devops.dev/tcp/2121/p2p/12D3KooWKvPXtV1yaQ6e3BRNUHa5Phh8daBwBi3KkGaSSkUPys6D", - "/dns4/libra.celestia-devops.dev/tcp/2121/p2p/12D3KooWK5aDotDcLsabBmWDazehQLMsDkRyARm1k7f1zGAXqbt4", - "/dns4/norma.celestia-devops.dev/tcp/2121/p2p/12D3KooWHYczJDVNfYVkLcNHPTDKCeiVvRhg8Q9JU3bE3m9eEVyY", - }, - Private: {}, -} - -// parseAddrInfos converts strings to AddrInfos -func parseAddrInfos(addrs []string) ([]peer.AddrInfo, error) { - infos := make([]peer.AddrInfo, 0, len(addrs)) - for _, addr := range addrs { - maddr, err := ma.NewMultiaddr(addr) - if err != nil { - log.Errorw("parsing and validating addr", "addr", addr, "err", err) - return nil, err - } - - info, err := peer.AddrInfoFromP2pAddr(maddr) - if err != nil { - log.Errorw("parsing info from multiaddr", "maddr", maddr, "err", err) - return nil, err - } - infos = append(infos, *info) - } - - return infos, nil -} diff --git a/params/default.go b/params/default.go deleted file mode 100644 index edb8e89d38..0000000000 --- a/params/default.go +++ /dev/null @@ -1,60 +0,0 @@ -package params - -import ( - "fmt" - "os" - "strings" -) - -const ( - EnvCustomNetwork = "CELESTIA_CUSTOM" - EnvPrivateGenesis = "CELESTIA_PRIVATE_GENESIS" -) - -// defaultNetwork defines a default network for the Celestia Node. -var defaultNetwork = Mamaki - -// DefaultNetwork returns the network of the current build. -func DefaultNetwork() Network { - return defaultNetwork -} - -func init() { - // check if custom network option set - // format: CELESTIA_CUSTOM=:: - if custom, ok := os.LookupEnv(EnvCustomNetwork); ok { - fmt.Print("\n\nWARNING: Celestia custom network specified. Only use this option if the node is " + - "freshly created and initialized.\n**DO NOT** run a custom network over an already-existing node " + - "store!\n\n") - // ensure at least custom network is set - params := strings.Split(custom, ":") - if len(params) == 0 { - panic("params: must provide at least to use a custom network") - } - netID := params[0] - defaultNetwork = Network(netID) - networksList[defaultNetwork] = struct{}{} - // check if genesis hash provided and register it if exists - if len(params) >= 2 { - genHash := params[1] - genesisList[defaultNetwork] = strings.ToUpper(genHash) - } - // check if bootstrappers were provided and register - if len(params) == 3 { - bootstrappers := params[2] - // validate bootstrappers - bs := strings.Split(bootstrappers, ",") - _, err := parseAddrInfos(bs) - if err != nil { - println(fmt.Sprintf("params: env %s: contains invalid multiaddress", EnvCustomNetwork)) - panic(err) - } - bootstrapList[Network(netID)] = bs - } - } - // check if private network option set - if genesis, ok := os.LookupEnv(EnvPrivateGenesis); ok { - defaultNetwork = Private - genesisList[Private] = strings.ToUpper(genesis) - } -} diff --git a/params/network.go b/params/network.go deleted file mode 100644 index a86b9a6bb8..0000000000 --- a/params/network.go +++ /dev/null @@ -1,39 +0,0 @@ -package params - -import ( - "errors" - - "github.com/libp2p/go-libp2p-core/peer" -) - -// NOTE: Every time we add a new long-running network, it has to be added here. -const ( - // Mamaki testnet. See: celestiaorg/networks. - Mamaki Network = "mamaki" - // Private can be used to set up any private network, including local testing setups. - // Use CELESTIA_PRIVATE_GENESIS env var to enable Private by specifying its genesis block hash. 
- Private Network = "private" -) - -// Network is a type definition for DA network run by Celestia Node. -type Network string - -// Bootstrappers is a type definition for nodes that will be used as bootstrappers -type Bootstrappers []peer.AddrInfo - -// ErrInvalidNetwork is thrown when unknown network is used. -var ErrInvalidNetwork = errors.New("params: invalid network") - -// Validate the network. -func (n Network) Validate() error { - if _, ok := networksList[n]; !ok { - return ErrInvalidNetwork - } - return nil -} - -// networksList is a strict list of all known long-standing networks. -var networksList = map[Network]struct{}{ - Mamaki: {}, - Private: {}, -} diff --git a/pruner/archival/pruner.go b/pruner/archival/pruner.go new file mode 100644 index 0000000000..7b1cb935f3 --- /dev/null +++ b/pruner/archival/pruner.go @@ -0,0 +1,20 @@ +package archival + +import ( + "context" + + "github.com/celestiaorg/celestia-node/header" +) + +// Pruner is a noop implementation of the pruner.Factory interface +// that allows archival nodes to sync and retain historical data +// that is out of the availability window. +type Pruner struct{} + +func NewPruner() *Pruner { + return &Pruner{} +} + +func (p *Pruner) Prune(context.Context, ...*header.ExtendedHeader) error { + return nil +} diff --git a/pruner/archival/window.go b/pruner/archival/window.go new file mode 100644 index 0000000000..b89a779816 --- /dev/null +++ b/pruner/archival/window.go @@ -0,0 +1,5 @@ +package archival + +import "github.com/celestiaorg/celestia-node/pruner" + +const Window = pruner.AvailabilityWindow(0) diff --git a/pruner/light/pruner.go b/pruner/light/pruner.go new file mode 100644 index 0000000000..513bfa2b66 --- /dev/null +++ b/pruner/light/pruner.go @@ -0,0 +1,17 @@ +package light + +import ( + "context" + + "github.com/celestiaorg/celestia-node/header" +) + +type Pruner struct{} + +func NewPruner() *Pruner { + return &Pruner{} +} + +func (p *Pruner) Prune(context.Context, ...*header.ExtendedHeader) error { + return nil +} diff --git a/pruner/light/window.go b/pruner/light/window.go new file mode 100644 index 0000000000..dc1a9e4444 --- /dev/null +++ b/pruner/light/window.go @@ -0,0 +1,11 @@ +package light + +import ( + "time" + + "github.com/celestiaorg/celestia-node/pruner" +) + +// Window is the availability window for light nodes in the Celestia +// network (30 days). +const Window = pruner.AvailabilityWindow(time.Second * 86400 * 30) diff --git a/pruner/pruner.go b/pruner/pruner.go new file mode 100644 index 0000000000..fae60e483c --- /dev/null +++ b/pruner/pruner.go @@ -0,0 +1,13 @@ +package pruner + +import ( + "context" + + "github.com/celestiaorg/celestia-node/header" +) + +// Pruner contains methods necessary to prune data +// from the node's datastore. +type Pruner interface { + Prune(context.Context, ...*header.ExtendedHeader) error +} diff --git a/pruner/service.go b/pruner/service.go new file mode 100644 index 0000000000..f67265977a --- /dev/null +++ b/pruner/service.go @@ -0,0 +1,25 @@ +package pruner + +import ( + "context" +) + +// Service handles the pruning routine for the node using the +// prune Pruner. 
+type Service struct { + pruner Pruner +} + +func NewService(p Pruner) *Service { + return &Service{ + pruner: p, + } +} + +func (s *Service) Start(context.Context) error { + return nil +} + +func (s *Service) Stop(context.Context) error { + return nil +} diff --git a/pruner/window.go b/pruner/window.go new file mode 100644 index 0000000000..0a86c535ce --- /dev/null +++ b/pruner/window.go @@ -0,0 +1,7 @@ +package pruner + +import ( + "time" +) + +type AvailabilityWindow time.Duration diff --git a/service/header/service.go b/service/header/service.go deleted file mode 100644 index 6166e51d85..0000000000 --- a/service/header/service.go +++ /dev/null @@ -1,69 +0,0 @@ -package header - -import ( - "context" - - logging "github.com/ipfs/go-log/v2" - - "github.com/celestiaorg/celestia-node/header" - "github.com/celestiaorg/celestia-node/header/p2p" - "github.com/celestiaorg/celestia-node/header/sync" -) - -var log = logging.Logger("service/header") - -// Service represents the header service that can be started / stopped on a node. -// Service's main function is to manage its sub-services. Service can contain several -// sub-services, such as Exchange, ExchangeServer, Syncer, and so forth. -type Service struct { - ex header.Exchange - - syncer *sync.Syncer - sub header.Subscriber - p2pServer *p2p.ExchangeServer - store header.Store -} - -// NewHeaderService creates a new instance of header Service. -func NewHeaderService( - syncer *sync.Syncer, - sub header.Subscriber, - p2pServer *p2p.ExchangeServer, - ex header.Exchange, - store header.Store) *Service { - return &Service{ - syncer: syncer, - sub: sub, - p2pServer: p2pServer, - ex: ex, - store: store, - } -} - -// Start starts the header Service. -func (s *Service) Start(context.Context) error { - log.Info("starting header service") - return nil -} - -// Stop stops the header Service. -func (s *Service) Stop(context.Context) error { - log.Info("stopping header service") - return nil -} - -// GetByHeight returns the ExtendedHeader at the given height, blocking -// until header has been processed by the store or context deadline is exceeded. -func (s *Service) GetByHeight(ctx context.Context, height uint64) (*header.ExtendedHeader, error) { - return s.store.GetByHeight(ctx, height) -} - -// Head returns the ExtendedHeader of the chain head. -func (s *Service) Head(ctx context.Context) (*header.ExtendedHeader, error) { - return s.store.Head(ctx) -} - -// IsSyncing returns the status of sync -func (s *Service) IsSyncing() bool { - return !s.syncer.State().Finished() -} diff --git a/service/rpc/config.go b/service/rpc/config.go deleted file mode 100644 index 1b8e120e95..0000000000 --- a/service/rpc/config.go +++ /dev/null @@ -1,14 +0,0 @@ -package rpc - -type Config struct { - Address string - Port string -} - -func DefaultConfig() Config { - return Config{ - Address: "0.0.0.0", - // do NOT expose the same port as celestia-core by default so that both can run on the same machine - Port: "26658", - } -} diff --git a/service/rpc/das.go b/service/rpc/das.go deleted file mode 100644 index f6369a144e..0000000000 --- a/service/rpc/das.go +++ /dev/null @@ -1,35 +0,0 @@ -package rpc - -import ( - "encoding/json" - "net/http" - - "github.com/celestiaorg/celestia-node/das" -) - -const ( - dasStateEndpoint = "/daser/state" -) - -// DasStateResponse encompasses the fields returned in response -// to a `/daser` request. 
-type DasStateResponse struct { - SampleRoutine das.RoutineState `json:"sample_routine"` - CatchUpRoutine das.JobInfo `json:"catch_up_routine"` -} - -func (h *Handler) handleDASStateRequest(w http.ResponseWriter, r *http.Request) { - dasState := new(DasStateResponse) - dasState.SampleRoutine = h.das.SampleRoutineState() - dasState.CatchUpRoutine = h.das.CatchUpRoutineState() - - resp, err := json.Marshal(dasState) - if err != nil { - writeError(w, http.StatusInternalServerError, dasStateEndpoint, err) - return - } - _, err = w.Write(resp) - if err != nil { - log.Errorw("serving request", "endpoint", dasStateEndpoint, "err", err) - } -} diff --git a/service/rpc/endpoints.go b/service/rpc/endpoints.go deleted file mode 100644 index 3ba7dbc1a7..0000000000 --- a/service/rpc/endpoints.go +++ /dev/null @@ -1,41 +0,0 @@ -package rpc - -import ( - "fmt" - "net/http" -) - -func (h *Handler) RegisterEndpoints(rpc *Server) { - // state endpoints - rpc.RegisterHandlerFunc(balanceEndpoint, h.handleBalanceRequest, http.MethodGet) - rpc.RegisterHandlerFunc(fmt.Sprintf("%s/{%s}", balanceEndpoint, addrKey), h.handleBalanceRequest, - http.MethodGet) - rpc.RegisterHandlerFunc(submitTxEndpoint, h.handleSubmitTx, http.MethodPost) - rpc.RegisterHandlerFunc(submitPFDEndpoint, h.handleSubmitPFD, http.MethodPost) - rpc.RegisterHandlerFunc(transferEndpoint, h.handleTransfer, http.MethodPost) - - // share endpoints - rpc.RegisterHandlerFunc(fmt.Sprintf("%s/{%s}/height/{%s}", namespacedSharesEndpoint, nIDKey, heightKey), - h.handleSharesByNamespaceRequest, http.MethodGet) - rpc.RegisterHandlerFunc(fmt.Sprintf("%s/{%s}", namespacedSharesEndpoint, nIDKey), - h.handleSharesByNamespaceRequest, http.MethodGet) - rpc.RegisterHandlerFunc(fmt.Sprintf("%s/{%s}/height/{%s}", namespacedDataEndpoint, nIDKey, heightKey), - h.handleDataByNamespaceRequest, http.MethodGet) - rpc.RegisterHandlerFunc(fmt.Sprintf("%s/{%s}", namespacedDataEndpoint, nIDKey), - h.handleDataByNamespaceRequest, http.MethodGet) - - // DAS endpoints - rpc.RegisterHandlerFunc(fmt.Sprintf("%s/{%s}", heightAvailabilityEndpoint, heightKey), - h.handleHeightAvailabilityRequest, http.MethodGet) - - // header endpoints - rpc.RegisterHandlerFunc(fmt.Sprintf("%s/{%s}", headerByHeightEndpoint, heightKey), h.handleHeaderRequest, - http.MethodGet) - rpc.RegisterHandlerFunc(headEndpoint, h.handleHeadRequest, http.MethodGet) - - // DASer endpoints - // only register if DASer service is available - if h.das != nil { - rpc.RegisterHandlerFunc(dasStateEndpoint, h.handleDASStateRequest, http.MethodGet) - } -} diff --git a/service/rpc/handler.go b/service/rpc/handler.go deleted file mode 100644 index fc3d9406c1..0000000000 --- a/service/rpc/handler.go +++ /dev/null @@ -1,33 +0,0 @@ -package rpc - -import ( - logging "github.com/ipfs/go-log/v2" - - "github.com/celestiaorg/celestia-node/das" - "github.com/celestiaorg/celestia-node/service/header" - "github.com/celestiaorg/celestia-node/service/share" - "github.com/celestiaorg/celestia-node/service/state" -) - -var log = logging.Logger("rpc") - -type Handler struct { - state *state.Service - share *share.Service - header *header.Service - das *das.DASer -} - -func NewHandler( - state *state.Service, - share *share.Service, - header *header.Service, - das *das.DASer, -) *Handler { - return &Handler{ - state: state, - share: share, - header: header, - das: das, - } -} diff --git a/service/rpc/middleware.go b/service/rpc/middleware.go deleted file mode 100644 index ce1189580c..0000000000 --- a/service/rpc/middleware.go +++ /dev/null 
@@ -1,36 +0,0 @@ -package rpc - -import ( - "errors" - "net/http" - - "github.com/gorilla/mux" - - "github.com/celestiaorg/celestia-node/service/state" -) - -func (h *Handler) RegisterMiddleware(rpc *Server) { - rpc.RegisterMiddleware(setContentType) - rpc.RegisterMiddleware(checkPostDisabled(h.state)) -} - -func setContentType(next http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Header().Add("Content-Type", "application/json") - next.ServeHTTP(w, r) - }) -} - -// checkPostDisabled ensures that context was canceled and prohibit POST requests. -func checkPostDisabled(state *state.Service) mux.MiddlewareFunc { - return func(next http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - // check if state service was halted and deny the transaction - if r.Method == http.MethodPost && state.IsStopped() { - writeError(w, http.StatusMethodNotAllowed, r.URL.Path, errors.New("not possible to submit data")) - return - } - next.ServeHTTP(w, r) - }) - } -} diff --git a/service/rpc/server.go b/service/rpc/server.go deleted file mode 100644 index 14844f376c..0000000000 --- a/service/rpc/server.go +++ /dev/null @@ -1,85 +0,0 @@ -package rpc - -import ( - "context" - "fmt" - "net" - "net/http" - - "github.com/gorilla/mux" -) - -// Server represents an RPC server on the Node. -// TODO @renaynay: eventually, rpc server should be able to be toggled on and off. -type Server struct { - cfg Config - - srv *http.Server - srvMux *mux.Router // http request multiplexer - listener net.Listener -} - -// NewServer returns a new RPC Server. -func NewServer(cfg Config) *Server { - srvMux := mux.NewRouter() - srvMux.Use(setContentType) - - server := &Server{ - cfg: cfg, - srvMux: srvMux, - } - server.srv = &http.Server{ - Handler: server, - } - return server -} - -// Start starts the RPC Server, listening on the given address. -func (s *Server) Start(context.Context) error { - listenAddr := fmt.Sprintf("%s:%s", s.cfg.Address, s.cfg.Port) - listener, err := net.Listen("tcp", listenAddr) - if err != nil { - return err - } - s.listener = listener - log.Infow("RPC server started", "listening on", listener.Addr().String()) - //nolint:errcheck - go s.srv.Serve(listener) - return nil -} - -// Stop stops the RPC Server. -func (s *Server) Stop(context.Context) error { - // if server already stopped, return - if s.listener == nil { - return nil - } - if err := s.listener.Close(); err != nil { - return err - } - s.listener = nil - log.Info("RPC server stopped") - return nil -} - -// RegisterMiddleware allows to register a custom middleware that will be called before http.Request will reach handler. -func (s *Server) RegisterMiddleware(m mux.MiddlewareFunc) { - // `router.Use` appends new middleware to existing - s.srvMux.Use(m) -} - -// RegisterHandlerFunc registers the given http.HandlerFunc on the Server's multiplexer -// on the given pattern. -func (s *Server) RegisterHandlerFunc(pattern string, handlerFunc http.HandlerFunc, method string) { - s.srvMux.HandleFunc(pattern, handlerFunc).Methods(method) -} - -// ServeHTTP serves inbound requests on the Server. -func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { - s.srvMux.ServeHTTP(w, r) -} - -// ListenAddr returns the listen address of the server. 
-func (s *Server) ListenAddr() string { - return s.listener.Addr().String() -} diff --git a/service/rpc/server_test.go b/service/rpc/server_test.go deleted file mode 100644 index 943d779c2c..0000000000 --- a/service/rpc/server_test.go +++ /dev/null @@ -1,51 +0,0 @@ -package rpc - -import ( - "context" - "fmt" - "io/ioutil" - "net/http" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestServer(t *testing.T) { - server := NewServer(Config{ - Address: "0.0.0.0", - Port: "0", - }) - - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - err := server.Start(ctx) - require.NoError(t, err) - - // register ping handler - ping := new(ping) - server.RegisterHandlerFunc("/ping", ping.ServeHTTP, http.MethodGet) - - url := fmt.Sprintf("http://%s/ping", server.listener.Addr().String()) - - resp, err := http.Get(url) - require.NoError(t, err) - - buf, err := ioutil.ReadAll(resp.Body) - require.NoError(t, err) - t.Cleanup(func() { - resp.Body.Close() - }) - assert.Equal(t, "pong", string(buf)) - - err = server.Stop(ctx) - require.NoError(t, err) -} - -type ping struct{} - -func (p ping) ServeHTTP(w http.ResponseWriter, r *http.Request) { - //nolint:errcheck - w.Write([]byte("pong")) -} diff --git a/service/rpc/state.go b/service/rpc/state.go deleted file mode 100644 index 998c5bfc2b..0000000000 --- a/service/rpc/state.go +++ /dev/null @@ -1,174 +0,0 @@ -package rpc - -import ( - "encoding/hex" - "encoding/json" - "errors" - "net/http" - - "github.com/cosmos/cosmos-sdk/types" - "github.com/gorilla/mux" - - "github.com/celestiaorg/celestia-node/service/state" -) - -const ( - balanceEndpoint = "/balance" - submitTxEndpoint = "/submit_tx" - submitPFDEndpoint = "/submit_pfd" - transferEndpoint = "/transfer" -) - -var addrKey = "address" - -// submitTxRequest represents a request to submit a raw transaction -type submitTxRequest struct { - Tx string `json:"tx"` -} - -// submitPFDRequest represents a request to submit a PayForData -// transaction. 
-type submitPFDRequest struct { - NamespaceID string `json:"namespace_id"` - Data string `json:"data"` - GasLimit uint64 `json:"gas_limit"` -} - -type transferRequest struct { - To string `json:"to"` - Amount int64 `json:"amount"` - GasLimit uint64 `json:"gas_limit"` -} - -func (h *Handler) handleBalanceRequest(w http.ResponseWriter, r *http.Request) { - var ( - bal *state.Balance - err error - ) - // read and parse request - vars := mux.Vars(r) - addrStr, exists := vars[addrKey] - if exists { - // convert address to Address type - addr, addrerr := types.AccAddressFromBech32(addrStr) - if addrerr != nil { - writeError(w, http.StatusBadRequest, balanceEndpoint, addrerr) - return - } - bal, err = h.state.BalanceForAddress(r.Context(), addr) - } else { - bal, err = h.state.Balance(r.Context()) - } - if err != nil { - writeError(w, http.StatusInternalServerError, balanceEndpoint, err) - return - } - resp, err := json.Marshal(bal) - if err != nil { - writeError(w, http.StatusInternalServerError, balanceEndpoint, err) - return - } - _, err = w.Write(resp) - if err != nil { - log.Errorw("writing response", "endpoint", balanceEndpoint, "err", err) - } -} - -func (h *Handler) handleSubmitTx(w http.ResponseWriter, r *http.Request) { - // decode request - var req submitTxRequest - err := json.NewDecoder(r.Body).Decode(&req) - if err != nil { - writeError(w, http.StatusBadRequest, submitTxEndpoint, err) - return - } - rawTx, err := hex.DecodeString(req.Tx) - if err != nil { - writeError(w, http.StatusBadRequest, submitTxEndpoint, err) - return - } - // perform request - txResp, err := h.state.SubmitTx(r.Context(), rawTx) - if err != nil { - writeError(w, http.StatusInternalServerError, submitTxEndpoint, err) - return - } - resp, err := json.Marshal(txResp) - if err != nil { - writeError(w, http.StatusInternalServerError, submitTxEndpoint, err) - return - } - _, err = w.Write(resp) - if err != nil { - log.Errorw("writing response", "endpoint", submitTxEndpoint, "err", err) - } -} - -func (h *Handler) handleSubmitPFD(w http.ResponseWriter, r *http.Request) { - // decode request - var req submitPFDRequest - err := json.NewDecoder(r.Body).Decode(&req) - if err != nil { - writeError(w, http.StatusBadRequest, submitPFDEndpoint, err) - return - } - nID, err := hex.DecodeString(req.NamespaceID) - if err != nil { - writeError(w, http.StatusBadRequest, submitPFDEndpoint, err) - return - } - data, err := hex.DecodeString(req.Data) - if err != nil { - writeError(w, http.StatusBadRequest, submitPFDEndpoint, err) - return - } - // perform request - txResp, err := h.state.SubmitPayForData(r.Context(), nID, data, req.GasLimit) - if err != nil { - writeError(w, http.StatusInternalServerError, submitPFDEndpoint, err) - return - } - resp, err := json.Marshal(txResp) - if err != nil { - writeError(w, http.StatusInternalServerError, submitPFDEndpoint, err) - return - } - _, err = w.Write(resp) - if err != nil { - log.Errorw("writing response", "endpoint", submitPFDEndpoint, "err", err) - } -} - -func (h *Handler) handleTransfer(w http.ResponseWriter, r *http.Request) { - var req transferRequest - err := json.NewDecoder(r.Body).Decode(&req) - if err != nil { - writeError(w, http.StatusBadRequest, transferEndpoint, err) - return - } - if req.Amount <= 0 { - writeError(w, http.StatusBadRequest, transferEndpoint, errors.New("amount must be greater than 0")) - return - } - addr, err := types.AccAddressFromBech32(req.To) - if err != nil { - writeError(w, http.StatusBadRequest, transferEndpoint, err) - return - } - amount := 
types.NewInt(req.Amount) - - txResp, err := h.state.Transfer(r.Context(), addr, amount, req.GasLimit) - if err != nil { - writeError(w, http.StatusInternalServerError, transferEndpoint, err) - return - } - resp, err := json.Marshal(txResp) - if err != nil { - writeError(w, http.StatusInternalServerError, transferEndpoint, err) - return - } - _, err = w.Write(resp) - if err != nil { - log.Errorw("writing response", "endpoint", transferEndpoint, "err", err) - } -} diff --git a/service/rpc/util.go b/service/rpc/util.go deleted file mode 100644 index be7fe34c7b..0000000000 --- a/service/rpc/util.go +++ /dev/null @@ -1,21 +0,0 @@ -package rpc - -import ( - "encoding/json" - "net/http" -) - -func writeError(w http.ResponseWriter, statusCode int, endpoint string, err error) { - log.Errorw("serving request", "endpoint", endpoint, "err", err) - - w.WriteHeader(statusCode) - errBody, jerr := json.Marshal(err.Error()) - if jerr != nil { - log.Errorw("serializing error", "endpoint", endpoint, "err", jerr) - return - } - _, werr := w.Write(errBody) - if werr != nil { - log.Errorw("writing error response", "endpoint", endpoint, "err", werr) - } -} diff --git a/service/share/backoff.go b/service/share/backoff.go deleted file mode 100644 index 4b4b95c5af..0000000000 --- a/service/share/backoff.go +++ /dev/null @@ -1,96 +0,0 @@ -package share - -import ( - "context" - "fmt" - "sync" - "time" - - "github.com/libp2p/go-libp2p-core/host" - "github.com/libp2p/go-libp2p-core/peer" - "github.com/libp2p/go-libp2p/p2p/discovery/backoff" -) - -// gcInterval is a default period after which disconnected peers will be removed from cache -const gcInterval = time.Hour - -var defaultBackoffFactory = backoff.NewFixedBackoff(time.Hour) - -// backoffConnector wraps a libp2p.Host to establish a connection with peers -// with adding a delay for the next connection attempt. -type backoffConnector struct { - h host.Host - backoff backoff.BackoffFactory - - cacheLk sync.Mutex - cacheData map[peer.ID]*backoffData -} - -// backoffData stores time when next connection attempt with the remote peer. -type backoffData struct { - nexttry time.Time - backoff backoff.BackoffStrategy -} - -func newBackoffConnector(h host.Host, factory backoff.BackoffFactory) *backoffConnector { - return &backoffConnector{ - h: h, - backoff: factory, - cacheData: make(map[peer.ID]*backoffData), - } -} - -// Connect puts peer to the backoffCache and tries to establish a connection with it. -func (b *backoffConnector) Connect(ctx context.Context, p peer.AddrInfo) error { - // we should lock the mutex before calling connectionData and not inside because otherwise it could be modified - // from another goroutine as it returns a pointer - b.cacheLk.Lock() - cache := b.connectionData(p.ID) - if time.Now().Before(cache.nexttry) { - b.cacheLk.Unlock() - return fmt.Errorf("share/discovery: backoff period is not ended for peer=%s", p.ID.String()) - } - cache.nexttry = time.Now().Add(cache.backoff.Delay()) - b.cacheLk.Unlock() - return b.h.Connect(ctx, p) -} - -// connectionData returns backoffData from the map if it was stored, otherwise it will instantiate -// a new one. -func (b *backoffConnector) connectionData(p peer.ID) *backoffData { - cache, ok := b.cacheData[p] - if !ok { - cache = &backoffData{} - cache.backoff = b.backoff() - b.cacheData[p] = cache - } - return cache -} - -// RestartBackoff resets delay time between attempts and adds a delay for the next connection attempt to remote peer. 
-// It will mostly be called when host receives a notification that remote peer was disconnected. -func (b *backoffConnector) RestartBackoff(p peer.ID) { - b.cacheLk.Lock() - defer b.cacheLk.Unlock() - cache := b.connectionData(p) - cache.backoff.Reset() - cache.nexttry = time.Now().Add(cache.backoff.Delay()) -} - -func (b *backoffConnector) GC(ctx context.Context) { - ticker := time.NewTicker(gcInterval) - for { - select { - case <-ctx.Done(): - return - case <-ticker.C: - b.cacheLk.Lock() - for id, cache := range b.cacheData { - if cache.nexttry.Before(time.Now()) { - delete(b.cacheData, id) - } - } - b.cacheLk.Unlock() - } - } -} diff --git a/service/share/cache_availability.go b/service/share/cache_availability.go deleted file mode 100644 index 02cdb49905..0000000000 --- a/service/share/cache_availability.go +++ /dev/null @@ -1,96 +0,0 @@ -package share - -import ( - "bytes" - "context" - "sync" - - "github.com/ipfs/go-datastore" - "github.com/ipfs/go-datastore/autobatch" - "github.com/ipfs/go-datastore/namespace" - "github.com/tendermint/tendermint/pkg/da" -) - -var ( - // DefaultWriteBatchSize defines the size of the batched header write. - // Headers are written in batches not to thrash the underlying Datastore with writes. - // TODO(@Wondertan, @renaynay): Those values must be configurable and proper defaults should be set for specific node - // type. (#709) - DefaultWriteBatchSize = 2048 - cacheAvailabilityPrefix = datastore.NewKey("sampling_result") - - minRoot = da.MinDataAvailabilityHeader() -) - -// CacheAvailability wraps a given Availability (whether it's light or full) -// and stores the results of a successful sampling routine over a given Root's hash -// to disk. -type CacheAvailability struct { - avail Availability - - // TODO(@Wondertan): Once we come to parallelized DASer, this lock becomes a contention point - // Related to #483 - dsLk sync.RWMutex - ds *autobatch.Datastore -} - -// NewCacheAvailability wraps the given Availability with an additional datastore -// for sampling result caching. -func NewCacheAvailability(avail Availability, ds datastore.Batching) *CacheAvailability { - ds = namespace.Wrap(ds, cacheAvailabilityPrefix) - autoDS := autobatch.NewAutoBatching(ds, DefaultWriteBatchSize) - - return &CacheAvailability{ - avail: avail, - ds: autoDS, - } -} - -// SharesAvailable will store, upon success, the hash of the given Root to disk. -func (ca *CacheAvailability) SharesAvailable(ctx context.Context, root *Root) error { - // short-circuit if the given root is minimum DAH of an empty data square - if isMinRoot(root) { - return nil - } - // do not sample over Root that has already been sampled - key := rootKey(root) - - ca.dsLk.RLock() - exists, err := ca.ds.Has(ctx, key) - ca.dsLk.RUnlock() - if err != nil || exists { - return err - } - - err = ca.avail.SharesAvailable(ctx, root) - if err != nil { - return err - } - - ca.dsLk.Lock() - err = ca.ds.Put(ctx, key, []byte{}) - ca.dsLk.Unlock() - if err != nil { - log.Errorw("storing root of successful SharesAvailable request to disk", "err", err) - } - return err -} - -func (ca *CacheAvailability) ProbabilityOfAvailability() float64 { - return ca.avail.ProbabilityOfAvailability() -} - -// Close flushes all queued writes to disk. -func (ca *CacheAvailability) Close(ctx context.Context) error { - return ca.ds.Flush(ctx) -} - -func rootKey(root *Root) datastore.Key { - return datastore.NewKey(root.String()) -} - -// isMinRoot returns whether the given root is a minimum (empty) -// DataAvailabilityHeader (DAH). 
-func isMinRoot(root *Root) bool { - return bytes.Equal(minRoot.Hash(), root.Hash()) -} diff --git a/service/share/cache_availability_test.go b/service/share/cache_availability_test.go deleted file mode 100644 index d2519ff841..0000000000 --- a/service/share/cache_availability_test.go +++ /dev/null @@ -1,135 +0,0 @@ -package share - -import ( - "context" - "fmt" - "strconv" - "testing" - "time" - - "github.com/ipfs/go-datastore" - "github.com/ipfs/go-datastore/sync" - mdutils "github.com/ipfs/go-merkledag/test" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "github.com/tendermint/tendermint/pkg/da" -) - -// TestCacheAvailability tests to ensure that the successful result of a -// sampling process is properly stored locally. -func TestCacheAvailability(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - fullLocalServ, dah0 := RandFullLocalServiceWithSquare(t, 16) - lightLocalServ, dah1 := RandLightLocalServiceWithSquare(t, 16) - - var tests = []struct { - service *Service - root *Root - }{ - { - service: fullLocalServ, - root: dah0, - }, - { - service: lightLocalServ, - root: dah1, - }, - } - - for i, tt := range tests { - t.Run(strconv.Itoa(i), func(t *testing.T) { - ca := tt.service.Availability.(*CacheAvailability) - // ensure the dah isn't yet in the cache - exists, err := ca.ds.Has(ctx, rootKey(tt.root)) - require.NoError(t, err) - assert.False(t, exists) - err = tt.service.SharesAvailable(ctx, tt.root) - require.NoError(t, err) - // ensure the dah was stored properly - exists, err = ca.ds.Has(ctx, rootKey(tt.root)) - require.NoError(t, err) - assert.True(t, exists) - }) - } -} - -var invalidHeader = da.DataAvailabilityHeader{ - RowsRoots: [][]byte{{1, 2}}, -} - -// TestCacheAvailability_Failed tests to make sure a failed -// sampling process is not stored. -func TestCacheAvailability_Failed(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - defer cancel() - - ca := NewCacheAvailability(&dummyAvailability{}, sync.MutexWrap(datastore.NewMapDatastore())) - serv := NewService(mdutils.Bserv(), ca) - - err := serv.SharesAvailable(ctx, &invalidHeader) - require.Error(t, err) - // ensure the dah was NOT cached - exists, err := ca.ds.Has(ctx, rootKey(&invalidHeader)) - require.NoError(t, err) - assert.False(t, exists) -} - -// TestCacheAvailability_NoDuplicateSampling tests to ensure that -// if the Root was already sampled, it does not run a sampling routine -// again. -func TestCacheAvailability_NoDuplicateSampling(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - // create root to cache - root := RandFillBS(t, 16, mdutils.Bserv()) - // wrap dummyAvailability with a datastore - ds := sync.MutexWrap(datastore.NewMapDatastore()) - ca := NewCacheAvailability(&dummyAvailability{counter: 0}, ds) - // sample the root - err := ca.SharesAvailable(ctx, root) - require.NoError(t, err) - // ensure root was cached - exists, err := ca.ds.Has(ctx, rootKey(root)) - require.NoError(t, err) - assert.True(t, exists) - // call sampling routine over same root again and ensure no error is returned - // if an error was returned, that means duplicate sampling occurred - err = ca.SharesAvailable(ctx, root) - require.NoError(t, err) -} - -// TestCacheAvailability_MinRoot tests to make sure `SharesAvailable` will -// short circuit if the given root is a minimum DataAvailabilityHeader (minRoot). 
-func TestCacheAvailability_MinRoot(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - fullLocalServ, _ := RandFullLocalServiceWithSquare(t, 16) - minDAH := da.MinDataAvailabilityHeader() - - err := fullLocalServ.SharesAvailable(ctx, &minDAH) - assert.NoError(t, err) -} - -type dummyAvailability struct { - counter int -} - -// SharesAvailable should only be called once, if called more than once, return -// error. -func (da *dummyAvailability) SharesAvailable(_ context.Context, root *Root) error { - if root == &invalidHeader { - return fmt.Errorf("invalid header") - } - if da.counter > 0 { - return fmt.Errorf("duplicate sampling process called") - } - da.counter++ - return nil -} - -func (da *dummyAvailability) ProbabilityOfAvailability() float64 { - return 0 -} diff --git a/service/share/discovery.go b/service/share/discovery.go deleted file mode 100644 index 074bb1d8f7..0000000000 --- a/service/share/discovery.go +++ /dev/null @@ -1,168 +0,0 @@ -package share - -import ( - "context" - "time" - - core "github.com/libp2p/go-libp2p-core/discovery" - "github.com/libp2p/go-libp2p-core/event" - "github.com/libp2p/go-libp2p-core/host" - "github.com/libp2p/go-libp2p-core/network" - "github.com/libp2p/go-libp2p-core/peer" -) - -const ( - // peerWeight is a weight of discovered peers. - // peerWeight is a number that will be assigned to all discovered full nodes, - // so ConnManager will not break a connection with them. - peerWeight = 1000 - topic = "full" -) - -// waitF calculates time to restart announcing. -var waitF = func(ttl time.Duration) time.Duration { - return 7 * ttl / 8 -} - -// discovery combines advertise and discover services and allows to store discovered nodes. -type discovery struct { - set *limitedSet - host host.Host - disc core.Discovery - connector *backoffConnector - // peersLimit is max amount of peers that will be discovered during a discovery session. - peersLimit uint - // discInterval is an interval between discovery sessions. - discoveryInterval time.Duration - // advertiseInterval is an interval between advertising sessions. - advertiseInterval time.Duration -} - -// NewDiscovery constructs a new discovery. -func NewDiscovery( - h host.Host, - d core.Discovery, - peersLimit uint, - discInterval, - advertiseInterval time.Duration, -) *discovery { //nolint:revive - return &discovery{ - newLimitedSet(peersLimit), - h, - d, - newBackoffConnector(h, defaultBackoffFactory), - peersLimit, - discInterval, - advertiseInterval, - } -} - -// handlePeersFound receives peers and tries to establish a connection with them. -// Peer will be added to PeerCache if connection succeeds. -func (d *discovery) handlePeerFound(ctx context.Context, topic string, peer peer.AddrInfo) { - if peer.ID == d.host.ID() || len(peer.Addrs) == 0 || d.set.Contains(peer.ID) { - return - } - err := d.set.TryAdd(peer.ID) - if err != nil { - log.Debug(err) - return - } - - err = d.connector.Connect(ctx, peer) - if err != nil { - log.Warn(err) - d.set.Remove(peer.ID) - return - } - log.Debugw("added peer to set", "id", peer.ID) - // add tag to protect peer of being killed by ConnManager - d.host.ConnManager().TagPeer(peer.ID, topic, peerWeight) -} - -// ensurePeers ensures we always have 'peerLimit' connected peers. -// It starts peer discovery every 30 seconds until peer cache reaches peersLimit. -// Discovery is restarted if any previously connected peers disconnect. 
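ensurePeers (body below) drives that restart logic off libp2p's event bus. The underlying pattern, condensed from the deleted code, is worth knowing independently of this package; `watchDisconnects` and the `onDrop` callback are illustrative stand-ins for the set-removal and backoff-reset work:

```go
package example

import (
	"context"

	"github.com/libp2p/go-libp2p-core/event"
	"github.com/libp2p/go-libp2p-core/host"
	"github.com/libp2p/go-libp2p-core/network"
	"github.com/libp2p/go-libp2p-core/peer"
)

// watchDisconnects calls onDrop for every peer that becomes fully
// disconnected; this is the same trigger ensurePeers uses to restart discovery.
func watchDisconnects(ctx context.Context, h host.Host, onDrop func(peer.ID)) error {
	sub, err := h.EventBus().Subscribe(&event.EvtPeerConnectednessChanged{})
	if err != nil {
		return err
	}
	defer sub.Close()

	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case e := <-sub.Out():
			ev := e.(event.EvtPeerConnectednessChanged)
			if ev.Connectedness == network.NotConnected {
				onDrop(ev.Peer) // drop from the peer set, reset backoff, re-tick discovery
			}
		}
	}
}
```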
-func (d *discovery) ensurePeers(ctx context.Context) { - if d.peersLimit == 0 { - log.Warn("peers limit is set to 0. Skipping discovery...") - return - } - // subscribe on Event Bus in order to catch disconnected peers and restart the discovery - sub, err := d.host.EventBus().Subscribe(&event.EvtPeerConnectednessChanged{}) - if err != nil { - log.Error(err) - return - } - go d.connector.GC(ctx) - - t := time.NewTicker(d.discoveryInterval) - defer func() { - t.Stop() - if err = sub.Close(); err != nil { - log.Error(err) - } - }() - for { - select { - case <-ctx.Done(): - return - case <-t.C: - if uint(d.set.Size()) == d.peersLimit { - // stop ticker if we have reached the limit - t.Stop() - continue - } - peers, err := d.disc.FindPeers(ctx, topic) - if err != nil { - log.Error(err) - continue - } - for peer := range peers { - go d.handlePeerFound(ctx, topic, peer) - } - case e := <-sub.Out(): - // listen to disconnect event to remove peer from set and reset backoff time - // reset timer in order to restart the discovery, once stored peer is disconnected - connStatus := e.(event.EvtPeerConnectednessChanged) - if connStatus.Connectedness == network.NotConnected { - if d.set.Contains(connStatus.Peer) { - d.connector.RestartBackoff(connStatus.Peer) - d.set.Remove(connStatus.Peer) - d.host.ConnManager().UntagPeer(connStatus.Peer, topic) - t.Reset(d.discoveryInterval) - } - } - } - } -} - -// advertise is a utility function that persistently advertises a service through an Advertiser. -func (d *discovery) advertise(ctx context.Context) { - timer := time.NewTimer(d.advertiseInterval) - defer timer.Stop() - for { - ttl, err := d.disc.Advertise(ctx, topic) - if err != nil { - log.Debugf("Error advertising %s: %s", topic, err.Error()) - if ctx.Err() != nil { - return - } - - select { - case <-timer.C: - timer.Reset(d.advertiseInterval) - continue - case <-ctx.Done(): - return - } - } - - select { - case <-timer.C: - timer.Reset(waitF(ttl)) - case <-ctx.Done(): - return - } - } -} diff --git a/service/share/doc.go b/service/share/doc.go deleted file mode 100644 index fd3739ad81..0000000000 --- a/service/share/doc.go +++ /dev/null @@ -1,14 +0,0 @@ -/* -Package share contains logic related to the retrieval and random sampling of shares of -block data. - -Though this package contains several useful methods for getting specific shares and/or -sampling them at random, a particularly useful method is GetSharesByNamespace which retrieves -all shares of block data of the given namespace.ID from the block associated with the given -DataAvailabilityHeader (DAH, but referred to as Root within this package). - -This package also contains both implementations of the Availability interface: lightAvailability -which samples for 16 shares of block data (enough to verify the block's availability on the network) -and fullAvailability which samples for as many shares as necessary to fully reconstruct the block data. -*/ -package share diff --git a/service/share/empty.go b/service/share/empty.go deleted file mode 100644 index 931f505043..0000000000 --- a/service/share/empty.go +++ /dev/null @@ -1,32 +0,0 @@ -package share - -import ( - "bytes" - "context" - - "github.com/ipfs/go-blockservice" - "github.com/tendermint/tendermint/pkg/consts" - - "github.com/celestiaorg/celestia-node/ipld" -) - -// EnsureEmptySquareExists checks if the given DAG contains an empty block data square. -// If it does not, it stores an empty block. 
This optimization exists to prevent -// redundant storing of empty block data so that it is only stored once and returned -// upon request for a block with an empty data square. Ref: header/header.go#L56 -func EnsureEmptySquareExists(ctx context.Context, bServ blockservice.BlockService) error { - shares := make([][]byte, consts.MinSharecount) - for i := 0; i < consts.MinSharecount; i++ { - shares[i] = tailPaddingShare - } - - _, err := ipld.AddShares(ctx, shares, bServ) - return err -} - -// tail is filler for all tail padded shares -// it is allocated once and used everywhere -var tailPaddingShare = append( - append(make([]byte, 0, consts.ShareSize), consts.TailPaddingNamespaceID...), - bytes.Repeat([]byte{0}, consts.ShareSize-consts.NamespaceSize)..., -) diff --git a/service/share/full_availability.go b/service/share/full_availability.go deleted file mode 100644 index edc1b7eeaa..0000000000 --- a/service/share/full_availability.go +++ /dev/null @@ -1,72 +0,0 @@ -package share - -import ( - "context" - "errors" - - "github.com/ipfs/go-blockservice" - format "github.com/ipfs/go-ipld-format" - - "github.com/celestiaorg/celestia-node/ipld" -) - -// FullAvailability implements Availability using the full data square -// recovery technique. It is considered "full" because it is required -// to download enough shares to fully reconstruct the data square. -type FullAvailability struct { - rtrv *ipld.Retriever - disc *discovery - - cancel context.CancelFunc -} - -// NewFullAvailability creates a new full Availability. -func NewFullAvailability(bServ blockservice.BlockService, disc *discovery) *FullAvailability { - return &FullAvailability{ - rtrv: ipld.NewRetriever(bServ), - disc: disc, - } -} - -func (fa *FullAvailability) Start(context.Context) error { - ctx, cancel := context.WithCancel(context.Background()) - fa.cancel = cancel - - go fa.disc.advertise(ctx) - go fa.disc.ensurePeers(ctx) - return nil -} - -func (fa *FullAvailability) Stop(context.Context) error { - fa.cancel() - return nil -} - -// SharesAvailable reconstructs the data committed to the given Root by requesting -// enough Shares from the network. -func (fa *FullAvailability) SharesAvailable(ctx context.Context, root *Root) error { - ctx, cancel := context.WithTimeout(ctx, AvailabilityTimeout) - defer cancel() - // we assume the caller of this method has already performed basic validation on the - // given dah/root. If for some reason this has not happened, the node should panic. 
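Both availability implementations deliberately panic on a malformed root, treating it as a programmer error rather than a network failure, so basic validation is the caller's job. A sketch of the expected call-site discipline; `sampleValidated` and its error wrapping are illustrative, not part of the deleted API:

```go
package example

import (
	"context"
	"fmt"

	"github.com/celestiaorg/celestia-node/service/share"
)

// sampleValidated validates the DAH up front; handing an invalid root
// straight to SharesAvailable would panic inside the implementations.
func sampleValidated(ctx context.Context, avail share.Availability, dah *share.Root) error {
	if err := dah.ValidateBasic(); err != nil {
		return fmt.Errorf("refusing to sample malformed DAH: %w", err)
	}
	return avail.SharesAvailable(ctx, dah)
}
```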
- if err := root.ValidateBasic(); err != nil { - log.Errorw("Availability validation cannot be performed on a malformed DataAvailabilityHeader", - "err", err) - panic(err) - } - - _, err := fa.rtrv.Retrieve(ctx, root) - if err != nil { - log.Errorw("availability validation failed", "root", root.Hash(), "err", err) - if format.IsNotFound(err) || errors.Is(err, context.DeadlineExceeded) { - return ErrNotAvailable - } - - return err - } - return err -} - -func (fa *FullAvailability) ProbabilityOfAvailability() float64 { - return 1 -} diff --git a/service/share/full_availability_test.go b/service/share/full_availability_test.go deleted file mode 100644 index 8f19915220..0000000000 --- a/service/share/full_availability_test.go +++ /dev/null @@ -1,38 +0,0 @@ -package share - -import ( - "context" - "math/rand" - "testing" - "time" - - "github.com/stretchr/testify/assert" -) - -func init() { - // randomize quadrant fetching, otherwise quadrant sampling is deterministic - rand.Seed(time.Now().UnixNano()) -} - -func TestSharesAvailable_Full(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - // RandFullServiceWithSquare creates a NewFullAvailability inside, so we can test it - service, dah := RandFullServiceWithSquare(t, 16) - err := service.SharesAvailable(ctx, dah) - assert.NoError(t, err) -} - -func TestShareAvailableOverMocknet_Full(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - net := NewTestDAGNet(ctx, t) - _, root := net.RandFullNode(32) - nd := net.FullNode() - net.ConnectAll() - - err := nd.SharesAvailable(ctx, root) - assert.NoError(t, err) -} diff --git a/service/share/interface.go b/service/share/interface.go deleted file mode 100644 index 6573f2cfc7..0000000000 --- a/service/share/interface.go +++ /dev/null @@ -1,25 +0,0 @@ -package share - -import ( - "context" - "errors" - "time" -) - -// ErrNotAvailable is returned whenever DA sampling fails. -var ErrNotAvailable = errors.New("da: data not available") - -// AvailabilityTimeout specifies timeout for DA validation during which data have to be found on the network, -// otherwise ErrNotAvailable is fired. -// TODO: https://github.com/celestiaorg/celestia-node/issues/10 -const AvailabilityTimeout = 20 * time.Minute - -// Availability defines interface for validation of Shares' availability. -type Availability interface { - // SharesAvailable subjectively validates if Shares committed to the given Root are available on the Network. - SharesAvailable(context.Context, *Root) error - // ProbabilityOfAvailability calculates the probability of the data square - // being available based on the number of samples collected. - // TODO(@Wondertan): Merge with SharesAvailable method, eventually - ProbabilityOfAvailability() float64 -} diff --git a/service/share/light_availability.go b/service/share/light_availability.go deleted file mode 100644 index 2c85eb87ad..0000000000 --- a/service/share/light_availability.go +++ /dev/null @@ -1,118 +0,0 @@ -package share - -import ( - "context" - "errors" - "math" - - "github.com/ipfs/go-blockservice" - format "github.com/ipfs/go-ipld-format" - - "github.com/celestiaorg/celestia-node/ipld" -) - -// DefaultSampleAmount sets the default amount of samples to be sampled from the network by lightAvailability. -var DefaultSampleAmount = 16 - -// LightAvailability implements Availability using Data Availability Sampling technique. 
-// It is light because it does not require the downloading of all the data to verify -// its availability. It is assumed that there are a lot of lightAvailability instances -// on the network doing sampling over the same Root to collectively verify its availability. -type LightAvailability struct { - bserv blockservice.BlockService - // disc discovers new full nodes in the network. - // it is not allowed to call advertise for light nodes (Full nodes only). - disc *discovery - cancel context.CancelFunc -} - -// NewLightAvailability creates a new light Availability. -func NewLightAvailability( - bserv blockservice.BlockService, - disc *discovery, -) *LightAvailability { - la := &LightAvailability{ - bserv: bserv, - disc: disc, - } - return la -} - -func (la *LightAvailability) Start(context.Context) error { - ctx, cancel := context.WithCancel(context.Background()) - la.cancel = cancel - - go la.disc.ensurePeers(ctx) - return nil -} - -func (la *LightAvailability) Stop(context.Context) error { - la.cancel() - return nil -} - -// SharesAvailable randomly samples DefaultSamples amount of Shares committed to the given Root. -// This way SharesAvailable subjectively verifies that Shares are available. -func (la *LightAvailability) SharesAvailable(ctx context.Context, dah *Root) error { - log.Debugw("Validate availability", "root", dah.Hash()) - // We assume the caller of this method has already performed basic validation on the - // given dah/root. If for some reason this has not happened, the node should panic. - if err := dah.ValidateBasic(); err != nil { - log.Errorw("Availability validation cannot be performed on a malformed DataAvailabilityHeader", - "err", err) - panic(err) - } - samples, err := SampleSquare(len(dah.RowsRoots), DefaultSampleAmount) - if err != nil { - return err - } - - ctx, cancel := context.WithTimeout(ctx, AvailabilityTimeout) - defer cancel() - - ses := blockservice.NewSession(ctx, la.bserv) - errs := make(chan error, len(samples)) - for _, s := range samples { - go func(s Sample) { - root, leaf := translate(dah, s.Row, s.Col) - _, err := ipld.GetShare(ctx, ses, root, leaf, len(dah.RowsRoots)) - // we don't really care about Share bodies at this point - // it also means we now saved the Share in local storage - select { - case errs <- err: - case <-ctx.Done(): - } - }(s) - } - - for range samples { - var err error - select { - case err = <-errs: - case <-ctx.Done(): - err = ctx.Err() - } - - if err != nil { - if !errors.Is(err, context.Canceled) { - log.Errorw("availability validation failed", "root", dah.Hash(), "err", err) - } - if format.IsNotFound(err) || errors.Is(err, context.DeadlineExceeded) { - return ErrNotAvailable - } - - return err - } - } - - return nil -} - -// ProbabilityOfAvailability calculates the probability that the -// data square is available based on the amount of samples collected -// (DefaultSampleAmount). 
-// -// Formula: 1 - (0.75 ** amount of samples) -func (la *LightAvailability) ProbabilityOfAvailability() float64 { - return 1 - math.Pow(0.75, float64(DefaultSampleAmount)) -} diff --git a/service/share/light_availability_test.go b/service/share/light_availability_test.go deleted file mode 100644 index 33e4051f0e..0000000000 --- a/service/share/light_availability_test.go +++ /dev/null @@ -1,44 +0,0 @@ -package share - -import ( - "context" - "testing" - - "github.com/stretchr/testify/assert" - - "github.com/celestiaorg/celestia-node/header" -) - -func TestSharesAvailable(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - // RandLightServiceWithSquare creates a NewLightAvailability inside, so we can test it - service, dah := RandLightServiceWithSquare(t, 16) - err := service.SharesAvailable(ctx, dah) - assert.NoError(t, err) -} - -func TestSharesAvailableFailed(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - // RandLightServiceWithSquare creates a NewLightAvailability inside, so we can test it - s, _ := RandLightServiceWithSquare(t, 16) - empty := header.EmptyDAH() - err := s.SharesAvailable(ctx, &empty) - assert.Error(t, err) -} - -func TestShareAvailableOverMocknet(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - net := NewTestDAGNet(ctx, t) - _, root := net.RandLightNode(16) - nd := net.LightNode() - net.ConnectAll() - - err := nd.SharesAvailable(ctx, root) - assert.NoError(t, err) -} diff --git a/service/share/set.go b/service/share/set.go deleted file mode 100644 index a89b09dabf..0000000000 --- a/service/share/set.go +++ /dev/null @@ -1,72 +0,0 @@ -package share - -import ( - "errors" - "sync" - - "github.com/libp2p/go-libp2p-core/peer" -) - -// limitedSet is a thread safe set of peers with given limit. -// Inspired by libp2p peer.Set but extended with Remove method. -type limitedSet struct { - lk sync.RWMutex - ps map[peer.ID]struct{} - - limit uint -} - -// newLimitedSet constructs a set with the maximum peers amount. -func newLimitedSet(limit uint) *limitedSet { - ps := new(limitedSet) - ps.ps = make(map[peer.ID]struct{}) - ps.limit = limit - return ps -} - -func (ps *limitedSet) Contains(p peer.ID) bool { - ps.lk.RLock() - _, ok := ps.ps[p] - ps.lk.RUnlock() - return ok -} - -func (ps *limitedSet) Size() int { - ps.lk.RLock() - defer ps.lk.RUnlock() - return len(ps.ps) -} - -// TryAdd attempts to add the given peer into the set. -// This operation will fail if the number of peers in the set is equal to size. 
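An aside on the formula above before the set implementation continues (TryAdd's body follows right after): whenever the extended square is actually unrecoverable, each uniformly random sample lands on a withheld share with probability of at least 1/4, so the chance of every one of s samples coming back clean is at most 0.75^s. A quick check of the default:

```go
package main

import (
	"fmt"
	"math"
)

func main() {
	const samples = 16 // DefaultSampleAmount in the deleted light_availability.go
	p := 1 - math.Pow(0.75, samples)
	fmt.Printf("probability of availability ≈ %.4f\n", p) // ≈ 0.9900
}
```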
-func (ps *limitedSet) TryAdd(p peer.ID) error { - ps.lk.Lock() - defer ps.lk.Unlock() - if _, ok := ps.ps[p]; ok { - return errors.New("share: discovery: peer already added") - } - if len(ps.ps) < int(ps.limit) { - ps.ps[p] = struct{}{} - return nil - } - - return errors.New("share: discovery: peers limit reached") -} - -func (ps *limitedSet) Remove(id peer.ID) { - ps.lk.Lock() - defer ps.lk.Unlock() - if ps.limit > 0 { - delete(ps.ps, id) - } -} - -func (ps *limitedSet) Peers() []peer.ID { - ps.lk.Lock() - out := make([]peer.ID, 0, len(ps.ps)) - for p := range ps.ps { - out = append(out, p) - } - ps.lk.Unlock() - return out -} diff --git a/service/share/share.go b/service/share/share.go deleted file mode 100644 index 85a1bb679f..0000000000 --- a/service/share/share.go +++ /dev/null @@ -1,175 +0,0 @@ -package share - -import ( - "context" - "fmt" - "math/rand" - - "golang.org/x/sync/errgroup" - - "github.com/ipfs/go-blockservice" - "github.com/ipfs/go-cid" - logging "github.com/ipfs/go-log/v2" - "github.com/tendermint/tendermint/pkg/da" - - "github.com/celestiaorg/celestia-node/ipld" - "github.com/celestiaorg/celestia-node/ipld/plugin" - "github.com/celestiaorg/nmt" - "github.com/celestiaorg/nmt/namespace" -) - -var log = logging.Logger("share") - -// Share is a fixed-size data chunk associated with a namespace ID, whose data will be erasure-coded and committed -// to in Namespace Merkle trees. -type Share = ipld.Share - -// GetID extracts namespace ID out of a Share. -var GetID = ipld.ShareID - -// GetData extracts data out of a Share. -var GetData = ipld.ShareData - -// Root represents root commitment to multiple Shares. -// In practice, it is a commitment to all the Data in a square. -type Root = da.DataAvailabilityHeader - -// Service provides access to any data square or block share on the network. -// -// All Get methods provided on Service follow the following flow: -// * Check local storage for the requested Share. -// * If exists -// * Load from disk -// * Return -// * If not -// * Find provider on the network -// * Fetch the Share from the provider -// * Store the Share -// * Return -// TODO(@Wondertan): Simple thread safety for Start and Stop would not hurt. -type Service struct { - Availability - rtrv *ipld.Retriever - bServ blockservice.BlockService - // session is blockservice sub-session that applies optimization for fetching/loading related nodes, like shares - // prefer session over blockservice for fetching nodes. - session blockservice.BlockGetter - cancel context.CancelFunc -} - -// NewService creates new basic share.Service. -func NewService(bServ blockservice.BlockService, avail Availability) *Service { - return &Service{ - rtrv: ipld.NewRetriever(bServ), - Availability: avail, - bServ: bServ, - } -} - -func (s *Service) Start(context.Context) error { - if s.session != nil || s.cancel != nil { - return fmt.Errorf("share: Service already started") - } - - // NOTE: The ctx given as param is used to control Start flow and only needed when Start is blocking, - // but this one is not. - // - // The newer context here is created to control lifecycle of the session and peer discovery. 
- ctx, cancel := context.WithCancel(context.Background()) - s.cancel = cancel - s.session = blockservice.NewSession(ctx, s.bServ) - return nil -} - -func (s *Service) Stop(context.Context) error { - if s.session == nil || s.cancel == nil { - return fmt.Errorf("share: Service already stopped") - } - - s.cancel() - s.cancel = nil - s.session = nil - return nil -} - -func (s *Service) GetShare(ctx context.Context, dah *Root, row, col int) (Share, error) { - root, leaf := translate(dah, row, col) - nd, err := ipld.GetShare(ctx, s.bServ, root, leaf, len(dah.RowsRoots)) - if err != nil { - return nil, err - } - - return nd, nil -} - -func (s *Service) GetShares(ctx context.Context, root *Root) ([][]Share, error) { - eds, err := s.rtrv.Retrieve(ctx, root) - if err != nil { - return nil, err - } - - origWidth := int(eds.Width() / 2) - shares := make([][]Share, origWidth) - - for i := 0; i < origWidth; i++ { - row := eds.Row(uint(i)) - shares[i] = make([]Share, origWidth) - for j := 0; j < origWidth; j++ { - shares[i][j] = row[j] - } - } - - return shares, nil -} - -func (s *Service) GetSharesByNamespace(ctx context.Context, root *Root, nID namespace.ID) ([]Share, error) { - err := ipld.SanityCheckNID(nID) - if err != nil { - return nil, err - } - rowRootCIDs := make([]cid.Cid, 0) - for _, row := range root.RowsRoots { - if !nID.Less(nmt.MinNamespace(row, nID.Size())) && nID.LessOrEqual(nmt.MaxNamespace(row, nID.Size())) { - rowRootCIDs = append(rowRootCIDs, plugin.MustCidFromNamespacedSha256(row)) - } - } - if len(rowRootCIDs) == 0 { - return nil, nil - } - - errGroup, ctx := errgroup.WithContext(ctx) - shares := make([][]Share, len(rowRootCIDs)) - for i, rootCID := range rowRootCIDs { - // shadow loop variables, to ensure correct values are captured - i, rootCID := i, rootCID - errGroup.Go(func() (err error) { - shares[i], err = ipld.GetSharesByNamespace(ctx, s.bServ, rootCID, nID) - return - }) - } - - if err := errGroup.Wait(); err != nil { - return nil, err - } - - // we don't know the amount of shares in the namespace, so we cannot preallocate properly - // TODO(@Wondertan): Consider improving encoding schema for data in the shares that will also include metadata - // with the amount of shares. If we are talking about plenty of data here, proper preallocation would make a - // difference - var out []Share - for i := 0; i < len(rowRootCIDs); i++ { - out = append(out, shares[i]...) - } - - return out, nil -} - -// translate transforms square coordinates into IPLD NMT tree path to a leaf node. -// It also adds randomization to evenly spread fetching from Rows and Columns. 
-func translate(dah *Root, row, col int) (cid.Cid, int) { - if rand.Intn(2) == 0 { //nolint:gosec - return plugin.MustCidFromNamespacedSha256(dah.ColumnRoots[col]), row - } - - return plugin.MustCidFromNamespacedSha256(dah.RowsRoots[row]), col -} diff --git a/service/share/share_test.go b/service/share/share_test.go deleted file mode 100644 index 9985ab0914..0000000000 --- a/service/share/share_test.go +++ /dev/null @@ -1,296 +0,0 @@ -package share - -import ( - "bytes" - "context" - _ "embed" - "encoding/hex" - "encoding/json" - "math" - mrand "math/rand" - "strconv" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "github.com/tendermint/tendermint/pkg/da" - core "github.com/tendermint/tendermint/types" - - "github.com/celestiaorg/celestia-node/ipld" -) - -func TestGetShare(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - n := 16 - serv, dah := RandLightServiceWithSquare(t, n) - err := serv.Start(ctx) - require.NoError(t, err) - - for i := range make([]bool, n) { - for j := range make([]bool, n) { - share, err := serv.GetShare(ctx, dah, i, j) - assert.NotNil(t, share) - assert.NoError(t, err) - } - } - - err = serv.Stop(ctx) - require.NoError(t, err) -} - -func TestService_GetSharesByNamespace(t *testing.T) { - var tests = []struct { - squareSize int - expectedShareCount int - }{ - {squareSize: 4, expectedShareCount: 2}, - {squareSize: 16, expectedShareCount: 2}, - {squareSize: 128, expectedShareCount: 2}, - } - - for _, tt := range tests { - t.Run("size: "+strconv.Itoa(tt.squareSize), func(t *testing.T) { - serv, bServ := RandLightService() - n := tt.squareSize * tt.squareSize - randShares := RandShares(t, n) - idx1 := (n - 1) / 2 - idx2 := n / 2 - if tt.expectedShareCount > 1 { - // make it so that two rows have the same namespace ID - copy(randShares[idx2][:8], randShares[idx1][:8]) - } - root := FillBS(t, bServ, randShares) - randNID := randShares[idx1][:8] - - shares, err := serv.GetSharesByNamespace(context.Background(), root, randNID) - require.NoError(t, err) - assert.Len(t, shares, tt.expectedShareCount) - for _, value := range shares { - assert.Equal(t, randNID, []byte(GetID(value))) - } - if tt.expectedShareCount > 1 { - // idx1 is always smaller than idx2 - assert.Equal(t, randShares[idx1], shares[0]) - assert.Equal(t, randShares[idx2], shares[1]) - } - }) - } -} - -func TestGetShares(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - n := 16 - serv, dah := RandLightServiceWithSquare(t, n) - err := serv.Start(ctx) - require.NoError(t, err) - - shares, err := serv.GetShares(ctx, dah) - require.NoError(t, err) - - flattened := make([][]byte, 0, len(shares)*2) - for _, row := range shares { - flattened = append(flattened, row...) 
- } - // generate DAH from shares returned by `GetShares` to compare - // calculated DAH to expected DAH - squareSize := uint64(math.Sqrt(float64(len(flattened)))) - eds, err := da.ExtendShares(squareSize, flattened) - require.NoError(t, err) - gotDAH := da.NewDataAvailabilityHeader(eds) - - require.True(t, dah.Equals(&gotDAH)) - - err = serv.Stop(ctx) - require.NoError(t, err) -} - -func TestService_GetSharesByNamespaceNotFound(t *testing.T) { - serv, root := RandLightServiceWithSquare(t, 1) - root.RowsRoots = nil - - shares, err := serv.GetSharesByNamespace(context.Background(), root, []byte{1, 1, 1, 1, 1, 1, 1, 1}) - assert.Len(t, shares, 0) - assert.NoError(t, err) -} - -func BenchmarkService_GetSharesByNamespace(b *testing.B) { - var tests = []struct { - amountShares int - }{ - {amountShares: 4}, - {amountShares: 16}, - {amountShares: 128}, - } - - for _, tt := range tests { - b.Run(strconv.Itoa(tt.amountShares), func(b *testing.B) { - t := &testing.T{} - serv, root := RandLightServiceWithSquare(t, tt.amountShares) - randNID := root.RowsRoots[(len(root.RowsRoots)-1)/2][:8] - root.RowsRoots[(len(root.RowsRoots) / 2)] = root.RowsRoots[(len(root.RowsRoots)-1)/2] - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, err := serv.GetSharesByNamespace(context.Background(), root, randNID) - require.NoError(t, err) - } - }) - } -} - -func TestSharesRoundTrip(t *testing.T) { - serv, store := RandLightService() - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - var b core.Block - err := json.Unmarshal([]byte(sampleBlock), &b) - require.NoError(t, err) - - namespace, err := hex.DecodeString("00001337BEEF0000") - require.NoError(t, err) - namespaceBefore, err := hex.DecodeString("0000000000000123") - require.NoError(t, err) - namespaceAfter, err := hex.DecodeString("1234000000000123") - require.NoError(t, err) - - type testCase struct { - name string - messages [][]byte - namespaces [][]byte - } - - cases := []testCase{ - { - "original test case", - [][]byte{b.Data.Messages.MessagesList[0].Data}, - [][]byte{namespace}}, - { - "one short message", - [][]byte{{1, 2, 3, 4}}, - [][]byte{namespace}}, - { - "one short before other namespace", - [][]byte{{1, 2, 3, 4}, {4, 5, 6, 7}}, - [][]byte{namespace, namespaceAfter}, - }, - { - "one short after other namespace", - [][]byte{{1, 2, 3, 4}, {4, 5, 6, 7}}, - [][]byte{namespaceBefore, namespace}, - }, - { - "two short messages", - [][]byte{{1, 2, 3, 4}, {4, 5, 6, 7}}, - [][]byte{namespace, namespace}, - }, - { - "two short messages before other namespace", - [][]byte{{1, 2, 3, 4}, {4, 5, 6, 7}, {7, 8, 9}}, - [][]byte{namespace, namespace, namespaceAfter}, - }, - { - "two short messages after other namespace", - [][]byte{{1, 2, 3, 4}, {4, 5, 6, 7}, {7, 8, 9}}, - [][]byte{namespaceBefore, namespace, namespace}, - }, - } - randBytes := func(n int) []byte { - bytes := make([]byte, n) - mrand.Read(bytes) - return bytes - } - for i := 128; i < 4192; i += mrand.Intn(256) { - l := strconv.Itoa(i) - cases = append(cases, testCase{ - "one " + l + " bytes message", - [][]byte{randBytes(i)}, - [][]byte{namespace}, - }) - cases = append(cases, testCase{ - "one " + l + " bytes before other namespace", - [][]byte{randBytes(i), randBytes(1 + mrand.Intn(i))}, - [][]byte{namespace, namespaceAfter}, - }) - cases = append(cases, testCase{ - "one " + l + " bytes after other namespace", - [][]byte{randBytes(1 + mrand.Intn(i)), randBytes(i)}, - [][]byte{namespaceBefore, namespace}, - }) - cases = append(cases, testCase{ - "two " + l + " bytes messages", 
- [][]byte{randBytes(i), randBytes(i)}, - [][]byte{namespace, namespace}, - }) - cases = append(cases, testCase{ - "two " + l + " bytes messages before other namespace", - [][]byte{randBytes(i), randBytes(i), randBytes(1 + mrand.Intn(i))}, - [][]byte{namespace, namespace, namespaceAfter}, - }) - cases = append(cases, testCase{ - "two " + l + " bytes messages after other namespace", - [][]byte{randBytes(1 + mrand.Intn(i)), randBytes(i), randBytes(i)}, - [][]byte{namespaceBefore, namespace, namespace}, - }) - } - - for _, tc := range cases { - tc := tc - t.Run(tc.name, func(t *testing.T) { - // prepare data - b.Data.Messages.MessagesList = make([]core.Message, len(tc.messages)) - var msgsInNamespace [][]byte - require.Equal(t, len(tc.namespaces), len(tc.messages)) - for i := range tc.messages { - b.Data.Messages.MessagesList[i] = core.Message{NamespaceID: tc.namespaces[i], Data: tc.messages[i]} - if bytes.Equal(tc.namespaces[i], namespace) { - msgsInNamespace = append(msgsInNamespace, tc.messages[i]) - } - } - - namespacedShares, _, _ := b.Data.ComputeShares(uint64(0)) - - // test round trip using only encoding, without IPLD - { - myShares := make([][]byte, 0) - for _, sh := range namespacedShares.RawShares() { - if bytes.Equal(namespace, sh[:8]) { - myShares = append(myShares, sh) - } - } - msgs, err := core.ParseMsgs(myShares) - require.NoError(t, err) - assert.Len(t, msgs.MessagesList, len(msgsInNamespace)) - for i := range msgs.MessagesList { - assert.Equal(t, msgsInNamespace[i], msgs.MessagesList[i].Data) - } - } - - // test full round trip - with IPLD + decoding shares - { - extSquare, err := ipld.AddShares(ctx, namespacedShares.RawShares(), store) - require.NoError(t, err) - - dah := da.NewDataAvailabilityHeader(extSquare) - shares, err := serv.GetSharesByNamespace(ctx, &dah, namespace) - require.NoError(t, err) - require.NotEmpty(t, shares) - - msgs, err := core.ParseMsgs(shares) - require.NoError(t, err) - assert.Len(t, msgs.MessagesList, len(msgsInNamespace)) - for i := range msgs.MessagesList { - assert.Equal(t, namespace, []byte(msgs.MessagesList[i].NamespaceID)) - assert.Equal(t, msgsInNamespace[i], msgs.MessagesList[i].Data) - } - } - }) - } -} - -// this is a sample block from devnet-2 which originally showed the issue with share ordering -//go:embed "testdata/block-825320.json" -var sampleBlock string diff --git a/service/share/testdata/block-825320.json b/service/share/testdata/block-825320.json deleted file mode 100644 index f159fdcc96..0000000000 --- a/service/share/testdata/block-825320.json +++ /dev/null @@ -1,659 +0,0 @@ -{ - "header": { - "version": { - "block": 11 - }, - "chain_id": "devnet-2", - "height": 825320, - "time": "2022-04-14T07:52:33.890556391Z", - "last_block_id": { - "hash": "7FA631D8E8DA005132615B5BB9DD715E66ABE18DB3BF4A891242CCF090D629D1", - "parts": { - "total": 1, - "hash": "9C2705130B4D8A9A9FAD64D462944EABC281B5AC36ADBFA6E3A48A7F347DD8C1" - } - }, - "last_commit_hash": "00AAFEB5461158D198967E750E108367EE6D0739E69C0AC2F4D102E0F87D2595", - "data_hash": "2AE354BAA0A6C75EDAAB7AA9D6FAAF873FD6FF3E0884DD479306EC6C1AAFD796", - "validators_hash": "A76865C38C998C200C6056F8B1D08CB4009683B1110ACE85D95B9889A47650ED", - "next_validators_hash": "A76865C38C998C200C6056F8B1D08CB4009683B1110ACE85D95B9889A47650ED", - "consensus_hash": "048091BC7DDC283F77BFBF91D73C44DA58C3DF8A9CBC867405D8B7F3DAADA22F", - "app_hash": "E2DD3E32D66B330B16BCAB389DA17C2B6BC1B082B2B58ED0A90E8197E36A59A6", - "last_results_hash": 
"E3B0C44298FC1C149AFBF4C8996FB92427AE41E4649B934CA495991B7852B855", - "evidence_hash": "E3B0C44298FC1C149AFBF4C8996FB92427AE41E4649B934CA495991B7852B855", - "proposer_address": "2CB2349B8516D69839FE49892B1AF6909D7361A7" - }, - "data": { - "txs": [ - "CpoBCpcBCiMvY29zbW9zLnN0YWtpbmcudjFiZXRhMS5Nc2dEZWxlZ2F0ZRJwCixjZWxlczEycGd2eWRsOHQ0c3o4NjdzYWNwOWRnOHJhNngydmUwa2d5NjZuMhIzY2VsZXN2YWxvcGVyMTJwZ3Z5ZGw4dDRzejg2N3NhY3A5ZGc4cmE2eDJ2ZTBrZGY0Y3l6GgsKBWNlbGVzEgIxNRJaClIKRgofL2Nvc21vcy5jcnlwdG8uc2VjcDI1NmsxLlB1YktleRIjCiECEyDrMs7ZgCs9VLJY95YHFhUQHNNGMVnEvLnA+fgKuY0SBAoCCAEYr5oPEgQQjPwHGkBRV6kkcTFf+tZVGiSYnm/cy18RQ4Hq/k7NU5JQpJjstln/lCCx2mvjzZENdzoLB6e/ojG1tXPGLApLOtsDG071", - "CiDfWZrO6VHoRRehPruO7n8G95bt9EZXM1sPzskKRBlPXxKnAgp8CnoKGS9wYXltZW50Lk1zZ1BheUZvck1lc3NhZ2USXQosY2VsZXMxZ2hodmUyaG1hOW03Y3F1YWRtNTdqZzg3emV4Znhla3poc3B4OGcSCAAAEze+7wAAGIAEIiCB+JF+iC6YVjh3tYKgtF2dIkzNlxVCsRaFZWMmBedAqRJlClEKRgofL2Nvc21vcy5jcnlwdG8uc2VjcDI1NmsxLlB1YktleRIjCiEChGDFtCCOMQBEoVo9SwdQtb7tK9RcHFI/S6mrajefVW0SBAoCCAEYjwwSEAoKCgVjZWxlcxIBMRCAiXoaQGaO5xm+sB5eQN69UO01B0dbkHTJvNdyCMfk2VG3O012aK6eRB4/70zgZ00DLQSFpxZYNQwWZC/VuqRLj1a1e+g=" - ], - "intermediate_roots": { - "intermediate_roots": null - }, - "evidence": { - "evidence": [] - }, - "msgs": { - "msgs": [ - { - "NamespaceID": "AAATN77vAAA=", - "Data": "CvgBCgIICxIIAAATN77vAAAYAyDDpd+SBiogbVQ+9SML4CjY2Mg0LGwm5W0ZoQOumJUIkb0S4PjiNqQyIMKzG0e9Tl6QJUiIGK1WPPZ8blyB6AGq34E9XcvWe/A/OiAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEIgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABKILUe8T6ZAQKnvyvNP0iFQbQ5uNwqNTmfueick6JOJ0tyUiDjsMRCmPwcFJr79MiZb7kkJ65B5GSbk0yklZkbeFK4VVoUX9WPtt7dVrsuZpPR/svrM19RW8kSABpmCAISIG1UPvUjC+Ao2NjINCxsJuVtGaEDrpiVCJG9EuD44jakGkB78QszY4BV8F393pSze98ORvKUbNAdNaXwSK8HhfFUM30CwXlo9/BrbJVawCExfj7w2S6QmuhUv0S3KQ2Yh5YIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" - } - ] - } - }, - "last_commit": { - "height": 825319, - "round": 0, - "block_id": { - "hash": "7FA631D8E8DA005132615B5BB9DD715E66ABE18DB3BF4A891242CCF090D629D1", - "parts": { - "total": 1, - "hash": "9C2705130B4D8A9A9FAD64D462944EABC281B5AC36ADBFA6E3A48A7F347DD8C1" - } - }, - "signatures": [ - { - "block_id_flag": 2, - "validator_address": "87265CC17922E01497F40B701EC9F05373B83467", - "timestamp": "2022-04-14T07:52:33.880851041Z", - "signature": "iYUZXYv5FuxC/XZIZ2dA9scTIZXz5dCsy7oSDow7r45KqxytygzleZ3u96X66MV48+G7pNAgyEAdSVku8undAQ==" - }, - { - "block_id_flag": 2, - "validator_address": "D345D62BBD18C301B843DF7C65F10E57AB17BD98", - "timestamp": "2022-04-14T07:52:33.882738752Z", - "signature": "Fk70wx+AsLxFSJVKH2cUwGgplVgkHmMIY0yoy/WZd+Mmde8mqOrQBNozR2/yvyQvmfcSFl5cEDwgsmoEZxNkAw==" - }, - { - "block_id_flag": 2, - "validator_address": "604377BC74F4F27274825A564761D9875B31AF05", - "timestamp": "2022-04-14T07:52:33.906538296Z", - "signature": "C/9OpYWM0cILxwpV8j4rWpeq5T95+Wj3Pv9ntbv+zmE5lHFbuTL9IEPXsnNFC3lTO/6AWZDhiiiBFBdXx5RZDw==" - }, - { - "block_id_flag": 2, - "validator_address": "D6084BAB9BCECCA7A82F6D1F019B315A056A990F", - "timestamp": "2022-04-14T07:52:33.934987498Z", - "signature": "4Wqvf8KEMTd/Gv/xIDTOBvMxFrE3GANF/C3VO+gMM3lKTQPjN4z2bZvO3r3jYpwUm13CjusDPEFferNa2FcDDQ==" - }, - { - "block_id_flag": 2, - "validator_address": "B80E80B249B9AC1F192DF445A68A1B69D89458C0", - "timestamp": "2022-04-14T07:52:33.905001033Z", - "signature": "SxM4iUI9+iUnmCzQBepP2uHrka5Qhos7myhcC7MeiHSorWf0gXFGpxV0KGMlShiAIjEqwIr6wMRQHqV6Fkf/BA==" - }, - { - 
"block_id_flag": 2, - "validator_address": "89B63255AE88218533F9727D7CFDA0376A8C9A67", - "timestamp": "2022-04-14T07:52:33.954724715Z", - "signature": "rZkx1meoaYHHrEa97sNJKibxktE7OSvTxyGC3ojOhDsG/mMPJXvR9QnHTIn9myuEL9+cUuXjnjrEeL6Xl16tDQ==" - }, - { - "block_id_flag": 2, - "validator_address": "C25C9BA4A8C2D71E6853A1227EBE6B1D7A07B5A7", - "timestamp": "2022-04-14T07:52:33.890556391Z", - "signature": "b4vt1uR5a9oTsiRsxxIahjc0lWyJ+hzCxVpJQaAU9yah3OFRH7nfLsPOgZVSky+odnArrP/rM6yqPllr2tMzAg==" - }, - { - "block_id_flag": 2, - "validator_address": "7635E9087D9875E784581BE236E4DFCBA71CE556", - "timestamp": "2022-04-14T07:52:33.92779807Z", - "signature": "I4Hfeufh+xNMwiOhjxds+tJ5E+s1UCnnvTq/rNC4oko3qbGci4xQsLNer/bpf3P1Fd2uHHYq9kE+M/9yv0TgCA==" - }, - { - "block_id_flag": 2, - "validator_address": "2CB2349B8516D69839FE49892B1AF6909D7361A7", - "timestamp": "2022-04-14T07:52:33.871894353Z", - "signature": "yppE7QcDrRB2ejnUURnS5I1GZJlO9GaChSeKIkLiAz8X1Rmmd3HTtRGJyc1a0/V2xMOu/ChYjQRBSVXB8ThKBQ==" - }, - { - "block_id_flag": 2, - "validator_address": "DEC2642E786A941511A401090D21621E7F08A36D", - "timestamp": "2022-04-14T07:52:33.895698256Z", - "signature": "f+6qaGhh8qLW+DjEfIc8w0yoFjgxQkWL/BVU1z/YF42ByaVpYL8GKg+myb5wGznwsLG46nwGgvhaB9cV0SwXCg==" - }, - { - "block_id_flag": 2, - "validator_address": "03F1044A6DF782189C7061FF89146B3D33608F17", - "timestamp": "2022-04-14T07:52:33.875135897Z", - "signature": "5I0xK60/DwwVzRC1oA8FqZo7XrArA9R5jTYg08TXMKL3Dd/UCacyT9TVxaG/ekyMB0lCGajkccjWdI2OyqTnDQ==" - }, - { - "block_id_flag": 2, - "validator_address": "BAD6693EFA5333479136B5B082C3C5A28FA97491", - "timestamp": "2022-04-14T07:52:33.916594881Z", - "signature": "7Ih2MIVMCUL78lPWZ22PGeb+TE1YKbrszO3Z/0awoEEmMBeWbRQBZgM6D5uWUbqc5SQ5fj2e2kYz8u04lFhfCA==" - }, - { - "block_id_flag": 2, - "validator_address": "EAB5D30E6F271470A37635E9889D613D41DED830", - "timestamp": "2022-04-14T07:52:33.822011898Z", - "signature": "ZcPUxJD52JOwXotNM+GTkivFcifihb9wq8xDQaBVKaJd7drQjHSlUMvCnQuDXcMlE0D2x909q+P2WNc0NkM1Dw==" - }, - { - "block_id_flag": 2, - "validator_address": "A85A278B256FF30E21057079F675644518010CDA", - "timestamp": "2022-04-14T07:52:33.948758137Z", - "signature": "ohveTMYCrItd9sGDPMPtALuigGVDi+uJANV/CUJLzqwBhIxNsABG5WhBDdrfZRI5vHzPfSUNtMbRAt+CpzUJBQ==" - }, - { - "block_id_flag": 2, - "validator_address": "4342666C1595411A1C58CD949FB607F1DC473CFE", - "timestamp": "2022-04-14T07:52:33.913213874Z", - "signature": "CxX/ysvqpm8nsHLptr/6Q9PFCKgIxvswTcv6KJqiS5ymk8Usi9u5pGZNspu/mUgVAgJOT1yq/wHB3NL7FsklDA==" - }, - { - "block_id_flag": 2, - "validator_address": "09F4D1088EAB7E6EA2A87EE087A1467D23F22367", - "timestamp": "2022-04-14T07:52:33.977741332Z", - "signature": "IPV62zvcVPGN2BqTf0l9OuLSbOf+fixNQgG5TcA8D39wlz1Midwx1LcfVxXS2sQYpUDxoHro5QTG0ha95mnBAQ==" - }, - { - "block_id_flag": 2, - "validator_address": "A7B9EC7F97BD9F35E92F9CF873CE2CC618488BA2", - "timestamp": "2022-04-14T07:52:33.955291444Z", - "signature": "qY2HRpwAdA7JG3qqXzUQn1ox/hmo+BnnSID5ctslSsvwMZujxBCsx+7dMrDoIqnwP82IjI9wu8wvdIpIzXUCCQ==" - }, - { - "block_id_flag": 3, - "validator_address": "46387FF5D4E466F61891D04837E4C85294DB1FA5", - "timestamp": "2022-04-14T07:52:37.945040122Z", - "signature": "u7YUoivwzs5uN+TQTvHRB7eyIhGOzOzpPUyuMWq5p07GtdQJ4Z8j8NjmFyAvQKSl/++2F7h7IehmEMv9zZXODA==" - }, - { - "block_id_flag": 2, - "validator_address": "6B8A4D59A6C9B32622BE6E13CA3C9B89890DF317", - "timestamp": "2022-04-14T07:52:33.896204322Z", - "signature": "2Le62jKu6sdh83iiW+fWJG5m0UPcLcUId5w/JnVqYs628sbmcZmSf/xPuvniENWVQCRhYQrE59u8TlxDUp0zDA==" - }, - { - 
"block_id_flag": 2, - "validator_address": "B85CC62F660DC5416C2A19733E7FB4147D5A1BDC", - "timestamp": "2022-04-14T07:52:33.965734652Z", - "signature": "li1owrnyoTGd0gW82udA6Dj/sHu6aPQ475RkhMttH+SCRF+zjxJv3eXQ+jQ/QftaNJr/f5I+iswqYhjisgU2AA==" - }, - { - "block_id_flag": 2, - "validator_address": "2EC4B0E7E14949855ACD6976D1E11C2080DF0AE3", - "timestamp": "2022-04-14T07:52:33.883919347Z", - "signature": "/+XyGFufBUhCJB5CItF7QULJuQ6JuoHwY5CR86fE29ahEtjCXqvnbHA/hL7hVDZ0yZ3OGng2/M8mZ0tiuXzgBg==" - }, - { - "block_id_flag": 2, - "validator_address": "3E5203383282356A08F17ADF86BEE43482D7821C", - "timestamp": "2022-04-14T07:52:33.948533772Z", - "signature": "ZmsLZdA7DcCBuZDCrCLXyotdHMj3BTJxMuXjWbqmhYTwFdxw6IeUapAgFI9HmgQYIe470W5T/XQXLM8pKp0zCA==" - }, - { - "block_id_flag": 2, - "validator_address": "DA1662059918F943B478CC432345E9CABD0B84D0", - "timestamp": "2022-04-14T07:52:33.966697203Z", - "signature": "w33lYcxlgZCz9c+yMJrR4fHnx6JXdA3Y9C1xq5XnBr61unx5mbD3PwdXJoxekaequdtuwGKOwlm3Ilf7y/JgBQ==" - }, - { - "block_id_flag": 2, - "validator_address": "0EACA93ECFF404F8C6675A05C6CEEBDB60E94E67", - "timestamp": "2022-04-14T07:52:33.903813219Z", - "signature": "DSCFvK8rOyvYqMFmgHz4ifuXHB425TysBIEN1a15j1NBKJTe47AOeyimRxD0QcTwDtMqlVqDP0RQmY/FqbxIAg==" - }, - { - "block_id_flag": 2, - "validator_address": "3050EC11FD87A5650113CCC2D88B4CEB4A33F4E9", - "timestamp": "2022-04-14T07:52:33.902721724Z", - "signature": "51WrpCpPyktY5t7lsbsPzWgfKzJjOucs26IeNtymOWi0HRBdG03s6UCTKGxJwpagTw/+fXjsW9Tpa1UTRM5/CQ==" - }, - { - "block_id_flag": 2, - "validator_address": "F6204971AA2A655087E16146A39D730336783EC7", - "timestamp": "2022-04-14T07:52:33.894105731Z", - "signature": "XO68atiTR0wuTLo0hw+6kdUuyBUrDhJtWpvGwYDr3K1hPjcM9R44k8OXbyTcvLsv7GTK85FY2XOTi+6LrMzRDA==" - }, - { - "block_id_flag": 2, - "validator_address": "D83F63EDC86900F07BF03F76777E748F637D30B6", - "timestamp": "2022-04-14T07:52:33.891952948Z", - "signature": "ylRqDHzF0vDjr1L3QZCxydB2bKgYL7W4BO17ETYLPqlNSpu3dbcjVK4+PPaDjB4pWYYl8o5yoVw7n22u+1hlBw==" - }, - { - "block_id_flag": 2, - "validator_address": "F617C06738A76B99B51F1B2F23F7F606CFFAEC7E", - "timestamp": "2022-04-14T07:52:33.895358159Z", - "signature": "AUGmFAecT+sqlPOjY0f7Y/BLw2TUB3UCsomw2XM0xinY37f3FNflxQWIkodgI98Rt5abQ6gsOkpnha83mQ5NAw==" - }, - { - "block_id_flag": 2, - "validator_address": "8ED38173BF8EAE85955C1CE8CC471C87A3950309", - "timestamp": "2022-04-14T07:52:33.927364939Z", - "signature": "4Qr/xA1s2zRnQIWuhlaWjxmSvPP4PLVUrOlepIJOkOD5IedSGQuwtAL2MsJ4l8cXrfx3Dz85JqiEejBOdaHzAA==" - }, - { - "block_id_flag": 2, - "validator_address": "352E90351E5605C7C991BB4B4C076196851795CC", - "timestamp": "2022-04-14T07:52:33.943534076Z", - "signature": "RWD2HzMukwL6bDB8DwRX8yspMNDQL61+aHtFBzB+fttpMZA7in+iyvW8UKbx5H3ZZh1BZSQ+pgKJIuyzkldrCg==" - }, - { - "block_id_flag": 2, - "validator_address": "628C6B13D44802869464D0763F7B946B105C1C4F", - "timestamp": "2022-04-14T07:52:34.014354318Z", - "signature": "3OwaoPDGYCYqUXuKU+FX6bCfpyFbI85NNDWZ3PPeAJ9n3de0GCR8QGBfkUc0g0h8nnXqrNidkt2r0HLH4llKDg==" - }, - { - "block_id_flag": 2, - "validator_address": "6518C991A8C3F9C94899423D32623E8E1069649B", - "timestamp": "2022-04-14T07:52:33.947923197Z", - "signature": "lDsqlaxL4Vpn4bL/U5tjsDg5qjScIo1hFRrm5FihX/TAAhL812TM/OTjAITbEccNBCcjJhFrA0KnXPGDjFadCg==" - }, - { - "block_id_flag": 2, - "validator_address": "7F8E54ECF4E33A0A9B07394C4DE15F57AB86A648", - "timestamp": "2022-04-14T07:52:33.893345159Z", - "signature": "gOg13rDOjEg24JTJCug5I8CfUbutDnX2/70YOUydV/4uGiMyHe9UU4hcNSRXMsX1ZYye77DxGRq4SOG6nHLnDQ==" - }, - { - 
"block_id_flag": 2, - "validator_address": "B14790FAFAB929B56513EBE7BC5ABB25D1883CE3", - "timestamp": "2022-04-14T07:52:33.93583778Z", - "signature": "KuOyvyTYRLuq0emzAXQRHnyaWLe8ozXZR+109pOVKRk+UFut0IAUxPjynCz8PKStv/XkyDbS0w3StsNygfO+Cw==" - }, - { - "block_id_flag": 2, - "validator_address": "C23DD7A2BDB836A97C09046FB8247871BD5741CC", - "timestamp": "2022-04-14T07:52:33.984675309Z", - "signature": "IJaB0A+79QXvy3fgZc/8HbPVSymDuSIRf0oQJq8dZjBlsRLrr4GGs6eBauqID52+kAKHPNO/SH+OuRHNCk6FCA==" - }, - { - "block_id_flag": 2, - "validator_address": "33479A3C40E3827C2CB1775FF69B1F8C01F1C97F", - "timestamp": "2022-04-14T07:52:33.932691066Z", - "signature": "B0sot4lgyxaivEbymHFzUh9tUfQoHNu7B82vavwnGAVs+BsPVC5yqsUFL3UviDuBUUdz9A4LdzsAHkcCn/TEBA==" - }, - { - "block_id_flag": 2, - "validator_address": "2B5CA1444614CD5693259A2836F237E695C20374", - "timestamp": "2022-04-14T07:52:33.931461341Z", - "signature": "wBkQGvp7HnBl0GcLqL/Yf9+aFJX4cL2c02yuJbiSJbxprPLb2fGVezwkrQxZevotJ5gnrIJkYVytW80Nb71nCw==" - }, - { - "block_id_flag": 2, - "validator_address": "B315C0E5926653484495AB4D38055A15C0BF43FE", - "timestamp": "2022-04-14T07:52:34.00036607Z", - "signature": "jjDrFCjUsXra3qe7YQSgyROPIPLKyNUa6F/WRTTQTyuip1fU0R4WRK6gxVKTsgcyWJF3KQfVg+TxNPsfPs06Cg==" - }, - { - "block_id_flag": 2, - "validator_address": "8700D7FEC0879EFA716909F686176D87B346B40E", - "timestamp": "2022-04-14T07:52:33.874461806Z", - "signature": "piUqWW89xUYRjsEnxBEwROgFek8kCW4faObb7VOmm0ub7Nq60hbDhpxges1tjLhByGXc6rh6o3j21sFIc2ZhDw==" - }, - { - "block_id_flag": 2, - "validator_address": "81875DA3D4465AFC9769B351F2A038304DD646FC", - "timestamp": "2022-04-14T07:52:34.008911756Z", - "signature": "GA/wwpF5IZQj2ZCdF9CbqgP58uwxYkz/6adcpvDMsx2G1Z9f/1Es128GIkc6irxfIc8HMqRCrg9kBniLilCPBw==" - }, - { - "block_id_flag": 2, - "validator_address": "0E0402827BF366AEB78362B097BC394C4AB44E81", - "timestamp": "2022-04-14T07:52:33.8677605Z", - "signature": "ae/UG3XI0zPgzp6AhjjiLqlpJakdKIOkyFduRtu3BDnGh8MuiWbuwYDNLpZPxqor8CuBtfFQQN+4Wjgls4lnBQ==" - }, - { - "block_id_flag": 2, - "validator_address": "AFB99D32657A2034F2C5400D78C66E8E507AF941", - "timestamp": "2022-04-14T07:52:33.933281067Z", - "signature": "U6su6S7ViquugX/6BNnTeuYh3G7p9hS65rVoq1pBAzgUN65/ZD6CUMi+Rv8ZSr9jaILSiIieJy1JlGb3wecMAQ==" - }, - { - "block_id_flag": 2, - "validator_address": "C5FE3F4F3091BFE32A61E50EAC660E9D7A2EF7A6", - "timestamp": "2022-04-14T07:52:33.847676476Z", - "signature": "VfiTA1FogVv5IO5AOlhMMQU8e1VOnsmSgB5gLxsGlGoeXVj/Pp+3mrQIE4thUPGRXyZvDI+QaFB8l3iDEQMjAg==" - }, - { - "block_id_flag": 2, - "validator_address": "C4456081E1163BF2323D3D354EDA3AB614F24F83", - "timestamp": "2022-04-14T07:52:33.888619411Z", - "signature": "c/CPqU0JG0Xy5PrfP8sMVjqIUxk4GijE6C80zROH0VOvRQaQ0E288MM8N4ZISQK7AwiEMF+Tcx6wzo/NQDXcBg==" - }, - { - "block_id_flag": 2, - "validator_address": "7EED60460B269EFA5D8B08A26C0E1D814AFF64DC", - "timestamp": "2022-04-14T07:52:33.937532147Z", - "signature": "WJ/UbF1dXQ1NY2bZzw/WcZMTWL4Qjc8+hn7VA3V1/ar2NWu09OigcKSMVNnM7OwRZyX0WIgjTnKysD9rTzuQBQ==" - }, - { - "block_id_flag": 2, - "validator_address": "23133C4595E8DF70B816B5FE370FC2A2236D289E", - "timestamp": "2022-04-14T07:52:33.902898336Z", - "signature": "czCfr3VcSaFEbPhI3DzXpD+t/QYhhvwvzeU0OdbcGK9uTmCp9K7jcLiaUVc6Dx2b0C5BOa+YYlVeT6slIQCLAw==" - }, - { - "block_id_flag": 2, - "validator_address": "3CD42258ED58B0251D3721E9C7794FEF02DEEB65", - "timestamp": "2022-04-14T07:52:33.943636445Z", - "signature": "nJvJKkuMLnSRmF9czfGooEeAMxTWBstIIaLDU6aiWgknhAQrZN3/1VUmxc9o2fJG2HJpFRrTh06aKSft/Uy+Cg==" - }, - { - "block_id_flag": 
2, - "validator_address": "A76175C0D7D55FDC57E7533A081F8A9B186D718B", - "timestamp": "2022-04-14T07:52:33.899216378Z", - "signature": "S6rnEmrurT0JsYvvrairdn5VOOp9mLD6jgBPYZAbC/x8errZMQdm08c5ooyEGYHSRHuuc2g7eqjVH50i9wBnAw==" - }, - { - "block_id_flag": 2, - "validator_address": "BE756A959F2CBE58542FF246D6D6BB8E728191B3", - "timestamp": "2022-04-14T07:52:33.934581952Z", - "signature": "dXnBT/5A+8CBWH36/GKvhFdiZktHNmNkjolRckFzXiNxcL+wuU3qjZMqVmohxh/kcKBxIuV7RigsegIg1RmJAw==" - }, - { - "block_id_flag": 2, - "validator_address": "7FA1FB4D6C52A814F112B963AA88EF427FE7CD72", - "timestamp": "2022-04-14T07:52:33.886367881Z", - "signature": "2BIkeQczV+3zZjmP14CusZHwC0Dvn296CokyGs2PJeiGe4VwjSesKzl8ptxI1i02tWwqYoTeBzxUReh1aGGGCQ==" - }, - { - "block_id_flag": 2, - "validator_address": "8911065B7BA2C2235A4CB996B1A2C433E69A617F", - "timestamp": "2022-04-14T07:52:33.867223901Z", - "signature": "fDoCxLgaiMFrySOJxOq5qnThJNHYQMCS/OQ1mfAy2eCOsoPsfzs0eGr94D9K4RZeK3PvwOXAN9JWdX9SVXqLDQ==" - }, - { - "block_id_flag": 2, - "validator_address": "8A033BD3830BF187779409E1651F4E82B6B8E5D8", - "timestamp": "2022-04-14T07:52:33.882866438Z", - "signature": "hGbNdDol/Ts5mM4ePoB27EiWMDLUKJzcdEGatXxP7wrP+i5sA6xif7EFaXzmDz0n4M8RSRpds6BHPOHog8ovAg==" - }, - { - "block_id_flag": 2, - "validator_address": "F214F894751C8B7BAC62FB1DE98D19ED1F98604A", - "timestamp": "2022-04-14T07:52:33.946890864Z", - "signature": "ZLPYgqm5SHjgxwyc1CWNBxqqWvVZ9OcrLdnUVciNFYsRKC3NOyNpRbYtyLgVNSZPQwSs5UV/sOJELuD4Z8Z6BQ==" - }, - { - "block_id_flag": 2, - "validator_address": "25A30FE88ADE7817AA86D94D056A7BBB97540D18", - "timestamp": "2022-04-14T07:52:33.833070775Z", - "signature": "aH+tpm0v4ODvsq9lNFd9D7lp3Sg0v4dXFepSZ+0DoE3a798lFRhauzAnzMGHg3bC5hiTv2AW/No5T/ERl33nBQ==" - }, - { - "block_id_flag": 2, - "validator_address": "4A28AAB7BB3117858A129DED53D812558F6C8A02", - "timestamp": "2022-04-14T07:52:33.957379945Z", - "signature": "w5klvmWoKZuzf94ebjwDa5qVmrcgX/SOAnWM6kIzzUrkI/MjXZjMn5WePtI6dhF+0c3ttAiEfQJN8UAPusRvCw==" - }, - { - "block_id_flag": 2, - "validator_address": "E7DC8AD584DAECB46C42F972D6D3E4D4B65DD6B6", - "timestamp": "2022-04-14T07:52:34.219257162Z", - "signature": "e7quVi6UzZ2Cw/PQtSawQ7CAT7BzvW4YwuS5Wczzyu0sWvg5szZdFUmXTe5q6sdl+4qjq6sPAhuajdUajeBCAQ==" - }, - { - "block_id_flag": 2, - "validator_address": "33F2583A28E7227356AD6B423AEE8325CEB9DA52", - "timestamp": "2022-04-14T07:52:33.921088338Z", - "signature": "4uIzXiWswKT5JU1FyzA6rjT2Lv5ni3joGHF0qEKuavRKFTXMin8CPm/GRiU0UTZmhYzAh4d96vGa3Es2/bQjDA==" - }, - { - "block_id_flag": 2, - "validator_address": "09EE417F173A1F340E5C53A42C41B3E878890340", - "timestamp": "2022-04-14T07:52:33.881423271Z", - "signature": "ao2Gc4fAqM+3LnwzN6RgkhTI8wuvorCx2kqrc2BaAV4+ibG4qUUiYEOgfhMObDf32F6I6j/vPnOGRDlqDKu/AQ==" - }, - { - "block_id_flag": 2, - "validator_address": "2AAB6CA116BC52DCF7A32569114291FF1D7A32CA", - "timestamp": "2022-04-14T07:52:33.886147184Z", - "signature": "2vJIdQXYpdrElEsep2hFrq/DsJrgZqXE+NsQCNZazvbRnLl9vr4HrnsLnLSIBkbl9xel9T2/KL4Aa8/NMSesCw==" - }, - { - "block_id_flag": 2, - "validator_address": "6060A27F639300584BD095F881C2A8714538F0CB", - "timestamp": "2022-04-14T07:52:33.942592602Z", - "signature": "p4rVbiLaaclEn0CMeoHNRaRwDyaRWQWztTfrR7xhLeZ7O9nDuWtftPTTA1MuwzsUeGnL6qMIPlIIjKF9irRiAg==" - }, - { - "block_id_flag": 2, - "validator_address": "C3671ED4ED22EB622312E76615CE286ABF0AC53D", - "timestamp": "2022-04-14T07:52:33.879020462Z", - "signature": "XTLdspKUnlw1SHiw2NYb4MZQZFgZq+OOFOHG53Aq/86o3oGyN57mY82yS+RLmSEet6n+KKW4WuR8+2KJCJuoAA==" - }, - { - "block_id_flag": 2, - 
"validator_address": "F5F246DEA8E27C59F6D0884CCAB31ABDB589F4CE", - "timestamp": "2022-04-14T07:52:33.89889915Z", - "signature": "HXLuWVGBRO6B/olGgAl4W6HGNmbyp5cmSLVO4ZPde9miRhvVzFU/NurytMNhGBUBu5+z8Sa+Zu+bG39OlPdBCQ==" - }, - { - "block_id_flag": 2, - "validator_address": "0C3A78D0F3B2ED2E9ED3F3DB6289185D40742870", - "timestamp": "2022-04-14T07:52:33.91923213Z", - "signature": "2baYTKRWAatJmWL/mKxrUaPa1tUpzrQ7JuLClFWP8snhOozHN66IwrK7a8DEYpIprJ2k/WQEY40abTQ4el9nBw==" - }, - { - "block_id_flag": 2, - "validator_address": "B423FF79F90608BC76DA0AB850DEED5A7231E678", - "timestamp": "2022-04-14T07:52:33.924263801Z", - "signature": "XU8g6Eao+jqcMld8v97z/iEvDbDRSB4MjOOdqz4CAHf7c2TNY1+I+9iqAG8YU7tKoDWckgSA4en7mUu5SvasBg==" - }, - { - "block_id_flag": 2, - "validator_address": "B4B28030F3503F0650D0F12A70FCDDA6A6A844E0", - "timestamp": "2022-04-14T07:52:33.940053644Z", - "signature": "v2xN5tRKt7x9krj9DiI/ZJWRHQrHjnpEQZJ3SU8j6resu7y7Q0JsVEnCjWmuIcKhJgc6AxK5olf1MhmOS5gDDg==" - }, - { - "block_id_flag": 2, - "validator_address": "C2E2B0C72E6A427EC87E23675CE1A3EAEC30618C", - "timestamp": "2022-04-14T07:52:33.984048042Z", - "signature": "v4K2aXKdcwdUmZWmYIfXoTkb7eJL0lQ+Y9LZ5st/GX65o9fmoWMQSS9N5usqza3evt+XPJfapiezW+rMJqBHCg==" - }, - { - "block_id_flag": 2, - "validator_address": "E3461442CDD237B39BC3F40063FF4712E2E066F2", - "timestamp": "2022-04-14T07:52:33.942541279Z", - "signature": "ScPbwMwLL5e7hiRKO/IyllXhnT8Ifu2T64EUxqPjM/LDh4XwEQhsXCJHWbZEkX5JHVEp7kWyaEfc6B8163mWBg==" - }, - { - "block_id_flag": 2, - "validator_address": "1A302C41C4E175F2D61E9C3FD2195C3DB7545407", - "timestamp": "2022-04-14T07:52:33.93829292Z", - "signature": "BjgWxTQVajqmzZQZRZJi3V39VgDwazOsfqJFM/acaiCv9Oo3wlCH358pUGXHtTujLr3p8M2PEreFcp/7V9LBDQ==" - }, - { - "block_id_flag": 2, - "validator_address": "DE9CB7EB31538B3846C2EB36A9E3C4341EDF6A95", - "timestamp": "2022-04-14T07:52:33.871429849Z", - "signature": "pe9FGfJjngntnzuFzS5kPAojjkmk6+LD/2M1KsVp74FveU8mpAiEiYvYh8n0v92Sb5djoOD6F3s8F06C9suoAg==" - }, - { - "block_id_flag": 2, - "validator_address": "1C8BCA4A78B616E2ACEE8D0E218087614D85823B", - "timestamp": "2022-04-14T07:52:33.909438503Z", - "signature": "0HmvyxC3WWRmttUHZ/01mwp2lyvSRaM0pO1dRx50M+BxyZ3jgINGNauNEjJo54csq8H6ow7dwoT/UY+XE5cBBQ==" - }, - { - "block_id_flag": 2, - "validator_address": "48666481633E7C3A717BE6180449F2143E95A53F", - "timestamp": "2022-04-14T07:52:33.903349178Z", - "signature": "F9bnteb/6c/T3Mi/4yUyXWUVRKTneTJ/gQmFu2fbQ3qVvvi9Nm0CN4XOMxv8gSpxc5FVhfgbWGBZleOX6V2MDQ==" - }, - { - "block_id_flag": 2, - "validator_address": "DEAFFD4536C979DB03184C422FE5B3D020490372", - "timestamp": "2022-04-14T07:52:33.882483659Z", - "signature": "vqi7SJigSj6PDzt0VBLdZfb6xEL4v7Ih6930unpDWrgpBctEfQVIKBW/BEP2UZZvAxfdSqb5l9XG4DjFbFR+AQ==" - }, - { - "block_id_flag": 2, - "validator_address": "107524E392B718F087153DF0405BC58853F4DCEA", - "timestamp": "2022-04-14T07:52:33.886372524Z", - "signature": "ce/cp+MZIBHQA22uqOhu85sA0BqJmfXcBhG6TvcCutfmILkAJtcpUF8obgp7hWYH3uUKJ6VnbbDzgpIWMAyNAQ==" - }, - { - "block_id_flag": 2, - "validator_address": "23012F4B162F9805888DCBB3A04A475AE2867184", - "timestamp": "2022-04-14T07:52:33.961357434Z", - "signature": "KMe3UCtOkqgZvYJ3aXcuBtbWEAslZ6pjx7scD09jhMcbQHmBls1va8vW3d09lTzMr1erHTt8revAMHPHLcVpAg==" - }, - { - "block_id_flag": 2, - "validator_address": "EF7E1D03677202D2A845452B42E47C6A5F047649", - "timestamp": "2022-04-14T07:52:33.880907414Z", - "signature": "DSxqFunRRibkWkEJRq4zwUdCaO8SS3oHp1xeQAjyYebEt4UajdY9WrSboyquF0dzK+ll7RH9WjPolS1rQJZdBA==" - }, - { - "block_id_flag": 2, - 
"validator_address": "43F8A1A85AEB63533830AF2476306F0641E42BCA", - "timestamp": "2022-04-14T07:52:33.924804598Z", - "signature": "Wb1qEyJ63qIZIQelyCRmQ2EdYgXTmfEkWS5wVCDr0BieD+QMlycX9HKPV29Gf4d5o6FcnEHSCT/mv8LNdtAuAg==" - }, - { - "block_id_flag": 2, - "validator_address": "E94F109D064F48DF1147A1487338FE785A0CA285", - "timestamp": "2022-04-14T07:52:33.842587485Z", - "signature": "vIAWj7j2tloOCW1Y6S+AAajx5uCABMDFNAb6Y3bC0Hq1SRGSCJC0A5jAGYqcxn/7PAHD7mdqAgWbmeFOF4r1BA==" - }, - { - "block_id_flag": 2, - "validator_address": "4609FD57FBDDC077A2C68ACD45782D9B25CB894A", - "timestamp": "2022-04-14T07:52:34.11149712Z", - "signature": "OxTf6b9V3PzrNmCnS6zft3pbl3Tqrcr0AYBrl/4cOZZ+/HupRmc0Cckdbuti4cY1UbjlvoDr2mWe+LhNi/hcCw==" - }, - { - "block_id_flag": 2, - "validator_address": "4FB44F29BE1F9941FB8A0E32CA463B67065AADB4", - "timestamp": "2022-04-14T07:52:33.976831752Z", - "signature": "9Lc/1XxND2a0nXUDQ5Tm++g4j9noxLI6m8dbO0YEMVefE3LaFSEdNVEQtIG1llhN0tLjp869xx5fOtb8N/olAg==" - }, - { - "block_id_flag": 2, - "validator_address": "AE4A96CEC1423B86D6FA62F34ACEBFB281FD3112", - "timestamp": "2022-04-14T07:52:33.856149693Z", - "signature": "wBvnmo+4DzzhmIUT3izZCdTsW+L3wEt2bzNlKzmhFkJWiRMnDWOBHBDpG+Q3Cgsb5KDfxMW+kynGEJFRbuo6CQ==" - }, - { - "block_id_flag": 2, - "validator_address": "6A6964D3274DA12A0575BDEC7DB58C059D53CE10", - "timestamp": "2022-04-14T07:52:33.875321135Z", - "signature": "4ejz/jY9Hsgc3aQ2ZKO8Q39AXhoxZWSZ4tRAfUBZn77fRPr37sI8fVbu/HQO2bcv0fNdUUzX2M5wgKCyCVSCAA==" - }, - { - "block_id_flag": 2, - "validator_address": "830DEB89DB946F2B8514123E0A0CEA8D2F6DA726", - "timestamp": "2022-04-14T07:52:34.099862263Z", - "signature": "n3pgQzrNpuHckrpvN2C4HTzWpgPuhk+yvpZWDwusSw+jQQr/4hskjkv8hoR4TsDNFO2FXHzbHHX4mD3IA3uaAA==" - }, - { - "block_id_flag": 2, - "validator_address": "F91AC9B31A2ABFDA33F38816D02D082DEFBD2ED3", - "timestamp": "2022-04-14T07:52:33.902887385Z", - "signature": "ayaGI0JQWmoGDJLblDR7GJpYqjbqT3Ccb51IzTC/ZwLXmkjatxlN+3+4+CLnYk6CfJxRuiWsBYtSLFqhMSRPCw==" - }, - { - "block_id_flag": 2, - "validator_address": "9163DEBE8B3F6C37A204DE2678F6D5F1009B4EC6", - "timestamp": "2022-04-14T07:52:33.959494792Z", - "signature": "ITfE3vhZyOmgi1vVmc0VLsrlsneVRz9+phMTYSXxPu/1iG1XfgbC1Pbev08Xd2mdVksNpSQFPARouk/SFRriAA==" - }, - { - "block_id_flag": 2, - "validator_address": "DCC2C2EC7FD1A8339BC34A1529DAC1711412FD96", - "timestamp": "2022-04-14T07:52:33.905452048Z", - "signature": "2PqctIN7lxQPYHghKoWSe5W9oy28BK1SlEdB+6hl5X3qlqmVU2NGYa0PBiCkhkq+k8UXmhIzKyOwz/6bvOlnBg==" - }, - { - "block_id_flag": 2, - "validator_address": "B2B9701A3D2929285E9F1F5035DF65F2086223B4", - "timestamp": "2022-04-14T07:52:34.215622183Z", - "signature": "8gZDKFQYtXIXvb1E+ThX5qDfkXSzuA53hqcmbM6qFelPRIq+ta6anF20KrQY8YjWopQX2RuHA2ZZcl6FT1h1AA==" - }, - { - "block_id_flag": 2, - "validator_address": "5F2D57512F251AE4E06C2D117273F33391750F43", - "timestamp": "2022-04-14T07:52:33.935519822Z", - "signature": "L2pPgkBz2akDvcstcANEeOvp4Qwf1g3aY1kpR2IcZuOsPtZ7wER9kQCjK9kTM10RJIWB4kRrgvFiXn0L6JOmAQ==" - }, - { - "block_id_flag": 2, - "validator_address": "582A84896F496809F359EDC222FEDC416F373D8C", - "timestamp": "2022-04-14T07:52:33.94923149Z", - "signature": "Ccyodn11h932ctLI1MxV/lYap9gxnVUDSsdjhna/kuNx2RSvuEeD/u9SUicBUXPUpPbqzxLG1SkA2hUi4Zv5Dw==" - }, - { - "block_id_flag": 2, - "validator_address": "DEBCE7328D1B8B0ECEA210141908F733D6B5FEEB", - "timestamp": "2022-04-14T07:52:34.383657541Z", - "signature": "kf3kkoE7QAI0vPUvvdRi/KWrMT50RDLMBqqAvlK3XBOYZIyHFod9dGfxAhFG5dMloD7jIqiWDRVVdYao27IgBA==" - }, - { - "block_id_flag": 2, - 
"validator_address": "EA1FBB2E1EA6766ED1A51D3330C6E1E38934C543", - "timestamp": "2022-04-14T07:52:34.187181731Z", - "signature": "zkAPDsiKReUh2PaWV3PX6dKOwkZmgPWyll/V8XmfgGFk3ymZ1PGBuHIZ2KlpC637AnB2Y1t0cGekX6s236cDBA==" - }, - { - "block_id_flag": 2, - "validator_address": "4C27A85A8055B381BEEF72658878AB442DD67106", - "timestamp": "2022-04-14T07:52:33.908674714Z", - "signature": "qLXPns/FCZSO0rQwck6ifL7sDgnhM/Yy9oHrsHFr531QpKIDrKoIZrqm2whCBs1RuNfPL0II7/QWGA5lFlCbDA==" - }, - { - "block_id_flag": 2, - "validator_address": "AF3691263C43369774C461A4B2F168BB09A5DDD1", - "timestamp": "2022-04-14T07:52:33.960449971Z", - "signature": "RWmVGGq6QXP9ti1NQCB7EUf3QipB1FeiVty66lQvJifAgTmG9BWDaQOpLqe/p6gvEx0ESZawUWQvp5L3Nq+7DA==" - }, - { - "block_id_flag": 2, - "validator_address": "B0AE11B935A1B3B7C2AB9FBBEC6EC7DF7E243F55", - "timestamp": "2022-04-14T07:52:33.899219543Z", - "signature": "R23ccKlU8N0nVSlZqhjHQaRQZQ/PXIQF1BgorJLuDp6QavQClYUBRvFW8Oees827rDQmCvmsi1FUsxYsZfrmCQ==" - }, - { - "block_id_flag": 2, - "validator_address": "C6AD188E37799621EFA778139823F1771C7539E0", - "timestamp": "2022-04-14T07:52:34.003983026Z", - "signature": "dKDJInOyi/4YH3Pr7VytlwxX2ATbHn4vak4NsgjlLZIZNj3eptv9RFzO/Yxkli7uOQIhN0c80Yz+kuPVBeyJAA==" - }, - { - "block_id_flag": 2, - "validator_address": "E9F7AE72FF98F98FE6F109CEB28B002D5C3AE234", - "timestamp": "2022-04-14T07:52:33.858520773Z", - "signature": "uQUB6z3mAk3PY9AJKXvg7/YPKVxrjJmVPfskVJuT/qugU8PS7E59Sw8H3Eca6ExwNle5LLCxuy+oUjvjlq1gBw==" - }, - { - "block_id_flag": 2, - "validator_address": "F305CFA31F9A8060E34EB5FD1FCD2E6FC6E0DC4A", - "timestamp": "2022-04-14T07:52:33.854407457Z", - "signature": "G+NY2T5TF3FWA/aDRysP5F/W3woEsZYSp7Y8+khaB9+t8P+NF9M+cEw5Dkru2w+LUpzL3H5qNmLMSYauSdZ0Ag==" - }, - { - "block_id_flag": 2, - "validator_address": "8312473768107B9D21048CEF310BCF89E00CC22D", - "timestamp": "2022-04-14T07:52:33.903177736Z", - "signature": "idHiXEpQBJ3icGYzbsbSC8KeU53tLJPcCVgXXNa01tgNtLL/ovWYNkClZwc923ztPv4NNuXyUC0ImpGmTZbGDQ==" - }, - { - "block_id_flag": 2, - "validator_address": "AE201075C1D2EF9970AC8EB0A8A4DC0BC121B37D", - "timestamp": "2022-04-14T07:52:33.9829821Z", - "signature": "O+JVxO2mc5xrBKoRlSD+FzguQCQf/qTZhH8muo3Vq8E20KbmyQUV1bgElmg9cGxuu9M2DyH4EcsSb8NRNppDCQ==" - }, - { - "block_id_flag": 2, - "validator_address": "C487F83AE5903E388231E5AA8D345D8977D56F04", - "timestamp": "2022-04-14T07:52:33.901238972Z", - "signature": "jYDqHlWuUMXvCZJ4BauCrM4Pzj/lRxOPtUmcm/tf+M1LA69balDuRGADkGx7TLlD0TjqGbXkRD55PF2M5NVqBg==" - }, - { - "block_id_flag": 2, - "validator_address": "DB968E2D6C5D1CA87A67B79BCA7B9E5424C0DC72", - "timestamp": "2022-04-14T07:52:33.915773636Z", - "signature": "phweHuFyD6i2wYaFr5ka4l55H3rykWYY9ZO+yeCokSF6bIgSBzomLOftKtuY31VQAZxWg6CuR0g5IvCPgBNWDA==" - } - ] - } -} diff --git a/service/share/testing.go b/service/share/testing.go deleted file mode 100644 index 49d21a6107..0000000000 --- a/service/share/testing.go +++ /dev/null @@ -1,295 +0,0 @@ -package share - -import ( - "bytes" - "context" - "testing" - "time" - - "github.com/ipfs/go-bitswap" - "github.com/ipfs/go-bitswap/network" - "github.com/ipfs/go-blockservice" - ds "github.com/ipfs/go-datastore" - dssync "github.com/ipfs/go-datastore/sync" - blockstore "github.com/ipfs/go-ipfs-blockstore" - "github.com/ipfs/go-ipfs-routing/offline" - mdutils "github.com/ipfs/go-merkledag/test" - "github.com/libp2p/go-libp2p-core/host" - "github.com/libp2p/go-libp2p-core/peer" - record "github.com/libp2p/go-libp2p-record" - routinghelpers "github.com/libp2p/go-libp2p-routing-helpers" - 
"github.com/libp2p/go-libp2p/p2p/discovery/routing" - mocknet "github.com/libp2p/go-libp2p/p2p/net/mock" - "github.com/stretchr/testify/require" - "github.com/tendermint/tendermint/pkg/da" - - "github.com/celestiaorg/celestia-node/ipld" -) - -// RandLightServiceWithSquare provides a share.Service filled with 'n' NMT -// trees of 'n' random shares, essentially storing a whole square. -func RandLightServiceWithSquare(t *testing.T, n int) (*Service, *Root) { - bServ := mdutils.Bserv() - return NewService(bServ, TestLightAvailability(bServ)), RandFillBS(t, n, bServ) -} - -// RandLightService provides an unfilled share.Service with corresponding -// blockservice.BlockService than can be filled by the test. -func RandLightService() (*Service, blockservice.BlockService) { - bServ := mdutils.Bserv() - return NewService(bServ, TestLightAvailability(bServ)), bServ -} - -// RandFullServiceWithSquare provides a share.Service filled with 'n' NMT -// trees of 'n' random shares, essentially storing a whole square. -func RandFullServiceWithSquare(t *testing.T, n int) (*Service, *Root) { - bServ := mdutils.Bserv() - return NewService(bServ, TestFullAvailability(bServ)), RandFillBS(t, n, bServ) -} - -// RandLightLocalServiceWithSquare is the same as RandLightServiceWithSquare, except -// the Availability is wrapped with CacheAvailability. -func RandLightLocalServiceWithSquare(t *testing.T, n int) (*Service, *Root) { - bServ := mdutils.Bserv() - ds := dssync.MutexWrap(ds.NewMapDatastore()) - ca := NewCacheAvailability( - TestLightAvailability(bServ), - ds, - ) - return NewService(bServ, ca), RandFillBS(t, n, bServ) -} - -// RandFullLocalServiceWithSquare is the same as RandFullServiceWithSquare, except -// the Availability is wrapped with CacheAvailability. -func RandFullLocalServiceWithSquare(t *testing.T, n int) (*Service, *Root) { - bServ := mdutils.Bserv() - ds := dssync.MutexWrap(ds.NewMapDatastore()) - ca := NewCacheAvailability( - TestFullAvailability(bServ), - ds, - ) - return NewService(bServ, ca), RandFillBS(t, n, bServ) -} - -// RandFillBS fills the given BlockService with a random block of a given size. -func RandFillBS(t *testing.T, n int, bServ blockservice.BlockService) *Root { - shares := RandShares(t, n*n) - return FillBS(t, bServ, shares) -} - -// FillBS fills the given BlockService with the given shares. -func FillBS(t *testing.T, bServ blockservice.BlockService, shares []Share) *Root { - eds, err := ipld.AddShares(context.TODO(), shares, bServ) - require.NoError(t, err) - dah := da.NewDataAvailabilityHeader(eds) - return &dah -} - -// RandShares provides 'n' randomized shares prefixed with random namespaces. -func RandShares(t *testing.T, n int) []Share { - return ipld.RandShares(t, n) -} - -type node struct { - net *dagNet - *Service - blockservice.BlockService - host.Host -} - -// ClearStorage cleans up the storage of the node. -func (n *node) ClearStorage() { - keys, err := n.Blockstore().AllKeysChan(n.net.ctx) - require.NoError(n.net.t, err) - - for k := range keys { - err := n.DeleteBlock(n.net.ctx, k) - require.NoError(n.net.t, err) - } -} - -type dagNet struct { - ctx context.Context - t *testing.T - net mocknet.Mocknet - nodes []*node -} - -// NewTestDAGNet creates a new testing swarm utility to spawn different nodes -// and test how they interact and/or exchange data. 
-func NewTestDAGNet(ctx context.Context, t *testing.T) *dagNet { //nolint:revive - return &dagNet{ - ctx: ctx, - t: t, - net: mocknet.New(), - } -} - -// RandLightNode creates a Light Node filled with a random block of the given size. -func (dn *dagNet) RandLightNode(squareSize int) (*node, *Root) { - nd := dn.LightNode() - return nd, RandFillBS(dn.t, squareSize, nd.BlockService) -} - -// RandFullNode creates a Full Node filled with a random block of the given size. -func (dn *dagNet) RandFullNode(squareSize int) (*node, *Root) { - nd := dn.FullNode() - return nd, RandFillBS(dn.t, squareSize, nd.BlockService) -} - -// LightNode creates a new empty LightAvailability Node. -func (dn *dagNet) LightNode() *node { - nd := dn.Node() - nd.Service = NewService(nd.BlockService, TestLightAvailability(nd.BlockService)) - return nd -} - -// FullNode creates a new empty FullAvailability Node. -func (dn *dagNet) FullNode() *node { - nd := dn.Node() - nd.Service = NewService(nd.BlockService, TestFullAvailability(nd.BlockService)) - return nd -} - -// Node create a plain network node that can serve and request data. -func (dn *dagNet) Node() *node { - hst, err := dn.net.GenPeer() - require.NoError(dn.t, err) - dstore := dssync.MutexWrap(ds.NewMapDatastore()) - bstore := blockstore.NewBlockstore(dstore) - routing := offline.NewOfflineRouter(dstore, record.NamespacedValidator{}) - bs := bitswap.New( - dn.ctx, - network.NewFromIpfsHost(hst, routing), - bstore, - bitswap.ProvideEnabled(false), // disable routines for DHT content provides, as we don't use them - bitswap.EngineBlockstoreWorkerCount(1), // otherwise it spawns 128 routines which is too much for tests - bitswap.EngineTaskWorkerCount(2), - bitswap.TaskWorkerCount(2), - bitswap.SetSimulateDontHavesOnTimeout(false), - bitswap.SetSendDontHaves(false), - ) - nd := &node{ - net: dn, - BlockService: blockservice.New(bstore, bs), - Host: hst, - } - dn.nodes = append(dn.nodes, nd) - return nd -} - -// ConnectAll connects all the peers on registered on the dagNet. -func (dn *dagNet) ConnectAll() { - err := dn.net.LinkAll() - require.NoError(dn.t, err) - - err = dn.net.ConnectAllButSelf() - require.NoError(dn.t, err) -} - -// Connect connects two given peer. -func (dn *dagNet) Connect(peerA, peerB peer.ID) { - _, err := dn.net.LinkPeers(peerA, peerB) - require.NoError(dn.t, err) - _, err = dn.net.ConnectPeers(peerA, peerB) - require.NoError(dn.t, err) -} - -// Disconnect disconnects two peers. -// It does a hard disconnect, meaning that disconnected peers won't be able to reconnect on their own -// but only with dagNet.Connect or dagNet.ConnectAll. 
-func (dn *dagNet) Disconnect(peerA, peerB peer.ID) { - err := dn.net.UnlinkPeers(peerA, peerB) - require.NoError(dn.t, err) - err = dn.net.DisconnectPeers(peerA, peerB) - require.NoError(dn.t, err) -} - -type subNet struct { - *dagNet - nodes []*node -} - -func (dn *dagNet) SubNet() *subNet { - return &subNet{dn, nil} -} - -func (sn *subNet) LightNode() *node { - nd := sn.dagNet.LightNode() - sn.nodes = append(sn.nodes, nd) - return nd -} - -func (sn *subNet) FullNode() *node { - nd := sn.dagNet.FullNode() - sn.nodes = append(sn.nodes, nd) - return nd -} - -func (sn *subNet) ConnectAll() { - nodes := sn.nodes - for _, n1 := range nodes { - for _, n2 := range nodes { - if n1 == n2 { - continue - } - _, err := sn.net.LinkPeers(n1.ID(), n2.ID()) - require.NoError(sn.t, err) - - _, err = sn.net.ConnectPeers(n1.ID(), n2.ID()) - require.NoError(sn.t, err) - } - } -} - -type TestBrokenAvailability struct { - Root *Root -} - -// NewTestBrokenAvailability returns an instance of Availability that -// allows for testing error cases during sampling. -// -// If the Root field is empty, it will return ErrNotAvailable on every call -// to SharesAvailable. Otherwise, it will only return ErrNotAvailable if the -// given Root hash matches the stored Root hash. -func NewTestBrokenAvailability() Availability { - return &TestBrokenAvailability{} -} - -func (b *TestBrokenAvailability) SharesAvailable(_ context.Context, root *Root) error { - if b.Root == nil || bytes.Equal(b.Root.Hash(), root.Hash()) { - return ErrNotAvailable - } - return nil -} - -func (b *TestBrokenAvailability) ProbabilityOfAvailability() float64 { - return 0 -} - -func TestLightAvailability(bServ blockservice.BlockService) *LightAvailability { - disc := NewDiscovery(nil, routing.NewRoutingDiscovery(routinghelpers.Null{}), 0, time.Second, time.Second) - return NewLightAvailability(bServ, disc) -} - -func TestFullAvailability(bServ blockservice.BlockService) *FullAvailability { - disc := NewDiscovery(nil, routing.NewRoutingDiscovery(routinghelpers.Null{}), 0, time.Second, time.Second) - return NewFullAvailability(bServ, disc) -} - -type TestSuccessfulAvailability struct { -} - -// NewTestSuccessfulAvailability returns an Availability that always -// returns successfully when SharesAvailable is called. 
-func NewTestSuccessfulAvailability() Availability { - return &TestSuccessfulAvailability{} -} - -func (tsa *TestSuccessfulAvailability) SharesAvailable(context.Context, *Root) error { - return nil -} - -func (tsa *TestSuccessfulAvailability) ProbabilityOfAvailability() float64 { - return 0 -} diff --git a/service/state/core_access.go b/service/state/core_access.go deleted file mode 100644 index bc082a578a..0000000000 --- a/service/state/core_access.go +++ /dev/null @@ -1,224 +0,0 @@ -package state - -import ( - "context" - "fmt" - - "github.com/cosmos/cosmos-sdk/api/tendermint/abci" - sdktypes "github.com/cosmos/cosmos-sdk/types" - sdktx "github.com/cosmos/cosmos-sdk/types/tx" - banktypes "github.com/cosmos/cosmos-sdk/x/bank/types" - proofutils "github.com/cosmos/ibc-go/v4/modules/core/23-commitment/types" - rpcclient "github.com/tendermint/tendermint/rpc/client" - "github.com/tendermint/tendermint/rpc/client/http" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials/insecure" - - "github.com/celestiaorg/celestia-app/app" - "github.com/celestiaorg/celestia-app/x/payment" - apptypes "github.com/celestiaorg/celestia-app/x/payment/types" - "github.com/celestiaorg/celestia-node/header" - "github.com/celestiaorg/nmt/namespace" -) - -// CoreAccessor implements Accessor over a gRPC connection -// with a celestia-core node. -type CoreAccessor struct { - signer *apptypes.KeyringSigner - getter header.Getter - - queryCli banktypes.QueryClient - rpcCli rpcclient.ABCIClient - - coreConn *grpc.ClientConn - coreIP string - rpcPort string - grpcPort string -} - -// NewCoreAccessor dials the given celestia-core endpoint and -// constructs and returns a new CoreAccessor with the active -// connection. -func NewCoreAccessor( - signer *apptypes.KeyringSigner, - getter header.Getter, - coreIP, - rpcPort string, - grpcPort string, -) *CoreAccessor { - return &CoreAccessor{ - signer: signer, - getter: getter, - coreIP: coreIP, - rpcPort: rpcPort, - grpcPort: grpcPort, - } -} - -func (ca *CoreAccessor) Start(ctx context.Context) error { - if ca.coreConn != nil { - return fmt.Errorf("core-access: already connected to core endpoint") - } - // dial given celestia-core endpoint - endpoint := fmt.Sprintf("%s:%s", ca.coreIP, ca.grpcPort) - client, err := grpc.DialContext(ctx, endpoint, grpc.WithTransportCredentials(insecure.NewCredentials())) - if err != nil { - return err - } - ca.coreConn = client - // create the query client - queryCli := banktypes.NewQueryClient(ca.coreConn) - ca.queryCli = queryCli - // create ABCI query client - cli, err := http.New(fmt.Sprintf("http://%s:%s", ca.coreIP, ca.rpcPort)) - if err != nil { - return err - } - ca.rpcCli = cli - return nil -} - -func (ca *CoreAccessor) Stop(context.Context) error { - if ca.coreConn == nil { - return fmt.Errorf("core-access: no connection found to close") - } - // close out core connection - err := ca.coreConn.Close() - if err != nil { - return err - } - ca.coreConn = nil - ca.queryCli = nil - return nil -} - -func (ca *CoreAccessor) constructSignedTx( - ctx context.Context, - msg sdktypes.Msg, - opts ...apptypes.TxBuilderOption, -) ([]byte, error) { - // should be called first in order to make a valid tx - err := ca.signer.QueryAccountNumber(ctx, ca.coreConn) - if err != nil { - return nil, err - } - - tx, err := ca.signer.BuildSignedTx(ca.signer.NewTxBuilder(opts...), msg) - if err != nil { - return nil, err - } - return ca.signer.EncodeTx(tx) -} - -func (ca *CoreAccessor) SubmitPayForData( - ctx context.Context, - nID namespace.ID, - data 
[]byte, - gasLim uint64, -) (*TxResponse, error) { - return payment.SubmitPayForData(ctx, ca.signer, ca.coreConn, nID, data, gasLim) -} - -func (ca *CoreAccessor) Balance(ctx context.Context) (*Balance, error) { - addr, err := ca.signer.GetSignerInfo().GetAddress() - if err != nil { - return nil, err - } - return ca.BalanceForAddress(ctx, addr) -} - -func (ca *CoreAccessor) BalanceForAddress(ctx context.Context, addr Address) (*Balance, error) { - head, err := ca.getter.Head(ctx) - if err != nil { - return nil, err - } - // construct an ABCI query for the height at head-1 because - // the AppHash contained in the head is actually the state root - // after applying the transactions contained in the previous block. - // TODO @renaynay: once https://github.com/cosmos/cosmos-sdk/pull/12674 is merged, use this method instead - prefixedAccountKey := append(banktypes.CreateAccountBalancesPrefix(addr.Bytes()), []byte(app.BondDenom)...) - abciReq := abci.RequestQuery{ - // TODO @renayay: once https://github.com/cosmos/cosmos-sdk/pull/12674 is merged, use const instead - Path: fmt.Sprintf("store/%s/key", banktypes.StoreKey), - Height: head.Height - 1, - Data: prefixedAccountKey, - Prove: true, - } - opts := rpcclient.ABCIQueryOptions{ - Height: abciReq.Height, - Prove: abciReq.Prove, - } - result, err := ca.rpcCli.ABCIQueryWithOptions(ctx, abciReq.Path, abciReq.Data, opts) - if err != nil { - return nil, err - } - if !result.Response.IsOK() { - return nil, sdkErrorToGRPCError(result.Response) - } - // unmarshal balance information - value := result.Response.Value - coin, ok := sdktypes.NewIntFromString(string(value)) - if !ok { - return nil, fmt.Errorf("cannot convert %s into sdktypes.Int", string(value)) - } - // convert proofs into a more digestible format - merkleproof, err := proofutils.ConvertProofs(result.Response.GetProofOps()) - if err != nil { - return nil, err - } - root := proofutils.NewMerkleRoot(head.AppHash) - // VerifyMembership expects the path as: - // []string{, } - path := proofutils.NewMerklePath(banktypes.StoreKey, string(prefixedAccountKey)) - err = merkleproof.VerifyMembership(proofutils.GetSDKSpecs(), root, path, value) - if err != nil { - return nil, err - } - return &Balance{ - Denom: app.BondDenom, - Amount: coin, - }, nil -} - -func (ca *CoreAccessor) SubmitTx(ctx context.Context, tx Tx) (*TxResponse, error) { - txResp, err := apptypes.BroadcastTx(ctx, ca.coreConn, sdktx.BroadcastMode_BROADCAST_MODE_BLOCK, tx) - if err != nil { - return nil, err - } - return txResp.TxResponse, nil -} - -func (ca *CoreAccessor) SubmitTxWithBroadcastMode( - ctx context.Context, - tx Tx, - mode sdktx.BroadcastMode, -) (*TxResponse, error) { - txResp, err := apptypes.BroadcastTx(ctx, ca.coreConn, mode, tx) - if err != nil { - return nil, err - } - return txResp.TxResponse, nil -} - -func (ca *CoreAccessor) Transfer( - ctx context.Context, - addr Address, - amount Int, - gasLim uint64, -) (*TxResponse, error) { - to, ok := addr.(sdktypes.AccAddress) - if !ok { - return nil, fmt.Errorf("state: unsupported address type") - } - from, err := ca.signer.GetSignerInfo().GetAddress() - if err != nil { - return nil, err - } - coins := sdktypes.NewCoins(sdktypes.NewCoin(app.BondDenom, amount)) - msg := banktypes.NewMsgSend(from, to, coins) - signedTx, err := ca.constructSignedTx(ctx, msg, apptypes.SetGasLimit(gasLim)) - if err != nil { - return nil, err - } - return ca.SubmitTx(ctx, signedTx) -} diff --git a/service/state/interface.go b/service/state/interface.go deleted file mode 100644 index 
2f2a53ccd8..0000000000 --- a/service/state/interface.go +++ /dev/null @@ -1,39 +0,0 @@ -package state - -import ( - "context" - - "github.com/cosmos/cosmos-sdk/types" - - "github.com/celestiaorg/nmt/namespace" -) - -// Accessor represents the behaviors necessary for a user to -// query for state-related information and submit transactions/ -// messages to the Celestia network. -type Accessor interface { - // Start starts the state Accessor. - Start(context.Context) error - // Stop stops the state Accessor. - Stop(context.Context) error - - // Balance retrieves the Celestia coin balance for the node's account/signer - // and verifies it against the corresponding block's AppHash. - Balance(ctx context.Context) (*Balance, error) - // BalanceForAddress retrieves the Celestia coin balance for the given address and verifies - // the returned balance against the corresponding block's AppHash. - // - // NOTE: the balance returned is the balance reported by the block right before - // the node's current head (head-1). This is due to the fact that for block N, the block's - // `AppHash` is the result of applying the previous block's transaction list. - BalanceForAddress(ctx context.Context, addr Address) (*Balance, error) - - // Transfer sends the given amount of coins from default wallet of the node to the given account address. - Transfer(ctx context.Context, to types.Address, amount types.Int, gasLimit uint64) (*TxResponse, error) - // SubmitTx submits the given transaction/message to the - // Celestia network and blocks until the tx is included in - // a block. - SubmitTx(ctx context.Context, tx Tx) (*TxResponse, error) - // SubmitPayForData builds, signs and submits a PayForData transaction. - SubmitPayForData(ctx context.Context, nID namespace.ID, data []byte, gasLim uint64) (*TxResponse, error) -} diff --git a/service/state/service.go b/service/state/service.go deleted file mode 100644 index f982940422..0000000000 --- a/service/state/service.go +++ /dev/null @@ -1,67 +0,0 @@ -package state - -import ( - "context" - - "github.com/celestiaorg/celestia-node/header" - "github.com/celestiaorg/nmt/namespace" -) - -// Service can access state-related information via the given -// Accessor. -type Service struct { - ctx context.Context - cancel context.CancelFunc - - accessor Accessor - - getter header.Getter -} - -// NewService constructs a new state Service. -func NewService(accessor Accessor, getter header.Getter) *Service { - return &Service{ - accessor: accessor, - getter: getter, - } -} - -func (s *Service) SubmitPayForData( - ctx context.Context, - nID namespace.ID, - data []byte, - gasLim uint64, -) (*TxResponse, error) { - return s.accessor.SubmitPayForData(ctx, nID, data, gasLim) -} - -func (s *Service) Balance(ctx context.Context) (*Balance, error) { - return s.accessor.Balance(ctx) -} - -func (s *Service) BalanceForAddress(ctx context.Context, addr Address) (*Balance, error) { - return s.accessor.BalanceForAddress(ctx, addr) -} - -func (s *Service) SubmitTx(ctx context.Context, tx Tx) (*TxResponse, error) { - return s.accessor.SubmitTx(ctx, tx) -} - -func (s *Service) Transfer(ctx context.Context, to Address, amount Int, gasLimit uint64) (*TxResponse, error) { - return s.accessor.Transfer(ctx, to, amount, gasLimit) -} - -func (s *Service) Start(context.Context) error { - s.ctx, s.cancel = context.WithCancel(context.Background()) - return nil -} - -func (s *Service) Stop(context.Context) error { - s.cancel() - return nil -} - -// IsStopped checks if context was canceled. 
-func (s *Service) IsStopped() bool { - return s.ctx.Err() != nil -} diff --git a/service/state/state.go b/service/state/state.go deleted file mode 100644 index 9b010c6f45..0000000000 --- a/service/state/state.go +++ /dev/null @@ -1,21 +0,0 @@ -package state - -import ( - sdk "github.com/cosmos/cosmos-sdk/types" - coretypes "github.com/tendermint/tendermint/types" -) - -// Balance is an alias to the Coin type from Cosmos-SDK. -type Balance = sdk.Coin - -// Tx is an alias to the Tx type from celestia-core. -type Tx = coretypes.Tx - -// TxResponse is an alias to the TxResponse type from Cosmos-SDK. -type TxResponse = sdk.TxResponse - -// Address is an alias to the Address type from Cosmos-SDK. -type Address = sdk.Address - -// Int is an alias to the Int type from Cosmos-SDK. -type Int = sdk.Int diff --git a/share/availability.go b/share/availability.go new file mode 100644 index 0000000000..f3511da450 --- /dev/null +++ b/share/availability.go @@ -0,0 +1,37 @@ +package share + +import ( + "context" + "errors" + + "github.com/celestiaorg/celestia-app/pkg/da" + "github.com/celestiaorg/rsmt2d" + + "github.com/celestiaorg/celestia-node/header" +) + +// ErrNotAvailable is returned whenever DA sampling fails. +var ErrNotAvailable = errors.New("share: data not available") + +// Root represents root commitment to multiple Shares. +// In practice, it is a commitment to all the Data in a square. +type Root = da.DataAvailabilityHeader + +// NewRoot generates Root(DataAvailabilityHeader) using the +// provided extended data square. +func NewRoot(eds *rsmt2d.ExtendedDataSquare) (*Root, error) { + dah, err := da.NewDataAvailabilityHeader(eds) + if err != nil { + return nil, err + } + return &dah, nil +} + +// Availability defines interface for validation of Shares' availability. +// +//go:generate mockgen -destination=availability/mocks/availability.go -package=mocks . Availability +type Availability interface { + // SharesAvailable subjectively validates if Shares committed to the given Root are available on + // the Network. + SharesAvailable(context.Context, *header.ExtendedHeader) error +} diff --git a/share/availability/full/availability.go b/share/availability/full/availability.go new file mode 100644 index 0000000000..4ea211cb1e --- /dev/null +++ b/share/availability/full/availability.go @@ -0,0 +1,102 @@ +package full + +import ( + "context" + "errors" + "fmt" + + "github.com/filecoin-project/dagstore" + logging "github.com/ipfs/go-log/v2" + + "github.com/celestiaorg/celestia-node/header" + "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/share/eds" + "github.com/celestiaorg/celestia-node/share/eds/byzantine" + "github.com/celestiaorg/celestia-node/share/ipld" + "github.com/celestiaorg/celestia-node/share/p2p/discovery" +) + +var log = logging.Logger("share/full") + +// ShareAvailability implements share.Availability using the full data square +// recovery technique. It is considered "full" because it is required +// to download enough shares to fully reconstruct the data square. +type ShareAvailability struct { + store *eds.Store + getter share.Getter + disc *discovery.Discovery + + cancel context.CancelFunc +} + +// NewShareAvailability creates a new full ShareAvailability. 
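NewShareAvailability below only wires its three collaborators together; the lifecycle is Start, then SharesAvailable, then Stop. A minimal, hypothetical usage sketch of that flow (imports elided; store, getter, disc and eh come from the caller's own wiring and are not defined in this diff):

	// startAndCheck shows the intended lifecycle of the full ShareAvailability.
	func startAndCheck(ctx context.Context, store *eds.Store, getter share.Getter,
		disc *discovery.Discovery, eh *header.ExtendedHeader) error {
		avail := full.NewShareAvailability(store, getter, disc)
		if err := avail.Start(ctx); err != nil {
			return err
		}
		defer avail.Stop(ctx)

		// share.ErrNotAvailable signals the square could not be reconstructed.
		return avail.SharesAvailable(ctx, eh)
	}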
+func NewShareAvailability(
+	store *eds.Store,
+	getter share.Getter,
+	disc *discovery.Discovery,
+) *ShareAvailability {
+	return &ShareAvailability{
+		store:  store,
+		getter: getter,
+		disc:   disc,
+	}
+}
+
+func (fa *ShareAvailability) Start(context.Context) error {
+	ctx, cancel := context.WithCancel(context.Background())
+	fa.cancel = cancel
+
+	go fa.disc.Advertise(ctx)
+	return nil
+}
+
+func (fa *ShareAvailability) Stop(context.Context) error {
+	fa.cancel()
+	return nil
+}
+
+// SharesAvailable reconstructs the data committed to the given Root by requesting
+// enough Shares from the network.
+func (fa *ShareAvailability) SharesAvailable(ctx context.Context, header *header.ExtendedHeader) error {
+	dah := header.DAH
+	// short-circuit if the given root is minimum DAH of an empty data square, to avoid datastore hit
+	if share.DataHash(dah.Hash()).IsEmptyRoot() {
+		return nil
+	}
+
+	// we assume the caller of this method has already performed basic validation on the
+	// given dah/root. If for some reason this has not happened, the node should panic.
+	if err := dah.ValidateBasic(); err != nil {
+		log.Errorw("Availability validation cannot be performed on a malformed DataAvailabilityHeader",
+			"err", err)
+		panic(err)
+	}
+
+	// a hack to avoid loading the whole EDS in mem if we store it already.
+	if ok, _ := fa.store.Has(ctx, dah.Hash()); ok {
+		return nil
+	}
+
+	adder := ipld.NewProofsAdder(len(dah.RowRoots))
+	ctx = ipld.CtxWithProofsAdder(ctx, adder)
+	defer adder.Purge()
+
+	eds, err := fa.getter.GetEDS(ctx, header)
+	if err != nil {
+		if errors.Is(err, context.Canceled) {
+			return err
+		}
+		log.Errorw("availability validation failed", "root", dah.String(), "err", err.Error())
+		var byzantineErr *byzantine.ErrByzantine
+		if errors.Is(err, share.ErrNotFound) || errors.Is(err, context.DeadlineExceeded) && !errors.As(err, &byzantineErr) {
+			return share.ErrNotAvailable
+		}
+		return err
+	}
+
+	err = fa.store.Put(ctx, dah.Hash(), eds)
+	if err != nil && !errors.Is(err, dagstore.ErrShardExists) {
+		return fmt.Errorf("full availability: failed to store eds: %w", err)
+	}
+	return nil
+}
diff --git a/share/availability/full/availability_test.go b/share/availability/full/availability_test.go
new file mode 100644
index 0000000000..8ac0648a87
--- /dev/null
+++ b/share/availability/full/availability_test.go
@@ -0,0 +1,82 @@
+package full
+
+import (
+	"context"
+	"testing"
+
+	"github.com/golang/mock/gomock"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	"github.com/celestiaorg/celestia-app/pkg/da"
+
+	"github.com/celestiaorg/celestia-node/header/headertest"
+	"github.com/celestiaorg/celestia-node/share"
+	availability_test "github.com/celestiaorg/celestia-node/share/availability/test"
+	"github.com/celestiaorg/celestia-node/share/eds/edstest"
+	"github.com/celestiaorg/celestia-node/share/mocks"
+)
+
+func TestShareAvailableOverMocknet_Full(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	net := availability_test.NewTestDAGNet(ctx, t)
+	_, root := RandNode(net, 32)
+
+	eh := headertest.RandExtendedHeaderWithRoot(t, root)
+	nd := Node(net)
+	net.ConnectAll()
+
+	err := nd.SharesAvailable(ctx, eh)
+	assert.NoError(t, err)
+}
+
+func TestSharesAvailable_Full(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	// GetterWithRandSquare provides a getter over a random square, so we can test ShareAvailability against it
+	getter, dah := GetterWithRandSquare(t, 16)
+
+	eh := headertest.RandExtendedHeaderWithRoot(t, dah)
+	avail := TestAvailability(t, getter)
+	err := avail.SharesAvailable(ctx, eh)
+	assert.NoError(t, err)
+}
+
+func TestSharesAvailable_StoresToEDSStore(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	// GetterWithRandSquare provides a getter over a random square, so we can test ShareAvailability against it
+	getter, dah := GetterWithRandSquare(t, 16)
+	eh := headertest.RandExtendedHeaderWithRoot(t, dah)
+	avail := TestAvailability(t, getter)
+	err := avail.SharesAvailable(ctx, eh)
+	assert.NoError(t, err)
+
+	has, err := avail.store.Has(ctx, dah.Hash())
+	assert.NoError(t, err)
+	assert.True(t, has)
+}
+
+func TestSharesAvailable_Full_ErrNotAvailable(t *testing.T) {
+	ctrl := gomock.NewController(t)
+	getter := mocks.NewMockGetter(ctrl)
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	eds := edstest.RandEDS(t, 4)
+	dah, err := da.NewDataAvailabilityHeader(eds)
+	eh := headertest.RandExtendedHeaderWithRoot(t, &dah)
+	require.NoError(t, err)
+	avail := TestAvailability(t, getter)
+
+	errors := []error{share.ErrNotFound, context.DeadlineExceeded}
+	for _, getterErr := range errors {
+		getter.EXPECT().GetEDS(gomock.Any(), gomock.Any()).Return(nil, getterErr)
+		err := avail.SharesAvailable(ctx, eh)
+		require.ErrorIs(t, err, share.ErrNotAvailable)
+	}
+}
diff --git a/service/share/full_reconstruction_test.go b/share/availability/full/reconstruction_test.go
similarity index 58%
rename from service/share/full_reconstruction_test.go
rename to share/availability/full/reconstruction_test.go
index 10d47947dd..6ac5a3f31e 100644
--- a/service/share/full_reconstruction_test.go
+++ b/share/availability/full/reconstruction_test.go
@@ -1,28 +1,33 @@
 //go:build !race
 
-package share
+package full
 
 import (
 	"context"
+	"sync"
 	"testing"
 	"time"
 
 	"github.com/stretchr/testify/require"
 	"golang.org/x/sync/errgroup"
 
-	"github.com/celestiaorg/celestia-node/ipld"
+	"github.com/celestiaorg/celestia-node/header/headertest"
+	"github.com/celestiaorg/celestia-node/share"
+	"github.com/celestiaorg/celestia-node/share/availability/light"
+	availability_test "github.com/celestiaorg/celestia-node/share/availability/test"
+	"github.com/celestiaorg/celestia-node/share/eds"
 )
 
 func init() {
-	ipld.RetrieveQuadrantTimeout = time.Millisecond * 100 // to speed up tests
+	eds.RetrieveQuadrantTimeout = time.Millisecond * 100 // to speed up tests
}
 
-// TestShareAvailable_OneFullNode asserts that a FullAvailability node can ensure
+// TestShareAvailable_OneFullNode asserts that a full node can ensure
 // data is available (reconstruct data square) while being connected to
-// LightAvailability nodes only.
+// light nodes only.
 func TestShareAvailable_OneFullNode(t *testing.T) {
 	// NOTE: Numbers are taken from the original 'Fraud and Data Availability Proofs' paper
-	DefaultSampleAmount = 20 // s
+	light.DefaultSampleAmount = 20 // s
 	const (
 		origSquareSize = 16 // k
 		lightNodes     = 69 // c
@@ -31,9 +36,11 @@ func TestShareAvailable_OneFullNode(t *testing.T) {
 	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute)
 	defer cancel()
 
-	net := NewTestDAGNet(ctx, t)
-	source, root := net.RandFullNode(origSquareSize) // make a source node, a.k.a bridge
-	full := net.FullNode() // make a full availability service which reconstructs data
+	net := availability_test.NewTestDAGNet(ctx, t)
+	source, root := RandNode(net, origSquareSize) // make a source node, a.k.a bridge
+	eh := headertest.RandExtendedHeader(t)
+	eh.DAH = root
+	full := Node(net) // make a full availability service which reconstructs data
 
 	// ensure there is no connection between source and full nodes
 	// so that full reconstructs from the light nodes only
@@ -41,14 +48,14 @@ func TestShareAvailable_OneFullNode(t *testing.T) {
 	errg, errCtx := errgroup.WithContext(ctx)
 	errg.Go(func() error {
-		return full.SharesAvailable(errCtx, root)
+		return full.SharesAvailable(errCtx, eh)
 	})
 
-	lights := make([]*node, lightNodes)
+	lights := make([]*availability_test.TestNode, lightNodes)
 	for i := 0; i < len(lights); i++ {
-		lights[i] = net.LightNode()
+		lights[i] = light.Node(net)
 		go func(i int) {
-			err := lights[i].SharesAvailable(ctx, root)
+			err := lights[i].SharesAvailable(ctx, eh)
 			if err != nil {
 				t.Log("light errors:", err)
 			}
@@ -67,14 +74,16 @@ func TestShareAvailable_OneFullNode(t *testing.T) {
 	require.NoError(t, err)
 }
 
-// TestShareAvailable_ConnectedFullNodes asserts that two connected FullAvailability nodes
-// can ensure data availability via two isolated LightAvailability node subnetworks. Full nodes
-// start their availability process first, then Lights start availability process and connect to
-// Fulls and only after Lights connect to the source node which has the data. After Lights connect
-// to the source, Full must be able to finish the availability process started in the beginning.
+// TestShareAvailable_ConnectedFullNodes asserts that two connected full nodes
+// can ensure data availability via two isolated light node subnetworks. Full
+// nodes start their availability process first, then the light nodes start
+// their availability process and connect to the full nodes, and only after
+// that do the light nodes connect to the source node which has the data. After
+// the light nodes connect to the source, the full nodes must be able to finish
+// the availability process started in the beginning.
 func TestShareAvailable_ConnectedFullNodes(t *testing.T) {
 	// NOTE: Numbers are taken from the original 'Fraud and Data Availability Proofs' paper
-	DefaultSampleAmount = 20 // s
+	light.DefaultSampleAmount = 20 // s
 	const (
 		origSquareSize = 16 // k
 		lightNodes     = 60 // c
@@ -83,12 +92,14 @@ func TestShareAvailable_ConnectedFullNodes(t *testing.T) {
 	ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
 	defer cancel()
 
-	net := NewTestDAGNet(ctx, t)
-	source, root := net.RandFullNode(origSquareSize)
+	net := availability_test.NewTestDAGNet(ctx, t)
+	source, root := RandNode(net, origSquareSize)
+	eh := headertest.RandExtendedHeader(t)
+	eh.DAH = root
 
 	// create two full nodes and ensure they are disconnected
-	full1 := net.FullNode()
-	full2 := net.FullNode()
+	full1 := Node(net)
+	full2 := Node(net)
 
 	// pre-connect fulls
 	net.Connect(full1.ID(), full2.ID())
@@ -100,26 +111,28 @@ func TestShareAvailable_ConnectedFullNodes(t *testing.T) {
 	// start reconstruction for fulls
 	errg, errCtx := errgroup.WithContext(ctx)
 	errg.Go(func() error {
-		return full1.SharesAvailable(errCtx, root)
+		return full1.SharesAvailable(errCtx, eh)
 	})
 	errg.Go(func() error {
-		return full2.SharesAvailable(errCtx, root)
+		return full2.SharesAvailable(errCtx, eh)
 	})
 
 	// create light nodes and start sampling for them immediately
-	lights1, lights2 := make([]*node, lightNodes/2), make([]*node, lightNodes/2)
+	lights1, lights2 := make(
+		[]*availability_test.TestNode, lightNodes/2),
+		make([]*availability_test.TestNode, lightNodes/2)
 	for i := 0; i < len(lights1); i++ {
-		lights1[i] = net.LightNode()
+		lights1[i] = light.Node(net)
 		go func(i int) {
-			err := lights1[i].SharesAvailable(ctx, root)
+			err := lights1[i].SharesAvailable(ctx, eh)
 			if err != nil {
 				t.Log("light1 errors:", err)
 			}
 		}(i)
 
-		lights2[i] = net.LightNode()
+		lights2[i] = light.Node(net)
 		go func(i int) {
-			err := lights2[i].SharesAvailable(ctx, root)
+			err := lights2[i].SharesAvailable(ctx, eh)
 			if err != nil {
 				t.Log("light2 errors:", err)
 			}
@@ -146,10 +159,11 @@ func TestShareAvailable_ConnectedFullNodes(t *testing.T) {
 	require.NoError(t, err)
 }
 
-// TestShareAvailable_DisconnectedFullNodes asserts that two disconnected FullAvailability nodes
-// cannot ensure data is available (reconstruct data square) while being connected to isolated
-// LightAvailability nodes subnetworks, which do not have enough nodes to reconstruct the data,
-// but once FullAvailability nodes connect, they can collectively reconstruct it.
+// TestShareAvailable_DisconnectedFullNodes asserts that two disconnected full
+// nodes cannot ensure data is available (reconstruct data square) while being
+// connected to isolated light node subnetworks, which do not have enough nodes
+// to reconstruct the data, but once the full nodes connect, they can
+// collectively reconstruct it.
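The reconstruction tests here, including the one below, share one concurrency shape: full nodes run SharesAvailable under an errgroup (their success is the assertion), while light nodes sample in fire-and-forget goroutines whose failures are only logged. A condensed sketch of that pattern as it would appear inside a test body (fulls and lights are placeholder slices, not names from this diff):

	errg, errCtx := errgroup.WithContext(ctx)
	for _, fl := range fulls {
		fl := fl // capture loop variable (pre-Go 1.22 idiom used throughout these tests)
		errg.Go(func() error {
			return fl.SharesAvailable(errCtx, eh)
		})
	}
	for _, lt := range lights {
		go func(nd *availability_test.TestNode) {
			if err := nd.SharesAvailable(ctx, eh); err != nil {
				t.Log("light errors:", err) // sampling errors are logged, not fatal
			}
		}(lt)
	}
	require.NoError(t, errg.Wait()) // reconstruction by the fulls must succeed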
func TestShareAvailable_DisconnectedFullNodes(t *testing.T) { // S - Source // L - Light Node @@ -169,58 +183,57 @@ func TestShareAvailable_DisconnectedFullNodes(t *testing.T) { // // NOTE: Numbers are taken from the original 'Fraud and Data Availability Proofs' paper - DefaultSampleAmount = 20 // s + light.DefaultSampleAmount = 20 // s const ( origSquareSize = 16 // k - lightNodes = 60 // c - total number of nodes on two subnetworks + lightNodes = 32 // c - total number of nodes on two subnetworks ) - ctx, cancel := context.WithTimeout(context.Background(), time.Second*30) + ctx, cancel := context.WithTimeout(context.Background(), time.Second*60) defer cancel() - net := NewTestDAGNet(ctx, t) - source, root := net.RandFullNode(origSquareSize) - - // create two full nodes and ensure they are disconnected - full1 := net.FullNode() - full2 := net.FullNode() - net.Disconnect(full1.ID(), full2.ID()) - - // ensure fulls and source are not connected - // so that fulls take data from light nodes only - net.Disconnect(full1.ID(), source.ID()) - net.Disconnect(full2.ID(), source.ID()) - - // start reconstruction for fulls that should fail - ctxErr, cancelErr := context.WithTimeout(ctx, ipld.RetrieveQuadrantTimeout*8) - errg, errCtx := errgroup.WithContext(ctxErr) - errg.Go(func() error { - return full1.SharesAvailable(errCtx, root) - }) - errg.Go(func() error { - return full2.SharesAvailable(errCtx, root) - }) + net := availability_test.NewTestDAGNet(ctx, t) + source, root := RandNode(net, origSquareSize) + eh := headertest.RandExtendedHeader(t) + eh.DAH = root // create light nodes and start sampling for them immediately - lights1, lights2 := make([]*node, lightNodes/2), make([]*node, lightNodes/2) + lights1, lights2 := make( + []*availability_test.TestNode, lightNodes/2), + make([]*availability_test.TestNode, lightNodes/2) + + var wg sync.WaitGroup + wg.Add(lightNodes) for i := 0; i < len(lights1); i++ { - lights1[i] = net.LightNode() + lights1[i] = light.Node(net) go func(i int) { - err := lights1[i].SharesAvailable(ctx, root) + defer wg.Done() + err := lights1[i].SharesAvailable(ctx, eh) if err != nil { t.Log("light1 errors:", err) } }(i) - lights2[i] = net.LightNode() + lights2[i] = light.Node(net) go func(i int) { - err := lights2[i].SharesAvailable(ctx, root) + defer wg.Done() + err := lights2[i].SharesAvailable(ctx, eh) if err != nil { t.Log("light2 errors:", err) } }(i) } + // create two full nodes and ensure they are disconnected + full1 := Node(net) + full2 := Node(net) + net.Disconnect(full1.ID(), full2.ID()) + + // ensure fulls and source are not connected + // so that fulls take data from light nodes only + net.Disconnect(full1.ID(), source.ID()) + net.Disconnect(full2.ID(), source.ID()) + // shape topology for i := 0; i < len(lights1); i++ { // ensure lights1 are only connected to source and full1 @@ -233,9 +246,19 @@ func TestShareAvailable_DisconnectedFullNodes(t *testing.T) { net.Disconnect(lights2[i].ID(), full1.ID()) } + // start reconstruction for fulls that should fail + ctxErr, cancelErr := context.WithTimeout(ctx, time.Second*5) + errg, errCtx := errgroup.WithContext(ctxErr) + errg.Go(func() error { + return full1.SharesAvailable(errCtx, eh) + }) + errg.Go(func() error { + return full2.SharesAvailable(errCtx, eh) + }) + // check that any of the fulls cannot reconstruct on their own err := errg.Wait() - require.ErrorIs(t, err, ErrNotAvailable) + require.ErrorIs(t, err, share.ErrNotAvailable) cancelErr() // but after they connect @@ -246,8 +269,14 @@ func 
TestShareAvailable_DisconnectedFullNodes(t *testing.T) { full2.ClearStorage() // they both should be able to reconstruct the block - err = full1.SharesAvailable(ctx, root) - require.NoError(t, err, ErrNotAvailable) - err = full2.SharesAvailable(ctx, root) - require.NoError(t, err, ErrNotAvailable) + errg, bctx := errgroup.WithContext(ctx) + errg.Go(func() error { + return full1.SharesAvailable(bctx, eh) + }) + errg.Go(func() error { + return full2.SharesAvailable(bctx, eh) + }) + require.NoError(t, errg.Wait()) + // wait for all routines to finish before exit, in case there are any errors to log + wg.Wait() } diff --git a/share/availability/full/testing.go b/share/availability/full/testing.go new file mode 100644 index 0000000000..46e97581f2 --- /dev/null +++ b/share/availability/full/testing.go @@ -0,0 +1,64 @@ +package full + +import ( + "context" + "testing" + "time" + + "github.com/ipfs/go-datastore" + routinghelpers "github.com/libp2p/go-libp2p-routing-helpers" + "github.com/libp2p/go-libp2p/p2p/discovery/routing" + "github.com/stretchr/testify/require" + + "github.com/celestiaorg/celestia-node/share" + availability_test "github.com/celestiaorg/celestia-node/share/availability/test" + "github.com/celestiaorg/celestia-node/share/eds" + "github.com/celestiaorg/celestia-node/share/getters" + "github.com/celestiaorg/celestia-node/share/ipld" + "github.com/celestiaorg/celestia-node/share/p2p/discovery" +) + +// GetterWithRandSquare provides a share.Getter filled with 'n' NMT +// trees of 'n' random shares, essentially storing a whole square. +func GetterWithRandSquare(t *testing.T, n int) (share.Getter, *share.Root) { + bServ := ipld.NewMemBlockservice() + getter := getters.NewIPLDGetter(bServ) + return getter, availability_test.RandFillBS(t, n, bServ) +} + +// RandNode creates a Full Node filled with a random block of the given size. +func RandNode(dn *availability_test.TestDagNet, squareSize int) (*availability_test.TestNode, *share.Root) { + nd := Node(dn) + return nd, availability_test.RandFillBS(dn.T, squareSize, nd.BlockService) +} + +// Node creates a new empty Full Node. 
+func Node(dn *availability_test.TestDagNet) *availability_test.TestNode { + nd := dn.NewTestNode() + nd.Getter = getters.NewIPLDGetter(nd.BlockService) + nd.Availability = TestAvailability(dn.T, nd.Getter) + return nd +} + +func TestAvailability(t *testing.T, getter share.Getter) *ShareAvailability { + params := discovery.DefaultParameters() + params.AdvertiseInterval = time.Second + params.PeersLimit = 10 + disc, err := discovery.NewDiscovery( + params, + nil, + routing.NewRoutingDiscovery(routinghelpers.Null{}), + "full", + ) + require.NoError(t, err) + store, err := eds.NewStore(eds.DefaultParameters(), t.TempDir(), datastore.NewMapDatastore()) + require.NoError(t, err) + err = store.Start(context.Background()) + require.NoError(t, err) + + t.Cleanup(func() { + err = store.Stop(context.Background()) + require.NoError(t, err) + }) + return NewShareAvailability(store, getter, disc) +} diff --git a/share/availability/light/availability.go b/share/availability/light/availability.go new file mode 100644 index 0000000000..1d35542344 --- /dev/null +++ b/share/availability/light/availability.go @@ -0,0 +1,151 @@ +package light + +import ( + "context" + "errors" + "sync" + + "github.com/ipfs/go-datastore" + "github.com/ipfs/go-datastore/autobatch" + "github.com/ipfs/go-datastore/namespace" + ipldFormat "github.com/ipfs/go-ipld-format" + logging "github.com/ipfs/go-log/v2" + + "github.com/celestiaorg/celestia-node/header" + "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/share/getters" +) + +var ( + log = logging.Logger("share/light") + cacheAvailabilityPrefix = datastore.NewKey("sampling_result") + writeBatchSize = 2048 +) + +// ShareAvailability implements share.Availability using Data Availability Sampling technique. +// It is light because it does not require the downloading of all the data to verify +// its availability. It is assumed that there are a lot of lightAvailability instances +// on the network doing sampling over the same Root to collectively verify its availability. +type ShareAvailability struct { + getter share.Getter + params Parameters + + // TODO(@Wondertan): Once we come to parallelized DASer, this lock becomes a contention point + // Related to #483 + // TODO: Striped locks? :D + dsLk sync.RWMutex + ds *autobatch.Datastore +} + +// NewShareAvailability creates a new light Availability. +func NewShareAvailability( + getter share.Getter, + ds datastore.Batching, + opts ...Option, +) *ShareAvailability { + params := DefaultParameters() + ds = namespace.Wrap(ds, cacheAvailabilityPrefix) + autoDS := autobatch.NewAutoBatching(ds, writeBatchSize) + + for _, opt := range opts { + opt(¶ms) + } + + return &ShareAvailability{ + getter: getter, + params: params, + ds: autoDS, + } +} + +// SharesAvailable randomly samples `params.SampleAmount` amount of Shares committed to the given +// ExtendedHeader. This way SharesAvailable subjectively verifies that Shares are available. 
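The confidence this sampling buys can be estimated with the standard argument from the 'Fraud and Data Availability Proofs' paper these tests cite: if a k×k square is unrecoverable, at least (k+1)² of its (2k)² extended shares must be withheld, so each uniform sample hits a withheld share with probability at least (k+1)²/(2k)². A self-contained sketch of that estimate (illustrative only, not part of this change):

	package main

	import (
		"fmt"
		"math"
	)

	// detectionProbability lower-bounds the chance that at least one of
	// `samples` uniform draws over a 2k x 2k extended square lands on a
	// withheld share, assuming the adversary withholds the minimum
	// (k+1)^2 shares needed to make the square unrecoverable.
	func detectionProbability(k, samples int) float64 {
		pHit := float64((k+1)*(k+1)) / float64(4*k*k)
		return 1 - math.Pow(1-pHit, float64(samples))
	}

	func main() {
		// DefaultSampleAmount (16) against a k = 16 square gives roughly 0.995.
		fmt.Printf("%.4f\n", detectionProbability(16, 16))
	}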
+func (la *ShareAvailability) SharesAvailable(ctx context.Context, header *header.ExtendedHeader) error { + dah := header.DAH + // short-circuit if the given root is minimum DAH of an empty data square + if share.DataHash(dah.Hash()).IsEmptyRoot() { + return nil + } + + // do not sample over Root that has already been sampled + key := rootKey(dah) + + la.dsLk.RLock() + exists, err := la.ds.Has(ctx, key) + la.dsLk.RUnlock() + if err != nil || exists { + return err + } + + log.Debugw("validate availability", "root", dah.String()) + // We assume the caller of this method has already performed basic validation on the + // given dah/root. If for some reason this has not happened, the node should panic. + if err := dah.ValidateBasic(); err != nil { + log.Errorw("availability validation cannot be performed on a malformed DataAvailabilityHeader", + "err", err) + panic(err) + } + samples, err := SampleSquare(len(dah.RowRoots), int(la.params.SampleAmount)) + if err != nil { + return err + } + + // indicate to the share.Getter that a blockservice session should be created. This + // functionality is optional and must be supported by the used share.Getter. + ctx = getters.WithSession(ctx) + + log.Debugw("starting sampling session", "root", dah.String()) + errs := make(chan error, len(samples)) + for _, s := range samples { + go func(s Sample) { + log.Debugw("fetching share", "root", dah.String(), "row", s.Row, "col", s.Col) + _, err := la.getter.GetShare(ctx, header, s.Row, s.Col) + if err != nil { + log.Debugw("error fetching share", "root", dah.String(), "row", s.Row, "col", s.Col) + } + // we don't really care about Share bodies at this point + // it also means we now saved the Share in local storage + select { + case errs <- err: + case <-ctx.Done(): + } + }(s) + } + + for range samples { + var err error + select { + case err = <-errs: + case <-ctx.Done(): + err = ctx.Err() + } + + if err != nil { + if errors.Is(err, context.Canceled) { + return err + } + log.Errorw("availability validation failed", "root", dah.String(), "err", err.Error()) + if ipldFormat.IsNotFound(err) || errors.Is(err, context.DeadlineExceeded) { + return share.ErrNotAvailable + } + return err + } + } + + la.dsLk.Lock() + err = la.ds.Put(ctx, key, []byte{}) + la.dsLk.Unlock() + if err != nil { + log.Errorw("storing root of successful SharesAvailable request to disk", "err", err) + } + return nil +} + +func rootKey(root *share.Root) datastore.Key { + return datastore.NewKey(root.String()) +} + +// Close flushes all queued writes to disk. 
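Close matters because the autobatch wrapper used above buffers Puts in memory until writeBatchSize entries accumulate, so sampling results recorded shortly before shutdown would otherwise be lost. A standalone sketch of that behaviour using the same go-datastore packages already imported in this file (the key is illustrative):

	ctx := context.Background()
	base := dssync.MutexWrap(datastore.NewMapDatastore())
	batched := autobatch.NewAutoBatching(base, 2048)

	// The Put is only queued; the backing datastore may not see it yet.
	_ = batched.Put(ctx, datastore.NewKey("sampling_result/example"), []byte{})

	// Flush pushes all queued writes down to the backing store, which is
	// exactly what ShareAvailability.Close does on shutdown.
	_ = batched.Flush(ctx)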
+func (la *ShareAvailability) Close(ctx context.Context) error { + return la.ds.Flush(ctx) +} diff --git a/share/availability/light/availability_test.go b/share/availability/light/availability_test.go new file mode 100644 index 0000000000..2ace654d50 --- /dev/null +++ b/share/availability/light/availability_test.go @@ -0,0 +1,243 @@ +package light + +import ( + "context" + _ "embed" + "strconv" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/celestiaorg/celestia-node/header/headertest" + "github.com/celestiaorg/celestia-node/share" + availability_test "github.com/celestiaorg/celestia-node/share/availability/test" + "github.com/celestiaorg/celestia-node/share/ipld" + "github.com/celestiaorg/celestia-node/share/sharetest" +) + +func TestSharesAvailableCaches(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + getter, eh := GetterWithRandSquare(t, 16) + dah := eh.DAH + avail := TestAvailability(getter) + + // cache doesn't have dah yet + has, err := avail.ds.Has(ctx, rootKey(dah)) + assert.NoError(t, err) + assert.False(t, has) + + err = avail.SharesAvailable(ctx, eh) + assert.NoError(t, err) + + // is now cached + has, err = avail.ds.Has(ctx, rootKey(dah)) + assert.NoError(t, err) + assert.True(t, has) +} + +func TestSharesAvailableHitsCache(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + getter, _ := GetterWithRandSquare(t, 16) + avail := TestAvailability(getter) + + bServ := ipld.NewMemBlockservice() + dah := availability_test.RandFillBS(t, 16, bServ) + eh := headertest.RandExtendedHeaderWithRoot(t, dah) + + // blockstore doesn't actually have the dah + err := avail.SharesAvailable(ctx, eh) + require.Error(t, err) + + // cache doesn't have dah yet, since it errored + has, err := avail.ds.Has(ctx, rootKey(dah)) + assert.NoError(t, err) + assert.False(t, has) + + err = avail.ds.Put(ctx, rootKey(dah), []byte{}) + require.NoError(t, err) + + // should hit cache after putting + err = avail.SharesAvailable(ctx, eh) + require.NoError(t, err) +} + +func TestSharesAvailableEmptyRoot(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + getter, _ := GetterWithRandSquare(t, 16) + avail := TestAvailability(getter) + + eh := headertest.RandExtendedHeaderWithRoot(t, share.EmptyRoot()) + err := avail.SharesAvailable(ctx, eh) + assert.NoError(t, err) +} + +func TestSharesAvailable(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + getter, dah := GetterWithRandSquare(t, 16) + avail := TestAvailability(getter) + err := avail.SharesAvailable(ctx, dah) + assert.NoError(t, err) +} + +func TestSharesAvailableFailed(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + bServ := ipld.NewMemBlockservice() + dah := availability_test.RandFillBS(t, 16, bServ) + eh := headertest.RandExtendedHeaderWithRoot(t, dah) + + getter, _ := GetterWithRandSquare(t, 16) + avail := TestAvailability(getter) + err := avail.SharesAvailable(ctx, eh) + assert.Error(t, err) +} + +func TestShareAvailableOverMocknet_Light(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + net := availability_test.NewTestDAGNet(ctx, t) + _, root := RandNode(net, 16) + eh := headertest.RandExtendedHeader(t) + eh.DAH = root + nd := Node(net) + net.ConnectAll() + + err := nd.SharesAvailable(ctx, eh) + assert.NoError(t, err) +} + +func 
TestGetShare(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + n := 16 + getter, eh := GetterWithRandSquare(t, n) + + for i := range make([]bool, n) { + for j := range make([]bool, n) { + sh, err := getter.GetShare(ctx, eh, i, j) + assert.NotNil(t, sh) + assert.NoError(t, err) + } + } +} + +func TestService_GetSharesByNamespace(t *testing.T) { + var tests = []struct { + squareSize int + expectedShareCount int + }{ + {squareSize: 4, expectedShareCount: 2}, + {squareSize: 16, expectedShareCount: 2}, + {squareSize: 128, expectedShareCount: 2}, + } + + for _, tt := range tests { + t.Run("size: "+strconv.Itoa(tt.squareSize), func(t *testing.T) { + getter, bServ := EmptyGetter() + totalShares := tt.squareSize * tt.squareSize + randShares := sharetest.RandShares(t, totalShares) + idx1 := (totalShares - 1) / 2 + idx2 := totalShares / 2 + if tt.expectedShareCount > 1 { + // make it so that two rows have the same namespace + copy(share.GetNamespace(randShares[idx2]), share.GetNamespace(randShares[idx1])) + } + root := availability_test.FillBS(t, bServ, randShares) + eh := headertest.RandExtendedHeader(t) + eh.DAH = root + randNamespace := share.GetNamespace(randShares[idx1]) + + shares, err := getter.GetSharesByNamespace(context.Background(), eh, randNamespace) + require.NoError(t, err) + require.NoError(t, shares.Verify(root, randNamespace)) + flattened := shares.Flatten() + assert.Len(t, flattened, tt.expectedShareCount) + for _, value := range flattened { + assert.Equal(t, randNamespace, share.GetNamespace(value)) + } + if tt.expectedShareCount > 1 { + // idx1 is always smaller than idx2 + assert.Equal(t, randShares[idx1], flattened[0]) + assert.Equal(t, randShares[idx2], flattened[1]) + } + }) + t.Run("last two rows of a 4x4 square that have the same namespace have valid NMT proofs", func(t *testing.T) { + squareSize := 4 + totalShares := squareSize * squareSize + getter, bServ := EmptyGetter() + randShares := sharetest.RandShares(t, totalShares) + lastNID := share.GetNamespace(randShares[totalShares-1]) + for i := totalShares / 2; i < totalShares; i++ { + copy(share.GetNamespace(randShares[i]), lastNID) + } + root := availability_test.FillBS(t, bServ, randShares) + eh := headertest.RandExtendedHeader(t) + eh.DAH = root + + shares, err := getter.GetSharesByNamespace(context.Background(), eh, lastNID) + require.NoError(t, err) + require.NoError(t, shares.Verify(root, lastNID)) + }) + } +} + +func TestGetShares(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + n := 16 + getter, eh := GetterWithRandSquare(t, n) + + eds, err := getter.GetEDS(ctx, eh) + require.NoError(t, err) + gotDAH, err := share.NewRoot(eds) + require.NoError(t, err) + + require.True(t, eh.DAH.Equals(gotDAH)) +} + +func TestService_GetSharesByNamespaceNotFound(t *testing.T) { + getter, eh := GetterWithRandSquare(t, 1) + eh.DAH.RowRoots = nil + + emptyShares, err := getter.GetSharesByNamespace(context.Background(), eh, sharetest.RandV0Namespace()) + require.NoError(t, err) + require.Empty(t, emptyShares.Flatten()) +} + +func BenchmarkService_GetSharesByNamespace(b *testing.B) { + var tests = []struct { + amountShares int + }{ + {amountShares: 4}, + {amountShares: 16}, + {amountShares: 128}, + } + + for _, tt := range tests { + b.Run(strconv.Itoa(tt.amountShares), func(b *testing.B) { + t := &testing.T{} + getter, eh := GetterWithRandSquare(t, tt.amountShares) + root := eh.DAH + randNamespace := 
root.RowRoots[(len(root.RowRoots)-1)/2][:share.NamespaceSize]
+			root.RowRoots[(len(root.RowRoots) / 2)] = root.RowRoots[(len(root.RowRoots)-1)/2]
+			b.ResetTimer()
+			for i := 0; i < b.N; i++ {
+				_, err := getter.GetSharesByNamespace(context.Background(), eh, randNamespace)
+				require.NoError(t, err)
+			}
+		})
+	}
+}
diff --git a/share/availability/light/options.go b/share/availability/light/options.go
new file mode 100644
index 0000000000..80fd27acfd
--- /dev/null
+++ b/share/availability/light/options.go
@@ -0,0 +1,50 @@
+package light
+
+import (
+	"fmt"
+)
+
+// SampleAmount specifies the minimum required number of samples a light node must perform
+// before declaring that a block is available
+var (
+	DefaultSampleAmount uint = 16
+)
+
+// Parameters is the set of Parameters that must be configured for the light
+// availability implementation
+type Parameters struct {
+	SampleAmount uint // The minimum required number of samples to perform
+}
+
+// Option is a function that configures light availability Parameters
+type Option func(*Parameters)
+
+// DefaultParameters returns the default Parameters' configuration values
+// for the light availability implementation
+func DefaultParameters() Parameters {
+	return Parameters{
+		SampleAmount: DefaultSampleAmount,
+	}
+}
+
+// Validate validates the values in Parameters
+func (p *Parameters) Validate() error {
+	if p.SampleAmount <= 0 {
+		return fmt.Errorf(
+			"light availability: invalid option: value %s was %s, where it should be %s",
+			"SampleAmount",
+			"<= 0", // current value
+			"> 0",  // what the value should be
+		)
+	}
+
+	return nil
+}
+
+// WithSampleAmount is a functional option that the Availability interface
+// implementers use to set the SampleAmount configuration param
+func WithSampleAmount(sampleAmount uint) Option {
+	return func(p *Parameters) {
+		p.SampleAmount = sampleAmount
+	}
+}
diff --git a/service/share/sample.go b/share/availability/light/sample.go
similarity index 99%
rename from service/share/sample.go
rename to share/availability/light/sample.go
index 65da855486..e66ff9aafe 100644
--- a/service/share/sample.go
+++ b/share/availability/light/sample.go
@@ -1,5 +1,5 @@
 // TODO(@Wondertan): Instead of doing sampling over the coordinates do a random walk over NMT trees.
-package share
+package light
 
 import (
 	crand "crypto/rand"
diff --git a/service/share/sample_test.go b/share/availability/light/sample_test.go
similarity index 97%
rename from service/share/sample_test.go
rename to share/availability/light/sample_test.go
index c93aa1d28a..7092b99e83 100644
--- a/service/share/sample_test.go
+++ b/share/availability/light/sample_test.go
@@ -1,4 +1,4 @@
-package share
+package light
 
 import (
 	"testing"
diff --git a/share/availability/light/testing.go b/share/availability/light/testing.go
new file mode 100644
index 0000000000..9efc9ff14a
--- /dev/null
+++ b/share/availability/light/testing.go
@@ -0,0 +1,60 @@
+package light
+
+import (
+	"testing"
+
+	"github.com/ipfs/boxo/blockservice"
+	"github.com/ipfs/go-datastore"
+
+	"github.com/celestiaorg/celestia-node/header"
+	"github.com/celestiaorg/celestia-node/header/headertest"
+	"github.com/celestiaorg/celestia-node/share"
+	availability_test "github.com/celestiaorg/celestia-node/share/availability/test"
+	"github.com/celestiaorg/celestia-node/share/getters"
+	"github.com/celestiaorg/celestia-node/share/ipld"
+)
+
+// GetterWithRandSquare provides a share.Getter filled with 'n' NMT trees of 'n' random shares,
+// essentially storing a whole square.
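The functional options above feed the light NewShareAvailability constructor shown earlier in this diff, and the testing helpers below supply the getters; a hypothetical construction that raises the sample count might look like:

	getter, _ := EmptyGetter()
	ds := datastore.NewMapDatastore()
	avail := NewShareAvailability(getter, ds, WithSampleAmount(32))

Validate is available to reject a zero SampleAmount up front; note that nothing in the code shown here calls it automatically.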
+func GetterWithRandSquare(t *testing.T, n int) (share.Getter, *header.ExtendedHeader) {
+	bServ := ipld.NewMemBlockservice()
+	getter := getters.NewIPLDGetter(bServ)
+	root := availability_test.RandFillBS(t, n, bServ)
+	eh := headertest.RandExtendedHeader(t)
+	eh.DAH = root
+
+	return getter, eh
+}
+
+// EmptyGetter provides an unfilled share.Getter with corresponding blockservice.BlockService that
+// can be filled by the test.
+func EmptyGetter() (share.Getter, blockservice.BlockService) {
+	bServ := ipld.NewMemBlockservice()
+	getter := getters.NewIPLDGetter(bServ)
+	return getter, bServ
+}
+
+// RandNode creates a Light Node filled with a random block of the given size.
+func RandNode(dn *availability_test.TestDagNet, squareSize int) (*availability_test.TestNode, *share.Root) {
+	nd := Node(dn)
+	return nd, availability_test.RandFillBS(dn.T, squareSize, nd.BlockService)
+}
+
+// Node creates a new empty Light Node.
+func Node(dn *availability_test.TestDagNet) *availability_test.TestNode {
+	nd := dn.NewTestNode()
+	nd.Getter = getters.NewIPLDGetter(nd.BlockService)
+	nd.Availability = TestAvailability(nd.Getter)
+	return nd
+}
+
+func TestAvailability(getter share.Getter) *ShareAvailability {
+	ds := datastore.NewMapDatastore()
+	return NewShareAvailability(getter, ds)
+}
+
+func SubNetNode(sn *availability_test.SubNet) *availability_test.TestNode {
+	nd := Node(sn.TestDagNet)
+	sn.AddNode(nd)
+	return nd
+}
diff --git a/share/availability/mocks/availability.go b/share/availability/mocks/availability.go
new file mode 100644
index 0000000000..fc68d3d2bc
--- /dev/null
+++ b/share/availability/mocks/availability.go
@@ -0,0 +1,50 @@
+// Code generated by MockGen. DO NOT EDIT.
+// Source: github.com/celestiaorg/celestia-node/share (interfaces: Availability)
+
+// Package mocks is a generated GoMock package.
+package mocks
+
+import (
+	context "context"
+	reflect "reflect"
+
+	header "github.com/celestiaorg/celestia-node/header"
+	gomock "github.com/golang/mock/gomock"
+)
+
+// MockAvailability is a mock of Availability interface.
+type MockAvailability struct {
+	ctrl     *gomock.Controller
+	recorder *MockAvailabilityMockRecorder
+}
+
+// MockAvailabilityMockRecorder is the mock recorder for MockAvailability.
+type MockAvailabilityMockRecorder struct {
+	mock *MockAvailability
+}
+
+// NewMockAvailability creates a new mock instance.
+func NewMockAvailability(ctrl *gomock.Controller) *MockAvailability {
+	mock := &MockAvailability{ctrl: ctrl}
+	mock.recorder = &MockAvailabilityMockRecorder{mock}
+	return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use.
+func (m *MockAvailability) EXPECT() *MockAvailabilityMockRecorder {
+	return m.recorder
+}
+
+// SharesAvailable mocks base method.
+func (m *MockAvailability) SharesAvailable(arg0 context.Context, arg1 *header.ExtendedHeader) error {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "SharesAvailable", arg0, arg1)
+	ret0, _ := ret[0].(error)
+	return ret0
+}
+
+// SharesAvailable indicates an expected call of SharesAvailable.
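+//
+// A sketch of how the mock is typically driven in a test (hypothetical usage;
+// ctrl, ctx and eh come from the surrounding test):
+//
+//	avail := NewMockAvailability(ctrl)
+//	avail.EXPECT().SharesAvailable(gomock.Any(), gomock.Any()).Return(nil)
+//	err := avail.SharesAvailable(ctx, eh) // returns nil, as programmed above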
+func (mr *MockAvailabilityMockRecorder) SharesAvailable(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SharesAvailable", reflect.TypeOf((*MockAvailability)(nil).SharesAvailable), arg0, arg1) +} diff --git a/share/availability/test/corrupt_data.go b/share/availability/test/corrupt_data.go new file mode 100644 index 0000000000..1ff553f8b3 --- /dev/null +++ b/share/availability/test/corrupt_data.go @@ -0,0 +1,130 @@ +package availability_test + +import ( + "context" + "crypto/rand" + "fmt" + mrand "math/rand" + "testing" + + "github.com/ipfs/boxo/blockstore" + blocks "github.com/ipfs/go-block-format" + "github.com/ipfs/go-cid" + ds "github.com/ipfs/go-datastore" + dssync "github.com/ipfs/go-datastore/sync" +) + +var _ blockstore.Blockstore = (*FraudulentBlockstore)(nil) + +// CorruptBlock is a block where the cid doesn't match the data. It fulfills the blocks.Block +// interface. +type CorruptBlock struct { + cid cid.Cid + data []byte +} + +func (b *CorruptBlock) RawData() []byte { + return b.data +} + +func (b *CorruptBlock) Cid() cid.Cid { + return b.cid +} + +func (b *CorruptBlock) String() string { + return fmt.Sprintf("[Block %s]", b.Cid()) +} + +func (b *CorruptBlock) Loggable() map[string]interface{} { + return map[string]interface{}{ + "block": b.Cid().String(), + } +} + +func NewCorruptBlock(data []byte, fakeCID cid.Cid) *CorruptBlock { + return &CorruptBlock{ + fakeCID, + data, + } +} + +// FraudulentBlockstore is a mock blockstore.Blockstore that saves both corrupted and original data +// for every block it receives. If FraudulentBlockstore.Attacking is true, it will serve the +// corrupted data on requests. +type FraudulentBlockstore struct { + ds.Datastore + Attacking bool +} + +func (fb FraudulentBlockstore) Has(context.Context, cid.Cid) (bool, error) { + return false, nil +} + +func (fb FraudulentBlockstore) Get(ctx context.Context, cid cid.Cid) (blocks.Block, error) { + key := cid.String() + if fb.Attacking { + key = "corrupt_get" + key + } + + data, err := fb.Datastore.Get(ctx, ds.NewKey(key)) + if err != nil { + return nil, err + } + return NewCorruptBlock(data, cid), nil +} + +func (fb FraudulentBlockstore) GetSize(ctx context.Context, cid cid.Cid) (int, error) { + key := cid.String() + if fb.Attacking { + key = "corrupt_size" + key + } + + return fb.Datastore.GetSize(ctx, ds.NewKey(key)) +} + +func (fb FraudulentBlockstore) Put(ctx context.Context, block blocks.Block) error { + err := fb.Datastore.Put(ctx, ds.NewKey(block.Cid().String()), block.RawData()) + if err != nil { + return err + } + + // create data that doesn't match the CID with arbitrary lengths between 1 and + // len(block.RawData())*2 + corrupted := make([]byte, 1+mrand.Int()%(len(block.RawData())*2-1)) //nolint:gosec + _, _ = rand.Read(corrupted) + return fb.Datastore.Put(ctx, ds.NewKey("corrupt"+block.Cid().String()), corrupted) +} + +func (fb FraudulentBlockstore) PutMany(ctx context.Context, blocks []blocks.Block) error { + for _, b := range blocks { + err := fb.Put(ctx, b) + if err != nil { + return err + } + } + return nil +} + +func (fb FraudulentBlockstore) DeleteBlock(context.Context, cid.Cid) error { + panic("implement me") +} + +func (fb FraudulentBlockstore) AllKeysChan(context.Context) (<-chan cid.Cid, error) { + panic("implement me") +} + +func (fb FraudulentBlockstore) HashOnRead(bool) { + panic("implement me") +} + +// MockNode creates a TestNode that uses a FraudulentBlockstore to simulate serving corrupted data. 
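+//
+// A typical test flow (hypothetical sketch): fill the node while it is honest,
+// then flip it to serving corrupted blocks:
+//
+//	nd, fbs := MockNode(t, net)
+//	// ... add valid data while fbs.Attacking is false ...
+//	fbs.Attacking = true // from here on, Get/GetSize serve corrupted data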
+func MockNode(t *testing.T, net *TestDagNet) (*TestNode, *FraudulentBlockstore) { + t.Helper() + dstore := dssync.MutexWrap(ds.NewMapDatastore()) + mockBS := &FraudulentBlockstore{ + Datastore: dstore, + Attacking: false, + } + provider := net.NewTestNodeWithBlockstore(dstore, mockBS) + return provider, mockBS +} diff --git a/share/availability/test/testing.go b/share/availability/test/testing.go new file mode 100644 index 0000000000..64e8d23bb7 --- /dev/null +++ b/share/availability/test/testing.go @@ -0,0 +1,163 @@ +package availability_test + +import ( + "context" + "testing" + + "github.com/ipfs/boxo/bitswap" + "github.com/ipfs/boxo/bitswap/network" + "github.com/ipfs/boxo/blockservice" + "github.com/ipfs/boxo/blockstore" + "github.com/ipfs/boxo/routing/offline" + ds "github.com/ipfs/go-datastore" + dssync "github.com/ipfs/go-datastore/sync" + record "github.com/libp2p/go-libp2p-record" + "github.com/libp2p/go-libp2p/core/host" + "github.com/libp2p/go-libp2p/core/peer" + mocknet "github.com/libp2p/go-libp2p/p2p/net/mock" + "github.com/stretchr/testify/require" + + "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/share/ipld" + "github.com/celestiaorg/celestia-node/share/sharetest" +) + +// RandFillBS fills the given BlockService with a random block of a given size. +func RandFillBS(t *testing.T, n int, bServ blockservice.BlockService) *share.Root { + shares := sharetest.RandShares(t, n*n) + return FillBS(t, bServ, shares) +} + +// FillBS fills the given BlockService with the given shares. +func FillBS(t *testing.T, bServ blockservice.BlockService, shares []share.Share) *share.Root { + eds, err := ipld.AddShares(context.TODO(), shares, bServ) + require.NoError(t, err) + dah, err := share.NewRoot(eds) + require.NoError(t, err) + return dah +} + +type TestNode struct { + net *TestDagNet + share.Getter + share.Availability + blockservice.BlockService + host.Host +} + +// ClearStorage cleans up the storage of the node. +func (n *TestNode) ClearStorage() { + keys, err := n.Blockstore().AllKeysChan(n.net.ctx) + require.NoError(n.net.T, err) + + for k := range keys { + err := n.DeleteBlock(n.net.ctx, k) + require.NoError(n.net.T, err) + } +} + +type TestDagNet struct { + ctx context.Context + T *testing.T + net mocknet.Mocknet + nodes []*TestNode +} + +// NewTestDAGNet creates a new testing swarm utility to spawn different nodes and test how they +// interact and/or exchange data. +func NewTestDAGNet(ctx context.Context, t *testing.T) *TestDagNet { + return &TestDagNet{ + ctx: ctx, + T: t, + net: mocknet.New(), + } +} + +// NewTestNodeWithBlockstore creates a new plain TestNode with the given blockstore that can serve +// and request data. 
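+//
+// It is the extension point for tests that need a custom blockstore, e.g.
+// (hypothetical sketch mirroring MockNode in corrupt_data.go):
+//
+//	dstore := dssync.MutexWrap(ds.NewMapDatastore())
+//	nd := dn.NewTestNodeWithBlockstore(dstore, &FraudulentBlockstore{Datastore: dstore})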
+func (dn *TestDagNet) NewTestNodeWithBlockstore(dstore ds.Datastore, bstore blockstore.Blockstore) *TestNode {
+	hst, err := dn.net.GenPeer()
+	require.NoError(dn.T, err)
+	routing := offline.NewOfflineRouter(dstore, record.NamespacedValidator{})
+	bs := bitswap.New(
+		dn.ctx,
+		network.NewFromIpfsHost(hst, routing),
+		bstore,
+		bitswap.ProvideEnabled(false),          // disable routines for DHT content provides, as we don't use them
+		bitswap.EngineBlockstoreWorkerCount(1), // otherwise it spawns 128 routines which is too much for tests
+		bitswap.EngineTaskWorkerCount(2),
+		bitswap.TaskWorkerCount(2),
+		bitswap.SetSimulateDontHavesOnTimeout(false),
+		bitswap.SetSendDontHaves(false),
+	)
+	nd := &TestNode{
+		net:          dn,
+		BlockService: ipld.NewBlockservice(bstore, bs),
+		Host:         hst,
+	}
+	dn.nodes = append(dn.nodes, nd)
+	return nd
+}
+
+// NewTestNode creates a plain network node that can serve and request data.
+func (dn *TestDagNet) NewTestNode() *TestNode {
+	dstore := dssync.MutexWrap(ds.NewMapDatastore())
+	bstore := blockstore.NewBlockstore(dstore)
+	return dn.NewTestNodeWithBlockstore(dstore, bstore)
+}
+
+// ConnectAll connects all the peers registered on the TestDagNet.
+func (dn *TestDagNet) ConnectAll() {
+	err := dn.net.LinkAll()
+	require.NoError(dn.T, err)
+
+	err = dn.net.ConnectAllButSelf()
+	require.NoError(dn.T, err)
+}
+
+// Connect connects two given peers.
+func (dn *TestDagNet) Connect(peerA, peerB peer.ID) {
+	_, err := dn.net.LinkPeers(peerA, peerB)
+	require.NoError(dn.T, err)
+	_, err = dn.net.ConnectPeers(peerA, peerB)
+	require.NoError(dn.T, err)
+}
+
+// Disconnect disconnects two peers.
+// It does a hard disconnect, meaning that disconnected peers won't be able to reconnect on their
+// own but only with TestDagNet.Connect or TestDagNet.ConnectAll.
+func (dn *TestDagNet) Disconnect(peerA, peerB peer.ID) {
+	err := dn.net.UnlinkPeers(peerA, peerB)
+	require.NoError(dn.T, err)
+	err = dn.net.DisconnectPeers(peerA, peerB)
+	require.NoError(dn.T, err)
+}
+
+type SubNet struct {
+	*TestDagNet
+	nodes []*TestNode
+}
+
+func (dn *TestDagNet) SubNet() *SubNet {
+	return &SubNet{dn, nil}
+}
+
+func (sn *SubNet) AddNode(nd *TestNode) {
+	sn.nodes = append(sn.nodes, nd)
+}
+
+func (sn *SubNet) ConnectAll() {
+	nodes := sn.nodes
+	for _, n1 := range nodes {
+		for _, n2 := range nodes {
+			if n1 == n2 {
+				continue
+			}
+			_, err := sn.net.LinkPeers(n1.ID(), n2.ID())
+			require.NoError(sn.T, err)
+
+			_, err = sn.net.ConnectPeers(n1.ID(), n2.ID())
+			require.NoError(sn.T, err)
+		}
+	}
+}
diff --git a/share/doc.go b/share/doc.go
new file mode 100644
index 0000000000..97229932a7
--- /dev/null
+++ b/share/doc.go
@@ -0,0 +1,17 @@
+/*
+Package share contains logic related to the retrieval and random sampling of shares of
+block data.
+
+Though this package contains several useful methods for getting specific shares and/or
+sampling them at random, a particularly useful method is GetSharesByNamespace which retrieves
+all shares of block data of the given Namespace from the block associated with the given
+DataAvailabilityHeader (DAH, but referred to as Root within this package).
+
+This package also contains the declaration of the Availability interface. Implementations of
+the interface (light, full) are located in the availability sub-folder.
+The light Availability implementation samples 16 shares of block data (enough to verify
+the block's availability on the network).
+The full Availability implementation samples as many shares as necessary to fully reconstruct
+the block data.
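+
+As an illustration, a consumer of the interface usually needs only a single
+call (a hedged sketch, not a prescribed flow; avail stands for any
+Availability implementation and eh for an ExtendedHeader obtained elsewhere):
+
+	if err := avail.SharesAvailable(ctx, eh); err != nil {
+		// the data behind eh.DAH could not be verified as available
+	}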
+*/ +package share diff --git a/share/eds/adapters.go b/share/eds/adapters.go new file mode 100644 index 0000000000..8bf2340d91 --- /dev/null +++ b/share/eds/adapters.go @@ -0,0 +1,66 @@ +package eds + +import ( + "context" + "sync" + + "github.com/filecoin-project/dagstore" + "github.com/ipfs/boxo/blockservice" + blocks "github.com/ipfs/go-block-format" + "github.com/ipfs/go-cid" +) + +var _ blockservice.BlockGetter = (*BlockGetter)(nil) + +// NewBlockGetter creates new blockservice.BlockGetter adapter from dagstore.ReadBlockstore +func NewBlockGetter(store dagstore.ReadBlockstore) *BlockGetter { + return &BlockGetter{store: store} +} + +// BlockGetter is an adapter for dagstore.ReadBlockstore to implement blockservice.BlockGetter +// interface. +type BlockGetter struct { + store dagstore.ReadBlockstore +} + +// GetBlock gets the requested block by the given CID. +func (bg *BlockGetter) GetBlock(ctx context.Context, cid cid.Cid) (blocks.Block, error) { + return bg.store.Get(ctx, cid) +} + +// GetBlocks does a batch request for the given cids, returning blocks as +// they are found, in no particular order. +// +// It implements blockservice.BlockGetter interface, that requires: +// It may not be able to find all requested blocks (or the context may +// be canceled). In that case, it will close the channel early. It is up +// to the consumer to detect this situation and keep track which blocks +// it has received and which it hasn't. +func (bg *BlockGetter) GetBlocks(ctx context.Context, cids []cid.Cid) <-chan blocks.Block { + bCh := make(chan blocks.Block) + + go func() { + var wg sync.WaitGroup + wg.Add(len(cids)) + for _, c := range cids { + go func(cid cid.Cid) { + defer wg.Done() + block, err := bg.store.Get(ctx, cid) + if err != nil { + log.Debugw("getblocks: error getting block by cid", "cid", cid, "error", err) + return + } + + select { + case bCh <- block: + case <-ctx.Done(): + return + } + }(c) + } + wg.Wait() + close(bCh) + }() + + return bCh +} diff --git a/share/eds/adapters_test.go b/share/eds/adapters_test.go new file mode 100644 index 0000000000..70165b81c8 --- /dev/null +++ b/share/eds/adapters_test.go @@ -0,0 +1,148 @@ +package eds + +import ( + "context" + "errors" + mrand "math/rand" + "sort" + "testing" + "time" + + blocks "github.com/ipfs/go-block-format" + "github.com/ipfs/go-cid" + "github.com/stretchr/testify/require" + + "github.com/celestiaorg/celestia-node/share/ipld" +) + +func TestBlockGetter_GetBlocks(t *testing.T) { + t.Run("happy path", func(t *testing.T) { + cids := randCIDs(t, 32) + // sort cids in asc order + sort.Slice(cids, func(i, j int) bool { + return cids[i].String() < cids[j].String() + }) + + bg := &BlockGetter{store: rbsMock{}} + blocksCh := bg.GetBlocks(context.Background(), cids) + + // collect blocks from channel + blocks := make([]blocks.Block, 0, len(cids)) + for block := range blocksCh { + blocks = append(blocks, block) + } + + // sort blocks in cid asc order + sort.Slice(blocks, func(i, j int) bool { + return blocks[i].Cid().String() < blocks[j].Cid().String() + }) + + // validate results + require.Equal(t, len(cids), len(blocks)) + for i, block := range blocks { + require.Equal(t, cids[i].String(), block.Cid().String()) + } + }) + t.Run("retrieval error", func(t *testing.T) { + cids := randCIDs(t, 32) + + // split cids into failed and succeeded + failedLen := mrand.Intn(len(cids)-1) + 1 + failed := make(map[cid.Cid]struct{}, failedLen) + succeeded := make([]cid.Cid, 0, len(cids)-failedLen) + for i, cid := range cids { + if i < failedLen 
{ + failed[cid] = struct{}{} + continue + } + succeeded = append(succeeded, cid) + } + + // sort succeeded cids in asc order + sort.Slice(succeeded, func(i, j int) bool { + return succeeded[i].String() < succeeded[j].String() + }) + + bg := &BlockGetter{store: rbsMock{failed: failed}} + blocksCh := bg.GetBlocks(context.Background(), cids) + + // collect blocks from channel + blocks := make([]blocks.Block, 0, len(cids)) + for block := range blocksCh { + blocks = append(blocks, block) + } + + // sort blocks in cid asc order + sort.Slice(blocks, func(i, j int) bool { + return blocks[i].Cid().String() < blocks[j].Cid().String() + }) + + // validate results + require.Equal(t, len(succeeded), len(blocks)) + for i, block := range blocks { + require.Equal(t, succeeded[i].String(), block.Cid().String()) + } + }) + t.Run("retrieval timeout", func(t *testing.T) { + cids := randCIDs(t, 128) + + bg := &BlockGetter{ + store: rbsMock{}, + } + + // cancel the context before any blocks are collected + ctx, cancel := context.WithCancel(context.Background()) + cancel() + + blocksCh := bg.GetBlocks(ctx, cids) + + // pretend nobody is reading from blocksCh after context is canceled + time.Sleep(50 * time.Millisecond) + + // blocksCh should be closed indicating GetBlocks exited + select { + case _, ok := <-blocksCh: + require.False(t, ok) + default: + t.Error("channel is not closed on canceled context") + } + }) +} + +// rbsMock is a dagstore.ReadBlockstore mock +type rbsMock struct { + failed map[cid.Cid]struct{} +} + +func (r rbsMock) Has(context.Context, cid.Cid) (bool, error) { + panic("implement me") +} + +func (r rbsMock) Get(_ context.Context, cid cid.Cid) (blocks.Block, error) { + // return error for failed items + if _, ok := r.failed[cid]; ok { + return nil, errors.New("not found") + } + + return blocks.NewBlockWithCid(nil, cid) +} + +func (r rbsMock) GetSize(context.Context, cid.Cid) (int, error) { + panic("implement me") +} + +func (r rbsMock) AllKeysChan(context.Context) (<-chan cid.Cid, error) { + panic("implement me") +} + +func (r rbsMock) HashOnRead(bool) { + panic("implement me") +} + +func randCIDs(t *testing.T, n int) []cid.Cid { + cids := make([]cid.Cid, n) + for i := range cids { + cids[i] = ipld.RandNamespacedCID(t) + } + return cids +} diff --git a/share/eds/blockstore.go b/share/eds/blockstore.go new file mode 100644 index 0000000000..e44601870e --- /dev/null +++ b/share/eds/blockstore.go @@ -0,0 +1,168 @@ +package eds + +import ( + "context" + "errors" + "fmt" + + bstore "github.com/ipfs/boxo/blockstore" + "github.com/ipfs/boxo/datastore/dshelp" + blocks "github.com/ipfs/go-block-format" + "github.com/ipfs/go-cid" + "github.com/ipfs/go-datastore" + "github.com/ipfs/go-datastore/namespace" + ipld "github.com/ipfs/go-ipld-format" +) + +var _ bstore.Blockstore = (*blockstore)(nil) + +var ( + blockstoreCacheKey = datastore.NewKey("bs-cache") + errUnsupportedOperation = errors.New("unsupported operation") +) + +// blockstore implements the store.Blockstore interface on an EDSStore. +// The lru cache approach is heavily inspired by the existing implementation upstream. +// We simplified the design to not support multiple shards per key, call GetSize directly on the +// underlying RO blockstore, and do not throw errors on Put/PutMany. Also, we do not abstract away +// the blockstore operations. +// +// The intuition here is that each CAR file is its own blockstore, so we need this top level +// implementation to allow for the blockstore operations to be routed to the underlying stores. 
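+//
+// Conceptually, a read resolves in two steps (a sketch of the flow coded
+// below, not additional API):
+//
+//	shards := ShardsContainingMultihash(cid.Hash()) // which CAR file(s) hold the block
+//	block := shardBlockstore(shards[0]).Get(cid)    // read it from that CAR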
+type blockstore struct { + store *Store + ds datastore.Batching +} + +func newBlockstore(store *Store, ds datastore.Batching) *blockstore { + return &blockstore{ + store: store, + ds: namespace.Wrap(ds, blockstoreCacheKey), + } +} + +func (bs *blockstore) Has(ctx context.Context, cid cid.Cid) (bool, error) { + keys, err := bs.store.dgstr.ShardsContainingMultihash(ctx, cid.Hash()) + if errors.Is(err, ErrNotFound) || errors.Is(err, ErrNotFoundInIndex) { + // key wasn't found in top level blockstore, but could be in datastore while being reconstructed + dsHas, dsErr := bs.ds.Has(ctx, dshelp.MultihashToDsKey(cid.Hash())) + if dsErr != nil { + return false, nil + } + return dsHas, nil + } + if err != nil { + return false, err + } + + return len(keys) > 0, nil +} + +func (bs *blockstore) Get(ctx context.Context, cid cid.Cid) (blocks.Block, error) { + blockstr, err := bs.getReadOnlyBlockstore(ctx, cid) + if err == nil { + defer closeAndLog("blockstore", blockstr) + return blockstr.Get(ctx, cid) + } + + if errors.Is(err, ErrNotFound) || errors.Is(err, ErrNotFoundInIndex) { + k := dshelp.MultihashToDsKey(cid.Hash()) + blockData, err := bs.ds.Get(ctx, k) + if err == nil { + return blocks.NewBlockWithCid(blockData, cid) + } + // nmt's GetNode expects an ipld.ErrNotFound when a cid is not found. + return nil, ipld.ErrNotFound{Cid: cid} + } + + log.Debugf("failed to get blockstore for cid %s: %s", cid, err) + return nil, err +} + +func (bs *blockstore) GetSize(ctx context.Context, cid cid.Cid) (int, error) { + blockstr, err := bs.getReadOnlyBlockstore(ctx, cid) + if err == nil { + defer closeAndLog("blockstore", blockstr) + return blockstr.GetSize(ctx, cid) + } + + if errors.Is(err, ErrNotFound) || errors.Is(err, ErrNotFoundInIndex) { + k := dshelp.MultihashToDsKey(cid.Hash()) + size, err := bs.ds.GetSize(ctx, k) + if err == nil { + return size, nil + } + // nmt's GetSize expects an ipld.ErrNotFound when a cid is not found. + return 0, ipld.ErrNotFound{Cid: cid} + } + + log.Debugf("failed to get size for cid %s: %s", cid, err) + return 0, err +} + +func (bs *blockstore) DeleteBlock(ctx context.Context, cid cid.Cid) error { + k := dshelp.MultihashToDsKey(cid.Hash()) + return bs.ds.Delete(ctx, k) +} + +func (bs *blockstore) Put(ctx context.Context, blk blocks.Block) error { + k := dshelp.MultihashToDsKey(blk.Cid().Hash()) + // note: we leave duplicate resolution to the underlying datastore + return bs.ds.Put(ctx, k, blk.RawData()) +} + +func (bs *blockstore) PutMany(ctx context.Context, blocks []blocks.Block) error { + if len(blocks) == 1 { + // performance fast-path + return bs.Put(ctx, blocks[0]) + } + + t, err := bs.ds.Batch(ctx) + if err != nil { + return err + } + for _, b := range blocks { + k := dshelp.MultihashToDsKey(b.Cid().Hash()) + err = t.Put(ctx, k, b.RawData()) + if err != nil { + return err + } + } + return t.Commit(ctx) +} + +// AllKeysChan is a noop on the EDS blockstore because the keys are not stored in a single CAR file. +func (bs *blockstore) AllKeysChan(context.Context) (<-chan cid.Cid, error) { + return nil, errUnsupportedOperation +} + +// HashOnRead is a noop on the EDS blockstore but an error cannot be returned due to the method +// signature from the blockstore interface. +func (bs *blockstore) HashOnRead(bool) { + log.Warnf("HashOnRead is a noop on the EDS blockstore") +} + +// getReadOnlyBlockstore finds the underlying blockstore of the shard that contains the given CID. 
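+// The lookup is cache-first (a description of the code below): a cached
+// accessor for the shard is reused when present; only on a miss is the shard
+// loaded into the second-level cache via getAccessor, so repeated reads from
+// the same CAR avoid reopening it.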
+func (bs *blockstore) getReadOnlyBlockstore(ctx context.Context, cid cid.Cid) (*BlockstoreCloser, error) {
+	keys, err := bs.store.dgstr.ShardsContainingMultihash(ctx, cid.Hash())
+	if errors.Is(err, datastore.ErrNotFound) || errors.Is(err, ErrNotFoundInIndex) {
+		return nil, ErrNotFound
+	}
+	if err != nil {
+		return nil, fmt.Errorf("failed to find shards containing multihash: %w", err)
+	}
+
+	// check if either cache contains an accessor
+	shardKey := keys[0]
+	accessor, err := bs.store.cache.Load().Get(shardKey)
+	if err == nil {
+		return blockstoreCloser(accessor)
+	}
+
+	// load accessor to the blockstore cache and use it as blockstoreCloser
+	accessor, err = bs.store.cache.Load().Second().GetOrLoad(ctx, shardKey, bs.store.getAccessor)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get accessor for shard %s: %w", shardKey, err)
+	}
+	return blockstoreCloser(accessor)
+}
diff --git a/share/eds/blockstore_test.go b/share/eds/blockstore_test.go
new file mode 100644
index 0000000000..d9dbf7ed30
--- /dev/null
+++ b/share/eds/blockstore_test.go
@@ -0,0 +1,81 @@
+package eds
+
+import (
+	"context"
+	"io"
+	"testing"
+
+	"github.com/filecoin-project/dagstore"
+	ipld "github.com/ipfs/go-ipld-format"
+	"github.com/ipld/go-car"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	ipld2 "github.com/celestiaorg/celestia-node/share/ipld"
+)
+
+// TestBlockstore_Operations tests Has, Get, and GetSize on the top level eds.Store blockstore.
+// It verifies that these operations are valid and successful on all blocks stored in a CAR file.
+func TestBlockstore_Operations(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	t.Cleanup(cancel)
+
+	edsStore, err := newStore(t)
+	require.NoError(t, err)
+	err = edsStore.Start(ctx)
+	require.NoError(t, err)
+
+	eds, dah := randomEDS(t)
+	err = edsStore.Put(ctx, dah.Hash(), eds)
+	require.NoError(t, err)
+
+	r, err := edsStore.GetCAR(ctx, dah.Hash())
+	require.NoError(t, err)
+	carReader, err := car.NewCarReader(r)
+	require.NoError(t, err)
+
+	topLevelBS := edsStore.Blockstore()
+	carBS, err := edsStore.CARBlockstore(ctx, dah.Hash())
+	require.NoError(t, err)
+	defer func() {
+		require.NoError(t, carBS.Close())
+	}()
+
+	root, err := edsStore.GetDAH(ctx, dah.Hash())
+	require.NoError(t, err)
+	require.True(t, dah.Equals(root))
+
+	blockstores := []dagstore.ReadBlockstore{topLevelBS, carBS}
+
+	for {
+		next, err := carReader.Next()
+		if err != nil {
+			require.ErrorIs(t, err, io.EOF)
+			break
+		}
+		blockCid := next.Cid()
+		randomCid := ipld2.RandNamespacedCID(t)
+
+		for _, bs := range blockstores {
+			// test Has
+			has, err := bs.Has(ctx, blockCid)
+			require.NoError(t, err, "blockstore.Has could not find stored CID")
+			require.True(t, has)
+
+			// test Get
+			block, err := bs.Get(ctx, blockCid)
+			assert.NoError(t, err, "blockstore.Get could not get a leaf CID")
+			assert.Equal(t, block.Cid(), blockCid)
+			assert.Equal(t, block.RawData(), next.RawData())
+
+			// test Get (cid not found)
+			_, err = bs.Get(ctx, randomCid)
+			require.ErrorAs(t, err, &ipld.ErrNotFound{Cid: randomCid})
+
+			// test GetSize
+			size, err := bs.GetSize(ctx, blockCid)
+			assert.NotZerof(t, size, "blockstore.GetSize reported a stored block was empty")
+			assert.NoError(t, err)
+		}
+	}
+}
diff --git a/share/eds/byzantine/bad_encoding.go b/share/eds/byzantine/bad_encoding.go
new file mode 100644
index 0000000000..fbb6b592ea
--- /dev/null
+++ b/share/eds/byzantine/bad_encoding.go
@@ -0,0 +1,254 @@
+package byzantine
+
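+// This file implements the Bad Encoding Fraud Proof (BEFP). Roughly (an
+// editorial summary of the code below, not extra API): rsmt2d reports an
+// ErrByzantineData during Repair; NewErrByzantine (byzantine.go) collects
+// Merkle proofs for the offending row/col; CreateBadEncodingProof wraps them
+// for gossiping; Validate re-checks the proofs on the receiving side.
+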
+import (
+	"bytes"
+	"errors"
+	"fmt"
+
+	"github.com/celestiaorg/celestia-app/pkg/wrapper"
+	"github.com/celestiaorg/go-fraud"
+	"github.com/celestiaorg/rsmt2d"
+
+	"github.com/celestiaorg/celestia-node/header"
+	"github.com/celestiaorg/celestia-node/share"
+	pb "github.com/celestiaorg/celestia-node/share/eds/byzantine/pb"
+	"github.com/celestiaorg/celestia-node/share/ipld"
+)
+
+const (
+	version = "v0.1"
+
+	BadEncoding fraud.ProofType = "badencoding" + version
+)
+
+type BadEncodingProof struct {
+	headerHash  []byte
+	BlockHeight uint64
+	// Shares contains all shares from the row or col.
+	// Shares that did not pass verification in rsmt2d will be nil.
+	// For non-nil shares MerkleProofs are computed.
+	Shares []*ShareWithProof
+	// Index represents the row/col index where ErrByzantineRow/ErrByzantineCol occurred.
+	Index uint32
+	// Axis represents the axis that verification failed on.
+	Axis rsmt2d.Axis
+}
+
+// CreateBadEncodingProof creates a new Bad Encoding Fraud Proof that should be propagated through
+// the network. The fraud proof will contain shares that did not pass verification and their
+// relevant Merkle proofs.
+func CreateBadEncodingProof(
+	hash []byte,
+	height uint64,
+	errByzantine *ErrByzantine,
+) fraud.Proof[*header.ExtendedHeader] {
+	return &BadEncodingProof{
+		headerHash:  hash,
+		BlockHeight: height,
+		Shares:      errByzantine.Shares,
+		Index:       errByzantine.Index,
+		Axis:        errByzantine.Axis,
+	}
+}
+
+// Type returns the type of the fraud proof.
+func (p *BadEncodingProof) Type() fraud.ProofType {
+	return BadEncoding
+}
+
+// HeaderHash returns the block hash.
+func (p *BadEncodingProof) HeaderHash() []byte {
+	return p.headerHash
+}
+
+// Height returns the block height.
+func (p *BadEncodingProof) Height() uint64 {
+	return p.BlockHeight
+}
+
+// MarshalBinary converts BadEncodingProof to binary.
+func (p *BadEncodingProof) MarshalBinary() ([]byte, error) {
+	shares := make([]*pb.Share, 0, len(p.Shares))
+	for _, share := range p.Shares {
+		shares = append(shares, share.ShareWithProofToProto())
+	}
+
+	badEncodingFraudProof := pb.BadEncoding{
+		HeaderHash: p.headerHash,
+		Height:     p.BlockHeight,
+		Shares:     shares,
+		Index:      p.Index,
+		Axis:       pb.Axis(p.Axis),
+	}
+	return badEncodingFraudProof.Marshal()
+}
+
+// UnmarshalBinary converts binary to BadEncodingProof.
+func (p *BadEncodingProof) UnmarshalBinary(data []byte) error {
+	in := pb.BadEncoding{}
+	if err := in.Unmarshal(data); err != nil {
+		return err
+	}
+	befp := &BadEncodingProof{
+		headerHash:  in.HeaderHash,
+		BlockHeight: in.Height,
+		Shares:      ProtoToShare(in.Shares),
+		Index:       in.Index,
+		Axis:        rsmt2d.Axis(in.Axis),
+	}
+
+	*p = *befp
+
+	return nil
+}
+
+var (
+	errHeightMismatch          = errors.New("height reported in proof does not match with the header's height")
+	errIncorrectIndex          = errors.New("row/col index is greater than the number of roots")
+	errIncorrectAmountOfShares = errors.New("incorrect amount of shares")
+	errIncorrectShare          = errors.New("incorrect share received")
+	errNMTTreeRootsMatch       = errors.New("recomputed root matches the DAH root")
+)
+
+var (
+	invalidProofPrefix = fmt.Sprintf("invalid %s proof", BadEncoding)
+)
+
+// Validate ensures that the fraud proof is correct.
+// Validate checks that the provided Merkle Proofs correspond to the shares,
+// rebuilds the bad row or col from the received shares, computes the Merkle Root
+// and compares it with the block's Merkle Root.
+func (p *BadEncodingProof) Validate(hdr *header.ExtendedHeader) error {
+	if hdr.Height() != p.BlockHeight {
+		log.Debugf("%s: %s. 
expected block's height: %d, got: %d",
+			invalidProofPrefix,
+			errHeightMismatch,
+			hdr.Height(),
+			p.BlockHeight,
+		)
+		return errHeightMismatch
+	}
+
+	if len(hdr.DAH.RowRoots) != len(hdr.DAH.ColumnRoots) {
+		// NOTE: This should never happen as callers of this method should not feed it with a
+		// malformed extended header.
+		panic(fmt.Sprintf(
+			"invalid extended header: length of row and column roots do not match. (rowRoots=%d) (colRoots=%d)",
+			len(hdr.DAH.RowRoots),
+			len(hdr.DAH.ColumnRoots)),
+		)
+	}
+
+	// merkleRoots are the roots against which we are going to check the inclusion of the received
+	// shares. Changing the order of the roots to prove the shares relative to the orthogonal axis,
+	// because inside the rsmt2d library rsmt2d.Row = 0 and rsmt2d.Col = 1
+	merkleRoots := hdr.DAH.RowRoots
+	if p.Axis == rsmt2d.Row {
+		merkleRoots = hdr.DAH.ColumnRoots
+	}
+
+	if int(p.Index) >= len(merkleRoots) {
+		log.Debugf("%s:%s (%d >= %d)",
+			invalidProofPrefix, errIncorrectIndex, int(p.Index), len(merkleRoots),
+		)
+		return errIncorrectIndex
+	}
+
+	if len(p.Shares) != len(merkleRoots) {
+		// Since p.Shares should contain all the shares from either a row or a
+		// column, it should exactly match the number of row roots. In this
+		// context, the number of row roots is the width of the extended data
+		// square.
+		log.Infof("%s: %s (%d != %d)",
+			invalidProofPrefix, errIncorrectAmountOfShares, len(p.Shares), len(merkleRoots),
+		)
+		return errIncorrectAmountOfShares
+	}
+
+	odsWidth := uint64(len(merkleRoots) / 2)
+	amount := uint64(0)
+	for _, share := range p.Shares {
+		if share == nil {
+			continue
+		}
+		amount++
+		if amount == odsWidth {
+			break
+		}
+	}
+
+	if amount < odsWidth {
+		log.Debugf("%s: %s. not enough shares provided to reconstruct row/col",
+			invalidProofPrefix, errIncorrectAmountOfShares)
+		return errIncorrectAmountOfShares
+	}
+
+	// verify that Merkle proofs correspond to particular shares.
+	shares := make([][]byte, len(merkleRoots))
+	for index, shr := range p.Shares {
+		if shr == nil {
+			continue
+		}
+		// validate inclusion of the share into one of the DAHeader roots
+		if ok := shr.Validate(ipld.MustCidFromNamespacedSha256(merkleRoots[index])); !ok {
+			log.Debugf("%s: %s at index %d", invalidProofPrefix, errIncorrectShare, index)
+			return errIncorrectShare
+		}
+		// The NMT commits an additional namespace prefix that rsmt2d does not know about, so we
+		// trim it; this is ugliness from NMTWrapper that we have to embrace ¯\_(ツ)_/¯
+		shares[index] = share.GetData(shr.Share)
+	}
+
+	codec := share.DefaultRSMT2DCodec()
+
+	// We can conclude that the proof is valid in case we proved the inclusion of `Shares` but
+	// the row/col can't be reconstructed, or the building of NMTree fails.
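+	// The three steps below mirror the prover: decode the full row/col from
+	// the proven shares, re-extend the original half, and recompute the NMT
+	// root. (Decode repairs the axis from any odsWidth known shares; Encode
+	// regenerates the parity half from the first odsWidth shares.)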
+ rebuiltShares, err := codec.Decode(shares) + if err != nil { + log.Debugw("failed to decode shares at height", + "height", hdr.Height(), "err", err, + ) + return nil + } + + rebuiltExtendedShares, err := codec.Encode(rebuiltShares[0:odsWidth]) + if err != nil { + log.Debugw("failed to encode shares at height", + "height", hdr.Height(), "err", err, + ) + return nil + } + copy(rebuiltShares[odsWidth:], rebuiltExtendedShares) + + tree := wrapper.NewErasuredNamespacedMerkleTree(odsWidth, uint(p.Index)) + for _, share := range rebuiltShares { + err = tree.Push(share) + if err != nil { + log.Debugw("failed to build a tree from the reconstructed shares at height", + "height", hdr.Height(), "err", err, + ) + return nil + } + } + + expectedRoot, err := tree.Root() + if err != nil { + log.Debugw("failed to build a tree root at height", + "height", hdr.Height(), "err", err, + ) + return nil + } + + // root is a merkle root of the row/col where ErrByzantine occurred + root := hdr.DAH.RowRoots[p.Index] + if p.Axis == rsmt2d.Col { + root = hdr.DAH.ColumnRoots[p.Index] + } + + // comparing rebuilt Merkle Root of bad row/col with respective Merkle Root of row/col from block. + if bytes.Equal(expectedRoot, root) { + log.Debugf("invalid %s proof:%s", BadEncoding, errNMTTreeRootsMatch) + return errNMTTreeRootsMatch + } + return nil +} diff --git a/share/eds/byzantine/bad_encoding_test.go b/share/eds/byzantine/bad_encoding_test.go new file mode 100644 index 0000000000..e42e3c287c --- /dev/null +++ b/share/eds/byzantine/bad_encoding_test.go @@ -0,0 +1,310 @@ +package byzantine + +import ( + "context" + "crypto/sha256" + "hash" + "testing" + "time" + + "github.com/ipfs/boxo/blockservice" + blocks "github.com/ipfs/go-block-format" + "github.com/ipfs/go-cid" + mhcore "github.com/multiformats/go-multihash/core" + "github.com/stretchr/testify/require" + core "github.com/tendermint/tendermint/types" + + "github.com/celestiaorg/celestia-app/pkg/da" + "github.com/celestiaorg/celestia-app/test/util/malicious" + "github.com/celestiaorg/nmt" + "github.com/celestiaorg/rsmt2d" + + "github.com/celestiaorg/celestia-node/header" + "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/share/eds/edstest" + "github.com/celestiaorg/celestia-node/share/ipld" + "github.com/celestiaorg/celestia-node/share/sharetest" +) + +func TestBEFP_Validate(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) + defer t.Cleanup(cancel) + bServ := ipld.NewMemBlockservice() + + square := edstest.RandByzantineEDS(t, 16) + dah, err := da.NewDataAvailabilityHeader(square) + require.NoError(t, err) + err = ipld.ImportEDS(ctx, square, bServ) + require.NoError(t, err) + + var errRsmt2d *rsmt2d.ErrByzantineData + err = square.Repair(dah.RowRoots, dah.ColumnRoots) + require.ErrorAs(t, err, &errRsmt2d) + + byzantine := NewErrByzantine(ctx, bServ, &dah, errRsmt2d) + var errByz *ErrByzantine + require.ErrorAs(t, byzantine, &errByz) + + proof := CreateBadEncodingProof([]byte("hash"), 0, errByz) + befp, ok := proof.(*BadEncodingProof) + require.True(t, ok) + var test = []struct { + name string + prepareFn func() error + expectedResult func(error) + }{ + { + name: "valid BEFP", + prepareFn: func() error { + return proof.Validate(&header.ExtendedHeader{DAH: &dah}) + }, + expectedResult: func(err error) { + require.NoError(t, err) + }, + }, + { + name: "invalid BEFP for valid header", + prepareFn: func() error { + validSquare := edstest.RandEDS(t, 2) + validDah, err := 
da.NewDataAvailabilityHeader(validSquare) + require.NoError(t, err) + err = ipld.ImportEDS(ctx, validSquare, bServ) + require.NoError(t, err) + validShares := validSquare.Flattened() + errInvalidByz := NewErrByzantine(ctx, bServ, &validDah, + &rsmt2d.ErrByzantineData{ + Axis: rsmt2d.Row, + Index: 0, + Shares: validShares[0:4], + }, + ) + var errInvalid *ErrByzantine + require.ErrorAs(t, errInvalidByz, &errInvalid) + invalidBefp := CreateBadEncodingProof([]byte("hash"), 0, errInvalid) + return invalidBefp.Validate(&header.ExtendedHeader{DAH: &validDah}) + }, + expectedResult: func(err error) { + require.ErrorIs(t, err, errNMTTreeRootsMatch) + }, + }, + { + name: "incorrect share with Proof", + prepareFn: func() error { + // break the first shareWithProof to test negative case + sh := sharetest.RandShares(t, 2) + nmtProof := nmt.NewInclusionProof(0, 1, nil, false) + befp.Shares[0] = &ShareWithProof{sh[0], &nmtProof} + return proof.Validate(&header.ExtendedHeader{DAH: &dah}) + }, + expectedResult: func(err error) { + require.ErrorIs(t, err, errIncorrectShare) + }, + }, + { + name: "invalid amount of shares", + prepareFn: func() error { + befp.Shares = befp.Shares[0 : len(befp.Shares)/2] + return proof.Validate(&header.ExtendedHeader{DAH: &dah}) + }, + expectedResult: func(err error) { + require.ErrorIs(t, err, errIncorrectAmountOfShares) + }, + }, + { + name: "not enough shares to recompute the root", + prepareFn: func() error { + befp.Shares[0] = nil + return proof.Validate(&header.ExtendedHeader{DAH: &dah}) + }, + expectedResult: func(err error) { + require.ErrorIs(t, err, errIncorrectAmountOfShares) + }, + }, + { + name: "index out of bounds", + prepareFn: func() error { + befp.Index = 100 + return proof.Validate(&header.ExtendedHeader{DAH: &dah}) + }, + expectedResult: func(err error) { + require.ErrorIs(t, err, errIncorrectIndex) + }, + }, + { + name: "heights mismatch", + prepareFn: func() error { + return proof.Validate(&header.ExtendedHeader{ + RawHeader: core.Header{ + Height: 42, + }, + DAH: &dah, + }) + }, + expectedResult: func(err error) { + require.ErrorIs(t, err, errHeightMismatch) + }, + }, + } + + for _, tt := range test { + t.Run(tt.name, func(t *testing.T) { + err = tt.prepareFn() + tt.expectedResult(err) + }) + } +} + +// TestIncorrectBadEncodingFraudProof asserts that BEFP is not generated for the correct data +func TestIncorrectBadEncodingFraudProof(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + bServ := ipld.NewMemBlockservice() + + squareSize := 8 + shares := sharetest.RandShares(t, squareSize*squareSize) + + eds, err := ipld.AddShares(ctx, shares, bServ) + require.NoError(t, err) + + dah, err := share.NewRoot(eds) + require.NoError(t, err) + + // get an arbitrary row + row := uint(squareSize / 2) + rowShares := eds.Row(row) + rowRoot := dah.RowRoots[row] + + shareProofs, err := GetProofsForShares(ctx, bServ, ipld.MustCidFromNamespacedSha256(rowRoot), rowShares) + require.NoError(t, err) + + // create a fake error for data that was encoded correctly + fakeError := ErrByzantine{ + Index: uint32(row), + Shares: shareProofs, + Axis: rsmt2d.Row, + } + + h := &header.ExtendedHeader{ + RawHeader: core.Header{ + Height: 420, + }, + DAH: dah, + Commit: &core.Commit{ + BlockID: core.BlockID{ + Hash: []byte("made up hash"), + }, + }, + } + + proof := CreateBadEncodingProof(h.Hash(), h.Height(), &fakeError) + err = proof.Validate(h) + require.Error(t, err) +} + +func TestBEFP_ValidateOutOfOrderShares(t *testing.T) { + ctx, cancel := 
context.WithTimeout(context.Background(), time.Second*30)
+	t.Cleanup(cancel)
+
+	size := 4
+	eds := edstest.RandEDS(t, size)
+
+	shares := eds.Flattened()
+	shares[0], shares[4] = shares[4], shares[0] // corrupting eds
+
+	bServ := newNamespacedBlockService()
+	batchAddr := ipld.NewNmtNodeAdder(ctx, bServ, ipld.MaxSizeBatchOption(size*2))
+
+	eds, err := rsmt2d.ImportExtendedDataSquare(shares,
+		share.DefaultRSMT2DCodec(),
+		malicious.NewConstructor(uint64(size), nmt.NodeVisitor(batchAddr.Visit)),
+	)
+	require.NoError(t, err, "failure to recompute the extended data square")
+
+	err = batchAddr.Commit()
+	require.NoError(t, err)
+
+	dah, err := da.NewDataAvailabilityHeader(eds)
+	require.NoError(t, err)
+
+	var errRsmt2d *rsmt2d.ErrByzantineData
+	err = eds.Repair(dah.RowRoots, dah.ColumnRoots)
+	require.ErrorAs(t, err, &errRsmt2d)
+
+	byzantine := NewErrByzantine(ctx, bServ, &dah, errRsmt2d)
+	var errByz *ErrByzantine
+	require.ErrorAs(t, byzantine, &errByz)
+
+	befp := CreateBadEncodingProof([]byte("hash"), 0, errByz)
+	err = befp.Validate(&header.ExtendedHeader{DAH: &dah})
+	require.NoError(t, err)
+}
+
+// namespacedBlockService wraps `BlockService` and extends the verification part
+// to avoid returning blocks that have out-of-order namespaces.
+type namespacedBlockService struct {
+	blockservice.BlockService
+	// the data structure that is used on the networking level, in order
+	// to verify the order of the namespaces
+	prefix *cid.Prefix
+}
+
+func newNamespacedBlockService() *namespacedBlockService {
+	sha256NamespaceFlagged := uint64(0x7701)
+	// register the nmt hasher to validate the order of namespaces
+	mhcore.Register(sha256NamespaceFlagged, func() hash.Hash {
+		nh := nmt.NewNmtHasher(sha256.New(), share.NamespaceSize, true)
+		nh.Reset()
+		return nh
+	})
+
+	bs := &namespacedBlockService{}
+	bs.BlockService = ipld.NewMemBlockservice()
+
+	bs.prefix = &cid.Prefix{
+		Version: 1,
+		Codec:   sha256NamespaceFlagged,
+		MhType:  sha256NamespaceFlagged,
+		// equal to NmtHasher.Size()
+		MhLength: sha256.New().Size() + 2*share.NamespaceSize,
+	}
+	return bs
+}
+
+func (n *namespacedBlockService) GetBlock(ctx context.Context, c cid.Cid) (blocks.Block, error) {
+	block, err := n.BlockService.GetBlock(ctx, c)
+	if err != nil {
+		return nil, err
+	}
+
+	_, err = n.prefix.Sum(block.RawData())
+	if err != nil {
+		return nil, err
+	}
+	return block, nil
+}
+
+func (n *namespacedBlockService) GetBlocks(ctx context.Context, cids []cid.Cid) <-chan blocks.Block {
+	blockCh := n.BlockService.GetBlocks(ctx, cids)
+	resultCh := make(chan blocks.Block)
+
+	go func() {
+		for {
+			select {
+			case <-ctx.Done():
+				close(resultCh)
+				return
+			case block, ok := <-blockCh:
+				if !ok {
+					close(resultCh)
+					return
+				}
+				if _, err := n.prefix.Sum(block.RawData()); err != nil {
+					continue
+				}
+				resultCh <- block
+			}
+		}
+	}()
+	return resultCh
+}
diff --git a/share/eds/byzantine/byzantine.go b/share/eds/byzantine/byzantine.go
new file mode 100644
index 0000000000..d20b56deed
--- /dev/null
+++ b/share/eds/byzantine/byzantine.go
@@ -0,0 +1,85 @@
+package byzantine
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/ipfs/boxo/blockservice"
+
+	"github.com/celestiaorg/celestia-app/pkg/da"
+	"github.com/celestiaorg/rsmt2d"
+
+	"github.com/celestiaorg/celestia-node/share/ipld"
+)
+
+// ErrByzantine is thrown when a recovered data square is not correct
+// (merkle proofs do not match parity erasure-coding data).
+//
+// It is converted from rsmt2d.ByzantineRow/Col +
+// Merkle Proof for each share.
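+//
+// Callers typically match it with errors.As (hypothetical handling sketch):
+//
+//	var errByz *ErrByzantine
+//	if errors.As(err, &errByz) {
+//		proof := CreateBadEncodingProof(hash, height, errByz)
+//		// gossip the proof via the fraud service
+//	}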
+type ErrByzantine struct {
+	Index  uint32
+	Shares []*ShareWithProof
+	Axis   rsmt2d.Axis
+}
+
+func (e *ErrByzantine) Error() string {
+	return fmt.Sprintf("byzantine error(Axis:%v, Index:%v)", e.Axis, e.Index)
+}
+
+// NewErrByzantine creates a new ErrByzantine from an rsmt2d error.
+// If the context is done before all proofs are collected, it returns ipld.ErrNodeNotFound.
+func NewErrByzantine(
+	ctx context.Context,
+	bGetter blockservice.BlockGetter,
+	dah *da.DataAvailabilityHeader,
+	errByz *rsmt2d.ErrByzantineData,
+) error {
+	// changing the order to collect proofs against an orthogonal axis
+	roots := [][][]byte{
+		dah.ColumnRoots,
+		dah.RowRoots,
+	}[errByz.Axis]
+
+	sharesWithProof := make([]*ShareWithProof, len(errByz.Shares))
+
+	type result struct {
+		share *ShareWithProof
+		index int
+	}
+	resultCh := make(chan *result)
+	for index, share := range errByz.Shares {
+		if share == nil {
+			continue
+		}
+
+		index := index
+		go func() {
+			share, err := getProofsAt(
+				ctx, bGetter,
+				ipld.MustCidFromNamespacedSha256(roots[index]),
+				int(errByz.Index), len(errByz.Shares),
+			)
+			if err != nil {
+				log.Warn("requesting proof failed", "root", roots[index], "err", err)
+				return
+			}
+			resultCh <- &result{share, index}
+		}()
+	}
+
+	for i := 0; i < len(dah.RowRoots)/2; i++ {
+		select {
+		case t := <-resultCh:
+			sharesWithProof[t.index] = t.share
+		case <-ctx.Done():
+			return ipld.ErrNodeNotFound
+		}
+	}
+
+	return &ErrByzantine{
+		Index:  uint32(errByz.Index),
+		Shares: sharesWithProof,
+		Axis:   errByz.Axis,
+	}
+}
diff --git a/ipld/pb/share.pb.go b/share/eds/byzantine/pb/share.pb.go
similarity index 59%
rename from ipld/pb/share.pb.go
rename to share/eds/byzantine/pb/share.pb.go
index b3e32f8baa..4186eabc64 100644
--- a/ipld/pb/share.pb.go
+++ b/share/eds/byzantine/pb/share.pb.go
@@ -1,10 +1,11 @@
 // Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: ipld/pb/share.proto
+// source: share/eds/byzantine/pb/share.proto
 
-package pb
+package share_eds_byzantine_pb
 
 import (
 	fmt "fmt"
+	pb "github.com/celestiaorg/nmt/pb"
 	proto "github.com/gogo/protobuf/proto"
 	io "io"
 	math "math"
@@ -22,25 +23,48 @@ var _ = math.Inf
 // proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package -type MerkleProof struct { - Start int64 `protobuf:"varint,1,opt,name=start,proto3" json:"start,omitempty"` - End int64 `protobuf:"varint,2,opt,name=end,proto3" json:"end,omitempty"` - Nodes [][]byte `protobuf:"bytes,3,rep,name=nodes,proto3" json:"nodes,omitempty"` - LeafHash []byte `protobuf:"bytes,4,opt,name=leaf_hash,json=leafHash,proto3" json:"leaf_hash,omitempty"` +type Axis int32 + +const ( + Axis_ROW Axis = 0 + Axis_COL Axis = 1 +) + +var Axis_name = map[int32]string{ + 0: "ROW", + 1: "COL", +} + +var Axis_value = map[string]int32{ + "ROW": 0, + "COL": 1, } -func (m *MerkleProof) Reset() { *m = MerkleProof{} } -func (m *MerkleProof) String() string { return proto.CompactTextString(m) } -func (*MerkleProof) ProtoMessage() {} -func (*MerkleProof) Descriptor() ([]byte, []int) { - return fileDescriptor_f1f041c2d5c8eb54, []int{0} +func (x Axis) String() string { + return proto.EnumName(Axis_name, int32(x)) } -func (m *MerkleProof) XXX_Unmarshal(b []byte) error { + +func (Axis) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_d28ce8f160a920d1, []int{0} +} + +type Share struct { + Data []byte `protobuf:"bytes,1,opt,name=Data,proto3" json:"Data,omitempty"` + Proof *pb.Proof `protobuf:"bytes,2,opt,name=Proof,proto3" json:"Proof,omitempty"` +} + +func (m *Share) Reset() { *m = Share{} } +func (m *Share) String() string { return proto.CompactTextString(m) } +func (*Share) ProtoMessage() {} +func (*Share) Descriptor() ([]byte, []int) { + return fileDescriptor_d28ce8f160a920d1, []int{0} +} +func (m *Share) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *MerkleProof) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *Share) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_MerkleProof.Marshal(b, m, deterministic) + return xxx_messageInfo_Share.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -50,63 +74,52 @@ func (m *MerkleProof) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) return b[:n], nil } } -func (m *MerkleProof) XXX_Merge(src proto.Message) { - xxx_messageInfo_MerkleProof.Merge(m, src) +func (m *Share) XXX_Merge(src proto.Message) { + xxx_messageInfo_Share.Merge(m, src) } -func (m *MerkleProof) XXX_Size() int { +func (m *Share) XXX_Size() int { return m.Size() } -func (m *MerkleProof) XXX_DiscardUnknown() { - xxx_messageInfo_MerkleProof.DiscardUnknown(m) -} - -var xxx_messageInfo_MerkleProof proto.InternalMessageInfo - -func (m *MerkleProof) GetStart() int64 { - if m != nil { - return m.Start - } - return 0 +func (m *Share) XXX_DiscardUnknown() { + xxx_messageInfo_Share.DiscardUnknown(m) } -func (m *MerkleProof) GetEnd() int64 { - if m != nil { - return m.End - } - return 0 -} +var xxx_messageInfo_Share proto.InternalMessageInfo -func (m *MerkleProof) GetNodes() [][]byte { +func (m *Share) GetData() []byte { if m != nil { - return m.Nodes + return m.Data } return nil } -func (m *MerkleProof) GetLeafHash() []byte { +func (m *Share) GetProof() *pb.Proof { if m != nil { - return m.LeafHash + return m.Proof } return nil } -type Share struct { - Data []byte `protobuf:"bytes,1,opt,name=Data,proto3" json:"Data,omitempty"` - Proof *MerkleProof `protobuf:"bytes,2,opt,name=Proof,proto3" json:"Proof,omitempty"` +type BadEncoding struct { + HeaderHash []byte `protobuf:"bytes,1,opt,name=HeaderHash,proto3" json:"HeaderHash,omitempty"` + Height uint64 
`protobuf:"varint,2,opt,name=Height,proto3" json:"Height,omitempty"` + Shares []*Share `protobuf:"bytes,3,rep,name=Shares,proto3" json:"Shares,omitempty"` + Index uint32 `protobuf:"varint,4,opt,name=Index,proto3" json:"Index,omitempty"` + Axis Axis `protobuf:"varint,5,opt,name=Axis,proto3,enum=share.eds.byzantine.pb.Axis" json:"Axis,omitempty"` } -func (m *Share) Reset() { *m = Share{} } -func (m *Share) String() string { return proto.CompactTextString(m) } -func (*Share) ProtoMessage() {} -func (*Share) Descriptor() ([]byte, []int) { - return fileDescriptor_f1f041c2d5c8eb54, []int{1} +func (m *BadEncoding) Reset() { *m = BadEncoding{} } +func (m *BadEncoding) String() string { return proto.CompactTextString(m) } +func (*BadEncoding) ProtoMessage() {} +func (*BadEncoding) Descriptor() ([]byte, []int) { + return fileDescriptor_d28ce8f160a920d1, []int{1} } -func (m *Share) XXX_Unmarshal(b []byte) error { +func (m *BadEncoding) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *Share) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *BadEncoding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_Share.Marshal(b, m, deterministic) + return xxx_messageInfo_BadEncoding.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -116,60 +129,87 @@ func (m *Share) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return b[:n], nil } } -func (m *Share) XXX_Merge(src proto.Message) { - xxx_messageInfo_Share.Merge(m, src) +func (m *BadEncoding) XXX_Merge(src proto.Message) { + xxx_messageInfo_BadEncoding.Merge(m, src) } -func (m *Share) XXX_Size() int { +func (m *BadEncoding) XXX_Size() int { return m.Size() } -func (m *Share) XXX_DiscardUnknown() { - xxx_messageInfo_Share.DiscardUnknown(m) +func (m *BadEncoding) XXX_DiscardUnknown() { + xxx_messageInfo_BadEncoding.DiscardUnknown(m) } -var xxx_messageInfo_Share proto.InternalMessageInfo +var xxx_messageInfo_BadEncoding proto.InternalMessageInfo -func (m *Share) GetData() []byte { +func (m *BadEncoding) GetHeaderHash() []byte { if m != nil { - return m.Data + return m.HeaderHash } return nil } -func (m *Share) GetProof() *MerkleProof { +func (m *BadEncoding) GetHeight() uint64 { if m != nil { - return m.Proof + return m.Height + } + return 0 +} + +func (m *BadEncoding) GetShares() []*Share { + if m != nil { + return m.Shares } return nil } +func (m *BadEncoding) GetIndex() uint32 { + if m != nil { + return m.Index + } + return 0 +} + +func (m *BadEncoding) GetAxis() Axis { + if m != nil { + return m.Axis + } + return Axis_ROW +} + func init() { - proto.RegisterType((*MerkleProof)(nil), "ipld.pb.MerkleProof") - proto.RegisterType((*Share)(nil), "ipld.pb.Share") -} - -func init() { proto.RegisterFile("ipld/pb/share.proto", fileDescriptor_f1f041c2d5c8eb54) } - -var fileDescriptor_f1f041c2d5c8eb54 = []byte{ - // 247 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0xce, 0x2c, 0xc8, 0x49, - 0xd1, 0x2f, 0x48, 0xd2, 0x2f, 0xce, 0x48, 0x2c, 0x4a, 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, - 0x62, 0x07, 0x09, 0xea, 0x15, 0x24, 0x29, 0x65, 0x70, 0x71, 0xfb, 0xa6, 0x16, 0x65, 0xe7, 0xa4, - 0x06, 0x14, 0xe5, 0xe7, 0xa7, 0x09, 0x89, 0x70, 0xb1, 0x16, 0x97, 0x24, 0x16, 0x95, 0x48, 0x30, - 0x2a, 0x30, 0x6a, 0x30, 0x07, 0x41, 0x38, 0x42, 0x02, 0x5c, 0xcc, 0xa9, 0x79, 0x29, 0x12, 0x4c, - 0x60, 0x31, 0x10, 0x13, 0xa4, 0x2e, 0x2f, 0x3f, 0x25, 0xb5, 0x58, 0x82, 0x59, 0x81, 0x59, 
0x83, - 0x27, 0x08, 0xc2, 0x11, 0x92, 0xe6, 0xe2, 0xcc, 0x49, 0x4d, 0x4c, 0x8b, 0xcf, 0x48, 0x2c, 0xce, - 0x90, 0x60, 0x51, 0x60, 0xd4, 0xe0, 0x09, 0xe2, 0x00, 0x09, 0x78, 0x24, 0x16, 0x67, 0x28, 0xb9, - 0x73, 0xb1, 0x06, 0x83, 0x5c, 0x20, 0x24, 0xc4, 0xc5, 0xe2, 0x92, 0x58, 0x92, 0x08, 0xb6, 0x82, - 0x27, 0x08, 0xcc, 0x16, 0xd2, 0xe2, 0x62, 0x05, 0x3b, 0x00, 0x6c, 0x07, 0xb7, 0x91, 0x88, 0x1e, - 0xd4, 0x7d, 0x7a, 0x48, 0x8e, 0x0b, 0x82, 0x28, 0x71, 0x72, 0x3b, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, - 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4, 0x18, 0x27, 0x3c, 0x96, 0x63, 0xb8, 0xf0, 0x58, 0x8e, 0xe1, - 0xc6, 0x63, 0x39, 0x86, 0x28, 0x9d, 0xf4, 0xcc, 0x92, 0x8c, 0xd2, 0x24, 0xbd, 0xe4, 0xfc, 0x5c, - 0xfd, 0xe4, 0xd4, 0x9c, 0xd4, 0xe2, 0x92, 0xcc, 0xc4, 0xfc, 0xa2, 0x74, 0x38, 0x5b, 0x17, 0xe4, - 0x4c, 0x7d, 0x68, 0x78, 0x24, 0xb1, 0x81, 0x83, 0xc2, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0x7f, - 0x2b, 0x98, 0xa2, 0x21, 0x01, 0x00, 0x00, -} - -func (m *MerkleProof) Marshal() (dAtA []byte, err error) { + proto.RegisterEnum("share.eds.byzantine.pb.Axis", Axis_name, Axis_value) + proto.RegisterType((*Share)(nil), "share.eds.byzantine.pb.Share") + proto.RegisterType((*BadEncoding)(nil), "share.eds.byzantine.pb.BadEncoding") +} + +func init() { + proto.RegisterFile("share/eds/byzantine/pb/share.proto", fileDescriptor_d28ce8f160a920d1) +} + +var fileDescriptor_d28ce8f160a920d1 = []byte{ + // 297 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x90, 0xc1, 0x4a, 0xec, 0x30, + 0x14, 0x86, 0x9b, 0x3b, 0xed, 0x5c, 0x38, 0xa3, 0xe3, 0x10, 0x64, 0x08, 0xa2, 0xa1, 0x0c, 0x08, + 0xc5, 0x45, 0x2a, 0x15, 0x1f, 0xc0, 0xaa, 0x50, 0x41, 0x18, 0x89, 0x0b, 0xd7, 0x89, 0x89, 0xd3, + 0x6e, 0xda, 0xd2, 0x74, 0x51, 0x7d, 0x0a, 0x1f, 0xca, 0x85, 0xcb, 0x59, 0xba, 0x94, 0xf6, 0x45, + 0xa4, 0x69, 0x11, 0x17, 0xba, 0x3b, 0xdf, 0x9f, 0x2f, 0x3f, 0xc9, 0x81, 0x95, 0x49, 0x45, 0xa5, + 0x43, 0xad, 0x4c, 0x28, 0x9f, 0x5f, 0x44, 0x5e, 0x67, 0xb9, 0x0e, 0x4b, 0x19, 0xda, 0x98, 0x95, + 0x55, 0x51, 0x17, 0x78, 0x39, 0x80, 0x56, 0x86, 0x7d, 0x3b, 0xac, 0x94, 0x07, 0xf3, 0x52, 0x86, + 0x65, 0x55, 0x14, 0x4f, 0x83, 0xb7, 0x8a, 0xc1, 0xbb, 0xef, 0x4d, 0x8c, 0xc1, 0xbd, 0x12, 0xb5, + 0x20, 0xc8, 0x47, 0xc1, 0x0e, 0xb7, 0x33, 0x3e, 0x06, 0xef, 0xae, 0x77, 0xc9, 0x3f, 0x1f, 0x05, + 0xb3, 0x68, 0x8f, 0x8d, 0x37, 0x25, 0xb3, 0x31, 0x1f, 0x4e, 0x57, 0x6f, 0x08, 0x66, 0xb1, 0x50, + 0xd7, 0xf9, 0x63, 0xa1, 0xb2, 0x7c, 0x83, 0x29, 0x40, 0xa2, 0x85, 0xd2, 0x55, 0x22, 0x4c, 0x3a, + 0x16, 0xfe, 0x48, 0xf0, 0x12, 0xa6, 0x89, 0xce, 0x36, 0x69, 0x6d, 0x7b, 0x5d, 0x3e, 0x12, 0x3e, + 0x87, 0xa9, 0x7d, 0x8b, 0x21, 0x13, 0x7f, 0x12, 0xcc, 0xa2, 0x23, 0xf6, 0xfb, 0x27, 0x98, 0xb5, + 0xf8, 0x28, 0xe3, 0x7d, 0xf0, 0x6e, 0x72, 0xa5, 0x1b, 0xe2, 0xfa, 0x28, 0xd8, 0xe5, 0x03, 0xe0, + 0x53, 0x70, 0x2f, 0x9a, 0xcc, 0x10, 0xcf, 0x47, 0xc1, 0x3c, 0x3a, 0xfc, 0xab, 0x4a, 0x34, 0x99, + 0xe1, 0xd6, 0x3c, 0x21, 0xe0, 0xf6, 0x84, 0xff, 0xc3, 0x84, 0xaf, 0x1f, 0x16, 0x4e, 0x3f, 0x5c, + 0xae, 0x6f, 0x17, 0x28, 0x26, 0xef, 0x2d, 0x45, 0xdb, 0x96, 0xa2, 0xcf, 0x96, 0xa2, 0xd7, 0x8e, + 0x3a, 0xdb, 0x8e, 0x3a, 0x1f, 0x1d, 0x75, 0xe4, 0xd4, 0x6e, 0xf1, 0xec, 0x2b, 0x00, 0x00, 0xff, + 0xff, 0xb1, 0x96, 0xb9, 0xbe, 0x93, 0x01, 0x00, 0x00, +} + +func (m *Share) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -179,46 +219,39 @@ func (m *MerkleProof) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *MerkleProof) MarshalTo(dAtA []byte) 
(int, error) { +func (m *Share) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *MerkleProof) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *Share) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.LeafHash) > 0 { - i -= len(m.LeafHash) - copy(dAtA[i:], m.LeafHash) - i = encodeVarintShare(dAtA, i, uint64(len(m.LeafHash))) - i-- - dAtA[i] = 0x22 - } - if len(m.Nodes) > 0 { - for iNdEx := len(m.Nodes) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Nodes[iNdEx]) - copy(dAtA[i:], m.Nodes[iNdEx]) - i = encodeVarintShare(dAtA, i, uint64(len(m.Nodes[iNdEx]))) - i-- - dAtA[i] = 0x1a + if m.Proof != nil { + { + size, err := m.Proof.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintShare(dAtA, i, uint64(size)) } - } - if m.End != 0 { - i = encodeVarintShare(dAtA, i, uint64(m.End)) i-- - dAtA[i] = 0x10 + dAtA[i] = 0x12 } - if m.Start != 0 { - i = encodeVarintShare(dAtA, i, uint64(m.Start)) + if len(m.Data) > 0 { + i -= len(m.Data) + copy(dAtA[i:], m.Data) + i = encodeVarintShare(dAtA, i, uint64(len(m.Data))) i-- - dAtA[i] = 0x8 + dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *Share) Marshal() (dAtA []byte, err error) { +func (m *BadEncoding) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -228,32 +261,49 @@ func (m *Share) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *Share) MarshalTo(dAtA []byte) (int, error) { +func (m *BadEncoding) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Share) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *BadEncoding) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.Proof != nil { - { - size, err := m.Proof.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + if m.Axis != 0 { + i = encodeVarintShare(dAtA, i, uint64(m.Axis)) + i-- + dAtA[i] = 0x28 + } + if m.Index != 0 { + i = encodeVarintShare(dAtA, i, uint64(m.Index)) + i-- + dAtA[i] = 0x20 + } + if len(m.Shares) > 0 { + for iNdEx := len(m.Shares) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Shares[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintShare(dAtA, i, uint64(size)) } - i -= size - i = encodeVarintShare(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a } + } + if m.Height != 0 { + i = encodeVarintShare(dAtA, i, uint64(m.Height)) i-- - dAtA[i] = 0x12 + dAtA[i] = 0x10 } - if len(m.Data) > 0 { - i -= len(m.Data) - copy(dAtA[i:], m.Data) - i = encodeVarintShare(dAtA, i, uint64(len(m.Data))) + if len(m.HeaderHash) > 0 { + i -= len(m.HeaderHash) + copy(dAtA[i:], m.HeaderHash) + i = encodeVarintShare(dAtA, i, uint64(len(m.HeaderHash))) i-- dAtA[i] = 0xa } @@ -271,44 +321,47 @@ func encodeVarintShare(dAtA []byte, offset int, v uint64) int { dAtA[offset] = uint8(v) return base } -func (m *MerkleProof) Size() (n int) { +func (m *Share) Size() (n int) { if m == nil { return 0 } var l int _ = l - if m.Start != 0 { - n += 1 + sovShare(uint64(m.Start)) - } - if m.End != 0 { - n += 1 + sovShare(uint64(m.End)) - } - if len(m.Nodes) > 0 { - for _, b := range m.Nodes { - l = len(b) - n += 1 + l + sovShare(uint64(l)) - } - } - l = len(m.LeafHash) + l = len(m.Data) if l > 0 { n += 1 + l + sovShare(uint64(l)) } + if m.Proof != nil { + l = m.Proof.Size() + n += 1 + l + 
sovShare(uint64(l)) + } return n } -func (m *Share) Size() (n int) { +func (m *BadEncoding) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.Data) + l = len(m.HeaderHash) if l > 0 { n += 1 + l + sovShare(uint64(l)) } - if m.Proof != nil { - l = m.Proof.Size() - n += 1 + l + sovShare(uint64(l)) + if m.Height != 0 { + n += 1 + sovShare(uint64(m.Height)) + } + if len(m.Shares) > 0 { + for _, e := range m.Shares { + l = e.Size() + n += 1 + l + sovShare(uint64(l)) + } + } + if m.Index != 0 { + n += 1 + sovShare(uint64(m.Index)) + } + if m.Axis != 0 { + n += 1 + sovShare(uint64(m.Axis)) } return n } @@ -319,7 +372,7 @@ func sovShare(x uint64) (n int) { func sozShare(x uint64) (n int) { return sovShare(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } -func (m *MerkleProof) Unmarshal(dAtA []byte) error { +func (m *Share) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -342,53 +395,15 @@ func (m *MerkleProof) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: MerkleProof: wiretype end group for non-group") + return fmt.Errorf("proto: Share: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: MerkleProof: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Share: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Start", wireType) - } - m.Start = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShare - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Start |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field End", wireType) - } - m.End = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShare - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.End |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Nodes", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { @@ -415,14 +430,16 @@ func (m *MerkleProof) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Nodes = append(m.Nodes, make([]byte, postIndex-iNdEx)) - copy(m.Nodes[len(m.Nodes)-1], dAtA[iNdEx:postIndex]) + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) + if m.Data == nil { + m.Data = []byte{} + } iNdEx = postIndex - case 4: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LeafHash", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Proof", wireType) } - var byteLen int + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowShare @@ -432,24 +449,26 @@ func (m *MerkleProof) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - if byteLen < 0 { + if msglen < 0 { return ErrInvalidLengthShare } - postIndex := iNdEx + byteLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthShare } if postIndex > l { return io.ErrUnexpectedEOF } - m.LeafHash = append(m.LeafHash[:0], dAtA[iNdEx:postIndex]...) 
- if m.LeafHash == nil { - m.LeafHash = []byte{} + if m.Proof == nil { + m.Proof = &pb.Proof{} + } + if err := m.Proof.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } iNdEx = postIndex default: @@ -473,7 +492,7 @@ func (m *MerkleProof) Unmarshal(dAtA []byte) error { } return nil } -func (m *Share) Unmarshal(dAtA []byte) error { +func (m *BadEncoding) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -496,15 +515,15 @@ func (m *Share) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Share: wiretype end group for non-group") + return fmt.Errorf("proto: BadEncoding: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Share: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: BadEncoding: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field HeaderHash", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { @@ -531,14 +550,33 @@ func (m *Share) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) - if m.Data == nil { - m.Data = []byte{} + m.HeaderHash = append(m.HeaderHash[:0], dAtA[iNdEx:postIndex]...) + if m.HeaderHash == nil { + m.HeaderHash = []byte{} } iNdEx = postIndex case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + m.Height = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShare + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Height |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Proof", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Shares", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -565,13 +603,49 @@ func (m *Share) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Proof == nil { - m.Proof = &MerkleProof{} - } - if err := m.Proof.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Shares = append(m.Shares, &Share{}) + if err := m.Shares[len(m.Shares)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) + } + m.Index = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShare + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Index |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Axis", wireType) + } + m.Axis = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShare + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Axis |= Axis(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipShare(dAtA[iNdEx:]) diff --git a/fraud/pb/proof.proto b/share/eds/byzantine/pb/share.proto similarity index 53% rename from fraud/pb/proof.proto rename to share/eds/byzantine/pb/share.proto index 873bfa316c..33e3dae2c2 100644 --- a/fraud/pb/proof.proto 
+++ b/share/eds/byzantine/pb/share.proto
@@ -1,8 +1,12 @@
 syntax = "proto3";
 
-package fraud.pb;
+package share.eds.byzantine.pb;
+import "pb/proof.proto";
 
-import "ipld/pb/share.proto";
+message Share {
+  bytes Data = 1;
+  proof.pb.Proof Proof = 2;
+}
 
 enum axis {
   ROW = 0;
@@ -12,7 +16,7 @@ enum axis {
 message BadEncoding {
   bytes HeaderHash = 1;
   uint64 Height = 2;
-  repeated ipld.pb.Share Shares = 3;
+  repeated Share Shares = 3;
   uint32 Index = 4;
   axis Axis = 5;
 }
diff --git a/share/eds/byzantine/share_proof.go b/share/eds/byzantine/share_proof.go
new file mode 100644
index 0000000000..98b58ebbec
--- /dev/null
+++ b/share/eds/byzantine/share_proof.go
@@ -0,0 +1,134 @@
+package byzantine
+
+import (
+	"context"
+	"crypto/sha256"
+
+	"github.com/ipfs/boxo/blockservice"
+	"github.com/ipfs/go-cid"
+	logging "github.com/ipfs/go-log/v2"
+
+	"github.com/celestiaorg/nmt"
+	nmt_pb "github.com/celestiaorg/nmt/pb"
+
+	"github.com/celestiaorg/celestia-node/share"
+	pb "github.com/celestiaorg/celestia-node/share/eds/byzantine/pb"
+	"github.com/celestiaorg/celestia-node/share/ipld"
+)
+
+var log = logging.Logger("share/byzantine")
+
+// ShareWithProof contains data with the corresponding Merkle Proof
+type ShareWithProof struct {
+	// Share is the full data, including the namespace
+	share.Share
+	// Proof is a Merkle Proof of the current share
+	Proof *nmt.Proof
+}
+
+// NewShareWithProof takes the given leaf and its path, starting from the tree root,
+// and computes the nmt.Proof for it.
+func NewShareWithProof(index int, share share.Share, pathToLeaf []cid.Cid) *ShareWithProof {
+	rangeProofs := make([][]byte, 0, len(pathToLeaf))
+	for i := len(pathToLeaf) - 1; i >= 0; i-- {
+		node := ipld.NamespacedSha256FromCID(pathToLeaf[i])
+		rangeProofs = append(rangeProofs, node)
+	}
+
+	proof := nmt.NewInclusionProof(index, index+1, rangeProofs, true)
+	return &ShareWithProof{
+		share,
+		&proof,
+	}
+}
+
+// Validate validates inclusion of the share under the given root CID.
+func (s *ShareWithProof) Validate(root cid.Cid) bool {
+	return s.Proof.VerifyInclusion(
+		sha256.New(), // TODO(@Wondertan): This should be defined somewhere globally
+		share.GetNamespace(s.Share).ToNMT(),
+		[][]byte{share.GetData(s.Share)},
+		ipld.NamespacedSha256FromCID(root),
+	)
+}
+
+func (s *ShareWithProof) ShareWithProofToProto() *pb.Share {
+	if s == nil {
+		return &pb.Share{}
+	}
+
+	return &pb.Share{
+		Data: s.Share,
+		Proof: &nmt_pb.Proof{
+			Start:                 int64(s.Proof.Start()),
+			End:                   int64(s.Proof.End()),
+			Nodes:                 s.Proof.Nodes(),
+			LeafHash:              s.Proof.LeafHash(),
+			IsMaxNamespaceIgnored: s.Proof.IsMaxNamespaceIDIgnored(),
+		},
+	}
+}
+
+// GetProofsForShares fetches Merkle proofs for the given shares
+// and returns the result as an array of ShareWithProof.
+func GetProofsForShares(
+	ctx context.Context,
+	bGetter blockservice.BlockGetter,
+	root cid.Cid,
+	shares [][]byte,
+) ([]*ShareWithProof, error) {
+	proofs := make([]*ShareWithProof, len(shares))
+	for index, share := range shares {
+		if share != nil {
+			proof, err := getProofsAt(ctx, bGetter, root, index, len(shares))
+			if err != nil {
+				return nil, err
+			}
+			proofs[index] = proof
+		}
+	}
+	return proofs, nil
+}
+
+func getProofsAt(
+	ctx context.Context,
+	bGetter blockservice.BlockGetter,
+	root cid.Cid,
+	index,
+	total int,
+) (*ShareWithProof, error) {
+	proof := make([]cid.Cid, 0)
+	// TODO(@vgonkivs): Combine GetLeafData and GetProof in one function as they are traversing the same
+	// tree. Add options that will control what data will be fetched.
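+	// Fetch the leaf itself first, then walk the tree again from the same root to collect the
+	// Merkle path to it (the duplicated traversal is what the TODO above refers to).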
+ node, err := ipld.GetLeaf(ctx, bGetter, root, index, total) + if err != nil { + return nil, err + } + + proof, err = ipld.GetProof(ctx, bGetter, root, proof, index, total) + if err != nil { + return nil, err + } + return NewShareWithProof(index, node.RawData(), proof), nil +} + +func ProtoToShare(protoShares []*pb.Share) []*ShareWithProof { + shares := make([]*ShareWithProof, len(protoShares)) + for i, share := range protoShares { + if share.Proof == nil { + continue + } + proof := ProtoToProof(share.Proof) + shares[i] = &ShareWithProof{share.Data, &proof} + } + return shares +} + +func ProtoToProof(protoProof *nmt_pb.Proof) nmt.Proof { + return nmt.NewInclusionProof( + int(protoProof.Start), + int(protoProof.End), + protoProof.Nodes, + protoProof.IsMaxNamespaceIgnored, + ) +} diff --git a/share/eds/byzantine/share_proof_test.go b/share/eds/byzantine/share_proof_test.go new file mode 100644 index 0000000000..a9021d806d --- /dev/null +++ b/share/eds/byzantine/share_proof_test.go @@ -0,0 +1,83 @@ +package byzantine + +import ( + "context" + "strconv" + "testing" + "time" + + "github.com/ipfs/go-cid" + "github.com/stretchr/testify/require" + + "github.com/celestiaorg/celestia-app/pkg/da" + + "github.com/celestiaorg/celestia-node/share/ipld" + "github.com/celestiaorg/celestia-node/share/sharetest" +) + +func TestGetProof(t *testing.T) { + const width = 4 + + ctx, cancel := context.WithTimeout(context.Background(), time.Second*2) + defer cancel() + bServ := ipld.NewMemBlockservice() + + shares := sharetest.RandShares(t, width*width) + in, err := ipld.AddShares(ctx, shares, bServ) + require.NoError(t, err) + + dah, err := da.NewDataAvailabilityHeader(in) + require.NoError(t, err) + var tests = []struct { + roots [][]byte + }{ + {dah.RowRoots}, + {dah.ColumnRoots}, + } + + for i, tt := range tests { + t.Run(strconv.Itoa(i), func(t *testing.T) { + for _, root := range tt.roots { + rootCid := ipld.MustCidFromNamespacedSha256(root) + for index := 0; uint(index) < in.Width(); index++ { + proof := make([]cid.Cid, 0) + proof, err = ipld.GetProof(ctx, bServ, rootCid, proof, index, int(in.Width())) + require.NoError(t, err) + node, err := ipld.GetLeaf(ctx, bServ, rootCid, index, int(in.Width())) + require.NoError(t, err) + inclusion := NewShareWithProof(index, node.RawData(), proof) + require.True(t, inclusion.Validate(rootCid)) + } + } + }) + } +} + +func TestGetProofs(t *testing.T) { + const width = 4 + ctx, cancel := context.WithTimeout(context.Background(), time.Second*2) + defer cancel() + bServ := ipld.NewMemBlockservice() + + shares := sharetest.RandShares(t, width*width) + in, err := ipld.AddShares(ctx, shares, bServ) + require.NoError(t, err) + + dah, err := da.NewDataAvailabilityHeader(in) + require.NoError(t, err) + for _, root := range dah.ColumnRoots { + rootCid := ipld.MustCidFromNamespacedSha256(root) + data := make([][]byte, 0, in.Width()) + for index := 0; uint(index) < in.Width(); index++ { + node, err := ipld.GetLeaf(ctx, bServ, rootCid, index, int(in.Width())) + require.NoError(t, err) + data = append(data, node.RawData()[9:]) + } + + proves, err := GetProofsForShares(ctx, bServ, rootCid, data) + require.NoError(t, err) + for _, proof := range proves { + require.True(t, proof.Validate(rootCid)) + } + } +} diff --git a/share/eds/cache/accessor_cache.go b/share/eds/cache/accessor_cache.go new file mode 100644 index 0000000000..6f937818f8 --- /dev/null +++ b/share/eds/cache/accessor_cache.go @@ -0,0 +1,262 @@ +package cache + +import ( + "context" + "errors" + "fmt" + "io" + "sync" + 
"sync/atomic" + "time" + + "github.com/filecoin-project/dagstore" + "github.com/filecoin-project/dagstore/shard" + lru "github.com/hashicorp/golang-lru/v2" +) + +const defaultCloseTimeout = time.Minute + +var _ Cache = (*AccessorCache)(nil) + +// AccessorCache implements the Cache interface using an LRU cache backend. +type AccessorCache struct { + // The name is a prefix that will be used for cache metrics if they are enabled. + name string + // stripedLocks prevents simultaneous RW access to the blockstore cache for a shard. Instead + // of using only one lock or one lock per key, we stripe the shard keys across 256 locks. 256 is + // chosen because it 0-255 is the range of values we get looking at the last byte of the key. + stripedLocks [256]sync.Mutex + // Caches the blockstore for a given shard for shard read affinity, i.e., further reads will likely + // be from the same shard. Maps (shard key -> blockstore). + cache *lru.Cache[shard.Key, *accessorWithBlockstore] + + metrics *metrics +} + +// accessorWithBlockstore is the value that we store in the blockstore Cache. It implements the +// Accessor interface. +type accessorWithBlockstore struct { + sync.RWMutex + shardAccessor Accessor + // The blockstore is stored separately because each access to the blockstore over the shard + // accessor reopens the underlying CAR. + bs dagstore.ReadBlockstore + + done chan struct{} + refs atomic.Int32 + isClosed bool +} + +// Blockstore implements the Blockstore of the Accessor interface. It creates the blockstore on the +// first request and reuses the created instance for all subsequent requests. +func (s *accessorWithBlockstore) Blockstore() (dagstore.ReadBlockstore, error) { + s.Lock() + defer s.Unlock() + var err error + if s.bs == nil { + s.bs, err = s.shardAccessor.Blockstore() + } + return s.bs, err +} + +// Reader returns a new copy of the reader to read data. +func (s *accessorWithBlockstore) Reader() io.Reader { + return s.shardAccessor.Reader() +} + +func (s *accessorWithBlockstore) addRef() error { + s.Lock() + defer s.Unlock() + if s.isClosed { + // item is already closed and soon will be removed after all refs are released + return errCacheMiss + } + if s.refs.Add(1) == 1 { + // there were no refs previously and done channel was closed, reopen it by recreating + s.done = make(chan struct{}) + } + return nil +} + +func (s *accessorWithBlockstore) removeRef() { + s.Lock() + defer s.Unlock() + if s.refs.Add(-1) <= 0 { + close(s.done) + } +} + +func (s *accessorWithBlockstore) close() error { + s.Lock() + if s.isClosed { + s.Unlock() + // accessor will be closed by another goroutine + return nil + } + s.isClosed = true + done := s.done + s.Unlock() + + select { + case <-done: + case <-time.After(defaultCloseTimeout): + return fmt.Errorf("closing accessor, some readers didn't close the accessor within timeout,"+ + " amount left: %v", s.refs.Load()) + } + if err := s.shardAccessor.Close(); err != nil { + return fmt.Errorf("closing accessor: %w", err) + } + return nil +} + +func NewAccessorCache(name string, cacheSize int) (*AccessorCache, error) { + bc := &AccessorCache{ + name: name, + } + // Instantiate the blockstore Cache. + bslru, err := lru.NewWithEvict[shard.Key, *accessorWithBlockstore](cacheSize, bc.evictFn()) + if err != nil { + return nil, fmt.Errorf("failed to instantiate blockstore cache: %w", err) + } + bc.cache = bslru + return bc, nil +} + +// evictFn will be invoked when an item is evicted from the cache. 
+func (bc *AccessorCache) evictFn() func(shard.Key, *accessorWithBlockstore) {
+	return func(_ shard.Key, abs *accessorWithBlockstore) {
+		// we can release the accessor from the cache early, while it is being closed in a parallel routine
+		go func() {
+			err := abs.close()
+			if err != nil {
+				bc.metrics.observeEvicted(true)
+				log.Errorf("couldn't close accessor after cache eviction: %s", err)
+				return
+			}
+			bc.metrics.observeEvicted(false)
+		}()
+	}
+}
+
+// Get retrieves the Accessor for a given shard key from the Cache. If the Accessor is not in
+// the Cache, it returns an errCacheMiss.
+func (bc *AccessorCache) Get(key shard.Key) (Accessor, error) {
+	lk := &bc.stripedLocks[shardKeyToStriped(key)]
+	lk.Lock()
+	defer lk.Unlock()
+
+	accessor, err := bc.get(key)
+	if err != nil {
+		bc.metrics.observeGet(false)
+		return nil, err
+	}
+	bc.metrics.observeGet(true)
+	return newRefCloser(accessor)
+}
+
+func (bc *AccessorCache) get(key shard.Key) (*accessorWithBlockstore, error) {
+	abs, ok := bc.cache.Get(key)
+	if !ok {
+		return nil, errCacheMiss
+	}
+	return abs, nil
+}
+
+// GetOrLoad attempts to get an item from the cache, and if not found, invokes
+// the provided loader function to load it.
+func (bc *AccessorCache) GetOrLoad(
+	ctx context.Context,
+	key shard.Key,
+	loader func(context.Context, shard.Key) (Accessor, error),
+) (Accessor, error) {
+	lk := &bc.stripedLocks[shardKeyToStriped(key)]
+	lk.Lock()
+	defer lk.Unlock()
+
+	abs, err := bc.get(key)
+	if err == nil {
+		// return the accessor, only if it is not closed yet
+		accessorWithRef, err := newRefCloser(abs)
+		if err == nil {
+			bc.metrics.observeGet(true)
+			return accessorWithRef, nil
+		}
+	}
+
+	// accessor not found in cache, so load a new one using the loader
+	accessor, err := loader(ctx, key)
+	if err != nil {
+		return nil, fmt.Errorf("unable to load accessor: %w", err)
+	}
+
+	abs = &accessorWithBlockstore{
+		shardAccessor: accessor,
+	}
+
+	// Create a new accessor first to increment the reference count in it, so it cannot get evicted
+	// from the inner lru cache before it is used.
+	accessorWithRef, err := newRefCloser(abs)
+	if err != nil {
+		return nil, err
+	}
+	bc.cache.Add(key, abs)
+	return accessorWithRef, nil
+}
+
+// Remove removes the Accessor for a given key from the cache.
+func (bc *AccessorCache) Remove(key shard.Key) error {
+	lk := &bc.stripedLocks[shardKeyToStriped(key)]
+	lk.Lock()
+	accessor, err := bc.get(key)
+	lk.Unlock()
+	if errors.Is(err, errCacheMiss) {
+		// item is not in cache
+		return nil
+	}
+	if err = accessor.close(); err != nil {
+		return err
+	}
+	// The cache will call evictFn on removal, where accessor close will be called.
+	bc.cache.Remove(key)
+	return nil
+}
+
+// EnableMetrics enables metrics for the cache.
+func (bc *AccessorCache) EnableMetrics() error {
+	var err error
+	bc.metrics, err = newMetrics(bc)
+	return err
+}
+
+// refCloser manages references to the accessor and removes the ref when Close is called.
+type refCloser struct {
+	*accessorWithBlockstore
+	closeFn func()
+}
+
+// newRefCloser creates a new refCloser
+func newRefCloser(abs *accessorWithBlockstore) (*refCloser, error) {
+	if err := abs.addRef(); err != nil {
+		return nil, err
+	}
+
+	var closeOnce sync.Once
+	return &refCloser{
+		accessorWithBlockstore: abs,
+		closeFn: func() {
+			closeOnce.Do(abs.removeRef)
+		},
+	}, nil
+}
+
+func (c *refCloser) Close() error {
+	c.closeFn()
+	return nil
+}
+
+// shardKeyToStriped returns the index of the lock to use for a given shard key. 
We use the last +// byte of the shard key as the pseudo-random index. +func shardKeyToStriped(sk shard.Key) byte { + return sk.String()[len(sk.String())-1] +} diff --git a/share/eds/cache/accessor_cache_test.go b/share/eds/cache/accessor_cache_test.go new file mode 100644 index 0000000000..347b251a88 --- /dev/null +++ b/share/eds/cache/accessor_cache_test.go @@ -0,0 +1,350 @@ +package cache + +import ( + "bytes" + "context" + "errors" + "io" + "sync" + "testing" + "time" + + "github.com/filecoin-project/dagstore" + "github.com/filecoin-project/dagstore/shard" + blocks "github.com/ipfs/go-block-format" + "github.com/ipfs/go-cid" + "github.com/stretchr/testify/require" +) + +func TestAccessorCache(t *testing.T) { + t.Run("add / get item from cache", func(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + cache, err := NewAccessorCache("test", 1) + require.NoError(t, err) + + // add accessor to the cache + key := shard.KeyFromString("key") + mock := &mockAccessor{ + data: []byte("test_data"), + } + loaded, err := cache.GetOrLoad(ctx, key, func(ctx context.Context, key shard.Key) (Accessor, error) { + return mock, nil + }) + require.NoError(t, err) + + // check if item exists + got, err := cache.Get(key) + require.NoError(t, err) + + l, err := io.ReadAll(loaded.Reader()) + require.NoError(t, err) + require.Equal(t, mock.data, l) + g, err := io.ReadAll(got.Reader()) + require.NoError(t, err) + require.Equal(t, mock.data, g) + }) + + t.Run("get blockstore from accessor", func(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + cache, err := NewAccessorCache("test", 1) + require.NoError(t, err) + + // add accessor to the cache + key := shard.KeyFromString("key") + mock := &mockAccessor{} + accessor, err := cache.GetOrLoad(ctx, key, func(ctx context.Context, key shard.Key) (Accessor, error) { + return mock, nil + }) + require.NoError(t, err) + + // check if item exists + _, err = cache.Get(key) + require.NoError(t, err) + + // blockstore should be created only after first request + require.Equal(t, 0, mock.returnedBs) + + // try to get blockstore + _, err = accessor.Blockstore() + require.NoError(t, err) + + // second call to blockstore should return same blockstore + _, err = accessor.Blockstore() + require.NoError(t, err) + require.Equal(t, 1, mock.returnedBs) + }) + + t.Run("remove an item", func(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + cache, err := NewAccessorCache("test", 1) + require.NoError(t, err) + + // add accessor to the cache + key := shard.KeyFromString("key") + mock := &mockAccessor{} + ac, err := cache.GetOrLoad(ctx, key, func(ctx context.Context, key shard.Key) (Accessor, error) { + return mock, nil + }) + require.NoError(t, err) + err = ac.Close() + require.NoError(t, err) + + err = cache.Remove(key) + require.NoError(t, err) + + // accessor should be closed on removal + mock.checkClosed(t, true) + + // check if item exists + _, err = cache.Get(key) + require.ErrorIs(t, err, errCacheMiss) + }) + + t.Run("successive reads should read the same data", func(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + cache, err := NewAccessorCache("test", 1) + require.NoError(t, err) + + // add accessor to the cache + key := shard.KeyFromString("key") + mock := &mockAccessor{data: []byte("test")} + accessor, err := cache.GetOrLoad(ctx, key, func(ctx 
context.Context, key shard.Key) (Accessor, error) { + return mock, nil + }) + require.NoError(t, err) + + loaded, err := io.ReadAll(accessor.Reader()) + require.NoError(t, err) + require.Equal(t, mock.data, loaded) + + for i := 0; i < 2; i++ { + accessor, err = cache.Get(key) + require.NoError(t, err) + got, err := io.ReadAll(accessor.Reader()) + require.NoError(t, err) + require.Equal(t, mock.data, got) + } + }) + + t.Run("removed by eviction", func(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + cache, err := NewAccessorCache("test", 1) + require.NoError(t, err) + + // add accessor to the cache + key := shard.KeyFromString("key") + mock := &mockAccessor{} + ac1, err := cache.GetOrLoad(ctx, key, func(ctx context.Context, key shard.Key) (Accessor, error) { + return mock, nil + }) + require.NoError(t, err) + err = ac1.Close() + require.NoError(t, err) + + // add second item + key2 := shard.KeyFromString("key2") + ac2, err := cache.GetOrLoad(ctx, key2, func(ctx context.Context, key shard.Key) (Accessor, error) { + return mock, nil + }) + require.NoError(t, err) + err = ac2.Close() + require.NoError(t, err) + + // accessor should be closed on removal by eviction + mock.checkClosed(t, true) + + // check if item evicted + _, err = cache.Get(key) + require.ErrorIs(t, err, errCacheMiss) + }) + + t.Run("close on accessor is not closing underlying accessor", func(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + cache, err := NewAccessorCache("test", 1) + require.NoError(t, err) + + // add accessor to the cache + key := shard.KeyFromString("key") + mock := &mockAccessor{} + _, err = cache.GetOrLoad(ctx, key, func(ctx context.Context, key shard.Key) (Accessor, error) { + return mock, nil + }) + require.NoError(t, err) + + // check if item exists + accessor, err := cache.Get(key) + require.NoError(t, err) + require.NotNil(t, accessor) + + // close on returned accessor should not close inner accessor + err = accessor.Close() + require.NoError(t, err) + + // check that close was not performed on inner accessor + mock.checkClosed(t, false) + }) + + t.Run("close on accessor should wait all readers to finish", func(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + cache, err := NewAccessorCache("test", 1) + require.NoError(t, err) + + // add accessor to the cache + key := shard.KeyFromString("key") + mock := &mockAccessor{} + accessor1, err := cache.GetOrLoad(ctx, key, func(ctx context.Context, key shard.Key) (Accessor, error) { + return mock, nil + }) + require.NoError(t, err) + + // create second readers + accessor2, err := cache.Get(key) + require.NoError(t, err) + + // initialize close + done := make(chan struct{}) + go func() { + err := cache.Remove(key) + require.NoError(t, err) + close(done) + }() + + // close on first reader and check that it is not enough to release the inner accessor + err = accessor1.Close() + require.NoError(t, err) + mock.checkClosed(t, false) + + // second close from same reader should not release accessor either + err = accessor1.Close() + require.NoError(t, err) + mock.checkClosed(t, false) + + // reads for item that is being evicted should result in errCacheMiss + _, err = cache.Get(key) + require.ErrorIs(t, err, errCacheMiss) + + // close second reader and wait for accessor to be closed + err = accessor2.Close() + require.NoError(t, err) + // wait until close is performed on accessor + select { + case 
<-done:
+		case <-ctx.Done():
+			t.Fatal("timeout reached")
+		}
+
+		// item will be removed
+		mock.checkClosed(t, true)
+	})
+
+	t.Run("slow reader should not block eviction", func(t *testing.T) {
+		ctx, cancel := context.WithTimeout(context.Background(), time.Second)
+		defer cancel()
+		cache, err := NewAccessorCache("test", 1)
+		require.NoError(t, err)
+
+		// add accessor to the cache
+		key1 := shard.KeyFromString("key1")
+		mock1 := &mockAccessor{}
+		accessor1, err := cache.GetOrLoad(ctx, key1, func(ctx context.Context, key shard.Key) (Accessor, error) {
+			return mock1, nil
+		})
+		require.NoError(t, err)
+
+		// add second accessor, to trigger eviction of the first one
+		key2 := shard.KeyFromString("key2")
+		mock2 := &mockAccessor{}
+		accessor2, err := cache.GetOrLoad(ctx, key2, func(ctx context.Context, key shard.Key) (Accessor, error) {
+			return mock2, nil
+		})
+		require.NoError(t, err)
+
+		// first accessor should be evicted from cache
+		_, err = cache.Get(key1)
+		require.ErrorIs(t, err, errCacheMiss)
+
+		// first accessor should not be closed before all refs are released by calls to Close()
+		mock1.checkClosed(t, false)
+
+		// after Close() is called on the first accessor, it is free to get closed
+		err = accessor1.Close()
+		require.NoError(t, err)
+		mock1.checkClosed(t, true)
+
+		// after Close is called on the second accessor, it should stay in cache (not closed)
+		err = accessor2.Close()
+		require.NoError(t, err)
+		mock2.checkClosed(t, false)
+	})
+}
+
+type mockAccessor struct {
+	m          sync.Mutex
+	data       []byte
+	isClosed   bool
+	returnedBs int
+}
+
+func (m *mockAccessor) Reader() io.Reader {
+	m.m.Lock()
+	defer m.m.Unlock()
+	return bytes.NewBuffer(m.data)
+}
+
+func (m *mockAccessor) Blockstore() (dagstore.ReadBlockstore, error) {
+	m.m.Lock()
+	defer m.m.Unlock()
+	if m.returnedBs > 0 {
+		return nil, errors.New("blockstore already returned")
+	}
+	m.returnedBs++
+	return rbsMock{}, nil
+}
+
+func (m *mockAccessor) Close() error {
+	m.m.Lock()
+	defer m.m.Unlock()
+	if m.isClosed {
+		return errors.New("already closed")
+	}
+	m.isClosed = true
+	return nil
+}
+
+func (m *mockAccessor) checkClosed(t *testing.T, expected bool) {
+	// item will be removed in background, so give it some time to settle
+	time.Sleep(time.Millisecond * 100)
+	m.m.Lock()
+	defer m.m.Unlock()
+	require.Equal(t, expected, m.isClosed)
+}
+
+// rbsMock is a dagstore.ReadBlockstore mock
+type rbsMock struct{}
+
+func (r rbsMock) Has(context.Context, cid.Cid) (bool, error) {
+	panic("implement me")
+}
+
+func (r rbsMock) Get(_ context.Context, _ cid.Cid) (blocks.Block, error) {
+	panic("implement me")
+}
+
+func (r rbsMock) GetSize(context.Context, cid.Cid) (int, error) {
+	panic("implement me")
+}
+
+func (r rbsMock) AllKeysChan(context.Context) (<-chan cid.Cid, error) {
+	panic("implement me")
+}
+
+func (r rbsMock) HashOnRead(bool) {
+	panic("implement me")
+}
diff --git a/share/eds/cache/cache.go b/share/eds/cache/cache.go
new file mode 100644
index 0000000000..13e207d7c0
--- /dev/null
+++ b/share/eds/cache/cache.go
@@ -0,0 +1,49 @@
+package cache
+
+import (
+	"context"
+	"errors"
+	"io"
+
+	"github.com/filecoin-project/dagstore"
+	"github.com/filecoin-project/dagstore/shard"
+	logging "github.com/ipfs/go-log/v2"
+	"go.opentelemetry.io/otel"
+)
+
+var (
+	log   = logging.Logger("share/eds/cache")
+	meter = otel.Meter("eds_store_cache")
+)
+
+var (
+	errCacheMiss = errors.New("accessor not found in blockstore cache")
+)
+
+// Cache is an interface that defines the basic Cache operations.
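+// A typical (hypothetical) call pattern, mirroring the tests above: obtain an Accessor via
+// GetOrLoad with a loader, read from it, and release the reference with Close, e.g.:
+//
+//	ac, err := cache.GetOrLoad(ctx, key, loader)
+//	if err != nil {
+//		return err
+//	}
+//	defer ac.Close() // releases this ref; the underlying accessor is closed on eviction
+//	data, err := io.ReadAll(ac.Reader())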
+type Cache interface {
+	// Get retrieves an item from the Cache.
+	Get(shard.Key) (Accessor, error)
+
+	// GetOrLoad attempts to get an item from the Cache and, if not found, invokes
+	// the provided loader function to load it into the Cache.
+	GetOrLoad(
+		ctx context.Context,
+		key shard.Key,
+		loader func(context.Context, shard.Key) (Accessor, error),
+	) (Accessor, error)
+
+	// Remove removes an item from Cache.
+	Remove(shard.Key) error
+
+	// EnableMetrics enables metrics in Cache
+	EnableMetrics() error
+}
+
+// Accessor is an interface type returned by the cache that allows reading raw data via a reader
+// or creating a read blockstore.
+type Accessor interface {
+	Blockstore() (dagstore.ReadBlockstore, error)
+	Reader() io.Reader
+	io.Closer
+}
diff --git a/share/eds/cache/doublecache.go b/share/eds/cache/doublecache.go
new file mode 100644
index 0000000000..a63eadee9e
--- /dev/null
+++ b/share/eds/cache/doublecache.go
@@ -0,0 +1,51 @@
+package cache
+
+import (
+	"errors"
+
+	"github.com/filecoin-project/dagstore/shard"
+)
+
+// DoubleCache represents a Cache that looks into multiple caches one by one.
+type DoubleCache struct {
+	first, second Cache
+}
+
+// NewDoubleCache creates a new DoubleCache with the provided caches.
+func NewDoubleCache(first, second Cache) *DoubleCache {
+	return &DoubleCache{
+		first:  first,
+		second: second,
+	}
+}
+
+// Get looks for an item in the caches one by one and returns the first item found.
+func (mc *DoubleCache) Get(key shard.Key) (Accessor, error) {
+	ac, err := mc.first.Get(key)
+	if err == nil {
+		return ac, nil
+	}
+	return mc.second.Get(key)
+}
+
+// Remove removes an item from all underlying caches
+func (mc *DoubleCache) Remove(key shard.Key) error {
+	err1 := mc.first.Remove(key)
+	err2 := mc.second.Remove(key)
+	return errors.Join(err1, err2)
+}
+
+func (mc *DoubleCache) First() Cache {
+	return mc.first
+}
+
+func (mc *DoubleCache) Second() Cache {
+	return mc.second
+}
+
+func (mc *DoubleCache) EnableMetrics() error {
+	if err := mc.first.EnableMetrics(); err != nil {
+		return err
+	}
+	return mc.second.EnableMetrics()
+}
diff --git a/share/eds/cache/metrics.go b/share/eds/cache/metrics.go
new file mode 100644
index 0000000000..b2e3bec8d8
--- /dev/null
+++ b/share/eds/cache/metrics.go
@@ -0,0 +1,69 @@
+package cache
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/metric"
+)
+
+const (
+	cacheFoundKey = "found"
+	failedKey     = "failed"
+)
+
+type metrics struct {
+	getCounter     metric.Int64Counter
+	evictedCounter metric.Int64Counter
+}
+
+func newMetrics(bc *AccessorCache) (*metrics, error) {
+	metricsPrefix := "eds_blockstore_cache_" + bc.name
+
+	evictedCounter, err := meter.Int64Counter(metricsPrefix+"_evicted_counter",
+		metric.WithDescription("eds blockstore cache evicted event counter"))
+	if err != nil {
+		return nil, err
+	}
+
+	getCounter, err := meter.Int64Counter(metricsPrefix+"_get_counter",
+		metric.WithDescription("eds blockstore cache get event counter"))
+	if err != nil {
+		return nil, err
+	}
+
+	cacheSize, err := meter.Int64ObservableGauge(metricsPrefix+"_size",
+		metric.WithDescription("total amount of items in blockstore cache"),
+	)
+	if err != nil {
+		return nil, err
+	}
+
+	callback := func(ctx context.Context, observer metric.Observer) error {
+		observer.ObserveInt64(cacheSize, int64(bc.cache.Len()))
+		return nil
+	}
+	_, err = meter.RegisterCallback(callback, cacheSize)
+
+	return &metrics{
+		getCounter:     getCounter,
+		evictedCounter: evictedCounter,
+	}, err
+}
+
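+// The observe helpers below are nil-safe: when metrics were never enabled, m is nil and the
+// calls are no-ops, so callers do not have to guard their instrumentation.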
+func (m *metrics) observeEvicted(failed bool) { + if m == nil { + return + } + m.evictedCounter.Add(context.Background(), 1, + metric.WithAttributes( + attribute.Bool(failedKey, failed))) +} + +func (m *metrics) observeGet(found bool) { + if m == nil { + return + } + m.getCounter.Add(context.Background(), 1, metric.WithAttributes( + attribute.Bool(cacheFoundKey, found))) +} diff --git a/share/eds/cache/noop.go b/share/eds/cache/noop.go new file mode 100644 index 0000000000..0a1a39ec7e --- /dev/null +++ b/share/eds/cache/noop.go @@ -0,0 +1,50 @@ +package cache + +import ( + "context" + "io" + + "github.com/filecoin-project/dagstore" + "github.com/filecoin-project/dagstore/shard" +) + +var _ Cache = (*NoopCache)(nil) + +// NoopCache implements noop version of Cache interface +type NoopCache struct{} + +func (n NoopCache) Get(shard.Key) (Accessor, error) { + return nil, errCacheMiss +} + +func (n NoopCache) GetOrLoad( + context.Context, shard.Key, + func(context.Context, shard.Key) (Accessor, error), +) (Accessor, error) { + return NoopAccessor{}, nil +} + +func (n NoopCache) Remove(shard.Key) error { + return nil +} + +func (n NoopCache) EnableMetrics() error { + return nil +} + +var _ Accessor = (*NoopAccessor)(nil) + +// NoopAccessor implements noop version of Accessor interface +type NoopAccessor struct{} + +func (n NoopAccessor) Blockstore() (dagstore.ReadBlockstore, error) { + return nil, nil +} + +func (n NoopAccessor) Reader() io.Reader { + return nil +} + +func (n NoopAccessor) Close() error { + return nil +} diff --git a/share/eds/eds.go b/share/eds/eds.go new file mode 100644 index 0000000000..e0433a1b6b --- /dev/null +++ b/share/eds/eds.go @@ -0,0 +1,274 @@ +package eds + +import ( + "bytes" + "context" + "crypto/sha256" + "errors" + "fmt" + "io" + "math" + + "github.com/ipfs/go-cid" + "github.com/ipld/go-car" + "github.com/ipld/go-car/util" + + "github.com/celestiaorg/celestia-app/pkg/wrapper" + "github.com/celestiaorg/nmt" + "github.com/celestiaorg/rsmt2d" + + "github.com/celestiaorg/celestia-node/libs/utils" + "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/share/ipld" +) + +var ErrEmptySquare = errors.New("share: importing empty data") + +// WriteEDS writes the entire EDS into the given io.Writer as CARv1 file. +// This includes all shares in quadrant order, followed by all inner nodes of the NMT tree. +// Order: [ Carv1Header | Q1 | Q2 | Q3 | Q4 | inner nodes ] +// For more information about the header: https://ipld.io/specs/transport/car/carv1/#header +func WriteEDS(ctx context.Context, eds *rsmt2d.ExtendedDataSquare, w io.Writer) (err error) { + ctx, span := tracer.Start(ctx, "write-eds") + defer func() { + utils.SetStatusAndEnd(span, err) + }() + + // Creates and writes Carv1Header. Roots are the eds Row + Col roots + err = writeHeader(eds, w) + if err != nil { + return fmt.Errorf("share: writing carv1 header: %w", err) + } + // Iterates over shares in quadrant order via eds.GetCell + err = writeQuadrants(eds, w) + if err != nil { + return fmt.Errorf("share: writing shares: %w", err) + } + + // Iterates over proofs and writes them to the CAR + err = writeProofs(ctx, eds, w) + if err != nil { + return fmt.Errorf("share: writing proofs: %w", err) + } + return nil +} + +// writeHeader creates a CarV1 header using the EDS's Row and Column roots as the list of DAG roots. 
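+// For an ODS of width k the extended square is 2k wide, so the header lists 4k roots (2k row
+// roots followed by 2k column roots); ReadEDS relies on this count to derive the ODS width.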
+func writeHeader(eds *rsmt2d.ExtendedDataSquare, w io.Writer) error { + rootCids, err := rootsToCids(eds) + if err != nil { + return fmt.Errorf("getting root cids: %w", err) + } + + return car.WriteHeader(&car.CarHeader{ + Roots: rootCids, + Version: 1, + }, w) +} + +// writeQuadrants reorders the shares to quadrant order and writes them to the CARv1 file. +func writeQuadrants(eds *rsmt2d.ExtendedDataSquare, w io.Writer) error { + hasher := nmt.NewNmtHasher(sha256.New(), share.NamespaceSize, ipld.NMTIgnoreMaxNamespace) + shares := quadrantOrder(eds) + for _, share := range shares { + leaf, err := hasher.HashLeaf(share) + if err != nil { + return fmt.Errorf("hashing share: %w", err) + } + cid, err := ipld.CidFromNamespacedSha256(leaf) + if err != nil { + return fmt.Errorf("getting cid from share: %w", err) + } + err = util.LdWrite(w, cid.Bytes(), share) + if err != nil { + return fmt.Errorf("writing share to file: %w", err) + } + } + return nil +} + +// writeProofs iterates over the in-memory blockstore's keys and writes all inner nodes to the +// CARv1 file. +func writeProofs(ctx context.Context, eds *rsmt2d.ExtendedDataSquare, w io.Writer) error { + // check if proofs are collected by ipld.ProofsAdder in previous reconstructions of eds + proofs, err := getProofs(ctx, eds) + if err != nil { + return fmt.Errorf("recomputing proofs: %w", err) + } + + for id, proof := range proofs { + err := util.LdWrite(w, id.Bytes(), proof) + if err != nil { + return fmt.Errorf("writing proof to the car: %w", err) + } + } + return nil +} + +func getProofs(ctx context.Context, eds *rsmt2d.ExtendedDataSquare) (map[cid.Cid][]byte, error) { + // check if there are proofs collected by ipld.ProofsAdder in previous reconstruction of eds + if adder := ipld.ProofsAdderFromCtx(ctx); adder != nil { + defer adder.Purge() + return adder.Proofs(), nil + } + + // recompute proofs from eds + shares := eds.Flattened() + shareCount := len(shares) + if shareCount == 0 { + return nil, ErrEmptySquare + } + odsWidth := int(math.Sqrt(float64(shareCount)) / 2) + + // this adder ignores leaves, so that they are not added to the store we iterate through in + // writeProofs + adder := ipld.NewProofsAdder(odsWidth * 2) + defer adder.Purge() + + eds, err := rsmt2d.ImportExtendedDataSquare( + shares, + share.DefaultRSMT2DCodec(), + wrapper.NewConstructor(uint64(odsWidth), + nmt.NodeVisitor(adder.VisitFn())), + ) + if err != nil { + return nil, fmt.Errorf("recomputing data square: %w", err) + } + // compute roots + if _, err = eds.RowRoots(); err != nil { + return nil, fmt.Errorf("computing row roots: %w", err) + } + + return adder.Proofs(), nil +} + +// quadrantOrder reorders the shares in the EDS to quadrant row-by-row order, prepending the +// respective namespace to the shares. +// e.g. [ Q1 R1 | Q1 R2 | Q1 R3 | Q1 R4 | Q2 R1 | Q2 R2 .... 
] +func quadrantOrder(eds *rsmt2d.ExtendedDataSquare) [][]byte { + size := eds.Width() * eds.Width() + shares := make([][]byte, size) + + quadrantWidth := int(eds.Width() / 2) + quadrantSize := quadrantWidth * quadrantWidth + for i := 0; i < quadrantWidth; i++ { + for j := 0; j < quadrantWidth; j++ { + cells := getQuadrantCells(eds, uint(i), uint(j)) + innerOffset := i*quadrantWidth + j + for quadrant := 0; quadrant < 4; quadrant++ { + shares[(quadrant*quadrantSize)+innerOffset] = prependNamespace(quadrant, cells[quadrant]) + } + } + } + return shares +} + +// getQuadrantCells returns the cell of each EDS quadrant with the passed inner-quadrant coordinates +func getQuadrantCells(eds *rsmt2d.ExtendedDataSquare, i, j uint) [][]byte { + cells := make([][]byte, 4) + quadrantWidth := eds.Width() / 2 + cells[0] = eds.GetCell(i, j) + cells[1] = eds.GetCell(i, j+quadrantWidth) + cells[2] = eds.GetCell(i+quadrantWidth, j) + cells[3] = eds.GetCell(i+quadrantWidth, j+quadrantWidth) + return cells +} + +// prependNamespace adds the namespace to the passed share if in the first quadrant, +// otherwise it adds the ParitySharesNamespace to the beginning. +func prependNamespace(quadrant int, shr share.Share) []byte { + namespacedShare := make([]byte, 0, share.NamespaceSize+share.Size) + switch quadrant { + case 0: + return append(append(namespacedShare, share.GetNamespace(shr)...), shr...) + case 1, 2, 3: + return append(append(namespacedShare, share.ParitySharesNamespace...), shr...) + default: + panic("invalid quadrant") + } +} + +// rootsToCids converts the EDS's Row and Column roots to CIDs. +func rootsToCids(eds *rsmt2d.ExtendedDataSquare) ([]cid.Cid, error) { + rowRoots, err := eds.RowRoots() + if err != nil { + return nil, err + } + colRoots, err := eds.ColRoots() + if err != nil { + return nil, err + } + + roots := make([][]byte, 0, len(rowRoots)+len(colRoots)) + roots = append(roots, rowRoots...) + roots = append(roots, colRoots...) + rootCids := make([]cid.Cid, len(roots)) + for i, r := range roots { + rootCids[i], err = ipld.CidFromNamespacedSha256(r) + if err != nil { + return nil, fmt.Errorf("getting cid from root: %w", err) + } + } + return rootCids, nil +} + +// ReadEDS reads the first EDS quadrant (1/4) from an io.Reader CAR file. +// Only the first quadrant will be read, which represents the original data. +// The returned EDS is guaranteed to be full and valid against the DataRoot, otherwise ReadEDS +// errors. +func ReadEDS(ctx context.Context, r io.Reader, root share.DataHash) (eds *rsmt2d.ExtendedDataSquare, err error) { + _, span := tracer.Start(ctx, "read-eds") + defer func() { + utils.SetStatusAndEnd(span, err) + }() + + carReader, err := car.NewCarReader(r) + if err != nil { + return nil, fmt.Errorf("share: reading car file: %w", err) + } + + // car header includes both row and col roots in header + odsWidth := len(carReader.Header.Roots) / 4 + odsSquareSize := odsWidth * odsWidth + shares := make([][]byte, odsSquareSize) + // the first quadrant is stored directly after the header, + // so we can just read the first odsSquareSize blocks + for i := 0; i < odsSquareSize; i++ { + block, err := carReader.Next() + if err != nil { + return nil, fmt.Errorf("share: reading next car entry: %w", err) + } + // the stored first quadrant shares are wrapped with the namespace twice. 
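+		// (once inside the share itself and once prepended for hashing by quadrantOrder in WriteEDS)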
+ // we cut it off here, because it is added again while importing to the tree below + shares[i] = share.GetData(block.RawData()) + } + + // use proofs adder if provided, to cache collected proofs while recomputing the eds + var opts []nmt.Option + visitor := ipld.ProofsAdderFromCtx(ctx).VisitFn() + if visitor != nil { + opts = append(opts, nmt.NodeVisitor(visitor)) + } + + eds, err = rsmt2d.ComputeExtendedDataSquare( + shares, + share.DefaultRSMT2DCodec(), + wrapper.NewConstructor(uint64(odsWidth), opts...), + ) + if err != nil { + return nil, fmt.Errorf("share: computing eds: %w", err) + } + + newDah, err := share.NewRoot(eds) + if err != nil { + return nil, err + } + if !bytes.Equal(newDah.Hash(), root) { + return nil, fmt.Errorf( + "share: content integrity mismatch: imported root %s doesn't match expected root %s", + newDah.Hash(), + root, + ) + } + return eds, nil +} diff --git a/share/eds/eds_test.go b/share/eds/eds_test.go new file mode 100644 index 0000000000..ffb05343b9 --- /dev/null +++ b/share/eds/eds_test.go @@ -0,0 +1,283 @@ +package eds + +import ( + "bytes" + "context" + "embed" + "encoding/json" + "fmt" + "os" + "testing" + + bstore "github.com/ipfs/boxo/blockstore" + ds "github.com/ipfs/go-datastore" + carv1 "github.com/ipld/go-car" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/libs/rand" + + "github.com/celestiaorg/celestia-app/pkg/appconsts" + "github.com/celestiaorg/celestia-app/pkg/da" + "github.com/celestiaorg/rsmt2d" + + "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/share/eds/edstest" +) + +//go:embed "testdata/example-root.json" +var exampleRoot string + +//go:embed "testdata/example.car" +var f embed.FS + +func TestQuadrantOrder(t *testing.T) { + testCases := []struct { + name string + squareSize int + }{ + {"smol", 2}, + {"still smol", 8}, + {"default mainnet", appconsts.DefaultGovMaxSquareSize}, + {"max", share.MaxSquareSize}, + } + + testShareSize := 64 + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + shares := make([][]byte, tc.squareSize*tc.squareSize) + + for i := 0; i < tc.squareSize*tc.squareSize; i++ { + shares[i] = rand.Bytes(testShareSize) + } + + eds, err := rsmt2d.ComputeExtendedDataSquare(shares, share.DefaultRSMT2DCodec(), rsmt2d.NewDefaultTree) + require.NoError(t, err) + + res := quadrantOrder(eds) + for _, s := range res { + require.Len(t, s, testShareSize+share.NamespaceSize) + } + + for q := 0; q < 4; q++ { + for i := 0; i < tc.squareSize; i++ { + for j := 0; j < tc.squareSize; j++ { + resIndex := q*tc.squareSize*tc.squareSize + i*tc.squareSize + j + edsRow := q/2*tc.squareSize + i + edsCol := (q%2)*tc.squareSize + j + + assert.Equal(t, res[resIndex], prependNamespace(q, eds.Row(uint(edsRow))[edsCol])) + } + } + } + }) + } +} + +func TestWriteEDS(t *testing.T) { + writeRandomEDS(t) +} + +func TestWriteEDSHeaderRoots(t *testing.T) { + eds := writeRandomEDS(t) + f := openWrittenEDS(t) + defer f.Close() + + reader, err := carv1.NewCarReader(f) + require.NoError(t, err, "error creating car reader") + roots, err := rootsToCids(eds) + require.NoError(t, err, "error converting roots to cids") + require.Equal(t, roots, reader.Header.Roots) +} + +func TestWriteEDSStartsWithLeaves(t *testing.T) { + eds := writeRandomEDS(t) + f := openWrittenEDS(t) + defer f.Close() + + reader, err := carv1.NewCarReader(f) + require.NoError(t, err, "error creating car reader") + block, err := reader.Next() + require.NoError(t, err, "error 
getting first block") + + require.Equal(t, share.GetData(block.RawData()), eds.GetCell(0, 0)) +} + +func TestWriteEDSIncludesRoots(t *testing.T) { + writeRandomEDS(t) + f := openWrittenEDS(t) + defer f.Close() + + bs := bstore.NewBlockstore(ds.NewMapDatastore()) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + loaded, err := carv1.LoadCar(ctx, bs, f) + require.NoError(t, err, "error loading car file") + for _, root := range loaded.Roots { + ok, err := bs.Has(context.Background(), root) + require.NoError(t, err, "error checking if blockstore has root") + require.True(t, ok, "blockstore does not have root") + } +} + +func TestWriteEDSInQuadrantOrder(t *testing.T) { + eds := writeRandomEDS(t) + f := openWrittenEDS(t) + defer f.Close() + + reader, err := carv1.NewCarReader(f) + require.NoError(t, err, "error creating car reader") + + shares := quadrantOrder(eds) + for i := 0; i < len(shares); i++ { + block, err := reader.Next() + require.NoError(t, err, "error getting block") + require.Equal(t, block.RawData(), shares[i]) + } +} + +func TestReadWriteRoundtrip(t *testing.T) { + eds := writeRandomEDS(t) + dah, err := share.NewRoot(eds) + require.NoError(t, err) + f := openWrittenEDS(t) + defer f.Close() + + loaded, err := ReadEDS(context.Background(), f, dah.Hash()) + require.NoError(t, err, "error reading EDS from file") + + rowRoots, err := eds.RowRoots() + require.NoError(t, err) + loadedRowRoots, err := loaded.RowRoots() + require.NoError(t, err) + require.Equal(t, rowRoots, loadedRowRoots) + + colRoots, err := eds.ColRoots() + require.NoError(t, err) + loadedColRoots, err := loaded.ColRoots() + require.NoError(t, err) + require.Equal(t, colRoots, loadedColRoots) +} + +func TestReadEDS(t *testing.T) { + f, err := f.Open("testdata/example.car") + require.NoError(t, err, "error opening file") + + var dah da.DataAvailabilityHeader + err = json.Unmarshal([]byte(exampleRoot), &dah) + require.NoError(t, err, "error unmarshaling example root") + + loaded, err := ReadEDS(context.Background(), f, dah.Hash()) + require.NoError(t, err, "error reading EDS from file") + rowRoots, err := loaded.RowRoots() + require.NoError(t, err) + require.Equal(t, dah.RowRoots, rowRoots) + colRoots, err := loaded.ColRoots() + require.NoError(t, err) + require.Equal(t, dah.ColumnRoots, colRoots) +} + +func TestReadEDSContentIntegrityMismatch(t *testing.T) { + writeRandomEDS(t) + dah, err := da.NewDataAvailabilityHeader(edstest.RandEDS(t, 4)) + require.NoError(t, err) + f := openWrittenEDS(t) + defer f.Close() + + _, err = ReadEDS(context.Background(), f, dah.Hash()) + require.ErrorContains(t, err, "share: content integrity mismatch: imported root") +} + +// BenchmarkReadWriteEDS benchmarks the time it takes to write and read an EDS from disk. The +// benchmark is run with a 4x4 ODS to a 64x64 ODS - a higher value can be used, but it will run for +// much longer. 
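+// Run it with, e.g., "go test -run=^$ -bench=BenchmarkReadWriteEDS ./share/eds".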
+func BenchmarkReadWriteEDS(b *testing.B) {
+	ctx, cancel := context.WithCancel(context.Background())
+	b.Cleanup(cancel)
+	for originalDataWidth := 4; originalDataWidth <= 64; originalDataWidth *= 2 {
+		eds := edstest.RandEDS(b, originalDataWidth)
+		dah, err := share.NewRoot(eds)
+		require.NoError(b, err)
+		b.Run(fmt.Sprintf("Writing %dx%d", originalDataWidth, originalDataWidth), func(b *testing.B) {
+			b.ReportAllocs()
+			for i := 0; i < b.N; i++ {
+				f := new(bytes.Buffer)
+				err := WriteEDS(ctx, eds, f)
+				require.NoError(b, err)
+			}
+		})
+		b.Run(fmt.Sprintf("Reading %dx%d", originalDataWidth, originalDataWidth), func(b *testing.B) {
+			b.ReportAllocs()
+			for i := 0; i < b.N; i++ {
+				b.StopTimer()
+				f := new(bytes.Buffer)
+				_ = WriteEDS(ctx, eds, f)
+				b.StartTimer()
+				_, err := ReadEDS(ctx, f, dah.Hash())
+				require.NoError(b, err)
+			}
+		})
+	}
+}
+
+func writeRandomEDS(t *testing.T) *rsmt2d.ExtendedDataSquare {
+	t.Helper()
+	ctx, cancel := context.WithCancel(context.Background())
+	t.Cleanup(cancel)
+	tmpDir := t.TempDir()
+	err := os.Chdir(tmpDir)
+	require.NoError(t, err, "error changing to the temporary test directory")
+	f, err := os.OpenFile("test.car", os.O_WRONLY|os.O_CREATE, 0600)
+	require.NoError(t, err, "error opening file")
+
+	eds := edstest.RandEDS(t, 4)
+	err = WriteEDS(ctx, eds, f)
+	require.NoError(t, err, "error writing EDS to file")
+	f.Close()
+	return eds
+}
+
+func openWrittenEDS(t *testing.T) *os.File {
+	t.Helper()
+	f, err := os.OpenFile("test.car", os.O_RDONLY, 0600)
+	require.NoError(t, err, "error opening file")
+	return f
+}
+
+/*
+use this function as needed to create new test data.
+
+example:
+
+	func Test_CreateData(t *testing.T) {
+		createTestData(t, "celestia-node/share/eds/testdata")
+	}
+*/
+func createTestData(t *testing.T, testDir string) { //nolint:unused
+	t.Helper()
+	ctx, cancel := context.WithCancel(context.Background())
+	t.Cleanup(cancel)
+	err := os.Chdir(testDir)
+	require.NoError(t, err, "changing to the directory")
+	err = os.RemoveAll("example.car")
+	require.NoError(t, err, "removing old file")
+	f, err := os.OpenFile("example.car", os.O_WRONLY|os.O_CREATE, 0600)
+	require.NoError(t, err, "opening file")
+
+	eds := edstest.RandEDS(t, 4)
+	err = WriteEDS(ctx, eds, f)
+	require.NoError(t, err, "writing EDS to file")
+	f.Close()
+	dah, err := share.NewRoot(eds)
+	require.NoError(t, err)
+
+	header, err := json.MarshalIndent(dah, "", "")
+	require.NoError(t, err, "marshaling example root")
+	err = os.RemoveAll("example-root.json")
+	require.NoError(t, err, "removing old file")
+	f, err = os.OpenFile("example-root.json", os.O_WRONLY|os.O_CREATE, 0600)
+	require.NoError(t, err, "opening file")
+	_, err = f.Write(header)
+	require.NoError(t, err, "writing example root to file")
+	f.Close()
+}
diff --git a/share/eds/edstest/testing.go b/share/eds/edstest/testing.go
new file mode 100644
index 0000000000..bf5e664f90
--- /dev/null
+++ b/share/eds/edstest/testing.go
@@ -0,0 +1,48 @@
+package edstest
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/require"
+
+	"github.com/celestiaorg/celestia-app/pkg/wrapper"
+	"github.com/celestiaorg/nmt"
+	"github.com/celestiaorg/rsmt2d"
+
+	"github.com/celestiaorg/celestia-node/share"
+	"github.com/celestiaorg/celestia-node/share/sharetest"
+)
+
+func RandByzantineEDS(t *testing.T, size int, options ...nmt.Option) *rsmt2d.ExtendedDataSquare {
+	eds := RandEDS(t, size)
+	shares := eds.Flattened()
+	copy(share.GetData(shares[0]), share.GetData(shares[1])) // corrupting eds
+	eds, err :=
rsmt2d.ImportExtendedDataSquare(shares, + share.DefaultRSMT2DCodec(), + wrapper.NewConstructor(uint64(size), + options...)) + require.NoError(t, err, "failure to recompute the extended data square") + return eds +} + +// RandEDS generates EDS filled with the random data with the given size for original square. It +// uses require.TestingT to be able to take both a *testing.T and a *testing.B. +func RandEDS(t require.TestingT, size int) *rsmt2d.ExtendedDataSquare { + shares := sharetest.RandShares(t, size*size) + eds, err := rsmt2d.ComputeExtendedDataSquare(shares, share.DefaultRSMT2DCodec(), wrapper.NewConstructor(uint64(size))) + require.NoError(t, err, "failure to recompute the extended data square") + return eds +} + +func RandEDSWithNamespace( + t require.TestingT, + namespace share.Namespace, + size int, +) (*rsmt2d.ExtendedDataSquare, *share.Root) { + shares := sharetest.RandSharesWithNamespace(t, namespace, size*size) + eds, err := rsmt2d.ComputeExtendedDataSquare(shares, share.DefaultRSMT2DCodec(), wrapper.NewConstructor(uint64(size))) + require.NoError(t, err, "failure to recompute the extended data square") + dah, err := share.NewRoot(eds) + require.NoError(t, err) + return eds, dah +} diff --git a/share/eds/inverted_index.go b/share/eds/inverted_index.go new file mode 100644 index 0000000000..799ab6208d --- /dev/null +++ b/share/eds/inverted_index.go @@ -0,0 +1,102 @@ +package eds + +import ( + "context" + "errors" + "fmt" + "runtime" + + "github.com/dgraph-io/badger/v4/options" + "github.com/filecoin-project/dagstore/index" + "github.com/filecoin-project/dagstore/shard" + ds "github.com/ipfs/go-datastore" + dsbadger "github.com/ipfs/go-ds-badger4" + "github.com/multiformats/go-multihash" +) + +const invertedIndexPath = "/inverted_index/" + +// ErrNotFoundInIndex is returned instead of ErrNotFound if the multihash doesn't exist in the index +var ErrNotFoundInIndex = errors.New("does not exist in index") + +// simpleInvertedIndex is an inverted index that only stores a single shard key per multihash. Its +// implementation is modified from the default upstream implementation in dagstore/index. +type simpleInvertedIndex struct { + ds ds.Batching +} + +// newSimpleInvertedIndex returns a new inverted index that only stores a single shard key per +// multihash. This is because we use badger as a storage backend, so updates are expensive, and we +// don't care which shard is used to serve a cid. 
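+// Each datastore entry maps a multihash key to the string form of the most recent shard key, so
+// re-adding a multihash for another shard simply overwrites the previous mapping.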
+func newSimpleInvertedIndex(storePath string) (*simpleInvertedIndex, error) {
+	opts := dsbadger.DefaultOptions // copied by value, so the defaults are not mutated
+	// turn off value log GC as we don't use the value log
+	opts.GcInterval = 0
+	// use the minimum NumLevelZeroTables to trigger L0 compaction sooner
+	opts.NumLevelZeroTables = 1
+	// MaxLevels = 8 will allow the db to grow to ~11.1 TiB
+	opts.MaxLevels = 8
+	// the inverted index stores unique hash keys, so we don't need to detect conflicts
+	opts.DetectConflicts = false
+	// we don't need compression for the inverted index, as it stores only hashes
+	opts.Compression = options.None
+	compactors := runtime.NumCPU()
+	if compactors < 2 {
+		compactors = 2
+	}
+	if compactors > opts.MaxLevels { // ensure there are no more compactors than db table levels
+		compactors = opts.MaxLevels
+	}
+	opts.NumCompactors = compactors
+
+	ds, err := dsbadger.NewDatastore(storePath+invertedIndexPath, &opts)
+	if err != nil {
+		return nil, fmt.Errorf("can't open Badger Datastore: %w", err)
+	}
+
+	return &simpleInvertedIndex{ds: ds}, nil
+}
+
+func (s *simpleInvertedIndex) AddMultihashesForShard(
+	ctx context.Context,
+	mhIter index.MultihashIterator,
+	sk shard.Key,
+) error {
+	// in the original implementation, a mutex is used here to prevent unnecessary updates to the
+	// key. The amount of extra data produced by this is negligible, and the performance benefits
+	// from removing the lock are significant (indexing is a hot path during sync).
+	batch, err := s.ds.Batch(ctx)
+	if err != nil {
+		return fmt.Errorf("failed to create ds batch: %w", err)
+	}
+
+	err = mhIter.ForEach(func(mh multihash.Multihash) error {
+		key := ds.NewKey(string(mh))
+		if err := batch.Put(ctx, key, []byte(sk.String())); err != nil {
+			return fmt.Errorf("failed to put mh=%s, err=%w", mh, err)
+		}
+		return nil
+	})
+	if err != nil {
+		return fmt.Errorf("failed to add index entry: %w", err)
+	}
+
+	if err := batch.Commit(ctx); err != nil {
+		return fmt.Errorf("failed to commit batch: %w", err)
+	}
+	return nil
+}
+
+func (s *simpleInvertedIndex) GetShardsForMultihash(ctx context.Context, mh multihash.Multihash) ([]shard.Key, error) {
+	key := ds.NewKey(string(mh))
+	sbz, err := s.ds.Get(ctx, key)
+	if err != nil {
+		return nil, errors.Join(ErrNotFoundInIndex, err)
+	}
+
+	return []shard.Key{shard.KeyFromString(string(sbz))}, nil
+}
+
+func (s *simpleInvertedIndex) close() error {
+	return s.ds.Close()
+}
diff --git a/share/eds/inverted_index_test.go b/share/eds/inverted_index_test.go
new file mode 100644
index 0000000000..e83c2be267
--- /dev/null
+++ b/share/eds/inverted_index_test.go
@@ -0,0 +1,55 @@
+package eds
+
+import (
+	"context"
+	"testing"
+
+	"github.com/filecoin-project/dagstore/shard"
+	"github.com/multiformats/go-multihash"
+	"github.com/stretchr/testify/require"
+)
+
+type mockIterator struct {
+	mhs []multihash.Multihash
+}
+
+func (m *mockIterator) ForEach(f func(mh multihash.Multihash) error) error {
+	for _, mh := range m.mhs {
+		if err := f(mh); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// TestMultihashesForShard ensures that the inverted index correctly stores a single shard key per
+// duplicate multihash
+func TestMultihashesForShard(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	t.Cleanup(cancel)
+
+	mhs := []multihash.Multihash{
+		multihash.Multihash("mh1"),
+		multihash.Multihash("mh2"),
+		multihash.Multihash("mh3"),
+	}
+
+	mi := &mockIterator{mhs: mhs}
+	path := t.TempDir()
+	invertedIndex, err := newSimpleInvertedIndex(path)
+	
require.NoError(t, err) + + // 1. Add all 3 multihashes to shard1 + err = invertedIndex.AddMultihashesForShard(ctx, mi, shard.KeyFromString("shard1")) + require.NoError(t, err) + shardKeys, err := invertedIndex.GetShardsForMultihash(ctx, mhs[0]) + require.NoError(t, err) + require.Equal(t, []shard.Key{shard.KeyFromString("shard1")}, shardKeys) + + // 2. Add mh1 to shard2, and ensure that mh1 no longer points to shard1 + err = invertedIndex.AddMultihashesForShard(ctx, &mockIterator{mhs: mhs[:1]}, shard.KeyFromString("shard2")) + require.NoError(t, err) + shardKeys, err = invertedIndex.GetShardsForMultihash(ctx, mhs[0]) + require.NoError(t, err) + require.Equal(t, []shard.Key{shard.KeyFromString("shard2")}, shardKeys) +} diff --git a/share/eds/metrics.go b/share/eds/metrics.go new file mode 100644 index 0000000000..8d69a3ec41 --- /dev/null +++ b/share/eds/metrics.go @@ -0,0 +1,270 @@ +package eds + +import ( + "context" + "time" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + + "github.com/celestiaorg/celestia-node/libs/utils" +) + +const ( + failedKey = "failed" + sizeKey = "eds_size" + + putResultKey = "result" + putOK putResult = "ok" + putExists putResult = "exists" + putFailed putResult = "failed" + + opNameKey = "op" + longOpResultKey = "result" + longOpUnresolved longOpResult = "unresolved" + longOpOK longOpResult = "ok" + longOpFailed longOpResult = "failed" + + dagstoreShardStatusKey = "shard_status" +) + +var meter = otel.Meter("eds_store") + +type putResult string + +type longOpResult string + +type metrics struct { + putTime metric.Float64Histogram + getCARTime metric.Float64Histogram + getCARBlockstoreTime metric.Float64Histogram + getDAHTime metric.Float64Histogram + removeTime metric.Float64Histogram + getTime metric.Float64Histogram + hasTime metric.Float64Histogram + listTime metric.Float64Histogram + + shardFailureCount metric.Int64Counter + + longOpTime metric.Float64Histogram + gcTime metric.Float64Histogram +} + +func (s *Store) WithMetrics() error { + putTime, err := meter.Float64Histogram("eds_store_put_time_histogram", + metric.WithDescription("eds store put time histogram(s)")) + if err != nil { + return err + } + + getCARTime, err := meter.Float64Histogram("eds_store_get_car_time_histogram", + metric.WithDescription("eds store get car time histogram(s)")) + if err != nil { + return err + } + + getCARBlockstoreTime, err := meter.Float64Histogram("eds_store_get_car_blockstore_time_histogram", + metric.WithDescription("eds store get car blockstore time histogram(s)")) + if err != nil { + return err + } + + getDAHTime, err := meter.Float64Histogram("eds_store_get_dah_time_histogram", + metric.WithDescription("eds store get dah time histogram(s)")) + if err != nil { + return err + } + + removeTime, err := meter.Float64Histogram("eds_store_remove_time_histogram", + metric.WithDescription("eds store remove time histogram(s)")) + if err != nil { + return err + } + + getTime, err := meter.Float64Histogram("eds_store_get_time_histogram", + metric.WithDescription("eds store get time histogram(s)")) + if err != nil { + return err + } + + hasTime, err := meter.Float64Histogram("eds_store_has_time_histogram", + metric.WithDescription("eds store has time histogram(s)")) + if err != nil { + return err + } + + listTime, err := meter.Float64Histogram("eds_store_list_time_histogram", + metric.WithDescription("eds store list time histogram(s)")) + if err != nil { + return err + } + + shardFailureCount, err := 
meter.Int64Counter("eds_store_shard_failure_counter", + metric.WithDescription("eds store OpShardFail counter")) + if err != nil { + return err + } + + longOpTime, err := meter.Float64Histogram("eds_store_long_operation_time_histogram", + metric.WithDescription("eds store long operation time histogram(s)")) + if err != nil { + return err + } + + gcTime, err := meter.Float64Histogram("eds_store_gc_time", + metric.WithDescription("dagstore gc time histogram(s)")) + if err != nil { + return err + } + + dagStoreShards, err := meter.Int64ObservableGauge("eds_store_dagstore_shards", + metric.WithDescription("dagstore amount of shards by status")) + if err != nil { + return err + } + + if err = s.cache.Load().EnableMetrics(); err != nil { + return err + } + + callback := func(ctx context.Context, observer metric.Observer) error { + stats := s.dgstr.Stats() + for status, amount := range stats { + observer.ObserveInt64(dagStoreShards, int64(amount), + metric.WithAttributes( + attribute.String(dagstoreShardStatusKey, status.String()), + )) + } + return nil + } + + if _, err := meter.RegisterCallback(callback, dagStoreShards); err != nil { + return err + } + + s.metrics = &metrics{ + putTime: putTime, + getCARTime: getCARTime, + getCARBlockstoreTime: getCARBlockstoreTime, + getDAHTime: getDAHTime, + removeTime: removeTime, + getTime: getTime, + hasTime: hasTime, + listTime: listTime, + shardFailureCount: shardFailureCount, + longOpTime: longOpTime, + gcTime: gcTime, + } + return nil +} + +func (m *metrics) observeGCtime(ctx context.Context, dur time.Duration, failed bool) { + if m == nil { + return + } + ctx = utils.ResetContextOnError(ctx) + m.gcTime.Record(ctx, dur.Seconds(), metric.WithAttributes( + attribute.Bool(failedKey, failed))) +} + +func (m *metrics) observeShardFailure(ctx context.Context, shardKey string) { + if m == nil { + return + } + ctx = utils.ResetContextOnError(ctx) + + m.shardFailureCount.Add(ctx, 1, metric.WithAttributes(attribute.String("shard_key", shardKey))) +} + +func (m *metrics) observePut(ctx context.Context, dur time.Duration, result putResult, size uint) { + if m == nil { + return + } + ctx = utils.ResetContextOnError(ctx) + + m.putTime.Record(ctx, dur.Seconds(), metric.WithAttributes( + attribute.String(putResultKey, string(result)), + attribute.Int(sizeKey, int(size)))) +} + +func (m *metrics) observeLongOp(ctx context.Context, opName string, dur time.Duration, result longOpResult) { + if m == nil { + return + } + ctx = utils.ResetContextOnError(ctx) + + m.longOpTime.Record(ctx, dur.Seconds(), metric.WithAttributes( + attribute.String(opNameKey, opName), + attribute.String(longOpResultKey, string(result)))) +} + +func (m *metrics) observeGetCAR(ctx context.Context, dur time.Duration, failed bool) { + if m == nil { + return + } + ctx = utils.ResetContextOnError(ctx) + + m.getCARTime.Record(ctx, dur.Seconds(), metric.WithAttributes( + attribute.Bool(failedKey, failed))) +} + +func (m *metrics) observeCARBlockstore(ctx context.Context, dur time.Duration, failed bool) { + if m == nil { + return + } + ctx = utils.ResetContextOnError(ctx) + + m.getCARBlockstoreTime.Record(ctx, dur.Seconds(), metric.WithAttributes( + attribute.Bool(failedKey, failed))) +} + +func (m *metrics) observeGetDAH(ctx context.Context, dur time.Duration, failed bool) { + if m == nil { + return + } + ctx = utils.ResetContextOnError(ctx) + + m.getDAHTime.Record(ctx, dur.Seconds(), metric.WithAttributes( + attribute.Bool(failedKey, failed))) +} + +func (m *metrics) observeRemove(ctx context.Context, 
dur time.Duration, failed bool) {
+	if m == nil {
+		return
+	}
+	ctx = utils.ResetContextOnError(ctx)
+
+	m.removeTime.Record(ctx, dur.Seconds(), metric.WithAttributes(
+		attribute.Bool(failedKey, failed)))
+}
+
+func (m *metrics) observeGet(ctx context.Context, dur time.Duration, failed bool) {
+	if m == nil {
+		return
+	}
+	ctx = utils.ResetContextOnError(ctx)
+
+	m.getTime.Record(ctx, dur.Seconds(), metric.WithAttributes(
+		attribute.Bool(failedKey, failed)))
+}
+
+func (m *metrics) observeHas(ctx context.Context, dur time.Duration, failed bool) {
+	if m == nil {
+		return
+	}
+	ctx = utils.ResetContextOnError(ctx)
+
+	m.hasTime.Record(ctx, dur.Seconds(), metric.WithAttributes(
+		attribute.Bool(failedKey, failed)))
+}
+
+func (m *metrics) observeList(ctx context.Context, dur time.Duration, failed bool) {
+	if m == nil {
+		return
+	}
+	ctx = utils.ResetContextOnError(ctx)
+
+	m.listTime.Record(ctx, dur.Seconds(), metric.WithAttributes(
+		attribute.Bool(failedKey, failed)))
+}
diff --git a/share/eds/ods.go b/share/eds/ods.go
new file mode 100644
index 0000000000..aa1219d41a
--- /dev/null
+++ b/share/eds/ods.go
@@ -0,0 +1,98 @@
+package eds
+
+import (
+	"bufio"
+	"bytes"
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"io"
+
+	cbor "github.com/ipfs/go-ipld-cbor"
+	"github.com/ipld/go-car"
+	"github.com/ipld/go-car/util"
+)
+
+// bufferedODSReader reads odsSquareSize leaves from the underlying reader into its buffer.
+// It exposes the buffer through its io.Reader implementation
+type bufferedODSReader struct {
+	carReader *bufio.Reader
+	// current is the number of CARv1-encoded leaves that have been read from the reader. When
+	// current reaches odsSquareSize, bufferedODSReader prevents further reads by returning io.EOF
+	current, odsSquareSize int
+	buf                    *bytes.Buffer
+}
+
+// ODSReader reads CARv1 encoded data from an io.Reader and limits the reader to the CAR header
+// and first quadrant (ODS)
+func ODSReader(carReader io.Reader) (io.Reader, error) {
+	if carReader == nil {
+		return nil, errors.New("eds: can't create ODSReader over nil reader")
+	}
+
+	odsR := &bufferedODSReader{
+		carReader: bufio.NewReader(carReader),
+		buf:       new(bytes.Buffer),
+	}
+
+	// the first LdRead reads the full CAR header to determine the number of shares in the ODS
+	data, err := util.LdRead(odsR.carReader)
+	if err != nil {
+		return nil, fmt.Errorf("reading header: %w", err)
+	}
+
+	var header car.CarHeader
+	err = cbor.DecodeInto(data, &header)
+	if err != nil {
+		return nil, fmt.Errorf("invalid header: %w", err)
+	}
+
+	// the car header contains both row roots and col roots, which is why
+	// we divide by 4 to get the ODSWidth
+	odsWidth := len(header.Roots) / 4
+	odsR.odsSquareSize = odsWidth * odsWidth
+
+	// NewCarReader will expect to read the header first, so write it first
+	return odsR, util.LdWrite(odsR.buf, data)
+}
+
+func (r *bufferedODSReader) Read(p []byte) (n int, err error) {
+	// read leaves into the buffer until it has enough data to fill the provided slice,
+	// or the full ODS has been read
+	for r.current < r.odsSquareSize && r.buf.Len() < len(p) {
+		if err := r.readLeaf(); err != nil {
+			return 0, err
+		}
+
+		r.current++
+	}
+
+	// read buffer to slice
+	return r.buf.Read(p)
+}
+
+// readLeaf reads one leaf from the underlying reader into the bufferedODSReader's buffer
+func (r *bufferedODSReader) readLeaf() error {
+	if _, err := r.carReader.Peek(1); err != nil { // no more blocks, likely clean io.EOF
+		return err
+	}
+
+	l, err := binary.ReadUvarint(r.carReader)
+	if err != nil {
+		if err == io.EOF {
+			return 
io.ErrUnexpectedEOF // don't silently pretend this is a clean EOF
+		}
+		return err
+	}
+
+	if l > uint64(util.MaxAllowedSectionSize) { // Don't OOM
+		return fmt.Errorf("malformed car; header `length`: %v is bigger than %v", l, util.MaxAllowedSectionSize)
+	}
+
+	buf := make([]byte, 8)
+	n := binary.PutUvarint(buf, l)
+	r.buf.Write(buf[:n])
+
+	_, err = r.buf.ReadFrom(io.LimitReader(r.carReader, int64(l)))
+	return err
+}
diff --git a/share/eds/ods_test.go b/share/eds/ods_test.go
new file mode 100644
index 0000000000..0f7c69e708
--- /dev/null
+++ b/share/eds/ods_test.go
@@ -0,0 +1,110 @@
+package eds
+
+import (
+	"context"
+	"io"
+	"testing"
+
+	"github.com/ipld/go-car"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	"github.com/celestiaorg/celestia-node/share"
+)
+
+// TestODSReader ensures that the reader returned from ODSReader is capable of reading the CAR
+// header and ODS.
+func TestODSReader(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	t.Cleanup(cancel)
+
+	// launch eds store
+	edsStore, err := newStore(t)
+	require.NoError(t, err)
+	err = edsStore.Start(ctx)
+	require.NoError(t, err)
+
+	// generate random eds data and put it into the store
+	eds, dah := randomEDS(t)
+	err = edsStore.Put(ctx, dah.Hash(), eds)
+	require.NoError(t, err)
+
+	// get CAR reader from store
+	r, err := edsStore.GetCAR(ctx, dah.Hash())
+	assert.NoError(t, err)
+	defer func() {
+		require.NoError(t, r.Close())
+	}()
+
+	// create ODSReader wrapper based on car reader to limit reads to ODS only
+	odsR, err := ODSReader(r)
+	assert.NoError(t, err)
+
+	// create CAR reader from ODSReader
+	carReader, err := car.NewCarReader(odsR)
+	assert.NoError(t, err)
+
+	// validate ODS could be obtained from reader
+	for i := 0; i < 4; i++ {
+		for j := 0; j < 4; j++ {
+			// pick share from original eds
+			original := eds.GetCell(uint(i), uint(j))
+
+			// read block from odsReader based reader
+			block, err := carReader.Next()
+			assert.NoError(t, err)
+
+			// check that original data from eds is same as data from reader
+			assert.Equal(t, original, share.GetData(block.RawData()))
+		}
+	}
+
+	// make sure no excess data is available to get from the reader
+	_, err = carReader.Next()
+	assert.ErrorIs(t, err, io.EOF)
+}
+
+// TestODSReaderReconstruction ensures that the reader returned from ODSReader provides sufficient
+// data for EDS reconstruction
+func TestODSReaderReconstruction(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	t.Cleanup(cancel)
+
+	// launch eds store
+	edsStore, err := newStore(t)
+	require.NoError(t, err)
+	err = edsStore.Start(ctx)
+	require.NoError(t, err)
+
+	// generate random eds data and put it into the store
+	eds, dah := randomEDS(t)
+	err = edsStore.Put(ctx, dah.Hash(), eds)
+	require.NoError(t, err)
+
+	// get CAR reader from store
+	r, err := edsStore.GetCAR(ctx, dah.Hash())
+	assert.NoError(t, err)
+	defer func() {
+		require.NoError(t, r.Close())
+	}()
+
+	// create ODSReader wrapper based on car reader to limit reads to ODS only
+	odsR, err := ODSReader(r)
+	assert.NoError(t, err)
+
+	// reconstruct EDS from ODSReader
+	loaded, err := ReadEDS(ctx, odsR, dah.Hash())
+	assert.NoError(t, err)
+
+	rowRoots, err := eds.RowRoots()
+	require.NoError(t, err)
+	loadedRowRoots, err := loaded.RowRoots()
+	require.NoError(t, err)
+	require.Equal(t, rowRoots, loadedRowRoots)
+
+	colRoots, err := eds.ColRoots()
+	require.NoError(t, err)
+	loadedColRoots, err := loaded.ColRoots()
+	require.NoError(t, err)
+	
require.Equal(t, colRoots, loadedColRoots)
+}
diff --git a/ipld/retriever.go b/share/eds/retriever.go
similarity index 64%
rename from ipld/retriever.go
rename to share/eds/retriever.go
index ea2224898d..c2966c3953 100644
--- a/ipld/retriever.go
+++ b/share/eds/retriever.go
@@ -1,59 +1,62 @@
-package ipld
+package eds
 
 import (
 	"context"
-	"encoding/hex"
 	"errors"
 	"sync"
 	"sync/atomic"
 	"time"
 
-	"github.com/ipfs/go-blockservice"
+	"github.com/ipfs/boxo/blockservice"
 	"github.com/ipfs/go-cid"
-	format "github.com/ipfs/go-ipld-format"
 	logging "github.com/ipfs/go-log/v2"
-	"github.com/tendermint/tendermint/pkg/da"
-	"github.com/tendermint/tendermint/pkg/wrapper"
 	"go.opentelemetry.io/otel"
 	"go.opentelemetry.io/otel/attribute"
 	"go.opentelemetry.io/otel/codes"
 	"go.opentelemetry.io/otel/trace"
 
-	"github.com/celestiaorg/celestia-node/ipld/plugin"
+	"github.com/celestiaorg/celestia-app/pkg/da"
+	"github.com/celestiaorg/celestia-app/pkg/wrapper"
 	"github.com/celestiaorg/nmt"
 	"github.com/celestiaorg/rsmt2d"
-)
 
-var log = logging.Logger("ipld")
+	"github.com/celestiaorg/celestia-node/share"
+	"github.com/celestiaorg/celestia-node/share/eds/byzantine"
+	"github.com/celestiaorg/celestia-node/share/ipld"
+)
 
-var tracer = otel.Tracer("ipld")
+var (
+	log    = logging.Logger("share/eds")
+	tracer = otel.Tracer("share/eds")
+)
 
 // Retriever retrieves rsmt2d.ExtendedDataSquares from the IPLD network.
 // Instead of requesting data 'share by share' it requests data by quadrants
 // minimizing bandwidth usage in the happy cases.
 //
-// ---- ----
-// | 0 | 1 |
-// ---- ----
-// | 2 | 3 |
-// ---- ----
+//	---- ----
+//	| 0 | 1 |
+//	---- ----
+//	| 2 | 3 |
+//	---- ----
+//
 // Retriever randomly picks one of the data square quadrants and tries to request them one by one
 // until it is able to reconstruct the whole square.
 type Retriever struct {
 	bServ blockservice.BlockService
 }
 
-// NewRetriever creates a new instance of the Retriever over IPLD Service and rmst2d.Codec
+// NewRetriever creates a new instance of the Retriever over IPLD BlockService and rsmt2d.Codec
 func NewRetriever(bServ blockservice.BlockService) *Retriever {
 	return &Retriever{bServ: bServ}
 }
 
 // Retrieve retrieves all the data committed to DataAvailabilityHeader.
 //
-// If not available locally, it aims to request from the network only one quadrant (1/4) of the data square
-// and reconstructs the other three quadrants (3/4). If the requested quadrant is not available within
-// RetrieveQuadrantTimeout, it starts requesting another quadrant until either the data is
-// reconstructed, context is canceled or ErrByzantine is generated.
+// If not available locally, it aims to request from the network only one quadrant (1/4) of the
+// data square and reconstructs the other three quadrants (3/4). If the requested quadrant is not
+// available within RetrieveQuadrantTimeout, it starts requesting another quadrant until either the
+// data is reconstructed, context is canceled or ErrByzantine is generated.
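+//
+// A minimal call sketch (assumes an existing blockservice bServ and DAHeader dah;
+// for illustration only):
+//
+//	r := NewRetriever(bServ)
+//	eds, err := r.Retrieve(ctx, dah)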
func (r *Retriever) Retrieve(ctx context.Context, dah *da.DataAvailabilityHeader) (*rsmt2d.ExtendedDataSquare, error) { ctx, cancel := context.WithCancel(ctx) defer cancel() // cancels all the ongoing requests if reconstruction succeeds early @@ -61,11 +64,10 @@ func (r *Retriever) Retrieve(ctx context.Context, dah *da.DataAvailabilityHeader ctx, span := tracer.Start(ctx, "retrieve-square") defer span.End() span.SetAttributes( - attribute.Int("size", len(dah.RowsRoots)), - attribute.String("data_hash", hex.EncodeToString(dah.Hash())), + attribute.Int("size", len(dah.RowRoots)), ) - log.Debugw("retrieving data square", "data_hash", hex.EncodeToString(dah.Hash()), "size", len(dah.RowsRoots)) + log.Debugw("retrieving data square", "data_hash", dah.String(), "size", len(dah.RowRoots)) ses, err := r.newSession(ctx, dah) if err != nil { return nil, err @@ -86,7 +88,7 @@ func (r *Retriever) Retrieve(ctx context.Context, dah *da.DataAvailabilityHeader var errByz *rsmt2d.ErrByzantineData if errors.As(err, &errByz) { span.RecordError(err) - return nil, NewErrByzantine(ctx, r.bServ, dah, errByz) + return nil, byzantine.NewErrByzantine(ctx, r.bServ, dah, errByz) } log.Warnw("not enough shares to reconstruct data square, requesting more...", "err", err) @@ -101,58 +103,57 @@ func (r *Retriever) Retrieve(ctx context.Context, dah *da.DataAvailabilityHeader // quadrant request retries. Also, provides an API // to reconstruct the block once enough shares are fetched. type retrievalSession struct { - bget blockservice.BlockGetter - adder *NmtNodeAdder - - treeFn rsmt2d.TreeConstructorFn - codec rsmt2d.Codec - - dah *da.DataAvailabilityHeader - squareImported *rsmt2d.ExtendedDataSquare + dah *da.DataAvailabilityHeader + bget blockservice.BlockGetter - quadrants []*quadrant - sharesLks []sync.Mutex - sharesCount uint32 - - squareLk sync.RWMutex - square [][]byte - squareSig chan struct{} - squareDn chan struct{} + // TODO(@Wondertan): Extract into a separate data structure + // https://github.com/celestiaorg/rsmt2d/issues/135 + squareQuadrants []*quadrant + squareCellsLks [][]sync.Mutex + squareCellsCount uint32 + squareSig chan struct{} + squareDn chan struct{} + squareLk sync.RWMutex + square *rsmt2d.ExtendedDataSquare span trace.Span } // newSession creates a new retrieval session and kicks off requesting process. func (r *Retriever) newSession(ctx context.Context, dah *da.DataAvailabilityHeader) (*retrievalSession, error) { - size := len(dah.RowsRoots) - adder := NewNmtNodeAdder( - ctx, - r.bServ, - format.MaxSizeBatchOption(batchSize(size)), - ) - ses := &retrievalSession{ - bget: blockservice.NewSession(ctx, r.bServ), - adder: adder, - treeFn: func() rsmt2d.Tree { - tree := wrapper.NewErasuredNamespacedMerkleTree(uint64(size)/2, nmt.NodeVisitor(adder.Visit)) - return &tree - }, - codec: DefaultRSMT2DCodec(), - dah: dah, - quadrants: newQuadrants(dah), - sharesLks: make([]sync.Mutex, size*size), - square: make([][]byte, size*size), - squareSig: make(chan struct{}, 1), - squareDn: make(chan struct{}), - span: trace.SpanFromContext(ctx), + size := len(dah.RowRoots) + + treeFn := func(_ rsmt2d.Axis, index uint) rsmt2d.Tree { + // use proofs adder if provided, to cache collected proofs while recomputing the eds + var opts []nmt.Option + visitor := ipld.ProofsAdderFromCtx(ctx).VisitFn() + if visitor != nil { + opts = append(opts, nmt.NodeVisitor(visitor)) + } + + tree := wrapper.NewErasuredNamespacedMerkleTree(uint64(size)/2, index, opts...) 
+ return &tree } - square, err := rsmt2d.ImportExtendedDataSquare(ses.square, ses.codec, ses.treeFn) + square, err := rsmt2d.NewExtendedDataSquare(share.DefaultRSMT2DCodec(), treeFn, uint(size), share.Size) if err != nil { return nil, err } - ses.squareImported = square + ses := &retrievalSession{ + dah: dah, + bget: blockservice.NewSession(ctx, r.bServ), + squareQuadrants: newQuadrants(dah), + squareCellsLks: make([][]sync.Mutex, size), + squareSig: make(chan struct{}, 1), + squareDn: make(chan struct{}), + square: square, + span: trace.SpanFromContext(ctx), + } + for i := range ses.squareCellsLks { + ses.squareCellsLks[i] = make([]sync.Mutex, size) + } + go ses.request(ctx) return ses, nil } @@ -167,36 +168,24 @@ func (rs *retrievalSession) Done() <-chan struct{} { // Reconstruct tries to reconstruct the data square and returns it on success. func (rs *retrievalSession) Reconstruct(ctx context.Context) (*rsmt2d.ExtendedDataSquare, error) { if rs.isReconstructed() { - return rs.squareImported, nil + return rs.square, nil } // prevent further writes to the square rs.squareLk.Lock() defer rs.squareLk.Unlock() - // TODO(@Wondertan): This is bad! - // * We should not reimport the square multiple times - // * We should set shares into imported square via SetShare(https://github.com/celestiaorg/rsmt2d/issues/83) - // to accomplish the above point. - { - squareImported, err := rsmt2d.ImportExtendedDataSquare(rs.square, rs.codec, rs.treeFn) - if err != nil { - return nil, err - } - rs.squareImported = squareImported - } - _, span := tracer.Start(ctx, "reconstruct-square") defer span.End() // and try to repair with what we have - err := rs.squareImported.Repair(rs.dah.RowsRoots, rs.dah.ColumnRoots, rs.codec, rs.treeFn) + err := rs.square.Repair(rs.dah.RowRoots, rs.dah.ColumnRoots) if err != nil { span.RecordError(err) return nil, err } - log.Infow("data square reconstructed", "data_hash", hex.EncodeToString(rs.dah.Hash()), "size", len(rs.dah.RowsRoots)) + log.Infow("data square reconstructed", "data_hash", rs.dah.String(), "size", len(rs.dah.RowRoots)) close(rs.squareDn) - return rs.squareImported, nil + return rs.square, nil } // isReconstructed report true whether the square attached to the session @@ -213,14 +202,7 @@ func (rs *retrievalSession) isReconstructed() bool { func (rs *retrievalSession) Close() error { defer rs.span.End() - // All shares which were requested or repaired are written to disk via `Commit`. - // Note that we store *all*, so they are served to the network, including commit of incorrect - // data(BEFP/ErrByzantineCase case), so that the network can check BEFP. - err := rs.adder.Commit() - if err != nil { - log.Errorw("committing DAG", "err", err) - } - return err + return nil } // request kicks off quadrants requests. 
@@ -229,8 +211,8 @@ func (rs *retrievalSession) Close() error { func (rs *retrievalSession) request(ctx context.Context) { t := time.NewTicker(RetrieveQuadrantTimeout) defer t.Stop() - for retry := 0; retry < len(rs.quadrants); retry++ { - q := rs.quadrants[retry] + for retry := 0; retry < len(rs.squareQuadrants); retry++ { + q := rs.squareQuadrants[retry] log.Debugw("requesting quadrant", "axis", q.source, "x", q.x, @@ -238,7 +220,7 @@ func (rs *retrievalSession) request(ctx context.Context) { "size", len(q.roots), ) rs.span.AddEvent("requesting quadrant", trace.WithAttributes( - attribute.Int("axis", q.source), + attribute.Int("axis", int(q.source)), attribute.Int("x", q.x), attribute.Int("y", q.y), attribute.Int("size", len(q.roots)), @@ -257,7 +239,7 @@ func (rs *retrievalSession) request(ctx context.Context) { "size", len(q.roots), ) rs.span.AddEvent("quadrant request timeout", trace.WithAttributes( - attribute.Int("axis", q.source), + attribute.Int("axis", int(q.source)), attribute.Int("x", q.x), attribute.Int("y", q.y), attribute.Int("size", len(q.roots)), @@ -272,10 +254,9 @@ func (rs *retrievalSession) doRequest(ctx context.Context, q *quadrant) { for i, root := range q.roots { go func(i int, root cid.Cid) { // get the root node - nd, err := plugin.GetNode(ctx, rs.bget, root) + nd, err := ipld.GetNode(ctx, rs.bget, root) if err != nil { rs.span.RecordError(err, trace.WithAttributes( - attribute.String("requesting-root", root.String()), attribute.Int("root-index", i), )) return @@ -283,16 +264,17 @@ func (rs *retrievalSession) doRequest(ctx context.Context, q *quadrant) { // and go get shares of left or the right side of the whole col/row axis // the left or the right side of the tree represent some portion of the quadrant // which we put into the rs.square share-by-share by calculating shares' indexes using q.index - GetShares(ctx, rs.bget, nd.Links()[q.x].Cid, size, func(j int, share Share) { + ipld.GetShares(ctx, rs.bget, nd.Links()[q.x].Cid, size, func(j int, share share.Share) { // NOTE: Each share can appear twice here, for a Row and Col, respectively. // These shares are always equal, and we allow only the first one to be written // in the square. - // NOTE-2: We never actually fetch shares from the network *twice*. - // Once a share is downloaded from the network it is cached on the IPLD(blockservice) level. - // calc index of the share - idx := q.index(i, j) + // NOTE-2: We may never actually fetch shares from the network *twice*. + // Once a share is downloaded from the network it may be cached on the IPLD(blockservice) level. + // + // calc position of the share + x, y := q.pos(i, j) // try to lock the share - ok := rs.sharesLks[idx].TryLock() + ok := rs.squareCellsLks[x][y].TryLock() if !ok { // if already locked and written - do nothing return @@ -309,14 +291,19 @@ func (rs *retrievalSession) doRequest(ctx context.Context, q *quadrant) { if rs.isReconstructed() { return } - rs.square[idx] = share + if err := rs.square.SetCell(uint(x), uint(y), share); err != nil { + // safe to ignore as: + // * share size already verified + // * the same share might come from either Row or Col + return + } // if we have >= 1/4 of the square we can start trying to Reconstruct // TODO(@Wondertan): This is not an ideal way to know when to start // reconstruction and can cause idle reconstruction tries in some cases, // but it is totally fine for the happy case and for now. 
// The earlier we correctly know that we have the full square - the earlier
 			// we cancel ongoing requests - the less data is being wastedly transferred.
-			if atomic.AddUint32(&rs.sharesCount, 1) >= uint32(size*size) {
+			if atomic.AddUint32(&rs.squareCellsCount, 1) >= uint32(size*size) {
 				select {
 				case rs.squareSig <- struct{}{}:
 				default:
diff --git a/share/eds/retriever_no_race_test.go b/share/eds/retriever_no_race_test.go
new file mode 100644
index 0000000000..15c6aa2fc4
--- /dev/null
+++ b/share/eds/retriever_no_race_test.go
@@ -0,0 +1,55 @@
+//go:build !race
+
+package eds
+
+import (
+	"context"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/require"
+
+	"github.com/celestiaorg/celestia-app/pkg/da"
+	"github.com/celestiaorg/celestia-app/pkg/wrapper"
+	"github.com/celestiaorg/nmt"
+	"github.com/celestiaorg/rsmt2d"
+
+	"github.com/celestiaorg/celestia-node/share"
+	"github.com/celestiaorg/celestia-node/share/eds/byzantine"
+	"github.com/celestiaorg/celestia-node/share/eds/edstest"
+	"github.com/celestiaorg/celestia-node/share/ipld"
+)
+
+func TestRetriever_ByzantineError(t *testing.T) {
+	const width = 8
+	ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
+	defer cancel()
+
+	bserv := ipld.NewMemBlockservice()
+	shares := edstest.RandEDS(t, width).Flattened()
+	_, err := ipld.ImportShares(ctx, shares, bserv)
+	require.NoError(t, err)
+
+	// corrupt shares so that eds erasure coding does not match
+	copy(shares[14][share.NamespaceSize:], shares[15][share.NamespaceSize:])
+
+	// import corrupted eds
+	batchAdder := ipld.NewNmtNodeAdder(ctx, bserv, ipld.MaxSizeBatchOption(width*2))
+	attackerEDS, err := rsmt2d.ImportExtendedDataSquare(
+		shares,
+		share.DefaultRSMT2DCodec(),
+		wrapper.NewConstructor(uint64(width),
+			nmt.NodeVisitor(batchAdder.Visit)),
+	)
+	require.NoError(t, err)
+	err = batchAdder.Commit()
+	require.NoError(t, err)
+
+	// ensure we receive an error
+	dah, err := da.NewDataAvailabilityHeader(attackerEDS)
+	require.NoError(t, err)
+	r := NewRetriever(bserv)
+	_, err = r.Retrieve(ctx, &dah)
+	var errByz *byzantine.ErrByzantine
+	require.ErrorAs(t, err, &errByz)
+}
diff --git a/ipld/retriever_quadrant.go b/share/eds/retriever_quadrant.go
similarity index 56%
rename from ipld/retriever_quadrant.go
rename to share/eds/retriever_quadrant.go
index a35458b66d..3d616e9cd4 100644
--- a/ipld/retriever_quadrant.go
+++ b/share/eds/retriever_quadrant.go
@@ -1,14 +1,15 @@
-package ipld
+package eds
 
 import (
-	"math"
 	"math/rand"
 	"time"
 
 	"github.com/ipfs/go-cid"
-	"github.com/tendermint/tendermint/pkg/da"
-
-	"github.com/celestiaorg/celestia-node/ipld/plugin"
+	"github.com/celestiaorg/celestia-app/pkg/da"
+	"github.com/celestiaorg/rsmt2d"
+
+	"github.com/celestiaorg/celestia-node/share/ipld"
 )
 
 const (
@@ -25,8 +26,8 @@
 // starting to retrieve another quadrant.
 //
 // NOTE:
-// * The whole data square must be retrieved in less than block time.
-// * We have 4 quadrants from two sources(rows, cols) which equals to 8 in total.
+// - The whole data square must be retrieved in less than block time.
+// - We have 4 quadrants from two sources(rows, cols) which equals to 8 in total.
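+//
+// For illustration only (the actual blockTime and numQuadrants constants are defined
+// in the const block above): with a hypothetical blockTime of 16s and numQuadrants of 8,
+// the timeout works out to 16s / 8 * 2 = 4s, so all 8 quadrant attempts fit within two
+// block times.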
 var RetrieveQuadrantTimeout = blockTime / numQuadrants * 2
 
 type quadrant struct {
@@ -41,10 +42,8 @@ type quadrant struct {
 	// |(0;1)| |(1;1)|
 	//  ------  -------
 	x, y int
-	// source defines the axis for quadrant
-	// it can be either 1 or 0 similar to x and y
-	// where 0 is Row source and 1 is Col respectively
-	source int
+	// source defines the axis (Row or Col) to fetch the quadrant from
+	source rsmt2d.Axis
 }
 
 // newQuadrants constructs a slice of quadrants from DAHeader.
@@ -53,7 +52,7 @@ type quadrant struct {
 func newQuadrants(dah *da.DataAvailabilityHeader) []*quadrant {
 	// combine all the roots into one slice, so they can be easily accessible by index
 	daRoots := [][][]byte{
-		dah.RowsRoots,
+		dah.RowRoots,
 		dah.ColumnRoots,
 	}
 	// create a quadrant slice for each source(row;col)
@@ -65,21 +64,17 @@
 		size, qsize := len(daRoots[source]), len(daRoots[source])/2
 		roots := make([]cid.Cid, size)
 		for i, root := range daRoots[source] {
-			roots[i] = plugin.MustCidFromNamespacedSha256(root)
+			roots[i] = ipld.MustCidFromNamespacedSha256(root)
 		}
 
 		for i := range quadrants {
-			// convert quadrant index into coordinates
+			// convert the quadrant's 1D index into 2D coordinates
 			x, y := i%2, i/2
-			if source == 1 { // swap coordinates for column
-				x, y = y, x
-			}
-
 			quadrants[i] = &quadrant{
 				roots:  roots[qsize*y : qsize*(y+1)],
 				x:      x,
 				y:      y,
-				source: source,
+				source: rsmt2d.Axis(source),
 			}
 		}
 	}
@@ -92,29 +87,16 @@
 	return quadrants
 }
 
-// index calculates index for a share in a data square slice flattened by rows.
-//
-// NOTE: The complexity of the formula below comes from:
-// * Goal to avoid share copying
-// * Goal to make formula generic for both rows and cols
-// * While data square is flattened by rows only
-// TODO(@Wondertan): This can be simplified by making rsmt2d working over 3D byte slice(not flattened)
-func (q *quadrant) index(rootIdx, cellIdx int) int {
-	size := len(q.roots)
-	// half square offsets, e.g. share is from Q3,
-	// so we add to index Q1+Q2
-	halfSquareOffsetCol := pow(size*2, q.source)
-	halfSquareOffsetRow := pow(size*2, q.source^1)
-	// offsets for the axis, e.g. share is from Q4.
-	// so we add to index Q3
-	offsetX := q.x * halfSquareOffsetCol * size
-	offsetY := q.y * halfSquareOffsetRow * size
-
-	rootIdx *= halfSquareOffsetRow
-	cellIdx *= halfSquareOffsetCol
-	return rootIdx + cellIdx + offsetX + offsetY
-}
-
-func pow(x, y int) int {
-	return int(math.Pow(float64(x), float64(y)))
+// pos calculates the position of a share in the data square.
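+//
+// Worked example (illustrative): in a 4x4 EDS each quadrant holds 2 roots
+// (len(q.roots) == 2), so for a Row-source quadrant at (x=1, y=1), pos(0, 1)
+// yields row = 0 + 2*1 = 2 and col = 1 + 2*1 = 3, i.e. cell (2, 3) in the
+// bottom-right quadrant.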
+func (q *quadrant) pos(rootIdx, cellIdx int) (int, int) { + cellIdx += len(q.roots) * q.x + rootIdx += len(q.roots) * q.y + switch q.source { + case rsmt2d.Row: + return rootIdx, cellIdx + case rsmt2d.Col: + return cellIdx, rootIdx + default: + panic("unknown axis") + } } diff --git a/share/eds/retriever_test.go b/share/eds/retriever_test.go new file mode 100644 index 0000000000..95da345d17 --- /dev/null +++ b/share/eds/retriever_test.go @@ -0,0 +1,214 @@ +package eds + +import ( + "context" + "errors" + "fmt" + "testing" + "time" + + "github.com/ipfs/boxo/blockservice" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/celestiaorg/celestia-app/pkg/da" + "github.com/celestiaorg/rsmt2d" + + "github.com/celestiaorg/celestia-node/header" + "github.com/celestiaorg/celestia-node/header/headertest" + "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/share/eds/byzantine" + "github.com/celestiaorg/celestia-node/share/eds/edstest" + "github.com/celestiaorg/celestia-node/share/ipld" + "github.com/celestiaorg/celestia-node/share/sharetest" +) + +func TestRetriever_Retrieve(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + bServ := ipld.NewMemBlockservice() + r := NewRetriever(bServ) + + type test struct { + name string + squareSize int + } + tests := []test{ + {"1x1(min)", 1}, + {"2x2(med)", 2}, + {"4x4(med)", 4}, + {"8x8(med)", 8}, + {"16x16(med)", 16}, + {"32x32(med)", 32}, + {"64x64(med)", 64}, + {"128x128(max)", share.MaxSquareSize}, + } + for _, tc := range tests { + tc := tc + t.Run(tc.name, func(t *testing.T) { + // generate EDS + shares := sharetest.RandShares(t, tc.squareSize*tc.squareSize) + in, err := ipld.AddShares(ctx, shares, bServ) + require.NoError(t, err) + + // limit with timeout, specifically retrieval + ctx, cancel := context.WithTimeout(ctx, time.Minute*5) // the timeout is big for the max size which is long + defer cancel() + + dah, err := da.NewDataAvailabilityHeader(in) + require.NoError(t, err) + out, err := r.Retrieve(ctx, &dah) + require.NoError(t, err) + assert.True(t, in.Equals(out)) + }) + } +} + +// TestRetriever_MultipleRandQuadrants asserts that reconstruction succeeds +// when any three random quadrants requested. +func TestRetriever_MultipleRandQuadrants(t *testing.T) { + RetrieveQuadrantTimeout = time.Millisecond * 500 + const squareSize = 32 + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + defer cancel() + + bServ := ipld.NewMemBlockservice() + r := NewRetriever(bServ) + + // generate EDS + shares := sharetest.RandShares(t, squareSize*squareSize) + in, err := ipld.AddShares(ctx, shares, bServ) + require.NoError(t, err) + + dah, err := da.NewDataAvailabilityHeader(in) + require.NoError(t, err) + ses, err := r.newSession(ctx, &dah) + require.NoError(t, err) + + // wait until two additional quadrants requested + // this reliably allows us to reproduce the issue + time.Sleep(RetrieveQuadrantTimeout * 2) + // then ensure we have enough shares for reconstruction for slow machines e.g. 
CI + <-ses.Done() + + _, err = ses.Reconstruct(ctx) + assert.NoError(t, err) +} + +func TestFraudProofValidation(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second*15) + defer t.Cleanup(cancel) + bServ := ipld.NewMemBlockservice() + + odsSize := []int{2, 4, 16, 32, 64, 128} + for _, size := range odsSize { + t.Run(fmt.Sprintf("ods size:%d", size), func(t *testing.T) { + var errByz *byzantine.ErrByzantine + faultHeader, err := generateByzantineError(ctx, t, size, bServ) + require.True(t, errors.As(err, &errByz)) + + p := byzantine.CreateBadEncodingProof([]byte("hash"), faultHeader.Height(), errByz) + err = p.Validate(faultHeader) + require.NoError(t, err) + }) + } +} + +func generateByzantineError( + ctx context.Context, + t *testing.T, + odsSize int, + bServ blockservice.BlockService, +) (*header.ExtendedHeader, error) { + eds := edstest.RandByzantineEDS(t, odsSize) + err := ipld.ImportEDS(ctx, eds, bServ) + require.NoError(t, err) + h := headertest.ExtendedHeaderFromEDS(t, 1, eds) + _, err = NewRetriever(bServ).Retrieve(ctx, h.DAH) + + return h, err +} + +/* +BenchmarkBEFPValidation/ods_size:2 31273 38819 ns/op 68052 B/op 366 allocs/op +BenchmarkBEFPValidation/ods_size:4 14664 80439 ns/op 135892 B/op 894 allocs/op +BenchmarkBEFPValidation/ods_size:16 2850 386178 ns/op 587890 B/op 4945 allocs/op +BenchmarkBEFPValidation/ods_size:32 1399 874490 ns/op 1233399 B/op 11284 allocs/op +BenchmarkBEFPValidation/ods_size:64 619 2047540 ns/op 2578008 B/op 25364 allocs/op +BenchmarkBEFPValidation/ods_size:128 259 4934375 ns/op 5418406 B/op 56345 allocs/op +*/ +func BenchmarkBEFPValidation(b *testing.B) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second*15) + defer b.Cleanup(cancel) + bServ := ipld.NewMemBlockservice() + r := NewRetriever(bServ) + t := &testing.T{} + odsSize := []int{2, 4, 16, 32, 64, 128} + for _, size := range odsSize { + b.Run(fmt.Sprintf("ods size:%d", size), func(b *testing.B) { + b.ResetTimer() + b.StopTimer() + eds := edstest.RandByzantineEDS(t, size) + err := ipld.ImportEDS(ctx, eds, bServ) + require.NoError(t, err) + h := headertest.ExtendedHeaderFromEDS(t, 1, eds) + _, err = r.Retrieve(ctx, h.DAH) + var errByz *byzantine.ErrByzantine + require.ErrorAs(t, err, &errByz) + b.StartTimer() + + for i := 0; i < b.N; i++ { + b.ReportAllocs() + p := byzantine.CreateBadEncodingProof([]byte("hash"), h.Height(), errByz) + err = p.Validate(h) + require.NoError(b, err) + } + }) + } +} + +/* +BenchmarkNewErrByzantineData/ods_size:2 29605 38846 ns/op 49518 B/op 579 allocs/op +BenchmarkNewErrByzantineData/ods_size:4 11380 105302 ns/op 134967 B/op 1571 allocs/op +BenchmarkNewErrByzantineData/ods_size:16 1902 631086 ns/op 830199 B/op 9601 allocs/op +BenchmarkNewErrByzantineData/ods_size:32 756 1530985 ns/op 1985272 B/op 22901 allocs/op +BenchmarkNewErrByzantineData/ods_size:64 340 3445544 ns/op 4767053 B/op 54704 allocs/op +BenchmarkNewErrByzantineData/ods_size:128 132 8740678 ns/op 11991093 B/op 136584 allocs/op +*/ +func BenchmarkNewErrByzantineData(b *testing.B) { + odsSize := []int{2, 4, 16, 32, 64, 128} + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + defer cancel() + bServ := ipld.NewMemBlockservice() + r := NewRetriever(bServ) + t := &testing.T{} + for _, size := range odsSize { + b.Run(fmt.Sprintf("ods size:%d", size), func(b *testing.B) { + b.StopTimer() + eds := edstest.RandByzantineEDS(t, size) + err := ipld.ImportEDS(ctx, eds, bServ) + require.NoError(t, err) + h := headertest.ExtendedHeaderFromEDS(t, 
1, eds) + ses, err := r.newSession(ctx, h.DAH) + require.NoError(t, err) + + select { + case <-ctx.Done(): + b.Fatal(ctx.Err()) + case <-ses.Done(): + } + + _, err = ses.Reconstruct(ctx) + assert.NoError(t, err) + var errByz *rsmt2d.ErrByzantineData + require.ErrorAs(t, err, &errByz) + b.StartTimer() + + for i := 0; i < b.N; i++ { + err = byzantine.NewErrByzantine(ctx, bServ, h.DAH, errByz) + require.NotNil(t, err) + } + }) + } +} diff --git a/share/eds/store.go b/share/eds/store.go new file mode 100644 index 0000000000..816065909e --- /dev/null +++ b/share/eds/store.go @@ -0,0 +1,644 @@ +package eds + +import ( + "bufio" + "bytes" + "context" + "errors" + "fmt" + "io" + "os" + "sync" + "sync/atomic" + "time" + + "github.com/filecoin-project/dagstore" + "github.com/filecoin-project/dagstore/index" + "github.com/filecoin-project/dagstore/mount" + "github.com/filecoin-project/dagstore/shard" + bstore "github.com/ipfs/boxo/blockstore" + "github.com/ipfs/go-datastore" + carv1 "github.com/ipld/go-car" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" + + "github.com/celestiaorg/rsmt2d" + + "github.com/celestiaorg/celestia-node/libs/utils" + "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/share/eds/cache" + "github.com/celestiaorg/celestia-node/share/ipld" +) + +const ( + blocksPath = "/blocks/" + indexPath = "/index/" + transientsPath = "/transients/" +) + +var ErrNotFound = errors.New("eds not found in store") + +// Store maintains (via DAGStore) a top-level index enabling granular and efficient random access to +// every share and/or Merkle proof over every registered CARv1 file. The EDSStore provides a custom +// blockstore interface implementation to achieve access. The main use-case is randomized sampling +// over the whole chain of EDS block data and getting data by namespace. +type Store struct { + cancel context.CancelFunc + + dgstr *dagstore.DAGStore + mounts *mount.Registry + + bs *blockstore + cache atomic.Pointer[cache.DoubleCache] + + carIdx index.FullIndexRepo + invertedIdx *simpleInvertedIndex + + basepath string + gcInterval time.Duration + // lastGCResult is only stored on the store for testing purposes. + lastGCResult atomic.Pointer[dagstore.GCResult] + + // stripedLocks is used to synchronize parallel operations + stripedLocks [256]sync.Mutex + shardFailures chan dagstore.ShardResult + + metrics *metrics +} + +// NewStore creates a new EDS Store under the given basepath and datastore. 
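+//
+// A minimal wiring sketch (hypothetical path and datastore; error handling elided):
+//
+//	store, _ := NewStore(DefaultParameters(), "/path/to/eds-store", ds)
+//	_ = store.Start(ctx)
+//	defer store.Stop(ctx)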
+func NewStore(params *Parameters, basePath string, ds datastore.Batching) (*Store, error) {
+	if err := params.Validate(); err != nil {
+		return nil, err
+	}
+
+	err := setupPath(basePath)
+	if err != nil {
+		return nil, fmt.Errorf("failed to setup eds.Store directories: %w", err)
+	}
+
+	r := mount.NewRegistry()
+	err = r.Register("fs", &inMemoryOnceMount{})
+	if err != nil {
+		return nil, fmt.Errorf("failed to register FS mount on the registry: %w", err)
+	}
+
+	fsRepo, err := index.NewFSRepo(basePath + indexPath)
+	if err != nil {
+		return nil, fmt.Errorf("failed to create index repository: %w", err)
+	}
+
+	invertedIdx, err := newSimpleInvertedIndex(basePath)
+	if err != nil {
+		return nil, fmt.Errorf("failed to create index: %w", err)
+	}
+
+	failureChan := make(chan dagstore.ShardResult)
+	dagStore, err := dagstore.NewDAGStore(
+		dagstore.Config{
+			TransientsDir: basePath + transientsPath,
+			IndexRepo:     fsRepo,
+			Datastore:     ds,
+			MountRegistry: r,
+			TopLevelIndex: invertedIdx,
+			FailureCh:     failureChan,
+		},
+	)
+	if err != nil {
+		return nil, fmt.Errorf("failed to create DAGStore: %w", err)
+	}
+
+	recentBlocksCache, err := cache.NewAccessorCache("recent", params.RecentBlocksCacheSize)
+	if err != nil {
+		return nil, fmt.Errorf("failed to create recent blocks cache: %w", err)
+	}
+
+	blockstoreCache, err := cache.NewAccessorCache("blockstore", params.BlockstoreCacheSize)
+	if err != nil {
+		return nil, fmt.Errorf("failed to create blockstore cache: %w", err)
+	}
+
+	store := &Store{
+		basepath:      basePath,
+		dgstr:         dagStore,
+		carIdx:        fsRepo,
+		invertedIdx:   invertedIdx,
+		gcInterval:    params.GCInterval,
+		mounts:        r,
+		shardFailures: failureChan,
+	}
+	store.bs = newBlockstore(store, ds)
+	store.cache.Store(cache.NewDoubleCache(recentBlocksCache, blockstoreCache))
+	return store, nil
+}
+
+func (s *Store) Start(ctx context.Context) error {
+	err := s.dgstr.Start(ctx)
+	if err != nil {
+		return err
+	}
+	// start Store only if DagStore succeeds
+	runCtx, cancel := context.WithCancel(context.Background())
+	s.cancel = cancel
+	// initialize empty gc result to avoid panic on access
+	s.lastGCResult.Store(&dagstore.GCResult{
+		Shards: make(map[shard.Key]error),
+	})
+
+	if s.gcInterval != 0 {
+		go s.gc(runCtx)
+	}
+
+	go s.watchForFailures(runCtx)
+	return nil
+}
+
+// Stop stops the underlying DAGStore.
+func (s *Store) Stop(context.Context) error {
+	defer s.cancel()
+	if err := s.invertedIdx.close(); err != nil {
+		return err
+	}
+	return s.dgstr.Close()
+}
+
+// gc periodically removes all inactive or errored shards.
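+//
+// Note that gc only runs when Parameters.GCInterval is non-zero, e.g. (hypothetical
+// value, since GC is off by default):
+//
+//	params := DefaultParameters()
+//	params.GCInterval = time.Hour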
+func (s *Store) gc(ctx context.Context) { + ticker := time.NewTicker(s.gcInterval) + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + tnow := time.Now() + res, err := s.dgstr.GC(ctx) + s.metrics.observeGCtime(ctx, time.Since(tnow), err != nil) + if err != nil { + log.Errorf("garbage collecting dagstore: %v", err) + return + } + s.lastGCResult.Store(res) + } + } +} + +func (s *Store) watchForFailures(ctx context.Context) { + for { + select { + case <-ctx.Done(): + return + case res := <-s.shardFailures: + log.Errorw("removing shard after failure", "key", res.Key, "err", res.Error) + s.metrics.observeShardFailure(ctx, res.Key.String()) + k := share.MustDataHashFromString(res.Key.String()) + err := s.Remove(ctx, k) + if err != nil { + log.Errorw("failed to remove shard after failure", "key", res.Key, "err", err) + } + } + } +} + +// Put stores the given data square with DataRoot's hash as a key. +// +// The square is verified on the Exchange level, and Put only stores the square, trusting it. +// The resulting file stores all the shares and NMT Merkle Proofs of the EDS. +// Additionally, the file gets indexed s.t. store.Blockstore can access them. +func (s *Store) Put(ctx context.Context, root share.DataHash, square *rsmt2d.ExtendedDataSquare) error { + ctx, span := tracer.Start(ctx, "store/put", trace.WithAttributes( + attribute.Int("width", int(square.Width())), + )) + + tnow := time.Now() + err := s.put(ctx, root, square) + result := putOK + switch { + case errors.Is(err, dagstore.ErrShardExists): + result = putExists + case err != nil: + result = putFailed + } + utils.SetStatusAndEnd(span, err) + s.metrics.observePut(ctx, time.Since(tnow), result, square.Width()) + return err +} + +func (s *Store) put(ctx context.Context, root share.DataHash, square *rsmt2d.ExtendedDataSquare) (err error) { + lk := &s.stripedLocks[root[len(root)-1]] + lk.Lock() + defer lk.Unlock() + + // if root already exists, short-circuit + if has, _ := s.Has(ctx, root); has { + return dagstore.ErrShardExists + } + + key := root.String() + f, err := os.OpenFile(s.basepath+blocksPath+key, os.O_CREATE|os.O_WRONLY, 0600) + if err != nil { + return err + } + defer closeAndLog("car file", f) + + // save encoded eds into buffer + mount := &inMemoryOnceMount{ + // TODO: buffer could be pre-allocated with capacity calculated based on eds size. + buf: bytes.NewBuffer(nil), + FileMount: mount.FileMount{Path: s.basepath + blocksPath + key}, + } + err = WriteEDS(ctx, square, mount) + if err != nil { + return fmt.Errorf("failed to write EDS to file: %w", err) + } + + // write whole buffered mount data in one go to optimize i/o + if _, err = mount.WriteTo(f); err != nil { + return fmt.Errorf("failed to write EDS to file: %w", err) + } + + ch := make(chan dagstore.ShardResult, 1) + err = s.dgstr.RegisterShard(ctx, shard.KeyFromString(key), mount, ch, dagstore.RegisterOpts{}) + if err != nil { + return fmt.Errorf("failed to initiate shard registration: %w", err) + } + + var result dagstore.ShardResult + select { + case result = <-ch: + case <-ctx.Done(): + // if the context finished before the result was received, track the result in a separate goroutine + go trackLateResult("put", ch, s.metrics, time.Minute*5) + return ctx.Err() + } + + if result.Error != nil { + return fmt.Errorf("failed to register shard: %w", result.Error) + } + + // the accessor returned in the result will be nil, so the shard needs to be acquired first to + // become available in the cache. 
It might take some time, and the result should not affect the put
+	// operation, so do it in a goroutine.
+	// TODO: Ideally, only recent blocks should be put in the cache, but there is no way right now to
+	// check such a condition.
+	go func() {
+		ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
+		defer cancel()
+		ac, err := s.cache.Load().First().GetOrLoad(ctx, result.Key, s.getAccessor)
+		if err != nil {
+			log.Warnw("unable to put accessor to recent blocks accessors cache", "err", err)
+			return
+		}
+
+		// need to close the returned accessor to remove the reader reference
+		if err := ac.Close(); err != nil {
+			log.Warnw("unable to close accessor after loading", "err", err)
+		}
+	}()
+
+	return nil
+}
+
+// trackLateResult waits for a result from the res channel for a maximum duration specified by
+// maxWait. If the result is not received within the specified duration, it logs an error
+// indicating that the parent context has expired and the operation is stuck. If a result
+// is received, it checks for any error and logs appropriate messages.
+func trackLateResult(opName string, res <-chan dagstore.ShardResult, metrics *metrics, maxWait time.Duration) {
+	tnow := time.Now()
+	select {
+	case <-time.After(maxWait):
+		metrics.observeLongOp(context.Background(), opName, time.Since(tnow), longOpUnresolved)
+		log.Errorf("parent context expired, while op %s has been stuck for more than %v", opName, time.Since(tnow))
+		return
+	case result := <-res:
+		// don't observe if the result was received right after launch of the func
+		if time.Since(tnow) < time.Second {
+			return
+		}
+		if result.Error != nil {
+			metrics.observeLongOp(context.Background(), opName, time.Since(tnow), longOpFailed)
+			log.Errorf("op %s failed %v after context expired, err: %s", opName, time.Since(tnow), result.Error)
+			return
+		}
+		metrics.observeLongOp(context.Background(), opName, time.Since(tnow), longOpOK)
+		log.Warnf("op %s finished with no error %v after context expired", opName, time.Since(tnow))
+		return
+	}
+}
+
+// GetCAR takes a DataRoot and returns a buffered reader to the respective EDS serialized as a
+// CARv1 file.
+// The Reader strictly reads the CAR header and first quadrant (1/4) of the EDS, omitting all the
+// NMT Merkle proofs. Integrity of the store data is not verified.
+//
+// The shard is cached in the Store, so subsequent calls to GetCAR with the same root will use the
+// same reader. The cache is responsible for closing the underlying reader.
+func (s *Store) GetCAR(ctx context.Context, root share.DataHash) (io.ReadCloser, error) {
+	ctx, span := tracer.Start(ctx, "store/get-car")
+	tnow := time.Now()
+	r, err := s.getCAR(ctx, root)
+	s.metrics.observeGetCAR(ctx, time.Since(tnow), err != nil)
+	utils.SetStatusAndEnd(span, err)
+	return r, err
+}
+
+func (s *Store) getCAR(ctx context.Context, root share.DataHash) (io.ReadCloser, error) {
+	key := shard.KeyFromString(root.String())
+	accessor, err := s.cache.Load().Get(key)
+	if err == nil {
+		return newReadCloser(accessor), nil
+	}
+	// If the accessor is not found in the cache, create a new one from dagstore. We don't put the
+	// accessor in the cache here because getCAR is used by shrex-eds. There is a lower probability,
+	// compared to other cache put triggers, that the same block will be requested again soon.
+ shardAccessor, err := s.getAccessor(ctx, key) + if err != nil { + return nil, fmt.Errorf("failed to get accessor: %w", err) + } + + return newReadCloser(shardAccessor), nil +} + +// Blockstore returns an IPFS blockstore providing access to individual shares/nodes of all EDS +// registered on the Store. NOTE: The blockstore does not store whole Celestia Blocks but IPFS +// blocks. We represent `shares` and NMT Merkle proofs as IPFS blocks and IPLD nodes so Bitswap can +// access those. +func (s *Store) Blockstore() bstore.Blockstore { + return s.bs +} + +// CARBlockstore returns an IPFS Blockstore providing access to individual shares/nodes of a +// specific EDS identified by DataHash and registered on the Store. NOTE: The Blockstore does not +// store whole Celestia Blocks but IPFS blocks. We represent `shares` and NMT Merkle proofs as IPFS +// blocks and IPLD nodes so Bitswap can access those. +func (s *Store) CARBlockstore( + ctx context.Context, + root share.DataHash, +) (*BlockstoreCloser, error) { + ctx, span := tracer.Start(ctx, "store/car-blockstore") + tnow := time.Now() + cbs, err := s.carBlockstore(ctx, root) + s.metrics.observeCARBlockstore(ctx, time.Since(tnow), err != nil) + utils.SetStatusAndEnd(span, err) + return cbs, err +} + +func (s *Store) carBlockstore( + ctx context.Context, + root share.DataHash, +) (*BlockstoreCloser, error) { + key := shard.KeyFromString(root.String()) + accessor, err := s.cache.Load().Get(key) + if err == nil { + return blockstoreCloser(accessor) + } + + // if the accessor is not found in the cache, create a new one from dagstore + sa, err := s.getAccessor(ctx, key) + if err != nil { + return nil, fmt.Errorf("failed to get accessor: %w", err) + } + return blockstoreCloser(sa) +} + +// GetDAH returns the DataAvailabilityHeader for the EDS identified by DataHash. +func (s *Store) GetDAH(ctx context.Context, root share.DataHash) (*share.Root, error) { + ctx, span := tracer.Start(ctx, "store/car-dah") + tnow := time.Now() + r, err := s.getDAH(ctx, root) + s.metrics.observeGetDAH(ctx, time.Since(tnow), err != nil) + utils.SetStatusAndEnd(span, err) + return r, err +} + +func (s *Store) getDAH(ctx context.Context, root share.DataHash) (*share.Root, error) { + r, err := s.getCAR(ctx, root) + if err != nil { + return nil, fmt.Errorf("eds/store: failed to get CAR file: %w", err) + } + defer closeAndLog("car reader", r) + + carHeader, err := carv1.ReadHeader(bufio.NewReader(r)) + if err != nil { + return nil, fmt.Errorf("eds/store: failed to read car header: %w", err) + } + + dah := dahFromCARHeader(carHeader) + if !bytes.Equal(dah.Hash(), root) { + return nil, fmt.Errorf("eds/store: content integrity mismatch from CAR for root %x", root) + } + return dah, nil +} + +// dahFromCARHeader returns the DataAvailabilityHeader stored in the CIDs of a CARv1 header. 
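+//
+// For example, the CAR header of a width-4 EDS carries 8 roots: the first 4 become
+// RowRoots and the last 4 become ColumnRoots.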
+func dahFromCARHeader(carHeader *carv1.CarHeader) *share.Root { + rootCount := len(carHeader.Roots) + rootBytes := make([][]byte, 0, rootCount) + for _, root := range carHeader.Roots { + rootBytes = append(rootBytes, ipld.NamespacedSha256FromCID(root)) + } + return &share.Root{ + RowRoots: rootBytes[:rootCount/2], + ColumnRoots: rootBytes[rootCount/2:], + } +} + +func (s *Store) getAccessor(ctx context.Context, key shard.Key) (cache.Accessor, error) { + ch := make(chan dagstore.ShardResult, 1) + err := s.dgstr.AcquireShard(ctx, key, ch, dagstore.AcquireOpts{}) + if err != nil { + if errors.Is(err, dagstore.ErrShardUnknown) { + return nil, ErrNotFound + } + return nil, fmt.Errorf("failed to initialize shard acquisition: %w", err) + } + + select { + case res := <-ch: + if res.Error != nil { + return nil, fmt.Errorf("failed to acquire shard: %w", res.Error) + } + return res.Accessor, nil + case <-ctx.Done(): + go trackLateResult("get_shard", ch, s.metrics, time.Minute) + return nil, ctx.Err() + } +} + +// Remove removes EDS from Store by the given share.Root hash and cleans up all +// the indexing. +func (s *Store) Remove(ctx context.Context, root share.DataHash) error { + ctx, span := tracer.Start(ctx, "store/remove") + tnow := time.Now() + err := s.remove(ctx, root) + s.metrics.observeRemove(ctx, time.Since(tnow), err != nil) + utils.SetStatusAndEnd(span, err) + return err +} + +func (s *Store) remove(ctx context.Context, root share.DataHash) (err error) { + key := shard.KeyFromString(root.String()) + // remove open links to accessor from cache + if err := s.cache.Load().Remove(key); err != nil { + log.Warnw("remove accessor from cache", "err", err) + } + ch := make(chan dagstore.ShardResult, 1) + err = s.dgstr.DestroyShard(ctx, key, ch, dagstore.DestroyOpts{}) + if err != nil { + return fmt.Errorf("failed to initiate shard destruction: %w", err) + } + + select { + case result := <-ch: + if result.Error != nil { + return fmt.Errorf("failed to destroy shard: %w", result.Error) + } + case <-ctx.Done(): + go trackLateResult("remove", ch, s.metrics, time.Minute) + return ctx.Err() + } + + dropped, err := s.carIdx.DropFullIndex(key) + if !dropped { + log.Warnf("failed to drop index for %s", key) + } + if err != nil { + return fmt.Errorf("failed to drop index for %s: %w", key, err) + } + + err = os.Remove(s.basepath + blocksPath + root.String()) + if err != nil { + return fmt.Errorf("failed to remove CAR file: %w", err) + } + return nil +} + +// Get reads EDS out of Store by given DataRoot. +// +// It reads only one quadrant(1/4) of the EDS and verifies the integrity of the stored data by +// recomputing it. +func (s *Store) Get(ctx context.Context, root share.DataHash) (*rsmt2d.ExtendedDataSquare, error) { + ctx, span := tracer.Start(ctx, "store/get") + tnow := time.Now() + eds, err := s.get(ctx, root) + s.metrics.observeGet(ctx, time.Since(tnow), err != nil) + utils.SetStatusAndEnd(span, err) + return eds, err +} + +func (s *Store) get(ctx context.Context, root share.DataHash) (eds *rsmt2d.ExtendedDataSquare, err error) { + ctx, span := tracer.Start(ctx, "store/get") + defer func() { + utils.SetStatusAndEnd(span, err) + }() + + r, err := s.getCAR(ctx, root) + if err != nil { + return nil, fmt.Errorf("failed to get CAR file: %w", err) + } + defer closeAndLog("car reader", r) + + eds, err = ReadEDS(ctx, r, root) + if err != nil { + return nil, fmt.Errorf("failed to read EDS from CAR file: %w", err) + } + return eds, nil +} + +// Has checks if EDS exists by the given share.Root hash. 
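+// It consults the DAG store shard registry only and does not read the CAR file from disk.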
+func (s *Store) Has(ctx context.Context, root share.DataHash) (has bool, err error) {
+	ctx, span := tracer.Start(ctx, "store/has")
+	tnow := time.Now()
+	has, err = s.has(ctx, root)
+	s.metrics.observeHas(ctx, time.Since(tnow), err != nil)
+	utils.SetStatusAndEnd(span, err)
+	return has, err
+}
+
+func (s *Store) has(_ context.Context, root share.DataHash) (bool, error) {
+	key := root.String()
+	info, err := s.dgstr.GetShardInfo(shard.KeyFromString(key))
+	switch err {
+	case nil:
+		return true, info.Error
+	case dagstore.ErrShardUnknown:
+		return false, info.Error
+	default:
+		return false, err
+	}
+}
+
+// List lists all the registered EDSes.
+func (s *Store) List() ([]share.DataHash, error) {
+	ctx, span := tracer.Start(context.Background(), "store/list")
+	tnow := time.Now()
+	hashes, err := s.list()
+	s.metrics.observeList(ctx, time.Since(tnow), err != nil)
+	utils.SetStatusAndEnd(span, err)
+	return hashes, err
+}
+
+func (s *Store) list() ([]share.DataHash, error) {
+	shards := s.dgstr.AllShardsInfo()
+	hashes := make([]share.DataHash, 0, len(shards))
+	for shrd := range shards {
+		hash := share.MustDataHashFromString(shrd.String())
+		hashes = append(hashes, hash)
+	}
+	return hashes, nil
+}
+
+func setupPath(basepath string) error {
+	err := os.MkdirAll(basepath+blocksPath, os.ModePerm)
+	if err != nil {
+		return fmt.Errorf("failed to create blocks directory: %w", err)
+	}
+	err = os.MkdirAll(basepath+transientsPath, os.ModePerm)
+	if err != nil {
+		return fmt.Errorf("failed to create transients directory: %w", err)
+	}
+	err = os.MkdirAll(basepath+indexPath, os.ModePerm)
+	if err != nil {
+		return fmt.Errorf("failed to create index directory: %w", err)
+	}
+	return nil
+}
+
+// inMemoryOnceMount is used to allow reading once from buffer before using main mount.Reader
+type inMemoryOnceMount struct {
+	buf *bytes.Buffer
+
+	readOnce atomic.Bool
+	mount.FileMount
+}
+
+func (m *inMemoryOnceMount) Fetch(ctx context.Context) (mount.Reader, error) {
+	if m.buf != nil && !m.readOnce.Swap(true) {
+		reader := &inMemoryReader{Reader: bytes.NewReader(m.buf.Bytes())}
+		// release memory for gc, otherwise buffer will stick forever
+		m.buf = nil
+		return reader, nil
+	}
+	return m.FileMount.Fetch(ctx)
+}
+
+func (m *inMemoryOnceMount) Write(b []byte) (int, error) {
+	return m.buf.Write(b)
+}
+
+func (m *inMemoryOnceMount) WriteTo(w io.Writer) (int64, error) {
+	return io.Copy(w, bytes.NewReader(m.buf.Bytes()))
+}
+
+// inMemoryReader extends bytes.Reader to implement mount.Reader interface
+type inMemoryReader struct {
+	*bytes.Reader
+}
+
+// Close allows inMemoryReader to satisfy mount.Reader interface
+func (r *inMemoryReader) Close() error {
+	return nil
+}
diff --git a/share/eds/store_options.go b/share/eds/store_options.go
new file mode 100644
index 0000000000..c8dcc69136
--- /dev/null
+++ b/share/eds/store_options.go
@@ -0,0 +1,43 @@
+package eds
+
+import (
+	"errors"
+	"time"
+)
+
+type Parameters struct {
+	// GCInterval is the interval at which the DAG store garbage-collects transient files of
+	// shards that are currently available but inactive, or errored.
+	// We don't use transient files right now, so GC is turned off by default.
+	GCInterval time.Duration
+
+	// RecentBlocksCacheSize is the size of the cache for recent blocks.
+	RecentBlocksCacheSize int
+
+	// BlockstoreCacheSize is the size of the cache for accessors requested via the blockstore.
+	BlockstoreCacheSize int
+}
+
+// DefaultParameters returns the default configuration values for the EDS store parameters.
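+//
+// A typical construction looks like the following (a sketch; NewStore and Start are defined in
+// this package):
+//
+//	store, err := NewStore(DefaultParameters(), basepath, datastore)
+//	if err == nil {
+//		err = store.Start(ctx)
+//	}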
+func DefaultParameters() *Parameters { + return &Parameters{ + GCInterval: 0, + RecentBlocksCacheSize: 10, + BlockstoreCacheSize: 128, + } +} + +func (p *Parameters) Validate() error { + if p.GCInterval < 0 { + return errors.New("eds: GC interval cannot be negative") + } + + if p.RecentBlocksCacheSize < 1 { + return errors.New("eds: recent blocks cache size must be positive") + } + + if p.BlockstoreCacheSize < 1 { + return errors.New("eds: blockstore cache size must be positive") + } + return nil +} diff --git a/share/eds/store_test.go b/share/eds/store_test.go new file mode 100644 index 0000000000..6bc6972bb4 --- /dev/null +++ b/share/eds/store_test.go @@ -0,0 +1,539 @@ +package eds + +import ( + "context" + "io" + "os" + "sync" + "testing" + "time" + + "github.com/filecoin-project/dagstore" + "github.com/filecoin-project/dagstore/shard" + "github.com/ipfs/go-cid" + "github.com/ipfs/go-datastore" + ds_sync "github.com/ipfs/go-datastore/sync" + dsbadger "github.com/ipfs/go-ds-badger4" + "github.com/ipld/go-car" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/celestiaorg/celestia-app/pkg/da" + "github.com/celestiaorg/rsmt2d" + + "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/share/eds/cache" + "github.com/celestiaorg/celestia-node/share/eds/edstest" + "github.com/celestiaorg/celestia-node/share/ipld" +) + +func TestEDSStore(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + edsStore, err := newStore(t) + require.NoError(t, err) + err = edsStore.Start(ctx) + require.NoError(t, err) + + // PutRegistersShard tests if Put registers the shard on the underlying DAGStore + t.Run("PutRegistersShard", func(t *testing.T) { + eds, dah := randomEDS(t) + + // shard hasn't been registered yet + has, err := edsStore.Has(ctx, dah.Hash()) + assert.False(t, has) + assert.NoError(t, err) + + err = edsStore.Put(ctx, dah.Hash(), eds) + assert.NoError(t, err) + + _, err = edsStore.dgstr.GetShardInfo(shard.KeyFromString(dah.String())) + assert.NoError(t, err) + }) + + // PutIndexesEDS ensures that Putting an EDS indexes it into the car index + t.Run("PutIndexesEDS", func(t *testing.T) { + eds, dah := randomEDS(t) + + stat, _ := edsStore.carIdx.StatFullIndex(shard.KeyFromString(dah.String())) + assert.False(t, stat.Exists) + + err = edsStore.Put(ctx, dah.Hash(), eds) + assert.NoError(t, err) + + stat, err = edsStore.carIdx.StatFullIndex(shard.KeyFromString(dah.String())) + assert.True(t, stat.Exists) + assert.NoError(t, err) + }) + + // GetCAR ensures that the reader returned from GetCAR is capable of reading the CAR header and + // ODS. 
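+	// (the ODS being the original data square, i.e. the first quadrant of the EDS)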
+ t.Run("GetCAR", func(t *testing.T) { + eds, dah := randomEDS(t) + + err = edsStore.Put(ctx, dah.Hash(), eds) + require.NoError(t, err) + + r, err := edsStore.GetCAR(ctx, dah.Hash()) + assert.NoError(t, err) + defer func() { + require.NoError(t, r.Close()) + }() + carReader, err := car.NewCarReader(r) + assert.NoError(t, err) + + for i := 0; i < 4; i++ { + for j := 0; j < 4; j++ { + original := eds.GetCell(uint(i), uint(j)) + block, err := carReader.Next() + assert.NoError(t, err) + assert.Equal(t, original, share.GetData(block.RawData())) + } + } + }) + + t.Run("item not exist", func(t *testing.T) { + root := share.DataHash{1} + _, err := edsStore.GetCAR(ctx, root) + assert.ErrorIs(t, err, ErrNotFound) + + _, err = edsStore.GetDAH(ctx, root) + assert.ErrorIs(t, err, ErrNotFound) + + _, err = edsStore.CARBlockstore(ctx, root) + assert.ErrorIs(t, err, ErrNotFound) + }) + + t.Run("Remove", func(t *testing.T) { + eds, dah := randomEDS(t) + + err = edsStore.Put(ctx, dah.Hash(), eds) + require.NoError(t, err) + + // assert that file now exists + _, err = os.Stat(edsStore.basepath + blocksPath + dah.String()) + assert.NoError(t, err) + + // accessor will be registered in cache async on put, so give it some time to settle + time.Sleep(time.Millisecond * 100) + + err = edsStore.Remove(ctx, dah.Hash()) + assert.NoError(t, err) + + // shard should no longer be registered on the dagstore + _, err = edsStore.dgstr.GetShardInfo(shard.KeyFromString(dah.String())) + assert.Error(t, err, "shard not found") + + // shard should have been dropped from the index, which also removes the file under /index/ + indexStat, err := edsStore.carIdx.StatFullIndex(shard.KeyFromString(dah.String())) + assert.NoError(t, err) + assert.False(t, indexStat.Exists) + + // file no longer exists + _, err = os.Stat(edsStore.basepath + blocksPath + dah.String()) + assert.ErrorContains(t, err, "no such file or directory") + }) + + t.Run("Remove after OpShardFail", func(t *testing.T) { + eds, dah := randomEDS(t) + + err = edsStore.Put(ctx, dah.Hash(), eds) + require.NoError(t, err) + + // assert that shard now exists + ok, err := edsStore.Has(ctx, dah.Hash()) + assert.NoError(t, err) + assert.True(t, ok) + + // assert that file now exists + path := edsStore.basepath + blocksPath + dah.String() + _, err = os.Stat(path) + assert.NoError(t, err) + + err = os.Remove(path) + assert.NoError(t, err) + + // accessor will be registered in cache async on put, so give it some time to settle + time.Sleep(time.Millisecond * 100) + + // remove non-failed accessor from cache + err = edsStore.cache.Load().Remove(shard.KeyFromString(dah.String())) + assert.NoError(t, err) + + _, err = edsStore.GetCAR(ctx, dah.Hash()) + assert.Error(t, err) + + ticker := time.NewTicker(time.Millisecond * 100) + defer ticker.Stop() + for { + select { + case <-ticker.C: + has, err := edsStore.Has(ctx, dah.Hash()) + if err == nil && !has { + // shard no longer exists after OpShardFail was detected from GetCAR call + return + } + case <-ctx.Done(): + t.Fatal("timeout waiting for shard to be removed") + } + } + }) + + t.Run("Has", func(t *testing.T) { + eds, dah := randomEDS(t) + + ok, err := edsStore.Has(ctx, dah.Hash()) + assert.NoError(t, err) + assert.False(t, ok) + + err = edsStore.Put(ctx, dah.Hash(), eds) + assert.NoError(t, err) + + ok, err = edsStore.Has(ctx, dah.Hash()) + assert.NoError(t, err) + assert.True(t, ok) + }) + + t.Run("RecentBlocksCache", func(t *testing.T) { + eds, dah := randomEDS(t) + err = edsStore.Put(ctx, dah.Hash(), eds) + require.NoError(t, 
err) + + // accessor will be registered in cache async on put, so give it some time to settle + time.Sleep(time.Millisecond * 100) + + // check, that the key is in the cache after put + shardKey := shard.KeyFromString(dah.String()) + _, err = edsStore.cache.Load().Get(shardKey) + assert.NoError(t, err) + }) + + t.Run("List", func(t *testing.T) { + const amount = 10 + hashes := make([]share.DataHash, 0, amount) + for range make([]byte, amount) { + eds, dah := randomEDS(t) + err = edsStore.Put(ctx, dah.Hash(), eds) + require.NoError(t, err) + hashes = append(hashes, dah.Hash()) + } + + hashesOut, err := edsStore.List() + require.NoError(t, err) + for _, hash := range hashes { + assert.Contains(t, hashesOut, hash) + } + }) + + t.Run("Parallel put", func(t *testing.T) { + const amount = 20 + eds, dah := randomEDS(t) + + wg := sync.WaitGroup{} + for i := 1; i < amount; i++ { + wg.Add(1) + go func() { + defer wg.Done() + err := edsStore.Put(ctx, dah.Hash(), eds) + if err != nil { + require.ErrorIs(t, err, dagstore.ErrShardExists) + } + }() + } + wg.Wait() + + eds, err := edsStore.Get(ctx, dah.Hash()) + require.NoError(t, err) + newDah, err := da.NewDataAvailabilityHeader(eds) + require.NoError(t, err) + require.Equal(t, dah.Hash(), newDah.Hash()) + }) +} + +// TestEDSStore_GC verifies that unused transient shards are collected by the GC periodically. +func TestEDSStore_GC(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + edsStore, err := newStore(t) + edsStore.gcInterval = time.Second + require.NoError(t, err) + + // kicks off the gc goroutine + err = edsStore.Start(ctx) + require.NoError(t, err) + + eds, dah := randomEDS(t) + shardKey := shard.KeyFromString(dah.String()) + + err = edsStore.Put(ctx, dah.Hash(), eds) + require.NoError(t, err) + + // accessor will be registered in cache async on put, so give it some time to settle + time.Sleep(time.Millisecond * 100) + + // remove links to the shard from cache + time.Sleep(time.Millisecond * 100) + key := shard.KeyFromString(share.DataHash(dah.Hash()).String()) + err = edsStore.cache.Load().Remove(key) + require.NoError(t, err) + + // doesn't exist yet + assert.NotContains(t, edsStore.lastGCResult.Load().Shards, shardKey) + + // wait for gc to run, retry three times + for i := 0; i < 3; i++ { + time.Sleep(edsStore.gcInterval) + if _, ok := edsStore.lastGCResult.Load().Shards[shardKey]; ok { + break + } + } + assert.Contains(t, edsStore.lastGCResult.Load().Shards, shardKey) + + // assert nil in this context means there was no error re-acquiring the shard during GC + assert.Nil(t, edsStore.lastGCResult.Load().Shards[shardKey]) +} + +func Test_BlockstoreCache(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + edsStore, err := newStore(t) + require.NoError(t, err) + err = edsStore.Start(ctx) + require.NoError(t, err) + + // store eds to the store with noopCache to allow clean cache after put + swap := edsStore.cache.Load() + edsStore.cache.Store(cache.NewDoubleCache(cache.NoopCache{}, cache.NoopCache{})) + eds, dah := randomEDS(t) + err = edsStore.Put(ctx, dah.Hash(), eds) + require.NoError(t, err) + + // get any key from saved eds + bs, err := edsStore.carBlockstore(ctx, dah.Hash()) + require.NoError(t, err) + defer func() { + require.NoError(t, bs.Close()) + }() + keys, err := bs.AllKeysChan(ctx) + require.NoError(t, err) + var key cid.Cid + select { + case key = <-keys: + case <-ctx.Done(): + t.Fatal("context timeout") + } + + // swap back original cache 
+	edsStore.cache.Store(swap)
+
+	// key shouldn't be in cache yet, check for returned errCacheMiss
+	shardKey := shard.KeyFromString(dah.String())
+	_, err = edsStore.cache.Load().Get(shardKey)
+	require.Error(t, err)
+
+	// now get it from blockstore, to trigger storing to cache
+	_, err = edsStore.Blockstore().Get(ctx, key)
+	require.NoError(t, err)
+
+	// should be no errCacheMiss anymore
+	_, err = edsStore.cache.Load().Get(shardKey)
+	require.NoError(t, err)
+}
+
+// Test_CachedAccessor verifies that the reader represented by a cached accessor can be read from
+// multiple times, without exhausting the underlying reader.
+func Test_CachedAccessor(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	t.Cleanup(cancel)
+
+	edsStore, err := newStore(t)
+	require.NoError(t, err)
+	err = edsStore.Start(ctx)
+	require.NoError(t, err)
+
+	eds, dah := randomEDS(t)
+	err = edsStore.Put(ctx, dah.Hash(), eds)
+	require.NoError(t, err)
+
+	// accessor will be registered in cache async on put, so give it some time to settle
+	time.Sleep(time.Millisecond * 100)
+
+	// accessor should be in cache
+	_, err = edsStore.cache.Load().Get(shard.KeyFromString(dah.String()))
+	require.NoError(t, err)
+
+	// first read from cached accessor
+	carReader, err := edsStore.getCAR(ctx, dah.Hash())
+	require.NoError(t, err)
+	firstBlock, err := io.ReadAll(carReader)
+	require.NoError(t, err)
+	require.NoError(t, carReader.Close())
+
+	// second read from cached accessor
+	carReader, err = edsStore.getCAR(ctx, dah.Hash())
+	require.NoError(t, err)
+	secondBlock, err := io.ReadAll(carReader)
+	require.NoError(t, err)
+	require.NoError(t, carReader.Close())
+
+	require.Equal(t, firstBlock, secondBlock)
+}
+
+// Test_NotCachedAccessor verifies that the reader represented by an accessor obtained directly
+// from dagstore can be read from multiple times, without exhausting the underlying reader.
+func Test_NotCachedAccessor(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	t.Cleanup(cancel)
+
+	edsStore, err := newStore(t)
+	require.NoError(t, err)
+	err = edsStore.Start(ctx)
+	require.NoError(t, err)
+	// replace cache with noopCache so that accessors are not stored in the cache on put
+	edsStore.cache.Store(cache.NewDoubleCache(cache.NoopCache{}, cache.NoopCache{}))
+
+	eds, dah := randomEDS(t)
+	err = edsStore.Put(ctx, dah.Hash(), eds)
+	require.NoError(t, err)
+
+	// accessor will be registered in cache async on put, so give it some time to settle
+	time.Sleep(time.Millisecond * 100)
+
+	// accessor should not be in cache
+	_, err = edsStore.cache.Load().Get(shard.KeyFromString(dah.String()))
+	require.Error(t, err)
+
+	// first read from direct accessor (not from cache)
+	carReader, err := edsStore.getCAR(ctx, dah.Hash())
+	require.NoError(t, err)
+	firstBlock, err := io.ReadAll(carReader)
+	require.NoError(t, err)
+	require.NoError(t, carReader.Close())
+
+	// second read from direct accessor (not from cache)
+	carReader, err = edsStore.getCAR(ctx, dah.Hash())
+	require.NoError(t, err)
+	secondBlock, err := io.ReadAll(carReader)
+	require.NoError(t, err)
+	require.NoError(t, carReader.Close())
+
+	require.Equal(t, firstBlock, secondBlock)
+}
+
+func BenchmarkStore(b *testing.B) {
+	ctx, cancel := context.WithCancel(context.Background())
+	b.Cleanup(cancel)
+
+	ds := ds_sync.MutexWrap(datastore.NewMapDatastore())
+	edsStore, err := NewStore(DefaultParameters(), b.TempDir(), ds)
+	require.NoError(b, err)
+	err = edsStore.Start(ctx)
+	require.NoError(b, err)
+
+	// BenchmarkStore/bench_put_128-10 10 3231859283 ns/op (~3sec)
+	b.Run("bench put 128", func(b *testing.B) {
+		b.ResetTimer()
+		for i := 0; i < b.N; i++ {
+			// pause the timer for initializing test data
+			b.StopTimer()
+			eds := edstest.RandEDS(b, 128)
+			dah, err := share.NewRoot(eds)
+			require.NoError(b, err)
+			b.StartTimer()
+
+			err = edsStore.Put(ctx, dah.Hash(), eds)
+			require.NoError(b, err)
+		}
+	})
+
+	// BenchmarkStore/bench_read_128-10 14 78970661 ns/op (~70ms)
+	b.Run("bench read 128", func(b *testing.B) {
+		b.ResetTimer()
+		for i := 0; i < b.N; i++ {
+			// pause the timer for initializing test data
+			b.StopTimer()
+			eds := edstest.RandEDS(b, 128)
+			dah, err := share.NewRoot(eds)
+			require.NoError(b, err)
+			_ = edsStore.Put(ctx, dah.Hash(), eds)
+			b.StartTimer()
+
+			_, err = edsStore.Get(ctx, dah.Hash())
+			require.NoError(b, err)
+		}
+	})
+}
+
+// BenchmarkCacheEviction benchmarks the time it takes to load a block to the cache when the
+// cache size is set to 1. This forces cache eviction on every read.
+// BenchmarkCacheEviction-10/128 384 3533586 ns/op (~3ms) +func BenchmarkCacheEviction(b *testing.B) { + const ( + blocks = 4 + size = 128 + ) + + ctx, cancel := context.WithTimeout(context.Background(), time.Second*30) + b.Cleanup(cancel) + + dir := b.TempDir() + ds, err := dsbadger.NewDatastore(dir, &dsbadger.DefaultOptions) + require.NoError(b, err) + + newStore := func(params *Parameters) *Store { + edsStore, err := NewStore(params, dir, ds) + require.NoError(b, err) + err = edsStore.Start(ctx) + require.NoError(b, err) + return edsStore + } + edsStore := newStore(DefaultParameters()) + + // generate EDSs and store them + cids := make([]cid.Cid, blocks) + for i := range cids { + eds := edstest.RandEDS(b, size) + dah, err := da.NewDataAvailabilityHeader(eds) + require.NoError(b, err) + err = edsStore.Put(ctx, dah.Hash(), eds) + require.NoError(b, err) + + // store cids for read loop later + cids[i] = ipld.MustCidFromNamespacedSha256(dah.RowRoots[0]) + } + + // restart store to clear cache + require.NoError(b, edsStore.Stop(ctx)) + + // set BlockstoreCacheSize to 1 to force eviction on every read + params := DefaultParameters() + params.BlockstoreCacheSize = 1 + bstore := newStore(params).Blockstore() + + // start benchmark + b.ResetTimer() + for i := 0; i < b.N; i++ { + h := cids[i%blocks] + // every read will trigger eviction + _, err := bstore.Get(ctx, h) + require.NoError(b, err) + } +} + +func newStore(t *testing.T) (*Store, error) { + t.Helper() + + ds := ds_sync.MutexWrap(datastore.NewMapDatastore()) + return NewStore(DefaultParameters(), t.TempDir(), ds) +} + +func randomEDS(t *testing.T) (*rsmt2d.ExtendedDataSquare, *share.Root) { + eds := edstest.RandEDS(t, 4) + dah, err := share.NewRoot(eds) + require.NoError(t, err) + + return eds, dah +} diff --git a/share/eds/testdata/README.md b/share/eds/testdata/README.md new file mode 100644 index 0000000000..960549e2a0 --- /dev/null +++ b/share/eds/testdata/README.md @@ -0,0 +1,5 @@ +# CARxEDS Testdata + +This directory contains an example CARv1 file of an EDS and its matching data availability header. + +They might need to be regenerated when modifying constants such as the default share size. This can be done by running the test utility in `eds_test.go` called `createTestData`. 
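For orientation, here is a minimal sketch of how the two fixtures relate, assuming `share.Root` unmarshals directly from the JSON fixture (its field tags appear to match `row_roots`/`column_roots`) and using the `ReadEDS` helper added in this change; paths are relative to the repository root:

```go
package main

import (
	"context"
	"encoding/json"
	"os"

	"github.com/celestiaorg/celestia-node/share"
	"github.com/celestiaorg/celestia-node/share/eds"
)

func main() {
	// load the expected data availability header (assumed direct unmarshal)
	rawRoot, err := os.ReadFile("share/eds/testdata/example-root.json")
	if err != nil {
		panic(err)
	}
	var root share.Root
	if err := json.Unmarshal(rawRoot, &root); err != nil {
		panic(err)
	}

	// open the CARv1 file holding the ODS of the same block
	f, err := os.Open("share/eds/testdata/example.car")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// ReadEDS recomputes the full EDS from the ODS and verifies it against the
	// expected data root, so a nil error means the fixtures are consistent.
	if _, err := eds.ReadEDS(context.Background(), f, root.Hash()); err != nil {
		panic(err)
	}
}
```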
diff --git a/share/eds/testdata/example-root.json b/share/eds/testdata/example-root.json new file mode 100644 index 0000000000..999d6301b6 --- /dev/null +++ b/share/eds/testdata/example-root.json @@ -0,0 +1,22 @@ +{ +"row_roots": [ +"AAAAAAAAAAAAAAAAAAAAAAAAABPYEuDlO9Dz69oAAAAAAAAAAAAAAAAAAAAAAAAAMcklN0h38T4b/UBC/Cmr5YWmjmmxvi1e35vZBW14b8gDHBoTFVvY6H4J", +"AAAAAAAAAAAAAAAAAAAAAAAAADxyZecUZD41W5IAAAAAAAAAAAAAAAAAAAAAAAAAh8vQUZ38PaWyeUs7dQhphIuRIKiGaTr4KFwEhMRhejTd6/4NHdnKTDyY", +"AAAAAAAAAAAAAAAAAAAAAAAAAKDQatbQSwQ9uJsAAAAAAAAAAAAAAAAAAAAAAAAArtdqXCSsM1OlVCRZqqfZDnEO9eC5cwlgy5MQHb2g4NLr7nZYTruiOoz7", +"AAAAAAAAAAAAAAAAAAAAAAAAAMeUhM8LZBo9sWwAAAAAAAAAAAAAAAAAAAAAAAAA8PtvJpbDc4APKOK6MT1k61HuQXwauWw3nFWwr9pSljiYMv6jjjdLDF8o", +"/////////////////////////////////////////////////////////////////////////////xnHmhDh4Y8vfJrgewAcvLWpvI5XOyATj1IQDkCwvIEh", +"/////////////////////////////////////////////////////////////////////////////+qngp0AfoykfXwsMBukRtYxNA/bzW0+F3J7Q/+S1YZJ", +"/////////////////////////////////////////////////////////////////////////////4WNPrME/2MLrIZgAUoKaVx2GzJqDcYGrBg+sudPKUDy", +"/////////////////////////////////////////////////////////////////////////////6HdebpaHl7iTpLvmuPvtQNnkHfNOPyEhahxbVnIB2d1" +], +"column_roots": [ +"AAAAAAAAAAAAAAAAAAAAAAAAABPYEuDlO9Dz69oAAAAAAAAAAAAAAAAAAAAAAAAAx5SEzwtkGj2xbESyOeamsjGWUBQdAQoiSl+rMtNMo1wEtfGQnFS/g+K+", +"AAAAAAAAAAAAAAAAAAAAAAAAAC3uK6nhCxHTfBwAAAAAAAAAAAAAAAAAAAAAAAAA1fxnqHyO6qV39pcUQ8MuTfJ7RBhbSVWf0aamUP27KRY0II55oJoY6Ng6", +"AAAAAAAAAAAAAAAAAAAAAAAAAC6DkYeeBY/kKvAAAAAAAAAAAAAAAAAAAAAAAAAA47rxk8hoCnWGM+CX47TlYWBeE2unvRhA/j3EvHdxeL1rFRkaYfAd5eg7", +"AAAAAAAAAAAAAAAAAAAAAAAAADHJJTdId/E+G/0AAAAAAAAAAAAAAAAAAAAAAAAA8PtvJpbDc4APKAk5QPSH59HECE2sf/CDLKAZJjWo9DD4sLXJQ4jTZoH6", +"/////////////////////////////////////////////////////////////////////////////4lKCT3K11RnNIuLNfY+SfDZCYAE2iW0hjQHIVBpoN0q", +"/////////////////////////////////////////////////////////////////////////////1NpYcgayEVenbFeEO5LJ1j1/1sD+PvZWHDv+jqT1dLR", +"/////////////////////////////////////////////////////////////////////////////8FOWVuCU0rTzUW9tP2R47RmTBvwXX8ycKrMhgKEi1xa", +"/////////////////////////////////////////////////////////////////////////////7K5SoZ3HF5QgPvIXpKSr9eT4Xfiokc3PUMmXE4pBDTf" +] +} \ No newline at end of file diff --git a/share/eds/testdata/example.car b/share/eds/testdata/example.car new file mode 100644 index 0000000000..4d33c0ef33 Binary files /dev/null and b/share/eds/testdata/example.car differ diff --git a/share/eds/utils.go b/share/eds/utils.go new file mode 100644 index 0000000000..b897dd14b5 --- /dev/null +++ b/share/eds/utils.go @@ -0,0 +1,152 @@ +package eds + +import ( + "context" + "errors" + "fmt" + "io" + + "github.com/filecoin-project/dagstore" + "github.com/ipfs/boxo/blockservice" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" + "golang.org/x/sync/errgroup" + + "github.com/celestiaorg/celestia-node/libs/utils" + "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/share/eds/cache" + "github.com/celestiaorg/celestia-node/share/ipld" +) + +// readCloser is a helper struct, that combines io.Reader and io.Closer +type readCloser struct { + io.Reader + io.Closer +} + +// BlockstoreCloser represents a blockstore that can also be closed. It combines the functionality +// of a dagstore.ReadBlockstore with that of an io.Closer. 
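+// Callers are responsible for calling Close once done, which releases the underlying shard
+// accessor.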
+type BlockstoreCloser struct {
+	dagstore.ReadBlockstore
+	io.Closer
+}
+
+func newReadCloser(ac cache.Accessor) io.ReadCloser {
+	return readCloser{
+		ac.Reader(),
+		ac,
+	}
+}
+
+// blockstoreCloser constructs new BlockstoreCloser from cache.Accessor
+func blockstoreCloser(ac cache.Accessor) (*BlockstoreCloser, error) {
+	bs, err := ac.Blockstore()
+	if err != nil {
+		return nil, fmt.Errorf("eds/store: failed to get blockstore: %w", err)
+	}
+	return &BlockstoreCloser{
+		ReadBlockstore: bs,
+		Closer:         ac,
+	}, nil
+}
+
+func closeAndLog(name string, closer io.Closer) {
+	if err := closer.Close(); err != nil {
+		log.Warnw("closing "+name, "err", err)
+	}
+}
+
+// RetrieveNamespaceFromStore gets all EDS shares in the given namespace from
+// the EDS store through the corresponding CAR-level blockstore. It is extracted
+// from the store getter to make it available for reuse in the shrexnd server.
+func RetrieveNamespaceFromStore(
+	ctx context.Context,
+	store *Store,
+	dah *share.Root,
+	namespace share.Namespace,
+) (shares share.NamespacedShares, err error) {
+	if err = namespace.ValidateForData(); err != nil {
+		return nil, err
+	}
+
+	bs, err := store.CARBlockstore(ctx, dah.Hash())
+	if errors.Is(err, ErrNotFound) {
+		// convert error to satisfy getter interface contract
+		err = share.ErrNotFound
+	}
+	if err != nil {
+		return nil, fmt.Errorf("failed to retrieve blockstore from eds store: %w", err)
+	}
+	defer func() {
+		if err := bs.Close(); err != nil {
+			log.Warnw("closing blockstore", "err", err)
+		}
+	}()
+
+	// wrap the read-only CAR blockstore in a getter
+	blockGetter := NewBlockGetter(bs)
+	shares, err = CollectSharesByNamespace(ctx, blockGetter, dah, namespace)
+	if errors.Is(err, ipld.ErrNodeNotFound) {
+		// An IPLD node missing after the index pointed to this shard, while the CAR
+		// blockstore opened successfully, is a strong indicator of corruption. We
+		// remove the block on bridges and fulls and return share.ErrNotFound to
+		// ensure the data is retrieved by the next getter. Note that this recovery
+		// is manual and will only be restored by an RPC call to SharesAvailable
+		// that fetches the same datahash that was removed.
+		err = store.Remove(ctx, dah.Hash())
+		if err != nil {
+			log.Errorf("failed to remove CAR from store after detected corruption: %v", err)
+		}
+		err = share.ErrNotFound
+	}
+	if err != nil {
+		return nil, fmt.Errorf("failed to retrieve shares by namespace from store: %w", err)
+	}
+
+	return shares, nil
+}
+
+// CollectSharesByNamespace collects NamespacedShares within the given namespace from share.Root.
+func CollectSharesByNamespace( + ctx context.Context, + bg blockservice.BlockGetter, + root *share.Root, + namespace share.Namespace, +) (shares share.NamespacedShares, err error) { + ctx, span := tracer.Start(ctx, "collect-shares-by-namespace", trace.WithAttributes( + attribute.String("namespace", namespace.String()), + )) + defer func() { + utils.SetStatusAndEnd(span, err) + }() + + rootCIDs := ipld.FilterRootByNamespace(root, namespace) + if len(rootCIDs) == 0 { + return []share.NamespacedRow{}, nil + } + + errGroup, ctx := errgroup.WithContext(ctx) + shares = make([]share.NamespacedRow, len(rootCIDs)) + for i, rootCID := range rootCIDs { + // shadow loop variables, to ensure correct values are captured + i, rootCID := i, rootCID + errGroup.Go(func() error { + row, proof, err := ipld.GetSharesByNamespace(ctx, bg, rootCID, namespace, len(root.RowRoots)) + shares[i] = share.NamespacedRow{ + Shares: row, + Proof: proof, + } + if err != nil { + return fmt.Errorf("retrieving shares by namespace %s for row %x: %w", namespace.String(), rootCID, err) + } + return nil + }) + } + + if err := errGroup.Wait(); err != nil { + return nil, err + } + + return shares, nil +} diff --git a/share/empty.go b/share/empty.go new file mode 100644 index 0000000000..ef3d088e1d --- /dev/null +++ b/share/empty.go @@ -0,0 +1,69 @@ +package share + +import ( + "bytes" + "fmt" + "sync" + + "github.com/celestiaorg/celestia-app/pkg/appconsts" + "github.com/celestiaorg/celestia-app/pkg/da" + "github.com/celestiaorg/celestia-app/pkg/shares" + "github.com/celestiaorg/rsmt2d" +) + +// EmptyRoot returns Root of the empty block EDS. +func EmptyRoot() *Root { + initEmpty() + return emptyBlockRoot +} + +// EmptyExtendedDataSquare returns the EDS of the empty block data square. +func EmptyExtendedDataSquare() *rsmt2d.ExtendedDataSquare { + initEmpty() + return emptyBlockEDS +} + +// EmptyBlockShares returns the shares of the empty block. +func EmptyBlockShares() []Share { + initEmpty() + return emptyBlockShares +} + +var ( + emptyMu sync.Mutex + emptyBlockRoot *Root + emptyBlockEDS *rsmt2d.ExtendedDataSquare + emptyBlockShares []Share +) + +// initEmpty enables lazy initialization for constant empty block data. +func initEmpty() { + emptyMu.Lock() + defer emptyMu.Unlock() + if emptyBlockRoot != nil { + return + } + + // compute empty block EDS and DAH for it + result := shares.TailPaddingShares(appconsts.MinShareCount) + emptyBlockShares = shares.ToBytes(result) + + eds, err := da.ExtendShares(emptyBlockShares) + if err != nil { + panic(fmt.Errorf("failed to create empty EDS: %w", err)) + } + emptyBlockEDS = eds + + emptyBlockRoot, err = NewRoot(eds) + if err != nil { + panic(fmt.Errorf("failed to create empty DAH: %w", err)) + } + minDAH := da.MinDataAvailabilityHeader() + if !bytes.Equal(minDAH.Hash(), emptyBlockRoot.Hash()) { + panic(fmt.Sprintf("mismatch in calculated minimum DAH and minimum DAH from celestia-app, "+ + "expected %s, got %s", minDAH.String(), emptyBlockRoot.String())) + } + + // precompute Hash, so it's cached internally to avoid potential races + emptyBlockRoot.Hash() +} diff --git a/share/getter.go b/share/getter.go new file mode 100644 index 0000000000..3fcc93de33 --- /dev/null +++ b/share/getter.go @@ -0,0 +1,98 @@ +package share + +import ( + "context" + "crypto/sha256" + "errors" + "fmt" + + "github.com/celestiaorg/nmt" + "github.com/celestiaorg/rsmt2d" + + "github.com/celestiaorg/celestia-node/header" +) + +var ( + // ErrNotFound is used to indicate that requested data could not be found. 
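+	// Getter implementations convert their internal not-found errors to this sentinel, so
+	// callers can match on it with errors.Is.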
+	ErrNotFound = errors.New("share: data not found")
+	// ErrOutOfBounds is used to indicate that a passed row or column index is out of bounds of the
+	// square size.
+	ErrOutOfBounds = errors.New("share: row or column index is larger than square size")
+)
+
+// Getter interface provides a set of accessors for shares by the Root.
+// Automatically verifies integrity of shares (exceptions are possible depending on the
+// implementation).
+//
+//go:generate mockgen -destination=mocks/getter.go -package=mocks . Getter
+type Getter interface {
+	// GetShare gets a Share by coordinates in EDS.
+	GetShare(ctx context.Context, header *header.ExtendedHeader, row, col int) (Share, error)
+
+	// GetEDS gets the full EDS identified by the given extended header.
+	GetEDS(context.Context, *header.ExtendedHeader) (*rsmt2d.ExtendedDataSquare, error)
+
+	// GetSharesByNamespace gets all shares from an EDS within the given namespace.
+	// Shares are returned in a row-by-row order if the namespace spans multiple rows.
+	// Inclusion of returned data can be verified using the Verify method on NamespacedShares.
+	// If no shares are found for the target namespace, non-inclusion can also be verified by
+	// calling the Verify method.
+	GetSharesByNamespace(context.Context, *header.ExtendedHeader, Namespace) (NamespacedShares, error)
+}
+
+// NamespacedShares represents all shares with proofs within a specific namespace of an EDS.
+type NamespacedShares []NamespacedRow
+
+// Flatten returns the concatenated slice of all NamespacedRow shares.
+func (ns NamespacedShares) Flatten() []Share {
+	shares := make([]Share, 0)
+	for _, row := range ns {
+		shares = append(shares, row.Shares...)
+	}
+	return shares
+}
+
+// NamespacedRow represents all shares with proofs within a specific namespace of a single EDS row.
+type NamespacedRow struct {
+	Shares []Share    `json:"shares"`
+	Proof  *nmt.Proof `json:"proof"`
+}
+
+// Verify validates NamespacedShares by checking every row with nmt inclusion proof.
+func (ns NamespacedShares) Verify(root *Root, namespace Namespace) error {
+	var originalRoots [][]byte
+	for _, row := range root.RowRoots {
+		if !namespace.IsOutsideRange(row, row) {
+			originalRoots = append(originalRoots, row)
+		}
+	}
+
+	if len(originalRoots) != len(ns) {
+		return fmt.Errorf("number of rows differs between root and namespace shares: expected %d, got %d",
+			len(originalRoots), len(ns))
+	}
+
+	for i, row := range ns {
+		// verify row data against row hash from original root
+		if !row.verify(originalRoots[i], namespace) {
+			return fmt.Errorf("row verification failed: row %d doesn't match original root: %s", i, root.String())
+		}
+	}
+	return nil
+}
+
+// verify validates the row using nmt inclusion proof.
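+// Leaves are rebuilt by prepending each share's namespace to the share bytes, mirroring how the
+// NMT hashes its leaves.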
+func (row *NamespacedRow) verify(rowRoot []byte, namespace Namespace) bool {
+	// construct nmt leaves from shares by prepending namespace
+	leaves := make([][]byte, 0, len(row.Shares))
+	for _, shr := range row.Shares {
+		leaves = append(leaves, append(GetNamespace(shr), shr...))
+	}
+
+	// verify namespace
+	return row.Proof.VerifyNamespace(
+		sha256.New(),
+		namespace.ToNMT(),
+		leaves,
+		rowRoot,
+	)
+}
diff --git a/share/getters/cascade.go b/share/getters/cascade.go
new file mode 100644
index 0000000000..3875127580
--- /dev/null
+++ b/share/getters/cascade.go
@@ -0,0 +1,153 @@
+package getters
+
+import (
+	"context"
+	"errors"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/trace"
+
+	"github.com/celestiaorg/rsmt2d"
+
+	"github.com/celestiaorg/celestia-node/header"
+	"github.com/celestiaorg/celestia-node/libs/utils"
+	"github.com/celestiaorg/celestia-node/share"
+	"github.com/celestiaorg/celestia-node/share/eds/byzantine"
+)
+
+var _ share.Getter = (*CascadeGetter)(nil)
+
+// CascadeGetter implements custom share.Getter that composes multiple Getter implementations in
+// "cascading" order.
+//
+// See the cascadeGetters func for details on cascading.
+type CascadeGetter struct {
+	getters []share.Getter
+}
+
+// NewCascadeGetter instantiates a new CascadeGetter from the given share.Getters.
+func NewCascadeGetter(getters []share.Getter) *CascadeGetter {
+	return &CascadeGetter{
+		getters: getters,
+	}
+}
+
+// GetShare gets a share from any of registered share.Getters in cascading order.
+func (cg *CascadeGetter) GetShare(
+	ctx context.Context, header *header.ExtendedHeader, row, col int,
+) (share.Share, error) {
+	ctx, span := tracer.Start(ctx, "cascade/get-share", trace.WithAttributes(
+		attribute.Int("row", row),
+		attribute.Int("col", col),
+	))
+	defer span.End()
+
+	upperBound := len(header.DAH.RowRoots)
+	if row >= upperBound || col >= upperBound {
+		err := share.ErrOutOfBounds
+		span.RecordError(err)
+		return nil, err
+	}
+	get := func(ctx context.Context, get share.Getter) (share.Share, error) {
+		return get.GetShare(ctx, header, row, col)
+	}
+
+	return cascadeGetters(ctx, cg.getters, get)
+}
+
+// GetEDS gets a full EDS from any of registered share.Getters in cascading order.
+func (cg *CascadeGetter) GetEDS(
+	ctx context.Context, header *header.ExtendedHeader,
+) (*rsmt2d.ExtendedDataSquare, error) {
+	ctx, span := tracer.Start(ctx, "cascade/get-eds")
+	defer span.End()
+
+	get := func(ctx context.Context, get share.Getter) (*rsmt2d.ExtendedDataSquare, error) {
+		return get.GetEDS(ctx, header)
+	}
+
+	return cascadeGetters(ctx, cg.getters, get)
+}
+
+// GetSharesByNamespace gets NamespacedShares from any of registered share.Getters in cascading
+// order.
+func (cg *CascadeGetter) GetSharesByNamespace(
+	ctx context.Context,
+	header *header.ExtendedHeader,
+	namespace share.Namespace,
+) (share.NamespacedShares, error) {
+	ctx, span := tracer.Start(ctx, "cascade/get-shares-by-namespace", trace.WithAttributes(
+		attribute.String("namespace", namespace.String()),
+	))
+	defer span.End()
+
+	get := func(ctx context.Context, get share.Getter) (share.NamespacedShares, error) {
+		return get.GetSharesByNamespace(ctx, header, namespace)
+	}
+
+	return cascadeGetters(ctx, cg.getters, get)
+}
+
+// cascadeGetters implements a cascading retry algorithm for getting a value from multiple sources.
+// Cascading implies trying the sources one-by-one in the given order until either:
+//   - One of the sources returns the value
+//   - All of the sources fail
+//   - Context is canceled
+//
+// NOTE: Getters are tried strictly sequentially, so a new source attempt starts only after the
+// previous one has returned.
+func cascadeGetters[V any](
+	ctx context.Context,
+	getters []share.Getter,
+	get func(context.Context, share.Getter) (V, error),
+) (V, error) {
+	var (
+		zero V
+		err  error
+	)
+
+	if len(getters) == 0 {
+		return zero, errors.New("no getters provided")
+	}
+
+	ctx, span := tracer.Start(ctx, "cascade", trace.WithAttributes(
+		attribute.Int("total-getters", len(getters)),
+	))
+	defer func() {
+		if err != nil {
+			utils.SetStatusAndEnd(span, errors.New("all getters failed"))
+		}
+	}()
+
+	for i, getter := range getters {
+		log.Debugf("cascade: launching getter #%d", i)
+		span.AddEvent("getter launched", trace.WithAttributes(attribute.Int("getter_idx", i)))
+
+		// we split the remaining timeout between the getters left to try;
+		// once an async CascadeGetter is implemented, we can remove this
+		getCtx, cancel := ctxWithSplitTimeout(ctx, len(getters)-i, 0)
+		val, getErr := get(getCtx, getter)
+		cancel()
+		if getErr == nil {
+			return val, nil
+		}
+
+		if errors.Is(getErr, errOperationNotSupported) {
+			continue
+		}
+
+		span.RecordError(getErr, trace.WithAttributes(attribute.Int("getter_idx", i)))
+		var byzantineErr *byzantine.ErrByzantine
+		if errors.As(getErr, &byzantineErr) {
+			// short circuit if byzantine error was detected (to be able to handle it correctly
+			// and create the BEFP)
+			return zero, byzantineErr
+		}
+
+		err = errors.Join(err, getErr)
+		if ctx.Err() != nil {
+			return zero, err
+		}
+	}
+	return zero, err
+}
diff --git a/share/getters/cascade_test.go b/share/getters/cascade_test.go
new file mode 100644
index 0000000000..d2b44883a1
--- /dev/null
+++ b/share/getters/cascade_test.go
@@ -0,0 +1,119 @@
+package getters
+
+import (
+	"context"
+	"errors"
+	"strings"
+	"testing"
+
+	"github.com/golang/mock/gomock"
+	"github.com/stretchr/testify/assert"
+
+	"github.com/celestiaorg/rsmt2d"
+
+	"github.com/celestiaorg/celestia-node/header"
+	"github.com/celestiaorg/celestia-node/share"
+	"github.com/celestiaorg/celestia-node/share/mocks"
+)
+
+func TestCascadeGetter(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	t.Cleanup(cancel)
+
+	const gettersN = 3
+	headers := make([]*header.ExtendedHeader, gettersN)
+	getters := make([]share.Getter, gettersN)
+	for i := range headers {
+		getters[i], headers[i] = TestGetter(t)
+	}
+
+	getter := NewCascadeGetter(getters)
+	t.Run("GetShare", func(t *testing.T) {
+		for _, eh := range headers {
+			sh, err := getter.GetShare(ctx, eh, 0, 0)
+			assert.NoError(t, err)
+			assert.NotEmpty(t, sh)
+		}
+	})
+
+	t.Run("GetEDS", func(t *testing.T) {
+		for _, eh := range headers {
+			sh, err := getter.GetEDS(ctx, eh)
+			assert.NoError(t, err)
+			assert.NotEmpty(t, sh)
+		}
+	})
+}
+
+func TestCascade(t *testing.T) {
+	ctrl := gomock.NewController(t)
+	ctx, cancel := context.WithCancel(context.Background())
+	t.Cleanup(cancel)
+
+	timeoutGetter := mocks.NewMockGetter(ctrl)
+	immediateFailGetter := mocks.NewMockGetter(ctrl)
+	successGetter := mocks.NewMockGetter(ctrl)
+	ctxGetter := mocks.NewMockGetter(ctrl)
+	timeoutGetter.EXPECT().GetEDS(gomock.Any(), gomock.Any()).
+ DoAndReturn(func(ctx context.Context, _ *header.ExtendedHeader) (*rsmt2d.ExtendedDataSquare, error) { + return nil, context.DeadlineExceeded + }).AnyTimes() + immediateFailGetter.EXPECT().GetEDS(gomock.Any(), gomock.Any()). + Return(nil, errors.New("second getter fails immediately")).AnyTimes() + successGetter.EXPECT().GetEDS(gomock.Any(), gomock.Any()). + Return(nil, nil).AnyTimes() + ctxGetter.EXPECT().GetEDS(gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, _ *header.ExtendedHeader) (*rsmt2d.ExtendedDataSquare, error) { + return nil, ctx.Err() + }).AnyTimes() + + get := func(ctx context.Context, get share.Getter) (*rsmt2d.ExtendedDataSquare, error) { + return get.GetEDS(ctx, nil) + } + + t.Run("SuccessFirst", func(t *testing.T) { + getters := []share.Getter{successGetter, timeoutGetter, immediateFailGetter} + _, err := cascadeGetters(ctx, getters, get) + assert.NoError(t, err) + }) + + t.Run("SuccessSecond", func(t *testing.T) { + getters := []share.Getter{immediateFailGetter, successGetter} + _, err := cascadeGetters(ctx, getters, get) + assert.NoError(t, err) + }) + + t.Run("SuccessSecondAfterFirst", func(t *testing.T) { + getters := []share.Getter{timeoutGetter, successGetter} + _, err := cascadeGetters(ctx, getters, get) + assert.NoError(t, err) + }) + + t.Run("SuccessAfterMultipleTimeouts", func(t *testing.T) { + getters := []share.Getter{timeoutGetter, immediateFailGetter, timeoutGetter, timeoutGetter, successGetter} + _, err := cascadeGetters(ctx, getters, get) + assert.NoError(t, err) + }) + + t.Run("Error", func(t *testing.T) { + getters := []share.Getter{immediateFailGetter, timeoutGetter, immediateFailGetter} + _, err := cascadeGetters(ctx, getters, get) + assert.Error(t, err) + assert.Equal(t, strings.Count(err.Error(), "\n"), 2) + }) + + t.Run("Context Canceled", func(t *testing.T) { + ctx, cancel := context.WithCancel(ctx) + cancel() + getters := []share.Getter{ctxGetter, ctxGetter, ctxGetter} + _, err := cascadeGetters(ctx, getters, get) + assert.Error(t, err) + assert.Equal(t, strings.Count(err.Error(), "\n"), 0) + }) + + t.Run("Single", func(t *testing.T) { + getters := []share.Getter{successGetter} + _, err := cascadeGetters(ctx, getters, get) + assert.NoError(t, err) + }) +} diff --git a/share/getters/getter_test.go b/share/getters/getter_test.go new file mode 100644 index 0000000000..7297766652 --- /dev/null +++ b/share/getters/getter_test.go @@ -0,0 +1,354 @@ +package getters + +import ( + "context" + "os" + "sync" + "testing" + "time" + + "github.com/ipfs/boxo/exchange/offline" + "github.com/ipfs/go-datastore" + ds_sync "github.com/ipfs/go-datastore/sync" + dsbadger "github.com/ipfs/go-ds-badger4" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/celestiaorg/celestia-app/pkg/da" + "github.com/celestiaorg/celestia-app/pkg/wrapper" + "github.com/celestiaorg/rsmt2d" + + "github.com/celestiaorg/celestia-node/header" + "github.com/celestiaorg/celestia-node/header/headertest" + "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/share/eds" + "github.com/celestiaorg/celestia-node/share/eds/edstest" + "github.com/celestiaorg/celestia-node/share/ipld" + "github.com/celestiaorg/celestia-node/share/sharetest" +) + +func TestStoreGetter(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + tmpDir := t.TempDir() + storeCfg := eds.DefaultParameters() + ds := ds_sync.MutexWrap(datastore.NewMapDatastore()) + edsStore, err := 
eds.NewStore(storeCfg, tmpDir, ds)
+	require.NoError(t, err)
+
+	err = edsStore.Start(ctx)
+	require.NoError(t, err)
+
+	sg := NewStoreGetter(edsStore)
+
+	t.Run("GetShare", func(t *testing.T) {
+		randEds, eh := randomEDS(t)
+		err = edsStore.Put(ctx, eh.DAH.Hash(), randEds)
+		require.NoError(t, err)
+
+		squareSize := int(randEds.Width())
+		for i := 0; i < squareSize; i++ {
+			for j := 0; j < squareSize; j++ {
+				share, err := sg.GetShare(ctx, eh, i, j)
+				require.NoError(t, err)
+				assert.Equal(t, randEds.GetCell(uint(i), uint(j)), share)
+			}
+		}
+
+		// doesn't panic on indexes too high
+		_, err := sg.GetShare(ctx, eh, squareSize, squareSize)
+		require.ErrorIs(t, err, share.ErrOutOfBounds)
+
+		// root not found
+		_, eh = randomEDS(t)
+		_, err = sg.GetShare(ctx, eh, 0, 0)
+		require.ErrorIs(t, err, share.ErrNotFound)
+	})
+
+	t.Run("GetEDS", func(t *testing.T) {
+		randEds, eh := randomEDS(t)
+		err = edsStore.Put(ctx, eh.DAH.Hash(), randEds)
+		require.NoError(t, err)
+
+		retrievedEDS, err := sg.GetEDS(ctx, eh)
+		require.NoError(t, err)
+		assert.True(t, randEds.Equals(retrievedEDS))
+
+		// root not found
+		emptyRoot := da.MinDataAvailabilityHeader()
+		eh.DAH = &emptyRoot
+		_, err = sg.GetEDS(ctx, eh)
+		require.ErrorIs(t, err, share.ErrNotFound)
+	})
+
+	t.Run("GetSharesByNamespace", func(t *testing.T) {
+		randEds, namespace, eh := randomEDSWithDoubledNamespace(t, 4)
+		err = edsStore.Put(ctx, eh.DAH.Hash(), randEds)
+		require.NoError(t, err)
+
+		shares, err := sg.GetSharesByNamespace(ctx, eh, namespace)
+		require.NoError(t, err)
+		require.NoError(t, shares.Verify(eh.DAH, namespace))
+		assert.Len(t, shares.Flatten(), 2)
+
+		// namespace not found
+		randNamespace := sharetest.RandV0Namespace()
+		emptyShares, err := sg.GetSharesByNamespace(ctx, eh, randNamespace)
+		require.NoError(t, err)
+		require.Empty(t, emptyShares.Flatten())
+
+		// root not found
+		emptyRoot := da.MinDataAvailabilityHeader()
+		eh.DAH = &emptyRoot
+		_, err = sg.GetSharesByNamespace(ctx, eh, namespace)
+		require.ErrorIs(t, err, share.ErrNotFound)
+	})
+
+	t.Run("GetSharesFromNamespace removes corrupted shard", func(t *testing.T) {
+		randEds, namespace, eh := randomEDSWithDoubledNamespace(t, 4)
+		err = edsStore.Put(ctx, eh.DAH.Hash(), randEds)
+		require.NoError(t, err)
+
+		// available
+		shares, err := sg.GetSharesByNamespace(ctx, eh, namespace)
+		require.NoError(t, err)
+		require.NoError(t, shares.Verify(eh.DAH, namespace))
+		assert.Len(t, shares.Flatten(), 2)
+
+		// 'corrupt' existing CAR by overwriting with a random EDS; keep eh pointing at the
+		// originally stored root so the corrupted CAR is what gets read back
+		f, err := os.OpenFile(tmpDir+"/blocks/"+eh.DAH.String(), os.O_WRONLY, 0644)
+		require.NoError(t, err)
+		edsToOverwriteWith, _ := randomEDS(t)
+		err = eds.WriteEDS(ctx, edsToOverwriteWith, f)
+		require.NoError(t, err)
+
+		shares, err = sg.GetSharesByNamespace(ctx, eh, namespace)
+		require.ErrorIs(t, err, share.ErrNotFound)
+		require.Nil(t, shares)
+
+		// corruption detected, shard is removed
+		// try every 200ms until it passes or the context ends
+		ticker := time.NewTicker(200 * time.Millisecond)
+		defer ticker.Stop()
+		for {
+			select {
+			case <-ctx.Done():
+				t.Fatal("context ended before successful retrieval")
+			case <-ticker.C:
+				has, err := edsStore.Has(ctx, eh.DAH.Hash())
+				if err != nil {
+					t.Fatal(err)
+				}
+				if !has {
+					require.NoError(t, err)
+					return
+				}
+			}
+		}
+	})
+}
+
+func TestIPLDGetter(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	t.Cleanup(cancel)
+
+	storeCfg := eds.DefaultParameters()
+	ds := ds_sync.MutexWrap(datastore.NewMapDatastore())
+	edsStore, err :=
eds.NewStore(storeCfg, t.TempDir(), ds) + require.NoError(t, err) + + err = edsStore.Start(ctx) + require.NoError(t, err) + + bStore := edsStore.Blockstore() + bserv := ipld.NewBlockservice(bStore, offline.Exchange(edsStore.Blockstore())) + sg := NewIPLDGetter(bserv) + + t.Run("GetShare", func(t *testing.T) { + ctx, cancel := context.WithTimeout(ctx, time.Second) + t.Cleanup(cancel) + + randEds, eh := randomEDS(t) + err = edsStore.Put(ctx, eh.DAH.Hash(), randEds) + require.NoError(t, err) + + squareSize := int(randEds.Width()) + for i := 0; i < squareSize; i++ { + for j := 0; j < squareSize; j++ { + share, err := sg.GetShare(ctx, eh, i, j) + require.NoError(t, err) + assert.Equal(t, randEds.GetCell(uint(i), uint(j)), share) + } + } + + // doesn't panic on indexes too high + _, err := sg.GetShare(ctx, eh, squareSize+1, squareSize+1) + require.ErrorIs(t, err, share.ErrOutOfBounds) + + // root not found + _, eh = randomEDS(t) + _, err = sg.GetShare(ctx, eh, 0, 0) + require.ErrorIs(t, err, share.ErrNotFound) + }) + + t.Run("GetEDS", func(t *testing.T) { + ctx, cancel := context.WithTimeout(ctx, time.Second) + t.Cleanup(cancel) + + randEds, eh := randomEDS(t) + err = edsStore.Put(ctx, eh.DAH.Hash(), randEds) + require.NoError(t, err) + + retrievedEDS, err := sg.GetEDS(ctx, eh) + require.NoError(t, err) + assert.True(t, randEds.Equals(retrievedEDS)) + + // Ensure blocks still exist after cleanup + colRoots, _ := retrievedEDS.ColRoots() + has, err := bStore.Has(ctx, ipld.MustCidFromNamespacedSha256(colRoots[0])) + assert.NoError(t, err) + assert.True(t, has) + }) + + t.Run("GetSharesByNamespace", func(t *testing.T) { + ctx, cancel := context.WithTimeout(ctx, time.Second) + t.Cleanup(cancel) + + randEds, namespace, eh := randomEDSWithDoubledNamespace(t, 4) + err = edsStore.Put(ctx, eh.DAH.Hash(), randEds) + require.NoError(t, err) + + // first check that shares are returned correctly if they exist + shares, err := sg.GetSharesByNamespace(ctx, eh, namespace) + require.NoError(t, err) + require.NoError(t, shares.Verify(eh.DAH, namespace)) + assert.Len(t, shares.Flatten(), 2) + + // namespace not found + randNamespace := sharetest.RandV0Namespace() + emptyShares, err := sg.GetSharesByNamespace(ctx, eh, randNamespace) + require.NoError(t, err) + require.Empty(t, emptyShares.Flatten()) + + // nid doesn't exist in root + emptyRoot := da.MinDataAvailabilityHeader() + eh.DAH = &emptyRoot + emptyShares, err = sg.GetSharesByNamespace(ctx, eh, namespace) + require.NoError(t, err) + require.Empty(t, emptyShares.Flatten()) + }) +} + +// BenchmarkIPLDGetterOverBusyCache benchmarks the performance of the IPLDGetter when the +// cache size of the underlying blockstore is less than the number of blocks being requested in +// parallel. This is to ensure performance doesn't degrade when the cache is being frequently +// evicted. 
+// BenchmarkIPLDGetterOverBusyCache-10/128 1 12460428417 ns/op (~12s) +func BenchmarkIPLDGetterOverBusyCache(b *testing.B) { + const ( + blocks = 10 + size = 128 + ) + + ctx, cancel := context.WithTimeout(context.Background(), time.Second*30) + b.Cleanup(cancel) + + dir := b.TempDir() + ds, err := dsbadger.NewDatastore(dir, &dsbadger.DefaultOptions) + require.NoError(b, err) + + newStore := func(params *eds.Parameters) *eds.Store { + edsStore, err := eds.NewStore(params, dir, ds) + require.NoError(b, err) + err = edsStore.Start(ctx) + require.NoError(b, err) + return edsStore + } + edsStore := newStore(eds.DefaultParameters()) + + // generate EDSs and store them + headers := make([]*header.ExtendedHeader, blocks) + for i := range headers { + eds := edstest.RandEDS(b, size) + dah, err := da.NewDataAvailabilityHeader(eds) + require.NoError(b, err) + err = edsStore.Put(ctx, dah.Hash(), eds) + require.NoError(b, err) + + eh := headertest.RandExtendedHeader(b) + eh.DAH = &dah + + // store cids for read loop later + headers[i] = eh + } + + // restart store to clear cache + require.NoError(b, edsStore.Stop(ctx)) + + // set BlockstoreCacheSize to 1 to force eviction on every read + params := eds.DefaultParameters() + params.BlockstoreCacheSize = 1 + edsStore = newStore(params) + bstore := edsStore.Blockstore() + bserv := ipld.NewBlockservice(bstore, offline.Exchange(bstore)) + + // start client + getter := NewIPLDGetter(bserv) + + // request blocks in parallel + b.ResetTimer() + g := sync.WaitGroup{} + g.Add(blocks) + for _, h := range headers { + h := h + go func() { + defer g.Done() + _, err := getter.GetEDS(ctx, h) + require.NoError(b, err) + }() + } + g.Wait() +} + +func randomEDS(t *testing.T) (*rsmt2d.ExtendedDataSquare, *header.ExtendedHeader) { + eds := edstest.RandEDS(t, 4) + dah, err := share.NewRoot(eds) + require.NoError(t, err) + eh := headertest.RandExtendedHeaderWithRoot(t, dah) + return eds, eh +} + +// randomEDSWithDoubledNamespace generates a random EDS and ensures that there are two shares in the +// middle that share a namespace. +func randomEDSWithDoubledNamespace( + t *testing.T, + size int, +) (*rsmt2d.ExtendedDataSquare, []byte, *header.ExtendedHeader) { + n := size * size + randShares := sharetest.RandShares(t, n) + idx1 := (n - 1) / 2 + idx2 := n / 2 + + // Make it so that the two shares in two different rows have a common + // namespace. For example if size=4, the original data square looks like + // this: + // _ _ _ _ + // _ _ _ D + // D _ _ _ + // _ _ _ _ + // where the D shares have a common namespace. 
+ copy(share.GetNamespace(randShares[idx2]), share.GetNamespace(randShares[idx1])) + + eds, err := rsmt2d.ComputeExtendedDataSquare( + randShares, + share.DefaultRSMT2DCodec(), + wrapper.NewConstructor(uint64(size)), + ) + require.NoError(t, err, "failure to recompute the extended data square") + dah, err := share.NewRoot(eds) + require.NoError(t, err) + eh := headertest.RandExtendedHeaderWithRoot(t, dah) + + return eds, share.GetNamespace(randShares[idx1]), eh +} diff --git a/share/getters/ipld.go b/share/getters/ipld.go new file mode 100644 index 0000000000..e9c930248d --- /dev/null +++ b/share/getters/ipld.go @@ -0,0 +1,165 @@ +package getters + +import ( + "context" + "errors" + "fmt" + "sync" + "sync/atomic" + + "github.com/ipfs/boxo/blockservice" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" + + "github.com/celestiaorg/rsmt2d" + + "github.com/celestiaorg/celestia-node/header" + "github.com/celestiaorg/celestia-node/libs/utils" + "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/share/eds" + "github.com/celestiaorg/celestia-node/share/eds/byzantine" + "github.com/celestiaorg/celestia-node/share/ipld" +) + +var _ share.Getter = (*IPLDGetter)(nil) + +// IPLDGetter is a share.Getter that retrieves shares from the bitswap network. Result caching is +// handled by the provided blockservice. A blockservice session will be created for retrieval if the +// passed context is wrapped with WithSession. +type IPLDGetter struct { + rtrv *eds.Retriever + bServ blockservice.BlockService +} + +// NewIPLDGetter creates a new share.Getter that retrieves shares from the bitswap network. +func NewIPLDGetter(bServ blockservice.BlockService) *IPLDGetter { + return &IPLDGetter{ + rtrv: eds.NewRetriever(bServ), + bServ: bServ, + } +} + +// GetShare gets a single share at the given EDS coordinates from the bitswap network. +func (ig *IPLDGetter) GetShare(ctx context.Context, header *header.ExtendedHeader, row, col int) (share.Share, error) { + var err error + ctx, span := tracer.Start(ctx, "ipld/get-share", trace.WithAttributes( + attribute.Int("row", row), + attribute.Int("col", col), + )) + defer func() { + utils.SetStatusAndEnd(span, err) + }() + + dah := header.DAH + upperBound := len(dah.RowRoots) + if row >= upperBound || col >= upperBound { + err := share.ErrOutOfBounds + span.RecordError(err) + return nil, err + } + root, leaf := ipld.Translate(dah, row, col) + + // wrap the blockservice in a session if it has been signaled in the context. 
+ blockGetter := getGetter(ctx, ig.bServ) + s, err := ipld.GetShare(ctx, blockGetter, root, leaf, len(dah.RowRoots)) + if errors.Is(err, ipld.ErrNodeNotFound) { + // convert error to satisfy getter interface contract + err = share.ErrNotFound + } + if err != nil { + return nil, fmt.Errorf("getter/ipld: failed to retrieve share: %w", err) + } + + return s, nil +} + +func (ig *IPLDGetter) GetEDS( + ctx context.Context, + header *header.ExtendedHeader, +) (eds *rsmt2d.ExtendedDataSquare, err error) { + ctx, span := tracer.Start(ctx, "ipld/get-eds") + defer func() { + utils.SetStatusAndEnd(span, err) + }() + + // rtrv.Retrieve calls shares.GetShares until enough shares are retrieved to reconstruct the EDS + eds, err = ig.rtrv.Retrieve(ctx, header.DAH) + if errors.Is(err, ipld.ErrNodeNotFound) { + // convert error to satisfy getter interface contract + err = share.ErrNotFound + } + var errByz *byzantine.ErrByzantine + if errors.As(err, &errByz) { + return nil, err + } + if err != nil { + return nil, fmt.Errorf("getter/ipld: failed to retrieve eds: %w", err) + } + return eds, nil +} + +func (ig *IPLDGetter) GetSharesByNamespace( + ctx context.Context, + header *header.ExtendedHeader, + namespace share.Namespace, +) (shares share.NamespacedShares, err error) { + ctx, span := tracer.Start(ctx, "ipld/get-shares-by-namespace", trace.WithAttributes( + attribute.String("namespace", namespace.String()), + )) + defer func() { + utils.SetStatusAndEnd(span, err) + }() + + if err = namespace.ValidateForData(); err != nil { + return nil, err + } + + // wrap the blockservice in a session if it has been signaled in the context. + blockGetter := getGetter(ctx, ig.bServ) + shares, err = eds.CollectSharesByNamespace(ctx, blockGetter, header.DAH, namespace) + if errors.Is(err, ipld.ErrNodeNotFound) { + // convert error to satisfy getter interface contract + err = share.ErrNotFound + } + if err != nil { + return nil, fmt.Errorf("getter/ipld: failed to retrieve shares by namespace: %w", err) + } + return shares, nil +} + +var sessionKey = &session{} + +// session is a struct that can optionally be passed by context to the share.Getter methods using +// WithSession to indicate that a blockservice session should be created. +type session struct { + sync.Mutex + atomic.Pointer[blockservice.Session] + ctx context.Context +} + +// WithSession stores an empty session in the context, indicating that a blockservice session should +// be created. 
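+//
+// A minimal usage sketch (hypothetical caller; getter is assumed to be an
+// *IPLDGetter built with NewIPLDGetter):
+//
+//	ctx = WithSession(ctx)
+//	shr, err := getter.GetShare(ctx, eh, 0, 0) // subsequent fetches reuse one blockservice session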
+func WithSession(ctx context.Context) context.Context { + return context.WithValue(ctx, sessionKey, &session{ctx: ctx}) +} + +func getGetter(ctx context.Context, service blockservice.BlockService) blockservice.BlockGetter { + s, ok := ctx.Value(sessionKey).(*session) + if !ok { + return service + } + + val := s.Load() + if val != nil { + return val + } + + s.Lock() + defer s.Unlock() + val = s.Load() + if val == nil { + val = blockservice.NewSession(s.ctx, service) + s.Store(val) + } + return val +} diff --git a/share/getters/shrex.go b/share/getters/shrex.go new file mode 100644 index 0000000000..826c6b1a10 --- /dev/null +++ b/share/getters/shrex.go @@ -0,0 +1,270 @@ +package getters + +import ( + "context" + "errors" + "fmt" + "time" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/trace" + + "github.com/celestiaorg/rsmt2d" + + "github.com/celestiaorg/celestia-node/header" + "github.com/celestiaorg/celestia-node/libs/utils" + "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/share/ipld" + "github.com/celestiaorg/celestia-node/share/p2p" + "github.com/celestiaorg/celestia-node/share/p2p/peers" + "github.com/celestiaorg/celestia-node/share/p2p/shrexeds" + "github.com/celestiaorg/celestia-node/share/p2p/shrexnd" +) + +var _ share.Getter = (*ShrexGetter)(nil) + +const ( + // defaultMinRequestTimeout value is set according to observed time taken by healthy peer to + // serve getEDS request for block size 256 + defaultMinRequestTimeout = time.Minute // should be >= shrexeds server write timeout + defaultMinAttemptsCount = 3 +) + +var meter = otel.Meter("shrex/getter") + +type metrics struct { + edsAttempts metric.Int64Histogram + ndAttempts metric.Int64Histogram +} + +func (m *metrics) recordEDSAttempt(ctx context.Context, attemptCount int, success bool) { + if m == nil { + return + } + ctx = utils.ResetContextOnError(ctx) + m.edsAttempts.Record(ctx, int64(attemptCount), + metric.WithAttributes( + attribute.Bool("success", success))) +} + +func (m *metrics) recordNDAttempt(ctx context.Context, attemptCount int, success bool) { + if m == nil { + return + } + ctx = utils.ResetContextOnError(ctx) + m.ndAttempts.Record(ctx, int64(attemptCount), + metric.WithAttributes( + attribute.Bool("success", success))) +} + +func (sg *ShrexGetter) WithMetrics() error { + edsAttemptHistogram, err := meter.Int64Histogram( + "getters_shrex_eds_attempts_per_request", + metric.WithDescription("Number of attempts per shrex/eds request"), + ) + if err != nil { + return err + } + + ndAttemptHistogram, err := meter.Int64Histogram( + "getters_shrex_nd_attempts_per_request", + metric.WithDescription("Number of attempts per shrex/nd request"), + ) + if err != nil { + return err + } + + sg.metrics = &metrics{ + edsAttempts: edsAttemptHistogram, + ndAttempts: ndAttemptHistogram, + } + return nil +} + +// ShrexGetter is a share.Getter that uses the shrex/eds and shrex/nd protocol to retrieve shares. +type ShrexGetter struct { + edsClient *shrexeds.Client + ndClient *shrexnd.Client + + peerManager *peers.Manager + + // minRequestTimeout limits minimal timeout given to single peer by getter for serving the request. + minRequestTimeout time.Duration + // minAttemptsCount will be used to split request timeout into multiple attempts. 
It will allow to + // attempt multiple peers in scope of one request before context timeout is reached + minAttemptsCount int + + metrics *metrics +} + +func NewShrexGetter(edsClient *shrexeds.Client, ndClient *shrexnd.Client, peerManager *peers.Manager) *ShrexGetter { + return &ShrexGetter{ + edsClient: edsClient, + ndClient: ndClient, + peerManager: peerManager, + minRequestTimeout: defaultMinRequestTimeout, + minAttemptsCount: defaultMinAttemptsCount, + } +} + +func (sg *ShrexGetter) Start(ctx context.Context) error { + return sg.peerManager.Start(ctx) +} + +func (sg *ShrexGetter) Stop(ctx context.Context) error { + return sg.peerManager.Stop(ctx) +} + +func (sg *ShrexGetter) GetShare(context.Context, *header.ExtendedHeader, int, int) (share.Share, error) { + return nil, fmt.Errorf("getter/shrex: GetShare %w", errOperationNotSupported) +} + +func (sg *ShrexGetter) GetEDS(ctx context.Context, header *header.ExtendedHeader) (*rsmt2d.ExtendedDataSquare, error) { + var ( + attempt int + err error + ) + ctx, span := tracer.Start(ctx, "shrex/get-eds") + defer func() { + utils.SetStatusAndEnd(span, err) + }() + + // short circuit if the data root is empty + if header.DAH.Equals(share.EmptyRoot()) { + return share.EmptyExtendedDataSquare(), nil + } + for { + if ctx.Err() != nil { + sg.metrics.recordEDSAttempt(ctx, attempt, false) + return nil, errors.Join(err, ctx.Err()) + } + attempt++ + start := time.Now() + peer, setStatus, getErr := sg.peerManager.Peer(ctx, header.DAH.Hash(), header.Height()) + if getErr != nil { + log.Debugw("eds: couldn't find peer", + "hash", header.DAH.String(), + "err", getErr, + "finished (s)", time.Since(start)) + sg.metrics.recordEDSAttempt(ctx, attempt, false) + return nil, errors.Join(err, getErr) + } + + reqStart := time.Now() + reqCtx, cancel := ctxWithSplitTimeout(ctx, sg.minAttemptsCount-attempt+1, sg.minRequestTimeout) + eds, getErr := sg.edsClient.RequestEDS(reqCtx, header.DAH.Hash(), peer) + cancel() + switch { + case getErr == nil: + setStatus(peers.ResultNoop) + sg.metrics.recordEDSAttempt(ctx, attempt, true) + return eds, nil + case errors.Is(getErr, context.DeadlineExceeded), + errors.Is(getErr, context.Canceled): + setStatus(peers.ResultCooldownPeer) + case errors.Is(getErr, p2p.ErrNotFound): + getErr = share.ErrNotFound + setStatus(peers.ResultCooldownPeer) + case errors.Is(getErr, p2p.ErrInvalidResponse): + setStatus(peers.ResultBlacklistPeer) + default: + setStatus(peers.ResultCooldownPeer) + } + + if !ErrorContains(err, getErr) { + err = errors.Join(err, getErr) + } + log.Debugw("eds: request failed", + "hash", header.DAH.String(), + "peer", peer.String(), + "attempt", attempt, + "err", getErr, + "finished (s)", time.Since(reqStart)) + } +} + +func (sg *ShrexGetter) GetSharesByNamespace( + ctx context.Context, + header *header.ExtendedHeader, + namespace share.Namespace, +) (share.NamespacedShares, error) { + if err := namespace.ValidateForData(); err != nil { + return nil, err + } + var ( + attempt int + err error + ) + ctx, span := tracer.Start(ctx, "shrex/get-shares-by-namespace", trace.WithAttributes( + attribute.String("namespace", namespace.String()), + )) + defer func() { + utils.SetStatusAndEnd(span, err) + }() + + // verify that the namespace could exist inside the roots before starting network requests + dah := header.DAH + roots := ipld.FilterRootByNamespace(dah, namespace) + if len(roots) == 0 { + return []share.NamespacedRow{}, nil + } + + for { + if ctx.Err() != nil { + sg.metrics.recordNDAttempt(ctx, attempt, false) + return nil, 
errors.Join(err, ctx.Err()) + } + attempt++ + start := time.Now() + peer, setStatus, getErr := sg.peerManager.Peer(ctx, header.DAH.Hash(), header.Height()) + if getErr != nil { + log.Debugw("nd: couldn't find peer", + "hash", dah.String(), + "namespace", namespace.String(), + "err", getErr, + "finished (s)", time.Since(start)) + sg.metrics.recordNDAttempt(ctx, attempt, false) + return nil, errors.Join(err, getErr) + } + + reqStart := time.Now() + reqCtx, cancel := ctxWithSplitTimeout(ctx, sg.minAttemptsCount-attempt+1, sg.minRequestTimeout) + nd, getErr := sg.ndClient.RequestND(reqCtx, dah, namespace, peer) + cancel() + switch { + case getErr == nil: + // both inclusion and non-inclusion cases needs verification + if verErr := nd.Verify(dah, namespace); verErr != nil { + getErr = verErr + setStatus(peers.ResultBlacklistPeer) + break + } + setStatus(peers.ResultNoop) + sg.metrics.recordNDAttempt(ctx, attempt, true) + return nd, nil + case errors.Is(getErr, context.DeadlineExceeded), + errors.Is(getErr, context.Canceled): + setStatus(peers.ResultCooldownPeer) + case errors.Is(getErr, p2p.ErrNotFound): + getErr = share.ErrNotFound + setStatus(peers.ResultCooldownPeer) + case errors.Is(getErr, p2p.ErrInvalidResponse): + setStatus(peers.ResultBlacklistPeer) + default: + setStatus(peers.ResultCooldownPeer) + } + + if !ErrorContains(err, getErr) { + err = errors.Join(err, getErr) + } + log.Debugw("nd: request failed", + "hash", dah.String(), + "namespace", namespace.String(), + "peer", peer.String(), + "attempt", attempt, + "err", getErr, + "finished (s)", time.Since(reqStart)) + } +} diff --git a/share/getters/shrex_test.go b/share/getters/shrex_test.go new file mode 100644 index 0000000000..075735579b --- /dev/null +++ b/share/getters/shrex_test.go @@ -0,0 +1,369 @@ +package getters + +import ( + "context" + "encoding/binary" + "errors" + "testing" + "time" + + "github.com/ipfs/go-datastore" + ds_sync "github.com/ipfs/go-datastore/sync" + "github.com/libp2p/go-libp2p/core/host" + "github.com/libp2p/go-libp2p/p2p/net/conngater" + mocknet "github.com/libp2p/go-libp2p/p2p/net/mock" + "github.com/stretchr/testify/require" + + libhead "github.com/celestiaorg/go-header" + "github.com/celestiaorg/nmt" + "github.com/celestiaorg/rsmt2d" + + "github.com/celestiaorg/celestia-node/header" + "github.com/celestiaorg/celestia-node/header/headertest" + "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/share/eds" + "github.com/celestiaorg/celestia-node/share/eds/edstest" + "github.com/celestiaorg/celestia-node/share/ipld" + "github.com/celestiaorg/celestia-node/share/p2p/peers" + "github.com/celestiaorg/celestia-node/share/p2p/shrexeds" + "github.com/celestiaorg/celestia-node/share/p2p/shrexnd" + "github.com/celestiaorg/celestia-node/share/p2p/shrexsub" + "github.com/celestiaorg/celestia-node/share/sharetest" +) + +func TestShrexGetter(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) + t.Cleanup(cancel) + + // create test net + net, err := mocknet.FullMeshConnected(2) + require.NoError(t, err) + clHost, srvHost := net.Hosts()[0], net.Hosts()[1] + + // launch eds store and put test data into it + edsStore, err := newStore(t) + require.NoError(t, err) + err = edsStore.Start(ctx) + require.NoError(t, err) + + ndClient, _ := newNDClientServer(ctx, t, edsStore, srvHost, clHost) + edsClient, _ := newEDSClientServer(ctx, t, edsStore, srvHost, clHost) + + // create shrex Getter + sub := new(headertest.Subscriber) + peerManager, err := 
testManager(ctx, clHost, sub) + require.NoError(t, err) + getter := NewShrexGetter(edsClient, ndClient, peerManager) + require.NoError(t, getter.Start(ctx)) + + t.Run("ND_Available, total data size > 1mb", func(t *testing.T) { + ctx, cancel := context.WithTimeout(ctx, time.Second*10) + t.Cleanup(cancel) + + // generate test data + namespace := sharetest.RandV0Namespace() + randEDS, dah := edstest.RandEDSWithNamespace(t, namespace, 64) + eh := headertest.RandExtendedHeaderWithRoot(t, dah) + require.NoError(t, edsStore.Put(ctx, dah.Hash(), randEDS)) + peerManager.Validate(ctx, srvHost.ID(), shrexsub.Notification{ + DataHash: dah.Hash(), + Height: 1, + }) + + got, err := getter.GetSharesByNamespace(ctx, eh, namespace) + require.NoError(t, err) + require.NoError(t, got.Verify(dah, namespace)) + }) + + t.Run("ND_err_not_found", func(t *testing.T) { + ctx, cancel := context.WithTimeout(ctx, time.Second) + t.Cleanup(cancel) + + // generate test data + _, dah, namespace := generateTestEDS(t) + eh := headertest.RandExtendedHeaderWithRoot(t, dah) + peerManager.Validate(ctx, srvHost.ID(), shrexsub.Notification{ + DataHash: dah.Hash(), + Height: 1, + }) + + _, err := getter.GetSharesByNamespace(ctx, eh, namespace) + require.ErrorIs(t, err, share.ErrNotFound) + }) + + t.Run("ND_namespace_not_included", func(t *testing.T) { + ctx, cancel := context.WithTimeout(ctx, time.Second) + t.Cleanup(cancel) + + // generate test data + eds, dah, maxNamespace := generateTestEDS(t) + eh := headertest.RandExtendedHeaderWithRoot(t, dah) + require.NoError(t, edsStore.Put(ctx, dah.Hash(), eds)) + peerManager.Validate(ctx, srvHost.ID(), shrexsub.Notification{ + DataHash: dah.Hash(), + Height: 1, + }) + + nID, err := addToNamespace(maxNamespace, -1) + require.NoError(t, err) + // check for namespace to be between max and min namespace in root + require.Len(t, ipld.FilterRootByNamespace(dah, nID), 1) + + emptyShares, err := getter.GetSharesByNamespace(ctx, eh, nID) + require.NoError(t, err) + // no shares should be returned + require.Empty(t, emptyShares.Flatten()) + require.Nil(t, emptyShares.Verify(dah, nID)) + }) + + t.Run("ND_namespace_not_in_dah", func(t *testing.T) { + ctx, cancel := context.WithTimeout(ctx, time.Second) + t.Cleanup(cancel) + + // generate test data + eds, dah, maxNamespace := generateTestEDS(t) + eh := headertest.RandExtendedHeaderWithRoot(t, dah) + require.NoError(t, edsStore.Put(ctx, dah.Hash(), eds)) + peerManager.Validate(ctx, srvHost.ID(), shrexsub.Notification{ + DataHash: dah.Hash(), + Height: 1, + }) + + namespace, err := addToNamespace(maxNamespace, 1) + require.NoError(t, err) + // check for namespace to be not in root + require.Len(t, ipld.FilterRootByNamespace(dah, namespace), 0) + + emptyShares, err := getter.GetSharesByNamespace(ctx, eh, namespace) + require.NoError(t, err) + // no shares should be returned + require.Empty(t, emptyShares.Flatten()) + require.Nil(t, emptyShares.Verify(dah, namespace)) + }) + + t.Run("EDS_Available", func(t *testing.T) { + ctx, cancel := context.WithTimeout(ctx, time.Second) + t.Cleanup(cancel) + + // generate test data + randEDS, dah, _ := generateTestEDS(t) + eh := headertest.RandExtendedHeaderWithRoot(t, dah) + require.NoError(t, edsStore.Put(ctx, dah.Hash(), randEDS)) + peerManager.Validate(ctx, srvHost.ID(), shrexsub.Notification{ + DataHash: dah.Hash(), + Height: 1, + }) + + got, err := getter.GetEDS(ctx, eh) + require.NoError(t, err) + require.Equal(t, randEDS.Flattened(), got.Flattened()) + }) + + t.Run("EDS_ctx_deadline", func(t *testing.T) { + 
ctx, cancel := context.WithTimeout(ctx, time.Second) + + // generate test data + _, dah, _ := generateTestEDS(t) + eh := headertest.RandExtendedHeaderWithRoot(t, dah) + peerManager.Validate(ctx, srvHost.ID(), shrexsub.Notification{ + DataHash: dah.Hash(), + Height: 1, + }) + + cancel() + _, err := getter.GetEDS(ctx, eh) + require.ErrorIs(t, err, context.Canceled) + }) + + t.Run("EDS_err_not_found", func(t *testing.T) { + ctx, cancel := context.WithTimeout(ctx, time.Second) + t.Cleanup(cancel) + + // generate test data + _, dah, _ := generateTestEDS(t) + eh := headertest.RandExtendedHeaderWithRoot(t, dah) + peerManager.Validate(ctx, srvHost.ID(), shrexsub.Notification{ + DataHash: dah.Hash(), + Height: 1, + }) + + _, err := getter.GetEDS(ctx, eh) + require.ErrorIs(t, err, share.ErrNotFound) + }) +} + +func newStore(t *testing.T) (*eds.Store, error) { + t.Helper() + + ds := ds_sync.MutexWrap(datastore.NewMapDatastore()) + return eds.NewStore(eds.DefaultParameters(), t.TempDir(), ds) +} + +func generateTestEDS(t *testing.T) (*rsmt2d.ExtendedDataSquare, *share.Root, share.Namespace) { + eds := edstest.RandEDS(t, 4) + dah, err := share.NewRoot(eds) + require.NoError(t, err) + max := nmt.MaxNamespace(dah.RowRoots[(len(dah.RowRoots))/2-1], share.NamespaceSize) + return eds, dah, max +} + +func testManager( + ctx context.Context, host host.Host, headerSub libhead.Subscriber[*header.ExtendedHeader], +) (*peers.Manager, error) { + shrexSub, err := shrexsub.NewPubSub(ctx, host, "test") + if err != nil { + return nil, err + } + + connGater, err := conngater.NewBasicConnectionGater(ds_sync.MutexWrap(datastore.NewMapDatastore())) + if err != nil { + return nil, err + } + manager, err := peers.NewManager( + peers.DefaultParameters(), + host, + connGater, + peers.WithShrexSubPools(shrexSub, headerSub), + ) + return manager, err +} + +func newNDClientServer( + ctx context.Context, t *testing.T, edsStore *eds.Store, srvHost, clHost host.Host, +) (*shrexnd.Client, *shrexnd.Server) { + params := shrexnd.DefaultParameters() + + // create server and register handler + server, err := shrexnd.NewServer(params, srvHost, edsStore) + require.NoError(t, err) + require.NoError(t, server.Start(ctx)) + + t.Cleanup(func() { + _ = server.Stop(ctx) + }) + + // create client and connect it to server + client, err := shrexnd.NewClient(params, clHost) + require.NoError(t, err) + return client, server +} + +func newEDSClientServer( + ctx context.Context, t *testing.T, edsStore *eds.Store, srvHost, clHost host.Host, +) (*shrexeds.Client, *shrexeds.Server) { + params := shrexeds.DefaultParameters() + + // create server and register handler + server, err := shrexeds.NewServer(params, srvHost, edsStore) + require.NoError(t, err) + require.NoError(t, server.Start(ctx)) + + t.Cleanup(func() { + _ = server.Stop(ctx) + }) + + // create client and connect it to server + client, err := shrexeds.NewClient(params, clHost) + require.NoError(t, err) + return client, server +} + +// addToNamespace adds arbitrary int value to namespace, treating namespace as big-endian +// implementation of int +func addToNamespace(namespace share.Namespace, val int) (share.Namespace, error) { + if val == 0 { + return namespace, nil + } + // Convert the input integer to a byte slice and add it to result slice + result := make([]byte, len(namespace)) + if val > 0 { + binary.BigEndian.PutUint64(result[len(namespace)-8:], uint64(val)) + } else { + binary.BigEndian.PutUint64(result[len(namespace)-8:], uint64(-val)) + } + + // Perform addition byte by byte + var 
carry int + for i := len(namespace) - 1; i >= 0; i-- { + sum := 0 + if val > 0 { + sum = int(namespace[i]) + int(result[i]) + carry + } else { + sum = int(namespace[i]) - int(result[i]) + carry + } + + switch { + case sum > 255: + carry = 1 + sum -= 256 + case sum < 0: + carry = -1 + sum += 256 + default: + carry = 0 + } + + result[i] = uint8(sum) + } + + // Handle any remaining carry + if carry != 0 { + return nil, errors.New("namespace overflow") + } + + return result, nil +} + +func TestAddToNamespace(t *testing.T) { + testCases := []struct { + name string + value int + input share.Namespace + expected share.Namespace + expectedError error + }{ + { + name: "Positive value addition", + value: 42, + input: share.Namespace{0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01}, + expected: share.Namespace{0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x2b}, + expectedError: nil, + }, + { + name: "Negative value addition", + value: -42, + input: share.Namespace{0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01}, + expected: share.Namespace{0x1, 0x1, 0x1, 0x1, 0x1, 0x01, 0x1, 0x1, 0x1, 0x0, 0xd7}, + expectedError: nil, + }, + { + name: "Overflow error", + value: 1, + input: share.Namespace{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, + expected: nil, + expectedError: errors.New("namespace overflow"), + }, + { + name: "Overflow error negative", + value: -1, + input: share.Namespace{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, + expected: nil, + expectedError: errors.New("namespace overflow"), + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + result, err := addToNamespace(tc.input, tc.value) + if tc.expectedError == nil { + require.NoError(t, err) + require.Equal(t, tc.expected, result) + return + } + require.Error(t, err) + if err.Error() != tc.expectedError.Error() { + t.Errorf("Unexpected error message. Expected: %v, Got: %v", tc.expectedError, err) + } + }) + } +} diff --git a/share/getters/store.go b/share/getters/store.go new file mode 100644 index 0000000000..d66a057c56 --- /dev/null +++ b/share/getters/store.go @@ -0,0 +1,122 @@ +package getters + +import ( + "context" + "errors" + "fmt" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" + + "github.com/celestiaorg/rsmt2d" + + "github.com/celestiaorg/celestia-node/header" + "github.com/celestiaorg/celestia-node/libs/utils" + "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/share/eds" + "github.com/celestiaorg/celestia-node/share/ipld" +) + +var _ share.Getter = (*StoreGetter)(nil) + +// StoreGetter is a share.Getter that retrieves shares from an eds.Store. No results are saved to +// the eds.Store after retrieval. +type StoreGetter struct { + store *eds.Store +} + +// NewStoreGetter creates a new share.Getter that retrieves shares from an eds.Store. +func NewStoreGetter(store *eds.Store) *StoreGetter { + return &StoreGetter{ + store: store, + } +} + +// GetShare gets a single share at the given EDS coordinates from the eds.Store through the +// corresponding CAR-level blockstore. 
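+//
+// For example (a sketch; eh is assumed to be a header whose EDS is in the store):
+//
+//	shr, err := sg.GetShare(ctx, eh, 0, 0) // top-left share of the extended square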
+func (sg *StoreGetter) GetShare(ctx context.Context, header *header.ExtendedHeader, row, col int) (share.Share, error) { + dah := header.DAH + var err error + ctx, span := tracer.Start(ctx, "store/get-share", trace.WithAttributes( + attribute.Int("row", row), + attribute.Int("col", col), + )) + defer func() { + utils.SetStatusAndEnd(span, err) + }() + + upperBound := len(dah.RowRoots) + if row >= upperBound || col >= upperBound { + err := share.ErrOutOfBounds + span.RecordError(err) + return nil, err + } + root, leaf := ipld.Translate(dah, row, col) + bs, err := sg.store.CARBlockstore(ctx, dah.Hash()) + if errors.Is(err, eds.ErrNotFound) { + // convert error to satisfy getter interface contract + err = share.ErrNotFound + } + if err != nil { + return nil, fmt.Errorf("getter/store: failed to retrieve blockstore: %w", err) + } + defer func() { + if err := bs.Close(); err != nil { + log.Warnw("closing blockstore", "err", err) + } + }() + + // wrap the read-only CAR blockstore in a getter + blockGetter := eds.NewBlockGetter(bs) + s, err := ipld.GetShare(ctx, blockGetter, root, leaf, len(dah.RowRoots)) + if errors.Is(err, ipld.ErrNodeNotFound) { + // convert error to satisfy getter interface contract + err = share.ErrNotFound + } + if err != nil { + return nil, fmt.Errorf("getter/store: failed to retrieve share: %w", err) + } + + return s, nil +} + +// GetEDS gets the EDS identified by the given root from the EDS store. +func (sg *StoreGetter) GetEDS( + ctx context.Context, header *header.ExtendedHeader, +) (data *rsmt2d.ExtendedDataSquare, err error) { + ctx, span := tracer.Start(ctx, "store/get-eds") + defer func() { + utils.SetStatusAndEnd(span, err) + }() + + data, err = sg.store.Get(ctx, header.DAH.Hash()) + if errors.Is(err, eds.ErrNotFound) { + // convert error to satisfy getter interface contract + err = share.ErrNotFound + } + if err != nil { + return nil, fmt.Errorf("getter/store: failed to retrieve eds: %w", err) + } + return data, nil +} + +// GetSharesByNamespace gets all EDS shares in the given namespace from the EDS store through the +// corresponding CAR-level blockstore. +func (sg *StoreGetter) GetSharesByNamespace( + ctx context.Context, + header *header.ExtendedHeader, + namespace share.Namespace, +) (shares share.NamespacedShares, err error) { + ctx, span := tracer.Start(ctx, "store/get-shares-by-namespace", trace.WithAttributes( + attribute.String("namespace", namespace.String()), + )) + defer func() { + utils.SetStatusAndEnd(span, err) + }() + + ns, err := eds.RetrieveNamespaceFromStore(ctx, sg.store, header.DAH, namespace) + if err != nil { + return nil, fmt.Errorf("getter/store: %w", err) + } + return ns, nil +} diff --git a/share/getters/testing.go b/share/getters/testing.go new file mode 100644 index 0000000000..fafeb0541c --- /dev/null +++ b/share/getters/testing.go @@ -0,0 +1,76 @@ +package getters + +import ( + "context" + "fmt" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/celestiaorg/celestia-app/pkg/da" + "github.com/celestiaorg/rsmt2d" + + "github.com/celestiaorg/celestia-node/header" + "github.com/celestiaorg/celestia-node/header/headertest" + "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/share/eds/edstest" +) + +// TestGetter provides a testing SingleEDSGetter and the root of the EDS it holds. 
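+//
+// Typical use in a test (a sketch; ctx is the test's context):
+//
+//	getter, eh := TestGetter(t)
+//	eds, err := getter.GetEDS(ctx, eh) // returns the kept EDS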
+func TestGetter(t *testing.T) (share.Getter, *header.ExtendedHeader) {
+	eds := edstest.RandEDS(t, 8)
+	dah, err := share.NewRoot(eds)
+	require.NoError(t, err)
+	eh := headertest.RandExtendedHeaderWithRoot(t, dah)
+	return &SingleEDSGetter{
+		EDS: eds,
+	}, eh
+}
+
+// SingleEDSGetter contains a single EDS from which data is retrieved.
+// Its primary use is testing, and GetSharesByNamespace is not supported.
+type SingleEDSGetter struct {
+	EDS *rsmt2d.ExtendedDataSquare
+}
+
+// GetShare gets a share from the kept EDS if it exists and the correct root is given.
+func (seg *SingleEDSGetter) GetShare(
+	_ context.Context,
+	header *header.ExtendedHeader,
+	row, col int,
+) (share.Share, error) {
+	err := seg.checkRoot(header.DAH)
+	if err != nil {
+		return nil, err
+	}
+	return seg.EDS.GetCell(uint(row), uint(col)), nil
+}
+
+// GetEDS returns the kept EDS if the correct root is given.
+func (seg *SingleEDSGetter) GetEDS(
+	_ context.Context,
+	header *header.ExtendedHeader,
+) (*rsmt2d.ExtendedDataSquare, error) {
+	err := seg.checkRoot(header.DAH)
+	if err != nil {
+		return nil, err
+	}
+	return seg.EDS, nil
+}
+
+// GetSharesByNamespace is not supported by SingleEDSGetter and panics when called.
+func (seg *SingleEDSGetter) GetSharesByNamespace(context.Context, *header.ExtendedHeader, share.Namespace,
+) (share.NamespacedShares, error) {
+	panic("SingleEDSGetter: GetSharesByNamespace is not implemented")
+}
+
+func (seg *SingleEDSGetter) checkRoot(root *share.Root) error {
+	dah, err := da.NewDataAvailabilityHeader(seg.EDS)
+	if err != nil {
+		return err
+	}
+	if !root.Equals(&dah) {
+		return fmt.Errorf("unknown EDS: have %s, asked %s", dah.String(), root.String())
+	}
+	return nil
+}
diff --git a/share/getters/utils.go b/share/getters/utils.go
new file mode 100644
index 0000000000..2260183b4f
--- /dev/null
+++ b/share/getters/utils.go
@@ -0,0 +1,57 @@
+package getters
+
+import (
+	"context"
+	"errors"
+	"time"
+
+	logging "github.com/ipfs/go-log/v2"
+	"go.opentelemetry.io/otel"
+)
+
+var (
+	tracer = otel.Tracer("share/getters")
+	log    = logging.Logger("share/getters")
+
+	errOperationNotSupported = errors.New("operation is not supported")
+)
+
+// ctxWithSplitTimeout splits the timeout stored in the context by splitFactor and returns the
+// result if it is greater than minTimeout. minTimeout == 0 disables the lower bound, and
+// splitFactor <= 0 disables splitting (the context falls back to minTimeout).
+func ctxWithSplitTimeout(
+	ctx context.Context,
+	splitFactor int,
+	minTimeout time.Duration,
+) (context.Context, context.CancelFunc) {
+	deadline, ok := ctx.Deadline()
+	if !ok || splitFactor <= 0 {
+		if minTimeout == 0 {
+			return context.WithCancel(ctx)
+		}
+		return context.WithTimeout(ctx, minTimeout)
+	}
+
+	timeout := time.Until(deadline)
+	if timeout < minTimeout {
+		return context.WithCancel(ctx)
+	}
+
+	splitTimeout := timeout / time.Duration(splitFactor)
+	if splitTimeout < minTimeout {
+		return context.WithTimeout(ctx, minTimeout)
+	}
+	return context.WithTimeout(ctx, splitTimeout)
+}
+
+// ErrorContains reports whether any error in err's tree matches any error in target's tree.
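+//
+// For example (a sketch):
+//
+//	base := errors.New("base")
+//	ErrorContains(fmt.Errorf("a: %w", base), fmt.Errorf("b: %w", base)) // true: both trees contain base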
+func ErrorContains(err, target error) bool { + if errors.Is(err, target) || target == nil { + return true + } + + target = errors.Unwrap(target) + if target == nil { + return false + } + return ErrorContains(err, target) +} diff --git a/share/getters/utils_test.go b/share/getters/utils_test.go new file mode 100644 index 0000000000..65de9d47f2 --- /dev/null +++ b/share/getters/utils_test.go @@ -0,0 +1,225 @@ +package getters + +import ( + "context" + "errors" + "fmt" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func Test_ErrorContains(t *testing.T) { + err1 := errors.New("1") + err2 := errors.New("2") + + w1 := func(err error) error { + return fmt.Errorf("wrap1: %w", err) + } + w2 := func(err error) error { + return fmt.Errorf("wrap1: %w", err) + } + + type args struct { + err error + target error + } + tests := []struct { + name string + args args + want bool + }{ + {"nil err", + args{ + err: nil, + target: err1, + }, + false, + }, + {"nil target", + args{ + err: err1, + target: nil, + }, + true, + }, + {"errors.Is true", + args{ + err: w1(err1), + target: err1, + }, + true, + }, + {"errors.Is false", + args{ + err: w1(err1), + target: err2, + }, + false, + }, + {"same wrap but different base error", + args{ + err: w1(err1), + target: w1(err2), + }, + false, + }, + {"both wrapped true", + args{ + err: w1(err1), + target: w2(err1), + }, + true, + }, + {"both wrapped false", + args{ + err: w1(err1), + target: w2(err2), + }, + false, + }, + {"multierr first in slice", + args{ + err: errors.Join(w1(err1), w2(err2)), + target: w2(err1), + }, + true, + }, + {"multierr second in slice", + args{ + err: errors.Join(w1(err1), w2(err2)), + target: w1(err2), + }, + true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert.Equalf(t, + tt.want, + ErrorContains(tt.args.err, tt.args.target), + "ErrorContains(%v, %v)", tt.args.err, tt.args.target) + }) + } +} + +func Test_ctxWithSplitTimeout(t *testing.T) { + type args struct { + ctxTimeout time.Duration + splitFactor []int + minTimeout time.Duration + } + tests := []struct { + name string + args args + want time.Duration + }{ + { + name: "ctxTimeout > minTimeout, splitFactor <= 0", + args: args{ + ctxTimeout: 3 * time.Minute, + splitFactor: []int{-1, 0}, + minTimeout: time.Minute, + }, + want: time.Minute, + }, + { + name: "ctxTimeout > minTimeout, splitFactor = 1", + args: args{ + ctxTimeout: 3 * time.Minute, + splitFactor: []int{1}, + minTimeout: time.Minute, + }, + want: 3 * time.Minute, + }, + { + name: "ctxTimeout > minTimeout, splitFactor = 2", + args: args{ + ctxTimeout: 3 * time.Minute, + splitFactor: []int{2}, + minTimeout: time.Minute, + }, + want: 3 * time.Minute / 2, + }, + { + name: "ctxTimeout > minTimeout, resulted timeout limited by minTimeout", + args: args{ + ctxTimeout: 3 * time.Minute, + splitFactor: []int{3, 4, 5}, + minTimeout: time.Minute, + }, + want: time.Minute, + }, + { + name: "ctxTimeout < minTimeout", + args: args{ + ctxTimeout: time.Minute, + splitFactor: []int{-1, 0, 1, 2, 3}, + minTimeout: 2 * time.Minute, + }, + want: time.Minute, + }, + { + name: "minTimeout = 0, splitFactor <= 1", + args: args{ + ctxTimeout: time.Minute, + splitFactor: []int{-1, 0, 1}, + minTimeout: 0, + }, + want: time.Minute, + }, + { + name: "minTimeout = 0, splitFactor > 1", + args: args{ + ctxTimeout: time.Minute, + splitFactor: []int{2}, + minTimeout: 0, + }, + want: time.Minute / 2, + }, + { + name: "no context timeout", + args: args{ + ctxTimeout: 0, 
+ splitFactor: []int{-1, 0, 1, 2}, + minTimeout: time.Minute, + }, + want: time.Minute, + }, + { + name: "no context timeout, minTimeout = 0", + args: args{ + ctxTimeout: 0, + splitFactor: []int{-1, 0, 1, 2}, + minTimeout: 0, + }, + want: 0, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + for _, sf := range tt.args.splitFactor { + ctx, cancel := context.WithCancel(context.Background()) + // add timeout if original context should have it + if tt.args.ctxTimeout > 0 { + ctx, cancel = context.WithTimeout(ctx, tt.args.ctxTimeout) + } + t.Cleanup(cancel) + got, _ := ctxWithSplitTimeout(ctx, sf, tt.args.minTimeout) + dl, ok := got.Deadline() + // in case no deadline is found in ctx or not expected to be found, check both cases apply at the + // same time + if !ok || tt.want == 0 { + require.False(t, ok) + require.Equal(t, tt.want, time.Duration(0)) + continue + } + d := time.Until(dl) + require.True(t, d <= tt.want+time.Second) + require.True(t, d >= tt.want-time.Second) + } + }) + } +} diff --git a/share/ipld/add.go b/share/ipld/add.go new file mode 100644 index 0000000000..fbb743148c --- /dev/null +++ b/share/ipld/add.go @@ -0,0 +1,86 @@ +package ipld + +import ( + "context" + "fmt" + + "github.com/ipfs/boxo/blockservice" + + "github.com/celestiaorg/celestia-app/pkg/wrapper" + "github.com/celestiaorg/nmt" + "github.com/celestiaorg/rsmt2d" + + "github.com/celestiaorg/celestia-node/libs/utils" + "github.com/celestiaorg/celestia-node/share" +) + +// AddShares erasures and extends shares to blockservice.BlockService using the provided +// ipld.NodeAdder. +func AddShares( + ctx context.Context, + shares []share.Share, + adder blockservice.BlockService, +) (*rsmt2d.ExtendedDataSquare, error) { + if len(shares) == 0 { + return nil, fmt.Errorf("empty data") // empty block is not an empty Data + } + squareSize := int(utils.SquareSize(len(shares))) + // create nmt adder wrapping batch adder with calculated size + batchAdder := NewNmtNodeAdder(ctx, adder, MaxSizeBatchOption(squareSize*2)) + // create the nmt wrapper to generate row and col commitments + // recompute the eds + eds, err := rsmt2d.ComputeExtendedDataSquare( + shares, + share.DefaultRSMT2DCodec(), + wrapper.NewConstructor(uint64(squareSize), + nmt.NodeVisitor(batchAdder.Visit)), + ) + if err != nil { + return nil, fmt.Errorf("failure to recompute the extended data square: %w", err) + } + // compute roots + _, err = eds.RowRoots() + if err != nil { + return nil, err + } + // commit the batch to ipfs + return eds, batchAdder.Commit() +} + +// ImportShares imports flattened chunks of data into Extended Data square and saves it in +// blockservice.BlockService +func ImportShares( + ctx context.Context, + shares [][]byte, + adder blockservice.BlockService, +) (*rsmt2d.ExtendedDataSquare, error) { + if len(shares) == 0 { + return nil, fmt.Errorf("ipld: importing empty data") + } + squareSize := int(utils.SquareSize(len(shares))) + // create nmt adder wrapping batch adder with calculated size + batchAdder := NewNmtNodeAdder(ctx, adder, MaxSizeBatchOption(squareSize*2)) + // recompute the eds + eds, err := rsmt2d.ImportExtendedDataSquare( + shares, + share.DefaultRSMT2DCodec(), + wrapper.NewConstructor(uint64(squareSize/2), + nmt.NodeVisitor(batchAdder.Visit)), + ) + if err != nil { + return nil, fmt.Errorf("failure to recompute the extended data square: %w", err) + } + // compute roots + _, err = eds.RowRoots() + if err != nil { + return nil, err + } + // commit the batch to DAG + return eds, batchAdder.Commit() +} + 
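+// ImportEDS flattens the given EDS, including parity shares, and re-imports it
+// into the given blockservice via ImportShares.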
+func ImportEDS(ctx context.Context, square *rsmt2d.ExtendedDataSquare, adder blockservice.BlockService) error {
+	shares := square.Flattened()
+	_, err := ImportShares(ctx, shares, adder)
+	return err
+}
diff --git a/share/ipld/blockserv.go b/share/ipld/blockserv.go
new file mode 100644
index 0000000000..2ed2a21c77
--- /dev/null
+++ b/share/ipld/blockserv.go
@@ -0,0 +1,30 @@
+package ipld
+
+import (
+	"github.com/ipfs/boxo/blockservice"
+	"github.com/ipfs/boxo/blockstore"
+	"github.com/ipfs/boxo/exchange"
+	"github.com/ipfs/go-datastore"
+	"github.com/ipfs/go-datastore/sync"
+)
+
+// NewBlockservice constructs Blockservice for fetching NMTrees.
+func NewBlockservice(bs blockstore.Blockstore, exchange exchange.Interface) blockservice.BlockService {
+	return blockservice.New(bs, exchange, blockservice.WithAllowlist(defaultAllowlist))
+}
+
+// NewMemBlockservice constructs Blockservice for fetching NMTrees with in-memory blockstore.
+func NewMemBlockservice() blockservice.BlockService {
+	bstore := blockstore.NewBlockstore(sync.MutexWrap(datastore.NewMapDatastore()))
+	return NewBlockservice(bstore, nil)
+}
+
+// defaultAllowlist keeps default list of hashes allowed in the network.
+var defaultAllowlist allowlist
+
+type allowlist struct{}
+
+func (a allowlist) IsAllowed(code uint64) bool {
+	// we only allow the home-baked sha256NamespaceFlagged
+	return code == sha256NamespaceFlagged
+}
diff --git a/share/ipld/corrupted_data_test.go b/share/ipld/corrupted_data_test.go
new file mode 100644
index 0000000000..0d0af6dd35
--- /dev/null
+++ b/share/ipld/corrupted_data_test.go
@@ -0,0 +1,51 @@
+package ipld_test
+
+import (
+	"context"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/require"
+
+	"github.com/celestiaorg/celestia-node/header/headertest"
+	"github.com/celestiaorg/celestia-node/share"
+	"github.com/celestiaorg/celestia-node/share/availability/full"
+	availability_test "github.com/celestiaorg/celestia-node/share/availability/test"
+	"github.com/celestiaorg/celestia-node/share/getters"
+)
+
+// sharesAvailableTimeout is an arbitrarily picked interval of time in which a TestNode is expected
+// to be able to complete a SharesAvailable request from a connected peer in a TestDagNet.
+const sharesAvailableTimeout = 2 * time.Second
+
+// TestNamespaceHasher_CorruptedData is an integration test that verifies that the NamespaceHasher
+// of a recipient of corrupted data will not panic, and will throw away the corrupted data.
+func TestNamespaceHasher_CorruptedData(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	t.Cleanup(cancel)
+	net := availability_test.NewTestDAGNet(ctx, t)
+
+	requester := full.Node(net)
+	provider, mockBS := availability_test.MockNode(t, net)
+	provider.Availability = full.TestAvailability(t, getters.NewIPLDGetter(provider.BlockService))
+	net.ConnectAll()
+
+	// before the provider starts attacking, we should be able to retrieve successfully. We pass a size
+	// 16 block, but this is not important to the test and any valid block size behaves the same.
+ root := availability_test.RandFillBS(t, 16, provider.BlockService) + + eh := headertest.RandExtendedHeaderWithRoot(t, root) + getCtx, cancelGet := context.WithTimeout(ctx, sharesAvailableTimeout) + t.Cleanup(cancelGet) + err := requester.SharesAvailable(getCtx, eh) + require.NoError(t, err) + + // clear the storage of the requester so that it must retrieve again, then start attacking + // we reinitialize the node to clear the eds store + requester = full.Node(net) + mockBS.Attacking = true + getCtx, cancelGet = context.WithTimeout(ctx, sharesAvailableTimeout) + t.Cleanup(cancelGet) + err = requester.SharesAvailable(getCtx, eh) + require.ErrorIs(t, err, share.ErrNotAvailable) +} diff --git a/share/ipld/get.go b/share/ipld/get.go new file mode 100644 index 0000000000..adf2ffa8c5 --- /dev/null +++ b/share/ipld/get.go @@ -0,0 +1,248 @@ +package ipld + +import ( + "context" + "errors" + "sync" + "sync/atomic" + + "github.com/gammazero/workerpool" + "github.com/ipfs/boxo/blockservice" + "github.com/ipfs/go-cid" + ipld "github.com/ipfs/go-ipld-format" + + "github.com/celestiaorg/celestia-node/share" +) + +// NumWorkersLimit sets global limit for workers spawned by GetShares. +// GetShares could be called MaxSquareSize(128) times per data square each +// spawning up to 128/2 goroutines and altogether this is 8192. Considering +// there can be N blocks fetched at the same time, e.g. during catching up data +// from the past, we multiply this number by the amount of allowed concurrent +// data square fetches(NumConcurrentSquares). +// +// NOTE: This value only limits amount of simultaneously running workers that +// are spawned as the load increases and are killed, once the load declines. +// +// TODO(@Wondertan): This assumes we have parallelized DASer implemented. Sync the values once it is shipped. +// TODO(@Wondertan): Allow configuration of values without global state. +var NumWorkersLimit = share.MaxSquareSize * share.MaxSquareSize / 2 * NumConcurrentSquares + +// NumConcurrentSquares limits the amount of squares that are fetched +// concurrently/simultaneously. +var NumConcurrentSquares = 8 + +// ErrNodeNotFound is used to signal when a nmt Node could not be found. +var ErrNodeNotFound = errors.New("nmt node not found") + +// Global worker pool that globally controls and limits goroutines spawned by +// GetShares. +// +// TODO(@Wondertan): Idle timeout for workers needs to be configured to around block time, +// so that workers spawned between each reconstruction for every new block are reused. +var pool = workerpool.New(NumWorkersLimit) + +// GetLeaf fetches and returns the raw leaf. +// It walks down the IPLD NMT tree until it finds the requested one. +func GetLeaf( + ctx context.Context, + bGetter blockservice.BlockGetter, + root cid.Cid, + leaf, total int, +) (ipld.Node, error) { + // request the node + nd, err := GetNode(ctx, bGetter, root) + if err != nil { + return nil, err + } + + // look for links + lnks := nd.Links() + if len(lnks) == 0 { + // in case there is none, we reached tree's bottom, so finally return the leaf. 
+		return nd, err
+	}
+
+	// route the walk to the appropriate child
+	total /= 2 // we are in a binary tree, so every step halves the remaining leaves
+	if leaf < total {
+		root = lnks[0].Cid // if the target leaf is on the left, walk down the first child
+	} else {
+		root, leaf = lnks[1].Cid, leaf-total // otherwise, walk down the second child
+	}
+
+	// recursively walk down through the selected child
+	return GetLeaf(ctx, bGetter, root, leaf, total)
+}
+
+// GetLeaves gets leaves from either local storage, or, if not found, requests
+// them from immediate/connected peers. It passes each leaf to the given put
+// func together with the index of the node's position in the tree (bin-tree-feat).
+// Does not return any error, and returns/unblocks only on success
+// (got all shares) or on context cancellation.
+//
+// It works concurrently by spawning workers in the pool which do one basic
+// thing - block until data is fetched, s.t. share processing is never
+// sequential, and thus we request *all* the available shares without waiting
+// for others to finish. This property is required to maximize data
+// availability. As a side effect, we get concurrent tree traversal, reducing
+// time-to-data.
+//
+// GetLeaves relies on the fact that the underlying data structure is a binary
+// tree, so it's not suitable for anything else besides that. Parts of the
+// implementation that rely on this property are explicitly tagged with
+// (bin-tree-feat).
+func GetLeaves(ctx context.Context,
+	bGetter blockservice.BlockGetter,
+	root cid.Cid,
+	maxShares int,
+	put func(int, ipld.Node),
+) {
+	// this buffer ensures writes to 'jobs' are never blocking (bin-tree-feat)
+	jobs := make(chan *job, (maxShares+1)/2) // +1 for the case where 'maxShares' is 1
+	jobs <- &job{cid: root, ctx: ctx}
+	// total is the number of routines spawned and the total number of nodes we process (bin-tree-feat),
+	// so we can specify the exact number of loop iterations and wait for that many
+	// routines to finish processing
+	total := maxShares*2 - 1
+	wg := sync.WaitGroup{}
+	wg.Add(total)
+
+	// all preparations are done, so begin processing jobs
+	for i := 0; i < total; i++ {
+		select {
+		case j := <-jobs:
+			// work over each job concurrently, s.t. shares do not block
+			// processing of each other
+			pool.Submit(func() {
+				defer wg.Done()
+
+				nd, err := GetNode(ctx, bGetter, j.cid)
+				if err != nil {
+					// we don't really care about errors here
+					// just fetch as much as possible
+					return
+				}
+				// check links to know what we should do with the node
+				lnks := nd.Links()
+				if len(lnks) == 0 {
+					// successfully fetched a share/leaf
+					// ladies and gentlemen, we got em!
+					put(j.sharePos, nd)
+					return
+				}
+				// ok, we found more links
+				for i, lnk := range lnks {
+					// send those to be processed
+					select {
+					case jobs <- &job{
+						cid: lnk.Cid,
+						// calc position for children nodes (bin-tree-feat),
+						// s.t. 'if' above knows where to put a share
+						sharePos: j.sharePos*2 + i,
+						// we pass the context to job so that spans are tracked in a tree
+						// structure
+						ctx: ctx,
+					}:
+					case <-ctx.Done():
+						return
+					}
+				}
+			})
+		case <-ctx.Done():
+			return
+		}
+	}
+	// "tick-tack, how much more should I wait before you get those shares?" - the goroutine
+	wg.Wait()
+}
+
+// GetProof fetches and returns the leaf's Merkle Proof.
+// It walks down the IPLD NMT tree until it reaches the leaf and returns the collected proof.
+func GetProof(
+	ctx context.Context,
+	bGetter blockservice.BlockGetter,
+	root cid.Cid,
+	proof []cid.Cid,
+	leaf, total int,
+) ([]cid.Cid, error) {
+	// request the node
+	nd, err := GetNode(ctx, bGetter, root)
+	if err != nil {
+		return nil, err
+	}
+	// look for links
+	lnks := nd.Links()
+	if len(lnks) == 0 {
+		p := make([]cid.Cid, len(proof))
+		copy(p, proof)
+		return p, nil
+	}
+
+	// route the walk to the appropriate child
+	total /= 2 // we are in a binary tree, so every step halves the remaining leaves
+	if leaf < total {
+		root = lnks[0].Cid // if the target leaf is on the left, walk down the first child
+		proof = append(proof, lnks[1].Cid)
+	} else {
+		root, leaf = lnks[1].Cid, leaf-total // otherwise, walk down the second child
+		proof, err = GetProof(ctx, bGetter, root, proof, leaf, total)
+		if err != nil {
+			return nil, err
+		}
+		return append(proof, lnks[0].Cid), nil
+	}
+
+	// recursively walk down through the selected child
+	return GetProof(ctx, bGetter, root, proof, leaf, total)
+}
+
+// chanGroup implements an atomic wait group, closing a jobs chan
+// when fully done.
+type chanGroup struct {
+	jobs    chan job
+	counter int64
+}
+
+func (w *chanGroup) add(count int64) {
+	atomic.AddInt64(&w.counter, count)
+}
+
+func (w *chanGroup) done() {
+	numRemaining := atomic.AddInt64(&w.counter, -1)
+
+	// Close channel if this job was the last one
+	if numRemaining == 0 {
+		close(w.jobs)
+	}
+}
+
+// job represents an encountered node to investigate during the `GetLeaves`
+// and `CollectLeavesByNamespace` routines.
+type job struct {
+	// we pass the context to job so that spans are tracked in a tree
+	// structure
+	ctx context.Context
+	// cid of the node that will be handled
+	cid cid.Cid
+	// sharePos represents potential share position in share slice
+	sharePos int
+	// depth is the number of edges on the path from the tree's root to this node
+	depth int
+	// isAbsent indicates that the target namespace is not included; in that case
+	// only absence proofs are collected
+	isAbsent bool
+}
+
+func (j job) next(direction direction, cid cid.Cid, isAbsent bool) job {
+	var i int
+	if direction == right {
+		i++
+	}
+	return job{
+		ctx:      j.ctx,
+		cid:      cid,
+		sharePos: j.sharePos*2 + i,
+		depth:    j.depth + 1,
+		isAbsent: isAbsent,
+	}
+}
diff --git a/share/ipld/get_shares.go b/share/ipld/get_shares.go
new file mode 100644
index 0000000000..98db7012b5
--- /dev/null
+++ b/share/ipld/get_shares.go
@@ -0,0 +1,73 @@
+package ipld
+
+import (
+	"context"
+
+	"github.com/ipfs/boxo/blockservice"
+	"github.com/ipfs/go-cid"
+	format "github.com/ipfs/go-ipld-format"
+
+	"github.com/celestiaorg/nmt"
+
+	"github.com/celestiaorg/celestia-node/share"
+)
+
+// GetShare fetches and returns the data for leaf `leafIndex` of root `rootCid`.
+func GetShare(
+	ctx context.Context,
+	bGetter blockservice.BlockGetter,
+	rootCid cid.Cid,
+	leafIndex int,
+	totalLeafs int, // this corresponds to the extended square width
+) (share.Share, error) {
+	nd, err := GetLeaf(ctx, bGetter, rootCid, leafIndex, totalLeafs)
+	if err != nil {
+		return nil, err
+	}
+
+	return leafToShare(nd), nil
+}
+
+// GetShares walks the tree of a given root and puts shares into the given 'put' func.
+// Does not return any error, and returns/unblocks only on success
+// (got all shares) or on context cancellation.
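+//
+// A caller sketch (bg, rootCid, and width are assumed to come from the caller;
+// each index is written by exactly one worker goroutine):
+//
+//	out := make([]share.Share, width)
+//	GetShares(ctx, bg, rootCid, width, func(i int, s share.Share) { out[i] = s })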
+func GetShares(ctx context.Context, bg blockservice.BlockGetter, root cid.Cid, shares int, put func(int, share.Share)) { + putNode := func(i int, leaf format.Node) { + put(i, leafToShare(leaf)) + } + GetLeaves(ctx, bg, root, shares, putNode) +} + +// GetSharesByNamespace walks the tree of a given root and returns its shares within the given +// Namespace. If a share could not be retrieved, err is not nil, and the returned array +// contains nil shares in place of the shares it was unable to retrieve. +func GetSharesByNamespace( + ctx context.Context, + bGetter blockservice.BlockGetter, + root cid.Cid, + namespace share.Namespace, + maxShares int, +) ([]share.Share, *nmt.Proof, error) { + data := NewNamespaceData(maxShares, namespace, WithLeaves(), WithProofs()) + err := data.CollectLeavesByNamespace(ctx, bGetter, root) + if err != nil { + return nil, nil, err + } + + leaves := data.Leaves() + + shares := make([]share.Share, len(leaves)) + for i, leaf := range leaves { + if leaf != nil { + shares[i] = leafToShare(leaf) + } + } + return shares, data.Proof(), err +} + +// leafToShare converts an NMT leaf into a Share. +func leafToShare(nd format.Node) share.Share { + // * Additional namespace is prepended so that parity data can be identified with a parity + // namespace, which we cut off + return share.GetData(nd.RawData()) +} diff --git a/share/ipld/get_shares_test.go b/share/ipld/get_shares_test.go new file mode 100644 index 0000000000..580efcb69b --- /dev/null +++ b/share/ipld/get_shares_test.go @@ -0,0 +1,504 @@ +package ipld + +import ( + "bytes" + "context" + "crypto/sha256" + "errors" + mrand "math/rand" + "sort" + "strconv" + "testing" + "time" + + "github.com/ipfs/boxo/blockservice" + "github.com/ipfs/go-cid" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/celestiaorg/celestia-app/pkg/wrapper" + "github.com/celestiaorg/rsmt2d" + + "github.com/celestiaorg/celestia-node/libs/utils" + "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/share/eds/edstest" + "github.com/celestiaorg/celestia-node/share/sharetest" +) + +func TestGetShare(t *testing.T) { + const size = 8 + + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + bServ := NewMemBlockservice() + + // generate random shares for the nmt + shares := sharetest.RandShares(t, size*size) + eds, err := AddShares(ctx, shares, bServ) + require.NoError(t, err) + + for i, leaf := range shares { + row := i / size + pos := i - (size * row) + rowRoots, err := eds.RowRoots() + require.NoError(t, err) + share, err := GetShare(ctx, bServ, MustCidFromNamespacedSha256(rowRoots[row]), pos, size*2) + require.NoError(t, err) + assert.Equal(t, leaf, share) + } +} + +func TestBlockRecovery(t *testing.T) { + originalSquareWidth := 8 + shareCount := originalSquareWidth * originalSquareWidth + extendedSquareWidth := 2 * originalSquareWidth + extendedShareCount := extendedSquareWidth * extendedSquareWidth + + // generate test data + quarterShares := sharetest.RandShares(t, shareCount) + allShares := sharetest.RandShares(t, shareCount) + + testCases := []struct { + name string + shares []share.Share + expectErr bool + errString string + d int // number of shares to delete + }{ + {"missing 1/2 shares", quarterShares, false, "", extendedShareCount / 2}, + {"missing 1/4 shares", quarterShares, false, "", extendedShareCount / 4}, + {"max missing data", quarterShares, false, "", (originalSquareWidth + 1) * (originalSquareWidth + 1)}, + {"missing 
all but one shares", allShares, true, "failed to solve data square", extendedShareCount - 1}, + } + for _, tc := range testCases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + squareSize := utils.SquareSize(len(tc.shares)) + + testEds, err := rsmt2d.ComputeExtendedDataSquare( + tc.shares, + share.DefaultRSMT2DCodec(), + wrapper.NewConstructor(squareSize), + ) + require.NoError(t, err) + + // calculate roots using the first complete square + rowRoots, err := testEds.RowRoots() + require.NoError(t, err) + colRoots, err := testEds.ColRoots() + require.NoError(t, err) + + flat := testEds.Flattened() + + // recover a partially complete square + rdata := removeRandShares(flat, tc.d) + testEds, err = rsmt2d.ImportExtendedDataSquare( + rdata, + share.DefaultRSMT2DCodec(), + wrapper.NewConstructor(squareSize), + ) + require.NoError(t, err) + + err = testEds.Repair(rowRoots, colRoots) + if tc.expectErr { + require.Error(t, err) + require.Contains(t, err.Error(), tc.errString) + return + } + assert.NoError(t, err) + + reds, err := rsmt2d.ImportExtendedDataSquare(rdata, share.DefaultRSMT2DCodec(), wrapper.NewConstructor(squareSize)) + require.NoError(t, err) + // check that the squares are equal + assert.Equal(t, testEds.Flattened(), reds.Flattened()) + }) + } +} + +func Test_ConvertEDStoShares(t *testing.T) { + squareWidth := 16 + shares := sharetest.RandShares(t, squareWidth*squareWidth) + + // compute extended square + testEds, err := rsmt2d.ComputeExtendedDataSquare( + shares, + share.DefaultRSMT2DCodec(), + wrapper.NewConstructor(uint64(squareWidth)), + ) + require.NoError(t, err) + + resshares := testEds.FlattenedODS() + require.Equal(t, shares, resshares) +} + +// removes d shares from data +func removeRandShares(data [][]byte, d int) [][]byte { + count := len(data) + // remove shares randomly + for i := 0; i < d; { + ind := mrand.Intn(count) + if len(data[ind]) == 0 { + continue + } + data[ind] = nil + i++ + } + return data +} + +func TestGetSharesByNamespace(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + t.Cleanup(cancel) + bServ := NewMemBlockservice() + + var tests = []struct { + rawData []share.Share + }{ + {rawData: sharetest.RandShares(t, 4)}, + {rawData: sharetest.RandShares(t, 16)}, + } + + for i, tt := range tests { + t.Run(strconv.Itoa(i), func(t *testing.T) { + // choose random namespace from rand shares + expected := tt.rawData[len(tt.rawData)/2] + namespace := share.GetNamespace(expected) + + // change rawData to contain several shares with same namespace + tt.rawData[(len(tt.rawData)/2)+1] = expected + // put raw data in BlockService + eds, err := AddShares(ctx, tt.rawData, bServ) + require.NoError(t, err) + + var shares []share.Share + rowRoots, err := eds.RowRoots() + require.NoError(t, err) + for _, row := range rowRoots { + rcid := MustCidFromNamespacedSha256(row) + rowShares, _, err := GetSharesByNamespace(ctx, bServ, rcid, namespace, len(rowRoots)) + if errors.Is(err, ErrNamespaceOutsideRange) { + continue + } + require.NoError(t, err) + + shares = append(shares, rowShares...) 
+ } + + assert.Equal(t, 2, len(shares)) + for _, share := range shares { + assert.Equal(t, expected, share) + } + }) + } +} + +func TestCollectLeavesByNamespace_IncompleteData(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + t.Cleanup(cancel) + bServ := NewMemBlockservice() + + shares := sharetest.RandShares(t, 16) + + // set all shares to the same namespace id + namespace := share.GetNamespace(shares[0]) + for _, shr := range shares { + copy(share.GetNamespace(shr), namespace) + } + + eds, err := AddShares(ctx, shares, bServ) + require.NoError(t, err) + + roots, err := eds.RowRoots() + require.NoError(t, err) + + // remove the second share from the first row + rcid := MustCidFromNamespacedSha256(roots[0]) + node, err := GetNode(ctx, bServ, rcid) + require.NoError(t, err) + + // Left side of the tree contains the original shares + data, err := GetNode(ctx, bServ, node.Links()[0].Cid) + require.NoError(t, err) + + // Second share is the left side's right child + l, err := GetNode(ctx, bServ, data.Links()[0].Cid) + require.NoError(t, err) + r, err := GetNode(ctx, bServ, l.Links()[1].Cid) + require.NoError(t, err) + err = bServ.DeleteBlock(ctx, r.Cid()) + require.NoError(t, err) + + namespaceData := NewNamespaceData(len(shares), namespace, WithLeaves()) + err = namespaceData.CollectLeavesByNamespace(ctx, bServ, rcid) + require.Error(t, err) + leaves := namespaceData.Leaves() + assert.Nil(t, leaves[1]) + assert.Equal(t, 4, len(leaves)) +} + +func TestCollectLeavesByNamespace_AbsentNamespaceId(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + t.Cleanup(cancel) + bServ := NewMemBlockservice() + + shares := sharetest.RandShares(t, 1024) + + // set all shares to the same namespace + namespaces, err := randomNamespaces(5) + require.NoError(t, err) + minNamespace := namespaces[0] + minIncluded := namespaces[1] + midNamespace := namespaces[2] + maxIncluded := namespaces[3] + maxNamespace := namespaces[4] + + secondNamespaceFrom := mrand.Intn(len(shares)-2) + 1 + for i, shr := range shares { + if i < secondNamespaceFrom { + copy(share.GetNamespace(shr), minIncluded) + continue + } + copy(share.GetNamespace(shr), maxIncluded) + } + + var tests = []struct { + name string + data []share.Share + missingNamespace share.Namespace + isAbsence bool + }{ + {name: "Namespace less than the minimum namespace in data", data: shares, missingNamespace: minNamespace}, + {name: "Namespace greater than the maximum namespace in data", data: shares, missingNamespace: maxNamespace}, + {name: "Namespace in range but still missing", data: shares, missingNamespace: midNamespace, isAbsence: true}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + eds, err := AddShares(ctx, shares, bServ) + require.NoError(t, err) + assertNoRowContainsNID(ctx, t, bServ, eds, tt.missingNamespace, tt.isAbsence) + }) + } +} + +func TestCollectLeavesByNamespace_MultipleRowsContainingSameNamespaceId(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + t.Cleanup(cancel) + bServ := NewMemBlockservice() + + shares := sharetest.RandShares(t, 16) + + // set all shares to the same namespace and data but the last one + namespace := share.GetNamespace(shares[0]) + commonNamespaceData := shares[0] + + for i, nspace := range shares { + if i == len(shares)-1 { + break + } + + copy(nspace, commonNamespaceData) + } + + eds, err := AddShares(ctx, shares, bServ) + require.NoError(t, err) + + rowRoots, err := 
eds.RowRoots() + require.NoError(t, err) + + for _, row := range rowRoots { + rcid := MustCidFromNamespacedSha256(row) + data := NewNamespaceData(len(shares), namespace, WithLeaves()) + err := data.CollectLeavesByNamespace(ctx, bServ, rcid) + if errors.Is(err, ErrNamespaceOutsideRange) { + continue + } + assert.Nil(t, err) + leaves := data.Leaves() + for _, node := range leaves { + // test that the data returned by collectLeavesByNamespace for nid + // matches the commonNamespaceData that was copied across almost all data + assert.Equal(t, commonNamespaceData, share.GetData(node.RawData())) + } + } +} + +func TestGetSharesWithProofsByNamespace(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + t.Cleanup(cancel) + bServ := NewMemBlockservice() + + var tests = []struct { + rawData []share.Share + }{ + {rawData: sharetest.RandShares(t, 4)}, + {rawData: sharetest.RandShares(t, 16)}, + {rawData: sharetest.RandShares(t, 64)}, + } + + for i, tt := range tests { + t.Run(strconv.Itoa(i), func(t *testing.T) { + rand := mrand.New(mrand.NewSource(time.Now().UnixNano())) + // choose random range in shares slice + from := rand.Intn(len(tt.rawData)) + to := rand.Intn(len(tt.rawData)) + + if to < from { + from, to = to, from + } + + expected := tt.rawData[from] + namespace := share.GetNamespace(expected) + + // change rawData to contain several shares with same namespace + for i := from; i <= to; i++ { + tt.rawData[i] = expected + } + + // put raw data in BlockService + eds, err := AddShares(ctx, tt.rawData, bServ) + require.NoError(t, err) + + var shares []share.Share + rowRoots, err := eds.RowRoots() + require.NoError(t, err) + for _, row := range rowRoots { + rcid := MustCidFromNamespacedSha256(row) + rowShares, proof, err := GetSharesByNamespace(ctx, bServ, rcid, namespace, len(rowRoots)) + if namespace.IsOutsideRange(row, row) { + require.ErrorIs(t, err, ErrNamespaceOutsideRange) + continue + } + require.NoError(t, err) + if len(rowShares) > 0 { + require.NotNil(t, proof) + // append shares to check integrity later + shares = append(shares, rowShares...) 
+ + // construct nodes from shares by prepending namespace + var leaves [][]byte + for _, shr := range rowShares { + leaves = append(leaves, append(share.GetNamespace(shr), shr...)) + } + + // verify namespace + verified := proof.VerifyNamespace( + sha256.New(), + namespace.ToNMT(), + leaves, + NamespacedSha256FromCID(rcid)) + require.True(t, verified) + + // verify inclusion + verified = proof.VerifyInclusion( + sha256.New(), + namespace.ToNMT(), + rowShares, + NamespacedSha256FromCID(rcid)) + require.True(t, verified) + } + } + + // validate shares + assert.Equal(t, to-from+1, len(shares)) + for _, share := range shares { + assert.Equal(t, expected, share) + } + }) + } +} + +func TestBatchSize(t *testing.T) { + tests := []struct { + name string + origWidth int + }{ + {"2", 2}, + {"4", 4}, + {"8", 8}, + {"16", 16}, + {"32", 32}, + {"64", 64}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second*time.Duration(tt.origWidth)) + defer cancel() + + bs := NewMemBlockservice() + + randEds := edstest.RandEDS(t, tt.origWidth) + _, err := AddShares(ctx, randEds.FlattenedODS(), bs) + require.NoError(t, err) + + out, err := bs.Blockstore().AllKeysChan(ctx) + require.NoError(t, err) + + var count int + for range out { + count++ + } + extendedWidth := tt.origWidth * 2 + assert.Equalf(t, count, BatchSize(extendedWidth), "batchSize(%v)", extendedWidth) + }) + } +} + +func assertNoRowContainsNID( + ctx context.Context, + t *testing.T, + bServ blockservice.BlockService, + eds *rsmt2d.ExtendedDataSquare, + namespace share.Namespace, + isAbsent bool, +) { + rowRoots, err := eds.RowRoots() + require.NoError(t, err) + rowRootCount := len(rowRoots) + // get all row root cids + rowRootCIDs := make([]cid.Cid, rowRootCount) + for i, rowRoot := range rowRoots { + rowRootCIDs[i] = MustCidFromNamespacedSha256(rowRoot) + } + + // for each row root cid check if the min namespace exists + var absentCount, foundAbsenceRows int + for _, rowRoot := range rowRoots { + var outsideRange bool + if !namespace.IsOutsideRange(rowRoot, rowRoot) { + // namespace does belong to namespace range of the row + absentCount++ + } else { + outsideRange = true + } + data := NewNamespaceData(rowRootCount, namespace, WithProofs()) + rootCID := MustCidFromNamespacedSha256(rowRoot) + err := data.CollectLeavesByNamespace(ctx, bServ, rootCID) + if outsideRange { + require.ErrorIs(t, err, ErrNamespaceOutsideRange) + continue + } + require.NoError(t, err) + + // if no error returned, check absence proof + foundAbsenceRows++ + verified := data.Proof().VerifyNamespace(sha256.New(), namespace.ToNMT(), nil, rowRoot) + require.True(t, verified) + } + + if isAbsent { + require.Equal(t, foundAbsenceRows, absentCount) + // there should be max 1 row that has namespace range containing namespace + require.LessOrEqual(t, absentCount, 1) + } +} + +func randomNamespaces(total int) ([]share.Namespace, error) { + namespaces := make([]share.Namespace, total) + for i := range namespaces { + namespaces[i] = sharetest.RandV0Namespace() + } + sort.Slice(namespaces, func(i, j int) bool { return bytes.Compare(namespaces[i], namespaces[j]) < 0 }) + return namespaces, nil +} diff --git a/share/ipld/namespace_data.go b/share/ipld/namespace_data.go new file mode 100644 index 0000000000..5a6fd2abb4 --- /dev/null +++ b/share/ipld/namespace_data.go @@ -0,0 +1,334 @@ +package ipld + +import ( + "context" + "errors" + "fmt" + "sync" + "sync/atomic" + + "github.com/ipfs/boxo/blockservice" + 
"github.com/ipfs/go-cid" + ipld "github.com/ipfs/go-ipld-format" + + "github.com/celestiaorg/nmt" + + "github.com/celestiaorg/celestia-node/share" +) + +var ErrNamespaceOutsideRange = errors.New("share/ipld: " + + "target namespace is outside of namespace range for the given root") + +// Option is the functional option that is applied to the NamespaceData instance +// to configure data that needs to be stored. +type Option func(*NamespaceData) + +// WithLeaves option specifies that leaves should be collected during retrieval. +func WithLeaves() Option { + return func(data *NamespaceData) { + // we over-allocate space for leaves since we do not know how many we will find + // on the level above, the length of the Row is passed in as maxShares + data.leaves = make([]ipld.Node, data.maxShares) + } +} + +// WithProofs option specifies that proofs should be collected during retrieval. +func WithProofs() Option { + return func(data *NamespaceData) { + data.proofs = newProofCollector(data.maxShares) + } +} + +// NamespaceData stores all leaves under the given namespace with their corresponding proofs. +type NamespaceData struct { + leaves []ipld.Node + proofs *proofCollector + + bounds fetchedBounds + maxShares int + namespace share.Namespace + + isAbsentNamespace atomic.Bool + absenceProofLeaf ipld.Node +} + +func NewNamespaceData(maxShares int, namespace share.Namespace, options ...Option) *NamespaceData { + data := &NamespaceData{ + // we don't know where in the tree the leaves in the namespace are, + // so we keep track of the bounds to return the correct slice + // maxShares acts as a sentinel to know if we find any leaves + bounds: fetchedBounds{int64(maxShares), 0}, + maxShares: maxShares, + namespace: namespace, + } + + for _, opt := range options { + opt(data) + } + return data +} + +func (n *NamespaceData) validate(rootCid cid.Cid) error { + if err := n.namespace.Validate(); err != nil { + return err + } + + if n.leaves == nil && n.proofs == nil { + return errors.New("share/ipld: empty NamespaceData, nothing specified to retrieve") + } + + root := NamespacedSha256FromCID(rootCid) + if n.namespace.IsOutsideRange(root, root) { + return ErrNamespaceOutsideRange + } + return nil +} + +func (n *NamespaceData) addLeaf(pos int, nd ipld.Node) { + // bounds will be needed in `Proof` method + n.bounds.update(int64(pos)) + + if n.isAbsentNamespace.Load() { + if n.absenceProofLeaf != nil { + log.Fatal("there should be only one absence leaf") + } + n.absenceProofLeaf = nd + return + } + + if n.leaves == nil { + return + } + + if nd != nil { + n.leaves[pos] = nd + } +} + +// noLeaves checks that there are no leaves under the given root in the given namespace. +func (n *NamespaceData) noLeaves() bool { + return n.bounds.lowest == int64(n.maxShares) +} + +type direction int + +const ( + left direction = iota + 1 + right +) + +func (n *NamespaceData) addProof(d direction, cid cid.Cid, depth int) { + if n.proofs == nil { + return + } + + switch d { + case left: + n.proofs.addLeft(cid, depth) + case right: + n.proofs.addRight(cid, depth) + default: + panic(fmt.Sprintf("share/ipld: invalid direction: %d", d)) + } +} + +// Leaves returns retrieved leaves within the bounds in case `WithLeaves` option was passed, +// otherwise nil will be returned. 
+func (n *NamespaceData) Leaves() []ipld.Node {
+	if n.leaves == nil || n.noLeaves() || n.isAbsentNamespace.Load() {
+		return nil
+	}
+	return n.leaves[n.bounds.lowest : n.bounds.highest+1]
+}
+
+// Proof returns the proof within the bounds if the `WithProofs` option was passed;
+// otherwise nil is returned.
+func (n *NamespaceData) Proof() *nmt.Proof {
+	if n.proofs == nil {
+		return nil
+	}
+
+	// return an empty Proof if leaves are not available
+	if n.noLeaves() {
+		return &nmt.Proof{}
+	}
+
+	nodes := make([][]byte, len(n.proofs.Nodes()))
+	for i, node := range n.proofs.Nodes() {
+		nodes[i] = NamespacedSha256FromCID(node)
+	}
+
+	if n.isAbsentNamespace.Load() {
+		proof := nmt.NewAbsenceProof(
+			int(n.bounds.lowest),
+			int(n.bounds.highest)+1,
+			nodes,
+			NamespacedSha256FromCID(n.absenceProofLeaf.Cid()),
+			NMTIgnoreMaxNamespace,
+		)
+		return &proof
+	}
+	proof := nmt.NewInclusionProof(
+		int(n.bounds.lowest),
+		int(n.bounds.highest)+1,
+		nodes,
+		NMTIgnoreMaxNamespace,
+	)
+	return &proof
+}
+
+// CollectLeavesByNamespace collects leaves and the corresponding proof that can be used to verify
+// leaf inclusion. It returns as many leaves from the given root with the given Namespace as
+// it can retrieve. If no shares are found, it returns a nil error. A
+// non-nil error means that only partial data is returned, because at least one share retrieval
+// failed. The following implementation is based on `GetShares`.
+func (n *NamespaceData) CollectLeavesByNamespace(
+	ctx context.Context,
+	bGetter blockservice.BlockGetter,
+	root cid.Cid,
+) error {
+	if err := n.validate(root); err != nil {
+		return err
+	}
+
+	// buffer the jobs to avoid blocking, we only need as many
+	// queued as the number of shares in the second-to-last layer
+	jobs := make(chan job, (n.maxShares+1)/2)
+	jobs <- job{cid: root, ctx: ctx}
+
+	var wg chanGroup
+	wg.jobs = jobs
+	wg.add(1)
+
+	var (
+		singleErr    sync.Once
+		retrievalErr error
+	)
+
+	for {
+		var j job
+		var ok bool
+		select {
+		case j, ok = <-jobs:
+		case <-ctx.Done():
+			return ctx.Err()
+		}
+
+		if !ok {
+			return retrievalErr
+		}
+		pool.Submit(func() {
+			defer wg.done()
+
+			// whether an error is returned here depends on the underlying impl
+			// of the blockservice; currently it is not a realistic probability
+			nd, err := GetNode(ctx, bGetter, j.cid)
+			if err != nil {
+				singleErr.Do(func() {
+					retrievalErr = err
+				})
+				log.Errorw("could not retrieve IPLD node",
+					"namespace", n.namespace.String(),
+					"pos", j.sharePos,
+					"err", err,
+				)
+				// we still need to update the bounds
+				n.addLeaf(j.sharePos, nil)
+				return
+			}
+
+			links := nd.Links()
+			if len(links) == 0 {
+				// successfully fetched a leaf belonging to the namespace
+				// we found a leaf, so we update the bounds
+				n.addLeaf(j.sharePos, nd)
+				return
+			}
+
+			// this node has links in the namespace, so keep walking
+			newJobs := n.traverseLinks(j, links)
+			for _, j := range newJobs {
+				wg.add(1)
+				select {
+				case jobs <- j:
+				case <-ctx.Done():
+					return
+				}
+			}
+		})
+	}
+}
+
+func (n *NamespaceData) traverseLinks(j job, links []*ipld.Link) []job {
+	if j.isAbsent {
+		return n.collectAbsenceProofs(j, links)
+	}
+	return n.collectNDWithProofs(j, links)
+}
+
+func (n *NamespaceData) collectAbsenceProofs(j job, links []*ipld.Link) []job {
+	leftLink := links[0].Cid
+	rightLink := links[1].Cid
+	// traverse to the left node, while collecting right node as proof
+	n.addProof(right, rightLink, j.depth)
+	return []job{j.next(left, leftLink, j.isAbsent)}
+}
+
+func (n *NamespaceData)
collectNDWithProofs(j job, links []*ipld.Link) []job { + leftCid := links[0].Cid + rightCid := links[1].Cid + leftLink := NamespacedSha256FromCID(leftCid) + rightLink := NamespacedSha256FromCID(rightCid) + + var nextJobs []job + // check if target namespace is outside of boundaries of both links + if n.namespace.IsOutsideRange(leftLink, rightLink) { + log.Fatalf("target namespace outside of boundaries of links at depth: %v", j.depth) + } + + if !n.namespace.IsAboveMax(leftLink) { + // namespace is within the range of left link + nextJobs = append(nextJobs, j.next(left, leftCid, false)) + } else { + // proof is on the left side, if the namespace is on the right side of the range of left link + n.addProof(left, leftCid, j.depth) + if n.namespace.IsBelowMin(rightLink) { + // namespace is not included in either links, convert to absence collector + n.isAbsentNamespace.Store(true) + nextJobs = append(nextJobs, j.next(right, rightCid, true)) + return nextJobs + } + } + + if !n.namespace.IsBelowMin(rightLink) { + // namespace is within the range of right link + nextJobs = append(nextJobs, j.next(right, rightCid, false)) + } else { + // proof is on the right side, if the namespace is on the left side of the range of right link + n.addProof(right, rightCid, j.depth) + } + return nextJobs +} + +type fetchedBounds struct { + lowest int64 + highest int64 +} + +// update checks if the passed index is outside the current bounds, +// and updates the bounds atomically if it extends them. +func (b *fetchedBounds) update(index int64) { + lowest := atomic.LoadInt64(&b.lowest) + // try to write index to the lower bound if appropriate, and retry until the atomic op is successful + // CAS ensures that we don't overwrite if the bound has been updated in another goroutine after the + // comparison here + for index < lowest && !atomic.CompareAndSwapInt64(&b.lowest, lowest, index) { + lowest = atomic.LoadInt64(&b.lowest) + } + // we always run both checks because element can be both the lower and higher bound + // for example, if there is only one share in the namespace + highest := atomic.LoadInt64(&b.highest) + for index > highest && !atomic.CompareAndSwapInt64(&b.highest, highest, index) { + highest = atomic.LoadInt64(&b.highest) + } +} diff --git a/share/ipld/nmt.go b/share/ipld/nmt.go new file mode 100644 index 0000000000..6dba300965 --- /dev/null +++ b/share/ipld/nmt.go @@ -0,0 +1,174 @@ +package ipld + +import ( + "context" + "crypto/sha256" + "errors" + "fmt" + "hash" + "math/rand" + + "github.com/ipfs/boxo/blockservice" + blocks "github.com/ipfs/go-block-format" + "github.com/ipfs/go-cid" + ipld "github.com/ipfs/go-ipld-format" + logging "github.com/ipfs/go-log/v2" + mh "github.com/multiformats/go-multihash" + mhcore "github.com/multiformats/go-multihash/core" + + "github.com/celestiaorg/celestia-app/pkg/appconsts" + "github.com/celestiaorg/celestia-app/pkg/da" + "github.com/celestiaorg/nmt" + + "github.com/celestiaorg/celestia-node/share" +) + +var ( + log = logging.Logger("ipld") +) + +const ( + // Below used multiformats (one codec, one multihash) seem free: + // https://github.com/multiformats/multicodec/blob/master/table.csv + + // nmtCodec is the codec used for leaf and inner nodes of a Namespaced Merkle Tree. + nmtCodec = 0x7700 + + // sha256NamespaceFlagged is the multihash code used to hash blocks + // that contain an NMT node (inner and leaf nodes). + sha256NamespaceFlagged = 0x7701 + + // NmtHashSize is the size of a digest created by an NMT in bytes. 
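+	// An NMT digest is laid out as minNamespace || maxNamespace || sha256 digest,
+	// which is where the 2*share.NamespaceSize + sha256.Size below comes from.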
+ NmtHashSize = 2*share.NamespaceSize + sha256.Size + + // innerNodeSize is the size of data in inner nodes. + innerNodeSize = NmtHashSize * 2 + + // leafNodeSize is the size of data in leaf nodes. + leafNodeSize = share.NamespaceSize + appconsts.ShareSize + + // cidPrefixSize is the size of the prepended buffer of the CID encoding + // for NamespacedSha256. For more information, see: + // https://multiformats.io/multihash/#the-multihash-format + cidPrefixSize = 4 + + // NMTIgnoreMaxNamespace is currently used value for IgnoreMaxNamespace option in NMT. + // IgnoreMaxNamespace defines whether the largest possible Namespace MAX_NID should be 'ignored'. + // If set to true, this allows for shorter proofs in particular use-cases. + NMTIgnoreMaxNamespace = true +) + +func init() { + // required for Bitswap to hash and verify inbound data correctly + mhcore.Register(sha256NamespaceFlagged, func() hash.Hash { + nh := nmt.NewNmtHasher(sha256.New(), share.NamespaceSize, true) + nh.Reset() + return nh + }) +} + +func GetNode(ctx context.Context, bGetter blockservice.BlockGetter, root cid.Cid) (ipld.Node, error) { + block, err := bGetter.GetBlock(ctx, root) + if err != nil { + var errNotFound ipld.ErrNotFound + if errors.As(err, &errNotFound) { + return nil, ErrNodeNotFound + } + return nil, err + } + + return nmtNode{Block: block}, nil +} + +type nmtNode struct { + blocks.Block +} + +func newNMTNode(id cid.Cid, data []byte) nmtNode { + b, err := blocks.NewBlockWithCid(data, id) + if err != nil { + panic(fmt.Sprintf("wrong hash for block, cid: %s", id.String())) + } + return nmtNode{Block: b} +} + +func (n nmtNode) Copy() ipld.Node { + d := make([]byte, len(n.RawData())) + copy(d, n.RawData()) + return newNMTNode(n.Cid(), d) +} + +func (n nmtNode) Links() []*ipld.Link { + switch len(n.RawData()) { + default: + panic(fmt.Sprintf("unexpected size %v", len(n.RawData()))) + case innerNodeSize: + leftCid := MustCidFromNamespacedSha256(n.RawData()[:NmtHashSize]) + rightCid := MustCidFromNamespacedSha256(n.RawData()[NmtHashSize:]) + + return []*ipld.Link{{Cid: leftCid}, {Cid: rightCid}} + case leafNodeSize: + return nil + } +} + +func (n nmtNode) Resolve([]string) (interface{}, []string, error) { + panic("method not implemented") +} + +func (n nmtNode) Tree(string, int) []string { + panic("method not implemented") +} + +func (n nmtNode) ResolveLink([]string) (*ipld.Link, []string, error) { + panic("method not implemented") +} + +func (n nmtNode) Stat() (*ipld.NodeStat, error) { + panic("method not implemented") +} + +func (n nmtNode) Size() (uint64, error) { + panic("method not implemented") +} + +// CidFromNamespacedSha256 uses a hash from an nmt tree to create a CID +func CidFromNamespacedSha256(namespacedHash []byte) (cid.Cid, error) { + if got, want := len(namespacedHash), NmtHashSize; got != want { + return cid.Cid{}, fmt.Errorf("invalid namespaced hash length, got: %v, want: %v", got, want) + } + buf, err := mh.Encode(namespacedHash, sha256NamespaceFlagged) + if err != nil { + return cid.Undef, err + } + return cid.NewCidV1(nmtCodec, buf), nil +} + +// MustCidFromNamespacedSha256 is a wrapper around cidFromNamespacedSha256 that panics +// in case of an error. Use with care and only in places where no error should occur. 
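+// A typical safe input is a hash produced by the NMT itself, e.g. (illustrative):
+//
+//	rcid := MustCidFromNamespacedSha256(dah.RowRoots[0])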
+func MustCidFromNamespacedSha256(hash []byte) cid.Cid {
+	cidFromHash, err := CidFromNamespacedSha256(hash)
+	if err != nil {
+		panic(
+			fmt.Sprintf("malformed hash: %s, codec: %v",
+				err,
+				mh.Codes[sha256NamespaceFlagged]),
+		)
+	}
+	return cidFromHash
+}
+
+// Translate transforms square coordinates into an IPLD NMT tree path to a leaf node.
+// It also adds randomization to evenly spread fetching from Rows and Columns.
+func Translate(dah *da.DataAvailabilityHeader, row, col int) (cid.Cid, int) {
+	if rand.Intn(2) == 0 { //nolint:gosec
+		return MustCidFromNamespacedSha256(dah.ColumnRoots[col]), row
+	}
+
+	return MustCidFromNamespacedSha256(dah.RowRoots[row]), col
+}
+
+// NamespacedSha256FromCID derives the Namespaced hash from the given CID.
+func NamespacedSha256FromCID(cid cid.Cid) []byte {
+	return cid.Hash()[cidPrefixSize:]
+}
diff --git a/share/ipld/nmt_adder.go b/share/ipld/nmt_adder.go
new file mode 100644
index 0000000000..7ce52859b2
--- /dev/null
+++ b/share/ipld/nmt_adder.go
@@ -0,0 +1,195 @@
+package ipld
+
+import (
+	"context"
+	"fmt"
+	"sync"
+
+	"github.com/ipfs/boxo/blockservice"
+	"github.com/ipfs/boxo/ipld/merkledag"
+	"github.com/ipfs/go-cid"
+	ipld "github.com/ipfs/go-ipld-format"
+
+	"github.com/celestiaorg/nmt"
+)
+
+type ctxKey int
+
+const (
+	proofsAdderKey ctxKey = iota
+)
+
+// NmtNodeAdder adds ipld.Nodes to the underlying ipld.Batch when they are
+// inserted into an NMT tree.
+type NmtNodeAdder struct {
+	// lock protects Batch, Set and error from parallel writes / reads
+	lock   sync.Mutex
+	ctx    context.Context
+	add    *ipld.Batch
+	leaves *cid.Set
+	err    error
+}
+
+// NewNmtNodeAdder returns a new NmtNodeAdder with the provided context and
+// batch. Note that the context provided should have a timeout.
+// It is not thread-safe.
+func NewNmtNodeAdder(ctx context.Context, bs blockservice.BlockService, opts ...ipld.BatchOption) *NmtNodeAdder {
+	return &NmtNodeAdder{
+		add:    ipld.NewBatch(ctx, merkledag.NewDAGService(bs), opts...),
+		ctx:    ctx,
+		leaves: cid.NewSet(),
+	}
+}
+
+// Visit is a NodeVisitor that can be used during the creation of a new NMT to
+// create and add ipld.Nodes to the Batch while computing the root of the NMT.
+func (n *NmtNodeAdder) Visit(hash []byte, children ...[]byte) {
+	n.lock.Lock()
+	defer n.lock.Unlock()
+
+	if n.err != nil {
+		return // protect from further visits if there is an error
+	}
+	id := MustCidFromNamespacedSha256(hash)
+	switch len(children) {
+	case 1:
+		if n.leaves.Visit(id) {
+			n.err = n.add.Add(n.ctx, newNMTNode(id, children[0]))
+		}
+	case 2:
+		n.err = n.add.Add(n.ctx, newNMTNode(id, append(children[0], children[1]...)))
+	default:
+		panic("expected a binary tree")
+	}
+}
+
+// Commit checks for errors that happened during Visit and, if there are none, commits data to the inner Batch.
+func (n *NmtNodeAdder) Commit() error {
+	n.lock.Lock()
+	defer n.lock.Unlock()
+
+	if n.err != nil {
+		return fmt.Errorf("before batch commit: %w", n.err)
+	}
+
+	n.err = n.add.Commit()
+	if n.err != nil {
+		return fmt.Errorf("after batch commit: %w", n.err)
+	}
+	return nil
+}
+
+// MaxSizeBatchOption sets the maximum amount of buffered data before writing
+// blocks.
+func MaxSizeBatchOption(size int) ipld.BatchOption {
+	return ipld.MaxSizeBatchOption(BatchSize(size))
+}
+
+// BatchSize calculates the number of nodes that are generated from a block of size 'squareSize'
+// to be batched in one write.
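+// For example (illustrative), squareSize=4 yields
+// (4*2-1)*4*2 - 4*4 = 56 - 16 = 40 nodes per batch.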
+func BatchSize(squareSize int) int {
+	// (squareSize*2-1) - amount of nodes in a generated binary tree
+	// squareSize*2 - the total number of trees, both for rows and cols
+	// (squareSize*squareSize) - all the shares
+	//
+	// Note that while our IPLD tree looks like this:
+	// ---X
+	// -X---X
+	// X-X-X-X
+	// here we count leaves only once: the CIDs are the same for columns and rows
+	// and for the last two layers as well:
+	return (squareSize*2-1)*squareSize*2 - (squareSize * squareSize)
+}
+
+// ProofsAdder is used to collect proof nodes while traversing the merkle tree.
+type ProofsAdder struct {
+	lock   sync.RWMutex
+	proofs map[cid.Cid][]byte
+}
+
+// NewProofsAdder creates a new instance of ProofsAdder.
+func NewProofsAdder(squareSize int) *ProofsAdder {
+	return &ProofsAdder{
+		// preallocate map to fit all inner nodes for given square size
+		proofs: make(map[cid.Cid][]byte, innerNodesAmount(squareSize)),
+	}
+}
+
+// CtxWithProofsAdder creates a context that will contain the ProofsAdder. If the context is leaked
+// to another goroutine, the proofs will not be collected by the GC. To prevent that, call Purge
+// after the proofs are collected from the adder, to preemptively release the memory allocated for them.
+func CtxWithProofsAdder(ctx context.Context, adder *ProofsAdder) context.Context {
+	return context.WithValue(ctx, proofsAdderKey, adder)
+}
+
+// ProofsAdderFromCtx extracts ProofsAdder from the context.
+func ProofsAdderFromCtx(ctx context.Context) *ProofsAdder {
+	val := ctx.Value(proofsAdderKey)
+	adder, ok := val.(*ProofsAdder)
+	if !ok || adder == nil {
+		return nil
+	}
+	return adder
+}
+
+// Proofs returns the proofs collected by ProofsAdder.
+func (a *ProofsAdder) Proofs() map[cid.Cid][]byte {
+	if a == nil {
+		return nil
+	}
+
+	a.lock.RLock()
+	defer a.lock.RUnlock()
+	return a.proofs
+}
+
+// VisitFn returns a NodeVisitorFn that will collect proof nodes while traversing the merkle tree.
+func (a *ProofsAdder) VisitFn() nmt.NodeVisitorFn {
+	if a == nil {
+		return nil
+	}
+
+	a.lock.RLock()
+	defer a.lock.RUnlock()
+
+	// proofs are already collected, don't collect a second time
+	if len(a.proofs) > 0 {
+		return nil
+	}
+	return a.visitInnerNodes
+}
+
+// Purge removes proofs from ProofsAdder, allowing the GC to reclaim the memory.
+func (a *ProofsAdder) Purge() {
+	if a == nil {
+		return
+	}
+
+	a.lock.Lock()
+	defer a.lock.Unlock()
+
+	a.proofs = nil
+}
+
+func (a *ProofsAdder) visitInnerNodes(hash []byte, children ...[]byte) {
+	switch len(children) {
+	case 1:
+		break
+	case 2:
+		id := MustCidFromNamespacedSha256(hash)
+		a.addProof(id, append(children[0], children[1]...))
+	default:
+		panic("expected a binary tree")
+	}
+}
+
+func (a *ProofsAdder) addProof(id cid.Cid, proof []byte) {
+	a.lock.Lock()
+	defer a.lock.Unlock()
+	a.proofs[id] = proof
+}
+
+// innerNodesAmount returns the number of inner nodes in an EDS of the given size.
+func innerNodesAmount(squareSize int) int {
+	return 2 * (squareSize - 1) * squareSize
+}
diff --git a/share/ipld/nmt_test.go b/share/ipld/nmt_test.go
new file mode 100644
index 0000000000..77268d7112
--- /dev/null
+++ b/share/ipld/nmt_test.go
@@ -0,0 +1,41 @@
+package ipld
+
+import (
+	"strconv"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	"github.com/celestiaorg/celestia-app/pkg/da"
+	"github.com/celestiaorg/rsmt2d"
+
+	"github.com/celestiaorg/celestia-node/share/eds/edstest"
+)
+
+// TestNamespaceFromCID checks that deriving the Namespaced hash from
+// the given CID works correctly.
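+// That is, NamespacedSha256FromCID(MustCidFromNamespacedSha256(h)) must return h
+// unchanged for every row root h.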
+func TestNamespaceFromCID(t *testing.T) { + var tests = []struct { + eds *rsmt2d.ExtendedDataSquare + }{ + // note that the number of shares must be a power of two + {eds: edstest.RandEDS(t, 4)}, + {eds: edstest.RandEDS(t, 16)}, + } + + for i, tt := range tests { + t.Run(strconv.Itoa(i), func(t *testing.T) { + dah, err := da.NewDataAvailabilityHeader(tt.eds) + require.NoError(t, err) + // check to make sure NamespacedHash is correctly derived from CID + for _, row := range dah.RowRoots { + c, err := CidFromNamespacedSha256(row) + require.NoError(t, err) + + got := NamespacedSha256FromCID(c) + assert.Equal(t, row, got) + } + }) + } +} diff --git a/share/ipld/proof_collector.go b/share/ipld/proof_collector.go new file mode 100644 index 0000000000..937c8b416e --- /dev/null +++ b/share/ipld/proof_collector.go @@ -0,0 +1,52 @@ +package ipld + +import ( + "math" + + "github.com/ipfs/go-cid" +) + +// proofCollector collects proof nodes' CIDs for the construction of a shares inclusion validation +// nmt.Proof. +type proofCollector struct { + left, right []cid.Cid +} + +func newProofCollector(maxShares int) *proofCollector { + // maximum possible amount of required proofs from each side is equal to tree height. + height := int(math.Log2(float64(maxShares))) + 1 + return &proofCollector{ + left: make([]cid.Cid, height), + right: make([]cid.Cid, height), + } +} + +func (c *proofCollector) addLeft(cid cid.Cid, depth int) { + c.left[depth] = cid +} + +func (c *proofCollector) addRight(cid cid.Cid, depth int) { + c.right[depth] = cid +} + +// Nodes returns nodes collected by proofCollector in the order that nmt.Proof validator will use +// to traverse the tree. +func (c *proofCollector) Nodes() []cid.Cid { + cids := make([]cid.Cid, 0, len(c.left)+len(c.right)) + // left side will be traversed in bottom-up order + for _, cid := range c.left { + if cid.Defined() { + cids = append(cids, cid) + } + } + + // right side of the tree will be traversed from top to bottom, + // so sort in reversed order + for i := len(c.right) - 1; i >= 0; i-- { + cid := c.right[i] + if cid.Defined() { + cids = append(cids, cid) + } + } + return cids +} diff --git a/ipld/plugin/test_helpers.go b/share/ipld/test_helpers.go similarity index 60% rename from ipld/plugin/test_helpers.go rename to share/ipld/test_helpers.go index f27f6b92db..e3456c0b25 100644 --- a/ipld/plugin/test_helpers.go +++ b/share/ipld/test_helpers.go @@ -1,7 +1,7 @@ -package plugin +package ipld import ( - mrand "math/rand" + "crypto/rand" "testing" "github.com/ipfs/go-cid" @@ -9,8 +9,8 @@ import ( ) func RandNamespacedCID(t *testing.T) cid.Cid { - raw := make([]byte, nmtHashSize) - _, err := mrand.Read(raw) // nolint:gosec // G404: Use of weak random number generator + raw := make([]byte, NmtHashSize) + _, err := rand.Read(raw) require.NoError(t, err) id, err := CidFromNamespacedSha256(raw) require.NoError(t, err) diff --git a/share/ipld/utils.go b/share/ipld/utils.go new file mode 100644 index 0000000000..d3e987e7f3 --- /dev/null +++ b/share/ipld/utils.go @@ -0,0 +1,18 @@ +package ipld + +import ( + "github.com/ipfs/go-cid" + + "github.com/celestiaorg/celestia-node/share" +) + +// FilterRootByNamespace returns the row roots from the given share.Root that contain the namespace. 
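+// The returned CIDs can then be fed, row by row, into
+// NamespaceData.CollectLeavesByNamespace to fetch the matching shares.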
+func FilterRootByNamespace(root *share.Root, namespace share.Namespace) []cid.Cid { + rowRootCIDs := make([]cid.Cid, 0, len(root.RowRoots)) + for _, row := range root.RowRoots { + if !namespace.IsOutsideRange(row, row) { + rowRootCIDs = append(rowRootCIDs, MustCidFromNamespacedSha256(row)) + } + } + return rowRootCIDs +} diff --git a/share/mocks/getter.go b/share/mocks/getter.go new file mode 100644 index 0000000000..738e2b246c --- /dev/null +++ b/share/mocks/getter.go @@ -0,0 +1,83 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/celestiaorg/celestia-node/share (interfaces: Getter) + +// Package mocks is a generated GoMock package. +package mocks + +import ( + context "context" + reflect "reflect" + + header "github.com/celestiaorg/celestia-node/header" + share "github.com/celestiaorg/celestia-node/share" + rsmt2d "github.com/celestiaorg/rsmt2d" + gomock "github.com/golang/mock/gomock" +) + +// MockGetter is a mock of Getter interface. +type MockGetter struct { + ctrl *gomock.Controller + recorder *MockGetterMockRecorder +} + +// MockGetterMockRecorder is the mock recorder for MockGetter. +type MockGetterMockRecorder struct { + mock *MockGetter +} + +// NewMockGetter creates a new mock instance. +func NewMockGetter(ctrl *gomock.Controller) *MockGetter { + mock := &MockGetter{ctrl: ctrl} + mock.recorder = &MockGetterMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockGetter) EXPECT() *MockGetterMockRecorder { + return m.recorder +} + +// GetEDS mocks base method. +func (m *MockGetter) GetEDS(arg0 context.Context, arg1 *header.ExtendedHeader) (*rsmt2d.ExtendedDataSquare, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetEDS", arg0, arg1) + ret0, _ := ret[0].(*rsmt2d.ExtendedDataSquare) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetEDS indicates an expected call of GetEDS. +func (mr *MockGetterMockRecorder) GetEDS(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetEDS", reflect.TypeOf((*MockGetter)(nil).GetEDS), arg0, arg1) +} + +// GetShare mocks base method. +func (m *MockGetter) GetShare(arg0 context.Context, arg1 *header.ExtendedHeader, arg2, arg3 int) ([]byte, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetShare", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].([]byte) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetShare indicates an expected call of GetShare. +func (mr *MockGetterMockRecorder) GetShare(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetShare", reflect.TypeOf((*MockGetter)(nil).GetShare), arg0, arg1, arg2, arg3) +} + +// GetSharesByNamespace mocks base method. +func (m *MockGetter) GetSharesByNamespace(arg0 context.Context, arg1 *header.ExtendedHeader, arg2 share.Namespace) (share.NamespacedShares, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetSharesByNamespace", arg0, arg1, arg2) + ret0, _ := ret[0].(share.NamespacedShares) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetSharesByNamespace indicates an expected call of GetSharesByNamespace. 
+func (mr *MockGetterMockRecorder) GetSharesByNamespace(arg0, arg1, arg2 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSharesByNamespace", reflect.TypeOf((*MockGetter)(nil).GetSharesByNamespace), arg0, arg1, arg2)
+}
diff --git a/share/namespace.go b/share/namespace.go
new file mode 100644
index 0000000000..df4ad74058
--- /dev/null
+++ b/share/namespace.go
@@ -0,0 +1,184 @@
+package share
+
+import (
+	"bytes"
+	"encoding/hex"
+	"fmt"
+
+	appns "github.com/celestiaorg/celestia-app/pkg/namespace"
+	"github.com/celestiaorg/nmt/namespace"
+)
+
+// NamespaceSize is a system-wide size for NMT namespaces.
+const NamespaceSize = appns.NamespaceSize
+
+// Various reserved namespaces.
+var (
+	// MaxPrimaryReservedNamespace is the highest primary reserved namespace.
+	// Namespaces lower than this are reserved for protocol use.
+	MaxPrimaryReservedNamespace = Namespace(appns.MaxPrimaryReservedNamespace.Bytes())
+	// MinSecondaryReservedNamespace is the lowest secondary reserved namespace.
+	// Namespaces higher than this are reserved for protocol use.
+	MinSecondaryReservedNamespace   = Namespace(appns.MinSecondaryReservedNamespace.Bytes())
+	ParitySharesNamespace           = Namespace(appns.ParitySharesNamespace.Bytes())
+	TailPaddingNamespace            = Namespace(appns.TailPaddingNamespace.Bytes())
+	PrimaryReservedPaddingNamespace = Namespace(appns.PrimaryReservedPaddingNamespace.Bytes())
+	TxNamespace                     = Namespace(appns.TxNamespace.Bytes())
+	PayForBlobNamespace             = Namespace(appns.PayForBlobNamespace.Bytes())
+	ISRNamespace                    = Namespace(appns.IntermediateStateRootsNamespace.Bytes())
+)
+
+// Namespace represents the namespace of a Share.
+// It consists of a version byte and a namespace ID.
+type Namespace []byte
+
+// NewBlobNamespaceV0 takes a variable-size byte slice and creates a valid version 0 Blob Namespace.
+// The byte slice must be <= 10 bytes.
+// If it is less than 10 bytes, it will be left-padded to size 10 with 0s.
+// Use the predefined namespaces above if a non-blob namespace is needed.
+func NewBlobNamespaceV0(id []byte) (Namespace, error) {
+	if len(id) == 0 || len(id) > appns.NamespaceVersionZeroIDSize {
+		return nil, fmt.Errorf(
+			"namespace id must be > 0 && <= %d, but it was %d bytes", appns.NamespaceVersionZeroIDSize, len(id))
+	}
+
+	n := make(Namespace, NamespaceSize)
+	// version and zero padding are already set as zero,
+	// so simply copying subNID to the end is enough to comply with the V0 spec
+	copy(n[len(n)-len(id):], id)
+	return n, n.ValidateForBlob()
+}
+
+// NamespaceFromBytes converts bytes into a Namespace and validates it.
+func NamespaceFromBytes(b []byte) (Namespace, error) {
+	n := Namespace(b)
+	return n, n.Validate()
+}
+
+// Version reports the version of the Namespace.
+func (n Namespace) Version() byte {
+	return n[appns.NamespaceVersionSize-1]
+}
+
+// ID reports the ID of the Namespace.
+func (n Namespace) ID() namespace.ID {
+	return namespace.ID(n[appns.NamespaceVersionSize:])
+}
+
+// ToNMT converts the whole Namespace (both the Version and ID parts) into NMT's namespace.ID
+// NOTE: Once https://github.com/celestiaorg/nmt/issues/206 is closed Namespace should become NMT's
+// type.
+func (n Namespace) ToNMT() namespace.ID {
+	return namespace.ID(n)
+}
+
+// ToAppNamespace converts the Namespace to App's definition of Namespace.
+// TODO: Unify types between node and app +func (n Namespace) ToAppNamespace() appns.Namespace { + return appns.Namespace{Version: n.Version(), ID: n.ID()} +} + +// Len reports the total length of the namespace. +func (n Namespace) Len() int { + return len(n) +} + +// String stringifies the Namespace. +func (n Namespace) String() string { + return hex.EncodeToString(n) +} + +// Equals compares two Namespaces. +func (n Namespace) Equals(target Namespace) bool { + return bytes.Equal(n, target) +} + +// Validate checks if the namespace is correct. +func (n Namespace) Validate() error { + if n.Len() != NamespaceSize { + return fmt.Errorf("invalid namespace length: expected %d, got %d", NamespaceSize, n.Len()) + } + if n.Version() != appns.NamespaceVersionZero && n.Version() != appns.NamespaceVersionMax { + return fmt.Errorf("invalid namespace version %v", n.Version()) + } + if len(n.ID()) != appns.NamespaceIDSize { + return fmt.Errorf("invalid namespace id length: expected %d, got %d", appns.NamespaceIDSize, n.ID().Size()) + } + if n.Version() == appns.NamespaceVersionZero && !bytes.HasPrefix(n.ID(), appns.NamespaceVersionZeroPrefix) { + return fmt.Errorf("invalid namespace id: expect %d leading zeroes", len(appns.NamespaceVersionZeroPrefix)) + } + return nil +} + +// ValidateForData checks if the Namespace is of real/useful data. +func (n Namespace) ValidateForData() error { + if err := n.Validate(); err != nil { + return err + } + if n.Equals(ParitySharesNamespace) || n.Equals(TailPaddingNamespace) { + return fmt.Errorf("invalid data namespace(%s): parity and tail padding namespace are forbidden", n) + } + if n.Version() != appns.NamespaceVersionZero { + return fmt.Errorf("invalid data namespace(%s): only version 0 is supported", n) + } + return nil +} + +// ValidateForBlob checks if the Namespace is valid blob namespace. +func (n Namespace) ValidateForBlob() error { + if err := n.ValidateForData(); err != nil { + return err + } + if bytes.Compare(n, MaxPrimaryReservedNamespace) < 1 { + return fmt.Errorf("invalid blob namespace(%s): reserved namespaces are forbidden", n) + } + if bytes.Compare(n, MinSecondaryReservedNamespace) > -1 { + return fmt.Errorf("invalid blob namespace(%s): reserved namespaces are forbidden", n) + } + return nil +} + +// IsAboveMax checks if the namespace is above the maximum namespace of the given hash. +func (n Namespace) IsAboveMax(nodeHash []byte) bool { + return !n.IsLessOrEqual(nodeHash[n.Len() : n.Len()*2]) +} + +// IsBelowMin checks if the target namespace is below the minimum namespace of the given hash. +func (n Namespace) IsBelowMin(nodeHash []byte) bool { + return n.IsLess(nodeHash[:n.Len()]) +} + +// IsOutsideRange checks if the namespace is outside the min-max range of the given hashes. +func (n Namespace) IsOutsideRange(leftNodeHash, rightNodeHash []byte) bool { + return n.IsBelowMin(leftNodeHash) || n.IsAboveMax(rightNodeHash) +} + +// Repeat copies the Namespace t times. +func (n Namespace) Repeat(t int) []Namespace { + ns := make([]Namespace, t) + for i := 0; i < t; i++ { + ns[i] = n + } + return ns +} + +// IsLess reports if the Namespace is less than the target. +func (n Namespace) IsLess(target Namespace) bool { + return bytes.Compare(n, target) == -1 +} + +// IsLessOrEqual reports if the Namespace is less than the target. +func (n Namespace) IsLessOrEqual(target Namespace) bool { + return bytes.Compare(n, target) < 1 +} + +// IsGreater reports if the Namespace is greater than the target. 
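+// Like the other comparison helpers here, it compares the raw namespace bytes
+// lexicographically via bytes.Compare.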
+func (n Namespace) IsGreater(target Namespace) bool { + return bytes.Compare(n, target) == 1 +} + +// IsGreaterOrEqualThan reports if the Namespace is greater or equal than the target. +func (n Namespace) IsGreaterOrEqualThan(target Namespace) bool { + return bytes.Compare(n, target) > -1 +} diff --git a/share/namespace_test.go b/share/namespace_test.go new file mode 100644 index 0000000000..786441b043 --- /dev/null +++ b/share/namespace_test.go @@ -0,0 +1,216 @@ +package share + +import ( + "bytes" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + appns "github.com/celestiaorg/celestia-app/pkg/namespace" +) + +var ( + validID = append( + appns.NamespaceVersionZeroPrefix, + bytes.Repeat([]byte{1}, appns.NamespaceVersionZeroIDSize)..., + ) + tooShortID = append(appns.NamespaceVersionZeroPrefix, []byte{1}...) + tooLongID = append(appns.NamespaceVersionZeroPrefix, bytes.Repeat([]byte{1}, NamespaceSize)...) + invalidPrefixID = bytes.Repeat([]byte{1}, NamespaceSize) +) + +func TestNewNamespaceV0(t *testing.T) { + type testCase struct { + name string + subNid []byte + expected Namespace + wantErr bool + } + testCases := []testCase{ + { + name: "8 byte id, gets left padded", + subNid: []byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08}, + expected: Namespace{ + 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // filled zeros + 0x0, 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8}, // id with left padding + wantErr: false, + }, + { + name: "10 byte id, no padding", + subNid: []byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x9, 0x10}, + expected: Namespace{ + 0x0, // version + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // filled zeros + 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x10}, // id + wantErr: false, + }, + { + name: "11 byte id", + subNid: []byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x9, 0x10, 0x11}, + expected: []byte{}, + wantErr: true, + }, + { + name: "nil id", + subNid: nil, + expected: []byte{}, + wantErr: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + got, err := NewBlobNamespaceV0(tc.subNid) + if tc.wantErr { + assert.Error(t, err) + return + } + assert.NoError(t, err) + assert.Equal(t, tc.expected, got) + }) + } +} + +func TestFrom(t *testing.T) { + type testCase struct { + name string + bytes []byte + wantErr bool + want Namespace + } + validNamespace := []byte{} + validNamespace = append(validNamespace, appns.NamespaceVersionZero) + validNamespace = append(validNamespace, appns.NamespaceVersionZeroPrefix...) + validNamespace = append(validNamespace, bytes.Repeat([]byte{0x1}, appns.NamespaceVersionZeroIDSize)...) 
+	parityNamespace := bytes.Repeat([]byte{0xFF}, NamespaceSize)
+
+	testCases := []testCase{
+		{
+			name:    "valid namespace",
+			bytes:   validNamespace,
+			wantErr: false,
+			want:    append([]byte{appns.NamespaceVersionZero}, validID...),
+		},
+		{
+			name:    "parity namespace",
+			bytes:   parityNamespace,
+			wantErr: false,
+			want:    append([]byte{appns.NamespaceVersionMax}, bytes.Repeat([]byte{0xFF}, appns.NamespaceIDSize)...),
+		},
+		{
+			name: "unsupported version",
+			bytes: append([]byte{1}, append(
+				appns.NamespaceVersionZeroPrefix,
+				bytes.Repeat([]byte{1}, NamespaceSize-len(appns.NamespaceVersionZeroPrefix))...,
+			)...),
+			wantErr: true,
+		},
+		{
+			name:    "unsupported id: too short",
+			bytes:   append([]byte{appns.NamespaceVersionZero}, tooShortID...),
+			wantErr: true,
+		},
+		{
+			name:    "unsupported id: too long",
+			bytes:   append([]byte{appns.NamespaceVersionZero}, tooLongID...),
+			wantErr: true,
+		},
+		{
+			name:    "unsupported id: invalid prefix",
+			bytes:   append([]byte{appns.NamespaceVersionZero}, invalidPrefixID...),
+			wantErr: true,
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			got, err := NamespaceFromBytes(tc.bytes)
+			if tc.wantErr {
+				assert.Error(t, err)
+				return
+			}
+			assert.NoError(t, err)
+			assert.Equal(t, tc.want, got)
+		})
+	}
+}
+
+func TestValidateForBlob(t *testing.T) {
+	type testCase struct {
+		name    string
+		ns      Namespace
+		wantErr bool
+	}
+
+	validNamespace, err := NewBlobNamespaceV0(bytes.Repeat([]byte{0x1}, appns.NamespaceVersionZeroIDSize))
+	require.NoError(t, err)
+
+	testCases := []testCase{
+		{
+			name:    "valid blob namespace",
+			ns:      validNamespace,
+			wantErr: false,
+		},
+		{
+			name:    "invalid blob namespace: parity shares namespace",
+			ns:      ParitySharesNamespace,
+			wantErr: true,
+		},
+		{
+			name:    "invalid blob namespace: tail padding namespace",
+			ns:      TailPaddingNamespace,
+			wantErr: true,
+		},
+		{
+			name:    "invalid blob namespace: tx namespace",
+			ns:      TxNamespace,
+			wantErr: true,
+		},
+		{
+			name:    "invalid blob namespace: namespace version max",
+			ns:      append([]byte{appns.NamespaceVersionMax}, bytes.Repeat([]byte{0x0}, appns.NamespaceIDSize)...),
+			wantErr: true,
+		},
+		{
+			name:    "invalid blob namespace: primary reserved namespace",
+			ns:      primaryReservedNamespace(0x10),
+			wantErr: true,
+		},
+		{
+			name:    "invalid blob namespace: secondary reserved namespace",
+			ns:      secondaryReservedNamespace(0x10),
+			wantErr: true,
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			err := tc.ns.ValidateForBlob()
+
+			if tc.wantErr {
+				assert.Error(t, err)
+				return
+			}
+			assert.NoError(t, err)
+		})
+	}
+}
+
+func primaryReservedNamespace(lastByte byte) Namespace {
+	result := make([]byte, 0, NamespaceSize)
+	result = append(result, appns.NamespaceVersionZero)
+	result = append(result, appns.NamespaceVersionZeroPrefix...)
+	result = append(result, bytes.Repeat([]byte{0x0}, appns.NamespaceVersionZeroIDSize-1)...)
+	result = append(result, lastByte)
+	return result
+}
+
+func secondaryReservedNamespace(lastByte byte) Namespace {
+	result := make([]byte, 0, NamespaceSize)
+	result = append(result, appns.NamespaceVersionMax)
+	result = append(result, bytes.Repeat([]byte{0xFF}, appns.NamespaceIDSize-1)...)
+	result = append(result, lastByte)
+	return result
+}
diff --git a/share/p2p/discovery/backoff.go b/share/p2p/discovery/backoff.go
new file mode 100644
index 0000000000..7294915727
--- /dev/null
+++ b/share/p2p/discovery/backoff.go
@@ -0,0 +1,116 @@
+package discovery
+
+import (
+	"context"
+	"errors"
+	"sync"
+	"time"
+
+	"github.com/libp2p/go-libp2p/core/host"
+	"github.com/libp2p/go-libp2p/core/peer"
+	"github.com/libp2p/go-libp2p/p2p/discovery/backoff"
+)
+
+const (
+	// gcInterval is the default period after which disconnected peers will be removed from the cache
+	gcInterval = time.Minute
+	// connectTimeout is the timeout used for dialing peers and discovering peer addresses.
+	connectTimeout = time.Minute * 2
+)
+
+var (
+	defaultBackoffFactory = backoff.NewFixedBackoff(time.Minute * 10)
+	errBackoffNotEnded    = errors.New("share/discovery: backoff period has not ended")
+)
+
+// backoffConnector wraps a libp2p.Host to establish a connection with peers
+// while adding a delay before the next connection attempt.
+type backoffConnector struct {
+	h       host.Host
+	backoff backoff.BackoffFactory
+
+	cacheLk   sync.Mutex
+	cacheData map[peer.ID]backoffData
+}
+
+// backoffData stores the time of the next connection attempt with the remote peer.
+type backoffData struct {
+	nexttry time.Time
+	backoff backoff.BackoffStrategy
+}
+
+func newBackoffConnector(h host.Host, factory backoff.BackoffFactory) *backoffConnector {
+	return &backoffConnector{
+		h:         h,
+		backoff:   factory,
+		cacheData: make(map[peer.ID]backoffData),
+	}
+}
+
+// Connect puts the peer into the backoff cache and tries to establish a connection with it.
+func (b *backoffConnector) Connect(ctx context.Context, p peer.AddrInfo) error {
+	if b.HasBackoff(p.ID) {
+		return errBackoffNotEnded
+	}
+
+	ctx, cancel := context.WithTimeout(ctx, connectTimeout)
+	defer cancel()
+
+	err := b.h.Connect(ctx, p)
+	// we don't want to add backoff when the context is canceled.
+	if !errors.Is(err, context.Canceled) {
+		b.Backoff(p.ID)
+	}
+	return err
+}
+
+// Backoff adds or extends the backoff delay for the peer.
+func (b *backoffConnector) Backoff(p peer.ID) {
+	b.cacheLk.Lock()
+	defer b.cacheLk.Unlock()
+
+	data, ok := b.cacheData[p]
+	if !ok {
+		data = backoffData{}
+		data.backoff = b.backoff()
+		b.cacheData[p] = data
+	}
+
+	data.nexttry = time.Now().Add(data.backoff.Delay())
+	b.cacheData[p] = data
+}
+
+// HasBackoff checks if the peer is in backoff.
+func (b *backoffConnector) HasBackoff(p peer.ID) bool {
+	b.cacheLk.Lock()
+	cache, ok := b.cacheData[p]
+	b.cacheLk.Unlock()
+	return ok && time.Now().Before(cache.nexttry)
+}
+
+// GC runs a perpetual garbage collection loop.
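+// Every gcInterval it drops cached peers whose backoff window has already
+// elapsed, so the cache cannot grow without bound.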
+func (b *backoffConnector) GC(ctx context.Context) { + ticker := time.NewTicker(gcInterval) + defer ticker.Stop() + + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + b.cacheLk.Lock() + for id, cache := range b.cacheData { + if cache.nexttry.Before(time.Now()) { + delete(b.cacheData, id) + } + } + b.cacheLk.Unlock() + } + } +} + +func (b *backoffConnector) Size() int { + b.cacheLk.Lock() + defer b.cacheLk.Unlock() + return len(b.cacheData) +} diff --git a/service/share/backoff_test.go b/share/p2p/discovery/backoff_test.go similarity index 94% rename from service/share/backoff_test.go rename to share/p2p/discovery/backoff_test.go index 48b342d0ee..24814ed199 100644 --- a/service/share/backoff_test.go +++ b/share/p2p/discovery/backoff_test.go @@ -1,15 +1,14 @@ -package share +package discovery import ( "context" "testing" "time" - "github.com/stretchr/testify/require" - - "github.com/libp2p/go-libp2p-core/host" + "github.com/libp2p/go-libp2p/core/host" "github.com/libp2p/go-libp2p/p2p/discovery/backoff" mocknet "github.com/libp2p/go-libp2p/p2p/net/mock" + "github.com/stretchr/testify/require" ) func TestBackoff_ConnectPeer(t *testing.T) { @@ -43,6 +42,6 @@ func TestBackoff_ResetBackoffPeriod(t *testing.T) { info := host.InfoFromHost(m.Hosts()[1]) require.NoError(t, b.Connect(ctx, *info)) nexttry := b.cacheData[info.ID].nexttry - b.RestartBackoff(info.ID) + b.Backoff(info.ID) require.True(t, b.cacheData[info.ID].nexttry.After(nexttry)) } diff --git a/share/p2p/discovery/discovery.go b/share/p2p/discovery/discovery.go new file mode 100644 index 0000000000..0f44d42dbe --- /dev/null +++ b/share/p2p/discovery/discovery.go @@ -0,0 +1,382 @@ +package discovery + +import ( + "context" + "errors" + "fmt" + "time" + + logging "github.com/ipfs/go-log/v2" + "github.com/libp2p/go-libp2p/core/discovery" + "github.com/libp2p/go-libp2p/core/event" + "github.com/libp2p/go-libp2p/core/host" + "github.com/libp2p/go-libp2p/core/network" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/p2p/host/eventbus" + "golang.org/x/sync/errgroup" +) + +var log = logging.Logger("share/discovery") + +const ( + // eventbusBufSize is the size of the buffered channel to handle + // events in libp2p. We specify a larger buffer size for the channel + // to avoid overflowing and blocking subscription during disconnection bursts. + // (by default it is 16) + eventbusBufSize = 64 + + // findPeersTimeout limits the FindPeers operation in time + findPeersTimeout = time.Minute + + // retryTimeout defines time interval between discovery and advertise attempts. + retryTimeout = time.Second + + // logInterval defines the time interval at which a warning message will be logged + // if the desired number of nodes is not detected. + logInterval = 5 * time.Minute +) + +// discoveryRetryTimeout defines time interval between discovery attempts, needed for tests +var discoveryRetryTimeout = retryTimeout + +// Discovery combines advertise and discover services and allows to store discovered nodes. 
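+// A minimal wiring sketch (illustrative; error handling omitted):
+//
+//	disc, _ := NewDiscovery(DefaultParameters(), host, routingDisc, "full")
+//	_ = disc.Start(ctx)
+//	go disc.Advertise(ctx)
+//	peers, _ := disc.Peers(ctx) // blocks until at least one peer is found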
+// TODO: The code here gets horribly hairy, so we should refactor this at some point
+type Discovery struct {
+	// tag is used as the rendezvous point for the discovery service
+	tag       string
+	set       *limitedSet
+	host      host.Host
+	disc      discovery.Discovery
+	connector *backoffConnector
+	// onUpdatedPeers will be called on peer set changes
+	onUpdatedPeers OnUpdatedPeers
+
+	triggerDisc chan struct{}
+
+	metrics *metrics
+
+	cancel context.CancelFunc
+
+	params *Parameters
+}
+
+type OnUpdatedPeers func(peerID peer.ID, isAdded bool)
+
+func (f OnUpdatedPeers) add(next OnUpdatedPeers) OnUpdatedPeers {
+	return func(peerID peer.ID, isAdded bool) {
+		f(peerID, isAdded)
+		next(peerID, isAdded)
+	}
+}
+
+// NewDiscovery constructs a new discovery.
+func NewDiscovery(
+	params *Parameters,
+	h host.Host,
+	d discovery.Discovery,
+	tag string,
+	opts ...Option,
+) (*Discovery, error) {
+	if err := params.Validate(); err != nil {
+		return nil, err
+	}
+
+	if tag == "" {
+		return nil, fmt.Errorf("discovery: tag cannot be empty")
+	}
+	o := newOptions(opts...)
+	return &Discovery{
+		tag:            tag,
+		set:            newLimitedSet(params.PeersLimit),
+		host:           h,
+		disc:           d,
+		connector:      newBackoffConnector(h, defaultBackoffFactory),
+		onUpdatedPeers: o.onUpdatedPeers,
+		params:         params,
+		triggerDisc:    make(chan struct{}),
+	}, nil
+}
+
+func (d *Discovery) Start(context.Context) error {
+	ctx, cancel := context.WithCancel(context.Background())
+	d.cancel = cancel
+
+	sub, err := d.host.EventBus().Subscribe(&event.EvtPeerConnectednessChanged{}, eventbus.BufSize(eventbusBufSize))
+	if err != nil {
+		return fmt.Errorf("subscribing for connection events: %w", err)
+	}
+
+	go d.discoveryLoop(ctx)
+	go d.disconnectsLoop(ctx, sub)
+	go d.connector.GC(ctx)
+	return nil
+}
+
+func (d *Discovery) Stop(context.Context) error {
+	d.cancel()
+	return nil
+}
+
+// Peers provides a list of discovered peers in the "full" topic.
+// If Discovery hasn't found any peers, it blocks until at least one peer is found.
+func (d *Discovery) Peers(ctx context.Context) ([]peer.ID, error) {
+	return d.set.Peers(ctx)
+}
+
+// Discard removes the peer from the peer set and rediscovers more if the soft peer limit is not
+// reached. Reports whether the peer was removed with bool.
+func (d *Discovery) Discard(id peer.ID) bool {
+	if !d.set.Contains(id) {
+		return false
+	}
+
+	d.host.ConnManager().Unprotect(id, d.tag)
+	d.connector.Backoff(id)
+	d.set.Remove(id)
+	d.onUpdatedPeers(id, false)
+	log.Debugw("removed peer from the peer set", "peer", id.String())
+
+	if d.set.Size() < d.set.Limit() {
+		// trigger discovery
+		select {
+		case d.triggerDisc <- struct{}{}:
+		default:
+		}
+	}
+
+	return true
+}
+
+// Advertise is a utility function that persistently advertises a service through an Advertiser.
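+// It blocks until the context is canceled, so callers typically run it in its
+// own goroutine, e.g. go d.Advertise(ctx).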
+// TODO: Start advertising only after the reachability is confirmed by AutoNAT
+func (d *Discovery) Advertise(ctx context.Context) {
+	timer := time.NewTimer(d.params.AdvertiseInterval)
+	defer timer.Stop()
+	for {
+		_, err := d.disc.Advertise(ctx, d.tag)
+		d.metrics.observeAdvertise(ctx, err)
+		if err != nil {
+			if ctx.Err() != nil {
+				return
+			}
+			log.Warnw("error advertising", "rendezvous", d.tag, "err", err)
+
+			// we don't want to retry indefinitely in a busy loop;
+			// the internal discovery mechanism may need some time between attempts
+			errTimer := time.NewTimer(retryTimeout)
+			select {
+			case <-errTimer.C:
+				errTimer.Stop()
+				if !timer.Stop() {
+					<-timer.C
+				}
+				continue
+			case <-ctx.Done():
+				errTimer.Stop()
+				return
+			}
+		}
+
+		log.Debugf("advertised")
+		if !timer.Stop() {
+			<-timer.C
+		}
+		timer.Reset(d.params.AdvertiseInterval)
+		select {
+		case <-timer.C:
+		case <-ctx.Done():
+			return
+		}
+	}
+}
+
+// discoveryLoop ensures we always have '~peerLimit' connected peers.
+// It initiates peer discovery upon request and restarts the process until the soft limit is
+// reached.
+func (d *Discovery) discoveryLoop(ctx context.Context) {
+	t := time.NewTicker(discoveryRetryTimeout)
+	defer t.Stop()
+
+	warnTicker := time.NewTicker(logInterval)
+	defer warnTicker.Stop()
+
+	for {
+		// drain all previous ticks from the channel
+		drainChannel(t.C)
+		select {
+		case <-t.C:
+			if !d.discover(ctx) {
+				// rerun discovery if the number of peers hasn't reached the limit
+				continue
+			}
+		case <-warnTicker.C:
+			if d.set.Size() < d.set.Limit() {
+				log.Warnf(
+					"Potentially degraded connectivity, unable to discover the desired amount of full node peers in %v. "+
+						"Number of peers discovered: %d. Required: %d.",
+					logInterval, d.set.Size(), d.set.Limit(),
+				)
+			}
+			// Do not break the loop; just continue
+			continue
+		case <-ctx.Done():
+			return
+		}
+	}
+}
+
+// disconnectsLoop listens for disconnect events and ensures Discovery state
+// is updated.
+func (d *Discovery) disconnectsLoop(ctx context.Context, sub event.Subscription) {
+	defer sub.Close()
+
+	for {
+		select {
+		case <-ctx.Done():
+			return
+		case e, ok := <-sub.Out():
+			if !ok {
+				log.Error("connection subscription was closed unexpectedly")
+				return
+			}
+
+			if evnt := e.(event.EvtPeerConnectednessChanged); evnt.Connectedness == network.NotConnected {
+				d.Discard(evnt.Peer)
+			}
+		}
+	}
+}
+
+// discover finds new peers and reports whether it succeeded.
+func (d *Discovery) discover(ctx context.Context) bool {
+	size := d.set.Size()
+	want := d.set.Limit() - size
+	if want == 0 {
+		log.Debugw("reached soft peer limit, skipping discovery", "size", size)
+		return true
+	}
+	// TODO @renaynay: eventually, have a mechanism to catch if wanted amount of peers
+	// has not been discovered in X amount of time so that users are warned of degraded
+	// FN connectivity.
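+	// (The logInterval warning in discoveryLoop above already partially covers this.)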
+ log.Debugw("discovering peers", "want", want) + + // we use errgroup as it provide limits + var wg errgroup.Group + // limit to minimize chances of overreaching the limit + wg.SetLimit(int(d.set.Limit())) + + findCtx, findCancel := context.WithTimeout(ctx, findPeersTimeout) + defer func() { + // some workers could still be running, wait them to finish before canceling findCtx + wg.Wait() //nolint:errcheck + findCancel() + }() + + peers, err := d.disc.FindPeers(findCtx, d.tag) + if err != nil { + log.Error("unable to start discovery", "err", err) + return false + } + + for { + select { + case p, ok := <-peers: + if !ok { + break + } + + peer := p + wg.Go(func() error { + if findCtx.Err() != nil { + log.Debug("find has been canceled, skip peer") + return nil + } + + // we don't pass findCtx so that we don't cancel in progress connections + // that are likely to be valuable + if !d.handleDiscoveredPeer(ctx, peer) { + return nil + } + + size := d.set.Size() + log.Debugw("found peer", "peer", peer.ID.String(), "found_amount", size) + if size < d.set.Limit() { + return nil + } + + log.Infow("discovered wanted peers", "amount", size) + findCancel() // stop discovery when we are done + return nil + }) + + continue + case <-findCtx.Done(): + } + + isEnoughPeers := d.set.Size() >= d.set.Limit() + d.metrics.observeFindPeers(ctx, isEnoughPeers) + log.Debugw("discovery finished", "discovered_wanted", isEnoughPeers) + return isEnoughPeers + } +} + +// handleDiscoveredPeer adds peer to the internal if can connect or is connected. +// Report whether it succeeded. +func (d *Discovery) handleDiscoveredPeer(ctx context.Context, peer peer.AddrInfo) bool { + logger := log.With("peer", peer.ID.String()) + switch { + case peer.ID == d.host.ID(): + d.metrics.observeHandlePeer(ctx, handlePeerSkipSelf) + logger.Debug("skip handle: self discovery") + return false + case d.set.Size() >= d.set.Limit(): + d.metrics.observeHandlePeer(ctx, handlePeerEnoughPeers) + logger.Debug("skip handle: enough peers found") + return false + } + + switch d.host.Network().Connectedness(peer.ID) { + case network.Connected: + d.connector.Backoff(peer.ID) // we still have to backoff the connected peer + case network.NotConnected: + err := d.connector.Connect(ctx, peer) + if errors.Is(err, errBackoffNotEnded) { + d.metrics.observeHandlePeer(ctx, handlePeerBackoff) + logger.Debug("skip handle: backoff") + return false + } + if err != nil { + d.metrics.observeHandlePeer(ctx, handlePeerConnErr) + logger.Debugw("unable to connect", "err", err) + return false + } + default: + panic("unknown connectedness") + } + + if !d.set.Add(peer.ID) { + d.metrics.observeHandlePeer(ctx, handlePeerInSet) + logger.Debug("peer is already in discovery set") + return false + } + d.onUpdatedPeers(peer.ID, true) + d.metrics.observeHandlePeer(ctx, handlePeerConnected) + logger.Debug("added peer to set") + + // Tag to protect peer from being killed by ConnManager + // NOTE: This is does not protect from remote killing the connection. + // In the future, we should design a protocol that keeps bidirectional agreement on whether + // connection should be kept or not, similar to mesh link in GossipSub. 
+	d.host.ConnManager().Protect(peer.ID, d.tag)
+	return true
+}
+
+func drainChannel(c <-chan time.Time) {
+	for {
+		select {
+		case <-c:
+		default:
+			return
+		}
+	}
+}
diff --git a/share/p2p/discovery/discovery_test.go b/share/p2p/discovery/discovery_test.go
new file mode 100644
index 0000000000..1d0078196f
--- /dev/null
+++ b/share/p2p/discovery/discovery_test.go
@@ -0,0 +1,210 @@
+//go:build !race
+
+package discovery
+
+import (
+	"context"
+	"testing"
+	"time"
+
+	dht "github.com/libp2p/go-libp2p-kad-dht"
+	"github.com/libp2p/go-libp2p/core/discovery"
+	"github.com/libp2p/go-libp2p/core/host"
+	"github.com/libp2p/go-libp2p/core/peer"
+	"github.com/libp2p/go-libp2p/p2p/discovery/routing"
+	basic "github.com/libp2p/go-libp2p/p2p/host/basic"
+	"github.com/libp2p/go-libp2p/p2p/host/eventbus"
+	swarmt "github.com/libp2p/go-libp2p/p2p/net/swarm/testing"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+const (
+	fullNodesTag = "full"
+)
+
+func TestDiscovery(t *testing.T) {
+	const nodes = 10 // higher number brings higher coverage
+
+	discoveryRetryTimeout = time.Millisecond * 100 // defined in discovery.go
+
+	ctx, cancel := context.WithTimeout(context.Background(), time.Second*20)
+	t.Cleanup(cancel)
+
+	tn := newTestnet(ctx, t)
+
+	type peerUpdate struct {
+		peerID  peer.ID
+		isAdded bool
+	}
+	updateCh := make(chan peerUpdate)
+	submit := func(peerID peer.ID, isAdded bool) {
+		updateCh <- peerUpdate{peerID: peerID, isAdded: isAdded}
+	}
+
+	host, routingDisc := tn.peer()
+	params := DefaultParameters()
+	params.PeersLimit = nodes
+
+	// start discovery listener service for peerA
+	peerA := tn.startNewDiscovery(params, host, routingDisc, fullNodesTag,
+		WithOnPeersUpdate(submit),
+	)
+
+	// start discovery advertisement services for other peers
+	params.AdvertiseInterval = time.Millisecond * 100
+	discs := make([]*Discovery, nodes)
+	for i := range discs {
+		host, routingDisc := tn.peer()
+		disc, err := NewDiscovery(params, host, routingDisc, fullNodesTag)
+		require.NoError(t, err)
+		go disc.Advertise(tn.ctx)
+		discs[i] = tn.startNewDiscovery(params, host, routingDisc, fullNodesTag)
+
+		select {
+		case res := <-updateCh:
+			require.Equal(t, discs[i].host.ID(), res.peerID)
+			require.True(t, res.isAdded)
+		case <-ctx.Done():
+			t.Fatal("did not discover peer in time")
+		}
+	}
+
+	assert.EqualValues(t, nodes, peerA.set.Size())
+
+	// disconnect peerA from all peers and check that notifications are received on updateCh channel
+	for _, disc := range discs {
+		peerID := disc.host.ID()
+		err := peerA.host.Network().ClosePeer(peerID)
+		require.NoError(t, err)
+
+		select {
+		case res := <-updateCh:
+			require.Equal(t, peerID, res.peerID)
+			require.False(t, res.isAdded)
+		case <-ctx.Done():
+			t.Fatal("did not disconnect from peer in time")
+		}
+	}
+
+	assert.EqualValues(t, 0, peerA.set.Size())
+}
+
+func TestDiscoveryTagged(t *testing.T) {
+	ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
+	t.Cleanup(cancel)
+
+	tn := newTestnet(ctx, t)
+
+	// launch 2 peers, that advertise with different tags
+	adv1, routingDisc1 := tn.peer()
+	adv2, routingDisc2 := tn.peer()
+
+	// sub will discover both peers, but on different tags
+	sub, routingDisc := tn.peer()
+
+	params := DefaultParameters()
+
+	// create 2 discovery services for sub, each with a different tag
+	done1 := make(chan struct{})
+	tn.startNewDiscovery(params, sub, routingDisc, "tag1",
+		WithOnPeersUpdate(checkPeer(t, adv1.ID(), done1)))
+
+	done2 := make(chan struct{})
+	tn.startNewDiscovery(params, sub, routingDisc, "tag2",
+		WithOnPeersUpdate(checkPeer(t, adv2.ID(), done2)))
+
+	// run discovery services for advertisers
+	ds1 := tn.startNewDiscovery(params, adv1, routingDisc1, "tag1")
+	go ds1.Advertise(tn.ctx)
+
+	ds2 := tn.startNewDiscovery(params, adv2, routingDisc2, "tag2")
+	go ds2.Advertise(tn.ctx)
+
+	// wait for discovery services to discover each other on different tags
+	select {
+	case <-done1:
+	case <-ctx.Done():
+		t.Fatal("did not discover peer in time")
+	}
+
+	select {
+	case <-done2:
+	case <-ctx.Done():
+		t.Fatal("did not discover peer in time")
+	}
+}
+
+type testnet struct {
+	ctx context.Context
+	T   *testing.T
+
+	bootstrapper peer.AddrInfo
+}
+
+func newTestnet(ctx context.Context, t *testing.T) *testnet {
+	bus := eventbus.NewBus()
+	swarm := swarmt.GenSwarm(t, swarmt.OptDisableTCP, swarmt.EventBus(bus))
+	hst, err := basic.NewHost(swarm, &basic.HostOpts{EventBus: bus})
+	require.NoError(t, err)
+	hst.Start()
+
+	_, err = dht.New(ctx, hst,
+		dht.Mode(dht.ModeServer),
+		dht.BootstrapPeers(),
+		dht.ProtocolPrefix("/test"),
+	)
+	require.NoError(t, err)
+
+	return &testnet{ctx: ctx, T: t, bootstrapper: *host.InfoFromHost(hst)}
+}
+
+func (t *testnet) startNewDiscovery(
+	params *Parameters,
+	hst host.Host,
+	routingDisc discovery.Discovery,
+	tag string,
+	opts ...Option,
+) *Discovery {
+	disc, err := NewDiscovery(params, hst, routingDisc, tag, opts...)
+	require.NoError(t.T, err)
+	err = disc.Start(t.ctx)
+	require.NoError(t.T, err)
+	t.T.Cleanup(func() {
+		err := disc.Stop(t.ctx)
+		require.NoError(t.T, err)
+	})
+	return disc
+}
+
+func (t *testnet) peer() (host.Host, discovery.Discovery) {
+	bus := eventbus.NewBus()
+	swarm := swarmt.GenSwarm(t.T, swarmt.OptDisableTCP, swarmt.EventBus(bus))
+	hst, err := basic.NewHost(swarm, &basic.HostOpts{EventBus: bus})
+	require.NoError(t.T, err)
+	hst.Start()
+
+	err = hst.Connect(t.ctx, t.bootstrapper)
+	require.NoError(t.T, err)
+
+	dht, err := dht.New(t.ctx, hst,
+		dht.Mode(dht.ModeServer),
+		dht.ProtocolPrefix("/test"),
+		// needed to reduce connections to peers on DHT level
+		dht.BucketSize(1),
+	)
+	require.NoError(t.T, err)
+
+	err = dht.Bootstrap(t.ctx)
+	require.NoError(t.T, err)
+
+	return hst, routing.NewRoutingDiscovery(dht)
+}
+
+func checkPeer(t *testing.T, expected peer.ID, done chan struct{}) func(peerID peer.ID, isAdded bool) {
+	return func(peerID peer.ID, isAdded bool) {
+		defer close(done)
+		require.Equal(t, expected, peerID)
+		require.True(t, isAdded)
+	}
+}
diff --git a/share/p2p/discovery/metrics.go b/share/p2p/discovery/metrics.go
new file mode 100644
index 0000000000..78b62a7d97
--- /dev/null
+++ b/share/p2p/discovery/metrics.go
@@ -0,0 +1,161 @@
+package discovery
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/libp2p/go-libp2p/core/peer"
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/metric"
+
+	"github.com/celestiaorg/celestia-node/libs/utils"
+)
+
+const (
+	discoveryEnoughPeersKey = "enough_peers"
+
+	handlePeerResultKey                    = "result"
+	handlePeerSkipSelf    handlePeerResult = "skip_self"
+	handlePeerEnoughPeers handlePeerResult = "skip_enough_peers"
+	handlePeerBackoff     handlePeerResult = "skip_backoff"
+	handlePeerConnected   handlePeerResult = "connected"
+	handlePeerConnErr     handlePeerResult = "conn_err"
+	handlePeerInSet       handlePeerResult = "in_set"
+
+	advertiseFailedKey = "failed"
+)
+
+var meter = otel.Meter("share_discovery")
+
+type handlePeerResult string
+
+type metrics struct {
+	peersAmount      metric.Int64ObservableGauge
+	discoveryResult  metric.Int64Counter // attributes: enough_peers[bool], is_canceled[bool]
+	handlePeerResult metric.Int64Counter // attributes: result[string]
+	advertise        metric.Int64Counter // attributes: failed[bool]
+	peerAdded        metric.Int64Counter
+	peerRemoved      metric.Int64Counter
+}
+
+// WithMetrics turns on metric collection in discovery.
+func (d *Discovery) WithMetrics() error {
+	metrics, err := initMetrics(d)
+	if err != nil {
+		return fmt.Errorf("discovery: init metrics: %w", err)
+	}
+	d.metrics = metrics
+	d.onUpdatedPeers = d.onUpdatedPeers.add(metrics.observeOnPeersUpdate)
+	return nil
+}
+
+func initMetrics(d *Discovery) (*metrics, error) {
+	peersAmount, err := meter.Int64ObservableGauge("discovery_amount_of_peers",
+		metric.WithDescription("amount of peers in discovery set"))
+	if err != nil {
+		return nil, err
+	}
+
+	discoveryResult, err := meter.Int64Counter("discovery_find_peers_result",
+		metric.WithDescription("result of find peers run"))
+	if err != nil {
+		return nil, err
+	}
+
+	handlePeerResultCounter, err := meter.Int64Counter("discovery_handler_peer_result",
+		metric.WithDescription("result handling found peer"))
+	if err != nil {
+		return nil, err
+	}
+
+	advertise, err := meter.Int64Counter("discovery_advertise_event",
+		metric.WithDescription("advertise events counter"))
+	if err != nil {
+		return nil, err
+	}
+
+	peerAdded, err := meter.Int64Counter("discovery_add_peer",
+		metric.WithDescription("add peer to discovery set counter"))
+	if err != nil {
+		return nil, err
+	}
+
+	peerRemoved, err := meter.Int64Counter("discovery_remove_peer",
+		metric.WithDescription("remove peer from discovery set counter"))
+	if err != nil {
+		return nil, err
+	}
+
+	backOffSize, err := meter.Int64ObservableGauge("discovery_backoff_amount",
+		metric.WithDescription("amount of peers in backoff"))
+	if err != nil {
+		return nil, err
+	}
+
+	metrics := &metrics{
+		peersAmount:      peersAmount,
+		discoveryResult:  discoveryResult,
+		handlePeerResult: handlePeerResultCounter,
+		advertise:        advertise,
+		peerAdded:        peerAdded,
+		peerRemoved:      peerRemoved,
+	}
+
+	callback := func(ctx context.Context, observer metric.Observer) error {
+		observer.ObserveInt64(peersAmount, int64(d.set.Size()))
+		observer.ObserveInt64(backOffSize, int64(d.connector.Size()))
+		return nil
+	}
+	_, err = meter.RegisterCallback(callback, peersAmount, backOffSize)
+	if err != nil {
+		return nil, fmt.Errorf("registering metrics callback: %w", err)
+	}
+	return metrics, nil
+}
+
+func (m *metrics) observeFindPeers(ctx context.Context, isEnoughPeers bool) {
+	if m == nil {
+		return
+	}
+	ctx = utils.ResetContextOnError(ctx)
+
+	m.discoveryResult.Add(ctx, 1,
+		metric.WithAttributes(
+			attribute.Bool(discoveryEnoughPeersKey, isEnoughPeers)))
+}
+
+func (m *metrics) observeHandlePeer(ctx context.Context, result handlePeerResult) {
+	if m == nil {
+		return
+	}
+	ctx = utils.ResetContextOnError(ctx)
+
+	m.handlePeerResult.Add(ctx, 1,
+		metric.WithAttributes(
+			attribute.String(handlePeerResultKey, string(result))))
+}
+
+func (m *metrics) observeAdvertise(ctx context.Context, err error) {
+	if m == nil {
+		return
+	}
+	ctx = utils.ResetContextOnError(ctx)
+
+	m.advertise.Add(ctx, 1,
+		metric.WithAttributes(
+			attribute.Bool(advertiseFailedKey, err != nil)))
+}
+
+func (m *metrics) observeOnPeersUpdate(_ peer.ID, isAdded bool) {
+	if m == nil {
+		return
+	}
+	ctx := context.Background()
+
+	if isAdded {
+		m.peerAdded.Add(ctx, 1)
+		return
+	}
+	m.peerRemoved.Add(ctx, 1)
+}
diff --git a/share/p2p/discovery/options.go b/share/p2p/discovery/options.go
new file mode 100644
index 0000000000..de4b13a7db
--- /dev/null
+++ b/share/p2p/discovery/options.go
@@ -0,0 +1,67 @@
+package discovery
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/libp2p/go-libp2p/core/peer"
+)
+
+// Parameters is the set of Parameters that must be configured for the Discovery module
+type Parameters struct {
+	// PeersLimit defines the soft limit of FNs to connect to via discovery.
+	// Set 0 to disable.
+	PeersLimit uint
+	// AdvertiseInterval is an interval between advertising sessions.
+	// Set -1 to disable.
+	// NOTE: only full and bridge can advertise themselves.
+	AdvertiseInterval time.Duration
+}
+
+// options is the set of options that can be configured for the Discovery module
+type options struct {
+	// onUpdatedPeers will be called on peer set changes
+	onUpdatedPeers OnUpdatedPeers
+}
+
+// Option is a function that configures Discovery Parameters
+type Option func(*options)
+
+// DefaultParameters returns the default Parameters' configuration values
+// for the Discovery module
+func DefaultParameters() *Parameters {
+	return &Parameters{
+		PeersLimit:        5,
+		AdvertiseInterval: time.Hour,
+	}
+}
+
+// Validate validates the values in Parameters
+func (p *Parameters) Validate() error {
+	if p.PeersLimit <= 0 {
+		return fmt.Errorf("discovery: peers limit cannot be zero or negative")
+	}
+
+	if p.AdvertiseInterval <= 0 {
+		return fmt.Errorf("discovery: advertise interval cannot be zero or negative")
+	}
+	return nil
+}
+
+// WithOnPeersUpdate chains OnPeersUpdate callbacks on every update of discovered peers list.
+func WithOnPeersUpdate(f OnUpdatedPeers) Option {
+	return func(p *options) {
+		p.onUpdatedPeers = p.onUpdatedPeers.add(f)
+	}
+}
+
+func newOptions(opts ...Option) *options {
+	defaults := &options{
+		onUpdatedPeers: func(peer.ID, bool) {},
+	}
+
+	for _, opt := range opts {
+		opt(defaults)
+	}
+	return defaults
+}
diff --git a/share/p2p/discovery/set.go b/share/p2p/discovery/set.go
new file mode 100644
index 0000000000..a22e10f06e
--- /dev/null
+++ b/share/p2p/discovery/set.go
@@ -0,0 +1,93 @@
+package discovery
+
+import (
+	"context"
+	"sync"
+
+	"github.com/libp2p/go-libp2p/core/peer"
+)
+
+// limitedSet is a thread-safe set of peers with a given limit.
+// Inspired by libp2p peer.Set but extended with a Remove method.
+type limitedSet struct {
+	lk sync.RWMutex
+	ps map[peer.ID]struct{}
+
+	limit    uint
+	waitPeer chan peer.ID
+}
+
+// newLimitedSet constructs a set with a maximum amount of peers.
+func newLimitedSet(limit uint) *limitedSet {
+	ps := new(limitedSet)
+	ps.ps = make(map[peer.ID]struct{})
+	ps.limit = limit
+	ps.waitPeer = make(chan peer.ID)
+	return ps
+}
+
+func (ps *limitedSet) Contains(p peer.ID) bool {
+	ps.lk.RLock()
+	_, ok := ps.ps[p]
+	ps.lk.RUnlock()
+	return ok
+}
+
+func (ps *limitedSet) Limit() uint {
+	return ps.limit
+}
+
+func (ps *limitedSet) Size() uint {
+	ps.lk.RLock()
+	defer ps.lk.RUnlock()
+	return uint(len(ps.ps))
+}
+
+// Add attempts to add the given peer into the set.
+func (ps *limitedSet) Add(p peer.ID) (added bool) {
+	ps.lk.Lock()
+	if _, ok := ps.ps[p]; ok {
+		ps.lk.Unlock()
+		return false
+	}
+	ps.ps[p] = struct{}{}
+	ps.lk.Unlock()
+
+	for {
+		// the peer will be pushed to the channel only when somebody is reading from it.
+		// this is done to handle the case when Peers() is called on an empty set.
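+		// a successful send wakes one blocked Peers() caller; the loop then
+		// retries so that every waiting caller is served, and exits via the
+		// default case once nobody is receiving.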
+		select {
+		case ps.waitPeer <- p:
+		default:
+			return true
+		}
+	}
+}
+
+func (ps *limitedSet) Remove(id peer.ID) {
+	ps.lk.Lock()
+	delete(ps.ps, id)
+	ps.lk.Unlock()
+}
+
+// Peers returns all discovered peers from the set.
+func (ps *limitedSet) Peers(ctx context.Context) ([]peer.ID, error) {
+	ps.lk.RLock()
+	if len(ps.ps) > 0 {
+		out := make([]peer.ID, 0, len(ps.ps))
+		for p := range ps.ps {
+			out = append(out, p)
+		}
+		ps.lk.RUnlock()
+		return out, nil
+	}
+	ps.lk.RUnlock()
+
+	// block until a new peer is discovered
+	select {
+	case <-ctx.Done():
+		return nil, ctx.Err()
+	case p := <-ps.waitPeer:
+		return []peer.ID{p}, nil
+	}
+}
diff --git a/service/share/set_test.go b/share/p2p/discovery/set_test.go
similarity index 52%
rename from service/share/set_test.go
rename to share/p2p/discovery/set_test.go
index 5c95f05904..d5113a2291 100644
--- a/service/share/set_test.go
+++ b/share/p2p/discovery/set_test.go
@@ -1,11 +1,12 @@
-package share
+package discovery
 
 import (
+	"context"
 	"testing"
-
-	"github.com/stretchr/testify/require"
+	"time"
 
 	mocknet "github.com/libp2p/go-libp2p/p2p/net/mock"
+	"github.com/stretchr/testify/require"
 )
 
 func TestSet_TryAdd(t *testing.T) {
@@ -14,29 +15,17 @@ func TestSet_TryAdd(t *testing.T) {
 	require.NoError(t, err)
 
 	set := newLimitedSet(1)
-	require.NoError(t, set.TryAdd(h.ID()))
+	set.Add(h.ID())
 	require.True(t, set.Contains(h.ID()))
 }
 
-func TestSet_TryAddFails(t *testing.T) {
-	m := mocknet.New()
-	h1, err := m.GenPeer()
-	require.NoError(t, err)
-	h2, err := m.GenPeer()
-	require.NoError(t, err)
-
-	set := newLimitedSet(1)
-	require.NoError(t, set.TryAdd(h1.ID()))
-	require.Error(t, set.TryAdd(h2.ID()))
-}
-
 func TestSet_Remove(t *testing.T) {
 	m := mocknet.New()
 	h, err := m.GenPeer()
 	require.NoError(t, err)
 
 	set := newLimitedSet(1)
-	require.NoError(t, set.TryAdd(h.ID()))
+	set.Add(h.ID())
 	set.Remove(h.ID())
 	require.False(t, set.Contains(h.ID()))
 }
@@ -49,9 +38,37 @@ func TestSet_Peers(t *testing.T) {
 	require.NoError(t, err)
 
 	set := newLimitedSet(2)
-	require.NoError(t, set.TryAdd(h1.ID()))
-	require.NoError(t, set.TryAdd(h2.ID()))
-	require.True(t, len(set.Peers()) == 2)
+	set.Add(h1.ID())
+	set.Add(h2.ID())
+
+	ctx, cancel := context.WithTimeout(context.Background(), time.Second*1)
+	t.Cleanup(cancel)
+
+	peers, err := set.Peers(ctx)
+	require.NoError(t, err)
+	require.True(t, len(peers) == 2)
+}
+
+// TestSet_WaitPeers ensures that `Peers` will be unblocked once
+// a new peer is discovered.
+func TestSet_WaitPeers(t *testing.T) {
+	m := mocknet.New()
+	h1, err := m.GenPeer()
+	require.NoError(t, err)
+
+	set := newLimitedSet(2)
+	go func() {
+		time.Sleep(time.Millisecond * 500)
+		set.Add(h1.ID())
+	}()
+
+	ctx, cancel := context.WithTimeout(context.Background(), time.Second*2)
+	t.Cleanup(cancel)
+
+	// calling `Peers` on an empty set blocks until a new peer is discovered
+	peers, err := set.Peers(ctx)
+	require.NoError(t, err)
+	require.True(t, len(peers) == 1)
+}
 
 func TestSet_Size(t *testing.T) {
@@ -62,9 +79,9 @@
 	require.NoError(t, err)
 
 	set := newLimitedSet(2)
-	require.NoError(t, set.TryAdd(h1.ID()))
-	require.NoError(t, set.TryAdd(h2.ID()))
-	require.Equal(t, 2, set.Size())
+	set.Add(h1.ID())
+	set.Add(h2.ID())
+	require.EqualValues(t, 2, set.Size())
 	set.Remove(h2.ID())
-	require.Equal(t, 1, set.Size())
+	require.EqualValues(t, 1, set.Size())
 }
diff --git a/share/p2p/doc.go b/share/p2p/doc.go
new file mode 100644
index 0000000000..991ddf94db
--- /dev/null
+++ b/share/p2p/doc.go
@@ -0,0 +1,18 @@
+// Package p2p provides the p2p functionality that powers the share exchange protocols used by celestia-node.
+// The available protocols are:
+//
+// - shrexsub : a floodsub-based pubsub protocol that is used to broadcast/subscribe to the event
+// of new EDS in the network to peers.
+//
+// - shrexnd: a request/response protocol that is used to request shares by namespace or namespace data from peers.
+//
+// - shrexeds: a request/response protocol that is used to request extended data square shares from peers.
+// This protocol exchanges the original data square between the client and server, and it's up to the
+// receiver to compute the extended data square.
+//
+// This package also defines a peer manager that is used to manage network peers that can be used to exchange
+// shares. The peer manager is primarily responsible for providing peers to request shares from,
+// and is used mainly by `getters.ShrexGetter` in share/getters/shrex.go.
+//
+// Find out more about each protocol in their respective sub-packages.
+package p2p
diff --git a/share/p2p/errors.go b/share/p2p/errors.go
new file mode 100644
index 0000000000..cb7b596f47
--- /dev/null
+++ b/share/p2p/errors.go
@@ -0,0 +1,17 @@
+package p2p
+
+import (
+	"errors"
+)
+
+// ErrNotFound is returned when a peer is unable to find the requested data or resource.
+// It is used to signal that the peer couldn't serve the data successfully, and it's not
+// available at the moment. The request may be retried later, but it's unlikely to succeed.
+var ErrNotFound = errors.New("the requested data or resource could not be found")
+
+var ErrRateLimited = errors.New("server is overloaded and rate limited the request")
+
+// ErrInvalidResponse is returned when a peer returns an invalid response or caused an internal
+// error. It is used to signal that the peer couldn't serve the data successfully, and should not be
+// retried.
+var ErrInvalidResponse = errors.New("server returned an invalid response or caused an internal error")
diff --git a/share/p2p/metrics.go b/share/p2p/metrics.go
new file mode 100644
index 0000000000..55aefda81d
--- /dev/null
+++ b/share/p2p/metrics.go
@@ -0,0 +1,73 @@
+package p2p
+
+import (
+	"context"
+	"fmt"
+
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/metric"
+
+	"github.com/celestiaorg/celestia-node/libs/utils"
+)
+
+var meter = otel.Meter("shrex/eds")
+
+type status string
+
+const (
+	StatusBadRequest  status = "bad_request"
+	StatusSendRespErr status = "send_resp_err"
+	StatusSendReqErr  status = "send_req_err"
+	StatusReadRespErr status = "read_resp_err"
+	StatusInternalErr status = "internal_err"
+	StatusNotFound    status = "not_found"
+	StatusTimeout     status = "timeout"
+	StatusSuccess     status = "success"
+	StatusRateLimited status = "rate_limited"
+)
+
+type Metrics struct {
+	totalRequestCounter metric.Int64Counter
+}
+
+// ObserveRequests increments the total number of requests sent with the given status as an
+// attribute.
+func (m *Metrics) ObserveRequests(ctx context.Context, count int64, status status) {
+	if m == nil {
+		return
+	}
+	ctx = utils.ResetContextOnError(ctx)
+	m.totalRequestCounter.Add(ctx, count,
+		metric.WithAttributes(
+			attribute.String("status", string(status)),
+		))
+}
+
+func InitClientMetrics(protocol string) (*Metrics, error) {
+	totalRequestCounter, err := meter.Int64Counter(
+		fmt.Sprintf("shrex_%s_client_total_requests", protocol),
+		metric.WithDescription(fmt.Sprintf("Total count of sent shrex/%s requests", protocol)),
+	)
+	if err != nil {
+		return nil, err
+	}
+
+	return &Metrics{
+		totalRequestCounter: totalRequestCounter,
+	}, nil
+}
+
+func InitServerMetrics(protocol string) (*Metrics, error) {
+	totalRequestCounter, err := meter.Int64Counter(
+		fmt.Sprintf("shrex_%s_server_total_responses", protocol),
+		metric.WithDescription(fmt.Sprintf("Total count of sent shrex/%s responses", protocol)),
+	)
+	if err != nil {
+		return nil, err
+	}
+
+	return &Metrics{
+		totalRequestCounter: totalRequestCounter,
+	}, nil
+}
diff --git a/share/p2p/middleware.go b/share/p2p/middleware.go
new file mode 100644
index 0000000000..df0a690af7
--- /dev/null
+++ b/share/p2p/middleware.go
@@ -0,0 +1,48 @@
+package p2p
+
+import (
+	"sync/atomic"
+
+	logging "github.com/ipfs/go-log/v2"
+	"github.com/libp2p/go-libp2p/core/network"
+)
+
+var log = logging.Logger("shrex/middleware")
+
+type Middleware struct {
+	// concurrencyLimit is the maximum number of requests that can be processed at once.
+	concurrencyLimit int64
+	// parallelRequests is the number of requests currently being processed.
+	parallelRequests atomic.Int64
+	// numRateLimited is the number of requests that were rate limited.
+	numRateLimited atomic.Int64
+}
+
+func NewMiddleware(concurrencyLimit int) *Middleware {
+	return &Middleware{
+		concurrencyLimit: int64(concurrencyLimit),
+	}
+}
+
+// DrainCounter returns the current value of the rate limit counter and resets it to 0.
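+// A hypothetical wiring sketch (ObserveRequests and StatusRateLimited are the
+// helpers defined in share/p2p/metrics.go):
+//
+//	metrics.ObserveRequests(ctx, mw.DrainCounter(), p2p.StatusRateLimited)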
+func (m *Middleware) DrainCounter() int64 {
+	return m.numRateLimited.Swap(0)
+}
+
+func (m *Middleware) RateLimitHandler(handler network.StreamHandler) network.StreamHandler {
+	return func(stream network.Stream) {
+		current := m.parallelRequests.Add(1)
+		defer m.parallelRequests.Add(-1)
+
+		if current > m.concurrencyLimit {
+			m.numRateLimited.Add(1)
+			log.Debug("concurrency limit reached")
+			err := stream.Close()
+			if err != nil {
+				log.Debugw("server: closing stream", "err", err)
+			}
+			return
+		}
+		handler(stream)
+	}
+}
diff --git a/share/p2p/params.go b/share/p2p/params.go
new file mode 100644
index 0000000000..6636e38fc5
--- /dev/null
+++ b/share/p2p/params.go
@@ -0,0 +1,69 @@
+package p2p
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/libp2p/go-libp2p/core/protocol"
+)
+
+// Parameters is the set of parameters that must be configured for the shrex/eds protocol.
+type Parameters struct {
+	// ServerReadTimeout sets the timeout for reading messages from the stream.
+	ServerReadTimeout time.Duration
+
+	// ServerWriteTimeout sets the timeout for writing messages to the stream.
+	ServerWriteTimeout time.Duration
+
+	// HandleRequestTimeout defines the deadline for handling a request.
+	HandleRequestTimeout time.Duration
+
+	// ConcurrencyLimit is the maximum number of concurrently handled streams
+	ConcurrencyLimit int
+
+	// networkID is prepended to the protocolID and represents the network the protocol is
+	// running on.
+	networkID string
+}
+
+func DefaultParameters() *Parameters {
+	return &Parameters{
+		ServerReadTimeout:    5 * time.Second,
+		ServerWriteTimeout:   time.Minute, // based on max observed sample time for 256 blocks (~50s)
+		HandleRequestTimeout: time.Minute,
+		ConcurrencyLimit:     10,
+	}
+}
+
+const errSuffix = "value should be positive and non-zero"
+
+func (p *Parameters) Validate() error {
+	if p.ServerReadTimeout <= 0 {
+		return fmt.Errorf("invalid stream read timeout: %v, %s", p.ServerReadTimeout, errSuffix)
+	}
+	if p.ServerWriteTimeout <= 0 {
+		return fmt.Errorf("invalid write timeout: %v, %s", p.ServerWriteTimeout, errSuffix)
+	}
+	if p.HandleRequestTimeout <= 0 {
+		return fmt.Errorf("invalid handle request timeout: %v, %s", p.HandleRequestTimeout, errSuffix)
+	}
+	if p.ConcurrencyLimit <= 0 {
+		return fmt.Errorf("invalid concurrency limit: %d, %s", p.ConcurrencyLimit, errSuffix)
+	}
+	return nil
+}
+
+// WithNetworkID sets the value of networkID in params
+func (p *Parameters) WithNetworkID(networkID string) {
+	p.networkID = networkID
+}
+
+// NetworkID returns the value of networkID stored in params
+func (p *Parameters) NetworkID() string {
+	return p.networkID
+}
+
+// ProtocolID creates a protocol ID string according to common format
+func ProtocolID(networkID, protocolString string) protocol.ID {
+	return protocol.ID(fmt.Sprintf("/%s%s", networkID, protocolString))
+}
diff --git a/share/p2p/peers/doc.go b/share/p2p/peers/doc.go
new file mode 100644
index 0000000000..bc1647eb42
--- /dev/null
+++ b/share/p2p/peers/doc.go
@@ -0,0 +1,52 @@
+// Package peers provides a peer manager that handles peer discovery and peer selection for the shrex getter.
+//
+// The peer manager is responsible for:
+// - Discovering peers
+// - Selecting peers for data retrieval
+// - Validating peers
+// - Blacklisting peers
+// - Garbage collecting peers
+//
+// The peer manager is not responsible for:
+// - Connecting to peers
+// - Disconnecting from peers
+// - Sending data to peers
+// - Receiving data from peers
+//
+// The peer manager stores peers learned from shrexsub and handles "peer discovery" and
+// "peer selection" by relying on a shrexsub subscription and header subscriptions, such
+// that it listens for new headers and new shares and uses this information to pool peers
+// by shares.
+//
+// This gives the peer manager the ability to block peers that gossip invalid shares, but also
+// access to a list of peers that are known to have been gossiping valid shares.
+// The peers are then returned on request using a round-robin algorithm to return a different
+// peer each time. If no peers are found, the peer manager will rely on full nodes retrieved
+// from discovery.
+//
+// The peer manager is only concerned with recent heights, thus it retrieves peers that
+// were active since `initialHeight`.
+// The peer manager will also garbage collect peers such that it blacklists peers that
+// have been active since `initialHeight` but have been found to be invalid.
+//
+// The peer manager is passed to the shrex getter and is used at request time to
+// select peers for a given data hash for data retrieval.
+//
+// # Usage
+//
+// The peer manager is created using the [NewManager] constructor:
+//
+//	peerManager, err := peers.NewManager(params, host, connGater, opts...)
+//
+// After creating the peer manager, it should be started to kick off listening and
+// validation routines that enable peer selection and retrieval:
+//
+//	err := peerManager.Start(ctx)
+//
+// The peer manager can be stopped at any time to stop all peer discovery and validation routines:
+//
+//	err := peerManager.Stop(ctx)
+//
+// The peer manager can be used to select peers for a given datahash for shares retrieval:
+//
+//	peer, done, err := peerManager.Peer(ctx, hash, height)
+package peers
diff --git a/share/p2p/peers/manager.go b/share/p2p/peers/manager.go
new file mode 100644
index 0000000000..1a00059628
--- /dev/null
+++ b/share/p2p/peers/manager.go
@@ -0,0 +1,521 @@
+package peers
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	logging "github.com/ipfs/go-log/v2"
+	pubsub "github.com/libp2p/go-libp2p-pubsub"
+	"github.com/libp2p/go-libp2p/core/event"
+	"github.com/libp2p/go-libp2p/core/host"
+	"github.com/libp2p/go-libp2p/core/network"
+	"github.com/libp2p/go-libp2p/core/peer"
+	"github.com/libp2p/go-libp2p/p2p/host/eventbus"
+	"github.com/libp2p/go-libp2p/p2p/net/conngater"
+
+	libhead "github.com/celestiaorg/go-header"
+
+	"github.com/celestiaorg/celestia-node/header"
+	"github.com/celestiaorg/celestia-node/share"
+	"github.com/celestiaorg/celestia-node/share/p2p/shrexsub"
+)
+
+const (
+	// ResultNoop indicates operation was successful and no extra action is required
+	ResultNoop result = "result_noop"
+	// ResultCooldownPeer will put returned peer on cooldown, meaning it won't be available by Peer
+	// method for some time
+	ResultCooldownPeer = "result_cooldown_peer"
+	// ResultBlacklistPeer will blacklist the peer.
+	// Blacklisted peers will be disconnected and blocked from
+	// any p2p communication in future by libp2p Gater
+	ResultBlacklistPeer = "result_blacklist_peer"
+
+	// eventbusBufSize is the size of the buffered channel to handle
+	// events in libp2p
+	eventbusBufSize = 32
+
+	// storedPoolsAmount is the amount of pools for recent headers that will be stored in the peer
+	// manager
+	storedPoolsAmount = 10
+)
+
+type result string
+
+var log = logging.Logger("shrex/peer-manager")
+
+// Manager keeps track of peers coming from shrex.Sub and from discovery
+type Manager struct {
+	lock   sync.Mutex
+	params Parameters
+
+	// header subscription is necessary in order to Validate the inbound eds hash
+	headerSub libhead.Subscriber[*header.ExtendedHeader]
+	shrexSub  *shrexsub.PubSub
+	host      host.Host
+	connGater *conngater.BasicConnectionGater
+
+	// pools collect peers from shrexSub and store them by datahash
+	pools map[string]*syncPool
+
+	// initialHeight is the height of the first header received from headersub
+	initialHeight atomic.Uint64
+	// messages from shrex.Sub with height below storeFrom will be ignored, since we don't need to
+	// track peers for those headers
+	storeFrom atomic.Uint64
+
+	// fullNodes collects full nodes peer.ID found via discovery
+	fullNodes *pool
+
+	// hashes that are not in the chain
+	blacklistedHashes map[string]bool
+
+	metrics *metrics
+
+	headerSubDone         chan struct{}
+	disconnectedPeersDone chan struct{}
+	cancel                context.CancelFunc
+}
+
+// DoneFunc updates internal state depending on the call result. It should be called once per
+// peer returned by the Peer method.
+type DoneFunc func(result)
+
+type syncPool struct {
+	*pool
+
+	// isValidatedDataHash indicates if datahash was validated by receiving corresponding extended
+	// header from headerSub
+	isValidatedDataHash atomic.Bool
+	// height is the height of the header that corresponds to datahash
+	height uint64
+	// createdAt is the syncPool creation time
+	createdAt time.Time
+}
+
+func NewManager(
+	params Parameters,
+	host host.Host,
+	connGater *conngater.BasicConnectionGater,
+	options ...Option,
+) (*Manager, error) {
+	if err := params.Validate(); err != nil {
+		return nil, err
+	}
+
+	s := &Manager{
+		params:                params,
+		connGater:             connGater,
+		host:                  host,
+		pools:                 make(map[string]*syncPool),
+		blacklistedHashes:     make(map[string]bool),
+		headerSubDone:         make(chan struct{}),
+		disconnectedPeersDone: make(chan struct{}),
+	}
+
+	for _, opt := range options {
+		err := opt(s)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	s.fullNodes = newPool(s.params.PeerCooldown)
+	return s, nil
+}
+
+func (m *Manager) Start(startCtx context.Context) error {
+	ctx, cancel := context.WithCancel(context.Background())
+	m.cancel = cancel
+
+	// pools will only be populated with senders of shrexsub notifications if the WithShrexSubPools
+	// option is used.
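+	// Without that option both subscriptions are nil and Start degrades to a
+	// no-op: peers then come exclusively from discovery via UpdateFullNodePool.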
+	if m.shrexSub == nil && m.headerSub == nil {
+		return nil
+	}
+
+	validatorFn := m.metrics.validationObserver(m.Validate)
+	err := m.shrexSub.AddValidator(validatorFn)
+	if err != nil {
+		return fmt.Errorf("registering validator: %w", err)
+	}
+	err = m.shrexSub.Start(startCtx)
+	if err != nil {
+		return fmt.Errorf("starting shrexsub: %w", err)
+	}
+
+	headerSub, err := m.headerSub.Subscribe()
+	if err != nil {
+		return fmt.Errorf("subscribing to headersub: %w", err)
+	}
+
+	sub, err := m.host.EventBus().Subscribe(&event.EvtPeerConnectednessChanged{}, eventbus.BufSize(eventbusBufSize))
+	if err != nil {
+		return fmt.Errorf("subscribing to libp2p events: %w", err)
+	}
+
+	go m.subscribeHeader(ctx, headerSub)
+	go m.subscribeDisconnectedPeers(ctx, sub)
+	go m.GC(ctx)
+	return nil
+}
+
+func (m *Manager) Stop(ctx context.Context) error {
+	m.cancel()
+
+	// we do not need to wait for headersub and disconnected peers to finish
+	// here, since they were never started
+	if m.headerSub == nil && m.shrexSub == nil {
+		return nil
+	}
+
+	select {
+	case <-m.headerSubDone:
+	case <-ctx.Done():
+		return ctx.Err()
+	}
+
+	select {
+	case <-m.disconnectedPeersDone:
+	case <-ctx.Done():
+		return ctx.Err()
+	}
+
+	return nil
+}
+
+// Peer returns a peer collected from shrex.Sub for the given datahash if one is available.
+// If there is none, it will look for full nodes collected from discovery. If there are no
+// discovered full nodes, it will wait until a peer appears in either source or the context
+// times out. After fetching data using the returned peer, the caller is required to call the
+// returned DoneFunc with an appropriate result value.
+func (m *Manager) Peer(ctx context.Context, datahash share.DataHash, height uint64,
+) (peer.ID, DoneFunc, error) {
+	p := m.validatedPool(datahash.String(), height)
+
+	// first, check if a peer is available for the given datahash
+	peerID, ok := p.tryGet()
+	if ok {
+		if m.removeIfUnreachable(p, peerID) {
+			return m.Peer(ctx, datahash, height)
+		}
+		return m.newPeer(ctx, datahash, peerID, sourceShrexSub, p.len(), 0)
+	}
+
+	// if no peer for datahash is currently available, try to use full node
+	// obtained from discovery
+	peerID, ok = m.fullNodes.tryGet()
+	if ok {
+		return m.newPeer(ctx, datahash, peerID, sourceFullNodes, m.fullNodes.len(), 0)
+	}
+
+	// no peers are available right now, wait for the first one
+	start := time.Now()
+	select {
+	case peerID = <-p.next(ctx):
+		if m.removeIfUnreachable(p, peerID) {
+			return m.Peer(ctx, datahash, height)
+		}
+		return m.newPeer(ctx, datahash, peerID, sourceShrexSub, p.len(), time.Since(start))
+	case peerID = <-m.fullNodes.next(ctx):
+		return m.newPeer(ctx, datahash, peerID, sourceFullNodes, m.fullNodes.len(), time.Since(start))
+	case <-ctx.Done():
+		return "", nil, ctx.Err()
+	}
+}
+
+// UpdateFullNodePool is called by discovery when a new full node is discovered or removed
+func (m *Manager) UpdateFullNodePool(peerID peer.ID, isAdded bool) {
+	if isAdded {
+		if m.isBlacklistedPeer(peerID) {
+			log.Debugw("got blacklisted peer from discovery", "peer", peerID.String())
+			return
+		}
+		m.fullNodes.add(peerID)
+		log.Debugw("added to full nodes", "peer", peerID)
+		return
+	}
+
+	log.Debugw("removing peer from discovered full nodes", "peer", peerID.String())
+	m.fullNodes.remove(peerID)
+}
+
+func (m *Manager) newPeer(
+	ctx context.Context,
+	datahash share.DataHash,
+	peerID peer.ID,
+	source peerSource,
+	poolSize int,
+	waitTime time.Duration,
+) (peer.ID, DoneFunc, error) {
+	log.Debugw("got peer",
+		"hash", datahash.String(),
+		"peer", peerID.String(),
+		"source", source,
+		"pool_size", poolSize,
+		"wait (s)", waitTime)
+	m.metrics.observeGetPeer(ctx, source, poolSize, waitTime)
+	return peerID, m.doneFunc(datahash, peerID, source), nil
+}
+
+func (m *Manager) doneFunc(datahash share.DataHash, peerID peer.ID, source peerSource) DoneFunc {
+	return func(result result) {
+		log.Debugw("set peer result",
+			"hash", datahash.String(),
+			"peer", peerID.String(),
+			"source", source,
+			"result", result)
+		m.metrics.observeDoneResult(source, result)
+		switch result {
+		case ResultNoop:
+		case ResultCooldownPeer:
+			if source == sourceFullNodes {
+				m.fullNodes.putOnCooldown(peerID)
+				return
+			}
+			m.getPool(datahash.String()).putOnCooldown(peerID)
+		case ResultBlacklistPeer:
+			m.blacklistPeers(reasonMisbehave, peerID)
+		}
+	}
+}
+
+// subscribeHeader takes datahash from received header and validates corresponding peer pool.
+func (m *Manager) subscribeHeader(ctx context.Context, headerSub libhead.Subscription[*header.ExtendedHeader]) {
+	defer close(m.headerSubDone)
+	defer headerSub.Cancel()
+
+	for {
+		h, err := headerSub.NextHeader(ctx)
+		if err != nil {
+			if errors.Is(err, context.Canceled) {
+				return
+			}
+			log.Errorw("get next header from sub", "err", err)
+			continue
+		}
+		m.validatedPool(h.DataHash.String(), h.Height())
+
+		// store first header for validation purposes
+		if m.initialHeight.CompareAndSwap(0, h.Height()) {
+			log.Debugw("stored initial height", "height", h.Height())
+		}
+
+		// update storeFrom based on the new header height, so that only pools for
+		// the storedPoolsAmount most recent headers are kept
+		m.storeFrom.Store(uint64(max(0, int(h.Height())-storedPoolsAmount)))
+		log.Debugw("updated lowest stored height", "height", h.Height())
+	}
+}
+
+// subscribeDisconnectedPeers subscribes to libp2p connectivity events and removes disconnected
+// peers from full nodes pool
+func (m *Manager) subscribeDisconnectedPeers(ctx context.Context, sub event.Subscription) {
+	defer close(m.disconnectedPeersDone)
+	defer sub.Close()
+	for {
+		select {
+		case <-ctx.Done():
+			return
+		case e, ok := <-sub.Out():
+			if !ok {
+				log.Fatal("Subscription for connectedness events is closed.") //nolint:gocritic
+				return
+			}
+			// listen to disconnect event to remove peer from full nodes pool
+			connStatus := e.(event.EvtPeerConnectednessChanged)
+			if connStatus.Connectedness == network.NotConnected {
+				peer := connStatus.Peer
+				if m.fullNodes.has(peer) {
+					log.Debugw("peer disconnected, removing from full nodes", "peer", peer.String())
+					m.fullNodes.remove(peer)
+				}
+			}
+		}
+	}
+}
+
+// Validate will collect peer.ID into corresponding peer pool
+func (m *Manager) Validate(_ context.Context, peerID peer.ID, msg shrexsub.Notification) pubsub.ValidationResult {
+	logger := log.With("peer", peerID.String(), "hash", msg.DataHash.String())
+
+	// messages broadcast from self should bypass the validation with Accept
+	if peerID == m.host.ID() {
+		logger.Debug("received datahash from self")
+		return pubsub.ValidationAccept
+	}
+
+	// punish peer for sending invalid hash if it has misbehaved in the past
+	if m.isBlacklistedHash(msg.DataHash) {
+		logger.Debug("received blacklisted hash, reject validation")
+		return pubsub.ValidationReject
+	}
+
+	if m.isBlacklistedPeer(peerID) {
+		logger.Debug("received message from blacklisted peer, reject validation")
+		return pubsub.ValidationReject
+	}
+
+	if msg.Height < m.storeFrom.Load() {
+		logger.Debug("received message for past header")
+		return pubsub.ValidationIgnore
+	}
+
+	p := m.getOrCreatePool(msg.DataHash.String(), msg.Height)
+	logger.Debugw("got hash from shrex-sub")
+
+	p.add(peerID)
+	if p.isValidatedDataHash.Load() {
+		// add peer to full nodes pool only if datahash has been already validated
+		m.fullNodes.add(peerID)
+	}
+	return pubsub.ValidationIgnore
+}
+
+func (m *Manager) getPool(datahash string) *syncPool {
+	m.lock.Lock()
+	defer m.lock.Unlock()
+	return m.pools[datahash]
+}
+
+func (m *Manager) getOrCreatePool(datahash string, height uint64) *syncPool {
+	m.lock.Lock()
+	defer m.lock.Unlock()
+
+	p, ok := m.pools[datahash]
+	if !ok {
+		p = &syncPool{
+			height:    height,
+			pool:      newPool(m.params.PeerCooldown),
+			createdAt: time.Now(),
+		}
+		m.pools[datahash] = p
+	}
+
+	return p
+}
+
+func (m *Manager) blacklistPeers(reason blacklistPeerReason, peerIDs ...peer.ID) {
+	m.metrics.observeBlacklistPeers(reason, len(peerIDs))
+
+	for _, peerID := range peerIDs {
+		// blacklisted peers will be logged regardless of whether the EnableBlackListing
+		// option is enabled, until blacklisting is properly tested and enabled by default.
+		log.Debugw("blacklisting peer", "peer", peerID.String(), "reason", reason)
+		if !m.params.EnableBlackListing {
+			continue
+		}
+
+		m.fullNodes.remove(peerID)
+		// add peer to the blacklist, so we can't connect to it in the future.
+		err := m.connGater.BlockPeer(peerID)
+		if err != nil {
+			log.Warnw("failed to block peer", "peer", peerID, "err", err)
+		}
+		// close connections to peer.
+		err = m.host.Network().ClosePeer(peerID)
+		if err != nil {
+			log.Warnw("failed to close connection with peer", "peer", peerID, "err", err)
+		}
+	}
+}
+
+func (m *Manager) isBlacklistedPeer(peerID peer.ID) bool {
+	return !m.connGater.InterceptPeerDial(peerID)
+}
+
+func (m *Manager) isBlacklistedHash(hash share.DataHash) bool {
+	m.lock.Lock()
+	defer m.lock.Unlock()
+	return m.blacklistedHashes[hash.String()]
+}
+
+func (m *Manager) validatedPool(hashStr string, height uint64) *syncPool {
+	p := m.getOrCreatePool(hashStr, height)
+	if p.isValidatedDataHash.CompareAndSwap(false, true) {
+		log.Debugw("pool marked validated", "datahash", hashStr)
+		// if pool is proven to be valid, add all collected peers to full nodes
+		m.fullNodes.add(p.peers()...)
+	}
+	return p
+}
+
+// removeIfUnreachable removes a peer from the pool if it is blacklisted or no longer
+// among the discovered full nodes
+func (m *Manager) removeIfUnreachable(pool *syncPool, peerID peer.ID) bool {
+	if m.isBlacklistedPeer(peerID) || !m.fullNodes.has(peerID) {
+		log.Debugw("removing outdated peer from pool", "peer", peerID.String())
+		pool.remove(peerID)
+		return true
+	}
+	return false
+}
+
+func (m *Manager) GC(ctx context.Context) {
+	ticker := time.NewTicker(m.params.GcInterval)
+	defer ticker.Stop()
+
+	var blacklist []peer.ID
+	for {
+		select {
+		case <-ticker.C:
+		case <-ctx.Done():
+			return
+		}
+
+		blacklist = m.cleanUp()
+		if len(blacklist) > 0 {
+			m.blacklistPeers(reasonInvalidHash, blacklist...)
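+			// Blacklisting happens here rather than inside cleanUp, which holds
+			// the manager lock while it scans the pools.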
+		}
+	}
+}
+
+func (m *Manager) cleanUp() []peer.ID {
+	if m.initialHeight.Load() == 0 {
+		// can't blacklist peers until initialHeight is set
+		return nil
+	}
+
+	m.lock.Lock()
+	defer m.lock.Unlock()
+
+	addToBlackList := make(map[peer.ID]struct{})
+	for h, p := range m.pools {
+		if p.isValidatedDataHash.Load() {
+			// remove pools that are outdated
+			if p.height < m.storeFrom.Load() {
+				delete(m.pools, h)
+			}
+			continue
+		}
+
+		// can't validate datahashes below initial height
+		if p.height < m.initialHeight.Load() {
+			delete(m.pools, h)
+			continue
+		}
+
+		// find pools that are not validated in time
+		if time.Since(p.createdAt) > m.params.PoolValidationTimeout {
+			delete(m.pools, h)
+
+			log.Debugw("blacklisting datahash with all corresponding peers",
+				"hash", h,
+				"peer_list", p.peersList)
+			// blacklist hash
+			m.blacklistedHashes[h] = true
+
+			// blacklist peers
+			for _, peer := range p.peersList {
+				addToBlackList[peer] = struct{}{}
+			}
+		}
+	}
+
+	blacklist := make([]peer.ID, 0, len(addToBlackList))
+	for peerID := range addToBlackList {
+		blacklist = append(blacklist, peerID)
+	}
+	return blacklist
+}
diff --git a/share/p2p/peers/manager_test.go b/share/p2p/peers/manager_test.go
new file mode 100644
index 0000000000..d4a188ff56
--- /dev/null
+++ b/share/p2p/peers/manager_test.go
@@ -0,0 +1,571 @@
+package peers
+
+import (
+	"context"
+	"sync"
+	"testing"
+	"time"
+
+	"github.com/ipfs/go-datastore"
+	dssync "github.com/ipfs/go-datastore/sync"
+	dht "github.com/libp2p/go-libp2p-kad-dht"
+	pubsub "github.com/libp2p/go-libp2p-pubsub"
+	"github.com/libp2p/go-libp2p/core/host"
+	"github.com/libp2p/go-libp2p/core/peer"
+	routingdisc "github.com/libp2p/go-libp2p/p2p/discovery/routing"
+	"github.com/libp2p/go-libp2p/p2p/net/conngater"
+	mocknet "github.com/libp2p/go-libp2p/p2p/net/mock"
+	"github.com/stretchr/testify/require"
+	"github.com/tendermint/tendermint/libs/rand"
+
+	libhead "github.com/celestiaorg/go-header"
+
+	"github.com/celestiaorg/celestia-node/header"
+	"github.com/celestiaorg/celestia-node/share"
+	"github.com/celestiaorg/celestia-node/share/p2p/discovery"
+	"github.com/celestiaorg/celestia-node/share/p2p/shrexsub"
+)
+
+func TestManager(t *testing.T) {
+	t.Run("Validate pool by headerSub", func(t *testing.T) {
+		ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
+		t.Cleanup(cancel)
+
+		// create headerSub mock
+		h := testHeader()
+		headerSub := newSubLock(h, nil)
+
+		// start test manager
+		manager, err := testManager(ctx, headerSub)
+		require.NoError(t, err)
+
+		// wait until header is requested from header sub
+		err = headerSub.wait(ctx, 1)
+		require.NoError(t, err)
+
+		// check validation
+		require.True(t, manager.pools[h.DataHash.String()].isValidatedDataHash.Load())
+		stopManager(t, manager)
+	})
+
+	t.Run("Validate pool by shrex.Getter", func(t *testing.T) {
+		ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
+		t.Cleanup(cancel)
+
+		h := testHeader()
+		headerSub := newSubLock(h, nil)
+
+		// start test manager
+		manager, err := testManager(ctx, headerSub)
+		require.NoError(t, err)
+
+		peerID, msg := peer.ID("peer1"), newShrexSubMsg(h)
+		result := manager.Validate(ctx, peerID, msg)
+		require.Equal(t, pubsub.ValidationIgnore, result)
+
+		pID, _, err := manager.Peer(ctx, h.DataHash.Bytes(), h.Height())
+		require.NoError(t, err)
+		require.Equal(t, peerID, pID)
+
+		// check pool validation
+		require.True(t, manager.getPool(h.DataHash.String()).isValidatedDataHash.Load())
+	})
+
+	t.Run("validator", func(t *testing.T) {
+		ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
+		t.Cleanup(cancel)
+
+		// create headerSub mock
+		h := testHeader()
+		headerSub := newSubLock(h, nil)
+
+		// start test manager
+		manager, err := testManager(ctx, headerSub)
+		require.NoError(t, err)
+
+		// own messages should be accepted
+		msg := newShrexSubMsg(h)
+		result := manager.Validate(ctx, manager.host.ID(), msg)
+		require.Equal(t, pubsub.ValidationAccept, result)
+
+		// normal messages should be ignored
+		peerID := peer.ID("peer1")
+		result = manager.Validate(ctx, peerID, msg)
+		require.Equal(t, pubsub.ValidationIgnore, result)
+
+		// mark peer as misbehaved to blacklist it
+		pID, done, err := manager.Peer(ctx, h.DataHash.Bytes(), h.Height())
+		require.NoError(t, err)
+		require.Equal(t, peerID, pID)
+		manager.params.EnableBlackListing = true
+		done(ResultBlacklistPeer)
+
+		// new messages from the misbehaved peer should be rejected
+		result = manager.Validate(ctx, pID, msg)
+		require.Equal(t, pubsub.ValidationReject, result)
+
+		stopManager(t, manager)
+	})
+
+	t.Run("cleanup", func(t *testing.T) {
+		ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
+		t.Cleanup(cancel)
+
+		// create headerSub mock
+		h := testHeader()
+		headerSub := newSubLock(h)
+
+		// start test manager
+		manager, err := testManager(ctx, headerSub)
+		require.NoError(t, err)
+		require.NoError(t, headerSub.wait(ctx, 1))
+
+		// set syncTimeout to 0 to allow cleanup to find outdated datahash
+		manager.params.PoolValidationTimeout = 0
+
+		// create unvalidated pool
+		peerID := peer.ID("peer1")
+		msg := shrexsub.Notification{
+			DataHash: share.DataHash("datahash1datahash1datahash1datahash1datahash1"),
+			Height:   2,
+		}
+		manager.Validate(ctx, peerID, msg)
+
+		// create validated pool
+		validDataHash := share.DataHash("datahash2")
+		manager.fullNodes.add("full") // add FN to unblock Peer call
+		manager.Peer(ctx, validDataHash, h.Height()) //nolint:errcheck
+		require.Len(t, manager.pools, 3)
+
+		// trigger cleanup
+		blacklisted := manager.cleanUp()
+		require.Contains(t, blacklisted, peerID)
+		require.Len(t, manager.pools, 2)
+
+		// messages with blacklisted hash should be rejected right away
+		peerID2 := peer.ID("peer2")
+		result := manager.Validate(ctx, peerID2, msg)
+		require.Equal(t, pubsub.ValidationReject, result)
+
+		// check blacklisted pools
+		require.True(t, manager.isBlacklistedHash(msg.DataHash))
+		require.False(t, manager.isBlacklistedHash(validDataHash))
+	})
+
+	t.Run("no peers from shrex.Sub, get from discovery", func(t *testing.T) {
+		ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
+		t.Cleanup(cancel)
+
+		// create headerSub mock
+		h := testHeader()
+		headerSub := newSubLock(h)
+
+		// start test manager
+		manager, err := testManager(ctx, headerSub)
+		require.NoError(t, err)
+
+		// add peers to fullnodes, imitating discovery add
+		peers := []peer.ID{"peer1", "peer2", "peer3"}
+		manager.fullNodes.add(peers...)
+
+		peerID, _, err := manager.Peer(ctx, h.DataHash.Bytes(), h.Height())
+		require.NoError(t, err)
+		require.Contains(t, peers, peerID)
+
+		stopManager(t, manager)
+	})
+
+	t.Run("no peers from shrex.Sub and from discovery. Wait", func(t *testing.T) {
Wait", func(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) + t.Cleanup(cancel) + + // create headerSub mock + h := testHeader() + headerSub := newSubLock(h) + + // start test manager + manager, err := testManager(ctx, headerSub) + require.NoError(t, err) + + // make sure peers are not returned before timeout + timeoutCtx, cancel := context.WithTimeout(context.Background(), time.Millisecond) + t.Cleanup(cancel) + _, _, err = manager.Peer(timeoutCtx, h.DataHash.Bytes(), h.Height()) + require.ErrorIs(t, err, context.DeadlineExceeded) + + peers := []peer.ID{"peer1", "peer2", "peer3"} + + // launch wait routine + doneCh := make(chan struct{}) + go func() { + defer close(doneCh) + peerID, _, err := manager.Peer(ctx, h.DataHash.Bytes(), h.Height()) + require.NoError(t, err) + require.Contains(t, peers, peerID) + }() + + // send peers + manager.fullNodes.add(peers...) + + // wait for peer to be received + select { + case <-doneCh: + case <-ctx.Done(): + require.NoError(t, ctx.Err()) + } + + stopManager(t, manager) + }) + + t.Run("shrexSub sends a message lower than first headerSub header height, headerSub first", func(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) + t.Cleanup(cancel) + + h := testHeader() + h.RawHeader.Height = 100 + headerSub := newSubLock(h, nil) + + // start test manager + manager, err := testManager(ctx, headerSub) + require.NoError(t, err) + + // unlock headerSub to read first header + require.NoError(t, headerSub.wait(ctx, 1)) + // pool will be created for first headerSub header datahash + require.Len(t, manager.pools, 1) + + // create shrexSub msg with height lower than first header from headerSub + msg := shrexsub.Notification{ + DataHash: share.DataHash("datahash"), + Height: h.Height() - 1, + } + result := manager.Validate(ctx, "peer", msg) + require.Equal(t, pubsub.ValidationIgnore, result) + // pool will be created for first shrexSub message + require.Len(t, manager.pools, 2) + + blacklisted := manager.cleanUp() + require.Empty(t, blacklisted) + // trigger cleanup and outdated pool should be removed + require.Len(t, manager.pools, 1) + }) + + t.Run("shrexSub sends a message lower than first headerSub header height, shrexSub first", func(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) + t.Cleanup(cancel) + + h := testHeader() + h.RawHeader.Height = 100 + headerSub := newSubLock(h, nil) + + // start test manager + manager, err := testManager(ctx, headerSub) + require.NoError(t, err) + + // create shrexSub msg with height lower than first header from headerSub + msg := shrexsub.Notification{ + DataHash: share.DataHash("datahash"), + Height: h.Height() - 1, + } + result := manager.Validate(ctx, "peer", msg) + require.Equal(t, pubsub.ValidationIgnore, result) + + // pool will be created for first shrexSub message + require.Len(t, manager.pools, 1) + + // unlock headerSub to allow it to send next message + require.NoError(t, headerSub.wait(ctx, 1)) + // second pool should be created + require.Len(t, manager.pools, 2) + + // trigger cleanup and outdated pool should be removed + blacklisted := manager.cleanUp() + require.Len(t, manager.pools, 1) + + // check that no peers or hashes were blacklisted + manager.params.PoolValidationTimeout = 0 + require.Len(t, blacklisted, 0) + require.Len(t, manager.blacklistedHashes, 0) + }) + + t.Run("pools store window", func(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) + 
+		t.Cleanup(cancel)
+
+		h := testHeader()
+		h.RawHeader.Height = storedPoolsAmount * 2
+		headerSub := newSubLock(h, nil)
+
+		// start test manager
+		manager, err := testManager(ctx, headerSub)
+		require.NoError(t, err)
+
+		// unlock headerSub to read first header
+		require.NoError(t, headerSub.wait(ctx, 1))
+		// pool will be created for first headerSub header datahash
+		require.Len(t, manager.pools, 1)
+
+		// create shrexSub msg with height lower than storedPoolsAmount
+		msg := shrexsub.Notification{
+			DataHash: share.DataHash("datahash"),
+			Height:   h.Height() - storedPoolsAmount - 3,
+		}
+		result := manager.Validate(ctx, "peer", msg)
+		require.Equal(t, pubsub.ValidationIgnore, result)
+
+		// shrexSub message should be discarded and amount of pools should not change
+		require.Len(t, manager.pools, 1)
+	})
+}
+
+func TestIntegration(t *testing.T) {
+	t.Run("get peer from shrexsub", func(t *testing.T) {
+		nw, err := mocknet.FullMeshLinked(2)
+		require.NoError(t, err)
+		ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
+		t.Cleanup(cancel)
+
+		bnPubSub, err := shrexsub.NewPubSub(ctx, nw.Hosts()[0], "test")
+		require.NoError(t, err)
+
+		fnPubSub, err := shrexsub.NewPubSub(ctx, nw.Hosts()[1], "test")
+		require.NoError(t, err)
+
+		require.NoError(t, bnPubSub.Start(ctx))
+		require.NoError(t, fnPubSub.Start(ctx))
+
+		fnPeerManager, err := testManager(ctx, newSubLock())
+		require.NoError(t, err)
+		fnPeerManager.host = nw.Hosts()[1]
+
+		require.NoError(t, fnPubSub.AddValidator(fnPeerManager.Validate))
+		_, err = fnPubSub.Subscribe()
+		require.NoError(t, err)
+
+		time.Sleep(time.Millisecond * 100)
+		require.NoError(t, nw.ConnectAllButSelf())
+		time.Sleep(time.Millisecond * 100)
+
+		// broadcast from BN
+		randHash := rand.Bytes(32)
+		require.NoError(t, bnPubSub.Broadcast(ctx, shrexsub.Notification{
+			DataHash: randHash,
+			Height:   1,
+		}))
+
+		// FN should get the message
+		gotPeer, _, err := fnPeerManager.Peer(ctx, randHash, 13)
+		require.NoError(t, err)
+
+		// check that gotPeer matched the bridge node
+		require.Equal(t, nw.Hosts()[0].ID(), gotPeer)
+	})
+
+	t.Run("get peer from discovery", func(t *testing.T) {
+		fullNodesTag := "fullNodes"
+		nw, err := mocknet.FullMeshConnected(3)
+		require.NoError(t, err)
+		ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
+		t.Cleanup(cancel)
+
+		// set up bootstrapper
+		bsHost := nw.Hosts()[0]
+		bs := host.InfoFromHost(bsHost)
+		opts := []dht.Option{
+			dht.Mode(dht.ModeAuto),
+			dht.BootstrapPeers(*bs),
+			dht.RoutingTableRefreshPeriod(time.Second),
+		}
+
+		bsOpts := opts
+		bsOpts = append(bsOpts,
+			dht.Mode(dht.ModeServer), // it must accept incoming connections
+			dht.BootstrapPeers(),     // no bootstrappers for a bootstrapper ¯\_(ツ)_/¯
+		)
+		bsRouter, err := dht.New(ctx, bsHost, bsOpts...)
+		require.NoError(t, err)
+		require.NoError(t, bsRouter.Bootstrap(ctx))
+
+		// set up broadcaster node
+		bnHost := nw.Hosts()[1]
+		bnRouter, err := dht.New(ctx, bnHost, opts...)
+		require.NoError(t, err)
+
+		params := discovery.DefaultParameters()
+		params.AdvertiseInterval = time.Second
+
+		bnDisc, err := discovery.NewDiscovery(
+			params,
+			bnHost,
+			routingdisc.NewRoutingDiscovery(bnRouter),
+			fullNodesTag,
+		)
+		require.NoError(t, err)
+
+		// set up full node / receiver node
+		fnHost := nw.Hosts()[2]
+		fnRouter, err := dht.New(ctx, fnHost, opts...)
+		require.NoError(t, err)
+
+		// init peer manager for full node
+		connGater, err := conngater.NewBasicConnectionGater(dssync.MutexWrap(datastore.NewMapDatastore()))
+		require.NoError(t, err)
+		fnPeerManager, err := NewManager(
+			DefaultParameters(),
+			nil,
+			connGater,
+		)
+		require.NoError(t, err)
+
+		waitCh := make(chan struct{})
+		checkDiscoveredPeer := func(peerID peer.ID, isAdded bool) {
+			defer close(waitCh)
+			// check that obtained peer id is BN
+			require.Equal(t, bnHost.ID(), peerID)
+		}
+
+		// set up discovery for full node with hook to peer manager and check discovered peer
+		params = discovery.DefaultParameters()
+		params.AdvertiseInterval = time.Second
+		params.PeersLimit = 10
+
+		fnDisc, err := discovery.NewDiscovery(
+			params,
+			fnHost,
+			routingdisc.NewRoutingDiscovery(fnRouter),
+			fullNodesTag,
+			discovery.WithOnPeersUpdate(fnPeerManager.UpdateFullNodePool),
+			discovery.WithOnPeersUpdate(checkDiscoveredPeer),
+		)
+		require.NoError(t, fnDisc.Start(ctx))
+		t.Cleanup(func() {
+			err = fnDisc.Stop(ctx)
+			require.NoError(t, err)
+		})
+
+		require.NoError(t, bnRouter.Bootstrap(ctx))
+		require.NoError(t, fnRouter.Bootstrap(ctx))
+
+		go bnDisc.Advertise(ctx)
+
+		select {
+		case <-waitCh:
+			require.Contains(t, fnPeerManager.fullNodes.peersList, bnHost.ID())
+		case <-ctx.Done():
+			require.NoError(t, ctx.Err())
+		}
+	})
+}
+
+func testManager(ctx context.Context, headerSub libhead.Subscriber[*header.ExtendedHeader]) (*Manager, error) {
+	host, err := mocknet.New().GenPeer()
+	if err != nil {
+		return nil, err
+	}
+	shrexSub, err := shrexsub.NewPubSub(ctx, host, "test")
+	if err != nil {
+		return nil, err
+	}
+
+	connGater, err := conngater.NewBasicConnectionGater(dssync.MutexWrap(datastore.NewMapDatastore()))
+	if err != nil {
+		return nil, err
+	}
+	manager, err := NewManager(
+		DefaultParameters(),
+		host,
+		connGater,
+		WithShrexSubPools(shrexSub, headerSub),
+	)
+	if err != nil {
+		return nil, err
+	}
+	err = manager.Start(ctx)
+	return manager, err
+}
+
+func stopManager(t *testing.T, m *Manager) {
+	closeCtx, cancel := context.WithTimeout(context.Background(), time.Second)
+	t.Cleanup(cancel)
+	require.NoError(t, m.Stop(closeCtx))
+}
+
+func testHeader() *header.ExtendedHeader {
+	return &header.ExtendedHeader{
+		RawHeader: header.RawHeader{
+			Height:   1,
+			DataHash: rand.Bytes(32),
+		},
+	}
+}
+
+type subLock struct {
+	next     chan struct{}
+	wg       *sync.WaitGroup
+	expected []*header.ExtendedHeader
+}
+
+func (s subLock) wait(ctx context.Context, count int) error {
+	s.wg.Add(count)
+	for i := 0; i < count; i++ {
+		err := s.release(ctx)
+		if err != nil {
+			return err
+		}
+	}
+	s.wg.Wait()
+	return nil
+}
+
+func (s subLock) release(ctx context.Context) error {
+	select {
+	case s.next <- struct{}{}:
+		return nil
+	case <-ctx.Done():
+		return ctx.Err()
+	}
+}
+
+func newSubLock(expected ...*header.ExtendedHeader) *subLock {
+	wg := &sync.WaitGroup{}
+	wg.Add(1)
+	return &subLock{
+		next:     make(chan struct{}),
+		expected: expected,
+		wg:       wg,
+	}
+}
+
+func (s *subLock) Subscribe() (libhead.Subscription[*header.ExtendedHeader], error) {
+	return s, nil
+}
+
+func (s *subLock) SetVerifier(func(context.Context, *header.ExtendedHeader) error) error {
+	panic("implement me")
+}
+
+func (s *subLock) NextHeader(ctx context.Context) (*header.ExtendedHeader, error) {
+	s.wg.Done()
+
+	// wait for call to be unlocked by release
+	select {
+	case <-s.next:
+		h := s.expected[0]
+		s.expected = s.expected[1:]
+		return h, nil
+	case <-ctx.Done():
+		return nil, ctx.Err()
+	}
+}
+
+func (s *subLock) Cancel() {
Cancel() { +} + +func newShrexSubMsg(h *header.ExtendedHeader) shrexsub.Notification { + return shrexsub.Notification{ + DataHash: h.DataHash.Bytes(), + Height: h.Height(), + } +} diff --git a/share/p2p/peers/metrics.go b/share/p2p/peers/metrics.go new file mode 100644 index 0000000000..098610c595 --- /dev/null +++ b/share/p2p/peers/metrics.go @@ -0,0 +1,267 @@ +package peers + +import ( + "context" + "fmt" + "sync" + "time" + + pubsub "github.com/libp2p/go-libp2p-pubsub" + "github.com/libp2p/go-libp2p/core/peer" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + + "github.com/celestiaorg/celestia-node/libs/utils" + "github.com/celestiaorg/celestia-node/share/p2p/shrexsub" +) + +const ( + isInstantKey = "is_instant" + doneResultKey = "done_result" + + sourceKey = "source" + sourceShrexSub peerSource = "shrexsub" + sourceFullNodes peerSource = "full_nodes" + + blacklistPeerReasonKey = "blacklist_reason" + reasonInvalidHash blacklistPeerReason = "invalid_hash" + reasonMisbehave blacklistPeerReason = "misbehave" + + validationResultKey = "validation_result" + validationAccept = "accept" + validationReject = "reject" + validationIgnore = "ignore" + + peerStatusKey = "peer_status" + peerStatusActive peerStatus = "active" + peerStatusCooldown peerStatus = "cooldown" + + poolStatusKey = "pool_status" + poolStatusCreated poolStatus = "created" + poolStatusValidated poolStatus = "validated" + poolStatusBlacklisted poolStatus = "blacklisted" + // Pool status model: + // created(unvalidated) + // / \ + // validated blacklisted +) + +var meter = otel.Meter("shrex_peer_manager") + +type blacklistPeerReason string + +type peerStatus string + +type poolStatus string + +type peerSource string + +type metrics struct { + getPeer metric.Int64Counter // attributes: source, is_instant + getPeerWaitTimeHistogram metric.Int64Histogram // attributes: source + getPeerPoolSizeHistogram metric.Int64Histogram // attributes: source + doneResult metric.Int64Counter // attributes: source, done_result + validationResult metric.Int64Counter // attributes: validation_result + + shrexPools metric.Int64ObservableGauge // attributes: pool_status + fullNodesPool metric.Int64ObservableGauge // attributes: pool_status + blacklistedPeersByReason sync.Map + blacklistedPeers metric.Int64ObservableGauge // attributes: blacklist_reason +} + +func initMetrics(manager *Manager) (*metrics, error) { + getPeer, err := meter.Int64Counter("peer_manager_get_peer_counter", + metric.WithDescription("get peer counter")) + if err != nil { + return nil, err + } + + getPeerWaitTimeHistogram, err := meter.Int64Histogram("peer_manager_get_peer_ms_time_hist", + metric.WithDescription("get peer time histogram(ms), observed only for async get(is_instant = false)")) + if err != nil { + return nil, err + } + + getPeerPoolSizeHistogram, err := meter.Int64Histogram("peer_manager_get_peer_pool_size_hist", + metric.WithDescription("amount of available active peers in pool at time when get was called")) + if err != nil { + return nil, err + } + + doneResult, err := meter.Int64Counter("peer_manager_done_result_counter", + metric.WithDescription("done results counter")) + if err != nil { + return nil, err + } + + validationResult, err := meter.Int64Counter("peer_manager_validation_result_counter", + metric.WithDescription("validation result counter")) + if err != nil { + return nil, err + } + + shrexPools, err := meter.Int64ObservableGauge("peer_manager_pools_gauge", + metric.WithDescription("pools 
amount")) + if err != nil { + return nil, err + } + + fullNodesPool, err := meter.Int64ObservableGauge("peer_manager_full_nodes_gauge", + metric.WithDescription("full nodes pool peers amount")) + if err != nil { + return nil, err + } + + blacklisted, err := meter.Int64ObservableGauge("peer_manager_blacklisted_peers", + metric.WithDescription("blacklisted peers amount")) + if err != nil { + return nil, err + } + + metrics := &metrics{ + getPeer: getPeer, + getPeerWaitTimeHistogram: getPeerWaitTimeHistogram, + doneResult: doneResult, + validationResult: validationResult, + shrexPools: shrexPools, + fullNodesPool: fullNodesPool, + getPeerPoolSizeHistogram: getPeerPoolSizeHistogram, + blacklistedPeers: blacklisted, + } + + callback := func(ctx context.Context, observer metric.Observer) error { + for poolStatus, count := range manager.shrexPools() { + observer.ObserveInt64(shrexPools, count, + metric.WithAttributes( + attribute.String(poolStatusKey, string(poolStatus)))) + } + + observer.ObserveInt64(fullNodesPool, int64(manager.fullNodes.len()), + metric.WithAttributes( + attribute.String(peerStatusKey, string(peerStatusActive)))) + observer.ObserveInt64(fullNodesPool, int64(manager.fullNodes.cooldown.len()), + metric.WithAttributes( + attribute.String(peerStatusKey, string(peerStatusCooldown)))) + + metrics.blacklistedPeersByReason.Range(func(key, value any) bool { + reason := key.(blacklistPeerReason) + amount := value.(int) + observer.ObserveInt64(blacklisted, int64(amount), + metric.WithAttributes( + attribute.String(blacklistPeerReasonKey, string(reason)))) + return true + }) + return nil + } + _, err = meter.RegisterCallback(callback, shrexPools, fullNodesPool, blacklisted) + if err != nil { + return nil, fmt.Errorf("registering metrics callback: %w", err) + } + return metrics, nil +} + +func (m *metrics) observeGetPeer( + ctx context.Context, + source peerSource, poolSize int, waitTime time.Duration, +) { + if m == nil { + return + } + ctx = utils.ResetContextOnError(ctx) + m.getPeer.Add(ctx, 1, + metric.WithAttributes( + attribute.String(sourceKey, string(source)), + attribute.Bool(isInstantKey, waitTime == 0))) + if source == sourceShrexSub { + m.getPeerPoolSizeHistogram.Record(ctx, int64(poolSize), + metric.WithAttributes( + attribute.String(sourceKey, string(source)))) + } + + // record wait time only for async gets + if waitTime > 0 { + m.getPeerWaitTimeHistogram.Record(ctx, waitTime.Milliseconds(), + metric.WithAttributes( + attribute.String(sourceKey, string(source)))) + } +} + +func (m *metrics) observeDoneResult(source peerSource, result result) { + if m == nil { + return + } + + ctx := context.Background() + m.doneResult.Add(ctx, 1, + metric.WithAttributes( + attribute.String(sourceKey, string(source)), + attribute.String(doneResultKey, string(result)))) +} + +// validationObserver is a middleware that observes validation results as metrics +func (m *metrics) validationObserver(validator shrexsub.ValidatorFn) shrexsub.ValidatorFn { + if m == nil { + return validator + } + return func(ctx context.Context, id peer.ID, n shrexsub.Notification) pubsub.ValidationResult { + res := validator(ctx, id, n) + + var resStr string + switch res { + case pubsub.ValidationAccept: + resStr = validationAccept + case pubsub.ValidationReject: + resStr = validationReject + case pubsub.ValidationIgnore: + resStr = validationIgnore + default: + resStr = "unknown" + } + + ctx = utils.ResetContextOnError(ctx) + + m.validationResult.Add(ctx, 1, + metric.WithAttributes( + 
attribute.String(validationResultKey, resStr))) + return res + } +} + +// observeBlacklistPeers stores amount of blacklisted peers by reason +func (m *metrics) observeBlacklistPeers(reason blacklistPeerReason, amount int) { + if m == nil { + return + } + for { + prevVal, loaded := m.blacklistedPeersByReason.LoadOrStore(reason, amount) + if !loaded { + return + } + + newVal := prevVal.(int) + amount + if m.blacklistedPeersByReason.CompareAndSwap(reason, prevVal, newVal) { + return + } + } +} + +// shrexPools collects amount of shrex pools by poolStatus +func (m *Manager) shrexPools() map[poolStatus]int64 { + m.lock.Lock() + defer m.lock.Unlock() + + shrexPools := make(map[poolStatus]int64) + for _, p := range m.pools { + if !p.isValidatedDataHash.Load() { + shrexPools[poolStatusCreated]++ + continue + } + + // pool is validated but not synced + shrexPools[poolStatusValidated]++ + } + + shrexPools[poolStatusBlacklisted] = int64(len(m.blacklistedHashes)) + return shrexPools +} diff --git a/share/p2p/peers/options.go b/share/p2p/peers/options.go new file mode 100644 index 0000000000..2970dd2465 --- /dev/null +++ b/share/p2p/peers/options.go @@ -0,0 +1,84 @@ +package peers + +import ( + "fmt" + "time" + + libhead "github.com/celestiaorg/go-header" + + "github.com/celestiaorg/celestia-node/header" + "github.com/celestiaorg/celestia-node/share/p2p/shrexsub" +) + +type Parameters struct { + // PoolValidationTimeout is the timeout used for validating incoming datahashes. Pools that have + // been created for datahashes from shrexsub that do not see this hash from headersub after this + // timeout will be garbage collected. + PoolValidationTimeout time.Duration + + // PeerCooldown is the time a peer is put on cooldown after a ResultCooldownPeer. + PeerCooldown time.Duration + + // GcInterval is the interval at which the manager will garbage collect unvalidated pools. + GcInterval time.Duration + + // EnableBlackListing turns on blacklisting for misbehaved peers + EnableBlackListing bool +} + +type Option func(*Manager) error + +// Validate validates the values in Parameters +func (p *Parameters) Validate() error { + if p.PoolValidationTimeout <= 0 { + return fmt.Errorf("peer-manager: validation timeout must be positive") + } + + if p.PeerCooldown <= 0 { + return fmt.Errorf("peer-manager: peer cooldown must be positive") + } + + if p.GcInterval <= 0 { + return fmt.Errorf("peer-manager: garbage collection interval must be positive") + } + + return nil +} + +// DefaultParameters returns the default configuration values for the peer manager parameters +func DefaultParameters() Parameters { + return Parameters{ + // PoolValidationTimeout's default value is based on the default daser sampling timeout of 1 minute. + // If a received datahash has not tried to be sampled within these two minutes, the pool will be + // removed. + PoolValidationTimeout: 2 * time.Minute, + // PeerCooldown's default value is based on initial network tests that showed a ~3.5 second + // sync time for large blocks. This value gives our (discovery) peers enough time to sync + // the new block before we ask them again. + PeerCooldown: 3 * time.Second, + GcInterval: time.Second * 30, + // blacklisting is off by default //TODO(@walldiss): enable blacklisting once all related issues + // are resolved + EnableBlackListing: false, + } +} + +// WithShrexSubPools passes a shrexsub and headersub instance to be used to populate and validate +// pools from shrexsub notifications. 
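+//
+// A hedged usage sketch (host, connGater, shrexSub and headerSub are assumed
+// to be constructed elsewhere; the option is applied through NewManager):
+//
+//	manager, err := peers.NewManager(
+//		peers.DefaultParameters(),
+//		host,
+//		connGater,
+//		peers.WithShrexSubPools(shrexSub, headerSub),
+//	)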
+func WithShrexSubPools(shrexSub *shrexsub.PubSub, headerSub libhead.Subscriber[*header.ExtendedHeader]) Option { + return func(m *Manager) error { + m.shrexSub = shrexSub + m.headerSub = headerSub + return nil + } +} + +// WithMetrics turns on metric collection in peer manager. +func (m *Manager) WithMetrics() error { + metrics, err := initMetrics(m) + if err != nil { + return fmt.Errorf("peer-manager: init metrics: %w", err) + } + m.metrics = metrics + return nil +} diff --git a/share/p2p/peers/pool.go b/share/p2p/peers/pool.go new file mode 100644 index 0000000000..365ef0306d --- /dev/null +++ b/share/p2p/peers/pool.go @@ -0,0 +1,226 @@ +package peers + +import ( + "context" + "sync" + "time" + + "github.com/libp2p/go-libp2p/core/peer" +) + +const defaultCleanupThreshold = 2 + +// pool stores peers and provides methods for simple round-robin access. +type pool struct { + m sync.RWMutex + peersList []peer.ID + statuses map[peer.ID]status + cooldown *timedQueue + activeCount int + nextIdx int + + hasPeer bool + hasPeerCh chan struct{} + + cleanupThreshold int +} + +type status int + +const ( + active status = iota + cooldown + removed +) + +// newPool returns new empty pool. +func newPool(peerCooldownTime time.Duration) *pool { + p := &pool{ + peersList: make([]peer.ID, 0), + statuses: make(map[peer.ID]status), + hasPeerCh: make(chan struct{}), + cleanupThreshold: defaultCleanupThreshold, + } + p.cooldown = newTimedQueue(peerCooldownTime, p.afterCooldown) + return p +} + +// tryGet returns peer along with bool flag indicating success of operation. +func (p *pool) tryGet() (peer.ID, bool) { + p.m.Lock() + defer p.m.Unlock() + + if p.activeCount == 0 { + return "", false + } + + // if pointer is out of range, point to first element + if p.nextIdx > len(p.peersList)-1 { + p.nextIdx = 0 + } + + start := p.nextIdx + for { + peerID := p.peersList[p.nextIdx] + + p.nextIdx++ + if p.nextIdx == len(p.peersList) { + p.nextIdx = 0 + } + + if p.statuses[peerID] == active { + return peerID, true + } + + // full circle passed + if p.nextIdx == start { + return "", false + } + } +} + +// next sends a peer to the returned channel when it becomes available. 
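+// The returned channel is buffered, so the sending goroutine never blocks if
+// the caller gives up early. A hedged caller-side sketch, assuming a pool p
+// and a request-scoped ctx:
+//
+//	select {
+//	case peerID := <-p.next(ctx):
+//		// peerID is ready to be used for a request
+//	case <-ctx.Done():
+//		return ctx.Err()
+//	}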
+func (p *pool) next(ctx context.Context) <-chan peer.ID { + peerCh := make(chan peer.ID, 1) + go func() { + for { + if peerID, ok := p.tryGet(); ok { + peerCh <- peerID + return + } + + p.m.RLock() + hasPeerCh := p.hasPeerCh + p.m.RUnlock() + select { + case <-hasPeerCh: + case <-ctx.Done(): + return + } + } + }() + return peerCh +} + +func (p *pool) add(peers ...peer.ID) { + p.m.Lock() + defer p.m.Unlock() + + for _, peerID := range peers { + status, ok := p.statuses[peerID] + if ok && status != removed { + continue + } + + if !ok { + p.peersList = append(p.peersList, peerID) + } + + p.statuses[peerID] = active + p.activeCount++ + } + p.checkHasPeers() +} + +func (p *pool) remove(peers ...peer.ID) { + p.m.Lock() + defer p.m.Unlock() + + for _, peerID := range peers { + if status, ok := p.statuses[peerID]; ok && status != removed { + p.statuses[peerID] = removed + if status == active { + p.activeCount-- + } + } + } + + // do cleanup if too much garbage + if len(p.peersList) >= p.activeCount+p.cleanupThreshold { + p.cleanup() + } + p.checkHasPeers() +} + +func (p *pool) has(peer peer.ID) bool { + p.m.RLock() + defer p.m.RUnlock() + + status, ok := p.statuses[peer] + return ok && status != removed +} + +func (p *pool) peers() []peer.ID { + p.m.RLock() + defer p.m.RUnlock() + + peers := make([]peer.ID, 0, len(p.peersList)) + for peer, status := range p.statuses { + if status != removed { + peers = append(peers, peer) + } + } + return peers +} + +// cleanup will reduce memory footprint of pool. +func (p *pool) cleanup() { + newList := make([]peer.ID, 0, p.activeCount) + for _, peerID := range p.peersList { + status := p.statuses[peerID] + switch status { + case active, cooldown: + newList = append(newList, peerID) + case removed: + delete(p.statuses, peerID) + } + } + p.peersList = newList +} + +func (p *pool) putOnCooldown(peerID peer.ID) { + p.m.Lock() + defer p.m.Unlock() + + if status, ok := p.statuses[peerID]; ok && status == active { + p.cooldown.push(peerID) + + p.statuses[peerID] = cooldown + p.activeCount-- + p.checkHasPeers() + } +} + +func (p *pool) afterCooldown(peerID peer.ID) { + p.m.Lock() + defer p.m.Unlock() + + // item could have been already removed by the time afterCooldown is called + if status, ok := p.statuses[peerID]; !ok || status != cooldown { + return + } + + p.statuses[peerID] = active + p.activeCount++ + p.checkHasPeers() +} + +// checkHasPeers will check and indicate if there are peers in the pool. +func (p *pool) checkHasPeers() { + if p.activeCount > 0 && !p.hasPeer { + p.hasPeer = true + close(p.hasPeerCh) + return + } + + if p.activeCount == 0 && p.hasPeer { + p.hasPeerCh = make(chan struct{}) + p.hasPeer = false + } +} + +func (p *pool) len() int { + p.m.RLock() + defer p.m.RUnlock() + return p.activeCount +} diff --git a/share/p2p/peers/pool_test.go b/share/p2p/peers/pool_test.go new file mode 100644 index 0000000000..ac9d38f261 --- /dev/null +++ b/share/p2p/peers/pool_test.go @@ -0,0 +1,184 @@ +package peers + +import ( + "context" + "testing" + "time" + + "github.com/libp2p/go-libp2p/core/peer" + "github.com/stretchr/testify/require" +) + +func TestPool(t *testing.T) { + t.Run("add / remove peers", func(t *testing.T) { + p := newPool(time.Second) + + peers := []peer.ID{"peer1", "peer1", "peer2", "peer3"} + // adding same peer twice should not produce copies + p.add(peers...) 
+ require.Equal(t, len(peers)-1, p.activeCount) + + p.remove("peer1", "peer2") + require.Equal(t, len(peers)-3, p.activeCount) + + peerID, ok := p.tryGet() + require.True(t, ok) + require.Equal(t, peers[3], peerID) + + p.remove("peer3") + p.remove("peer3") + require.Equal(t, 0, p.activeCount) + _, ok = p.tryGet() + require.False(t, ok) + }) + + t.Run("round robin", func(t *testing.T) { + p := newPool(time.Second) + + peers := []peer.ID{"peer1", "peer1", "peer2", "peer3"} + // adding same peer twice should not produce copies + p.add(peers...) + require.Equal(t, 3, p.activeCount) + + peerID, ok := p.tryGet() + require.True(t, ok) + require.Equal(t, peer.ID("peer1"), peerID) + + peerID, ok = p.tryGet() + require.True(t, ok) + require.Equal(t, peer.ID("peer2"), peerID) + + peerID, ok = p.tryGet() + require.True(t, ok) + require.Equal(t, peer.ID("peer3"), peerID) + + peerID, ok = p.tryGet() + require.True(t, ok) + require.Equal(t, peer.ID("peer1"), peerID) + + p.remove("peer2", "peer3") + require.Equal(t, 1, p.activeCount) + + // pointer should skip removed items until found active one + peerID, ok = p.tryGet() + require.True(t, ok) + require.Equal(t, peer.ID("peer1"), peerID) + }) + + t.Run("wait for peer", func(t *testing.T) { + timeout := time.Second + shortCtx, cancel := context.WithTimeout(context.Background(), timeout/10) + t.Cleanup(cancel) + + longCtx, cancel := context.WithTimeout(context.Background(), timeout) + t.Cleanup(cancel) + + p := newPool(time.Second) + done := make(chan struct{}) + + go func() { + select { + case <-p.next(shortCtx): + case <-shortCtx.Done(): + require.Error(t, shortCtx.Err()) + // unlock longCtx waiter by adding new peer + p.add("peer1") + } + }() + + go func() { + defer close(done) + select { + case peerID := <-p.next(longCtx): + require.Equal(t, peer.ID("peer1"), peerID) + case <-longCtx.Done(): + require.NoError(t, longCtx.Err()) + } + }() + + select { + case <-done: + case <-longCtx.Done(): + require.NoError(t, longCtx.Err()) + } + }) + + t.Run("nextIdx got removed", func(t *testing.T) { + p := newPool(time.Second) + + peers := []peer.ID{"peer1", "peer2", "peer3"} + p.add(peers...) + p.nextIdx = 2 + p.remove(peers[p.nextIdx]) + + // if previous nextIdx was removed, tryGet should iterate until available peer found + peerID, ok := p.tryGet() + require.True(t, ok) + require.Equal(t, peers[0], peerID) + }) + + t.Run("cleanup", func(t *testing.T) { + p := newPool(time.Second) + p.cleanupThreshold = 3 + + peers := []peer.ID{"peer1", "peer2", "peer3", "peer4", "peer5"} + p.add(peers...) + require.Equal(t, len(peers), p.activeCount) + + // point to last element that will be removed, to check how pointer will be updated + p.nextIdx = len(peers) - 1 + + // remove some, but not trigger cleanup yet + p.remove(peers[3:]...) 
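+		// cleanup fires only when len(peersList) >= activeCount+cleanupThreshold;
+		// here 5 < 3+3, so removed entries are still tracked in statuses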
+ require.Equal(t, len(peers)-2, p.activeCount) + require.Equal(t, len(peers), len(p.statuses)) + + // trigger cleanup + p.remove(peers[2]) + require.Equal(t, len(peers)-3, p.activeCount) + require.Equal(t, len(peers)-3, len(p.statuses)) + + // nextIdx pointer should be updated after next tryGet + p.tryGet() + require.Equal(t, 1, p.nextIdx) + }) + + t.Run("cooldown blocks get", func(t *testing.T) { + ttl := time.Second / 10 + p := newPool(ttl) + + peerID := peer.ID("peer1") + p.add(peerID) + + _, ok := p.tryGet() + require.True(t, ok) + + p.putOnCooldown(peerID) + // item should be unavailable + _, ok = p.tryGet() + require.False(t, ok) + + ctx, cancel := context.WithTimeout(context.Background(), ttl*5) + defer cancel() + select { + case <-p.next(ctx): + case <-ctx.Done(): + t.Fatal("item should be already available") + } + }) + + t.Run("put on cooldown removed item should be noop", func(t *testing.T) { + p := newPool(time.Second) + p.cleanupThreshold = 3 + + peerID := peer.ID("peer1") + p.add(peerID) + + p.remove(peerID) + p.cleanup() + p.putOnCooldown(peerID) + + _, ok := p.tryGet() + require.False(t, ok) + }) +} diff --git a/share/p2p/peers/timedqueue.go b/share/p2p/peers/timedqueue.go new file mode 100644 index 0000000000..3ed7e29a2c --- /dev/null +++ b/share/p2p/peers/timedqueue.go @@ -0,0 +1,91 @@ +package peers + +import ( + "sync" + "time" + + "github.com/benbjohnson/clock" + "github.com/libp2p/go-libp2p/core/peer" +) + +// timedQueue store items for ttl duration and releases it with calling onPop callback. Each item +// is tracked independently +type timedQueue struct { + sync.Mutex + items []item + + // ttl is the amount of time each item exist in the timedQueue + ttl time.Duration + clock clock.Clock + after *clock.Timer + // onPop will be called on item peer.ID after it is released + onPop func(peer.ID) +} + +type item struct { + peer.ID + createdAt time.Time +} + +func newTimedQueue(ttl time.Duration, onPop func(peer.ID)) *timedQueue { + return &timedQueue{ + items: make([]item, 0), + clock: clock.New(), + ttl: ttl, + onPop: onPop, + } +} + +// releaseExpired will release all expired items +func (q *timedQueue) releaseExpired() { + q.Lock() + defer q.Unlock() + q.releaseUnsafe() +} + +func (q *timedQueue) releaseUnsafe() { + if len(q.items) == 0 { + return + } + + var i int + for _, next := range q.items { + timeIn := q.clock.Since(next.createdAt) + if timeIn < q.ttl { + // item is not expired yet, create a timer that will call releaseExpired + q.after.Stop() + q.after = q.clock.AfterFunc(q.ttl-timeIn, q.releaseExpired) + break + } + + // item is expired + q.onPop(next.ID) + i++ + } + + if i > 0 { + copy(q.items, q.items[i:]) + q.items = q.items[:len(q.items)-i] + } +} + +func (q *timedQueue) push(peerID peer.ID) { + q.Lock() + defer q.Unlock() + + q.items = append(q.items, item{ + ID: peerID, + createdAt: q.clock.Now(), + }) + + // if it is the first item in queue, create a timer to call releaseExpired after its expiration + if len(q.items) == 1 { + q.after = q.clock.AfterFunc(q.ttl, q.releaseExpired) + } +} + +func (q *timedQueue) len() int { + q.Lock() + defer q.Unlock() + return len(q.items) +} diff --git a/share/p2p/peers/timedqueue_test.go b/share/p2p/peers/timedqueue_test.go new file mode 100644 index 0000000000..fb5ef9629f --- /dev/null +++ b/share/p2p/peers/timedqueue_test.go @@ -0,0 +1,61 @@ +package peers + +import ( + "testing" + "time" + + "github.com/benbjohnson/clock" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/stretchr/testify/require" +) + +func 
TestTimedQueue(t *testing.T) { + t.Run("push item", func(t *testing.T) { + peers := []peer.ID{"peer1", "peer2"} + ttl := time.Second + + popCh := make(chan struct{}, 1) + queue := newTimedQueue(ttl, func(id peer.ID) { + go func() { + require.Contains(t, peers, id) + popCh <- struct{}{} + }() + }) + mock := clock.NewMock() + queue.clock = mock + + // push first item | global time : 0 + queue.push(peers[0]) + require.Equal(t, queue.len(), 1) + + // push second item with ttl/2 gap | global time : ttl/2 + mock.Add(ttl / 2) + queue.push(peers[1]) + require.Equal(t, queue.len(), 2) + + // advance clock 1 nano sec before first item should expire | global time : ttl - 1 + mock.Add(ttl/2 - 1) + // check that releaseExpired doesn't remove items + queue.releaseExpired() + require.Equal(t, queue.len(), 2) + // first item should be released after its own timeout | global time : ttl + mock.Add(1) + + select { + case <-popCh: + case <-time.After(ttl): + t.Fatal("first item is not released") + + } + require.Equal(t, queue.len(), 1) + + // first item should be released after ttl/2 gap timeout | global time : 3/2*ttl + mock.Add(ttl / 2) + select { + case <-popCh: + case <-time.After(ttl): + t.Fatal("second item is not released") + } + require.Equal(t, queue.len(), 0) + }) +} diff --git a/share/p2p/shrexeds/client.go b/share/p2p/shrexeds/client.go new file mode 100644 index 0000000000..7602bb5fb0 --- /dev/null +++ b/share/p2p/shrexeds/client.go @@ -0,0 +1,177 @@ +package shrexeds + +import ( + "context" + "errors" + "fmt" + "io" + "net" + "time" + + "github.com/libp2p/go-libp2p/core/host" + "github.com/libp2p/go-libp2p/core/network" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/core/protocol" + + "github.com/celestiaorg/go-libp2p-messenger/serde" + "github.com/celestiaorg/rsmt2d" + + "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/share/eds" + "github.com/celestiaorg/celestia-node/share/p2p" + pb "github.com/celestiaorg/celestia-node/share/p2p/shrexeds/pb" +) + +// Client is responsible for requesting EDSs for blocksync over the ShrEx/EDS protocol. +type Client struct { + params *Parameters + protocolID protocol.ID + host host.Host + + metrics *p2p.Metrics +} + +// NewClient creates a new ShrEx/EDS client. +func NewClient(params *Parameters, host host.Host) (*Client, error) { + if err := params.Validate(); err != nil { + return nil, fmt.Errorf("shrex-eds: client creation failed: %w", err) + } + + return &Client{ + params: params, + host: host, + protocolID: p2p.ProtocolID(params.NetworkID(), protocolString), + }, nil +} + +// RequestEDS requests the ODS from the given peers and returns the EDS upon success. 
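+//
+// A hedged usage sketch (client is assumed to come from NewClient, and
+// dataHash/peerID from shrexsub notifications and the peer manager):
+//
+//	eds, err := client.RequestEDS(ctx, dataHash, peerID)
+//	switch {
+//	case errors.Is(err, p2p.ErrNotFound):
+//		// the peer does not have the EDS (or rate-limited us); try another peer
+//	case err != nil:
+//		// timeout or protocol failure
+//	}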
+func (c *Client) RequestEDS( + ctx context.Context, + dataHash share.DataHash, + peer peer.ID, +) (*rsmt2d.ExtendedDataSquare, error) { + eds, err := c.doRequest(ctx, dataHash, peer) + if err == nil { + return eds, nil + } + log.Debugw("client: eds request to peer failed", "peer", peer.String(), "hash", dataHash.String(), "error", err) + if errors.Is(err, context.DeadlineExceeded) || errors.Is(err, context.Canceled) { + c.metrics.ObserveRequests(ctx, 1, p2p.StatusTimeout) + return nil, err + } + // some net.Errors also mean the context deadline was exceeded, but yamux/mocknet do not + // unwrap to a ctx err + var ne net.Error + if errors.As(err, &ne) && ne.Timeout() { + if deadline, _ := ctx.Deadline(); deadline.Before(time.Now()) { + c.metrics.ObserveRequests(ctx, 1, p2p.StatusTimeout) + return nil, context.DeadlineExceeded + } + } + if err != p2p.ErrNotFound { + log.Warnw("client: eds request to peer failed", + "peer", peer.String(), + "hash", dataHash.String(), + "err", err) + } + + return nil, err +} + +func (c *Client) doRequest( + ctx context.Context, + dataHash share.DataHash, + to peer.ID, +) (*rsmt2d.ExtendedDataSquare, error) { + streamOpenCtx, cancel := context.WithTimeout(ctx, c.params.ServerReadTimeout) + defer cancel() + stream, err := c.host.NewStream(streamOpenCtx, to, c.protocolID) + if err != nil { + return nil, fmt.Errorf("failed to open stream: %w", err) + } + defer stream.Close() + + c.setStreamDeadlines(ctx, stream) + + req := &pb.EDSRequest{Hash: dataHash} + + // request ODS + log.Debugw("client: requesting ods", "hash", dataHash.String(), "peer", to.String()) + _, err = serde.Write(stream, req) + if err != nil { + stream.Reset() //nolint:errcheck + return nil, fmt.Errorf("failed to write request to stream: %w", err) + } + err = stream.CloseWrite() + if err != nil { + log.Debugw("client: error closing write", "err", err) + } + + // read and parse status from peer + resp := new(pb.EDSResponse) + err = stream.SetReadDeadline(time.Now().Add(c.params.ServerReadTimeout)) + if err != nil { + log.Debugw("client: failed to set read deadline for reading status", "err", err) + } + _, err = serde.Read(stream, resp) + if err != nil { + // server closes the stream here if we are rate limited + if errors.Is(err, io.EOF) { + c.metrics.ObserveRequests(ctx, 1, p2p.StatusRateLimited) + return nil, p2p.ErrNotFound + } + stream.Reset() //nolint:errcheck + return nil, fmt.Errorf("failed to read status from stream: %w", err) + } + + switch resp.Status { + case pb.Status_OK: + // reset stream deadlines to original values, since read deadline was changed during status read + c.setStreamDeadlines(ctx, stream) + // use header and ODS bytes to construct EDS and verify it against dataHash + eds, err := eds.ReadEDS(ctx, stream, dataHash) + if err != nil { + return nil, fmt.Errorf("failed to read eds from ods bytes: %w", err) + } + c.metrics.ObserveRequests(ctx, 1, p2p.StatusSuccess) + return eds, nil + case pb.Status_NOT_FOUND: + c.metrics.ObserveRequests(ctx, 1, p2p.StatusNotFound) + return nil, p2p.ErrNotFound + case pb.Status_INVALID: + log.Debug("client: invalid request") + fallthrough + case pb.Status_INTERNAL: + fallthrough + default: + c.metrics.ObserveRequests(ctx, 1, p2p.StatusInternalErr) + return nil, p2p.ErrInvalidResponse + } +} + +func (c *Client) setStreamDeadlines(ctx context.Context, stream network.Stream) { + // set read/write deadline to use context deadline if it exists + if dl, ok := ctx.Deadline(); ok { + err := stream.SetDeadline(dl) + if err == nil { + return + } + 
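+		// SetDeadline failed; log and fall through to the per-direction
+		// fallback deadlines below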
log.Debugw("client: setting deadline: %s", "err", err) + } + + // if deadline not set, client read deadline defaults to server write deadline + if c.params.ServerWriteTimeout != 0 { + err := stream.SetReadDeadline(time.Now().Add(c.params.ServerWriteTimeout)) + if err != nil { + log.Debugw("client: setting read deadline", "err", err) + } + } + + // if deadline not set, client write deadline defaults to server read deadline + if c.params.ServerReadTimeout != 0 { + err := stream.SetWriteDeadline(time.Now().Add(c.params.ServerReadTimeout)) + if err != nil { + log.Debugw("client: setting write deadline", "err", err) + } + } +} diff --git a/share/p2p/shrexeds/doc.go b/share/p2p/shrexeds/doc.go new file mode 100644 index 0000000000..6bad3061f9 --- /dev/null +++ b/share/p2p/shrexeds/doc.go @@ -0,0 +1,51 @@ +// This package defines a protocol that is used to request +// extended data squares from peers in the network. +// +// This protocol is a request/response protocol that allows for sending requests for extended data squares by data root +// to the peers in the network and receiving a response containing the original data square(s), which is used +// to recompute the extended data square. +// +// The streams are established using the protocol ID: +// +// - "{networkID}/shrex/eds/v0.0.1" where networkID is the network ID of the network. (e.g. "arabica") +// +// When a peer receives a request for extended data squares, it will read +// the original data square from the EDS store by retrieving the underlying +// CARv1 file containing the full extended data square, but will limit reading +// to the original data square shares only. +// The client on the other hand will take care of computing the extended data squares from +// the original data square on receipt. +// +// # Usage +// +// To use a shrexeds client to request extended data squares from a peer, you must +// first create a new `shrexeds.Client` instance by: +// +// client, err := shrexeds.NewClient(params, host) +// +// where `params` is a `shrexeds.Parameters` instance and `host` is a `libp2p.Host` instance. +// +// To request extended data squares from a peer, you must first create a `Client.RequestEDS` instance by: +// +// eds, err := client.RequestEDS(ctx, dataHash, peer) +// +// where: +// - `ctx` is a `context.Context` instance, +// - `dataHash` is the data root of the extended data square and +// - `peer` is the peer ID of the peer to request the extended data square from. +// +// To use a shrexeds server to respond to requests for extended data squares from peers +// you must first create a new `shrexeds.Server` instance by: +// +// server, err := shrexeds.NewServer(params, host, store) +// +// where `params` is a [Parameters] instance, `host` is a libp2p.Host instance and `store` is a [eds.Store] instance. 
+// +// To start the server, you must call `Start` on the server: +// +// err := server.Start(ctx) +// +// To stop the server, you must call `Stop` on the server: +// +// err := server.Stop(ctx) +package shrexeds diff --git a/share/p2p/shrexeds/exchange_test.go b/share/p2p/shrexeds/exchange_test.go new file mode 100644 index 0000000000..9155be6dec --- /dev/null +++ b/share/p2p/shrexeds/exchange_test.go @@ -0,0 +1,166 @@ +package shrexeds + +import ( + "context" + "sync" + "testing" + "time" + + "github.com/ipfs/go-datastore" + ds_sync "github.com/ipfs/go-datastore/sync" + libhost "github.com/libp2p/go-libp2p/core/host" + "github.com/libp2p/go-libp2p/core/network" + mocknet "github.com/libp2p/go-libp2p/p2p/net/mock" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/share/eds" + "github.com/celestiaorg/celestia-node/share/eds/edstest" + "github.com/celestiaorg/celestia-node/share/p2p" +) + +func TestExchange_RequestEDS(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + store, client, server := makeExchange(t) + + err := store.Start(ctx) + require.NoError(t, err) + + err = server.Start(ctx) + require.NoError(t, err) + + // Testcase: EDS is immediately available + t.Run("EDS_Available", func(t *testing.T) { + eds := edstest.RandEDS(t, 4) + dah, err := share.NewRoot(eds) + require.NoError(t, err) + err = store.Put(ctx, dah.Hash(), eds) + require.NoError(t, err) + + requestedEDS, err := client.RequestEDS(ctx, dah.Hash(), server.host.ID()) + assert.NoError(t, err) + assert.Equal(t, eds.Flattened(), requestedEDS.Flattened()) + }) + + // Testcase: EDS is unavailable initially, but is found after multiple requests + t.Run("EDS_AvailableAfterDelay", func(t *testing.T) { + eds := edstest.RandEDS(t, 4) + dah, err := share.NewRoot(eds) + require.NoError(t, err) + + lock := make(chan struct{}) + go func() { + <-lock + err = store.Put(ctx, dah.Hash(), eds) + require.NoError(t, err) + lock <- struct{}{} + }() + + requestedEDS, err := client.RequestEDS(ctx, dah.Hash(), server.host.ID()) + assert.ErrorIs(t, err, p2p.ErrNotFound) + assert.Nil(t, requestedEDS) + + // unlock write + lock <- struct{}{} + // wait for write to finish + <-lock + + requestedEDS, err = client.RequestEDS(ctx, dah.Hash(), server.host.ID()) + assert.NoError(t, err) + assert.Equal(t, eds.Flattened(), requestedEDS.Flattened()) + }) + + // Testcase: Invalid request excludes peer from round-robin, stopping request + t.Run("EDS_InvalidRequest", func(t *testing.T) { + dataHash := []byte("invalid") + requestedEDS, err := client.RequestEDS(ctx, dataHash, server.host.ID()) + assert.ErrorContains(t, err, "stream reset") + assert.Nil(t, requestedEDS) + }) + + t.Run("EDS_err_not_found", func(t *testing.T) { + timeoutCtx, cancel := context.WithTimeout(ctx, time.Second) + t.Cleanup(cancel) + eds := edstest.RandEDS(t, 4) + dah, err := share.NewRoot(eds) + require.NoError(t, err) + _, err = client.RequestEDS(timeoutCtx, dah.Hash(), server.host.ID()) + require.ErrorIs(t, err, p2p.ErrNotFound) + }) + + // Testcase: Concurrency limit reached + t.Run("EDS_concurrency_limit", func(t *testing.T) { + store, client, server := makeExchange(t) + + require.NoError(t, store.Start(ctx)) + require.NoError(t, server.Start(ctx)) + + ctx, cancel := context.WithTimeout(ctx, time.Second) + t.Cleanup(cancel) + + rateLimit := 2 + wg := sync.WaitGroup{} + wg.Add(rateLimit) + + // mockHandler will block requests on 
server side until test is over + lock := make(chan struct{}) + defer close(lock) + mockHandler := func(network.Stream) { + wg.Done() + select { + case <-lock: + case <-ctx.Done(): + t.Fatal("timeout") + } + } + middleware := p2p.NewMiddleware(rateLimit) + server.host.SetStreamHandler(server.protocolID, + middleware.RateLimitHandler(mockHandler)) + + // take server concurrency slots with blocked requests + for i := 0; i < rateLimit; i++ { + go func(i int) { + client.RequestEDS(ctx, nil, server.host.ID()) //nolint:errcheck + }(i) + } + + // wait until all server slots are taken + wg.Wait() + _, err = client.RequestEDS(ctx, nil, server.host.ID()) + require.ErrorIs(t, err, p2p.ErrNotFound) + }) +} + +func newStore(t *testing.T) *eds.Store { + t.Helper() + + storeCfg := eds.DefaultParameters() + ds := ds_sync.MutexWrap(datastore.NewMapDatastore()) + store, err := eds.NewStore(storeCfg, t.TempDir(), ds) + require.NoError(t, err) + return store +} + +func createMocknet(t *testing.T, amount int) []libhost.Host { + t.Helper() + + net, err := mocknet.FullMeshConnected(amount) + require.NoError(t, err) + // get host and peer + return net.Hosts() +} + +func makeExchange(t *testing.T) (*eds.Store, *Client, *Server) { + t.Helper() + store := newStore(t) + hosts := createMocknet(t, 2) + + client, err := NewClient(DefaultParameters(), hosts[0]) + require.NoError(t, err) + server, err := NewServer(DefaultParameters(), hosts[1], store) + require.NoError(t, err) + + return store, client, server +} diff --git a/share/p2p/shrexeds/params.go b/share/p2p/shrexeds/params.go new file mode 100644 index 0000000000..795cb313ed --- /dev/null +++ b/share/p2p/shrexeds/params.go @@ -0,0 +1,54 @@ +package shrexeds + +import ( + "fmt" + + logging "github.com/ipfs/go-log/v2" + + "github.com/celestiaorg/celestia-node/share/p2p" +) + +const protocolString = "/shrex/eds/v0.0.1" + +var log = logging.Logger("shrex/eds") + +// Parameters is the set of parameters that must be configured for the shrex/eds protocol. +type Parameters struct { + *p2p.Parameters + + // BufferSize defines the size of the buffer used for writing an ODS over the stream. + BufferSize uint64 +} + +func DefaultParameters() *Parameters { + return &Parameters{ + Parameters: p2p.DefaultParameters(), + BufferSize: 32 * 1024, + } +} + +func (p *Parameters) Validate() error { + if p.BufferSize <= 0 { + return fmt.Errorf("invalid buffer size: %v, value should be positive and non-zero", p.BufferSize) + } + + return p.Parameters.Validate() +} + +func (c *Client) WithMetrics() error { + metrics, err := p2p.InitClientMetrics("eds") + if err != nil { + return fmt.Errorf("shrex/eds: init Metrics: %w", err) + } + c.metrics = metrics + return nil +} + +func (s *Server) WithMetrics() error { + metrics, err := p2p.InitServerMetrics("eds") + if err != nil { + return fmt.Errorf("shrex/eds: init Metrics: %w", err) + } + s.metrics = metrics + return nil +} diff --git a/share/p2p/shrexeds/pb/extended_data_square.pb.go b/share/p2p/shrexeds/pb/extended_data_square.pb.go new file mode 100644 index 0000000000..ed1a96ae3b --- /dev/null +++ b/share/p2p/shrexeds/pb/extended_data_square.pb.go @@ -0,0 +1,509 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: share/p2p/shrexeds/pb/extended_data_square.proto + +package extended_data_square + +import ( + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type Status int32 + +const ( + Status_INVALID Status = 0 + Status_OK Status = 1 + Status_NOT_FOUND Status = 2 + Status_INTERNAL Status = 3 +) + +var Status_name = map[int32]string{ + 0: "INVALID", + 1: "OK", + 2: "NOT_FOUND", + 3: "INTERNAL", +} + +var Status_value = map[string]int32{ + "INVALID": 0, + "OK": 1, + "NOT_FOUND": 2, + "INTERNAL": 3, +} + +func (x Status) String() string { + return proto.EnumName(Status_name, int32(x)) +} + +func (Status) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_49d42aa96098056e, []int{0} +} + +type EDSRequest struct { + Hash []byte `protobuf:"bytes,1,opt,name=hash,proto3" json:"hash,omitempty"` +} + +func (m *EDSRequest) Reset() { *m = EDSRequest{} } +func (m *EDSRequest) String() string { return proto.CompactTextString(m) } +func (*EDSRequest) ProtoMessage() {} +func (*EDSRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_49d42aa96098056e, []int{0} +} +func (m *EDSRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EDSRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EDSRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EDSRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_EDSRequest.Merge(m, src) +} +func (m *EDSRequest) XXX_Size() int { + return m.Size() +} +func (m *EDSRequest) XXX_DiscardUnknown() { + xxx_messageInfo_EDSRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_EDSRequest proto.InternalMessageInfo + +func (m *EDSRequest) GetHash() []byte { + if m != nil { + return m.Hash + } + return nil +} + +type EDSResponse struct { + Status Status `protobuf:"varint,1,opt,name=status,proto3,enum=Status" json:"status,omitempty"` +} + +func (m *EDSResponse) Reset() { *m = EDSResponse{} } +func (m *EDSResponse) String() string { return proto.CompactTextString(m) } +func (*EDSResponse) ProtoMessage() {} +func (*EDSResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_49d42aa96098056e, []int{1} +} +func (m *EDSResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EDSResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EDSResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EDSResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_EDSResponse.Merge(m, src) +} +func (m *EDSResponse) XXX_Size() int { + return m.Size() +} +func (m *EDSResponse) XXX_DiscardUnknown() { + xxx_messageInfo_EDSResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_EDSResponse proto.InternalMessageInfo + +func (m *EDSResponse) GetStatus() Status { + if m != nil { + return m.Status + } + return Status_INVALID +} + +func init() { + proto.RegisterEnum("Status", Status_name, Status_value) + proto.RegisterType((*EDSRequest)(nil), "EDSRequest") + proto.RegisterType((*EDSResponse)(nil), "EDSResponse") +} + +func init() { + 
proto.RegisterFile("share/p2p/shrexeds/pb/extended_data_square.proto", fileDescriptor_49d42aa96098056e) +} + +var fileDescriptor_49d42aa96098056e = []byte{ + // 227 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x32, 0x28, 0xce, 0x48, 0x2c, + 0x4a, 0xd5, 0x2f, 0x30, 0x2a, 0xd0, 0x2f, 0xce, 0x28, 0x4a, 0xad, 0x48, 0x4d, 0x29, 0xd6, 0x2f, + 0x48, 0xd2, 0x4f, 0xad, 0x28, 0x49, 0xcd, 0x4b, 0x49, 0x4d, 0x89, 0x4f, 0x49, 0x2c, 0x49, 0x8c, + 0x2f, 0x2e, 0x2c, 0x4d, 0x2c, 0x4a, 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x57, 0x52, 0xe0, 0xe2, + 0x72, 0x75, 0x09, 0x0e, 0x4a, 0x2d, 0x2c, 0x4d, 0x2d, 0x2e, 0x11, 0x12, 0xe2, 0x62, 0xc9, 0x48, + 0x2c, 0xce, 0x90, 0x60, 0x54, 0x60, 0xd4, 0xe0, 0x09, 0x02, 0xb3, 0x95, 0xf4, 0xb8, 0xb8, 0xc1, + 0x2a, 0x8a, 0x0b, 0xf2, 0xf3, 0x8a, 0x53, 0x85, 0xe4, 0xb9, 0xd8, 0x8a, 0x4b, 0x12, 0x4b, 0x4a, + 0x8b, 0xc1, 0x8a, 0xf8, 0x8c, 0xd8, 0xf5, 0x82, 0xc1, 0xdc, 0x20, 0xa8, 0xb0, 0x96, 0x15, 0x17, + 0x1b, 0x44, 0x44, 0x88, 0x9b, 0x8b, 0xdd, 0xd3, 0x2f, 0xcc, 0xd1, 0xc7, 0xd3, 0x45, 0x80, 0x41, + 0x88, 0x8d, 0x8b, 0xc9, 0xdf, 0x5b, 0x80, 0x51, 0x88, 0x97, 0x8b, 0xd3, 0xcf, 0x3f, 0x24, 0xde, + 0xcd, 0x3f, 0xd4, 0xcf, 0x45, 0x80, 0x49, 0x88, 0x87, 0x8b, 0xc3, 0xd3, 0x2f, 0xc4, 0x35, 0xc8, + 0xcf, 0xd1, 0x47, 0x80, 0xd9, 0x49, 0xe2, 0xc4, 0x23, 0x39, 0xc6, 0x0b, 0x8f, 0xe4, 0x18, 0x1f, + 0x3c, 0x92, 0x63, 0x9c, 0xf0, 0x58, 0x8e, 0xe1, 0xc2, 0x63, 0x39, 0x86, 0x1b, 0x8f, 0xe5, 0x18, + 0x92, 0xd8, 0xc0, 0xce, 0x35, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0x7b, 0x1d, 0xd4, 0xa7, 0xe2, + 0x00, 0x00, 0x00, +} + +func (m *EDSRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EDSRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EDSRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Hash) > 0 { + i -= len(m.Hash) + copy(dAtA[i:], m.Hash) + i = encodeVarintExtendedDataSquare(dAtA, i, uint64(len(m.Hash))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *EDSResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EDSResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EDSResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Status != 0 { + i = encodeVarintExtendedDataSquare(dAtA, i, uint64(m.Status)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintExtendedDataSquare(dAtA []byte, offset int, v uint64) int { + offset -= sovExtendedDataSquare(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *EDSRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Hash) + if l > 0 { + n += 1 + l + sovExtendedDataSquare(uint64(l)) + } + return n +} + +func (m *EDSResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Status != 0 { + n += 1 + sovExtendedDataSquare(uint64(m.Status)) + } + return n +} + +func sovExtendedDataSquare(x uint64) (n int) { + 
return (math_bits.Len64(x|1) + 6) / 7 +} +func sozExtendedDataSquare(x uint64) (n int) { + return sovExtendedDataSquare(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *EDSRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowExtendedDataSquare + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EDSRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EDSRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowExtendedDataSquare + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthExtendedDataSquare + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthExtendedDataSquare + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hash = append(m.Hash[:0], dAtA[iNdEx:postIndex]...) + if m.Hash == nil { + m.Hash = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipExtendedDataSquare(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthExtendedDataSquare + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EDSResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowExtendedDataSquare + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EDSResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EDSResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + m.Status = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowExtendedDataSquare + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Status |= Status(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipExtendedDataSquare(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthExtendedDataSquare + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipExtendedDataSquare(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return 0, ErrIntOverflowExtendedDataSquare + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowExtendedDataSquare + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowExtendedDataSquare + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthExtendedDataSquare + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupExtendedDataSquare + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthExtendedDataSquare + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthExtendedDataSquare = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowExtendedDataSquare = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupExtendedDataSquare = fmt.Errorf("proto: unexpected end of group") +) diff --git a/share/p2p/shrexeds/pb/extended_data_square.proto b/share/p2p/shrexeds/pb/extended_data_square.proto new file mode 100644 index 0000000000..63750962e9 --- /dev/null +++ b/share/p2p/shrexeds/pb/extended_data_square.proto @@ -0,0 +1,16 @@ +syntax = "proto3"; + +message EDSRequest { + bytes hash = 1; // identifies the requested EDS. +} + +enum Status { + INVALID = 0; + OK = 1; // data found + NOT_FOUND = 2; // data not found + INTERNAL = 3; // internal server error +} + +message EDSResponse { + Status status = 1; +} diff --git a/share/p2p/shrexeds/server.go b/share/p2p/shrexeds/server.go new file mode 100644 index 0000000000..11b99a3438 --- /dev/null +++ b/share/p2p/shrexeds/server.go @@ -0,0 +1,199 @@ +package shrexeds + +import ( + "context" + "errors" + "fmt" + "io" + "time" + + "github.com/libp2p/go-libp2p/core/host" + "github.com/libp2p/go-libp2p/core/network" + "github.com/libp2p/go-libp2p/core/protocol" + "go.uber.org/zap" + + "github.com/celestiaorg/go-libp2p-messenger/serde" + + "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/share/eds" + "github.com/celestiaorg/celestia-node/share/p2p" + p2p_pb "github.com/celestiaorg/celestia-node/share/p2p/shrexeds/pb" +) + +// Server is responsible for serving ODSs for blocksync over the ShrEx/EDS protocol. +type Server struct { + ctx context.Context + cancel context.CancelFunc + + host host.Host + protocolID protocol.ID + + store *eds.Store + + params *Parameters + middleware *p2p.Middleware + metrics *p2p.Metrics +} + +// NewServer creates a new ShrEx/EDS server. 
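+// Incoming streams are wrapped with p2p.Middleware, so at most
+// Parameters.ConcurrencyLimit requests are served concurrently; requests over
+// that limit are not served, and the client reports them as rate limited
+// after observing io.EOF while reading the status.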
+func NewServer(params *Parameters, host host.Host, store *eds.Store) (*Server, error) { + if err := params.Validate(); err != nil { + return nil, fmt.Errorf("shrex-eds: server creation failed: %w", err) + } + + return &Server{ + host: host, + store: store, + protocolID: p2p.ProtocolID(params.NetworkID(), protocolString), + params: params, + middleware: p2p.NewMiddleware(params.ConcurrencyLimit), + }, nil +} + +func (s *Server) Start(context.Context) error { + s.ctx, s.cancel = context.WithCancel(context.Background()) + s.host.SetStreamHandler(s.protocolID, s.middleware.RateLimitHandler(s.handleStream)) + return nil +} + +func (s *Server) Stop(context.Context) error { + defer s.cancel() + s.host.RemoveStreamHandler(s.protocolID) + return nil +} + +func (s *Server) observeRateLimitedRequests() { + numRateLimited := s.middleware.DrainCounter() + if numRateLimited > 0 { + s.metrics.ObserveRequests(context.Background(), numRateLimited, p2p.StatusRateLimited) + } +} + +func (s *Server) handleStream(stream network.Stream) { + logger := log.With("peer", stream.Conn().RemotePeer().String()) + logger.Debug("server: handling eds request") + + s.observeRateLimitedRequests() + + // read request from stream to get the dataHash for store lookup + req, err := s.readRequest(logger, stream) + if err != nil { + logger.Warnw("server: reading request from stream", "err", err) + stream.Reset() //nolint:errcheck + return + } + + // ensure the requested dataHash is a valid root + hash := share.DataHash(req.Hash) + err = hash.Validate() + if err != nil { + logger.Warnw("server: invalid request", "err", err) + stream.Reset() //nolint:errcheck + return + } + logger = logger.With("hash", hash.String()) + + ctx, cancel := context.WithTimeout(s.ctx, s.params.HandleRequestTimeout) + defer cancel() + + // determine whether the EDS is available in our store + // we do not close the reader, so that other requests will not need to re-open the file. + // closing is handled by the LRU cache. 
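+	// the lookup result maps onto the protocol status written below:
+	// nil error -> OK, eds.ErrNotFound -> NOT_FOUND, any other error -> INTERNAL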
+ edsReader, err := s.store.GetCAR(ctx, hash) + var status p2p_pb.Status + switch { + case err == nil: + defer func() { + if err := edsReader.Close(); err != nil { + log.Warnw("closing car reader", "err", err) + } + }() + status = p2p_pb.Status_OK + case errors.Is(err, eds.ErrNotFound): + logger.Warnw("server: request hash not found") + s.metrics.ObserveRequests(ctx, 1, p2p.StatusNotFound) + status = p2p_pb.Status_NOT_FOUND + case err != nil: + logger.Errorw("server: get CAR", "err", err) + status = p2p_pb.Status_INTERNAL + } + + // inform the client of our status + err = s.writeStatus(logger, status, stream) + if err != nil { + logger.Warnw("server: writing status to stream", "err", err) + stream.Reset() //nolint:errcheck + return + } + // if we cannot serve the EDS, we are already done + if status != p2p_pb.Status_OK { + err = stream.Close() + if err != nil { + logger.Debugw("server: closing stream", "err", err) + } + return + } + + // start streaming the ODS to the client + err = s.writeODS(logger, edsReader, stream) + if err != nil { + logger.Warnw("server: writing ods to stream", "err", err) + stream.Reset() //nolint:errcheck + return + } + + s.metrics.ObserveRequests(ctx, 1, p2p.StatusSuccess) + err = stream.Close() + if err != nil { + logger.Debugw("server: closing stream", "err", err) + } +} + +func (s *Server) readRequest(logger *zap.SugaredLogger, stream network.Stream) (*p2p_pb.EDSRequest, error) { + err := stream.SetReadDeadline(time.Now().Add(s.params.ServerReadTimeout)) + if err != nil { + logger.Debugw("server: set read deadline", "err", err) + } + + req := new(p2p_pb.EDSRequest) + _, err = serde.Read(stream, req) + if err != nil { + return nil, err + } + err = stream.CloseRead() + if err != nil { + logger.Debugw("server: closing read", "err", err) + } + + return req, nil +} + +func (s *Server) writeStatus(logger *zap.SugaredLogger, status p2p_pb.Status, stream network.Stream) error { + err := stream.SetWriteDeadline(time.Now().Add(s.params.ServerWriteTimeout)) + if err != nil { + logger.Debugw("server: set write deadline", "err", err) + } + + resp := &p2p_pb.EDSResponse{Status: status} + _, err = serde.Write(stream, resp) + return err +} + +func (s *Server) writeODS(logger *zap.SugaredLogger, edsReader io.Reader, stream network.Stream) error { + err := stream.SetWriteDeadline(time.Now().Add(s.params.ServerWriteTimeout)) + if err != nil { + logger.Debugw("server: set read deadline", "err", err) + } + + odsReader, err := eds.ODSReader(edsReader) + if err != nil { + return fmt.Errorf("creating ODS reader: %w", err) + } + buf := make([]byte, s.params.BufferSize) + _, err = io.CopyBuffer(stream, odsReader, buf) + if err != nil { + return fmt.Errorf("writing ODS bytes: %w", err) + } + + return nil +} diff --git a/share/p2p/shrexnd/client.go b/share/p2p/shrexnd/client.go new file mode 100644 index 0000000000..86c5150095 --- /dev/null +++ b/share/p2p/shrexnd/client.go @@ -0,0 +1,223 @@ +package shrexnd + +import ( + "context" + "errors" + "fmt" + "io" + "net" + "time" + + "github.com/libp2p/go-libp2p/core/host" + "github.com/libp2p/go-libp2p/core/network" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/core/protocol" + + "github.com/celestiaorg/go-libp2p-messenger/serde" + "github.com/celestiaorg/nmt" + + "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/share/p2p" + pb "github.com/celestiaorg/celestia-node/share/p2p/shrexnd/pb" +) + +// Client implements client side of shrex/nd protocol to obtain namespaced shares data 
diff --git a/share/p2p/shrexnd/client.go b/share/p2p/shrexnd/client.go
new file mode 100644
index 0000000000..86c5150095
--- /dev/null
+++ b/share/p2p/shrexnd/client.go
@@ -0,0 +1,223 @@
+package shrexnd
+
+import (
+    "context"
+    "errors"
+    "fmt"
+    "io"
+    "net"
+    "time"
+
+    "github.com/libp2p/go-libp2p/core/host"
+    "github.com/libp2p/go-libp2p/core/network"
+    "github.com/libp2p/go-libp2p/core/peer"
+    "github.com/libp2p/go-libp2p/core/protocol"
+
+    "github.com/celestiaorg/go-libp2p-messenger/serde"
+    "github.com/celestiaorg/nmt"
+
+    "github.com/celestiaorg/celestia-node/share"
+    "github.com/celestiaorg/celestia-node/share/p2p"
+    pb "github.com/celestiaorg/celestia-node/share/p2p/shrexnd/pb"
+)
+
+// Client implements the client side of the shrex/nd protocol to obtain
+// namespaced shares data from remote peers.
+type Client struct {
+    params     *Parameters
+    protocolID protocol.ID
+
+    host    host.Host
+    metrics *p2p.Metrics
+}
+
+// NewClient creates a new shrex/nd client.
+func NewClient(params *Parameters, host host.Host) (*Client, error) {
+    if err := params.Validate(); err != nil {
+        return nil, fmt.Errorf("shrex-nd: client creation failed: %w", err)
+    }
+
+    return &Client{
+        host:       host,
+        protocolID: p2p.ProtocolID(params.NetworkID(), protocolString),
+        params:     params,
+    }, nil
+}
+
+// RequestND requests namespaced data from the given peer.
+// Returns NamespacedShares with unverified inclusion proofs against the share.Root.
+func (c *Client) RequestND(
+    ctx context.Context,
+    root *share.Root,
+    namespace share.Namespace,
+    peer peer.ID,
+) (share.NamespacedShares, error) {
+    if err := namespace.ValidateForData(); err != nil {
+        return nil, err
+    }
+
+    shares, err := c.doRequest(ctx, root, namespace, peer)
+    if err == nil {
+        return shares, nil
+    }
+    if errors.Is(err, context.DeadlineExceeded) || errors.Is(err, context.Canceled) {
+        c.metrics.ObserveRequests(ctx, 1, p2p.StatusTimeout)
+        return nil, err
+    }
+    // some net.Errors also mean the context deadline was exceeded, but yamux/mocknet do not
+    // unwrap to a ctx err
+    var ne net.Error
+    if errors.As(err, &ne) && ne.Timeout() {
+        if deadline, _ := ctx.Deadline(); deadline.Before(time.Now()) {
+            c.metrics.ObserveRequests(ctx, 1, p2p.StatusTimeout)
+            return nil, context.DeadlineExceeded
+        }
+    }
+    if err != p2p.ErrNotFound && err != p2p.ErrRateLimited {
+        log.Warnw("client-nd: peer returned err", "err", err)
+    }
+    return nil, err
+}
+
+func (c *Client) doRequest(
+    ctx context.Context,
+    root *share.Root,
+    namespace share.Namespace,
+    peerID peer.ID,
+) (share.NamespacedShares, error) {
+    stream, err := c.host.NewStream(ctx, peerID, c.protocolID)
+    if err != nil {
+        return nil, err
+    }
+    defer stream.Close()
+
+    c.setStreamDeadlines(ctx, stream)
+
+    req := &pb.GetSharesByNamespaceRequest{
+        RootHash:  root.Hash(),
+        Namespace: namespace,
+    }
+
+    _, err = serde.Write(stream, req)
+    if err != nil {
+        c.metrics.ObserveRequests(ctx, 1, p2p.StatusSendReqErr)
+        stream.Reset() //nolint:errcheck
+        return nil, fmt.Errorf("client-nd: writing request: %w", err)
+    }
+
+    err = stream.CloseWrite()
+    if err != nil {
+        log.Debugw("client-nd: closing write side of the stream", "err", err)
+    }
+
+    if err := c.readStatus(ctx, stream); err != nil {
+        return nil, err
+    }
+    return c.readNamespacedShares(ctx, stream)
+}
+
+func (c *Client) readStatus(ctx context.Context, stream network.Stream) error {
+    var resp pb.GetSharesByNamespaceStatusResponse
+    _, err := serde.Read(stream, &resp)
+    if err != nil {
+        // server is overloaded and closed the stream
+        if errors.Is(err, io.EOF) {
+            c.metrics.ObserveRequests(ctx, 1, p2p.StatusRateLimited)
+            return p2p.ErrRateLimited
+        }
+        c.metrics.ObserveRequests(ctx, 1, p2p.StatusReadRespErr)
+        stream.Reset() //nolint:errcheck
+        return fmt.Errorf("client-nd: reading status response: %w", err)
+    }
+
+    return c.convertStatusToErr(ctx, resp.Status)
+}
+
+// readNamespacedShares converts proto Rows to share.NamespacedShares
+func (c *Client) readNamespacedShares(
+    ctx context.Context,
+    stream network.Stream,
+) (share.NamespacedShares, error) {
+    var shares share.NamespacedShares
+    for {
+        var row pb.NamespaceRowResponse
+        _, err := serde.Read(stream, &row)
+        if err != nil {
+            if errors.Is(err, io.EOF) {
+                // all data is received and stream is closed by server
+                return shares, nil
+            }
+            c.metrics.ObserveRequests(ctx, 1, p2p.StatusReadRespErr)
+            return nil, err
+        }
+        var proof nmt.Proof
+        if row.Proof != nil {
+            if len(row.Shares) != 0 {
+                proof = nmt.NewInclusionProof(
+                    int(row.Proof.Start),
+                    int(row.Proof.End),
+                    row.Proof.Nodes,
+                    row.Proof.IsMaxNamespaceIgnored,
+                )
+            } else {
+                proof = nmt.NewAbsenceProof(
+                    int(row.Proof.Start),
+                    int(row.Proof.End),
+                    row.Proof.Nodes,
+                    row.Proof.LeafHash,
+                    row.Proof.IsMaxNamespaceIgnored,
+                )
+            }
+        }
+        shares = append(shares, share.NamespacedRow{
+            Shares: row.Shares,
+            Proof:  &proof,
+        })
+    }
+}
+
+func (c *Client) setStreamDeadlines(ctx context.Context, stream network.Stream) {
+    // set read/write deadline to use context deadline if it exists
+    deadline, ok := ctx.Deadline()
+    if ok {
+        err := stream.SetDeadline(deadline)
+        if err == nil {
+            return
+        }
+        log.Debugw("client-nd: set stream deadline", "err", err)
+    }
+
+    // if deadline not set, client read deadline defaults to server write deadline
+    if c.params.ServerWriteTimeout != 0 {
+        err := stream.SetReadDeadline(time.Now().Add(c.params.ServerWriteTimeout))
+        if err != nil {
+            log.Debugw("client-nd: set read deadline", "err", err)
+        }
+    }
+
+    // if deadline not set, client write deadline defaults to server read deadline
+    if c.params.ServerReadTimeout != 0 {
+        err := stream.SetWriteDeadline(time.Now().Add(c.params.ServerReadTimeout))
+        if err != nil {
+            log.Debugw("client-nd: set write deadline", "err", err)
+        }
+    }
+}
+
+func (c *Client) convertStatusToErr(ctx context.Context, status pb.StatusCode) error {
+    switch status {
+    case pb.StatusCode_OK:
+        c.metrics.ObserveRequests(ctx, 1, p2p.StatusSuccess)
+        return nil
+    case pb.StatusCode_NOT_FOUND:
+        c.metrics.ObserveRequests(ctx, 1, p2p.StatusNotFound)
+        return p2p.ErrNotFound
+    case pb.StatusCode_INVALID:
+        log.Warn("client-nd: invalid request")
+        fallthrough
+    case pb.StatusCode_INTERNAL:
+        fallthrough
+    default:
+        return p2p.ErrInvalidResponse
+    }
+}
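A caller of this client should distinguish the sentinel errors surfaced by readStatus and convertStatusToErr. A minimal sketch of that error handling; fetchNamespace is a hypothetical helper, and the Verify step is only noted in a comment since the verification helper is not part of this diff:

    package main // illustrative sketch, not part of this diff

    import (
        "context"
        "errors"

        "github.com/libp2p/go-libp2p/core/peer"

        "github.com/celestiaorg/celestia-node/share"
        "github.com/celestiaorg/celestia-node/share/p2p"
        "github.com/celestiaorg/celestia-node/share/p2p/shrexnd"
    )

    // fetchNamespace is a hypothetical helper showing the error classes a
    // caller should expect from RequestND.
    func fetchNamespace(
        ctx context.Context,
        client *shrexnd.Client,
        root *share.Root,
        namespace share.Namespace,
        p peer.ID,
    ) (share.NamespacedShares, error) {
        nd, err := client.RequestND(ctx, root, namespace, p)
        switch {
        case errors.Is(err, p2p.ErrNotFound):
            // the peer does not have the data; try another peer
            return nil, err
        case errors.Is(err, p2p.ErrRateLimited):
            // the peer is overloaded; back off before retrying it
            return nil, err
        case err != nil:
            // timeouts, resets, or p2p.ErrInvalidResponse
            return nil, err
        }
        // NOTE: RequestND returns the inclusion proofs unverified; verify
        // them against root before trusting the shares (e.g. via a helper
        // such as share.NamespacedShares.Verify, assumed to exist outside
        // this diff).
        return nd, nil
    }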
diff --git a/share/p2p/shrexnd/doc.go b/share/p2p/shrexnd/doc.go
new file mode 100644
index 0000000000..74ba7397e8
--- /dev/null
+++ b/share/p2p/shrexnd/doc.go
@@ -0,0 +1,43 @@
+// This package defines a protocol that is used to request namespaced data from peers in the network.
+//
+// This protocol is a request/response protocol that sends a request for specific data that
+// lives in a specific namespace and receives a response with the data.
+//
+// The streams are established using the protocol ID:
+//
+//   - "{networkID}/shrex/nd/v0.0.3" where networkID is the network ID of the network. (e.g. "arabica")
+//
+// The protocol uses protobuf to serialize and deserialize messages.
+//
+// # Usage
+//
+// To use a shrexnd client to request data from a peer, first create a new `shrexnd.Client` instance:
+//
+// 1. Create a new client using `NewClient` and pass in the parameters of the protocol and the host:
+//
+//	client, err := shrexnd.NewClient(params, host)
+//
+// 2. Request data from a peer by calling [Client.RequestND] on the client and
+// pass in the context, the data root, the namespace and the peer ID:
+//
+//	data, err := client.RequestND(ctx, dataRoot, namespace, peerID)
+//
+// where data is of type [share.NamespacedShares]
+//
+// To use a shrexnd server to respond to requests from peers, first create a new `shrexnd.Server` instance:
+//
+// 1. Create a new server using `NewServer` and pass in the parameters of
+// the protocol, the host and the store:
+//
+//	server, err := shrexnd.NewServer(params, host, store)
+//
+// where store is of type [eds.Store]
+//
+// 2. Start the server by calling `Start` on the server:
+//
+//	err := server.Start(ctx)
+//
+// 3. Stop the server by calling `Stop` on the server:
+//
+//	err := server.Stop(ctx)
+package shrexnd
diff --git a/share/p2p/shrexnd/exchange_test.go b/share/p2p/shrexnd/exchange_test.go
new file mode 100644
index 0000000000..cb8bbe9d74
--- /dev/null
+++ b/share/p2p/shrexnd/exchange_test.go
@@ -0,0 +1,134 @@
+package shrexnd
+
+import (
+    "context"
+    "sync"
+    "testing"
+    "time"
+
+    "github.com/ipfs/go-datastore"
+    ds_sync "github.com/ipfs/go-datastore/sync"
+    libhost "github.com/libp2p/go-libp2p/core/host"
+    "github.com/libp2p/go-libp2p/core/network"
+    mocknet "github.com/libp2p/go-libp2p/p2p/net/mock"
+    "github.com/stretchr/testify/require"
+
+    "github.com/celestiaorg/celestia-node/share"
+    "github.com/celestiaorg/celestia-node/share/eds"
+    "github.com/celestiaorg/celestia-node/share/eds/edstest"
+    "github.com/celestiaorg/celestia-node/share/p2p"
+    "github.com/celestiaorg/celestia-node/share/sharetest"
+)
+
+func TestExchange_RequestND_NotFound(t *testing.T) {
+    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+    t.Cleanup(cancel)
+    edsStore, client, server := makeExchange(t)
+    require.NoError(t, edsStore.Start(ctx))
+    require.NoError(t, server.Start(ctx))
+
+    t.Run("CAR_not_exist", func(t *testing.T) {
+        ctx, cancel := context.WithTimeout(ctx, time.Second)
+        t.Cleanup(cancel)
+
+        root := share.Root{}
+        namespace := sharetest.RandV0Namespace()
+        _, err := client.RequestND(ctx, &root, namespace, server.host.ID())
+        require.ErrorIs(t, err, p2p.ErrNotFound)
+    })
+
+    t.Run("ErrNamespaceNotFound", func(t *testing.T) {
+        ctx, cancel := context.WithTimeout(ctx, time.Second)
+        t.Cleanup(cancel)
+
+        eds := edstest.RandEDS(t, 4)
+        dah, err := share.NewRoot(eds)
+        require.NoError(t, err)
+        require.NoError(t, edsStore.Put(ctx, dah.Hash(), eds))
+
+        namespace := sharetest.RandV0Namespace()
+        emptyShares, err := client.RequestND(ctx, dah, namespace, server.host.ID())
+        require.NoError(t, err)
+        require.Empty(t, emptyShares.Flatten())
+    })
+}
+
+func TestExchange_RequestND(t *testing.T) {
+    t.Run("ND_concurrency_limit", func(t *testing.T) {
+        net, err := mocknet.FullMeshConnected(2)
+        require.NoError(t, err)
+
+        client, err := NewClient(DefaultParameters(), net.Hosts()[0])
+        require.NoError(t, err)
+        server, err := NewServer(DefaultParameters(), net.Hosts()[1], nil)
+        require.NoError(t, err)
+
+        require.NoError(t, server.Start(context.Background()))
+
+        ctx, cancel := context.WithTimeout(context.Background(), time.Second)
+        t.Cleanup(cancel)
+
+        rateLimit := 2
+        wg := sync.WaitGroup{}
+        wg.Add(rateLimit)
+
+        // mockHandler will block requests on server side until test is over
+        lock := make(chan struct{})
+        defer close(lock)
+        mockHandler := func(network.Stream) {
+            wg.Done()
+            select {
+            case <-lock:
+            case <-ctx.Done():
+                t.Fatal("timeout")
+            }
+        }
+        middleware := p2p.NewMiddleware(rateLimit)
+        server.host.SetStreamHandler(server.protocolID,
+            middleware.RateLimitHandler(mockHandler))
+
+        // take server concurrency slots with blocked requests
+        for i := 0; i < rateLimit; i++ {
+            go func(i int) {
+                client.RequestND(ctx, nil, sharetest.RandV0Namespace(), server.host.ID()) //nolint:errcheck
+            }(i)
+        }
+
+        // wait until all server slots are taken
+        wg.Wait()
+        _, err = client.RequestND(ctx, nil, sharetest.RandV0Namespace(), server.host.ID())
+        require.ErrorIs(t, err, p2p.ErrRateLimited)
+    })
+}
+
+func newStore(t *testing.T) *eds.Store {
+    t.Helper()
+
+    storeCfg := eds.DefaultParameters()
+    ds := ds_sync.MutexWrap(datastore.NewMapDatastore())
+    store, err := eds.NewStore(storeCfg, t.TempDir(), ds)
+    require.NoError(t, err)
+    return store
+}
+
+func createMocknet(t *testing.T, amount int) []libhost.Host {
+    t.Helper()
+
+    net, err := mocknet.FullMeshConnected(amount)
+    require.NoError(t, err)
+    // get host and peer
+    return net.Hosts()
+}
+
+func makeExchange(t *testing.T) (*eds.Store, *Client, *Server) {
+    t.Helper()
+    store := newStore(t)
+    hosts := createMocknet(t, 2)
+
+    client, err := NewClient(DefaultParameters(), hosts[0])
+    require.NoError(t, err)
+    server, err := NewServer(DefaultParameters(), hosts[1], store)
+    require.NoError(t, err)
+
+    return store, client, server
+}
diff --git a/share/p2p/shrexnd/params.go b/share/p2p/shrexnd/params.go
new file mode 100644
index 0000000000..8489627a07
--- /dev/null
+++ b/share/p2p/shrexnd/params.go
@@ -0,0 +1,38 @@
+package shrexnd
+
+import (
+    "fmt"
+
+    logging "github.com/ipfs/go-log/v2"
+
+    "github.com/celestiaorg/celestia-node/share/p2p"
+)
+
+const protocolString = "/shrex/nd/v0.0.3"
+
+var log = logging.Logger("shrex/nd")
+
+// Parameters is the set of parameters that must be configured for the shrex/nd protocol.
+type Parameters = p2p.Parameters
+
+func DefaultParameters() *Parameters {
+    return p2p.DefaultParameters()
+}
+
+func (c *Client) WithMetrics() error {
+    metrics, err := p2p.InitClientMetrics("nd")
+    if err != nil {
+        return fmt.Errorf("shrex/nd: init Metrics: %w", err)
+    }
+    c.metrics = metrics
+    return nil
+}
+
+func (srv *Server) WithMetrics() error {
+    metrics, err := p2p.InitServerMetrics("nd")
+    if err != nil {
+        return fmt.Errorf("shrex/nd: init Metrics: %w", err)
+    }
+    srv.metrics = metrics
+    return nil
+}
diff --git a/share/p2p/shrexnd/pb/share.pb.go b/share/p2p/shrexnd/pb/share.pb.go
new file mode 100644
index 0000000000..7e3c11416f
--- /dev/null
+++ b/share/p2p/shrexnd/pb/share.pb.go
@@ -0,0 +1,801 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: share/p2p/shrexnd/pb/share.proto
+
+package share_p2p_shrex_nd
+
+import (
+    fmt "fmt"
+    pb "github.com/celestiaorg/nmt/pb"
+    proto "github.com/gogo/protobuf/proto"
+    io "io"
+    math "math"
+    math_bits "math/bits"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type StatusCode int32 + +const ( + StatusCode_INVALID StatusCode = 0 + StatusCode_OK StatusCode = 1 + StatusCode_NOT_FOUND StatusCode = 2 + StatusCode_INTERNAL StatusCode = 3 +) + +var StatusCode_name = map[int32]string{ + 0: "INVALID", + 1: "OK", + 2: "NOT_FOUND", + 3: "INTERNAL", +} + +var StatusCode_value = map[string]int32{ + "INVALID": 0, + "OK": 1, + "NOT_FOUND": 2, + "INTERNAL": 3, +} + +func (x StatusCode) String() string { + return proto.EnumName(StatusCode_name, int32(x)) +} + +func (StatusCode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_ed9f13149b0de397, []int{0} +} + +type GetSharesByNamespaceRequest struct { + RootHash []byte `protobuf:"bytes,1,opt,name=root_hash,json=rootHash,proto3" json:"root_hash,omitempty"` + Namespace []byte `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"` +} + +func (m *GetSharesByNamespaceRequest) Reset() { *m = GetSharesByNamespaceRequest{} } +func (m *GetSharesByNamespaceRequest) String() string { return proto.CompactTextString(m) } +func (*GetSharesByNamespaceRequest) ProtoMessage() {} +func (*GetSharesByNamespaceRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_ed9f13149b0de397, []int{0} +} +func (m *GetSharesByNamespaceRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GetSharesByNamespaceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GetSharesByNamespaceRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GetSharesByNamespaceRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetSharesByNamespaceRequest.Merge(m, src) +} +func (m *GetSharesByNamespaceRequest) XXX_Size() int { + return m.Size() +} +func (m *GetSharesByNamespaceRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetSharesByNamespaceRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetSharesByNamespaceRequest proto.InternalMessageInfo + +func (m *GetSharesByNamespaceRequest) GetRootHash() []byte { + if m != nil { + return m.RootHash + } + return nil +} + +func (m *GetSharesByNamespaceRequest) GetNamespace() []byte { + if m != nil { + return m.Namespace + } + return nil +} + +type GetSharesByNamespaceStatusResponse struct { + Status StatusCode `protobuf:"varint,1,opt,name=status,proto3,enum=share.p2p.shrex.nd.StatusCode" json:"status,omitempty"` +} + +func (m *GetSharesByNamespaceStatusResponse) Reset() { *m = GetSharesByNamespaceStatusResponse{} } +func (m *GetSharesByNamespaceStatusResponse) String() string { return proto.CompactTextString(m) } +func (*GetSharesByNamespaceStatusResponse) ProtoMessage() {} +func (*GetSharesByNamespaceStatusResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_ed9f13149b0de397, []int{1} +} +func (m *GetSharesByNamespaceStatusResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GetSharesByNamespaceStatusResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GetSharesByNamespaceStatusResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GetSharesByNamespaceStatusResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetSharesByNamespaceStatusResponse.Merge(m, src) +} 
+func (m *GetSharesByNamespaceStatusResponse) XXX_Size() int { + return m.Size() +} +func (m *GetSharesByNamespaceStatusResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetSharesByNamespaceStatusResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetSharesByNamespaceStatusResponse proto.InternalMessageInfo + +func (m *GetSharesByNamespaceStatusResponse) GetStatus() StatusCode { + if m != nil { + return m.Status + } + return StatusCode_INVALID +} + +type NamespaceRowResponse struct { + Shares [][]byte `protobuf:"bytes,1,rep,name=shares,proto3" json:"shares,omitempty"` + Proof *pb.Proof `protobuf:"bytes,2,opt,name=proof,proto3" json:"proof,omitempty"` +} + +func (m *NamespaceRowResponse) Reset() { *m = NamespaceRowResponse{} } +func (m *NamespaceRowResponse) String() string { return proto.CompactTextString(m) } +func (*NamespaceRowResponse) ProtoMessage() {} +func (*NamespaceRowResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_ed9f13149b0de397, []int{2} +} +func (m *NamespaceRowResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NamespaceRowResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_NamespaceRowResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *NamespaceRowResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_NamespaceRowResponse.Merge(m, src) +} +func (m *NamespaceRowResponse) XXX_Size() int { + return m.Size() +} +func (m *NamespaceRowResponse) XXX_DiscardUnknown() { + xxx_messageInfo_NamespaceRowResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_NamespaceRowResponse proto.InternalMessageInfo + +func (m *NamespaceRowResponse) GetShares() [][]byte { + if m != nil { + return m.Shares + } + return nil +} + +func (m *NamespaceRowResponse) GetProof() *pb.Proof { + if m != nil { + return m.Proof + } + return nil +} + +func init() { + proto.RegisterEnum("share.p2p.shrex.nd.StatusCode", StatusCode_name, StatusCode_value) + proto.RegisterType((*GetSharesByNamespaceRequest)(nil), "share.p2p.shrex.nd.GetSharesByNamespaceRequest") + proto.RegisterType((*GetSharesByNamespaceStatusResponse)(nil), "share.p2p.shrex.nd.GetSharesByNamespaceStatusResponse") + proto.RegisterType((*NamespaceRowResponse)(nil), "share.p2p.shrex.nd.NamespaceRowResponse") +} + +func init() { proto.RegisterFile("share/p2p/shrexnd/pb/share.proto", fileDescriptor_ed9f13149b0de397) } + +var fileDescriptor_ed9f13149b0de397 = []byte{ + // 326 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0x4f, 0x4b, 0xf3, 0x40, + 0x10, 0xc6, 0x93, 0x96, 0x37, 0x6f, 0x3b, 0xad, 0x35, 0x2c, 0x22, 0xc5, 0xca, 0x52, 0x02, 0x42, + 0xf1, 0xb0, 0x81, 0x08, 0x1e, 0x85, 0xd6, 0xfa, 0xa7, 0x58, 0x52, 0xd9, 0xb6, 0xe2, 0x41, 0x28, + 0x1b, 0xbb, 0x92, 0x8b, 0xd9, 0x35, 0xbb, 0x45, 0xfd, 0x16, 0x7e, 0x2c, 0x8f, 0x3d, 0x7a, 0x94, + 0xf6, 0x8b, 0x48, 0xb6, 0xd1, 0x1c, 0xf4, 0xb6, 0xf3, 0xcc, 0x33, 0xbf, 0x7d, 0x66, 0xa0, 0xad, + 0x62, 0x96, 0x72, 0x5f, 0x06, 0xd2, 0x57, 0x71, 0xca, 0x5f, 0x92, 0xb9, 0x2f, 0x23, 0xdf, 0x88, + 0x44, 0xa6, 0x42, 0x0b, 0x84, 0xf2, 0x22, 0x90, 0xc4, 0x38, 0x48, 0x32, 0xdf, 0x6b, 0xc8, 0xc8, + 0x97, 0xa9, 0x10, 0x0f, 0x1b, 0x8f, 0x77, 0x0b, 0xad, 0x0b, 0xae, 0xc7, 0x99, 0x51, 0xf5, 0x5e, + 0x43, 0xf6, 0xc8, 0x95, 0x64, 0xf7, 0x9c, 0xf2, 0xa7, 0x05, 0x57, 0x1a, 0xb5, 0xa0, 0x9a, 0x0a, + 0xa1, 0x67, 0x31, 0x53, 0x71, 0xd3, 
0x6e, 0xdb, 0x9d, 0x3a, 0xad, 0x64, 0xc2, 0x25, 0x53, 0x31, + 0xda, 0x87, 0x6a, 0xf2, 0x3d, 0xd0, 0x2c, 0x99, 0x66, 0x21, 0x78, 0x77, 0xe0, 0xfd, 0x45, 0x1e, + 0x6b, 0xa6, 0x17, 0x8a, 0x72, 0x25, 0x45, 0xa2, 0x38, 0x3a, 0x06, 0x47, 0x19, 0xc5, 0xd0, 0x1b, + 0x01, 0x26, 0xbf, 0x43, 0x93, 0xcd, 0xcc, 0xa9, 0x98, 0x73, 0x9a, 0xbb, 0xbd, 0x29, 0xec, 0x14, + 0x61, 0xc5, 0xf3, 0x0f, 0x6f, 0x17, 0x1c, 0x03, 0xc8, 0x78, 0xe5, 0x4e, 0x9d, 0xe6, 0x15, 0x3a, + 0x80, 0x7f, 0x66, 0x6d, 0x93, 0xb3, 0x16, 0x6c, 0x93, 0xfc, 0x08, 0x11, 0xb9, 0xce, 0x1e, 0x74, + 0xd3, 0x3d, 0x3c, 0x01, 0x28, 0x3e, 0x43, 0x35, 0xf8, 0x3f, 0x08, 0x6f, 0xba, 0xc3, 0x41, 0xdf, + 0xb5, 0x90, 0x03, 0xa5, 0xd1, 0x95, 0x6b, 0xa3, 0x2d, 0xa8, 0x86, 0xa3, 0xc9, 0xec, 0x7c, 0x34, + 0x0d, 0xfb, 0x6e, 0x09, 0xd5, 0xa1, 0x32, 0x08, 0x27, 0x67, 0x34, 0xec, 0x0e, 0xdd, 0x72, 0xaf, + 0xf9, 0xbe, 0xc2, 0xf6, 0x72, 0x85, 0xed, 0xcf, 0x15, 0xb6, 0xdf, 0xd6, 0xd8, 0x5a, 0xae, 0xb1, + 0xf5, 0xb1, 0xc6, 0x56, 0xe4, 0x98, 0x7b, 0x1f, 0x7d, 0x05, 0x00, 0x00, 0xff, 0xff, 0x1a, 0x53, + 0xb4, 0x86, 0xb7, 0x01, 0x00, 0x00, +} + +func (m *GetSharesByNamespaceRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetSharesByNamespaceRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GetSharesByNamespaceRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Namespace) > 0 { + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintShare(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0x12 + } + if len(m.RootHash) > 0 { + i -= len(m.RootHash) + copy(dAtA[i:], m.RootHash) + i = encodeVarintShare(dAtA, i, uint64(len(m.RootHash))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *GetSharesByNamespaceStatusResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetSharesByNamespaceStatusResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GetSharesByNamespaceStatusResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Status != 0 { + i = encodeVarintShare(dAtA, i, uint64(m.Status)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *NamespaceRowResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NamespaceRowResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NamespaceRowResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Proof != nil { + { + size, err := m.Proof.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintShare(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Shares) > 0 { + for iNdEx := len(m.Shares) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Shares[iNdEx]) + copy(dAtA[i:], m.Shares[iNdEx]) + i = encodeVarintShare(dAtA, i, uint64(len(m.Shares[iNdEx]))) + i-- + 
dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarintShare(dAtA []byte, offset int, v uint64) int { + offset -= sovShare(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *GetSharesByNamespaceRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.RootHash) + if l > 0 { + n += 1 + l + sovShare(uint64(l)) + } + l = len(m.Namespace) + if l > 0 { + n += 1 + l + sovShare(uint64(l)) + } + return n +} + +func (m *GetSharesByNamespaceStatusResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Status != 0 { + n += 1 + sovShare(uint64(m.Status)) + } + return n +} + +func (m *NamespaceRowResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Shares) > 0 { + for _, b := range m.Shares { + l = len(b) + n += 1 + l + sovShare(uint64(l)) + } + } + if m.Proof != nil { + l = m.Proof.Size() + n += 1 + l + sovShare(uint64(l)) + } + return n +} + +func sovShare(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozShare(x uint64) (n int) { + return sovShare(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *GetSharesByNamespaceRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShare + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetSharesByNamespaceRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetSharesByNamespaceRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RootHash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShare + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthShare + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthShare + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RootHash = append(m.RootHash[:0], dAtA[iNdEx:postIndex]...) + if m.RootHash == nil { + m.RootHash = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShare + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthShare + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthShare + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = append(m.Namespace[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Namespace == nil { + m.Namespace = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipShare(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthShare + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetSharesByNamespaceStatusResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShare + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetSharesByNamespaceStatusResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetSharesByNamespaceStatusResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + m.Status = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShare + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Status |= StatusCode(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipShare(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthShare + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NamespaceRowResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShare + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NamespaceRowResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NamespaceRowResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Shares", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShare + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthShare + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthShare + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Shares = append(m.Shares, make([]byte, postIndex-iNdEx)) + copy(m.Shares[len(m.Shares)-1], dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Proof", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShare + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] 
+ iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthShare + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthShare + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Proof == nil { + m.Proof = &pb.Proof{} + } + if err := m.Proof.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipShare(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthShare + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipShare(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowShare + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowShare + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowShare + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthShare + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupShare + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthShare + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthShare = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowShare = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupShare = fmt.Errorf("proto: unexpected end of group") +) diff --git a/share/p2p/shrexnd/pb/share.proto b/share/p2p/shrexnd/pb/share.proto new file mode 100644 index 0000000000..a5bdbfa071 --- /dev/null +++ b/share/p2p/shrexnd/pb/share.proto @@ -0,0 +1,25 @@ +syntax = "proto3"; + +package share.p2p.shrex.nd; +import "pb/proof.proto"; + +message GetSharesByNamespaceRequest{ + bytes root_hash = 1; + bytes namespace = 2; +} + +message GetSharesByNamespaceStatusResponse{ + StatusCode status = 1; +} + +enum StatusCode { + INVALID = 0; + OK = 1; + NOT_FOUND = 2; + INTERNAL = 3; +}; + +message NamespaceRowResponse { + repeated bytes shares = 1; + proof.pb.Proof proof = 2; +} diff --git a/share/p2p/shrexnd/server.go b/share/p2p/shrexnd/server.go new file mode 100644 index 0000000000..33e61ff472 --- /dev/null +++ b/share/p2p/shrexnd/server.go @@ -0,0 +1,254 @@ +package shrexnd + +import ( + "context" + "crypto/sha256" + "errors" + "fmt" + "time" + + "github.com/libp2p/go-libp2p/core/host" + "github.com/libp2p/go-libp2p/core/network" + "github.com/libp2p/go-libp2p/core/protocol" + "go.uber.org/zap" + + "github.com/celestiaorg/go-libp2p-messenger/serde" + nmt_pb "github.com/celestiaorg/nmt/pb" + + "github.com/celestiaorg/celestia-node/share" + 
"github.com/celestiaorg/celestia-node/share/eds" + "github.com/celestiaorg/celestia-node/share/p2p" + pb "github.com/celestiaorg/celestia-node/share/p2p/shrexnd/pb" +) + +// Server implements server side of shrex/nd protocol to serve namespaced share to remote +// peers. +type Server struct { + cancel context.CancelFunc + + host host.Host + protocolID protocol.ID + + handler network.StreamHandler + store *eds.Store + + params *Parameters + middleware *p2p.Middleware + metrics *p2p.Metrics +} + +// NewServer creates new Server +func NewServer(params *Parameters, host host.Host, store *eds.Store) (*Server, error) { + if err := params.Validate(); err != nil { + return nil, fmt.Errorf("shrex-nd: server creation failed: %w", err) + } + + srv := &Server{ + store: store, + host: host, + params: params, + protocolID: p2p.ProtocolID(params.NetworkID(), protocolString), + middleware: p2p.NewMiddleware(params.ConcurrencyLimit), + } + + ctx, cancel := context.WithCancel(context.Background()) + srv.cancel = cancel + + srv.handler = srv.middleware.RateLimitHandler(srv.streamHandler(ctx)) + return srv, nil +} + +// Start starts the server +func (srv *Server) Start(context.Context) error { + srv.host.SetStreamHandler(srv.protocolID, srv.handler) + return nil +} + +// Stop stops the server +func (srv *Server) Stop(context.Context) error { + srv.cancel() + srv.host.RemoveStreamHandler(srv.protocolID) + return nil +} + +func (srv *Server) streamHandler(ctx context.Context) network.StreamHandler { + return func(s network.Stream) { + err := srv.handleNamespacedData(ctx, s) + if err != nil { + s.Reset() //nolint:errcheck + return + } + if err = s.Close(); err != nil { + log.Debugw("server: closing stream", "err", err) + } + } +} + +// SetHandler sets server handler +func (srv *Server) SetHandler(handler network.StreamHandler) { + srv.handler = handler +} + +func (srv *Server) observeRateLimitedRequests() { + numRateLimited := srv.middleware.DrainCounter() + if numRateLimited > 0 { + srv.metrics.ObserveRequests(context.Background(), numRateLimited, p2p.StatusRateLimited) + } +} + +func (srv *Server) handleNamespacedData(ctx context.Context, stream network.Stream) error { + logger := log.With("source", "server", "peer", stream.Conn().RemotePeer().String()) + logger.Debug("handling nd request") + + srv.observeRateLimitedRequests() + req, err := srv.readRequest(logger, stream) + if err != nil { + logger.Warnw("read request", "err", err) + srv.metrics.ObserveRequests(ctx, 1, p2p.StatusBadRequest) + return err + } + + logger = logger.With("namespace", share.Namespace(req.Namespace).String(), + "hash", share.DataHash(req.RootHash).String()) + + ctx, cancel := context.WithTimeout(ctx, srv.params.HandleRequestTimeout) + defer cancel() + + shares, status, err := srv.getNamespaceData(ctx, req.RootHash, req.Namespace) + if err != nil { + // server should respond with status regardless if there was an error getting data + sendErr := srv.respondStatus(ctx, logger, stream, status) + if sendErr != nil { + logger.Errorw("sending response", "err", sendErr) + srv.metrics.ObserveRequests(ctx, 1, p2p.StatusSendRespErr) + } + logger.Errorw("handling request", "err", err) + return errors.Join(err, sendErr) + } + + err = srv.respondStatus(ctx, logger, stream, status) + if err != nil { + logger.Errorw("sending response", "err", err) + srv.metrics.ObserveRequests(ctx, 1, p2p.StatusSendRespErr) + return err + } + + err = srv.sendNamespacedShares(shares, stream) + if err != nil { + logger.Errorw("send nd data", "err", err) + 
+        srv.metrics.ObserveRequests(ctx, 1, p2p.StatusSendRespErr)
+        return err
+    }
+    return nil
+}
+
+func (srv *Server) readRequest(
+    logger *zap.SugaredLogger,
+    stream network.Stream,
+) (*pb.GetSharesByNamespaceRequest, error) {
+    err := stream.SetReadDeadline(time.Now().Add(srv.params.ServerReadTimeout))
+    if err != nil {
+        logger.Debugw("setting read deadline", "err", err)
+    }
+
+    var req pb.GetSharesByNamespaceRequest
+    _, err = serde.Read(stream, &req)
+    if err != nil {
+        return nil, fmt.Errorf("reading request: %w", err)
+    }
+
+    logger.Debugw("new request")
+    err = stream.CloseRead()
+    if err != nil {
+        logger.Debugw("closing read side of the stream", "err", err)
+    }
+
+    err = validateRequest(req)
+    if err != nil {
+        return nil, fmt.Errorf("invalid request: %w", err)
+    }
+    return &req, nil
+}
+
+func (srv *Server) getNamespaceData(ctx context.Context,
+    hash share.DataHash, namespace share.Namespace) (share.NamespacedShares, pb.StatusCode, error) {
+    dah, err := srv.store.GetDAH(ctx, hash)
+    if err != nil {
+        if errors.Is(err, eds.ErrNotFound) {
+            return nil, pb.StatusCode_NOT_FOUND, nil
+        }
+        return nil, pb.StatusCode_INTERNAL, fmt.Errorf("retrieving DAH: %w", err)
+    }
+
+    shares, err := eds.RetrieveNamespaceFromStore(ctx, srv.store, dah, namespace)
+    if err != nil {
+        return nil, pb.StatusCode_INTERNAL, fmt.Errorf("retrieving shares: %w", err)
+    }
+
+    return shares, pb.StatusCode_OK, nil
+}
+
+func (srv *Server) respondStatus(
+    ctx context.Context,
+    logger *zap.SugaredLogger,
+    stream network.Stream,
+    status pb.StatusCode,
+) error {
+    srv.observeStatus(ctx, status)
+
+    err := stream.SetWriteDeadline(time.Now().Add(srv.params.ServerWriteTimeout))
+    if err != nil {
+        logger.Debugw("setting write deadline", "err", err)
+    }
+
+    _, err = serde.Write(stream, &pb.GetSharesByNamespaceStatusResponse{Status: status})
+    if err != nil {
+        return fmt.Errorf("writing response: %w", err)
+    }
+
+    return nil
+}
+
+// sendNamespacedShares encodes shares into proto messages and sends them to the client
+func (srv *Server) sendNamespacedShares(shares share.NamespacedShares, stream network.Stream) error {
+    for _, row := range shares {
+        row := &pb.NamespaceRowResponse{
+            Shares: row.Shares,
+            Proof: &nmt_pb.Proof{
+                Start:                 int64(row.Proof.Start()),
+                End:                   int64(row.Proof.End()),
+                Nodes:                 row.Proof.Nodes(),
+                LeafHash:              row.Proof.LeafHash(),
+                IsMaxNamespaceIgnored: row.Proof.IsMaxNamespaceIDIgnored(),
+            },
+        }
+        _, err := serde.Write(stream, row)
+        if err != nil {
+            return fmt.Errorf("writing nd data to stream: %w", err)
+        }
+    }
+    return nil
+}
+
+func (srv *Server) observeStatus(ctx context.Context, status pb.StatusCode) {
+    switch {
+    case status == pb.StatusCode_OK:
+        srv.metrics.ObserveRequests(ctx, 1, p2p.StatusSuccess)
+    case status == pb.StatusCode_NOT_FOUND:
+        srv.metrics.ObserveRequests(ctx, 1, p2p.StatusNotFound)
+    case status == pb.StatusCode_INTERNAL:
+        srv.metrics.ObserveRequests(ctx, 1, p2p.StatusInternalErr)
+    }
+}
+
+// validateRequest checks correctness of the request
+func validateRequest(req pb.GetSharesByNamespaceRequest) error {
+    if err := share.Namespace(req.Namespace).ValidateForData(); err != nil {
+        return err
+    }
+    if len(req.RootHash) != sha256.Size {
+        return fmt.Errorf("incorrect root hash length: %v", len(req.RootHash))
+    }
+    return nil
+}
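Putting the pieces together, a node would construct this server with its EDS store and, optionally, enable the metrics defined in params.go above. A minimal sketch; startNDServer is a hypothetical helper:

    package main // illustrative sketch, not part of this diff

    import (
        "context"

        "github.com/libp2p/go-libp2p/core/host"

        "github.com/celestiaorg/celestia-node/share/eds"
        "github.com/celestiaorg/celestia-node/share/p2p/shrexnd"
    )

    // startNDServer constructs the shrex-nd server, enables its metrics and
    // registers the rate-limited stream handler.
    func startNDServer(ctx context.Context, h host.Host, store *eds.Store) (*shrexnd.Server, error) {
        srv, err := shrexnd.NewServer(shrexnd.DefaultParameters(), h, store)
        if err != nil {
            return nil, err
        }
        // WithMetrics (defined in params.go above) wires the counters fed by
        // observeStatus and observeRateLimitedRequests.
        if err := srv.WithMetrics(); err != nil {
            return nil, err
        }
        return srv, srv.Start(ctx)
    }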
diff --git a/share/p2p/shrexsub/doc.go b/share/p2p/shrexsub/doc.go
new file mode 100644
index 0000000000..95d08361a2
--- /dev/null
+++ b/share/p2p/shrexsub/doc.go
@@ -0,0 +1,58 @@
+// This package defines a protocol that is used to broadcast shares to peers over a pubsub network.
+//
+// This protocol runs on a rudimentary floodsub network; it is primarily a pubsub protocol
+// that broadcasts and listens for shares over a pubsub topic.
+//
+// The pubsub topic used by this protocol is:
+//
+//	"{networkID}/eds-sub/v0.1.0"
+//
+// where networkID is the network ID of the celestia-node that is running the protocol. (e.g. "arabica")
+//
+// # Usage
+//
+// To use this protocol, you must first create a new `shrexsub.PubSub` instance by:
+//
+//	pubsub, err := shrexsub.NewPubSub(ctx, host, networkID)
+//
+// where host is the libp2p host that is running the protocol, and networkID is the network ID of the celestia-node
+// that is running the protocol.
+//
+// After this, you can start the pubsub protocol by:
+//
+//	err := pubsub.Start(ctx)
+//
+// Once you have started the `shrexsub.PubSub` instance, you can broadcast a share by:
+//
+//	err := pubsub.Broadcast(ctx, notification)
+//
+// where `notification` is of type [shrexsub.Notification]: `DataHash` is the hash of the share
+// that you want to broadcast and `Height` is the height at which it was committed.
+//
+// You can also subscribe to the pubsub topic by:
+//
+//	sub, err := pubsub.Subscribe()
+//
+// and then receive notifications by:
+//
+//	for {
+//		notification, err := sub.Next(ctx)
+//		if err != nil {
+//			// the context was canceled or the subscription was closed
+//			sub.Cancel()
+//			return
+//		}
+//		// handle notification
+//	}
+//
+// You can also validate the received pubsub messages by using the [PubSub.AddValidator] method:
+//
+//	pubsub.AddValidator(validator ValidatorFn)
+//
+// where `validator` is of type [shrexsub.ValidatorFn].
+//
+// You can also stop the pubsub protocol by:
+//
+//	err := pubsub.Stop(ctx)
package shrexsub
diff --git a/share/p2p/shrexsub/pb/notification.pb.go b/share/p2p/shrexsub/pb/notification.pb.go
new file mode 100644
index 0000000000..e154dc62b7
--- /dev/null
+++ b/share/p2p/shrexsub/pb/notification.pb.go
@@ -0,0 +1,354 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: share/p2p/shrexsub/pb/notification.proto
+
+package share_p2p_shrex_sub
+
+import (
+    fmt "fmt"
+    proto "github.com/gogo/protobuf/proto"
+    io "io"
+    math "math"
+    math_bits "math/bits"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type RecentEDSNotification struct { + Height uint64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` + DataHash []byte `protobuf:"bytes,2,opt,name=data_hash,json=dataHash,proto3" json:"data_hash,omitempty"` +} + +func (m *RecentEDSNotification) Reset() { *m = RecentEDSNotification{} } +func (m *RecentEDSNotification) String() string { return proto.CompactTextString(m) } +func (*RecentEDSNotification) ProtoMessage() {} +func (*RecentEDSNotification) Descriptor() ([]byte, []int) { + return fileDescriptor_1a6ade914b560e62, []int{0} +} +func (m *RecentEDSNotification) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RecentEDSNotification) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RecentEDSNotification.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RecentEDSNotification) XXX_Merge(src proto.Message) { + xxx_messageInfo_RecentEDSNotification.Merge(m, src) +} +func (m *RecentEDSNotification) XXX_Size() int { + return m.Size() +} +func (m *RecentEDSNotification) XXX_DiscardUnknown() { + xxx_messageInfo_RecentEDSNotification.DiscardUnknown(m) +} + +var xxx_messageInfo_RecentEDSNotification proto.InternalMessageInfo + +func (m *RecentEDSNotification) GetHeight() uint64 { + if m != nil { + return m.Height + } + return 0 +} + +func (m *RecentEDSNotification) GetDataHash() []byte { + if m != nil { + return m.DataHash + } + return nil +} + +func init() { + proto.RegisterType((*RecentEDSNotification)(nil), "share.p2p.shrex.sub.RecentEDSNotification") +} + +func init() { + proto.RegisterFile("share/p2p/shrexsub/pb/notification.proto", fileDescriptor_1a6ade914b560e62) +} + +var fileDescriptor_1a6ade914b560e62 = []byte{ + // 176 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xd2, 0x28, 0xce, 0x48, 0x2c, + 0x4a, 0xd5, 0x2f, 0x30, 0x2a, 0xd0, 0x2f, 0xce, 0x28, 0x4a, 0xad, 0x28, 0x2e, 0x4d, 0xd2, 0x2f, + 0x48, 0xd2, 0xcf, 0xcb, 0x2f, 0xc9, 0x4c, 0xcb, 0x4c, 0x4e, 0x2c, 0xc9, 0xcc, 0xcf, 0xd3, 0x2b, + 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x06, 0xab, 0xd4, 0x2b, 0x30, 0x2a, 0xd0, 0x03, 0xab, 0xd4, + 0x2b, 0x2e, 0x4d, 0x52, 0xf2, 0xe1, 0x12, 0x0d, 0x4a, 0x4d, 0x4e, 0xcd, 0x2b, 0x71, 0x75, 0x09, + 0xf6, 0x43, 0xd2, 0x23, 0x24, 0xc6, 0xc5, 0x96, 0x91, 0x9a, 0x99, 0x9e, 0x51, 0x22, 0xc1, 0xa8, + 0xc0, 0xa8, 0xc1, 0x12, 0x04, 0xe5, 0x09, 0x49, 0x73, 0x71, 0xa6, 0x24, 0x96, 0x24, 0xc6, 0x67, + 0x24, 0x16, 0x67, 0x48, 0x30, 0x29, 0x30, 0x6a, 0xf0, 0x04, 0x71, 0x80, 0x04, 0x3c, 0x12, 0x8b, + 0x33, 0x9c, 0x24, 0x4e, 0x3c, 0x92, 0x63, 0xbc, 0xf0, 0x48, 0x8e, 0xf1, 0xc1, 0x23, 0x39, 0xc6, + 0x09, 0x8f, 0xe5, 0x18, 0x2e, 0x3c, 0x96, 0x63, 0xb8, 0xf1, 0x58, 0x8e, 0x21, 0x89, 0x0d, 0xec, + 0x06, 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0x99, 0x16, 0xea, 0xc6, 0xaf, 0x00, 0x00, 0x00, +} + +func (m *RecentEDSNotification) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RecentEDSNotification) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RecentEDSNotification) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + 
var l int + _ = l + if len(m.DataHash) > 0 { + i -= len(m.DataHash) + copy(dAtA[i:], m.DataHash) + i = encodeVarintNotification(dAtA, i, uint64(len(m.DataHash))) + i-- + dAtA[i] = 0x12 + } + if m.Height != 0 { + i = encodeVarintNotification(dAtA, i, uint64(m.Height)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintNotification(dAtA []byte, offset int, v uint64) int { + offset -= sovNotification(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *RecentEDSNotification) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Height != 0 { + n += 1 + sovNotification(uint64(m.Height)) + } + l = len(m.DataHash) + if l > 0 { + n += 1 + l + sovNotification(uint64(l)) + } + return n +} + +func sovNotification(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozNotification(x uint64) (n int) { + return sovNotification(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *RecentEDSNotification) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNotification + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RecentEDSNotification: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RecentEDSNotification: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + m.Height = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNotification + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Height |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DataHash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNotification + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthNotification + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthNotification + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DataHash = append(m.DataHash[:0], dAtA[iNdEx:postIndex]...) 
+ if m.DataHash == nil { + m.DataHash = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipNotification(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthNotification + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipNotification(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowNotification + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowNotification + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowNotification + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthNotification + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupNotification + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthNotification + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthNotification = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowNotification = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupNotification = fmt.Errorf("proto: unexpected end of group") +) diff --git a/share/p2p/shrexsub/pb/notification.proto b/share/p2p/shrexsub/pb/notification.proto new file mode 100644 index 0000000000..d96cf3369e --- /dev/null +++ b/share/p2p/shrexsub/pb/notification.proto @@ -0,0 +1,9 @@ +syntax = "proto3"; + +package share.p2p.shrex.sub; + +message RecentEDSNotification { + uint64 height = 1; + bytes data_hash = 2; +} + diff --git a/share/p2p/shrexsub/pubsub.go b/share/p2p/shrexsub/pubsub.go new file mode 100644 index 0000000000..ed713b4614 --- /dev/null +++ b/share/p2p/shrexsub/pubsub.go @@ -0,0 +1,137 @@ +package shrexsub + +import ( + "context" + "fmt" + + logging "github.com/ipfs/go-log/v2" + pubsub "github.com/libp2p/go-libp2p-pubsub" + "github.com/libp2p/go-libp2p/core/host" + "github.com/libp2p/go-libp2p/core/peer" + + "github.com/celestiaorg/celestia-node/share" + pb "github.com/celestiaorg/celestia-node/share/p2p/shrexsub/pb" +) + +var log = logging.Logger("shrex-sub") + +// pubsubTopic hardcodes the name of the EDS floodsub topic with the provided networkID. +func pubsubTopicID(networkID string) string { + return fmt.Sprintf("%s/eds-sub/v0.1.0", networkID) +} + +// ValidatorFn is an injectable func and governs EDS notification msg validity. +// It receives the notification and sender peer and expects the validation result. +// ValidatorFn is allowed to be blocking for an indefinite time or until the context is canceled. 
+type ValidatorFn func(context.Context, peer.ID, Notification) pubsub.ValidationResult
+
+// BroadcastFn aliases the function that broadcasts the DataHash.
+type BroadcastFn func(context.Context, Notification) error
+
+// Notification is the format of message sent by Broadcaster
+type Notification struct {
+    DataHash share.DataHash
+    Height   uint64
+}
+
+// PubSub manages receiving and propagating the EDS from/to the network
+// over "eds-sub" subscription.
+type PubSub struct {
+    pubSub *pubsub.PubSub
+    topic  *pubsub.Topic
+
+    pubsubTopic string
+    cancelRelay pubsub.RelayCancelFunc
+}
+
+// NewPubSub creates a libp2p.PubSub wrapper.
+func NewPubSub(ctx context.Context, h host.Host, networkID string) (*PubSub, error) {
+    pubsub, err := pubsub.NewFloodSub(ctx, h)
+    if err != nil {
+        return nil, err
+    }
+    return &PubSub{
+        pubSub:      pubsub,
+        pubsubTopic: pubsubTopicID(networkID),
+    }, nil
+}
+
+// Start joins the FloodSub topic and starts relaying messages on it.
+func (s *PubSub) Start(context.Context) error {
+    topic, err := s.pubSub.Join(s.pubsubTopic)
+    if err != nil {
+        return err
+    }
+
+    cancel, err := topic.Relay()
+    if err != nil {
+        return err
+    }
+
+    s.cancelRelay = cancel
+    s.topic = topic
+    return nil
+}
+
+// Stop completely stops the PubSub:
+// * Unregisters all the added Validators
+// * Closes the `ShrEx/Sub` topic
+func (s *PubSub) Stop(context.Context) error {
+    s.cancelRelay()
+    err := s.pubSub.UnregisterTopicValidator(s.pubsubTopic)
+    if err != nil {
+        log.Warnw("unregistering topic", "err", err)
+    }
+    return s.topic.Close()
+}
+
+// AddValidator registers given ValidatorFn for EDS notifications.
+// Any amount of Validators can be registered.
+func (s *PubSub) AddValidator(v ValidatorFn) error {
+    return s.pubSub.RegisterTopicValidator(s.pubsubTopic, v.validate)
+}
+
+func (v ValidatorFn) validate(ctx context.Context, p peer.ID, msg *pubsub.Message) pubsub.ValidationResult {
+    var pbmsg pb.RecentEDSNotification
+    if err := pbmsg.Unmarshal(msg.Data); err != nil {
+        log.Debugw("validator: unmarshal error", "err", err)
+        return pubsub.ValidationReject
+    }
+
+    n := Notification{
+        DataHash: pbmsg.DataHash,
+        Height:   pbmsg.Height,
+    }
+    if n.Height == 0 || n.DataHash.IsEmptyRoot() || n.DataHash.Validate() != nil {
+        // hard reject malicious height (height 0 does not exist) and
+        // empty/invalid datahashes
+        return pubsub.ValidationReject
+    }
+    return v(ctx, p, n)
+}
+
+// Subscribe provides a new Subscription for EDS notifications.
+func (s *PubSub) Subscribe() (*Subscription, error) {
+    if s.topic == nil {
+        return nil, fmt.Errorf("shrex-sub: topic is not started")
+    }
+    return newSubscription(s.topic)
+}
+
+// Broadcast sends the EDS notification (DataHash) to every connected peer.
+func (s *PubSub) Broadcast(ctx context.Context, notification Notification) error {
+    if notification.DataHash.IsEmptyRoot() {
+        // no need to broadcast datahash of an empty block EDS
+        return nil
+    }
+
+    msg := pb.RecentEDSNotification{
+        Height:   notification.Height,
+        DataHash: notification.DataHash,
+    }
+    data, err := msg.Marshal()
+    if err != nil {
+        return fmt.Errorf("shrex-sub: marshal notification, %w", err)
+    }
+    return s.topic.Publish(ctx, data)
+}
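Since validate() above already rejects zero heights and empty or malformed data hashes, a registered ValidatorFn only needs to add application-level checks. A minimal sketch of such a validator; registerHeightValidator and the knownHeight callback are hypothetical, and the +1000 window is an arbitrary illustrative threshold:

    package main // illustrative sketch, not part of this diff

    import (
        "context"

        pubsub "github.com/libp2p/go-libp2p-pubsub"
        "github.com/libp2p/go-libp2p/core/peer"

        "github.com/celestiaorg/celestia-node/share/p2p/shrexsub"
    )

    // registerHeightValidator ignores notifications far above the local head.
    func registerHeightValidator(ps *shrexsub.PubSub, knownHeight func() uint64) error {
        return ps.AddValidator(func(_ context.Context, _ peer.ID, n shrexsub.Notification) pubsub.ValidationResult {
            // basic height/hash sanity is already enforced by validate();
            // only add an application-level plausibility check here
            if n.Height > knownHeight()+1000 {
                return pubsub.ValidationIgnore
            }
            return pubsub.ValidationAccept
        })
    }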
diff --git a/share/p2p/shrexsub/pubsub_test.go b/share/p2p/shrexsub/pubsub_test.go
new file mode 100644
index 0000000000..85b16c055b
--- /dev/null
+++ b/share/p2p/shrexsub/pubsub_test.go
@@ -0,0 +1,124 @@
+package shrexsub
+
+import (
+    "context"
+    "testing"
+    "time"
+
+    pubsub "github.com/libp2p/go-libp2p-pubsub"
+    "github.com/libp2p/go-libp2p/core/peer"
+    mocknet "github.com/libp2p/go-libp2p/p2p/net/mock"
+    "github.com/stretchr/testify/require"
+    "github.com/tendermint/tendermint/libs/rand"
+
+    pb "github.com/celestiaorg/celestia-node/share/p2p/shrexsub/pb"
+)
+
+func TestPubSub(t *testing.T) {
+    h, err := mocknet.FullMeshConnected(2)
+    require.NoError(t, err)
+    ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
+    t.Cleanup(cancel)
+
+    pSub1, err := NewPubSub(ctx, h.Hosts()[0], "test")
+    require.NoError(t, err)
+
+    pSub2, err := NewPubSub(ctx, h.Hosts()[1], "test")
+    require.NoError(t, err)
+    err = pSub2.AddValidator(
+        func(ctx context.Context, p peer.ID, n Notification) pubsub.ValidationResult {
+            // only testing shrexsub validation here
+            return pubsub.ValidationAccept
+        },
+    )
+    require.NoError(t, err)
+
+    require.NoError(t, pSub1.Start(ctx))
+    require.NoError(t, pSub2.Start(ctx))
+
+    subs, err := pSub2.Subscribe()
+    require.NoError(t, err)
+
+    var tests = []struct {
+        name        string
+        notif       Notification
+        errExpected bool
+    }{
+        {
+            name: "valid height, valid hash",
+            notif: Notification{
+                Height:   1,
+                DataHash: rand.Bytes(32),
+            },
+            errExpected: false,
+        },
+        {
+            name: "valid height, invalid hash (<32 bytes)",
+            notif: Notification{
+                Height:   2,
+                DataHash: rand.Bytes(20),
+            },
+            errExpected: true,
+        },
+        {
+            name: "valid height, invalid hash (>32 bytes)",
+            notif: Notification{
+                Height:   2,
+                DataHash: rand.Bytes(64),
+            },
+            errExpected: true,
+        },
+        {
+            name: "invalid height, valid hash",
+            notif: Notification{
+                Height:   0,
+                DataHash: rand.Bytes(32),
+            },
+            errExpected: true,
+        },
+        {
+            name: "invalid height, nil hash",
+            notif: Notification{
+                Height:   0,
+                DataHash: nil,
+            },
+            errExpected: true,
+        },
+        {
+            name: "valid height, nil hash",
+            notif: Notification{
+                Height:   30,
+                DataHash: nil,
+            },
+            errExpected: true,
+        },
+    }
+
+    for _, tt := range tests {
+        t.Run(tt.name, func(t *testing.T) {
+            msg := pb.RecentEDSNotification{
+                Height:   tt.notif.Height,
+                DataHash: tt.notif.DataHash,
+            }
+            data, err := msg.Marshal()
+            require.NoError(t, err)
+
+            err = pSub1.topic.Publish(ctx, data, pubsub.WithReadiness(pubsub.MinTopicSize(1)))
+            require.NoError(t, err)
+
+            reqCtx, reqCtxCancel := context.WithTimeout(context.Background(), time.Millisecond*100)
+            defer reqCtxCancel()
+
+            got, err := subs.Next(reqCtx)
+            if tt.errExpected {
+                require.Error(t, err)
+                require.ErrorIs(t, err, context.DeadlineExceeded)
+                return
+            }
+            require.NoError(t, err)
+            require.Equal(t, tt.notif, got)
+        })
+    }
+}
diff --git a/share/p2p/shrexsub/subscription.go b/share/p2p/shrexsub/subscription.go
new file mode 100644
index 0000000000..32a3e65e51
--- /dev/null
+++ b/share/p2p/shrexsub/subscription.go
@@ -0,0 +1,51 @@
+package shrexsub
+
+import (
+	"context"
+	"fmt"
+
+	pubsub "github.com/libp2p/go-libp2p-pubsub"
+
+	pb "github.com/celestiaorg/celestia-node/share/p2p/shrexsub/pb"
+)
+
+// Subscription is a wrapper over pubsub.Subscription that handles
+// receiving an EDS DataHash from other peers.
+type Subscription struct {
+	subscription *pubsub.Subscription
+}
+
+func newSubscription(t *pubsub.Topic) (*Subscription, error) {
+	subs, err := t.Subscribe()
+	if err != nil {
+		return nil, err
+	}
+
+	return &Subscription{subscription: subs}, nil
+}
+
+// Next blocks the caller until any new EDS DataHash notification arrives.
+// Returns only notifications which successfully pass validation.
+func (subs *Subscription) Next(ctx context.Context) (Notification, error) {
+	msg, err := subs.subscription.Next(ctx)
+	if err != nil {
+		log.Errorw("listening for the next eds hash", "err", err)
+		return Notification{}, err
+	}
+
+	log.Debugw("received message", "topic", msg.Message.GetTopic(), "sender", msg.ReceivedFrom)
+	var pbmsg pb.RecentEDSNotification
+	if err := pbmsg.Unmarshal(msg.Data); err != nil {
+		log.Debugw("unmarshal error", "err", err)
+		return Notification{}, fmt.Errorf("shrex-sub: unmarshal notification, %w", err)
+	}
+	return Notification{
+		DataHash: pbmsg.DataHash,
+		Height:   pbmsg.Height,
+	}, nil
+}
+
+// Cancel stops the subscription.
+func (subs *Subscription) Cancel() {
+	subs.subscription.Cancel()
+}
diff --git a/share/share.go b/share/share.go
new file mode 100644
index 0000000000..4079028d82
--- /dev/null
+++ b/share/share.go
@@ -0,0 +1,73 @@
+package share
+
+import (
+	"bytes"
+	"encoding/hex"
+	"fmt"
+
+	"github.com/celestiaorg/celestia-app/pkg/appconsts"
+)
+
+var (
+	// DefaultRSMT2DCodec sets the default rsmt2d.Codec for shares.
+	DefaultRSMT2DCodec = appconsts.DefaultCodec
+)
+
+const (
+	// Size is a system-wide size of a share, including both data and namespace.
+	Size = appconsts.ShareSize
+)
+
+var (
+	// MaxSquareSize is currently the maximum size supported for unerasured data in
+	// rsmt2d.ExtendedDataSquare.
+	MaxSquareSize = appconsts.SquareSizeUpperBound(appconsts.LatestVersion)
+)
+
+// Share contains the raw share data and its namespace.
+// NOTE: The alias for a byte slice is chosen to keep maximal compatibility, especially with rsmt2d.
+// Ideally, we should define a reusable type elsewhere and make everyone (Core, rsmt2d, ipld) rely
+// on it.
+type Share = []byte
+
+// GetNamespace slices Namespace out of the Share.
+func GetNamespace(s Share) Namespace {
+	return s[:NamespaceSize]
+}
+
+// GetData slices out data of the Share.
+func GetData(s Share) []byte {
+	return s[NamespaceSize:]
+}
+
+// DataHash is a representation of the Root hash.
+type DataHash []byte
+
+func (dh DataHash) Validate() error {
+	if len(dh) != 32 {
+		return fmt.Errorf("invalid hash size, expected 32, got %d", len(dh))
+	}
+	return nil
+}
+
+func (dh DataHash) String() string {
+	return fmt.Sprintf("%X", []byte(dh))
+}
+
+// IsEmptyRoot checks whether DataHash corresponds to the root of an empty block EDS.
+func (dh DataHash) IsEmptyRoot() bool {
+	return bytes.Equal(EmptyRoot().Hash(), dh)
+}
+
+// MustDataHashFromString converts a hex string to a valid datahash.
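+// It panics if the string is not valid hex or if the decoded hash fails
+// Validate, so it is meant for tests and hardcoded values, e.g.:
+//
+//	dh := MustDataHashFromString("<64 hex characters>")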
+func MustDataHashFromString(datahash string) DataHash {
+	dh, err := hex.DecodeString(datahash)
+	if err != nil {
+		panic(fmt.Sprintf("datahash conversion: passed string was not valid hex: %s", datahash))
+	}
+	err = DataHash(dh).Validate()
+	if err != nil {
+		panic(fmt.Sprintf("datahash validation: passed hex string failed: %s", err))
+	}
+	return dh
+}
diff --git a/share/sharetest/testing.go b/share/sharetest/testing.go
new file mode 100644
index 0000000000..3889260393
--- /dev/null
+++ b/share/sharetest/testing.go
@@ -0,0 +1,78 @@
+package sharetest
+
+import (
+	"bytes"
+	"math/rand"
+	"sort"
+	"sync"
+	"time"
+
+	"github.com/stretchr/testify/require"
+
+	"github.com/celestiaorg/celestia-app/pkg/namespace"
+
+	"github.com/celestiaorg/celestia-node/share"
+)
+
+// RandShares generates 'total' shares filled with random data. It uses require.TestingT
+// to be able to take both a *testing.T and a *testing.B.
+func RandShares(t require.TestingT, total int) []share.Share {
+	if total&(total-1) != 0 {
+		t.Errorf("total must be power of 2: %d", total)
+		t.FailNow()
+	}
+
+	shares := make([]share.Share, total)
+	for i := range shares {
+		shr := make([]byte, share.Size)
+		copy(share.GetNamespace(shr), RandV0Namespace())
+		rndMu.Lock()
+		_, err := rnd.Read(share.GetData(shr))
+		rndMu.Unlock()
+		require.NoError(t, err)
+		shares[i] = shr
+	}
+	sort.Slice(shares, func(i, j int) bool { return bytes.Compare(shares[i], shares[j]) < 0 })
+
+	return shares
+}
+
+// RandSharesWithNamespace is the same as RandShares, but sets the same namespace for all shares.
+func RandSharesWithNamespace(t require.TestingT, namespace share.Namespace, total int) []share.Share {
+	if total&(total-1) != 0 {
+		t.Errorf("total must be power of 2: %d", total)
+		t.FailNow()
+	}
+
+	shares := make([]share.Share, total)
+	rnd := rand.New(rand.NewSource(time.Now().Unix())) //nolint:gosec
+	for i := range shares {
+		shr := make([]byte, share.Size)
+		copy(share.GetNamespace(shr), namespace)
+		_, err := rnd.Read(share.GetData(shr))
+		require.NoError(t, err)
+		shares[i] = shr
+	}
+	sort.Slice(shares, func(i, j int) bool { return bytes.Compare(shares[i], shares[j]) < 0 })
+	return shares
+}
+
+// RandV0Namespace generates a random valid data namespace for testing purposes.
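+// The returned namespace always passes Namespace.ValidateForData, and the
+// function is safe for concurrent use. A typical call site:
+//
+//	shr := make([]byte, share.Size)
+//	copy(share.GetNamespace(shr), RandV0Namespace())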
+func RandV0Namespace() share.Namespace {
+	rb := make([]byte, namespace.NamespaceVersionZeroIDSize)
+	for {
+		// re-roll the random bytes on every iteration, so an invalid
+		// namespace cannot loop forever
+		rndMu.Lock()
+		rnd.Read(rb)
+		rndMu.Unlock()
+		ns, _ := share.NewBlobNamespaceV0(rb)
+		if err := ns.ValidateForData(); err != nil {
+			continue
+		}
+		return ns
+	}
+}
+
+var (
+	rnd   = rand.New(rand.NewSource(time.Now().Unix())) //nolint:gosec
+	rndMu sync.Mutex
+)
diff --git a/specs/.gitignore b/specs/.gitignore
new file mode 100644
index 0000000000..7585238efe
--- /dev/null
+++ b/specs/.gitignore
@@ -0,0 +1 @@
+book
diff --git a/specs/book.toml b/specs/book.toml
new file mode 100644
index 0000000000..2ab3d5a398
--- /dev/null
+++ b/specs/book.toml
@@ -0,0 +1,13 @@
+[book]
+authors = ["Celestia Labs"]
+language = "en"
+multilingual = false
+src = "src"
+title = "Celestia Node Specification"
+
+[output.html]
+git-repository-url = "https://github.com/celestiaorg/celestia-node"
+
+[preprocessor.toc]
+command = "mdbook-toc"
+renderer = ["html"]
diff --git a/specs/src/SUMMARY.md b/specs/src/SUMMARY.md
new file mode 100644
index 0000000000..dd6d59d972
--- /dev/null
+++ b/specs/src/SUMMARY.md
@@ -0,0 +1,3 @@
+# Summary
+
+- [WIP](./WIP.md)
diff --git a/specs/src/WIP.md b/specs/src/WIP.md
new file mode 100644
index 0000000000..85e6ff194b
--- /dev/null
+++ b/specs/src/WIP.md
@@ -0,0 +1 @@
+# WIP
diff --git a/state/address_test.go b/state/address_test.go
new file mode 100644
index 0000000000..d701b38aa8
--- /dev/null
+++ b/state/address_test.go
@@ -0,0 +1,57 @@
+package state
+
+import (
+	"testing"
+
+	sdk "github.com/cosmos/cosmos-sdk/types"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func TestAddressMarshalling(t *testing.T) {
+	testCases := []struct {
+		name           string
+		addressString  string
+		addressFromStr func(string) (interface{}, error)
+		marshalJSON    func(interface{}) ([]byte, error)
+		unmarshalJSON  func([]byte) (interface{}, error)
+	}{
+		{
+			name:           "Account Address",
+			addressString:  "celestia1377k5an3f94v6wyaceu0cf4nq6gk2jtpc46g7h",
+			addressFromStr: func(s string) (interface{}, error) { return sdk.AccAddressFromBech32(s) },
+			marshalJSON:    func(addr interface{}) ([]byte, error) { return addr.(sdk.AccAddress).MarshalJSON() },
+			unmarshalJSON: func(b []byte) (interface{}, error) {
+				var addr sdk.AccAddress
+				err := addr.UnmarshalJSON(b)
+				return addr, err
+			},
+		},
+		{
+			name:           "Validator Address",
+			addressString:  "celestiavaloper1q3v5cugc8cdpud87u4zwy0a74uxkk6u4q4gx4p",
+			addressFromStr: func(s string) (interface{}, error) { return sdk.ValAddressFromBech32(s) },
+			marshalJSON:    func(addr interface{}) ([]byte, error) { return addr.(sdk.ValAddress).MarshalJSON() },
+			unmarshalJSON: func(b []byte) (interface{}, error) {
+				var addr sdk.ValAddress
+				err := addr.UnmarshalJSON(b)
+				return addr, err
+			},
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			addr, err := tc.addressFromStr(tc.addressString)
+			require.NoError(t, err)
+
+			addrBytes, err := tc.marshalJSON(addr)
+			assert.NoError(t, err)
+			assert.Equal(t, []byte("\""+tc.addressString+"\""), addrBytes)
+
+			addrUnmarshalled, err := tc.unmarshalJSON(addrBytes)
+			assert.NoError(t, err)
+			assert.Equal(t, addr, addrUnmarshalled)
+		})
+	}
+}
diff --git a/state/core_access.go b/state/core_access.go
new file mode 100644
index 0000000000..c3fbd4836a
--- /dev/null
+++ b/state/core_access.go
@@ -0,0 +1,579 @@
+package state
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"math"
+	"sync"
+	"time"
+
+	sdkErrors "cosmossdk.io/errors"
"github.com/cosmos/cosmos-sdk/api/tendermint/abci" + nodeservice "github.com/cosmos/cosmos-sdk/client/grpc/node" + storetypes "github.com/cosmos/cosmos-sdk/store/types" + sdktypes "github.com/cosmos/cosmos-sdk/types" + sdktx "github.com/cosmos/cosmos-sdk/types/tx" + auth "github.com/cosmos/cosmos-sdk/x/auth/types" + banktypes "github.com/cosmos/cosmos-sdk/x/bank/types" + stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types" + logging "github.com/ipfs/go-log/v2" + "github.com/tendermint/tendermint/crypto/merkle" + rpcclient "github.com/tendermint/tendermint/rpc/client" + "github.com/tendermint/tendermint/rpc/client/http" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" + + "github.com/celestiaorg/celestia-app/app" + apperrors "github.com/celestiaorg/celestia-app/app/errors" + "github.com/celestiaorg/celestia-app/pkg/appconsts" + appblob "github.com/celestiaorg/celestia-app/x/blob" + apptypes "github.com/celestiaorg/celestia-app/x/blob/types" + libhead "github.com/celestiaorg/go-header" + + "github.com/celestiaorg/celestia-node/blob" + "github.com/celestiaorg/celestia-node/header" +) + +var ( + log = logging.Logger("state") + ErrInvalidAmount = errors.New("state: amount must be greater than zero") +) + +const maxRetries = 5 + +// CoreAccessor implements service over a gRPC connection +// with a celestia-core node. +type CoreAccessor struct { + ctx context.Context + cancel context.CancelFunc + + signer *apptypes.KeyringSigner + getter libhead.Head[*header.ExtendedHeader] + + queryCli banktypes.QueryClient + stakingCli stakingtypes.QueryClient + rpcCli rpcclient.ABCIClient + + prt *merkle.ProofRuntime + + coreConn *grpc.ClientConn + coreIP string + rpcPort string + grpcPort string + + // these fields are mutatable and thus need to be protected by a mutex + lock sync.Mutex + lastPayForBlob int64 + payForBlobCount int64 + // minGasPrice is the minimum gas price that the node will accept. + // NOTE: just because the first node accepts the transaction, does not mean it + // will find a proposer that does accept the transaction. Better would be + // to set a global min gas price that correct processes conform to. + minGasPrice float64 +} + +// NewCoreAccessor dials the given celestia-core endpoint and +// constructs and returns a new CoreAccessor (state service) with the active +// connection. 
+func NewCoreAccessor(
+	signer *apptypes.KeyringSigner,
+	getter libhead.Head[*header.ExtendedHeader],
+	coreIP,
+	rpcPort string,
+	grpcPort string,
+) *CoreAccessor {
+	// create verifier
+	prt := merkle.DefaultProofRuntime()
+	prt.RegisterOpDecoder(storetypes.ProofOpIAVLCommitment, storetypes.CommitmentOpDecoder)
+	prt.RegisterOpDecoder(storetypes.ProofOpSimpleMerkleCommitment, storetypes.CommitmentOpDecoder)
+	return &CoreAccessor{
+		signer:   signer,
+		getter:   getter,
+		coreIP:   coreIP,
+		rpcPort:  rpcPort,
+		grpcPort: grpcPort,
+		prt:      prt,
+	}
+}
+
+func (ca *CoreAccessor) Start(ctx context.Context) error {
+	if ca.coreConn != nil {
+		return fmt.Errorf("core-access: already connected to core endpoint")
+	}
+	ca.ctx, ca.cancel = context.WithCancel(context.Background())
+
+	// dial given celestia-core endpoint
+	endpoint := fmt.Sprintf("%s:%s", ca.coreIP, ca.grpcPort)
+	client, err := grpc.DialContext(
+		ctx,
+		endpoint,
+		grpc.WithTransportCredentials(insecure.NewCredentials()),
+	)
+	if err != nil {
+		return err
+	}
+	ca.coreConn = client
+	// create the query client
+	queryCli := banktypes.NewQueryClient(ca.coreConn)
+	ca.queryCli = queryCli
+	// create the staking query client
+	stakingCli := stakingtypes.NewQueryClient(ca.coreConn)
+	ca.stakingCli = stakingCli
+	// create ABCI query client
+	cli, err := http.New(fmt.Sprintf("http://%s:%s", ca.coreIP, ca.rpcPort), "/websocket")
+	if err != nil {
+		return err
+	}
+	ca.rpcCli = cli
+
+	ca.minGasPrice, err = ca.queryMinimumGasPrice(ctx)
+	if err != nil {
+		return fmt.Errorf("querying minimum gas price: %w", err)
+	}
+
+	return nil
+}
+
+func (ca *CoreAccessor) Stop(context.Context) error {
+	if ca.cancel == nil {
+		log.Warn("core accessor already stopped")
+		return nil
+	}
+	if ca.coreConn == nil {
+		log.Warn("no connection found to close")
+		return nil
+	}
+	defer ca.cancelCtx()
+
+	// close out core connection
+	err := ca.coreConn.Close()
+	if err != nil {
+		return err
+	}
+
+	ca.coreConn = nil
+	ca.queryCli = nil
+	return nil
+}
+
+func (ca *CoreAccessor) cancelCtx() {
+	ca.cancel()
+	ca.cancel = nil
+}
+
+func (ca *CoreAccessor) constructSignedTx(
+	ctx context.Context,
+	msg sdktypes.Msg,
+	opts ...apptypes.TxBuilderOption,
+) ([]byte, error) {
+	// should be called first in order to make a valid tx
+	err := ca.signer.QueryAccountNumber(ctx, ca.coreConn)
+	if err != nil {
+		return nil, err
+	}
+
+	tx, err := ca.signer.BuildSignedTx(ca.signer.NewTxBuilder(opts...), msg)
+	if err != nil {
+		return nil, err
+	}
+	return ca.signer.EncodeTx(tx)
+}
+
+// SubmitPayForBlob builds, signs, and synchronously submits a MsgPayForBlob. It blocks until the
+// transaction is committed and returns the TxResponse. If gasLim is set to 0, the method will
+// automatically estimate the gas limit. If the fee is negative, the method will use the node's min
+// gas price multiplied by the gas limit.
+func (ca *CoreAccessor) SubmitPayForBlob(
+	ctx context.Context,
+	fee Int,
+	gasLim uint64,
+	blobs []*blob.Blob,
+) (*TxResponse, error) {
+	if len(blobs) == 0 {
+		return nil, errors.New("state: no blobs provided")
+	}
+
+	appblobs := make([]*apptypes.Blob, len(blobs))
+	for i := range blobs {
+		if err := blobs[i].Namespace().ValidateForBlob(); err != nil {
+			return nil, err
+		}
+		appblobs[i] = &blobs[i].Blob
+	}
+
+	// we only estimate gas if the user wants us to (by setting the gasLim to 0). In the future we may
+	// want to make these arguments optional.
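+	// Roughly, the estimate charges a fixed amount of gas per blob byte plus
+	// the per-byte cost of the transaction envelope; see apptypes.EstimateGas.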
+	if gasLim == 0 {
+		blobSizes := make([]uint32, len(blobs))
+		for i, b := range blobs {
+			blobSizes[i] = uint32(len(b.Data))
+		}
+
+		// TODO (@cmwaters): the default gas per byte and the default tx size cost per byte could be changed
+		// through governance. This section could be more robust by tracking these values and adjusting the
+		// gas limit accordingly (as is done for the gas price)
+		gasLim = apptypes.EstimateGas(blobSizes, appconsts.DefaultGasPerBlobByte, auth.DefaultTxSizeCostPerByte)
+	}
+
+	minGasPrice := ca.getMinGasPrice()
+
+	// set the fee for the user as the minimum gas price multiplied by the gas limit
+	estimatedFee := false
+	if fee.IsNegative() {
+		estimatedFee = true
+		fee = sdktypes.NewInt(int64(math.Ceil(minGasPrice * float64(gasLim))))
+	}
+
+	var lastErr error
+	for attempt := 0; attempt < maxRetries; attempt++ {
+		response, err := appblob.SubmitPayForBlob(
+			ctx,
+			ca.signer,
+			ca.coreConn,
+			sdktx.BroadcastMode_BROADCAST_MODE_BLOCK,
+			appblobs,
+			apptypes.SetGasLimit(gasLim),
+			withFee(fee),
+		)
+
+		// the node is capable of changing the min gas price at any time so we must be able to detect it and
+		// update our version accordingly
+		if apperrors.IsInsufficientMinGasPrice(err) && estimatedFee {
+			// remember the original error before it is overwritten by the parse below
+			lastErr = err
+			// The error message contains enough information to parse the new min gas price
+			minGasPrice, err = apperrors.ParseInsufficientMinGasPrice(err, minGasPrice, gasLim)
+			if err != nil {
+				return nil, fmt.Errorf("parsing insufficient min gas price error: %w", err)
+			}
+			ca.setMinGasPrice(minGasPrice)
+			// update the fee to retry again
+			fee = sdktypes.NewInt(int64(math.Ceil(minGasPrice * float64(gasLim))))
+			continue
+		}
+
+		// metrics should only be counted on a successful PFB tx
+		if err == nil && response.Code == 0 {
+			ca.markSuccessfulPFB()
+		}
+
+		if response != nil && response.Code != 0 {
+			err = errors.Join(err, sdkErrors.ABCIError(response.Codespace, response.Code, response.Logs.String()))
+		}
+		return response, err
+	}
+	return nil, fmt.Errorf("failed to submit blobs after %d attempts: %w", maxRetries, lastErr)
+}
+
+func (ca *CoreAccessor) AccountAddress(context.Context) (Address, error) {
+	addr, err := ca.signer.GetSignerInfo().GetAddress()
+	if err != nil {
+		return Address{nil}, err
+	}
+	return Address{addr}, nil
+}
+
+func (ca *CoreAccessor) Balance(ctx context.Context) (*Balance, error) {
+	addr, err := ca.signer.GetSignerInfo().GetAddress()
+	if err != nil {
+		return nil, err
+	}
+	return ca.BalanceForAddress(ctx, Address{addr})
+}
+
+func (ca *CoreAccessor) BalanceForAddress(ctx context.Context, addr Address) (*Balance, error) {
+	head, err := ca.getter.Head(ctx)
+	if err != nil {
+		return nil, err
+	}
+	// construct an ABCI query for the height at head-1 because
+	// the AppHash contained in the head is actually the state root
+	// after applying the transactions contained in the previous block.
+	// TODO @renaynay: once https://github.com/cosmos/cosmos-sdk/pull/12674 is merged, use this method
+	// instead
+	prefixedAccountKey := append(banktypes.CreateAccountBalancesPrefix(addr.Bytes()), []byte(app.BondDenom)...)
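+	// The key addresses this account's bond-denom balance in the bank store;
+	// the Merkle proof returned by the query below is verified against it.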
+	abciReq := abci.RequestQuery{
+		// TODO @renaynay: once https://github.com/cosmos/cosmos-sdk/pull/12674 is merged, use const instead
+		Path:   fmt.Sprintf("store/%s/key", banktypes.StoreKey),
+		Height: int64(head.Height() - 1),
+		Data:   prefixedAccountKey,
+		Prove:  true,
+	}
+	opts := rpcclient.ABCIQueryOptions{
+		Height: abciReq.Height,
+		Prove:  abciReq.Prove,
+	}
+	result, err := ca.rpcCli.ABCIQueryWithOptions(ctx, abciReq.Path, abciReq.Data, opts)
+	if err != nil {
+		return nil, err
+	}
+	if !result.Response.IsOK() {
+		return nil, sdkErrorToGRPCError(result.Response)
+	}
+	// unmarshal balance information
+	value := result.Response.Value
+	// if the value returned is empty, the account balance does not yet exist
+	if len(value) == 0 {
+		log.Errorf("balance for account %s does not exist at block height %d", addr.String(), head.Height()-1)
+		return &Balance{
+			Denom:  app.BondDenom,
+			Amount: sdktypes.NewInt(0),
+		}, nil
+	}
+	coin, ok := sdktypes.NewIntFromString(string(value))
+	if !ok {
+		return nil, fmt.Errorf("cannot convert %s into sdktypes.Int", string(value))
+	}
+	// verify balance
+	err = ca.prt.VerifyValueFromKeys(
+		result.Response.GetProofOps(),
+		head.AppHash,
+		[][]byte{
+			[]byte(banktypes.StoreKey),
+			prefixedAccountKey,
+		}, value)
+	if err != nil {
+		return nil, err
+	}
+
+	return &Balance{
+		Denom:  app.BondDenom,
+		Amount: coin,
+	}, nil
+}
+
+// SubmitTx submits the given signed transaction and blocks until it is committed.
+func (ca *CoreAccessor) SubmitTx(ctx context.Context, tx Tx) (*TxResponse, error) {
+	txResp, err := apptypes.BroadcastTx(ctx, ca.coreConn, sdktx.BroadcastMode_BROADCAST_MODE_BLOCK, tx)
+	if err != nil {
+		return nil, err
+	}
+	return txResp.TxResponse, nil
+}
+
+// SubmitTxWithBroadcastMode submits the given signed transaction using the given broadcast mode.
+func (ca *CoreAccessor) SubmitTxWithBroadcastMode(
+	ctx context.Context,
+	tx Tx,
+	mode sdktx.BroadcastMode,
+) (*TxResponse, error) {
+	txResp, err := apptypes.BroadcastTx(ctx, ca.coreConn, mode, tx)
+	if err != nil {
+		return nil, err
+	}
+	return txResp.TxResponse, nil
+}
+
+// Transfer sends the given amount of coins from the signer's account to the given address.
+func (ca *CoreAccessor) Transfer(
+	ctx context.Context,
+	addr AccAddress,
+	amount,
+	fee Int,
+	gasLim uint64,
+) (*TxResponse, error) {
+	if amount.IsNil() || amount.Int64() <= 0 {
+		return nil, ErrInvalidAmount
+	}
+
+	from, err := ca.signer.GetSignerInfo().GetAddress()
+	if err != nil {
+		return nil, err
+	}
+	coins := sdktypes.NewCoins(sdktypes.NewCoin(app.BondDenom, amount))
+	msg := banktypes.NewMsgSend(from, addr, coins)
+	signedTx, err := ca.constructSignedTx(ctx, msg, apptypes.SetGasLimit(gasLim), withFee(fee))
+	if err != nil {
+		return nil, err
+	}
+	return ca.SubmitTx(ctx, signedTx)
+}
+
+// CancelUnbondingDelegation cancels an unbonding delegation to the given validator.
+func (ca *CoreAccessor) CancelUnbondingDelegation(
+	ctx context.Context,
+	valAddr ValAddress,
+	amount,
+	height,
+	fee Int,
+	gasLim uint64,
+) (*TxResponse, error) {
+	if amount.IsNil() || amount.Int64() <= 0 {
+		return nil, ErrInvalidAmount
+	}
+
+	from, err := ca.signer.GetSignerInfo().GetAddress()
+	if err != nil {
+		return nil, err
+	}
+	coins := sdktypes.NewCoin(app.BondDenom, amount)
+	msg := stakingtypes.NewMsgCancelUnbondingDelegation(from, valAddr, height.Int64(), coins)
+	signedTx, err := ca.constructSignedTx(ctx, msg, apptypes.SetGasLimit(gasLim), withFee(fee))
+	if err != nil {
+		return nil, err
+	}
+	return ca.SubmitTx(ctx, signedTx)
+}
+
+// BeginRedelegate moves the given amount of delegated tokens from one validator to another.
+func (ca *CoreAccessor) BeginRedelegate(
+	ctx context.Context,
+	srcValAddr,
+	dstValAddr ValAddress,
+	amount,
+	fee Int,
+	gasLim uint64,
+) (*TxResponse, error) {
+	if amount.IsNil() || amount.Int64() <= 0 {
+		return nil, ErrInvalidAmount
+	}
+
+	from, err := ca.signer.GetSignerInfo().GetAddress()
+	if err != nil {
+		return nil, err
+	}
+	coins := sdktypes.NewCoin(app.BondDenom, amount)
+	msg := stakingtypes.NewMsgBeginRedelegate(from, srcValAddr, dstValAddr, coins)
+	signedTx, err := ca.constructSignedTx(ctx, msg, apptypes.SetGasLimit(gasLim), withFee(fee))
+	if err != nil {
+		return nil, err
+	}
+	return ca.SubmitTx(ctx, signedTx)
+}
+
+// Undelegate undelegates the given amount of tokens from the given validator.
+func (ca *CoreAccessor) Undelegate(
+	ctx context.Context,
+	valAddr ValAddress,
+	amount,
+	fee Int,
+	gasLim uint64,
+) (*TxResponse, error) {
+	if amount.IsNil() || amount.Int64() <= 0 {
+		return nil, ErrInvalidAmount
+	}
+
+	from, err := ca.signer.GetSignerInfo().GetAddress()
+	if err != nil {
+		return nil, err
+	}
+	coins := sdktypes.NewCoin(app.BondDenom, amount)
+	msg := stakingtypes.NewMsgUndelegate(from, valAddr, coins)
+	signedTx, err := ca.constructSignedTx(ctx, msg, apptypes.SetGasLimit(gasLim), withFee(fee))
+	if err != nil {
+		return nil, err
+	}
+	return ca.SubmitTx(ctx, signedTx)
+}
+
+// Delegate delegates the given amount of tokens to the given validator.
+func (ca *CoreAccessor) Delegate(
+	ctx context.Context,
+	valAddr ValAddress,
+	amount Int,
+	fee Int,
+	gasLim uint64,
+) (*TxResponse, error) {
+	if amount.IsNil() || amount.Int64() <= 0 {
+		return nil, ErrInvalidAmount
+	}
+
+	from, err := ca.signer.GetSignerInfo().GetAddress()
+	if err != nil {
+		return nil, err
+	}
+	coins := sdktypes.NewCoin(app.BondDenom, amount)
+	msg := stakingtypes.NewMsgDelegate(from, valAddr, coins)
+	signedTx, err := ca.constructSignedTx(ctx, msg, apptypes.SetGasLimit(gasLim), withFee(fee))
+	if err != nil {
+		return nil, err
+	}
+	return ca.SubmitTx(ctx, signedTx)
+}
+
+// QueryDelegation returns the signer's delegation to the given validator.
+func (ca *CoreAccessor) QueryDelegation(
+	ctx context.Context,
+	valAddr ValAddress,
+) (*stakingtypes.QueryDelegationResponse, error) {
+	delAddr, err := ca.signer.GetSignerInfo().GetAddress()
+	if err != nil {
+		return nil, err
+	}
+	return ca.stakingCli.Delegation(ctx, &stakingtypes.QueryDelegationRequest{
+		DelegatorAddr: delAddr.String(),
+		ValidatorAddr: valAddr.String(),
+	})
+}
+
+// QueryUnbonding returns the signer's unbonding delegation from the given validator.
+func (ca *CoreAccessor) QueryUnbonding(
+	ctx context.Context,
+	valAddr ValAddress,
+) (*stakingtypes.QueryUnbondingDelegationResponse, error) {
+	delAddr, err := ca.signer.GetSignerInfo().GetAddress()
+	if err != nil {
+		return nil, err
+	}
+	return ca.stakingCli.UnbondingDelegation(ctx, &stakingtypes.QueryUnbondingDelegationRequest{
+		DelegatorAddr: delAddr.String(),
+		ValidatorAddr: valAddr.String(),
+	})
+}
+
+// QueryRedelegations returns the signer's redelegations between the given validators.
+func (ca *CoreAccessor) QueryRedelegations(
+	ctx context.Context,
+	srcValAddr,
+	dstValAddr ValAddress,
+) (*stakingtypes.QueryRedelegationsResponse, error) {
+	delAddr, err := ca.signer.GetSignerInfo().GetAddress()
+	if err != nil {
+		return nil, err
+	}
+	return ca.stakingCli.Redelegations(ctx, &stakingtypes.QueryRedelegationsRequest{
+		DelegatorAddr:    delAddr.String(),
+		SrcValidatorAddr: srcValAddr.String(),
+		DstValidatorAddr: dstValAddr.String(),
+	})
+}
+
+// LastPayForBlob returns the timestamp (in unix milliseconds) of the last successful PayForBlob.
+func (ca *CoreAccessor) LastPayForBlob() int64 {
+	ca.lock.Lock()
+	defer ca.lock.Unlock()
+	return ca.lastPayForBlob
+}
+
+// PayForBlobCount returns the total count of successfully submitted PayForBlob transactions.
+func (ca *CoreAccessor) PayForBlobCount() int64 {
+	ca.lock.Lock()
+	defer ca.lock.Unlock()
+	return ca.payForBlobCount
+}
+
+func (ca *CoreAccessor) markSuccessfulPFB() {
+	ca.lock.Lock()
+	defer ca.lock.Unlock()
+	ca.lastPayForBlob = time.Now().UnixMilli()
+	ca.payForBlobCount++
+}
+
+func (ca *CoreAccessor) setMinGasPrice(minGasPrice float64) {
+	ca.lock.Lock()
+	defer ca.lock.Unlock()
+	ca.minGasPrice = minGasPrice
+}
+
+func (ca *CoreAccessor) getMinGasPrice() float64 {
+	ca.lock.Lock()
+	defer ca.lock.Unlock()
+	return ca.minGasPrice
+}
+
+// queryMinimumGasPrice returns the minimum gas price required by the node.
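+// The price is read from the node's config via the gRPC node service and
+// reported in the bond denom (utia).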
+func (ca *CoreAccessor) queryMinimumGasPrice( + ctx context.Context, +) (float64, error) { + rsp, err := nodeservice.NewServiceClient(ca.coreConn).Config(ctx, &nodeservice.ConfigRequest{}) + if err != nil { + return 0, err + } + + coins, err := sdktypes.ParseDecCoins(rsp.MinimumGasPrice) + if err != nil { + return 0, err + } + return coins.AmountOf(app.BondDenom).MustFloat64(), nil +} + +func withFee(fee Int) apptypes.TxBuilderOption { + gasFee := sdktypes.NewCoins(sdktypes.NewCoin(app.BondDenom, fee)) + return apptypes.SetFeeAmount(gasFee) +} diff --git a/state/core_access_test.go b/state/core_access_test.go new file mode 100644 index 0000000000..ad7b916ea3 --- /dev/null +++ b/state/core_access_test.go @@ -0,0 +1,98 @@ +//go:build !race + +package state + +import ( + "context" + "errors" + "fmt" + "strings" + "testing" + "time" + + "cosmossdk.io/math" + sdktypes "github.com/cosmos/cosmos-sdk/types" + "github.com/stretchr/testify/require" + + "github.com/celestiaorg/celestia-app/app" + "github.com/celestiaorg/celestia-app/pkg/appconsts" + "github.com/celestiaorg/celestia-app/test/util/testnode" + blobtypes "github.com/celestiaorg/celestia-app/x/blob/types" + + "github.com/celestiaorg/celestia-node/blob" + "github.com/celestiaorg/celestia-node/share" +) + +func TestSubmitPayForBlob(t *testing.T) { + accounts := []string{"jimy", "rob"} + tmCfg := testnode.DefaultTendermintConfig() + tmCfg.Consensus.TimeoutCommit = time.Millisecond * 1 + appConf := testnode.DefaultAppConfig() + appConf.API.Enable = true + appConf.MinGasPrices = fmt.Sprintf("0.002%s", app.BondDenom) + + config := testnode.DefaultConfig().WithTendermintConfig(tmCfg).WithAppConfig(appConf).WithAccounts(accounts) + cctx, rpcAddr, grpcAddr := testnode.NewNetwork(t, config) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + signer := blobtypes.NewKeyringSigner(cctx.Keyring, accounts[0], cctx.ChainID) + ca := NewCoreAccessor(signer, nil, "127.0.0.1", extractPort(rpcAddr), extractPort(grpcAddr)) + // start the accessor + err := ca.Start(ctx) + require.NoError(t, err) + t.Cleanup(func() { + _ = ca.Stop(ctx) + }) + + ns, err := share.NewBlobNamespaceV0([]byte("namespace")) + require.NoError(t, err) + blobbyTheBlob, err := blob.NewBlobV0(ns, []byte("data")) + require.NoError(t, err) + + minGas, err := ca.queryMinimumGasPrice(ctx) + require.NoError(t, err) + require.Equal(t, appconsts.DefaultMinGasPrice, minGas) + + testcases := []struct { + name string + blobs []*blob.Blob + fee math.Int + gasLim uint64 + expErr error + }{ + { + name: "empty blobs", + blobs: []*blob.Blob{}, + fee: sdktypes.ZeroInt(), + gasLim: 0, + expErr: errors.New("state: no blobs provided"), + }, + { + name: "good blob with user provided gas and fees", + blobs: []*blob.Blob{blobbyTheBlob}, + fee: sdktypes.NewInt(10_000), // roughly 0.12 utia per gas (should be good) + gasLim: blobtypes.DefaultEstimateGas([]uint32{uint32(len(blobbyTheBlob.Data))}), + expErr: nil, + }, + // TODO: add more test cases. 
The problem right now is that the celestia-app doesn't
+		// construct the node correctly (it doesn't pass the min gas price), so the price on
+		// everything is zero and we can't actually test the correct behavior
+	}
+
+	for _, tc := range testcases {
+		t.Run(tc.name, func(t *testing.T) {
+			resp, err := ca.SubmitPayForBlob(ctx, tc.fee, tc.gasLim, tc.blobs)
+			require.Equal(t, tc.expErr, err)
+			if err == nil {
+				require.EqualValues(t, 0, resp.Code)
+			}
+		})
+	}
+}
+
+func extractPort(addr string) string {
+	splitStr := strings.Split(addr, ":")
+	return splitStr[len(splitStr)-1]
+}
diff --git a/service/state/doc.go b/state/doc.go
similarity index 57%
rename from service/state/doc.go
rename to state/doc.go
index 79f8e6a106..5d27b50f64 100644
--- a/service/state/doc.go
+++ b/state/doc.go
@@ -7,11 +7,10 @@
 // celestia network.
 //
 // `Accessor` will contain three different implementations:
-// 1. Implementation over a gRPC connection with a celestia-core node
-// called `CoreAccess`.
-// 2. Implementation over a libp2p stream with a state-providing node.
-// 3. Implementation over a local running instance of the
-// celestia-application (this feature will be implemented in *Full*
-// nodes).
-//
+//  1. Implementation over a gRPC connection with a celestia-core node
+//     called `CoreAccess`.
+//  2. Implementation over a libp2p stream with a state-providing node.
+//  3. Implementation over a local running instance of the
+//     celestia-application (this feature will be implemented in *Full*
+//     nodes).
 package state
diff --git a/service/state/helpers.go b/state/helpers.go
similarity index 100%
rename from service/state/helpers.go
rename to state/helpers.go
diff --git a/state/integration_test.go b/state/integration_test.go
new file mode 100644
index 0000000000..193e7bddc7
--- /dev/null
+++ b/state/integration_test.go
@@ -0,0 +1,150 @@
+package state
+
+import (
+	"context"
+	"encoding/json"
+	"os"
+	"testing"
+
+	"github.com/cosmos/cosmos-sdk/client/flags"
+	sdk "github.com/cosmos/cosmos-sdk/types"
+	banktypes "github.com/cosmos/cosmos-sdk/x/bank/types"
+	stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types"
+	"github.com/stretchr/testify/require"
+	"github.com/stretchr/testify/suite"
+	abci "github.com/tendermint/tendermint/abci/types"
+	rpcclient "github.com/tendermint/tendermint/rpc/client"
+	"google.golang.org/grpc"
+
+	"github.com/celestiaorg/celestia-app/app"
+	"github.com/celestiaorg/celestia-app/test/util/testfactory"
+	"github.com/celestiaorg/celestia-app/test/util/testnode"
+	blobtypes "github.com/celestiaorg/celestia-app/x/blob/types"
+	libhead "github.com/celestiaorg/go-header"
+
+	"github.com/celestiaorg/celestia-node/core"
+	"github.com/celestiaorg/celestia-node/header"
+)
+
+func TestIntegrationTestSuite(t *testing.T) {
+	suite.Run(t, new(IntegrationTestSuite))
+}
+
+type IntegrationTestSuite struct {
+	suite.Suite
+
+	cleanups []func() error
+	accounts []string
+	cctx     testnode.Context
+
+	accessor *CoreAccessor
+}
+
+func (s *IntegrationTestSuite) SetupSuite() {
+	if testing.Short() {
+		s.T().Skip("skipping test in unit-tests")
+	}
+	s.T().Log("setting up integration test suite")
+
+	cfg := core.DefaultTestConfig()
+	s.cctx = core.StartTestNodeWithConfig(s.T(), cfg)
+	s.accounts = cfg.Accounts
+
+	signer := blobtypes.NewKeyringSigner(s.cctx.Keyring, s.accounts[0], s.cctx.ChainID)
+	accessor := NewCoreAccessor(signer, localHeader{s.cctx.Client}, "", "", "")
+	setClients(accessor, s.cctx.GRPCClient, s.cctx.Client)
+	s.accessor = accessor
+
+	// required to ensure the Head request is non-nil
+	_, err := s.cctx.WaitForHeight(3)
+	require.NoError(s.T(), err)
+}
+
+func setClients(ca *CoreAccessor, conn *grpc.ClientConn, abciCli rpcclient.ABCIClient) {
+	ca.coreConn = conn
+	// create the query client
+	queryCli := banktypes.NewQueryClient(ca.coreConn)
+	ca.queryCli = queryCli
+	// create the staking query client
+	stakingCli := stakingtypes.NewQueryClient(ca.coreConn)
+	ca.stakingCli = stakingCli
+
+	ca.rpcCli = abciCli
+}
+
+func (s *IntegrationTestSuite) TearDownSuite() {
+	s.T().Log("tearing down integration test suite")
+	require := s.Require()
+	require.NoError(s.accessor.Stop(s.cctx.GoContext()))
+	for _, c := range s.cleanups {
+		err := c()
+		require.NoError(err)
+	}
+}
+
+func (s *IntegrationTestSuite) getAddress(acc string) sdk.Address {
+	rec, err := s.cctx.Keyring.Key(acc)
+	require.NoError(s.T(), err)
+
+	addr, err := rec.GetAddress()
+	require.NoError(s.T(), err)
+
+	return addr
+}
+
+type localHeader struct {
+	client rpcclient.Client
+}
+
+func (l localHeader) Head(
+	ctx context.Context,
+	_ ...libhead.HeadOption[*header.ExtendedHeader],
+) (*header.ExtendedHeader, error) {
+	latest, err := l.client.Block(ctx, nil)
+	if err != nil {
+		return nil, err
+	}
+	h := &header.ExtendedHeader{
+		RawHeader: latest.Block.Header,
+	}
+	return h, nil
+}
+
+func (s *IntegrationTestSuite) TestGetBalance() {
+	require := s.Require()
+	expectedBal := sdk.NewCoin(app.BondDenom, sdk.NewInt(99999999999999999))
+	for _, acc := range s.accounts {
+		bal, err := s.accessor.BalanceForAddress(context.Background(), Address{s.getAddress(acc)})
+		require.NoError(err)
+		require.Equal(&expectedBal, bal)
+	}
+}
+
+// This test can be used to generate a json encoded block for other test data,
+// such as that in share/availability/light/testdata.
+func (s *IntegrationTestSuite) TestGenerateJSONBlock() {
+	t := s.T()
+	t.Skip("skipping testdata generation test")
+	resp, err := s.cctx.FillBlock(4, s.accounts, flags.BroadcastSync)
+	require := s.Require()
+	require.NoError(err)
+	require.Equal(abci.CodeTypeOK, resp.Code)
+	require.NoError(s.cctx.WaitForNextBlock())
+
+	// download the block that the tx was in
+	res, err := testfactory.QueryWithoutProof(s.cctx.Context, resp.TxHash)
+	require.NoError(err)
+
+	block, err := s.cctx.Client.Block(s.cctx.GoContext(), &res.Height)
+	require.NoError(err)
+
+	pBlock, err := block.Block.ToProto()
+	require.NoError(err)
+
+	// check the open error before deferring Close to avoid closing a nil file
+	file, err := os.OpenFile("sample-block.json", os.O_CREATE|os.O_RDWR, os.ModePerm)
+	require.NoError(err)
+	defer file.Close() //nolint:errcheck
+
+	err = json.NewEncoder(file).Encode(pBlock)
+	require.NoError(err)
+}
diff --git a/state/metrics.go b/state/metrics.go
new file mode 100644
index 0000000000..aa166e901d
--- /dev/null
+++ b/state/metrics.go
@@ -0,0 +1,31 @@
+package state
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/metric"
+)
+
+var meter = otel.Meter("state")
+
+func WithMetrics(ca *CoreAccessor) {
+	pfbCounter, _ := meter.Int64ObservableCounter(
+		"pfb_count",
+		metric.WithDescription("Total count of submitted PayForBlob transactions"),
+	)
+	// a timestamp is a point-in-time value, so expose it as a gauge rather than a counter
+	lastPfbTimestamp, _ := meter.Int64ObservableGauge(
+		"last_pfb_timestamp",
+		metric.WithDescription("Timestamp of the last submitted PayForBlob transaction"),
+	)
+
+	callback := func(ctx context.Context, observer metric.Observer) error {
+		observer.ObserveInt64(pfbCounter, ca.PayForBlobCount())
+		observer.ObserveInt64(lastPfbTimestamp, ca.LastPayForBlob())
+		return nil
+	}
+	_, err := meter.RegisterCallback(callback, pfbCounter, lastPfbTimestamp)
+	if err != nil {
+		panic(err)
+	}
+}
diff --git a/state/state.go b/state/state.go
new file mode 100644
index 0000000000..d55bb6901c
--- /dev/null
+++ b/state/state.go
@@ -0,0 +1,58 @@
+package state
+
+import (
+	"fmt"
+	"strings"
+
+	"cosmossdk.io/math"
+	sdk "github.com/cosmos/cosmos-sdk/types"
+	coretypes "github.com/tendermint/tendermint/types"
+)
+
+// Balance is an alias to the Coin type from Cosmos-SDK.
+type Balance = sdk.Coin
+
+// Tx is an alias to the Tx type from celestia-core.
+type Tx = coretypes.Tx
+
+// TxResponse is an alias to the TxResponse type from Cosmos-SDK.
+type TxResponse = sdk.TxResponse
+
+// Address is an alias to the Address type from Cosmos-SDK. It is embedded into a struct to provide
+// a non-interface type for JSON serialization.
+type Address struct {
+	sdk.Address
+}
+
+// ValAddress is an alias to the ValAddress type from Cosmos-SDK.
+type ValAddress = sdk.ValAddress
+
+// AccAddress is an alias to the AccAddress type from Cosmos-SDK.
+type AccAddress = sdk.AccAddress
+
+// Int is an alias to the Int type from Cosmos-SDK.
+type Int = math.Int
+
+func (a *Address) UnmarshalJSON(data []byte) error {
+	// To convert the string back to a concrete type, we have to determine the correct implementation
+	addrString := strings.Trim(string(data), "\"")
+	addr, err := sdk.AccAddressFromBech32(addrString)
+	if err != nil {
+		// fall back to checking whether it is a valid validator address
+		valAddr, err := sdk.ValAddressFromBech32(addrString)
+		if err != nil {
+			return fmt.Errorf("address must be a valid account or validator address: %w", err)
+		}
+		a.Address = valAddr
+		return nil
+	}
+
+	a.Address = addr
+	return nil
+}
+
+func (a Address) MarshalJSON() ([]byte, error) {
+	// The address is marshaled into a simple string value
+	return []byte("\"" + a.Address.String() + "\""), nil
+}