diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index e7e610edf8..f9ac165e7a 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -7,6 +7,6 @@ # global owners are only requested if there isn't a more specific # codeowner specified below. For this reason, the global codeowners # are often repeated in package-level definitions. -* @renaynay @Wondertan @vgonkivs @distractedm1nd @walldiss +* @renaynay @Wondertan @vgonkivs @distractedm1nd @walldiss @ramin docs/adr @adlerjohn @liamsi diff --git a/.github/dependabot.yml b/.github/dependabot.yml index e2becce196..edcd86e11d 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -21,6 +21,9 @@ updates: - renaynay labels: - kind:deps + ignore: + - dependency-name: "*otel*" + update-types: ["version-update:semver-patch"] groups: otel: patterns: diff --git a/.github/workflows/ci_release.yml b/.github/workflows/ci_release.yml index 25970fdb11..73572e4cef 100644 --- a/.github/workflows/ci_release.yml +++ b/.github/workflows/ci_release.yml @@ -118,7 +118,7 @@ jobs: - run: git fetch --force --tags - - uses: actions/setup-go@v4 + - uses: actions/setup-go@v5 with: go-version: ${{ needs.setup.outputs.go-version }} @@ -138,11 +138,3 @@ jobs: env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} GPG_FINGERPRINT: ${{ steps.import_gpg.outputs.fingerprint }} - - # TODO: permission issue, but not worth fixing as this should be refactored - # into the celestiaorg/.github repo, at which point any permission issues will - # be resolved. - # - # docker: - # needs: [release] - # uses: ./.github/workflows/docker-build.yml diff --git a/.github/workflows/github_pages.yml b/.github/workflows/github_pages.yml new file mode 100644 index 0000000000..b212c2d885 --- /dev/null +++ b/.github/workflows/github_pages.yml @@ -0,0 +1,48 @@ +name: github-pages + +on: + push: + branches: + - main + paths: + - specs/** + pull_request: + paths: + - specs/** + workflow_dispatch: + +jobs: + deploy: + runs-on: ubuntu-latest + permissions: + contents: write + pull-requests: write + steps: + - uses: actions/checkout@v4 + + - name: Setup mdBook + uses: peaceiris/actions-mdbook@v1 + with: + mdbook-version: "latest" + + - name: Build book + run: mdbook build specs + + - name: Deploy main + if: github.event_name == 'push' || github.event_name == 'workflow_dispatch' + uses: peaceiris/actions-gh-pages@v3 + with: + github_token: ${{ secrets.GITHUB_TOKEN }} + publish_dir: ./specs/book + # keep_files is to prevent PR preview files from being overwritten. + # If we need to overwrite such files, trigger this workflow manually. + keep_files: ${{ github.event_name != 'workflow_dispatch' }} + + - name: Deploy PR preview + # Only run this job if the PR was created from a branch on celestiaorg/celestia-node + # because this job will fail for branches from forks. 
+ # https://github.com/celestiaorg/celestia-app/issues/1506 + if: github.event_name == 'pull_request' && github.event.pull_request.head.repo.full_name == github.repository + uses: rossjrw/pr-preview-action@v1 + with: + source-dir: ./specs/book diff --git a/.github/workflows/go-ci.yml b/.github/workflows/go-ci.yml index 056e4816bf..0098c3d18a 100644 --- a/.github/workflows/go-ci.yml +++ b/.github/workflows/go-ci.yml @@ -14,6 +14,7 @@ concurrency: jobs: setup: + name: Setup runs-on: ubuntu-latest outputs: debug: ${{ steps.debug.outputs.debug }} @@ -35,12 +36,12 @@ jobs: steps: - uses: actions/checkout@v4 - - uses: actions/setup-go@v4 + - uses: actions/setup-go@v5 with: go-version: ${{ inputs.go-version }} - name: golangci-lint - uses: golangci/golangci-lint-action@v3.7.0 + uses: golangci/golangci-lint-action@v4.0.0 with: args: --timeout 10m version: v1.55 @@ -55,7 +56,7 @@ jobs: steps: - uses: actions/checkout@v4 - - uses: actions/setup-go@v4 + - uses: actions/setup-go@v5 with: go-version: ${{ inputs.go-version }} @@ -70,7 +71,7 @@ jobs: strategy: fail-fast: false matrix: - os: [ubuntu-latest, macos-latest] + os: [ubuntu-latest, macos-14] runs-on: ${{ matrix.os }} env: OS: ${{ matrix.os }} @@ -79,7 +80,7 @@ jobs: - uses: actions/checkout@v4 - name: set up go - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: go-version: ${{ inputs.go-version }} @@ -87,7 +88,7 @@ jobs: run: make test-unit ENABLE_VERBOSE=${{ needs.setup.outputs.debug }} - name: Upload unit test output - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 if: always() && needs.setup.outputs.debug == 'true' with: name: unit-test-output-${{ matrix.os }} @@ -97,44 +98,38 @@ jobs: retention-days: 5 - name: upload coverage - uses: codecov/codecov-action@v3.1.4 + uses: codecov/codecov-action@v4.0.2 with: env_vars: OS token: ${{ secrets.CODECOV_TOKEN }} file: ./coverage.txt name: coverage-${{ matrix.os }} - unit_race_test: - needs: [lint, go_mod_tidy_check] - name: Run Unit Tests with Race Detector - runs-on: ubuntu-latest + # @ramin - Temporarily removed while we figure out getting + # these unit tests consistently running on ubuntu-latest + # and then enabled for macos-latest. 
We aren't requiring + # unit_race_test to pass for PRs so let's remove and reintroduce + # once green + # + # unit_test_race: + # needs: [lint, go_mod_tidy_check] + # name: Unit Tests with Race Detector (ubuntu-latest) + # runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 + # steps: + # - uses: actions/checkout@v4 - - name: set up go - uses: actions/setup-go@v4 - with: - go-version: ${{ inputs.go-version }} + # - name: set up go + # uses: actions/setup-go@v5 + # with: + # go-version: ${{ inputs.go-version }} - - name: execute test run - run: make test-unit-race + # - name: execute test run + # run: make test-unit-race ENABLE_VERBOSE=${{ needs.setup.outputs.debug }} integration_test: + name: Integration Tests needs: [lint, go_mod_tidy_check] - name: Run Integration Tests - runs-on: ubuntu-latest - - steps: - - uses: actions/checkout@v4 - - - name: set up go - uses: actions/setup-go@v4 - with: - go-version: ${{ inputs.go-version }} - - - name: Swamp Tests - run: make test-swamp - - - name: Swamp Tests with Race Detector - run: make test-swamp-race + uses: ./.github/workflows/integration-tests.yml + with: + go-version: ${{ inputs.go-version }} diff --git a/.github/workflows/integration-tests.yml b/.github/workflows/integration-tests.yml new file mode 100644 index 0000000000..cc1196fccf --- /dev/null +++ b/.github/workflows/integration-tests.yml @@ -0,0 +1,130 @@ +name: Integration Tests + +on: + workflow_call: + inputs: + go-version: + description: 'Go version' + required: true + type: string + +jobs: + api_tests: + name: Integration Tests API + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v4 + + - name: set up go + uses: actions/setup-go@v5 + with: + go-version: ${{ inputs.go-version }} + + - name: run API tests + run: make test-integration TAGS=api + + blob_tests: + name: Integration Tests Blob + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v4 + + - name: set up go + uses: actions/setup-go@v5 + with: + go-version: ${{ inputs.go-version }} + + - name: run blob tests + run: make test-integration TAGS=blob + + da_tests: + name: Integration Tests DA + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v4 + + - name: set up go + uses: actions/setup-go@v5 + with: + go-version: ${{ inputs.go-version }} + + - name: run da tests + run: make test-integration SHORT=true TAGS=da + + fraud_tests: + name: Integration Tests Fraud + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v4 + + - name: set up go + uses: actions/setup-go@v5 + with: + go-version: ${{ inputs.go-version }} + + - name: run fraud tests + run: make test-integration TAGS=fraud + + nd_tests: + name: Integration Tests ND + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v4 + + - name: set up go + uses: actions/setup-go@v5 + with: + go-version: ${{ inputs.go-version }} + + - name: run nd tests + run: make test-integration TAGS=nd + + p2p_tests: + name: Integration Tests p2p + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v4 + + - name: set up go + uses: actions/setup-go@v5 + with: + go-version: ${{ inputs.go-version }} + + - name: run p2p tests + run: make test-integration TAGS=p2p + + reconstruction_tests: + name: Integration Tests Reconstruction + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v4 + + - name: set up go + uses: actions/setup-go@v5 + with: + go-version: ${{ inputs.go-version }} + + - name: run reconstruction tests + run: make test-integration SHORT=true TAGS=reconstruction + + sync_tests: + name: Integration Tests Sync + 
runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v4 + + - name: set up go + uses: actions/setup-go@v5 + with: + go-version: ${{ inputs.go-version }} + + - name: run sync tests + run: make test-integration SHORT=true TAGS=sync diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml index d1088e5f33..595effed1f 100644 --- a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -7,7 +7,7 @@ jobs: stale: runs-on: ubuntu-latest steps: - - uses: actions/stale@v8 + - uses: actions/stale@v9 with: repo-token: ${{ secrets.GITHUB_TOKEN }} days-before-stale: 60 diff --git a/.goreleaser.yaml b/.goreleaser.yaml index b229b4c348..9a3991ecae 100644 --- a/.goreleaser.yaml +++ b/.goreleaser.yaml @@ -65,3 +65,5 @@ changelog: exclude: - "^docs:" - "^test:" +git: + prerelease_suffix: "-" diff --git a/Dockerfile b/Dockerfile index a3ac41f7aa..01fccafe2e 100644 --- a/Dockerfile +++ b/Dockerfile @@ -25,7 +25,7 @@ RUN uname -a &&\ CGO_ENABLED=${CGO_ENABLED} GOOS=${TARGETOS} GOARCH=${TARGETARCH} \ make build && make cel-key -FROM docker.io/alpine:3.18.4 +FROM docker.io/alpine:3.19.1 # Read here why UID 10001: https://github.com/hexops/dockerfile/blob/main/README.md#do-not-use-a-uid-below-10000 ARG UID=10001 diff --git a/Makefile b/Makefile index 1133db0533..a43c917345 100644 --- a/Makefile +++ b/Makefile @@ -3,6 +3,8 @@ PROJECTNAME=$(shell basename "$(PWD)") DIR_FULLPATH=$(shell pwd) versioningPath := "github.com/celestiaorg/celestia-node/nodebuilder/node" LDFLAGS=-ldflags="-X '$(versioningPath).buildTime=$(shell date)' -X '$(versioningPath).lastCommit=$(shell git rev-parse HEAD)' -X '$(versioningPath).semanticVersion=$(shell git describe --tags --dirty=-dev 2>/dev/null || git rev-parse --abbrev-ref HEAD)'" +TAGS=integration +SHORT= ifeq (${PREFIX},) PREFIX := /usr/local endif @@ -13,6 +15,11 @@ else VERBOSE = LOG_AND_FILTER = endif +ifeq ($(SHORT),true) + INTEGRATION_RUN_LENGTH = -short +else + INTEGRATION_RUN_LENGTH = +endif ## help: Get more info on make commands. help: Makefile @echo " Choose a command run in "$(PROJECTNAME)":" @@ -31,6 +38,12 @@ build: @go build -o build/ ${LDFLAGS} ./cmd/celestia .PHONY: build +## build-jemalloc: Build celestia-node binary with jemalloc allocator for BadgerDB instead of Go's native one +build-jemalloc: jemalloc + @echo "--> Building Celestia with jemalloc" + @go build -o build/ ${LDFLAGS} -tags jemalloc ./cmd/celestia +.PHONY: build-jemalloc + ## clean: Clean up celestia-node binary. clean: @echo "--> Cleaning up ./build" @@ -112,28 +125,20 @@ test-unit: ## test-unit-race: Running unit tests with data race detector test-unit-race: @echo "--> Running unit tests with data race detector" - @go test -race `go list ./... | grep -v nodebuilder/tests` + @go test $(VERBOSE) -race -covermode=atomic -coverprofile=coverage.txt `go list ./... | grep -v nodebuilder/tests` $(LOG_AND_FILTER) .PHONY: test-unit-race -## test-swamp: Running swamp tests located in nodebuilder/tests -test-swamp: - @echo "--> Running swamp tests" - @go test ./nodebuilder/tests -.PHONY: test-swamp - -## test-swamp-race: Running swamp tests with data race detector located in node/tests -test-swamp-race: - @echo "--> Running swamp tests with data race detector" - @go test -race ./nodebuilder/tests -.PHONY: test-swamp-race - -## test: Running both unit and swamp tests -test: - @echo "--> Running all tests without data race detector" - @go test ./... - @echo "--> Running all tests with data race detector" - @go test -race ./... 
-.PHONY: test +## test-integration: Running /integration tests located in nodebuilder/tests +test-integration: + @echo "--> Running integration tests $(VERBOSE) -tags=$(TAGS) $(INTEGRATION_RUN_LENGTH)" + @go test $(VERBOSE) -tags=$(TAGS) $(INTEGRATION_RUN_LENGTH) ./nodebuilder/tests +.PHONY: test-integration + +## test-integration-race: Running integration tests with data race detector located in nodebuilder/tests +test-integration-race: + @echo "--> Running integration tests with data race detector -tags=$(TAGS)" + @go test -race -tags=$(TAGS) ./nodebuilder/tests +.PHONY: test-integration-race ## benchmark: Running all benchmarks benchmark: @@ -158,11 +163,10 @@ pb-gen: done; .PHONY: pb-gen - ## openrpc-gen: Generate OpenRPC spec for Celestia-Node's RPC api openrpc-gen: @echo "--> Generating OpenRPC spec" - @go run ./cmd/docgen fraud header state share das p2p node blob + @go run ./cmd/docgen fraud header state share das p2p node blob da .PHONY: openrpc-gen ## lint-imports: Lint only Go imports. @@ -213,3 +217,29 @@ goreleaser-build: goreleaser-release: goreleaser release --clean --fail-fast --skip-publish .PHONY: goreleaser-release + +# Copied from https://github.com/dgraph-io/badger/blob/main/Makefile +USER_ID = $(shell id -u) +HAS_JEMALLOC = $(shell test -f /usr/local/lib/libjemalloc.a && echo "jemalloc") +JEMALLOC_URL = "https://github.com/jemalloc/jemalloc/releases/download/5.2.1/jemalloc-5.2.1.tar.bz2" + +## jemalloc: installs jemalloc allocator +jemalloc: + @if [ -z "$(HAS_JEMALLOC)" ] ; then \ mkdir -p /tmp/jemalloc-temp && cd /tmp/jemalloc-temp ; \ echo "Downloading jemalloc..." ; \ curl -s -L ${JEMALLOC_URL} -o jemalloc.tar.bz2 ; \ tar xjf ./jemalloc.tar.bz2 ; \ cd jemalloc-5.2.1 ; \ ./configure --with-jemalloc-prefix='je_' --with-malloc-conf='background_thread:true,metadata_thp:auto'; \ make ; \ if [ "$(USER_ID)" -eq "0" ]; then \ make install ; \ else \ echo "==== Need sudo access to install jemalloc" ; \ sudo make install ; \ fi ; \ cd /tmp ; \ rm -rf /tmp/jemalloc-temp ; \ fi +.PHONY: jemalloc diff --git a/api/docgen/examples.go b/api/docgen/examples.go index b873e7e050..83d25da6df 100644 --- a/api/docgen/examples.go +++ b/api/docgen/examples.go @@ -3,6 +3,7 @@ package docgen import ( _ "embed" "encoding/json" + "errors" "fmt" "reflect" @@ -51,6 +52,7 @@ var ExampleValues = map[reflect.Type]interface{}{ reflect.TypeOf(42): 42, reflect.TypeOf(byte(7)): byte(7), reflect.TypeOf(float64(42)): float64(42), + reflect.TypeOf(blob.GasPrice(0)): blob.GasPrice(0.002), reflect.TypeOf(true): true, reflect.TypeOf([]byte{}): []byte("byte array"), reflect.TypeOf(node.Full): node.Full, @@ -65,7 +67,7 @@ var ExampleValues = map[reflect.Type]interface{}{ Shares: []*byzantine.ShareWithProof{}, }, ), - reflect.TypeOf((*error)(nil)).Elem(): fmt.Errorf("error"), + reflect.TypeOf((*error)(nil)).Elem(): errors.New("error"), } func init() { diff --git a/api/gateway/bindings.go b/api/gateway/bindings.go new file mode 100644 index 0000000000..c01bd2da47 --- /dev/null +++ b/api/gateway/bindings.go @@ -0,0 +1,73 @@ +package gateway + +import ( + "fmt" + "net/http" +) + +func (h *Handler) RegisterEndpoints(rpc *Server) { + // state endpoints + rpc.RegisterHandlerFunc( + fmt.Sprintf("%s/{%s}", balanceEndpoint, addrKey), + h.handleBalanceRequest, + http.MethodGet, + ) + + rpc.RegisterHandlerFunc( + submitTxEndpoint, + h.handleSubmitTx, + http.MethodPost, + ) + + rpc.RegisterHandlerFunc( + healthEndpoint, + h.handleHealthRequest, + http.MethodGet, + ) + + // share endpoints + 
rpc.RegisterHandlerFunc( + fmt.Sprintf( + "%s/{%s}/height/{%s}", + namespacedSharesEndpoint, + namespaceKey, + heightKey, + ), + h.handleSharesByNamespaceRequest, + http.MethodGet, + ) + + rpc.RegisterHandlerFunc( + fmt.Sprintf("%s/{%s}", namespacedSharesEndpoint, namespaceKey), + h.handleSharesByNamespaceRequest, + http.MethodGet, + ) + + rpc.RegisterHandlerFunc( + fmt.Sprintf("%s/{%s}/height/{%s}", namespacedDataEndpoint, namespaceKey, heightKey), + h.handleDataByNamespaceRequest, + http.MethodGet, + ) + + rpc.RegisterHandlerFunc( + fmt.Sprintf("%s/{%s}", namespacedDataEndpoint, namespaceKey), + h.handleDataByNamespaceRequest, + http.MethodGet, + ) + + // DAS endpoints + rpc.RegisterHandlerFunc( + fmt.Sprintf("%s/{%s}", heightAvailabilityEndpoint, heightKey), + h.handleHeightAvailabilityRequest, + http.MethodGet, + ) + + // header endpoints + rpc.RegisterHandlerFunc( + fmt.Sprintf("%s/{%s}", headerByHeightEndpoint, heightKey), + h.handleHeaderRequest, + http.MethodGet, + ) + + rpc.RegisterHandlerFunc(headEndpoint, h.handleHeadRequest, http.MethodGet) +} diff --git a/api/gateway/bindings_test.go b/api/gateway/bindings_test.go new file mode 100644 index 0000000000..5d27d5e4c7 --- /dev/null +++ b/api/gateway/bindings_test.go @@ -0,0 +1,119 @@ +package gateway + +import ( + "fmt" + "net/http" + "testing" + + "github.com/gorilla/mux" + "github.com/stretchr/testify/require" +) + +func TestRegisterEndpoints(t *testing.T) { + handler := &Handler{} + rpc := NewServer("localhost", "6969") + + handler.RegisterEndpoints(rpc) + + testCases := []struct { + name string + path string + method string + expected bool + }{ + { + name: "Get balance endpoint", + path: fmt.Sprintf("%s/{%s}", balanceEndpoint, addrKey), + method: http.MethodGet, + expected: true, + }, + { + name: "Submit transaction endpoint", + path: submitTxEndpoint, + method: http.MethodPost, + expected: true, + }, + { + name: "Get namespaced shares by height endpoint", + path: fmt.Sprintf("%s/{%s}/height/{%s}", namespacedSharesEndpoint, namespaceKey, heightKey), + method: http.MethodGet, + expected: true, + }, + { + name: "Get namespaced shares endpoint", + path: fmt.Sprintf("%s/{%s}", namespacedSharesEndpoint, namespaceKey), + method: http.MethodGet, + expected: true, + }, + { + name: "Get namespaced data by height endpoint", + path: fmt.Sprintf("%s/{%s}/height/{%s}", namespacedDataEndpoint, namespaceKey, heightKey), + method: http.MethodGet, + expected: true, + }, + { + name: "Get namespaced data endpoint", + path: fmt.Sprintf("%s/{%s}", namespacedDataEndpoint, namespaceKey), + method: http.MethodGet, + expected: true, + }, + { + name: "Get health endpoint", + path: "/status/health", + method: http.MethodGet, + expected: true, + }, + + // Going forward, we can add previously deprecated and since + // removed endpoints here to ensure we don't accidentally re-enable + // them in the future and accidentally expand surface area + { + name: "example totally bogus endpoint", + path: fmt.Sprintf("/wutang/{%s}/%s", "chambers", "36"), + method: http.MethodGet, + expected: false, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + require.Equal( + t, + tc.expected, + hasEndpointRegistered(rpc.Router(), tc.path, tc.method), + "Endpoint registration mismatch for: %s %s %s", tc.name, tc.method, tc.path) + }) + } +} + +func hasEndpointRegistered(router *mux.Router, path string, method string) bool { + var registered bool + err := router.Walk(func(route *mux.Route, router *mux.Router, ancestors []*mux.Route) error { + 
template, err := route.GetPathTemplate() + if err != nil { + return err + } + + if template == path { + methods, err := route.GetMethods() + if err != nil { + return err + } + + for _, m := range methods { + if m == method { + registered = true + return nil + } + } + } + return nil + }) + + if err != nil { + fmt.Println("Error walking through routes:", err) + return false + } + + return registered +} diff --git a/api/gateway/config.go b/api/gateway/config.go index f7d8bb44b1..0485da486e 100644 --- a/api/gateway/config.go +++ b/api/gateway/config.go @@ -11,14 +11,6 @@ type Config struct { Port string } -func DefaultConfig() Config { - return Config{ - Address: "0.0.0.0", - // do NOT expose the same port as celestia-core by default so that both can run on the same machine - Port: "26658", - } -} - func (cfg *Config) Validate() error { if ip := net.ParseIP(cfg.Address); ip == nil { return fmt.Errorf("service/gateway: invalid listen address format: %s", cfg.Address) diff --git a/api/gateway/endpoints.go b/api/gateway/endpoints.go deleted file mode 100644 index 104d01b053..0000000000 --- a/api/gateway/endpoints.go +++ /dev/null @@ -1,32 +0,0 @@ -package gateway - -import ( - "fmt" - "net/http" -) - -func (h *Handler) RegisterEndpoints(rpc *Server) { - // state endpoints - rpc.RegisterHandlerFunc(fmt.Sprintf("%s/{%s}", balanceEndpoint, addrKey), h.handleBalanceRequest, - http.MethodGet) - rpc.RegisterHandlerFunc(submitTxEndpoint, h.handleSubmitTx, http.MethodPost) - - // share endpoints - rpc.RegisterHandlerFunc(fmt.Sprintf("%s/{%s}/height/{%s}", namespacedSharesEndpoint, namespaceKey, heightKey), - h.handleSharesByNamespaceRequest, http.MethodGet) - rpc.RegisterHandlerFunc(fmt.Sprintf("%s/{%s}", namespacedSharesEndpoint, namespaceKey), - h.handleSharesByNamespaceRequest, http.MethodGet) - rpc.RegisterHandlerFunc(fmt.Sprintf("%s/{%s}/height/{%s}", namespacedDataEndpoint, namespaceKey, heightKey), - h.handleDataByNamespaceRequest, http.MethodGet) - rpc.RegisterHandlerFunc(fmt.Sprintf("%s/{%s}", namespacedDataEndpoint, namespaceKey), - h.handleDataByNamespaceRequest, http.MethodGet) - - // DAS endpoints - rpc.RegisterHandlerFunc(fmt.Sprintf("%s/{%s}", heightAvailabilityEndpoint, heightKey), - h.handleHeightAvailabilityRequest, http.MethodGet) - - // header endpoints - rpc.RegisterHandlerFunc(fmt.Sprintf("%s/{%s}", headerByHeightEndpoint, heightKey), h.handleHeaderRequest, - http.MethodGet) - rpc.RegisterHandlerFunc(headEndpoint, h.handleHeadRequest, http.MethodGet) -} diff --git a/api/gateway/health.go b/api/gateway/health.go new file mode 100644 index 0000000000..2a96e0200e --- /dev/null +++ b/api/gateway/health.go @@ -0,0 +1,18 @@ +package gateway + +import ( + "net/http" +) + +const ( + healthEndpoint = "/status/health" +) + +func (h *Handler) handleHealthRequest(w http.ResponseWriter, _ *http.Request) { + _, err := w.Write([]byte("ok")) + if err != nil { + log.Errorw("serving request", "endpoint", healthEndpoint, "err", err) + writeError(w, http.StatusBadGateway, healthEndpoint, err) + return + } +} diff --git a/api/gateway/middleware.go b/api/gateway/middleware.go index 2c88b34185..4b669113dd 100644 --- a/api/gateway/middleware.go +++ b/api/gateway/middleware.go @@ -2,13 +2,8 @@ package gateway import ( "context" - "errors" "net/http" "time" - - "github.com/gorilla/mux" - - "github.com/celestiaorg/celestia-node/nodebuilder/state" ) const timeout = time.Minute @@ -16,7 +11,6 @@ const timeout = time.Minute func (h *Handler) RegisterMiddleware(srv *Server) { srv.RegisterMiddleware( 
setContentType, - checkPostDisabled(h.state), wrapRequestContext, enableCors, ) @@ -36,20 +30,6 @@ func setContentType(next http.Handler) http.Handler { }) } -// checkPostDisabled ensures that context was canceled and prohibit POST requests. -func checkPostDisabled(state state.Module) mux.MiddlewareFunc { - return func(next http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - // check if state service was halted and deny the transaction - if r.Method == http.MethodPost && state.IsStopped(r.Context()) { - writeError(w, http.StatusMethodNotAllowed, r.URL.Path, errors.New("not possible to submit data")) - return - } - next.ServeHTTP(w, r) - }) - } -} - // wrapRequestContext ensures we implement a deadline on serving requests // via the gateway server-side to prevent context leaks. func wrapRequestContext(next http.Handler) http.Handler { diff --git a/api/gateway/server.go b/api/gateway/server.go index 181bfdfe55..7eab7c7bf9 100644 --- a/api/gateway/server.go +++ b/api/gateway/server.go @@ -36,6 +36,10 @@ func NewServer(address, port string) *Server { return server } +func (s *Server) Router() *mux.Router { + return s.srvMux +} + // Start starts the gateway Server, listening on the given address. func (s *Server) Start(context.Context) error { couldStart := s.started.CompareAndSwap(false, true) diff --git a/api/gateway/util.go b/api/gateway/util.go index bffd7ebc88..d3739f9e9c 100644 --- a/api/gateway/util.go +++ b/api/gateway/util.go @@ -1,7 +1,6 @@ package gateway import ( - "encoding/json" "net/http" ) @@ -9,13 +8,12 @@ func writeError(w http.ResponseWriter, statusCode int, endpoint string, err erro log.Debugw("serving request", "endpoint", endpoint, "err", err) w.WriteHeader(statusCode) - errBody, jerr := json.Marshal(err.Error()) - if jerr != nil { - log.Errorw("serializing error", "endpoint", endpoint, "err", jerr) - return - } - _, werr := w.Write(errBody) - if werr != nil { - log.Errorw("writing error response", "endpoint", endpoint, "err", werr) + + errorMessage := err.Error() // Get the error message as a string + errorBytes := []byte(errorMessage) + + _, err = w.Write(errorBytes) + if err != nil { + log.Errorw("writing error response", "endpoint", endpoint, "err", err) } } diff --git a/api/gateway/util_test.go b/api/gateway/util_test.go new file mode 100644 index 0000000000..d41b0918d2 --- /dev/null +++ b/api/gateway/util_test.go @@ -0,0 +1,24 @@ +package gateway + +import ( + "errors" + "net/http" + "net/http/httptest" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestWriteError(t *testing.T) { + t.Run("writeError", func(t *testing.T) { + // Create a mock HTTP response writer + w := httptest.NewRecorder() + + testErr := errors.New("test error") + + writeError(w, http.StatusInternalServerError, "/api/endpoint", testErr) + assert.Equal(t, http.StatusInternalServerError, w.Code) + responseBody := w.Body.Bytes() + assert.Equal(t, testErr.Error(), string(responseBody)) + }) +} diff --git a/api/rpc/client/client.go b/api/rpc/client/client.go index 9cd5fe08e3..1d8142048b 100644 --- a/api/rpc/client/client.go +++ b/api/rpc/client/client.go @@ -9,6 +9,7 @@ import ( "github.com/celestiaorg/celestia-node/api/rpc/perms" "github.com/celestiaorg/celestia-node/nodebuilder/blob" + "github.com/celestiaorg/celestia-node/nodebuilder/da" "github.com/celestiaorg/celestia-node/nodebuilder/das" "github.com/celestiaorg/celestia-node/nodebuilder/fraud" "github.com/celestiaorg/celestia-node/nodebuilder/header" @@ -33,6 +34,7 @@ type Client 
struct { P2P p2p.API Node node.API Blob blob.API + DA da.API closer multiClientCloser } @@ -91,5 +93,6 @@ func moduleMap(client *Client) map[string]interface{} { "p2p": &client.P2P.Internal, "node": &client.Node.Internal, "blob": &client.Blob.Internal, + "da": &client.DA.Internal, } } diff --git a/api/rpc/server.go b/api/rpc/server.go index 3357140e68..f247682083 100644 --- a/api/rpc/server.go +++ b/api/rpc/server.go @@ -20,16 +20,17 @@ import ( var log = logging.Logger("rpc") type Server struct { - srv *http.Server - rpc *jsonrpc.RPCServer - listener net.Listener + srv *http.Server + rpc *jsonrpc.RPCServer + listener net.Listener + authDisabled bool started atomic.Bool auth jwt.Signer } -func NewServer(address, port string, secret jwt.Signer) *Server { +func NewServer(address, port string, authDisabled bool, secret jwt.Signer) *Server { rpc := jsonrpc.NewServer() srv := &Server{ rpc: rpc, @@ -38,7 +39,8 @@ func NewServer(address, port string, secret jwt.Signer) *Server { // the amount of time allowed to read request headers. set to the default 2 seconds ReadHeaderTimeout: 2 * time.Second, }, - auth: secret, + auth: secret, + authDisabled: authDisabled, } srv.srv.Handler = &auth.Handler{ Verify: srv.verifyAuth, @@ -51,20 +53,22 @@ func NewServer(address, port string, secret jwt.Signer) *Server { // reached if a token is provided in the header of the request, otherwise only // methods with `read` permissions are accessible. func (s *Server) verifyAuth(_ context.Context, token string) ([]auth.Permission, error) { + if s.authDisabled { + return perms.AllPerms, nil + } return authtoken.ExtractSignedPermissions(s.auth, token) } // RegisterService registers a service onto the RPC server. All methods on the service will then be // exposed over the RPC. -func (s *Server) RegisterService(namespace string, service interface{}) { - s.rpc.Register(namespace, service) -} +func (s *Server) RegisterService(namespace string, service interface{}, out interface{}) { + if s.authDisabled { + s.rpc.Register(namespace, service) + return + } -// RegisterAuthedService registers a service onto the RPC server. All methods on the service will -// then be exposed over the RPC. -func (s *Server) RegisterAuthedService(namespace string, service interface{}, out interface{}) { auth.PermissionedProxy(perms.AllPerms, perms.DefaultPerms, service, getInternalStruct(out)) - s.RegisterService(namespace, out) + s.rpc.Register(namespace, out) } func getInternalStruct(api interface{}) interface{} { diff --git a/api/rpc_test.go b/api/rpc_test.go index 9ff35bf1e4..ff38a42045 100644 --- a/api/rpc_test.go +++ b/api/rpc_test.go @@ -22,6 +22,8 @@ import ( "github.com/celestiaorg/celestia-node/nodebuilder" "github.com/celestiaorg/celestia-node/nodebuilder/blob" blobMock "github.com/celestiaorg/celestia-node/nodebuilder/blob/mocks" + "github.com/celestiaorg/celestia-node/nodebuilder/da" + daMock "github.com/celestiaorg/celestia-node/nodebuilder/da/mocks" "github.com/celestiaorg/celestia-node/nodebuilder/das" dasMock "github.com/celestiaorg/celestia-node/nodebuilder/das/mocks" "github.com/celestiaorg/celestia-node/nodebuilder/fraud" @@ -91,6 +93,7 @@ type api struct { Node node.Module P2P p2p.Module Blob blob.Module + DA da.Module } func TestModulesImplementFullAPI(t *testing.T) { @@ -186,12 +189,15 @@ func TestAuthedRPC(t *testing.T) { // 2. 
Test method with write-level permissions expectedResp := &state.TxResponse{} if tt.perm > 2 { - server.State.EXPECT().SubmitTx(gomock.Any(), gomock.Any()).Return(expectedResp, nil) - txResp, err := rpcClient.State.SubmitTx(ctx, []byte{}) + server.State.EXPECT().Delegate(gomock.Any(), gomock.Any(), + gomock.Any(), gomock.Any(), gomock.Any()).Return(expectedResp, nil) + txResp, err := rpcClient.State.Delegate(ctx, + state.ValAddress{}, state.Int{}, state.Int{}, 0) require.NoError(t, err) require.Equal(t, expectedResp, txResp) } else { - _, err := rpcClient.State.SubmitTx(ctx, []byte{}) + _, err := rpcClient.State.Delegate(ctx, + state.ValAddress{}, state.Int{}, state.Int{}, 0) require.Error(t, err) require.ErrorContains(t, err, "missing permission") } @@ -294,19 +300,21 @@ func setupNodeWithAuthedRPC(t *testing.T, auth jwt.Signer) (*nodebuilder.Node, * p2pMock.NewMockModule(ctrl), nodeMock.NewMockModule(ctrl), blobMock.NewMockModule(ctrl), + daMock.NewMockModule(ctrl), } // given the behavior of fx.Invoke, this invoke will be called last as it is added at the root // level module. For further information, check the documentation on fx.Invoke. invokeRPC := fx.Invoke(func(srv *rpc.Server) { - srv.RegisterAuthedService("state", mockAPI.State, &statemod.API{}) - srv.RegisterAuthedService("share", mockAPI.Share, &share.API{}) - srv.RegisterAuthedService("fraud", mockAPI.Fraud, &fraud.API{}) - srv.RegisterAuthedService("header", mockAPI.Header, &header.API{}) - srv.RegisterAuthedService("das", mockAPI.Das, &das.API{}) - srv.RegisterAuthedService("p2p", mockAPI.P2P, &p2p.API{}) - srv.RegisterAuthedService("node", mockAPI.Node, &node.API{}) - srv.RegisterAuthedService("blob", mockAPI.Blob, &blob.API{}) + srv.RegisterService("fraud", mockAPI.Fraud, &fraud.API{}) + srv.RegisterService("das", mockAPI.Das, &das.API{}) + srv.RegisterService("header", mockAPI.Header, &header.API{}) + srv.RegisterService("state", mockAPI.State, &statemod.API{}) + srv.RegisterService("share", mockAPI.Share, &share.API{}) + srv.RegisterService("p2p", mockAPI.P2P, &p2p.API{}) + srv.RegisterService("node", mockAPI.Node, &node.API{}) + srv.RegisterService("blob", mockAPI.Blob, &blob.API{}) + srv.RegisterService("da", mockAPI.DA, &da.API{}) }) // fx.Replace does not work here, but fx.Decorate does nd := nodebuilder.TestNode(t, node.Full, invokeRPC, fx.Decorate(func() (jwt.Signer, error) { @@ -331,4 +339,5 @@ type mockAPI struct { P2P *p2pMock.MockModule Node *nodeMock.MockModule Blob *blobMock.MockModule + DA *daMock.MockModule } diff --git a/blob/blob.go b/blob/blob.go index 3f7e92dd20..9843441dd2 100644 --- a/blob/blob.go +++ b/blob/blob.go @@ -35,6 +35,33 @@ type Proof []*nmt.Proof func (p Proof) Len() int { return len(p) } +func (p Proof) MarshalJSON() ([]byte, error) { + proofs := make([]string, 0, len(p)) + for _, proof := range p { + proofBytes, err := proof.MarshalJSON() + if err != nil { + return nil, err + } + proofs = append(proofs, string(proofBytes)) + } + return json.Marshal(proofs) +} + +func (p *Proof) UnmarshalJSON(b []byte) error { + var proofs []string + if err := json.Unmarshal(b, &proofs); err != nil { + return err + } + for _, proof := range proofs { + var nmtProof nmt.Proof + if err := nmtProof.UnmarshalJSON([]byte(proof)); err != nil { + return err + } + *p = append(*p, &nmtProof) + } + return nil +} + // equal is a temporary method that compares two proofs. // should be removed in BlobService V1. 
func (p Proof) equal(input Proof) error { @@ -142,8 +169,8 @@ func (b *Blob) UnmarshalJSON(data []byte) error { } // buildBlobsIfExist takes shares and tries building the Blobs from them. -// It will build blobs either until appShares will be empty or the first incomplete blob will appear, so in this -// specific case it will return all built blobs + remaining shares. +// It will build blobs either until appShares will be empty or the first incomplete blob will +// appear, so in this specific case it will return all built blobs + remaining shares. func buildBlobsIfExist(appShares []shares.Share) ([]*Blob, []shares.Share, error) { if len(appShares) == 0 { return nil, nil, errors.New("empty shares received") diff --git a/blob/service.go b/blob/service.go index 79e7dd7937..fc1d630e62 100644 --- a/blob/service.go +++ b/blob/service.go @@ -4,15 +4,24 @@ import ( "context" "errors" "fmt" + "math" "sync" - "cosmossdk.io/math" + sdkmath "cosmossdk.io/math" "github.com/cosmos/cosmos-sdk/types" + auth "github.com/cosmos/cosmos-sdk/x/auth/types" logging "github.com/ipfs/go-log/v2" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/trace" + "github.com/celestiaorg/celestia-app/pkg/appconsts" "github.com/celestiaorg/celestia-app/pkg/shares" + blobtypes "github.com/celestiaorg/celestia-app/x/blob/types" "github.com/celestiaorg/celestia-node/header" + "github.com/celestiaorg/celestia-node/libs/utils" "github.com/celestiaorg/celestia-node/share" ) @@ -20,14 +29,25 @@ var ( ErrBlobNotFound = errors.New("blob: not found") ErrInvalidProof = errors.New("blob: invalid proof") - log = logging.Logger("blob") + log = logging.Logger("blob") + tracer = otel.Tracer("blob/service") ) +// GasPrice represents the amount to be paid per gas unit. Fee is set by +// multiplying GasPrice by GasLimit, which is determined by the blob sizes. +type GasPrice float64 + +// DefaultGasPrice returns the default gas price, letting node automatically +// determine the Fee based on the passed blob sizes. +func DefaultGasPrice() GasPrice { + return -1.0 +} + // Submitter is an interface that allows submitting blobs to the celestia-core. It is used to // avoid a circular dependency between the blob and the state package, since the state package needs // the blob.Blob type for this signature. type Submitter interface { - SubmitPayForBlob(ctx context.Context, fee math.Int, gasLim uint64, blobs []*Blob) (*types.TxResponse, error) + SubmitPayForBlob(ctx context.Context, fee sdkmath.Int, gasLim uint64, blobs []*Blob) (*types.TxResponse, error) } type Service struct { @@ -66,15 +86,21 @@ func DefaultSubmitOptions() *SubmitOptions { } } -// Submit sends PFB transaction and reports the height in which it was included. +// Submit sends PFB transaction and reports the height at which it was included. // Allows sending multiple Blobs atomically synchronously. // Uses default wallet registered on the Node. // Handles gas estimation and fee calculation. 
-func (s *Service) Submit(ctx context.Context, blobs []*Blob, options *SubmitOptions) (uint64, error) { +func (s *Service) Submit(ctx context.Context, blobs []*Blob, gasPrice GasPrice) (uint64, error) { log.Debugw("submitting blobs", "amount", len(blobs)) - if options == nil { - options = DefaultSubmitOptions() + options := DefaultSubmitOptions() + if gasPrice >= 0 { + blobSizes := make([]uint32, len(blobs)) + for i, blob := range blobs { + blobSizes[i] = uint32(len(blob.Data)) + } + options.GasLimit = blobtypes.EstimateGas(blobSizes, appconsts.DefaultGasPerBlobByte, auth.DefaultTxSizeCostPerByte) + options.Fee = types.NewInt(int64(math.Ceil(float64(gasPrice) * float64(options.GasLimit)))).Int64() } resp, err := s.blobSubmitter.SubmitPayForBlob(ctx, types.NewInt(options.Fee), options.GasLimit, blobs) @@ -165,7 +191,11 @@ func (s *Service) Included( namespace share.Namespace, proof *Proof, com Commitment, -) (bool, error) { +) (_ bool, err error) { + ctx, span := tracer.Start(ctx, "included") + defer func() { + utils.SetStatusAndEnd(span, err) + }() // In the current implementation, LNs will have to download all shares to recompute the commitment. // To achieve 1. we need to modify Proof structure and to store all subtree roots, that were // involved in commitment creation and then call `merkle.HashFromByteSlices`(tendermint package). @@ -193,24 +223,47 @@ func (s *Service) getByCommitment( height uint64, namespace share.Namespace, commitment Commitment, -) (*Blob, *Proof, error) { +) (_ *Blob, _ *Proof, err error) { log.Infow("requesting blob", "height", height, "namespace", namespace.String()) - header, err := s.headerGetter(ctx, height) + ctx, span := tracer.Start(ctx, "get-by-commitment") + defer func() { + utils.SetStatusAndEnd(span, err) + }() + span.SetAttributes( + attribute.Int64("height", int64(height)), + attribute.String("commitment", string(commitment)), + ) + + getCtx, headerGetterSpan := tracer.Start(ctx, "header-getter") + + header, err := s.headerGetter(getCtx, height) if err != nil { + headerGetterSpan.SetStatus(codes.Error, err.Error()) return nil, nil, err } - namespacedShares, err := s.shareGetter.GetSharesByNamespace(ctx, header, namespace) + headerGetterSpan.SetStatus(codes.Ok, "") + headerGetterSpan.AddEvent("received eds", trace.WithAttributes( + attribute.Int64("eds-size", int64(len(header.DAH.RowRoots))))) + + getCtx, getSharesSpan := tracer.Start(ctx, "get-shares-by-namespace") + + namespacedShares, err := s.shareGetter.GetSharesByNamespace(getCtx, header, namespace) if err != nil { if errors.Is(err, share.ErrNotFound) { err = ErrBlobNotFound } + getSharesSpan.SetStatus(codes.Error, err.Error()) return nil, nil, err } + getSharesSpan.SetStatus(codes.Ok, "") + getSharesSpan.AddEvent("received shares", trace.WithAttributes( + attribute.Int64("eds-size", int64(len(header.DAH.RowRoots))))) + var ( rawShares = make([]shares.Share, 0) proofs = make(Proof, 0) @@ -240,6 +293,7 @@ func (s *Service) getByCommitment( } for _, b := range blobs { if b.Commitment.Equal(commitment) { + span.AddEvent("blob reconstructed") return b, &proofs, nil } // Falling under this flag means that the data from the last row @@ -276,7 +330,11 @@ func (s *Service) getBlobs( ctx context.Context, namespace share.Namespace, header *header.ExtendedHeader, -) ([]*Blob, error) { +) (_ []*Blob, err error) { + ctx, span := tracer.Start(ctx, "get-blobs") + defer func() { + utils.SetStatusAndEnd(span, err) + }() namespacedShares, err := s.shareGetter.GetSharesByNamespace(ctx, header, namespace) if err != 
nil { return nil, err diff --git a/blob/service_test.go b/blob/service_test.go index 6777084eb4..3e22f887af 100644 --- a/blob/service_test.go +++ b/blob/service_test.go @@ -414,7 +414,28 @@ func TestService_GetAllWithoutPadding(t *testing.T) { require.NoError(t, err) } -func createService(ctx context.Context, t *testing.T, blobs []*Blob) *Service { +// BenchmarkGetByCommitment-12 3139 380827 ns/op 701647 B/op 4990 allocs/op +func BenchmarkGetByCommitment(b *testing.B) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) + b.Cleanup(cancel) + appBlobs, err := blobtest.GenerateV0Blobs([]int{32, 32}, true) + require.NoError(b, err) + + blobs, err := convertBlobs(appBlobs...) + require.NoError(b, err) + + service := createService(ctx, b, blobs) + b.ResetTimer() + for i := 0; i < b.N; i++ { + b.ReportAllocs() + _, _, err = service.getByCommitment( + ctx, 1, blobs[1].Namespace(), blobs[1].Commitment, + ) + require.NoError(b, err) + } +} + +func createService(ctx context.Context, t testing.TB, blobs []*Blob) *Service { bs := ipld.NewMemBlockservice() batching := ds_sync.MutexWrap(ds.NewMapDatastore()) headerStore, err := store.NewStore[*header.ExtendedHeader](batching) diff --git a/cmd/auth.go b/cmd/auth.go index a637373242..6ffdab656e 100644 --- a/cmd/auth.go +++ b/cmd/auth.go @@ -27,7 +27,7 @@ func AuthCmd(fsets ...*flag.FlagSet) *cobra.Command { "the node has already been initialized and started.", RunE: func(cmd *cobra.Command, args []string) error { if len(args) != 1 { - return fmt.Errorf("must specify permissions") + return errors.New("must specify permissions") } permissions, err := convertToPerms(args[0]) if err != nil { diff --git a/cmd/cel-shed/header.go b/cmd/cel-shed/header.go index 8216f19698..379e8aac85 100644 --- a/cmd/cel-shed/header.go +++ b/cmd/cel-shed/header.go @@ -1,6 +1,7 @@ package main import ( + "errors" "fmt" "strconv" "strings" @@ -30,12 +31,12 @@ Custom store path is not supported yet.`, SilenceUsage: true, RunE: func(cmd *cobra.Command, args []string) error { if len(args) != 3 { - return fmt.Errorf("not enough arguments") + return errors.New("not enough arguments") } tp := node.ParseType(args[0]) if !tp.IsValid() { - return fmt.Errorf("invalid node-type") + return errors.New("invalid node-type") } network := args[1] diff --git a/cmd/celestia/bridge.go b/cmd/celestia/bridge.go deleted file mode 100644 index c0e2ab0d1a..0000000000 --- a/cmd/celestia/bridge.go +++ /dev/null @@ -1,47 +0,0 @@ -package main - -import ( - "github.com/spf13/cobra" - "github.com/spf13/pflag" - - cmdnode "github.com/celestiaorg/celestia-node/cmd" - "github.com/celestiaorg/celestia-node/nodebuilder/core" - "github.com/celestiaorg/celestia-node/nodebuilder/gateway" - "github.com/celestiaorg/celestia-node/nodebuilder/node" - "github.com/celestiaorg/celestia-node/nodebuilder/p2p" - "github.com/celestiaorg/celestia-node/nodebuilder/rpc" - "github.com/celestiaorg/celestia-node/nodebuilder/state" -) - -// NOTE: We should always ensure that the added Flags below are parsed somewhere, like in the -// PersistentPreRun func on parent command. 
- -func init() { - flags := []*pflag.FlagSet{ - cmdnode.NodeFlags(), - p2p.Flags(), - core.Flags(), - cmdnode.MiscFlags(), - rpc.Flags(), - gateway.Flags(), - state.Flags(), - } - - bridgeCmd.AddCommand( - cmdnode.Init(flags...), - cmdnode.Start(flags...), - cmdnode.AuthCmd(flags...), - cmdnode.ResetStore(flags...), - cmdnode.RemoveConfigCmd(flags...), - cmdnode.UpdateConfigCmd(flags...), - ) -} - -var bridgeCmd = &cobra.Command{ - Use: "bridge [subcommand]", - Args: cobra.NoArgs, - Short: "Manage your Bridge node", - PersistentPreRunE: func(cmd *cobra.Command, args []string) error { - return cmdnode.PersistentPreRunEnv(cmd, node.Bridge, args) - }, -} diff --git a/cmd/celestia/cmd_test.go b/cmd/celestia/cmd_test.go index 9c26489e14..94dd3625b8 100644 --- a/cmd/celestia/cmd_test.go +++ b/cmd/celestia/cmd_test.go @@ -33,7 +33,7 @@ func TestCompletionHelpString(t *testing.T) { } methods := reflect.VisibleFields(reflect.TypeOf(TestFields{})) for i, method := range methods { - require.Equal(t, testOutputs[i], parseSignatureForHelpstring(method)) + require.Equal(t, testOutputs[i], parseSignatureForHelpString(method)) } } @@ -129,7 +129,7 @@ func TestBridge(t *testing.T) { */ } -func parseSignatureForHelpstring(methodSig reflect.StructField) string { +func parseSignatureForHelpString(methodSig reflect.StructField) string { simplifiedSignature := "(" in, out := methodSig.Type.NumIn(), methodSig.Type.NumOut() for i := 1; i < in; i++ { diff --git a/cmd/celestia/full.go b/cmd/celestia/full.go deleted file mode 100644 index 8baff1080e..0000000000 --- a/cmd/celestia/full.go +++ /dev/null @@ -1,51 +0,0 @@ -package main - -import ( - "github.com/spf13/cobra" - "github.com/spf13/pflag" - - cmdnode "github.com/celestiaorg/celestia-node/cmd" - "github.com/celestiaorg/celestia-node/nodebuilder/core" - "github.com/celestiaorg/celestia-node/nodebuilder/gateway" - "github.com/celestiaorg/celestia-node/nodebuilder/header" - "github.com/celestiaorg/celestia-node/nodebuilder/node" - "github.com/celestiaorg/celestia-node/nodebuilder/p2p" - "github.com/celestiaorg/celestia-node/nodebuilder/rpc" - "github.com/celestiaorg/celestia-node/nodebuilder/state" -) - -// NOTE: We should always ensure that the added Flags below are parsed somewhere, like in the -// PersistentPreRun func on parent command. - -func init() { - flags := []*pflag.FlagSet{ - cmdnode.NodeFlags(), - p2p.Flags(), - header.Flags(), - cmdnode.MiscFlags(), - // NOTE: for now, state-related queries can only be accessed - // over an RPC connection with a celestia-core node. 
- core.Flags(), - rpc.Flags(), - gateway.Flags(), - state.Flags(), - } - - fullCmd.AddCommand( - cmdnode.Init(flags...), - cmdnode.Start(flags...), - cmdnode.AuthCmd(flags...), - cmdnode.ResetStore(flags...), - cmdnode.RemoveConfigCmd(flags...), - cmdnode.UpdateConfigCmd(flags...), - ) -} - -var fullCmd = &cobra.Command{ - Use: "full [subcommand]", - Args: cobra.NoArgs, - Short: "Manage your Full node", - PersistentPreRunE: func(cmd *cobra.Command, args []string) error { - return cmdnode.PersistentPreRunEnv(cmd, node.Full, args) - }, -} diff --git a/cmd/celestia/light.go b/cmd/celestia/light.go deleted file mode 100644 index 553660c5d3..0000000000 --- a/cmd/celestia/light.go +++ /dev/null @@ -1,51 +0,0 @@ -package main - -import ( - "github.com/spf13/cobra" - "github.com/spf13/pflag" - - cmdnode "github.com/celestiaorg/celestia-node/cmd" - "github.com/celestiaorg/celestia-node/nodebuilder/core" - "github.com/celestiaorg/celestia-node/nodebuilder/gateway" - "github.com/celestiaorg/celestia-node/nodebuilder/header" - "github.com/celestiaorg/celestia-node/nodebuilder/node" - "github.com/celestiaorg/celestia-node/nodebuilder/p2p" - "github.com/celestiaorg/celestia-node/nodebuilder/rpc" - "github.com/celestiaorg/celestia-node/nodebuilder/state" -) - -// NOTE: We should always ensure that the added Flags below are parsed somewhere, like in the -// PersistentPreRun func on parent command. - -func init() { - flags := []*pflag.FlagSet{ - cmdnode.NodeFlags(), - p2p.Flags(), - header.Flags(), - cmdnode.MiscFlags(), - // NOTE: for now, state-related queries can only be accessed - // over an RPC connection with a celestia-core node. - core.Flags(), - rpc.Flags(), - gateway.Flags(), - state.Flags(), - } - - lightCmd.AddCommand( - cmdnode.Init(flags...), - cmdnode.Start(flags...), - cmdnode.AuthCmd(flags...), - cmdnode.ResetStore(flags...), - cmdnode.RemoveConfigCmd(flags...), - cmdnode.UpdateConfigCmd(flags...), - ) -} - -var lightCmd = &cobra.Command{ - Use: "light [subcommand]", - Args: cobra.NoArgs, - Short: "Manage your Light node", - PersistentPreRunE: func(cmd *cobra.Command, args []string) error { - return cmdnode.PersistentPreRunEnv(cmd, node.Light, args) - }, -} diff --git a/cmd/celestia/main.go b/cmd/celestia/main.go index 24c9423100..76287d998f 100644 --- a/cmd/celestia/main.go +++ b/cmd/celestia/main.go @@ -5,9 +5,28 @@ import ( "os" "github.com/spf13/cobra" + "github.com/spf13/pflag" + + cmdnode "github.com/celestiaorg/celestia-node/cmd" ) +func WithSubcommands() func(*cobra.Command, []*pflag.FlagSet) { + return func(c *cobra.Command, flags []*pflag.FlagSet) { + c.AddCommand( + cmdnode.Init(flags...), + cmdnode.Start(cmdnode.WithFlagSet(flags)), + cmdnode.AuthCmd(flags...), + cmdnode.ResetStore(flags...), + cmdnode.RemoveConfigCmd(flags...), + cmdnode.UpdateConfigCmd(flags...), + ) + } +} + func init() { + bridgeCmd := cmdnode.NewBridge(WithSubcommands()) + lightCmd := cmdnode.NewLight(WithSubcommands()) + fullCmd := cmdnode.NewFull(WithSubcommands()) rootCmd.AddCommand( bridgeCmd, lightCmd, diff --git a/cmd/node.go b/cmd/node.go new file mode 100644 index 0000000000..51ac4a6d2e --- /dev/null +++ b/cmd/node.go @@ -0,0 +1,88 @@ +package cmd + +import ( + "github.com/spf13/cobra" + "github.com/spf13/pflag" + + "github.com/celestiaorg/celestia-node/nodebuilder/core" + "github.com/celestiaorg/celestia-node/nodebuilder/gateway" + "github.com/celestiaorg/celestia-node/nodebuilder/header" + "github.com/celestiaorg/celestia-node/nodebuilder/node" + 
"github.com/celestiaorg/celestia-node/nodebuilder/p2p" + "github.com/celestiaorg/celestia-node/nodebuilder/rpc" + "github.com/celestiaorg/celestia-node/nodebuilder/state" +) + +func NewBridge(options ...func(*cobra.Command, []*pflag.FlagSet)) *cobra.Command { + flags := []*pflag.FlagSet{ + NodeFlags(), + p2p.Flags(), + MiscFlags(), + core.Flags(), + rpc.Flags(), + gateway.Flags(), + state.Flags(), + } + cmd := &cobra.Command{ + Use: "bridge [subcommand]", + Args: cobra.NoArgs, + Short: "Manage your Bridge node", + PersistentPreRunE: func(cmd *cobra.Command, args []string) error { + return PersistentPreRunEnv(cmd, node.Bridge, args) + }, + } + for _, option := range options { + option(cmd, flags) + } + return cmd +} + +func NewLight(options ...func(*cobra.Command, []*pflag.FlagSet)) *cobra.Command { + flags := []*pflag.FlagSet{ + NodeFlags(), + p2p.Flags(), + header.Flags(), + MiscFlags(), + core.Flags(), + rpc.Flags(), + gateway.Flags(), + state.Flags(), + } + cmd := &cobra.Command{ + Use: "light [subcommand]", + Args: cobra.NoArgs, + Short: "Manage your Light node", + PersistentPreRunE: func(cmd *cobra.Command, args []string) error { + return PersistentPreRunEnv(cmd, node.Light, args) + }, + } + for _, option := range options { + option(cmd, flags) + } + return cmd +} + +func NewFull(options ...func(*cobra.Command, []*pflag.FlagSet)) *cobra.Command { + flags := []*pflag.FlagSet{ + NodeFlags(), + p2p.Flags(), + header.Flags(), + MiscFlags(), + core.Flags(), + rpc.Flags(), + gateway.Flags(), + state.Flags(), + } + cmd := &cobra.Command{ + Use: "full [subcommand]", + Args: cobra.NoArgs, + Short: "Manage your Full node", + PersistentPreRunE: func(cmd *cobra.Command, args []string) error { + return PersistentPreRunEnv(cmd, node.Full, args) + }, + } + for _, option := range options { + option(cmd, flags) + } + return cmd +} diff --git a/cmd/rpc.go b/cmd/rpc.go index 62e9fe7923..1935069229 100644 --- a/cmd/rpc.go +++ b/cmd/rpc.go @@ -49,7 +49,7 @@ func InitClient(cmd *cobra.Command, _ []string) error { if authTokenFlag == "" { storePath := "" if !cmd.Flag(nodeStoreFlag).Changed { - return fmt.Errorf("cant get the access to the auth token: token/node-store flag was not specified") + return errors.New("cant get the access to the auth token: token/node-store flag was not specified") } storePath = cmd.Flag(nodeStoreFlag).Value.String() token, err := getToken(storePath) diff --git a/cmd/start.go b/cmd/start.go index d46b553e20..281dfcc0e4 100644 --- a/cmd/start.go +++ b/cmd/start.go @@ -9,7 +9,6 @@ import ( "github.com/cosmos/cosmos-sdk/crypto/keyring" "github.com/spf13/cobra" - flag "github.com/spf13/pflag" "github.com/celestiaorg/celestia-app/app" "github.com/celestiaorg/celestia-app/app/encoding" @@ -18,7 +17,7 @@ import ( ) // Start constructs a CLI command to start Celestia Node daemon of any type with the given flags. -func Start(fsets ...*flag.FlagSet) *cobra.Command { +func Start(options ...func(*cobra.Command)) *cobra.Command { cmd := &cobra.Command{ Use: "start", Short: `Starts Node daemon. First stopping signal gracefully stops the Node and second terminates it. 
@@ -72,8 +71,9 @@ Options passed on start override configuration options only on start and are not return nd.Stop(ctx) }, } - for _, set := range fsets { - cmd.Flags().AddFlagSet(set) + // Apply each passed option to the command + for _, option := range options { + option(cmd) } return cmd } diff --git a/cmd/util.go b/cmd/util.go index 625685fe0b..08fa02155b 100644 --- a/cmd/util.go +++ b/cmd/util.go @@ -9,6 +9,7 @@ import ( "strings" "github.com/spf13/cobra" + flag "github.com/spf13/pflag" "github.com/celestiaorg/celestia-node/nodebuilder/core" "github.com/celestiaorg/celestia-node/nodebuilder/gateway" @@ -23,7 +24,7 @@ import ( func PrintOutput(data interface{}, err error, formatData func(interface{}) interface{}) error { switch { case err != nil: - data = err + data = err.Error() case formatData != nil: data = formatData(data) } @@ -125,3 +126,12 @@ func PersistentPreRunEnv(cmd *cobra.Command, nodeType node.Type, _ []string) err cmd.SetContext(ctx) return nil } + +// WithFlagSet adds the given flagset to the command. +func WithFlagSet(fset []*flag.FlagSet) func(*cobra.Command) { + return func(c *cobra.Command) { + for _, set := range fset { + c.Flags().AddFlagSet(set) + } + } +} diff --git a/core/exchange.go b/core/exchange.go index 06f648edad..cf889a38bb 100644 --- a/core/exchange.go +++ b/core/exchange.go @@ -22,18 +22,38 @@ type Exchange struct { fetcher *BlockFetcher store *eds.Store construct header.ConstructFn + + metrics *exchangeMetrics } func NewExchange( fetcher *BlockFetcher, store *eds.Store, construct header.ConstructFn, -) *Exchange { + opts ...Option, +) (*Exchange, error) { + p := new(params) + for _, opt := range opts { + opt(p) + } + + var ( + metrics *exchangeMetrics + err error + ) + if p.metrics { + metrics, err = newExchangeMetrics() + if err != nil { + return nil, err + } + } + return &Exchange{ fetcher: fetcher, store: store, construct: construct, - } + metrics: metrics, + }, nil } func (ce *Exchange) GetByHeight(ctx context.Context, height uint64) (*header.ExtendedHeader, error) { @@ -47,14 +67,18 @@ func (ce *Exchange) GetRangeByHeight( from *header.ExtendedHeader, to uint64, ) ([]*header.ExtendedHeader, error) { + start := time.Now() + amount := to - (from.Height() + 1) headers, err := ce.getRangeByHeight(ctx, from.Height()+1, amount) if err != nil { return nil, err } + ce.metrics.requestDurationPerHeader(ctx, time.Since(start), amount) + for _, h := range headers { - err := from.Verify(h) + err := libhead.Verify[*header.ExtendedHeader](from, h, libhead.DefaultHeightThreshold) if err != nil { return nil, fmt.Errorf("verifying next header against last verified height: %d: %w", from.Height(), err) diff --git a/core/exchange_metrics.go b/core/exchange_metrics.go new file mode 100644 index 0000000000..4e5bf5956c --- /dev/null +++ b/core/exchange_metrics.go @@ -0,0 +1,49 @@ +package core + +import ( + "context" + "time" + + "go.opentelemetry.io/otel/metric" + + "github.com/celestiaorg/celestia-node/libs/utils" +) + +type exchangeMetrics struct { + getByHeightDuration metric.Float64Histogram +} + +func newExchangeMetrics() (*exchangeMetrics, error) { + m := new(exchangeMetrics) + + var err error + m.getByHeightDuration, err = meter.Float64Histogram( + "core_ex_get_by_height_request_time", + metric.WithDescription("core exchange client getByHeight request time in seconds (per single height)"), + ) + if err != nil { + return nil, err + } + + return m, nil +} + +func (m *exchangeMetrics) observe(ctx context.Context, observeFn func(ctx context.Context)) { + if m == nil 
{ + return + } + + ctx = utils.ResetContextOnError(ctx) + + observeFn(ctx) +} + +func (m *exchangeMetrics) requestDurationPerHeader(ctx context.Context, duration time.Duration, amount uint64) { + m.observe(ctx, func(ctx context.Context) { + if amount == 0 { + return + } + durationPerHeader := duration.Seconds() / float64(amount) + m.getByHeightDuration.Record(ctx, durationPerHeader) + }) +} diff --git a/core/exchange_test.go b/core/exchange_test.go index 853b5a8dc6..95c7f83385 100644 --- a/core/exchange_test.go +++ b/core/exchange_test.go @@ -20,14 +20,17 @@ func TestCoreExchange_RequestHeaders(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) t.Cleanup(cancel) - fetcher, _ := createCoreFetcher(t, DefaultTestConfig()) + cfg := DefaultTestConfig() + cfg.ChainID = networkID + fetcher, _ := createCoreFetcher(t, cfg) // generate 10 blocks generateBlocks(t, fetcher) store := createStore(t) - ce := NewExchange(fetcher, store, header.MakeExtendedHeader) + ce, err := NewExchange(fetcher, store, header.MakeExtendedHeader) + require.NoError(t, err) // initialize store with genesis block genHeight := int64(1) diff --git a/core/fetcher.go b/core/fetcher.go index c24d6f0fac..35c9a83dc9 100644 --- a/core/fetcher.go +++ b/core/fetcher.go @@ -2,6 +2,7 @@ package core import ( "context" + "errors" "fmt" logging "github.com/ipfs/go-log/v2" @@ -127,7 +128,7 @@ func (f *BlockFetcher) ValidatorSet(ctx context.Context, height *int64) (*types. func (f *BlockFetcher) SubscribeNewBlockEvent(ctx context.Context) (<-chan types.EventDataSignedBlock, error) { // start the client if not started yet if !f.client.IsRunning() { - return nil, fmt.Errorf("client not running") + return nil, errors.New("client not running") } ctx, cancel := context.WithCancel(ctx) diff --git a/core/fetcher_no_race_test.go b/core/fetcher_no_race_test.go new file mode 100644 index 0000000000..890b7c35c1 --- /dev/null +++ b/core/fetcher_no_race_test.go @@ -0,0 +1,55 @@ +//go:build !race + +package core + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/types" +) + +// TestBlockFetcherHeaderValues tests that both the Commit and ValidatorSet +// endpoints are working as intended. 
+func TestBlockFetcherHeaderValues(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) + t.Cleanup(cancel) + + client := StartTestNode(t).Client + fetcher := NewBlockFetcher(client) + + // generate some blocks + newBlockChan, err := fetcher.SubscribeNewBlockEvent(ctx) + require.NoError(t, err) + // read once from channel to generate next block + var h int64 + select { + case evt := <-newBlockChan: + h = evt.Header.Height + case <-ctx.Done(): + require.NoError(t, ctx.Err()) + } + // get Commit from current height + commit, err := fetcher.Commit(ctx, &h) + require.NoError(t, err) + // get ValidatorSet from current height + valSet, err := fetcher.ValidatorSet(ctx, &h) + require.NoError(t, err) + // get next block + var nextBlock types.EventDataSignedBlock + select { + case nextBlock = <-newBlockChan: + case <-ctx.Done(): + require.NoError(t, ctx.Err()) + } + // compare LastCommit from next block to Commit from first block height + assert.Equal(t, nextBlock.Header.LastCommitHash, commit.Hash()) + assert.Equal(t, nextBlock.Header.Height, commit.Height+1) + // compare ValidatorSet hash to the ValidatorsHash from first block height + hexBytes := valSet.Hash() + assert.Equal(t, nextBlock.ValidatorSet.Hash(), hexBytes) + require.NoError(t, fetcher.UnsubscribeNewBlockEvent(ctx)) +} diff --git a/core/fetcher_test.go b/core/fetcher_test.go index 3380dbb402..261b84d78c 100644 --- a/core/fetcher_test.go +++ b/core/fetcher_test.go @@ -7,7 +7,6 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/tendermint/tendermint/types" ) func TestBlockFetcher_GetBlock_and_SubscribeNewBlockEvent(t *testing.T) { @@ -38,45 +37,3 @@ func TestBlockFetcher_GetBlock_and_SubscribeNewBlockEvent(t *testing.T) { } require.NoError(t, fetcher.UnsubscribeNewBlockEvent(ctx)) } - -// TestBlockFetcherHeaderValues tests that both the Commit and ValidatorSet -// endpoints are working as intended. 
-func TestBlockFetcherHeaderValues(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) - t.Cleanup(cancel) - - client := StartTestNode(t).Client - fetcher := NewBlockFetcher(client) - - // generate some blocks - newBlockChan, err := fetcher.SubscribeNewBlockEvent(ctx) - require.NoError(t, err) - // read once from channel to generate next block - var h int64 - select { - case evt := <-newBlockChan: - h = evt.Header.Height - case <-ctx.Done(): - require.NoError(t, ctx.Err()) - } - // get Commit from current height - commit, err := fetcher.Commit(ctx, &h) - require.NoError(t, err) - // get ValidatorSet from current height - valSet, err := fetcher.ValidatorSet(ctx, &h) - require.NoError(t, err) - // get next block - var nextBlock types.EventDataSignedBlock - select { - case nextBlock = <-newBlockChan: - case <-ctx.Done(): - require.NoError(t, ctx.Err()) - } - // compare LastCommit from next block to Commit from first block height - assert.Equal(t, nextBlock.Header.LastCommitHash, commit.Hash()) - assert.Equal(t, nextBlock.Header.Height, commit.Height+1) - // compare ValidatorSet hash to the ValidatorsHash from first block height - hexBytes := valSet.Hash() - assert.Equal(t, nextBlock.ValidatorSet.Hash(), hexBytes) - require.NoError(t, fetcher.UnsubscribeNewBlockEvent(ctx)) -} diff --git a/core/listener.go b/core/listener.go index 1754cb62b3..367aa34181 100644 --- a/core/listener.go +++ b/core/listener.go @@ -23,6 +23,8 @@ import ( var ( tracer = otel.Tracer("core/listener") retrySubscriptionDelay = 5 * time.Second + + errInvalidSubscription = errors.New("invalid subscription") ) // Listener is responsible for listening to Core for @@ -41,9 +43,12 @@ type Listener struct { headerBroadcaster libhead.Broadcaster[*header.ExtendedHeader] hashBroadcaster shrexsub.BroadcastFn - listenerTimeout time.Duration + metrics *listenerMetrics + + chainID string - cancel context.CancelFunc + listenerTimeout time.Duration + cancel context.CancelFunc } func NewListener( @@ -53,7 +58,24 @@ func NewListener( construct header.ConstructFn, store *eds.Store, blocktime time.Duration, -) *Listener { + opts ...Option, +) (*Listener, error) { + p := new(params) + for _, opt := range opts { + opt(p) + } + + var ( + metrics *listenerMetrics + err error + ) + if p.metrics { + metrics, err = newListenerMetrics() + if err != nil { + return nil, err + } + } + return &Listener{ fetcher: fetcher, headerBroadcaster: bcast, @@ -61,13 +83,15 @@ func NewListener( construct: construct, store: store, listenerTimeout: 5 * blocktime, - } + metrics: metrics, + chainID: p.chainID, + }, nil } // Start kicks off the Listener listener loop. func (cl *Listener) Start(context.Context) error { if cl.cancel != nil { - return fmt.Errorf("listener: already started") + return errors.New("listener: already started") } ctx, cancel := context.WithCancel(context.Background()) @@ -85,7 +109,7 @@ func (cl *Listener) Start(context.Context) error { func (cl *Listener) Stop(context.Context) error { cl.cancel() cl.cancel = nil - return nil + return cl.metrics.Close() } // runSubscriber runs a subscriber to receive event data of new signed blocks. 
It will attempt to @@ -97,6 +121,10 @@ func (cl *Listener) runSubscriber(ctx context.Context, sub <-chan types.EventDat // listener stopped because external context was canceled return } + if errors.Is(err, errInvalidSubscription) { + // stop node if there is a critical issue with the block subscription + log.Fatalf("listener: %v", err) + } log.Warnw("listener: subscriber error, resubscribing...", "err", err) sub = cl.resubscribe(ctx) @@ -143,7 +171,14 @@ func (cl *Listener) listen(ctx context.Context, sub <-chan types.EventDataSigned return errors.New("underlying subscription was closed") } + if cl.chainID != "" && b.Header.ChainID != cl.chainID { + log.Errorf("listener: received block with unexpected chain ID: expected %s,"+ + " received %s", cl.chainID, b.Header.ChainID) + return errInvalidSubscription + } + log.Debugw("listener: new block from core", "height", b.Header.Height) + err := cl.handleNewSignedBlock(ctx, b) if err != nil { log.Errorw("listener: handling new block msg", @@ -157,6 +192,7 @@ func (cl *Listener) listen(ctx context.Context, sub <-chan types.EventDataSigned } timeout.Reset(cl.listenerTimeout) case <-timeout.C: + cl.metrics.subscriptionStuck(ctx) return errors.New("underlying subscription is stuck") case <-ctx.Done(): return ctx.Err() @@ -178,6 +214,7 @@ func (cl *Listener) handleNewSignedBlock(ctx context.Context, b types.EventDataS if err != nil { return fmt.Errorf("extending block data: %w", err) } + // generate extended header eh, err := cl.construct(&b.Header, &b.Commit, &b.ValidatorSet, eds) if err != nil { diff --git a/core/listener_metrics.go b/core/listener_metrics.go new file mode 100644 index 0000000000..f17903a91a --- /dev/null +++ b/core/listener_metrics.go @@ -0,0 +1,81 @@ +package core + +import ( + "context" + "time" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/metric" + + "github.com/celestiaorg/celestia-node/libs/utils" +) + +var meter = otel.Meter("core") + +type listenerMetrics struct { + lastTimeSubscriptionStuck time.Time + lastTimeSubscriptionStuckInst metric.Int64ObservableGauge + lastTimeSubscriptionStuckReg metric.Registration + + subscriptionStuckInst metric.Int64Counter +} + +func newListenerMetrics() (*listenerMetrics, error) { + m := new(listenerMetrics) + + var err error + m.subscriptionStuckInst, err = meter.Int64Counter( + "core_listener_subscription_stuck_count", + metric.WithDescription("number of times core listener block subscription has been stuck/retried"), + ) + if err != nil { + return nil, err + } + + m.lastTimeSubscriptionStuckInst, err = meter.Int64ObservableGauge( + "core_listener_last_time_subscription_stuck_timestamp", + metric.WithDescription("last time the listener subscription was stuck"), + ) + if err != nil { + return nil, err + } + m.lastTimeSubscriptionStuckReg, err = meter.RegisterCallback( + m.observeLastTimeStuckCallback, + m.lastTimeSubscriptionStuckInst, + ) + if err != nil { + return nil, err + } + + return m, nil +} + +func (m *listenerMetrics) observe(ctx context.Context, observeFn func(ctx context.Context)) { + if m == nil { + return + } + + ctx = utils.ResetContextOnError(ctx) + + observeFn(ctx) +} + +func (m *listenerMetrics) subscriptionStuck(ctx context.Context) { + m.observe(ctx, func(ctx context.Context) { + m.subscriptionStuckInst.Add(ctx, 1) + m.lastTimeSubscriptionStuck = time.Now() + }) +} + +func (m *listenerMetrics) observeLastTimeStuckCallback(_ context.Context, obs metric.Observer) error { + obs.ObserveInt64(m.lastTimeSubscriptionStuckInst, m.lastTimeSubscriptionStuck.Unix()) 
+ return nil +} + +func (m *listenerMetrics) Close() error { + if m == nil { + return nil + } + + return m.lastTimeSubscriptionStuckReg.Unregister() +} diff --git a/core/listener_no_race_test.go b/core/listener_no_race_test.go new file mode 100644 index 0000000000..eac12785ee --- /dev/null +++ b/core/listener_no_race_test.go @@ -0,0 +1,71 @@ +//go:build !race + +package core + +import ( + "bytes" + "context" + "testing" + "time" + + "github.com/cosmos/cosmos-sdk/client/flags" + "github.com/stretchr/testify/require" + + "github.com/celestiaorg/celestia-node/share" +) + +// TestListenerWithNonEmptyBlocks ensures that non-empty blocks are actually +// stored to eds.Store. +func TestListenerWithNonEmptyBlocks(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + t.Cleanup(cancel) + + // create mocknet with two pubsub endpoints + ps0, _ := createMocknetWithTwoPubsubEndpoints(ctx, t) + + // create one block to store as Head in local store and then unsubscribe from block events + cfg := DefaultTestConfig() + cfg.ChainID = networkID + fetcher, cctx := createCoreFetcher(t, cfg) + eds := createEdsPubSub(ctx, t) + + store := createStore(t) + err := store.Start(ctx) + require.NoError(t, err) + t.Cleanup(func() { + err = store.Stop(ctx) + require.NoError(t, err) + }) + + // create Listener and start listening + cl := createListener(ctx, t, fetcher, ps0, eds, store, networkID) + err = cl.Start(ctx) + require.NoError(t, err) + + // listen for eds hashes broadcasted through eds-sub and ensure store has + // already stored them + sub, err := eds.Subscribe() + require.NoError(t, err) + t.Cleanup(sub.Cancel) + + empty := share.EmptyRoot() + // TODO extract 16 + for i := 0; i < 16; i++ { + _, err := cctx.FillBlock(16, cfg.Accounts, flags.BroadcastBlock) + require.NoError(t, err) + msg, err := sub.Next(ctx) + require.NoError(t, err) + + if bytes.Equal(empty.Hash(), msg.DataHash) { + continue + } + + has, err := store.Has(ctx, msg.DataHash) + require.NoError(t, err) + require.True(t, has) + } + + err = cl.Stop(ctx) + require.NoError(t, err) + require.Nil(t, cl.cancel) +} diff --git a/core/listener_test.go b/core/listener_test.go index 9537860d78..b3ed11e571 100644 --- a/core/listener_test.go +++ b/core/listener_test.go @@ -1,12 +1,10 @@ package core import ( - "bytes" "context" "testing" "time" - "github.com/cosmos/cosmos-sdk/client/flags" pubsub "github.com/libp2p/go-libp2p-pubsub" "github.com/libp2p/go-libp2p/core/event" mocknet "github.com/libp2p/go-libp2p/p2p/net/mock" @@ -17,7 +15,6 @@ import ( "github.com/celestiaorg/celestia-node/header" nodep2p "github.com/celestiaorg/celestia-node/nodebuilder/p2p" - "github.com/celestiaorg/celestia-node/share" "github.com/celestiaorg/celestia-node/share/eds" "github.com/celestiaorg/celestia-node/share/p2p/shrexsub" ) @@ -47,10 +44,14 @@ func TestListener(t *testing.T) { t.Cleanup(subs.Cancel) // create one block to store as Head in local store and then unsubscribe from block events - fetcher, _ := createCoreFetcher(t, DefaultTestConfig()) + cfg := DefaultTestConfig() + cfg.ChainID = networkID + fetcher, _ := createCoreFetcher(t, cfg) + eds := createEdsPubSub(ctx, t) + // create Listener and start listening - cl := createListener(ctx, t, fetcher, ps0, eds, createStore(t)) + cl := createListener(ctx, t, fetcher, ps0, eds, createStore(t), networkID) err = cl.Start(ctx) require.NoError(t, err) @@ -69,9 +70,7 @@ func TestListener(t *testing.T) { require.Nil(t, cl.cancel) } -// TestListenerWithNonEmptyBlocks ensures that non-empty blocks 
are actually -// stored to eds.Store. -func TestListenerWithNonEmptyBlocks(t *testing.T) { +func TestListenerWithWrongChainRPC(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), time.Minute) t.Cleanup(cancel) @@ -80,7 +79,8 @@ func TestListenerWithNonEmptyBlocks(t *testing.T) { // create one block to store as Head in local store and then unsubscribe from block events cfg := DefaultTestConfig() - fetcher, cctx := createCoreFetcher(t, cfg) + cfg.ChainID = networkID + fetcher, _ := createCoreFetcher(t, cfg) eds := createEdsPubSub(ctx, t) store := createStore(t) @@ -92,36 +92,12 @@ func TestListenerWithNonEmptyBlocks(t *testing.T) { }) // create Listener and start listening - cl := createListener(ctx, t, fetcher, ps0, eds, store) - err = cl.Start(ctx) + cl := createListener(ctx, t, fetcher, ps0, eds, store, "wrong-chain-rpc") + sub, err := cl.fetcher.SubscribeNewBlockEvent(ctx) require.NoError(t, err) - // listen for eds hashes broadcasted through eds-sub and ensure store has - // already stored them - sub, err := eds.Subscribe() - require.NoError(t, err) - t.Cleanup(sub.Cancel) - - empty := share.EmptyRoot() - // TODO extract 16 - for i := 0; i < 16; i++ { - _, err := cctx.FillBlock(16, cfg.Accounts, flags.BroadcastBlock) - require.NoError(t, err) - msg, err := sub.Next(ctx) - require.NoError(t, err) - - if bytes.Equal(empty.Hash(), msg.DataHash) { - continue - } - - has, err := store.Has(ctx, msg.DataHash) - require.NoError(t, err) - require.True(t, has) - } - - err = cl.Stop(ctx) - require.NoError(t, err) - require.Nil(t, cl.cancel) + err = cl.listen(ctx, sub) + assert.ErrorIs(t, err, errInvalidSubscription) } func createMocknetWithTwoPubsubEndpoints(ctx context.Context, t *testing.T) (*pubsub.PubSub, *pubsub.PubSub) { @@ -166,6 +142,7 @@ func createListener( ps *pubsub.PubSub, edsSub *shrexsub.PubSub, store *eds.Store, + chainID string, ) *Listener { p2pSub, err := p2p.NewSubscriber[*header.ExtendedHeader](ps, header.MsgID, p2p.WithSubscriberNetworkID(networkID)) require.NoError(t, err) @@ -180,7 +157,10 @@ func createListener( require.NoError(t, p2pSub.Stop(ctx)) }) - return NewListener(p2pSub, fetcher, edsSub.Broadcast, header.MakeExtendedHeader, store, nodep2p.BlockTime) + listener, err := NewListener(p2pSub, fetcher, edsSub.Broadcast, header.MakeExtendedHeader, + store, nodep2p.BlockTime, WithChainID(nodep2p.Network(chainID))) + require.NoError(t, err) + return listener } func createEdsPubSub(ctx context.Context, t *testing.T) *shrexsub.PubSub { diff --git a/core/option.go b/core/option.go new file mode 100644 index 0000000000..6916ced4d8 --- /dev/null +++ b/core/option.go @@ -0,0 +1,25 @@ +package core + +import "github.com/celestiaorg/celestia-node/nodebuilder/p2p" + +type Option func(*params) + +type params struct { + metrics bool + + chainID string +} + +// WithMetrics is a functional option that enables metrics +// inside the core package. +func WithMetrics() Option { + return func(p *params) { + p.metrics = true + } +} + +func WithChainID(id p2p.Network) Option { + return func(p *params) { + p.chainID = id.String() + } +} diff --git a/das/daser.go b/das/daser.go index 29139d3a5f..c04a8dcdb8 100644 --- a/das/daser.go +++ b/das/daser.go @@ -5,6 +5,7 @@ import ( "errors" "fmt" "sync/atomic" + "time" "github.com/ipfs/go-datastore" logging "github.com/ipfs/go-log/v2" @@ -78,7 +79,7 @@ func NewDASer( // Start initiates subscription for new ExtendedHeaders and spawns a sampling routine. 
func (d *DASer) Start(ctx context.Context) error { if !atomic.CompareAndSwapInt32(&d.running, 0, 1) { - return fmt.Errorf("da: DASer already started") + return errors.New("da: DASer already started") } sub, err := d.hsub.Subscribe() @@ -147,6 +148,14 @@ func (d *DASer) Stop(ctx context.Context) error { } func (d *DASer) sample(ctx context.Context, h *header.ExtendedHeader) error { + // short-circuit if pruning is enabled and the header is outside the + // availability window + if !d.isWithinSamplingWindow(h) { + log.Debugw("skipping header outside sampling window", "height", h.Height(), + "time", h.Time()) + return nil + } + err := d.da.SharesAvailable(ctx, h) if err != nil { var byzantineErr *byzantine.ErrByzantine @@ -162,6 +171,14 @@ func (d *DASer) sample(ctx context.Context, h *header.ExtendedHeader) error { return nil } +func (d *DASer) isWithinSamplingWindow(eh *header.ExtendedHeader) bool { + // if sampling window is not set, then all headers are within the window + if d.params.SamplingWindow == 0 { + return true + } + return time.Since(eh.Time()) <= d.params.SamplingWindow +} + // SamplingStats returns the current statistics over the DA sampling process. func (d *DASer) SamplingStats(ctx context.Context) (SamplingStats, error) { return d.sampler.stats(ctx) diff --git a/das/daser_test.go b/das/daser_test.go index 1ec160e224..9eec6392cc 100644 --- a/das/daser_test.go +++ b/das/daser_test.go @@ -2,6 +2,7 @@ package das import ( "context" + "strconv" "testing" "time" @@ -230,7 +231,8 @@ func TestDASerSampleTimeout(t *testing.T) { fserv := &fraudtest.DummyService[*header.ExtendedHeader]{} // create and start DASer - daser, err := NewDASer(avail, sub, getter, ds, fserv, newBroadcastMock(1), WithSampleTimeout(1)) + daser, err := NewDASer(avail, sub, getter, ds, fserv, newBroadcastMock(1), + WithSampleTimeout(1)) require.NoError(t, err) require.NoError(t, daser.Start(ctx)) @@ -243,6 +245,42 @@ func TestDASerSampleTimeout(t *testing.T) { } } +// TestDASer_SamplingWindow tests the sampling window determination +// for headers. 
+func TestDASer_SamplingWindow(t *testing.T) { + ds := ds_sync.MutexWrap(datastore.NewMapDatastore()) + sub := new(headertest.Subscriber) + fserv := &fraudtest.DummyService[*header.ExtendedHeader]{} + getter := getterStub{} + avail := mocks.NewMockAvailability(gomock.NewController(t)) + + // create and start DASer + daser, err := NewDASer(avail, sub, getter, ds, fserv, newBroadcastMock(1), + WithSamplingWindow(time.Second)) + require.NoError(t, err) + + var tests = []struct { + timestamp time.Time + withinWindow bool + }{ + {timestamp: time.Now().Add(-(time.Second * 5)), withinWindow: false}, + {timestamp: time.Now().Add(-(time.Millisecond * 800)), withinWindow: true}, + {timestamp: time.Now().Add(-(time.Hour)), withinWindow: false}, + {timestamp: time.Now().Add(-(time.Hour * 24 * 30)), withinWindow: false}, + {timestamp: time.Now(), withinWindow: true}, + } + + for i, tt := range tests { + t.Run(strconv.Itoa(i), func(t *testing.T) { + eh := headertest.RandExtendedHeader(t) + eh.RawHeader.Time = tt.timestamp + + assert.Equal(t, tt.withinWindow, daser.isWithinSamplingWindow(eh)) + }) + } + +} + // createDASerSubcomponents takes numGetter (number of headers // to store in mockGetter) and numSub (number of headers to store // in the mock header.Subscriber), returning a newly instantiated diff --git a/das/metrics.go b/das/metrics.go index 42b472d909..6454e9d138 100644 --- a/das/metrics.go +++ b/das/metrics.go @@ -11,6 +11,7 @@ import ( "go.opentelemetry.io/otel/metric" "github.com/celestiaorg/celestia-node/header" + "github.com/celestiaorg/celestia-node/libs/utils" ) const ( @@ -19,9 +20,7 @@ const ( failedLabel = "failed" ) -var ( - meter = otel.Meter("das") -) +var meter = otel.Meter("das") type metrics struct { sampled metric.Int64Counter @@ -146,9 +145,9 @@ func (m *metrics) observeSample( if m == nil { return } - if ctx.Err() != nil { - ctx = context.Background() - } + + ctx = utils.ResetContextOnError(ctx) + m.sampleTime.Record(ctx, sampleTime.Seconds(), metric.WithAttributes( attribute.Bool(failedLabel, err != nil), @@ -171,9 +170,7 @@ func (m *metrics) observeGetHeader(ctx context.Context, d time.Duration) { if m == nil { return } - if ctx.Err() != nil { - ctx = context.Background() - } + ctx = utils.ResetContextOnError(ctx) m.getHeaderTime.Record(ctx, d.Seconds()) } @@ -182,8 +179,6 @@ func (m *metrics) observeNewHead(ctx context.Context) { if m == nil { return } - if ctx.Err() != nil { - ctx = context.Background() - } + ctx = utils.ResetContextOnError(ctx) m.newHead.Add(ctx, 1) } diff --git a/das/options.go b/das/options.go index 6af8a02174..69deab52da 100644 --- a/das/options.go +++ b/das/options.go @@ -1,6 +1,7 @@ package das import ( + "errors" "fmt" "time" ) @@ -8,7 +9,7 @@ import ( // ErrInvalidOption is an error that is returned by Parameters.Validate // when supplied with invalid values. // This error will also be returned by NewDASer if supplied with an invalid option -var ErrInvalidOption = fmt.Errorf("das: invalid option") +var ErrInvalidOption = errors.New("das: invalid option") // errInvalidOptionValue is a utility function to dedup code for error-returning // when dealing with invalid parameter values @@ -40,6 +41,11 @@ type Parameters struct { // divided between parallel workers. SampleTimeout should be adjusted proportionally to // ConcurrencyLimit. SampleTimeout time.Duration + + // SamplingWindow determines the time window that headers should fall into + // in order to be sampled. If set to 0, the sampling window will include + // all headers. 
+ SamplingWindow time.Duration } // DefaultParameters returns the default configuration values for the daser parameters @@ -148,10 +154,18 @@ func WithSampleFrom(sampleFrom uint64) Option { } } -// WithSampleFrom is a functional option to configure the daser's `SampleTimeout` parameter +// WithSampleTimeout is a functional option to configure the daser's `SampleTimeout` parameter // Refer to WithSamplingRange documentation to see an example of how to use this func WithSampleTimeout(sampleTimeout time.Duration) Option { return func(d *DASer) { d.params.SampleTimeout = sampleTimeout } } + +// WithSamplingWindow is a functional option to configure the DASer's +// `SamplingWindow` parameter. +func WithSamplingWindow(samplingWindow time.Duration) Option { + return func(d *DASer) { + d.params.SamplingWindow = samplingWindow + } +} diff --git a/go.mod b/go.mod index 04dd804cc4..da96f82757 100644 --- a/go.mod +++ b/go.mod @@ -3,21 +3,21 @@ module github.com/celestiaorg/celestia-node go 1.21.1 require ( - cosmossdk.io/errors v1.0.0 - cosmossdk.io/math v1.1.2 + cosmossdk.io/errors v1.0.1 + cosmossdk.io/math v1.3.0 github.com/BurntSushi/toml v1.3.2 github.com/alecthomas/jsonschema v0.0.0-20220216202328-9eeeec9d044b github.com/benbjohnson/clock v1.3.5 - github.com/celestiaorg/celestia-app v1.3.0 - github.com/celestiaorg/go-ds-badger4 v0.0.0-20230712104058-7ede1c814ac5 + github.com/celestiaorg/celestia-app v1.4.0 github.com/celestiaorg/go-fraud v0.2.0 - github.com/celestiaorg/go-header v0.4.1 + github.com/celestiaorg/go-header v0.5.3 github.com/celestiaorg/go-libp2p-messenger v0.2.0 github.com/celestiaorg/nmt v0.20.0 github.com/celestiaorg/rsmt2d v0.11.0 github.com/cosmos/cosmos-sdk v0.46.14 github.com/cosmos/cosmos-sdk/api v0.1.0 github.com/cristalhq/jwt v1.2.0 + github.com/dgraph-io/badger/v4 v4.2.1-0.20240106094458-1c417aa3799c github.com/etclabscore/go-openrpc-reflect v0.0.37 github.com/filecoin-project/dagstore v0.5.6 github.com/filecoin-project/go-jsonrpc v0.3.1 @@ -25,80 +25,70 @@ require ( github.com/gogo/protobuf v1.3.3 github.com/golang/mock v1.6.0 github.com/gorilla/mux v1.8.1 - github.com/hashicorp/go-retryablehttp v0.7.4 + github.com/hashicorp/go-retryablehttp v0.7.5 github.com/hashicorp/golang-lru/v2 v2.0.7 github.com/imdario/mergo v0.3.16 - github.com/ipfs/boxo v0.15.0 + github.com/ipfs/boxo v0.18.0 github.com/ipfs/go-block-format v0.2.0 + github.com/ipfs/go-blockservice v0.5.0 github.com/ipfs/go-cid v0.4.1 github.com/ipfs/go-datastore v0.6.0 + github.com/ipfs/go-ds-badger4 v0.1.5 github.com/ipfs/go-ipld-cbor v0.1.0 github.com/ipfs/go-ipld-format v0.6.0 github.com/ipfs/go-log/v2 v2.5.1 github.com/ipld/go-car v0.6.2 - github.com/libp2p/go-libp2p v0.32.0 - github.com/libp2p/go-libp2p-kad-dht v0.25.1 + github.com/libp2p/go-libp2p v0.32.2 + github.com/libp2p/go-libp2p-kad-dht v0.25.2 github.com/libp2p/go-libp2p-pubsub v0.10.0 github.com/libp2p/go-libp2p-record v0.2.0 github.com/libp2p/go-libp2p-routing-helpers v0.7.3 github.com/mitchellh/go-homedir v1.1.0 github.com/multiformats/go-base32 v0.1.0 - github.com/multiformats/go-multiaddr v0.12.0 + github.com/multiformats/go-multiaddr v0.12.2 github.com/multiformats/go-multiaddr-dns v0.3.1 github.com/multiformats/go-multihash v0.2.3 github.com/open-rpc/meta-schema v0.0.0-20201029221707-1b72ef2ea333 - github.com/prometheus/client_golang v1.17.0 + github.com/prometheus/client_golang v1.18.0 github.com/pyroscope-io/client v0.7.2 - github.com/pyroscope-io/otel-profiling-go v0.4.0 + github.com/pyroscope-io/otel-profiling-go v0.5.0 + 
github.com/rollkit/go-da v0.4.0 github.com/spf13/cobra v1.8.0 github.com/spf13/pflag v1.0.5 github.com/stretchr/testify v1.8.4 github.com/tendermint/tendermint v0.34.28 go.opentelemetry.io/contrib/instrumentation/runtime v0.45.0 - go.opentelemetry.io/otel v1.19.0 - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v0.42.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0 - go.opentelemetry.io/otel/metric v1.19.0 - go.opentelemetry.io/otel/sdk v1.19.0 - go.opentelemetry.io/otel/sdk/metric v1.19.0 - go.opentelemetry.io/otel/trace v1.19.0 - go.opentelemetry.io/proto/otlp v1.0.0 + go.opentelemetry.io/otel v1.24.0 + go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.24.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.24.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.24.0 + go.opentelemetry.io/otel/metric v1.24.0 + go.opentelemetry.io/otel/sdk v1.24.0 + go.opentelemetry.io/otel/sdk/metric v1.24.0 + go.opentelemetry.io/otel/trace v1.24.0 + go.opentelemetry.io/proto/otlp v1.1.0 go.uber.org/fx v1.20.1 go.uber.org/zap v1.26.0 - golang.org/x/crypto v0.14.0 - golang.org/x/exp v0.0.0-20231006140011-7918f672742d - golang.org/x/sync v0.5.0 + golang.org/x/crypto v0.19.0 + golang.org/x/exp v0.0.0-20240110193028-0dcbfd608b1e + golang.org/x/sync v0.6.0 golang.org/x/text v0.14.0 - google.golang.org/grpc v1.59.0 - google.golang.org/protobuf v1.31.0 + google.golang.org/grpc v1.62.0 + google.golang.org/protobuf v1.32.0 ) require ( - github.com/Microsoft/go-winio v0.6.1 // indirect - github.com/bits-and-blooms/bitset v1.7.0 // indirect - github.com/celestiaorg/quantum-gravity-bridge/v2 v2.1.2 // indirect - github.com/consensys/bavard v0.1.13 // indirect - github.com/consensys/gnark-crypto v0.12.1 // indirect - github.com/crate-crypto/go-kzg-4844 v0.3.0 // indirect - github.com/ethereum/c-kzg-4844 v0.3.1 // indirect - github.com/mmcloughlin/addchain v0.4.0 // indirect - github.com/supranational/blst v0.3.11 // indirect - go.uber.org/mock v0.3.0 // indirect - rsc.io/tmplfunc v0.0.3 // indirect -) - -require ( - cloud.google.com/go v0.110.8 // indirect - cloud.google.com/go/compute v1.23.0 // indirect + cloud.google.com/go v0.112.0 // indirect + cloud.google.com/go/compute v1.23.3 // indirect cloud.google.com/go/compute/metadata v0.2.3 // indirect - cloud.google.com/go/iam v1.1.2 // indirect - cloud.google.com/go/storage v1.30.1 // indirect + cloud.google.com/go/iam v1.1.5 // indirect + cloud.google.com/go/storage v1.36.0 // indirect filippo.io/edwards25519 v1.0.0-rc.1 // indirect github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 // indirect github.com/99designs/keyring v1.2.1 // indirect github.com/ChainSafe/go-schnorrkel v1.0.0 // indirect github.com/Jorropo/jsync v1.0.1 // indirect + github.com/Microsoft/go-winio v0.6.1 // indirect github.com/PuerkitoBio/purell v1.1.1 // indirect github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect github.com/StackExchange/wmi v1.2.1 // indirect @@ -108,8 +98,10 @@ require ( github.com/beorn7/perks v1.0.1 // indirect github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d // indirect github.com/bgentry/speakeasy v0.1.0 // indirect + github.com/bits-and-blooms/bitset v1.7.0 // indirect github.com/btcsuite/btcd/btcec/v2 v2.3.2 // indirect github.com/celestiaorg/merkletree v0.0.0-20210714075610-a84dc3ddbbe4 // indirect + github.com/celestiaorg/quantum-gravity-bridge/v2 v2.1.2 // indirect 
github.com/cenkalti/backoff/v4 v4.2.1 // indirect github.com/cespare/xxhash v1.1.0 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect @@ -118,6 +110,8 @@ require ( github.com/coinbase/rosetta-sdk-go v0.7.9 // indirect github.com/cometbft/cometbft-db v0.7.0 // indirect github.com/confio/ics23/go v0.9.1 // indirect + github.com/consensys/bavard v0.1.13 // indirect + github.com/consensys/gnark-crypto v0.12.1 // indirect github.com/containerd/cgroups v1.1.0 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/cosmos/btcutil v1.0.5 // indirect @@ -128,6 +122,7 @@ require ( github.com/cosmos/iavl v0.19.6 // indirect github.com/cosmos/ibc-go/v6 v6.2.0 // indirect github.com/cosmos/ledger-cosmos-go v0.13.2 // indirect + github.com/crate-crypto/go-kzg-4844 v0.3.0 // indirect github.com/creachadair/taskgroup v0.3.2 // indirect github.com/cskr/pubsub v1.0.2 // indirect github.com/danieljoos/wincred v1.1.2 // indirect @@ -138,24 +133,24 @@ require ( github.com/deepmap/oapi-codegen v1.8.2 // indirect github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f // indirect github.com/dgraph-io/badger/v2 v2.2007.4 // indirect - github.com/dgraph-io/badger/v4 v4.1.0 // indirect github.com/dgraph-io/ristretto v0.1.1 // indirect github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 // indirect github.com/docker/go-units v0.5.0 // indirect - github.com/dustin/go-humanize v1.0.1-0.20200219035652-afde56e7acac // indirect + github.com/dustin/go-humanize v1.0.1 // indirect github.com/dvsekhvalnov/jose2go v1.5.0 // indirect github.com/elastic/gosigar v0.14.2 // indirect github.com/etclabscore/go-jsonschema-walk v0.0.6 // indirect + github.com/ethereum/c-kzg-4844 v0.3.1 // indirect github.com/ethereum/go-ethereum v1.13.2 // indirect - github.com/felixge/httpsnoop v1.0.1 // indirect - github.com/flynn/noise v1.0.0 // indirect + github.com/felixge/httpsnoop v1.0.4 // indirect + github.com/flynn/noise v1.0.1 // indirect github.com/francoispqt/gojay v1.2.13 // indirect github.com/fsnotify/fsnotify v1.6.0 // indirect github.com/gammazero/deque v0.2.0 // indirect github.com/go-kit/kit v0.12.0 // indirect github.com/go-kit/log v0.2.1 // indirect github.com/go-logfmt/logfmt v0.5.1 // indirect - github.com/go-logr/logr v1.2.4 // indirect + github.com/go-logr/logr v1.4.1 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-ole/go-ole v1.2.6 // indirect github.com/go-openapi/jsonpointer v0.19.5 // indirect @@ -167,25 +162,25 @@ require ( github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 // indirect github.com/godbus/dbus/v5 v5.1.0 // indirect github.com/gogo/gateway v1.1.0 // indirect - github.com/golang/glog v1.1.2 // indirect + github.com/golang/glog v1.2.0 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.3 // indirect github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect github.com/google/btree v1.1.2 // indirect github.com/google/flatbuffers v1.12.1 // indirect - github.com/google/go-cmp v0.5.9 // indirect + github.com/google/go-cmp v0.6.0 // indirect github.com/google/gopacket v1.1.19 // indirect github.com/google/orderedcode v0.0.1 // indirect - github.com/google/pprof v0.0.0-20231023181126-ff6d637d2a7b // indirect - github.com/google/s2a-go v0.1.4 // indirect - github.com/google/uuid v1.3.1 // indirect - github.com/googleapis/enterprise-certificate-proxy v0.2.4 // indirect + github.com/google/pprof v0.0.0-20231229205709-960ae82b1e42 // indirect + 
github.com/google/s2a-go v0.1.7 // indirect + github.com/google/uuid v1.6.0 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect github.com/googleapis/gax-go/v2 v2.12.0 // indirect github.com/gorilla/handlers v1.5.1 // indirect github.com/gorilla/websocket v1.5.0 // indirect github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 // indirect github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0 // indirect github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c // indirect github.com/gtank/merlin v0.1.1 // indirect github.com/gtank/ristretto255 v0.1.2 // indirect @@ -208,7 +203,6 @@ require ( github.com/influxdata/influxdb-client-go/v2 v2.12.2 // indirect github.com/influxdata/line-protocol v0.0.0-20210311194329-9aa0e372d097 // indirect github.com/ipfs/bbloom v0.0.4 // indirect - github.com/ipfs/go-blockservice v0.5.0 github.com/ipfs/go-ipfs-blockstore v1.3.1 // indirect github.com/ipfs/go-ipfs-delay v0.0.1 // indirect github.com/ipfs/go-ipfs-ds-help v1.1.1 // indirect @@ -221,7 +215,7 @@ require ( github.com/ipfs/go-metrics-interface v0.0.1 // indirect github.com/ipfs/go-peertaskqueue v0.8.1 // indirect github.com/ipfs/go-verifcid v0.0.2 // indirect - github.com/ipld/go-car/v2 v2.11.0 // indirect + github.com/ipld/go-car/v2 v2.13.1 // indirect github.com/ipld/go-codec-dagpb v1.6.0 // indirect github.com/ipld/go-ipld-prime v0.21.0 // indirect github.com/jackpal/go-nat-pmp v1.0.2 // indirect @@ -230,15 +224,15 @@ require ( github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/jmhodges/levigo v1.0.0 // indirect github.com/josharian/intern v1.0.0 // indirect - github.com/klauspost/compress v1.17.2 // indirect - github.com/klauspost/cpuid/v2 v2.2.5 // indirect + github.com/klauspost/compress v1.17.4 // indirect + github.com/klauspost/cpuid/v2 v2.2.6 // indirect github.com/klauspost/reedsolomon v1.11.8 // indirect github.com/koron/go-ssdp v0.0.4 // indirect github.com/lib/pq v1.10.7 // indirect github.com/libp2p/go-buffer-pool v0.1.0 // indirect github.com/libp2p/go-cidranger v1.1.0 // indirect github.com/libp2p/go-flow-metrics v0.1.0 // indirect - github.com/libp2p/go-libp2p-asn-util v0.3.0 // indirect + github.com/libp2p/go-libp2p-asn-util v0.4.1 // indirect github.com/libp2p/go-libp2p-kbucket v0.6.3 // indirect github.com/libp2p/go-msgio v0.3.0 // indirect github.com/libp2p/go-nat v0.2.0 // indirect @@ -251,8 +245,8 @@ require ( github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.20 // indirect - github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect - github.com/miekg/dns v1.1.56 // indirect + github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect + github.com/miekg/dns v1.1.57 // indirect github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect github.com/mimoo/StrobeGo v0.0.0-20210601165009-122bf33a46e0 // indirect @@ -260,6 +254,7 @@ require ( github.com/minio/sha256-simd v1.0.1 // indirect github.com/mitchellh/go-testing-interface v1.14.1 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect + github.com/mmcloughlin/addchain v0.4.0 // indirect github.com/mr-tron/base58 v1.2.0 // indirect github.com/mtibben/percent v0.2.1 // indirect github.com/multiformats/go-base36 v0.2.0 // indirect @@ 
-268,7 +263,7 @@ require ( github.com/multiformats/go-multicodec v0.9.0 // indirect github.com/multiformats/go-multistream v0.5.0 // indirect github.com/multiformats/go-varint v0.0.7 // indirect - github.com/onsi/ginkgo/v2 v2.13.0 // indirect + github.com/onsi/ginkgo/v2 v2.13.2 // indirect github.com/opencontainers/runtime-spec v1.1.0 // indirect github.com/opentracing/opentracing-go v1.2.0 // indirect github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect @@ -279,13 +274,13 @@ require ( github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/polydawn/refmt v0.89.0 // indirect - github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16 // indirect - github.com/prometheus/common v0.44.0 // indirect - github.com/prometheus/procfs v0.11.1 // indirect + github.com/prometheus/client_model v0.5.0 // indirect + github.com/prometheus/common v0.45.0 // indirect + github.com/prometheus/procfs v0.12.0 // indirect github.com/pyroscope-io/godeltaprof v0.1.2 // indirect github.com/quic-go/qpack v0.4.0 // indirect - github.com/quic-go/qtls-go1-20 v0.3.4 // indirect - github.com/quic-go/quic-go v0.39.3 // indirect + github.com/quic-go/qtls-go1-20 v0.4.1 // indirect + github.com/quic-go/quic-go v0.40.1 // indirect github.com/quic-go/webtransport-go v0.6.0 // indirect github.com/rakyll/statik v0.1.7 // indirect github.com/raulk/go-watchdog v1.3.0 // indirect @@ -302,6 +297,7 @@ require ( github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/spf13/viper v1.14.0 // indirect github.com/subosito/gotenv v1.4.1 // indirect + github.com/supranational/blst v0.3.11 // indirect github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c // indirect github.com/tendermint/go-amino v0.16.0 // indirect @@ -311,40 +307,44 @@ require ( github.com/tklauser/numcpus v0.6.1 // indirect github.com/ulikunitz/xz v0.5.10 // indirect github.com/whyrusleeping/cbor v0.0.0-20171005072247-63513f603b11 // indirect - github.com/whyrusleeping/cbor-gen v0.0.0-20230818171029-f91ae536ca25 // indirect + github.com/whyrusleeping/cbor-gen v0.0.0-20240109153615-66e95c3e8a87 // indirect github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 // indirect github.com/zondax/hid v0.9.2 // indirect github.com/zondax/ledger-go v0.14.3 // indirect go.etcd.io/bbolt v1.3.6 // indirect go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.42.0 // indirect - go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.15.1 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 // indirect + go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.21.0 // indirect go.uber.org/atomic v1.11.0 // indirect go.uber.org/dig v1.17.1 // indirect + go.uber.org/mock v0.4.0 // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/mod v0.13.0 // indirect - golang.org/x/net v0.17.0 // indirect - golang.org/x/oauth2 v0.11.0 // indirect - golang.org/x/sys v0.13.0 // indirect - golang.org/x/term v0.13.0 // indirect - golang.org/x/tools v0.14.0 // indirect - golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect - gonum.org/v1/gonum v0.13.0 // indirect - google.golang.org/api v0.128.0 // indirect - google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20231002182017-d307bd883b97 // 
indirect - google.golang.org/genproto/googleapis/api v0.0.0-20230920204549-e6e6cdab5c13 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20231009173412-8bfb1ae86b6c // indirect + golang.org/x/mod v0.14.0 // indirect + golang.org/x/net v0.20.0 // indirect + golang.org/x/oauth2 v0.16.0 // indirect + golang.org/x/sys v0.17.0 // indirect + golang.org/x/term v0.17.0 // indirect + golang.org/x/time v0.5.0 // indirect + golang.org/x/tools v0.16.1 // indirect + golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect + gonum.org/v1/gonum v0.14.0 // indirect + google.golang.org/api v0.155.0 // indirect + google.golang.org/appengine v1.6.8 // indirect + google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect lukechampine.com/blake3 v1.2.1 // indirect nhooyr.io/websocket v1.8.7 // indirect - sigs.k8s.io/yaml v1.3.0 // indirect + rsc.io/tmplfunc v0.0.3 // indirect + sigs.k8s.io/yaml v1.4.0 // indirect ) replace ( - github.com/cosmos/cosmos-sdk => github.com/celestiaorg/cosmos-sdk v1.18.1-sdk-v0.46.14 + github.com/cosmos/cosmos-sdk => github.com/celestiaorg/cosmos-sdk v1.18.3-sdk-v0.46.14 github.com/filecoin-project/dagstore => github.com/celestiaorg/dagstore v0.0.0-20230824094345-537c012aa403 github.com/gogo/protobuf => github.com/regen-network/protobuf v1.3.3-alpha.regen.1 // broken goleveldb needs to be replaced for the cosmos-sdk and celestia-app diff --git a/go.sum b/go.sum index 0458580b74..a5ea60f868 100644 --- a/go.sum +++ b/go.sum @@ -36,8 +36,8 @@ cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w9 cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc= cloud.google.com/go v0.102.1/go.mod h1:XZ77E9qnTEnrgEOvr4xzfdX5TRo7fB4T2F4O6+34hIU= cloud.google.com/go v0.104.0/go.mod h1:OO6xxXdJyvuJPcEPBLN9BJPD+jep5G1+2U5B5gkRYtA= -cloud.google.com/go v0.110.8 h1:tyNdfIxjzaWctIiLYOTalaLKZ17SI44SKFW26QbOhME= -cloud.google.com/go v0.110.8/go.mod h1:Iz8AkXJf1qmxC3Oxoep8R1T36w8B92yU29PcBhHO5fk= +cloud.google.com/go v0.112.0 h1:tpFCD7hpHFlQ8yPwT3x+QeXqc2T6+n6T+hmABHfDUSM= +cloud.google.com/go v0.112.0/go.mod h1:3jEEVwZ/MHU4djK5t5RHuKOA/GbLddgTdVubX1qnPD4= cloud.google.com/go/aiplatform v1.22.0/go.mod h1:ig5Nct50bZlzV6NvKaTwmplLLddFx0YReh9WfTO5jKw= cloud.google.com/go/aiplatform v1.24.0/go.mod h1:67UUvRBKG6GTayHKV8DBv2RtR1t93YRu5B1P3x99mYY= cloud.google.com/go/analytics v0.11.0/go.mod h1:DjEWCu41bVbYcKyvlws9Er60YE4a//bK6mnhWvQeFNI= @@ -75,8 +75,8 @@ cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U= cloud.google.com/go/compute v1.10.0/go.mod h1:ER5CLbMxl90o2jtNbGSbtfOpQKR0t15FOtRsugnLrlU= -cloud.google.com/go/compute v1.23.0 h1:tP41Zoavr8ptEqaW6j+LQOnyBBhO7OkOMAGrgLopTwY= -cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= +cloud.google.com/go/compute v1.23.3 h1:6sVlXXBmbd7jNX0Ipq0trII3e4n1/MsADLK6a+aiVlk= +cloud.google.com/go/compute v1.23.3/go.mod h1:VCgBUoMnIVIR0CscqQiPJLAG25E3ZRZMzcFZeQ+h8CI= cloud.google.com/go/compute/metadata v0.2.3 
h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= cloud.google.com/go/containeranalysis v0.5.1/go.mod h1:1D92jd8gRR/c0fGMlymRgxWD3Qw9C1ff6/T7mLgVL8I= @@ -116,8 +116,8 @@ cloud.google.com/go/gkehub v0.10.0/go.mod h1:UIPwxI0DsrpsVoWpLB0stwKCP+WFVG9+y97 cloud.google.com/go/grafeas v0.2.0/go.mod h1:KhxgtF2hb0P191HlY5besjYm6MqTSTj3LSI+M+ByZHc= cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= cloud.google.com/go/iam v0.5.0/go.mod h1:wPU9Vt0P4UmCux7mqtRu6jcpPAb74cP1fh50J3QpkUc= -cloud.google.com/go/iam v1.1.2 h1:gacbrBdWcoVmGLozRuStX45YKvJtzIjJdAolzUs1sm4= -cloud.google.com/go/iam v1.1.2/go.mod h1:A5avdyVL2tCppe4unb0951eI9jreack+RJ0/d+KUZOU= +cloud.google.com/go/iam v1.1.5 h1:1jTsCu4bcsNsE4iiqNT5SHwrDRCfRmIaaaVFhRveTJI= +cloud.google.com/go/iam v1.1.5/go.mod h1:rB6P/Ic3mykPbFio+vo7403drjlgvoWfYpJhMXEbzv8= cloud.google.com/go/language v1.4.0/go.mod h1:F9dRpNFQmJbkaop6g0JhSBXCNlO90e1KWx5iDdxbWic= cloud.google.com/go/language v1.6.0/go.mod h1:6dJ8t3B+lUYfStgls25GusK04NLh3eDLQnWM3mdEbhI= cloud.google.com/go/lifesciences v0.5.0/go.mod h1:3oIKy8ycWGPUyZDR/8RNnTOYevhaMLqh5vLUXs9zvT8= @@ -179,8 +179,8 @@ cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3f cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y= cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc= cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s= -cloud.google.com/go/storage v1.30.1 h1:uOdMxAs8HExqBlnLtnQyP0YkvbiDpdGShGKtx6U/oNM= -cloud.google.com/go/storage v1.30.1/go.mod h1:NfxhC0UJE1aXSx7CIIbCf7y9HKT7BiccwkR7+P7gN8E= +cloud.google.com/go/storage v1.36.0 h1:P0mOkAcaJxhCTvAkMhxMfrTKiNcub4YmmPBtlhAyTr8= +cloud.google.com/go/storage v1.36.0/go.mod h1:M6M/3V/D3KpzMTJyPOR/HU6n2Si5QdaXYEsng2xgOs8= cloud.google.com/go/talent v1.1.0/go.mod h1:Vl4pt9jiHKvOgF9KoZo6Kob9oV4lwd/ZD5Cto54zDRw= cloud.google.com/go/talent v1.2.0/go.mod h1:MoNF9bhFQbiJ6eFD3uSsg0uBALw4n4gaCaEjBw9zo8g= cloud.google.com/go/videointelligence v1.6.0/go.mod h1:w0DIDlVRKtwPCn/C4iwZIJdvC69yInhW0cfi+p546uU= @@ -193,10 +193,10 @@ cloud.google.com/go/webrisk v1.5.0/go.mod h1:iPG6fr52Tv7sGk0H6qUFzmL3HHZev1htXuW cloud.google.com/go/workflows v1.6.0/go.mod h1:6t9F5h/unJz41YqfBmqSASJSXccBLtD1Vwf+KmJENM0= cloud.google.com/go/workflows v1.7.0/go.mod h1:JhSrZuVZWuiDfKEFxU0/F1PQjmpnpcoISEXH2bcHC3M= collectd.org v0.3.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE= -cosmossdk.io/errors v1.0.0 h1:nxF07lmlBbB8NKQhtJ+sJm6ef5uV1XkvPXG2bUntb04= -cosmossdk.io/errors v1.0.0/go.mod h1:+hJZLuhdDE0pYN8HkOrVNwrIOYvUGnn6+4fjnJs/oV0= -cosmossdk.io/math v1.1.2 h1:ORZetZCTyWkI5GlZ6CZS28fMHi83ZYf+A2vVnHNzZBM= -cosmossdk.io/math v1.1.2/go.mod h1:l2Gnda87F0su8a/7FEKJfFdJrM0JZRXQaohlgJeyQh0= +cosmossdk.io/errors v1.0.1 h1:bzu+Kcr0kS/1DuPBtUFdWjzLqyUuCiyHjyJB6srBV/0= +cosmossdk.io/errors v1.0.1/go.mod h1:MeelVSZThMi4bEakzhhhE/CKqVv3nOJDA25bIqRDu/U= +cosmossdk.io/math v1.3.0 h1:RC+jryuKeytIiictDslBP9i1fhkVm6ZDmZEoNP316zE= +cosmossdk.io/math v1.3.0/go.mod h1:vnRTxewy+M7BtXBNFybkuhSH4WfedVAAnERHgVFhp3k= dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= dmitri.shuralyov.com/gpu/mtl v0.0.0-20201218220906-28db891af037/go.mod 
h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= @@ -358,20 +358,18 @@ github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7 github.com/bwesterb/go-ristretto v1.2.0/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0= github.com/c-bata/go-prompt v0.2.2/go.mod h1:VzqtzE2ksDBcdln8G7mk2RX9QyGjH+OVqOCSiVIqS34= github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= -github.com/celestiaorg/celestia-app v1.3.0 h1:vW1zMc1tQ216utzDOYSC3wGO023sKVt8+zujVXnXlOc= -github.com/celestiaorg/celestia-app v1.3.0/go.mod h1:zhdQIFGFZRRxrDVtFE4OFIT7/12RE8DRyfvNZdW8ceM= +github.com/celestiaorg/celestia-app v1.4.0 h1:hTId3xL8GssN5sHSHPP7svHi/iWp+XVxphStiR7ADiY= +github.com/celestiaorg/celestia-app v1.4.0/go.mod h1:zhdQIFGFZRRxrDVtFE4OFIT7/12RE8DRyfvNZdW8ceM= github.com/celestiaorg/celestia-core v1.29.0-tm-v0.34.29 h1:Fd7ymPUzExPGNl2gZw4i5S74arMw+iDHLE78M/cCxl4= github.com/celestiaorg/celestia-core v1.29.0-tm-v0.34.29/go.mod h1:xrICN0PBhp3AdTaZ8q4wS5Jvi32V02HNjaC2EsWiEKk= -github.com/celestiaorg/cosmos-sdk v1.18.1-sdk-v0.46.14 h1:c4cMVLU2bGTesZW1ZVgeoCB++gOOJTF3OvBsqBvo6n0= -github.com/celestiaorg/cosmos-sdk v1.18.1-sdk-v0.46.14/go.mod h1:D5y5Exw0bJkcDv9fvYDiZfZrDV1b6+xsFyiungxrCsU= +github.com/celestiaorg/cosmos-sdk v1.18.3-sdk-v0.46.14 h1:+Te28r5Zp4Vp69f82kcON9/BIF8f1BNXb0go2+achuc= +github.com/celestiaorg/cosmos-sdk v1.18.3-sdk-v0.46.14/go.mod h1:Og5KKgoBEiQlI6u56lDLG191pfknIdXctFn3COWLQP8= github.com/celestiaorg/dagstore v0.0.0-20230824094345-537c012aa403 h1:Lj73O3S+KJx5/hgZ+IeOLEIoLsAveJN/7/ZtQQtPSVw= github.com/celestiaorg/dagstore v0.0.0-20230824094345-537c012aa403/go.mod h1:cCGM1UoMvyTk8k62mkc+ReVu8iHBCtSBAAL4wYU7KEI= -github.com/celestiaorg/go-ds-badger4 v0.0.0-20230712104058-7ede1c814ac5 h1:MJgXvhJP1Au8rXTvMMlBXodu9jplEK1DxiLtMnEphOs= -github.com/celestiaorg/go-ds-badger4 v0.0.0-20230712104058-7ede1c814ac5/go.mod h1:r6xB3nvGotmlTACpAr3SunxtoXeesbqb57elgMJqflY= github.com/celestiaorg/go-fraud v0.2.0 h1:aaq2JiW0gTnhEdac3l51UCqSyJ4+VjFGTTpN83V4q7I= github.com/celestiaorg/go-fraud v0.2.0/go.mod h1:lNY1i4K6kUeeE60Z2VK8WXd+qXb8KRzfBhvwPkK6aUc= -github.com/celestiaorg/go-header v0.4.1 h1:bjbUcKDnhrJJ9EoE7vtPpgleNLVjc2S+cB4/qe8nQmo= -github.com/celestiaorg/go-header v0.4.1/go.mod h1:H8xhnDLDLbkpwmWPhCaZyTnIV3dlVxBHPnxNXS2Qu6c= +github.com/celestiaorg/go-header v0.5.3 h1:8CcflT6aIlcQXKNWcMekoBNs3EU50mEmDp17gbn1pP4= +github.com/celestiaorg/go-header v0.5.3/go.mod h1:7BVR6myjRfACbqW1de6s8OjuK66XzHm8MpFNYr0G+nU= github.com/celestiaorg/go-libp2p-messenger v0.2.0 h1:/0MuPDcFamQMbw9xTZ73yImqgTO3jHV7wKHvWD/Irao= github.com/celestiaorg/go-libp2p-messenger v0.2.0/go.mod h1:s9PIhMi7ApOauIsfBcQwbr7m+HBzmVfDIS+QLdgzDSo= github.com/celestiaorg/merkletree v0.0.0-20210714075610-a84dc3ddbbe4 h1:CJdIpo8n5MFP2MwK0gSRcOVlDlFdQJO1p+FqdxYzmvc= @@ -426,6 +424,8 @@ github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWH github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa h1:jQCWAUqqlij9Pgj2i/PB79y4KOPYVyFYdROxgaCwdTQ= +github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa/go.mod h1:x/1Gn8zydmfq8dk6e9PdstVsDgu9RuyIIJqAaF//0IM= github.com/cockroachdb/apd/v2 v2.0.2 
h1:weh8u7Cneje73dDh+2tEVLUvyBc89iwepWCD8b8034E= github.com/cockroachdb/apd/v2 v2.0.2/go.mod h1:DDxRlzC2lo3/vSlmSoS7JkqbbrARPuFOGr0B9pvN3Gw= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= @@ -502,8 +502,9 @@ github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:ma github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM= github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/crackcomm/go-gitignore v0.0.0-20170627025303-887ab5e44cc3 h1:HVTnpeuvF6Owjd5mniCL8DEXo7uYXdQEmOP4FJbV5tg= github.com/crackcomm/go-gitignore v0.0.0-20170627025303-887ab5e44cc3/go.mod h1:p1d6YEZWvFzEh4KLyvBcVSnrfNDDvK2zfK/4x2v/4pE= +github.com/crackcomm/go-gitignore v0.0.0-20231225121904-e25f5bc08668 h1:ZFUue+PNxmHlu7pYv+IYMtqlaO/0VwaGEqKepZf9JpA= +github.com/crackcomm/go-gitignore v0.0.0-20231225121904-e25f5bc08668/go.mod h1:p1d6YEZWvFzEh4KLyvBcVSnrfNDDvK2zfK/4x2v/4pE= github.com/crate-crypto/go-kzg-4844 v0.3.0 h1:UBlWE0CgyFqqzTI+IFyCzA7A3Zw4iip6uzRv5NIXG0A= github.com/crate-crypto/go-kzg-4844 v0.3.0/go.mod h1:SBP7ikXEgDnUPONgm33HtuDZEDtWa3L4QtN1ocJSEQ4= github.com/creachadair/taskgroup v0.3.2 h1:zlfutDS+5XG40AOxcHDSThxKzns8Tnr9jnr6VqkYlkM= @@ -549,8 +550,8 @@ github.com/dgraph-io/badger v1.6.2 h1:mNw0qs90GVgGGWylh0umH5iag1j6n/PeJtNvL6KY/x github.com/dgraph-io/badger v1.6.2/go.mod h1:JW2yswe3V058sS0kZ2h/AXeDSqFjxnZcRrVH//y2UQE= github.com/dgraph-io/badger/v2 v2.2007.4 h1:TRWBQg8UrlUhaFdco01nO2uXwzKS7zd+HVdwV/GHc4o= github.com/dgraph-io/badger/v2 v2.2007.4/go.mod h1:vSw/ax2qojzbN6eXHIx6KPKtCSHJN/Uz0X0VPruTIhk= -github.com/dgraph-io/badger/v4 v4.1.0 h1:E38jc0f+RATYrycSUf9LMv/t47XAy+3CApyYSq4APOQ= -github.com/dgraph-io/badger/v4 v4.1.0/go.mod h1:P50u28d39ibBRmIJuQC/NSdBOg46HnHw7al2SW5QRHg= +github.com/dgraph-io/badger/v4 v4.2.1-0.20240106094458-1c417aa3799c h1:Z9rm0wkQBM+VF7vpyrbKnCcSbww0PKygLoptTpkX3d4= +github.com/dgraph-io/badger/v4 v4.2.1-0.20240106094458-1c417aa3799c/go.mod h1:T/uWAYxrXdaXw64ihI++9RMbKTCpKd/yE9+saARew7k= github.com/dgraph-io/ristretto v0.0.2/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= github.com/dgraph-io/ristretto v0.0.3/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= @@ -576,8 +577,8 @@ github.com/dop251/goja v0.0.0-20211011172007-d99e4b8cbf48/go.mod h1:R9ET47fwRVRP github.com/dop251/goja_nodejs v0.0.0-20210225215109-d91c329300e7/go.mod h1:hn7BA7c8pLvoGndExHudxTDKZ84Pyvv+90pbBjbTz0Y= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/dustin/go-humanize v1.0.1-0.20200219035652-afde56e7acac h1:opbrjaN/L8gg6Xh5D04Tem+8xVcz6ajZlGCs49mQgyg= -github.com/dustin/go-humanize v1.0.1-0.20200219035652-afde56e7acac/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/dvsekhvalnov/jose2go v1.5.0 h1:3j8ya4Z4kMCwT5nXIKFSV84YS+HdqSSO0VsTQxaLAeM= github.com/dvsekhvalnov/jose2go v1.5.0/go.mod h1:QsHjhyTlD/lAVqn/NSbVZmSCGeDehTB/mPZadG+mhXU= 
github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= @@ -599,6 +600,8 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.m github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/envoyproxy/protoc-gen-validate v1.0.4 h1:gVPz/FMfvh57HdSJQyvBtF00j8JU4zdyUgIUNhlgg0A= +github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew= github.com/etclabscore/go-jsonschema-walk v0.0.6 h1:DrNzoKWKd8f8XB5nFGBY00IcjakRE22OTI12k+2LkyY= github.com/etclabscore/go-jsonschema-walk v0.0.6/go.mod h1:VdfDY72AFAiUhy0ZXEaWSpveGjMT5JcDIm903NGqFwQ= github.com/etclabscore/go-openrpc-reflect v0.0.37 h1:IH0e7JqIvR9OhbbFWi/BHIkXrqbR3Zyia3RJ733eT6c= @@ -617,16 +620,18 @@ github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4/go.mod h1:5tD+ne github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= -github.com/felixge/httpsnoop v1.0.1 h1:lvB5Jl89CsZtGIWuTcDM1E/vkVs49/Ml7JJe07l8SPQ= github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/filecoin-project/go-jsonrpc v0.3.1 h1:qwvAUc5VwAkooquKJmfz9R2+F8znhiqcNHYjEp/NM10= github.com/filecoin-project/go-jsonrpc v0.3.1/go.mod h1:jBSvPTl8V1N7gSTuCR4bis8wnQnIjHbRPpROol6iQKM= github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 h1:FtmdgXiUlNeRsoNMFlKLDt+S+6hbjVMEW6RGQ7aUf7c= github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= github.com/flynn/noise v0.0.0-20180327030543-2492fe189ae6/go.mod h1:1i71OnUq3iUe1ma7Lr6yG6/rjvM3emb6yoL7xLFzcVQ= -github.com/flynn/noise v1.0.0 h1:DlTHqmzmvcEiKj+4RYo/imoswx/4r6iBlCMfVtrMXpQ= github.com/flynn/noise v1.0.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag= +github.com/flynn/noise v1.0.1 h1:vPp/jdQLXC6ppsXSj/pM3W1BIJ5FEHE2TulSJBpb43Y= +github.com/flynn/noise v1.0.1/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag= github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= @@ -688,8 +693,9 @@ github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KE github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= -github.com/go-logr/logr v1.2.4/go.mod 
h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= +github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8= @@ -756,8 +762,8 @@ github.com/golang-jwt/jwt/v4 v4.3.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzw github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/geo v0.0.0-20190916061304-5b978397cfec/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo= -github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ= +github.com/golang/glog v1.2.0 h1:uCdmnmatrKCgMBlM4rMuJZWOkPDqdbZPnrMXDY4gI68= +github.com/golang/glog v1.2.0/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191027212112-611e8accdfc9/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -823,8 +829,9 @@ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= @@ -861,24 +868,24 @@ github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20221203041831-ce31453925ec/go.mod h1:dDKJzRmX4S37WGHujM7tX//fmj1uioxKzKxz3lo4HJo= -github.com/google/pprof v0.0.0-20231023181126-ff6d637d2a7b h1:RMpPgZTSApbPf7xaVel+QkoGPRLFLrwFO89uDUHEGf0= -github.com/google/pprof v0.0.0-20231023181126-ff6d637d2a7b/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= +github.com/google/pprof v0.0.0-20231229205709-960ae82b1e42 h1:dHLYa5D8/Ta0aLR2XcPsrkpAgGeFs6thhMcQK0oQ0n8= +github.com/google/pprof v0.0.0-20231229205709-960ae82b1e42/go.mod 
h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/s2a-go v0.1.4 h1:1kZ/sQM3srePvKs3tXAvQzo66XfcReoqFpIpIccE7Oc= -github.com/google/s2a-go v0.1.4/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A= +github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= +github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4= -github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg= -github.com/googleapis/enterprise-certificate-proxy v0.2.4 h1:uGy6JWR/uMIILU8wbf+OkstIrNiMjGpEIyhx8f6W7s4= -github.com/googleapis/enterprise-certificate-proxy v0.2.4/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= +github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= +github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= @@ -923,8 +930,8 @@ github.com/grpc-ecosystem/grpc-gateway v1.8.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0 h1:Wqo399gCIufwto+VfwCSvsnfGpF/w5E9CNxSwbpD6No= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0/go.mod h1:qmOFXW2epJhM0qSnUUYpldc7gVz2KMQwJ/QYCDIa7XU= github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c h1:6rhixN/i8ZofjG1Y75iExal34USq5p+wiN1tpie8IrU= github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c/go.mod h1:NMPJylDgVpX0MLRlPy15sqSwOFv/U1GZ2m21JhFfek0= github.com/gtank/merlin v0.1.1-0.20191105220539-8318aed1a79f/go.mod 
h1:T86dnYJhcGOh5BjZFCJWTDeTK7XW8uE+E21Cy/bIQ+s= @@ -958,8 +965,8 @@ github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHh github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= -github.com/hashicorp/go-retryablehttp v0.7.4 h1:ZQgVdpTdAL7WpMIwLzCfbalOcSUdkDZnpUv3/+BxzFA= -github.com/hashicorp/go-retryablehttp v0.7.4/go.mod h1:Jy/gPYAdjqffZ/yFGCFV2doI5wjtH1ewM9u8iYVjtX8= +github.com/hashicorp/go-retryablehttp v0.7.5 h1:bJj+Pj19UZMIweq/iie+1u5YCdGrnxCT9yvm0e+Nd5M= +github.com/hashicorp/go-retryablehttp v0.7.5/go.mod h1:Jy/gPYAdjqffZ/yFGCFV2doI5wjtH1ewM9u8iYVjtX8= github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= github.com/hashicorp/go-safetemp v1.0.0 h1:2HR189eFNrjHQyENnQMMpCiBAsRxzbTMIgBhEyExpmo= github.com/hashicorp/go-safetemp v1.0.0/go.mod h1:oaerMy3BhqiTbVye6QuFhFtIceqFoDHxNAB65b+Rj1I= @@ -1038,8 +1045,8 @@ github.com/influxdata/usage-client v0.0.0-20160829180054-6d3895376368/go.mod h1: github.com/ipfs/bbloom v0.0.1/go.mod h1:oqo8CVWsJFMOZqTglBG4wydCE4IQA/G2/SEofB0rjUI= github.com/ipfs/bbloom v0.0.4 h1:Gi+8EGJ2y5qiD5FbsbpX/TMNcJw8gSqr7eyjHa4Fhvs= github.com/ipfs/bbloom v0.0.4/go.mod h1:cS9YprKXpoZ9lT0n/Mw/a6/aFV6DTjTLYHeA+gyqMG0= -github.com/ipfs/boxo v0.15.0 h1:BriLydj2nlK1nKeJQHxcKSuG5ZXcoutzhBklOtxC5pk= -github.com/ipfs/boxo v0.15.0/go.mod h1:X5ulcbR5Nh7sm3Db8+08AApUo6FsGC5mb23QDKAoB/M= +github.com/ipfs/boxo v0.18.0 h1:MOL9/AgoV3e7jlVMInicaSdbgralfqSsbkc31dZ9tmw= +github.com/ipfs/boxo v0.18.0/go.mod h1:pIZgTWdm3k3pLF9Uq6MB8JEcW07UDwNJjlXW1HELW80= github.com/ipfs/go-bitfield v1.0.0/go.mod h1:N/UiujQy+K+ceU1EF5EkVd1TNqevLrCQMIcAEPrdtus= github.com/ipfs/go-bitfield v1.1.0 h1:fh7FIo8bSwaJEh6DdTWbCeZ1eqOaOkKFI74SCnsWbGA= github.com/ipfs/go-bitfield v1.1.0/go.mod h1:paqf1wjq/D2BBmzfTVFlJQ9IlFOZpg422HL0HqsGWHU= @@ -1101,6 +1108,8 @@ github.com/ipfs/go-ds-badger v0.2.1/go.mod h1:Tx7l3aTph3FMFrRS838dcSJh+jjA7cX9Dr github.com/ipfs/go-ds-badger v0.2.3/go.mod h1:pEYw0rgg3FIrywKKnL+Snr+w/LjJZVMTBRn4FS6UHUk= github.com/ipfs/go-ds-badger v0.3.0 h1:xREL3V0EH9S219kFFueOYJJTcjgNSZ2HY1iSvN7U1Ro= github.com/ipfs/go-ds-badger v0.3.0/go.mod h1:1ke6mXNqeV8K3y5Ak2bAA0osoTfmxUdupVCGm4QUIek= +github.com/ipfs/go-ds-badger4 v0.1.5 h1:MwrTsIUJIqH/ChuDdUOzxwxMxHx/Li1ECoSCKsCUxiA= +github.com/ipfs/go-ds-badger4 v0.1.5/go.mod h1:LUU2FbhNdmhAbJmMeoahVRbe4GsduAODSJHWJJh2Vo4= github.com/ipfs/go-ds-leveldb v0.0.1/go.mod h1:feO8V3kubwsEF22n0YRQCffeb79OOYIykR4L04tMOYc= github.com/ipfs/go-ds-leveldb v0.1.0/go.mod h1:hqAW8y4bwX5LWcCtku2rFNX3vjDZCy5LZCg+cSZvYb8= github.com/ipfs/go-ds-leveldb v0.4.1/go.mod h1:jpbku/YqBSsBc1qgME8BkWS4AxzF2cEu1Ii2r79Hh9s= @@ -1233,8 +1242,8 @@ github.com/ipfs/go-unixfsnode v1.4.0/go.mod h1:qc7YFFZ8tABc58p62HnIYbUMwj9chhUuF github.com/ipfs/go-unixfsnode v1.5.1/go.mod h1:ed79DaG9IEuZITJVQn4U6MZDftv6I3ygUBLPfhEbHvk= github.com/ipfs/go-unixfsnode v1.5.2/go.mod h1:NlOebRwYx8lMCNMdhAhEspYPBD3obp7TE0LvBqHY+ks= github.com/ipfs/go-unixfsnode v1.7.1/go.mod h1:PVfoyZkX1B34qzT3vJO4nsLUpRCyhnMuHBznRcXirlk= -github.com/ipfs/go-unixfsnode v1.7.4 h1:iLvKyAVKUYOIAW2t4kDYqsT7VLGj31eXJE2aeqGfbwA= -github.com/ipfs/go-unixfsnode v1.7.4/go.mod h1:PVfoyZkX1B34qzT3vJO4nsLUpRCyhnMuHBznRcXirlk= +github.com/ipfs/go-unixfsnode v1.9.0 h1:ubEhQhr22sPAKO2DNsyVBW7YB/zA8Zkif25aBvz8rc8= +github.com/ipfs/go-unixfsnode 
v1.9.0/go.mod h1:HxRu9HYHOjK6HUqFBAi++7DVoWAHn0o4v/nZ/VA+0g8= github.com/ipfs/go-verifcid v0.0.1/go.mod h1:5Hrva5KBeIog4A+UpqlaIU+DEstipcJYQQZc0g37pY0= github.com/ipfs/go-verifcid v0.0.2 h1:XPnUv0XmdH+ZIhLGKg6U2vaPaRDXb9urMyNVCE7uvTs= github.com/ipfs/go-verifcid v0.0.2/go.mod h1:40cD9x1y4OWnFXbLNJYRe7MpNvWlMn3LZAG5Wb4xnPU= @@ -1247,8 +1256,8 @@ github.com/ipld/go-car/v2 v2.1.1/go.mod h1:+2Yvf0Z3wzkv7NeI69i8tuZ+ft7jyjPYIWZze github.com/ipld/go-car/v2 v2.5.1/go.mod h1:jKjGOqoCj5zn6KjnabD6JbnCsMntqU2hLiU6baZVO3E= github.com/ipld/go-car/v2 v2.8.0/go.mod h1:a+BnAxUqgr7wcWxW/lI6ctyEQ2v9gjBChPytwFMp2f4= github.com/ipld/go-car/v2 v2.10.1/go.mod h1:sQEkXVM3csejlb1kCCb+vQ/pWBKX9QtvsrysMQjOgOg= -github.com/ipld/go-car/v2 v2.11.0 h1:lkAPwbbTFqbdfawgm+bfmFc8PjGC7D12VcaLXPCLNfM= -github.com/ipld/go-car/v2 v2.11.0/go.mod h1:aDszqev0zjtU8l96g4lwXHaU9bzArj56Y7eEN0q/xqA= +github.com/ipld/go-car/v2 v2.13.1 h1:KnlrKvEPEzr5IZHKTXLAEub+tPrzeAFQVRlSQvuxBO4= +github.com/ipld/go-car/v2 v2.13.1/go.mod h1:QkdjjFNGit2GIkpQ953KBwowuoukoM75nP/JI1iDJdo= github.com/ipld/go-codec-dagpb v1.3.0/go.mod h1:ga4JTU3abYApDC3pZ00BC2RSvC3qfBb9MSJkMLSwnhA= github.com/ipld/go-codec-dagpb v1.3.1/go.mod h1:ErNNglIi5KMur/MfFE/svtgQthzVvf+43MrzLbpcIZY= github.com/ipld/go-codec-dagpb v1.4.1/go.mod h1:XdXTO/TUD/ra9RcK/NfmwBfr1JpFxM2uRKaB9oe4LxE= @@ -1332,8 +1341,8 @@ github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47e github.com/klauspost/compress v1.15.10/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM= github.com/klauspost/compress v1.15.11/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM= github.com/klauspost/compress v1.15.12/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM= -github.com/klauspost/compress v1.17.2 h1:RlWWUY/Dr4fL8qk9YG7DTZ7PDgME2V4csBXA8L/ixi4= -github.com/klauspost/compress v1.17.2/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4= +github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.0.6/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= @@ -1344,8 +1353,8 @@ github.com/klauspost/cpuid/v2 v2.1.1/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8t github.com/klauspost/cpuid/v2 v2.1.2/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= github.com/klauspost/cpuid/v2 v2.2.1/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= github.com/klauspost/cpuid/v2 v2.2.3/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= -github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg= -github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= +github.com/klauspost/cpuid/v2 v2.2.6 h1:ndNyv040zDGIDh8thGkXYjnFtiN02M1PVVF+JE/48xc= +github.com/klauspost/cpuid/v2 v2.2.6/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg= github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/klauspost/reedsolomon v1.11.8 h1:s8RpUW5TK4hjr+djiOpbZJB4ksx+TdYbRH7vHQpwPOY= @@ -1414,12 +1423,12 @@ github.com/libp2p/go-libp2p v0.22.0/go.mod h1:UDolmweypBSjQb2f7xutPnwZ/fxioLbMBx 
github.com/libp2p/go-libp2p v0.23.4/go.mod h1:s9DEa5NLR4g+LZS+md5uGU4emjMWFiqkZr6hBTY8UxI= github.com/libp2p/go-libp2p v0.25.0/go.mod h1:vXHmFpcfl+xIGN4qW58Bw3a0/SKGAesr5/T4IuJHE3o= github.com/libp2p/go-libp2p v0.25.1/go.mod h1:xnK9/1d9+jeQCVvi/f1g12KqtVi/jP/SijtKV1hML3g= -github.com/libp2p/go-libp2p v0.32.0 h1:86I4B7nBUPIyTgw3+5Ibq6K7DdKRCuZw8URCfPc1hQM= -github.com/libp2p/go-libp2p v0.32.0/go.mod h1:hXXC3kXPlBZ1eu8Q2hptGrMB4mZ3048JUoS4EKaHW5c= +github.com/libp2p/go-libp2p v0.32.2 h1:s8GYN4YJzgUoyeYNPdW7JZeZ5Ee31iNaIBfGYMAY4FQ= +github.com/libp2p/go-libp2p v0.32.2/go.mod h1:E0LKe+diV/ZVJVnOJby8VC5xzHF0660osg71skcxJvk= github.com/libp2p/go-libp2p-asn-util v0.1.0/go.mod h1:wu+AnM9Ii2KgO5jMmS1rz9dvzTdj8BXqsPR9HR0XB7I= github.com/libp2p/go-libp2p-asn-util v0.2.0/go.mod h1:WoaWxbHKBymSN41hWSq/lGKJEca7TNm58+gGJi2WsLI= -github.com/libp2p/go-libp2p-asn-util v0.3.0 h1:gMDcMyYiZKkocGXDQ5nsUQyquC9+H+iLEQHwOCZ7s8s= -github.com/libp2p/go-libp2p-asn-util v0.3.0/go.mod h1:B1mcOrKUE35Xq/ASTmQ4tN3LNzVVaMNmq2NACuqyB9w= +github.com/libp2p/go-libp2p-asn-util v0.4.1 h1:xqL7++IKD9TBFMgnLPZR6/6iYhawHKHl950SO9L6n94= +github.com/libp2p/go-libp2p-asn-util v0.4.1/go.mod h1:d/NI6XZ9qxw67b4e+NgpQexCIiFYJjErASrYW4PFDN8= github.com/libp2p/go-libp2p-autonat v0.1.0/go.mod h1:1tLf2yXxiE/oKGtDwPYWTSYG3PtvYlJmg7NeVtPRqH8= github.com/libp2p/go-libp2p-autonat v0.1.1/go.mod h1:OXqkeGOY2xJVWKAGV2inNF5aKN/djNA3fdpCWloIudE= github.com/libp2p/go-libp2p-autonat v0.2.0/go.mod h1:DX+9teU4pEEoZUqR1PiMlqliONQdNbfzE1C718tcViI= @@ -1472,8 +1481,8 @@ github.com/libp2p/go-libp2p-discovery v0.3.0/go.mod h1:o03drFnz9BVAZdzC/QUQ+NeQO github.com/libp2p/go-libp2p-discovery v0.5.0/go.mod h1:+srtPIU9gDaBNu//UHvcdliKBIcr4SfDcm0/PfPJLug= github.com/libp2p/go-libp2p-kad-dht v0.19.0/go.mod h1:qPIXdiZsLczhV4/+4EO1jE8ae0YCW4ZOogc4WVIyTEU= github.com/libp2p/go-libp2p-kad-dht v0.21.0/go.mod h1:Bhm9diAFmc6qcWAr084bHNL159srVZRKADdp96Qqd1I= -github.com/libp2p/go-libp2p-kad-dht v0.25.1 h1:ofFNrf6MMEy4vi3R1VbJ7LOcTn3Csh0cDcaWHTxtWNA= -github.com/libp2p/go-libp2p-kad-dht v0.25.1/go.mod h1:6za56ncRHYXX4Nc2vn8z7CZK0P4QiMcrn77acKLM2Oo= +github.com/libp2p/go-libp2p-kad-dht v0.25.2 h1:FOIk9gHoe4YRWXTu8SY9Z1d0RILol0TrtApsMDPjAVQ= +github.com/libp2p/go-libp2p-kad-dht v0.25.2/go.mod h1:6za56ncRHYXX4Nc2vn8z7CZK0P4QiMcrn77acKLM2Oo= github.com/libp2p/go-libp2p-kbucket v0.3.1/go.mod h1:oyjT5O7tS9CQurok++ERgc46YLwEpuGoFq9ubvoUOio= github.com/libp2p/go-libp2p-kbucket v0.5.0/go.mod h1:zGzGCpQd78b5BNTDGHNDLaTt9aDK/A02xeZp9QeFC4U= github.com/libp2p/go-libp2p-kbucket v0.6.3 h1:p507271wWzpy2f1XxPzCQG9NiN6R6lHL9GiSErbQQo0= @@ -1733,8 +1742,9 @@ github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/mattn/go-tty v0.0.0-20180907095812-13ff1204f104/go.mod h1:XPvLUNfbS4fJH25nqRHfWLMa1ONC8Amw+mIA639KxkE= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg= +github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k= github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= 
github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= @@ -1745,8 +1755,8 @@ github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJys github.com/miekg/dns v1.1.43/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4= github.com/miekg/dns v1.1.48/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME= github.com/miekg/dns v1.1.50/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME= -github.com/miekg/dns v1.1.56 h1:5imZaSeoRNvpM9SzWNhEcP9QliKiz20/dA2QabIGVnE= -github.com/miekg/dns v1.1.56/go.mod h1:cRm6Oo2C8TY9ZS/TqsSrseAcncm74lfK5G+ikN2SWWY= +github.com/miekg/dns v1.1.57 h1:Jzi7ApEIzwEPLHWRcafCN9LZSBbqQpxjt/wpgvg7wcM= +github.com/miekg/dns v1.1.57/go.mod h1:uqRjCRUuEAA6qsOiJvDd+CFo/vW+y5WR6SNmHE55hZk= github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c h1:bzE/A84HN25pxAuk9Eej1Kz9OUelF97nAc82bDquQI8= github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c/go.mod h1:0SQS9kMwD2VsyFEB++InYyBJroV/FRmBgcydeSUcJms= github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b h1:z78hV3sbSMAUoyUMM0I83AUIT6Hu17AWfgjzIbtrYFc= @@ -1828,8 +1838,8 @@ github.com/multiformats/go-multiaddr v0.5.0/go.mod h1:3KAxNkUqLTJ20AAwN4XVX4kZar github.com/multiformats/go-multiaddr v0.6.0/go.mod h1:F4IpaKZuPP360tOMn2Tpyu0At8w23aRyVqeK0DbFeGM= github.com/multiformats/go-multiaddr v0.7.0/go.mod h1:Fs50eBDWvZu+l3/9S6xAE7ZYj6yhxlvaVZjakWN7xRs= github.com/multiformats/go-multiaddr v0.8.0/go.mod h1:Fs50eBDWvZu+l3/9S6xAE7ZYj6yhxlvaVZjakWN7xRs= -github.com/multiformats/go-multiaddr v0.12.0 h1:1QlibTFkoXJuDjjYsMHhE73TnzJQl8FSWatk/0gxGzE= -github.com/multiformats/go-multiaddr v0.12.0/go.mod h1:WmZXgObOQOYp9r3cslLlppkrz1FYSHmE834dfz/lWu8= +github.com/multiformats/go-multiaddr v0.12.2 h1:9G9sTY/wCYajKa9lyfWPmpZAwe6oV+Wb1zcmMS1HG24= +github.com/multiformats/go-multiaddr v0.12.2/go.mod h1:GKyaTYjZRdcUhyOetrxTk9z0cW+jA/YrnqTOvKgi44M= github.com/multiformats/go-multiaddr-dns v0.0.1/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q= github.com/multiformats/go-multiaddr-dns v0.0.2/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q= github.com/multiformats/go-multiaddr-dns v0.2.0/go.mod h1:TJ5pr5bBO7Y1B18djPuRsVkduhQH2YqYSbxWJzYGdK0= @@ -1936,8 +1946,8 @@ github.com/onsi/ginkgo/v2 v2.2.0/go.mod h1:MEH45j8TBi6u9BMogfbp0stKC5cdGjumZj5Y7 github.com/onsi/ginkgo/v2 v2.3.0/go.mod h1:Eew0uilEqZmIEZr8JrvYlvOM7Rr6xzTmMV8AyFNU9d0= github.com/onsi/ginkgo/v2 v2.4.0/go.mod h1:iHkDK1fKGcBoEHT5W7YBq4RFWaQulw+caOMkAt4OrFo= github.com/onsi/ginkgo/v2 v2.5.1/go.mod h1:63DOGlLAH8+REH8jUGdL3YpCpu7JODesutUjdENfUAc= -github.com/onsi/ginkgo/v2 v2.13.0 h1:0jY9lJquiL8fcf3M4LAXN5aMlS/b2BV86HFFPCPMgE4= -github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o= +github.com/onsi/ginkgo/v2 v2.13.2 h1:Bi2gGVkfn6gQcjNjZJVO8Gf0FHzMPf2phUei9tejVMs= +github.com/onsi/ginkgo/v2 v2.13.2/go.mod h1:XStQ8QcGwLyF4HdfcZB8SFOS/MWCgDuXMSBe6zrvLgM= github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= @@ -1951,8 +1961,8 @@ github.com/onsi/gomega v1.20.1/go.mod h1:DtrZpjmvpn2mPm4YWQa0/ALMDj9v4YxLgojwPeR github.com/onsi/gomega v1.21.1/go.mod h1:iYAIXgPSaDHak0LCMA+AWBpIKBr8WZicMxnE8luStNc= github.com/onsi/gomega v1.22.1/go.mod h1:x6n7VNe4hw0vkyYUM4mjIXx3JbLiPaBPNgB7PRQ1tuM= github.com/onsi/gomega 
v1.24.0/go.mod h1:Z/NWtiqwBrwUt4/2loMmHL63EDLnYHmVbuBpDr2vQAg= -github.com/onsi/gomega v1.27.10 h1:naR28SdDFlqrG6kScpT8VWpu1xWY5nJRCF3XaYyBjhI= -github.com/onsi/gomega v1.27.10/go.mod h1:RsS8tutOdbdgzbPtzzATp12yT7kM5I5aElG3evPbQ0M= +github.com/onsi/gomega v1.29.0 h1:KIA/t2t5UBzoirT4H9tsML45GEbo3ouUnBHsCfD2tVg= +github.com/onsi/gomega v1.29.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= github.com/open-rpc/meta-schema v0.0.0-20201029221707-1b72ef2ea333 h1:CznVS40zms0Dj5he4ERo+fRPtO0qxUk8lA8Xu3ddet0= github.com/open-rpc/meta-schema v0.0.0-20201029221707-1b72ef2ea333/go.mod h1:Ag6rSXkHIckQmjFBCweJEEt1mrTPBv8b9W4aU/NQWfI= @@ -2034,8 +2044,8 @@ github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqr github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ= github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y= -github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q= -github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY= +github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk= +github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -2043,8 +2053,8 @@ github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1: github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= -github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16 h1:v7DLqVdK4VrYkVD5diGdl4sxJurKJEMnODWRJlxV9oM= -github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= +github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= +github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= @@ -2059,8 +2069,8 @@ github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9 github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common v0.33.0/go.mod h1:gB3sOl7P0TvJabZpLY5uQMpUqRCPPCyRLCZYc7JZTNE= github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= -github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= 
-github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY= +github.com/prometheus/common v0.45.0 h1:2BGz0eBc2hdMDLnO/8n0jeB3oPrt2D08CekT0lneoxM= +github.com/prometheus/common v0.45.0/go.mod h1:YJmSTw9BoKxJplESWWxlbyttQR4uaEcGyv9MZjVOJsY= github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= @@ -2072,25 +2082,25 @@ github.com/prometheus/procfs v0.3.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4O github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= -github.com/prometheus/procfs v0.11.1 h1:xRC8Iq1yyca5ypa9n1EZnWZkt7dwcoRPQwX/5gwaUuI= -github.com/prometheus/procfs v0.11.1/go.mod h1:eesXgaPo1q7lBpVMoMy0ZOFTth9hBn4W/y0/p/ScXhY= +github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= +github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/pyroscope-io/client v0.7.2 h1:OX2qdUQsS8RSkn/3C8isD7f/P0YiZQlRbAlecAaj/R8= github.com/pyroscope-io/client v0.7.2/go.mod h1:FEocnjn+Ngzxy6EtU9ZxXWRvQ0+pffkrBxHLnPpxwi8= github.com/pyroscope-io/godeltaprof v0.1.2 h1:MdlEmYELd5w+lvIzmZvXGNMVzW2Qc9jDMuJaPOR75g4= github.com/pyroscope-io/godeltaprof v0.1.2/go.mod h1:psMITXp90+8pFenXkKIpNhrfmI9saQnPbba27VIaiQE= -github.com/pyroscope-io/otel-profiling-go v0.4.0 h1:Hk/rbUqOWoByoWy1tt4r5BX5xoKAvs5drr0511Ki8ic= -github.com/pyroscope-io/otel-profiling-go v0.4.0/go.mod h1:MXaofiWU7PgLP7eISUZJYVO4Z8WYMqpkYgeP4XrPLyg= +github.com/pyroscope-io/otel-profiling-go v0.5.0 h1:LsTP9VuQ5TgeSiyY2gPHy1de/q3jbFyGWE1v3LtHzMk= +github.com/pyroscope-io/otel-profiling-go v0.5.0/go.mod h1:jUUUXTTgntvGJKS8p5uzypXwTyuGnQP31VnWauH/lUg= github.com/quic-go/qpack v0.4.0 h1:Cr9BXA1sQS2SmDUWjSofMPNKmvF6IiIfDRmgU0w1ZCo= github.com/quic-go/qpack v0.4.0/go.mod h1:UZVnYIfi5GRk+zI9UMaCPsmZ2xKJP7XBUvVyT1Knj9A= github.com/quic-go/qtls-go1-18 v0.2.0/go.mod h1:moGulGHK7o6O8lSPSZNoOwcLvJKJ85vVNc7oJFD65bc= github.com/quic-go/qtls-go1-19 v0.2.0/go.mod h1:ySOI96ew8lnoKPtSqx2BlI5wCpUVPT05RMAlajtnyOI= github.com/quic-go/qtls-go1-20 v0.1.0/go.mod h1:JKtK6mjbAVcUTN/9jZpvLbGxvdWIKS8uT7EiStoU1SM= -github.com/quic-go/qtls-go1-20 v0.3.4 h1:MfFAPULvst4yoMgY9QmtpYmfij/em7O8UUi+bNVm7Cg= -github.com/quic-go/qtls-go1-20 v0.3.4/go.mod h1:X9Nh97ZL80Z+bX/gUXMbipO6OxdiDi58b/fMC9mAL+k= +github.com/quic-go/qtls-go1-20 v0.4.1 h1:D33340mCNDAIKBqXuAvexTNMUByrYmFYVfKfDN5nfFs= +github.com/quic-go/qtls-go1-20 v0.4.1/go.mod h1:X9Nh97ZL80Z+bX/gUXMbipO6OxdiDi58b/fMC9mAL+k= github.com/quic-go/quic-go v0.32.0/go.mod h1:/fCsKANhQIeD5l76c2JFU+07gVE3KaA0FP+0zMWwfwo= -github.com/quic-go/quic-go v0.39.3 h1:o3YB6t2SR+HU/pgwF29kJ6g4jJIJEwEZ8CKia1h1TKg= -github.com/quic-go/quic-go v0.39.3/go.mod h1:T09QsDQWjLiQ74ZmacDfqZmhY/NLnw5BC40MANNNZ1Q= +github.com/quic-go/quic-go v0.40.1 h1:X3AGzUNFs0jVuO3esAGnTfvdgvL4fq655WaOi1snv1Q= +github.com/quic-go/quic-go v0.40.1/go.mod h1:PeN7kuVJ4xZbxSv/4OX6S1USOX8MJvydwpTx31vx60c= github.com/quic-go/webtransport-go v0.5.1/go.mod 
h1:OhmmgJIzTTqXK5xvtuX0oBpLV2GkLWNDA+UeTGJXErU= github.com/quic-go/webtransport-go v0.6.0 h1:CvNsKqc4W2HljHJnoT+rMmbRJybShZ0YPFDD3NxaZLY= github.com/quic-go/webtransport-go v0.6.0/go.mod h1:9KjU4AEBqEQidGHNDkZrb8CAa1abRaosM2yGOyiikEc= @@ -2116,8 +2126,10 @@ github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6L github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= -github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= -github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= +github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= +github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= +github.com/rollkit/go-da v0.4.0 h1:/s7ZrVq7DC2aK8UXIvB7rsXrZ2mVGRw7zrexcxRvhlw= +github.com/rollkit/go-da v0.4.0/go.mod h1:Kef0XI5ecEKd3TXzI8S+9knAUJnZg0svh2DuXoCsPlM= github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U= github.com/rs/cors v1.8.2/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= @@ -2324,8 +2336,8 @@ github.com/whyrusleeping/cbor v0.0.0-20171005072247-63513f603b11/go.mod h1:Wlo/S github.com/whyrusleeping/cbor-gen v0.0.0-20200123233031-1cdf64d27158/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI= github.com/whyrusleeping/cbor-gen v0.0.0-20221220214510-0333c149dec0/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= github.com/whyrusleeping/cbor-gen v0.0.0-20230126041949-52956bd4c9aa/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= -github.com/whyrusleeping/cbor-gen v0.0.0-20230818171029-f91ae536ca25 h1:yVYDLoN2gmB3OdBXFW8e1UwgVbmCvNlnAKhvHPaNARI= -github.com/whyrusleeping/cbor-gen v0.0.0-20230818171029-f91ae536ca25/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= +github.com/whyrusleeping/cbor-gen v0.0.0-20240109153615-66e95c3e8a87 h1:S4wCk+ZL4WGGaI+GsmqCRyt68ISbnZWsK9dD9jYL0fA= +github.com/whyrusleeping/cbor-gen v0.0.0-20240109153615-66e95c3e8a87/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f h1:jQa4QT2UP9WYv2nzyawpKMOCl+Z/jW7djv2/J50lj9E= github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f/go.mod h1:p9UJB6dDgdPgMJZs7UjUOdulKyRr9fqkS+6JKAInPy8= github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 h1:EKhdznlJHPMoKr0XTrX+IlJs1LH3lyx2nfr1dOlZ79k= @@ -2378,42 +2390,45 @@ go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1 h1:SpGay3w+nEwMpfVnbqOLH5gY52/foP8RE8UzTZ1pdSE= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1/go.mod h1:4UoMYEZOC0yN/sPGH76KPkkU7zgiEWYWL9vwmbnTJPE= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 h1:aFJWCqJMNjENlcleuuOkGAPH82y0yULBScfXcIEdS24= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1/go.mod 
h1:sEGXWArGqc3tVa+ekntsN65DmVbVeW+7lTKTjZF3/Fo= go.opentelemetry.io/contrib/instrumentation/runtime v0.45.0 h1:2JydY5UiDpqvj2p7sO9bgHuhTy4hgTZ0ymehdq/Ob0Q= go.opentelemetry.io/contrib/instrumentation/runtime v0.45.0/go.mod h1:ch3a5QxOqVWxas4CzjCFFOOQe+7HgAXC/N1oVxS9DK4= go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo= -go.opentelemetry.io/otel v1.4.1/go.mod h1:StM6F/0fSwpd8dKWDCdRr7uRvEPYdW0hBSlbdTiUde4= go.opentelemetry.io/otel v1.7.0/go.mod h1:5BdUoMIz5WEs0vt0CUEMtSSaTSHBBVwrhnz7+nrD5xk= go.opentelemetry.io/otel v1.13.0/go.mod h1:FH3RtdZCzRkJYFTCsAKDy9l/XYjMdNv6QrkFFB8DvVg= -go.opentelemetry.io/otel v1.19.0 h1:MuS/TNf4/j4IXsZuJegVzI1cwut7Qc00344rgH7p8bs= -go.opentelemetry.io/otel v1.19.0/go.mod h1:i0QyjOq3UPoTzff0PJB2N66fb4S0+rSbSB15/oyH9fY= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.42.0 h1:ZtfnDL+tUrs1F0Pzfwbg2d59Gru9NCH3bgSHBM6LDwU= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.42.0/go.mod h1:hG4Fj/y8TR/tlEDREo8tWstl9fO9gcFkn4xrx0Io8xU= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v0.42.0 h1:wNMDy/LVGLj2h3p6zg4d0gypKfWKSWI14E1C4smOgl8= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v0.42.0/go.mod h1:YfbDdXAAkemWJK3H/DshvlrxqFB2rtW4rY6ky/3x/H0= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 h1:Mne5On7VWdx7omSrSSZvM4Kw7cS7NQkOOmLcgscI51U= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0/go.mod h1:IPtUMKL4O3tH5y+iXVyAXqpAwMuzC1IrxVS81rummfE= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0 h1:IeMeyr1aBvBiPVYihXIaeIZba6b8E1bYp7lbdxK8CQg= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0/go.mod h1:oVdCUtjq9MK9BlS7TtucsQwUcXcymNiEDjgDD2jMtZU= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.15.1 h1:2PunuO5SbkN5MhCbuHCd3tC6qrcaj+uDAkX/qBU5BAs= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.15.1/go.mod h1:q8+Tha+5LThjeSU8BW93uUC5w5/+DnYHMKBMpRCsui0= +go.opentelemetry.io/otel v1.20.0/go.mod h1:oUIGj3D77RwJdM6PPZImDpSZGDvkD9fhesHny69JFrs= +go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo= +go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.24.0 h1:mM8nKi6/iFQ0iqst80wDHU2ge198Ye/TfN0WBS5U24Y= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.24.0/go.mod h1:0PrIIzDteLSmNyxqcGYRL4mDIo8OTuBAOI/Bn1URxac= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.24.0 h1:t6wl9SPayj+c7lEIFgm4ooDBZVb01IhLB4InpomhRw8= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.24.0/go.mod h1:iSDOcsnSA5INXzZtwaBPrKp/lWu/V14Dd+llD0oI2EA= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.24.0 h1:Xw8U6u2f8DK2XAkGRFV7BBLENgnTGX9i4rQRxJf+/vs= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.24.0/go.mod h1:6KW1Fm6R/s6Z3PGXwSJN2K4eT6wQB3vXX6CVnYX9NmM= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.21.0 h1:VhlEQAPp9R1ktYfrPk5SOryw1e9LDDTZCbIPFrho0ec= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.21.0/go.mod h1:kB3ufRbfU+CQ4MlUcqtW8Z7YEOBeK2DJ6CmR5rYYF3E= go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU= -go.opentelemetry.io/otel/metric v1.19.0 h1:aTzpGtV0ar9wlV4Sna9sdJyII5jTVJEvKETPiOKwvpE= -go.opentelemetry.io/otel/metric v1.19.0/go.mod h1:L5rUsV9kM1IxCj1MmSdS+JQAcVm319EUrDVLrt7jqt8= +go.opentelemetry.io/otel/metric v1.20.0/go.mod 
h1:90DRw3nfK4D7Sm/75yQ00gTJxtkBxX+wu6YaNymbpVM= +go.opentelemetry.io/otel/metric v1.24.0 h1:6EhoGWWK28x1fbpA4tYTOWBkPefTDQnb8WSGXlc88kI= +go.opentelemetry.io/otel/metric v1.24.0/go.mod h1:VYhLe1rFfxuTXLgj4CBiyz+9WYBA8pNGJgDcSFRKBco= go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw= go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc= -go.opentelemetry.io/otel/sdk v1.19.0 h1:6USY6zH+L8uMH8L3t1enZPR3WFEmSTADlqldyHtJi3o= -go.opentelemetry.io/otel/sdk v1.19.0/go.mod h1:NedEbbS4w3C6zElbLdPJKOpJQOrGUJ+GfzpjUvI0v1A= -go.opentelemetry.io/otel/sdk/metric v1.19.0 h1:EJoTO5qysMsYCa+w4UghwFV/ptQgqSL/8Ni+hx+8i1k= -go.opentelemetry.io/otel/sdk/metric v1.19.0/go.mod h1:XjG0jQyFJrv2PbMvwND7LwCEhsJzCzV5210euduKcKY= +go.opentelemetry.io/otel/sdk v1.24.0 h1:YMPPDNymmQN3ZgczicBY3B6sf9n62Dlj9pWD3ucgoDw= +go.opentelemetry.io/otel/sdk v1.24.0/go.mod h1:KVrIYw6tEubO9E96HQpcmpTKDVn9gdv35HoYiQWGDFg= +go.opentelemetry.io/otel/sdk/metric v1.24.0 h1:yyMQrPzF+k88/DbH7o4FMAs80puqd+9osbiBrJrz/w8= +go.opentelemetry.io/otel/sdk/metric v1.24.0/go.mod h1:I6Y5FjH6rvEnTTAYQz3Mmv2kl6Ek5IIrmwTLqMrrOE0= go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw= -go.opentelemetry.io/otel/trace v1.4.1/go.mod h1:iYEVbroFCNut9QkwEczV9vMRPHNKSSwYZjulEtsmhFc= go.opentelemetry.io/otel/trace v1.7.0/go.mod h1:fzLSB9nqR2eXzxPXb2JW9IKE+ScyXA48yyE4TNvoHqU= go.opentelemetry.io/otel/trace v1.13.0/go.mod h1:muCvmmO9KKpvuXSf3KKAXXB2ygNYHQ+ZfI5X08d3tds= -go.opentelemetry.io/otel/trace v1.19.0 h1:DFVQmlVbfVeOuBRrwdtaehRrWiL1JoVs9CPIQ1Dzxpg= -go.opentelemetry.io/otel/trace v1.19.0/go.mod h1:mfaSyvGyEJEI0nyV2I4qhNQnbBOUUmYZpYojqMnX2vo= +go.opentelemetry.io/otel/trace v1.20.0/go.mod h1:HJSK7F/hA5RlzpZ0zKDCHCDHm556LCDtKaAo6JmBFUU= +go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI= +go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= -go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= +go.opentelemetry.io/proto/otlp v1.1.0 h1:2Di21piLrCqJ3U3eXGCTPHE9R8Nh+0uglSnOyxikMeI= +go.opentelemetry.io/proto/otlp v1.1.0/go.mod h1:GpBHCBWiqvVLDqmHZsoMM3C5ySeKTC7ej/RNTae6MdY= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= @@ -2436,8 +2451,8 @@ go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk= go.uber.org/goleak v1.2.0/go.mod h1:XJYK+MuIchqpmGmUSAzotztawfKvYLUIgg7guXrwVUo= -go.uber.org/mock v0.3.0 h1:3mUxI1No2/60yUYax92Pt8eNOEecx2D3lcXZh2NEZJo= -go.uber.org/mock v0.3.0/go.mod h1:a6FSlNadKUHUa9IP5Vyt1zh4fC7uAwxMutEAscFbkZc= +go.uber.org/mock v0.4.0 h1:VcM4ZOtdbR4f6VXfiOpwpVJDL6lCReaZ6mw31wqh7KU= +go.uber.org/mock v0.4.0/go.mod h1:a6FSlNadKUHUa9IP5Vyt1zh4fC7uAwxMutEAscFbkZc= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.5.0/go.mod 
h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= @@ -2501,7 +2516,6 @@ golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5 golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220331220935-ae2d96664a29/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220427172511-eb4f295cb31f/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= @@ -2510,8 +2524,8 @@ golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw golang.org/x/crypto v0.4.0/go.mod h1:3quD/ATkf6oY+rnes5c3ExXTbLc8mueNue5/DoinL80= golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU= golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= -golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc= -golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= +golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo= +golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -2534,8 +2548,8 @@ golang.org/x/exp v0.0.0-20220916125017-b168a2c6b86b/go.mod h1:cyybsKvd6eL0RnXn6p golang.org/x/exp v0.0.0-20221205204356-47842c84f3db/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= golang.org/x/exp v0.0.0-20230129154200-a960b3787bd2/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= golang.org/x/exp v0.0.0-20230213192124-5e25df0256eb/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= -golang.org/x/exp v0.0.0-20231006140011-7918f672742d h1:jtJma62tbqLibJ5sFQz8bKtEM8rJBtfilJ2qTU199MI= -golang.org/x/exp v0.0.0-20231006140011-7918f672742d/go.mod h1:ldy0pHrwJyGW56pPQzzkH36rKxoZW1tw7ZJpeKx+hdo= +golang.org/x/exp v0.0.0-20240110193028-0dcbfd608b1e h1:723BNChdd0c2Wk6WOE320qGBiPtYx0F0Bbm1kriShfE= +golang.org/x/exp v0.0.0-20240110193028-0dcbfd608b1e/go.mod h1:iRJReGqOEeBhDZGkGbynYwcHlctCvnjTYIamk7uXpHI= golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= @@ -2571,8 +2585,8 @@ golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2 golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI= golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.13.0 h1:I/DsJXRlw/8l/0c24sM9yb0T4z9liZTduXvdAWYiysY= -golang.org/x/mod 
v0.13.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0= +golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -2664,8 +2678,8 @@ golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= -golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= +golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo= +golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -2693,8 +2707,8 @@ golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= golang.org/x/oauth2 v0.1.0/go.mod h1:G9FE4dLTsbXUu90h/Pf85g4w1D+SSAgR+q46nJZ8M4A= -golang.org/x/oauth2 v0.11.0 h1:vPL4xzxBM4niKCW6g9whtaWVXTJf1U5e4aZxxFx/gbU= -golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk= +golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ= +golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o= golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -2711,8 +2725,8 @@ golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE= -golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= +golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -2858,8 +2872,8 @@ golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= -golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= +golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -2868,8 +2882,8 @@ golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek= -golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= +golang.org/x/term v0.17.0 h1:mkTF7LCd6WGJNL3K1Ad7kwxNfYAW6a8a8QqtMblp/4U= +golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -2892,8 +2906,8 @@ golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= -golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= +golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -2970,8 +2984,8 @@ golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA= golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= -golang.org/x/tools v0.14.0 h1:jvNa2pY0M4r62jkRQ6RwEZZyPcymeL9XZMLBbV7U2nc= -golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg= 
+golang.org/x/tools v0.16.1 h1:TLyB3WofjdOEepBHAU20JdNC1Zbg87elYofWYAY5oZA= +golang.org/x/tools v0.16.1/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -2979,13 +2993,14 @@ golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk= golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 h1:+cNy6SZtPcJQH3LJVLOSmiC7MMxXNOb3PU/VUEz+EhU= +golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= gonum.org/v1/gonum v0.0.0-20181121035319-3f7ecaa7e8ca/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= gonum.org/v1/gonum v0.6.0/go.mod h1:9mxDZsDKxgMAuccQkewq682L+0eCu4dCN2yonUJTCLU= -gonum.org/v1/gonum v0.13.0 h1:a0T3bh+7fhRyqeNbiC3qVHYmkiQgit3wnNan/2c0HMM= -gonum.org/v1/gonum v0.13.0/go.mod h1:/WPYRckkfWrhWefxyYTfrTtQR0KH4iyHNuzxqXAKyAU= +gonum.org/v1/gonum v0.14.0 h1:2NiG67LD1tEH0D7kM+ps2V+fXmsAnpUeec7n8tcr4S0= +gonum.org/v1/gonum v0.14.0/go.mod h1:AoWeoz0becf9QMWtE8iWXNXc27fK4fNeHNf/oMejGfU= gonum.org/v1/netlib v0.0.0-20181029234149-ec6d1f5cefe6/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= @@ -3041,8 +3056,8 @@ google.golang.org/api v0.96.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ google.golang.org/api v0.97.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= google.golang.org/api v0.98.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= google.golang.org/api v0.100.0/go.mod h1:ZE3Z2+ZOr87Rx7dqFsdRQkRBk36kDtp/h+QpHbB7a70= -google.golang.org/api v0.128.0 h1:RjPESny5CnQRn9V6siglged+DZCgfu9l6mO9dkX9VOg= -google.golang.org/api v0.128.0/go.mod h1:Y611qgqaE92On/7g65MQgxYul3c0rEB894kniWLY750= +google.golang.org/api v0.155.0 h1:vBmGhCYs0djJttDNynWo44zosHlPvHmA0XiN2zP2DtA= +google.golang.org/api v0.155.0/go.mod h1:GI5qK5f40kCpHfPn6+YzGAByIKWv8ujFnmoWm7Igduk= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -3051,8 +3066,9 @@ google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7 google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= 
google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= +google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= @@ -3166,12 +3182,12 @@ google.golang.org/genproto v0.0.0-20221010155953-15ba04fc1c0e/go.mod h1:3526vdqw google.golang.org/genproto v0.0.0-20221014173430-6e2ab493f96b/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= google.golang.org/genproto v0.0.0-20221025140454-527a21cfbd71/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= -google.golang.org/genproto v0.0.0-20231002182017-d307bd883b97 h1:SeZZZx0cP0fqUyA+oRzP9k7cSwJlvDFiROO72uwD6i0= -google.golang.org/genproto v0.0.0-20231002182017-d307bd883b97/go.mod h1:t1VqOqqvce95G3hIDCT5FeO3YUc6Q4Oe24L/+rNMxRk= -google.golang.org/genproto/googleapis/api v0.0.0-20230920204549-e6e6cdab5c13 h1:U7+wNaVuSTaUqNvK2+osJ9ejEZxbjHHk8F2b6Hpx0AE= -google.golang.org/genproto/googleapis/api v0.0.0-20230920204549-e6e6cdab5c13/go.mod h1:RdyHbowztCGQySiCvQPgWQWgWhGnouTdCflKoDBt32U= -google.golang.org/genproto/googleapis/rpc v0.0.0-20231009173412-8bfb1ae86b6c h1:jHkCUWkseRf+W+edG5hMzr/Uh1xkDREY4caybAq4dpY= -google.golang.org/genproto/googleapis/rpc v0.0.0-20231009173412-8bfb1ae86b6c/go.mod h1:4cYg8o5yUbm77w8ZX00LhMVNl/YVBFJRYWDc0uYWMs0= +google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 h1:KAeGQVN3M9nD0/bQXnr/ClcEMJ968gUXJQ9pwfSynuQ= +google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80/go.mod h1:cc8bqMqtv9gMOr0zHg2Vzff5ULhhL2IXP4sbcn32Dro= +google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80 h1:Lj5rbfG876hIAYFjqiJnPHfhXbv+nzTWfm04Fg/XSVU= +google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80/go.mod h1:4jWUdICTdgc3Ibxmr8nAJiiLHwQBY0UI0XZcEMaFKaA= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80 h1:AjyfHzEPEFp/NpvfN5g+KDla3EMojjhRVZc1i7cj+oM= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80/go.mod h1:PAREbraiVEVGVdTZsVWjSbbTtSyGbAgIIvni8a8CD5s= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= @@ -3217,8 +3233,8 @@ google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACu google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= google.golang.org/grpc v1.50.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= google.golang.org/grpc v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= -google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= -google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= +google.golang.org/grpc 
v1.62.0 h1:HQKZ/fa1bXkX1oFOvSjmZEUL8wLSaZTjCcLAlmZRtdk= +google.golang.org/grpc v1.62.0/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= @@ -3235,8 +3251,8 @@ google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQ google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= -google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I= +google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -3306,8 +3322,8 @@ rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= rsc.io/tmplfunc v0.0.3 h1:53XFQh69AfOa8Tw0Jm7t+GV7KZhOi6jzsCzTtKbMvzU= rsc.io/tmplfunc v0.0.3/go.mod h1:AG3sTPzElb1Io3Yg4voV9AGZJuleGAwaVRxL9M49PhA= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= -sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= -sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= +sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= +sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU= sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck= sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0= diff --git a/header/headertest/testing.go b/header/headertest/testing.go index 9907fd7eb4..e97f7f7825 100644 --- a/header/headertest/testing.go +++ b/header/headertest/testing.go @@ -297,7 +297,7 @@ func RandBlockID(testing.TB) types.BlockID { return bid } -func ExtendedHeaderFromEDS(t *testing.T, height uint64, eds *rsmt2d.ExtendedDataSquare) *header.ExtendedHeader { +func ExtendedHeaderFromEDS(t testing.TB, height uint64, eds *rsmt2d.ExtendedDataSquare) *header.ExtendedHeader { valSet, vals := RandValidatorSet(10, 10) gen := RandRawHeader(t) dah, err := share.NewRoot(eds) diff --git a/libs/pidstore/pidstore.go b/libs/pidstore/pidstore.go index 7e416d98a0..17241aa4a9 100644 --- a/libs/pidstore/pidstore.go +++ b/libs/pidstore/pidstore.go @@ -28,6 +28,7 @@ func NewPeerIDStore(ctx context.Context, ds datastore.Datastore) (*PeerIDStore, pidstore := &PeerIDStore{ ds: namespace.Wrap(ds, storePrefix), } + // check if pidstore is already initialized, and if not, // initialize the pidstore exists, err := pidstore.ds.Has(ctx, peersKey) @@ -37,6 +38,14 @@ 
func NewPeerIDStore(ctx context.Context, ds datastore.Datastore) (*PeerIDStore, if !exists { return pidstore, pidstore.Put(ctx, []peer.ID{}) } + + // if pidstore exists, ensure its contents are uncorrupted + _, err = pidstore.Load(ctx) + if err != nil { + log.Warn("pidstore: corrupted pidstore detected, resetting...", "err", err) + return pidstore, pidstore.reset(ctx) + } + return pidstore, nil } @@ -75,3 +84,14 @@ func (p *PeerIDStore) Put(ctx context.Context, peers []peer.ID) error { log.Infow("Persisted peers successfully", "amount", len(peers)) return nil } + +// reset resets the pidstore in case of corruption. +func (p *PeerIDStore) reset(ctx context.Context) error { + log.Warn("pidstore: resetting the pidstore...") + err := p.ds.Delete(ctx, peersKey) + if err != nil { + return fmt.Errorf("pidstore: error resetting datastore: %w", err) + } + + return p.Put(ctx, []peer.ID{}) +} diff --git a/libs/pidstore/pidstore_test.go b/libs/pidstore/pidstore_test.go index d8d214c83e..4a35783db3 100644 --- a/libs/pidstore/pidstore_test.go +++ b/libs/pidstore/pidstore_test.go @@ -8,6 +8,7 @@ import ( "time" "github.com/ipfs/go-datastore" + "github.com/ipfs/go-datastore/namespace" "github.com/ipfs/go-datastore/sync" "github.com/libp2p/go-libp2p/core/crypto" "github.com/libp2p/go-libp2p/core/peer" @@ -21,7 +22,7 @@ func TestPutLoad(t *testing.T) { ds := sync.MutexWrap(datastore.NewMapDatastore()) - t.Run("unitialized-pidstore", func(t *testing.T) { + t.Run("uninitialized-pidstore", func(t *testing.T) { testPutLoad(ctx, ds, t) }) t.Run("initialized-pidstore", func(t *testing.T) { @@ -29,6 +30,27 @@ func TestPutLoad(t *testing.T) { }) } +// TestCorruptedPidstore tests whether a pidstore can detect +// corruption and reset itself on construction. +func TestCorruptedPidstore(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + defer t.Cleanup(cancel) + + ds := sync.MutexWrap(datastore.NewMapDatastore()) + + // intentionally corrupt the store + wrappedDS := namespace.Wrap(ds, storePrefix) + err := wrappedDS.Put(ctx, peersKey, []byte("corrupted")) + require.NoError(t, err) + + pidstore, err := NewPeerIDStore(ctx, ds) + require.NoError(t, err) + + got, err := pidstore.Load(ctx) + require.NoError(t, err) + assert.Equal(t, []peer.ID{}, got) +} + func testPutLoad(ctx context.Context, ds datastore.Datastore, t *testing.T) { peerstore, err := NewPeerIDStore(ctx, ds) require.NoError(t, err) diff --git a/libs/utils/address.go b/libs/utils/address.go index ae52a03b16..c20d11ad06 100644 --- a/libs/utils/address.go +++ b/libs/utils/address.go @@ -1,11 +1,14 @@ package utils import ( + "errors" "fmt" "net" "strings" ) +var ErrInvalidIP = errors.New("invalid IP address or hostname given") + // SanitizeAddr trims leading protocol scheme and port from the given // IP address or hostname if present. 
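// Illustrative usage only (not part of this change): with the exported sentinel
// ErrInvalidIP above, callers can branch on the error kind via errors.Is instead of
// matching message text. The surrounding handling and the rawAddr name are hypothetical:
//
//	if _, err := SanitizeAddr(rawAddr); errors.Is(err, ErrInvalidIP) {
//		// fall back to a default listen address or ask the user for a new one
//	}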
func SanitizeAddr(addr string) (string, error) { @@ -16,7 +19,7 @@ func SanitizeAddr(addr string) (string, error) { addr = strings.TrimSuffix(addr, "/") addr = strings.Split(addr, ":")[0] if addr == "" { - return "", fmt.Errorf("invalid IP address or hostname given: %s", original) + return "", fmt.Errorf("%w: %s", ErrInvalidIP, original) } return addr, nil } diff --git a/libs/utils/address_test.go b/libs/utils/address_test.go index 15452f4d1b..48a7747a4a 100644 --- a/libs/utils/address_test.go +++ b/libs/utils/address_test.go @@ -11,6 +11,7 @@ func TestSanitizeAddr(t *testing.T) { var tests = []struct { addr string want string + err error }{ // Testcase: trims protocol prefix {addr: "http://celestia.org", want: "celestia.org"}, @@ -20,13 +21,15 @@ func TestSanitizeAddr(t *testing.T) { {addr: "tcp://192.168.42.42:5050/", want: "192.168.42.42"}, // Testcase: invariant ip {addr: "192.168.42.42", want: "192.168.42.42"}, + // Testcase: empty addr + {addr: "", want: "", err: ErrInvalidIP}, } for _, tt := range tests { t.Run(tt.addr, func(t *testing.T) { got, err := SanitizeAddr(tt.addr) - require.NoError(t, err) require.Equal(t, tt.want, got) + require.ErrorIs(t, err, tt.err) }) } } diff --git a/libs/utils/fs.go b/libs/utils/fs.go index d67e9a1eaa..4ad8b6443e 100644 --- a/libs/utils/fs.go +++ b/libs/utils/fs.go @@ -1,6 +1,8 @@ package utils -import "os" +import ( + "os" +) // Exists checks whether file or directory exists under the given 'path' on the system. func Exists(path string) bool { diff --git a/libs/utils/resetctx.go b/libs/utils/resetctx.go new file mode 100644 index 0000000000..a108cc27b4 --- /dev/null +++ b/libs/utils/resetctx.go @@ -0,0 +1,14 @@ +package utils + +import ( + "context" +) + +// ResetContextOnError returns a fresh context if the given context has an error. +func ResetContextOnError(ctx context.Context) context.Context { + if ctx.Err() != nil { + ctx = context.Background() + } + + return ctx +} diff --git a/libs/utils/square.go b/libs/utils/square.go index ce2663fd81..68d7fc5ce7 100644 --- a/libs/utils/square.go +++ b/libs/utils/square.go @@ -1,6 +1,8 @@ package utils -import "math" +import ( + "math" +) // SquareSize returns the size of the square based on the given amount of shares. func SquareSize(lenShares int) uint64 { diff --git a/logs/logs.go b/logs/logs.go index 23d0683996..5cb9ed16c6 100644 --- a/logs/logs.go +++ b/logs/logs.go @@ -1,6 +1,8 @@ package logs -import logging "github.com/ipfs/go-log/v2" +import ( + logging "github.com/ipfs/go-log/v2" +) func SetAllLoggers(level logging.LogLevel) { logging.SetAllLoggers(level) diff --git a/nodebuilder/blob/blob.go b/nodebuilder/blob/blob.go index 5e29d3b90c..f87105541a 100644 --- a/nodebuilder/blob/blob.go +++ b/nodebuilder/blob/blob.go @@ -16,10 +16,10 @@ type Module interface { // Submit sends Blobs and reports the height in which they were included. // Allows sending multiple Blobs atomically synchronously. // Uses default wallet registered on the Node. - Submit(_ context.Context, _ []*blob.Blob, _ *blob.SubmitOptions) (height uint64, _ error) + Submit(_ context.Context, _ []*blob.Blob, _ blob.GasPrice) (height uint64, _ error) // Get retrieves the blob by commitment under the given namespace and height. Get(_ context.Context, height uint64, _ share.Namespace, _ blob.Commitment) (*blob.Blob, error) - // GetAll returns all blobs under the given namespaces and height. + // GetAll returns all blobs at the given height under the given namespaces. 
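Because SanitizeAddr now wraps the new ErrInvalidIP sentinel with %w, callers can branch on the error class with errors.Is instead of matching error strings. A hypothetical caller; the localhost fallback is purely illustrative.

package example

import (
	"errors"
	"fmt"

	"github.com/celestiaorg/celestia-node/libs/utils"
)

func resolveHost(raw string) (string, error) {
	host, err := utils.SanitizeAddr(raw)
	if errors.Is(err, utils.ErrInvalidIP) {
		// invalid or empty input: fall back to a default instead of failing hard
		return "localhost", nil
	}
	if err != nil {
		return "", fmt.Errorf("sanitizing %q: %w", raw, err)
	}
	return host, nil
}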
GetAll(_ context.Context, height uint64, _ []share.Namespace) ([]*blob.Blob, error) // GetProof retrieves proofs in the given namespaces at the given height by commitment. GetProof(_ context.Context, height uint64, _ share.Namespace, _ blob.Commitment) (*blob.Proof, error) @@ -30,7 +30,7 @@ type Module interface { type API struct { Internal struct { - Submit func(context.Context, []*blob.Blob, *blob.SubmitOptions) (uint64, error) `perm:"write"` + Submit func(context.Context, []*blob.Blob, blob.GasPrice) (uint64, error) `perm:"write"` Get func(context.Context, uint64, share.Namespace, blob.Commitment) (*blob.Blob, error) `perm:"read"` GetAll func(context.Context, uint64, []share.Namespace) ([]*blob.Blob, error) `perm:"read"` GetProof func(context.Context, uint64, share.Namespace, blob.Commitment) (*blob.Proof, error) `perm:"read"` @@ -38,8 +38,8 @@ type API struct { } } -func (api *API) Submit(ctx context.Context, blobs []*blob.Blob, options *blob.SubmitOptions) (uint64, error) { - return api.Internal.Submit(ctx, blobs, options) +func (api *API) Submit(ctx context.Context, blobs []*blob.Blob, gasPrice blob.GasPrice) (uint64, error) { + return api.Internal.Submit(ctx, blobs, gasPrice) } func (api *API) Get( diff --git a/nodebuilder/blob/cmd/blob.go b/nodebuilder/blob/cmd/blob.go index f3510edfe1..25a102843b 100644 --- a/nodebuilder/blob/cmd/blob.go +++ b/nodebuilder/blob/cmd/blob.go @@ -2,7 +2,9 @@ package cmd import ( "encoding/base64" + "errors" "fmt" + "path/filepath" "reflect" "strconv" @@ -16,8 +18,11 @@ import ( var ( base64Flag bool - fee int64 - gasLimit uint64 + gasPrice float64 + + // flagFileInput allows the user to provide file path to the json file + // for submitting multiple blobs. + flagFileInput = "input-file" ) func init() { @@ -37,23 +42,15 @@ func init() { "printed blob's data as a base64 string", ) - submitCmd.PersistentFlags().Int64Var( - &fee, - "fee", - -1, - "specifies fee (in utia) for blob submission.\n"+ - "Fee will be automatically calculated if negative value is passed [optional]", - ) - - submitCmd.PersistentFlags().Uint64Var( - &gasLimit, - "gas.limit", - 0, - "sets the amount of gas that is consumed during blob submission [optional]", + submitCmd.PersistentFlags().Float64Var( + &gasPrice, + "gas.price", + float64(blob.DefaultGasPrice()), + "specifies gas price (in utia) for blob submission.\n"+ + "Gas price will be set to default (0.002) if no value is passed", ) - // unset the default value to avoid users confusion - submitCmd.PersistentFlags().Lookup("fee").DefValue = "0" + submitCmd.PersistentFlags().String(flagFileInput, "", "Specify the file input") } var Cmd = &cobra.Command{ @@ -130,12 +127,47 @@ var getAllCmd = &cobra.Command{ } var submitCmd = &cobra.Command{ - Use: "submit [namespace] [blobData]", - Args: cobra.ExactArgs(2), - Short: "Submit the blob at the given namespace.\n" + + Use: "submit [namespace] [blobData]", + Args: func(cmd *cobra.Command, args []string) error { + path, err := cmd.Flags().GetString(flagFileInput) + if err != nil { + return err + } + + // If there is a file path input we'll check for the file extension + if path != "" { + if filepath.Ext(path) != ".json" { + return fmt.Errorf("invalid file extension, require json got %s", filepath.Ext(path)) + } + + return nil + } + + if len(args) < 2 { + return errors.New("submit requires two arguments: namespace and blobData") + } + + return nil + }, + Short: "Submit the blob(s) at the given namespace(s).\n" + + "User can use namespace and blobData as argument for single blob submission 
\n" + + "or use --input-file flag with the path to a json file for multiple blobs submission, \n" + + `where the json file contains: + + { + "Blobs": [ + { + "namespace": "0x00010203040506070809", + "blobData": "0x676d" + }, + { + "namespace": "0x42690c204d39600fddd3", + "blobData": "0x676d" + } + ] + }` + "Note:\n" + - "* only one blob is allowed to submit through the RPC.\n" + - "* fee and gas.limit params will be calculated automatically if they are not provided as arguments", + "* fee and gas limit params will be calculated automatically.\n", RunE: func(cmd *cobra.Command, args []string) error { client, err := cmdnode.ParseClientFromCtx(cmd.Context()) if err != nil { @@ -143,33 +175,66 @@ var submitCmd = &cobra.Command{ } defer client.Close() - namespace, err := cmdnode.ParseV0Namespace(args[0]) + path, err := cmd.Flags().GetString(flagFileInput) if err != nil { - return fmt.Errorf("error parsing a namespace:%v", err) + return err } - parsedBlob, err := blob.NewBlobV0(namespace, []byte(args[1])) - if err != nil { - return fmt.Errorf("error creating a blob:%v", err) + jsonBlobs := make([]blobJSON, 0) + // In case of there is a file input, get the namespace and blob from the arguments + if path != "" { + paresdBlobs, err := parseSubmitBlobs(path) + if err != nil { + return err + } + + jsonBlobs = append(jsonBlobs, paresdBlobs...) + } else { + jsonBlobs = append(jsonBlobs, blobJSON{Namespace: args[0], BlobData: args[1]}) + } + + var blobs []*blob.Blob + var commitments []blob.Commitment + for _, jsonBlob := range jsonBlobs { + blob, err := getBlobFromArguments(jsonBlob.Namespace, jsonBlob.BlobData) + if err != nil { + return err + } + blobs = append(blobs, blob) + commitments = append(commitments, blob.Commitment) } height, err := client.Blob.Submit( cmd.Context(), - []*blob.Blob{parsedBlob}, - &blob.SubmitOptions{Fee: fee, GasLimit: gasLimit}, + blobs, + blob.GasPrice(gasPrice), ) response := struct { - Height uint64 `json:"height"` - Commitment blob.Commitment `json:"commitment"` + Height uint64 `json:"height"` + Commitments []blob.Commitment `json:"commitments"` }{ - Height: height, - Commitment: parsedBlob.Commitment, + Height: height, + Commitments: commitments, } return cmdnode.PrintOutput(response, err, nil) }, } +func getBlobFromArguments(namespaceArg, blobArg string) (*blob.Blob, error) { + namespace, err := cmdnode.ParseV0Namespace(namespaceArg) + if err != nil { + return nil, fmt.Errorf("error parsing a namespace:%v", err) + } + + parsedBlob, err := blob.NewBlobV0(namespace, []byte(blobArg)) + if err != nil { + return nil, fmt.Errorf("error creating a blob:%v", err) + } + + return parsedBlob, nil +} + var getProofCmd = &cobra.Command{ Use: "get-proof [height] [namespace] [commitment]", Args: cobra.ExactArgs(3), diff --git a/nodebuilder/blob/cmd/util.go b/nodebuilder/blob/cmd/util.go new file mode 100644 index 0000000000..a33b9c1a84 --- /dev/null +++ b/nodebuilder/blob/cmd/util.go @@ -0,0 +1,32 @@ +package cmd + +import ( + "encoding/json" + "os" +) + +// Define the raw content from the file input. 
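The --input-file path described above expects a JSON document shaped like the blobs/blobJSON types defined just below. Since those fields carry no json tags, encoding/json matches the lowercase keys case-insensitively. A small, self-contained sketch of that round trip, with the struct redeclared locally only so the snippet compiles on its own; the sample values come from the help text above.

package main

import (
	"encoding/json"
	"fmt"
)

// fileBlobs mirrors the blobs/blobJSON types defined below.
type fileBlobs struct {
	Blobs []struct {
		Namespace string
		BlobData  string
	}
}

func main() {
	input := []byte(`{
	  "Blobs": [
	    {"namespace": "0x00010203040506070809", "blobData": "0x676d"}
	  ]
	}`)

	var parsed fileBlobs
	if err := json.Unmarshal(input, &parsed); err != nil {
		panic(err)
	}
	fmt.Println(parsed.Blobs[0].Namespace) // 0x00010203040506070809
}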
+type blobs struct { + Blobs []blobJSON +} + +type blobJSON struct { + Namespace string + BlobData string +} + +func parseSubmitBlobs(path string) ([]blobJSON, error) { + var rawBlobs blobs + + content, err := os.ReadFile(path) + if err != nil { + return []blobJSON{}, err + } + + err = json.Unmarshal(content, &rawBlobs) + if err != nil { + return []blobJSON{}, err + } + + return rawBlobs.Blobs, err +} diff --git a/nodebuilder/blob/mocks/api.go b/nodebuilder/blob/mocks/api.go index 40bf299da1..0898e70459 100644 --- a/nodebuilder/blob/mocks/api.go +++ b/nodebuilder/blob/mocks/api.go @@ -8,10 +8,9 @@ import ( context "context" reflect "reflect" - gomock "github.com/golang/mock/gomock" - blob "github.com/celestiaorg/celestia-node/blob" share "github.com/celestiaorg/celestia-node/share" + gomock "github.com/golang/mock/gomock" ) // MockModule is a mock of Module interface. @@ -98,7 +97,7 @@ func (mr *MockModuleMockRecorder) Included(arg0, arg1, arg2, arg3, arg4 interfac } // Submit mocks base method. -func (m *MockModule) Submit(arg0 context.Context, arg1 []*blob.Blob, arg2 *blob.SubmitOptions) (uint64, error) { +func (m *MockModule) Submit(arg0 context.Context, arg1 []*blob.Blob, arg2 blob.GasPrice) (uint64, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Submit", arg0, arg1, arg2) ret0, _ := ret[0].(uint64) diff --git a/nodebuilder/config_test.go b/nodebuilder/config_test.go index db9af3a64d..e7b64b0aed 100644 --- a/nodebuilder/config_test.go +++ b/nodebuilder/config_test.go @@ -69,7 +69,7 @@ var outdatedConfig = ` KeyringBackend = "test" [P2P] - ListenAddresses = ["/ip4/0.0.0.0/udp/2121/quic-v1", "/ip6/::/udp/2121/quic-v1", "/ip4/0.0.0.0/tcp/2121", + ListenAddresses = ["/ip4/0.0.0.0/udp/2121/quic-v1", "/ip6/::/udp/2121/quic-v1", "/ip4/0.0.0.0/tcp/2121", "/ip6/::/tcp/2121"] AnnounceAddresses = [] NoAnnounceAddresses = ["/ip4/0.0.0.0/udp/2121/quic-v1", "/ip4/127.0.0.1/udp/2121/quic-v1", "/ip6/::/udp/2121/quic-v1", @@ -91,7 +91,7 @@ var outdatedConfig = ` [Gateway] Address = "0.0.0.0" Port = "26659" - Enabled = true + Enabled = true [Share] PeersLimit = 5 diff --git a/nodebuilder/core/config.go b/nodebuilder/core/config.go index 4affcd3087..bb5eea5b83 100644 --- a/nodebuilder/core/config.go +++ b/nodebuilder/core/config.go @@ -7,6 +7,8 @@ import ( "github.com/celestiaorg/celestia-node/libs/utils" ) +var MetricsEnabled bool + // Config combines all configuration fields for managing the relationship with a Core node. type Config struct { IP string diff --git a/nodebuilder/core/module.go b/nodebuilder/core/module.go index 02863eae7e..7c5c9e6bfd 100644 --- a/nodebuilder/core/module.go +++ b/nodebuilder/core/module.go @@ -35,7 +35,20 @@ func ConstructModule(tp node.Type, cfg *Config, options ...fx.Option) fx.Option return fx.Module("core", baseComponents, fx.Provide(core.NewBlockFetcher), - fxutil.ProvideAs(core.NewExchange, new(libhead.Exchange[*header.ExtendedHeader])), + fxutil.ProvideAs( + func( + fetcher *core.BlockFetcher, + store *eds.Store, + construct header.ConstructFn, + ) (*core.Exchange, error) { + var opts []core.Option + if MetricsEnabled { + opts = append(opts, core.WithMetrics()) + } + + return core.NewExchange(fetcher, store, construct, opts...) 
+ }, + new(libhead.Exchange[*header.ExtendedHeader])), fx.Invoke(fx.Annotate( func( bcast libhead.Broadcaster[*header.ExtendedHeader], @@ -43,8 +56,14 @@ func ConstructModule(tp node.Type, cfg *Config, options ...fx.Option) fx.Option pubsub *shrexsub.PubSub, construct header.ConstructFn, store *eds.Store, - ) *core.Listener { - return core.NewListener(bcast, fetcher, pubsub.Broadcast, construct, store, p2p.BlockTime) + chainID p2p.Network, + ) (*core.Listener, error) { + opts := []core.Option{core.WithChainID(chainID)} + if MetricsEnabled { + opts = append(opts, core.WithMetrics()) + } + + return core.NewListener(bcast, fetcher, pubsub.Broadcast, construct, store, p2p.BlockTime, opts...) }, fx.OnStart(func(ctx context.Context, listener *core.Listener) error { return listener.Start(ctx) diff --git a/nodebuilder/da/da.go b/nodebuilder/da/da.go new file mode 100644 index 0000000000..0d604d769f --- /dev/null +++ b/nodebuilder/da/da.go @@ -0,0 +1,54 @@ +package da + +import ( + "context" + + "github.com/rollkit/go-da" +) + +//go:generate mockgen -destination=mocks/api.go -package=mocks . Module +type Module interface { + da.DA +} + +// API is a wrapper around Module for the RPC. +// TODO(@distractedm1nd): These structs need to be autogenerated. +type API struct { + Internal struct { + MaxBlobSize func(ctx context.Context) (uint64, error) `perm:"read"` + Get func(ctx context.Context, ids []da.ID, ns da.Namespace) ([]da.Blob, error) `perm:"read"` + GetIDs func(ctx context.Context, height uint64, ns da.Namespace) ([]da.ID, error) `perm:"read"` + GetProofs func(ctx context.Context, ids []da.ID, ns da.Namespace) ([]da.Proof, error) `perm:"read"` + Commit func(ctx context.Context, blobs []da.Blob, ns da.Namespace) ([]da.Commitment, error) `perm:"read"` + Validate func(context.Context, []da.ID, []da.Proof, da.Namespace) ([]bool, error) `perm:"read"` + Submit func(context.Context, []da.Blob, float64, da.Namespace) ([]da.ID, error) `perm:"write"` + } +} + +func (api *API) MaxBlobSize(ctx context.Context) (uint64, error) { + return api.Internal.MaxBlobSize(ctx) +} + +func (api *API) Get(ctx context.Context, ids []da.ID, ns da.Namespace) ([]da.Blob, error) { + return api.Internal.Get(ctx, ids, ns) +} + +func (api *API) GetIDs(ctx context.Context, height uint64, ns da.Namespace) ([]da.ID, error) { + return api.Internal.GetIDs(ctx, height, ns) +} + +func (api *API) GetProofs(ctx context.Context, ids []da.ID, ns da.Namespace) ([]da.Proof, error) { + return api.Internal.GetProofs(ctx, ids, ns) +} + +func (api *API) Commit(ctx context.Context, blobs []da.Blob, ns da.Namespace) ([]da.Commitment, error) { + return api.Internal.Commit(ctx, blobs, ns) +} + +func (api *API) Validate(ctx context.Context, ids []da.ID, proofs []da.Proof, ns da.Namespace) ([]bool, error) { + return api.Internal.Validate(ctx, ids, proofs, ns) +} + +func (api *API) Submit(ctx context.Context, blobs []da.Blob, gasPrice float64, ns da.Namespace) ([]da.ID, error) { + return api.Internal.Submit(ctx, blobs, gasPrice, ns) +} diff --git a/nodebuilder/da/mocks/api.go b/nodebuilder/da/mocks/api.go new file mode 100644 index 0000000000..5895240906 --- /dev/null +++ b/nodebuilder/da/mocks/api.go @@ -0,0 +1,140 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/celestiaorg/celestia-node/nodebuilder/da (interfaces: Module) + +// Package mocks is a generated GoMock package. +package mocks + +import ( + context "context" + reflect "reflect" + + gomock "github.com/golang/mock/gomock" +) + +// MockModule is a mock of Module interface. 
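The new da.API follows the same wrapper convention as the other node modules: an Internal struct of function fields annotated with perm tags, plus exported methods that simply delegate to those fields. A stripped-down sketch of the shape only, using a made-up single-method module rather than the actual wiring:

package example

import "context"

// EchoAPI is a hypothetical one-method module illustrating the wrapper shape.
type EchoAPI struct {
	Internal struct {
		Echo func(ctx context.Context, msg string) (string, error) `perm:"read"`
	}
}

// The exported method delegates to the injected function field, so the same
// struct can act as a server-side wrapper or as a client stub.
func (a *EchoAPI) Echo(ctx context.Context, msg string) (string, error) {
	return a.Internal.Echo(ctx, msg)
}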
+type MockModule struct { + ctrl *gomock.Controller + recorder *MockModuleMockRecorder +} + +// MockModuleMockRecorder is the mock recorder for MockModule. +type MockModuleMockRecorder struct { + mock *MockModule +} + +// NewMockModule creates a new mock instance. +func NewMockModule(ctrl *gomock.Controller) *MockModule { + mock := &MockModule{ctrl: ctrl} + mock.recorder = &MockModuleMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockModule) EXPECT() *MockModuleMockRecorder { + return m.recorder +} + +// Commit mocks base method. +func (m *MockModule) Commit(arg0 context.Context, arg1 [][]byte, arg2 []byte) ([][]byte, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Commit", arg0, arg1, arg2) + ret0, _ := ret[0].([][]byte) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Commit indicates an expected call of Commit. +func (mr *MockModuleMockRecorder) Commit(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Commit", reflect.TypeOf((*MockModule)(nil).Commit), arg0, arg1, arg2) +} + +// Get mocks base method. +func (m *MockModule) Get(arg0 context.Context, arg1 [][]byte, arg2 []byte) ([][]byte, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Get", arg0, arg1, arg2) + ret0, _ := ret[0].([][]byte) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Get indicates an expected call of Get. +func (mr *MockModuleMockRecorder) Get(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockModule)(nil).Get), arg0, arg1, arg2) +} + +// GetIDs mocks base method. +func (m *MockModule) GetIDs(arg0 context.Context, arg1 uint64, arg2 []byte) ([][]byte, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetIDs", arg0, arg1, arg2) + ret0, _ := ret[0].([][]byte) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetIDs indicates an expected call of GetIDs. +func (mr *MockModuleMockRecorder) GetIDs(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetIDs", reflect.TypeOf((*MockModule)(nil).GetIDs), arg0, arg1, arg2) +} + +// GetProofs mocks base method. +func (m *MockModule) GetProofs(arg0 context.Context, arg1 [][]byte, arg2 []byte) ([][]byte, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetProofs", arg0, arg1, arg2) + ret0, _ := ret[0].([][]byte) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetProofs indicates an expected call of GetProofs. +func (mr *MockModuleMockRecorder) GetProofs(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetProofs", reflect.TypeOf((*MockModule)(nil).GetProofs), arg0, arg1, arg2) +} + +// MaxBlobSize mocks base method. +func (m *MockModule) MaxBlobSize(arg0 context.Context) (uint64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MaxBlobSize", arg0) + ret0, _ := ret[0].(uint64) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MaxBlobSize indicates an expected call of MaxBlobSize. +func (mr *MockModuleMockRecorder) MaxBlobSize(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MaxBlobSize", reflect.TypeOf((*MockModule)(nil).MaxBlobSize), arg0) +} + +// Submit mocks base method. 
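The generated mock can be driven like any gomock double. A hypothetical test exercising the MaxBlobSize path; the import path follows the mockgen directive shown in da.go above, and the size value is arbitrary.

package da_test

import (
	"context"
	"testing"

	"github.com/golang/mock/gomock"
	"github.com/stretchr/testify/require"

	"github.com/celestiaorg/celestia-node/nodebuilder/da/mocks"
)

func TestMaxBlobSizeMock(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	mod := mocks.NewMockModule(ctrl)
	// arbitrary value; the real module returns the app's max bytes constant
	mod.EXPECT().MaxBlobSize(gomock.Any()).Return(uint64(1024), nil)

	size, err := mod.MaxBlobSize(context.Background())
	require.NoError(t, err)
	require.EqualValues(t, 1024, size)
}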
+func (m *MockModule) Submit(arg0 context.Context, arg1 [][]byte, arg2 float64, arg3 []byte) ([][]byte, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Submit", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].([][]byte) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Submit indicates an expected call of Submit. +func (mr *MockModuleMockRecorder) Submit(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Submit", reflect.TypeOf((*MockModule)(nil).Submit), arg0, arg1, arg2, arg3) +} + +// Validate mocks base method. +func (m *MockModule) Validate(arg0 context.Context, arg1, arg2 [][]byte, arg3 []byte) ([]bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Validate", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].([]bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Validate indicates an expected call of Validate. +func (mr *MockModuleMockRecorder) Validate(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Validate", reflect.TypeOf((*MockModule)(nil).Validate), arg0, arg1, arg2, arg3) +} diff --git a/nodebuilder/da/module.go b/nodebuilder/da/module.go new file mode 100644 index 0000000000..b119d11076 --- /dev/null +++ b/nodebuilder/da/module.go @@ -0,0 +1,14 @@ +package da + +import ( + "go.uber.org/fx" +) + +func ConstructModule() fx.Option { + return fx.Module("da", + fx.Provide(NewService), + fx.Provide(func(serv *Service) Module { + return serv + }), + ) +} diff --git a/nodebuilder/da/service.go b/nodebuilder/da/service.go new file mode 100644 index 0000000000..b775e10396 --- /dev/null +++ b/nodebuilder/da/service.go @@ -0,0 +1,188 @@ +package da + +import ( + "context" + "encoding/binary" + "fmt" + "strings" + + logging "github.com/ipfs/go-log/v2" + "github.com/rollkit/go-da" + + "github.com/celestiaorg/celestia-app/pkg/appconsts" + + "github.com/celestiaorg/celestia-node/blob" + nodeblob "github.com/celestiaorg/celestia-node/nodebuilder/blob" + "github.com/celestiaorg/celestia-node/share" +) + +var _ da.DA = (*Service)(nil) + +var log = logging.Logger("go-da") + +// heightLen is a length (in bytes) of serialized height. +// +// This is 8 as uint64 consist of 8 bytes. +const heightLen = 8 + +type Service struct { + blobServ nodeblob.Module +} + +func NewService(blobMod nodeblob.Module) *Service { + return &Service{ + blobServ: blobMod, + } +} + +// MaxBlobSize returns the max blob size +func (s *Service) MaxBlobSize(context.Context) (uint64, error) { + return appconsts.DefaultMaxBytes, nil +} + +// Get returns Blob for each given ID, or an error. +func (s *Service) Get(ctx context.Context, ids []da.ID, ns da.Namespace) ([]da.Blob, error) { + blobs := make([]da.Blob, 0, len(ids)) + for _, id := range ids { + height, commitment := SplitID(id) + log.Debugw("getting blob", "height", height, "commitment", commitment, "namespace", share.Namespace(ns)) + currentBlob, err := s.blobServ.Get(ctx, height, ns, commitment) + log.Debugw("got blob", "height", height, "commitment", commitment, "namespace", share.Namespace(ns)) + if err != nil { + return nil, err + } + blobs = append(blobs, currentBlob.Data) + } + return blobs, nil +} + +// GetIDs returns IDs of all Blobs located in DA at given height. 
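ConstructModule in nodebuilder/da above binds the concrete *Service to the Module interface with a second provider, which is the usual fx idiom for exposing an implementation under an interface. A generic sketch of that idiom with placeholder types:

package main

import (
	"context"
	"fmt"

	"go.uber.org/fx"
)

type Greeter interface{ Greet() string }

type greeter struct{}

func (greeter) Greet() string { return "hello" }

func newGreeter() *greeter { return &greeter{} }

func main() {
	app := fx.New(
		fx.Provide(newGreeter),
		// Bind the concrete type to the interface, as the da module binds
		// *Service to Module.
		fx.Provide(func(g *greeter) Greeter { return g }),
		fx.Invoke(func(g Greeter) { fmt.Println(g.Greet()) }),
	)
	if err := app.Start(context.Background()); err != nil {
		panic(err)
	}
	_ = app.Stop(context.Background())
}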
+func (s *Service) GetIDs(ctx context.Context, height uint64, namespace da.Namespace) ([]da.ID, error) { + var ids []da.ID //nolint:prealloc + log.Debugw("getting ids", "height", height, "namespace", share.Namespace(namespace)) + blobs, err := s.blobServ.GetAll(ctx, height, []share.Namespace{namespace}) + log.Debugw("got ids", "height", height, "namespace", share.Namespace(namespace)) + if err != nil { + if strings.Contains(err.Error(), blob.ErrBlobNotFound.Error()) { + return nil, nil + } + return nil, err + } + for _, b := range blobs { + ids = append(ids, MakeID(height, b.Commitment)) + } + return ids, nil +} + +// GetProofs returns inclusion Proofs for all Blobs located in DA at given height. +func (s *Service) GetProofs(ctx context.Context, ids []da.ID, namespace da.Namespace) ([]da.Proof, error) { + proofs := make([]da.Proof, len(ids)) + for i, id := range ids { + height, commitment := SplitID(id) + proof, err := s.blobServ.GetProof(ctx, height, namespace, commitment) + if err != nil { + return nil, err + } + proofs[i], err = proof.MarshalJSON() + if err != nil { + return nil, err + } + } + return proofs, nil +} + +// Commit creates a Commitment for each given Blob. +func (s *Service) Commit(_ context.Context, daBlobs []da.Blob, namespace da.Namespace) ([]da.Commitment, error) { + _, commitments, err := s.blobsAndCommitments(daBlobs, namespace) + return commitments, err +} + +// Submit submits the Blobs to Data Availability layer. +func (s *Service) Submit( + ctx context.Context, + daBlobs []da.Blob, + gasPrice float64, + namespace da.Namespace, +) ([]da.ID, error) { + blobs, _, err := s.blobsAndCommitments(daBlobs, namespace) + if err != nil { + return nil, err + } + + height, err := s.blobServ.Submit(ctx, blobs, blob.GasPrice(gasPrice)) + if err != nil { + log.Error("failed to submit blobs", "height", height, "gas price", gasPrice) + return nil, err + } + log.Info("successfully submitted blobs", "height", height, "gas price", gasPrice) + ids := make([]da.ID, len(blobs)) + for i, blob := range blobs { + ids[i] = MakeID(height, blob.Commitment) + } + return ids, nil +} + +// blobsAndCommitments converts []da.Blob to []*blob.Blob and generates corresponding +// []da.Commitment +func (s *Service) blobsAndCommitments( + daBlobs []da.Blob, namespace da.Namespace, +) ([]*blob.Blob, []da.Commitment, error) { + blobs := make([]*blob.Blob, 0, len(daBlobs)) + commitments := make([]da.Commitment, 0, len(daBlobs)) + for _, daBlob := range daBlobs { + b, err := blob.NewBlobV0(namespace, daBlob) + if err != nil { + return nil, nil, err + } + blobs = append(blobs, b) + + commitments = append(commitments, b.Commitment) + } + return blobs, commitments, nil +} + +// Validate validates Commitments against the corresponding Proofs. This should be possible without +// retrieving the Blobs. +func (s *Service) Validate( + ctx context.Context, + ids []da.ID, + daProofs []da.Proof, + namespace da.Namespace, +) ([]bool, error) { + included := make([]bool, len(ids)) + proofs := make([]*blob.Proof, len(ids)) + for i, daProof := range daProofs { + blobProof := &blob.Proof{} + err := blobProof.UnmarshalJSON(daProof) + if err != nil { + return nil, err + } + proofs[i] = blobProof + } + for i, id := range ids { + height, commitment := SplitID(id) + // TODO(tzdybal): for some reason, if proof doesn't match commitment, API returns (false, "blob: + // invalid proof") but analysis of the code in celestia-node implies this should never happen - + // maybe it's caused by openrpc? 
there is no way of gently handling errors here, but returned + // value is fine for us + fmt.Println("proof", proofs[i] == nil, "commitment", commitment == nil) + isIncluded, _ := s.blobServ.Included(ctx, height, namespace, proofs[i], commitment) + included = append(included, isIncluded) + } + return included, nil +} + +func MakeID(height uint64, commitment da.Commitment) da.ID { + id := make([]byte, heightLen+len(commitment)) + binary.LittleEndian.PutUint64(id, height) + copy(id[heightLen:], commitment) + return id +} + +func SplitID(id da.ID) (uint64, da.Commitment) { + if len(id) <= heightLen { + return 0, nil + } + commitment := id[heightLen:] + return binary.LittleEndian.Uint64(id[:heightLen]), commitment +} diff --git a/nodebuilder/das/constructors.go b/nodebuilder/das/constructors.go index 7c6b5bed4f..973aca5679 100644 --- a/nodebuilder/das/constructors.go +++ b/nodebuilder/das/constructors.go @@ -3,6 +3,7 @@ package das import ( "context" "fmt" + "time" "github.com/ipfs/go-datastore" @@ -12,6 +13,7 @@ import ( "github.com/celestiaorg/celestia-node/das" "github.com/celestiaorg/celestia-node/header" modfraud "github.com/celestiaorg/celestia-node/nodebuilder/fraud" + "github.com/celestiaorg/celestia-node/pruner" "github.com/celestiaorg/celestia-node/share" "github.com/celestiaorg/celestia-node/share/eds/byzantine" "github.com/celestiaorg/celestia-node/share/p2p/shrexsub" @@ -44,8 +46,11 @@ func newDASer( batching datastore.Batching, fraudServ fraud.Service[*header.ExtendedHeader], bFn shrexsub.BroadcastFn, + availWindow pruner.AvailabilityWindow, options ...das.Option, ) (*das.DASer, *modfraud.ServiceBreaker[*das.DASer, *header.ExtendedHeader], error) { + options = append(options, das.WithSamplingWindow(time.Duration(availWindow))) + ds, err := das.NewDASer(da, hsub, store, batching, fraudServ, bFn, options...) if err != nil { return nil, nil, err diff --git a/nodebuilder/das/mocks/api.go b/nodebuilder/das/mocks/api.go index 68ffaf3c8c..c4046e90e8 100644 --- a/nodebuilder/das/mocks/api.go +++ b/nodebuilder/das/mocks/api.go @@ -8,9 +8,8 @@ import ( context "context" reflect "reflect" - gomock "github.com/golang/mock/gomock" - das "github.com/celestiaorg/celestia-node/das" + gomock "github.com/golang/mock/gomock" ) // MockModule is a mock of Module interface. diff --git a/nodebuilder/gateway/config.go b/nodebuilder/gateway/config.go index 903a27489a..c49a4749a3 100644 --- a/nodebuilder/gateway/config.go +++ b/nodebuilder/gateway/config.go @@ -15,9 +15,9 @@ type Config struct { func DefaultConfig() Config { return Config{ - Address: "0.0.0.0", + Address: defaultBindAddress, // do NOT expose the same port as celestia-core by default so that both can run on the same machine - Port: "26659", + Port: defaultPort, Enabled: false, } } diff --git a/nodebuilder/gateway/config_test.go b/nodebuilder/gateway/config_test.go new file mode 100644 index 0000000000..9ef3f1e310 --- /dev/null +++ b/nodebuilder/gateway/config_test.go @@ -0,0 +1,18 @@ +package gateway + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +// TestDefaultConfig tests that the default gateway config is correct. 
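MakeID and SplitID above define the DA ID layout: an 8-byte little-endian height followed by the raw commitment bytes. A quick round-trip check under that layout, with a placeholder commitment:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

func main() {
	const heightLen = 8 // uint64 height occupies the first 8 bytes

	height := uint64(424242)
	commitment := []byte("example-commitment") // placeholder bytes

	// encode: height (little-endian) || commitment
	id := make([]byte, heightLen+len(commitment))
	binary.LittleEndian.PutUint64(id, height)
	copy(id[heightLen:], commitment)

	// decode
	gotHeight := binary.LittleEndian.Uint64(id[:heightLen])
	gotCommitment := id[heightLen:]

	fmt.Println(gotHeight == height, bytes.Equal(gotCommitment, commitment)) // true true
}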
+func TestDefaultConfig(t *testing.T) { + expected := Config{ + Address: defaultBindAddress, + Port: defaultPort, + Enabled: false, + } + + assert.Equal(t, expected, DefaultConfig()) +} diff --git a/nodebuilder/gateway/defaults.go b/nodebuilder/gateway/defaults.go new file mode 100644 index 0000000000..e6c48d5d4e --- /dev/null +++ b/nodebuilder/gateway/defaults.go @@ -0,0 +1,6 @@ +package gateway + +const ( + defaultBindAddress = "localhost" + defaultPort = "26659" +) diff --git a/nodebuilder/gateway/defaults_test.go b/nodebuilder/gateway/defaults_test.go new file mode 100644 index 0000000000..c504f8cca4 --- /dev/null +++ b/nodebuilder/gateway/defaults_test.go @@ -0,0 +1,12 @@ +package gateway + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestServerDefaultConstants(t *testing.T) { + assert.Equal(t, "localhost", defaultBindAddress) + assert.Equal(t, "26659", defaultPort) +} diff --git a/nodebuilder/gateway/flags.go b/nodebuilder/gateway/flags.go index cd13e47162..6da4a66f03 100644 --- a/nodebuilder/gateway/flags.go +++ b/nodebuilder/gateway/flags.go @@ -1,6 +1,8 @@ package gateway import ( + "fmt" + "github.com/spf13/cobra" flag "github.com/spf13/pflag" ) @@ -23,12 +25,12 @@ func Flags() *flag.FlagSet { flags.String( addrFlag, "", - "Set a custom gateway listen address (default: localhost)", + fmt.Sprintf("Set a custom gateway listen address (default: %s)", defaultBindAddress), ) flags.String( portFlag, "", - "Set a custom gateway port (default: 26659)", + fmt.Sprintf("Set a custom gateway port (default: %s)", defaultPort), ) return flags diff --git a/nodebuilder/gateway/flags_test.go b/nodebuilder/gateway/flags_test.go new file mode 100644 index 0000000000..5f55ac77f2 --- /dev/null +++ b/nodebuilder/gateway/flags_test.go @@ -0,0 +1,95 @@ +package gateway + +import ( + "fmt" + "strconv" + "testing" + + "github.com/spf13/cobra" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestFlags(t *testing.T) { + flags := Flags() + + enabled := flags.Lookup(enabledFlag) + require.NotNil(t, enabled) + assert.Equal(t, "false", enabled.Value.String()) + assert.Equal(t, "Enables the REST gateway", enabled.Usage) + + addr := flags.Lookup(addrFlag) + require.NotNil(t, addr) + assert.Equal(t, "", addr.Value.String()) + assert.Equal(t, fmt.Sprintf("Set a custom gateway listen address (default: %s)", defaultBindAddress), addr.Usage) + + port := flags.Lookup(portFlag) + require.NotNil(t, port) + assert.Equal(t, "", port.Value.String()) + assert.Equal(t, fmt.Sprintf("Set a custom gateway port (default: %s)", defaultPort), port.Usage) +} + +func TestParseFlags(t *testing.T) { + tests := []struct { + name string + enabledFlag bool + addrFlag string + portFlag string + expectedCfg *Config + }{ + { + name: "Enabled flag is true", + enabledFlag: true, + addrFlag: "127.0.0.1", + portFlag: "8080", + expectedCfg: &Config{ + Enabled: true, + Address: "127.0.0.1", + Port: "8080", + }, + }, + { + name: "Enabled flag is false", + enabledFlag: false, + addrFlag: "127.0.0.1", + portFlag: "8080", + expectedCfg: &Config{ + Enabled: false, + Address: "127.0.0.1", + Port: "8080", + }, + }, + { + name: "Enabled flag is false and address/port flags are not changed", + enabledFlag: false, + addrFlag: "", + portFlag: "", + expectedCfg: &Config{ + Enabled: false, + Address: "", + Port: "", + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + cmd := &cobra.Command{} + cfg := &Config{} + + cmd.Flags().AddFlagSet(Flags()) + + err := 
cmd.Flags().Set(enabledFlag, strconv.FormatBool(test.enabledFlag)) + assert.NoError(t, err) + err = cmd.Flags().Set(addrFlag, test.addrFlag) + assert.NoError(t, err) + err = cmd.Flags().Set(portFlag, test.portFlag) + assert.NoError(t, err) + + ParseFlags(cmd, cfg) + assert.Equal(t, test.expectedCfg.Enabled, cfg.Enabled) + assert.Equal(t, test.expectedCfg.Address, cfg.Address) + assert.Equal(t, test.expectedCfg.Port, cfg.Port) + }) + } +} diff --git a/nodebuilder/header/config.go b/nodebuilder/header/config.go index d7265373ce..3fb7162ae0 100644 --- a/nodebuilder/header/config.go +++ b/nodebuilder/header/config.go @@ -43,13 +43,18 @@ func DefaultConfig(tp node.Type) Config { Store: store.DefaultParameters(), Syncer: sync.DefaultParameters(), Server: p2p_exchange.DefaultServerParameters(), + Client: p2p_exchange.DefaultClientParameters(), } switch tp { case node.Bridge: return cfg - case node.Light, node.Full: - cfg.Client = p2p_exchange.DefaultClientParameters() + case node.Full: + return cfg + case node.Light: + cfg.Store.StoreCacheSize = 512 + cfg.Store.IndexCacheSize = 2048 + cfg.Store.WriteBatchSize = 512 return cfg default: panic("header: invalid node type") diff --git a/nodebuilder/header/constructors.go b/nodebuilder/header/constructors.go index 15d2da09b1..a78d609d8e 100644 --- a/nodebuilder/header/constructors.go +++ b/nodebuilder/header/constructors.go @@ -99,16 +99,14 @@ func newInitStore[H libhead.Header[H]]( ds datastore.Batching, ex libhead.Exchange[H], ) (libhead.Store[H], error) { - s, err := store.NewStore[H](ds, store.WithParams(cfg.Store)) - if err != nil { - return nil, err + opts := []store.Option{store.WithParams(cfg.Store)} + if MetricsEnabled { + opts = append(opts, store.WithMetrics()) } - if MetricsEnabled { - err = libhead.WithMetrics[H](s) - if err != nil { - return nil, err - } + s, err := store.NewStore[H](ds, opts...) 
+ if err != nil { + return nil, err } trustedHash, err := cfg.trustedHash(net) diff --git a/nodebuilder/module.go b/nodebuilder/module.go index 3068113102..ad287b1ac8 100644 --- a/nodebuilder/module.go +++ b/nodebuilder/module.go @@ -9,12 +9,14 @@ import ( "github.com/celestiaorg/celestia-node/libs/fxutil" "github.com/celestiaorg/celestia-node/nodebuilder/blob" "github.com/celestiaorg/celestia-node/nodebuilder/core" + "github.com/celestiaorg/celestia-node/nodebuilder/da" "github.com/celestiaorg/celestia-node/nodebuilder/das" "github.com/celestiaorg/celestia-node/nodebuilder/fraud" "github.com/celestiaorg/celestia-node/nodebuilder/gateway" modhead "github.com/celestiaorg/celestia-node/nodebuilder/header" "github.com/celestiaorg/celestia-node/nodebuilder/node" "github.com/celestiaorg/celestia-node/nodebuilder/p2p" + "github.com/celestiaorg/celestia-node/nodebuilder/prune" "github.com/celestiaorg/celestia-node/nodebuilder/rpc" "github.com/celestiaorg/celestia-node/nodebuilder/share" "github.com/celestiaorg/celestia-node/nodebuilder/state" @@ -49,13 +51,15 @@ func ConstructModule(tp node.Type, network p2p.Network, cfg *Config, store Store state.ConstructModule(tp, &cfg.State, &cfg.Core), modhead.ConstructModule[*header.ExtendedHeader](tp, &cfg.Header), share.ConstructModule(tp, &cfg.Share), - rpc.ConstructModule(tp, &cfg.RPC), gateway.ConstructModule(tp, &cfg.Gateway), core.ConstructModule(tp, &cfg.Core), das.ConstructModule(tp, &cfg.DASer), fraud.ConstructModule(tp), blob.ConstructModule(), + da.ConstructModule(), node.ConstructModule(tp), + prune.ConstructModule(tp), + rpc.ConstructModule(tp, &cfg.RPC), ) return fx.Module( diff --git a/nodebuilder/node.go b/nodebuilder/node.go index 3e6950a6ae..b16a376cc1 100644 --- a/nodebuilder/node.go +++ b/nodebuilder/node.go @@ -22,6 +22,7 @@ import ( "github.com/celestiaorg/celestia-node/api/gateway" "github.com/celestiaorg/celestia-node/api/rpc" "github.com/celestiaorg/celestia-node/nodebuilder/blob" + "github.com/celestiaorg/celestia-node/nodebuilder/da" "github.com/celestiaorg/celestia-node/nodebuilder/das" "github.com/celestiaorg/celestia-node/nodebuilder/fraud" "github.com/celestiaorg/celestia-node/nodebuilder/header" @@ -71,6 +72,7 @@ type Node struct { BlobServ blob.Module // not optional DASer das.Module // not optional AdminServ node.Module // not optional + DAMod da.Module // not optional // start and stop control ref internal fx.App lifecycle funcs to be called from Start and Stop start, stop lifecycleFunc @@ -108,8 +110,12 @@ func (n *Node) Start(ctx context.Context) error { return fmt.Errorf("node: failed to start: %w", err) } - log.Infof("\n\n/_____/ /_____/ /_____/ /_____/ /_____/ \n\nStarted celestia DA node \nnode "+ - "type: %s\nnetwork: %s\n\n/_____/ /_____/ /_____/ /_____/ /_____/ \n", strings.ToLower(n.Type.String()), + log.Infof("\n\n/_____/ /_____/ /_____/ /_____/ /_____/ \n\n"+ + "Started celestia DA node \n"+ + "node version: %s\nnode type: %s\nnetwork: %s\n\n"+ + "/_____/ /_____/ /_____/ /_____/ /_____/ \n", + node.GetBuildInfo().SemanticVersion, + strings.ToLower(n.Type.String()), n.Network) addrs, err := peer.AddrInfoToP2pAddrs(host.InfoFromHost(n.Host)) diff --git a/nodebuilder/node/admin.go b/nodebuilder/node/admin.go index 8063835f1d..f71af66081 100644 --- a/nodebuilder/node/admin.go +++ b/nodebuilder/node/admin.go @@ -38,6 +38,14 @@ func (m *module) Info(context.Context) (Info, error) { }, nil } +func (m *module) Ready(context.Context) (bool, error) { + // Because the node uses FX to provide the RPC last, all services' 
lifecycles have been started by + // the point this endpoint is available. It is not 100% guaranteed at this point that all services + // are fully ready, but it is very high probability and all endpoints are available at this point + // regardless. + return true, nil +} + func (m *module) LogLevelSet(_ context.Context, name, level string) error { return logging.SetLogLevel(name, level) } diff --git a/nodebuilder/node/mocks/api.go b/nodebuilder/node/mocks/api.go index 14357316dc..d8789a771c 100644 --- a/nodebuilder/node/mocks/api.go +++ b/nodebuilder/node/mocks/api.go @@ -8,10 +8,9 @@ import ( context "context" reflect "reflect" + node "github.com/celestiaorg/celestia-node/nodebuilder/node" auth "github.com/filecoin-project/go-jsonrpc/auth" gomock "github.com/golang/mock/gomock" - - node "github.com/celestiaorg/celestia-node/nodebuilder/node" ) // MockModule is a mock of Module interface. diff --git a/nodebuilder/node/node.go b/nodebuilder/node/node.go index 18ce93615b..b2bc7dac31 100644 --- a/nodebuilder/node/node.go +++ b/nodebuilder/node/node.go @@ -14,6 +14,9 @@ type Module interface { // Info returns administrative information about the node. Info(context.Context) (Info, error) + // Ready returns true once the node's RPC is ready to accept requests. + Ready(context.Context) (bool, error) + // LogLevelSet sets the given component log level to the given level. LogLevelSet(ctx context.Context, name, level string) error @@ -28,6 +31,7 @@ var _ Module = (*API)(nil) type API struct { Internal struct { Info func(context.Context) (Info, error) `perm:"admin"` + Ready func(context.Context) (bool, error) `perm:"read"` LogLevelSet func(ctx context.Context, name, level string) error `perm:"admin"` AuthVerify func(ctx context.Context, token string) ([]auth.Permission, error) `perm:"admin"` AuthNew func(ctx context.Context, perms []auth.Permission) (string, error) `perm:"admin"` @@ -38,6 +42,10 @@ func (api *API) Info(ctx context.Context) (Info, error) { return api.Internal.Info(ctx) } +func (api *API) Ready(ctx context.Context) (bool, error) { + return api.Internal.Ready(ctx) +} + func (api *API) LogLevelSet(ctx context.Context, name, level string) error { return api.Internal.LogLevelSet(ctx, name, level) } diff --git a/nodebuilder/node_test.go b/nodebuilder/node_test.go index 1d0f3c4fad..41eff32fab 100644 --- a/nodebuilder/node_test.go +++ b/nodebuilder/node_test.go @@ -1,3 +1,5 @@ +//go:build !race + package nodebuilder import ( diff --git a/nodebuilder/p2p/addrs.go b/nodebuilder/p2p/addrs.go index 27fbd244a3..d8f50c8144 100644 --- a/nodebuilder/p2p/addrs.go +++ b/nodebuilder/p2p/addrs.go @@ -2,7 +2,6 @@ package p2p import ( "fmt" - "slices" p2pconfig "github.com/libp2p/go-libp2p/config" hst "github.com/libp2p/go-libp2p/core/host" @@ -12,25 +11,13 @@ import ( // Listen returns invoke function that starts listening for inbound connections with libp2p.Host. 
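The new Ready endpoint is intentionally coarse, as the comment above notes, but a caller that wants to wait for the RPC to come up can still poll it. A hypothetical helper written against the Module interface; the polling interval and retry-on-error behaviour are assumptions, not part of the node API.

package example

import (
	"context"
	"time"

	"github.com/celestiaorg/celestia-node/nodebuilder/node"
)

// waitReady polls Ready until it reports true or the context expires,
// retrying through transient errors while the RPC is still coming up.
func waitReady(ctx context.Context, mod node.Module, interval time.Duration) error {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()

	for {
		ready, err := mod.Ready(ctx)
		if err == nil && ready {
			return nil
		}
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-ticker.C:
		}
	}
}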
func Listen(listen []string) func(h hst.Host) (err error) { return func(h hst.Host) (err error) { - maListen := make([]ma.Multiaddr, 0, len(listen)) - for _, addr := range listen { - maddr, err := ma.NewMultiaddr(addr) + maListen := make([]ma.Multiaddr, len(listen)) + for i, addr := range listen { + maListen[i], err = ma.NewMultiaddr(addr) if err != nil { return fmt.Errorf("failure to parse config.P2P.ListenAddresses: %s", err) } - - if !enableQUIC { - // TODO(@WonderTan): Remove this check when QUIC is stable - if slices.ContainsFunc(maddr.Protocols(), func(p ma.Protocol) bool { - return p.Code == ma.P_QUIC_V1 || p.Code == ma.P_WEBTRANSPORT - }) { - continue - } - } - - maListen = append(maListen, maddr) } - return h.Network().Listen(maListen...) } } diff --git a/nodebuilder/p2p/bitswap.go b/nodebuilder/p2p/bitswap.go index 19f98609fc..014435071a 100644 --- a/nodebuilder/p2p/bitswap.go +++ b/nodebuilder/p2p/bitswap.go @@ -2,10 +2,12 @@ package p2p import ( "context" + "errors" "fmt" - "github.com/ipfs/boxo/bitswap" + "github.com/ipfs/boxo/bitswap/client" "github.com/ipfs/boxo/bitswap/network" + "github.com/ipfs/boxo/bitswap/server" "github.com/ipfs/boxo/blockstore" "github.com/ipfs/boxo/exchange" "github.com/ipfs/go-datastore" @@ -28,17 +30,38 @@ const ( // dataExchange provides a constructor for IPFS block's DataExchange over BitSwap. func dataExchange(params bitSwapParams) exchange.Interface { - prefix := protocol.ID(fmt.Sprintf("/celestia/%s", params.Net)) - return bitswap.New( + prefix := protocolID(params.Net) + net := network.NewFromIpfsHost(params.Host, &routinghelpers.Null{}, network.Prefix(prefix)) + srvr := server.New( params.Ctx, - network.NewFromIpfsHost(params.Host, &routinghelpers.Null{}, network.Prefix(prefix)), + net, params.Bs, - bitswap.ProvideEnabled(false), - // NOTE: These below ar required for our protocol to work reliably. - // See https://github.com/celestiaorg/celestia-node/issues/732 - bitswap.SetSendDontHaves(false), - bitswap.SetSimulateDontHavesOnTimeout(false), + server.ProvideEnabled(false), // we don't provide blocks over DHT + // NOTE: These below are required for our protocol to work reliably. 
+ // // See https://github.com/celestiaorg/celestia-node/issues/732 + server.SetSendDontHaves(false), ) + + clnt := client.New( + params.Ctx, + net, + params.Bs, + client.WithBlockReceivedNotifier(srvr), + client.SetSimulateDontHavesOnTimeout(false), + client.WithoutDuplicatedBlockStats(), + ) + net.Start(srvr, clnt) // starting with hook does not work + + params.Lifecycle.Append(fx.Hook{ + OnStop: func(ctx context.Context) (err error) { + err = errors.Join(err, clnt.Close()) + err = errors.Join(err, srvr.Close()) + net.Stop() + return err + }, + }) + + return clnt } func blockstoreFromDatastore(ctx context.Context, ds datastore.Batching) (blockstore.Blockstore, error) { @@ -66,8 +89,13 @@ func blockstoreFromEDSStore(ctx context.Context, store *eds.Store) (blockstore.B type bitSwapParams struct { fx.In - Ctx context.Context - Net Network - Host hst.Host - Bs blockstore.Blockstore + Lifecycle fx.Lifecycle + Ctx context.Context + Net Network + Host hst.Host + Bs blockstore.Blockstore +} + +func protocolID(network Network) protocol.ID { + return protocol.ID(fmt.Sprintf("/celestia/%s", network)) } diff --git a/nodebuilder/p2p/bootstrap.go b/nodebuilder/p2p/bootstrap.go index 3e9da1d77d..8e1856f6fb 100644 --- a/nodebuilder/p2p/bootstrap.go +++ b/nodebuilder/p2p/bootstrap.go @@ -49,10 +49,10 @@ var bootstrapList = map[Network][]string{ "/dns4/da-full-3.celestia-bootstrap.net/tcp/2121/p2p/12D3KooWK6Ftsd4XsWCsQZgZPNhTrE5urwmkoo5P61tGvnKmNVyv", }, Arabica: { - "/dns4/da-bridge.celestia-arabica-10.com/tcp/2121/p2p/12D3KooWM3e9MWtyc8GkP8QRt74Riu17QuhGfZMytB2vq5NwkWAu", - "/dns4/da-bridge-2.celestia-arabica-10.com/tcp/2121/p2p/12D3KooWKj8mcdiBGxQRe1jqhaMnh2tGoC3rPDmr5UH2q8H4WA9M", - "/dns4/da-full-1.celestia-arabica-10.com/tcp/2121/p2p/12D3KooWBWkgmN7kmJSFovVrCjkeG47FkLGq7yEwJ2kEqNKCsBYk", - "/dns4/da-full-2.celestia-arabica-10.com/tcp/2121/p2p/12D3KooWRByRF67a2kVM2j4MP5Po3jgTw7H2iL2Spu8aUwPkrRfP", + "/dns4/da-bridge-1.celestia-arabica-11.com/tcp/2121/p2p/12D3KooWGqwzdEqM54Dce6LXzfFr97Bnhvm6rN7KM7MFwdomfm4S", + "/dns4/da-bridge-2.celestia-arabica-11.com/tcp/2121/p2p/12D3KooWCMGM5eZWVfCN9ZLAViGfLUWAfXP5pCm78NFKb9jpBtua", + "/dns4/da-bridge-3.celestia-arabica-11.com/tcp/2121/p2p/12D3KooWEWuqrjULANpukDFGVoHW3RoeUU53Ec9t9v5cwW3MkVdQ", + "/dns4/da-bridge-4.celestia-arabica-11.com/tcp/2121/p2p/12D3KooWLT1ysSrD7XWSBjh7tU1HQanF5M64dHV6AuM6cYEJxMPk", }, Mocha: { "/dns4/da-bridge-mocha-4.celestia-mocha.com/tcp/2121/p2p/12D3KooWCBAbQbJSpCpCGKzqz3rAN4ixYbc63K68zJg9aisuAajg", diff --git a/nodebuilder/p2p/cmd/p2p.go b/nodebuilder/p2p/cmd/p2p.go index 5951595fa4..8b44802947 100644 --- a/nodebuilder/p2p/cmd/p2p.go +++ b/nodebuilder/p2p/cmd/p2p.go @@ -224,7 +224,7 @@ var connectednessCmd = &cobra.Command{ var natStatusCmd = &cobra.Command{ Use: "nat-status", - Short: "Gets the currrent NAT status", + Short: "Gets the current NAT status", Args: cobra.NoArgs, RunE: func(cmd *cobra.Command, args []string) error { client, err := cmdnode.ParseClientFromCtx(cmd.Context()) diff --git a/nodebuilder/p2p/flags.go b/nodebuilder/p2p/flags.go index 0faf15e3ca..8e7c0f8bc0 100644 --- a/nodebuilder/p2p/flags.go +++ b/nodebuilder/p2p/flags.go @@ -32,10 +32,11 @@ Peers must bidirectionally point to each other. (Format: multiformats.io/multiad ) flags.String( networkFlag, - "", - "The name of the network to connect to, e.g. "+ - listProvidedNetworks()+ - ". Must be passed on both init and start to take effect.", + DefaultNetwork.String(), + fmt.Sprintf("The name of the network to connect to, e.g. %s. 
Must be passed on "+ + "both init and start to take effect. Assumes mainnet (%s) unless otherwise specified.", + listProvidedNetworks(), + DefaultNetwork.String()), ) return flags @@ -67,22 +68,23 @@ func ParseFlags( // ParseNetwork tries to parse the network from the flags and environment, // and returns either the parsed network or the build's default network func ParseNetwork(cmd *cobra.Command) (Network, error) { - parsed := cmd.Flag(networkFlag).Value.String() - // no network set through the flags, so check if there is an override in the env - if parsed == "" { - envNetwork, err := parseNetworkFromEnv() - // no network found in env, so use the default network - if envNetwork == "" { - return DefaultNetwork, err - } + if envNetwork, err := parseNetworkFromEnv(); envNetwork != "" { return envNetwork, err } - // check if user provided the actual network value - // or an alias - if net, err := Network(parsed).Validate(); err == nil { - return net, nil + parsed := cmd.Flag(networkFlag).Value.String() + switch parsed { + case "": + return "", fmt.Errorf("no network provided, allowed values: %s", listProvidedNetworks()) + + case DefaultNetwork.String(): + return DefaultNetwork, nil + + default: + if net, err := Network(parsed).Validate(); err == nil { + return net, nil + } + return "", fmt.Errorf("invalid network specified: %s, allowed values: %s", parsed, listProvidedNetworks()) } - return "", fmt.Errorf("invalid network specified: %s", parsed) } // parseNetworkFromEnv tries to parse the network from the environment. diff --git a/nodebuilder/p2p/flags_test.go b/nodebuilder/p2p/flags_test.go index bec49f6074..cfbb5fed5d 100644 --- a/nodebuilder/p2p/flags_test.go +++ b/nodebuilder/p2p/flags_test.go @@ -69,3 +69,59 @@ func createCmdWithNetworkFlag() *cobra.Command { cmd.Flags().AddFlagSet(flags) return cmd } + +// Set empty network flag and ensure error returned +func TestParseNetwork_emptyFlag(t *testing.T) { + cmd := createCmdWithNetworkFlag() + + err := cmd.Flags().Set(networkFlag, "") + require.NoError(t, err) + + _, err = ParseNetwork(cmd) + assert.Error(t, err) + +} + +// Set empty network flag and ensure error returned +func TestParseNetwork_emptyEnvEmptyFlag(t *testing.T) { + + t.Setenv(EnvCustomNetwork, "") + + cmd := createCmdWithNetworkFlag() + err := cmd.Flags().Set(networkFlag, "") + require.NoError(t, err) + + _, err = ParseNetwork(cmd) + require.Error(t, err) + +} + +// Env overrides empty flag to take precedence +func TestParseNetwork_envOverridesEmptyFlag(t *testing.T) { + + t.Setenv(EnvCustomNetwork, "custom-network") + + cmd := createCmdWithNetworkFlag() + err := cmd.Flags().Set(networkFlag, "") + require.NoError(t, err) + + network, err := ParseNetwork(cmd) + require.NoError(t, err) + assert.Equal(t, Network("custom-network"), network) + +} + +// Explicitly set flag but env should still override +func TestParseNetwork_envOverridesFlag(t *testing.T) { + + t.Setenv(EnvCustomNetwork, "custom-network") + + cmd := createCmdWithNetworkFlag() + err := cmd.Flags().Set(networkFlag, string(Mocha)) + require.NoError(t, err) + + network, err := ParseNetwork(cmd) + require.NoError(t, err) + assert.Equal(t, Network("custom-network"), network) + +} diff --git a/nodebuilder/p2p/genesis.go b/nodebuilder/p2p/genesis.go index dcc19dfa49..e35ca9bf29 100644 --- a/nodebuilder/p2p/genesis.go +++ b/nodebuilder/p2p/genesis.go @@ -24,7 +24,7 @@ func GenesisFor(net Network) (string, error) { // NOTE: Every time we add a new long-running network, its genesis hash has to be added here. 
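The reworked ParseNetwork above establishes a clear precedence: an environment override always wins, then the flag value, which now defaults to mainnet rather than an empty string, while an explicitly empty flag is rejected. A hypothetical condensation of that decision order, separate from the real validation logic:

package p2p

import "errors"

// resolveNetwork sketches the precedence implemented above; the real code
// additionally validates the flag value against the known networks.
func resolveNetwork(envNetwork, flagValue string) (string, error) {
	if envNetwork != "" {
		return envNetwork, nil // custom networks from the environment win outright
	}
	if flagValue == "" {
		return "", errors.New("no network provided")
	}
	return flagValue, nil
}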
var genesisList = map[Network]string{ Mainnet: "6BE39EFD10BA412A9DB5288488303F5DD32CF386707A5BEF33617F4C43301872", - Arabica: "5904E55478BA4B3002EE885621E007A2A6A2399662841912219AECD5D5CBE393", + Arabica: "27122593765E07329BC348E8D16E92DCB4C75B34CCCB35C640FD7A4484D4C711", Mocha: "B93BBE20A0FBFDF955811B6420F8433904664D45DB4BF51022BE4200C1A1680D", Private: "", } diff --git a/nodebuilder/p2p/host.go b/nodebuilder/p2p/host.go index c3943a02fa..e55cb65d1f 100644 --- a/nodebuilder/p2p/host.go +++ b/nodebuilder/p2p/host.go @@ -3,7 +3,6 @@ package p2p import ( "context" "fmt" - "os" "github.com/libp2p/go-libp2p" p2pconfig "github.com/libp2p/go-libp2p/config" @@ -17,22 +16,12 @@ import ( "github.com/libp2p/go-libp2p/core/routing" routedhost "github.com/libp2p/go-libp2p/p2p/host/routed" "github.com/libp2p/go-libp2p/p2p/net/conngater" - quic "github.com/libp2p/go-libp2p/p2p/transport/quic" - "github.com/libp2p/go-libp2p/p2p/transport/tcp" - webtransport "github.com/libp2p/go-libp2p/p2p/transport/webtransport" "github.com/prometheus/client_golang/prometheus" "go.uber.org/fx" "github.com/celestiaorg/celestia-node/nodebuilder/node" ) -var enableQUIC bool - -func init() { - _, ok := os.LookupEnv("CELESTIA_ENABLE_QUIC") - enableQUIC = ok -} - // routedHost constructs a wrapped Host that may fallback to address discovery, // if any top-level operation on the Host is provided with PeerID(Hash(PbK)) only. func routedHost(base HostBase, r routing.PeerRouting) hst.Host { @@ -55,15 +44,8 @@ func host(params hostParams) (HostBase, error) { libp2p.ResourceManager(params.ResourceManager), // to clearly define what defaults we rely upon libp2p.DefaultSecurity, + libp2p.DefaultTransports, libp2p.DefaultMuxers, - libp2p.Transport(tcp.NewTCPTransport), - } - - if enableQUIC { - opts = append(opts, - libp2p.Transport(quic.NewTransport), - libp2p.Transport(webtransport.New), - ) } if params.Registry != nil { diff --git a/nodebuilder/p2p/network.go b/nodebuilder/p2p/network.go index a7f9ff7236..53893eff7c 100644 --- a/nodebuilder/p2p/network.go +++ b/nodebuilder/p2p/network.go @@ -12,7 +12,7 @@ const ( // DefaultNetwork is the default network of the current build. DefaultNetwork = Mainnet // Arabica testnet. See: celestiaorg/networks. - Arabica Network = "arabica-10" + Arabica Network = "arabica-11" // Mocha testnet. See: celestiaorg/networks. Mocha Network = "mocha-4" // Private can be used to set up any private network, including local testing setups. 
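host.go above drops the explicit TCP/QUIC/WebTransport wiring and the CELESTIA_ENABLE_QUIC escape hatch in favour of libp2p.DefaultTransports. A minimal host built with the same default option set, with listen addresses and the other node-specific options omitted:

package main

import (
	"fmt"

	"github.com/libp2p/go-libp2p"
)

func main() {
	h, err := libp2p.New(
		// rely on libp2p's defaults instead of listing transports explicitly,
		// mirroring the change above
		libp2p.DefaultTransports,
		libp2p.DefaultSecurity,
		libp2p.DefaultMuxers,
	)
	if err != nil {
		panic(err)
	}
	defer h.Close()

	fmt.Println("host id:", h.ID())
}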
diff --git a/nodebuilder/prune/module.go b/nodebuilder/prune/module.go new file mode 100644 index 0000000000..2141b74bf1 --- /dev/null +++ b/nodebuilder/prune/module.go @@ -0,0 +1,47 @@ +package prune + +import ( + "context" + + "go.uber.org/fx" + + "github.com/celestiaorg/celestia-node/nodebuilder/node" + "github.com/celestiaorg/celestia-node/pruner" + "github.com/celestiaorg/celestia-node/pruner/archival" + "github.com/celestiaorg/celestia-node/pruner/light" +) + +func ConstructModule(tp node.Type) fx.Option { + baseComponents := fx.Options( + fx.Provide(fx.Annotate( + pruner.NewService, + fx.OnStart(func(ctx context.Context, p *pruner.Service) error { + return p.Start(ctx) + }), + fx.OnStop(func(ctx context.Context, p *pruner.Service) error { + return p.Stop(ctx) + }), + )), + ) + + switch tp { + case node.Full, node.Bridge: + return fx.Module("prune", + baseComponents, + fx.Provide(func() pruner.Pruner { + return archival.NewPruner() + }), + fx.Supply(archival.Window), + ) + case node.Light: + return fx.Module("prune", + baseComponents, + fx.Provide(func() pruner.Pruner { + return light.NewPruner() + }), + fx.Supply(light.Window), + ) + default: + panic("unknown node type") + } +} diff --git a/nodebuilder/rpc/config.go b/nodebuilder/rpc/config.go index 306dd562e3..d6031082a8 100644 --- a/nodebuilder/rpc/config.go +++ b/nodebuilder/rpc/config.go @@ -8,15 +8,17 @@ import ( ) type Config struct { - Address string - Port string + Address string + Port string + SkipAuth bool } func DefaultConfig() Config { return Config{ - Address: "0.0.0.0", + Address: defaultBindAddress, // do NOT expose the same port as celestia-core by default so that both can run on the same machine - Port: "26658", + Port: defaultPort, + SkipAuth: false, } } diff --git a/nodebuilder/rpc/config_test.go b/nodebuilder/rpc/config_test.go new file mode 100644 index 0000000000..1c78a1a19f --- /dev/null +++ b/nodebuilder/rpc/config_test.go @@ -0,0 +1,59 @@ +package rpc + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +// TestDefaultConfig tests that the default gateway config is correct. 
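The prune module above supplies a per-node-type availability window (archival for bridge/full, light for light nodes), and the das constructor earlier in this diff casts that window straight into the DASer's sampling window via das.WithSamplingWindow(time.Duration(availWindow)). A sketch of that conversion, assuming the window is a time.Duration-based type; the 30-day value is illustrative, not the real archival or light constant.

package main

import (
	"fmt"
	"time"
)

// AvailabilityWindow stands in for pruner.AvailabilityWindow, which the
// constructors above cast directly to a time.Duration.
type AvailabilityWindow time.Duration

func samplingWindow(w AvailabilityWindow) time.Duration {
	return time.Duration(w)
}

func main() {
	w := AvailabilityWindow(30 * 24 * time.Hour) // hypothetical window
	fmt.Println("sampling window:", samplingWindow(w))
}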
+func TestDefaultConfig(t *testing.T) { + expected := Config{ + Address: defaultBindAddress, + Port: defaultPort, + } + + assert.Equal(t, expected, DefaultConfig()) +} + +func TestConfigValidate(t *testing.T) { + tests := []struct { + name string + cfg Config + err bool + }{ + { + name: "valid config", + cfg: Config{ + Address: "127.0.0.1", + Port: "8080", + }, + err: false, + }, + { + name: "invalid address", + cfg: Config{ + Address: "invalid", + Port: "8080", + }, + err: true, + }, + { + name: "invalid port", + cfg: Config{ + Address: "127.0.0.1", + Port: "invalid", + }, + err: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := tt.cfg.Validate() + if (err != nil) != tt.err { + t.Errorf("Config.Validate() error = %v, err %v", err, tt.err) + } + }) + } +} diff --git a/nodebuilder/rpc/constructors.go b/nodebuilder/rpc/constructors.go index ca30af6305..43a8055207 100644 --- a/nodebuilder/rpc/constructors.go +++ b/nodebuilder/rpc/constructors.go @@ -5,6 +5,7 @@ import ( "github.com/celestiaorg/celestia-node/api/rpc" "github.com/celestiaorg/celestia-node/nodebuilder/blob" + "github.com/celestiaorg/celestia-node/nodebuilder/da" "github.com/celestiaorg/celestia-node/nodebuilder/das" "github.com/celestiaorg/celestia-node/nodebuilder/fraud" "github.com/celestiaorg/celestia-node/nodebuilder/header" @@ -24,18 +25,20 @@ func registerEndpoints( p2pMod p2p.Module, nodeMod node.Module, blobMod blob.Module, + daMod da.Module, serv *rpc.Server, ) { - serv.RegisterAuthedService("fraud", fraudMod, &fraud.API{}) - serv.RegisterAuthedService("das", daserMod, &das.API{}) - serv.RegisterAuthedService("header", headerMod, &header.API{}) - serv.RegisterAuthedService("state", stateMod, &state.API{}) - serv.RegisterAuthedService("share", shareMod, &share.API{}) - serv.RegisterAuthedService("p2p", p2pMod, &p2p.API{}) - serv.RegisterAuthedService("node", nodeMod, &node.API{}) - serv.RegisterAuthedService("blob", blobMod, &blob.API{}) + serv.RegisterService("fraud", fraudMod, &fraud.API{}) + serv.RegisterService("das", daserMod, &das.API{}) + serv.RegisterService("header", headerMod, &header.API{}) + serv.RegisterService("state", stateMod, &state.API{}) + serv.RegisterService("share", shareMod, &share.API{}) + serv.RegisterService("p2p", p2pMod, &p2p.API{}) + serv.RegisterService("node", nodeMod, &node.API{}) + serv.RegisterService("blob", blobMod, &blob.API{}) + serv.RegisterService("da", daMod, &da.API{}) } func server(cfg *Config, auth jwt.Signer) *rpc.Server { - return rpc.NewServer(cfg.Address, cfg.Port, auth) + return rpc.NewServer(cfg.Address, cfg.Port, cfg.SkipAuth, auth) } diff --git a/nodebuilder/rpc/defaults.go b/nodebuilder/rpc/defaults.go new file mode 100644 index 0000000000..55e51a7c9b --- /dev/null +++ b/nodebuilder/rpc/defaults.go @@ -0,0 +1,6 @@ +package rpc + +const ( + defaultBindAddress = "localhost" + defaultPort = "26658" +) diff --git a/nodebuilder/rpc/defaults_test.go b/nodebuilder/rpc/defaults_test.go new file mode 100644 index 0000000000..74d9c98cfc --- /dev/null +++ b/nodebuilder/rpc/defaults_test.go @@ -0,0 +1,12 @@ +package rpc + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestServerDefaultConstants(t *testing.T) { + assert.Equal(t, "localhost", defaultBindAddress) + assert.Equal(t, "26658", defaultPort) +} diff --git a/nodebuilder/rpc/flags.go b/nodebuilder/rpc/flags.go index 167dbc803a..d37014004d 100644 --- a/nodebuilder/rpc/flags.go +++ b/nodebuilder/rpc/flags.go @@ -1,13 +1,18 @@ package rpc import ( + "fmt" + + 
logging "github.com/ipfs/go-log/v2" "github.com/spf13/cobra" flag "github.com/spf13/pflag" ) var ( + log = logging.Logger("rpc") addrFlag = "rpc.addr" portFlag = "rpc.port" + authFlag = "rpc.skip-auth" ) // Flags gives a set of hardcoded node/rpc package flags. @@ -17,12 +22,17 @@ func Flags() *flag.FlagSet { flags.String( addrFlag, "", - "Set a custom RPC listen address (default: localhost)", + fmt.Sprintf("Set a custom RPC listen address (default: %s)", defaultBindAddress), ) flags.String( portFlag, "", - "Set a custom RPC port (default: 26658)", + fmt.Sprintf("Set a custom RPC port (default: %s)", defaultPort), + ) + flags.Bool( + authFlag, + false, + "Skips authentication for RPC requests", ) return flags @@ -38,4 +48,12 @@ func ParseFlags(cmd *cobra.Command, cfg *Config) { if port != "" { cfg.Port = port } + ok, err := cmd.Flags().GetBool(authFlag) + if err != nil { + panic(err) + } + if ok { + log.Warn("RPC authentication is disabled") + cfg.SkipAuth = true + } } diff --git a/nodebuilder/rpc/flags_test.go b/nodebuilder/rpc/flags_test.go new file mode 100644 index 0000000000..1370995833 --- /dev/null +++ b/nodebuilder/rpc/flags_test.go @@ -0,0 +1,95 @@ +package rpc + +import ( + "fmt" + "testing" + + "github.com/spf13/cobra" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestFlags(t *testing.T) { + flags := Flags() + + // Test addrFlag + addr := flags.Lookup(addrFlag) + require.NotNil(t, addr) + assert.Equal(t, "", addr.Value.String()) + assert.Equal(t, fmt.Sprintf("Set a custom RPC listen address (default: %s)", defaultBindAddress), addr.Usage) + + // Test portFlag + port := flags.Lookup(portFlag) + require.NotNil(t, port) + assert.Equal(t, "", port.Value.String()) + assert.Equal(t, fmt.Sprintf("Set a custom RPC port (default: %s)", defaultPort), port.Usage) +} + +// TestParseFlags tests the ParseFlags function in rpc/flags.go +func TestParseFlags(t *testing.T) { + tests := []struct { + name string + addrFlag string + portFlag string + expected *Config + }{ + { + name: "addrFlag is set", + addrFlag: "127.0.0.1:8080", + portFlag: "", + expected: &Config{ + Address: "127.0.0.1:8080", + Port: "", + }, + }, + { + name: "portFlag is set", + addrFlag: "", + portFlag: "9090", + expected: &Config{ + Address: "", + Port: "9090", + }, + }, + { + name: "both addrFlag and portFlag are set", + addrFlag: "192.168.0.1:1234", + portFlag: "5678", + expected: &Config{ + Address: "192.168.0.1:1234", + Port: "5678", + }, + }, + { + name: "neither addrFlag nor portFlag are set", + addrFlag: "", + portFlag: "", + expected: &Config{ + Address: "", + Port: "", + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + cmd := &cobra.Command{} + cfg := &Config{} + + cmd.Flags().AddFlagSet(Flags()) + + err := cmd.Flags().Set(addrFlag, test.addrFlag) + if err != nil { + t.Errorf(err.Error()) + } + err = cmd.Flags().Set(portFlag, test.portFlag) + if err != nil { + t.Errorf(err.Error()) + } + + ParseFlags(cmd, cfg) + assert.Equal(t, test.expected.Address, cfg.Address) + assert.Equal(t, test.expected.Port, cfg.Port) + }) + } +} diff --git a/nodebuilder/settings.go b/nodebuilder/settings.go index 29019c7e77..298976fda4 100644 --- a/nodebuilder/settings.go +++ b/nodebuilder/settings.go @@ -24,6 +24,7 @@ import ( "github.com/celestiaorg/go-fraud" "github.com/celestiaorg/celestia-node/header" + modcore "github.com/celestiaorg/celestia-node/nodebuilder/core" "github.com/celestiaorg/celestia-node/nodebuilder/das" modhead 
"github.com/celestiaorg/celestia-node/nodebuilder/header" "github.com/celestiaorg/celestia-node/nodebuilder/node" @@ -51,6 +52,7 @@ func WithPyroscope(endpoint string, nodeType node.Type) fx.Option { return fx.Options( fx.Invoke(func(peerID peer.ID) error { _, err := pyroscope.Start(pyroscope.Config{ + UploadRate: 15 * time.Second, ApplicationName: "celestia.da-node", ServerAddress: endpoint, Tags: map[string]string{ @@ -64,6 +66,7 @@ func WithPyroscope(endpoint string, nodeType node.Type) fx.Option { pyroscope.ProfileAllocSpace, pyroscope.ProfileInuseObjects, pyroscope.ProfileInuseSpace, + pyroscope.ProfileGoroutines, }, }) return err @@ -76,6 +79,7 @@ func WithMetrics(metricOpts []otlpmetrichttp.Option, nodeType node.Type) fx.Opti // TODO @renaynay: this will be refactored when there is more granular // control over which module to enable metrics for modhead.MetricsEnabled = true + modcore.MetricsEnabled = true baseComponents := fx.Options( fx.Supply(metricOpts), diff --git a/nodebuilder/share/constructors.go b/nodebuilder/share/constructors.go index aa2ac5bec1..96be2b5d20 100644 --- a/nodebuilder/share/constructors.go +++ b/nodebuilder/share/constructors.go @@ -37,12 +37,12 @@ func newDiscovery(cfg *disc.Parameters, h, routingdisc.NewRoutingDiscovery(r), fullNodesTag, - disc.WithOnPeersUpdate(manager.UpdateFullNodePool), + disc.WithOnPeersUpdate(manager.UpdateNodePool), ) } } -func newModule(getter share.Getter, avail share.Availability) Module { +func newShareModule(getter share.Getter, avail share.Availability) Module { return &module{getter, avail} } diff --git a/nodebuilder/share/mocks/api.go b/nodebuilder/share/mocks/api.go index 78a124a20d..4e21cecae0 100644 --- a/nodebuilder/share/mocks/api.go +++ b/nodebuilder/share/mocks/api.go @@ -8,7 +8,7 @@ import ( context "context" reflect "reflect" - da "github.com/celestiaorg/celestia-app/pkg/da" + header "github.com/celestiaorg/celestia-node/header" share "github.com/celestiaorg/celestia-node/share" rsmt2d "github.com/celestiaorg/rsmt2d" gomock "github.com/golang/mock/gomock" @@ -38,7 +38,7 @@ func (m *MockModule) EXPECT() *MockModuleMockRecorder { } // GetEDS mocks base method. -func (m *MockModule) GetEDS(arg0 context.Context, arg1 *da.DataAvailabilityHeader) (*rsmt2d.ExtendedDataSquare, error) { +func (m *MockModule) GetEDS(arg0 context.Context, arg1 *header.ExtendedHeader) (*rsmt2d.ExtendedDataSquare, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetEDS", arg0, arg1) ret0, _ := ret[0].(*rsmt2d.ExtendedDataSquare) @@ -53,7 +53,7 @@ func (mr *MockModuleMockRecorder) GetEDS(arg0, arg1 interface{}) *gomock.Call { } // GetShare mocks base method. -func (m *MockModule) GetShare(arg0 context.Context, arg1 *da.DataAvailabilityHeader, arg2, arg3 int) ([]byte, error) { +func (m *MockModule) GetShare(arg0 context.Context, arg1 *header.ExtendedHeader, arg2, arg3 int) ([]byte, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetShare", arg0, arg1, arg2, arg3) ret0, _ := ret[0].([]byte) @@ -68,7 +68,7 @@ func (mr *MockModuleMockRecorder) GetShare(arg0, arg1, arg2, arg3 interface{}) * } // GetSharesByNamespace mocks base method. 
-func (m *MockModule) GetSharesByNamespace(arg0 context.Context, arg1 *da.DataAvailabilityHeader, arg2 share.Namespace) (share.NamespacedShares, error) { +func (m *MockModule) GetSharesByNamespace(arg0 context.Context, arg1 *header.ExtendedHeader, arg2 share.Namespace) (share.NamespacedShares, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetSharesByNamespace", arg0, arg1, arg2) ret0, _ := ret[0].(share.NamespacedShares) @@ -83,7 +83,7 @@ func (mr *MockModuleMockRecorder) GetSharesByNamespace(arg0, arg1, arg2 interfac } // SharesAvailable mocks base method. -func (m *MockModule) SharesAvailable(arg0 context.Context, arg1 *da.DataAvailabilityHeader) error { +func (m *MockModule) SharesAvailable(arg0 context.Context, arg1 *header.ExtendedHeader) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "SharesAvailable", arg0, arg1) ret0, _ := ret[0].(error) diff --git a/nodebuilder/share/module.go b/nodebuilder/share/module.go index 3fa55b2d35..7caaf39a92 100644 --- a/nodebuilder/share/module.go +++ b/nodebuilder/share/module.go @@ -34,7 +34,72 @@ func ConstructModule(tp node.Type, cfg *Config, options ...fx.Option) fx.Option fx.Supply(*cfg), fx.Error(cfgErr), fx.Options(options...), - fx.Provide(newModule), + fx.Provide(newShareModule), + peerManagerComponents(tp, cfg), + discoveryComponents(cfg), + shrexSubComponents(), + ) + + bridgeAndFullComponents := fx.Options( + fx.Provide(getters.NewStoreGetter), + shrexServerComponents(cfg), + edsStoreComponents(cfg), + fullAvailabilityComponents(), + shrexGetterComponents(cfg), + fx.Provide(func(shrexSub *shrexsub.PubSub) shrexsub.BroadcastFn { + return shrexSub.Broadcast + }), + ) + + switch tp { + case node.Bridge: + return fx.Module( + "share", + baseComponents, + bridgeAndFullComponents, + fx.Provide(func() peers.Parameters { + return cfg.PeerManagerParams + }), + fx.Provide(bridgeGetter), + fx.Invoke(func(lc fx.Lifecycle, sub *shrexsub.PubSub) error { + lc.Append(fx.Hook{ + OnStart: sub.Start, + OnStop: sub.Stop, + }) + return nil + }), + ) + case node.Full: + return fx.Module( + "share", + baseComponents, + bridgeAndFullComponents, + fx.Provide(getters.NewIPLDGetter), + fx.Provide(fullGetter), + ) + case node.Light: + return fx.Module( + "share", + baseComponents, + shrexGetterComponents(cfg), + lightAvailabilityComponents(cfg), + fx.Invoke(ensureEmptyEDSInBS), + fx.Provide(getters.NewIPLDGetter), + fx.Provide(lightGetter), + // shrexsub broadcaster stub for daser + fx.Provide(func() shrexsub.BroadcastFn { + return func(context.Context, shrexsub.Notification) error { + return nil + } + }), + ) + default: + panic("invalid node type") + } +} + +func discoveryComponents(cfg *Config) fx.Option { + return fx.Options( fx.Invoke(func(disc *disc.Discovery) {}), fx.Provide(fx.Annotate( newDiscovery(cfg.Discovery), @@ -45,29 +110,71 @@ func ConstructModule(tp node.Type, cfg *Config, options ...fx.Option) fx.Option return d.Stop(ctx) }), )), - fx.Provide( - func(ctx context.Context, h host.Host, network modp2p.Network) (*shrexsub.PubSub, error) { - return shrexsub.NewPubSub(ctx, h, network.String()) - }, - ), ) +} - shrexGetterComponents := fx.Options( - fx.Provide(func() peers.Parameters { - return cfg.PeerManagerParams - }), +func peerManagerComponents(tp node.Type, cfg *Config) fx.Option { + switch tp { + case node.Full, node.Light: + return fx.Options( + fx.Provide(func() peers.Parameters { + return cfg.PeerManagerParams + }), + fx.Provide( + func( + params peers.Parameters, + host host.Host, + connGater *conngater.BasicConnectionGater, + shrexSub 
*shrexsub.PubSub, + headerSub libhead.Subscriber[*header.ExtendedHeader], + // we must ensure Syncer is started before PeerManager + // so that Syncer registers header validator before PeerManager subscribes to headers + _ *sync.Syncer[*header.ExtendedHeader], + ) (*peers.Manager, error) { + return peers.NewManager( + params, + host, + connGater, + peers.WithShrexSubPools(shrexSub, headerSub), + ) + }, + ), + ) + case node.Bridge: + return fx.Provide(peers.NewManager) + default: + panic("invalid node type") + } +} + +func shrexSubComponents() fx.Option { + return fx.Provide( + func(ctx context.Context, h host.Host, network modp2p.Network) (*shrexsub.PubSub, error) { + return shrexsub.NewPubSub(ctx, h, network.String()) + }, + ) +} + +// shrexGetterComponents provides components for a shrex getter that +// is capable of requesting +func shrexGetterComponents(cfg *Config) fx.Option { + return fx.Options( + // shrex-nd client fx.Provide( func(host host.Host, network modp2p.Network) (*shrexnd.Client, error) { cfg.ShrExNDParams.WithNetworkID(network.String()) return shrexnd.NewClient(cfg.ShrExNDParams, host) }, ), + + // shrex-eds client fx.Provide( func(host host.Host, network modp2p.Network) (*shrexeds.Client, error) { cfg.ShrExEDSParams.WithNetworkID(network.String()) return shrexeds.NewClient(cfg.ShrExEDSParams, host) }, ), + fx.Provide(fx.Annotate( getters.NewShrexGetter, fx.OnStart(func(ctx context.Context, getter *getters.ShrexGetter) error { @@ -78,9 +185,10 @@ func ConstructModule(tp node.Type, cfg *Config, options ...fx.Option) fx.Option }), )), ) +} - bridgeAndFullComponents := fx.Options( - fx.Provide(getters.NewStoreGetter), +func shrexServerComponents(cfg *Config) fx.Option { + return fx.Options( fx.Invoke(func(edsSrv *shrexeds.Server, ndSrc *shrexnd.Server) {}), fx.Provide(fx.Annotate( func(host host.Host, store *eds.Store, network modp2p.Network) (*shrexeds.Server, error) { @@ -108,8 +216,13 @@ func ConstructModule(tp node.Type, cfg *Config, options ...fx.Option) fx.Option }), fx.OnStop(func(ctx context.Context, server *shrexnd.Server) error { return server.Stop(ctx) - }), - )), + })), + ), + ) +} + +func edsStoreComponents(cfg *Config) fx.Option { + return fx.Options( fx.Provide(fx.Annotate( func(path node.StorePath, ds datastore.Batching) (*eds.Store, error) { return eds.NewStore(cfg.EDSStoreParams, string(path), ds) @@ -125,6 +238,11 @@ func ConstructModule(tp node.Type, cfg *Config, options ...fx.Option) fx.Option return store.Stop(ctx) }), )), + ) +} + +func fullAvailabilityComponents() fx.Option { + return fx.Options( fx.Provide(fx.Annotate( full.NewShareAvailability, fx.OnStart(func(ctx context.Context, avail *full.ShareAvailability) error { @@ -137,91 +255,24 @@ func ConstructModule(tp node.Type, cfg *Config, options ...fx.Option) fx.Option fx.Provide(func(avail *full.ShareAvailability) share.Availability { return avail }), - fx.Provide(func(shrexSub *shrexsub.PubSub) shrexsub.BroadcastFn { - return shrexSub.Broadcast - }), - ) - - peerManagerWithShrexPools := fx.Options( - fx.Provide( - func( - params peers.Parameters, - host host.Host, - connGater *conngater.BasicConnectionGater, - shrexSub *shrexsub.PubSub, - headerSub libhead.Subscriber[*header.ExtendedHeader], - // we must ensure Syncer is started before PeerManager - // so that Syncer registers header validator before PeerManager subscribes to headers - _ *sync.Syncer[*header.ExtendedHeader], - ) (*peers.Manager, error) { - return peers.NewManager( - params, - host, - connGater, - peers.WithShrexSubPools(shrexSub, 
headerSub), - ) - }, - ), ) +} - switch tp { - case node.Bridge: - return fx.Module( - "share", - baseComponents, - fx.Provide(peers.NewManager), - bridgeAndFullComponents, - shrexGetterComponents, - fx.Provide(bridgeGetter), - fx.Invoke(func(lc fx.Lifecycle, sub *shrexsub.PubSub) error { - lc.Append(fx.Hook{ - OnStart: sub.Start, - OnStop: sub.Stop, - }) - return nil - }), - ) - case node.Full: - return fx.Module( - "share", - peerManagerWithShrexPools, - baseComponents, - bridgeAndFullComponents, - shrexGetterComponents, - fx.Provide(getters.NewIPLDGetter), - fx.Provide(fullGetter), - ) - case node.Light: - return fx.Module( - "share", - baseComponents, - fx.Provide(func() []light.Option { - return []light.Option{ - light.WithSampleAmount(cfg.LightAvailability.SampleAmount), - } - }), - peerManagerWithShrexPools, - shrexGetterComponents, - fx.Invoke(ensureEmptyEDSInBS), - fx.Provide(getters.NewIPLDGetter), - fx.Provide(lightGetter), - // shrexsub broadcaster stub for daser - fx.Provide(func() shrexsub.BroadcastFn { - return func(context.Context, shrexsub.Notification) error { - return nil - } - }), - fx.Provide(fx.Annotate( - light.NewShareAvailability, - fx.OnStop(func(ctx context.Context, la *light.ShareAvailability) error { - return la.Close(ctx) - }), - )), - fx.Provide(func(avail *light.ShareAvailability) share.Availability { - return avail +func lightAvailabilityComponents(cfg *Config) fx.Option { + return fx.Options( + fx.Provide(fx.Annotate( + light.NewShareAvailability, + fx.OnStop(func(ctx context.Context, la *light.ShareAvailability) error { + return la.Close(ctx) }), - ) - default: - panic("invalid node type") - } + )), + fx.Provide(func() []light.Option { + return []light.Option{ + light.WithSampleAmount(cfg.LightAvailability.SampleAmount), + } + }), + fx.Provide(func(avail *light.ShareAvailability) share.Availability { + return avail + }), + ) } diff --git a/nodebuilder/state/config.go b/nodebuilder/state/config.go index e6db813e06..f42e646b76 100644 --- a/nodebuilder/state/config.go +++ b/nodebuilder/state/config.go @@ -1,6 +1,8 @@ package state -import "github.com/cosmos/cosmos-sdk/crypto/keyring" +import ( + "github.com/cosmos/cosmos-sdk/crypto/keyring" +) var defaultKeyringBackend = keyring.BackendTest diff --git a/nodebuilder/state/mocks/api.go b/nodebuilder/state/mocks/api.go index 754920dee2..1861a86e66 100644 --- a/nodebuilder/state/mocks/api.go +++ b/nodebuilder/state/mocks/api.go @@ -9,13 +9,12 @@ import ( reflect "reflect" math "cosmossdk.io/math" + blob "github.com/celestiaorg/celestia-node/blob" + state "github.com/celestiaorg/celestia-node/state" types "github.com/cosmos/cosmos-sdk/types" types0 "github.com/cosmos/cosmos-sdk/x/staking/types" gomock "github.com/golang/mock/gomock" types1 "github.com/tendermint/tendermint/types" - - blob "github.com/celestiaorg/celestia-node/blob" - state "github.com/celestiaorg/celestia-node/state" ) // MockModule is a mock of Module interface. @@ -131,20 +130,6 @@ func (mr *MockModuleMockRecorder) Delegate(arg0, arg1, arg2, arg3, arg4 interfac return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delegate", reflect.TypeOf((*MockModule)(nil).Delegate), arg0, arg1, arg2, arg3, arg4) } -// IsStopped mocks base method. -func (m *MockModule) IsStopped(arg0 context.Context) bool { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "IsStopped", arg0) - ret0, _ := ret[0].(bool) - return ret0 -} - -// IsStopped indicates an expected call of IsStopped. 
-func (mr *MockModuleMockRecorder) IsStopped(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsStopped", reflect.TypeOf((*MockModule)(nil).IsStopped), arg0) -} - // QueryDelegation mocks base method. func (m *MockModule) QueryDelegation(arg0 context.Context, arg1 types.ValAddress) (*types0.QueryDelegationResponse, error) { m.ctrl.T.Helper() diff --git a/nodebuilder/state/state.go b/nodebuilder/state/state.go index 83408680da..52a2317445 100644 --- a/nodebuilder/state/state.go +++ b/nodebuilder/state/state.go @@ -16,9 +16,8 @@ var _ Module = (*API)(nil) // messages to the Celestia network. // //go:generate mockgen -destination=mocks/api.go -package=mocks . Module +//nolint:dupl type Module interface { - // IsStopped checks if the Module's context has been stopped - IsStopped(ctx context.Context) bool // AccountAddress retrieves the address of the node's account/signer AccountAddress(ctx context.Context) (state.Address, error) @@ -97,10 +96,11 @@ type Module interface { // API is a wrapper around Module for the RPC. // TODO(@distractedm1nd): These structs need to be autogenerated. +// +//nolint:dupl type API struct { Internal struct { AccountAddress func(ctx context.Context) (state.Address, error) `perm:"read"` - IsStopped func(ctx context.Context) bool `perm:"read"` Balance func(ctx context.Context) (*state.Balance, error) `perm:"read"` BalanceForAddress func(ctx context.Context, addr state.Address) (*state.Balance, error) `perm:"read"` Transfer func( @@ -110,7 +110,7 @@ type API struct { fee state.Int, gasLimit uint64, ) (*state.TxResponse, error) `perm:"write"` - SubmitTx func(ctx context.Context, tx state.Tx) (*state.TxResponse, error) `perm:"write"` + SubmitTx func(ctx context.Context, tx state.Tx) (*state.TxResponse, error) `perm:"read"` SubmitPayForBlob func( ctx context.Context, fee state.Int, @@ -167,10 +167,6 @@ func (api *API) AccountAddress(ctx context.Context) (state.Address, error) { return api.Internal.AccountAddress(ctx) } -func (api *API) IsStopped(ctx context.Context) bool { - return api.Internal.IsStopped(ctx) -} - func (api *API) BalanceForAddress(ctx context.Context, addr state.Address) (*state.Balance, error) { return api.Internal.BalanceForAddress(ctx, addr) } diff --git a/nodebuilder/state/stub.go b/nodebuilder/state/stub.go index 8d17d651dd..30a431aba5 100644 --- a/nodebuilder/state/stub.go +++ b/nodebuilder/state/stub.go @@ -17,10 +17,6 @@ var ErrNoStateAccess = errors.New("node is running without state access. run wit // to a core endpoint. 
type stubbedStateModule struct{} -func (s stubbedStateModule) IsStopped(context.Context) bool { - return true -} - func (s stubbedStateModule) AccountAddress(context.Context) (state.Address, error) { return state.Address{}, ErrNoStateAccess } diff --git a/nodebuilder/store.go b/nodebuilder/store.go index 6d313893b1..7f67a9e782 100644 --- a/nodebuilder/store.go +++ b/nodebuilder/store.go @@ -4,17 +4,19 @@ import ( "errors" "fmt" "path/filepath" + "runtime" "sync" "time" "github.com/cosmos/cosmos-sdk/crypto/keyring" + "github.com/dgraph-io/badger/v4/options" "github.com/ipfs/go-datastore" + dsbadger "github.com/ipfs/go-ds-badger4" "github.com/mitchellh/go-homedir" - dsbadger "github.com/celestiaorg/go-ds-badger4" - "github.com/celestiaorg/celestia-node/libs/fslock" "github.com/celestiaorg/celestia-node/libs/keystore" + "github.com/celestiaorg/celestia-node/share" ) var ( @@ -66,7 +68,7 @@ func OpenStore(path string, ring keyring.Keyring) (Store, error) { ok := IsInit(path) if !ok { - flock.Unlock() //nolint: errcheck + flock.Unlock() //nolint:errcheck return nil, ErrNotInited } @@ -118,10 +120,8 @@ func (f *fsStore) Datastore() (datastore.Batching, error) { return f.data, nil } - opts := dsbadger.DefaultOptions // this should be copied - opts.GcInterval = time.Minute * 10 - - ds, err := dsbadger.NewDatastore(dataPath(f.path), &opts) + cfg := constraintBadgerConfig() + ds, err := dsbadger.NewDatastore(dataPath(f.path), cfg) if err != nil { return nil, fmt.Errorf("node: can't open Badger Datastore: %w", err) } @@ -182,3 +182,67 @@ func indexPath(base string) string { func dataPath(base string) string { return filepath.Join(base, "data") } + +// constraintBadgerConfig returns BadgerDB configuration optimized for low memory usage and more frequent +// compaction, which prevents memory spikes. +// This is particularly important for LNs with restricted memory resources. +// +// With the following configuration, a LN uses up to 300MiB of RAM during initial sync/sampling +// and up to 200MiB during normal operation. (on 4 core CPU, 8GiB RAM droplet) +// +// With the following configuration and "-tags=jemalloc", a LN uses no more than 180MiB during initial +// sync/sampling and up to 100MiB during normal operation. (same hardware spec) +// NOTE: To enable jemalloc, build celestia-node with "-tags=jemalloc" flag, which configures Badger to +// use jemalloc instead of Go's default allocator.
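+// For example (illustrative only; the main package path is an assumption and may differ): +// go build -tags=jemalloc ./cmd/celestia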
+// +// TODO(@Wondertan): Consider alternative less constrained configuration for FN/BN +// TODO(@Wondertan): Consider dynamic memory allocation based on available RAM +func constraintBadgerConfig() *dsbadger.Options { + opts := dsbadger.DefaultOptions // this must be copied + // ValueLog: + // 2mib default => share.Size - makes sure headers and samples are stored in value log + // This *tremendously* reduces the amount of memory used by the node, up to 10 times less during + // compaction + opts.ValueThreshold = share.Size + // make sure we don't have any limits for stored headers + opts.ValueLogMaxEntries = 100000000 + // run value log GC more often to spread the work over time + opts.GcInterval = time.Minute * 1 + // default 0.5 => 0.125 - makes sure value log GC is more aggressive on reclaiming disk space + opts.GcDiscardRatio = 0.125 + + // badger stores checksum for every value, but doesn't verify it by default + // enabling this option may allow us to detect corrupted data + opts.ChecksumVerificationMode = options.OnBlockRead + opts.VerifyValueChecksum = true + // default 64mib => 0 - disable block cache + // most of our components maintain their own caches, so this is not needed + opts.BlockCacheSize = 0 + // not much gain as it compresses only the LSM, and compression also requires the block cache + opts.Compression = options.None + + // MemTables: + // default 64mib => 16mib - decreases memory usage and triggers compaction more often + opts.MemTableSize = 16 << 20 + // default 5 => 3 + opts.NumMemtables = 3 + // default 5 => 3 + opts.NumLevelZeroTables = 3 + // default 15 => 5 - this prevents memory growth on CPU-constrained systems by blocking all writers + opts.NumLevelZeroTablesStall = 5 + + // Compaction: + // Dynamic compactor allocation + compactors := runtime.NumCPU() / 2 + if compactors < 2 { + compactors = 2 // can't be less than 2 + } + if compactors > opts.MaxLevels { // ensure there are no more compactors than db table levels + compactors = opts.MaxLevels + } + opts.NumCompactors = compactors + // makes sure badger is always compacted on shutdown + opts.CompactL0OnClose = true + + return &opts +} diff --git a/nodebuilder/store_test.go b/nodebuilder/store_test.go index bd179c1258..51bd89c5a7 100644 --- a/nodebuilder/store_test.go +++ b/nodebuilder/store_test.go @@ -1,3 +1,5 @@ +//go:build !race + package nodebuilder import ( diff --git a/nodebuilder/tests/README.md b/nodebuilder/tests/README.md index 176ee2ba21..dd2040ab42 100644 --- a/nodebuilder/tests/README.md +++ b/nodebuilder/tests/README.md @@ -1,6 +1,6 @@ # Swamp: In-Memory Test Tool -Swamp is a testing tool that creates an environment for deploying `celestia-node` and testing instances against each other. +Swamp is our integration testing tool that creates an environment for deploying `celestia-node` and testing instances against each other. While the swamp takes care of setting up networking and initial configuration of node types, the user can focus on tailoring test scenarios. ## Usage @@ -38,7 +38,7 @@ require.NoError(t, err) light := sw.NewLightClient(node.WithTrustedPeer(addrs[0].String())) ``` -## Concenptual overview +## Conceptual overview Each of the test scenario requires flexibility in network topology. The user can define the necessary amount of each type of node and be able to control each of them.
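To make the conceptual overview concrete, here is a minimal sketch of a swamp-based integration test that builds the simplest topology described above: one bridge node plus one light node that trusts it. It only reuses helpers that already appear in this changeset (`swamp.NewSwamp`, `NewBridgeNode`, `DefaultTestConfig`, `NewNodeWithConfig`); the test name and the bare `integration` build tag are illustrative assumptions, not part of this PR.

```go
//go:build integration

package tests

import (
	"context"
	"testing"
	"time"

	"github.com/libp2p/go-libp2p/core/host"
	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/stretchr/testify/require"

	"github.com/celestiaorg/celestia-node/nodebuilder/node"
	"github.com/celestiaorg/celestia-node/nodebuilder/tests/swamp"
)

// TestSwampTopologySketch spins up a bridge node and a light node that
// trusts it, mirroring the pattern used by the integration tests in this PR.
func TestSwampTopologySketch(t *testing.T) {
	ctx, cancel := context.WithTimeout(context.Background(), swamp.DefaultTestTimeout)
	t.Cleanup(cancel)

	sw := swamp.NewSwamp(t, swamp.WithBlockTime(time.Second))

	// start the bridge node against the swamp's in-memory core network
	bridge := sw.NewBridgeNode()
	require.NoError(t, bridge.Start(ctx))

	// advertise the bridge as a trusted peer to the light node
	addrs, err := peer.AddrInfoToP2pAddrs(host.InfoFromHost(bridge.Host))
	require.NoError(t, err)

	lightCfg := sw.DefaultTestConfig(node.Light)
	lightCfg.Header.TrustedPeers = append(lightCfg.Header.TrustedPeers, addrs[0].String())
	light := sw.NewNodeWithConfig(node.Light, lightCfg)
	require.NoError(t, light.Start(ctx))
}
```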
diff --git a/nodebuilder/tests/api_test.go b/nodebuilder/tests/api_test.go index 2fd4b2d3da..a3b99a750b 100644 --- a/nodebuilder/tests/api_test.go +++ b/nodebuilder/tests/api_test.go @@ -1,3 +1,5 @@ +//go:build api || integration + package tests import ( @@ -13,26 +15,14 @@ import ( "github.com/celestiaorg/celestia-node/api/rpc/client" "github.com/celestiaorg/celestia-node/blob" "github.com/celestiaorg/celestia-node/blob/blobtest" - "github.com/celestiaorg/celestia-node/libs/authtoken" "github.com/celestiaorg/celestia-node/nodebuilder" "github.com/celestiaorg/celestia-node/nodebuilder/node" "github.com/celestiaorg/celestia-node/nodebuilder/tests/swamp" ) -func getAdminClient(ctx context.Context, nd *nodebuilder.Node, t *testing.T) *client.Client { - t.Helper() - - signer := nd.AdminSigner - listenAddr := "ws://" + nd.RPCServer.ListenAddr() - - jwt, err := authtoken.NewSignedJWT(signer, []auth.Permission{"public", "read", "write", "admin"}) - require.NoError(t, err) - - client, err := client.NewClient(ctx, listenAddr, jwt) - require.NoError(t, err) - - return client -} +const ( + btime = time.Millisecond * 300 +) func TestNodeModule(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), swamp.DefaultTestTimeout) @@ -58,6 +48,10 @@ func TestNodeModule(t *testing.T) { require.NoError(t, err) require.Equal(t, info.APIVersion, node.APIVersion) + ready, err := client.Node.Ready(ctx) + require.NoError(t, err) + require.True(t, ready) + perms, err := client.Node.AuthVerify(ctx, jwt) require.NoError(t, err) require.Equal(t, perms, adminPerms) @@ -99,7 +93,7 @@ func TestGetByHeight(t *testing.T) { require.ErrorContains(t, err, "given height is from the future") } -// TestBlobRPC ensures that blobs can be submited via rpc +// TestBlobRPC ensures that blobs can be submitted via rpc func TestBlobRPC(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), swamp.DefaultTestTimeout) t.Cleanup(cancel) @@ -123,7 +117,7 @@ func TestBlobRPC(t *testing.T) { ) require.NoError(t, err) - height, err := rpcClient.Blob.Submit(ctx, []*blob.Blob{newBlob}, nil) + height, err := rpcClient.Blob.Submit(ctx, []*blob.Blob{newBlob}, blob.DefaultGasPrice()) require.NoError(t, err) require.True(t, height != 0) } diff --git a/nodebuilder/tests/blob_test.go b/nodebuilder/tests/blob_test.go index ccbc6440cc..d0aeefd568 100644 --- a/nodebuilder/tests/blob_test.go +++ b/nodebuilder/tests/blob_test.go @@ -1,3 +1,5 @@ +//go:build blob || integration + package tests import ( @@ -58,7 +60,7 @@ func TestBlobModule(t *testing.T) { fullClient := getAdminClient(ctx, fullNode, t) lightClient := getAdminClient(ctx, lightNode, t) - height, err := fullClient.Blob.Submit(ctx, blobs, nil) + height, err := fullClient.Blob.Submit(ctx, blobs, blob.DefaultGasPrice()) require.NoError(t, err) _, err = fullClient.Header.WaitForHeight(ctx, height) @@ -73,6 +75,8 @@ func TestBlobModule(t *testing.T) { { name: "Get", doFn: func(t *testing.T) { + // https://github.com/celestiaorg/celestia-node/issues/2915 + time.Sleep(time.Second) blob1, err := fullClient.Blob.Get(ctx, height, blobs[0].Namespace(), blobs[0].Commitment) require.NoError(t, err) require.Equal(t, blobs[0], blob1) @@ -81,6 +85,8 @@ func TestBlobModule(t *testing.T) { { name: "GetAll", doFn: func(t *testing.T) { + // https://github.com/celestiaorg/celestia-node/issues/2915 + time.Sleep(time.Second) newBlobs, err := fullClient.Blob.GetAll(ctx, height, []share.Namespace{blobs[0].Namespace()}) require.NoError(t, err) require.Len(t, newBlobs, len(appBlobs0)) @@ 
-91,6 +97,8 @@ func TestBlobModule(t *testing.T) { { name: "Included", doFn: func(t *testing.T) { + // https://github.com/celestiaorg/celestia-node/issues/2915 + time.Sleep(time.Second) proof, err := fullClient.Blob.GetProof(ctx, height, blobs[0].Namespace(), blobs[0].Commitment) require.NoError(t, err) @@ -123,12 +131,76 @@ func TestBlobModule(t *testing.T) { require.ErrorContains(t, err, blob.ErrBlobNotFound.Error()) }, }, + { + name: "Submit equal blobs", + doFn: func(t *testing.T) { + appBlob, err := blobtest.GenerateV0Blobs([]int{8, 4}, true) + require.NoError(t, err) + b, err := blob.NewBlob( + appBlob[0].ShareVersion, + append([]byte{appBlob[0].NamespaceVersion}, appBlob[0].NamespaceID...), + appBlob[0].Data, + ) + require.NoError(t, err) + + height, err := fullClient.Blob.Submit(ctx, []*blob.Blob{b, b}, blob.DefaultGasPrice()) + require.NoError(t, err) + + _, err = fullClient.Header.WaitForHeight(ctx, height) + require.NoError(t, err) + + b0, err := fullClient.Blob.Get(ctx, height, b.Namespace(), b.Commitment) + require.NoError(t, err) + require.Equal(t, b, b0) + + // give some time to store the data, + // otherwise the test will hang on the IPLD level. + // https://github.com/celestiaorg/celestia-node/issues/2915 + time.Sleep(time.Second) + + proof, err := fullClient.Blob.GetProof(ctx, height, b.Namespace(), b.Commitment) + require.NoError(t, err) + + included, err := fullClient.Blob.Included(ctx, height, b.Namespace(), proof, b.Commitment) + require.NoError(t, err) + require.True(t, included) + }, + }, + { + // This test allows to check that the blob won't be + // deduplicated if it will be sent multiple times in + // different pfbs. + name: "Submit the same blob in different pfb", + doFn: func(t *testing.T) { + h, err := fullClient.Blob.Submit(ctx, []*blob.Blob{blobs[0]}, blob.DefaultGasPrice()) + require.NoError(t, err) + + _, err = fullClient.Header.WaitForHeight(ctx, h) + require.NoError(t, err) + + b0, err := fullClient.Blob.Get(ctx, h, blobs[0].Namespace(), blobs[0].Commitment) + require.NoError(t, err) + require.Equal(t, blobs[0], b0) + + // give some time to store the data, + // otherwise the test will hang on the IPLD level. 
+ // https://github.com/celestiaorg/celestia-node/issues/2915 + time.Sleep(time.Second) + + proof, err := fullClient.Blob.GetProof(ctx, h, blobs[0].Namespace(), blobs[0].Commitment) + require.NoError(t, err) + + included, err := fullClient.Blob.Included(ctx, h, blobs[0].Namespace(), proof, blobs[0].Commitment) + require.NoError(t, err) + require.True(t, included) + + }, + }, } for _, tt := range test { tt := tt t.Run(tt.name, func(t *testing.T) { - t.Parallel() tt.doFn(t) }) } diff --git a/nodebuilder/tests/da_test.go b/nodebuilder/tests/da_test.go new file mode 100644 index 0000000000..bdcd4e638c --- /dev/null +++ b/nodebuilder/tests/da_test.go @@ -0,0 +1,145 @@ +//go:build da || integration + +package tests + +import ( + "bytes" + "context" + "testing" + "time" + + "github.com/libp2p/go-libp2p/core/host" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/stretchr/testify/require" + + "github.com/celestiaorg/celestia-app/pkg/appconsts" + + "github.com/celestiaorg/celestia-node/blob" + "github.com/celestiaorg/celestia-node/blob/blobtest" + "github.com/celestiaorg/celestia-node/nodebuilder/da" + "github.com/celestiaorg/celestia-node/nodebuilder/node" + "github.com/celestiaorg/celestia-node/nodebuilder/tests/swamp" + "github.com/celestiaorg/celestia-node/share" +) + +func TestDaModule(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 25*time.Second) + t.Cleanup(cancel) + sw := swamp.NewSwamp(t, swamp.WithBlockTime(time.Second)) + + namespace, err := share.NewBlobNamespaceV0([]byte("namespace")) + require.NoError(t, err) + + appBlobs0, err := blobtest.GenerateV0Blobs([]int{8, 4}, true) + require.NoError(t, err) + appBlobs1, err := blobtest.GenerateV0Blobs([]int{4}, false) + require.NoError(t, err) + blobs := make([]*blob.Blob, 0, len(appBlobs0)+len(appBlobs1)) + daBlobs := make([][]byte, 0, len(appBlobs0)+len(appBlobs1)) + + for _, b := range append(appBlobs0, appBlobs1...) 
{ + blob, err := blob.NewBlob(b.ShareVersion, namespace, b.Data) + require.NoError(t, err) + blobs = append(blobs, blob) + daBlobs = append(daBlobs, blob.Data) + } + + require.NoError(t, err) + bridge := sw.NewBridgeNode() + require.NoError(t, bridge.Start(ctx)) + + addrs, err := peer.AddrInfoToP2pAddrs(host.InfoFromHost(bridge.Host)) + require.NoError(t, err) + + fullCfg := sw.DefaultTestConfig(node.Full) + fullCfg.Header.TrustedPeers = append(fullCfg.Header.TrustedPeers, addrs[0].String()) + fullNode := sw.NewNodeWithConfig(node.Full, fullCfg) + require.NoError(t, fullNode.Start(ctx)) + + addrsFull, err := peer.AddrInfoToP2pAddrs(host.InfoFromHost(fullNode.Host)) + require.NoError(t, err) + + lightCfg := sw.DefaultTestConfig(node.Light) + lightCfg.Header.TrustedPeers = append(lightCfg.Header.TrustedPeers, addrsFull[0].String()) + lightNode := sw.NewNodeWithConfig(node.Light, lightCfg) + require.NoError(t, lightNode.Start(ctx)) + + fullClient := getAdminClient(ctx, fullNode, t) + lightClient := getAdminClient(ctx, lightNode, t) + + ids, err := fullClient.DA.Submit(ctx, daBlobs, -1, namespace) + require.NoError(t, err) + + var test = []struct { + name string + doFn func(t *testing.T) + }{ + { + name: "MaxBlobSize", + doFn: func(t *testing.T) { + mbs, err := fullClient.DA.MaxBlobSize(ctx) + require.NoError(t, err) + require.Equal(t, mbs, uint64(appconsts.DefaultMaxBytes)) + }, + }, + { + name: "GetProofs + Validate", + doFn: func(t *testing.T) { + t.Skip() + h, _ := da.SplitID(ids[0]) + lightClient.Header.WaitForHeight(ctx, h) + proofs, err := lightClient.DA.GetProofs(ctx, ids, namespace) + require.NoError(t, err) + require.NotEmpty(t, proofs) + valid, err := fullClient.DA.Validate(ctx, ids, proofs, namespace) + require.NoError(t, err) + for _, v := range valid { + require.True(t, v) + } + }, + }, + { + name: "GetIDs", + doFn: func(t *testing.T) { + t.Skip() + height, _ := da.SplitID(ids[0]) + ids2, err := fullClient.DA.GetIDs(ctx, height, namespace) + require.NoError(t, err) + require.EqualValues(t, ids, ids2) + }, + }, + { + name: "Get", + doFn: func(t *testing.T) { + h, _ := da.SplitID(ids[0]) + lightClient.Header.WaitForHeight(ctx, h) + fetched, err := lightClient.DA.Get(ctx, ids, namespace) + require.NoError(t, err) + require.Len(t, fetched, len(ids)) + for i := range fetched { + require.True(t, bytes.Equal(fetched[i], daBlobs[i])) + } + }, + }, + { + name: "Commit", + doFn: func(t *testing.T) { + t.Skip() + fetched, err := fullClient.DA.Commit(ctx, ids, namespace) + require.NoError(t, err) + require.Len(t, fetched, len(ids)) + for i := range fetched { + _, commitment := da.SplitID(ids[i]) + require.EqualValues(t, fetched[i], commitment) + } + }, + }, + } + + for _, tt := range test { + tt := tt + t.Run(tt.name, func(t *testing.T) { + tt.doFn(t) + }) + } +} diff --git a/nodebuilder/tests/fraud_test.go b/nodebuilder/tests/fraud_test.go index 1927cdaf42..6496cdbb53 100644 --- a/nodebuilder/tests/fraud_test.go +++ b/nodebuilder/tests/fraud_test.go @@ -1,3 +1,5 @@ +//go:build fraud || integration + package tests import ( @@ -126,9 +128,9 @@ func TestFraudProofHandling(t *testing.T) { // blockTime=1 sec, expected befp.height=10 timeOut := blockTime * 5 // during befp validation the node can still receive headers and it mostly depends on - // the operating system or hardware(e.g. on macOS tests is working 100% time with a single height=15, - // and on the Linux VM sometimes the last height is 17-18). 
So, lets give a chance for our befp validator to check - // the fraud proof and stop the syncer. + // the operating system or hardware(e.g. on macOS tests is working 100% time with a single + // height=15, and on the Linux VM sometimes the last height is 17-18). So, lets give a chance for + // our befp validator to check the fraud proof and stop the syncer. for height < 20 { syncCtx, syncCancel := context.WithTimeout(context.Background(), timeOut) _, err = full.HeaderServ.WaitForHeight(syncCtx, height) diff --git a/nodebuilder/tests/helpers_test.go b/nodebuilder/tests/helpers_test.go new file mode 100644 index 0000000000..978b66553d --- /dev/null +++ b/nodebuilder/tests/helpers_test.go @@ -0,0 +1,35 @@ +//nolint:unused +package tests + +import ( + "context" + "testing" + "time" + + "github.com/filecoin-project/go-jsonrpc/auth" + "github.com/stretchr/testify/require" + + "github.com/celestiaorg/celestia-node/api/rpc/client" + "github.com/celestiaorg/celestia-node/libs/authtoken" + "github.com/celestiaorg/celestia-node/nodebuilder" +) + +func getAdminClient(ctx context.Context, nd *nodebuilder.Node, t *testing.T) *client.Client { + t.Helper() + + signer := nd.AdminSigner + listenAddr := "ws://" + nd.RPCServer.ListenAddr() + + jwt, err := authtoken.NewSignedJWT(signer, []auth.Permission{"public", "read", "write", "admin"}) + require.NoError(t, err) + + client, err := client.NewClient(ctx, listenAddr, jwt) + require.NoError(t, err) + + return client +} + +func setTimeInterval(cfg *nodebuilder.Config, interval time.Duration) { + cfg.P2P.RoutingTableRefreshPeriod = interval + cfg.Share.Discovery.AdvertiseInterval = interval +} diff --git a/nodebuilder/tests/nd_test.go b/nodebuilder/tests/nd_test.go index f3338ec294..338aa6d0c1 100644 --- a/nodebuilder/tests/nd_test.go +++ b/nodebuilder/tests/nd_test.go @@ -1,3 +1,5 @@ +//go:build nd || integration + package tests import ( diff --git a/nodebuilder/tests/p2p_test.go b/nodebuilder/tests/p2p_test.go index 9fe63fb931..98e9fc15b4 100644 --- a/nodebuilder/tests/p2p_test.go +++ b/nodebuilder/tests/p2p_test.go @@ -1,3 +1,5 @@ +//go:build p2p || integration + package tests import ( @@ -199,8 +201,3 @@ func TestRestartNodeDiscovery(t *testing.T) { require.NoError(t, err) assert.Equal(t, connectedness, network.Connected) } - -func setTimeInterval(cfg *nodebuilder.Config, interval time.Duration) { - cfg.P2P.RoutingTableRefreshPeriod = interval - cfg.Share.Discovery.AdvertiseInterval = interval -} diff --git a/nodebuilder/tests/reconstruct_test.go b/nodebuilder/tests/reconstruct_test.go index 89f4a0171a..d047182669 100644 --- a/nodebuilder/tests/reconstruct_test.go +++ b/nodebuilder/tests/reconstruct_test.go @@ -1,7 +1,4 @@ -// Test with light nodes spawns more goroutines than in the race detectors budget, -// and thus we're disabling the race detector. -// TODO(@Wondertan): Remove this once we move to go1.19 with unlimited race detector -//go:build !race +//go:build reconstruction || integration package tests @@ -89,6 +86,10 @@ Test-Case: Full Node reconstructs blocks from each other, after unsuccessfully s block from LN subnetworks. Analog to TestShareAvailable_DisconnectedFullNodes. */ func TestFullReconstructFromFulls(t *testing.T) { + if testing.Short() { + t.Skip() + } + light.DefaultSampleAmount = 10 // s const ( blocks = 10 @@ -255,6 +256,10 @@ Steps: 9. 
Check that the FN can retrieve shares from 1 to 20 blocks */ func TestFullReconstructFromLights(t *testing.T) { + if testing.Short() { + t.Skip() + } + eds.RetrieveQuadrantTimeout = time.Millisecond * 100 light.DefaultSampleAmount = 20 const ( diff --git a/nodebuilder/tests/swamp/swamp.go b/nodebuilder/tests/swamp/swamp.go index 55426cf70c..9faf69744d 100644 --- a/nodebuilder/tests/swamp/swamp.go +++ b/nodebuilder/tests/swamp/swamp.go @@ -39,7 +39,7 @@ import ( var blackholeIP6 = net.ParseIP("100::") -// DefaultTestTimeout should be used as the default timout on all the Swamp tests. +// DefaultTestTimeout should be used as the default timeout on all the Swamp tests. // It's generously set to 5 minutes to give enough time for CI. const DefaultTestTimeout = time.Minute * 5 @@ -78,6 +78,7 @@ func NewSwamp(t *testing.T, options ...Option) *Swamp { // Now, we are making an assumption that consensus mechanism is already tested out // so, we are not creating bridge nodes with each one containing its own core client // instead we are assigning all created BNs to 1 Core from the swamp + ic.WithChainID("private") cctx := core.StartTestNodeWithConfig(t, ic) swp := &Swamp{ t: t, @@ -176,11 +177,12 @@ func (s *Swamp) setupGenesis() { store, err := eds.NewStore(eds.DefaultParameters(), s.t.TempDir(), ds) require.NoError(s.t, err) - ex := core.NewExchange( + ex, err := core.NewExchange( core.NewBlockFetcher(s.ClientContext.Client), store, header.MakeExtendedHeader, ) + require.NoError(s.t, err) h, err := ex.GetByHeight(ctx, 1) require.NoError(s.t, err) @@ -329,7 +331,7 @@ func (s *Swamp) Disconnect(t *testing.T, peerA, peerB *nodebuilder.Node) { // SetBootstrapper sets the given bootstrappers as the "bootstrappers" for the // Swamp test suite. Every new full or light node created on the suite afterwards // will automatically add the suite's bootstrappers as trusted peers to their config. -// NOTE: Bridge nodes do not automaatically add the bootstrappers as trusted peers. +// NOTE: Bridge nodes do not automatically add the bootstrappers as trusted peers. // NOTE: Use `NewNodeWithStore` to avoid this automatic configuration. func (s *Swamp) SetBootstrapper(t *testing.T, bootstrappers ...*nodebuilder.Node) { for _, trusted := range bootstrappers { diff --git a/nodebuilder/tests/sync_test.go b/nodebuilder/tests/sync_test.go index 8b939749d6..ec8386ea43 100644 --- a/nodebuilder/tests/sync_test.go +++ b/nodebuilder/tests/sync_test.go @@ -1,3 +1,5 @@ +//go:build sync || integration + package tests import ( @@ -16,7 +18,7 @@ import ( const ( numBlocks = 20 bsize = 16 - btime = time.Millisecond * 300 + sbtime = time.Millisecond * 300 ) /* @@ -45,7 +47,7 @@ func TestSyncAgainstBridge_NonEmptyChain(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), swamp.DefaultTestTimeout) t.Cleanup(cancel) - sw := swamp.NewSwamp(t, swamp.WithBlockTime(btime)) + sw := swamp.NewSwamp(t, swamp.WithBlockTime(sbtime)) // wait for core network to fill 20 blocks fillDn := swamp.FillBlocks(ctx, sw.ClientContext, sw.Accounts, bsize, numBlocks) sw.WaitTillHeight(ctx, numBlocks) @@ -138,7 +140,7 @@ func TestSyncAgainstBridge_EmptyChain(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), swamp.DefaultTestTimeout) t.Cleanup(cancel) - sw := swamp.NewSwamp(t, swamp.WithBlockTime(btime)) + sw := swamp.NewSwamp(t, swamp.WithBlockTime(sbtime)) sw.WaitTillHeight(ctx, numBlocks) // create bridge node and set it as the bootstrapper for the suite @@ -211,6 +213,10 @@ Steps: 9. 
Check LN is synced to height 40 */ func TestSyncStartStopLightWithBridge(t *testing.T) { + if testing.Short() { + t.Skip("skipping TestSyncStartStopLightWithBridge test in short mode.") + } + ctx, cancel := context.WithTimeout(context.Background(), swamp.DefaultTestTimeout) defer cancel() diff --git a/pruner/archival/pruner.go b/pruner/archival/pruner.go new file mode 100644 index 0000000000..7b1cb935f3 --- /dev/null +++ b/pruner/archival/pruner.go @@ -0,0 +1,20 @@ +package archival + +import ( + "context" + + "github.com/celestiaorg/celestia-node/header" +) + +// Pruner is a noop implementation of the pruner.Factory interface +// that allows archival nodes to sync and retain historical data +// that is out of the availability window. +type Pruner struct{} + +func NewPruner() *Pruner { + return &Pruner{} +} + +func (p *Pruner) Prune(context.Context, ...*header.ExtendedHeader) error { + return nil +} diff --git a/pruner/archival/window.go b/pruner/archival/window.go new file mode 100644 index 0000000000..b89a779816 --- /dev/null +++ b/pruner/archival/window.go @@ -0,0 +1,5 @@ +package archival + +import "github.com/celestiaorg/celestia-node/pruner" + +const Window = pruner.AvailabilityWindow(0) diff --git a/pruner/light/pruner.go b/pruner/light/pruner.go new file mode 100644 index 0000000000..513bfa2b66 --- /dev/null +++ b/pruner/light/pruner.go @@ -0,0 +1,17 @@ +package light + +import ( + "context" + + "github.com/celestiaorg/celestia-node/header" +) + +type Pruner struct{} + +func NewPruner() *Pruner { + return &Pruner{} +} + +func (p *Pruner) Prune(context.Context, ...*header.ExtendedHeader) error { + return nil +} diff --git a/pruner/light/window.go b/pruner/light/window.go new file mode 100644 index 0000000000..dc1a9e4444 --- /dev/null +++ b/pruner/light/window.go @@ -0,0 +1,11 @@ +package light + +import ( + "time" + + "github.com/celestiaorg/celestia-node/pruner" +) + +// Window is the availability window for light nodes in the Celestia +// network (30 days). +const Window = pruner.AvailabilityWindow(time.Second * 86400 * 30) diff --git a/pruner/pruner.go b/pruner/pruner.go new file mode 100644 index 0000000000..fae60e483c --- /dev/null +++ b/pruner/pruner.go @@ -0,0 +1,13 @@ +package pruner + +import ( + "context" + + "github.com/celestiaorg/celestia-node/header" +) + +// Pruner contains methods necessary to prune data +// from the node's datastore. +type Pruner interface { + Prune(context.Context, ...*header.ExtendedHeader) error +} diff --git a/pruner/service.go b/pruner/service.go new file mode 100644 index 0000000000..f67265977a --- /dev/null +++ b/pruner/service.go @@ -0,0 +1,25 @@ +package pruner + +import ( + "context" +) + +// Service handles the pruning routine for the node using the +// prune Pruner. 
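+// +// A minimal wiring sketch (illustrative only, not part of this change): the service can be constructed +// with the light-node pruner added in this PR, e.g. pruner.NewService(light.NewPruner()).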
+type Service struct { + pruner Pruner +} + +func NewService(p Pruner) *Service { + return &Service{ + pruner: p, + } +} + +func (s *Service) Start(context.Context) error { + return nil +} + +func (s *Service) Stop(context.Context) error { + return nil +} diff --git a/pruner/window.go b/pruner/window.go new file mode 100644 index 0000000000..0a86c535ce --- /dev/null +++ b/pruner/window.go @@ -0,0 +1,7 @@ +package pruner + +import ( + "time" +) + +type AvailabilityWindow time.Duration diff --git a/share/eds/byzantine/bad_encoding_test.go b/share/eds/byzantine/bad_encoding_test.go index e7032107ca..e42e3c287c 100644 --- a/share/eds/byzantine/bad_encoding_test.go +++ b/share/eds/byzantine/bad_encoding_test.go @@ -2,9 +2,15 @@ package byzantine import ( "context" + "crypto/sha256" + "hash" "testing" "time" + "github.com/ipfs/boxo/blockservice" + blocks "github.com/ipfs/go-block-format" + "github.com/ipfs/go-cid" + mhcore "github.com/multiformats/go-multihash/core" "github.com/stretchr/testify/require" core "github.com/tendermint/tendermint/types" @@ -35,10 +41,13 @@ func TestBEFP_Validate(t *testing.T) { err = square.Repair(dah.RowRoots, dah.ColumnRoots) require.ErrorAs(t, err, &errRsmt2d) - errByz := NewErrByzantine(ctx, bServ, &dah, errRsmt2d) - - befp := CreateBadEncodingProof([]byte("hash"), 0, errByz) + byzantine := NewErrByzantine(ctx, bServ, &dah, errRsmt2d) + var errByz *ErrByzantine + require.ErrorAs(t, byzantine, &errByz) + proof := CreateBadEncodingProof([]byte("hash"), 0, errByz) + befp, ok := proof.(*BadEncodingProof) + require.True(t, ok) var test = []struct { name string prepareFn func() error @@ -47,7 +56,7 @@ func TestBEFP_Validate(t *testing.T) { { name: "valid BEFP", prepareFn: func() error { - return befp.Validate(&header.ExtendedHeader{DAH: &dah}) + return proof.Validate(&header.ExtendedHeader{DAH: &dah}) }, expectedResult: func(err error) { require.NoError(t, err) @@ -69,7 +78,9 @@ func TestBEFP_Validate(t *testing.T) { Shares: validShares[0:4], }, ) - invalidBefp := CreateBadEncodingProof([]byte("hash"), 0, errInvalidByz) + var errInvalid *ErrByzantine + require.ErrorAs(t, errInvalidByz, &errInvalid) + invalidBefp := CreateBadEncodingProof([]byte("hash"), 0, errInvalid) return invalidBefp.Validate(&header.ExtendedHeader{DAH: &validDah}) }, expectedResult: func(err error) { @@ -79,10 +90,11 @@ func TestBEFP_Validate(t *testing.T) { { name: "incorrect share with Proof", prepareFn: func() error { - befp, ok := befp.(*BadEncodingProof) - require.True(t, ok) - befp.Shares[0].Share = befp.Shares[1].Share - return befp.Validate(&header.ExtendedHeader{DAH: &dah}) + // break the first shareWithProof to test negative case + sh := sharetest.RandShares(t, 2) + nmtProof := nmt.NewInclusionProof(0, 1, nil, false) + befp.Shares[0] = &ShareWithProof{sh[0], &nmtProof} + return proof.Validate(&header.ExtendedHeader{DAH: &dah}) }, expectedResult: func(err error) { require.ErrorIs(t, err, errIncorrectShare) @@ -91,10 +103,8 @@ func TestBEFP_Validate(t *testing.T) { { name: "invalid amount of shares", prepareFn: func() error { - befp, ok := befp.(*BadEncodingProof) - require.True(t, ok) befp.Shares = befp.Shares[0 : len(befp.Shares)/2] - return befp.Validate(&header.ExtendedHeader{DAH: &dah}) + return proof.Validate(&header.ExtendedHeader{DAH: &dah}) }, expectedResult: func(err error) { require.ErrorIs(t, err, errIncorrectAmountOfShares) @@ -103,10 +113,8 @@ func TestBEFP_Validate(t *testing.T) { { name: "not enough shares to recompute the root", prepareFn: func() error { - befp, ok := 
befp.(*BadEncodingProof) - require.True(t, ok) befp.Shares[0] = nil - return befp.Validate(&header.ExtendedHeader{DAH: &dah}) + return proof.Validate(&header.ExtendedHeader{DAH: &dah}) }, expectedResult: func(err error) { require.ErrorIs(t, err, errIncorrectAmountOfShares) @@ -115,11 +123,8 @@ func TestBEFP_Validate(t *testing.T) { { name: "index out of bounds", prepareFn: func() error { - befp, ok := befp.(*BadEncodingProof) - require.True(t, ok) - befpCopy := *befp - befpCopy.Index = 100 - return befpCopy.Validate(&header.ExtendedHeader{DAH: &dah}) + befp.Index = 100 + return proof.Validate(&header.ExtendedHeader{DAH: &dah}) }, expectedResult: func(err error) { require.ErrorIs(t, err, errIncorrectIndex) @@ -128,7 +133,7 @@ func TestBEFP_Validate(t *testing.T) { { name: "heights mismatch", prepareFn: func() error { - return befp.Validate(&header.ExtendedHeader{ + return proof.Validate(&header.ExtendedHeader{ RawHeader: core.Header{ Height: 42, }, @@ -198,18 +203,21 @@ func TestIncorrectBadEncodingFraudProof(t *testing.T) { } func TestBEFP_ValidateOutOfOrderShares(t *testing.T) { - // skipping it for now because `malicious` package has a small issue: Constructor does not apply - // passed options, so it's not possible to store shares and thus get proofs for them. - // should be ok once app team will fix it. - t.Skip() - eds := edstest.RandEDS(t, 16) + ctx, cancel := context.WithTimeout(context.Background(), time.Second*30) + t.Cleanup(cancel) + + size := 4 + eds := edstest.RandEDS(t, size) + shares := eds.Flattened() - shares[0], shares[1] = shares[1], shares[0] // corrupting eds - bServ := ipld.NewMemBlockservice() - batchAddr := ipld.NewNmtNodeAdder(context.Background(), bServ, ipld.MaxSizeBatchOption(16*2)) + shares[0], shares[4] = shares[4], shares[0] // corrupting eds + + bServ := newNamespacedBlockService() + batchAddr := ipld.NewNmtNodeAdder(ctx, bServ, ipld.MaxSizeBatchOption(size*2)) + eds, err := rsmt2d.ImportExtendedDataSquare(shares, share.DefaultRSMT2DCodec(), - malicious.NewConstructor(16, nmt.NodeVisitor(batchAddr.Visit)), + malicious.NewConstructor(uint64(size), nmt.NodeVisitor(batchAddr.Visit)), ) require.NoError(t, err, "failure to recompute the extended data square") @@ -223,9 +231,80 @@ func TestBEFP_ValidateOutOfOrderShares(t *testing.T) { err = eds.Repair(dah.RowRoots, dah.ColumnRoots) require.ErrorAs(t, err, &errRsmt2d) - errByz := NewErrByzantine(context.Background(), bServ, &dah, errRsmt2d) + byzantine := NewErrByzantine(ctx, bServ, &dah, errRsmt2d) + var errByz *ErrByzantine + require.ErrorAs(t, byzantine, &errByz) befp := CreateBadEncodingProof([]byte("hash"), 0, errByz) err = befp.Validate(&header.ExtendedHeader{DAH: &dah}) - require.Error(t, err) + require.NoError(t, err) +} + +// namespacedBlockService wraps `BlockService` and extends the verification part +// to avoid returning blocks that has out of order namespaces. 
+type namespacedBlockService struct { + blockservice.BlockService + // the data structure that is used on the networking level, in order + // to verify the order of the namespaces + prefix *cid.Prefix +} + +func newNamespacedBlockService() *namespacedBlockService { + sha256NamespaceFlagged := uint64(0x7701) + // register the nmt hasher to validate the order of namespaces + mhcore.Register(sha256NamespaceFlagged, func() hash.Hash { + nh := nmt.NewNmtHasher(sha256.New(), share.NamespaceSize, true) + nh.Reset() + return nh + }) + + bs := &namespacedBlockService{} + bs.BlockService = ipld.NewMemBlockservice() + + bs.prefix = &cid.Prefix{ + Version: 1, + Codec: sha256NamespaceFlagged, + MhType: sha256NamespaceFlagged, + // equals to NmtHasher.Size() + MhLength: sha256.New().Size() + 2*share.NamespaceSize, + } + return bs +} + +func (n *namespacedBlockService) GetBlock(ctx context.Context, c cid.Cid) (blocks.Block, error) { + block, err := n.BlockService.GetBlock(ctx, c) + if err != nil { + return nil, err + } + + _, err = n.prefix.Sum(block.RawData()) + if err != nil { + return nil, err + } + return block, nil +} + +func (n *namespacedBlockService) GetBlocks(ctx context.Context, cids []cid.Cid) <-chan blocks.Block { + blockCh := n.BlockService.GetBlocks(ctx, cids) + resultCh := make(chan blocks.Block) + + go func() { + for { + select { + case <-ctx.Done(): + close(resultCh) + return + case block, ok := <-blockCh: + if !ok { + close(resultCh) + return + } + if _, err := n.prefix.Sum(block.RawData()); err != nil { + continue + } + resultCh <- block + } + } + }() + return resultCh } diff --git a/share/eds/byzantine/byzantine.go b/share/eds/byzantine/byzantine.go index dfdf681f04..d20b56deed 100644 --- a/share/eds/byzantine/byzantine.go +++ b/share/eds/byzantine/byzantine.go @@ -5,7 +5,6 @@ import ( "fmt" "github.com/ipfs/boxo/blockservice" - "golang.org/x/sync/errgroup" "github.com/celestiaorg/celestia-app/pkg/da" "github.com/celestiaorg/rsmt2d" @@ -35,7 +34,7 @@ func NewErrByzantine( bGetter blockservice.BlockGetter, dah *da.DataAvailabilityHeader, errByz *rsmt2d.ErrByzantineData, -) *ErrByzantine { +) error { // changing the order to collect proofs against an orthogonal axis roots := [][][]byte{ dah.ColumnRoots, @@ -43,41 +42,41 @@ func NewErrByzantine( }[errByz.Axis] sharesWithProof := make([]*ShareWithProof, len(errByz.Shares)) - sharesAmount := 0 - errGr, ctx := errgroup.WithContext(ctx) + type result struct { + share *ShareWithProof + index int + } + resultCh := make(chan *result) for index, share := range errByz.Shares { - // skip further shares if we already requested half of them, which is enough to recompute the row - // or col - if sharesAmount == len(dah.RowRoots)/2 { - break - } - if share == nil { continue } - sharesAmount++ index := index - errGr.Go(func() error { + go func() { share, err := getProofsAt( ctx, bGetter, ipld.MustCidFromNamespacedSha256(roots[index]), int(errByz.Index), len(errByz.Shares), ) - sharesWithProof[index] = share - return err - }) + if err != nil { + log.Warn("requesting proof failed", "root", roots[index], "err", err) + return + } + resultCh <- &result{share, index} + }() } - if err := errGr.Wait(); err != nil { - // Fatal as rsmt2d proved that error is byzantine, - // but we cannot properly collect the proof, - // so verification will fail and thus services won't be stopped - // while we still have to stop them. 
- // TODO(@Wondertan): Find a better way to handle - log.Fatalw("getting proof for ErrByzantine", "err", err) + for i := 0; i < len(dah.RowRoots)/2; i++ { + select { + case t := <-resultCh: + sharesWithProof[t.index] = t.share + case <-ctx.Done(): + return ipld.ErrNodeNotFound + } } + return &ErrByzantine{ Index: uint32(errByz.Index), Shares: sharesWithProof, diff --git a/share/eds/inverted_index.go b/share/eds/inverted_index.go index 8b9dcb5d95..799ab6208d 100644 --- a/share/eds/inverted_index.go +++ b/share/eds/inverted_index.go @@ -4,19 +4,20 @@ import ( "context" "errors" "fmt" + "runtime" + "github.com/dgraph-io/badger/v4/options" "github.com/filecoin-project/dagstore/index" "github.com/filecoin-project/dagstore/shard" ds "github.com/ipfs/go-datastore" + dsbadger "github.com/ipfs/go-ds-badger4" "github.com/multiformats/go-multihash" - - dsbadger "github.com/celestiaorg/go-ds-badger4" ) const invertedIndexPath = "/inverted_index/" // ErrNotFoundInIndex is returned instead of ErrNotFound if the multihash doesn't exist in the index -var ErrNotFoundInIndex = fmt.Errorf("does not exist in index") +var ErrNotFoundInIndex = errors.New("does not exist in index") // simpleInvertedIndex is an inverted index that only stores a single shard key per multihash. Its // implementation is modified from the default upstream implementation in dagstore/index. @@ -29,14 +30,24 @@ type simpleInvertedIndex struct { // don't care which shard is used to serve a cid. func newSimpleInvertedIndex(storePath string) (*simpleInvertedIndex, error) { opts := dsbadger.DefaultOptions // this should be copied - // turn off value log GC + // turn off value log GC as we don't use value log opts.GcInterval = 0 - // 20 compactors show to have no hangups on put operation up to 40k blocks with eds size 128. 
- opts.NumCompactors = 20 // use minimum amount of NumLevelZeroTables to trigger L0 compaction faster opts.NumLevelZeroTables = 1 // MaxLevels = 8 will allow the db to grow to ~11.1 TiB opts.MaxLevels = 8 + // inverted index stores unique hash keys, so we don't need to detect conflicts + opts.DetectConflicts = false + // we don't need compression for inverted index as it just hashes + opts.Compression = options.None + compactors := runtime.NumCPU() + if compactors < 2 { + compactors = 2 + } + if compactors > opts.MaxLevels { // ensure there is no more compactors than db table levels + compactors = opts.MaxLevels + } + opts.NumCompactors = compactors ds, err := dsbadger.NewDatastore(storePath+invertedIndexPath, &opts) if err != nil { diff --git a/share/eds/metrics.go b/share/eds/metrics.go index cbebf8321a..8d69a3ec41 100644 --- a/share/eds/metrics.go +++ b/share/eds/metrics.go @@ -7,6 +7,8 @@ import ( "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/metric" + + "github.com/celestiaorg/celestia-node/libs/utils" ) const ( @@ -27,9 +29,7 @@ const ( dagstoreShardStatusKey = "shard_status" ) -var ( - meter = otel.Meter("eds_store") -) +var meter = otel.Meter("eds_store") type putResult string @@ -163,9 +163,7 @@ func (m *metrics) observeGCtime(ctx context.Context, dur time.Duration, failed b if m == nil { return } - if ctx.Err() != nil { - ctx = context.Background() - } + ctx = utils.ResetContextOnError(ctx) m.gcTime.Record(ctx, dur.Seconds(), metric.WithAttributes( attribute.Bool(failedKey, failed))) } @@ -174,9 +172,7 @@ func (m *metrics) observeShardFailure(ctx context.Context, shardKey string) { if m == nil { return } - if ctx.Err() != nil { - ctx = context.Background() - } + ctx = utils.ResetContextOnError(ctx) m.shardFailureCount.Add(ctx, 1, metric.WithAttributes(attribute.String("shard_key", shardKey))) } @@ -185,9 +181,7 @@ func (m *metrics) observePut(ctx context.Context, dur time.Duration, result putR if m == nil { return } - if ctx.Err() != nil { - ctx = context.Background() - } + ctx = utils.ResetContextOnError(ctx) m.putTime.Record(ctx, dur.Seconds(), metric.WithAttributes( attribute.String(putResultKey, string(result)), @@ -198,9 +192,7 @@ func (m *metrics) observeLongOp(ctx context.Context, opName string, dur time.Dur if m == nil { return } - if ctx.Err() != nil { - ctx = context.Background() - } + ctx = utils.ResetContextOnError(ctx) m.longOpTime.Record(ctx, dur.Seconds(), metric.WithAttributes( attribute.String(opNameKey, opName), @@ -211,9 +203,7 @@ func (m *metrics) observeGetCAR(ctx context.Context, dur time.Duration, failed b if m == nil { return } - if ctx.Err() != nil { - ctx = context.Background() - } + ctx = utils.ResetContextOnError(ctx) m.getCARTime.Record(ctx, dur.Seconds(), metric.WithAttributes( attribute.Bool(failedKey, failed))) @@ -223,9 +213,7 @@ func (m *metrics) observeCARBlockstore(ctx context.Context, dur time.Duration, f if m == nil { return } - if ctx.Err() != nil { - ctx = context.Background() - } + ctx = utils.ResetContextOnError(ctx) m.getCARBlockstoreTime.Record(ctx, dur.Seconds(), metric.WithAttributes( attribute.Bool(failedKey, failed))) @@ -235,9 +223,7 @@ func (m *metrics) observeGetDAH(ctx context.Context, dur time.Duration, failed b if m == nil { return } - if ctx.Err() != nil { - ctx = context.Background() - } + ctx = utils.ResetContextOnError(ctx) m.getDAHTime.Record(ctx, dur.Seconds(), metric.WithAttributes( attribute.Bool(failedKey, failed))) @@ -247,9 +233,7 @@ func (m *metrics) observeRemove(ctx 
context.Context, dur time.Duration, failed b if m == nil { return } - if ctx.Err() != nil { - ctx = context.Background() - } + ctx = utils.ResetContextOnError(ctx) m.removeTime.Record(ctx, dur.Seconds(), metric.WithAttributes( attribute.Bool(failedKey, failed))) @@ -259,9 +243,7 @@ func (m *metrics) observeGet(ctx context.Context, dur time.Duration, failed bool if m == nil { return } - if ctx.Err() != nil { - ctx = context.Background() - } + ctx = utils.ResetContextOnError(ctx) m.getTime.Record(ctx, dur.Seconds(), metric.WithAttributes( attribute.Bool(failedKey, failed))) @@ -271,9 +253,7 @@ func (m *metrics) observeHas(ctx context.Context, dur time.Duration, failed bool if m == nil { return } - if ctx.Err() != nil { - ctx = context.Background() - } + ctx = utils.ResetContextOnError(ctx) m.hasTime.Record(ctx, dur.Seconds(), metric.WithAttributes( attribute.Bool(failedKey, failed))) @@ -283,9 +263,7 @@ func (m *metrics) observeList(ctx context.Context, dur time.Duration, failed boo if m == nil { return } - if ctx.Err() != nil { - ctx = context.Background() - } + ctx = utils.ResetContextOnError(ctx) m.listTime.Record(ctx, dur.Seconds(), metric.WithAttributes( attribute.Bool(failedKey, failed))) diff --git a/share/eds/retriever_no_race_test.go b/share/eds/retriever_no_race_test.go new file mode 100644 index 0000000000..15c6aa2fc4 --- /dev/null +++ b/share/eds/retriever_no_race_test.go @@ -0,0 +1,55 @@ +// go:build !race + +package eds + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/celestiaorg/celestia-app/pkg/da" + "github.com/celestiaorg/celestia-app/pkg/wrapper" + "github.com/celestiaorg/nmt" + "github.com/celestiaorg/rsmt2d" + + "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/share/eds/byzantine" + "github.com/celestiaorg/celestia-node/share/eds/edstest" + "github.com/celestiaorg/celestia-node/share/ipld" +) + +func TestRetriever_ByzantineError(t *testing.T) { + const width = 8 + ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) + defer cancel() + + bserv := ipld.NewMemBlockservice() + shares := edstest.RandEDS(t, width).Flattened() + _, err := ipld.ImportShares(ctx, shares, bserv) + require.NoError(t, err) + + // corrupt shares so that eds erasure coding does not match + copy(shares[14][share.NamespaceSize:], shares[15][share.NamespaceSize:]) + + // import corrupted eds + batchAdder := ipld.NewNmtNodeAdder(ctx, bserv, ipld.MaxSizeBatchOption(width*2)) + attackerEDS, err := rsmt2d.ImportExtendedDataSquare( + shares, + share.DefaultRSMT2DCodec(), + wrapper.NewConstructor(uint64(width), + nmt.NodeVisitor(batchAdder.Visit)), + ) + require.NoError(t, err) + err = batchAdder.Commit() + require.NoError(t, err) + + // ensure we rcv an error + dah, err := da.NewDataAvailabilityHeader(attackerEDS) + require.NoError(t, err) + r := NewRetriever(bserv) + _, err = r.Retrieve(ctx, &dah) + var errByz *byzantine.ErrByzantine + require.ErrorAs(t, err, &errByz) +} diff --git a/share/eds/retriever_test.go b/share/eds/retriever_test.go index f3f7ccca64..95da345d17 100644 --- a/share/eds/retriever_test.go +++ b/share/eds/retriever_test.go @@ -12,8 +12,6 @@ import ( "github.com/stretchr/testify/require" "github.com/celestiaorg/celestia-app/pkg/da" - "github.com/celestiaorg/celestia-app/pkg/wrapper" - "github.com/celestiaorg/nmt" "github.com/celestiaorg/rsmt2d" "github.com/celestiaorg/celestia-node/header" @@ -67,40 +65,6 @@ func TestRetriever_Retrieve(t *testing.T) { } } -func 
TestRetriever_ByzantineError(t *testing.T) { - const width = 8 - ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) - defer cancel() - - bserv := ipld.NewMemBlockservice() - shares := edstest.RandEDS(t, width).Flattened() - _, err := ipld.ImportShares(ctx, shares, bserv) - require.NoError(t, err) - - // corrupt shares so that eds erasure coding does not match - copy(shares[14][share.NamespaceSize:], shares[15][share.NamespaceSize:]) - - // import corrupted eds - batchAdder := ipld.NewNmtNodeAdder(ctx, bserv, ipld.MaxSizeBatchOption(width*2)) - attackerEDS, err := rsmt2d.ImportExtendedDataSquare( - shares, - share.DefaultRSMT2DCodec(), - wrapper.NewConstructor(uint64(width), - nmt.NodeVisitor(batchAdder.Visit)), - ) - require.NoError(t, err) - err = batchAdder.Commit() - require.NoError(t, err) - - // ensure we rcv an error - dah, err := da.NewDataAvailabilityHeader(attackerEDS) - require.NoError(t, err) - r := NewRetriever(bserv) - _, err = r.Retrieve(ctx, &dah) - var errByz *byzantine.ErrByzantine - require.ErrorAs(t, err, &errByz) -} - // TestRetriever_MultipleRandQuadrants asserts that reconstruction succeeds // when any three random quadrants requested. func TestRetriever_MultipleRandQuadrants(t *testing.T) { diff --git a/share/eds/store_options.go b/share/eds/store_options.go index e5e6ffa73d..c8dcc69136 100644 --- a/share/eds/store_options.go +++ b/share/eds/store_options.go @@ -1,7 +1,7 @@ package eds import ( - "fmt" + "errors" "time" ) @@ -29,15 +29,15 @@ func DefaultParameters() *Parameters { func (p *Parameters) Validate() error { if p.GCInterval < 0 { - return fmt.Errorf("eds: GC interval cannot be negative") + return errors.New("eds: GC interval cannot be negative") } if p.RecentBlocksCacheSize < 1 { - return fmt.Errorf("eds: recent blocks cache size must be positive") + return errors.New("eds: recent blocks cache size must be positive") } if p.BlockstoreCacheSize < 1 { - return fmt.Errorf("eds: blockstore cache size must be positive") + return errors.New("eds: blockstore cache size must be positive") } return nil } diff --git a/share/eds/store_test.go b/share/eds/store_test.go index 09357347d0..6bc6972bb4 100644 --- a/share/eds/store_test.go +++ b/share/eds/store_test.go @@ -13,12 +13,12 @@ import ( "github.com/ipfs/go-cid" "github.com/ipfs/go-datastore" ds_sync "github.com/ipfs/go-datastore/sync" + dsbadger "github.com/ipfs/go-ds-badger4" "github.com/ipld/go-car" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/celestiaorg/celestia-app/pkg/da" - dsbadger "github.com/celestiaorg/go-ds-badger4" "github.com/celestiaorg/rsmt2d" "github.com/celestiaorg/celestia-node/share" diff --git a/share/getters/getter_test.go b/share/getters/getter_test.go index 77c470dae9..7297766652 100644 --- a/share/getters/getter_test.go +++ b/share/getters/getter_test.go @@ -10,12 +10,12 @@ import ( "github.com/ipfs/boxo/exchange/offline" "github.com/ipfs/go-datastore" ds_sync "github.com/ipfs/go-datastore/sync" + dsbadger "github.com/ipfs/go-ds-badger4" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/celestiaorg/celestia-app/pkg/da" "github.com/celestiaorg/celestia-app/pkg/wrapper" - dsbadger "github.com/celestiaorg/go-ds-badger4" "github.com/celestiaorg/rsmt2d" "github.com/celestiaorg/celestia-node/header" @@ -231,7 +231,7 @@ func TestIPLDGetter(t *testing.T) { require.NoError(t, err) require.Empty(t, emptyShares.Flatten()) - // nid doesnt exist in root + // nid doesn't exist in root emptyRoot := 
da.MinDataAvailabilityHeader() eh.DAH = &emptyRoot emptyShares, err = sg.GetSharesByNamespace(ctx, eh, namespace) diff --git a/share/getters/shrex.go b/share/getters/shrex.go index 0586826e22..826c6b1a10 100644 --- a/share/getters/shrex.go +++ b/share/getters/shrex.go @@ -43,9 +43,7 @@ func (m *metrics) recordEDSAttempt(ctx context.Context, attemptCount int, succes if m == nil { return } - if ctx.Err() != nil { - ctx = context.Background() - } + ctx = utils.ResetContextOnError(ctx) m.edsAttempts.Record(ctx, int64(attemptCount), metric.WithAttributes( attribute.Bool("success", success))) @@ -55,9 +53,7 @@ func (m *metrics) recordNDAttempt(ctx context.Context, attemptCount int, success if m == nil { return } - if ctx.Err() != nil { - ctx = context.Background() - } + ctx = utils.ResetContextOnError(ctx) m.ndAttempts.Record(ctx, int64(attemptCount), metric.WithAttributes( attribute.Bool("success", success))) @@ -135,9 +131,8 @@ func (sg *ShrexGetter) GetEDS(ctx context.Context, header *header.ExtendedHeader utils.SetStatusAndEnd(span, err) }() - dah := header.DAH // short circuit if the data root is empty - if dah.Equals(share.EmptyRoot()) { + if header.DAH.Equals(share.EmptyRoot()) { return share.EmptyExtendedDataSquare(), nil } for { @@ -147,10 +142,10 @@ func (sg *ShrexGetter) GetEDS(ctx context.Context, header *header.ExtendedHeader } attempt++ start := time.Now() - peer, setStatus, getErr := sg.peerManager.Peer(ctx, dah.Hash()) + peer, setStatus, getErr := sg.peerManager.Peer(ctx, header.DAH.Hash(), header.Height()) if getErr != nil { log.Debugw("eds: couldn't find peer", - "hash", dah.String(), + "hash", header.DAH.String(), "err", getErr, "finished (s)", time.Since(start)) sg.metrics.recordEDSAttempt(ctx, attempt, false) @@ -159,11 +154,11 @@ func (sg *ShrexGetter) GetEDS(ctx context.Context, header *header.ExtendedHeader reqStart := time.Now() reqCtx, cancel := ctxWithSplitTimeout(ctx, sg.minAttemptsCount-attempt+1, sg.minRequestTimeout) - eds, getErr := sg.edsClient.RequestEDS(reqCtx, dah.Hash(), peer) + eds, getErr := sg.edsClient.RequestEDS(reqCtx, header.DAH.Hash(), peer) cancel() switch { case getErr == nil: - setStatus(peers.ResultSynced) + setStatus(peers.ResultNoop) sg.metrics.recordEDSAttempt(ctx, attempt, true) return eds, nil case errors.Is(getErr, context.DeadlineExceeded), @@ -182,7 +177,7 @@ func (sg *ShrexGetter) GetEDS(ctx context.Context, header *header.ExtendedHeader err = errors.Join(err, getErr) } log.Debugw("eds: request failed", - "hash", dah.String(), + "hash", header.DAH.String(), "peer", peer.String(), "attempt", attempt, "err", getErr, @@ -223,7 +218,7 @@ func (sg *ShrexGetter) GetSharesByNamespace( } attempt++ start := time.Now() - peer, setStatus, getErr := sg.peerManager.Peer(ctx, dah.Hash()) + peer, setStatus, getErr := sg.peerManager.Peer(ctx, header.DAH.Hash(), header.Height()) if getErr != nil { log.Debugw("nd: couldn't find peer", "hash", dah.String(), diff --git a/share/getters/shrex_test.go b/share/getters/shrex_test.go index b625bb4c10..075735579b 100644 --- a/share/getters/shrex_test.go +++ b/share/getters/shrex_test.go @@ -121,7 +121,7 @@ func TestShrexGetter(t *testing.T) { t.Cleanup(cancel) // generate test data - eds, dah, maxNamesapce := generateTestEDS(t) + eds, dah, maxNamespace := generateTestEDS(t) eh := headertest.RandExtendedHeaderWithRoot(t, dah) require.NoError(t, edsStore.Put(ctx, dah.Hash(), eds)) peerManager.Validate(ctx, srvHost.ID(), shrexsub.Notification{ @@ -129,7 +129,7 @@ func TestShrexGetter(t *testing.T) { Height: 1, }) - 
namespace, err := addToNamespace(maxNamesapce, 1) + namespace, err := addToNamespace(maxNamespace, 1) require.NoError(t, err) // check for namespace to be not in root require.Len(t, ipld.FilterRootByNamespace(dah, namespace), 0) diff --git a/share/ipld/corrupted_data_test.go b/share/ipld/corrupted_data_test.go index d1d6e6b4d5..0d0af6dd35 100644 --- a/share/ipld/corrupted_data_test.go +++ b/share/ipld/corrupted_data_test.go @@ -25,7 +25,7 @@ func TestNamespaceHasher_CorruptedData(t *testing.T) { t.Cleanup(cancel) net := availability_test.NewTestDAGNet(ctx, t) - requestor := full.Node(net) + requester := full.Node(net) provider, mockBS := availability_test.MockNode(t, net) provider.Availability = full.TestAvailability(t, getters.NewIPLDGetter(provider.BlockService)) net.ConnectAll() @@ -37,15 +37,15 @@ func TestNamespaceHasher_CorruptedData(t *testing.T) { eh := headertest.RandExtendedHeaderWithRoot(t, root) getCtx, cancelGet := context.WithTimeout(ctx, sharesAvailableTimeout) t.Cleanup(cancelGet) - err := requestor.SharesAvailable(getCtx, eh) + err := requester.SharesAvailable(getCtx, eh) require.NoError(t, err) // clear the storage of the requester so that it must retrieve again, then start attacking // we reinitialize the node to clear the eds store - requestor = full.Node(net) + requester = full.Node(net) mockBS.Attacking = true getCtx, cancelGet = context.WithTimeout(ctx, sharesAvailableTimeout) t.Cleanup(cancelGet) - err = requestor.SharesAvailable(getCtx, eh) + err = requester.SharesAvailable(getCtx, eh) require.ErrorIs(t, err, share.ErrNotAvailable) } diff --git a/share/p2p/discovery/backoff.go b/share/p2p/discovery/backoff.go index 70581bc519..7294915727 100644 --- a/share/p2p/discovery/backoff.go +++ b/share/p2p/discovery/backoff.go @@ -91,6 +91,8 @@ func (b *backoffConnector) HasBackoff(p peer.ID) bool { // GC is a perpetual GCing loop. func (b *backoffConnector) GC(ctx context.Context) { ticker := time.NewTicker(gcInterval) + defer ticker.Stop() + for { select { case <-ctx.Done(): diff --git a/share/p2p/discovery/discovery.go b/share/p2p/discovery/discovery.go index 0f44d42dbe..c40c979d16 100644 --- a/share/p2p/discovery/discovery.go +++ b/share/p2p/discovery/discovery.go @@ -117,7 +117,7 @@ func (d *Discovery) Stop(context.Context) error { return nil } -// Peers provides a list of discovered peers in the "full" topic. +// Peers provides a list of discovered peers in the given topic. // If Discovery hasn't found any peers, it blocks until at least one peer is found. func (d *Discovery) Peers(ctx context.Context) ([]peer.ID, error) { return d.set.Peers(ctx) @@ -212,9 +212,9 @@ func (d *Discovery) discoveryLoop(ctx context.Context) { case <-warnTicker.C: if d.set.Size() < d.set.Limit() { log.Warnf( - "Potentially degraded connectivity, unable to discover the desired amount of full node peers in %v. "+ + "Potentially degraded connectivity, unable to discover the desired amount of %s peers in %v. "+ "Number of peers discovered: %d. 
Required: %d.", - logInterval, d.set.Size(), d.set.Limit(), + d.tag, logInterval, d.set.Size(), d.set.Limit(), ) } // Do not break the loop; just continue diff --git a/share/p2p/discovery/discovery_test.go b/share/p2p/discovery/discovery_test.go index c02931e1a4..1d0078196f 100644 --- a/share/p2p/discovery/discovery_test.go +++ b/share/p2p/discovery/discovery_test.go @@ -1,3 +1,5 @@ +// go:build !race + package discovery import ( diff --git a/share/p2p/discovery/metrics.go b/share/p2p/discovery/metrics.go index d0be1c219d..78b62a7d97 100644 --- a/share/p2p/discovery/metrics.go +++ b/share/p2p/discovery/metrics.go @@ -8,6 +8,8 @@ import ( "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/metric" + + "github.com/celestiaorg/celestia-node/libs/utils" ) const ( @@ -24,9 +26,7 @@ const ( advertiseFailedKey = "failed" ) -var ( - meter = otel.Meter("share_discovery") -) +var meter = otel.Meter("share_discovery") type handlePeerResult string @@ -118,9 +118,7 @@ func (m *metrics) observeFindPeers(ctx context.Context, isEnoughPeers bool) { if m == nil { return } - if ctx.Err() != nil { - ctx = context.Background() - } + ctx = utils.ResetContextOnError(ctx) m.discoveryResult.Add(ctx, 1, metric.WithAttributes( @@ -131,9 +129,7 @@ func (m *metrics) observeHandlePeer(ctx context.Context, result handlePeerResult if m == nil { return } - if ctx.Err() != nil { - ctx = context.Background() - } + ctx = utils.ResetContextOnError(ctx) m.handlePeerResult.Add(ctx, 1, metric.WithAttributes( @@ -144,9 +140,7 @@ func (m *metrics) observeAdvertise(ctx context.Context, err error) { if m == nil { return } - if ctx.Err() != nil { - ctx = context.Background() - } + ctx = utils.ResetContextOnError(ctx) m.advertise.Add(ctx, 1, metric.WithAttributes( diff --git a/share/p2p/metrics.go b/share/p2p/metrics.go index fee3b12413..55aefda81d 100644 --- a/share/p2p/metrics.go +++ b/share/p2p/metrics.go @@ -7,6 +7,8 @@ import ( "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/metric" + + "github.com/celestiaorg/celestia-node/libs/utils" ) var meter = otel.Meter("shrex/eds") @@ -35,9 +37,7 @@ func (m *Metrics) ObserveRequests(ctx context.Context, count int64, status statu if m == nil { return } - if ctx.Err() != nil { - ctx = context.Background() - } + ctx = utils.ResetContextOnError(ctx) m.totalRequestCounter.Add(ctx, count, metric.WithAttributes( attribute.String("status", string(status)), diff --git a/share/p2p/params.go b/share/p2p/params.go index f11179293d..6636e38fc5 100644 --- a/share/p2p/params.go +++ b/share/p2p/params.go @@ -45,7 +45,7 @@ func (p *Parameters) Validate() error { return fmt.Errorf("invalid write timeout: %v, %s", p.ServerWriteTimeout, errSuffix) } if p.HandleRequestTimeout <= 0 { - return fmt.Errorf("invalid hadnle request timeout: %v, %s", p.HandleRequestTimeout, errSuffix) + return fmt.Errorf("invalid handle request timeout: %v, %s", p.HandleRequestTimeout, errSuffix) } if p.ConcurrencyLimit <= 0 { return fmt.Errorf("invalid concurrency limit: %s", errSuffix) diff --git a/share/p2p/peers/manager.go b/share/p2p/peers/manager.go index e39a181150..0ae21ff015 100644 --- a/share/p2p/peers/manager.go +++ b/share/p2p/peers/manager.go @@ -27,8 +27,6 @@ import ( const ( // ResultNoop indicates operation was successful and no extra action is required ResultNoop result = "result_noop" - // ResultSynced will save the status of pool as "synced" and will remove peers from it - ResultSynced = "result_synced" // ResultCooldownPeer will put 
returned peer on cooldown, meaning it won't be available by Peer // method for some time ResultCooldownPeer = "result_cooldown_peer" @@ -39,6 +37,10 @@ const ( // eventbusBufSize is the size of the buffered channel to handle // events in libp2p eventbusBufSize = 32 + + // storedPoolsAmount is the amount of pools for recent headers that will be stored in the peer + // manager + storedPoolsAmount = 10 ) type result string @@ -56,14 +58,17 @@ type Manager struct { host host.Host connGater *conngater.BasicConnectionGater - // pools collecting peers from shrexSub + // pools collecting peers from shrexSub and stores them by datahash pools map[string]*syncPool - // messages from shrex.Sub with height below initialHeight will be ignored, since we don't need to - // track peers for those headers + + // initialHeight is the height of the first header received from headersub initialHeight atomic.Uint64 + // messages from shrex.Sub with height below storeFrom will be ignored, since we don't need to + // track peers for those headers + storeFrom atomic.Uint64 - // fullNodes collects full nodes peer.ID found via discovery - fullNodes *pool + // nodes collects nodes' peer.IDs found via discovery + nodes *pool // hashes that are not in the chain blacklistedHashes map[string]bool @@ -85,11 +90,8 @@ type syncPool struct { // isValidatedDataHash indicates if datahash was validated by receiving corresponding extended // header from headerSub isValidatedDataHash atomic.Bool - // headerHeight is the height of header corresponding to syncpool - headerHeight atomic.Uint64 - // isSynced will be true if DoneFunc was called with ResultSynced. It indicates that given datahash - // was synced and peer-manager no longer need to keep peers for it - isSynced atomic.Bool + // height is the height of the header that corresponds to datahash + height uint64 // createdAt is the syncPool creation time createdAt time.Time } @@ -121,7 +123,7 @@ func NewManager( } } - s.fullNodes = newPool(s.params.PeerCooldown) + s.nodes = newPool(s.params.PeerCooldown) return s, nil } @@ -186,29 +188,28 @@ func (m *Manager) Stop(ctx context.Context) error { } // Peer returns peer collected from shrex.Sub for given datahash if any available. -// If there is none, it will look for full nodes collected from discovery. If there is no discovered -// full nodes, it will wait until any peer appear in either source or timeout happen. +// If there is none, it will look for nodes collected from discovery. If there is no discovered +// nodes, it will wait until any peer appear in either source or timeout happen. 
// After fetching data using given peer, caller is required to call returned DoneFunc using // appropriate result value -func (m *Manager) Peer( - ctx context.Context, datahash share.DataHash, +func (m *Manager) Peer(ctx context.Context, datahash share.DataHash, height uint64, ) (peer.ID, DoneFunc, error) { - p := m.validatedPool(datahash.String()) + p := m.validatedPool(datahash.String(), height) // first, check if a peer is available for the given datahash peerID, ok := p.tryGet() if ok { if m.removeIfUnreachable(p, peerID) { - return m.Peer(ctx, datahash) + return m.Peer(ctx, datahash, height) } return m.newPeer(ctx, datahash, peerID, sourceShrexSub, p.len(), 0) } - // if no peer for datahash is currently available, try to use full node + // if no peer for datahash is currently available, try to use node // obtained from discovery - peerID, ok = m.fullNodes.tryGet() + peerID, ok = m.nodes.tryGet() if ok { - return m.newPeer(ctx, datahash, peerID, sourceFullNodes, m.fullNodes.len(), 0) + return m.newPeer(ctx, datahash, peerID, sourceFullNodes, m.nodes.len(), 0) } // no peers are available right now, wait for the first one @@ -216,30 +217,30 @@ func (m *Manager) Peer( select { case peerID = <-p.next(ctx): if m.removeIfUnreachable(p, peerID) { - return m.Peer(ctx, datahash) + return m.Peer(ctx, datahash, height) } return m.newPeer(ctx, datahash, peerID, sourceShrexSub, p.len(), time.Since(start)) - case peerID = <-m.fullNodes.next(ctx): - return m.newPeer(ctx, datahash, peerID, sourceFullNodes, m.fullNodes.len(), time.Since(start)) + case peerID = <-m.nodes.next(ctx): + return m.newPeer(ctx, datahash, peerID, sourceFullNodes, m.nodes.len(), time.Since(start)) case <-ctx.Done(): return "", nil, ctx.Err() } } -// UpdateFullNodePool is called by discovery when new full node is discovered or removed -func (m *Manager) UpdateFullNodePool(peerID peer.ID, isAdded bool) { +// UpdateNodePool is called by discovery when new node is discovered or removed. 
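On the caller side (ShrexGetter above is the main consumer), the new Peer signature means passing the header height alongside the data hash and reporting the outcome through the returned DoneFunc. A rough usage sketch, in which requestFromPeer is a hypothetical stand-in for the actual shrex request:

package example

import (
	"context"

	"github.com/libp2p/go-libp2p/core/peer"

	"github.com/celestiaorg/celestia-node/header"
	"github.com/celestiaorg/celestia-node/share/p2p/peers"
)

// requestFromPeer is a hypothetical placeholder for the real shrex-eds/nd request.
func requestFromPeer(ctx context.Context, p peer.ID) error { return nil }

// fetchFromManagedPeer mirrors how a getter consumes the new Peer signature.
func fetchFromManagedPeer(ctx context.Context, m *peers.Manager, h *header.ExtendedHeader) error {
	peerID, done, err := m.Peer(ctx, h.DAH.Hash(), h.Height())
	if err != nil {
		return err
	}
	if err := requestFromPeer(ctx, peerID); err != nil {
		done(peers.ResultCooldownPeer) // let the manager cool the peer down
		return err
	}
	done(peers.ResultNoop) // success; ResultSynced is gone, no extra action needed
	return nil
}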
+func (m *Manager) UpdateNodePool(peerID peer.ID, isAdded bool) { if isAdded { if m.isBlacklistedPeer(peerID) { log.Debugw("got blacklisted peer from discovery", "peer", peerID.String()) return } - m.fullNodes.add(peerID) - log.Debugw("added to full nodes", "peer", peerID) + m.nodes.add(peerID) + log.Debugw("added to discovered nodes pool", "peer", peerID) return } - log.Debugw("removing peer from discovered full nodes", "peer", peerID.String()) - m.fullNodes.remove(peerID) + log.Debugw("removing peer from discovered nodes pool", "peer", peerID.String()) + m.nodes.remove(peerID) } func (m *Manager) newPeer( @@ -270,14 +271,12 @@ func (m *Manager) doneFunc(datahash share.DataHash, peerID peer.ID, source peerS m.metrics.observeDoneResult(source, result) switch result { case ResultNoop: - case ResultSynced: - m.markPoolAsSynced(datahash.String()) case ResultCooldownPeer: if source == sourceFullNodes { - m.fullNodes.putOnCooldown(peerID) + m.nodes.putOnCooldown(peerID) return } - m.getOrCreatePool(datahash.String()).putOnCooldown(peerID) + m.getPool(datahash.String()).putOnCooldown(peerID) case ResultBlacklistPeer: m.blacklistPeers(reasonMisbehave, peerID) } @@ -298,17 +297,21 @@ func (m *Manager) subscribeHeader(ctx context.Context, headerSub libhead.Subscri log.Errorw("get next header from sub", "err", err) continue } - m.validatedPool(h.DataHash.String()) + m.validatedPool(h.DataHash.String(), h.Height()) // store first header for validation purposes if m.initialHeight.CompareAndSwap(0, h.Height()) { log.Debugw("stored initial height", "height", h.Height()) } + + // update storeFrom if header height + m.storeFrom.Store(uint64(max(0, int(h.Height())-storedPoolsAmount))) + log.Debugw("updated lowest stored height", "height", h.Height()) } } // subscribeDisconnectedPeers subscribes to libp2p connectivity events and removes disconnected -// peers from full nodes pool +// peers from nodes pool func (m *Manager) subscribeDisconnectedPeers(ctx context.Context, sub event.Subscription) { defer close(m.disconnectedPeersDone) defer sub.Close() @@ -321,13 +324,14 @@ func (m *Manager) subscribeDisconnectedPeers(ctx context.Context, sub event.Subs log.Fatal("Subscription for connectedness events is closed.") //nolint:gocritic return } - // listen to disconnect event to remove peer from full nodes pool + // listen to disconnect event to remove peer from nodes pool connStatus := e.(event.EvtPeerConnectednessChanged) if connStatus.Connectedness == network.NotConnected { peer := connStatus.Peer - if m.fullNodes.has(peer) { - log.Debugw("peer disconnected, removing from full nodes", "peer", peer.String()) - m.fullNodes.remove(peer) + if m.nodes.has(peer) { + log.Debugw("peer disconnected, removing from discovered nodes pool", + "peer", peer.String()) + m.nodes.remove(peer) } } } @@ -355,39 +359,36 @@ func (m *Manager) Validate(_ context.Context, peerID peer.ID, msg shrexsub.Notif return pubsub.ValidationReject } - if msg.Height == 0 { - logger.Debug("received message with 0 height") - return pubsub.ValidationReject - } - - if msg.Height < m.initialHeight.Load() { - // we can use peers from discovery for headers before the first one from headerSub - // if we allow pool creation for those headers, there is chance the pool will not be validated in - // time and will be false-positively trigger blacklisting of hash and all peers that sent msgs for - // that hash + if msg.Height < m.storeFrom.Load() { logger.Debug("received message for past header") return pubsub.ValidationIgnore } - p := 
m.getOrCreatePool(msg.DataHash.String()) - p.headerHeight.Store(msg.Height) + p := m.getOrCreatePool(msg.DataHash.String(), msg.Height) logger.Debugw("got hash from shrex-sub") p.add(peerID) if p.isValidatedDataHash.Load() { - // add peer to full nodes pool only if datahash has been already validated - m.fullNodes.add(peerID) + // add peer to discovered nodes pool only if datahash has been already validated + m.nodes.add(peerID) } return pubsub.ValidationIgnore } -func (m *Manager) getOrCreatePool(datahash string) *syncPool { +func (m *Manager) getPool(datahash string) *syncPool { + m.lock.Lock() + defer m.lock.Unlock() + return m.pools[datahash] +} + +func (m *Manager) getOrCreatePool(datahash string, height uint64) *syncPool { m.lock.Lock() defer m.lock.Unlock() p, ok := m.pools[datahash] if !ok { p = &syncPool{ + height: height, pool: newPool(m.params.PeerCooldown), createdAt: time.Now(), } @@ -408,7 +409,7 @@ func (m *Manager) blacklistPeers(reason blacklistPeerReason, peerIDs ...peer.ID) continue } - m.fullNodes.remove(peerID) + m.nodes.remove(peerID) // add peer to the blacklist, so we can't connect to it in the future. err := m.connGater.BlockPeer(peerID) if err != nil { @@ -432,19 +433,19 @@ func (m *Manager) isBlacklistedHash(hash share.DataHash) bool { return m.blacklistedHashes[hash.String()] } -func (m *Manager) validatedPool(hashStr string) *syncPool { - p := m.getOrCreatePool(hashStr) +func (m *Manager) validatedPool(hashStr string, height uint64) *syncPool { + p := m.getOrCreatePool(hashStr, height) if p.isValidatedDataHash.CompareAndSwap(false, true) { log.Debugw("pool marked validated", "datahash", hashStr) - // if pool is proven to be valid, add all collected peers to full nodes - m.fullNodes.add(p.peers()...) + // if pool is proven to be valid, add all collected peers to discovered nodes + m.nodes.add(p.peers()...) } return p } // removeIfUnreachable removes peer from some pool if it is blacklisted or disconnected func (m *Manager) removeIfUnreachable(pool *syncPool, peerID peer.ID) bool { - if m.isBlacklistedPeer(peerID) || !m.fullNodes.has(peerID) { + if m.isBlacklistedPeer(peerID) || !m.nodes.has(peerID) { log.Debugw("removing outdated peer from pool", "peer", peerID.String()) pool.remove(peerID) return true @@ -482,12 +483,24 @@ func (m *Manager) cleanUp() []peer.ID { addToBlackList := make(map[peer.ID]struct{}) for h, p := range m.pools { - if !p.isValidatedDataHash.Load() && time.Since(p.createdAt) > m.params.PoolValidationTimeout { - delete(m.pools, h) - if p.headerHeight.Load() < m.initialHeight.Load() { - // outdated pools could still be valid even if not validated, no need to blacklist - continue + if p.isValidatedDataHash.Load() { + // remove pools that are outdated + if p.height < m.storeFrom.Load() { + delete(m.pools, h) } + continue + } + + // can't validate datahashes below initial height + if p.height < m.initialHeight.Load() { + delete(m.pools, h) + continue + } + + // find pools that are not validated in time + if time.Since(p.createdAt) > m.params.PoolValidationTimeout { + delete(m.pools, h) + log.Debug("blacklisting datahash with all corresponding peers", "hash", h, "peer_list", p.peersList) @@ -507,17 +520,3 @@ func (m *Manager) cleanUp() []peer.ID { } return blacklist } - -func (m *Manager) markPoolAsSynced(datahash string) { - p := m.getOrCreatePool(datahash) - if p.isSynced.CompareAndSwap(false, true) { - p.isSynced.Store(true) - p.reset() - } -} - -func (p *syncPool) add(peers ...peer.ID) { - if !p.isSynced.Load() { - p.pool.add(peers...) 
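The cleanUp logic above, together with the storeFrom updates in subscribeHeader and the height check in Validate, amounts to a sliding retention window of storedPoolsAmount recent heights. A small sketch of that arithmetic, assuming the constant keeps its value of 10:

package example

// storedPoolsAmount mirrors the constant introduced in the manager above.
const storedPoolsAmount = 10

// lowestStoredHeight is the first height whose pools are still kept after a
// header at headerHeight has been received from headerSub.
func lowestStoredHeight(headerHeight uint64) uint64 {
	if headerHeight < storedPoolsAmount {
		return 0
	}
	return headerHeight - storedPoolsAmount
}

// keepPool reports whether a validated pool at poolHeight survives cleanUp.
func keepPool(poolHeight, headerHeight uint64) bool {
	return poolHeight >= lowestStoredHeight(headerHeight)
}

For example, once headerSub delivers a header at height 25, shrexsub messages for heights below 15 are ignored and validated pools below 15 are deleted on the next cleanUp pass.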
- } -} diff --git a/share/p2p/peers/manager_test.go b/share/p2p/peers/manager_test.go index 94ec5d5ea2..52a6fd7302 100644 --- a/share/p2p/peers/manager_test.go +++ b/share/p2p/peers/manager_test.go @@ -2,12 +2,12 @@ package peers import ( "context" - sync2 "sync" + "sync" "testing" "time" "github.com/ipfs/go-datastore" - "github.com/ipfs/go-datastore/sync" + dssync "github.com/ipfs/go-datastore/sync" dht "github.com/libp2p/go-libp2p-kad-dht" pubsub "github.com/libp2p/go-libp2p-pubsub" "github.com/libp2p/go-libp2p/core/host" @@ -26,10 +26,9 @@ import ( "github.com/celestiaorg/celestia-node/share/p2p/shrexsub" ) -// TODO: add broadcast to tests func TestManager(t *testing.T) { t.Run("Validate pool by headerSub", func(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), time.Second*50) + ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) t.Cleanup(cancel) // create headerSub mock @@ -64,21 +63,16 @@ func TestManager(t *testing.T) { result := manager.Validate(ctx, peerID, msg) require.Equal(t, pubsub.ValidationIgnore, result) - pID, done, err := manager.Peer(ctx, h.DataHash.Bytes()) + pID, _, err := manager.Peer(ctx, h.DataHash.Bytes(), h.Height()) require.NoError(t, err) require.Equal(t, peerID, pID) // check pool validation - require.True(t, manager.getOrCreatePool(h.DataHash.String()).isValidatedDataHash.Load()) - - done(ResultSynced) - // pool should not be removed after success - require.Len(t, manager.pools, 1) - require.Len(t, manager.getOrCreatePool(h.DataHash.String()).pool.peersList, 0) + require.True(t, manager.getPool(h.DataHash.String()).isValidatedDataHash.Load()) }) t.Run("validator", func(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), time.Second*50) + ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) t.Cleanup(cancel) // create headerSub mock @@ -100,7 +94,7 @@ func TestManager(t *testing.T) { require.Equal(t, pubsub.ValidationIgnore, result) // mark peer as misbehaved to blacklist it - pID, done, err := manager.Peer(ctx, h.DataHash.Bytes()) + pID, done, err := manager.Peer(ctx, h.DataHash.Bytes(), h.Height()) require.NoError(t, err) require.Equal(t, peerID, pID) manager.params.EnableBlackListing = true @@ -132,19 +126,21 @@ func TestManager(t *testing.T) { // create unvalidated pool peerID := peer.ID("peer1") msg := shrexsub.Notification{ - DataHash: share.DataHash("datahash1"), + DataHash: share.DataHash("datahash1datahash1datahash1datahash1datahash1"), Height: 2, } manager.Validate(ctx, peerID, msg) // create validated pool validDataHash := share.DataHash("datahash2") - manager.fullNodes.add("full") // add FN to unblock Peer call - manager.Peer(ctx, validDataHash) //nolint:errcheck + manager.nodes.add("full") // add FN to unblock Peer call + manager.Peer(ctx, validDataHash, h.Height()) //nolint:errcheck + require.Len(t, manager.pools, 3) // trigger cleanup blacklisted := manager.cleanUp() require.Contains(t, blacklisted, peerID) + require.Len(t, manager.pools, 2) // messages with blacklisted hash should be rejected right away peerID2 := peer.ID("peer2") @@ -170,10 +166,9 @@ func TestManager(t *testing.T) { // add peers to fullnodes, imitating discovery add peers := []peer.ID{"peer1", "peer2", "peer3"} - manager.fullNodes.add(peers...) + manager.nodes.add(peers...) 
- peerID, done, err := manager.Peer(ctx, h.DataHash.Bytes()) - done(ResultSynced) + peerID, _, err := manager.Peer(ctx, h.DataHash.Bytes(), h.Height()) require.NoError(t, err) require.Contains(t, peers, peerID) @@ -195,7 +190,7 @@ func TestManager(t *testing.T) { // make sure peers are not returned before timeout timeoutCtx, cancel := context.WithTimeout(context.Background(), time.Millisecond) t.Cleanup(cancel) - _, _, err = manager.Peer(timeoutCtx, h.DataHash.Bytes()) + _, _, err = manager.Peer(timeoutCtx, h.DataHash.Bytes(), h.Height()) require.ErrorIs(t, err, context.DeadlineExceeded) peers := []peer.ID{"peer1", "peer2", "peer3"} @@ -204,14 +199,13 @@ func TestManager(t *testing.T) { doneCh := make(chan struct{}) go func() { defer close(doneCh) - peerID, done, err := manager.Peer(ctx, h.DataHash.Bytes()) - done(ResultSynced) + peerID, _, err := manager.Peer(ctx, h.DataHash.Bytes(), h.Height()) require.NoError(t, err) require.Contains(t, peers, peerID) }() // send peers - manager.fullNodes.add(peers...) + manager.nodes.add(peers...) // wait for peer to be received select { @@ -223,38 +217,7 @@ func TestManager(t *testing.T) { stopManager(t, manager) }) - t.Run("mark pool synced", func(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) - t.Cleanup(cancel) - - h := testHeader() - headerSub := newSubLock(h, nil) - - // start test manager - manager, err := testManager(ctx, headerSub) - require.NoError(t, err) - - peerID, msg := peer.ID("peer1"), newShrexSubMsg(h) - result := manager.Validate(ctx, peerID, msg) - require.Equal(t, pubsub.ValidationIgnore, result) - - pID, done, err := manager.Peer(ctx, h.DataHash.Bytes()) - require.NoError(t, err) - require.Equal(t, peerID, pID) - done(ResultSynced) - - // check pool is soft deleted and marked synced - pool := manager.getOrCreatePool(h.DataHash.String()) - require.Len(t, pool.peersList, 0) - require.True(t, pool.isSynced.Load()) - - // add peer on synced pool should be noop - result = manager.Validate(ctx, "peer2", msg) - require.Equal(t, pubsub.ValidationIgnore, result) - require.Len(t, pool.peersList, 0) - }) - - t.Run("shrexSub sends a message lower than first headerSub header height, msg first", func(t *testing.T) { + t.Run("shrexSub sends a message lower than first headerSub header height, headerSub first", func(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) t.Cleanup(cancel) @@ -278,12 +241,16 @@ func TestManager(t *testing.T) { } result := manager.Validate(ctx, "peer", msg) require.Equal(t, pubsub.ValidationIgnore, result) + // pool will be created for first shrexSub message + require.Len(t, manager.pools, 2) - // amount of pools should not change + blacklisted := manager.cleanUp() + require.Empty(t, blacklisted) + // trigger cleanup and outdated pool should be removed require.Len(t, manager.pools, 1) }) - t.Run("shrexSub sends a message lower than first headerSub header height, headerSub first", func(t *testing.T) { + t.Run("shrexSub sends a message lower than first headerSub header height, shrexSub first", func(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) t.Cleanup(cancel) @@ -303,18 +270,50 @@ func TestManager(t *testing.T) { result := manager.Validate(ctx, "peer", msg) require.Equal(t, pubsub.ValidationIgnore, result) - // unlock header sub after message validator + // pool will be created for first shrexSub message + require.Len(t, manager.pools, 1) + + // unlock headerSub to allow it to send next message 
require.NoError(t, headerSub.wait(ctx, 1)) - // pool will be created for first headerSub header datahash + // second pool should be created require.Len(t, manager.pools, 2) - // trigger cleanup and check that no peers or hashes were blacklisted - manager.params.PoolValidationTimeout = 0 + // trigger cleanup and outdated pool should be removed blacklisted := manager.cleanUp() + require.Len(t, manager.pools, 1) + + // check that no peers or hashes were blacklisted + manager.params.PoolValidationTimeout = 0 require.Len(t, blacklisted, 0) require.Len(t, manager.blacklistedHashes, 0) + }) + + t.Run("pools store window", func(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) + t.Cleanup(cancel) + + h := testHeader() + h.RawHeader.Height = storedPoolsAmount * 2 + headerSub := newSubLock(h, nil) + + // start test manager + manager, err := testManager(ctx, headerSub) + require.NoError(t, err) + + // unlock headerSub to read first header + require.NoError(t, headerSub.wait(ctx, 1)) + // pool will be created for first headerSub header datahash + require.Len(t, manager.pools, 1) + + // create shrexSub msg with height lower than storedPoolsAmount + msg := shrexsub.Notification{ + DataHash: share.DataHash("datahash"), + Height: h.Height() - storedPoolsAmount - 3, + } + result := manager.Validate(ctx, "peer", msg) + require.Equal(t, pubsub.ValidationIgnore, result) - // outdated pool should be removed + // shrexSub message should be discarded and amount of pools should not change require.Len(t, manager.pools, 1) }) } @@ -355,7 +354,7 @@ func TestIntegration(t *testing.T) { })) // FN should get message - gotPeer, _, err := fnPeerManager.Peer(ctx, randHash) + gotPeer, _, err := fnPeerManager.Peer(ctx, randHash, 13) require.NoError(t, err) // check that gotPeer matched bridge node @@ -409,7 +408,7 @@ func TestIntegration(t *testing.T) { require.NoError(t, err) // init peer manager for full node - connGater, err := conngater.NewBasicConnectionGater(sync.MutexWrap(datastore.NewMapDatastore())) + connGater, err := conngater.NewBasicConnectionGater(dssync.MutexWrap(datastore.NewMapDatastore())) require.NoError(t, err) fnPeerManager, err := NewManager( DefaultParameters(), @@ -435,7 +434,7 @@ func TestIntegration(t *testing.T) { fnHost, routingdisc.NewRoutingDiscovery(fnRouter), fullNodesTag, - discovery.WithOnPeersUpdate(fnPeerManager.UpdateFullNodePool), + discovery.WithOnPeersUpdate(fnPeerManager.UpdateNodePool), discovery.WithOnPeersUpdate(checkDiscoveredPeer), ) require.NoError(t, fnDisc.Start(ctx)) @@ -451,7 +450,7 @@ func TestIntegration(t *testing.T) { select { case <-waitCh: - require.Contains(t, fnPeerManager.fullNodes.peersList, bnHost.ID()) + require.Contains(t, fnPeerManager.nodes.peersList, bnHost.ID()) case <-ctx.Done(): require.NoError(t, ctx.Err()) } @@ -469,7 +468,7 @@ func testManager(ctx context.Context, headerSub libhead.Subscriber[*header.Exten return nil, err } - connGater, err := conngater.NewBasicConnectionGater(sync.MutexWrap(datastore.NewMapDatastore())) + connGater, err := conngater.NewBasicConnectionGater(dssync.MutexWrap(datastore.NewMapDatastore())) if err != nil { return nil, err } @@ -504,7 +503,7 @@ func testHeader() *header.ExtendedHeader { type subLock struct { next chan struct{} - wg *sync2.WaitGroup + wg *sync.WaitGroup expected []*header.ExtendedHeader } @@ -530,7 +529,7 @@ func (s subLock) release(ctx context.Context) error { } func newSubLock(expected ...*header.ExtendedHeader) *subLock { - wg := &sync2.WaitGroup{} + wg := 
&sync.WaitGroup{} wg.Add(1) return &subLock{ next: make(chan struct{}), diff --git a/share/p2p/peers/metrics.go b/share/p2p/peers/metrics.go index 95d1ce65d9..eb42254430 100644 --- a/share/p2p/peers/metrics.go +++ b/share/p2p/peers/metrics.go @@ -12,6 +12,7 @@ import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/metric" + "github.com/celestiaorg/celestia-node/libs/utils" "github.com/celestiaorg/celestia-node/share/p2p/shrexsub" ) @@ -39,19 +40,14 @@ const ( poolStatusKey = "pool_status" poolStatusCreated poolStatus = "created" poolStatusValidated poolStatus = "validated" - poolStatusSynced poolStatus = "synced" poolStatusBlacklisted poolStatus = "blacklisted" // Pool status model: // created(unvalidated) // / \ - // validated(unsynced) blacklisted - // | - // synced + // validated blacklisted ) -var ( - meter = otel.Meter("shrex_peer_manager") -) +var meter = otel.Meter("shrex_peer_manager") type blacklistPeerReason string @@ -141,10 +137,10 @@ func initMetrics(manager *Manager) (*metrics, error) { attribute.String(poolStatusKey, string(poolStatus)))) } - observer.ObserveInt64(fullNodesPool, int64(manager.fullNodes.len()), + observer.ObserveInt64(fullNodesPool, int64(manager.nodes.len()), metric.WithAttributes( attribute.String(peerStatusKey, string(peerStatusActive)))) - observer.ObserveInt64(fullNodesPool, int64(manager.fullNodes.cooldown.len()), + observer.ObserveInt64(fullNodesPool, int64(manager.nodes.cooldown.len()), metric.WithAttributes( attribute.String(peerStatusKey, string(peerStatusCooldown)))) @@ -172,9 +168,7 @@ func (m *metrics) observeGetPeer( if m == nil { return } - if ctx.Err() != nil { - ctx = context.Background() - } + ctx = utils.ResetContextOnError(ctx) m.getPeer.Add(ctx, 1, metric.WithAttributes( attribute.String(sourceKey, string(source)), @@ -225,9 +219,7 @@ func (m *metrics) validationObserver(validator shrexsub.ValidatorFn) shrexsub.Va resStr = "unknown" } - if ctx.Err() != nil { - ctx = context.Background() - } + ctx = utils.ResetContextOnError(ctx) m.validationResult.Add(ctx, 1, metric.WithAttributes( @@ -266,11 +258,6 @@ func (m *Manager) shrexPools() map[poolStatus]int64 { continue } - if p.isSynced.Load() { - shrexPools[poolStatusSynced]++ - continue - } - // pool is validated but not synced shrexPools[poolStatusValidated]++ } diff --git a/share/p2p/peers/pool.go b/share/p2p/peers/pool.go index d0cc45ac44..365ef0306d 100644 --- a/share/p2p/peers/pool.go +++ b/share/p2p/peers/pool.go @@ -224,12 +224,3 @@ func (p *pool) len() int { defer p.m.RUnlock() return p.activeCount } - -// reset will reset the pool to its initial state. 
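The utils.ResetContextOnError helper substituted throughout the metrics code (eds, discovery, shrex and the peers metrics above) is not shown in this diff, but judging from the inline blocks it replaces, it is presumably equivalent to:

package utils

import "context"

// ResetContextOnError swaps an already-cancelled (or expired) context for a
// fresh background one, so telemetry recorded after the operation's context
// ended is not silently dropped.
func ResetContextOnError(ctx context.Context) context.Context {
	if ctx.Err() != nil {
		return context.Background()
	}
	return ctx
}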
-func (p *pool) reset() { - lock := &p.m - lock.Lock() - defer lock.Lock() - // swap the pool with an empty one - *p = *newPool(time.Second) -} diff --git a/share/p2p/shrexsub/pubsub_test.go b/share/p2p/shrexsub/pubsub_test.go index dcf85515e8..85b16c055b 100644 --- a/share/p2p/shrexsub/pubsub_test.go +++ b/share/p2p/shrexsub/pubsub_test.go @@ -45,7 +45,7 @@ func TestPubSub(t *testing.T) { errExpected bool }{ { - name: "vaild height, valid hash", + name: "valid height, valid hash", notif: Notification{ Height: 1, DataHash: rand.Bytes(32), diff --git a/specs/.gitignore b/specs/.gitignore new file mode 100644 index 0000000000..7585238efe --- /dev/null +++ b/specs/.gitignore @@ -0,0 +1 @@ +book diff --git a/specs/book.toml b/specs/book.toml new file mode 100644 index 0000000000..2ab3d5a398 --- /dev/null +++ b/specs/book.toml @@ -0,0 +1,13 @@ +[book] +authors = ["Celestia Labs"] +language = "en" +multilingual = false +src = "src" +title = "Celestia Node Specification" + +[output.html] +git-repository-url = "https://github.com/celestiaorg/celestia-node" + +[preprocessor.toc] +command = "mdbook-toc" +renderer = ["html"] diff --git a/specs/src/SUMMARY.md b/specs/src/SUMMARY.md new file mode 100644 index 0000000000..dd6d59d972 --- /dev/null +++ b/specs/src/SUMMARY.md @@ -0,0 +1,3 @@ +# Summary + +- [WIP](./WIP.md) diff --git a/specs/src/WIP.md b/specs/src/WIP.md new file mode 100644 index 0000000000..85e6ff194b --- /dev/null +++ b/specs/src/WIP.md @@ -0,0 +1 @@ +# WIP diff --git a/state/core_access.go b/state/core_access.go index 2a49e70a03..c3fbd4836a 100644 --- a/state/core_access.go +++ b/state/core_access.go @@ -181,7 +181,7 @@ func (ca *CoreAccessor) constructSignedTx( } // SubmitPayForBlob builds, signs, and synchronously submits a MsgPayForBlob. It blocks until the -// transaction is committed and returns the TxReponse. If gasLim is set to 0, the method will +// transaction is committed and returns the TxResponse. If gasLim is set to 0, the method will // automatically estimate the gas limit. If the fee is negative, the method will use the nodes min // gas price multiplied by the gas limit. func (ca *CoreAccessor) SubmitPayForBlob( @@ -573,10 +573,6 @@ func (ca *CoreAccessor) queryMinimumGasPrice( return coins.AmountOf(app.BondDenom).MustFloat64(), nil } -func (ca *CoreAccessor) IsStopped(context.Context) bool { - return ca.ctx.Err() != nil -} - func withFee(fee Int) apptypes.TxBuilderOption { gasFee := sdktypes.NewCoins(sdktypes.NewCoin(app.BondDenom, fee)) return apptypes.SetFeeAmount(gasFee) diff --git a/state/core_access_test.go b/state/core_access_test.go index 69e9f251c0..ad7b916ea3 100644 --- a/state/core_access_test.go +++ b/state/core_access_test.go @@ -1,3 +1,5 @@ +//go:build !race + package state import ( @@ -27,7 +29,7 @@ func TestSubmitPayForBlob(t *testing.T) { tmCfg.Consensus.TimeoutCommit = time.Millisecond * 1 appConf := testnode.DefaultAppConfig() appConf.API.Enable = true - appConf.MinGasPrices = fmt.Sprintf("0.1%s", app.BondDenom) + appConf.MinGasPrices = fmt.Sprintf("0.002%s", app.BondDenom) config := testnode.DefaultConfig().WithTendermintConfig(tmCfg).WithAppConfig(appConf).WithAccounts(accounts) cctx, rpcAddr, grpcAddr := testnode.NewNetwork(t, config)
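The SubmitPayForBlob docs above describe the fee fallback: a negative fee is replaced by the node's minimum gas price multiplied by the (possibly estimated) gas limit. A rough sketch of that calculation; the 70,000 gas figure is purely illustrative:

package main

import (
	"fmt"
	"math"
)

// fallbackFee applies the rule described in the SubmitPayForBlob docs: keep a
// non-negative fee as-is, otherwise charge minGasPrice * gasLim, rounded up to
// a whole unit of the bond denom.
func fallbackFee(fee int64, minGasPrice float64, gasLim uint64) int64 {
	if fee >= 0 {
		return fee
	}
	return int64(math.Ceil(minGasPrice * float64(gasLim)))
}

func main() {
	// with the test's 0.002utia minimum gas price and an (illustrative)
	// 70,000 gas limit, the fallback fee comes out to 140utia
	fmt.Println(fallbackFee(-1, 0.002, 70_000))
}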